diff --git a/Calorimeter/CaloRecAlgs/CMakeLists.txt b/Calorimeter/CaloRecAlgs/CMakeLists.txt index 219db92a3a8927ae9cc025bea46985387e072ce0..75d9d09a23eb857555e55848ee16b1d2b98b89f3 100644 --- a/Calorimeter/CaloRecAlgs/CMakeLists.txt +++ b/Calorimeter/CaloRecAlgs/CMakeLists.txt @@ -9,7 +9,7 @@ atlas_subdir( CaloRecAlgs ) atlas_add_component( CaloRecAlgs src/*.cxx src/*.h src/components/*.cxx - LINK_LIBRARIES AthenaBaseComps StoreGateLib xAODFaserCalorimeter xAODFaserWaveform) + LINK_LIBRARIES AthenaKernel GaudiKernel AthenaBaseComps AthenaPoolUtilities StoreGateLib xAODFaserCalorimeter xAODFaserWaveform CaloRecToolsLib) atlas_install_python_modules( python/*.py ) diff --git a/Calorimeter/CaloRecAlgs/python/CaloRecAlgsConfig.py b/Calorimeter/CaloRecAlgs/python/CaloRecAlgsConfig.py index 034bf08b5b36a4620567f6716eca5d31338a1c04..83839b9cba1a4c6a50eef52852ed8ee2127dfaa7 100644 --- a/Calorimeter/CaloRecAlgs/python/CaloRecAlgsConfig.py +++ b/Calorimeter/CaloRecAlgs/python/CaloRecAlgsConfig.py @@ -1,30 +1,31 @@ -""" Define methods used to instantiate configured Waveform reconstruction tools and algorithms +""" Define methods used to instantiate configured Calorimeter Calibration reconstruction tools and algorithms -Copyright (C) 2020 CERN for the benefit of the FASER collaboration +Copyright (C) 2022 CERN for the benefit of the FASER collaboration """ from AthenaConfiguration.ComponentAccumulator import ComponentAccumulator from AthenaConfiguration.ComponentFactory import CompFactory - +from IOVDbSvc.IOVDbSvcConfig import addFolders from OutputStreamAthenaPool.OutputStreamConfig import OutputStreamCfg -#CalorimeterReconstructionTool = CompFactory.CalorimeterReconstructionTool +from CaloRecTools.CaloRecToolConfig import CaloRecToolCfg # One stop shopping for normal FASER data def CalorimeterReconstructionCfg(flags, **kwargs): """ Return all algorithms and tools for Waveform reconstruction """ acc = ComponentAccumulator() - #tool = CalorimeterReconstructionTool(name="CaloRecTool", **kwargs) - kwargs.setdefault("CaloWaveHitContainerKey", "CaloWaveformHits") kwargs.setdefault("PreshowerWaveHitContainerKey", "PreshowerWaveformHits") kwargs.setdefault("CaloHitContainerKey", "CaloHits") - #kwargs.setdefault("CalorimeterReconstructionTool", tool) + kwargs.setdefault("PreshowerHitContainerKey", "PreshowerHits") recoAlg = CompFactory.CaloRecAlg("CaloRecAlg", **kwargs) - #recoAlg.CalorimeterReconstructionTool = tool acc.addEventAlgo(recoAlg) + dbInstance = kwargs.get("dbInstance", "TRIGGER_OFL") # what should this be set to??? 
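+    # Note: this dbInstance value is not used directly here; the conditions folders are added by CaloRecToolCfg below, which reads its own "dbInstance" from its kwargs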
+ + acc.merge(CaloRecToolCfg(flags)) + return acc def CalorimeterReconstructionOutputCfg(flags, **kwargs): diff --git a/Calorimeter/CaloRecAlgs/src/CaloRecAlg.cxx b/Calorimeter/CaloRecAlgs/src/CaloRecAlg.cxx index 4b4d6f158f29478a60443381c5292d254d1c665f..12ddca3a44a5ba4691ff3feffa2adae37d19e2d5 100644 --- a/Calorimeter/CaloRecAlgs/src/CaloRecAlg.cxx +++ b/Calorimeter/CaloRecAlgs/src/CaloRecAlg.cxx @@ -1,4 +1,5 @@ #include "CaloRecAlg.h" +#include <math.h> CaloRecAlg::CaloRecAlg(const std::string& name, ISvcLocator* pSvcLocator) @@ -6,13 +7,9 @@ CaloRecAlg::CaloRecAlg(const std::string& name, } -StatusCode -CaloRecAlg::initialize() { +StatusCode CaloRecAlg::initialize() { ATH_MSG_INFO(name() << "::initalize()" ); - // Initalize tools - //ATH_CHECK( m_recoTool.retrieve() ); - // Set key to read calo hits from ATH_CHECK( m_caloWaveHitContainerKey.initialize() ); @@ -21,19 +18,21 @@ CaloRecAlg::initialize() { // Set key to write container ATH_CHECK( m_caloHitContainerKey.initialize() ); + ATH_CHECK( m_preshowerHitContainerKey.initialize() ); + + // Initalize tools + ATH_CHECK( m_recoCalibTool.retrieve() ); return StatusCode::SUCCESS; } - -StatusCode -CaloRecAlg::finalize() { +//---------------------------------------------------------------------- +StatusCode CaloRecAlg::finalize() { ATH_MSG_INFO(name() << "::finalize()"); return StatusCode::SUCCESS; } - -StatusCode -CaloRecAlg::execute(const EventContext& ctx) const { +//---------------------------------------------------------------------- +StatusCode CaloRecAlg::execute(const EventContext& ctx) const { ATH_MSG_DEBUG("Executing"); ATH_MSG_DEBUG("Run: " << ctx.eventID().run_number() @@ -63,61 +62,115 @@ CaloRecAlg::execute(const EventContext& ctx) const { ATH_CHECK( caloHitContainerHandle.record( std::make_unique<xAOD::CalorimeterHitContainer>(), std::make_unique<xAOD::CalorimeterHitAuxContainer>() ) ); + SG::WriteHandle<xAOD::CalorimeterHitContainer> preshowerHitContainerHandle(m_preshowerHitContainerKey, ctx); + ATH_CHECK( preshowerHitContainerHandle.record( std::make_unique<xAOD::CalorimeterHitContainer>(), + std::make_unique<xAOD::CalorimeterHitAuxContainer>() ) ); + ATH_MSG_DEBUG("WaveformsHitContainer '" << caloHitContainerHandle.name() << "' initialized"); + ATH_MSG_DEBUG("WaveformsHitContainer '" << preshowerHitContainerHandle.name() << "' initialized"); + + // Loop over calo hits and calibrate each primary hit + for( const auto& hit : *caloWaveHitHandle ) { + if (hit->status_bit(xAOD::WaveformStatus::SECONDARY)) continue; + + // Create a new calo hit + xAOD::CalorimeterHit* calo_hit = new xAOD::CalorimeterHit(); + caloHitContainerHandle->push_back(calo_hit); + + ATH_MSG_DEBUG("calo_hit in channel " << hit->channel() ); - // Reconstruct all waveforms - //CHECK( m_recoTool->reconstructAll(*waveformHandle, clockptr, hitContainerHandle.ptr()) ); - - // Find peak time (most significant hit) - const xAOD::WaveformHit* peakHit = findPeakHit(*caloWaveHitHandle); - if (peakHit == NULL) return StatusCode::SUCCESS; - - // Create a new calo hit - xAOD::CalorimeterHit* calo_hit = new xAOD::CalorimeterHit(); - caloHitContainerHandle->push_back(calo_hit); - - calo_hit->set_raw_energy(-1.); // Dummy value - - // Find closest hits in time per channel - std::map<int, const xAOD::WaveformHit*> hitMap; - for ( const auto& hit : *caloWaveHitHandle ) { - int channel = hit->channel(); - if (hitMap.count(channel) == 0) - hitMap[channel] = hit; - else { - if (abs(hitMap[channel]->localtime() - peakHit->localtime()) > - abs(hit->localtime() - 
peakHit->localtime())) - hitMap[channel] = hit; + float MIPcharge_ref = m_recoCalibTool->getMIPcharge_ref(hit->channel()); // get reference MIP charge from database + + float hit_Edep = 0; + if (m_isMC) { + hit_Edep = hit->integral() / MIPcharge_ref; // MC MIPcharge_ref from database is really digitization scaling factors that take simulated Edep in MeV to mV*ns + ATH_MSG_DEBUG("isMC == True: calo hit Edep = " << hit_Edep << " MeV"); + } else { + float hit_charge = hit->integral()/50.0; // divide by 50 ohms to get charge + ATH_MSG_DEBUG("calo_hit filled has charge of " << hit_charge << " pC"); + + float HVgain_extrap = extrapolateHVgain(hit->channel()); + ATH_MSG_DEBUG("HV gain = " << HVgain_extrap ); + + float hit_MIP = (hit_charge * HVgain_extrap) / MIPcharge_ref; + ATH_MSG_DEBUG("Edep in MIPs = " << hit_MIP ); + + hit_Edep = hit_MIP * m_MIP_sim_Edep_calo; + ATH_MSG_DEBUG("Edep in MeV = " << hit_Edep ); } - } - // For each hit found, insert these into the caloHit - // Clear before association - calo_hit->clearCaloWaveformLinks(); - for ( const auto& [chan, hit] : hitMap ) { - ATH_MSG_VERBOSE("Found hit " << *hit); - calo_hit->addCaloHit(caloWaveHitHandle.get(), hit); + calo_hit->set_Edep(hit_Edep); // set calibrated Edep value + + float hit_raw_Edep = 0.0; + if (hit->integral() != 0.0) { // avoid possibility of division by zero error + hit_raw_Edep = hit_Edep * (hit->raw_integral() / hit->integral()); + } + calo_hit->set_raw_Edep(hit_raw_Edep); // set calibrated raw_Edep value + + calo_hit->set_channel(hit->channel()); // set channel number + + calo_hit->clearWaveformLinks(); + calo_hit->addHit(caloWaveHitHandle.get(), hit); // create link to calo waveform hit } ATH_MSG_DEBUG("CaloHitContainer '" << caloHitContainerHandle.name() << "' filled with "<< caloHitContainerHandle->size() <<" items"); - return StatusCode::SUCCESS; -} + for( const auto& hit : *preshowerWaveHitHandle ) { + if (hit->status_bit(xAOD::WaveformStatus::SECONDARY)) continue; + + // Create a new preshower hit + xAOD::CalorimeterHit* preshower_hit = new xAOD::CalorimeterHit(); + preshowerHitContainerHandle->push_back(preshower_hit); + + ATH_MSG_DEBUG("preshower_hit in channel " << hit->channel() ); -const xAOD::WaveformHit* -CaloRecAlg::findPeakHit(const xAOD::WaveformHitContainer& hitContainer) const { + float MIPcharge_ref = m_recoCalibTool->getMIPcharge_ref(hit->channel()); // get reference MIP charge from database - const xAOD::WaveformHit* peakHit = NULL; - for( const auto& hit : hitContainer ) { - if (peakHit == NULL) { - peakHit = hit; + float hit_Edep = 0; + if (m_isMC) { + hit_Edep = hit->integral() / MIPcharge_ref; // MC MIPcharge_ref from database is really digitization scaling factors that take simulated Edep in MeV to mV*ns + ATH_MSG_DEBUG("isMC == True: preshower hit Edep = " << hit_Edep << " MeV"); } else { - if ( hit->peak() > peakHit->peak() ) peakHit = hit; + float hit_charge = hit->integral()/50.0; // divide by 50 ohms to get charge + ATH_MSG_DEBUG("preshower_hit filled has charge of " << hit_charge << " pC"); + + float hit_MIP = hit_charge / MIPcharge_ref; + ATH_MSG_DEBUG("Edep in MIPs = " << hit_MIP ); + + hit_Edep = hit_MIP * m_MIP_sim_Edep_preshower; + ATH_MSG_DEBUG("Edep in GeV = " << hit_Edep ); + } + + preshower_hit->set_Edep(hit_Edep); // set calibrated Edep value + + float hit_raw_Edep = 0.0; + if (hit->integral() != 0.0) { // avoid possibility of division by zero error + hit_raw_Edep = hit_Edep * (hit->raw_integral() / hit->integral()); } + preshower_hit->set_raw_Edep(hit_raw_Edep); // set 
calibrated rsw_Edep value + + preshower_hit->set_channel(hit->channel()); // set channel number + + preshower_hit->clearWaveformLinks(); + preshower_hit->addHit(preshowerWaveHitHandle.get(), hit); // create link to preshower waveform hit } - // Didn't find anything? - if (peakHit == NULL) return NULL; - if (peakHit->status_bit(xAOD::WaveformStatus::THRESHOLD_FAILED)) return NULL; - return peakHit; + ATH_MSG_DEBUG("PreshowerHitContainer '" << preshowerHitContainerHandle.name() << "' filled with "<< preshowerHitContainerHandle->size() <<" items"); + + + return StatusCode::SUCCESS; } + +//---------------------------------------------------------------------- +float CaloRecAlg::extrapolateHVgain(int channel) const { + float PMT_hv = m_recoCalibTool->getHV(channel); + float PMT_hv_ref = m_recoCalibTool->getHV_ref(channel); + TF1 gaincurve = m_recoCalibTool->get_PMT_HV_curve(channel); + + float gaincurve_atHV = gaincurve.Eval(PMT_hv); + float gaincurve_atHVref = gaincurve.Eval(PMT_hv_ref); + + return ( gaincurve_atHVref / gaincurve_atHV ) * pow( PMT_hv_ref / PMT_hv , 6.6); +} +//---------------------------------------------------------------------- + diff --git a/Calorimeter/CaloRecAlgs/src/CaloRecAlg.h b/Calorimeter/CaloRecAlgs/src/CaloRecAlg.h index bd98e9afd6b693b8cc4518ebd87ddd72207fddef..c9ff4c5146d9e59c94a279345f6bb5977519c997 100644 --- a/Calorimeter/CaloRecAlgs/src/CaloRecAlg.h +++ b/Calorimeter/CaloRecAlgs/src/CaloRecAlg.h @@ -12,7 +12,11 @@ #include "xAODFaserCalorimeter/CalorimeterHitAuxContainer.h" // Tool classes -//#include "CaloRecTools/ICalorimeterReconstructionTool.h" +#include "CaloRecTools/ICaloRecTool.h" + +// Include Athena stuff for Conditions db reading +#include "AthenaPoolUtilities/CondAttrListCollection.h" +#include "StoreGate/ReadCondHandleKey.h" // Handles #include "StoreGate/ReadHandleKey.h" @@ -21,9 +25,16 @@ // Gaudi #include "GaudiKernel/ServiceHandle.h" #include "GaudiKernel/ToolHandle.h" +#include "GaudiKernel/EventContext.h" +#include "GaudiKernel/ICondSvc.h" +#include "Gaudi/Property.h" + +// ROOT +#include "TF1.h" // STL #include <string> +#include <vector> class CaloRecAlg : public AthReentrantAlgorithm { @@ -51,32 +62,33 @@ class CaloRecAlg : public AthReentrantAlgorithm { /** * @name Reconstruction tool */ - //ToolHandle<ICalorimeterReconstructionTool> m_recoTool - //{this, "CalorimeterReconstructionTool", "CalorimeterReconstructionTool"}; + ToolHandle<ICaloRecTool> m_recoCalibTool {this, "CaloRecTool", "CaloRecTool"}; /** * @name Input raw waveform data using SG::ReadHandleKey */ //@{ - SG::ReadHandleKey<xAOD::WaveformHitContainer> m_caloWaveHitContainerKey - {this, "CaloWaveHitContainerKey", ""}; + SG::ReadHandleKey<xAOD::WaveformHitContainer> m_caloWaveHitContainerKey {this, "CaloWaveHitContainerKey", "CaloWaveformHits"}; //@} //@{ - SG::ReadHandleKey<xAOD::WaveformHitContainer> m_preshowerWaveHitContainerKey - {this, "PreshowerWaveHitContainerKey", ""}; + SG::ReadHandleKey<xAOD::WaveformHitContainer> m_preshowerWaveHitContainerKey {this, "PreshowerWaveHitContainerKey", "PreshowerWaveformHits"}; //@} /** * @name Output data using SG::WriteHandleKey */ //@{ - SG::WriteHandleKey<xAOD::CalorimeterHitContainer> m_caloHitContainerKey - {this, "CaloHitContainerKey", ""}; + SG::WriteHandleKey<xAOD::CalorimeterHitContainer> m_caloHitContainerKey {this, "CaloHitContainerKey", "CaloHits"}; + SG::WriteHandleKey<xAOD::CalorimeterHitContainer> m_preshowerHitContainerKey {this, "PreshowerHitContainerKey", "PreshowerHits"}; //@} - const xAOD::WaveformHit* - 
findPeakHit(const xAOD::WaveformHitContainer& hitContainer) const; + float extrapolateHVgain(int channel) const; + + FloatProperty m_MIP_sim_Edep_calo {this, "MIP_sim_Edep_calo", 58.5}; // MIP deposits 5.85 MeV of energy in calo + FloatProperty m_MIP_sim_Edep_preshower {this, "MIP_sim_Edep_preshower", 4.894}; // MIP deposits 4.894 MeV of energy in a preshower layer + + Gaudi::Property<bool> m_isMC {this, "isMC", false}; }; diff --git a/Calorimeter/CaloRecTools/CMakeLists.txt b/Calorimeter/CaloRecTools/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..f0e9f07b53f5426b360c2639d4f7953a44d57792 --- /dev/null +++ b/Calorimeter/CaloRecTools/CMakeLists.txt @@ -0,0 +1,30 @@ +################################################################################ +# Package: CaloRecTools +################################################################################ + +# Declare the package name: +atlas_subdir( CaloRecTools ) + +# External dependencies: +find_package( ROOT ) + +# Component(s) in the package: +atlas_add_library( CaloRecToolsLib + CaloRecTools/*.h src/*.cxx src/*.h + PUBLIC_HEADERS CaloRecTools + PRIVATE_INCLUDE_DIRS ${ROOT_INCLUDE_DIRS} + LINK_LIBRARIES AthenaBaseComps AthenaPoolUtilities AthenaKernel GaudiKernel + PRIVATE_LINK_LIBRARIES ${ROOT_LIBRARIES} + ) + +atlas_add_component( CaloRecTools + src/components/*.cxx + INCLUDE_DIRS ${ROOT_INCLUDE_DIRS} + LINK_LIBRARIES ${ROOT_LIBRARIES} + AthenaBaseComps AthenaPoolUtilities AthenaKernel GaudiKernel + CaloRecToolsLib) + +# Install files from the package: +atlas_install_python_modules( python/*.py ) + + diff --git a/Calorimeter/CaloRecTools/CaloRecTools/ICaloRecTool.h b/Calorimeter/CaloRecTools/CaloRecTools/ICaloRecTool.h new file mode 100644 index 0000000000000000000000000000000000000000..feb141d915d6214a681f437d94b8118e14c33820 --- /dev/null +++ b/Calorimeter/CaloRecTools/CaloRecTools/ICaloRecTool.h @@ -0,0 +1,45 @@ +/* + Copyright (C) 2022 CERN for the benefit of the FASER collaboration +*/ + +/** + * @file ICaloRecTool.h + * Header file for the ICaloRecTool class + * @author Deion Fellers, 2022 + */ + + +#ifndef CALORECTOOL_ICALORECTOOL_H +#define CALORECTOOL_ICALORECTOOL_H + +// Base class +#include "GaudiKernel/IAlgTool.h" +#include "GaudiKernel/ToolHandle.h" +#include "GaudiKernel/EventContext.h" + +#include "TF1.h" + +///Interface for Calo reco algorithms +class ICaloRecTool : virtual public IAlgTool +{ + public: + + // InterfaceID + DeclareInterfaceID(ICaloRecTool, 1, 0); + + virtual ~ICaloRecTool() = default; //!< Destructor + + // methods to return calibration database data + virtual float getHV(const EventContext& ctx, int channel) const = 0; + virtual float getHV(int channel) const = 0; + + virtual float getHV_ref(const EventContext& ctx, int channel) const = 0; + virtual float getHV_ref(int channel) const = 0; + + virtual float getMIPcharge_ref(const EventContext& ctx, int channel) const = 0; + virtual float getMIPcharge_ref(int channel) const = 0; + + virtual TF1 get_PMT_HV_curve(int channel) const = 0; +}; + +#endif // CALORECTOOL_ICALORECTOOL_H diff --git a/Calorimeter/CaloRecTools/python/CaloRecToolConfig.py b/Calorimeter/CaloRecTools/python/CaloRecToolConfig.py new file mode 100644 index 0000000000000000000000000000000000000000..e87ac3658ec8f92caf00bc19e061d3d9d95c8e5d --- /dev/null +++ b/Calorimeter/CaloRecTools/python/CaloRecToolConfig.py @@ -0,0 +1,28 @@ +""" Define methods to configure CaloRecTool + +Copyright (C) 2022 CERN for the benefit of the FASER collaboration +""" +from 
AthenaConfiguration.ComponentAccumulator import ComponentAccumulator +from AthenaConfiguration.ComponentFactory import CompFactory +from IOVDbSvc.IOVDbSvcConfig import addFolders +CaloRecTool=CompFactory.CaloRecTool + + +def CaloRecToolCfg(flags, name="CaloRecTool", **kwargs): + """ Return configured ComponentAccumulator and tool for Calo Calibration + + CaloRecTool may be provided in kwargs + """ + + acc = ComponentAccumulator() + # tool = kwargs.get("CaloRecTool", CaloRecTool(flags)) + # Probably need to figure this out! + dbInstance = kwargs.get("dbInstance", "TRIGGER_OFL") + + acc.merge(addFolders(flags, "/WAVE/Calibration/HV", dbInstance, className="CondAttrListCollection")) + acc.merge(addFolders(flags, "/WAVE/Calibration/HV_ref", dbInstance, className="CondAttrListCollection")) + acc.merge(addFolders(flags, "/WAVE/Calibration/MIP_charge_ref", dbInstance, className="CondAttrListCollection")) + + return acc + + diff --git a/Calorimeter/CaloRecTools/src/CaloRecTool.cxx b/Calorimeter/CaloRecTools/src/CaloRecTool.cxx new file mode 100644 index 0000000000000000000000000000000000000000..a6bb42c5140d10c1d2efdc486201c2bab81c0365 --- /dev/null +++ b/Calorimeter/CaloRecTools/src/CaloRecTool.cxx @@ -0,0 +1,195 @@ +/* + Copyright (C) 2022 CERN for the benefit of the FASER collaboration +*/ + +/** + * @file CaloRecTool.cxx + * Implementation file for CaloRecTool.cxx + * @author Deion Fellers, 2022 + **/ + +#include "CaloRecTool.h" + +// Constructor +CaloRecTool::CaloRecTool(const std::string& type, const std::string& name, const IInterface* parent) : + base_class(type, name, parent) +{ +} + +//-------------------------------------------------------------- +StatusCode +CaloRecTool::initialize() { + ATH_MSG_INFO( name() << "::initialize()" ); + + // Set keys to read calibration variables from the database + ATH_CHECK( m_PMT_HV_ReadKey.initialize() ); + ATH_CHECK( m_PMT_HV_ref_ReadKey.initialize() ); + ATH_CHECK( m_MIPcharge_ref_ReadKey.initialize() ); + + // access and store calo PMT HV gain curves + HVgaincurves_rootFile = TFile::Open(m_PMT_HV_Gain_Curve_file.value().c_str(),"read"); // open root file that has TF1 gain curves stored + + chan0_HVgaincurve_pntr = HVgaincurves_rootFile->Get<TF1>(m_chan0_HVgaincurve_name.value().c_str()); // make pointers to the gain curves + chan1_HVgaincurve_pntr = HVgaincurves_rootFile->Get<TF1>(m_chan1_HVgaincurve_name.value().c_str()); + chan2_HVgaincurve_pntr = HVgaincurves_rootFile->Get<TF1>(m_chan2_HVgaincurve_name.value().c_str()); + chan3_HVgaincurve_pntr = HVgaincurves_rootFile->Get<TF1>(m_chan3_HVgaincurve_name.value().c_str()); + + m_HVgainCurveMap[0] = *chan0_HVgaincurve_pntr; // store TF1 objects in an array mapped to the calo channel numbers + m_HVgainCurveMap[1] = *chan1_HVgaincurve_pntr; + m_HVgainCurveMap[2] = *chan2_HVgaincurve_pntr; + m_HVgainCurveMap[3] = *chan3_HVgaincurve_pntr; + + HVgaincurves_rootFile->Close(); // close the root file + + + return StatusCode::SUCCESS; +} + +//-------------------------------------------------------------- +StatusCode +CaloRecTool::finalize() { + // Print where you are + return StatusCode::SUCCESS; +} + +//-------------------------------------------------------------- +TF1 CaloRecTool::get_PMT_HV_curve(int channel) const { + if (channel >= 0 && channel < 4) { + return m_HVgainCurveMap[channel]; + } else { + ATH_MSG_WARNING("channel "<<channel<<" is outside 0-3 and thus not a calorimeter channel, so no HV gain-curve exists!"); + } + return TF1("default", "1.0", 0.0, 2000.0); +} + 
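+// The database accessors below (getHV, getHV_ref, getMIPcharge_ref) all follow the same pattern: +// open a SG::ReadCondHandle on the corresponding /WAVE/Calibration folder, look up the attribute list +// for the requested digitizer channel, and return the stored float, falling back to 0 with a warning +// if the payload is missing or null. The single-argument overloads forward to these using Gaudi::Hive::currentContext().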
+//-------------------------------------------------------------- +float CaloRecTool::getHV(const EventContext& ctx, int channel) const { + + ATH_MSG_DEBUG("in getHV("<<channel<<")"); + + float HV=0.; + + // Read Cond Handle + SG::ReadCondHandle<CondAttrListCollection> readHandle{m_PMT_HV_ReadKey, ctx}; + const CondAttrListCollection* readCdo{*readHandle}; + if (readCdo==nullptr) { + ATH_MSG_FATAL("Null pointer to the read conditions object"); + return HV; + } + // Get the validitiy range + EventIDRange rangeW; + if (not readHandle.range(rangeW)) { + ATH_MSG_FATAL("Failed to retrieve validity range for " << readHandle.key()); + return HV; + } + ATH_MSG_DEBUG("Size of CondAttrListCollection " << readHandle.fullKey() << " readCdo->size()= " << readCdo->size()); + ATH_MSG_DEBUG("Range of input is " << rangeW); + + // Read offset for specific channel + const CondAttrListCollection::AttributeList& payload{readCdo->attributeList(channel)}; + + if (payload.exists("HV") and not payload["HV"].isNull()) { + HV = payload["HV"].data<float>(); + ATH_MSG_DEBUG("Found digitizer channel " << channel << ", HV as " << HV); + } else { + ATH_MSG_WARNING("No valid HV found for channel "<<channel<<"!"); + } + + return HV; + +} + +float CaloRecTool::getHV(int channel) const { + const EventContext& ctx(Gaudi::Hive::currentContext()); + return CaloRecTool::getHV(ctx, channel); +} + +//---------------------------------------------------------------------- +float CaloRecTool::getHV_ref(const EventContext& ctx, int channel) const { + + ATH_MSG_DEBUG("in getHV_ref("<<channel<<")"); + + float HV_ref=0.; + + // Read Cond Handle + SG::ReadCondHandle<CondAttrListCollection> readHandle{m_PMT_HV_ref_ReadKey, ctx}; + const CondAttrListCollection* readCdo{*readHandle}; + if (readCdo==nullptr) { + ATH_MSG_FATAL("Null pointer to the read conditions object"); + return HV_ref; + } + // Get the validitiy range + EventIDRange rangeW; + if (not readHandle.range(rangeW)) { + ATH_MSG_FATAL("Failed to retrieve validity range for " << readHandle.key()); + return HV_ref; + } + ATH_MSG_DEBUG("Size of CondAttrListCollection " << readHandle.fullKey() << " readCdo->size()= " << readCdo->size()); + ATH_MSG_DEBUG("Range of input is " << rangeW); + + // Read offset for specific channel + const CondAttrListCollection::AttributeList& payload{readCdo->attributeList(channel)}; + + if (payload.exists("HV_ref") and not payload["HV_ref"].isNull()) { + HV_ref = payload["HV_ref"].data<float>(); + ATH_MSG_DEBUG("Found digitizer channel " << channel << ", HV_ref as " << HV_ref); + } else { + ATH_MSG_WARNING("No valid HV_ref found for channel "<<channel<<"!"); + } + + return HV_ref; + +} + +float CaloRecTool::getHV_ref(int channel) const { + const EventContext& ctx(Gaudi::Hive::currentContext()); + return CaloRecTool::getHV_ref(ctx, channel); +} + +//---------------------------------------------------------------------- +float CaloRecTool::getMIPcharge_ref(const EventContext& ctx, int channel) const { + + ATH_MSG_DEBUG("in getMIPcharge_ref("<<channel<<")"); + + float MIP_charge_ref =0.; + + // Read Cond Handle + SG::ReadCondHandle<CondAttrListCollection> readHandle{m_MIPcharge_ref_ReadKey, ctx}; + const CondAttrListCollection* readCdo{*readHandle}; + if (readCdo==nullptr) { + ATH_MSG_FATAL("Null pointer to the read conditions object"); + return MIP_charge_ref; + } + // Get the validitiy range + EventIDRange rangeW; + if (not readHandle.range(rangeW)) { + ATH_MSG_FATAL("Failed to retrieve validity range for " << readHandle.key()); + return MIP_charge_ref; + 
} + ATH_MSG_DEBUG("Size of CondAttrListCollection " << readHandle.fullKey() << " readCdo->size()= " << readCdo->size()); + ATH_MSG_DEBUG("Range of input is " << rangeW); + + // Read offset for specific channel + const CondAttrListCollection::AttributeList& payload{readCdo->attributeList(channel)}; + + if (payload.exists("MIP_charge_ref") and not payload["MIP_charge_ref"].isNull()) { + MIP_charge_ref = payload["MIP_charge_ref"].data<float>(); + ATH_MSG_DEBUG("Found digitizer channel " << channel << ", MIP_charge_ref as " << MIP_charge_ref); + } else { + ATH_MSG_WARNING("No valid MIP_charge_ref found for channel "<<channel<<"!"); + } + + return MIP_charge_ref; + +} + +float CaloRecTool::getMIPcharge_ref(int channel) const { + const EventContext& ctx(Gaudi::Hive::currentContext()); + return CaloRecTool::getMIPcharge_ref(ctx, channel); +} + +//-------------------------------------------------------------- + + + + diff --git a/Calorimeter/CaloRecTools/src/CaloRecTool.h b/Calorimeter/CaloRecTools/src/CaloRecTool.h new file mode 100644 index 0000000000000000000000000000000000000000..c6dc951818cb59472fe6a1ff077e97e6753ec509 --- /dev/null +++ b/Calorimeter/CaloRecTools/src/CaloRecTool.h @@ -0,0 +1,86 @@ +/* + Copyright (C) 2021 CERN for the benefit of the FASER collaboration +*/ + +/** @file CaloRecTool.h + * Header file for CaloRecTool.h + * + */ +#ifndef CALORECTOOLS_CALORECTOOL_H +#define CALORECTOOLS_CALORECTOOL_H + +// Include interface class +#include "AthenaBaseComps/AthAlgTool.h" +#include "CaloRecTools/ICaloRecTool.h" + +// Include Athena stuff +#include "AthenaPoolUtilities/CondAttrListCollection.h" +#include "StoreGate/ReadCondHandleKey.h" + +#include "GaudiKernel/ICondSvc.h" +#include "Gaudi/Property.h" + +// Include Gaudi classes +#include "GaudiKernel/ICondSvc.h" +#include "Gaudi/Property.h" +#include "GaudiKernel/EventContext.h" + +// Include ROOT classes +#include "TF1.h" +#include "TFile.h" + +#include <string> + +class CaloRecTool: public extends<AthAlgTool, ICaloRecTool> { + public: + + /// Normal constructor for an AlgTool; 'properties' are also declared here + CaloRecTool(const std::string& type, const std::string& name, const IInterface* parent); + + // Standard Gaudi functions + virtual StatusCode initialize() override; //!< Gaudi initialiser + virtual StatusCode finalize() override; //!< Gaudi finaliser + + // methods for returning calibration data + // Channels indexed by digitizer channel number + // HV is in Volts and MIPcharge is in pC + // + virtual float getHV(const EventContext& ctx, int channel) const override; + virtual float getHV(int channel) const override; + + virtual float getHV_ref(const EventContext& ctx, int channel) const override; + virtual float getHV_ref(int channel) const override; + + virtual float getMIPcharge_ref(const EventContext& ctx, int channel) const override; + virtual float getMIPcharge_ref(int channel) const override; + + // method for returning PMT HV calibration curves from root file + virtual TF1 get_PMT_HV_curve(int channel) const override; + + TFile* HVgaincurves_rootFile; + + TF1* chan0_HVgaincurve_pntr; + TF1* chan1_HVgaincurve_pntr; + TF1* chan2_HVgaincurve_pntr; + TF1* chan3_HVgaincurve_pntr; + + TF1 m_HVgainCurveMap[4]; + + private: + // Propert that points to the location of the root file which contains the HV gain curves for the calorimeter PMTs + StringProperty m_PMT_HV_Gain_Curve_file{this, "PMT_HV_Gain_Curve_file", "/cvmfs/faser.cern.ch/repo/sw/database/DBRelease/current/pmtgain/CaloGainCurves.root"}; + + // properties that 
map channel name to PMT HV gain curves. PMt names found at https://twiki.cern.ch/twiki/bin/viewauth/FASER/CaloScintillator + StringProperty m_chan0_HVgaincurve_name{this, "Chan0HVgaincurve", "pol5_HV_Gain_Curve_PMT_LB8770"}; + StringProperty m_chan1_HVgaincurve_name{this, "Chan1HVgaincurve", "pol5_HV_Gain_Curve_PMT_LB8733"}; + StringProperty m_chan2_HVgaincurve_name{this, "Chan2HVgaincurve", "pol5_HV_Gain_Curve_PMT_LB8786"}; + StringProperty m_chan3_HVgaincurve_name{this, "Chan3HVgaincurve", "pol5_HV_Gain_Curve_PMT_LB8732"}; + + // Read Cond Handle + SG::ReadCondHandleKey<CondAttrListCollection> m_PMT_HV_ReadKey{this, "PMT_HV_ReadKey", "/WAVE/Calibration/HV", "Key of folder for PMT HV reading"}; + SG::ReadCondHandleKey<CondAttrListCollection> m_PMT_HV_ref_ReadKey{this, "PMT_HV_ref_ReadKey", "/WAVE/Calibration/HV_ref", "Key of folder for PMT HV at time of MIP calibration measurement"}; + SG::ReadCondHandleKey<CondAttrListCollection> m_MIPcharge_ref_ReadKey{this, "MIPcharge_ref_ReadKey", "/WAVE/Calibration/MIP_charge_ref", "Key of folder for MIPcharge calibration measurment"}; + +}; + +#endif // CALORECTOOLS_CALORECTOOL_H diff --git a/Calorimeter/CaloRecTools/src/components/CaloRecTools_entries.cxx b/Calorimeter/CaloRecTools/src/components/CaloRecTools_entries.cxx new file mode 100644 index 0000000000000000000000000000000000000000..c9c5438eb029be7532820e35dee8e41710f5e781 --- /dev/null +++ b/Calorimeter/CaloRecTools/src/components/CaloRecTools_entries.cxx @@ -0,0 +1,3 @@ +#include "../CaloRecTool.h" + +DECLARE_COMPONENT( CaloRecTool ) diff --git a/Control/CalypsoExample/Digitization/scripts/faserMDC_digi.py b/Control/CalypsoExample/Digitization/scripts/faserMDC_digi.py index 1154edd8aad0cd4ac8daadf5559e05ef1cc362b9..4c7dba7e427cb0b545a0cbc0526ed4b6c6918e76 100755 --- a/Control/CalypsoExample/Digitization/scripts/faserMDC_digi.py +++ b/Control/CalypsoExample/Digitization/scripts/faserMDC_digi.py @@ -81,7 +81,7 @@ elif runtype == "TestBeamMC" : # New TI12 geometry (ugh) elif runtype == "TI12MC": ConfigFlags.GeoModel.FaserVersion = "FASERNU-03" - ConfigFlags.IOVDb.GlobalTag = "OFLCOND-FASER-02" + ConfigFlags.IOVDb.GlobalTag = "OFLCOND-FASER-03" else: print("Invalid run type found:", runtype) diff --git a/Control/CalypsoExample/Digitization/scripts/faserMDC_digi_merge.py b/Control/CalypsoExample/Digitization/scripts/faserMDC_digi_merge.py index a421cead6c379b8dd56fc2d80ea86bb9ca8ab342..8ee8d6f744d79bfdcbf9eb4177a89fc3021583e5 100755 --- a/Control/CalypsoExample/Digitization/scripts/faserMDC_digi_merge.py +++ b/Control/CalypsoExample/Digitization/scripts/faserMDC_digi_merge.py @@ -154,7 +154,7 @@ elif runtype == "TestBeamMC" : # New TI12 geometry (ugh) elif runtype == "TI12MC": ConfigFlags.GeoModel.FaserVersion = "FASERNU-03" - ConfigFlags.IOVDb.GlobalTag = "OFLCOND-FASER-02" + ConfigFlags.IOVDb.GlobalTag = "OFLCOND-FASER-03" else: print("Invalid run type found:", runtype) diff --git a/Control/CalypsoExample/Digitization/scripts/faser_digi.py b/Control/CalypsoExample/Digitization/scripts/faser_digi.py index 72f890a3bd3b7f352f60c66207de15aaa2ec4961..40fb1915d50a8b3e6fd25bb6808b04ef1c5d9c16 100755 --- a/Control/CalypsoExample/Digitization/scripts/faser_digi.py +++ b/Control/CalypsoExample/Digitization/scripts/faser_digi.py @@ -78,7 +78,7 @@ elif runtype == "TestBeamMC" : # New TI12 geometry (ugh) elif runtype == "TI12MC": ConfigFlags.GeoModel.FaserVersion = "FASERNU-03" - ConfigFlags.IOVDb.GlobalTag = "OFLCOND-FASER-02" + ConfigFlags.IOVDb.GlobalTag = "OFLCOND-FASER-03" else: print("Invalid 
geometry type found:", runtype) diff --git a/Control/CalypsoExample/Digitization/scripts/faser_digi_merge.py b/Control/CalypsoExample/Digitization/scripts/faser_digi_merge.py index ea135db8346438f9a392959d7791744997353a1e..1be2af2d92bcd37edeb2d2d20517e1254069822f 100755 --- a/Control/CalypsoExample/Digitization/scripts/faser_digi_merge.py +++ b/Control/CalypsoExample/Digitization/scripts/faser_digi_merge.py @@ -168,7 +168,7 @@ elif runtype == "TestBeamMC" : # New TI12 geometry (ugh) elif runtype == "TI12MC": ConfigFlags.GeoModel.FaserVersion = "FASERNU-03" - ConfigFlags.IOVDb.GlobalTag = "OFLCOND-FASER-02" + ConfigFlags.IOVDb.GlobalTag = "OFLCOND-FASER-03" else: print("Invalid run type found:", runtype) diff --git a/Control/CalypsoExample/Generation/data/mdc/FaserMC-MDC_PG_muon_fasernu_1000GeV-101312.json b/Control/CalypsoExample/Generation/data/mdc/FaserMC-MDC_PG_muon_fasernu_1000GeV-101312.json new file mode 100644 index 0000000000000000000000000000000000000000..ee3ac4eefd1264fb4553b29090772a1b9353282e --- /dev/null +++ b/Control/CalypsoExample/Generation/data/mdc/FaserMC-MDC_PG_muon_fasernu_1000GeV-101312.json @@ -0,0 +1,14 @@ +{ + "file_length": 5000, + "mass": 105.66, + "maxE": 1000.0, + "minE": 1000.0, + "pid": [-13, 13], + "radius": -25.0, + "angle": 0.0006, + "run": 101312, + "sampler": "const", + "segment": 0, + "short": "MDC_PG_muon_fasernu_1000GeV", + "zpos": -3990.0 +} diff --git a/Control/CalypsoExample/Generation/data/mdc/FaserMC-MDC_PG_muon_fasernu_100GeV-101310.json b/Control/CalypsoExample/Generation/data/mdc/FaserMC-MDC_PG_muon_fasernu_100GeV-101310.json new file mode 100644 index 0000000000000000000000000000000000000000..8c949c0bcdc862ec4b97bfa934e85ad590b78647 --- /dev/null +++ b/Control/CalypsoExample/Generation/data/mdc/FaserMC-MDC_PG_muon_fasernu_100GeV-101310.json @@ -0,0 +1,14 @@ +{ + "file_length": 5000, + "mass": 105.66, + "maxE": 100.0, + "minE": 100.0, + "pid": [-13, 13], + "radius": -25.0, + "angle": 0.0006, + "run": 101310, + "sampler": "const", + "segment": 0, + "short": "MDC_PG_muon_fasernu_100GeV", + "zpos": -3990.0 +} diff --git a/Control/CalypsoExample/Generation/data/mdc/FaserMC-MDC_PG_muon_fasernu_10GeV-101308.json b/Control/CalypsoExample/Generation/data/mdc/FaserMC-MDC_PG_muon_fasernu_10GeV-101308.json new file mode 100644 index 0000000000000000000000000000000000000000..66abf1a6f5a248b875a779e0888d936aa06263b4 --- /dev/null +++ b/Control/CalypsoExample/Generation/data/mdc/FaserMC-MDC_PG_muon_fasernu_10GeV-101308.json @@ -0,0 +1,14 @@ +{ + "file_length": 5000, + "mass": 105.66, + "maxE": 10.0, + "minE": 10.0, + "pid": [-13, 13], + "radius": -25.0, + "angle": 0.0006, + "run": 101308, + "sampler": "const", + "segment": 0, + "short": "MDC_PG_muon_fasernu_10GeV", + "zpos": -3990.0 +} diff --git a/Control/CalypsoExample/Generation/data/mdc/FaserMC-MDC_PG_muon_fasernu_300GeV-101311.json b/Control/CalypsoExample/Generation/data/mdc/FaserMC-MDC_PG_muon_fasernu_300GeV-101311.json new file mode 100644 index 0000000000000000000000000000000000000000..5307647e9ca6c5a3d0b1fa311dc0800b23febf51 --- /dev/null +++ b/Control/CalypsoExample/Generation/data/mdc/FaserMC-MDC_PG_muon_fasernu_300GeV-101311.json @@ -0,0 +1,14 @@ +{ + "file_length": 5000, + "mass": 105.66, + "maxE": 300.0, + "minE": 300.0, + "pid": [-13, 13], + "radius": -25.0, + "angle": 0.0006, + "run": 101311, + "sampler": "const", + "segment": 0, + "short": "MDC_PG_muon_fasernu_300GeV", + "zpos": -3990.0 +} diff --git a/Control/CalypsoExample/Generation/data/mdc/FaserMC-MDC_PG_muon_fasernu_30GeV-101309.json 
b/Control/CalypsoExample/Generation/data/mdc/FaserMC-MDC_PG_muon_fasernu_30GeV-101309.json new file mode 100644 index 0000000000000000000000000000000000000000..6eda07ec333b0ef2a0685c7cd7d9b6120e65ed43 --- /dev/null +++ b/Control/CalypsoExample/Generation/data/mdc/FaserMC-MDC_PG_muon_fasernu_30GeV-101309.json @@ -0,0 +1,14 @@ +{ + "file_length": 5000, + "mass": 105.66, + "maxE": 30.0, + "minE": 30.0, + "pid": [-13, 13], + "radius": -25.0, + "angle": 0.0006, + "run": 101309, + "sampler": "const", + "segment": 0, + "short": "MDC_PG_muon_fasernu_30GeV", + "zpos": -3990.0 +} diff --git a/Control/CalypsoExample/Generation/data/mdc/FaserMC-MDC_PG_muon_fasernu_logE-101307.json b/Control/CalypsoExample/Generation/data/mdc/FaserMC-MDC_PG_muon_fasernu_logE-101307.json index db2954f994dd7c74c9fb30b8e9bda28ac3eea966..3e57b2d300eac4ae2caa74cdccd746b2f24c997a 100644 --- a/Control/CalypsoExample/Generation/data/mdc/FaserMC-MDC_PG_muon_fasernu_logE-101307.json +++ b/Control/CalypsoExample/Generation/data/mdc/FaserMC-MDC_PG_muon_fasernu_logE-101307.json @@ -4,7 +4,7 @@ "maxE": 5000.0, "minE": 10.0, "pid": [-13, 13], - "radius": -25., + "radius": -25.0, "angle": 0.0006, "run": 101307, "sampler": "log", diff --git a/Control/CalypsoExample/Generation/data/mdc/FaserMC-MDC_PG_pion_fasernu_1000GeV-121106.json b/Control/CalypsoExample/Generation/data/mdc/FaserMC-MDC_PG_pion_fasernu_1000GeV-121106.json new file mode 100644 index 0000000000000000000000000000000000000000..513974d4cdbdcc23e5750a06d382a0633c9a2bf1 --- /dev/null +++ b/Control/CalypsoExample/Generation/data/mdc/FaserMC-MDC_PG_pion_fasernu_1000GeV-121106.json @@ -0,0 +1,14 @@ +{ + "file_length": 500, + "mass": 105.66, + "maxE": 1000.0, + "minE": 1000.0, + "pid": [211, -211], + "radius": -25.0, + "angle": 0.0006, + "run": 121106, + "sampler": "const", + "segment": 0, + "short": "MDC_PG_pion_fasernu_1000GeV", + "zpos": -3990.0 +} diff --git a/Control/CalypsoExample/Generation/data/mdc/FaserMC-MDC_PG_pion_fasernu_100GeV-121104.json b/Control/CalypsoExample/Generation/data/mdc/FaserMC-MDC_PG_pion_fasernu_100GeV-121104.json new file mode 100644 index 0000000000000000000000000000000000000000..79b23b151fa1cfce149904689cd1c220ac9ea4e2 --- /dev/null +++ b/Control/CalypsoExample/Generation/data/mdc/FaserMC-MDC_PG_pion_fasernu_100GeV-121104.json @@ -0,0 +1,14 @@ +{ + "file_length": 1000, + "mass": 105.66, + "maxE": 100.0, + "minE": 100.0, + "pid": [211, -211], + "radius": -25.0, + "angle": 0.0006, + "run": 121104, + "sampler": "const", + "segment": 0, + "short": "MDC_PG_pion_fasernu_100GeV", + "zpos": -3990.0 +} diff --git a/Control/CalypsoExample/Generation/data/mdc/FaserMC-MDC_PG_pion_fasernu_10GeV-121102.json b/Control/CalypsoExample/Generation/data/mdc/FaserMC-MDC_PG_pion_fasernu_10GeV-121102.json new file mode 100644 index 0000000000000000000000000000000000000000..7cec4dd2dcbb6af17ac8a26efb3a90e2d2b6b6ab --- /dev/null +++ b/Control/CalypsoExample/Generation/data/mdc/FaserMC-MDC_PG_pion_fasernu_10GeV-121102.json @@ -0,0 +1,14 @@ +{ + "file_length": 1000, + "mass": 105.66, + "maxE": 10.0, + "minE": 10.0, + "pid": [211, -211], + "radius": -25.0, + "angle": 0.0006, + "run": 121102, + "sampler": "const", + "segment": 0, + "short": "MDC_PG_pion_fasernu_10GeV", + "zpos": -3990.0 +} diff --git a/Control/CalypsoExample/Generation/data/mdc/FaserMC-MDC_PG_pion_fasernu_300GeV-121105.json b/Control/CalypsoExample/Generation/data/mdc/FaserMC-MDC_PG_pion_fasernu_300GeV-121105.json new file mode 100644 index 
0000000000000000000000000000000000000000..255af7e27c014ffc94b16b068af5bc90fb45e824 --- /dev/null +++ b/Control/CalypsoExample/Generation/data/mdc/FaserMC-MDC_PG_pion_fasernu_300GeV-121105.json @@ -0,0 +1,14 @@ +{ + "file_length": 1000, + "mass": 105.66, + "maxE": 300.0, + "minE": 300.0, + "pid": [211, -211], + "radius": -25.0, + "angle": 0.0006, + "run": 121105, + "sampler": "const", + "segment": 0, + "short": "MDC_PG_pion_fasernu_300GeV", + "zpos": -3990.0 +} diff --git a/Control/CalypsoExample/Generation/data/mdc/FaserMC-MDC_PG_pion_fasernu_30GeV-121103.json b/Control/CalypsoExample/Generation/data/mdc/FaserMC-MDC_PG_pion_fasernu_30GeV-121103.json new file mode 100644 index 0000000000000000000000000000000000000000..6c25133024940086163302a453aff06e15153f76 --- /dev/null +++ b/Control/CalypsoExample/Generation/data/mdc/FaserMC-MDC_PG_pion_fasernu_30GeV-121103.json @@ -0,0 +1,14 @@ +{ + "file_length": 1000, + "mass": 105.66, + "maxE": 30.0, + "minE": 30.0, + "pid": [211, -211], + "radius": -25.0, + "angle": 0.0006, + "run": 121103, + "sampler": "const", + "segment": 0, + "short": "MDC_PG_pion_fasernu_30GeV", + "zpos": -3990.0 +} diff --git a/Control/CalypsoExample/Generation/scripts/faserMDC_foresee.py b/Control/CalypsoExample/Generation/scripts/faserMDC_foresee.py index f16b6a40a0413278097824bb7af63b51811eaa9c..bec911031cfec291fe523d5ded2916c688538ea2 100755 --- a/Control/CalypsoExample/Generation/scripts/faserMDC_foresee.py +++ b/Control/CalypsoExample/Generation/scripts/faserMDC_foresee.py @@ -76,7 +76,7 @@ if __name__ == '__main__': ConfigFlags.addFlag("Sim.Beam.yshift", 12.) ConfigFlags.GeoModel.FaserVersion = "FASERNU-03" # Geometry set-up - ConfigFlags.IOVDb.GlobalTag = "OFLCOND-FASER-02" # Conditions set-up + ConfigFlags.IOVDb.GlobalTag = "OFLCOND-FASER-03" # Conditions set-up ConfigFlags.addFlag("Input.InitialTimeStamp", 0) # To avoid autoconfig ConfigFlags.GeoModel.Align.Dynamic = False diff --git a/Control/CalypsoExample/Generation/scripts/faserMDC_particlegun.py b/Control/CalypsoExample/Generation/scripts/faserMDC_particlegun.py index 98975a548601209a81b932c7cbbbe03da7184035..32d68b601f4fc43ef9d2942efff26f4562e83813 100755 --- a/Control/CalypsoExample/Generation/scripts/faserMDC_particlegun.py +++ b/Control/CalypsoExample/Generation/scripts/faserMDC_particlegun.py @@ -72,7 +72,7 @@ if __name__ == '__main__': ConfigFlags.addFlag("Sim.Beam.yshift", 0) ConfigFlags.GeoModel.FaserVersion = "FASERNU-03" # Geometry set-up - ConfigFlags.IOVDb.GlobalTag = "OFLCOND-FASER-02" # Conditions set-up + ConfigFlags.IOVDb.GlobalTag = "OFLCOND-FASER-03" # Conditions set-up ConfigFlags.addFlag("Input.InitialTimeStamp", 0) # To avoid autoconfig ConfigFlags.GeoModel.Align.Dynamic = False diff --git a/Control/CalypsoExample/Generation/scripts/faser_particlegun.py b/Control/CalypsoExample/Generation/scripts/faser_particlegun.py index 6abdfdd4a8c8ae18547462b68532bd51cc4a9d71..cc1628efdb9245e32b7abbbc9ca60a60b2151fad 100755 --- a/Control/CalypsoExample/Generation/scripts/faser_particlegun.py +++ b/Control/CalypsoExample/Generation/scripts/faser_particlegun.py @@ -74,8 +74,8 @@ if __name__ == '__main__': if args.geom == "TI12MC": # 2022 TI12 geometry - ConfigFlags.GeoModel.FaserVersion = "FASERNU-02" # Geometry set-up - ConfigFlags.IOVDb.GlobalTag = "OFLCOND-FASER-02" # Conditions set-up + ConfigFlags.GeoModel.FaserVersion = "FASERNU-03" # Geometry set-up + ConfigFlags.IOVDb.GlobalTag = "OFLCOND-FASER-03" # Conditions set-up # TI12 detectors detectors = ['Veto', 'VetoNu', 'Preshower', 'FaserSCT', 'Ecal', 
'Trigger', 'Dipole', 'Emulsion', 'Trench'] diff --git a/Control/CalypsoExample/Generation/scripts/submit_faserMDC_particlegun.sh b/Control/CalypsoExample/Generation/scripts/submit_faserMDC_particlegun.sh index 5d704e9a51445d2ae408512c4eb0df83c71a8b39..08f21dbfc5b08fcc1225ef2594099bb0e51f0d38 100755 --- a/Control/CalypsoExample/Generation/scripts/submit_faserMDC_particlegun.sh +++ b/Control/CalypsoExample/Generation/scripts/submit_faserMDC_particlegun.sh @@ -199,7 +199,7 @@ if ! [ -z "$outdest" ] then ls -l echo "copy *-HITS.root to $outdest" - mkdir -p $outdest + eos mkdir -p $outdest eos cp *-HITS.root ${outdest}/ || true fi # @@ -209,13 +209,13 @@ then cd .. ls -l echo "copy $logfile to $logdest" - mkdir -p $logdest + eos mkdir -p $logdest eos cp $logfile $logdest/$logfile elif ! [ -z "$outdest" ] then cd .. ls -l echo "copy $logfile to $outdest" - mkdir -p $outdest + eos mkdir -p $outdest eos cp $logfile $outdest/$logfile fi diff --git a/Control/CalypsoExample/GeoModelTest/python/Faser03TestConfig.py b/Control/CalypsoExample/GeoModelTest/python/Faser03TestConfig.py index 70322eb150975256868a84bf7bd2174697d0a0da..bf562e1b7aba756ebdab8e65568a180321c5de12 100644 --- a/Control/CalypsoExample/GeoModelTest/python/Faser03TestConfig.py +++ b/Control/CalypsoExample/GeoModelTest/python/Faser03TestConfig.py @@ -33,7 +33,7 @@ if __name__ == "__main__": # Flags for this job ConfigFlags.Input.isMC = True # Needed to bypass autoconfig - ConfigFlags.IOVDb.GlobalTag = "OFLCOND-FASER-02" # Always needed; must match FaserVersion + ConfigFlags.IOVDb.GlobalTag = "OFLCOND-FASER-03" # Always needed; must match FaserVersion ConfigFlags.GeoModel.FaserVersion = "FASERNU-03" # Default FASER geometry ConfigFlags.GeoModel.GeoExportFile = "FaserNu03.db" # Writes out a GeoModel file with the full geometry tree (optional, comment out to skip) ConfigFlags.Detector.GeometryEmulsion = True diff --git a/Control/CalypsoExample/Reconstruction/scripts/faserMDC_reco.py b/Control/CalypsoExample/Reconstruction/scripts/faserMDC_reco.py index 6f5b224ee7b2de4bd7dfe66aa277a8b5b755157c..eddc43e211437c8d313510b919a167a7089a2066 100755 --- a/Control/CalypsoExample/Reconstruction/scripts/faserMDC_reco.py +++ b/Control/CalypsoExample/Reconstruction/scripts/faserMDC_reco.py @@ -88,12 +88,12 @@ elif runtype == "TestBeamData" or runtype == "TestBeam2021": # New TI12 geometry (ugh) elif runtype == "TI12Data02": ConfigFlags.GeoModel.FaserVersion = "FASER-02" - ConfigFlags.IOVDb.GlobalTag = "OFLCOND-FASER-02" + ConfigFlags.IOVDb.GlobalTag = "OFLCOND-FASER-03" # Final 2022 TI12 geometry elif runtype == "TI12Data03": ConfigFlags.GeoModel.FaserVersion = "FASERNU-03" - ConfigFlags.IOVDb.GlobalTag = "OFLCOND-FASER-02" + ConfigFlags.IOVDb.GlobalTag = "OFLCOND-FASER-03" else: print("Invalid run type found:", runtype) diff --git a/Control/CalypsoExample/Reconstruction/scripts/faser_reco.py b/Control/CalypsoExample/Reconstruction/scripts/faser_reco.py index 967526945aad16c48b1221aab12e97722c42c52c..4daa95ec84edf0369cc6369d2334bb64c8d71885 100755 --- a/Control/CalypsoExample/Reconstruction/scripts/faser_reco.py +++ b/Control/CalypsoExample/Reconstruction/scripts/faser_reco.py @@ -119,12 +119,14 @@ elif runtype == "TestBeamData" or runtype == "TestBeamMC": # New TI12 geometry (ugh) elif runtype == "TI12Data02": ConfigFlags.GeoModel.FaserVersion = "FASER-02" - ConfigFlags.IOVDb.GlobalTag = "OFLCOND-FASER-02" + ConfigFlags.IOVDb.GlobalTag = "OFLCOND-FASER-03" # Final 2022 TI12 geometry elif runtype == "TI12Data03": ConfigFlags.GeoModel.FaserVersion = 
"FASERNU-03" - ConfigFlags.IOVDb.GlobalTag = "OFLCOND-FASER-02" + # ConfigFlags.IOVDb.GlobalTag = "OFLCOND-FASER-02" + # Use the updated field map + ConfigFlags.IOVDb.GlobalTag = "OFLCOND-FASER-03" else: print("Invalid run type found:", runtype) @@ -183,9 +185,13 @@ acc.merge(LHCDataAlgCfg(ConfigFlags)) from WaveRecAlgs.WaveRecAlgsConfig import WaveformReconstructionCfg acc.merge(WaveformReconstructionCfg(ConfigFlags)) -# Not ready for primetime -# from CaloRecAlgs.CaloRecAlgsConfig import CalorimeterReconstructionCfg -# acc.merge(CalorimeterReconstructionCfg(ConfigFlags)) +# Calorimeter reconstruction +if args.isMC: + # Not ready for MC quite yet + pass +else: + from CaloRecAlgs.CaloRecAlgsConfig import CalorimeterReconstructionCfg + acc.merge(CalorimeterReconstructionCfg(ConfigFlags)) # Tracker clusters from TrackerPrepRawDataFormation.TrackerPrepRawDataFormationConfig import FaserSCT_ClusterizationCfg @@ -257,8 +263,8 @@ from WaveRecAlgs.WaveRecAlgsConfig import WaveformReconstructionOutputCfg acc.merge(WaveformReconstructionOutputCfg(ConfigFlags)) # Calorimeter reconstruction output -# from CaloRecAlgs.CaloRecAlgsConfig import CalorimeterReconstructionOutputCfg -# acc.merge(CalorimeterReconstructionOutputCfg(ConfigFlags)) +from CaloRecAlgs.CaloRecAlgsConfig import CalorimeterReconstructionOutputCfg +acc.merge(CalorimeterReconstructionOutputCfg(ConfigFlags)) # Check what we have print( "Writing out xAOD objects:" ) diff --git a/Control/CalypsoExample/Reconstruction/scripts/submit_faserMDC_reco.sh b/Control/CalypsoExample/Reconstruction/scripts/submit_faserMDC_reco.sh index e4d93ba6487000c7c939ee57d79dd704dad96324..0f9cb4479a5130508a58f91efd11b3538b97ebf2 100755 --- a/Control/CalypsoExample/Reconstruction/scripts/submit_faserMDC_reco.sh +++ b/Control/CalypsoExample/Reconstruction/scripts/submit_faserMDC_reco.sh @@ -163,7 +163,7 @@ else tagstr="--reco=$tag" fi # -faser_reco.py "--nevents=$nevents" $tagstr "$file_path" +faser_reco.py "--nevents=$nevents" --isMC $tagstr "$file_path" # # Print out ending time date diff --git a/Control/CalypsoExample/SimHitExample/python/SimHitExampleConfig.py b/Control/CalypsoExample/SimHitExample/python/SimHitExampleConfig.py index 5e71b860f80281d8075e993cc3a41c535bfb4c71..aec94afcf8e3007eb454fe0c28f9a2c42ee7ed28 100644 --- a/Control/CalypsoExample/SimHitExample/python/SimHitExampleConfig.py +++ b/Control/CalypsoExample/SimHitExample/python/SimHitExampleConfig.py @@ -32,7 +32,7 @@ if __name__ == "__main__": # Flags for this job ConfigFlags.Input.Files = ["my.HITS.pool.root"] # input file(s) ConfigFlags.Input.isMC = True # Needed to bypass autoconfig - ConfigFlags.IOVDb.GlobalTag = "OFLCOND-FASER-02" # Always needed; must match FaserVersion + ConfigFlags.IOVDb.GlobalTag = "OFLCOND-FASER-03" # Always needed; must match FaserVersion ConfigFlags.GeoModel.FaserVersion = "FASERNU-03" # Default FASER geometry ConfigFlags.Detector.GeometryEmulsion = True ConfigFlags.Detector.GeometryTrench = True diff --git a/Control/CalypsoExample/Simulation/scripts/faserMDC_simulate.py b/Control/CalypsoExample/Simulation/scripts/faserMDC_simulate.py index 0fd9622db5de8630ffaef34bd2ae4b6eabe88a5c..1cdb68251fd2f35df0e66382b76822a9bedfc5ec 100755 --- a/Control/CalypsoExample/Simulation/scripts/faserMDC_simulate.py +++ b/Control/CalypsoExample/Simulation/scripts/faserMDC_simulate.py @@ -147,7 +147,7 @@ if __name__ == '__main__': ConfigFlags.addFlag("Sim.Beam.yshift", args.yshift) ConfigFlags.GeoModel.FaserVersion = "FASERNU-03" # Geometry set-up - ConfigFlags.IOVDb.GlobalTag = 
"OFLCOND-FASER-02" # Conditions set-up + ConfigFlags.IOVDb.GlobalTag = "OFLCOND-FASER-03" # Conditions set-up ConfigFlags.addFlag("Input.InitialTimeStamp", 0) # To avoid autoconfig ConfigFlags.GeoModel.Align.Dynamic = False diff --git a/Control/CalypsoExample/xAODTruthConversion/scripts/runTruthCnv.py b/Control/CalypsoExample/xAODTruthConversion/scripts/runTruthCnv.py index b51b4d4ba059d571921fe6328e13144ccf104f23..5e63eccee4f4887ff0a986dfe7cdd02e90f59942 100644 --- a/Control/CalypsoExample/xAODTruthConversion/scripts/runTruthCnv.py +++ b/Control/CalypsoExample/xAODTruthConversion/scripts/runTruthCnv.py @@ -52,7 +52,7 @@ if __name__ == "__main__": # Flags for this job ConfigFlags.Input.isMC = True # Needed to bypass autoconfig - ConfigFlags.IOVDb.GlobalTag = "OFLCOND-FASER-02" # Always needed; must match FaserVersion + ConfigFlags.IOVDb.GlobalTag = "OFLCOND-FASER-03" # Always needed; must match FaserVersion ConfigFlags.GeoModel.FaserVersion = "FASERNU-03" # Default FASER geometry ConfigFlags.Input.Files = ['my.HITS.pool.root'] ConfigFlags.Output.doWriteAOD = True diff --git a/Derviation/DerivationAlgs/share/runDerive.py b/Derviation/DerivationAlgs/share/runDerive.py index b8b22c187c15a39993a68aa722e5d6bae3362f7b..019c6ad29ec5fc1134723c8219cba3caaa440be9 100644 --- a/Derviation/DerivationAlgs/share/runDerive.py +++ b/Derviation/DerivationAlgs/share/runDerive.py @@ -48,7 +48,7 @@ if __name__ == "__main__": #"/bundle/data/FASER/Ti12data/filter/r0008/007983/Faser-Physics-007983-TrigMask08-r0008-xAOD.root" ] - ConfigFlags.IOVDb.GlobalTag = "OFLCOND-FASER-02" # Always needed; must match FaserVersionS + ConfigFlags.IOVDb.GlobalTag = "OFLCOND-FASER-03" # Always needed; must match FaserVersionS ConfigFlags.IOVDb.DatabaseInstance = "OFLP200" # Use MC conditions for now ConfigFlags.Input.ProjectName = "data21" # Needed to bypass autoconfig ConfigFlags.Input.isMC = False # Needed to bypass autoconfig diff --git a/Derviation/DerivationAlgs/share/run_streaming.py b/Derviation/DerivationAlgs/share/run_streaming.py index 7ec4d11aa0f18643dfc8ee12e8ebf11e12b29166..13fcf2d7011a32bd8a3f14f166c8ff86ca5596a5 100644 --- a/Derviation/DerivationAlgs/share/run_streaming.py +++ b/Derviation/DerivationAlgs/share/run_streaming.py @@ -13,6 +13,7 @@ if __name__ == "__main__": from CalypsoConfiguration.MainServicesConfig import MainServicesCfg from AthenaPoolCnvSvc.PoolReadConfig import PoolReadCfg from AthenaPoolCnvSvc.PoolWriteConfig import PoolWriteCfg + from FaserGeoModel.FaserGeoModelConfig import FaserGeometryCfg # Set up logging and new style config log.setLevel(DEBUG) @@ -22,7 +23,7 @@ if __name__ == "__main__": "/eos/experiment/faser/rec/2022/p0008/007984/Faser-Physics-007984-00000-p0008-xAOD.root" ] - ConfigFlags.IOVDb.GlobalTag = "OFLCOND-FASER-02" # Always needed; must match FaserVersionS + ConfigFlags.IOVDb.GlobalTag = "OFLCOND-FASER-03" # Always needed; must match FaserVersionS ConfigFlags.IOVDb.DatabaseInstance = "OFLP200" # Use MC conditions for now ConfigFlags.Input.ProjectName = "data21" # Needed to bypass autoconfig ConfigFlags.Input.isMC = False # Needed to bypass autoconfig @@ -42,6 +43,7 @@ if __name__ == "__main__": cfg = MainServicesCfg(ConfigFlags) cfg.merge(PoolReadCfg(ConfigFlags)) cfg.merge(PoolWriteCfg(ConfigFlags)) + cfg.merge(FaserGeometryCfg(ConfigFlags)) # Derivations @@ -75,12 +77,12 @@ if __name__ == "__main__": ) -# # Hack to avoid problem with our use of MC databases when isMC = False -# replicaSvc = acc.getService("DBReplicaSvc") -# replicaSvc.COOLSQLiteVetoPattern = "" -# 
replicaSvc.UseCOOLSQLite = True -# replicaSvc.UseCOOLFrontier = False -# replicaSvc.UseGeomSQLite = True + # Hack to avoid problem with our use of MC databases when isMC = False + replicaSvc = cfg.getService("DBReplicaSvc") + replicaSvc.COOLSQLiteVetoPattern = "" + replicaSvc.UseCOOLSQLite = True + replicaSvc.UseCOOLFrontier = False + replicaSvc.UseGeomSQLite = True # Execute and finish cfg.printConfig(withDetails = True, summariseProps = True, printDefaults = True) diff --git a/DetectorDescription/GeoModel/FaserGeoModel/python/GeoModelInit.py b/DetectorDescription/GeoModel/FaserGeoModel/python/GeoModelInit.py index fbee2144d985412d2624d31e745eec881f3fb54d..4823fcde4cd212a0f3e6446bf690e29c8cf90616 100644 --- a/DetectorDescription/GeoModel/FaserGeoModel/python/GeoModelInit.py +++ b/DetectorDescription/GeoModel/FaserGeoModel/python/GeoModelInit.py @@ -95,9 +95,9 @@ def _setupGeoModel(): # Deal with SCT alignment conditions folders and algorithms - #conddb.addFolderSplitOnline("SCT","/Tracker/Onl/Align","/Tracker/Align",className="AlignableTransformContainer") - print("Override Alignment dbname to OFLP200, fix this when alignment available in CONDBR3") - conddb.addFolder("/Tracker/Align", "SCT_OFL",className="AlignableTransformContainer") + conddb.addFolderSplitOnline("SCT","/Tracker/Onl/Align","/Tracker/Align",className="AlignableTransformContainer") + # print("Override Alignment dbname to OFLP200, fix this when alignment available in CONDBR3") + # conddb.addFolder("/Tracker/Align", "SCT_OFL",className="AlignableTransformContainer") from AthenaCommon.AlgSequence import AthSequencer condSeq = AthSequencer("AthCondSeq") if not hasattr(condSeq, "FaserSCT_AlignCondAlg"): diff --git a/MagneticField/FaserBFieldData/share/FieldSimulation.py b/MagneticField/FaserBFieldData/share/FieldSimulation.py new file mode 100644 index 0000000000000000000000000000000000000000..e17f617bc727f42de17c5aada7addde94761ac7f --- /dev/null +++ b/MagneticField/FaserBFieldData/share/FieldSimulation.py @@ -0,0 +1,254 @@ +# Calculation of the magnetic field from first principles +# @author Dave Casper, Tobias Boeckh +# see also https://github.com/dwcasper/KalmanFilter/blob/master/Field/BField/__init__.py + + +from math import sin, cos, atan2, sqrt, pi +from auto_diff import true_np as np +from scipy.special import iv, kn +from scipy.integrate import quad + +junk = np.seterr() + +# Geometry description +rMin = 0.104 +rMax = rMin + 0.0825 +zMax = [1.5, 1.0, 1.0] +magnetZ = [-0.81232, 0.637726, 1.837726] +m0 = 1.00 # mag +sectorPhi = np.linspace(0, 2 * pi, 17, True) # central phi value for each sector +magPhi = 2 * sectorPhi # angle of the magnetization in each sector +phiConst = 4 * sqrt(2 - sqrt(2)) # numerical result of phiPrime integration on curved surface +rhoLimit = rMin / 100 # region to use linear approx for rho +proj = np.zeros(17) +magVector = {} + +for j in range(0, 16, True): + magLo = m0 * np.array([cos(magPhi[j]), sin(magPhi[j]), 0]) + magVector[j] = magLo + magHi = m0 * np.array([cos(magPhi[j + 1]), sin(magPhi[j + 1]), 0]) + phiHat = np.array([-sin(j * pi / 8 + pi / 16), cos(j * pi / 8 + pi / 16), 0]) + proj[j] = np.matmul((magLo - magHi), phiHat) + + +def Field(position): + # expects position in millimeters + return bFieldXYZ(position[0] / 1000, position[1] / 1000, position[2] / 1000) + + +# negative derivative (with respect to rho) of integrand over curved surface +def cylDrho(rho, phi, z, rhoPrime, L, k): + if rho < rhoPrime: + bessel_k = kn(1, k * rhoPrime) + if bessel_k == 0: + return 0 + return ( + -m0 * 
(phiConst / pi**2) * rhoPrime * (iv(0, k * rho) + iv(2, k * rho)) + * bessel_k * cos(k * z) * cos(phi) * sin(k * L / 2) + ) + + else: + bessel_k = kn(0, k * rho) + kn(2, k * rho) + if bessel_k == 0: + return 0 + return ( + -m0 * (phiConst / pi**2) * rhoPrime * iv(1, k * rhoPrime) + * bessel_k * cos(k * z) * cos(phi) * sin(k * L / 2) + ) + + +# negative derivative (with respect to phi) of integrand over curved surface, divided by rho +def cylDphi(rho, phi, z, rhoPrime, L, k): + if rho < rhoLimit: + bessel_k = kn(1, k * rhoPrime) + if bessel_k == 0: + return 0 + return ( + m0 * (phiConst / pi**2) * rhoPrime * bessel_k * cos(k * z) + * sin(k * L / 2) * sin(phi) + ) + elif rho < rhoPrime: + bessel_k = kn(1, k * rhoPrime) + if bessel_k == 0: + return 0 + return ( + m0 * (2 * phiConst / (k * pi**2)) * rhoPrime * iv(1, k * rho) + * bessel_k * cos(k * z) * sin(k * L / 2) * sin(phi) / rho + ) + else: + bessel_k = kn(1, k * rho) + if bessel_k == 0: + return 0 + return ( + m0 * (2 * phiConst / (k * pi**2)) * rhoPrime * iv(1, k * rhoPrime) + * bessel_k * cos(k * z) * sin(k * L / 2) * sin(phi) / rho + ) + + +# negative derivative (with respect to z) of integrand over curved surface; valid for rho < rhoPrime +def cylDz(rho, phi, z, rhoPrime, L, k): + if rho < rhoPrime: + bessel_k = kn(1, k * rhoPrime) + if bessel_k == 0: + return 0 + return ( + -m0 * (2 * phiConst / pi**2) * rhoPrime * iv(1, k * rho) + * bessel_k * sin(k * z) * sin(k * L / 2) * cos(phi) + ) + else: + bessel_k = kn(1, k * rho) + if bessel_k == 0: + return 0 + return ( + m0 * (2 * phiConst / pi**2) * rhoPrime * iv(1, k * rhoPrime) + * bessel_k * sin(k * z) * sin(k * L / 2) * cos(phi) + ) + + +# negative derivative (with respect to rho) of integrand over sector boundary surface +def planeDrho(rho, phi, z, j, L, rhoPrime): + phiPrime = j * pi / 8 + pi / 16 + aSq = rho**2 + rhoPrime**2 - 2 * rho * rhoPrime * cos(phi - phiPrime) + aSqMinus = aSq + (z - L / 2) ** 2 + aSqPlus = aSq + (z + L / 2) ** 2 + return ( + (proj[j] / (4 * pi)) * (rho - rhoPrime * cos(phi - phiPrime)) + * ( + 1 / ((sqrt(aSqPlus) + z + L / 2) * sqrt(aSqPlus)) + - 1 / ((sqrt(aSqMinus) + z - L / 2) * sqrt(aSqMinus)) + ) + ) + + +# negative derivative (with respect to phi) of integrand over sector boundary surface, divided by rho +def planeDphi(rho, phi, z, j, L, rhoPrime): + phiPrime = j * pi / 8 + pi / 16 + aSq = rho**2 + rhoPrime**2 - 2 * rho * rhoPrime * cos(phi - phiPrime) + aSqMinus = aSq + (z - L / 2) ** 2 + aSqPlus = aSq + (z + L / 2) ** 2 + return ( + (proj[j] / (4 * pi)) * rhoPrime + * ( + 1 / ((sqrt(aSqPlus) + z + L / 2) * sqrt(aSqPlus)) + - 1 / ((sqrt(aSqMinus) + z - L / 2) * sqrt(aSqMinus)) + ) + * sin(phi - phiPrime) + ) + + +# negative derivative (with respect to z) of integrand over sector boundary surface, divided by rho +def planeDz(rho, phi, z, j, L, rhoPrime): + phiPrime = j * pi / 8 + pi / 16 + aSq = rho**2 + rhoPrime**2 - 2 * rho * rhoPrime * cos(phi - phiPrime) + aSqMinus = aSq + (z - L / 2) ** 2 + aSqPlus = aSq + (z + L / 2) ** 2 + return (proj[j] / (4 * pi)) * (1 / sqrt(aSqPlus) - 1 / sqrt(aSqMinus)) + + +def field(rho, phi, z, L): + fieldCylRho = lambda k: -cylDrho(rho, phi, z, rMin, L, k) + cylDrho( + rho, phi, z, rMax, L, k + ) + fieldPlaneRho = lambda rhoPrime: sum( + list(map(lambda j: planeDrho(rho, phi, z, j, L, rhoPrime), range(16))) + ) + + fieldCylPhi = lambda k: -cylDphi(rho, phi, z, rMin, L, k) + cylDphi( + rho, phi, z, rMax, L, k + ) + fieldPlanePhi = lambda rhoPrime: sum( + list(map(lambda j: planeDphi(rho, phi, z, j, L, rhoPrime), 
range(16))) + ) + + fieldCylZ = lambda k: -cylDz(rho, phi, z, rMin, L, k) + cylDz( + rho, phi, z, rMax, L, k + ) + fieldPlaneZ = lambda rhoPrime: sum( + list(map(lambda j: planeDz(rho, phi, z, j, L, rhoPrime), range(16))) + ) + myLimit = 1000 + return np.array( + [ + -quad(fieldCylRho, 0, np.inf, limit=myLimit)[0] + + quad(fieldPlaneRho, rMin, rMax, limit=myLimit)[0], + -quad(fieldCylPhi, 0, np.inf, limit=myLimit)[0] + + quad(fieldPlanePhi, rMin, rMax, limit=myLimit)[0], + quad(fieldCylZ, 0, np.inf, limit=myLimit)[0] + + quad(fieldPlaneZ, rMin, rMax, limit=myLimit)[0], + ] + ) + + +def mField(rho, phi, z): + magnetization = np.zeros(3) + if phi < 0: + phi = phi + 2 * pi + for iz in range(3): + if ( + z < (magnetZ[iz] - zMax[iz] / 2) + or z > (magnetZ[iz] + zMax[iz] / 2) + or rho < rMin + or rho > rMax + ): + continue + j = (phi + pi / 16) // (pi / 8) + j = j % 16 + magnetization = magVector[j] # this is cartesian! + break + rhoUnit = np.array([cos(phi), sin(phi), 0]) + phiUnit = np.array([-sin(phi), cos(phi), 0]) + zUnit = np.array([0, 0, 1]) + return np.array( + [ + np.matmul(magnetization, rhoUnit), + np.matmul(magnetization, phiUnit), + np.matmul(magnetization, zUnit), + ] + ) + + +def mFieldXYZ(x, y, z): + rho = sqrt(x**2 + y**2) + if rho > 0: + phi = atan2(y, x) + else: + phi = 0 + + m = mField(rho, phi, z) + return np.array( + [m[0] * cos(phi) - m[1] * sin(phi), m[0] * sin(phi) + m[1] * cos(phi), m[2]] + ) + + +def bField(rho, phi, z): + return ( + hField(rho, phi, z - magnetZ[0], zMax[0]) + + hField(rho, phi, z - magnetZ[1], zMax[1]) + + hField(rho, phi, z - magnetZ[2], zMax[2]) + + mField(rho, phi, z) + ) + + +def bFieldXYZ(x, y, z): + return hFieldXYZ(x, y, z) + mFieldXYZ(x, y, z) + + +def hField(rho, phi, z): + return ( + field(rho, phi, z - magnetZ[0], zMax[0]) + + field(rho, phi, z - magnetZ[1], zMax[1]) + + field(rho, phi, z - magnetZ[2], zMax[2]) + ) + + +def hFieldXYZ(x, y, z): + rho = sqrt(x**2 + y**2) + if rho > 0: + phi = atan2(y, x) + else: + phi = 0 + + h = hField(rho, phi, z) + return np.array( + [h[0] * cos(phi) - h[1] * sin(phi), h[0] * sin(phi) + h[1] * cos(phi), h[2]] + ) diff --git a/PhysicsAnalysis/NeutrinoSearch/python/FilterSearchConfig.py b/PhysicsAnalysis/NeutrinoSearch/python/FilterSearchConfig.py index 9b76c14b8b6682b6cfe77ed3331467d4f9737cfe..1bea526ebb4ba14c750d89a652ee6fbe166db355 100644 --- a/PhysicsAnalysis/NeutrinoSearch/python/FilterSearchConfig.py +++ b/PhysicsAnalysis/NeutrinoSearch/python/FilterSearchConfig.py @@ -482,7 +482,7 @@ if __name__ == "__main__": ] - ConfigFlags.IOVDb.GlobalTag = "OFLCOND-FASER-02" # Always needed; must match FaserVersionS + ConfigFlags.IOVDb.GlobalTag = "OFLCOND-FASER-03" # Always needed; must match FaserVersionS ConfigFlags.IOVDb.DatabaseInstance = "OFLP200" # Use MC conditions for now ConfigFlags.Input.ProjectName = "data21" # Needed to bypass autoconfig ConfigFlags.Input.isMC = False # Needed to bypass autoconfig diff --git a/PhysicsAnalysis/NeutrinoSearch/python/FlukaSearchConfig.py b/PhysicsAnalysis/NeutrinoSearch/python/FlukaSearchConfig.py index 4e196a3794670d241bf8a26b91d409d260ac193d..b26b82df91d117c889fead8e5630843620159488 100644 --- a/PhysicsAnalysis/NeutrinoSearch/python/FlukaSearchConfig.py +++ b/PhysicsAnalysis/NeutrinoSearch/python/FlukaSearchConfig.py @@ -92,6 +92,7 @@ if __name__ == "__main__": '/run/media/dcasper/Data/faser/fluka/210002/rec/r0008/FaserMC-MDC_Fluka_unit30_Pm_71m_m3750_v3-210002-00014-s0005-r0008-xAOD.root', 
'/run/media/dcasper/Data/faser/fluka/210002/rec/r0008/FaserMC-MDC_Fluka_unit30_Pm_71m_m3750_v3-210002-00015-s0005-r0008-xAOD.root' ] + # Update this for samples with new field map ConfigFlags.IOVDb.GlobalTag = "OFLCOND-FASER-02" # Always needed; must match FaserVersionS ConfigFlags.IOVDb.DatabaseInstance = "OFLP200" # Use MC conditions for now ConfigFlags.Input.ProjectName = "data21" # Needed to bypass autoconfig diff --git a/PhysicsAnalysis/NtupleDumper/CMakeLists.txt b/PhysicsAnalysis/NtupleDumper/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..944ed9689053915dd526251fabf1c5b1af5dfa4e --- /dev/null +++ b/PhysicsAnalysis/NtupleDumper/CMakeLists.txt @@ -0,0 +1,12 @@ +atlas_subdir(NtupleDumper) + +atlas_add_component( + NtupleDumper + src/NtupleDumperAlg.h + src/NtupleDumperAlg.cxx + src/component/NtupleDumper_entries.cxx + LINK_LIBRARIES AthenaBaseComps StoreGateLib xAODFaserWaveform xAODFaserTrigger ScintIdentifier FaserCaloIdentifier GeneratorObjects FaserActsGeometryLib TrackerSimEvent TrackerSimData TrackerIdentifier TrackerReadoutGeometry TrkTrack GeoPrimitives TrackerRIO_OnTrack TrackerSpacePoint +) + +atlas_install_python_modules(python/*.py) +atlas_install_scripts(scripts/*.py) diff --git a/PhysicsAnalysis/NtupleDumper/python/NtupleDumperConfig.py b/PhysicsAnalysis/NtupleDumper/python/NtupleDumperConfig.py new file mode 100644 index 0000000000000000000000000000000000000000..5bc52c41df03c934465f97658a39aeb342350631 --- /dev/null +++ b/PhysicsAnalysis/NtupleDumper/python/NtupleDumperConfig.py @@ -0,0 +1,104 @@ +""" + Copyright (C) 2002-2022 CERN for the benefit of the ATLAS collaboration +""" + +from AthenaConfiguration.ComponentAccumulator import ComponentAccumulator +from AthenaConfiguration.ComponentFactory import CompFactory +from MagFieldServices.MagFieldServicesConfig import MagneticFieldSvcCfg + +def NtupleDumperAlgCfg(flags, **kwargs): + # Initialize GeoModel + from FaserGeoModel.FaserGeoModelConfig import FaserGeometryCfg + acc = FaserGeometryCfg(flags) + + acc.merge(MagneticFieldSvcCfg(flags)) + # acc.merge(FaserActsTrackingGeometrySvcCfg(flags)) + # acc.merge(FaserActsAlignmentCondAlgCfg(flags)) + + actsExtrapolationTool = CompFactory.FaserActsExtrapolationTool("FaserActsExtrapolationTool") + actsExtrapolationTool.MaxSteps = 1000 + actsExtrapolationTool.TrackingGeometryTool = CompFactory.FaserActsTrackingGeometryTool("TrackingGeometryTool") + + NtupleDumperAlg = CompFactory.NtupleDumperAlg("NtupleDumperAlg",**kwargs) + NtupleDumperAlg.ExtrapolationTool = actsExtrapolationTool + acc.addEventAlgo(NtupleDumperAlg) + + thistSvc = CompFactory.THistSvc() + thistSvc.Output += ["HIST2 DATAFILE='Data-tuple.root' OPT='RECREATE'"] + acc.addService(thistSvc) + + return acc + +if __name__ == "__main__": + + import sys + from AthenaCommon.Logging import log, logging + from AthenaCommon.Constants import DEBUG, VERBOSE, INFO + from AthenaCommon.Configurable import Configurable + from CalypsoConfiguration.AllConfigFlags import ConfigFlags + from AthenaConfiguration.TestDefaults import defaultTestFiles + from CalypsoConfiguration.MainServicesConfig import MainServicesCfg + from AthenaPoolCnvSvc.PoolReadConfig import PoolReadCfg + # from OutputStreamAthenaPool.OutputStreamConfig import OutputStreamCfg + + # Set up logging and new style config + log.setLevel(INFO) + Configurable.configurableRun3Behavior = True + + # Configure + ConfigFlags.Input.Files = [ + '/eos/experiment/faser/rec/2022/p0008//008119/Faser-Physics-008119-00168-p0008-xAOD.root', + + + ] + 
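+    # A single reconstructed xAOD file is listed by hand here for interactive tests;
+    # analyzeRun.py (added below) builds the equivalent file list with a glob over
+    # /eos/experiment/faser/rec/2022/<ptag>/<run> for batch processing.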
ConfigFlags.IOVDb.GlobalTag = "OFLCOND-FASER-02" # Always needed; must match FaserVersionS + ConfigFlags.IOVDb.DatabaseInstance = "OFLP200" # Use MC conditions for now + ConfigFlags.Input.ProjectName = "data21" # Needed to bypass autoconfig + ConfigFlags.Input.isMC = False # Needed to bypass autoconfig + ConfigFlags.GeoModel.FaserVersion = "FASERNU-03" # FASER geometry + ConfigFlags.Common.isOnline = False + ConfigFlags.GeoModel.Align.Dynamic = False + ConfigFlags.Beam.NumberOfCollisions = 0. + + ConfigFlags.Detector.GeometryFaserSCT = True + + ConfigFlags.lock() + + # Core components + acc = MainServicesCfg(ConfigFlags) + acc.merge(PoolReadCfg(ConfigFlags)) + + # algorithm + acc.merge(NtupleDumperAlgCfg(ConfigFlags, UseFlukaWeights=True)) + + # silencio + AthenaEventLoopMgr = CompFactory.AthenaEventLoopMgr() + AthenaEventLoopMgr.EventPrintoutInterval=500 + acc.addService(AthenaEventLoopMgr) + + # # Hack to avoid problem with our use of MC databases when isMC = False + replicaSvc = acc.getService("DBReplicaSvc") + replicaSvc.COOLSQLiteVetoPattern = "" + replicaSvc.UseCOOLSQLite = True + replicaSvc.UseCOOLFrontier = False + replicaSvc.UseGeomSQLite = True + + # Timing + #acc.merge(MergeRecoTimingObjCfg(ConfigFlags)) + + # Dump config + # logging.getLogger('forcomps').setLevel(VERBOSE) + # acc.foreach_component("*").OutputLevel = VERBOSE + # acc.foreach_component("*ClassID*").OutputLevel = INFO + # acc.getCondAlgo("FaserSCT_AlignCondAlg").OutputLevel = VERBOSE + # acc.getCondAlgo("FaserSCT_DetectorElementCondAlg").OutputLevel = VERBOSE + # acc.getService("StoreGateSvc").Dump = True + # acc.getService("ConditionStore").Dump = True + # acc.printConfig(withDetails=True) + # ConfigFlags.dump() + + # Execute and finish + sc = acc.run(maxEvents=-1) + + # Success should be 0 + sys.exit(not sc.isSuccess()) diff --git a/PhysicsAnalysis/NtupleDumper/scripts/analyzeNtuple.py b/PhysicsAnalysis/NtupleDumper/scripts/analyzeNtuple.py new file mode 100755 index 0000000000000000000000000000000000000000..7d735f1599e455001162fff0e5a33acad16d2e35 --- /dev/null +++ b/PhysicsAnalysis/NtupleDumper/scripts/analyzeNtuple.py @@ -0,0 +1,235 @@ +#!/usr/bin/env python + +# Set up (Py)ROOT. 
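+# analyzeNtuple.py <run_number | all_high | all_low>
+#
+# Chains the NtupleDumper output trees ("nt") for the requested run(s), fills calorimeter
+# charge/peak spectra, track-extrapolation maps and upstream/downstream momentum-difference
+# profiles, fits a Landau and a Landau (x) Gaussian to extract the per-channel MIP response,
+# and writes all plots to a single PDF.  Examples (assuming the EOS ntuple area is accessible):
+#   analyzeNtuple.py 8137        # single run; gain configuration taken from the online run table
+#   analyzeNtuple.py all_high    # chain every high-gain run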
+import ROOT +import glob +import sys +import pandas as pd + + +# Define a Landau convoluted with a gaussian for MIP fitting +landguas_conv = ROOT.TF1Convolution("landau","gaus",-10,100,True) # the functions have to be close to zero at min and max bin of convolution or else circular Fourier transform will move convolve values at max and min +landguas_conv.SetNofPointsFFT(10000) +landgaus = ROOT.TF1("landgaus",landguas_conv, -10, 100, landguas_conv.GetNpar()) +landgaus.SetParNames("Landau constant","Landau MPV","Landau width","Gaussian mean","Gaussian width") + +user_input = str(sys.argv[1]) # set to either 'all_high', 'all_low', or a run number + +t = ROOT.TChain("nt") +nfiles = 0 +all_run_paths = glob.glob("/eos/project/f/faser-commissioning/DeionsNtuples/*") + +if user_input=="all_high": + runconfig = "High_gain" + print("processing high-gain runs") + gain = 30.0 + for run_path in all_run_paths: + nfiles += t.Add(run_path+"/Data-tuple-High_gain*.root") # chain all ntuples from all runs that are high gain + rootFile = ROOT.TFile("/eos/project/f/faser-commissioning/DeionsNtuples/7930/Data-tuple-High_gain-007930-00000-100.root"); # load file from largest high gain run to get noise histograms +elif user_input=="all_low": + runconfig = "Low_gain" + print("processing low-gain runs") + gain = 1.0 + for run_path in all_run_paths: + nfiles += t.Add(run_path+"/Data-tuple-Low_gain*.root") # chain all ntuples from all runs that are high gain + rootFile = ROOT.TFile("/eos/project/f/faser-commissioning/DeionsNtuples/8137/Data-tuple-Low_gain-008137-00000-100.root"); # load file from largest low gain run to get noise histograms +else: # assume user_input is a run number + # get run configuration from table oon Brian's website + table_runs = pd.read_html('http://aagaard.web.cern.ch/aagaard/FASERruns.html') # load in run tables from website + df = table_runs[0] # table_runs is a list of all tables on the website, we only want the first one + runconfig=str(df.at[df.loc[df['Run'] == int(user_input)].index[0],'Configuration'].replace(' ','_')) # get config from website run log telling if run is High_gain or Low_gain calo + print("processing run "+runconfig+" ("+runconfig+")") + if runconfig=="High_gain": + gain = 30.0 + elif runconfig=="Low_gain": + gain = 1.0 + else: + print("run config is neither 'High_gain' nor 'Low_gain', calo histogram ranges may be messed up") + gain = 1.0 # assume low gain + + nfiles += t.Add("/eos/project/f/faser-commissioning/DeionsNtuples/"+user_input+"/*.root") # chain all ntuples from all runs that are high gain + rootFile = ROOT.TFile("/eos/project/f/faser-commissioning/DeionsNtuples/"+user_input+"/Data-tuple-"+runconfig+"-00"+user_input+"-00000-100.root"); # load file from largest low gain run to get noise histograms + + + + +print("number of files chained together = ",nfiles) + +#ROOT.gROOT.SetStyle("ATLAS") +#ROOT.gStyle.SetOptStat(111110) #take away option box in histograms +#ROOT.gStyle.SetOptTitle(1) +#ROOT.gStyle.SetOptFit(1) + +# Define histograms here +hCaloCharge = [] +hCaloPeak = [] +hXYvsEcalo = [] +for chan in range(4): + hCaloCharge.append(ROOT.TH1F("hCalo"+str(chan)+"charge", "Charge in calo ch"+str(chan)+";Q (pC);# of events",100,0.2*gain,2.0*gain)) + hCaloPeak.append(ROOT.TH1F("hCalo"+str(chan)+"peak", "Peak in calo ch"+str(chan)+";peak (mV);# of events",100,1.0*gain,5.0*gain)) + hXYvsEcalo.append(ROOT.TProfile2D("hXYvsEcalo"+str(chan)+"" , "Calo ch"+str(chan)+" Charge vs Pos;X pos (mm);Y pos (mm)",26, -130.0, 130.0, 26, -130.0, 130.0)) + +hCaloChargeTotal = 
ROOT.TH1F("hCaloChargeTotal", "Charge in Calo;Charge (pC);# of events",100,0.2*gain,2.0*gain) +hCaloEdep = ROOT.TH1F("hCaloEdep", "Edep in Calo;Edep (GeV);# of events",100,0.0,1.8) + +hCaloThetaX = ROOT.TH1F("hCaloThetaX", "Track #theta_{x} at Calo face;#theta_{x} (radians);# of tracks",100,-0.1,0.1) +hCaloThetaY = ROOT.TH1F("hCaloThetaY", "Track #theta_{y} at Calo face;#theta_{y} (radians);# of tracks",100,-0.1,0.1) + +hTrackPvsPYdiff = ROOT.TProfile("hTrackPvsPYdiff" , "Track #Deltap_{Y}/p vs p;Track p (MeV);(pY_{upstream} - pY_{downstream}) / p_{total}",100, 1000.0, 200000.0) +hTrackPvsPXdiff = ROOT.TProfile("hTrackPvsPXdiff" , "Track #Deltap_{X}/p vs p;Track p (MeV);(pX_{upstream} - pX_{downstream}) / p_{total}",100, 1000.0, 200000.0) + +#t.Print() # will show you all variables in ttree + +i = 0 +for event in t: + i += 1 + + if i%1000 == 0: + print( "Processing event #%i of %i" % (i, t.GetEntries() ) ) + + if event.longTracks > 0: # only process events with at least one track that has hits in last 3 tracking stations + for j in range(event.longTracks): # loop over all long tracks in the event (long = has hits in last 3 tracking stations) + if event.Track_p0[j] != 0.0: + hTrackPvsPYdiff.Fill(event.Track_p0[j],(event.Track_py0[j] - event.Track_py1[j])/event.Track_p0[j]) + hTrackPvsPXdiff.Fill(event.Track_p0[j],(event.Track_px0[j] - event.Track_px1[j])/event.Track_p0[j]) + + #print("track charge = %i and nLayers = %i" % (event.Track_charge[j],event.Track_nLayers[j])) + #print("track upstream (x,y,z) (px,py,pz) = (%f,%f,%f) (%f,%f,%f)" % (event.Track_x0[j],event.Track_y0[j],event.Track_z0[j],event.Track_px0[j],event.Track_py0[j],event.Track_pz0[j])) + #print("track downstream (x,y,z) (px,py,pz) = (%f,%f,%f) (%f,%f,%f)" % (event.Track_x1[j],event.Track_y1[j],event.Track_z1[j],event.Track_px1[j],event.Track_py1[j],event.Track_pz1[j])) + + #print("track at vetoNu (x,y) (thetaX,thetaY) = (%f,%f) (%f,%f)" % (event.Track_X_atVetoNu[j],event.Track_Y_atVetoNu[j],event.Track_ThetaX_atVetoNu[j],event.Track_ThetaY_atVetoNu[j])) + #print("track at Calo (x,y) (thetaX,thetaY) = (%f,%f) (%f,%f)" % (event.Track_X_atCalo[j],event.Track_Y_atCalo[j],event.Track_ThetaX_atCalo[j],event.Track_ThetaY_atCalo[j])) + + #print("number of track segments = ",event.TrackSegments) + #for j in range(event.TrackSegments): + #print("trackseg (x,y,z) (px,py,pz) = (%f,%f,%f) (%f,%f,%f)" % (event.TrackSegment_x[j],event.TrackSegment_y[j],event.TrackSegment_z[j],event.TrackSegment_px[j],event.TrackSegment_py[j],event.TrackSegment_pz[j])) + #print("trackseg chi2 = %i and ndof = %i" % (event.TrackSegment_Chi2[j],event.TrackSegment_nDoF[j])) + + #print("number of SpacePoints = ",event.SpacePoints) + #for j in range(event.SpacePoints): + #print("Spacepoint #",j) + #print("SpacePoint (x,y,z) = (%f,%f,%f)" % (event.SpacePoint_x[j],event.SpacePoint_y[j],event.SpacePoint_z[j])) + + hCaloEdep.Fill(event.Calo_total_Edep) + hCaloChargeTotal.Fill(event.Calo_total_charge) + + x_calo = event.Track_X_atCalo[0] + y_calo = event.Track_Y_atCalo[0] + + hCaloThetaX.Fill(event.Track_ThetaX_atCalo[0]) + hCaloThetaY.Fill(event.Track_ThetaY_atCalo[0]) + + if abs(event.Track_ThetaX_atCalo[0]) > 0.1 or abs(event.Track_ThetaX_atCalo[0]) > 0.1: continue + + for chan,charge in enumerate([event.Calo0_raw_charge,event.Calo1_raw_charge,event.Calo2_raw_charge,event.Calo3_raw_charge]): + if charge > 0.2*gain and charge < 2.0*gain: + hXYvsEcalo[chan].Fill(x_calo,y_calo,charge) + + if x_calo > -60.0 and x_calo < -20.0 and y_calo > -80.0 and y_calo < -10.0: + 
hCaloCharge[0].Fill(event.Calo0_raw_charge) + hCaloPeak[0].Fill(event.Calo0_raw_peak) + elif x_calo > 70.0 and x_calo < 100.0 and y_calo > -90.0 and y_calo < -10.0: + hCaloCharge[1].Fill(event.Calo1_raw_charge) + hCaloPeak[1].Fill(event.Calo1_raw_peak) + elif x_calo > -60.0 and x_calo < -20.0 and y_calo > 20.0 and y_calo < 110.0: + hCaloCharge[2].Fill(event.Calo2_raw_charge) + hCaloPeak[2].Fill(event.Calo2_raw_peak) + elif x_calo > 70.0 and x_calo < 100.0 and y_calo > 20.0 and y_calo < 110.0: + hCaloCharge[3].Fill(event.Calo3_raw_charge) + hCaloPeak[3].Fill(event.Calo3_raw_peak) + +# if i > 10000: +# break + +# create a list of histograms of random event integrals +hRandomCharge = [] +for chan in range(15): + hRandomCharge.append(rootFile.Get("hRandomCharge"+str(chan))) + +# Now make some plots +filename = "analyze-"+runconfig+"-Ntuples.pdf" + +c = ROOT.TCanvas() +c.Print(filename+'[') +hCaloEdep.Draw() +ROOT.gPad.SetLogy() +c.Print(filename) + +c = ROOT.TCanvas() +hCaloChargeTotal.Draw() +c.Print(filename) + +c = ROOT.TCanvas() +c.Divide(2,2) +for chan in range(4): + c.cd(1+chan) + hXYvsEcalo[chan].GetZaxis().SetRangeUser(hCaloCharge[chan].GetMean() - 0.3*hCaloCharge[chan].GetStdDev(),hCaloCharge[chan].GetMean() + 0.4*hCaloCharge[chan].GetStdDev()) + hXYvsEcalo[chan].Draw('COLZ') +c.Print(filename) + +leg = [] +c = ROOT.TCanvas() +c.Divide(2,2) +for chan in range(4): + c.cd(1+chan) + hCaloCharge[chan].Fit("landau") + landgaus.SetParameters(hCaloCharge[chan].GetFunction("landau").GetParameter(0),hCaloCharge[chan].GetFunction("landau").GetParameter(1),hCaloCharge[chan].GetFunction("landau").GetParameter(2),0.0,hRandomCharge[chan].GetStdDev()) + landgaus.SetParLimits(0,0.1*hCaloCharge[chan].GetFunction("landau").GetParameter(0),20.0*hCaloCharge[chan].GetFunction("landau").GetParameter(0)) + landgaus.SetParLimits(1,0.5*hCaloCharge[chan].GetFunction("landau").GetParameter(1),1.2*hCaloCharge[chan].GetFunction("landau").GetParameter(1)) + landgaus.SetParLimits(2,0.1*hCaloCharge[chan].GetFunction("landau").GetParameter(2),1.2*hCaloCharge[chan].GetFunction("landau").GetParameter(2)) + landgaus.FixParameter(3,0.0) + landgaus.FixParameter(4,hRandomCharge[chan].GetStdDev()) # fix gaussian smearing to the noise seen in randomly triggered events + hCaloCharge[chan].Fit("landgaus","+") + hCaloCharge[chan].GetFunction("landgaus").SetLineColor(4) + hCaloCharge[chan].Draw() + + leg.append( ROOT.TLegend(0.55,0.55,0.89,0.75) ) + leg[chan].AddEntry(hCaloCharge[chan].GetFunction("landau"),"Landau MPV = "+str(hCaloCharge[chan].GetFunction("landau").GetParameter(1))[:6]+" #pm "+str(hCaloCharge[chan].GetFunction("landau").GetParError(1))[:6],"L") + leg[chan].AddEntry(hCaloCharge[chan].GetFunction("landgaus"),"Landguas MPV = "+str(hCaloCharge[chan].GetFunction("landgaus").GetParameter(1))[:6]+" #pm "+str(hCaloCharge[chan].GetFunction("landgaus").GetParError(1))[:6],"L") + leg[chan].AddEntry(hCaloCharge[chan].GetFunction("landgaus"),"Landguas gaussian width = "+str(hCaloCharge[chan].GetFunction("landgaus").GetParameter(4))[:6],"") + leg[chan].SetBorderSize(0) + leg[chan].Draw() +c.Print(filename) + +leg = [] +c = ROOT.TCanvas() +c.Divide(2,2) +for chan in range(4): + c.cd(1+chan) + hCaloPeak[chan].Fit("landau") + hCaloPeak[chan].Draw() + + leg.append( ROOT.TLegend(0.55,0.55,0.89,0.75) ) + leg[chan].AddEntry(hCaloPeak[chan].GetFunction("landau"),"Landau MPV = "+str(hCaloPeak[chan].GetFunction("landau").GetParameter(1))[:6]+" #pm "+str(hCaloPeak[chan].GetFunction("landau").GetParError(1))[:6],"L") + 
leg[chan].SetBorderSize(0) + leg[chan].Draw() +c.Print(filename) + +c = ROOT.TCanvas() +c.Divide(1,2) +c.cd(1) +hCaloThetaX.Draw() +c.cd(2) +hCaloThetaY.Draw() +c.Print(filename) + +c = ROOT.TCanvas() +c.Divide(1,2) +c.cd(1) +hTrackPvsPYdiff.GetYaxis().SetRangeUser(hTrackPvsPYdiff.GetMean(2) - hTrackPvsPYdiff.GetStdDev(2), hTrackPvsPYdiff.GetMean(2) + hTrackPvsPYdiff.GetStdDev(2)) +hTrackPvsPYdiff.Draw() +c.cd(2) +hTrackPvsPXdiff.GetYaxis().SetRangeUser(hTrackPvsPXdiff.GetMean(2) - hTrackPvsPXdiff.GetStdDev(2), hTrackPvsPXdiff.GetMean(2) + hTrackPvsPXdiff.GetStdDev(2)) +hTrackPvsPXdiff.Draw() +c.Print(filename) + +c = ROOT.TCanvas() +c.Divide(4,4) +for chan in range(15): + c.cd(1+chan) + hRandomCharge[chan].Draw() +c.Print(filename) + +# Must close file at the end +c.Print(filename+']') + diff --git a/PhysicsAnalysis/NtupleDumper/scripts/analyzeRun.py b/PhysicsAnalysis/NtupleDumper/scripts/analyzeRun.py new file mode 100755 index 0000000000000000000000000000000000000000..c8ab9bb76af0be2c537fd201c6ffb2387bbbd319 --- /dev/null +++ b/PhysicsAnalysis/NtupleDumper/scripts/analyzeRun.py @@ -0,0 +1,127 @@ +#!/usr/bin/env python3 + +""" + Copyright (C) 2002-2022 CERN for the benefit of the FASER collaboration +""" + +from AthenaConfiguration.ComponentAccumulator import ComponentAccumulator +from AthenaConfiguration.ComponentFactory import CompFactory +from MagFieldServices.MagFieldServicesConfig import MagneticFieldSvcCfg + + +def NtupleDumperAlgCfg(flags, OutName, **kwargs): + # Initialize GeoModel + from FaserGeoModel.FaserGeoModelConfig import FaserGeometryCfg + acc = FaserGeometryCfg(flags) + + acc.merge(MagneticFieldSvcCfg(flags)) + # acc.merge(FaserActsTrackingGeometrySvcCfg(flags)) + # acc.merge(FaserActsAlignmentCondAlgCfg(flags)) + + actsExtrapolationTool = CompFactory.FaserActsExtrapolationTool("FaserActsExtrapolationTool") + actsExtrapolationTool.MaxSteps = 10000 + actsExtrapolationTool.TrackingGeometryTool = CompFactory.FaserActsTrackingGeometryTool("TrackingGeometryTool") + + NtupleDumperAlg = CompFactory.NtupleDumperAlg("NtupleDumperAlg",**kwargs) + NtupleDumperAlg.ExtrapolationTool = actsExtrapolationTool + acc.addEventAlgo(NtupleDumperAlg) + + thistSvc = CompFactory.THistSvc() + thistSvc.Output += [f"HIST2 DATAFILE='{OutName}' OPT='RECREATE'"] + acc.addService(thistSvc) + + return acc + +if __name__ == "__main__": + + import glob + import sys + import ROOT + + runno=int(sys.argv[1]) + num=int(sys.argv[2]) + filesPerJob=int(sys.argv[3]) + run_config=str(sys.argv[4]) + + ptag="p0008" + + from AthenaCommon.Logging import log, logging + from AthenaCommon.Constants import DEBUG, VERBOSE, INFO + from AthenaCommon.Configurable import Configurable + from CalypsoConfiguration.AllConfigFlags import ConfigFlags + from AthenaConfiguration.TestDefaults import defaultTestFiles + from CalypsoConfiguration.MainServicesConfig import MainServicesCfg + from AthenaPoolCnvSvc.PoolReadConfig import PoolReadCfg + # from OutputStreamAthenaPool.OutputStreamConfig import OutputStreamCfg + # Set up logging and new style config + log.setLevel(INFO) + Configurable.configurableRun3Behavior = True + + dataDir=f"/eos/experiment/faser/rec/2022/{ptag}/{runno:06d}" + files=sorted(glob.glob(f"{dataDir}/Faser-Physics*")) + fileListInitial=files[num*filesPerJob:(num+1)*filesPerJob] + fileList=[] + for fName in fileListInitial: + try: + fh=ROOT.TFile(fName) + fileList.append(fName) + except OSError: + print("Warning bad file: ",fName) + + log.info(f"Analyzing Run {runno} files {num*filesPerJob} to {(num+1)*filesPerJob} 
(num={num})") + log.info(f"Got {len(fileList)} files out of {len(fileListInitial)}") + + outName=f"Data-tuple-{run_config}-{runno:06d}-{num:05d}-{filesPerJob}.root" + + # Configure + ConfigFlags.Input.Files = fileList + ConfigFlags.IOVDb.GlobalTag = "OFLCOND-FASER-02" # Always needed; must match FaserVersionS + ConfigFlags.IOVDb.DatabaseInstance = "OFLP200" # Use MC conditions for now + ConfigFlags.Input.ProjectName = "data21" # Needed to bypass autoconfig + ConfigFlags.Input.isMC = False # Needed to bypass autoconfig + ConfigFlags.GeoModel.FaserVersion = "FASERNU-03" # FASER geometry + ConfigFlags.Common.isOnline = False + ConfigFlags.GeoModel.Align.Dynamic = False + ConfigFlags.Beam.NumberOfCollisions = 0. + + ConfigFlags.Detector.GeometryFaserSCT = True + + ConfigFlags.lock() + + # Core components + acc = MainServicesCfg(ConfigFlags) + acc.merge(PoolReadCfg(ConfigFlags)) + + # algorithm + acc.merge(NtupleDumperAlgCfg(ConfigFlags, outName, UseFlukaWeights=True, CaloConfig=run_config)) + + AthenaEventLoopMgr = CompFactory.AthenaEventLoopMgr() + AthenaEventLoopMgr.EventPrintoutInterval=1000 + acc.addService(AthenaEventLoopMgr) + + # # Hack to avoid problem with our use of MC databases when isMC = False + replicaSvc = acc.getService("DBReplicaSvc") + replicaSvc.COOLSQLiteVetoPattern = "" + replicaSvc.UseCOOLSQLite = True + replicaSvc.UseCOOLFrontier = False + replicaSvc.UseGeomSQLite = True + + # Timing + #acc.merge(MergeRecoTimingObjCfg(ConfigFlags)) + + # Dump config + # logging.getLogger('forcomps').setLevel(VERBOSE) + # acc.foreach_component("*").OutputLevel = VERBOSE + # acc.foreach_component("*ClassID*").OutputLevel = INFO + # acc.getCondAlgo("FaserSCT_AlignCondAlg").OutputLevel = VERBOSE + # acc.getCondAlgo("FaserSCT_DetectorElementCondAlg").OutputLevel = VERBOSE + # acc.getService("StoreGateSvc").Dump = True + # acc.getService("ConditionStore").Dump = True + # acc.printConfig(withDetails=True) + # ConfigFlags.dump() + + # Execute and finish + sc = acc.run(maxEvents=-1) + + # Success should be 0 + sys.exit(not sc.isSuccess()) diff --git a/PhysicsAnalysis/NtupleDumper/scripts/analyzeRun.sh b/PhysicsAnalysis/NtupleDumper/scripts/analyzeRun.sh new file mode 100755 index 0000000000000000000000000000000000000000..dc42bc521c678a3bed8536e93b6b68ad25549ca7 --- /dev/null +++ b/PhysicsAnalysis/NtupleDumper/scripts/analyzeRun.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +runno=$1 +num=$2 +filesPerJob=$3 +runconfig=$4 + +WD=$PWD + +cd /afs/cern.ch/user/d/dfellers/faser +source setup.sh +cd build +source x86_64-centos7-gcc11-opt/setup.sh +cd $WD +echo "Starting analysis" +analyzeRun.py $runno $num $filesPerJob $runconfig +cp Data-tuple*.root /eos/project/f/faser-commissioning/DeionsNtuples/$runno/ diff --git a/PhysicsAnalysis/NtupleDumper/scripts/submitAllJobsThatHaveErrorLogs.py b/PhysicsAnalysis/NtupleDumper/scripts/submitAllJobsThatHaveErrorLogs.py new file mode 100755 index 0000000000000000000000000000000000000000..5f0805b7d876e4573c5cbab096deba0364314726 --- /dev/null +++ b/PhysicsAnalysis/NtupleDumper/scripts/submitAllJobsThatHaveErrorLogs.py @@ -0,0 +1,53 @@ +#!/usr/bin/env python + +import glob +import os +import sys +import pandas as pd + +print("NtupleDumping all condor jobs that produced non-empty error logs") + +table_runs = pd.read_html('http://aagaard.web.cern.ch/aagaard/FASERruns.html') # load in run tables from website +df = table_runs[0] # table_runs is a list of all tables on the website, we only want the first table + +# make a liist of all runs and batch job numbers that failed and thus 
have error logs that are not empty +run_list = [] +allErrorLogs_list = glob.glob('/afs/cern.ch/user/d/dfellers/faser/ntuple-dumper/run/logs/*/*err') +for error_log in allErrorLogs_list: + if os.path.getsize(error_log) != 0: + print('Error Log is not empty: ', error_log) + run_num = int(error_log.split('/')[-2].split('-')[-1]) + bath_num = int(error_log.split('.')[-2]) + run_list.append([run_num,bath_num]) + +print("list to be re-submitted:", run_list) + +ptag="p0008" +filesPerJob=100 + +for i,run_info in enumerate(run_list): + runno = run_info[0] + batch_number = run_info[1] + + runconfig=str(df.at[df.loc[df['Run'] == runno].index[0],'Configuration'].replace(' ','_')) # get config from website run log telling if run is High_gain or Low_gain calo + + print("%i of %i runs processed. Currently processing run %i-%i (%s)"%(i,len(run_list),runno,batch_number,runconfig)) + + batchFile=f"batch/Run-{runno:06d}-{batch_number}.sub" + fh=open(batchFile,"w") + pwd=os.getenv("PWD") + fh.write(f""" + executable = {pwd}/analyzeRun.sh + arguments = {runno} {batch_number} {filesPerJob} {runconfig} + output = {pwd}/logs/Run-{runno:06d}/batch.{batch_number}.out + error = {pwd}/logs/Run-{runno:06d}/batch.{batch_number}.err + log = {pwd}/logs/Run-{runno:06d}/batch.log + requirements = (Arch == "X86_64" && OpSysAndVer =?= "CentOS7") + getenv = False + transfer_output_files = "" + +JobFlavour = "workday" + queue 1 + """) + fh.close() + os.system(f"echo condor_submit {batchFile}") + os.system(f"condor_submit {batchFile}") diff --git a/PhysicsAnalysis/NtupleDumper/scripts/submitAllStableRuns.py b/PhysicsAnalysis/NtupleDumper/scripts/submitAllStableRuns.py new file mode 100755 index 0000000000000000000000000000000000000000..92f49d3392476a245892a5a322c8cfd5c352fde3 --- /dev/null +++ b/PhysicsAnalysis/NtupleDumper/scripts/submitAllStableRuns.py @@ -0,0 +1,52 @@ +#!/usr/bin/env python + +import glob +import os +import sys +import pandas as pd + +print("NtupleDumping all stable-beam runs found at http://aagaard.web.cern.ch/aagaard/FASERruns.html") + +table_runs = pd.read_html('http://aagaard.web.cern.ch/aagaard/FASERruns.html') # load in run tables from website +df = table_runs[0] # table_runs is a list of all tables on the website, we only want the first table +df.columns = [c.replace(' ', '_') for c in df.columns] # rename the columns such that names with spaces are replaced with '_' (needed to access 'Stable_Beam' column) +df.drop(df[df.Stable_Beam != 'Yes'].index, inplace=True) # drop all runs that are not stable beam runs +df.drop(df[(df.Configuration != 'Low gain') & (df.Configuration != 'High gain')].index, inplace=True) # drop all runs that are not 'Low gain' or 'High gain' + +run_list = df['Run'].tolist() + +ptag="p0008" +filesPerJob=100 + +for i,runno in enumerate(run_list): + runconfig=str(df.at[df.loc[df['Run'] == runno].index[0],'Configuration'].replace(' ','_')) # get config from website run log telling if run is High_gain or Low_gain calo + + print("%i of %i runs processed. 
Currently processing run %i (%s)"%(i,len(run_list),runno,runconfig)) + + os.system(f"mkdir -p logs/Run-{runno:06d}") + os.system(f"mkdir -p batch") + os.system(f"mkdir -p /eos/project/f/faser-commissioning/DeionsNtuples/{runno}") + + dataDir=f"/eos/experiment/faser/rec/2022/{ptag}/{runno:06d}" + files=glob.glob(f"{dataDir}/Faser-Physics*") + numFiles=len(files) + numJobs=numFiles//filesPerJob+(numFiles%filesPerJob!=0) + batchFile=f"batch/Run-{runno:06d}.sub" + fh=open(batchFile,"w") + pwd=os.getenv("PWD") + fh.write(f""" + executable = {pwd}/analyzeRun.sh + arguments = {runno} $(ProcId) {filesPerJob} {runconfig} + output = {pwd}/logs/Run-{runno:06d}/batch.$(ProcId).out + error = {pwd}/logs/Run-{runno:06d}/batch.$(ProcId).err + log = {pwd}/logs/Run-{runno:06d}/batch.log + requirements = (Arch == "X86_64" && OpSysAndVer =?= "CentOS7") + getenv = False + transfer_output_files = "" + +JobFlavour = "workday" + queue {numJobs} + """) + fh.close() + os.system(f"echo condor_submit {batchFile}") + os.system(f"condor_submit {batchFile}") + diff --git a/PhysicsAnalysis/NtupleDumper/scripts/submitRun.py b/PhysicsAnalysis/NtupleDumper/scripts/submitRun.py new file mode 100755 index 0000000000000000000000000000000000000000..b408759504ab5a1afc1df332154250cfcd7b4325 --- /dev/null +++ b/PhysicsAnalysis/NtupleDumper/scripts/submitRun.py @@ -0,0 +1,42 @@ +#!/usr/bin/env python + +import glob +import os +import sys +import pandas as pd + +table_runs = pd.read_html('http://aagaard.web.cern.ch/aagaard/FASERruns.html') # load in run tables from website +df = table_runs[0] # table_runs is a list of all tables on the website, we only want the first one + +ptag="p0008" +filesPerJob=100 + +runno=int(sys.argv[1]) +runconfig=str(df.at[df.loc[df['Run'] == runno].index[0],'Configuration'].replace(' ','_')) # get config from website run log telling if run is High_gain or Low_gain calo + +os.system(f"mkdir -p logs/Run-{runno:06d}") +os.system(f"mkdir -p batch") +os.system(f"mkdir -p /eos/project/f/faser-commissioning/DeionsNtuples/{runno}") + +dataDir=f"/eos/experiment/faser/rec/2022/{ptag}/{runno:06d}" +files=glob.glob(f"{dataDir}/Faser-Physics*") +numFiles=len(files) +numJobs=numFiles//filesPerJob+(numFiles%filesPerJob!=0) +batchFile=f"batch/Run-{runno:06d}.sub" +fh=open(batchFile,"w") +pwd=os.getenv("PWD") +fh.write(f""" +executable = {pwd}/analyzeRun.sh +arguments = {runno} $(ProcId) {filesPerJob} {runconfig} +output = {pwd}/logs/Run-{runno:06d}/batch.$(ProcId).out +error = {pwd}/logs/Run-{runno:06d}/batch.$(ProcId).err +log = {pwd}/logs/Run-{runno:06d}/batch.log +requirements = (Arch == "X86_64" && OpSysAndVer =?= "CentOS7") +getenv = False +transfer_output_files = "" ++JobFlavour = "workday" +queue {numJobs} +""") +fh.close() +os.system(f"echo condor_submit {batchFile}") + diff --git a/PhysicsAnalysis/NtupleDumper/src/NtupleDumperAlg.cxx b/PhysicsAnalysis/NtupleDumper/src/NtupleDumperAlg.cxx new file mode 100644 index 0000000000000000000000000000000000000000..d1a07f428c73dfb7d6c7f1bf69ab20b73f12a196 --- /dev/null +++ b/PhysicsAnalysis/NtupleDumper/src/NtupleDumperAlg.cxx @@ -0,0 +1,890 @@ +#include "NtupleDumperAlg.h" +#include "TrkTrack/Track.h" +#include "TrackerRIO_OnTrack/FaserSCT_ClusterOnTrack.h" +#include "TrackerIdentifier/FaserSCT_ID.h" +#include "ScintIdentifier/VetoNuID.h" +#include "ScintIdentifier/VetoID.h" +#include "ScintIdentifier/TriggerID.h" +#include "ScintIdentifier/PreshowerID.h" +#include "FaserCaloIdentifier/EcalID.h" +#include "TrackerPrepRawData/FaserSCT_Cluster.h" +#include 
"TrackerSpacePoint/FaserSCT_SpacePoint.h" +#include "Identifier/Identifier.h" +#include "TrackerReadoutGeometry/SCT_DetectorManager.h" +#include "TrackerReadoutGeometry/SiDetectorElement.h" +#include "TrackerPrepRawData/FaserSCT_Cluster.h" +#include "xAODTruth/TruthParticle.h" +#include <cmath> +#include <TH1F.h> + + +NtupleDumperAlg::NtupleDumperAlg(const std::string &name, + ISvcLocator *pSvcLocator) + : AthReentrantAlgorithm(name, pSvcLocator), + AthHistogramming(name), + m_histSvc("THistSvc/THistSvc", name) {} + + +void NtupleDumperAlg::addBranch(const std::string &name, + float* var) { + m_tree->Branch(name.c_str(),var,(name+"/F").c_str()); +} +void NtupleDumperAlg::addBranch(const std::string &name, + unsigned int* var) { + m_tree->Branch(name.c_str(),var,(name+"/I").c_str()); +} + +void NtupleDumperAlg::addWaveBranches(const std::string &name, + int nchannels, + int first) { + for(int ch=0;ch<nchannels;ch++) { + std::string base=name+std::to_string(ch)+"_"; + addBranch(base+"time",&m_wave_localtime[first]); + addBranch(base+"peak",&m_wave_peak[first]); + addBranch(base+"width",&m_wave_width[first]); + addBranch(base+"charge",&m_wave_charge[first]); + addBranch(base+"raw_peak",&m_wave_raw_peak[first]); + addBranch(base+"raw_charge",&m_wave_raw_charge[first]); + addBranch(base+"baseline",&m_wave_baseline_mean[first]); + addBranch(base+"baseline_rms",&m_wave_baseline_rms[first]); + addBranch(base+"status",&m_wave_status[first]); + first++; + } +} + +void NtupleDumperAlg::FillWaveBranches(const xAOD::WaveformHitContainer &wave) const { + for (auto hit : wave) { + if ((hit->hit_status()&2)==0) { // dont store secoondary hits as they can overwrite the primary hit + int ch=hit->channel(); + m_wave_localtime[ch]=hit->localtime()+m_clock_phase; + m_wave_peak[ch]=hit->peak(); + m_wave_width[ch]=hit->width(); + m_wave_charge[ch]=hit->integral()/50; + + m_wave_raw_peak[ch]=hit->raw_peak(); + m_wave_raw_charge[ch]=hit->raw_integral()/50; + m_wave_baseline_mean[ch]=hit->baseline_mean(); + m_wave_baseline_rms[ch]=hit->baseline_rms(); + m_wave_status[ch]=hit->hit_status(); + } + } +} + +StatusCode NtupleDumperAlg::initialize() +{ + ATH_CHECK(m_truthEventContainer.initialize()); + ATH_CHECK(m_truthParticleContainer.initialize()); + ATH_CHECK(m_trackCollection.initialize()); + ATH_CHECK(m_trackSegmentCollection.initialize()); + ATH_CHECK(m_vetoNuContainer.initialize()); + ATH_CHECK(m_vetoContainer.initialize()); + ATH_CHECK(m_triggerContainer.initialize()); + ATH_CHECK(m_preshowerContainer.initialize()); + ATH_CHECK(m_ecalContainer.initialize()); + ATH_CHECK(m_clusterContainer.initialize()); + ATH_CHECK(m_simDataCollection.initialize()); + ATH_CHECK(m_FaserTriggerData.initialize()); + ATH_CHECK(m_ClockWaveformContainer.initialize()); + + ATH_CHECK(detStore()->retrieve(m_sctHelper, "FaserSCT_ID")); + ATH_CHECK(detStore()->retrieve(m_vetoNuHelper, "VetoNuID")); + ATH_CHECK(detStore()->retrieve(m_vetoHelper, "VetoID")); + ATH_CHECK(detStore()->retrieve(m_triggerHelper, "TriggerID")); + ATH_CHECK(detStore()->retrieve(m_preshowerHelper, "PreshowerID")); + ATH_CHECK(detStore()->retrieve(m_ecalHelper, "EcalID")); + + ATH_CHECK(detStore()->retrieve(m_detMgr, "SCT")); + ATH_CHECK(m_extrapolationTool.retrieve()); + ATH_CHECK(m_trackingGeometryTool.retrieve()); + + ATH_CHECK(m_spacePointContainerKey.initialize()); + + if (m_useFlukaWeights) + { + m_baseEventCrossSection = (m_flukaCrossSection * kfemtoBarnsPerMilliBarn)/m_flukaCollisions; + } + else if (m_useGenieWeights) + { + m_baseEventCrossSection = 
1.0/m_genieLuminosity; + } + else + { + m_baseEventCrossSection = 1.0; + } + + m_tree = new TTree("nt", "NtupleDumper tree"); + m_tree->Branch("run", &m_run_number, "run/I"); + m_tree->Branch("eventID", &m_event_number, "eventID/I"); + m_tree->Branch("eventTime", &m_event_time, "eventTime/I"); + m_tree->Branch("BCID", &m_bcid, "BCID/I"); + + m_tree->Branch("TBP", &m_tbp, "TBP/I"); + m_tree->Branch("TAP", &m_tap, "TAP/I"); + m_tree->Branch("inputBits", &m_inputBits, "inputBits/I"); + m_tree->Branch("inputBitsNext", &m_inputBitsNext, "inputBitsNext/I"); + + addWaveBranches("VetoNu",2,4); + addWaveBranches("VetoSt1",2,6); + addWaveBranches("VetoSt2",1,14); + addWaveBranches("Timing",4,8); + addWaveBranches("Preshower",2,12); + addWaveBranches("Calo",4,0); + addBranch("Calo_total_charge", &m_calo_total); + addBranch("Calo_total_raw_charge", &m_calo_rawtotal); + + addBranch("Calo0_Edep", &m_Calo0_Edep); + addBranch("Calo1_Edep", &m_Calo1_Edep); + addBranch("Calo2_Edep", &m_Calo2_Edep); + addBranch("Calo3_Edep", &m_Calo3_Edep); + addBranch("Calo_total_Edep", &m_Calo_Total_Edep); + addBranch("Preshower12_Edep", &m_Preshower12_Edep); + addBranch("Preshower13_Edep", &m_Preshower13_Edep); + + addBranch("nClusters0",&m_station0Clusters); + addBranch("nClusters1",&m_station1Clusters); + addBranch("nClusters2",&m_station2Clusters); + addBranch("nClusters3",&m_station3Clusters); + + addBranch("SpacePoints",&m_nspacepoints); + m_tree->Branch("SpacePoint_x", &m_spacepointX); + m_tree->Branch("SpacePoint_y", &m_spacepointY); + m_tree->Branch("SpacePoint_z", &m_spacepointZ); + + addBranch("TrackSegments",&m_ntracksegs); + m_tree->Branch("TrackSegment_Chi2", &m_trackseg_Chi2); + m_tree->Branch("TrackSegment_nDoF", &m_trackseg_DoF); + m_tree->Branch("TrackSegment_x", &m_trackseg_x); + m_tree->Branch("TrackSegment_y", &m_trackseg_y); + m_tree->Branch("TrackSegment_z", &m_trackseg_z); + m_tree->Branch("TrackSegment_px", &m_trackseg_px); + m_tree->Branch("TrackSegment_py", &m_trackseg_py); + m_tree->Branch("TrackSegment_pz", &m_trackseg_pz); + + m_tree->Branch("longTracks", &m_longTracks, "longTracks/I"); + m_tree->Branch("Track_Chi2", &m_Chi2); + m_tree->Branch("Track_nDoF", &m_DoF); + m_tree->Branch("Track_x0", &m_xup); + m_tree->Branch("Track_y0", &m_yup); + m_tree->Branch("Track_z0", &m_zup); + m_tree->Branch("Track_px0", &m_pxup); + m_tree->Branch("Track_py0", &m_pyup); + m_tree->Branch("Track_pz0", &m_pzup); + m_tree->Branch("Track_p0", &m_pup); + m_tree->Branch("Track_x1", &m_xdown); + m_tree->Branch("Track_y1", &m_ydown); + m_tree->Branch("Track_z1", &m_zdown); + m_tree->Branch("Track_px1", &m_pxdown); + m_tree->Branch("Track_py1", &m_pydown); + m_tree->Branch("Track_pz1", &m_pzdown); + m_tree->Branch("Track_p1", &m_pdown); + m_tree->Branch("Track_charge", &m_charge); + m_tree->Branch("Track_nLayers", &m_nLayers); + + m_tree->Branch("Track_InStation0",&m_nHit0); + m_tree->Branch("Track_InStation1",&m_nHit1); + m_tree->Branch("Track_InStation2",&m_nHit2); + m_tree->Branch("Track_InStation3",&m_nHit3); + + m_tree->Branch("Track_X_atVetoNu", &m_xVetoNu); + m_tree->Branch("Track_Y_atVetoNu", &m_yVetoNu); + m_tree->Branch("Track_ThetaX_atVetoNu", &m_thetaxVetoNu); + m_tree->Branch("Track_ThetaY_atVetoNu", &m_thetayVetoNu); + + m_tree->Branch("Track_X_atVetoStation1", &m_xVetoStation1); + m_tree->Branch("Track_Y_atVetoStation1", &m_yVetoStation1); + m_tree->Branch("Track_ThetaX_atVetoStation1", &m_thetaxVetoStation1); + m_tree->Branch("Track_ThetaY_atVetoStation1", &m_thetayVetoStation1); + + 
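+  // Identical (X, Y, ThetaX, ThetaY) branches follow for the second veto station, the
+  // trigger/timing station, the two preshower layers and the calorimeter face; positions are
+  // in mm and angles in radians, with -10000 kept as a filler when the extrapolation is not
+  // attempted or does not succeed.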
m_tree->Branch("Track_X_atVetoStation2", &m_xVetoStation2); + m_tree->Branch("Track_Y_atVetoStation2", &m_yVetoStation2); + m_tree->Branch("Track_ThetaX_atVetoStation2", &m_thetaxVetoStation2); + m_tree->Branch("Track_ThetaY_atVetoStation2", &m_thetayVetoStation2); + + m_tree->Branch("Track_X_atTrig", &m_xTrig); + m_tree->Branch("Track_Y_atTrig", &m_yTrig); + m_tree->Branch("Track_ThetaX_atTrig", &m_thetaxTrig); + m_tree->Branch("Track_ThetaY_atTrig", &m_thetayTrig); + + m_tree->Branch("Track_X_atPreshower1", &m_xPreshower1); + m_tree->Branch("Track_Y_atPreshower1", &m_yPreshower1); + m_tree->Branch("Track_ThetaX_atPreshower1", &m_thetaxPreshower1); + m_tree->Branch("Track_ThetaY_atPreshower1", &m_thetayPreshower1); + + m_tree->Branch("Track_X_atPreshower2", &m_xPreshower2); + m_tree->Branch("Track_Y_atPreshower2", &m_yPreshower2); + m_tree->Branch("Track_ThetaX_atPreshower2", &m_thetaxPreshower2); + m_tree->Branch("Track_ThetaY_atPreshower2", &m_thetayPreshower2); + + m_tree->Branch("Track_X_atCalo", &m_xCalo); + m_tree->Branch("Track_Y_atCalo", &m_yCalo); + m_tree->Branch("Track_ThetaX_atCalo", &m_thetaxCalo); + m_tree->Branch("Track_ThetaY_atCalo", &m_thetayCalo); + + m_tree->Branch("pTruthLepton", &m_truthLeptonMomentum, "pTruthLepton/D"); + m_tree->Branch("truthBarcode", &m_truthBarcode, "truthBarcode/I"); + m_tree->Branch("truthPdg", &m_truthPdg, "truthPdg/I"); + m_tree->Branch("CrossSection", &m_crossSection, "crossSection/D"); + + ATH_CHECK(histSvc()->regTree("/HIST2/tree", m_tree)); + + // Register histograms + m_HistRandomCharge[0] = new TH1F("hRandomCharge0", "Calo ch0 Charge from Random Events;charge (pC);Events/bin", 100, -1.0, 1.0); + m_HistRandomCharge[1] = new TH1F("hRandomCharge1", "Calo ch1 Charge from Random Events;charge (pC);Events/bin", 100, -1.0, 1.0); + m_HistRandomCharge[2] = new TH1F("hRandomCharge2", "Calo ch2 Charge from Random Events;charge (pC);Events/bin", 100, -1.0, 1.0); + m_HistRandomCharge[3] = new TH1F("hRandomCharge3", "Calo ch3 Charge from Random Events;charge (pC);Events/bin", 100, -1.0, 1.0); + m_HistRandomCharge[4] = new TH1F("hRandomCharge4", "VetoNu ch4 Charge from Random Events;charge (pC);Events/bin", 100, -1.0, 1.0); + m_HistRandomCharge[5] = new TH1F("hRandomCharge5", "VetoNu ch5 Charge from Random Events;charge (pC);Events/bin", 100, -1.0, 1.0); + m_HistRandomCharge[6] = new TH1F("hRandomCharge6", "Veto ch6 Charge from Random Events;charge (pC);Events/bin", 100, -1.0, 1.0); + m_HistRandomCharge[7] = new TH1F("hRandomCharge7", "Veto ch7 Charge from Random Events;charge (pC);Events/bin", 100, -1.0, 1.0); + m_HistRandomCharge[8] = new TH1F("hRandomCharge8", "Trig ch8 Charge from Random Events;charge (pC);Events/bin", 100, -1.0, 1.0); + m_HistRandomCharge[9] = new TH1F("hRandomCharge9", "Trig ch9 Charge from Random Events;charge (pC);Events/bin", 100, -1.0, 1.0); + m_HistRandomCharge[10] = new TH1F("hRandomCharge10", "Trig ch10 Charge from Random Events;charge (pC);Events/bin", 100, -1.0, 1.0); + m_HistRandomCharge[11] = new TH1F("hRandomCharge11", "Trig ch11 Charge from Random Events;charge (pC);Events/bin", 100, -1.0, 1.0); + m_HistRandomCharge[12] = new TH1F("hRandomCharge12", "Preshower ch12 Charge from Random Events;charge (pC);Events/bin", 100, -1.0, 1.0); + m_HistRandomCharge[13] = new TH1F("hRandomCharge13", "Preshower ch13 Charge from Random Events;charge (pC);Events/bin", 100, -1.0, 1.0); + m_HistRandomCharge[14] = new TH1F("hRandomCharge14", "Veto ch14 Charge from Random Events;charge (pC);Events/bin", 100, -1.0, 1.0); + + 
ATH_CHECK(histSvc()->regHist("/HIST2/RandomCharge0", m_HistRandomCharge[0])); + ATH_CHECK(histSvc()->regHist("/HIST2/RandomCharge1", m_HistRandomCharge[1])); + ATH_CHECK(histSvc()->regHist("/HIST2/RandomCharge2", m_HistRandomCharge[2])); + ATH_CHECK(histSvc()->regHist("/HIST2/RandomCharge3", m_HistRandomCharge[3])); + ATH_CHECK(histSvc()->regHist("/HIST2/RandomCharge4", m_HistRandomCharge[4])); + ATH_CHECK(histSvc()->regHist("/HIST2/RandomCharge5", m_HistRandomCharge[5])); + ATH_CHECK(histSvc()->regHist("/HIST2/RandomCharge6", m_HistRandomCharge[6])); + ATH_CHECK(histSvc()->regHist("/HIST2/RandomCharge7", m_HistRandomCharge[7])); + ATH_CHECK(histSvc()->regHist("/HIST2/RandomCharge8", m_HistRandomCharge[8])); + ATH_CHECK(histSvc()->regHist("/HIST2/RandomCharge9", m_HistRandomCharge[9])); + ATH_CHECK(histSvc()->regHist("/HIST2/RandomCharge10", m_HistRandomCharge[10])); + ATH_CHECK(histSvc()->regHist("/HIST2/RandomCharge11", m_HistRandomCharge[11])); + ATH_CHECK(histSvc()->regHist("/HIST2/RandomCharge12", m_HistRandomCharge[12])); + ATH_CHECK(histSvc()->regHist("/HIST2/RandomCharge13", m_HistRandomCharge[13])); + ATH_CHECK(histSvc()->regHist("/HIST2/RandomCharge14", m_HistRandomCharge[14])); + + m_MIP_sim_Edep_calo = 0.0585; // MIP deposits 0.0585 GeV of energy in calo + m_MIP_sim_Edep_preshower = 0.004894; // MIP deposits 0.004894 GeV of energy in a preshower layer + + if (m_doBlinding) { + ATH_MSG_INFO("Blinding will be enforced for real data."); + } else { + ATH_MSG_INFO("Blinding will NOT be enforced for real data."); + } + + return StatusCode::SUCCESS; +} + + +StatusCode NtupleDumperAlg::execute(const EventContext &ctx) const +{ + clearTree(); + + // check if real data or simulation data + bool realData = true; + SG::ReadHandle<xAOD::TruthEventContainer> truthEventContainer { m_truthEventContainer, ctx }; + if (truthEventContainer.isValid() && truthEventContainer->size() > 0) + { + realData = false; + } + + // if real data, store charge in histograms from random events and only fill ntuple from coincidence events + if (realData) { //no trigger simulation yet + SG::ReadHandle<xAOD::FaserTriggerData> triggerData(m_FaserTriggerData, ctx); + m_tap=triggerData->tap(); + if (m_tap==16) { // random trigger, store charge of scintillators in histograms + // Read in Waveform containers + SG::ReadHandle<xAOD::WaveformHitContainer> vetoNuContainer { m_vetoNuContainer, ctx }; + ATH_CHECK(vetoNuContainer.isValid()); + + SG::ReadHandle<xAOD::WaveformHitContainer> vetoContainer { m_vetoContainer, ctx }; + ATH_CHECK(vetoContainer.isValid()); + + SG::ReadHandle<xAOD::WaveformHitContainer> triggerContainer { m_triggerContainer, ctx }; + ATH_CHECK(triggerContainer.isValid()); + + SG::ReadHandle<xAOD::WaveformHitContainer> preshowerContainer { m_preshowerContainer, ctx }; + ATH_CHECK(preshowerContainer.isValid()); + + SG::ReadHandle<xAOD::WaveformHitContainer> ecalContainer { m_ecalContainer, ctx }; + ATH_CHECK(ecalContainer.isValid()); + + if (vetoNuContainer.isValid()) { + for (auto hit : *vetoNuContainer) { + int ch=hit->channel(); + m_HistRandomCharge[ch]->Fill(hit->raw_integral()/50.0); + } + } + if (vetoContainer.isValid()) { + for (auto hit : *vetoContainer) { + int ch=hit->channel(); + m_HistRandomCharge[ch]->Fill(hit->raw_integral()/50.0); + } + } + if (triggerContainer.isValid()) { + for (auto hit : *triggerContainer) { + int ch=hit->channel(); + m_HistRandomCharge[ch]->Fill(hit->raw_integral()/50.0); + } + } + if (preshowerContainer.isValid()) { + for (auto hit : *preshowerContainer) { + int 
ch=hit->channel(); + m_HistRandomCharge[ch]->Fill(hit->raw_integral()/50.0); + } + } + if (ecalContainer.isValid()) { + for (auto hit : *ecalContainer) { + int ch=hit->channel(); + m_HistRandomCharge[ch]->Fill(hit->raw_integral()/50.0); + } + } + + return StatusCode::SUCCESS; // finished with this event + + } else if ( ((m_tap&8)==0) && (((m_tap&4)==0)||((m_tap&2)==0)) && (((m_tap&4)==0)||((m_tap&1)==0)) && (((m_tap&2)==0)||((m_tap&1)==0)) ) { // don't process events that don't trigger coincidence triggers: 1=calo, 2=veotnu|neto1|preshower, 4=TimingLayer, 8=(VetoNu|Veto2)&Preshower + return StatusCode::SUCCESS; + } + m_tbp=triggerData->tbp(); + m_tap=triggerData->tap(); + m_inputBits=triggerData->inputBits(); + m_inputBitsNext=triggerData->inputBitsNextClk(); + } + + m_run_number = ctx.eventID().run_number(); + m_event_number = ctx.eventID().event_number(); + m_event_time = ctx.eventID().time_stamp(); + m_bcid = ctx.eventID().bunch_crossing_id(); + + if (!realData) { // if simulation find MC cross section and primary lepton + // Work out effective cross section for MC + if (m_useFlukaWeights) + { + double flukaWeight = truthEventContainer->at(0)->weights()[0]; + ATH_MSG_ALWAYS("Found fluka weight = " << flukaWeight); + m_crossSection = m_baseEventCrossSection * flukaWeight; + } + else if (m_useGenieWeights) + { + m_crossSection = m_baseEventCrossSection; + } + else + { + //ATH_MSG_WARNING("Monte carlo event with no weighting scheme specified. Setting crossSection (weight) to " << m_baseEventCrossSection << " fb."); + m_crossSection = m_baseEventCrossSection; + } + + // Find the primary lepton (if any) + SG::ReadHandle<xAOD::TruthParticleContainer> truthParticleContainer { m_truthParticleContainer, ctx }; + if (truthParticleContainer.isValid() && truthParticleContainer->size() > 0) + { + for (auto particle : *truthParticleContainer) + { + if ( particle->absPdgId() == 11 || particle->absPdgId() == 13 || particle->absPdgId() == 15 ) + { + if (particle->status() == 1 && (particle->nParents() == 0 || particle->nParents() == 2) ) + { + m_truthLeptonMomentum = particle->p4().P(); + break; + } + } + } + } + } + + if (realData) { // correct waveform time with clock phase + SG::ReadHandle<xAOD::WaveformClock> clockHandle(m_ClockWaveformContainer, ctx); + ATH_CHECK(clockHandle.isValid()); + + if (clockHandle->phase() < -2.0) { // wrap around clock pahse so -pi goes to pi + m_clock_phase = ((clockHandle->phase() + 3.14159) / 3.14159) * 12.5; + } else { + m_clock_phase = (clockHandle->phase() / 3.14159) * 12.5; + } + } + + SG::ReadHandle<xAOD::WaveformHitContainer> vetoNuContainer { m_vetoNuContainer, ctx }; + ATH_CHECK(vetoNuContainer.isValid()); + + SG::ReadHandle<xAOD::WaveformHitContainer> vetoContainer { m_vetoContainer, ctx }; + ATH_CHECK(vetoContainer.isValid()); + + SG::ReadHandle<xAOD::WaveformHitContainer> triggerContainer { m_triggerContainer, ctx }; + ATH_CHECK(triggerContainer.isValid()); + + SG::ReadHandle<xAOD::WaveformHitContainer> preshowerContainer { m_preshowerContainer, ctx }; + ATH_CHECK(preshowerContainer.isValid()); + + SG::ReadHandle<xAOD::WaveformHitContainer> ecalContainer { m_ecalContainer, ctx }; + ATH_CHECK(ecalContainer.isValid()); + + FillWaveBranches(*vetoNuContainer); + FillWaveBranches(*vetoContainer); + FillWaveBranches(*triggerContainer); + FillWaveBranches(*preshowerContainer); + FillWaveBranches(*ecalContainer); + + m_calo_total=m_wave_charge[0]+m_wave_charge[1]+m_wave_charge[2]+m_wave_charge[3]; + 
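+  // Summed over the four calorimeter channels: the reconstructed charge above and the raw
+  // charge below, both in pC (the waveform integral divided by 50 in FillWaveBranches).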
m_calo_rawtotal=m_wave_raw_charge[0]+m_wave_raw_charge[1]+m_wave_raw_charge[2]+m_wave_raw_charge[3]; + + // do calibration of calo channels from pC to GeV deposited + if (m_CaloConfig == "High_gain") { + m_Calo0_Edep = (m_wave_charge[0] / 23.709) * m_MIP_sim_Edep_calo; + m_Calo1_Edep = (m_wave_charge[1] / 24.333) * m_MIP_sim_Edep_calo; + m_Calo2_Edep = (m_wave_charge[2] / 24.409) * m_MIP_sim_Edep_calo; + m_Calo3_Edep = (m_wave_charge[3] / 25.555) * m_MIP_sim_Edep_calo; + } else if (m_CaloConfig == "Low_gain") { // assume low gain calo + m_Calo0_Edep = (m_wave_charge[0] / 0.7909) * m_MIP_sim_Edep_calo; + m_Calo1_Edep = (m_wave_charge[1] / 0.8197) * m_MIP_sim_Edep_calo; + m_Calo2_Edep = (m_wave_charge[2] / 0.8256) * m_MIP_sim_Edep_calo; + m_Calo3_Edep = (m_wave_charge[3] / 0.8821) * m_MIP_sim_Edep_calo; + } else { + ATH_MSG_WARNING("Run config is neither High_gain nor Low_gain, it is " << m_CaloConfig << ", calo calibration will be zero"); + } + m_Calo_Total_Edep = m_Calo0_Edep + m_Calo1_Edep + m_Calo2_Edep + m_Calo3_Edep; + + // do calibration of preshower channels from pC to GeV deposited + m_Preshower12_Edep = (m_wave_charge[12] / 5.0) * m_MIP_sim_Edep_preshower; // 5 pC per MIP is rough measurement + m_Preshower13_Edep = (m_wave_charge[12] / 5.0) * m_MIP_sim_Edep_preshower; + + if (realData && m_doBlinding) { // enforce blinding such that events with large calo signals are skipped and not in the output root file + if ((m_Calo_Total_Edep/0.155) > 10.0) { // only save events with a shower less than a 10 GeV e- (assume 10 GeV electron deposits 15.5% of their energy in calo) + return StatusCode::SUCCESS; + } + } + + SG::ReadHandle<Tracker::FaserSCT_ClusterContainer> clusterContainer { m_clusterContainer, ctx }; + ATH_CHECK(clusterContainer.isValid()); + + FaserActsGeometryContext faserGeometryContext = m_trackingGeometryTool->getNominalGeometryContext(); + auto gctx = faserGeometryContext.context(); + + for (auto collection : *clusterContainer) + { + Identifier id = collection->identify(); + int station = m_sctHelper->station(id); + int clusters = (int) collection->size(); + switch (station) + { + case 0: + m_station0Clusters += clusters; + // following lines commented out depict how to access cluster position + //for (auto cluster : *collection) { + // if (cluster == nullptr) continue; + // auto pos = cluster->globalPosition(); + // m_station0ClusterX.push_back(pos.x()); + //} + break; + case 1: + m_station1Clusters += clusters; + break; + case 2: + m_station2Clusters += clusters; + break; + case 3: + m_station3Clusters += clusters; + break; + default: + ATH_MSG_FATAL("Unknown tracker station number " << station); + break; + } + } + + SG::ReadHandle<FaserSCT_SpacePointContainer> spacePointContainer {m_spacePointContainerKey, ctx}; + ATH_CHECK(spacePointContainer.isValid()); + for (const FaserSCT_SpacePointCollection* spacePointCollection : *spacePointContainer) { + m_nspacepoints += spacePointCollection->size(); + for (const Tracker::FaserSCT_SpacePoint *spacePoint: *spacePointCollection) { + auto pos = spacePoint->globalPosition(); + m_spacepointX.push_back(pos.x()); + m_spacepointY.push_back(pos.y()); + m_spacepointZ.push_back(pos.z()); + } + } + + SG::ReadHandle<TrackCollection> trackSegmentCollection {m_trackSegmentCollection, ctx}; + ATH_CHECK(trackSegmentCollection.isValid()); + for (const Trk::Track* trackSeg : *trackSegmentCollection) { + if (trackSeg == nullptr) continue; + m_ntracksegs += 1; + m_trackseg_Chi2.push_back(trackSeg->fitQuality()->chiSquared()); + 
m_trackseg_DoF.push_back(trackSeg->fitQuality()->numberDoF()); + auto SegParameters = trackSeg->trackParameters()->front(); + const Amg::Vector3D SegPosition = SegParameters->position(); + const Amg::Vector3D SegMomentum = SegParameters->momentum(); + m_trackseg_x.push_back(SegPosition.x()); + m_trackseg_y.push_back(SegPosition.y()); + m_trackseg_z.push_back(SegPosition.z()); + m_trackseg_px.push_back(SegMomentum.x()); + m_trackseg_py.push_back(SegMomentum.y()); + m_trackseg_pz.push_back(SegMomentum.z()); + } + + SG::ReadHandle<TrackCollection> trackCollection {m_trackCollection, ctx}; + ATH_CHECK(trackCollection.isValid()); + const Trk::TrackParameters* candidateParameters {nullptr}; + const Trk::TrackParameters* candidateDownParameters {nullptr}; + for (const Trk::Track* track : *trackCollection) + { + if (track == nullptr) continue; + std::set<std::pair<int, int>> layerMap; + std::set<int> stationMap; + + // Check for hit in the three downstream stations + for (auto measurement : *(track->measurementsOnTrack())) { + const Tracker::FaserSCT_ClusterOnTrack* cluster = dynamic_cast<const Tracker::FaserSCT_ClusterOnTrack*>(measurement); + if (cluster != nullptr) { + Identifier id = cluster->identify(); + int station = m_sctHelper->station(id); + int layer = m_sctHelper->layer(id); + stationMap.emplace(station); + layerMap.emplace(station, layer); + } + } + if (stationMap.count(1) == 0 || stationMap.count(2) == 0 || stationMap.count(3) == 0) continue; + + int nLayers = std::count_if(layerMap.begin(), layerMap.end(), [](std::pair<int,int> p){return p.first != 0;}); + const Trk::TrackParameters* upstreamParameters = track->trackParameters()->front(); + const Trk::TrackParameters* downstreamParameters = track->trackParameters()->back(); + + if (candidateParameters == nullptr || upstreamParameters->momentum().mag() > candidateParameters->momentum().mag()) + { + candidateParameters = upstreamParameters; + candidateDownParameters = downstreamParameters; + } + + if ((candidateParameters == nullptr) || (candidateDownParameters == nullptr)) continue; + + m_nLayers.push_back(nLayers); + + m_Chi2.push_back(track->fitQuality()->chiSquared()); + m_DoF.push_back(track->fitQuality()->numberDoF()); + + m_nHit0.push_back(stationMap.count(0)); + m_nHit1.push_back(stationMap.count(1)); + m_nHit2.push_back(stationMap.count(2)); + m_nHit3.push_back(stationMap.count(3)); + + m_charge.push_back( (int) candidateParameters->charge() ); + + m_xup.push_back(candidateParameters->position().x()); + m_yup.push_back(candidateParameters->position().y()); + m_zup.push_back(candidateParameters->position().z()); + m_pxup.push_back(candidateParameters->momentum().x()); + m_pyup.push_back(candidateParameters->momentum().y()); + m_pzup.push_back(candidateParameters->momentum().z()); + m_pup.push_back(sqrt( pow(candidateParameters->momentum().x(),2) + pow(candidateParameters->momentum().y(),2) + pow(candidateParameters->momentum().z(),2) )); + + m_xdown.push_back(candidateDownParameters->position().x()); + m_ydown.push_back(candidateDownParameters->position().y()); + m_zdown.push_back(candidateDownParameters->position().z()); + m_pxdown.push_back(candidateDownParameters->momentum().x()); + m_pydown.push_back(candidateDownParameters->momentum().y()); + m_pzdown.push_back(candidateDownParameters->momentum().z()); + m_pdown.push_back(sqrt( pow(candidateDownParameters->momentum().x(),2) + pow(candidateDownParameters->momentum().y(),2) + pow(candidateDownParameters->momentum().z(),2) )); + + // fill extrapolation vectors with filler 
values that get changed if the track extrapolation succeeds + m_xVetoNu.push_back(-10000); + m_yVetoNu.push_back(-10000); + m_thetaxVetoNu.push_back(-10000); + m_thetayVetoNu.push_back(-10000); + m_xVetoStation1.push_back(-10000); + m_yVetoStation1.push_back(-10000); + m_thetaxVetoStation1.push_back(-10000); + m_thetayVetoStation1.push_back(-10000); + m_xVetoStation2.push_back(-10000); + m_yVetoStation2.push_back(-10000); + m_thetaxVetoStation2.push_back(-10000); + m_thetayVetoStation2.push_back(-10000); + m_xTrig.push_back(-10000); + m_yTrig.push_back(-10000); + m_thetaxTrig.push_back(-10000); + m_thetayTrig.push_back(-10000); + m_xPreshower1.push_back(-10000); + m_yPreshower1.push_back(-10000); + m_thetaxPreshower1.push_back(-10000); + m_thetayPreshower1.push_back(-10000); + m_xPreshower2.push_back(-10000); + m_yPreshower2.push_back(-10000); + m_thetaxPreshower2.push_back(-10000); + m_thetayPreshower2.push_back(-10000); + m_xCalo.push_back(-10000); + m_yCalo.push_back(-10000); + m_thetaxCalo.push_back(-10000); + m_thetayCalo.push_back(-10000); + + // extrapolate track from IFT + if (stationMap.count(0) > 0) { // extrapolation crashes if the track does not start in the IFT, as it is too far away to extrapolate + Amg::Vector3D position = candidateParameters->position(); + Amg::Vector3D momentum = candidateParameters->momentum(); + Acts::BoundVector params = Acts::BoundVector::Zero(); + params[Acts::eBoundLoc0] = -position.y(); + params[Acts::eBoundLoc1] = position.x(); + params[Acts::eBoundPhi] = momentum.phi(); + params[Acts::eBoundTheta] = momentum.theta(); + params[Acts::eBoundQOverP] = candidateParameters->charge() / momentum.mag(); + params[Acts::eBoundTime] = 0; + auto startSurface = Acts::Surface::makeShared<Acts::PlaneSurface>(Acts::Vector3(0, 0, position.z()), Acts::Vector3(0, 0, 1)); + Acts::BoundTrackParameters startParameters(std::move(startSurface), params, candidateParameters->charge()); + + auto targetSurface_VetoNu = Acts::Surface::makeShared<Acts::PlaneSurface>(Acts::Vector3(0, 0, -3112.0), Acts::Vector3(0, 0, 1)); // -3112 mm is z position of VetoNu planes touching + std::unique_ptr<const Acts::BoundTrackParameters> targetParameters_VetoNu =m_extrapolationTool->propagate(ctx, startParameters, *targetSurface_VetoNu, Acts::backward); + if (targetParameters_VetoNu != nullptr) { + auto targetPosition_VetoNu = targetParameters_VetoNu->position(gctx); + auto targetMomentum_VetoNu = targetParameters_VetoNu->momentum(); + m_xVetoNu[m_longTracks] = targetPosition_VetoNu.x(); + m_yVetoNu[m_longTracks] = targetPosition_VetoNu.y(); + m_thetaxVetoNu[m_longTracks] = atan(targetMomentum_VetoNu[0]/targetMomentum_VetoNu[2]); + m_thetayVetoNu[m_longTracks] = atan(targetMomentum_VetoNu[1]/targetMomentum_VetoNu[2]); + } else { + ATH_MSG_INFO("vetoNu null targetParameters"); + } + + auto targetSurface_Veto1 = Acts::Surface::makeShared<Acts::PlaneSurface>(Acts::Vector3(0, 0, -1769.65), Acts::Vector3(0, 0, 1)); // -1769.65 mm is z position of center of operational layer in Veto station 1 + std::unique_ptr<const Acts::BoundTrackParameters> targetParameters_Veto1 =m_extrapolationTool->propagate(ctx, startParameters, *targetSurface_Veto1, Acts::forward); + if (targetParameters_Veto1 != nullptr) { + auto targetPosition_Veto1 = targetParameters_Veto1->position(gctx); + auto targetMomentum_Veto1 = targetParameters_Veto1->momentum(); + m_xVetoStation1[m_longTracks] = targetPosition_Veto1.x(); + m_yVetoStation1[m_longTracks] = targetPosition_Veto1.y(); + m_thetaxVetoStation1[m_longTracks] =
atan(targetMomentum_Veto1[0]/targetMomentum_Veto1[2]); + m_thetayVetoStation1[m_longTracks] = atan(targetMomentum_Veto1[1]/targetMomentum_Veto1[2]); + } else { + ATH_MSG_INFO("veto1 null targetParameters"); + } + + auto targetSurface_Veto2 = Acts::Surface::makeShared<Acts::PlaneSurface>(Acts::Vector3(0, 0, -1609.65), Acts::Vector3(0, 0, 1)); // -1609.65 mm is z position of where planes touch in Veto station 2 + std::unique_ptr<const Acts::BoundTrackParameters> targetParameters_Veto2 =m_extrapolationTool->propagate(ctx, startParameters, *targetSurface_Veto2, Acts::forward); + if (targetParameters_Veto2 != nullptr) { + auto targetPosition_Veto2 = targetParameters_Veto2->position(gctx); + auto targetMomentum_Veto2 = targetParameters_Veto2->momentum(); + m_xVetoStation2[m_longTracks] = targetPosition_Veto2.x(); + m_yVetoStation2[m_longTracks] = targetPosition_Veto2.y(); + m_thetaxVetoStation2[m_longTracks] = atan(targetMomentum_Veto2[0]/targetMomentum_Veto2[2]); + m_thetayVetoStation2[m_longTracks] = atan(targetMomentum_Veto2[1]/targetMomentum_Veto2[2]); + } else { + ATH_MSG_INFO("veto2 null targetParameters"); + } + + auto targetSurface_Trig = Acts::Surface::makeShared<Acts::PlaneSurface>(Acts::Vector3(0, 0, 0.0), Acts::Vector3(0, 0, 1)); // 0 mm is z position of Trig planes overlapping + std::unique_ptr<const Acts::BoundTrackParameters> targetParameters_Trig =m_extrapolationTool->propagate(ctx, startParameters, *targetSurface_Trig, Acts::forward); // must extrapolate forward to trig plane if track starts in IFT + if (targetParameters_Trig != nullptr) { + auto targetPosition_Trig = targetParameters_Trig->position(gctx); + auto targetMomentum_Trig = targetParameters_Trig->momentum(); + m_xTrig[m_longTracks] = targetPosition_Trig.x(); + m_yTrig[m_longTracks] = targetPosition_Trig.y(); + m_thetaxTrig[m_longTracks] = atan(targetMomentum_Trig[0]/targetMomentum_Trig[2]); + m_thetayTrig[m_longTracks] = atan(targetMomentum_Trig[1]/targetMomentum_Trig[2]); + } else { + ATH_MSG_INFO("Trig null targetParameters"); + } + + } + + // extrapolate track from tracking station 3 + if (stationMap.count(3) > 0) { // extrapolation crashes if the track does not end in the Station 3, as it is too far away to extrapolate + Amg::Vector3D positionDown = candidateDownParameters->position(); + Amg::Vector3D momentumDown = candidateDownParameters->momentum(); + Acts::BoundVector paramsDown = Acts::BoundVector::Zero(); + paramsDown[Acts::eBoundLoc0] = -positionDown.y(); + paramsDown[Acts::eBoundLoc1] = positionDown.x(); + paramsDown[Acts::eBoundPhi] = momentumDown.phi(); + paramsDown[Acts::eBoundTheta] = momentumDown.theta(); + paramsDown[Acts::eBoundQOverP] = candidateDownParameters->charge() / momentumDown.mag(); + paramsDown[Acts::eBoundTime] = 0; + auto startSurfaceDown = Acts::Surface::makeShared<Acts::PlaneSurface>(Acts::Vector3(0, 0, positionDown.z()), Acts::Vector3(0, 0, 1)); + Acts::BoundTrackParameters startParametersDown(std::move(startSurfaceDown), paramsDown, candidateDownParameters->charge()); + + auto targetSurface_Preshower1 = Acts::Surface::makeShared<Acts::PlaneSurface>(Acts::Vector3(0, 0, 2582.68), Acts::Vector3(0, 0, 1)); // 2582.68 mm is z position of center of upstream preshower layer + std::unique_ptr<const Acts::BoundTrackParameters> targetParameters_Preshower1 =m_extrapolationTool->propagate(ctx, startParametersDown, *targetSurface_Preshower1, Acts::forward); + if (targetParameters_Preshower1 != nullptr) { + auto targetPosition_Preshower1 = targetParameters_Preshower1->position(gctx); + auto 
targetMomentum_Preshower1 = targetParameters_Preshower1->momentum(); + m_xPreshower1[m_longTracks] = targetPosition_Preshower1.x(); + m_yPreshower1[m_longTracks] = targetPosition_Preshower1.y(); + m_thetaxPreshower1[m_longTracks] = atan(targetMomentum_Preshower1[0]/targetMomentum_Preshower1[2]); + m_thetayPreshower1[m_longTracks] = atan(targetMomentum_Preshower1[1]/targetMomentum_Preshower1[2]); + } else { + ATH_MSG_INFO("Preshower1 null targetParameters"); + } + + auto targetSurface_Preshower2 = Acts::Surface::makeShared<Acts::PlaneSurface>(Acts::Vector3(0, 0, 2657.68), Acts::Vector3(0, 0, 1)); // 2657.68 mm is z position of center of downstream preshower layer + std::unique_ptr<const Acts::BoundTrackParameters> targetParameters_Preshower2 =m_extrapolationTool->propagate(ctx, startParametersDown, *targetSurface_Preshower2, Acts::forward); + if (targetParameters_Preshower2 != nullptr) { + auto targetPosition_Preshower2 = targetParameters_Preshower2->position(gctx); + auto targetMomentum_Preshower2 = targetParameters_Preshower2->momentum(); + m_xPreshower2[m_longTracks] = targetPosition_Preshower2.x(); + m_yPreshower2[m_longTracks] = targetPosition_Preshower2.y(); + m_thetaxPreshower2[m_longTracks] = atan(targetMomentum_Preshower2[0]/targetMomentum_Preshower2[2]); + m_thetayPreshower2[m_longTracks] = atan(targetMomentum_Preshower2[1]/targetMomentum_Preshower2[2]); + } else { + ATH_MSG_INFO("Preshower2 null targetParameters"); + } + + auto targetSurface_Calo = Acts::Surface::makeShared<Acts::PlaneSurface>(Acts::Vector3(0, 0, 2760.0), Acts::Vector3(0, 0, 1)); // 2760 mm is estimated z position of calorimeter face + std::unique_ptr<const Acts::BoundTrackParameters> targetParameters_Calo =m_extrapolationTool->propagate(ctx, startParametersDown, *targetSurface_Calo, Acts::forward); + if (targetParameters_Calo != nullptr) { + auto targetPosition_Calo = targetParameters_Calo->position(gctx); + auto targetMomentum_Calo = targetParameters_Calo->momentum(); + m_xCalo[m_longTracks] = targetPosition_Calo.x(); + m_yCalo[m_longTracks] = targetPosition_Calo.y(); + m_thetaxCalo[m_longTracks] = atan(targetMomentum_Calo[0]/targetMomentum_Calo[2]) ; + m_thetayCalo[m_longTracks] = atan(targetMomentum_Calo[1]/targetMomentum_Calo[2]) ; + } else { + ATH_MSG_INFO("Calo null targetParameters"); + } + } + + m_longTracks++; + } + + /* + // Here we apply the signal selection + // Very simple/unrealistic to start + if (m_vetoUpstream == 0 || m_vetoDownstream == 0 || + m_triggerTotal == 0 || + m_preshower0 == 0 || m_preshower1 == 0 || + // m_ecalTotal == 0 || + candidateParameters == nullptr) + return StatusCode::SUCCESS; + */ + m_tree->Fill(); + + return StatusCode::SUCCESS; +} + + +StatusCode NtupleDumperAlg::finalize() +{ + return StatusCode::SUCCESS; +} + +bool NtupleDumperAlg::waveformHitOK(const xAOD::WaveformHit* hit) const +{ + if (hit->status_bit(xAOD::WaveformStatus::THRESHOLD_FAILED) || hit->status_bit(xAOD::WaveformStatus::SECONDARY)) return false; + return true; +} + +void +NtupleDumperAlg::clearTree() const +{ + m_run_number = 0; + m_event_number = 0; + m_event_time = 0; + m_bcid = 0; + + m_tbp=0; + m_tap=0; + m_inputBits=0; + m_inputBitsNext=0; + + for(int ii=0;ii<15;ii++) { + m_wave_localtime[ii]=0; + m_wave_peak[ii]=0; + m_wave_width[ii]=0; + m_wave_charge[ii]=0; + + m_wave_raw_peak[ii]=0; + m_wave_raw_charge[ii]=0; + m_wave_baseline_mean[ii]=0; + m_wave_baseline_rms[ii]=0; + m_wave_status[ii]=0; + } + + m_calo_total=0; + m_calo_rawtotal=0; + + m_Calo0_Edep=0; + m_Calo1_Edep=0; + m_Calo2_Edep=0; + 
m_Calo3_Edep=0; + m_Calo_Total_Edep=0; + m_Preshower12_Edep=0; + m_Preshower13_Edep=0; + + m_clock_phase=0; + + m_station0Clusters = 0; + m_station1Clusters = 0; + m_station2Clusters = 0; + m_station3Clusters = 0; + m_crossSection = 0; + + m_nspacepoints = 0; + m_spacepointX.clear(); + m_spacepointY.clear(); + m_spacepointZ.clear(); + + m_ntracksegs = 0; + m_trackseg_Chi2.clear(); + m_trackseg_DoF.clear(); + m_trackseg_x.clear(); + m_trackseg_y.clear(); + m_trackseg_z.clear(); + m_trackseg_px.clear(); + m_trackseg_py.clear(); + m_trackseg_pz.clear(); + + m_xup.clear(); + m_yup.clear(); + m_zup.clear(); + m_pxup.clear(); + m_pyup.clear(); + m_pzup.clear(); + m_pup.clear(); + + m_xdown.clear(); + m_ydown.clear(); + m_zdown.clear(); + m_pxdown.clear(); + m_pydown.clear(); + m_pzdown.clear(); + m_pdown.clear(); + + m_Chi2.clear(); + m_DoF.clear(); + m_charge.clear(); + m_nLayers.clear(); + m_longTracks = 0; + + m_nHit0.clear(); + m_nHit1.clear(); + m_nHit2.clear(); + m_nHit3.clear(); + + m_xVetoNu.clear(); + m_yVetoNu.clear(); + m_thetaxVetoNu.clear(); + m_thetayVetoNu.clear(); + + m_xVetoStation1.clear(); + m_yVetoStation1.clear(); + m_thetaxVetoStation1.clear(); + m_thetayVetoStation1.clear(); + + m_xVetoStation2.clear(); + m_yVetoStation2.clear(); + m_thetaxVetoStation2.clear(); + m_thetayVetoStation2.clear(); + + m_xTrig.clear(); + m_yTrig.clear(); + m_thetaxTrig.clear(); + m_thetayTrig.clear(); + + m_xPreshower1.clear(); + m_yPreshower1.clear(); + m_thetaxPreshower1.clear(); + m_thetayPreshower1.clear(); + + m_xPreshower2.clear(); + m_yPreshower2.clear(); + m_thetaxPreshower2.clear(); + m_thetayPreshower2.clear(); + + m_xCalo.clear(); + m_yCalo.clear(); + m_thetaxCalo.clear(); + m_thetayCalo.clear(); + + m_truthLeptonMomentum = 0; + m_truthBarcode = 0; + m_truthPdg = 0; +} diff --git a/PhysicsAnalysis/NtupleDumper/src/NtupleDumperAlg.h b/PhysicsAnalysis/NtupleDumper/src/NtupleDumperAlg.h new file mode 100644 index 0000000000000000000000000000000000000000..1b26caadeb7290c34a199ef0e941f9203b600bba --- /dev/null +++ b/PhysicsAnalysis/NtupleDumper/src/NtupleDumperAlg.h @@ -0,0 +1,217 @@ +#ifndef NTUPLEDUMPER_NTUPLEDUMPERALG_H +#define NTUPLEDUMPER_NTUPLEDUMPERALG_H + +#include "AthenaBaseComps/AthReentrantAlgorithm.h" +#include "AthenaBaseComps/AthHistogramming.h" +#include "TrkTrack/TrackCollection.h" +#include "xAODFaserTrigger/FaserTriggerData.h" +#include "xAODFaserWaveform/WaveformHitContainer.h" +#include "xAODFaserWaveform/WaveformHit.h" +#include "xAODFaserWaveform/WaveformClock.h" +#include "xAODTruth/TruthEventContainer.h" +#include "xAODTruth/TruthParticleContainer.h" +#include "TrackerPrepRawData/FaserSCT_ClusterContainer.h" +#include "TrackerSpacePoint/FaserSCT_SpacePointContainer.h" +#include "TrackerSimData/TrackerSimDataCollection.h" +#include "FaserActsGeometryInterfaces/IFaserActsExtrapolationTool.h" +#include "FaserActsGeometryInterfaces/IFaserActsTrackingGeometryTool.h" + +#include <vector> + +class TTree; +class TH1; +class FaserSCT_ID; +class VetoNuID; +class VetoID; +class TriggerID; +class PreshowerID; +class EcalID; +namespace TrackerDD +{ + class SCT_DetectorManager; +} + +class NtupleDumperAlg : public AthReentrantAlgorithm, AthHistogramming { +public: + NtupleDumperAlg(const std::string &name, ISvcLocator *pSvcLocator); + virtual ~NtupleDumperAlg() = default; + virtual StatusCode initialize() override; + virtual StatusCode execute(const EventContext &ctx) const override; + virtual StatusCode finalize() override; + const ServiceHandle <ITHistSvc> &histSvc() const; + 
+private: + + bool waveformHitOK(const xAOD::WaveformHit* hit) const; + void clearTree() const; + void addBranch(const std::string &name,float* var); + void addBranch(const std::string &name,unsigned int* var); + void addWaveBranches(const std::string &name, int nchannels, int first); + void FillWaveBranches(const xAOD::WaveformHitContainer &wave) const; + + ServiceHandle <ITHistSvc> m_histSvc; + + SG::ReadHandleKey<xAOD::TruthEventContainer> m_truthEventContainer { this, "EventContainer", "TruthEvents", "Truth event container name." }; + SG::ReadHandleKey<xAOD::TruthParticleContainer> m_truthParticleContainer { this, "ParticleContainer", "TruthParticles", "Truth particle container name." }; + SG::ReadHandleKey<TrackerSimDataCollection> m_simDataCollection {this, "TrackerSimDataCollection", "SCT_SDO_Map"}; + + SG::ReadHandleKey<TrackCollection> m_trackCollection { this, "TrackCollection", "CKFTrackCollection", "Input track collection name" }; + SG::ReadHandleKey<TrackCollection> m_trackSegmentCollection {this, "TrackSegmentCollection", "SegmentFit", "Input track segment collection name"}; + SG::ReadHandleKey<xAOD::WaveformHitContainer> m_vetoNuContainer { this, "VetoNuContainer", "VetoNuWaveformHits", "VetoNu hit container name" }; + SG::ReadHandleKey<xAOD::WaveformHitContainer> m_vetoContainer { this, "VetoContainer", "VetoWaveformHits", "Veto hit container name" }; + SG::ReadHandleKey<xAOD::WaveformHitContainer> m_triggerContainer { this, "TriggerContainer", "TriggerWaveformHits", "Trigger hit container name" }; + SG::ReadHandleKey<xAOD::WaveformHitContainer> m_preshowerContainer { this, "PreshowerContainer", "PreshowerWaveformHits", "Preshower hit container name" }; + SG::ReadHandleKey<xAOD::WaveformHitContainer> m_ecalContainer { this, "EcalContainer", "CaloWaveformHits", "Ecal hit container name" }; + SG::ReadHandleKey<Tracker::FaserSCT_ClusterContainer> m_clusterContainer { this, "ClusterContainer", "SCT_ClusterContainer", "Tracker cluster container name" }; + SG::ReadHandleKey<FaserSCT_SpacePointContainer> m_spacePointContainerKey { this, "SpacePoints", "SCT_SpacePointContainer", "space point container"}; + + SG::ReadHandleKey<xAOD::FaserTriggerData> m_FaserTriggerData { this, "FaserTriggerDataKey", "FaserTriggerData", "ReadHandleKey for xAOD::FaserTriggerData"}; + SG::ReadHandleKey<xAOD::WaveformClock> m_ClockWaveformContainer { this, "WaveformClockKey", "WaveformClock", "ReadHandleKey for ClockWaveforms Container"}; + ToolHandle<IFaserActsExtrapolationTool> m_extrapolationTool { this, "ExtrapolationTool", "FaserActsExtrapolationTool" }; + ToolHandle<IFaserActsTrackingGeometryTool> m_trackingGeometryTool {this, "TrackingGeometryTool", "FaserActsTrackingGeometryTool"}; + + const TrackerDD::SCT_DetectorManager* m_detMgr {nullptr}; + + const FaserSCT_ID* m_sctHelper; + const VetoNuID* m_vetoNuHelper; + const VetoID* m_vetoHelper; + const TriggerID* m_triggerHelper; + const PreshowerID* m_preshowerHelper; + const EcalID* m_ecalHelper; + + StringProperty m_CaloConfig { this, "CaloConfig", "Low_gain", "Configuration found at http://aagaard.web.cern.ch/aagaard/FASERruns.html (spaces replaced with '_')" }; + BooleanProperty m_doBlinding { this, "DoBlinding", true, "Blinding will not output events with Calo signal > 10 GeV e-" }; + BooleanProperty m_useFlukaWeights { this, "UseFlukaWeights", false, "Flag to weight events according to value stored in HepMC::GenEvent" }; + BooleanProperty m_useGenieWeights { this, "UseGenieWeights", false, "Flag to weight events according to Genie luminosity" 
}; + IntegerProperty m_flukaCollisions { this, "FlukaCollisions", 137130000, "Number of proton-proton collisions in FLUKA sample." }; + DoubleProperty m_flukaCrossSection { this, "FlukaCrossSection", 80.0, "Fluka p-p inelastic cross-section in millibarns." }; + DoubleProperty m_genieLuminosity { this, "GenieLuminosity", 150.0, "Genie luminosity in inverse fb." }; + + double m_baseEventCrossSection {1.0}; + const double kfemtoBarnsPerMilliBarn {1.0e12}; + + mutable TTree* m_tree; + + mutable TH1* m_HistRandomCharge[15]; + + mutable unsigned int m_run_number; + mutable unsigned int m_event_number; + mutable unsigned int m_event_time; + mutable unsigned int m_bcid; + + mutable unsigned int m_tbp; + mutable unsigned int m_tap; + mutable unsigned int m_inputBits; + mutable unsigned int m_inputBitsNext; + + mutable float m_wave_localtime[15]; + mutable float m_wave_peak[15]; + mutable float m_wave_width[15]; + mutable float m_wave_charge[15]; + + mutable float m_wave_raw_peak[15]; + mutable float m_wave_raw_charge[15]; + mutable float m_wave_baseline_mean[15]; + mutable float m_wave_baseline_rms[15]; + mutable unsigned int m_wave_status[15]; + + mutable float m_calo_total; + mutable float m_calo_rawtotal; + + mutable float m_Calo0_Edep; + mutable float m_Calo1_Edep; + mutable float m_Calo2_Edep; + mutable float m_Calo3_Edep; + mutable float m_Calo_Total_Edep; + mutable float m_Preshower12_Edep; + mutable float m_Preshower13_Edep; + + mutable float m_MIP_sim_Edep_calo; + mutable float m_MIP_sim_Edep_preshower; + + mutable float m_clock_phase; + + mutable unsigned int m_station0Clusters; + mutable unsigned int m_station1Clusters; + mutable unsigned int m_station2Clusters; + mutable unsigned int m_station3Clusters; + + mutable unsigned int m_nspacepoints; + mutable std::vector<double> m_spacepointX; + mutable std::vector<double> m_spacepointY; + mutable std::vector<double> m_spacepointZ; + + mutable unsigned int m_ntracksegs; + mutable std::vector<double> m_trackseg_Chi2; + mutable std::vector<double> m_trackseg_DoF; + mutable std::vector<double> m_trackseg_x; + mutable std::vector<double> m_trackseg_y; + mutable std::vector<double> m_trackseg_z; + mutable std::vector<double> m_trackseg_px; + mutable std::vector<double> m_trackseg_py; + mutable std::vector<double> m_trackseg_pz; + + mutable int m_longTracks; + mutable std::vector<double> m_Chi2; + mutable std::vector<double> m_DoF; + mutable std::vector<double> m_xup; + mutable std::vector<double> m_yup; + mutable std::vector<double> m_zup; + mutable std::vector<double> m_pxup; + mutable std::vector<double> m_pyup; + mutable std::vector<double> m_pzup; + mutable std::vector<double> m_pup; + mutable std::vector<double> m_xdown; + mutable std::vector<double> m_ydown; + mutable std::vector<double> m_zdown; + mutable std::vector<double> m_pxdown; + mutable std::vector<double> m_pydown; + mutable std::vector<double> m_pzdown; + mutable std::vector<double> m_pdown; + mutable std::vector<int> m_charge; + mutable std::vector<unsigned int> m_nLayers; + mutable std::vector<unsigned int> m_nHit0; + mutable std::vector<unsigned int> m_nHit1; + mutable std::vector<unsigned int> m_nHit2; + mutable std::vector<unsigned int> m_nHit3; + mutable std::vector<double> m_xVetoNu; + mutable std::vector<double> m_yVetoNu; + mutable std::vector<double> m_thetaxVetoNu; + mutable std::vector<double> m_thetayVetoNu; + mutable std::vector<double> m_xVetoStation1; + mutable std::vector<double> m_yVetoStation1; + mutable std::vector<double> m_thetaxVetoStation1; + mutable 
std::vector<double> m_thetayVetoStation1; + mutable std::vector<double> m_xVetoStation2; + mutable std::vector<double> m_yVetoStation2; + mutable std::vector<double> m_thetaxVetoStation2; + mutable std::vector<double> m_thetayVetoStation2; + mutable std::vector<double> m_xTrig; + mutable std::vector<double> m_yTrig; + mutable std::vector<double> m_thetaxTrig; + mutable std::vector<double> m_thetayTrig; + mutable std::vector<double> m_xPreshower1; + mutable std::vector<double> m_yPreshower1; + mutable std::vector<double> m_thetaxPreshower1; + mutable std::vector<double> m_thetayPreshower1; + mutable std::vector<double> m_xPreshower2; + mutable std::vector<double> m_yPreshower2; + mutable std::vector<double> m_thetaxPreshower2; + mutable std::vector<double> m_thetayPreshower2; + mutable std::vector<double> m_xCalo; + mutable std::vector<double> m_yCalo; + mutable std::vector<double> m_thetaxCalo; + mutable std::vector<double> m_thetayCalo; + + mutable double m_truthLeptonMomentum; + mutable int m_truthBarcode; + mutable int m_truthPdg; + mutable double m_crossSection; + +}; + +inline const ServiceHandle <ITHistSvc> &NtupleDumperAlg::histSvc() const { + return m_histSvc; +} + +#endif // NTUPLEDUMPER_NTUPLEDUMPERALG_H diff --git a/PhysicsAnalysis/NtupleDumper/src/component/NtupleDumper_entries.cxx b/PhysicsAnalysis/NtupleDumper/src/component/NtupleDumper_entries.cxx new file mode 100644 index 0000000000000000000000000000000000000000..15dc244e170829e01af0d848e3cac55abbc83619 --- /dev/null +++ b/PhysicsAnalysis/NtupleDumper/src/component/NtupleDumper_entries.cxx @@ -0,0 +1,3 @@ +#include "../NtupleDumperAlg.h" + +DECLARE_COMPONENT(NtupleDumperAlg) diff --git a/README.md b/README.md index 2d7e57abb75b58112939efb7b2726607bcfb6ec3..bcd917f069ec308e9657f013a9398dba24285145 100644 --- a/README.md +++ b/README.md @@ -50,6 +50,8 @@ When compiling, CERN IT recommends using condor to submit batch jobs. The basics ** `ConfigFlags.GeoModel.FaserVersion = "FASERNU-03"` and `ConfigFlags.IOVDb.GlobalTag = OFLCOND-FASER-02` enables the full FaserNu (IFT + emulsion) setup +** `ConfigFlags.GeoModel.FaserVersion = "FASERNU-03"` and `ConfigFlags.IOVDb.GlobalTag = OFLCOND-FASER-03` enables the full FaserNu (IFT + emulsion) setup with updated (10Nov22) magnetic field map + ** `ConfigFlags.GeoModel.FaserVersion = "FASER-TB00"` and `ConfigFlags.IOVDb.GlobalTag = OFLCOND-FASER-TB00` enables the 2021 Test-beam setup. 
* The command `source /cvmfs/sft.cern.ch/lcg/releases/LCG_101_ATLAS_6/sqlite/3320300/x86_64-centos7-gcc11-opt/sqlite-env.sh` may be necessary to avoid errors when generating a database diff --git a/Simulation/G4Faser/G4FaserAlg/CMakeLists.txt b/Simulation/G4Faser/G4FaserAlg/CMakeLists.txt index 35042d814e913677a0ddf4d643e792a75b362f2d..979897a94c4b476433d0ef3fe2ceeb31105f9ef5 100644 --- a/Simulation/G4Faser/G4FaserAlg/CMakeLists.txt +++ b/Simulation/G4Faser/G4FaserAlg/CMakeLists.txt @@ -36,6 +36,11 @@ atlas_add_test( G4FaserAlgConfig_TestFaserNu PROPERTIES TIMEOUT 300 PROPERTIES WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +atlas_add_test( G4FaserAlgConfig_TestFaserNu_NewField + SCRIPT python ${CMAKE_CURRENT_SOURCE_DIR}/test/G4FaserAlgConfigNew_Test.py GeoModel.FaserVersion="'FASERNU-03'" IOVDb.GlobalTag="'OFLCOND-FASER-03'" Output.HITSFileName='faserNu.HITS.pool.root' + PROPERTIES TIMEOUT 300 + PROPERTIES WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) + atlas_add_test( G4FaserAlgConfig_TestTestbeam SCRIPT python ${CMAKE_CURRENT_SOURCE_DIR}/test/G4FaserAlgConfigNew_Test.py GeoModel.FaserVersion="'FASER-TB00'" IOVDb.GlobalTag="'OFLCOND-FASER-TB00'" Output.HITSFileName='tb.HITS.pool.root' PROPERTIES TIMEOUT 300 diff --git a/Simulation/G4Faser/G4FaserAlg/test/G4FaserAlgConfigNew_Test.py b/Simulation/G4Faser/G4FaserAlg/test/G4FaserAlgConfigNew_Test.py index 998b40c286837a3436e6fc714f4977b355ccdd9d..d7ea3c3ab921b57e3df3c5c4be56c102f873c8f7 100755 --- a/Simulation/G4Faser/G4FaserAlg/test/G4FaserAlgConfigNew_Test.py +++ b/Simulation/G4Faser/G4FaserAlg/test/G4FaserAlgConfigNew_Test.py @@ -49,7 +49,7 @@ if __name__ == '__main__': ConfigFlags.addFlag("Sim.Beam.yshift", 0) ConfigFlags.GeoModel.FaserVersion = "FASERNU-03" # Geometry set-up - ConfigFlags.IOVDb.GlobalTag = "OFLCOND-FASER-02" # Conditions set-up + ConfigFlags.IOVDb.GlobalTag = "OFLCOND-FASER-03" # Conditions set-up ConfigFlags.addFlag("Input.InitialTimeStamp", 0) # To avoid autoconfig ConfigFlags.GeoModel.Align.Dynamic = False # diff --git a/Simulation/G4Faser/G4FaserAlg/test/runGeantinoScan.py b/Simulation/G4Faser/G4FaserAlg/test/runGeantinoScan.py index 7e0a93719a520de7bb819fe270f8666a00c42f5c..46021eb4bc7a37eefbcdfd0b14ab72349c563364 100644 --- a/Simulation/G4Faser/G4FaserAlg/test/runGeantinoScan.py +++ b/Simulation/G4Faser/G4FaserAlg/test/runGeantinoScan.py @@ -19,41 +19,34 @@ if __name__ == "__main__": from OutputStreamAthenaPool.OutputStreamConfig import OutputStreamCfg from FaserGeoModel.FaserGeoModelConfig import FaserGeometryCfg from G4FaserAlg.G4FaserAlgConfigNew import G4FaserAlgCfg - from G4FaserAlg.G4FaserAlgConfigNew import G4FaserMaterialStepRecorderAlgCfg from G4FaserServices.G4FaserServicesConfigNew import G4GeometryNotifierSvcCfg + from G4FaserServices.G4FaserUserActionConfigNew import UserActionMaterialStepRecorderSvcCfg # # Set up logging and new style config # log.setLevel(VERBOSE) Configurable.configurableRun3Behavior = True -# -# Input settings (Generator file) -# -# from AthenaConfiguration.TestDefaults import defaultTestFiles -# ConfigFlags.Input.Files = defaultTestFiles.EVNT -# -# Alternatively, these must ALL be explicitly set to run without an input file -# (if missing, it will try to read metadata from a non-existent file and crash) -# + from AthenaConfiguration.Enums import ProductionStep + ConfigFlags.Common.ProductionStep = ProductionStep.Simulation + ConfigFlags.Sim.ReleaseGeoModel = False ConfigFlags.Input.Files = [""] ConfigFlags.Input.isMC = True - ConfigFlags.Input.RunNumber = 12345 - ConfigFlags.Input.Collections 
= [""] - ConfigFlags.Input.ProjectName = "mc19" + ConfigFlags.Input.RunNumber = [12345] + ConfigFlags.Input.OverrideRunNumber = True + ConfigFlags.Input.LumiBlockNumber = [1] + Configurable.configurableRun3Behavior = 1 ConfigFlags.Common.isOnline = False ConfigFlags.Beam.Type = "collisions" ConfigFlags.Beam.Energy = 7*TeV # Informational, does not affect simulation - ConfigFlags.GeoModel.FaserVersion = "FASER-01" # Always needed - ConfigFlags.IOVDb.GlobalTag = "OFLCOND-FASER-01" # Always needed; must match FaserVersion -# Workaround for bug/missing flag; unimportant otherwise + ConfigFlags.GeoModel.FaserVersion = "FASERNU-03" # Always needed + ConfigFlags.IOVDb.GlobalTag = "OFLCOND-FASER-02" # Always needed; must match FaserVersion ConfigFlags.addFlag("Input.InitialTimeStamp", 0) -# Workaround to avoid problematic ISF code - ConfigFlags.GeoModel.Layout = "Development" # # Output settings # ConfigFlags.Output.HITSFileName = "MaterialStepCollection.root" ConfigFlags.GeoModel.GeoExportFile = "faserGeo.db" # Optional dump of geometry for browsing in vp1light + ConfigFlags.GeoModel.Align.Dynamic = False # # Geometry-related settings # Do not change! @@ -61,8 +54,6 @@ if __name__ == "__main__": detectors = ['Veto', 'Trigger', 'Preshower', 'FaserSCT', 'Dipole', 'Ecal'] from CalypsoConfiguration.DetectorConfigFlags import setupDetectorsFromList setupDetectorsFromList(ConfigFlags, detectors, toggle_geometry=True) - ConfigFlags.GeoModel.Align.Dynamic = False - ConfigFlags.Sim.ReleaseGeoModel = False # # All flags should be set before calling lock # @@ -79,9 +70,9 @@ if __name__ == "__main__": pg.McEventKey = "BeamTruthEvent" pg.randomSeed = 123456 pg.sampler.pid = 999 - pg.sampler.mom = PG.EThetaMPhiSampler(energy=1*TeV, theta=[0, pi/20], phi=[0, 2*pi]) - pg.sampler.pos = PG.PosSampler(x=[-5, 5], y=[-5, 5], z=-2100.0, t=0.0) - acc.addEventAlgo(pg, "AthBeginSeq") # to run *before* G4 + pg.sampler.mom = PG.EThetaMPhiSampler(energy=1*TeV, theta=[0, pi/200], phi=[0, 2*pi]) + pg.sampler.pos = PG.PosSampler(x=[-150, 150], y=[-150, 150], z=-2100.0, t=0.0) + acc.addEventAlgo(pg, "AthBeginSeq", primary = True) # to run *before* G4 # # Only one of these two should be used in a given job # (MCEventSelectorCfg for generating events with no input file, @@ -107,7 +98,8 @@ if __name__ == "__main__": # Here is the configuration of the Geant4 pieces # acc.merge(FaserGeometryCfg(ConfigFlags)) - acc.merge(G4FaserMaterialStepRecorderAlgCfg(ConfigFlags)) + acc.merge(UserActionMaterialStepRecorderSvcCfg(ConfigFlags)) + acc.merge(G4FaserAlgCfg(ConfigFlags)) acc.addService(G4GeometryNotifierSvcCfg(ConfigFlags, ActivateLVNotifier=True)) # # Verbosity @@ -125,4 +117,4 @@ if __name__ == "__main__": # # Execute and finish # - sys.exit(int(acc.run(maxEvents=20000).isFailure())) + sys.exit(int(acc.run(maxEvents=1000000).isFailure())) diff --git a/Tracker/TrackerConditions/FaserSCT_ConditionsData/README.md b/Tracker/TrackerConditions/FaserSCT_ConditionsData/README.md new file mode 100644 index 0000000000000000000000000000000000000000..8025a4b1d3244a86163371a4fae298a2108d47ef --- /dev/null +++ b/Tracker/TrackerConditions/FaserSCT_ConditionsData/README.md @@ -0,0 +1,33 @@ +# FaserSCT_ConditionsData + +For more information on conditions in general, see [[https://twiki.cern.ch/twiki/bin/view/FASER/OfflineDatabases][OfflineDatabases]]. + +## Creating databases + +The scripts +``` +data/SCT_Conditions.py +data/BField_DataConditions.py +``` +were used to create the intial databases for MC (OFLP200) and data (CONDBR3). 
+ +## Updating field map + +The following was used to update the magnetic field map on 10 Nov. 22: +``` +cp /cvmfs/faser.cern.ch/repo/sw/database/DBRelease/current/sqlite200/ALLP200.db . + +# This creates new field and scale entries with tag OFLCOND-FASER-03 in mysqlite.db +BField_Conditions_Update03.py + +# Now we need to merge this into the existing DB +AtlCoolCopy "sqlite://;schema=mysqlite.db;dbname=OFLP200" "sqlite://;schema=ALLP200.db;dbname=OFLP200" +AtlCoolCopy "sqlite://;schema=mysqlite.db;dbname=CONDBR3" "sqlite://;schema=ALLP200.db;dbname=CONDBR3" + +# For the MC instance, we also need to associate the existing /SCT and /Tracker tags +AtlCoolConsole.py "sqlite://;schema=ALLP200.db;dbname=OFLP200" +settag /SCT SCT-02 OFLCOND-FASER-03 +settag /Tracker TRACKER-02 OFLCOND-FASER-03 +``` + +After installing the new tags in ALLP200.db, we can test this locally by copying it to the data/sqlite200 subdirectory of the run directory, and eventually install it on cvmfs. diff --git a/Tracker/TrackerConditions/FaserSCT_ConditionsData/data/BField_Conditions_Update03.py b/Tracker/TrackerConditions/FaserSCT_ConditionsData/data/BField_Conditions_Update03.py new file mode 100755 index 0000000000000000000000000000000000000000..8a99a5fba6709ab66c42587507657cb67802030c --- /dev/null +++ b/Tracker/TrackerConditions/FaserSCT_ConditionsData/data/BField_Conditions_Update03.py @@ -0,0 +1,158 @@ +#!/bin/env python + +# This script was used to add a new tag for the BField to both +# the real data (CONDBR3) and MC (OFLP200) instances. +# This writes out to mysqlite.db which then needs to be merged +# with the production database using: +# AtlCoolCopy "sqlite://;schema=mysqlite.db;dbname=CONDBR3" "sqlite://;schema=ALLP200.db;dbname=CONDBR3" + +description = '<timeStamp>run-lumi</timeStamp><addrHeader><address_header clid="1238547719" service_type="71" /></addrHeader><typeName>CondAttrListCollection</typeName>' + +descriptionDCS = '<timeStamp>time</timeStamp><addrHeader><address_header service_type="71" clid="1238547719" /></addrHeader><typeName>CondAttrListCollection</typeName><cache>600</cache>' + +descriptionAlign = '<timeStamp>run-lumi</timeStamp><addrHeader><address_header service_type="256" clid="1170039409" /></addrHeader><typeName>AlignableTransformContainer</typeName>' + +import sys +from PyCool import cool, coral +from CoolConvUtilities.AtlCoolLib import forceOpen + +print('generating new field database') + +dbSvc = cool.DatabaseSvcFactory.databaseService() +connectString = 'sqlite://;schema=mysqlite.db;dbname=CONDBR3' + +# This should open or create as needed +try: + print(f'forceOpen({connectString})') + db = forceOpen( connectString ) +except Exception as e: + print(e) + print('Problem opening DB!') + sys.exit(1) + +# Hierarchy of new tag is: +# OFLCOND-FASER-03 : GLOBAL-03 : GLOBAL-BField-03 : GLOBAL-BField-Maps-03 + +# Create new tag +glob = db.createFolderSet("/GLOBAL") +glob_bfield = db.createFolderSet("/GLOBAL/BField") + +glob_bfield.createTagRelation("GLOBAL-03", "GLOBAL-BField-03") +glob.createTagRelation("OFLCOND-FASER-03", "GLOBAL-03") + +print("Created tag GLOBAL-BField-03 and associated to OFLCOND-FASER-03") + +mapSpec = cool.RecordSpecification() +mapSpec.extend( 'FieldType', cool.StorageType.String4k ) +mapSpec.extend( 'MapFileName', cool.StorageType.String4k ) + +mapFolderSpec = cool.FolderSpecification(cool.FolderVersioning.MULTI_VERSION, mapSpec) +mapFolder = db.createFolder('/GLOBAL/BField/Maps', mapFolderSpec, descriptionDCS, True ) + +# New entry +mapRecord =
cool.Record(mapSpec) +mapRecord['FieldType'] = "GlobalMap" +mapRecord['MapFileName'] = "file:MagneticFieldMaps/FaserFieldTable_v2.root" + +mapFolder.storeObject( cool.ValidityKeyMin, cool.ValidityKeyMax, mapRecord, 1, "GLOBAL-BField-Maps-03", True ) +mapFolder.createTagRelation("GLOBAL-BField-03", "GLOBAL-BField-Maps-03") + +print("Created new entry in /GLOBAL/BField/Maps with tag GLOBAL-BField-Maps-03") + +# Also update the scale (since it sits in the same tag area) +scaleSpec = cool.RecordSpecification() +scaleSpec.extend( 'value', cool.StorageType.Float ) + +scaleRecord = cool.Record(scaleSpec) +scaleRecord['value'] = 1.0 + +scaleFolderSpec = cool.FolderSpecification(cool.FolderVersioning.MULTI_VERSION, scaleSpec) +scaleFolder = db.createFolder('/GLOBAL/BField/Scales', scaleFolderSpec, descriptionDCS, True ) + +# Channel names don't seem to be handled properly by Athena +scaleFolder.createChannel( 1, "Dipole_Scale" ) + +scaleFolder.storeObject( cool.ValidityKeyMin, cool.ValidityKeyMax, scaleRecord, 1, "GLOBAL-BField-Scale-03", True ) +scaleFolder.createTagRelation("GLOBAL-BField-03", "GLOBAL-BField-Scale-03") + +print("Created new entry in /GLOBAL/BField/Scales with tag GLOBAL-BField-Scale-03") + +# These are the only tags in the CONDBR3 instance, so we are done + +db.closeDatabase() + +# Now do it again for the MC instance + +connectString = 'sqlite://;schema=mysqlite.db;dbname=OFLP200' + +# This should open or create as needed +try: + print(f'forceOpen({connectString})') + db = forceOpen( connectString ) +except Exception as e: + print(e) + print('Problem opening DB!') + sys.exit(1) + +# Create new tag +glob = db.createFolderSet("/GLOBAL") +glob_bfield = db.createFolderSet("/GLOBAL/BField") + +glob_bfield.createTagRelation("GLOBAL-03", "GLOBAL-BField-03") +glob.createTagRelation("OFLCOND-FASER-03", "GLOBAL-03") + +print("Created tag GLOBAL-BField-03 and associated to OFLCOND-FASER-03") + +mapSpec = cool.RecordSpecification() +mapSpec.extend( 'FieldType', cool.StorageType.String4k ) +mapSpec.extend( 'MapFileName', cool.StorageType.String4k ) + +mapFolderSpec = cool.FolderSpecification(cool.FolderVersioning.MULTI_VERSION, mapSpec) +mapFolder = db.createFolder('/GLOBAL/BField/Maps', mapFolderSpec, descriptionDCS, True ) + +# New entry +mapRecord = cool.Record(mapSpec) +mapRecord['FieldType'] = "GlobalMap" +mapRecord['MapFileName'] = "file:MagneticFieldMaps/FaserFieldTable_v2.root" + +mapFolder.storeObject( cool.ValidityKeyMin, cool.ValidityKeyMax, mapRecord, 1, "GLOBAL-BField-Maps-03", True ) +mapFolder.createTagRelation("GLOBAL-BField-03", "GLOBAL-BField-Maps-03") + +print("Created new entry in /GLOBAL/BField/Maps with tag GLOBAL-BField-Maps-03") + +# Also update the scale (since it sits in the same tag area) +scaleSpec = cool.RecordSpecification() +scaleSpec.extend( 'value', cool.StorageType.Float ) + +scaleRecord = cool.Record(scaleSpec) +scaleRecord['value'] = 1.0 + +scaleFolderSpec = cool.FolderSpecification(cool.FolderVersioning.MULTI_VERSION, scaleSpec) +scaleFolder = db.createFolder('/GLOBAL/BField/Scales', scaleFolderSpec, descriptionDCS, True ) + +# Channel names don't seem to be handled properly by Athena +scaleFolder.createChannel( 1, "Dipole_Scale" ) + +scaleFolder.storeObject( cool.ValidityKeyMin, cool.ValidityKeyMax, scaleRecord, 1, "GLOBAL-BField-Scale-03", True ) +scaleFolder.createTagRelation("GLOBAL-BField-03", "GLOBAL-BField-Scale-03") + +print("Created new entry in /GLOBAL/BField/Scales with tag GLOBAL-BField-Scale-03") + +# Also make associations to /SCT and /TRACKER
tags +#sct = db.createFolderSet("/SCT") +#sct.createTagRelation("OFLCOND-FASER-03", "SCT-02") +#tracker = db.createFolderSet("/Tracker") +#tracker.createTagRelation("OFLCOND-FASER-03", "TRACKER-02") +#print("Associated old tags for /SCT and /Tracker to OFLCOND-FASER-03") + +# This doesn't work. +# Instead, we need to go into AtlCoolConsole.py and +# set the association there. +# +# settag /SCT SCT-02 OFLCOND-FASER-03 +# settag /Tracker TRACKER-02 OFLCOND-FASER-03 +# +# Check that this worked: +# tracetags / OFLCOND-FASER-03 + +db.closeDatabase() diff --git a/Tracker/TrackerConditions/FaserSCT_ConditionsData/data/BField_DataConditions.py b/Tracker/TrackerConditions/FaserSCT_ConditionsData/data/BField_DataConditions.py index 94846ee80dedb41dbc2354a6662873203aad9cb6..9fb21099b9dbeeb97f8632dc5a9e64919e40e3a1 100755 --- a/Tracker/TrackerConditions/FaserSCT_ConditionsData/data/BField_DataConditions.py +++ b/Tracker/TrackerConditions/FaserSCT_ConditionsData/data/BField_DataConditions.py @@ -3,6 +3,10 @@ # Use this to add a field map to the CONDBR3 database for real data # Copied the result from the OFLP200 DB # Note that the testbeam turns off the field by setting scale = 0 + +# This file creates the DB from scratch +# To add new tags, look at BField_Conditions_Update03.py + description = '<timeStamp>run-lumi</timeStamp><addrHeader><address_header clid="1238547719" service_type="71" /></addrHeader><typeName>CondAttrListCollection</typeName>' descriptionDCS = '<timeStamp>time</timeStamp><addrHeader><address_header service_type="71" clid="1238547719" /></addrHeader><typeName>CondAttrListCollection</typeName><cache>600</cache>' @@ -11,7 +15,7 @@ descriptionAlign = '<timeStamp>run-lumi</timeStamp><addrHeader><address_header s import sys from PyCool import cool, coral -from CoolConvUtilities.AtlCoolLib import indirectOpen +from CoolConvUtilities.AtlCoolLib import forceOpen dbSvc = cool.DatabaseSvcFactory.databaseService() connectString = 'sqlite://;schema=ALLP200.db;dbname=CONDBR3' @@ -20,8 +24,9 @@ print('generating field database') #dbSvc.dropDatabase( connectString ) try: # Open existing instead? 
- print('Try indirectOpen') - db = indirectOpen( connectString, readOnly=False ) + # This actually should open or create as needed + print(f'Try forceOpen({connectString})') + db = forceOpen( connectString ) except Exception as e: print(e) print('Problem opening DB, create instead') @@ -36,6 +41,9 @@ glob.createTagRelation("OFLCOND-FASER-01", "GLOBAL-01") glob_bfield.createTagRelation("GLOBAL-02", "GLOBAL-BField-02") glob.createTagRelation("OFLCOND-FASER-02", "GLOBAL-02") +glob_bfield.createTagRelation("GLOBAL-03", "GLOBAL-BField-03") +glob.createTagRelation("OFLCOND-FASER-03", "GLOBAL-03") + glob_bfield.createTagRelation("GLOBAL-TB00", "GLOBAL-BField-TB00") glob.createTagRelation("OFLCOND-FASER-TB00", "GLOBAL-TB00") @@ -43,13 +51,13 @@ mapSpec = cool.RecordSpecification() mapSpec.extend( 'FieldType', cool.StorageType.String4k ) mapSpec.extend( 'MapFileName', cool.StorageType.String4k ) -mapRecord = cool.Record(mapSpec) -mapRecord['FieldType'] = "GlobalMap" -mapRecord['MapFileName'] = "file:MagneticFieldMaps/FaserFieldTable.root" - mapFolderSpec = cool.FolderSpecification(cool.FolderVersioning.MULTI_VERSION, mapSpec) mapFolder = db.createFolder('/GLOBAL/BField/Maps', mapFolderSpec, descriptionDCS, True ) +mapRecord = cool.Record(mapSpec) +mapRecord['FieldType'] = "GlobalMap" +mapRecord['MapFileName'] = "file:MagneticFieldMaps/FaserFieldTable_v101.root" + mapFolder.storeObject( cool.ValidityKeyMin, cool.ValidityKeyMax, mapRecord, 1, "GLOBAL-BField-Maps-01", True ) mapFolder.createTagRelation("GLOBAL-BField-01", "GLOBAL-BField-Maps-01") @@ -59,6 +67,12 @@ mapFolder.createTagRelation("GLOBAL-BField-02", "GLOBAL-BField-Maps-02") mapFolder.storeObject( cool.ValidityKeyMin, cool.ValidityKeyMax, mapRecord, 1, "GLOBAL-BField-Maps-TB00", True ) mapFolder.createTagRelation("GLOBAL-BField-TB00", "GLOBAL-BField-Maps-TB00") +# New record +mapRecord['MapFileName'] = "file:MagneticFieldMaps/FaserFieldTable_v2.root" + +mapFolder.storeObject( cool.ValidityKeyMin, cool.ValidityKeyMax, mapRecord, 1, "GLOBAL-BField-Maps-03", True ) +mapFolder.createTagRelation("GLOBAL-BField-03", "GLOBAL-BField-Maps-03") + scaleSpec = cool.RecordSpecification() scaleSpec.extend( 'value', cool.StorageType.Float ) diff --git a/Tracker/TrackerEventCnv/TrackerEventTPCnv/src/FaserSCT_SpacePointCnv_p0.cxx b/Tracker/TrackerEventCnv/TrackerEventTPCnv/src/FaserSCT_SpacePointCnv_p0.cxx index b6cbc9da2508bd734634af3702165b262885ceff..6dc7c62a91fb18c44e701175173d3df82b4c02b0 100644 --- a/Tracker/TrackerEventCnv/TrackerEventTPCnv/src/FaserSCT_SpacePointCnv_p0.cxx +++ b/Tracker/TrackerEventCnv/TrackerEventTPCnv/src/FaserSCT_SpacePointCnv_p0.cxx @@ -15,7 +15,7 @@ StatusCode FaserSCT_SpacePointCnv_p0::initialize(MsgStream& log ) { // ISvcLocator* svcLocator = Gaudi::svcLocator(); // Get the messaging service, print where you are - log << MSG::INFO << "FaserSCT_SpacePointCnv::initialize()" << endmsg; + log << MSG::DEBUG << "FaserSCT_SpacePointCnv::initialize()" << endmsg; if(m_sctClusContName.initialize()!=StatusCode::SUCCESS) log << MSG::WARNING<< "FaserSCT_SpacePointCnv failed to initialize the sct cluster container" << endmsg; diff --git a/Tracker/TrackerRecAlgs/NoisyStripFinder/share/NoisyStripFinderJob.py b/Tracker/TrackerRecAlgs/NoisyStripFinder/share/NoisyStripFinderJob.py index b6dc5c514d05c558310bf1108e505deca1e8098c..e8c304455b27156d4c0b8983b9c04cd61367b2ce 100755 --- a/Tracker/TrackerRecAlgs/NoisyStripFinder/share/NoisyStripFinderJob.py +++ b/Tracker/TrackerRecAlgs/NoisyStripFinder/share/NoisyStripFinderJob.py @@ -39,7 +39,7 @@ for 
filename in args.file: filelist.append(filename) ConfigFlags.Input.Files = args.file -ConfigFlags.IOVDb.GlobalTag = "OFLCOND-FASER-02" +ConfigFlags.IOVDb.GlobalTag = "OFLCOND-FASER-03" #ConfigFlags.IOVDb.DatabaseInstance = "OFLP200" ConfigFlags.IOVDb.DatabaseInstance = "CONDBR3" ConfigFlags.Input.ProjectName = "data22" diff --git a/Tracker/TrackerRecAlgs/NoisyStripFinder/test/NoisyStripFinderDbg.py b/Tracker/TrackerRecAlgs/NoisyStripFinder/test/NoisyStripFinderDbg.py index f1d2ef1434110fa4a87a4567055e8f3d112f30c0..8b31e1b3db8f98e4cf8a535a7ca5f839fb142799 100755 --- a/Tracker/TrackerRecAlgs/NoisyStripFinder/test/NoisyStripFinderDbg.py +++ b/Tracker/TrackerRecAlgs/NoisyStripFinder/test/NoisyStripFinderDbg.py @@ -24,7 +24,7 @@ log.setLevel(DEBUG) Configurable.configurableRun3Behavior = True ConfigFlags.Input.Files = args.file -ConfigFlags.IOVDb.GlobalTag = "OFLCOND-FASER-02" +ConfigFlags.IOVDb.GlobalTag = "OFLCOND-FASER-03" ConfigFlags.IOVDb.DatabaseInstance = "OFLP200" ConfigFlags.Input.ProjectName = "data21" ConfigFlags.Input.isMC = False diff --git a/Tracker/TrackerRecAlgs/TrackerData/test/TI12TrackerSegmentFitDataDbg.py b/Tracker/TrackerRecAlgs/TrackerData/test/TI12TrackerSegmentFitDataDbg.py index b20d313f99654d07487651c645616b93a266789f..e69cdca114c30d2d72f0c27fbeed21746524e51b 100644 --- a/Tracker/TrackerRecAlgs/TrackerData/test/TI12TrackerSegmentFitDataDbg.py +++ b/Tracker/TrackerRecAlgs/TrackerData/test/TI12TrackerSegmentFitDataDbg.py @@ -22,7 +22,7 @@ Configurable.configurableRun3Behavior = True ConfigFlags.Input.Files = ['/home/tboeckh/tmp/Faser-Physics-006470-00093.raw_middleStation.SPs'] ConfigFlags.Output.ESDFileName = "TrackerSegmentFitData.ESD.pool.root" ConfigFlags.addFlag("Output.xAODFileName", f"TrackerSegmentFitData_xAOD.root") -ConfigFlags.IOVDb.GlobalTag = "OFLCOND-FASER-02" +ConfigFlags.IOVDb.GlobalTag = "OFLCOND-FASER-03" ConfigFlags.IOVDb.DatabaseInstance = "OFLP200" ConfigFlags.Input.ProjectName = "data21" ConfigFlags.Input.isMC = False diff --git a/Tracker/TrackerRecAlgs/TrackerSegmentFit/test/TrackerSegmentFitDbg.py b/Tracker/TrackerRecAlgs/TrackerSegmentFit/test/TrackerSegmentFitDbg.py index 75c46defbbb45d3c815f9d11e0091c83836a6e1e..7ca10a8b71dd3e80e0c705a09299c84537305876 100644 --- a/Tracker/TrackerRecAlgs/TrackerSegmentFit/test/TrackerSegmentFitDbg.py +++ b/Tracker/TrackerRecAlgs/TrackerSegmentFit/test/TrackerSegmentFitDbg.py @@ -29,7 +29,7 @@ ConfigFlags.Input.Files = [ '/eos/project-f/faser-commissioning/TI12Data/Run-005684/Faser-Physics-005684-00000.raw', ] ConfigFlags.Output.ESDFileName = "run005684-00000.ESD.pool.root" -ConfigFlags.IOVDb.GlobalTag = "OFLCOND-FASER-02" # Always needed; must match FaserVersion +ConfigFlags.IOVDb.GlobalTag = "OFLCOND-FASER-03" # Always needed; must match FaserVersion ConfigFlags.IOVDb.DatabaseInstance = "OFLP200" # Use MC conditions for now ConfigFlags.Input.ProjectName = "data21" # Needed to bypass autoconfig ConfigFlags.Input.isMC = False # Needed to bypass autoconfig @@ -98,4 +98,4 @@ replicaSvc.UseGeomSQLite = True sc = acc.run(maxEvents=-1) # Success should be 0 -sys.exit(not sc.isSuccess()) \ No newline at end of file +sys.exit(not sc.isSuccess()) diff --git a/Tracking/Acts/FaserActsGeometry/FaserActsGeometry/FaserActsLayerBuilder.h b/Tracking/Acts/FaserActsGeometry/FaserActsGeometry/FaserActsLayerBuilder.h index 132fd1ca2069375eceb43531071b3ff5ebd2dbdf..52aa75240b5f548433a165b19ec837827c42107a 100644 --- a/Tracking/Acts/FaserActsGeometry/FaserActsGeometry/FaserActsLayerBuilder.h +++ 
b/Tracking/Acts/FaserActsGeometry/FaserActsGeometry/FaserActsLayerBuilder.h @@ -51,9 +51,10 @@ public: std::string configurationName = "undefined"; FaserActsDetectorElement::Subdetector subdetector = FaserActsDetectorElement::Subdetector::SCT; - const TrackerDD::SCT_DetectorManager* mng; - const GeoVDetectorManager* vetomng; - const GeoVDetectorManager* triggermng; + const TrackerDD::SCT_DetectorManager* mng=nullptr; + const GeoVDetectorManager* vetomng=nullptr; + const GeoVDetectorManager* triggermng=nullptr; + const GeoVDetectorManager* dipolemng=nullptr; std::shared_ptr<const Acts::LayerCreator> layerCreator = nullptr; std::shared_ptr<std::vector<std::shared_ptr<const FaserActsDetectorElement>>> elementStore; std::shared_ptr<std::map<Identifier, Acts::GeometryIdentifier>> identifierMap; @@ -109,15 +110,16 @@ public: private: - FaserActs::CuboidVolumeBuilder::VolumeConfig buildScintVolume(double , double , std::string ); + FaserActs::CuboidVolumeBuilder::VolumeConfig buildScintVolume(const Acts::GeometryContext& ,double, double, double ,double, double, double , std::string ); +FaserActs::CuboidVolumeBuilder::VolumeConfig buildDipoleVolume(const Acts::GeometryContext& ,double , double , double , double , std::string ); double m_ModuleWidth; double m_ModuleLength; /// configruation object Config m_cfg; - Acts::Vector3 m_worldDimensions = { 400.0_mm, 400.0_mm, 8000.0_mm }; + Acts::Vector3 m_worldDimensions = { 600.0_mm, 600.0_mm, 8000.0_mm }; Acts::Vector3 m_worldCenter = {0.0, 0.0, 0.0}; - Acts::Vector3 m_trackerDimensions = { 400.0_mm, 400.0_mm, 100.0_mm }; + Acts::Vector3 m_trackerDimensions = { 600.0_mm, 600.0_mm, 100.0_mm }; /// Private access to the logger const Acts::Logger& diff --git a/Tracking/Acts/FaserActsGeometry/python/FaserActsMaterialMapping_jobOptions.py b/Tracking/Acts/FaserActsGeometry/python/FaserActsMaterialMapping_jobOptions.py index 99e81e8cd5546cc7db45f34f27978989e0ec0fcd..9a7e4995a8b28b4139826f8274b20ed1911bcf86 100644 --- a/Tracking/Acts/FaserActsGeometry/python/FaserActsMaterialMapping_jobOptions.py +++ b/Tracking/Acts/FaserActsGeometry/python/FaserActsMaterialMapping_jobOptions.py @@ -26,10 +26,6 @@ from FaserGeoModel.FaserGeoModelConfig import FaserGeometryCfg def ActsMaterialMappingCfg(configFlags, name = "FaserActsMaterialMapping", **kwargs): result = ComponentAccumulator() - MaterialStepConverterTool = ActsMaterialStepConverterToolCfg() - kwargs["MaterialStepConverterTool"] = MaterialStepConverterTool.getPrimary() - result.merge(MaterialStepConverterTool) - ActsSurfaceMappingTool = ActsSurfaceMappingToolCfg(configFlags) kwargs["SurfaceMappingTool"] = ActsSurfaceMappingTool.getPrimary() result.merge(ActsSurfaceMappingTool) @@ -64,11 +60,10 @@ if "__main__" == __name__: ## Just enable ID for the moment. 
ConfigFlags.Input.isMC = True ConfigFlags.Beam.Type = "collisions" - ConfigFlags.GeoModel.FaserVersion = "FASER-01" - ConfigFlags.IOVDb.GlobalTag = "OFLCOND-FASER-01" # Always needed; must match FaserVersion + ConfigFlags.GeoModel.FaserVersion = "FASERNU-03" + ConfigFlags.IOVDb.GlobalTag = "OFLCOND-FASER-03" # Always needed; must match FaserVersion ConfigFlags.TrackingGeometry.MaterialSource = "geometry-maps.json" - ConfigFlags.Concurrency.NumThreads = 1 - ConfigFlags.Concurrency.NumConcurrentEvents = 1 + ConfigFlags.GeoModel.Align.Dynamic = False ConfigFlags.lock() ConfigFlags.dump() @@ -101,4 +96,4 @@ if "__main__" == __name__: log.info("CONFIG DONE") - cfg.run(80000) + cfg.run(-1) diff --git a/Tracking/Acts/FaserActsGeometry/python/FaserActsWriteTrackingGeometryConfig.py b/Tracking/Acts/FaserActsGeometry/python/FaserActsWriteTrackingGeometryConfig.py index c760be8845ae6a9afe61c262c0fb064fdb049aca..77e6921bfa63b17816a019964ec94ff80875db5c 100644 --- a/Tracking/Acts/FaserActsGeometry/python/FaserActsWriteTrackingGeometryConfig.py +++ b/Tracking/Acts/FaserActsGeometry/python/FaserActsWriteTrackingGeometryConfig.py @@ -31,7 +31,7 @@ def FaserActsWriteTrackingGeometryBasicCfg(flags, **kwargs): actsWriteTrackingGeometry.ObjWriterTool=FaserActsObjWriterTool("FaserActsObjWriterTool",OutputDirectory="./", SubDetectors=["Station_0","Station_1","Station_2","Station_3"]) actsWriteTrackingGeometry.MaterialJsonWriterTool= FaserActsMaterialJsonWriterTool(OutputFile = "geometry-maps.json", processSensitives = False, - processnonmaterial = True) + processnonmaterial = False) acc.addEventAlgo(actsWriteTrackingGeometry) return acc diff --git a/Tracking/Acts/FaserActsGeometry/src/CuboidVolumeBuilder.cxx b/Tracking/Acts/FaserActsGeometry/src/CuboidVolumeBuilder.cxx index df96a311c852ff26268bb992d211679851a7fba6..e382d4cd31a023ef9a9fb4bb8afa744920bb500e 100644 --- a/Tracking/Acts/FaserActsGeometry/src/CuboidVolumeBuilder.cxx +++ b/Tracking/Acts/FaserActsGeometry/src/CuboidVolumeBuilder.cxx @@ -138,8 +138,6 @@ std::shared_ptr<Acts::TrackingVolume> CuboidVolumeBuilder::buildVolume( cfg.layerCfg.push_back(lCfg); } - else{ - } // Gather the layers Acts::LayerVector layVec; diff --git a/Tracking/Acts/FaserActsGeometry/src/FaserActsLayerBuilder.cxx b/Tracking/Acts/FaserActsGeometry/src/FaserActsLayerBuilder.cxx index 8ed2d78936f6c6fd7c36bb38225ed0a3046bcae7..c5f6a68ab01b8da95f3d62149d23bdf41684263d 100644 --- a/Tracking/Acts/FaserActsGeometry/src/FaserActsLayerBuilder.cxx +++ b/Tracking/Acts/FaserActsGeometry/src/FaserActsLayerBuilder.cxx @@ -12,11 +12,13 @@ #include "FaserActsGeometry/CuboidVolumeBuilder.h" #include "ActsInterop/IdentityHelper.h" #include "GeoModelKernel/GeoBox.h" +#include "GeoModelKernel/GeoTube.h" // ACTS #include "Acts/Material/ProtoSurfaceMaterial.hpp" #include "Acts/Surfaces/CylinderSurface.hpp" #include "Acts/Surfaces/PlaneSurface.hpp" +#include "Acts/Surfaces/DiscSurface.hpp" #include "Acts/Surfaces/RectangleBounds.hpp" #include "Acts/Geometry/GenericApproachDescriptor.hpp" #include "Acts/Geometry/ApproachDescriptor.hpp" @@ -89,41 +91,63 @@ FaserActs::CuboidVolumeBuilder::Config FaserActsLayerBuilder::buildVolume(const //veto station //Veto A, VetoRadiator, Veto B auto vetoManager = static_cast<const GeoVDetectorManager*>(m_cfg.vetomng); + if(vetoManager!=nullptr){ for(unsigned int i=0; i<vetoManager->getNumTreeTops(); i++){ auto vol0 = vetoManager->getTreeTop(i)->getLogVol(); //get the shape params and translation const GeoBox* shape = dynamic_cast<const GeoBox*>(vol0->getShape()); auto 
trans = vetoManager->getTreeTop(i)->getX();
     if(vetoManager->getTreeTop(i)->getNChildVols()==0){
-      volumeConfigs.emplace_back(buildScintVolume(trans.translation().z(), shape->getZHalfLength(),vol0->getName()));
+      volumeConfigs.emplace_back(buildScintVolume(gctx, trans.translation().x(),trans.translation().y(),trans.translation().z(), shape->getXHalfLength(),shape->getYHalfLength(),shape->getZHalfLength(),vol0->getName()));
     }
     else{
       for(size_t j =0 ;j< vetoManager->getTreeTop(i)->getNChildVols();j++){
         auto childtrans=vetoManager->getTreeTop(i)->getXToChildVol(j);
         const GeoBox* childshape = dynamic_cast<const GeoBox*>(vetoManager->getTreeTop(i)->getChildVol(j)->getLogVol()->getShape());
-        volumeConfigs.emplace_back(buildScintVolume(trans.translation().z() + childtrans.translation().z(), childshape->getZHalfLength(),vol0->getName()+std::string("_")+std::to_string(j)));
+        volumeConfigs.emplace_back(buildScintVolume(gctx, trans.translation().x() + childtrans.translation().x(),trans.translation().y() + childtrans.translation().y(),trans.translation().z() + childtrans.translation().z(), childshape->getXHalfLength(), childshape->getYHalfLength(), childshape->getZHalfLength(),vol0->getName()+std::string("_")+std::to_string(j)));
       }
     }
   }
+  }
   //build trigger stations
   auto triggerManager = static_cast<const GeoVDetectorManager*>(m_cfg.triggermng);
+  if(triggerManager!=nullptr){
   for(unsigned int i=0; i<triggerManager->getNumTreeTops(); i++){
     auto vol0 = triggerManager->getTreeTop(i)->getLogVol();
     //get the shape params and translation
     const GeoBox* shape = dynamic_cast<const GeoBox*>(vol0->getShape());
     auto trans = triggerManager->getTreeTop(i)->getX();
     if(triggerManager->getTreeTop(i)->getNChildVols()==0){
-      volumeConfigs.emplace_back(buildScintVolume(trans.translation().z(), shape->getZHalfLength(),vol0->getName()));
+      volumeConfigs.emplace_back(buildScintVolume(gctx, trans.translation().x(), trans.translation().y(),trans.translation().z(), shape->getXHalfLength(), shape->getYHalfLength(), shape->getZHalfLength(),vol0->getName()));
     }
     else{
       for(size_t j =0 ;j< triggerManager->getTreeTop(i)->getNChildVols();j++){
         auto childtrans=triggerManager->getTreeTop(i)->getXToChildVol(j);
         const GeoBox* childshape = dynamic_cast<const GeoBox*>(triggerManager->getTreeTop(i)->getChildVol(j)->getLogVol()->getShape());
-        volumeConfigs.emplace_back(buildScintVolume(trans.translation().z() + childtrans.translation().z(), childshape->getZHalfLength(),vol0->getName()+std::string("_")+std::to_string(j)));
+        volumeConfigs.emplace_back(buildScintVolume(gctx, trans.translation().x() + childtrans.translation().x(), trans.translation().y() + childtrans.translation().y(), trans.translation().z() + childtrans.translation().z(), childshape->getXHalfLength(), childshape->getYHalfLength(), childshape->getZHalfLength(),vol0->getName()+std::string("_")+std::to_string(j)));
       }
     }
   }
+  }
+  //build dipole magnets
+  auto dipoleManager = static_cast<const GeoVDetectorManager*>(m_cfg.dipolemng);
+  for(unsigned int i=0; i<dipoleManager->getNumTreeTops(); i++){
+    auto vol0 = dipoleManager->getTreeTop(i)->getLogVol();
+    //get the shape params and translation
+    const GeoTube* shape = dynamic_cast<const GeoTube*>(vol0->getShape());
+    auto trans = dipoleManager->getTreeTop(i)->getX();
+    if(dipoleManager->getTreeTop(i)->getNChildVols()==0){
+      volumeConfigs.emplace_back(buildDipoleVolume(gctx, trans.translation().z(), shape->getZHalfLength(), shape->getRMin(), shape->getRMax(), vol0->getName()+std::string("_")+std::to_string(i)));
+    }
+    else{
+      for(size_t j =0 ;j< dipoleManager->getTreeTop(i)->getNChildVols();j++){
+        auto childtrans=dipoleManager->getTreeTop(i)->getXToChildVol(j);
+        const GeoTube* childshape = dynamic_cast<const GeoTube*>(dipoleManager->getTreeTop(i)->getChildVol(j)->getLogVol()->getShape());
+        volumeConfigs.emplace_back(buildDipoleVolume(gctx, trans.translation().z() + childtrans.translation().z(), childshape->getZHalfLength(), childshape->getRMin(), childshape->getRMax(), vol0->getName()+std::string("_")+std::to_string(j)));
+      }
+    }
+  }
   //sort the geometry by the Z position, necessary to have the correct boundary
   auto sortFunc = [](FaserActs::CuboidVolumeBuilder::VolumeConfig& v1, FaserActs::CuboidVolumeBuilder::VolumeConfig& v2){return v1.position.z()<v2.position.z();};
@@ -242,7 +266,8 @@ FaserActsLayerBuilder::buildLayers(const Acts::GeometryContext& gctx,
 
 }
 
-FaserActs::CuboidVolumeBuilder::VolumeConfig FaserActsLayerBuilder::buildScintVolume(double zpos, double zhalflength, std::string name){
+
+FaserActs::CuboidVolumeBuilder::VolumeConfig FaserActsLayerBuilder::buildDipoleVolume(const Acts::GeometryContext& gctx, double zpos, double zhalflength, double rmin, double rmax, std::string name){
 
   FaserActs::CuboidVolumeBuilder::VolumeConfig volumeConfig;
 
@@ -253,7 +278,6 @@ FaserActs::CuboidVolumeBuilder::VolumeConfig FaserActsLayerBuilder::buildScintVo
   FaserActs::CuboidVolumeBuilder::SurfaceConfig surfacecfg;
   surfacecfg.position = Acts::Vector3(0, 0, zpos);
   surfacecfg.thickness=zhalflength;
-  layercfg.active = true;
 
   std::vector<std::shared_ptr<const Acts::Surface>> surfaces;
   surfaces.clear();
@@ -264,37 +288,52 @@ FaserActs::CuboidVolumeBuilder::VolumeConfig FaserActsLayerBuilder::buildScintVo
   layercfg.active = true;
 
   //bounds are hard coded
-  auto rBounds = std::make_shared<const Acts::RectangleBounds>( 130.*1_mm, 130.*1_mm) ;
-  surfacecfg.rBounds=rBounds;
   layercfg.surfaceCfg = surfacecfg;
 
+  Transform3 transformCenter(Translation3(0., 0., (zpos)*1_mm));
-  Transform3 transformInner(Translation3(0., 0., (zpos)*1_mm));
+  std::shared_ptr<Acts::DiscSurface> centerBoundary =
+      Acts::Surface::makeShared<Acts::DiscSurface>(
+          transformCenter, rmin, rmax);
-  std::shared_ptr<Acts::PlaneSurface> innerBoundary
-    = Acts::Surface::makeShared<Acts::PlaneSurface>(transformInner, rBounds);
+  Transform3 transformInner(Translation3(0., 0., (zpos-zhalflength)*1_mm));
+
+  std::shared_ptr<Acts::DiscSurface> innerBoundary =
+      Acts::Surface::makeShared<Acts::DiscSurface>(
+          transformInner, rmin, rmax);
+
+  Transform3 transformOuter(Translation3(0., 0., (zpos+zhalflength)*1_mm));
+  std::shared_ptr<Acts::DiscSurface> outerBoundary =
+      Acts::Surface::makeShared<Acts::DiscSurface>(
+          transformOuter, rmin, rmax);
-  size_t matBinsX = m_cfg.MaterialBins.first;
+
+// size_t matBinsX = m_cfg.MaterialBins.first;
   size_t matBinsY = m_cfg.MaterialBins.second;
+// use a single bin in phi and matBinsY bins in the r direction
+  Acts::BinUtility materialBinUtil(1, -M_PI, M_PI, Acts::closed, Acts::binPhi);
+  materialBinUtil +=
+      Acts::BinUtility(matBinsY, rmin, rmax, Acts::open, Acts::binR, transformInner);
+
+  std::shared_ptr<const Acts::ProtoSurfaceMaterial> materialProxy =
+      std::make_shared<const Acts::ProtoSurfaceMaterial>(materialBinUtil);
-  Acts::BinUtility materialBinUtil(
-      matBinsX, -130*1_mm, 130.*1_mm, Acts::open, Acts::binX);
-  materialBinUtil += Acts::BinUtility(
-      matBinsY, -130*1_mm, 130.*1_mm, Acts::open, Acts::binY, transformInner);
-  std::shared_ptr<const Acts::ProtoSurfaceMaterial> materialProxy
-      = std::make_shared<const
Acts::ProtoSurfaceMaterial>(materialBinUtil); innerBoundary->assignSurfaceMaterial(materialProxy); std::vector<std::shared_ptr<const Acts::Surface>> aSurfaces; aSurfaces.push_back(std::move(innerBoundary)); +// aSurfaces.push_back(std::move(centerBoundary)); +// aSurfaces.push_back(std::move(outerBoundary)); + Acts::ProtoLayer pl(gctx, aSurfaces); layercfg.surfaces = aSurfaces; - layerConfigs.push_back(layercfg); + layercfg.approachDescriptor = new Acts::GenericApproachDescriptor(aSurfaces); + layerConfigs.push_back(layercfg); volumeConfig.position = Acts::Vector3(0, 0, zpos); - Acts::Vector3 length= { 300.0*1_mm, 300.0*1_mm, zhalflength*2.*1_mm }; + Acts::Vector3 length= { 500.0*1_mm, 500.0*1_mm, zhalflength*2.*1_mm }; volumeConfig.length = length; //volumeConfig.layerCfg = {}; volumeConfig.layerCfg = layerConfigs; @@ -304,4 +343,79 @@ FaserActs::CuboidVolumeBuilder::VolumeConfig FaserActsLayerBuilder::buildScintVo } +FaserActs::CuboidVolumeBuilder::VolumeConfig FaserActsLayerBuilder::buildScintVolume(const Acts::GeometryContext& gctx, double xpos, double ypos, double zpos, double xhalflength, double yhalflength, double zhalflength, std::string name){ + + FaserActs::CuboidVolumeBuilder::VolumeConfig volumeConfig; + + std::vector<FaserActs::CuboidVolumeBuilder::LayerConfig> layerConfigs; + layerConfigs.clear(); + + FaserActs::CuboidVolumeBuilder::LayerConfig layercfg; + FaserActs::CuboidVolumeBuilder::SurfaceConfig surfacecfg; + surfacecfg.position = Acts::Vector3(xpos, ypos, zpos); + surfacecfg.thickness=zhalflength; + + std::vector<std::shared_ptr<const Acts::Surface>> surfaces; + surfaces.clear(); + + layercfg.binsX = 1; + layercfg.binsY = 1; + + layercfg.active = true; + + //bounds are hard coded + layercfg.surfaceCfg = surfacecfg; + + auto rBounds = std::make_shared<const Acts::RectangleBounds>( xhalflength*1_mm, yhalflength*1_mm) ; +// surfacecfg.rBounds=rBounds; + + + Transform3 transformInner(Translation3(xpos*1_mm, ypos*1_mm, (zpos-zhalflength)*1_mm)); + + std::shared_ptr<Acts::PlaneSurface> innerBoundary = + Acts::Surface::makeShared<Acts::PlaneSurface>( + transformInner, rBounds); + + Transform3 transformCenter(Translation3(xpos*1_mm, ypos*1_mm, zpos*1_mm)); + std::shared_ptr<Acts::PlaneSurface> centerBoundary = + Acts::Surface::makeShared<Acts::PlaneSurface>( + transformCenter, rBounds); + + Transform3 transformOuter(Translation3(xpos*1_mm, ypos*1_mm, (zpos+zhalflength)*1_mm)); + + std::shared_ptr<Acts::PlaneSurface> outerBoundary = + Acts::Surface::makeShared<Acts::PlaneSurface>( + transformOuter, rBounds); + + +// set bin size to 2 + Acts::BinUtility materialBinUtil(2, (0.-xhalflength)*1_mm, xhalflength*1_mm, Acts::open, Acts::binX); + materialBinUtil += + Acts::BinUtility(2, (0.-yhalflength)*1_mm, yhalflength*1_mm, Acts::open, Acts::binY, transformInner); + + std::shared_ptr<const Acts::ProtoSurfaceMaterial> materialProxy = + std::make_shared<const Acts::ProtoSurfaceMaterial>(materialBinUtil); + + + innerBoundary->assignSurfaceMaterial(materialProxy); + + std::vector<std::shared_ptr<const Acts::Surface>> aSurfaces; + aSurfaces.push_back(std::move(innerBoundary)); + aSurfaces.push_back(std::move(centerBoundary)); + aSurfaces.push_back(std::move(outerBoundary)); + Acts::ProtoLayer pl(gctx, aSurfaces); + + layercfg.surfaces = aSurfaces; + layercfg.approachDescriptor = new Acts::GenericApproachDescriptor(aSurfaces); + + layerConfigs.push_back(layercfg); + volumeConfig.position = Acts::Vector3(0, 0, zpos); + Acts::Vector3 length= { 500.0*1_mm, 500.0*1_mm, zhalflength*2.*1_mm }; + 
volumeConfig.length = length; + //volumeConfig.layerCfg = {}; + volumeConfig.layerCfg = layerConfigs; + volumeConfig.name = name; + + return volumeConfig; +} diff --git a/Tracking/Acts/FaserActsGeometry/src/FaserActsMaterialMapping.cxx b/Tracking/Acts/FaserActsGeometry/src/FaserActsMaterialMapping.cxx index 9edaa72ff056bbeab111cce25c127f9f7382e9a8..2595b42dc613db98997d4ca5c8ded263e65a9963 100644 --- a/Tracking/Acts/FaserActsGeometry/src/FaserActsMaterialMapping.cxx +++ b/Tracking/Acts/FaserActsGeometry/src/FaserActsMaterialMapping.cxx @@ -79,7 +79,8 @@ StatusCode FaserActsMaterialMapping::execute(const EventContext &ctx) const { const Trk::MaterialStep* mat = *iter; Trk::MaterialStep* newmat = const_cast<Trk::MaterialStep*>(mat); //remove the magnets, need to be updated - if((newmat->hitZ()<-1600&&newmat->hitZ()>-1920)||(newmat->hitZ()>-200&&newmat->hitZ()<100)||(newmat->hitZ()>1000&&newmat->hitZ()<1300)||(newmat->hitZ()>2000&&newmat->hitZ()<2500)) +// if((newmat->hitZ()<-1600&&newmat->hitZ()>-1920)||(newmat->hitZ()>-200&&newmat->hitZ()<100)||(newmat->hitZ()>1000&&newmat->hitZ()<1300)||(newmat->hitZ()>2000&&newmat->hitZ()<2500)) + if(newmat->hitZ()>-1920&&newmat->hitZ()<2500) newmaterialStepCollection->push_back(newmat); } if(newmaterialStepCollection->size()<1) diff --git a/Tracking/Acts/FaserActsGeometry/src/FaserActsTrackingGeometrySvc.cxx b/Tracking/Acts/FaserActsGeometry/src/FaserActsTrackingGeometrySvc.cxx index c6f476e7c1292f81e11916f24aa25d1b7a993ded..a45c7c06d37214248a19b189ca40886339c9f1b4 100644 --- a/Tracking/Acts/FaserActsGeometry/src/FaserActsTrackingGeometrySvc.cxx +++ b/Tracking/Acts/FaserActsGeometry/src/FaserActsTrackingGeometrySvc.cxx @@ -63,9 +63,10 @@ FaserActsTrackingGeometrySvc::initialize() //get all the subdetectors const GeoModelExperiment* theExpt = nullptr; - ATH_CHECK( m_detStore->retrieve( theExpt )); + ATH_CHECK( m_detStore->retrieve( theExpt ,"FASER")); const GeoVDetectorManager *vetoManager = theExpt->getManager("Veto"); const GeoVDetectorManager *triggerManager = theExpt->getManager("Trigger"); + const GeoVDetectorManager *dipoleManager = theExpt->getManager("Dipole"); Acts::LayerArrayCreator::Config lacCfg; auto layerArrayCreator = std::make_shared<const Acts::LayerArrayCreator>( @@ -104,7 +105,7 @@ FaserActsTrackingGeometrySvc::initialize() // SCT tgbConfig.trackingVolumeBuilders.push_back([&]( const auto& gctx, const auto& /*inner*/, const auto&) { - auto tv = makeVolumeBuilder(gctx, p_SCTManager, vetoManager, triggerManager); + auto tv = makeVolumeBuilder(gctx, p_SCTManager, vetoManager, triggerManager, dipoleManager); return tv->trackingVolume(gctx); }); } @@ -151,7 +152,7 @@ FaserActsTrackingGeometrySvc::trackingGeometry() { } std::shared_ptr<const Acts::ITrackingVolumeBuilder> -FaserActsTrackingGeometrySvc::makeVolumeBuilder(const Acts::GeometryContext& gctx, const TrackerDD::SCT_DetectorManager* manager, const GeoVDetectorManager* vetoManager, const GeoVDetectorManager* triggerManager) +FaserActsTrackingGeometrySvc::makeVolumeBuilder(const Acts::GeometryContext& gctx, const TrackerDD::SCT_DetectorManager* manager, const GeoVDetectorManager* vetoManager, const GeoVDetectorManager* triggerManager, const GeoVDetectorManager* dipoleManager) { std::string managerName = manager->getName(); @@ -161,8 +162,18 @@ FaserActsTrackingGeometrySvc::makeVolumeBuilder(const Acts::GeometryContext& gct cfg.subdetector = FaserActsDetectorElement::Subdetector::SCT; cfg.mng = static_cast<const TrackerDD::SCT_DetectorManager*>(manager); //get veto and trigger manager - 
cfg.vetomng = static_cast<const GeoVDetectorManager*>(vetoManager);
-  cfg.triggermng = static_cast<const GeoVDetectorManager*>(triggerManager);
+  if(vetoManager!=nullptr)
+    cfg.vetomng = static_cast<const GeoVDetectorManager*>(vetoManager);
+  else
+    ATH_MSG_WARNING(name() << " cannot find veto stations");
+  if(triggerManager!=nullptr)
+    cfg.triggermng = static_cast<const GeoVDetectorManager*>(triggerManager);
+  else
+    ATH_MSG_WARNING(name() << " cannot find trigger stations");
+  if(dipoleManager!=nullptr)
+    cfg.dipolemng = static_cast<const GeoVDetectorManager*>(dipoleManager);
+  else
+    ATH_MSG_WARNING(name() << " cannot find dipoles");
   cfg.elementStore = m_elementStore;
   cfg.identifierMap = m_identifierMap;
   std::vector<size_t> matBins(m_MaterialBins);
@@ -181,8 +192,6 @@ FaserActsTrackingGeometrySvc::populateAlignmentStore(FaserActsAlignmentStore *st
   // populate the alignment store with all detector elements
   m_trackingGeometry->visitSurfaces(
   [store](const Acts::Surface* srf) {
-    FaserActsGeometryContext constructionContext;
-    constructionContext.construction = true;
     const Acts::DetectorElementBase* detElem = srf->associatedDetectorElement();
     if(detElem){
       const auto* gmde = dynamic_cast<const FaserActsDetectorElement*>(detElem);
diff --git a/Tracking/Acts/FaserActsGeometry/src/FaserActsTrackingGeometrySvc.h b/Tracking/Acts/FaserActsGeometry/src/FaserActsTrackingGeometrySvc.h
index 83ef0efaefff6bf315f05bbc7f10bed8b8bd8665..3b191202b5959fc0ee606f7d2a21c4a82155044e 100644
--- a/Tracking/Acts/FaserActsGeometry/src/FaserActsTrackingGeometrySvc.h
+++ b/Tracking/Acts/FaserActsGeometry/src/FaserActsTrackingGeometrySvc.h
@@ -71,7 +71,7 @@ public:
 
 private:
   std::shared_ptr<const Acts::ITrackingVolumeBuilder>
-  makeVolumeBuilder(const Acts::GeometryContext& gctx, const TrackerDD::SCT_DetectorManager* manager, const GeoVDetectorManager* vetoManager, const GeoVDetectorManager* triggerManager);
+  makeVolumeBuilder(const Acts::GeometryContext& gctx, const TrackerDD::SCT_DetectorManager* manager, const GeoVDetectorManager* vetoManager, const GeoVDetectorManager* triggerManager, const GeoVDetectorManager* dipoleManager);
 
   ServiceHandle<StoreGateSvc> m_detStore;
   const TrackerDD::SCT_DetectorManager* p_SCTManager;
diff --git a/Tracking/Acts/FaserActsGeometry/test/FaserActsWriteTrackingGeometry.py b/Tracking/Acts/FaserActsGeometry/test/FaserActsWriteTrackingGeometry.py
index 21dfba7e51488c1f432807e43c6bdbce5759b1e0..196a86099b66e0750a2b91f4803acbf0252bc321 100644
--- a/Tracking/Acts/FaserActsGeometry/test/FaserActsWriteTrackingGeometry.py
+++ b/Tracking/Acts/FaserActsGeometry/test/FaserActsWriteTrackingGeometry.py
@@ -17,9 +17,9 @@ log.setLevel(DEBUG)
 Configurable.configurableRun3Behavior = True
 
 # Configure
-ConfigFlags.Input.Files = ["my03.HITS.pool.root"]
+ConfigFlags.Input.Files = ["myevt4.HITS.pool.root"]
 #ConfigFlags.Output.RDOFileName = "myRDO_sp.pool.root"
-ConfigFlags.IOVDb.GlobalTag = "OFLCOND-FASER-02" # Always needed; must match FaserVersion
+ConfigFlags.IOVDb.GlobalTag = "OFLCOND-FASER-03" # Always needed; must match FaserVersion
 ConfigFlags.GeoModel.FaserVersion = "FASERNU-03" # Always needed
 #ConfigFlags.GeoModel.FaserVersion = "FASERNU-03" # Always needed
 # Workaround for bug/missing flag; unimportant otherwise
diff --git a/Tracking/Acts/FaserActsKalmanFilter/src/CircleFitTrackSeedTool.cxx b/Tracking/Acts/FaserActsKalmanFilter/src/CircleFitTrackSeedTool.cxx
index 066698a478fbea03a6565a056afafd253e449a0d..41611bad5882de7e15ce3de15f6c4833fbf10581 100644
---
a/Tracking/Acts/FaserActsKalmanFilter/src/CircleFitTrackSeedTool.cxx +++ b/Tracking/Acts/FaserActsKalmanFilter/src/CircleFitTrackSeedTool.cxx @@ -7,6 +7,7 @@ #include "Identifier/Identifier.h" #include "Acts/Geometry/GeometryIdentifier.hpp" #include "CircleFit.h" +#include "Identifier/Identifier.h" #include "LinearFit.h" #include "TrackClassification.h" #include <array> @@ -264,12 +265,13 @@ CircleFitTrackSeedTool::Seed::Seed(const std::vector<Segment> &segments) : size = clusters.size(); } - void CircleFitTrackSeedTool::Seed::fit() { CircleFit::CircleData circleData(positions); CircleFit::Circle circle = CircleFit::circleFit(circleData); momentum = circle.r > 0 ? circle.r * 0.001 * 0.3 * 0.55 : 9999999.; - charge = circle.cy < 0 ? -1 : 1; + // the magnetic field bends a positively charged particle downwards, thus the + // y-component of the center of a fitted circle is negative. + charge = circle.cy < 0 ? 1 : -1; } void CircleFitTrackSeedTool::Seed::fakeFit(double B) { @@ -279,7 +281,9 @@ void CircleFitTrackSeedTool::Seed::fakeFit(double B) { cy = circle.cy; r = circle.r; momentum = r * 0.3 * B * m_MeV2GeV; - charge = circle.cy > 0 ? 1 : -1; + // the magnetic field bends a positively charged particle downwards, thus the + // y-component of the center of a fitted circle is negative. + charge = circle.cy < 0 ? 1 : -1; } void CircleFitTrackSeedTool::Seed::linearFit(const std::vector<Acts::Vector2> &points) { @@ -288,7 +292,6 @@ void CircleFitTrackSeedTool::Seed::linearFit(const std::vector<Acts::Vector2> &p c0 = origin[1] - origin[0] * c1; } - double CircleFitTrackSeedTool::Seed::getY(double z) { double sqt = std::sqrt(-cx*cx + 2*cx*z + r*r - z*z); return abs(cy - sqt) < abs(cy + sqt) ? cy - sqt : cy + sqt; diff --git a/Tracking/Acts/FaserActsKalmanFilter/test/CKF2.py b/Tracking/Acts/FaserActsKalmanFilter/test/CKF2.py index 27c50d9a8b397776ccf19e810ff981afa47a6b67..3d8e9e3fb2f03e126b4d7c39f3680126592eaa61 100644 --- a/Tracking/Acts/FaserActsKalmanFilter/test/CKF2.py +++ b/Tracking/Acts/FaserActsKalmanFilter/test/CKF2.py @@ -27,7 +27,7 @@ Configurable.configurableRun3Behavior = True ConfigFlags.Input.Files = args.file ConfigFlags.addFlag("Output.xAODFileName", f"CKF.xAOD.root") ConfigFlags.Output.ESDFileName = "CKF.ESD.pool.root" -ConfigFlags.IOVDb.GlobalTag = "OFLCOND-FASER-02" +ConfigFlags.IOVDb.GlobalTag = "OFLCOND-FASER-03" ConfigFlags.GeoModel.FaserVersion = "FASERNU-03" ConfigFlags.GeoModel.Align.Dynamic = False ConfigFlags.Beam.NumberOfCollisions = 0. 
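// -----------------------------------------------------------------------------------
// Editor's sketch (not part of the patch above): a minimal, self-contained C++
// illustration of the sign convention adopted in CircleFitTrackSeedTool::Seed::fit()
// and fakeFit(). The FASER dipole field bends a positively charged particle
// downwards, so the centre of the circle fitted in the y-z plane lies below the
// trajectory (cy < 0) for charge +1. The struct and helper names here are
// hypothetical stand-ins, not the repository's CircleFit API; the 0.55 T field and
// the 0.3 / 0.001 unit factors are taken from the fit() shown in the diff.
#include <cassert>

namespace {

struct Circle { double cy; double r; };  // fitted centre y-coordinate [mm] and radius [mm]

// Charge sign from the fitted centre: cy < 0 -> +1, otherwise -1.
int chargeFromCircle(const Circle& c) { return c.cy < 0. ? +1 : -1; }

// |p| [GeV] ~ 0.3 * B [T] * r [m]; the fitted radius is in mm, hence the factor 0.001.
double momentumGeV(const Circle& c, double fieldT = 0.55) {
  return c.r > 0. ? c.r * 0.001 * 0.3 * fieldT : 9999999.;
}

}  // namespace

int main() {
  Circle positive{-150., 3000.};  // centre below the track: positively charged
  Circle negative{+150., 3000.};  // centre above the track: negatively charged
  assert(chargeFromCircle(positive) == +1);
  assert(chargeFromCircle(negative) == -1);
  assert(momentumGeV(positive) > 0.);
  return 0;
}
// -----------------------------------------------------------------------------------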
diff --git a/Tracking/Acts/FaserActsKalmanFilter/test/TI12CKF2.py b/Tracking/Acts/FaserActsKalmanFilter/test/TI12CKF2.py index cf63c3e400de073ad330df6498129996863b6c3a..37ac15b268712bc98cea7ccd4134fe0545176cde 100644 --- a/Tracking/Acts/FaserActsKalmanFilter/test/TI12CKF2.py +++ b/Tracking/Acts/FaserActsKalmanFilter/test/TI12CKF2.py @@ -30,7 +30,7 @@ Configurable.configurableRun3Behavior = True ConfigFlags.Input.Files = args.file ConfigFlags.Output.ESDFileName = "CKF.ESD.pool.root" ConfigFlags.addFlag("Output.xAODFileName", f"CKF.xAOD.root") -ConfigFlags.IOVDb.GlobalTag = "OFLCOND-FASER-02" +ConfigFlags.IOVDb.GlobalTag = "OFLCOND-FASER-03" ConfigFlags.IOVDb.DatabaseInstance = "OFLP200" ConfigFlags.Input.ProjectName = "data22" ConfigFlags.Input.isMC = False diff --git a/Tracking/Acts/FaserActsKalmanFilter/test/TI12KalmanFilter.py b/Tracking/Acts/FaserActsKalmanFilter/test/TI12KalmanFilter.py index 11b7e4217bed8400426f4257ef5cf43f5b7adbfd..ed4be62cd88dcb5fa2280509da8af121b9eaaf85 100644 --- a/Tracking/Acts/FaserActsKalmanFilter/test/TI12KalmanFilter.py +++ b/Tracking/Acts/FaserActsKalmanFilter/test/TI12KalmanFilter.py @@ -24,7 +24,7 @@ Configurable.configurableRun3Behavior = True ConfigFlags.Input.Files = ['/home/tboeckh/tmp/Faser-Physics-006470-00093.raw_middleStation.SPs'] ConfigFlags.Output.ESDFileName = "MiddleStation-KalmanFilter.ESD.pool.root" ConfigFlags.Output.AODFileName = "MiddleStation-KalmanFilter.AOD.pool.root" -ConfigFlags.IOVDb.GlobalTag = "OFLCOND-FASER-02" +ConfigFlags.IOVDb.GlobalTag = "OFLCOND-FASER-03" ConfigFlags.IOVDb.DatabaseInstance = "OFLP200" ConfigFlags.Input.ProjectName = "data21" ConfigFlags.Input.isMC = False diff --git a/Waveform/WaveDigiTools/share/WaveformDigiAndRecoExample_jobOptions.py b/Waveform/WaveDigiTools/share/WaveformDigiAndRecoExample_jobOptions.py index 779031828c9c5736f9403e403514e19285029499..cbe155043db36bc9dc91ef6383ebd2db1d89edf7 100644 --- a/Waveform/WaveDigiTools/share/WaveformDigiAndRecoExample_jobOptions.py +++ b/Waveform/WaveDigiTools/share/WaveformDigiAndRecoExample_jobOptions.py @@ -21,7 +21,7 @@ if __name__ == "__main__": log.setLevel(VERBOSE) - ConfigFlags.IOVDb.GlobalTag = "OFLCOND-FASER-02" # Always needed; must match FaserVersion + ConfigFlags.IOVDb.GlobalTag = "OFLCOND-FASER-03" # Always needed; must match FaserVersion ConfigFlags.IOVDb.DatabaseInstance = "OFLP200" # Use MC conditions for now ConfigFlags.Input.ProjectName = "mc21" # Needed to bypass autoconfig ConfigFlags.Input.isMC = True # Needed to bypass autoconfig diff --git a/Waveform/WaveRecAlgs/python/WaveRecAlgsConfig.py b/Waveform/WaveRecAlgs/python/WaveRecAlgsConfig.py index b02a8b7d2b9b22193ff2a7d5e01efa36a89730cc..527c4e289eb1533e130301ee3a9c916d48d8e129 100644 --- a/Waveform/WaveRecAlgs/python/WaveRecAlgsConfig.py +++ b/Waveform/WaveRecAlgs/python/WaveRecAlgsConfig.py @@ -19,12 +19,24 @@ def WaveformReconstructionCfg(flags): if not flags.Input.isMC: acc.merge(WaveformClockRecCfg(flags, "ClockRecAlg")) - if "TB" not in flags.GeoModel.FaserVersion: - acc.merge(WaveformHitRecCfg(flags, "TriggerWaveformRecAlg", "Trigger")) - acc.merge(WaveformHitRecCfg(flags, "VetoNuWaveformRecAlg", "VetoNu")) + if "TB" in flags.GeoModel.FaserVersion: + acc.merge(WaveformHitRecCfg(flags, "CaloWaveformRecAlg", "Calo")) - acc.merge(WaveformHitRecCfg(flags, "VetoWaveformRecAlg", "Veto")) - acc.merge(WaveformHitRecCfg(flags, "PreshowerWaveformRecAlg", "Preshower")) + # Make preshower/veto window 200 ns wide (100 digitizer clock ticks) + acc.merge(WaveformHitRecCfg(flags, 
"VetoWaveformRecAlg", "Veto", FitWindowWidth=100 )) + acc.merge(WaveformHitRecCfg(flags, "PreshowerWaveformRecAlg", "Preshower", FitWindowWidth=100 )) + + else: + acc.merge(WaveformHitRecCfg(flags, "CaloWaveformRecAlg", "Calo")) + + acc.merge(WaveformHitRecCfg(flags, "VetoWaveformRecAlg", "Veto", FitWindowWidth=100 )) + acc.merge(WaveformHitRecCfg(flags, "PreshowerWaveformRecAlg", "Preshower", FitWindowWidth=100 )) + acc.merge(WaveformHitRecCfg(flags, "TriggerWaveformRecAlg", "Trigger", FitWindowWidth=100)) + acc.merge(WaveformHitRecCfg(flags, "VetoNuWaveformRecAlg", "VetoNu", FitWindowWidth=100)) + + acc.merge(WaveformHitRecCfg(flags, "VetoWaveformRecAlg", "Veto", FitWindowWidth=100 )) + # Make preshower window 200 ns wide (value in digitizer clock ticks) + acc.merge(WaveformHitRecCfg(flags, "PreshowerWaveformRecAlg", "Preshower", FitWindowWidth=100 )) acc.merge(WaveformHitRecCfg(flags, "CaloWaveformRecAlg", "Calo")) acc.merge(WaveformTimingCfg(flags)) @@ -55,9 +67,12 @@ def WaveformHitRecCfg(flags, name="WaveformRecAlg", source="", **kwargs): #if flags.Input.isMC: # kwargs.setdefault("PeakThreshold", 5) + tool = WaveformReconstructionTool(name=source+"WaveformRecTool", **kwargs) + # Remove arguments intended for WaveRecTool if "PeakThreshold" in kwargs: kwargs.pop("PeakThreshold") + if "FitWindowWidth" in kwargs: kwargs.pop("FitWindowWidth") kwargs.setdefault("WaveformContainerKey", source+"Waveforms") kwargs.setdefault("WaveformHitContainerKey", source+"WaveformHits") diff --git a/Waveform/WaveRecTools/src/WaveformReconstructionTool.h b/Waveform/WaveRecTools/src/WaveformReconstructionTool.h index 45402ff505f5fd89420299f69cbc5a1ae9ab593b..3a6224b1f70ea1909da79431f6c54383c4177984 100644 --- a/Waveform/WaveRecTools/src/WaveformReconstructionTool.h +++ b/Waveform/WaveRecTools/src/WaveformReconstructionTool.h @@ -96,8 +96,9 @@ class WaveformReconstructionTool: public extends<AthAlgTool, IWaveformReconstruc // // Window to define fitting range, in samples (2ns/sample) + // Make this longer by default, from 120 to 150 ns IntegerProperty m_windowStart{this, "FitWindowStart", -20}; - IntegerProperty m_windowWidth{this, "FitWindowWidth", 60}; + IntegerProperty m_windowWidth{this, "FitWindowWidth", 75}; // // Remove overflow values from CB fit diff --git a/faser-common b/faser-common index 69a90ec95da88a00097fb809bede6c2bae8c02d6..89ce6a07128eb2ebc367b6b68f29c9c88220e3e6 160000 --- a/faser-common +++ b/faser-common @@ -1 +1 @@ -Subproject commit 69a90ec95da88a00097fb809bede6c2bae8c02d6 +Subproject commit 89ce6a07128eb2ebc367b6b68f29c9c88220e3e6 diff --git a/xAOD/xAODFaserCalorimeter/Root/CalorimeterHitAuxContainer_v1.cxx b/xAOD/xAODFaserCalorimeter/Root/CalorimeterHitAuxContainer_v1.cxx index 6f6edf3e4c520dbf32bb816addb74f4ad3eae23f..f73a83dd127cc0d24d999c0e8f140e913a329e76 100644 --- a/xAOD/xAODFaserCalorimeter/Root/CalorimeterHitAuxContainer_v1.cxx +++ b/xAOD/xAODFaserCalorimeter/Root/CalorimeterHitAuxContainer_v1.cxx @@ -10,11 +10,11 @@ namespace xAOD { CalorimeterHitAuxContainer_v1::CalorimeterHitAuxContainer_v1() : AuxContainerBase() { - AUX_VARIABLE(localtime); - AUX_VARIABLE(bcid_time); - AUX_VARIABLE(raw_energy); + AUX_VARIABLE(channel); + AUX_VARIABLE(Edep); + AUX_VARIABLE(raw_Edep); - AUX_VARIABLE(caloLinks); + AUX_VARIABLE(WaveformLink); } } // namespace xAOD diff --git a/xAOD/xAODFaserCalorimeter/Root/CalorimeterHit_v1.cxx b/xAOD/xAODFaserCalorimeter/Root/CalorimeterHit_v1.cxx index 5e7a69ecc024dc3ecb70df95f1e623c4bc4e09fc..f085cb0c92acca759173260ba56c26813fd72935 100644 --- 
a/xAOD/xAODFaserCalorimeter/Root/CalorimeterHit_v1.cxx +++ b/xAOD/xAODFaserCalorimeter/Root/CalorimeterHit_v1.cxx @@ -13,41 +13,41 @@ namespace xAOD { CalorimeterHit_v1::CalorimeterHit_v1() : SG::AuxElement() { } - AUXSTORE_PRIMITIVE_SETTER_AND_GETTER( CalorimeterHit_v1, float, localtime, set_localtime ) + AUXSTORE_PRIMITIVE_SETTER_AND_GETTER( CalorimeterHit_v1, unsigned int, channel, set_channel ) - AUXSTORE_PRIMITIVE_SETTER_AND_GETTER( CalorimeterHit_v1, float, bcidtime, set_bcidtime ) + AUXSTORE_PRIMITIVE_SETTER_AND_GETTER( CalorimeterHit_v1, float, Edep, set_Edep ) - AUXSTORE_PRIMITIVE_SETTER_AND_GETTER( CalorimeterHit_v1, float, raw_energy, set_raw_energy ) + AUXSTORE_PRIMITIVE_SETTER_AND_GETTER( CalorimeterHit_v1, float, raw_Edep, set_raw_Edep ) // setters and getters for the Calo WaveformHit links AUXSTORE_OBJECT_SETTER_AND_GETTER( CalorimeterHit_v1, CalorimeterHit_v1::WaveformHitLinks_t, - caloWaveformLinks, - setCaloWaveformLinks ) + WaveformLinks, + setWaveformLinks ) - static const SG::AuxElement::Accessor< CalorimeterHit_v1::WaveformHitLinks_t > caloHitAcc( "caloWaveformLinks" ); + static const SG::AuxElement::Accessor< CalorimeterHit_v1::WaveformHitLinks_t > HitAcc( "WaveformLinks" ); - const WaveformHit* CalorimeterHit_v1::caloHit( size_t i ) const { - return ( *caloHitAcc( *this )[ i ] ); + const WaveformHit* CalorimeterHit_v1::Hit( size_t i ) const { + return ( *HitAcc( *this )[ i ] ); } - size_t CalorimeterHit_v1::nCaloHits() const { - return caloHitAcc( *this ).size(); + size_t CalorimeterHit_v1::nHits() const { + return HitAcc( *this ).size(); } - void CalorimeterHit_v1::addCaloHit( const xAOD::WaveformHitContainer* pWaveformHitContainer, + void CalorimeterHit_v1::addHit( const xAOD::WaveformHitContainer* pWaveformHitContainer, const xAOD::WaveformHit* pWaveformHit) { ElementLink< xAOD::WaveformHitContainer > linkToWaveformHit; linkToWaveformHit.toContainedElement(*pWaveformHitContainer, pWaveformHit); - caloHitAcc( *this ).push_back( linkToWaveformHit ); + HitAcc( *this ).push_back( linkToWaveformHit ); return; } - void CalorimeterHit_v1::clearCaloWaveformLinks() { - caloHitAcc( *this ).clear(); + void CalorimeterHit_v1::clearWaveformLinks() { + HitAcc( *this ).clear(); return; } @@ -57,8 +57,8 @@ namespace xAOD { std::ostream& operator<<(std::ostream& s, const xAOD::CalorimeterHit_v1& hit) { s << "xAODCalorimeterHit:" - << " local time=" << hit.localtime() - << " raw_energy=" << hit.raw_energy() + << " channel = " << hit.channel() + << ", Edep = " << hit.Edep() << std::endl; return s; diff --git a/xAOD/xAODFaserCalorimeter/xAODFaserCalorimeter/versions/CalorimeterHitAuxContainer_v1.h b/xAOD/xAODFaserCalorimeter/xAODFaserCalorimeter/versions/CalorimeterHitAuxContainer_v1.h index 7e61261eb4bd10f106d03b2a85f9918b82ce7944..41a89a0325e9dd5ce78dab40ad6ae08458eb37c3 100644 --- a/xAOD/xAODFaserCalorimeter/xAODFaserCalorimeter/versions/CalorimeterHitAuxContainer_v1.h +++ b/xAOD/xAODFaserCalorimeter/xAODFaserCalorimeter/versions/CalorimeterHitAuxContainer_v1.h @@ -29,13 +29,14 @@ namespace xAOD { private: /// @name Basic variables ///@ { - std::vector<float> localtime; - std::vector<float> bcid_time; - std::vector<float> raw_energy; + std::vector<unsigned int> channel; + std::vector<float> Edep; + std::vector<float> raw_Edep; typedef std::vector< ElementLink< WaveformHitContainer > > WaveformHitLink_t; - std::vector< WaveformHitLink_t > caloLinks; - std::vector< WaveformHitLink_t > preshowerLinks; + std::vector< WaveformHitLink_t > WaveformLink; + //std::vector< 
WaveformHitLink_t > caloLinks; + //std::vector< WaveformHitLink_t > preshowerLinks; ///@} diff --git a/xAOD/xAODFaserCalorimeter/xAODFaserCalorimeter/versions/CalorimeterHit_v1.h b/xAOD/xAODFaserCalorimeter/xAODFaserCalorimeter/versions/CalorimeterHit_v1.h index d9bb35047e6a75d6dac12dc0ba0eb47e3bceb684..fefc0d5ae3dac013ee734e34ed82c600f8fbde5d 100644 --- a/xAOD/xAODFaserCalorimeter/xAODFaserCalorimeter/versions/CalorimeterHit_v1.h +++ b/xAOD/xAODFaserCalorimeter/xAODFaserCalorimeter/versions/CalorimeterHit_v1.h @@ -35,29 +35,29 @@ namespace xAOD { /// @{ /// Best results - float localtime() const; - void set_localtime(float value); + unsigned int channel() const; + void set_channel(unsigned int value); - float bcidtime() const; - void set_bcidtime(float value); + float Edep() const; + void set_Edep(float value); - float raw_energy() const; - void set_raw_energy(float value); + float raw_Edep() const; + void set_raw_Edep(float value); // Waveform Hits typedef std::vector< ElementLink< xAOD::WaveformHitContainer > > WaveformHitLinks_t; // Contributing Calorimeter Waveform Hits - const WaveformHitLinks_t& caloWaveformLinks() const; - void setCaloWaveformLinks( const WaveformHitLinks_t& caloWaveforms ); + const WaveformHitLinks_t& WaveformLinks() const; + void setWaveformLinks( const WaveformHitLinks_t& Waveforms ); // Remove all waveform hits - void clearCaloWaveformLinks(); + void clearWaveformLinks(); // Get the pointer to a given waveform hit - const WaveformHit* caloHit( size_t i ) const; + const WaveformHit* Hit( size_t i ) const; // Get the number of waveform hits - size_t nCaloHits() const; + size_t nHits() const; // Add a waveform hit - void addCaloHit( const xAOD::WaveformHitContainer*, const xAOD::WaveformHit*); + void addHit( const xAOD::WaveformHitContainer*, const xAOD::WaveformHit*); // // Contributing Preshower Waveform Hits // const WaveformHitLinks_t& preshowerWaveformLinks() const;