diff --git a/Calorimeter/CaloDigiAlgs/CMakeLists.txt b/Calorimeter/CaloDigiAlgs/CMakeLists.txt index 42991aa03cc03af6e51ab161d36d1d61df1a7956..5a4de58a7469a2acb320de9c758ea1aef98ebcac 100644 --- a/Calorimeter/CaloDigiAlgs/CMakeLists.txt +++ b/Calorimeter/CaloDigiAlgs/CMakeLists.txt @@ -9,7 +9,7 @@ atlas_subdir( CaloDigiAlgs ) atlas_add_component( CaloDigiAlgs src/*.cxx src/*.h src/components/*.cxx - LINK_LIBRARIES AthenaBaseComps Identifier StoreGateLib WaveRawEvent FaserCaloSimEvent WaveDigiToolsLib) + LINK_LIBRARIES AthenaBaseComps Identifier FaserCaloIdentifier StoreGateLib WaveRawEvent FaserCaloSimEvent WaveDigiToolsLib) atlas_install_python_modules( python/*.py ) diff --git a/Calorimeter/CaloDigiAlgs/python/CaloDigiAlgsConfig.py b/Calorimeter/CaloDigiAlgs/python/CaloDigiAlgsConfig.py index cd0c9a454139d5906dd2e01ee0d1cc09b1697f76..ce45e6257885956f5edb6284780ff9756b51b9fc 100644 --- a/Calorimeter/CaloDigiAlgs/python/CaloDigiAlgsConfig.py +++ b/Calorimeter/CaloDigiAlgs/python/CaloDigiAlgsConfig.py @@ -36,6 +36,10 @@ def CaloWaveformDigiCfg(flags, name="CaloWaveformDigiAlg", **kwargs): digiAlg.CB_n = 10 digiAlg.CB_sigma = 4 digiAlg.CB_mean = 820 + digiAlg.CB_norm = 2 + + digiAlg.base_mean = 15000 + digiAlg.base_rms = 3 acc.addEventAlgo(digiAlg) diff --git a/Calorimeter/CaloDigiAlgs/src/CaloWaveformDigiAlg.cxx b/Calorimeter/CaloDigiAlgs/src/CaloWaveformDigiAlg.cxx index 8e60aefaf9db417d6a583b2b2ee59dd279d1f05a..c54e1e37344d963f28a12f9c9f8b5d86781f93c4 100644 --- a/Calorimeter/CaloDigiAlgs/src/CaloWaveformDigiAlg.cxx +++ b/Calorimeter/CaloDigiAlgs/src/CaloWaveformDigiAlg.cxx @@ -2,12 +2,15 @@ #include "Identifier/Identifier.h" -#include <vector> +#include "FaserCaloSimEvent/CaloHitIdHelper.h" + #include <map> +#include <utility> CaloWaveformDigiAlg::CaloWaveformDigiAlg(const std::string& name, ISvcLocator* pSvcLocator) - : AthReentrantAlgorithm(name, pSvcLocator) { + : AthReentrantAlgorithm(name, pSvcLocator) +{ } @@ -18,25 +21,25 @@ 
CaloWaveformDigiAlg::initialize() { // Initalize tools ATH_CHECK( m_digiTool.retrieve() ); - // Set key to read waveform from ATH_CHECK( m_caloHitContainerKey.initialize() ); // Set key to write container ATH_CHECK( m_waveformContainerKey.initialize() ); - // Will eventually depend on the type of detector - // TODO: Vary time at which centre it? - // TODO: Change params compared to scint - // m_kernel = new TF1("PDF", " ROOT::Math::crystalball_pdf(x, -0.9, 10, 4, 900)", 0, 1200); - - m_kernel = new TF1("PDF", "ROOT::Math::crystalball_pdf(x, [0],[1],[2],[3])", 0, 1200); - //m_kernel->SetParameters(-0.25,10,4,900); + // Set up helper + ATH_CHECK(detStore()->retrieve(m_ecalID, "EcalID")); + + // Create CB time kernel and pre-evaluate for number of samples + m_kernel = new TF1("PDF", "[4] * ROOT::Math::crystalball_pdf(x, [0],[1],[2],[3])", 0, 1200); m_kernel->SetParameter(0, m_CB_alpha); m_kernel->SetParameter(1, m_CB_n); m_kernel->SetParameter(2, m_CB_sigma); m_kernel->SetParameter(3, m_CB_mean); + m_kernel->SetParameter(4, m_CB_norm); + // Pre-evaluate time kernel for each bin + m_timekernel = m_digiTool->evaluate_timekernel(m_kernel); return StatusCode::SUCCESS; } @@ -53,12 +56,10 @@ CaloWaveformDigiAlg::finalize() { StatusCode CaloWaveformDigiAlg::execute(const EventContext& ctx) const { ATH_MSG_DEBUG("Executing"); - - ATH_MSG_DEBUG("Run: " << ctx.eventID().run_number() - << " Event: " << ctx.eventID().event_number()); + ATH_MSG_DEBUG("Run: " << ctx.eventID().run_number() << " Event: " << ctx.eventID().event_number()); // Find the input HIT collection - SG::ReadHandle<CaloHitCollection> caloHitHandle(m_caloHitContainerKey, ctx); + SG::ReadHandle<CaloHitCollection> caloHitHandle(m_caloHitContainerKey, ctx); ATH_CHECK( caloHitHandle.isValid() ); ATH_MSG_DEBUG("Found ReadHandle for CaloHitCollection " << m_caloHitContainerKey); @@ -73,10 +74,50 @@ CaloWaveformDigiAlg::execute(const EventContext& ctx) const { ATH_MSG_DEBUG("CaloHitCollection found with zero length!"); 
return StatusCode::SUCCESS; } - - // Digitise the hits - CHECK( m_digiTool->digitise<CaloHitCollection>(caloHitHandle.ptr(), - waveformContainerHandle.ptr(), m_kernel) ); + + // Create structure to store pulse for each channel + std::map<Identifier, std::vector<uint16_t>> waveforms = m_digiTool->create_waveform_map(m_ecalID); + + for (const auto& tk : m_timekernel) { + std::map<unsigned int, float> counts; + + // Convolve hit energy with evaluated kernel and sum for each hit id (i.e. channel) + for (const auto& hit : *caloHitHandle) { + counts[hit.identify()] += tk * hit.energyLoss(); + } + + // Subtract count from baseline and add result to correct waveform vector + for (const auto& c : counts) { + + unsigned int baseline = m_digiTool->generate_baseline(m_base_mean, m_base_rms); + int value = baseline - c.second; + + if (value < 0) { + ATH_MSG_WARNING("Found pulse " << c.second << " larger than baseline " << baseline); + value = 0; // Protect against scaling signal above baseline + } + + // Convert hit id to Identifier and store + Identifier id = CaloHitIdHelper::GetHelper()->getIdentifier(c.first); + waveforms[id].push_back(value); + } + } + + + //m_chrono->chronoStop("Digit"); + //m_chrono->chronoStart("Write"); + + // Loop over waveform vectors to make and store waveform + unsigned int nsamples = m_digiTool->nsamples(); + for (const auto& w : waveforms) { + RawWaveform* wfm = new RawWaveform(); + wfm->setWaveform(0, w.second); + wfm->setIdentifier(w.first); + wfm->setSamples(nsamples); + waveformContainerHandle->push_back(wfm); + } + + //m_chrono->chronoStop("Write") ATH_MSG_DEBUG("WaveformsHitContainer " << waveformContainerHandle.name() << "' filled with "<< waveformContainerHandle->size() <<" items"); diff --git a/Calorimeter/CaloDigiAlgs/src/CaloWaveformDigiAlg.h b/Calorimeter/CaloDigiAlgs/src/CaloWaveformDigiAlg.h index 9de68257e540890d29143df6a054578334f12503..8eeb0a44eba0dde0ede044036b57406cfcf25efc 100644 --- 
a/Calorimeter/CaloDigiAlgs/src/CaloWaveformDigiAlg.h +++ b/Calorimeter/CaloDigiAlgs/src/CaloWaveformDigiAlg.h @@ -19,11 +19,15 @@ #include "GaudiKernel/ServiceHandle.h" #include "GaudiKernel/ToolHandle.h" +// Helpers +#include "FaserCaloIdentifier/EcalID.h" + // ROOT #include "TF1.h" // STL #include <string> +#include <vector> class CaloWaveformDigiAlg : public AthReentrantAlgorithm { @@ -48,18 +52,26 @@ class CaloWaveformDigiAlg : public AthReentrantAlgorithm { CaloWaveformDigiAlg &operator=(const CaloWaveformDigiAlg&) = delete; //@} + /** @name Steerable parameters for crystal ball and baseline **/ + //@{ Gaudi::Property<double> m_CB_alpha {this, "CB_alpha", 0, "Alpha of the crystal ball function"}; Gaudi::Property<double> m_CB_n {this, "CB_n", 0, "n of the crystal ball function"}; Gaudi::Property<double> m_CB_mean {this, "CB_mean", 0, "Mean of the crystal ball function"}; Gaudi::Property<double> m_CB_sigma {this, "CB_sigma", 0, "Sigma of the crystal ball function"}; + Gaudi::Property<double> m_CB_norm {this, "CB_norm", 0, "Norm of the crystal ball function"}; + Gaudi::Property<double> m_base_mean {this, "base_mean", 0, "Mean of the baseline"}; + Gaudi::Property<double> m_base_rms {this, "base_rms", 0, "RMS of the baseline"}; + //@} + /** Kernel PDF and evaluated values **/ + //@{ + TF1* m_kernel; + std::vector<float> m_timekernel; + //@} - - - /// Kernel PDF - TF1* m_kernel; - + /// Detector ID helper + const EcalID* m_ecalID{nullptr}; /** * @name Digitisation tool @@ -89,4 +101,6 @@ class CaloWaveformDigiAlg : public AthReentrantAlgorithm { }; + + #endif // CALODIGIALGS_CALODIGIALG_H diff --git a/Calorimeter/FaserCaloSimEvent/CMakeLists.txt b/Calorimeter/FaserCaloSimEvent/CMakeLists.txt index 9777d4178e6e465f0f57de0c82173b0620fbe46a..2b034a3189207f186984b163c0c5a034bee7f956 100644 --- a/Calorimeter/FaserCaloSimEvent/CMakeLists.txt +++ b/Calorimeter/FaserCaloSimEvent/CMakeLists.txt @@ -18,11 +18,11 @@ atlas_add_library( FaserCaloSimEvent PRIVATE_INCLUDE_DIRS 
${ROOT_INCLUDE_DIRS} ${GEANT4_INCLUDE_DIRS} DEFINITIONS ${CLHEP_DEFINITIONS} LINK_LIBRARIES ${CLHEP_LIBRARIES} AthAllocators AthenaKernel CxxUtils GeneratorObjects HitManagement StoreGateLib SGtests - PRIVATE_LINK_LIBRARIES ${ROOT_LIBRARIES} FaserCaloIdentifier ) + PRIVATE_LINK_LIBRARIES ${ROOT_LIBRARIES} FaserCaloIdentifier Identifier) atlas_add_dictionary( FaserCaloSimEventDict FaserCaloSimEvent/CaloSimEventDict.h FaserCaloSimEvent/selection.xml INCLUDE_DIRS ${ROOT_INCLUDE_DIRS} ${CLHEP_INCLUDE_DIRS} ${GEANT4_INCLUDE_DIRS} - LINK_LIBRARIES ${ROOT_LIBRARIES} ${CLHEP_LIBRARIES} AthAllocators CxxUtils GeneratorObjects HitManagement StoreGateLib SGtests FaserCaloIdentifier FaserCaloSimEvent ) + LINK_LIBRARIES ${ROOT_LIBRARIES} ${CLHEP_LIBRARIES} AthAllocators CxxUtils GeneratorObjects HitManagement StoreGateLib SGtests FaserCaloIdentifier FaserCaloSimEvent Identifier) diff --git a/Calorimeter/FaserCaloSimEvent/FaserCaloSimEvent/CaloHitIdHelper.h b/Calorimeter/FaserCaloSimEvent/FaserCaloSimEvent/CaloHitIdHelper.h index 5500649bc0c789aa5119503ee357839ee7e11b9e..e94581470c0fa76dddbec99c44aa93ba61484cdb 100644 --- a/Calorimeter/FaserCaloSimEvent/FaserCaloSimEvent/CaloHitIdHelper.h +++ b/Calorimeter/FaserCaloSimEvent/FaserCaloSimEvent/CaloHitIdHelper.h @@ -23,6 +23,11 @@ // This class is singleton and static method and variable are used. 
#include "CxxUtils/checker_macros.h" + +#include "Identifier/Identifier.h" + +#include "FaserCaloIdentifier/EcalID.h" + ATLAS_NO_CHECK_FILE_THREAD_SAFETY; class CaloHitIdHelper : HitIdHelper { @@ -43,6 +48,8 @@ class CaloHitIdHelper : HitIdHelper { // Left or Right int getModule(const int& hid) const; + Identifier getIdentifier(const int& hid) const; + // // Info packing: int buildHitId(const int, const int) const; @@ -54,6 +61,9 @@ class CaloHitIdHelper : HitIdHelper { // // Initialize the helper, only called by the constructor void Initialize(); + + /// Detector ID helper + const EcalID* m_ecalID{nullptr}; }; #endif // CALOSIMEVENT_CALOHITIDHELPER diff --git a/Calorimeter/FaserCaloSimEvent/src/CaloHitIdHelper.cxx b/Calorimeter/FaserCaloSimEvent/src/CaloHitIdHelper.cxx index eec88553dae53bc6e415b385494b32a7f83645ad..0e98fbd6aa3c1be718b049ab91e74ae92b682ed4 100644 --- a/Calorimeter/FaserCaloSimEvent/src/CaloHitIdHelper.cxx +++ b/Calorimeter/FaserCaloSimEvent/src/CaloHitIdHelper.cxx @@ -6,8 +6,6 @@ #include "FaserCaloSimEvent/CaloHitIdHelper.h" #include "StoreGate/StoreGateSvc.h" -#include "StoreGate/StoreGateSvc.h" -#include "FaserCaloIdentifier/EcalID.h" #include "G4Types.hh" #ifdef G4MULTITHREADED @@ -42,10 +40,9 @@ void CaloHitIdHelper::Initialize() { // determine whether hits were created with an SLHC dictionary // in which case eta module field is expanded. 
// Need to lock this thread-unsafe retrieval - const EcalID* pix; ServiceHandle<StoreGateSvc> detStore ("DetectorStore", "CaloHitIdHelper"); if (detStore.retrieve().isSuccess()) { - if (detStore->retrieve(pix, "EcalID").isFailure()) { pix = 0; } + if (detStore->retrieve(m_ecalID, "EcalID").isFailure()) { m_ecalID = 0; } } InitializeField("Row", 0, 2); @@ -64,6 +61,14 @@ int CaloHitIdHelper::getModule(const int& hid) const return this->GetFieldValue("Module", hid); } +// identifier +Identifier CaloHitIdHelper::getIdentifier(const int& hid) const +{ + return m_ecalID->pmt_id(getRow(hid), getModule(hid), 0); + +} + + // // Info packing: int CaloHitIdHelper::buildHitId( const int row, diff --git a/Control/CalypsoExample/Reconstruction/CMakeLists.txt b/Control/CalypsoExample/Reconstruction/CMakeLists.txt index dddd2f621013501750a82d0569e2714bbb20c9db..4bccc2d7de74eca17c5dada36a81dac720664028 100644 --- a/Control/CalypsoExample/Reconstruction/CMakeLists.txt +++ b/Control/CalypsoExample/Reconstruction/CMakeLists.txt @@ -20,7 +20,16 @@ atlas_install_scripts( scripts/*.sh scripts/*.py ) atlas_add_test( ProdRecoTI12 SCRIPT scripts/faser_reco.py ${CMAKE_CURRENT_SOURCE_DIR}/../rawdata/Faser-Physics-001920-filtered.raw TI12Data PROPERTIES TIMEOUT 300 ) + atlas_add_test( ProdRecoTestBeam SCRIPT scripts/faser_reco.py ${CMAKE_CURRENT_SOURCE_DIR}/../RAWDATA/Faser-Physics-003613-filtered.raw TestBeamData PROPERTIES TIMEOUT 300 ) +atlas_add_test( ProdRecoPilotTracks + SCRIPT scripts/faser_reco.py ${CMAKE_CURRENT_SOURCE_DIR}/../RAWDATA/Faser-Physics-pilot_tracks-filtered.raw TI12Data + PROPERTIES TIMEOUT 300 ) + +atlas_add_test( ProdRecoTI12-2022 + SCRIPT scripts/faser_reco.py ${CMAKE_CURRENT_SOURCE_DIR}/../rawdata/Faser-Physics-006525-filtered.raw TI12Data02 + PROPERTIES TIMEOUT 300 ) + diff --git a/Control/CalypsoExample/Reconstruction/scripts/faser_reco.py b/Control/CalypsoExample/Reconstruction/scripts/faser_reco.py index 
9810f9606947468dad688f10d3547340dc687765..1579f4c1b19b83a3bf7b024060122ba7ce69746b 100755 --- a/Control/CalypsoExample/Reconstruction/scripts/faser_reco.py +++ b/Control/CalypsoExample/Reconstruction/scripts/faser_reco.py @@ -30,6 +30,9 @@ parser.add_argument("-v", "--verbose", action='store_true', help="Turn on DEBUG output") parser.add_argument("--clusterFit", action='store_true', help="Use ClusterFit (old) track finder - default: SegmentFit(new)") +parser.add_argument("--isMC", action='store_true', + help="Running on digitised MC rather than data") + args = parser.parse_args() @@ -86,7 +89,7 @@ from CalypsoConfiguration.AllConfigFlags import ConfigFlags Configurable.configurableRun3Behavior = True # Flags for this job -ConfigFlags.Input.isMC = False # Needed to bypass autoconfig +ConfigFlags.Input.isMC = args.isMC # Needed to bypass autoconfig ConfigFlags.IOVDb.DatabaseInstance = "OFLP200" # Use MC conditions for now ConfigFlags.Input.ProjectName = "data20" @@ -139,8 +142,13 @@ acc.merge(PoolWriteCfg(ConfigFlags)) # # Set up RAW data access -from FaserByteStreamCnvSvc.FaserByteStreamCnvSvcConfig import FaserByteStreamCnvSvcCfg -acc.merge(FaserByteStreamCnvSvcCfg(ConfigFlags)) + +if args.isMC: + from AthenaPoolCnvSvc.PoolReadConfig import PoolReadCfg + acc.merge(PoolReadCfg(ConfigFlags)) +else: + from FaserByteStreamCnvSvc.FaserByteStreamCnvSvcConfig import FaserByteStreamCnvSvcCfg + acc.merge(FaserByteStreamCnvSvcCfg(ConfigFlags)) # # Needed, or move to MainServicesCfg? 
@@ -204,11 +212,12 @@ print( "Writing out xAOD objects:" ) print( acc.getEventAlgo("OutputStreamxAOD").ItemList ) # Hack to avoid problem with our use of MC databases when isMC = False -replicaSvc = acc.getService("DBReplicaSvc") -replicaSvc.COOLSQLiteVetoPattern = "" -replicaSvc.UseCOOLSQLite = True -replicaSvc.UseCOOLFrontier = False -replicaSvc.UseGeomSQLite = True +if not args.isMC: + replicaSvc = acc.getService("DBReplicaSvc") + replicaSvc.COOLSQLiteVetoPattern = "" + replicaSvc.UseCOOLSQLite = True + replicaSvc.UseCOOLFrontier = False + replicaSvc.UseGeomSQLite = True # Configure verbosity # ConfigFlags.dump() diff --git a/Control/CalypsoExample/WriteAlignment/python/WriteAlignmentConfig_Faser01.py b/Control/CalypsoExample/WriteAlignment/python/WriteAlignmentConfig_Faser01.py index 675f61f5b77cea6bc6fac3c42eb01f17bfc492b8..00f4b9c37cce80a4ca7cb4c669f48d102a199fd4 100644 --- a/Control/CalypsoExample/WriteAlignment/python/WriteAlignmentConfig_Faser01.py +++ b/Control/CalypsoExample/WriteAlignment/python/WriteAlignmentConfig_Faser01.py @@ -5,7 +5,7 @@ import sys from AthenaCommon.Constants import VERBOSE, INFO from AthenaConfiguration.ComponentFactory import CompFactory -def WriteAlignmentCfg(flags, name="WriteAlignmentAlg", **kwargs): +def WriteAlignmentCfg(flags, name="WriteAlignmentAlg", alignmentConstants={}, **kwargs): # Initialize GeoModel from FaserGeoModel.FaserGeoModelConfig import FaserGeometryCfg @@ -32,7 +32,12 @@ def WriteAlignmentCfg(flags, name="WriteAlignmentAlg", **kwargs): PoolContainerPrefix="ConditionsContainer", TopLevelContainerName = "<type>", SubLevelBranchName= "<key>" ) - kwargs.setdefault("AlignDbTool", CompFactory.TrackerAlignDBTool("AlignDbTool", OutputTool = outputTool, OutputLevel=VERBOSE)) + + trackerAlignDBTool = CompFactory.TrackerAlignDBTool("AlignDbTool", OutputTool = outputTool, + OutputLevel=VERBOSE, + AlignmentConstants = {}) + kwargs.setdefault("AlignDbTool", trackerAlignDBTool) + trackerAlignDBTool.AlignmentConstants = 
alignmentConstants a.addEventAlgo(WriteAlignmentAlg(name, **kwargs)) return a @@ -53,6 +58,10 @@ if __name__ == "__main__": ConfigFlags.IOVDb.DBConnection = "sqlite://;schema=" + ConfigFlags.GeoModel.FaserVersion + "_ALLP200.db;dbname=OFLP200" ConfigFlags.GeoModel.Align.Disable = True # Hack to avoid loading alignment when we want to create it from scratch ConfigFlags.addFlag("WriteAlignment.PoolFileName", ConfigFlags.GeoModel.FaserVersion + "_Align.pool.root") + +# Parse flags from command line and lock + ConfigFlags.addFlag("AlignDbTool.AlignmentConstants", {}) + ConfigFlags.fillFromArgs(sys.argv[1:]) ConfigFlags.lock() # Configure components @@ -60,7 +69,7 @@ if __name__ == "__main__": acc = MainServicesCfg(ConfigFlags) # Set things up to create a conditions DB with neutral Tracker alignment transforms - acc.merge(WriteAlignmentCfg(ConfigFlags, ValidRunStart=1, ValidEvtStart=0, ValidRunEnd=9999999, ValidEvtEnd=9999999, CondTag=ConfigFlags.GeoModel.FaserVersion.replace("FASER", "TRACKER-ALIGN"))) + acc.merge(WriteAlignmentCfg(ConfigFlags, alignmentConstants=ConfigFlags.AlignDbTool.AlignmentConstants, ValidRunStart=1, ValidEvtStart=0, ValidRunEnd=9999999, ValidEvtEnd=9999999, CondTag=ConfigFlags.GeoModel.FaserVersion.replace("FASER", "TRACKER-ALIGN"), )) # Configure verbosity # ConfigFlags.dump() @@ -71,3 +80,4 @@ if __name__ == "__main__": # Execute and finish sys.exit(int(acc.run(maxEvents=1).isFailure())) + diff --git a/Control/CalypsoExample/WriteAlignment/python/WriteAlignmentConfig_Faser02.py b/Control/CalypsoExample/WriteAlignment/python/WriteAlignmentConfig_Faser02.py index b05c15632415e873e301a08d9f63332175ebfe20..2e944e3fc88b76d6794a7df7b9ce4469fc763d51 100644 --- a/Control/CalypsoExample/WriteAlignment/python/WriteAlignmentConfig_Faser02.py +++ b/Control/CalypsoExample/WriteAlignment/python/WriteAlignmentConfig_Faser02.py @@ -5,7 +5,7 @@ import sys from AthenaCommon.Constants import VERBOSE, INFO from AthenaConfiguration.ComponentFactory import 
CompFactory -def WriteAlignmentCfg(flags, name="WriteAlignmentAlg", **kwargs): +def WriteAlignmentCfg(flags, name="WriteAlignmentAlg", alignmentConstants={}, **kwargs): # Initialize GeoModel from FaserGeoModel.FaserGeoModelConfig import FaserGeometryCfg @@ -32,7 +32,12 @@ def WriteAlignmentCfg(flags, name="WriteAlignmentAlg", **kwargs): PoolContainerPrefix="ConditionsContainer", TopLevelContainerName = "<type>", SubLevelBranchName= "<key>" ) - kwargs.setdefault("AlignDbTool", CompFactory.TrackerAlignDBTool("AlignDbTool", OutputTool = outputTool, OutputLevel=VERBOSE)) + + trackerAlignDBTool = CompFactory.TrackerAlignDBTool("AlignDbTool", OutputTool = outputTool, + OutputLevel=VERBOSE, + AlignmentConstants = {}) + kwargs.setdefault("AlignDbTool", trackerAlignDBTool) + trackerAlignDBTool.AlignmentConstants = alignmentConstants a.addEventAlgo(WriteAlignmentAlg(name, **kwargs)) return a @@ -53,6 +58,10 @@ if __name__ == "__main__": ConfigFlags.IOVDb.DBConnection = "sqlite://;schema=" + ConfigFlags.GeoModel.FaserVersion + "_ALLP200.db;dbname=OFLP200" ConfigFlags.GeoModel.Align.Disable = True # Hack to avoid loading alignment when we want to create it from scratch ConfigFlags.addFlag("WriteAlignment.PoolFileName", ConfigFlags.GeoModel.FaserVersion + "_Align.pool.root") + +# Parse flags from command line and lock + ConfigFlags.addFlag("AlignDbTool.AlignmentConstants", {}) + ConfigFlags.fillFromArgs(sys.argv[1:]) ConfigFlags.lock() # Configure components @@ -60,7 +69,7 @@ if __name__ == "__main__": acc = MainServicesCfg(ConfigFlags) # Set things up to create a conditions DB with neutral Tracker alignment transforms - acc.merge(WriteAlignmentCfg(ConfigFlags, ValidRunStart=1, ValidEvtStart=0, ValidRunEnd=9999999, ValidEvtEnd=9999999, CondTag=ConfigFlags.GeoModel.FaserVersion.replace("FASER", "TRACKER-ALIGN"))) + acc.merge(WriteAlignmentCfg(ConfigFlags, alignmentConstants=ConfigFlags.AlignDbTool.AlignmentConstants, ValidRunStart=1, ValidEvtStart=0, ValidRunEnd=9999999, 
ValidEvtEnd=9999999, CondTag=ConfigFlags.GeoModel.FaserVersion.replace("FASER", "TRACKER-ALIGN"), )) # Configure verbosity # ConfigFlags.dump() diff --git a/Control/CalypsoExample/WriteAlignment/python/WriteAlignmentConfig_FaserTB00.py b/Control/CalypsoExample/WriteAlignment/python/WriteAlignmentConfig_FaserTB00.py index d57092f2dd8b8c1dcb3bd6568fdb3dac636d3cf2..84fb1b02e855907c7c525c64498b7d4d8b015f18 100644 --- a/Control/CalypsoExample/WriteAlignment/python/WriteAlignmentConfig_FaserTB00.py +++ b/Control/CalypsoExample/WriteAlignment/python/WriteAlignmentConfig_FaserTB00.py @@ -5,7 +5,7 @@ import sys from AthenaCommon.Constants import VERBOSE, INFO from AthenaConfiguration.ComponentFactory import CompFactory -def WriteAlignmentCfg(flags, name="WriteAlignmentAlg", **kwargs): +def WriteAlignmentCfg(flags, name="WriteAlignmentAlg", alignmentConstants={}, **kwargs): # Initialize GeoModel from FaserGeoModel.FaserGeoModelConfig import FaserGeometryCfg @@ -32,7 +32,12 @@ def WriteAlignmentCfg(flags, name="WriteAlignmentAlg", **kwargs): PoolContainerPrefix="ConditionsContainer", TopLevelContainerName = "<type>", SubLevelBranchName= "<key>" ) - kwargs.setdefault("AlignDbTool", CompFactory.TrackerAlignDBTool("AlignDbTool", OutputTool = outputTool, OutputLevel=VERBOSE)) + + trackerAlignDBTool = CompFactory.TrackerAlignDBTool("AlignDbTool", OutputTool = outputTool, + OutputLevel=VERBOSE, + AlignmentConstants = {}) + kwargs.setdefault("AlignDbTool", trackerAlignDBTool) + trackerAlignDBTool.AlignmentConstants = alignmentConstants a.addEventAlgo(WriteAlignmentAlg(name, **kwargs)) return a @@ -53,6 +58,10 @@ if __name__ == "__main__": ConfigFlags.IOVDb.DBConnection = "sqlite://;schema=" + ConfigFlags.GeoModel.FaserVersion + "_ALLP200.db;dbname=OFLP200" ConfigFlags.GeoModel.Align.Disable = True # Hack to avoid loading alignment when we want to create it from scratch ConfigFlags.addFlag("WriteAlignment.PoolFileName", ConfigFlags.GeoModel.FaserVersion + "_Align.pool.root") + +# 
Parse flags from command line and lock + ConfigFlags.addFlag("AlignDbTool.AlignmentConstants", {}) + ConfigFlags.fillFromArgs(sys.argv[1:]) ConfigFlags.lock() # Configure components @@ -60,7 +69,7 @@ if __name__ == "__main__": acc = MainServicesCfg(ConfigFlags) # Set things up to create a conditions DB with neutral Tracker alignment transforms - acc.merge(WriteAlignmentCfg(ConfigFlags, ValidRunStart=1, ValidEvtStart=0, ValidRunEnd=9999999, ValidEvtEnd=9999999, CondTag=ConfigFlags.GeoModel.FaserVersion.replace("FASER", "TRACKER-ALIGN"))) + acc.merge(WriteAlignmentCfg(ConfigFlags, alignmentConstants=ConfigFlags.AlignDbTool.AlignmentConstants, ValidRunStart=1, ValidEvtStart=0, ValidRunEnd=9999999, ValidEvtEnd=9999999, CondTag=ConfigFlags.GeoModel.FaserVersion.replace("FASER", "TRACKER-ALIGN"), )) # Configure verbosity # ConfigFlags.dump() diff --git a/Control/CalypsoExample/rawdata/Faser-Physics-006525-filtered.raw b/Control/CalypsoExample/rawdata/Faser-Physics-006525-filtered.raw new file mode 100644 index 0000000000000000000000000000000000000000..e7e7da6e539a0f05a064c58c1b58d7c6a1081e7e Binary files /dev/null and b/Control/CalypsoExample/rawdata/Faser-Physics-006525-filtered.raw differ diff --git a/Control/CalypsoExample/rawdata/Faser-Physics-pilot_tracks-filtered.raw b/Control/CalypsoExample/rawdata/Faser-Physics-pilot_tracks-filtered.raw new file mode 100644 index 0000000000000000000000000000000000000000..3079490b5b1d0110887cb2a7de7591e0bf2b0cc2 Binary files /dev/null and b/Control/CalypsoExample/rawdata/Faser-Physics-pilot_tracks-filtered.raw differ diff --git a/Database/ConnectionManagement/FaserAuthentication/data/dblookup.xml b/Database/ConnectionManagement/FaserAuthentication/data/dblookup.xml index 52542a2837125d540824617b1085a5d710c3507f..9cebc2e82b233d6191d7671cf6d9d6997cad23f9 100644 --- a/Database/ConnectionManagement/FaserAuthentication/data/dblookup.xml +++ b/Database/ConnectionManagement/FaserAuthentication/data/dblookup.xml @@ -26,4 +26,9 @@ <service 
name="sqlite_file:///cvmfs/faser.cern.ch/repo/sw/database/DBRelease/current/sqlite200/CABP200.db" accessMode="read" /> </logicalservice> +<logicalservice name="COOLOFL_TRIGGER"> + <service name="sqlite_file:data/sqlite200/waveform_reco.db" accessMode="read" /> + <service name="sqlite_file:///cvmfs/faser.cern.ch/repo/sw/database/DBRelease/current/sqlite200/waveform_reco.db" accessMode="read" /> +</logicalservice> + </servicelist> diff --git a/DetectorDescription/DetDescrCnvSvc/src/DetDescrCnvSvc.cxx b/DetectorDescription/DetDescrCnvSvc/src/DetDescrCnvSvc.cxx index 50884e7221086392e645855bd885e6f19ee821e0..cc2fca779acd953d177963424389477baa30c856 100644 --- a/DetectorDescription/DetDescrCnvSvc/src/DetDescrCnvSvc.cxx +++ b/DetectorDescription/DetDescrCnvSvc/src/DetDescrCnvSvc.cxx @@ -125,6 +125,8 @@ DetDescrCnvSvc::initialize() { if (status != StatusCode::SUCCESS) return status; status = addToDetStore(55179317, "PreshowerID"); if (status != StatusCode::SUCCESS) return status; + status = addToDetStore(247779284, "VetoNuID"); + if (status != StatusCode::SUCCESS) return status; status = addToDetStore(205618430, "FaserSCT_ID"); if (status != StatusCode::SUCCESS) return status; status = addToDetStore(113753346, "EcalID"); diff --git a/DetectorDescription/FaserDetDescr/FaserDetDescr/FaserDetTechnology.h b/DetectorDescription/FaserDetDescr/FaserDetDescr/FaserDetTechnology.h index 6c0a2b2919f4ef4bc8f16b4519bdd82dbc45bade..0ee1ecc5d64e53a1d5cd0e04ad969421f8344c59 100644 --- a/DetectorDescription/FaserDetDescr/FaserDetDescr/FaserDetTechnology.h +++ b/DetectorDescription/FaserDetDescr/FaserDetDescr/FaserDetTechnology.h @@ -35,20 +35,21 @@ namespace FaserDetDescr { fLastFaserNeutrinoTechnology = 1, // Scintillator fFirstFaserScintillatorTechnology = 2, - fFaserVeto = 2, - fFaserTrigger = 3, - fFaserPreshower = 4, - fLastFaserScintillatorTechnology = 4, + fFaserVetoNu = 2, + fFaserVeto = 3, + fFaserTrigger = 4, + fFaserPreshower = 5, + fLastFaserScintillatorTechnology = 5, // 
Tracker - fFirstFaserTrackerTechnology = 5, - fFaserSCT = 5, - fLastFaserTrackerTechnology = 5, + fFirstFaserTrackerTechnology = 6, + fFaserSCT = 6, + fLastFaserTrackerTechnology = 6, // Calorimeter - fFirstFaserCalorimeterTechnology = 6, - fFaserECAL = 6, - fLastFaserCalorimeterTechnology = 6, + fFirstFaserCalorimeterTechnology = 7, + fFaserECAL = 7, + fLastFaserCalorimeterTechnology = 7, // number of defined detector technologies - fNumFaserDetTechnologies = 7 + fNumFaserDetTechnologies = 8 }; } // end of namespace diff --git a/DetectorDescription/FaserDetDescr/FaserDetDescr/FaserDetectorID.h b/DetectorDescription/FaserDetDescr/FaserDetDescr/FaserDetectorID.h index 3de4b6428fa65ae333a6bebe92aef8334f78a7b8..3ac1160a9e0c12935f522b0750b09a74fe4d4983 100644 --- a/DetectorDescription/FaserDetDescr/FaserDetDescr/FaserDetectorID.h +++ b/DetectorDescription/FaserDetDescr/FaserDetDescr/FaserDetectorID.h @@ -82,6 +82,7 @@ public: /// @name Scintillator subsystem ids //@{ Identifier veto (void) const; + Identifier vetonu (void) const; Identifier trigger (void) const; Identifier preshower (void) const; //@} @@ -157,6 +158,7 @@ public: bool is_calo (Identifier id) const; bool is_emulsion (Identifier id) const; bool is_veto (Identifier id) const; + bool is_vetonu (Identifier id) const; bool is_trigger (Identifier id) const; bool is_preshower (Identifier id) const; bool is_sct (Identifier id) const; @@ -172,6 +174,7 @@ public: bool is_calo (const ExpandedIdentifier& id) const; bool is_emulsion (const ExpandedIdentifier& id) const; bool is_veto (const ExpandedIdentifier& id) const; + bool is_vetonu (const ExpandedIdentifier& id) const; bool is_trigger (const ExpandedIdentifier& id) const; bool is_preshower (const ExpandedIdentifier& id) const; bool is_sct (const ExpandedIdentifier& id) const; @@ -230,6 +233,7 @@ protected: /// Scintillator: ExpandedIdentifier veto_exp (void) const; + ExpandedIdentifier vetonu_exp (void) const; ExpandedIdentifier trigger_exp (void) const; 
ExpandedIdentifier preshower_exp (void) const; @@ -247,6 +251,7 @@ protected: int calo_field_value () const; int emulsion_field_value () const; int veto_field_value () const; + int vetonu_field_value () const; int trigger_field_value () const; int preshower_field_value () const; int sct_field_value () const; @@ -306,6 +311,7 @@ private: int m_CALO_ID; int m_EMULSION_ID; int m_VETO_ID; + int m_VETONU_ID; int m_TRIGGER_ID; int m_PRESHOWER_ID; int m_SCT_ID; @@ -382,6 +388,13 @@ FaserDetectorID::veto_exp (void) const return (result << m_VETO_ID); } +inline ExpandedIdentifier +FaserDetectorID::vetonu_exp (void) const +{ + ExpandedIdentifier result(scint_exp()); + return (result << m_VETONU_ID); +} + inline ExpandedIdentifier FaserDetectorID::trigger_exp (void) const { @@ -428,6 +441,9 @@ FaserDetectorID::emulsion_field_value () const {return (m_EMULSION_ID);} inline int FaserDetectorID::veto_field_value () const {return (m_VETO_ID);} +inline int +FaserDetectorID::vetonu_field_value () const {return (m_VETONU_ID);} + inline int FaserDetectorID::trigger_field_value () const {return (m_TRIGGER_ID);} @@ -484,6 +500,16 @@ FaserDetectorID::is_veto (Identifier id) const return result; } +inline bool +FaserDetectorID::is_vetonu (Identifier id) const +{ + bool result = false; + if(is_scint(id)) { + result = (m_scint_part_impl.unpack(id) == m_VETONU_ID); + } + return result; +} + inline bool FaserDetectorID::is_trigger (Identifier id) const { diff --git a/DetectorDescription/FaserDetDescr/src/FaserDetectorID.cxx b/DetectorDescription/FaserDetDescr/src/FaserDetectorID.cxx index 9e8dfbb0dcf18ea616c4e35725b28cbb27e8151a..9653eb67aad080822c7354fbc2229a64693f0b1c 100644 --- a/DetectorDescription/FaserDetDescr/src/FaserDetectorID.cxx +++ b/DetectorDescription/FaserDetDescr/src/FaserDetectorID.cxx @@ -44,6 +44,7 @@ FaserDetectorID::FaserDetectorID() m_VETO_ID(1), m_TRIGGER_ID(2), m_PRESHOWER_ID(3), + m_VETONU_ID(4), m_SCT_ID(1), m_ECAL_ID(1), m_isSLHC(false), @@ -79,6 +80,7 @@ 
FaserDetectorID::FaserDetectorID(const FaserDetectorID& other) m_VETO_ID (other.m_VETO_ID), m_TRIGGER_ID (other.m_TRIGGER_ID), m_PRESHOWER_ID (other.m_PRESHOWER_ID), + m_VETONU_ID (other.m_VETONU_ID), m_SCT_ID (other.m_SCT_ID), m_ECAL_ID (other.m_ECAL_ID), m_isSLHC (other.m_isSLHC), @@ -122,6 +124,7 @@ FaserDetectorID::operator= (const FaserDetectorID& other) m_VETO_ID = other.m_VETO_ID; m_TRIGGER_ID = other.m_TRIGGER_ID; m_PRESHOWER_ID = other.m_PRESHOWER_ID; + m_VETONU_ID = other.m_VETONU_ID; m_SCT_ID = other.m_SCT_ID; m_ECAL_ID = other.m_ECAL_ID; m_faser_dict = other.m_faser_dict; @@ -204,6 +207,16 @@ FaserDetectorID::veto (void) const return (result); } +Identifier +FaserDetectorID::vetonu (void) const +{ + Identifier result((Identifier::value_type)0); + // Pack field + m_det_impl.pack (scint_field_value(), result); + m_scint_part_impl.pack(m_VETONU_ID, result); + return (result); +} + Identifier FaserDetectorID::trigger (void) const { @@ -444,6 +457,16 @@ FaserDetectorID::is_veto (const ExpandedIdentifier& id) const return result; } +bool +FaserDetectorID::is_vetonu (const ExpandedIdentifier& id) const +{ + bool result = false; + if ( is_scint(id) && id.fields() > 1 ){ + if ( id[1] == m_VETONU_ID ) result = true; + } + return result; +} + bool FaserDetectorID::is_trigger (const ExpandedIdentifier& id) const { @@ -729,6 +752,7 @@ FaserDetectorID::initLevelsFromDict(const IdDictMgr& dict_mgr) m_VETO_ID = -1; m_TRIGGER_ID = -1; m_PRESHOWER_ID = -1; + m_VETONU_ID = -1; m_SCT_ID = -1; m_ECAL_ID = -1; @@ -935,6 +959,38 @@ FaserDetectorID::initLevelsFromDict(const IdDictMgr& dict_mgr) return (1); } + label = field->find_label("VetoNu"); + if (label) { + if (label->m_valued) { + m_VETONU_ID = label->m_value; + } + else { + if(m_msgSvc) { + MsgStream log(m_msgSvc, "FaserDetectorID" ); + log << MSG::ERROR << "initLevelsFromDict - label VetoNu does NOT have a value " + << endmsg; + } + else { + std::cout << "FaserDetectorID::initLevelsFromDict - label VetoNu does NOT 
have a value " + << std::endl; + } + return (1); + } + } + else { + if(m_msgSvc) { + MsgStream log(m_msgSvc, "FaserDetectorID" ); + log << MSG::ERROR << "initLevelsFromDict - unable to find 'VetoNu' label " + << endmsg; + } + else { + std::cout << "FaserDetectorID::initLevelsFromDict - unable to find 'VetoNu' label " + << std::endl; + } + return (1); + } + + } // Initialize ids for Tracker diff --git a/Event/FaserByteStreamCnvSvcBase/python/FaserByteStreamCnvSvcBaseConfig.py b/Event/FaserByteStreamCnvSvcBase/python/FaserByteStreamCnvSvcBaseConfig.py index 3a50a75d55c0c2a1e06d6edfa6d900722d5a83c1..3a87cc4c75388ff53954f9a75be77b9c5d1dc9cd 100644 --- a/Event/FaserByteStreamCnvSvcBase/python/FaserByteStreamCnvSvcBaseConfig.py +++ b/Event/FaserByteStreamCnvSvcBase/python/FaserByteStreamCnvSvcBaseConfig.py @@ -15,6 +15,7 @@ def FaserByteStreamCnvSvcBaseCfg(flags, **kwargs): adxProvider.TypeNames += [ "RawWaveformContainer/CaloWaveforms", "RawWaveformContainer/VetoWaveforms", + "RawWaveformContainer/VetoNuWaveforms", "RawWaveformContainer/TriggerWaveforms", "RawWaveformContainer/PreshowerWaveforms", "RawWaveformContainer/ClockWaveforms", diff --git a/Generators/DIFGenerator/python/DIFSampler.py b/Generators/DIFGenerator/python/DIFSampler.py index 8ee4bd18b8cfedf9654060ba77745b43adcf3d85..8ec22459aba93a430093c4bd7b2a3f13e2d0f206 100644 --- a/Generators/DIFGenerator/python/DIFSampler.py +++ b/Generators/DIFGenerator/python/DIFSampler.py @@ -115,7 +115,7 @@ class DIFSampler(PG.ParticleSampler): return sqrt(m0**4 - (2*m0**2 * m1**2) + (m1**4) - (2*m0**2 * m2**2) - (2*m1**2 *m2**2) + (m2**4)) / (2*m0) def lorentz_transformation(self): - self.mother_mom = self.mother.mom.shoot() + #self.mother_mom = self.mother.mom.shoot() Bx = self.mother_mom.Px() / self.mother_mom.E() By = self.mother_mom.Py() / self.mother_mom.E() Bz = self.mother_mom.Pz() / self.mother_mom.E() @@ -136,6 +136,11 @@ class DIFSampler(PG.ParticleSampler): ## The magnitude of the momentum will be equal and 
opposite self.mother = self.mother_sampler + self.mother.mass_override = False + mother_part = self.mother.shoot()[0] + self.mother_mom = mother_part.mom + self.mother_pos = mother_part.pos + p = self.calculate_decay_p(self.mother.mom.mass(),self.daughter1.mass,self.daughter2.mass) self.daughter1.E = sqrt(self.daughter1.mass**2 + p**2) @@ -416,4 +421,4 @@ if __name__ == "__main__": DIFS = DIFSampler() DIFS.mother_sampler = SP() - print("DIFSampler: All unit tests passed") \ No newline at end of file + print("DIFSampler: All unit tests passed") diff --git a/Generators/FaserParticleGun/python/FaserParticleGunConfig.py b/Generators/FaserParticleGun/python/FaserParticleGunConfig.py index dfc4a23fb77071075769c955d139339a8f902f51..ac30494a4815f5bb06a5d67408b4c0551297a63b 100644 --- a/Generators/FaserParticleGun/python/FaserParticleGunConfig.py +++ b/Generators/FaserParticleGun/python/FaserParticleGunConfig.py @@ -44,10 +44,18 @@ def FaserParticleGunSingleParticleCfg(ConfigFlags, **kwargs) : theta = kwargs.setdefault("theta", [0, pi/20]), phi = kwargs.setdefault("phi", [0, 2*pi]), mass = kwargs.setdefault("mass", 0.0) ) - pg.sampler.pos = PG.PosSampler(x = kwargs.setdefault("x", [-5, 5]), - y = kwargs.setdefault("y", [-5, 5]), - z = kwargs.setdefault("z", -3750.0), - t = kwargs.setdefault("t", 0.0) ) + + if "radius" in kwargs: + pg.sampler.pos = RadialPosSampler(x = kwargs.setdefault("x", 0.0), + y = kwargs.setdefault("y", 0.0), + z = kwargs.setdefault("z", -3750.0), + r = kwargs.setdefault("radius", 1.0), + t = kwargs.setdefault("t", 0.0) ) + else: + pg.sampler.pos = PG.PosSampler(x = kwargs.setdefault("x", [-5, 5]), + y = kwargs.setdefault("y", [-5, 5]), + z = kwargs.setdefault("z", -3750.0), + t = kwargs.setdefault("t", 0.0) ) return cfg @@ -140,6 +148,47 @@ def FaserParticleGunDecayInFlightCfg(ConfigFlags, **kwargs) : return cfg + +def FaserParticleGunForeseeCfg(ConfigFlags, **kwargs) : + # Supported keyword arguments: + # model_path (detault: $PWD) + # model_name 
(default: DarkPhoton) + # mother_mass (default: 0.01 GeV) + # com_energy (default: 14 TeV) + # daughter1_pid (default: 11) + # daughter2_pid (default: -11) + # mother_pid (default: none) + # mother_pos (default: CylinderSampler([0, 100**2],[0, 2*pi],[-1500, 0],0)) + # + # Note that ALL of these can be samplers themselves - either the simple, "literal" variety or a sampler object configured by the caller + # + + cfg = FaserParticleGunCommonCfg(ConfigFlags, **kwargs) + + pg = cfg.getPrimary() + + from ForeseeGenerator.ForeseeSampler import ForeseeNumpySampler + mother_part = ForeseeNumpySampler( + model_path = kwargs.get("model_path", "."), + model_name = kwargs.get("model_name", "DarkPhoton"), + com_energy = kwargs.get("com_energy", "14"), + mother_mass = kwargs.get("mother_mass", 0.01), + mother_pid = kwargs.get("mother_pid", None), + daughter1_pid = kwargs.get("daughter1_pid", 11), + daughter2_pid = kwargs.get("daughter2_pid", -11), + randomSeed = kwargs.get("randomSeed", None) + ) + + from DIFGenerator import DIFSampler + pg.sampler = DIFSampler( + daughter1_pid = kwargs.get("daughter1_pid", 11), + daughter2_pid = kwargs.get("daughter2_pid", -11), + ) + + pg.sampler.mother_sampler = mother_part + + return cfg + def FaserParticleGunCfg(ConfigFlags) : # generator = ConfigFlags.Sim.Gun.setdefault("Generator", "SingleParticle") generator = ConfigFlags.Sim.Gun.setdefault("Generator", "DecayInFlight") @@ -150,5 +199,7 @@ def FaserParticleGunCfg(ConfigFlags) : return FaserParticleGunCosmicsCfg(ConfigFlags, **kwargs) elif generator == "DecayInFlight" : return FaserParticleGunDecayInFlightCfg(ConfigFlags, **kwargs) + elif generator == "Foresee" : + return FaserParticleGunForeseeCfg(ConfigFlags, **kwargs) else : return FaserParticleGunSingleParticleCfg(ConfigFlags, **kwargs ) diff --git a/Generators/FaserParticleGun/python/RadialPosSampler.py b/Generators/FaserParticleGun/python/RadialPosSampler.py index 
4a14987a142697816b22cbec202f56c8bf3183b5..a0ae101c5f83e0bd680480a0f4f179c9202aee51 100644 --- a/Generators/FaserParticleGun/python/RadialPosSampler.py +++ b/Generators/FaserParticleGun/python/RadialPosSampler.py @@ -37,8 +37,10 @@ class RadialPosSampler(Sampler): fwhm = 2*self.radius sig = fwhm/(2 * sqrt(2 * log(2))) - # return random.uniform(0, self.radius) - return random.gauss(0, self.radius) + if self.radius < 0: + return random.uniform(0, abs(self.radius)) + else: + return random.gauss(0, self.radius) # return random.gauss(0, sig) @property diff --git a/Generators/FlukaReader/CMakeLists.txt b/Generators/FlukaReader/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..eebebd79e2b466203bb35ba1aa3d3d265d39920d --- /dev/null +++ b/Generators/FlukaReader/CMakeLists.txt @@ -0,0 +1,11 @@ +################################################################################ +# Package: FlukaReader +################################################################################ + +# Declare the package name: +atlas_subdir( FlukaReader ) + +# Install files from the package: +atlas_install_python_modules( python/*.py POST_BUILD_CMD ${ATLAS_FLAKE8} ) + +#atlas_install_joboptions( share/*.py ) diff --git a/Generators/FlukaReader/python/FlukaReaderAlg.py b/Generators/FlukaReader/python/FlukaReaderAlg.py new file mode 100644 index 0000000000000000000000000000000000000000..3a8728e37229892c56f701f7b3ae5985823c8d76 --- /dev/null +++ b/Generators/FlukaReader/python/FlukaReaderAlg.py @@ -0,0 +1,456 @@ +from AthenaCommon.AppMgr import ServiceMgr as svcMgr +from GeneratorModules.EvgenAlg import EvgenAlg +from AthenaPython.PyAthena import StatusCode, EventInfo, EventID, EventType +from AthenaCommon.SystemOfUnits import GeV, MeV, cm +from AthenaCommon.Constants import DEBUG + +from FaserCosmicGenerator import Range + +import ROOT + +import numpy as np +import math + +# TODO: correct angle for beam crossing angle: both in angel itself and position + +class 
FlukaReader(EvgenAlg): + def __init__(self, name="FlukaReader", MCEventKey="BeamTruthEvent", file_name = "", dist = 0, z = -1.5, randomSeed = None, nsamples = 1, test = False): + super(FlukaReader,self).__init__(name=name) + self.McEventKey = MCEventKey + self.file_name = file_name + self.dist = dist * 100 # cm + self.z = z * 1000 # mm + self.isample = 0 + self.nsamples = nsamples + self.test = test + + self.columns = ["run", "event", "type", "gen", "E", "w", "x", "y", "cosX", "cosY", "age", "_"] + + if randomSeed is not None: + self.msg.info(f"Setting seed to {randomSeed}") + self.rng = np.random.default_rng(randomSeed) + else: + self.rng = np.random.default_rng() + + self.file = open(self.file_name) + + if self.test: + self.before = dict(zip(self.columns, [[] for i in range(len(self.columns))])) + self.after = dict(zip(self.columns, [[] for i in range(len(self.columns))])) + + + return + + def genFinalize(self): + + if self.test: + self.plot() + + self.file.close() + return StatusCode.Success + + + def fillEvent(self, evt): + "This is called for every real event * the number of samplings" + + # If the sample gets to the number requested, then reset to 0 + if self.isample == self.nsamples: + self.msg.debug("Reseting samples") + self.isample = 0 + + # Only if the sample is 0 load the new fluka entry + if self.isample == 0: + self.msg.debug("Loading new fluka event") + try: + l = next(self.file) + except StopIteration: + return StatusCode.Success + + entry = dict(zip(self.columns, l.strip("\n").split())) + for i,c in enumerate(self.columns): + if i < 4: + entry[c] = int(entry[c]) + else: + entry[c] = float(entry[c]) + + self.entry = entry + + # Call for each sample of each event + self.msg.debug(f"Processing sample {self.isample}") + self.process(self.entry, evt) + self.isample += 1 + + return StatusCode.Success + + def angle(self, cosTheta): + "Convert cos(theta) wrt x or y axis to theta wrt z axis" + return np.pi/2 - np.arccos(cosTheta) + + def pid(self, ftype): + 
"Convert fluka particle type to PID" + if ftype == 10: # mu+ + return -13 + elif ftype == 11: # mu- + return 13 + else: + return 0 + + def path_length(self, z, cosThetaX, cosThetaY): + "Get path length traversed in the material, taking into account incident angles" + + # Convert theta wrt x and y axis to wrt z axis + thetaX = self.angle(cosThetaX) + thetaY = self.angle(cosThetaY) + + # Correct z for angle + zcorr = z / np.abs(np.cos(thetaX)) / np.abs(np.cos(thetaY)) + + return zcorr + + def energy_after_loss_exact(self, e, zcorr): + "Calculate exact energy after loss in material" + return Range.muPropagate(e, zcorr/100.) # meters + + def energy_after_loss(self, e, cosThetaX, cosThetaY, zcorr, a = 2e-3, b = 4e-6): + "Calculate approximate energy after loss in material" + + # Based on + # http://www.bartol.udel.edu/~stanev/lectures/apr17.pdf + # and PDG chapter 27 fig 27.1 + # https://www.google.co.uk/url?sa=t&rct=j&q=&esrc=s&source=web&cd=&cad=rja&uact=8&ved=2ahUKEwiG9YjNtvr2AhUbiVwKHdQfD9sQFnoECAsQAQ&url=https%3A%2F%2Fpdg.lbl.gov%2F2005%2Freviews%2Fpassagerpp.pdf&usg=AOvVaw1HGA5PZtC2UiqA6B7_C5dz + + eps = a/b + return (e + eps) * np.exp(-b * zcorr) - eps + + def mean_scattering_angle(self, e, cosThetaX, cosThetaY, zcorr, X0 = 10.02, m = 105.66e-3, charge = 1, beta = 1): + "Calculate mean scattering angle over many scatters for given energy and length z" + + # Based on PDG chapter 27 eqns 27.10, 27.16, 27.17 + # https://www.google.co.uk/url?sa=t&rct=j&q=&esrc=s&source=web&cd=&cad=rja&uact=8&ved=2ahUKEwiG9YjNtvr2AhUbiVwKHdQfD9sQFnoECAsQAQ&url=https%3A%2F%2Fpdg.lbl.gov%2F2005%2Freviews%2Fpassagerpp.pdf&usg=AOvVaw1HGA5PZtC2UiqA6B7_C5dz + # and + # https://pdg.lbl.gov/2014/AtomicNuclearProperties/HTML/standard_rock.html + + # Convert E to momentum [GeV] + p = np.sqrt(e**2 - m**2) + + # Mean angle [GeV and cm] + c = 1 # n.u + theta0 = 13.6e-3 / (p * c * beta) * charge * np.sqrt(zcorr/X0) * (1 + 0.38 * math.log(zcorr, X0)) + return theta0 + + def scattering_angle(self, 
cosTheta, theta0, rand1): + "Calculate actual scattering angle over many scatters for given start angle and mean angle" + + # Convert theta wrt x or y axis to wrt z axis + theta = self.angle(cosTheta) + + # Add random scattering angle + return theta + rand1 * theta0 + + def scattering_postition(self, x, cosThetaX, cosThetaY, zcorr, theta0, rand1, rand2): + "Calculate transverse scattering position over many scatters for given start angle and mean angle + length z" + + # Convert theta wrt x to wrt z axis + thetaX = self.angle(cosThetaX) + + xout = np.copy(x) + if xout.ndim == 0: + xout = float(xout) + + # Add displacement due to initial angle + #xang = z * np.tan(thetaX) + xang = zcorr * np.sin(thetaX) + xout += xang + + # Add displacement due to multiple scattering + dx = rand1 * zcorr * theta0 / np.sqrt(12) + rand2 * zcorr * theta0/2 + xout += dx + + return xout + + def propagate(self, entry): + "Propagate the particle through a given distance of standard rock using the small-angle approximation" + + if self.dist == 0: + return entry + + # Random numbers + rand1 = self.rng.normal(0, 1) + rand2 = self.rng.normal(0, 1) + + # Get entry info + e = entry["E"] + x = entry["x"] + y = entry["y"] + cosX = entry["cosX"] + cosY = entry["cosY"] + + # Correct path length for angles + z = self.path_length(self.dist, cosX, cosY) + + # Account for energy loss + #eout = self.energy_after_loss(e, cosX, cosY, z) + eout = self.energy_after_loss_exact(e, z) + + # Account for scattering on angle and position + theta0 = self.mean_scattering_angle(e, cosX, cosY, z) + thetaXout = self.scattering_angle(cosX, theta0, rand1) + thetaYout = self.scattering_angle(cosY, theta0, rand1) + xout = self.scattering_postition(x, cosX, cosY, z, theta0, rand1, rand2) + yout = self.scattering_postition(y, cosY, cosX, z, theta0, rand1, rand2) + + # Update entry info using copy for cases when resample so don't change the original + newentry = entry.copy() + newentry["E"] = eout + newentry["x"] = xout + 
newentry["y"] = yout + newentry["cosX"] = np.cos(np.pi/2 + thetaXout) + newentry["cosY"] = np.cos(np.pi/2 + thetaYout) + + return newentry + + + def process(self, entry, evt): + + if self.test: + for k,v in entry.items(): + self.before[k].append(float(v)) + + if self.msg.level > DEBUG: + print("Original Entry", entry) + + if self.dist != 0: + # Propoagate to FASER + newentry = self.propagate(entry) + elif self.nsamples != 1: + # Else smear if sampling more than once, using copy to avoid changing original + newentry = entry.copy() + newentry["E"] *= self.rng.normal(1, 0.05) + newentry["x"] *= self.rng.normal(1, 0.05) + newentry["y"] *= self.rng.normal(1, 0.05) + newentry["cosX"] = np.cos(np.arccos(entry["cosX"]) * self.rng.normal(1, 0.05)) + newentry["cosY"] = np.cos(np.arccos(entry["cosY"]) * self.rng.normal(1, 0.05)) + else: + # No propagation or smearing + newentry = entry + + if self.msg.level > DEBUG: + print("Propagated/Smeared Entry", newentry) + + + if self.test: + for k,v in newentry.items(): + self.after[k].append(float(v)) + + try: + from AthenaPython.PyAthena import HepMC3 as HepMC + except ImportError: + from AthenaPython.PyAthena import HepMC as HepMC + + # Add weight, correcting for mutliple sampling + evt.weights().push_back(newentry["w"] / self.nsamples) + + + # Setup MC event + mcEventType = EventType() + mcEventType.add_type(EventType.IS_SIMULATION) + + mcEventId = EventID(run_number = newentry["run"], event_number = newentry["event"]) + + mcEventInfo = EventInfo(id = mcEventId, type = mcEventType) + + self.evtStore.record(mcEventInfo, "McEventInfo", True, False) + + ROOT.SetOwnership(mcEventType, False) + ROOT.SetOwnership(mcEventId, False) + ROOT.SetOwnership(mcEventInfo, False) + + # Create HepMC Vertex + pos = HepMC.FourVector(newentry["x"] * cm, newentry["y"] * cm, self.z, 0) + gv = HepMC.GenVertex(pos) + + ROOT.SetOwnership(gv, False) + evt.add_vertex(gv) + + # TODO: skip event if below a certain energy + + # Create HepMC particle + gp = 
HepMC.GenParticle() + + m = 105.66 + e = newentry["E"] * 1000. #MeV + + # If the energy is less than mass then skip the event + if e < m: + self.setFilterPassed(False) + self.msg.debug("Event failed energy cut") + return False + else: + self.setFilterPassed(True) + + p = np.sqrt(e**2 - m**2) + + thetaX = self.angle(newentry["cosX"]) + thetaY = self.angle(newentry["cosY"]) + + # theta: just above z axis as phi deals with negative + theta = np.abs(thetaY) + # phi: 0 - 2pi + phi = np.arctan2(newentry["cosY"], newentry["cosX"]) + #phi = np.arctan(newentry["cosY"] / newentry["cosX"]) + if phi < 0: phi += 2*np.pi + if phi == 2*np.pi: phi = 0 + + #self.msg.debug(f"INPUT: {e}, {m}, {p}, {theta}, {phi}, {np.sin(theta)}, {np.cos(theta)}, {np.sin(phi)}, {np.cos(phi)}") + + px = p * np.sin(theta) * np.cos(phi) + py = p * np.sin(theta) * np.sin(phi) + pz = p * np.cos(theta) + + mom = HepMC.FourVector(px, py, pz, e) + + gp.set_momentum(mom) + gp.set_generated_mass(m) + gp.set_pdg_id(self.pid(newentry["type"])) + gp.set_status(1) + + #self.msg.debug(f"HEPMC:{px, py, pz, e}") + #gp.print() + + ROOT.SetOwnership(gp, False) + gv.add_particle_out(gp) + + return True + + def plot(self): + "Plot entries before and after propagation/smeating for tests" + + if not self.test: + return + + import matplotlib.pyplot as plt + + plt.figure() + ebins = np.linspace(0, 5000, 50) + plt.xlabel("Energy") + plt.hist(self.before["E"], bins=ebins, histtype='step', color = "g", fill = False, label = "before") + plt.hist(self.after["E"], bins = ebins, histtype='step', color = "r", fill = False, label = "after") + plt.gca().set_yscale('log') + plt.legend() + plt.savefig("energy.png") + + plt.figure() + plt.xlabel("Angle to beam in X dir") + thetaX = np.pi/2. - np.arccos(np.array(self.before["cosX"])) + thetaXout = np.pi/2. 
- np.arccos(np.array(self.after["cosX"])) + tbins = np.linspace(-0.5, 0.5, 100) + plt.hist(thetaX, bins = tbins, histtype='step', color = "g", fill = False, label = "before") + plt.hist(thetaXout, bins = tbins, histtype='step', color = "r", fill = False, label = "after") + plt.gca().set_yscale('log') + plt.legend() + plt.savefig("thetaX.png") + + plt.figure() + plt.xlabel("Angle to beam in Y dir") + thetaY = np.pi/2. - np.arccos(np.array(self.before["cosY"])) + thetaYout = np.pi/2. - np.arccos(np.array(self.after["cosY"])) + plt.hist(thetaY, bins = tbins, histtype='step', color = "g", fill = False, label = "before") + plt.hist(thetaYout, bins = tbins, histtype='step', color = "r", fill = False, label = "after") + plt.gca().set_yscale('log') + plt.legend() + plt.savefig("thetaY.png") + + plt.figure() + plt.xlabel("Dispacement in X dir") + xbins = np.linspace(-300, 300, 100) + plt.hist(self.before["x"], bins = xbins, histtype='step', color = "g", fill = False, label = "before") + plt.hist(self.after["x"], bins = xbins, histtype='step', color = "r", fill = False, label = "after") + plt.gca().set_yscale('log') + plt.legend() + plt.savefig("x.png") + + plt.figure() + plt.xlabel("Dispacement in Y dir") + plt.hist(self.before["y"], bins = xbins, histtype='step', color = "g", fill = False, label = "before") + plt.hist(self.after["y"], bins = xbins, histtype='step', color = "r", fill = False, label = "after") + plt.gca().set_yscale('log') + plt.legend() + plt.savefig("y.png") + + return + +def getNEvents(fname, maxEvents): + "Work out how many events are in the file" + + n = 0 + with open(fname) as f: + n = sum(1 for _ in f) + + if maxEvents != -1 and n > maxEvents: + n = maxEvents + + print(">>> Setting number of real events to", n) + + return n + +if __name__ == "__main__": + +# from AthenaCommon.AlgSequence import AlgSequence +# job = AlgSequence() +# job += FlukaReader(file_name = "/user/gwilliam/unit30_Nm", dist = (480-409)) +# +# from AthenaPoolCnvSvc.WriteAthenaPool 
import AthenaPoolOutputStream +# ostream = AthenaPoolOutputStream( "StreamEVGEN" , "evgen.pool.root", noTag=True ) +# ostream.ItemList.remove("EventInfo#*") +# ostream.ItemList += [ "EventInfo#McEventInfo", +# "McEventCollection#*" ] +# +# theApp.EvtMax = 1000 + + + import argparse, sys + parser = argparse.ArgumentParser(description="Run Fluka reader") + parser.add_argument("file", help = "Path to fluka file") + parser.add_argument("--dist", "-d", default = 0, type = float, help = "depth of standard rock to propagate through [m]") + parser.add_argument("--pos", "-z", default = -3.75, type = float, help = "Position in z in FASER coordinate system [m]") + parser.add_argument("--output", "-o", default = "evgen.pool.root", help = "Name of output file") + parser.add_argument("--mcEventKey", "-k", default = "BeamTruthEvent", help = "Name of MC collection") + parser.add_argument("--nevents", "-n", default = -1, type = int, help = "Number of events to process") + parser.add_argument("--randomSeed", "-r", default=12345, type=int, help = "Seed for random number generator") + parser.add_argument("--nsamples", "-s", default = 1, type = int, help = "Number of times to sample each event") + parser.add_argument("--test", "-t", action = "store_true", help = "Make test plots") + args = parser.parse_args() + + + from AthenaCommon.Logging import log + from AthenaCommon.Constants import DEBUG, INFO + + from AthenaCommon.Configurable import Configurable + Configurable.configurableRun3Behavior = 1 + + from CalypsoConfiguration.AllConfigFlags import ConfigFlags + ConfigFlags.Input.isMC = True + ConfigFlags.IOVDb.GlobalTag = "OFLCOND-FASER-01" # Always needed; must match FaserVersion + ConfigFlags.GeoModel.FaserVersion = "FASER-01" # Default FASER geometry + ConfigFlags.Detector.EnableFaserSCT = True + ConfigFlags.Output.EVNTFileName = args.output + ConfigFlags.lock() + + from CalypsoConfiguration.MainServicesConfig import MainServicesCfg + cfg = MainServicesCfg(ConfigFlags) + + from 
AthenaConfiguration.ComponentAccumulator import ComponentAccumulator + from AthenaConfiguration.ComponentFactory import CompFactory + + acc = ComponentAccumulator() + reader = FlukaReader("FlukaReader", MCEventKey=args.mcEventKey, file_name = args.file, dist = args.dist, z = args.pos, randomSeed = args.randomSeed, nsamples = args.nsamples, test = args.test) + reader.OutputLevel = INFO + acc.addEventAlgo(reader) + cfg.merge(acc) + + itemList = [ "EventInfo#McEventInfo", "McEventCollection#*" ] + from OutputStreamAthenaPool.OutputStreamConfig import OutputStreamCfg + cfg.merge(OutputStreamCfg(ConfigFlags, "EVNT", itemList, disableEventTag = True)) + cfg.getEventAlgo("OutputStreamEVNT").AcceptAlgs = ["FlukaReader"] + sc = cfg.run(maxEvents = getNEvents(args.file, args.nevents) * args.nsamples) + sys.exit(not sc.isSuccess()) diff --git a/Generators/ForeseeGenerator/CMakeLists.txt b/Generators/ForeseeGenerator/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..e2e5137b8c50f144c2cef2528f0b6be5c5548270 --- /dev/null +++ b/Generators/ForeseeGenerator/CMakeLists.txt @@ -0,0 +1,12 @@ +################################################################################ +# Package: ForeseeGenerator +################################################################################ + +# Declare the package name: +atlas_subdir( ForeseeGenerator ) + +# Install files from the package: +atlas_install_python_modules( python/*.py POST_BUILD_CMD ${ATLAS_FLAKE8} ) + +atlas_install_joboptions( share/*.py ) + diff --git a/Generators/ForeseeGenerator/data/events_14TeV_m0.1GeV_c1e-05to_11_-11.npy b/Generators/ForeseeGenerator/data/events_14TeV_m0.1GeV_c1e-05to_11_-11.npy new file mode 100644 index 0000000000000000000000000000000000000000..36320c3f1c9cced47c9307b48529b444a2384be0 Binary files /dev/null and b/Generators/ForeseeGenerator/data/events_14TeV_m0.1GeV_c1e-05to_11_-11.npy differ diff --git a/Generators/ForeseeGenerator/python/ForeseeSampler.py 
b/Generators/ForeseeGenerator/python/ForeseeSampler.py new file mode 100644 index 0000000000000000000000000000000000000000..2f7d7241e560e552b8df3bcd9cb9b62e321f17ff --- /dev/null +++ b/Generators/ForeseeGenerator/python/ForeseeSampler.py @@ -0,0 +1,463 @@ +import os, sys +import random +import numpy as np + +import ParticleGun as PG + +from DIFGenerator.DIFSampler import CylinderSampler + +class ForeseeNumpySampler(PG.ParticleSampler): + """ Sample from the output of Foresee generation in numpy format with columns E, theta, weight""" + def __init__(self, model_path = ".", model_name = "DarkPhoton", com_energy = "14", mother_mass = 0.01, + coupling = None, mother_pid = None, daughter1_pid = 11, daughter2_pid = -11, randomSeed = None): + + self.path = os.path.expanduser(os.path.expandvars(model_path)) + self.modelname = model_name + self.energy = com_energy + self.mass = mother_mass + self.coupling = coupling + self.pid = mother_pid + self.daughter1_pid = daughter1_pid + self.daughter2_pid = daughter2_pid + self.n = 1 + self.distance = 480 # m + self.mass_override = False + + if randomSeed is not None: + print(f"Setting seed to {randomSeed}") + self.rng = np.random.default_rng(randomSeed) + else: + self.rng = np.random.default_rng() + + self.xs = 0 + + self.read() + + def read(self): + "Read data from numpy file in format E, theta, weight" + + if self.path.endswith(".npy"): + filename = self.path + elif self.coupling is None: + filename = f"{self.path}/files/models/{self.modelname}/events/events_{self.energy}TeV_m{self.mass}GeV_to_{self.daughter1_pid}_{self.daughter2_pid}.npy" + else: + filename = f"{self.path}/files/models/{self.modelname}/events/events_{self.energy}TeV_m{self.mass}GeV_c{self.coupling}to_{self.daughter1_pid}_{self.daughter2_pid}.npy" + + print(f"Reading data from file: {filename}") + self.data = np.load(filename) + + # Create probablity for each mode as weight / sum(weights) + self.prob = self.data[2]/np.sum(self.data[2]) + + return + +# def 
mass(self): +# "Mass converted from GeV to MeV" +# return self._mass * 1000 + + def shoot(self): + "Choose a random item from the data, based on the probability" + + energy, theta, weight = self.rng.choice(self.data, axis = 1, p = self.prob) + + self.xs += weight + + # Convert mass to MeV + mass = self.mass * 1000 + + # Sample phi + phi = self.rng.uniform(0, 2*np.pi) + + # Sample z + z = self.rng.uniform(-1500, 0) + + # Create mother momentum, randomly sampling phi + self.mom = PG.EThetaMPhiSampler(energy, theta, mass, phi) + + # Create mother pos, randomly sampling phi + r = (self.distance * 1000 + abs(z)) * np.tan(theta) + + self.pos = CylinderSampler(r**2, phi, z, 0) + + # Create particle + p = PG.SampledParticle(self.pid) + p.mom = self.mom.shoot() + p.pos = self.pos.shoot() + p.mass = mass + #self.mom.mass = mass + + return [p] + +class ForeseeSampler(PG.MomSampler): + """Create events from foresee directly on the fly + + Requires: + * foresee to be downloaded and in python path + + cd <PATH> + git clone https://github.com/KlingFelix/FORESEE.git + export PYTHONPATH=$PYTHONPATH:<PATH>/FORESEE/src/ + + * scikit-hep installed + + pip install scikit-hep --user + + * forsee files dir symlinked to the run dir + + ln -s <PATH>/FORESEE/files . 
+ + """ + + + def __init__(self, modelname, energy, mass, couplings, daughter1_pid, daughter2_pid, mother_pid = None): + self.modelname = modelname + self.model = Model(self.modelname) + self.energy = energy + self._mass = mass + self.couplings = [couplings] if isinstance(couplings, str) else couplings + self.mother_pid = mother_pid + self.daughter1_pid = daughter1_pid + self.daughter2_pid = daughter2_pid + + self.rng = np.random.default_rng() + self.xs = 0 + + if not os.path.exists("files"): + os.symlink(os.path.expandvars("$Calypso_DIR/../calypso/Generators/foresee/files"), "files") + + self.pid_map = { + (11, 11) : "e_e", + (13, 13) : "mu_mu", + (22, 22) : "gamma_gamma", + } + + self.mode = self.pid_map.get((self.daughter1_pid, self.daughter2_pid), None) + if self.mode is None: + sys.exit(f"Undefined decay to {self.daughter1_pid} + {self.daughter2_pid} for {self.modelname}") + + from foresee import Foresee, Model, Utility + self.foresee = Foresee() + self.foresee.set_detector(selection="np.sqrt(x.x**2 + x.y**2)< 0.1", channels=[self.mode], distance=480, length=1.5 , luminosity=150) + + + if self.modelname == "DarkPhoton": + self.data = self.darkphoton() + elif self.modelname == "ALP-W": + self.data = self.alps() + else: + sys.exit(f"Unknown model {self.modelname}") + + return + + + def mass(self): + return self._mass * 1000 + + def darkphoton(self): + + # Production modes + self.model.add_production_2bodydecay( + pid0 = "111", + pid1 = "22", + br = "2.*0.99 * coupling**2 * pow(1.-pow(mass/self.masses('111'),2),3)", + generator = "EPOSLHC", + energy = self.energy, + nsample = 10) + + self.model.add_production_2bodydecay( + pid0 = "221", + pid1 = "22", + br = "2.*0.39 * coupling**2 * pow(1.-pow(mass/self.masses('221'),2),3)", + generator = "EPOSLHC", + energy = self.energy, + nsample = 10) + + # Handwavey + self.model.add_production_mixing( + pid = "113", + mixing = "coupling * 0.3/5. 
* 0.77545**2/abs(mass**2-0.77545**2+0.77545*0.147*1j)", + generator = "EPOSLHC", + energy = self.energy, + ) + + # Question on validity as FASER gets larger + self.model.add_production_direct( + label = "Brem", + energy = self.energy, + condition = "p.pt<1", + coupling_ref=1, + ) + + self.model.add_production_direct( + label = "DY", + energy = self.energy, + coupling_ref=1, + massrange=[1.5, 10.] + ) + + return self.decays() + + + def alps(self): + + self.model.add_production_2bodydecay( + pid0 = "5", + pid1 = "321", + br = "2.2e4 * coupling**2 * np.sqrt((1-(mass+0.495)**2/5.279**2)*(1-(mass-0.495)**2/5.279**2))", + generator = "Pythia8", + energy = self.energy, + nsample = 20, # Vary over out of phi and theta + ) + + + self.model.add_production_2bodydecay( + pid0 = "-5", + pid1 = "321", + br = "2.2e4 * coupling**2 * np.sqrt((1-(mass+0.495)**2/5.279**2)*(1-(mass-0.495)**2/5.279**2))", + generator = "Pythia8", + energy = self.energy, + nsample = 20, + ) + + self.model.add_production_2bodydecay( + pid0 = "130", + pid1 = "111", + br = "4.5 * coupling**2 * np.sqrt((1-(mass+0.135)**2/0.495**2)*(1-(mass-0.135)**2/0.495**2))", + generator = "EPOSLHC", + energy = self.energy, + nsample = 10, + ) + + self.model.add_production_2bodydecay( + pid0 = "321", + pid1 = "211", + br = "10.5 * coupling**2 * np.sqrt((1-(mass+0.135)**2/0.495**2)*(1-(mass-0.135)**2/0.495**2))", + generator = "EPOSLHC", + energy = self.energy, + nsample = 10, + ) + + return self.decays() + + + def decays(self): + # Decays + self.model.set_ctau_1d( + filename=f"files/models/{self.modelname}/ctau.txt", + coupling_ref=1 + ) + + # TODO take into account BR + self.model.set_br_1d( + modes = [self.mode], + filenames=[f"files/models/{self.modelname}/br/{self.mode}.txt"] + ) + + # LLP spectrum + self.foresee.set_model(model=self.model) + + plt = self.foresee.get_llp_spectrum(self._mass, coupling=1, do_plot=True) # This is just a reference coupling + plt.savefig(f"{self.modelname}.png") + + def flatten(l): + 
return [i for sublist in l for i in sublist] + + coups, ctaus, nsigs, energies, weights, thetas = self.foresee.get_events(mass=self._mass, energy=self.energy, couplings=self.couplings) + + return [flatten(thetas), flatten(energies), flatten(weights)] + + def shoot(self): + # Create probablity for each mode as weight / sum(weights) + prob = self.data[2]/np.sum(self.data[2]) + + # Choose a random item from the data, base on the probability + # TODO: what about reuse of events? + theta_mother, e_mother, w = self.rng.choice(self.data, axis = 1, p = prob) + + self.xs += w + + # Create other momentum + mother_mom = PG.EThetaMPhiSampler(e_mother*1000, theta_mother, self.mass(), [0,2*np.pi]) + + return mother_mom.shoot() + +if __name__ == "__main__": + + # Testing ... + + from math import sqrt, log10 + import matplotlib.pyplot as plt + import matplotlib + from DIFGenerator import DIFSampler + + + path = os.path.expandvars("$Calypso_DIR/../calypso/Generators/ForeseeGenerator/data/events_14TeV_m0.1GeV_c1e-05to_11_-11.npy") + path = "files/models/DarkPhoton/events/events_14TeV_m0.1GeV_c1e-05to_11_-11.npy" + + modelname = "DarkPhoton" + mass = 0.1 + + theta = [] + mom = [] + + d0theta = [] + d0mom = [] + + d1theta = [] + d1mom = [] + + # Accounting for rounding + epsilon = 6 + + # Create mother sampler reading data from foresee + mother_sampler = ForeseeNumpySampler(model_path = path, model_name = modelname, com_energy = "14", mother_mass = 0.1, coupling = 1e-5, mother_pid = None, daughter1_pid = 11, daughter2_pid = -11) + + # Create decay-in-flight + d = DIFSampler(11, -11, None) + d.mother_sampler = mother_sampler + + # Loop over a range of events + for i in range(100000): + + # Shoot the decay in flight + daughters = d.shoot() + + # Get mother and sum of daugthers and check these make sense. 
+ mother_mom = d.mother_mom + s = daughters[0].mom+daughters[1].mom + + try: + assert mother_mom.E() - epsilon <= s.E() <= mother_mom.E() + epsilon + assert mother_mom.P() - epsilon <= s.P()<= mother_mom.P() + epsilon + assert mother_mom.Px() - epsilon <= s.Px() <= mother_mom.Px() + epsilon + assert mother_mom.Py() - epsilon <= s.Py() <= mother_mom.Py() + epsilon + assert mother_mom.Pz() - epsilon <= s.Pz() <= mother_mom.Pz() + epsilon + assert daughters[0].pos.X() == daughters[1].pos.X() == d.mother_pos.X() + assert daughters[0].pos.Y() == daughters[1].pos.Y() == d.mother_pos.Y() + assert daughters[0].pos.Z() == daughters[1].pos.Z() == d.mother_pos.Z() + except AssertionError: + print("Error on run " + str(i)) + + print("mother particle:") + print(" E = " + str(mother_mom.E())) + print(" M = " + str(mother_mom.M())) + print(" P = " + str(mother_mom.P())) + print(" Px = " + str(mother_mom.Px())) + print(" Py = " + str(mother_mom.Py())) + print(" Pz = " + str(mother_mom.Pz())) + print(" theta = " + str(mother_mom.Theta())) + print(" phi = " + str(mother_mom.Phi())) + print(" x = " + str(d.mother_pos.X())) + print(" y = " + str(d.mother_pos.Y())) + print(" z = " + str(d.mother_pos.Z())) + + print("daughter 0 particle:") + print(" E = " + str(daughters[0].mom.E())) + print(" M = " + str(daughters[0].mom.M())) + print(" P = " + str(daughters[0].mom.P())) + print(" Px = " + str(daughters[0].mom.Px())) + print(" Py = " + str(daughters[0].mom.Py())) + print(" Pz = " + str(daughters[0].mom.Pz())) + print(" theta = " + str(daughters[0].mom.Theta())) + print(" phi = " + str(daughters[0].mom.Phi())) + print(" x = " + str(daughters[0].pos.X())) + print(" y = " + str(daughters[0].pos.Y())) + print(" z = " + str(daughters[0].pos.Z())) + + print("daughter 1 particle:") + print(" E = " + str(daughters[1].mom.E())) + print(" M = " + str(daughters[1].mom.M())) + print(" P = " + str(daughters[1].mom.P())) + print(" Px = " + str(daughters[1].mom.Px())) + print(" Py = " + 
str(daughters[1].mom.Py())) + print(" Pz = " + str(daughters[1].mom.Pz())) + print(" theta = " + str(daughters[1].mom.Theta())) + print(" phi = " + str(daughters[1].mom.Phi())) + print(" x = " + str(daughters[1].pos.X())) + print(" y = " + str(daughters[1].pos.Y())) + print(" z = " + str(daughters[1].pos.Z())) + + raise + + # Store mother info to plot + theta.append(log10(mother_mom.Theta())) + mom.append(log10(mother_mom.P()/1000.)) + + # Store mother info to plot + d0theta.append(log10(daughters[0].mom.Theta())) + d0mom.append(log10(daughters[0].mom.P()/1000.)) + d1theta.append(log10(daughters[1].mom.Theta())) + d1mom.append(log10(daughters[1].mom.P()/1000.)) + + + + # Plot mother from sampling events + prange=[[-6, 0, 120],[ 0, 5, 50]] + tmin, tmax, tnum = prange[0] + pmin, pmax, pnum = prange[1] + t_edges = np.logspace(tmin, tmax, num=tnum+1) + p_edges = np.logspace(pmin, pmax, num=pnum+1) + + ticks = np.array([[np.linspace(10**(j),10**(j+1),9)] for j in range(-7,6)]).flatten() + ticks = [np.log10(x) for x in ticks] + ticklabels = np.array([[r"$10^{"+str(j)+"}$","","","","","","","",""] for j in range(-7,6)]).flatten() + matplotlib.rcParams.update({'font.size': 15}) + + + fig = plt.figure(figsize=(8,5.5)) + ax = plt.subplot(1,1,1) + h=ax.hist2d(x=theta,y=mom, + bins=[tnum,pnum],range=[[tmin,tmax],[pmin,pmax]], + norm=matplotlib.colors.LogNorm(), cmap="hsv", + ) + fig.colorbar(h[3], ax=ax) + ax.set_xlabel(r"angle wrt. beam axis $\theta$ [rad]") + ax.set_ylabel(r"momentum $p$ [GeV]") + ax.set_xticks(ticks) + ax.set_xticklabels(ticklabels) + ax.set_yticks(ticks) + ax.set_yticklabels(ticklabels) + ax.set_xlim(tmin, tmax) + ax.set_ylim(pmin, pmax) + plt.savefig(f"{modelname}_PG_m{mass}.png") + + fig = plt.figure(figsize=(8,5.5)) + ax = plt.subplot(1,1,1) + h=ax.hist2d(x=d0theta,y=d0mom, + bins=[tnum,pnum],range=[[tmin,tmax],[pmin,pmax]], + norm=matplotlib.colors.LogNorm(), cmap="hsv", + ) + fig.colorbar(h[3], ax=ax) + ax.set_xlabel(r"angle wrt. 
beam axis $\theta$ [rad]") + ax.set_ylabel(r"momentum $p$ [GeV]") + ax.set_xticks(ticks) + ax.set_xticklabels(ticklabels) + ax.set_yticks(ticks) + ax.set_yticklabels(ticklabels) + ax.set_xlim(tmin, tmax) + ax.set_ylim(pmin, pmax) + plt.savefig(f"{modelname}_PG_d0_m{mass}.png") + + fig = plt.figure(figsize=(8,5.5)) + ax = plt.subplot(1,1,1) + h=ax.hist2d(x=d1theta,y=d1mom, + bins=[tnum,pnum],range=[[tmin,tmax],[pmin,pmax]], + norm=matplotlib.colors.LogNorm(), cmap="hsv", + ) + fig.colorbar(h[3], ax=ax) + ax.set_xlabel(r"angle wrt. beam axis $\theta$ [rad]") + ax.set_ylabel(r"momentum $p$ [GeV]") + ax.set_xticks(ticks) + ax.set_xticklabels(ticklabels) + ax.set_yticks(ticks) + ax.set_yticklabels(ticklabels) + ax.set_xlim(tmin, tmax) + ax.set_ylim(pmin, pmax) + plt.savefig(f"{modelname}_PG_d1_m{mass}.png") + + + print (f"x-sect = {mother_sampler.xs} pb") + + + + diff --git a/Generators/ForeseeGenerator/python/Validate.py b/Generators/ForeseeGenerator/python/Validate.py new file mode 100644 index 0000000000000000000000000000000000000000..9c6be63475b6e066ece1ceac0aea059b8837a0dc --- /dev/null +++ b/Generators/ForeseeGenerator/python/Validate.py @@ -0,0 +1,227 @@ +from AthenaPython.PyAthena import StatusCode, McEventCollection, HepMC, CLHEP +from GeneratorModules.EvgenAnalysisAlg import EvgenAnalysisAlg + +import ROOT as R +import numpy as np +import os + +def fix(): + "Python Fixes for HepMC" + def add(self, other): + self.set(self.x() + other.x(), self.y() + other.y(), + self.z() + other.z(), self.t() + other.t()) + return self + + HepMC.FourVector.__iadd__ = add + del add + + return + +class HistSvc(object): + "Class to deal with histograms" + + def __init__(self): + self.hists = {} + + def add(self, name, nbinsX = None, loX = None, hiX = None, nbinsY = None, loY = None, hiY = None, title = None, arrayX = None, arrayY = None): + hname = os.path.basename(name) + + if title is None: title = hname + + if nbinsY is not None: + self.hists[name] = R.TH2F(hname, title, 
nbinsX, loX, hiX, nbinsY, loY, hiY) + elif arrayX is not None and arrayY is not None: + self.hists[name] = R.TH2F(hname, title, len(arrayX) - 1, arrayX, len(arrayY) - 1, arrayY) + elif arrayX is not None and arrayY is None and nbinsY is not None: + self.hists[name] = R.TH2F(hname, title, len(arrayX) - 1, arrayX, nbinsY, loY, hiY) + elif arrayX is None and arrayY is not None: + self.hists[name] = R.TH2F(hname, title, nbinsX, loX, hiX, len(arrayY) - 1, arrayY) + elif arrayX is not None: + self.hists[name] = R.TH1F(hname, title, len(arrayX) - 1, arrayX) + else: + self.hists[name] = R.TH1F(hname, title, nbinsX, loX, hiX) + + def __getitem__(self, name): + return self.hists[name] + + def write(self, name): + + f = R.TFile.Open(name, "RECREATE") + + for n, h in self.hists.items(): + path = os.path.dirname(n) + if path and not f.GetDirectory(path): + f.mkdir(path) + + f.cd(path) + h.Write() + + f.Close() + + return + +class EvgenValidation(EvgenAnalysisAlg): + "Gen-level validation" + + def __init__(self, name = "EvgenValidation", ndaughters = 2, outname = "validation.root"): + super(EvgenValidation, self).__init__(name=name) + self.hists = HistSvc() + self.ndaughters = ndaughters + self.outname = outname + + def binning(self): + "binning for theta vs phi plot" + tmin, tmax, tnum = [-6, 0, 24] + pmin, pmax, pnum = [ 0, 5, 10] + t_edges = np.logspace(tmin, tmax, num=tnum+1) + p_edges = np.logspace(pmin, pmax, num=pnum+1) + return t_edges, p_edges + + def initialize(self): + + # All daughters + self.hists.add("PIDs", 60, -30, 30) + + # Daughter i + tbins, pbins = self.binning() + for i in range(self.ndaughters): + self.hists.add(f"E_d{i}", 100, 0, 10000) + self.hists.add(f"P_d{i}", 100, 0, 10000) + self.hists.add(f"Pz_d{i}", 100, 0, 10000) + self.hists.add(f"Pt_d{i}", 100, 0, 1) + self.hists.add(f"Theta_d{i}", 20, 0, 0.001) + self.hists.add(f"Phi_d{i}", 16, -3.2, 3.2) + self.hists.add(f"ThetaVsP_d{i}", arrayX = tbins, arrayY = pbins) + self.hists.add(f"Mass_d{i}", 200, 0, 
0.01) + + # Mother + self.hists.add("E_M", 100, 0, 10000) + self.hists.add("P_M", 100, 0, 10000) + self.hists.add("Pz_M", 100, 0, 10000) + self.hists.add("Pt_M", 100, 0, 1) + self.hists.add("Theta_M", 20, 0, 0.001) + self.hists.add("Phi_M", 16, -3.2, 3.2) + self.hists.add("Mass_M", 200, 0, 1) + self.hists.add("ThetaVsP_M", arrayX = tbins, arrayY = pbins) + + # Vertex + self.hists.add("Vtx_X", 50, -100, 100) + self.hists.add("Vtx_Y", 50, -100, 100) + # For fluka + #self.hists.add("Vtx_X", 100, -3000, 3000) + #self.hists.add("Vtx_Y", 100, -3000, 3000) + self.hists.add("Vtx_Z", 50, -1500, 0) + self.hists.add("Vtx_XY", 50, -100, 100, 50, -100, 100) + + return StatusCode.Success + + + def fillKin(self, label, p, mass = True, twoD = True): + + self.hists[f"E_{label}"].Fill(p.t()/1000, self.weight) + self.hists[f"P_{label}"].Fill(p.rho()/1000, self.weight) + self.hists[f"Pz_{label}"].Fill(p.pz()/1000, self.weight) + self.hists[f"Pt_{label}"].Fill(p.perp()/1000, self.weight) + self.hists[f"Theta_{label}"].Fill(p.theta(), self.weight) + self.hists[f"Phi_{label}"].Fill(p.phi(), self.weight) + + if mass: + self.hists[f"Mass_{label}"].Fill(p.m()/1000, self.weight) + + if twoD: + self.hists[f"ThetaVsP_{label}"].Fill(p.theta(), p.rho()/1000, self.weight) + + return + + def fillDaughter(self, p): + self.hists["PIDs"].Fill(p.pdg_id(), self.weight) + return + + def fillVertex(self, v): + self.hists["Vtx_X"].Fill(v.x(), self.weight) + self.hists["Vtx_Y"].Fill(v.y(), self.weight) + self.hists["Vtx_Z"].Fill(v.z(), self.weight) + self.hists["Vtx_XY"].Fill(v.x(), v.y(), self.weight) + return + + + def execute(self): + evt = self.events()[0] + self.weight = evt.weights()[0] + + # Loop over all particles in events (assuming mother not stored) + momenta = [] + mother = HepMC.FourVector(0,0,0,0) + llp_vtx = None + for i, p in enumerate(evt.particles): + #p.print() + self.fillDaughter(p) + momenta.append(p.momentum()) + mother += p.momentum() + if i == 0: + #p.production_vertex().print() + 
llp_vtx = p.production_vertex().point3d() + + # Fill daughter plots + for i in range(self.ndaughters): + self.fillKin(f"d{i}", momenta[i]) + + # Fill mother plots + self.fillKin("M", mother, mass = True) + + # Fill vertex plots + self.fillVertex(llp_vtx) + + return StatusCode.Success + + def finalize(self): + self.hists.write(self.outname) + return StatusCode.Success + + +if __name__ == "__main__": + + import argparse, sys + parser = argparse.ArgumentParser(description="Run gen-level validation") + parser.add_argument("file", nargs="+", help = "full path to imput file") + parser.add_argument("--ndaugthers", "-d", default = 2, type = int, help = "Number of daugthers to plot") + parser.add_argument("--output", "-o", default = "validation.root", help = "Name of output file") + parser.add_argument("--mcEventKey", "-k", default = "BeamTruthEvent", help = "Name of MC collection") + parser.add_argument("--nevents", "-n", default = -1, type = int, help = "Number of events to process") + args = parser.parse_args() + + from AthenaCommon.Logging import log + from AthenaCommon.Constants import DEBUG + log.setLevel(DEBUG) + + from AthenaCommon.Configurable import Configurable + Configurable.configurableRun3Behavior = 1 + + from CalypsoConfiguration.AllConfigFlags import ConfigFlags + ConfigFlags.Input.isMC = True + ConfigFlags.IOVDb.GlobalTag = "OFLCOND-FASER-01" # Always needed; must match FaserVersion + ConfigFlags.GeoModel.FaserVersion = "FASER-01" # Default FASER geometry + ConfigFlags.Detector.EnableFaserSCT = True + ConfigFlags.Input.Files = args.file + ConfigFlags.lock() + + from CalypsoConfiguration.MainServicesConfig import MainServicesCfg + cfg = MainServicesCfg(ConfigFlags) + + from AthenaPoolCnvSvc.PoolReadConfig import PoolReadCfg + cfg.merge(PoolReadCfg(ConfigFlags)) + + from AthenaConfiguration.ComponentAccumulator import ComponentAccumulator + from AthenaConfiguration.ComponentFactory import CompFactory + + import McParticleEvent.Pythonizations + fix() + + acc = 
ComponentAccumulator() + valid = EvgenValidation("EvgenValidation", ndaughters = args.ndaugthers, outname = args.output) + valid.McEventKey = args.mcEventKey + acc.addEventAlgo(valid) + cfg.merge(acc) + + sc = cfg.run(maxEvents = args.nevents) + sys.exit(not sc.isSuccess()) diff --git a/Generators/ForeseeGenerator/share/generate_forsee_events.py b/Generators/ForeseeGenerator/share/generate_forsee_events.py new file mode 100644 index 0000000000000000000000000000000000000000..37372b7bd5d574e084dad7842ad43a54eb31f1f9 --- /dev/null +++ b/Generators/ForeseeGenerator/share/generate_forsee_events.py @@ -0,0 +1,393 @@ +import os + +import numpy as np +import matplotlib.pyplot as plt +import matplotlib + +class ForeseeGenerator(object): + """ + Generate LLP particles within FASER acceptance from FORESEE + """ + + def __init__(self, modelname, energy, mass, couplings, daughter1_pid, daughter2_pid, outdir = None, path = '.', randomSeed = 12345): + + self.modelname = modelname + self.energy = energy + self.mass = mass + self.couplings = [couplings] if isinstance(couplings, (str, int, float)) else couplings + self.daughter1_pid = daughter1_pid + self.daughter2_pid = daughter2_pid + self.outdir = outdir + self.path = path + self.version = 1 # Forsee "version": 2 is in testing + self.seed = randomSeed + + # Set decay mode ... + + self.pid_map = { + (-11, 11) : "e_e", + (11, -11) : "e_e", + (-13, 13) : "mu_mu", + (13, -13) : "mu_mu", + (22, 22) : "gamma_gamma", + } + + self.mode = self.pid_map.get((self.daughter1_pid, self.daughter2_pid), None) + if self.mode is None: + sys.exit(f"Undefined decay to {self.daughter1_pid} + {self.daughter2_pid} for {self.modelname}") + + # Set detector ... 
+ if self.version == 1: + self.foresee = Foresee() + else: + self.foresee = Foresee(path = self.path) + + # TODO: relax this a bit as daughters may enter even if mother doesn't + self.foresee.set_detector(selection="np.sqrt(x.x**2 + x.y**2)< 0.1", + channels=[self.mode], distance=480, length=1.5 , + luminosity=1/1000.) # 1 pb-1 + + # Set model ... + if self.version == 1: + self.model = Model(self.modelname) + else: + self.model = Model(self.modelname, path = f"{self.path}/Models/{self.modelname}/") + + if self.modelname == "DarkPhoton": + self.data = self.darkphoton() + elif self.modelname == "ALP-W": + self.data = self.alp_W() + else: + sys.exit(f"Unknown model {self.modelname}") + + return + + def darkphoton(self): + + # Production modes + self.model.add_production_2bodydecay( + pid0 = "111", + pid1 = "22", + br = "2.*0.99 * coupling**2 * pow(1.-pow(mass/self.masses('111'),2),3)", + generator = "EPOSLHC", + energy = self.energy, + nsample = 10) + + self.model.add_production_2bodydecay( + pid0 = "221", + pid1 = "22", + br = "2.*0.39 * coupling**2 * pow(1.-pow(mass/self.masses('221'),2),3)", + generator = "EPOSLHC", + energy = self.energy, + nsample = 10) + + self.model.add_production_mixing( + pid = "113", + mixing = "coupling * 0.3/5. * 0.77545**2/abs(mass**2-0.77545**2+0.77545*0.147*1j)", + generator = "EPOSLHC", + energy = self.energy, + ) + + if self.version == 1: + self.model.add_production_direct( + label = "Brem", + energy = self.energy, + condition = "p.pt<1", + coupling_ref=1, + ) + + self.model.add_production_direct( + label = "DY", + energy = self.energy, + coupling_ref=1, + massrange=[1.5, 10.] 
+ ) + else: + masses_brem = [ + 0.01 , 0.0126, 0.0158, 0.02 , 0.0251, 0.0316, 0.0398, + 0.0501, 0.0631, 0.0794, 0.1 , 0.1122, 0.1259, 0.1413, + 0.1585, 0.1778, 0.1995, 0.2239, 0.2512, 0.2818, 0.3162, + 0.3548, 0.3981, 0.4467, 0.5012, 0.5623, 0.6026, 0.631 , + 0.6457, 0.6607, 0.6761, 0.6918, 0.7079, 0.7244, 0.7413, + 0.7586, 0.7762, 0.7943, 0.8128, 0.8318, 0.8511, 0.871 , + 0.8913, 0.912 , 0.9333, 0.955 , 0.9772, 1. , 1.122 , + 1.2589, 1.4125, 1.5849, 1.7783, 1.9953, 2.2387, 2.5119, + 2.8184, 3.1623, 3.9811, 5.0119, 6.3096, 7.9433, 10. + ] + + self.model.add_production_direct( + label = "Brem", + energy = self.energy, + condition = "p.pt<1", + coupling_ref=1, + masses = masses_brem, + ) + + masses_dy = [ + 1.5849, 1.7783, 1.9953, 2.2387, 2.5119, 2.8184, 3.1623, 3.9811, 5.0119, 6.3096, 7.9433, 10. + ] + + self.model.add_production_direct( + label = "DY", + energy = self.energy, + coupling_ref=1, + masses = masses_dy, + ) + + return self.decays() + + + def alp_W(self): + + self.model.add_production_2bodydecay( + pid0 = "5", + pid1 = "321", + br = "2.2e4 * coupling**2 * np.sqrt((1-(mass+0.495)**2/5.279**2)*(1-(mass-0.495)**2/5.279**2))", + generator = "Pythia8", + energy = self.energy, + nsample = 20, # Vary over phi and theta + ) + + self.model.add_production_2bodydecay( + pid0 = "-5", + pid1 = "321", + br = "2.2e4 * coupling**2 * np.sqrt((1-(mass+0.495)**2/5.279**2)*(1-(mass-0.495)**2/5.279**2))", + generator = "Pythia8", + energy = self.energy, + nsample = 20, + ) + + self.model.add_production_2bodydecay( + pid0 = "130", + pid1 = "111", + br = "4.5 * coupling**2 * np.sqrt((1-(mass+0.135)**2/0.495**2)*(1-(mass-0.135)**2/0.495**2))", + generator = "EPOSLHC", + energy = self.energy, + nsample = 10, + ) + + self.model.add_production_2bodydecay( + pid0 = "321", + pid1 = "211", + br = "10.5 * coupling**2 * np.sqrt((1-(mass+0.135)**2/0.495**2)*(1-(mass-0.135)**2/0.495**2))", + generator = "EPOSLHC", + energy = self.energy, + nsample = 10, + ) + + return self.decays() + + + 
def decays(self): + # Set up liftime and BRs + + if self.version == 1: + self.model.set_ctau_1d( + filename=f"files/models/{self.modelname}/ctau.txt", + coupling_ref=1 + ) + + self.model.set_br_1d( + modes = [self.mode], + filenames=[f"files/models/{self.modelname}/br/{self.mode}.txt"] + ) + else: + self.model.set_ctau_1d( + filename=f"model/ctau.txt", + coupling_ref=1 + ) + + self.model.set_br_1d( + modes = [self.mode], + finalstates = [[self.daughter1_pid, self.daughter2_pid]], + filenames=[f"model/br/{self.mode}.txt"] + ) + + # Get LLP spectrum + self.foresee.set_model(model=self.model) + # This is just a reference coupling + plt = self.foresee.get_llp_spectrum(self.mass, coupling=1, do_plot=True) + plt.savefig(f"{self.modelname}_m{self.mass}.png") + + def flatten(l): + return [i for sublist in l for i in sublist] + + # Get list of events within detector + output = self.foresee.get_events(mass=self.mass, energy=self.energy, couplings=self.couplings) + coups, ctaus, nsigs, energies, weights, thetas = output + + self.plot(flatten(thetas), flatten(energies), flatten(weights)) + + # Return energy (converting to MeV), theta and weights + return [[e*1000 for e in flatten(energies)], flatten(thetas), flatten(weights)] + + def plot(self, thetas, energies, weights): + # Plot the results in Forsee format + + t = np.array(thetas) + p = np.sqrt(np.array(energies)**2 - self.mass**2) + + prange=[[-6, 0, 120],[ 0, 5, 50]] + tmin, tmax, tnum = prange[0] + pmin, pmax, pnum = prange[1] + t_edges = np.logspace(tmin, tmax, num=tnum+1) + p_edges = np.logspace(pmin, pmax, num=pnum+1) + + ticks = np.array([[np.linspace(10**(j),10**(j+1),9)] for j in range(-7,6)]).flatten() + ticks = [np.log10(x) for x in ticks] + ticklabels = np.array([[r"$10^{"+str(j)+"}$","","","","","","","",""] for j in range(-7,6)]).flatten() + matplotlib.rcParams.update({'font.size': 15}) + + fig = plt.figure(figsize=(8,5.5)) + ax = plt.subplot(1,1,1) + h=ax.hist2d(x=np.log10(t),y=np.log10(p),weights=weights, + 
bins=[tnum,pnum],range=[[tmin,tmax],[pmin,pmax]], + norm=matplotlib.colors.LogNorm(), cmap="hsv", + ) + + fig.colorbar(h[3], ax=ax) + ax.set_xlabel(r"angle wrt. beam axis $\theta$ [rad]") + ax.set_ylabel(r"momentum $p$ [GeV]") + ax.set_xticks(ticks) + ax.set_xticklabels(ticklabels) + ax.set_yticks(ticks) + ax.set_yticklabels(ticklabels) + ax.set_xlim(tmin, tmax) + ax.set_ylim(pmin, pmax) + plt.savefig(f"{self.modelname}_m{self.mass}_acc.png") + + def write(self): + # Write LLP results to a file + + energies, thetas, weights = self.data + + if self.outdir is None: + if self.version == 1: + self.outdir = f"files/models/{self.modelname}/events" + else: + self.outdir = f"{self.foresee.dirpath}/Models/{self.modelname}/model/events" + + if not os.path.exists(self.outdir): + os.mkdir(self.outdir) + + if len(self.couplings) == 1: + filename = f"{self.outdir}/events_{self.energy}TeV_m{self.mass}GeV_c{self.couplings[0]}to_{self.daughter1_pid}_{self.daughter2_pid}.npy" + else: + filename = f"{self.outdir}/events_{self.energy}TeV_m{self.mass}GeV_to_{self.daughter1_pid}_{self.daughter2_pid}.npy" + + print(f"Generated {len(thetas)} events") + print(f"save data to file: {filename}") + np.save(filename,[energies,thetas, weights]) + + cfgname = filename.replace(".npy", ".cfg") + print(f"save config to file: {cfgname}") + with open(cfgname, "w") as f: + f.write(" ".join(sys.argv)) + + return + + def write_hepmc(self, nevents): + + if self.outdir is None: + self.outdir = "model/events/" + elif not os.path.exists(self.outdir): + os.mkdir(self.outdir) + + filename = f"{self.outdir}/events_{self.energy}TeV_m{self.mass}GeV_c{self.couplings[0]}to_{self.daughter1_pid}_{self.daughter2_pid}.hepmc" + + self.foresee.write_events(self.mass, self.couplings[0], self.energy, filename, nevents, zfront = -1500, seed = self.seed) + +def setup_foresee(path): + + if path is None: + return + + # Add foresee to python path + path = os.path.expandvars(os.path.expanduser(path)) + 
os.sys.path.append(f"{path}/FORESEE/src") + + # Symlink foresee files/Models dirs to current dir + #if not os.path.exists("files"): + # os.symlink(os.path.expandvars(f"{path}/FORESEE/files"), "files") + #if not os.path.exists("Models"): + # os.symlink(os.path.expandvars(f"{path}/FORESEE/Models"), "files") + + # Install scikit-hep if needed. + + try: + from skhep.math.vectors import LorentzVector, Vector3D + except ModuleNotFoundError: + os.system("pip install scikit-hep --user") + try: + from skhep.math.vectors import LorentzVector, Vector3D + except ModuleNotFoundError: + raise ModuleNotFoundError("Unable to find skhep. Please install the scikit-hep package") + + return + +def add_to_python_path(path): + if path in sys.path: return + path = os.path.expandvars(os.path.expanduser(path)) + os.sys.path.append(path) + return + +def parse_couplings(data, write_hepMC = False): + + if write_hepMC: + try: + couplings = float(data[0]) + except ValueError: + sys.exit("Only a single coupling allowed when writing HEPMC events") + + try: + couplings = [float(d) for d in data] + except ValueError: + try: + couplings = np.logspace(*eval(data[0])) + except: + sys.exit("Unable to parse couplings") + + return couplings + +if __name__ == "__main__": + + import argparse, sys + + parser = argparse.ArgumentParser(description="Run FORSEE generation") + parser.add_argument("model", help = "Name of foresee model") + parser.add_argument("--mass", "-m", required = True, type = float, help = "Mass of mother [GeV]") + parser.add_argument("--couplings", "-c", required = True, nargs = "+", help = "Couplings of mother (either single/mulitple values or tuple to pass to np.logspace)") + parser.add_argument("--pid1", required = True, type = int, help = "PID of daughter 1") + parser.add_argument("--pid2", default = None, type = int, help = "PID of daughter 2 (if not set then will be -PID1)") + parser.add_argument("--Ecom", default = "14", help = "Center of mass energy [TeV]") + 
parser.add_argument("--outdir", "-o", default = None, help = "Output path") + parser.add_argument("--path", default = ".", help = "Path to foresee installation") + parser.add_argument("--hepmc", action = "store_true", help = "Write HepMC events") + parser.add_argument("--nevents", "-n", default = 10, type = int, help = "Number of HepMC events ") + parser.add_argument("--randomSeed", "-s", default = 1234, type = int, help = "Random seed for HepMC generation") + args = parser.parse_args() + + add_to_python_path(f"{args.path}/src") + + from foresee import Foresee, Model, Utility + + # Create PIDs + if args.pid2 is None: + args.pid2 = -args.pid1 + + couplings = parse_couplings(args.couplings) + + print(f"Generating {args.model} events at Ecom = {args.Ecom}") + print(f" mother mass = {args.mass} GeV") + print(f" decay = {args.pid1} {args.pid2}") + print(f" couplings = {couplings}") + + f = ForeseeGenerator(args.model, args.Ecom, args.mass, couplings, args.pid1, args.pid2, outdir = args.outdir, path = args.path, randomSeed = args.randomSeed) + + if args.hepmc: + f.write_hepmc(args.nevents) + else: + f.write() + + + diff --git a/Generators/ForeseeGenerator/share/plot_validation.py b/Generators/ForeseeGenerator/share/plot_validation.py new file mode 100644 index 0000000000000000000000000000000000000000..8d5a04701c82deedadada27fc76d11fa4a6a8d30 --- /dev/null +++ b/Generators/ForeseeGenerator/share/plot_validation.py @@ -0,0 +1,122 @@ +import ROOT as R +from collections import namedtuple + +Hist = namedtuple("Hist", "name, xtitle, ytitle, xlo, xhi, ylo, yhi, r, d, logx, logy, ndiv", + defaults = [None, None, None, None, None, None, 1, "hist", False, False, None]) + +def plot(f, name, xtitle, ytitle, xlo = None, xhi = None, ylo = None, yhi = None, + r = 1, d = "hist", logx = False, logy = False, ndiv = None): + + h = f.Get(name) + + if xlo is not None and xhi is not None: + h.SetAxisRange(xlo, xhi) + + if ylo is not None and yhi is not None: + h.SetAxisRange(ylo, yhi, "Y") + 
+ if isinstance(r, tuple): + h.Rebin2D(r[0], r[1]) + elif r != 1: + h.Rebin(r) + + if xtitle is not None: + h.GetXaxis().SetTitle(xtitle) + + if ytitle is not None: + h.GetYaxis().SetTitle(ytitle) + + if logx: + R.gPad.SetLogx() + + if logy: + R.gPad.SetLogy() + + if ndiv is not None: + h.SetNdivisions(ndiv) + + h.SetLabelSize(0.05, "X") + h.SetTitleSize(0.05, "X") + h.SetLabelSize(0.05, "Y") + h.SetTitleSize(0.05, "Y") + + h.GetXaxis().SetTitleOffset(1.2) + + R.gPad.SetBottomMargin(0.15) + R.gPad.SetLeftMargin(0.12) + R.gPad.SetRightMargin(0.2) + + h.Draw(d) + return h + +def plotn(f, configs, x, y, outname = "valplot"): + + c = R.TCanvas() + c.Divide(x, y) + c._objs = [] + + if isinstance(configs, tuple): + configs = [configs] + + for i, cfg in enumerate(configs): + c.cd(i+1) + c._objs.append(plot(f, *cfg)) + + c.Print(f"{outname}.eps") + + return + +if __name__ == "__main__": + + R.gROOT.SetBatch(True) + R.gStyle.SetOptStat(0) + + fname = "validation.root" + f = R.TFile.Open(fname) + + config = [Hist("P_d0", logy = True, xtitle = "p^{0} [GeV]", ndiv = 5, r = 5), + Hist("Theta_d0", xtitle = "#theta [rad]", ndiv = -4), + Hist("Mass_d0", xtitle = "m^{0} [GeV]", xlo = 0, xhi = 0.001, ndiv = 4), + Hist("Pt_d0", logy = True, xtitle = "p_{T}^{0} [GeV]", ndiv = 10, r = 5), + Hist("Phi_d0", xtitle = "#phi [rad]"), + Hist("ThetaVsP_d0", xtitle = "p^{0} [GeV]", ytitle = "#theta [rad]", logx = True, logy = True, d = "colz") + ] + + plotn(f, config, 3, 2, "daug0") + + config = [Hist("P_d1", logy = True, xtitle = "p^{0} [GeV]", ndiv = 5, r = 5), + Hist("Theta_d1", xtitle = "#theta [rad]", ndiv = -4), + Hist("Mass_d1", xtitle = "m^{0} [GeV]", xlo = 0, xhi = 0.001, ndiv = 4), + Hist("Pt_d1", logy = True, xtitle = "p_{T}^{0} [GeV]", ndiv = 10, r = 5), + Hist("Phi_d1", xtitle = "#phi [rad]"), + Hist("ThetaVsP_d1", xtitle = "p^{0} [GeV]", ytitle = "#theta [rad]", logx = True, logy = True, d = "colz") + ] + + plotn(f, config, 3, 2, "daug1") + + config = [Hist("P_M", logy = True, 
xtitle = "p^{0} [GeV]", ndiv = 5, r = 5), + Hist("Theta_M", xtitle = "#theta [rad]", ndiv = -4), + Hist("Mass_M", xtitle = "m^{0} [GeV]", xlo = 0, xhi = 0.05), + Hist("Pt_M", logy = True, xtitle = "p_{T}^{0} [GeV]", ndiv = 10, r = 5), + Hist("Phi_M", xtitle = "#phi [rad]"), + Hist("ThetaVsP_M", xtitle = "p^{0} [GeV]", ytitle = "#theta [rad]", logx = True, logy = True, d = "colz") + ] + + plotn(f, config, 3, 2, "mother") + + plotn(f, Hist("PIDs", xtitle="PDG Id"), 1, 1, "pid") + + config = [Hist("ThetaVsP_M", xtitle = "p^{0} [GeV]", ytitle = "#theta [rad]", logx = True, logy = True, d = "colz"), + Hist("ThetaVsP_d0", xtitle = "p^{0} [GeV]", ytitle = "#theta [rad]", logx = True, logy = True, d = "colz"), + Hist("ThetaVsP_d1", xtitle = "p^{0} [GeV]", ytitle = "#theta [rad]", logx = True, logy = True, d = "colz") + ] + + plotn(f, config, 2, 2, "twod") + + config = [Hist("Vtx_X", xtitle = "x [mm]", r = 5), + Hist("Vtx_Y", xtitle = "y [mm]", r = 5), + Hist("Vtx_Z", xtitle = "z [mm]", r = 5), + Hist("Vtx_XY", xtitle = "x [mm]", ytitle = "y [mm]", d = "colz", r = (5,5)) + ] + + plotn(f, config, 2, 2, "vtx") diff --git a/Generators/GeneratorUtils/CMakeLists.txt b/Generators/GeneratorUtils/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..d471fbe183ae3da80c2a0ce1a49785ca0b907387 --- /dev/null +++ b/Generators/GeneratorUtils/CMakeLists.txt @@ -0,0 +1,10 @@ +################################################################################ +# Package: GeneratorUtils +################################################################################ + +# Declare the package name: +atlas_subdir( GeneratorUtils ) + +# Install files from the package: +atlas_install_python_modules( python/*.py POST_BUILD_CMD ${ATLAS_FLAKE8} ) + diff --git a/Generators/GeneratorUtils/python/ShiftLOS.py b/Generators/GeneratorUtils/python/ShiftLOS.py new file mode 100644 index 0000000000000000000000000000000000000000..d7e8f21d0c8fbe67346a9e435239e9858515510b --- /dev/null 
+++ b/Generators/GeneratorUtils/python/ShiftLOS.py @@ -0,0 +1,165 @@ + +from AthenaCommon.AppMgr import ServiceMgr as svcMgr +from AthenaPython import PyAthena +from AthenaPython.PyAthena import StatusCode, McEventCollection, CLHEP +from AthenaCommon.SystemOfUnits import m +import ROOT + +try: + from AthenaPython.PyAthena import HepMC3 as HepMC +except ImportError: + from AthenaPython.PyAthena import HepMC as HepMC + +class ShiftLOS(PyAthena.Alg): + def __init__(self, name="ShiftLOS", InputMCEventKey="BeamTruthEvent", OutputMCEventKey="BeamTruthEventShifted", xcross = 0, ycross = 0): + super(ShiftLOS,self).__init__(name=name) + self.InputMCEventKey = InputMCEventKey + self.OutputMCEventKey = OutputMCEventKey + self.xcross = xcross * 1e-6 + self.ycross = ycross * 1e-6 + self.distance = 480*m # Assumes 480m is 0 of FASER coordinate system + return + + def shift_vertices(self, evt): + + # Don't need to shift if at IP + if not self.distance: + return evt + + # Loop over all vertices + for v in evt.vertices: + # Get position + pos = v.position() + x = pos.x() + y = pos.y() + z = pos.z() + dz = self.distance + z + + # Shift x or y by appropriate crossing angle + if self.xcross: + x += dz * self.xcross + self.msg.debug(f"Shifting x by {self.xcross} over {dz}: {pos.x()} -> {x} ") + elif self.ycross: + y += dz * self.ycross + self.msg.debug(f"Shifting y by {self.ycross} over {dz}: {pos.y()} -> {y} ") + + v.set_position(HepMC.FourVector(x, y, z, pos.t())) + + return evt + + + def boost_particles(self, evt): + + pxsum, pysum = 0,0 + pxsum_orig, pysum_orig = 0,0 + + # Loop over all particles + for p in evt.particles: + # Get momentum + mom = p.momentum() + + pxsum_orig += mom.x() + pysum_orig += mom.y() + + # Boost in x or y using CLHEP + boost = CLHEP.Hep3Vector(self.xcross, self.ycross, 0.0) + tmp = CLHEP.HepLorentzVector(mom.px(), mom.py(), mom.pz(), mom.e()) + tmp.boost(boost) + + pxsum += tmp.x() - mom.x() + pysum += tmp.y() - mom.y() + + # Convert back to HepMC + 
p.set_momentum(HepMC.FourVector(tmp.px(), tmp.py(), tmp.pz(), tmp.e())) + + self.msg.debug(f"Change in total px = {pxsum:.1f} MeV ({pxsum/pxsum_orig * 100: .3f} %), change in total py = {pysum:.1f} MeV ({pysum/pysum_orig * 100: .3f} %)") + + return evt + + def execute(self): + self.msg.debug(f"Exectuing {self.getName()}") + + print (self.xcross, self.ycross) + + if not self.xcross and not self.ycross: + return StatusCode.Success + + self.msg.debug(f"Reading {self.InputMCEventKey}") + inevt = self.evtStore[self.InputMCEventKey][0] + + self.msg.debug("Creating output event and collection") + outcoll = McEventCollection() + ROOT.SetOwnership(outcoll, False) + + # Clone input event + outevt = HepMC.GenEvent(inevt.__follow__()) # go from ElementProxy to element itself + + # Modify + outevt = self.shift_vertices(outevt) + outevt = self.boost_particles(outevt) + + # Write output + outcoll.push_back(outevt) + ROOT.SetOwnership(outevt, False) + + self.msg.debug(f"Recording {self.OutputMCEventKey}") + self.evtStore.record(outcoll, self.OutputMCEventKey, True, False) + + return StatusCode.Success + +if __name__ == "__main__": + import argparse, sys + parser = argparse.ArgumentParser(description="Run ShiftLOS") + parser.add_argument("infile", help = "Path to input EVNT file") + parser.add_argument("outfile", help = "Path to output EVNT file") + parser.add_argument("--InputMCEventKey", "-i", default = "BeamTruthEvent", help = "Name of Input MC collection") + parser.add_argument("--OutputMCEventKey", "-o", default = "BeamTruthEventShifted", help = "Name of Output MC collection") + parser.add_argument("--xcross", "-x", default = 0, type = float, help = "Crossing angle of LHC beam in x [urad]") + parser.add_argument("--ycross", "-y", default = 0, type = float, help = "Crossing angle of LHC beam in y [urad]") + parser.add_argument("--nevents", "-n", default = -1, type = int, help = "Number of events to process") + args = parser.parse_args() + + from AthenaCommon.Logging import log 
+ from AthenaCommon.Constants import DEBUG, INFO + + from AthenaCommon.Configurable import Configurable + Configurable.configurableRun3Behavior = 1 + + from CalypsoConfiguration.AllConfigFlags import ConfigFlags + ConfigFlags.Input.isMC = True + ConfigFlags.IOVDb.GlobalTag = "OFLCOND-FASER-01" # Always needed; must match FaserVersion + ConfigFlags.GeoModel.FaserVersion = "FASER-01" # Default FASER geometry + ConfigFlags.Detector.EnableFaserSCT = True + ConfigFlags.Input.Files = [ args.infile ] + ConfigFlags.Output.EVNTFileName = args.outfile + ConfigFlags.lock() + + # Configure components + from CalypsoConfiguration.MainServicesConfig import MainServicesCfg + cfg = MainServicesCfg(ConfigFlags) + + from AthenaPoolCnvSvc.PoolReadConfig import PoolReadCfg + from AthenaPoolCnvSvc.PoolWriteConfig import PoolWriteCfg + + cfg = MainServicesCfg(ConfigFlags) + cfg.merge(PoolReadCfg(ConfigFlags)) + + import McParticleEvent.Pythonizations + + from AthenaConfiguration.ComponentAccumulator import ComponentAccumulator + from AthenaConfiguration.ComponentFactory import CompFactory + + acc = ComponentAccumulator() + alg = ShiftLOS("ShiftLOS", InputMCEventKey=args.InputMCEventKey, OutputMCEventKey=args.OutputMCEventKey, xcross = args.xcross, ycross = args.ycross) + alg.OutputLevel = INFO + acc.addEventAlgo(alg) + cfg.merge(acc) + + itemList = [ "EventInfo#McEventInfo", "McEventCollection#*" ] + from OutputStreamAthenaPool.OutputStreamConfig import OutputStreamCfg + cfg.merge(OutputStreamCfg(ConfigFlags, "EVNT", itemList, disableEventTag = True)) + + sc = cfg.run(maxEvents = args.nevents) + sys.exit(not sc.isSuccess()) + + diff --git a/Generators/GeneratorUtils/python/ShiftLOSConfig.py b/Generators/GeneratorUtils/python/ShiftLOSConfig.py new file mode 100644 index 0000000000000000000000000000000000000000..9683df1f8b3a2b4ee7f831533415315b2972cfda --- /dev/null +++ b/Generators/GeneratorUtils/python/ShiftLOSConfig.py @@ -0,0 +1,24 @@ +#!/usr/bin/env python + +# Copyright (C) 2002-2017 
CERN for the benefit of the ATLAS collaboration + +# import sys +from AthenaConfiguration.MainServicesConfig import AthSequencer +from AthenaConfiguration.ComponentAccumulator import ComponentAccumulator +from AthenaConfiguration.ComponentFactory import CompFactory + +from GeneratorUtils.ShiftLOS import ShiftLOS + + +def ShiftLOSCfg(ConfigFlags, **kwargs) : + import McParticleEvent.Pythonizations + + cfg = ComponentAccumulator() + shift = ShiftLOS(name = kwargs.setdefault("name", "ShiftLOS")) + shift.InputMCEventKey = kwargs.setdefault("InputMCEventKey", "BeamTruthEvent") + shift.OutputMCEventKey = kwargs.setdefault("OutputMCEventKey", "BeamTruthEventShifted") + shift.xcross = kwargs.setdefault("xcross", 0) + shift.ycross = kwargs.setdefault("ycross", 0) + cfg.addEventAlgo(shift, sequenceName = "AthBeginSeq", primary = True) # to run *before* G4 + + return cfg diff --git a/Generators/HEPMCReader/CMakeLists.txt b/Generators/HEPMCReader/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..3076e7abc887d5cc1fabaedddc42d5db36286270 --- /dev/null +++ b/Generators/HEPMCReader/CMakeLists.txt @@ -0,0 +1,9 @@ +################################################################################ +# Package: HEPMCGenie +################################################################################ + +# Declare the package name: +atlas_subdir( HEPMCReader ) + +# Install files from the package: +atlas_install_python_modules( python/*.py POST_BUILD_CMD ${ATLAS_FLAKE8} ) diff --git a/Generators/HEPMCReader/python/HepMCReaderConfig.py b/Generators/HEPMCReader/python/HepMCReaderConfig.py new file mode 100644 index 0000000000000000000000000000000000000000..72081a86f237a8a99154a61bf7d7713f12282d06 --- /dev/null +++ b/Generators/HEPMCReader/python/HepMCReaderConfig.py @@ -0,0 +1,24 @@ +#!/usr/bin/env python + +# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration + +# import sys +from AthenaConfiguration.MainServicesConfig import 
AthSequencer +from AthenaConfiguration.ComponentAccumulator import ComponentAccumulator +from AthenaConfiguration.ComponentFactory import CompFactory + +from TruthIO.TruthIOConf import HepMCReadFromFile + + +def HepMCReaderCfg(ConfigFlags, **kwargs) : + cfg = ComponentAccumulator(AthSequencer("AthBeginSeq", Sequential = True)) + + + from TruthIO.TruthIOConf import HepMCReadFromFile + hepmc = CompFactory.HepMCReadFromFile(name = kwargs.setdefault("name", "FASERHepMCReader")) + hepmc.InputFile = ConfigFlags.Input.Files[0] + hepmc.McEventKey = kwargs.setdefault("McEventKey", "BeamTruthEvent") + + cfg.addEventAlgo(hepmc, sequenceName = "AthBeginSeq", primary = True) # to run *before* G4 + + return cfg diff --git a/Generators/ParticleGun/CMakeLists.txt b/Generators/ParticleGun/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..1794305ed0291e77478aae72b1216db87fce0810 --- /dev/null +++ b/Generators/ParticleGun/CMakeLists.txt @@ -0,0 +1,14 @@ +################################################################################ +# Package: ParticleGun +################################################################################ + +# Declare the package name: +atlas_subdir( ParticleGun ) + +# Install files from the package: +atlas_install_python_modules( python/*.py POST_BUILD_CMD ${ATLAS_FLAKE8} ) + +# Install files from the package: +atlas_install_joboptions( share/common/*.py + share/example/*.py ) + diff --git a/Generators/ParticleGun/README b/Generators/ParticleGun/README new file mode 100644 index 0000000000000000000000000000000000000000..6cdb698eda4f549c076ac3a3ce732ad65225c11d --- /dev/null +++ b/Generators/ParticleGun/README @@ -0,0 +1,5 @@ +ParticleGun documentation +------------------------- +See https://twiki.cern.ch/twiki/bin/viewauth/AtlasProtected/ParticleGunForAtlas +for some coherent documentation that should be kept up to date. 
+ diff --git a/Generators/ParticleGun/python/__init__.py b/Generators/ParticleGun/python/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4e88fc56c49679a210ad57b62055a6a7c0867ddd --- /dev/null +++ b/Generators/ParticleGun/python/__init__.py @@ -0,0 +1,129 @@ +# Copyright (C) 2002-2021 CERN for the benefit of the ATLAS collaboration + +from AthenaCommon.AppMgr import ServiceMgr as svcMgr +from GeneratorModules.EvgenAlg import EvgenAlg +from ParticleGun.samplers import ParticleSampler +from ParticleGun.samplers import * # noqa: F401, F403 (import into our namespace) +# commenting out the HepMC import for now +#try: +# from AthenaPython.PyAthena import HepMC3 as HepMC +#except ImportError: +# from AthenaPython.PyAthena import HepMC as HepMC + +from AthenaPython.PyAthena import StatusCode +import ROOT,random + +__author__ = "Andy Buckley <andy.buckley@cern.ch>" + +class ParticleGun(EvgenAlg): + """ + A simple but flexible algorithm for generating events from simple distributions. + """ + + def __init__(self, name="ParticleGun", randomSvcName="AtRndmGenSvc", randomStream="ParticleGun", randomSeed=None): + super(ParticleGun, self).__init__(name=name) + self.samplers = [ParticleSampler()] + self.randomStream = randomStream + self.randomSvcName = randomSvcName + self.randomSeed = randomSeed + + @property + def sampler(self): + "Get the first (and presumed only) sampler" + return self.samplers[0] if self.samplers else None + @sampler.setter + def sampler(self, s): + "Set the samplers list to include only a single sampler, s" + self.samplers = [s] + + + def initialize(self): + """ + Pass the AtRndmGenSvc seed to Python's random module, or use a fixed value set via pg.randomSeed. 
+ """ + seed = None + ## Use self.randomSeed directly, or if it's None find a seed string from the ATLAS random number service + if self.randomSeed is not None: + seed = self.randomSeed + else: + randomSvc = getattr(svcMgr, self.randomSvcName, None) + if randomSvc is not None: + for seedstr in randomSvc.Seeds: + if seedstr.startswith(self.randomStream): + seed = seedstr + self.msg.info("ParticleGun: Using random seed '%s' ", seed) + break + if seed is None: + self.msg.warning("ParticleGun: Failed to find a seed for the random stream named '%s' ", self.randomStream) + else: + self.msg.warning("ParticleGun: Failed to find random number service called '%s' ", self.randomSvcName) + ## Apply the seed + if seed is not None: + random.seed(seed) + return StatusCode.Success + else: + self.msg.error("ParticleGun: randomSeed property not set, and no %s random number service found", self.randomSvcName) + return StatusCode.Failure + + + def fillEvent(self, evt): + """ + Sample a list of particle properties, which are then used to create a new GenEvent in StoreGate. + """ + ## Set event weight(s) + # TODO: allow weighted sampling? + try: + from AthenaPython.PyAthena import HepMC3 as HepMC + except ImportError: + from AthenaPython.PyAthena import HepMC as HepMC + evt.weights().push_back(1.0) + + ## Make and fill particles + for s in self.samplers: + particles = s.shoot() + for p in particles: + ## Debug printout of particle properties + #print DEBUG0, p.pid, p.mom.E(), p.mom.Pt(), p.mom.M() + #print "DEBUG1 (px,py,pz,E) = (%0.2e, %0.2e, %0.2e, %0.2e)" % (p.mom.Px(), p.mom.Py(), p.mom.Pz(), p.mom.E()) + #print "DEBUG2 (eta,phi,pt,m) = (%0.2e, %0.2e, %0.2e, %0.2e)" % (p.mom.Eta(), p.mom.Phi(), p.mom.Pt(), p.mom.M()) + #print "DEBUG3 (x,y,z,t) = (%0.2e, %0.2e, %0.2e, %0.2e)" % (p.pos.X(), p.pos.Y(), p.pos.Z(), p.pos.T()) + + ## Make particle-creation vertex + # TODO: do something cleverer than one vertex per particle? 
+ pos = HepMC.FourVector(p.pos.X(), p.pos.Y(), p.pos.Z(), p.pos.T()) + gv = HepMC.GenVertex(pos) + ROOT.SetOwnership(gv, False) + evt.add_vertex(gv) + + ## Make particle with status == 1 + mom = HepMC.FourVector(p.mom.Px(), p.mom.Py(), p.mom.Pz(), p.mom.E()) + gp = HepMC.GenParticle() + gp.set_status(1) + gp.set_pdg_id(p.pid) + gp.set_momentum(mom) + if p.mass is not None: + gp.set_generated_mass(p.mass) + ROOT.SetOwnership(gp, False) + gv.add_particle_out(gp) + + return StatusCode.Success + + +## PyAthena HepMC notes +# +## evt.print() isn't valid syntax in Python2 due to reserved word +# TODO: Add a Pythonisation, e.g. evt.py_print()? +#getattr(evt, 'print')() +# +## How to check that the StoreGate key exists and is an McEventCollection +# if self.sg.contains(McEventCollection, self.sgkey): +# print self.sgkey + " found!" +# +## Modifying an event other than that supplied as an arg +# mcevts = self.sg[self.sgkey] +# for vtx in mcevts[0].vertices: # only way to get the first vtx?! +# gp2 = HepMC.GenParticle() +# gp2.set_momentum(HepMC.FourVector(1,2,3,4)) +# gp2.set_status(1) +# vtx.add_particle_out(gp2) +# break diff --git a/Generators/ParticleGun/python/histsampling.py b/Generators/ParticleGun/python/histsampling.py new file mode 100644 index 0000000000000000000000000000000000000000..c64112cc84e23963b63ea381817d1457f1c2a122 --- /dev/null +++ b/Generators/ParticleGun/python/histsampling.py @@ -0,0 +1,132 @@ +# Copyright (C) 2002-2021 CERN for the benefit of the ATLAS collaboration + +""" +Tools for histogram sampling, in particular inverse transform sampling which is +missing from ROOT's TH2 classes. +""" + +__author__ = "Andy Buckley <andy.buckley@cern.ch>" + +import random, ROOT + + +def load_hist(*args): + """ + Load a histogram from a filename/TFile and histo name. If a single arg is + provided, it has to be a histo object and will be cloned before return. 
+ """ + h = None + if len(args) == 1 and issubclass(type(args[0]), ROOT.TH1): + h = args[0].Clone() + elif len(args) == 2: + if isinstance(args[0], str) and isinstance(args[1], str) : + f = ROOT.TFile.Open(args[0]) + h = f.Get(args[1]).Clone() + #f.Close() + elif type(args[0]) is ROOT.TFile and type(args[1]) is str: + h = args[0].Get(args[1]).Clone() + if h is None: + raise Exception("Error in histogram loading from " + args) + return h + + +def get_sampling_vars(h): + """ + Get the following from a histogram h, since the ROOT API sucks: + * list of global bin IDs (not even contiguous for 2D, gee thanks ROOT) + * dict mapping global bin IDs to a tuple of axis bin IDs + * list of nbins+1 cumulative bin values, in the same order as globalbins + """ + globalbin_to_axisbin = {} # for reverse axis bin lookup to get edges + globalbins = [] # because they aren't easily predicted, nor contiguous + cheights = [0] # cumulative "histogram" from which to uniformly sample + if issubclass(type(h), ROOT.TH1): + for ix in range(1, h.GetNbinsX()+1): + iglobal = h.GetBin(ix) + globalbins.append(iglobal) + globalbin_to_axisbin[iglobal] = (ix,) + cheights.append(cheights[-1] + h.GetBinContent(iglobal)) + elif issubclass(type(h), ROOT.TH2): + for ix in range(1, h.GetNbinsX()+1): + for iy in range(1, h.GetNbinsY()+1): + iglobal = h.GetBin(ix, iy) + globalbins.append(iglobal) + globalbin_to_axisbin[iglobal] = (ix, iy) + cheights.append(cheights[-1] + h.GetBinContent(iglobal)) + return globalbins, globalbin_to_axisbin, cheights + + +def get_random_bin(globalbins, cheights): + """ + Choose a random bin from the cumulative distribution list of nbins+1 entries. + + TODO: Search more efficiently (lin and log guesses, then lin search or + binary split depending on vector size). 
+ """ + assert len(cheights) == len(globalbins)+1 + randomheight = random.uniform(0, cheights[-1]) + for i, iglobal in enumerate(globalbins): + if randomheight >= cheights[i] and randomheight < cheights[i+1]: + return iglobal + raise Exception("Sample fell outside range of cumulative distribution?!?!") + + +def get_random_x(h, globalbins, cheights, globalbin_to_axisbin): + """ + Choose a random bin via get_random_bin, then pick a uniform random x + point in that bin (without any attempt at estimating the in-bin distribution). + """ + irand = get_random_bin(globalbins, cheights) + axisids = globalbin_to_axisbin.get(irand) + assert axisids is not None + xrand = random.uniform(h.GetXaxis().GetBinLowEdge(axisids[0]), h.GetXaxis().GetBinUpEdge(axisids[0])) + return xrand + + +def get_random_xy(h2, globalbins, cheights, globalbin_to_axisbin): + """ + Choose a random bin via get_random_bin, then pick a uniform random x,y + point in that bin (without any attempt at estimating the in-bin distribution). 
+ """ + irand = get_random_bin(globalbins, cheights) + axisids = globalbin_to_axisbin.get(irand) + assert axisids is not None + xrand = random.uniform(h2.GetXaxis().GetBinLowEdge(axisids[0]), h2.GetXaxis().GetBinUpEdge(axisids[0])) + yrand = random.uniform(h2.GetYaxis().GetBinLowEdge(axisids[1]), h2.GetYaxis().GetBinUpEdge(axisids[1])) + return xrand, yrand + + +class TH1(object): + "Minimal wrapper for ROOT TH1, for sampling consistency and easy loading" + + def __init__(self, *args): + self.th1 = load_hist(*args) + self.globalbins, self.globalbin_to_axisbin, self.cheights = None, None, None + + def GetRandom(self): + "A GetRandom that works for TH1s and uses Python random numbers" + if self.globalbins is None or self.globalbin_to_axisbin is None or self.cheights is None: + self.globalbins, self.globalbin_to_axisbin, self.cheights = get_sampling_vars(self.th1) + return get_random_x(self.th1, self.globalbins, self.cheights, self.globalbin_to_axisbin) + + def __getattr__(self, attr): + "Forward all attributes to the contained TH1" + return getattr(self.th1, attr) + + +class TH2(object): + "Minimal wrapper for ROOT TH2, for easy loading and to allow 2D sampling" + + def __init__(self, *args): + self.th2 = load_hist(*args) + self.globalbins, self.globalbin_to_axisbin, self.cheights = None, None, None + + def GetRandom(self): + "A GetRandom that works for TH2s" + if self.globalbins is None or self.globalbin_to_axisbin is None or self.cheights is None: + self.globalbins, self.globalbin_to_axisbin, self.cheights = get_sampling_vars(self.th2) + return get_random_xy(self.th2, self.globalbins, self.cheights, self.globalbin_to_axisbin) + + def __getattr__(self, attr): + "Forward other attributes to the contained TH2" + return getattr(self.th2, attr) diff --git a/Generators/ParticleGun/python/samplers.py b/Generators/ParticleGun/python/samplers.py new file mode 100644 index 0000000000000000000000000000000000000000..90e9676a5ce20dc1b52e48c79f0b36a7da49f639 --- /dev/null +++ 
b/Generators/ParticleGun/python/samplers.py @@ -0,0 +1,912 @@ +# Copyright (C) 2002-2021 CERN for the benefit of the ATLAS collaboration + +import ROOT, math, random +from ParticleGun.histsampling import TH1 + +## For convenience +PI = math.pi +TWOPI = 2*math.pi + + +class Sampler(object): + "Base class for all samplers" + + def shoot(self): + return RuntimeError("Can't sample from an abstract sampler object.") + + def __call__(self): + """This is the call method that will actually be used (so that normal + functions can also be passed in as samplers).""" + return self.shoot() + + # TODO: add a sampling weight? + + +class ConstSampler(Sampler): + "A special-case sampler which just returns one value rather than sampling." + + def __init__(self, val): + self.val = val + + def shoot(self): + return self.val + + def __repr__(self): + return "ConstSampler[%s]" % str(self.val) + + +## Continuous distribution samplers + +class ContinuousSampler(Sampler): + "Base class for samplers from continuous distributions." + pass + + +class UniformSampler(ContinuousSampler): + "Uniformly sample in the range [low,high)." + + def __init__(self, low, high): + assert(low <= high) + self.low = float(low) + self.high = float(high) + + def shoot(self): + return random.uniform(self.low, self.high) + + +class ModUniformSampler(ContinuousSampler): + "Uniformly sample in the modulus range (-high,low]+[low,high)." + + def __init__(self, low, high): + assert(low == abs(low) and high == abs(high)) + assert(low <= high) + self.low = float(low) + self.high = float(high) + + def shoot(self): + val = random.uniform(self.low, self.high) + if random.random() > 0.5: + val *= -1 + return val + + +class DisjointUniformSampler(ContinuousSampler): + "Uniformly sample from a set of disjoint intervals." + + def __init__(self, ranges): + """ + The ranges variable can either be a list of increasing numbers or a + list of pairs of numbers. 
+ + The former case will be treated as + defining alternating on/off ranges for sampling, starting with an active + one (i.e. it's a list of bin edges). The latter way specifically lists + the 'on' regions only, with their start and end values in the pairs. + + The behaviour is undefined if the numbers are not ordered or overlap -- + i.e. it might work but hasn't been designed that way and might change in + future. Don't rely on this behaviour! + """ + if not ranges: + raise Exception("You must supply at least one non-null sampling range") + if hasattr(ranges[0], "__len__"): + assert all(len(x) == 2 for x in ranges) + self.ranges = ranges + else: + assert len(ranges) > 1 + lows = [x for x in ranges[:-1]] + highs = [x for x in ranges[1:]] + myranges = [] + for i, pair in enumerate(zip(lows, highs)): + if i % 2 == 0: + myranges.append(pair) + assert len(myranges) == len(ranges) // 2 + self.ranges = myranges + + def _getRanges(self): + return self._ranges + + def _setRanges(self, ranges): + # TODO: Check that ranges don't overlap + self._ranges = ranges + self._totalwidth = sum(r[1] - r[0] for r in ranges) + + runningwidth = 0.0 + self._divisions = [0.0] + for r in ranges: + assert(r[1] >= r[0]) + runningwidth += float(r[1] - r[0]) + self._divisions.append(runningwidth) + self._totalwidth = runningwidth + for i in range(len(self._divisions)): + self._divisions[i] = float(self._divisions[i]) / float(self._totalwidth) + + ranges = property(_getRanges, _setRanges) + + def _map_unit_to_val(self, x): + assert(x >= 0 and x <= 1) + idx = None + rem = None + for i in range(len(self._divisions) - 1): + if x >= self._divisions[i] and x < self._divisions[i+1]: + idx = i + rem = x - self._divisions[i] + break + if idx is None: + raise ValueError("No matching division found in unit interval! 
How?") + val = self.ranges[idx][0] + self._totalwidth * rem + return val + + def shoot(self): + rand = random.random() + val = self._map_unit_to_val(rand) + return val + + +class LogSampler(ContinuousSampler): + "Randomly sample from an exponential distribution (i.e. uniformly on a log scale)." + + def __init__(self, low, high): + self.low = float(low) + self.high = float(high) + + def shoot(self): + rand = random.random() + logval = rand * math.log(self.high) + (1 - rand) * math.log(self.low) + val = math.exp(logval) + return val + + +class GaussianSampler(ContinuousSampler): + "Randomly sample from a 1D Gaussian distribution." + + def __init__(self, mean, sigma, oneside = False): + self.mean = float(mean) + self.sigma = float(sigma) + self.oneside = bool(oneside) + + def shoot(self): + if self.oneside: + return abs(random.gauss(self.mean, self.sigma)) + else: + return random.gauss(self.mean, self.sigma) + +class InvSampler(ContinuousSampler): + "Randomly sample from a 1/x distribution." + + def __init__(self, low, high): + self.low = float(low) + self.high = float(high) + + def shoot(self): + invx = random.uniform(1/self.high, 1/self.low) #< limit inversion not actually necessary + return 1./invx + + +######################################## + + +class TH1Sampler(ContinuousSampler): + "Randomly sample from a 1D ROOT histogram." + + def __init__(self, *args): + self.hist = TH1(*args) + if self.hist.GetEntries() < 1: + raise Exception("Histogram %s is EMPTY! Cannot sample" % self.hist.GetName()) + + def shoot(self): + return self.hist.GetRandom() + + +######################################## + + +## Discrete sequence samplers + +class DiscreteSampler(Sampler): + "Base class for samplers from lists of discrete values" + pass + + +class RandomSeqSampler(DiscreteSampler): + "Uniformly random sample from a list of values." 
+ + def __init__(self, *args): + if len(args) == 1: + self.sequence = args[0] + else: + self.sequence = args + + def shoot(self): + return random.choice(self.sequence) +# Alias: +RndmSeq = RandomSeqSampler + + +class CyclicSeqSampler(DiscreteSampler): + "Sequentially sample from a list of values, returning to the beginning once exhausted." + + def __init__(self, *args): + if len(args) == 1: + self.sequence = args[0] + else: + self.sequence = args + self.index = 0 + + def shoot(self): + self.index = (self.index + 1) % len(self.sequence) + return self.sequence[self.index] +## Alias: +Sequence = CyclicSeqSampler + + +######################################## + + +## Convenience function for sampler-making from Python literals + +def mksampler(x): + """ + Automatically cast the provided object to a sampler type. This is used + extensively inside the particle and position samplers, so that the user + can pass in a primitive type like a number or list and it will be + treated as if the more verbose sampler constructors had been called. + + Behaviour: + - if x can be called, i.e. x() is valid, we just return x; + - a Python list (square brackets) will be converted to a continuous + UniformSampler or DisjointUniformSampler; + - a Python tuple (round brackets/parentheses) will be treated + as a discrete CyclicSeqSampler; + - a Python set (curly brackets/braces) will be treated + as a discrete RandomSeqSampler; + - otherwise a ConstSampler will be created from x, so that x is + returned when the sampler is called. + """ + if hasattr(x, "__call__"): + return x + elif type(x) is list: + # NB: disjoint ranges can be given as nested lists, e.g. 
[(1,2), (4,5)] + if len(x) == 2 and type(x[0]) in (int,float) and type(x[1]) in (int,float): + #print "MKSAMPLER: Casting %s to UniformSampler" % str(x) + return UniformSampler(*x) + elif len(x) > 2 or (len(x) > 0 and type(x[0]) not in (int,float)): + #print "MKSAMPLER: Casting %s to DisjointUniformSampler" % str(x) + return DisjointUniformSampler(x) + if len(x) < 2: + raise Exception("Supplied list could not be converted to a continuous sampler") + elif type(x) is tuple: + #print "MKSAMPLER: Casting %s to CyclicSeqSampler" % str(x) + return CyclicSeqSampler(*x) + elif type(x) is set: + #print "MKSAMPLER: Casting %s to RandomSeqSampler" % str(x) + return RandomSeqSampler(*x) + else: + #print "MKSAMPLER: Casting %s to ConstSampler" % str(x) + return ConstSampler(x) + + +######################################## + + +## Beam-spot (origin vertex) sampling + +class PosSampler(Sampler): + """ + Sampler of position 3-vectors, for modelling a beamspot. + """ + + def __init__(self, x, y, z, t=0): + self.x = x + self.y = y + self.z = z + self.t = t + + @property + def x(self): + "x position sampler" + return self._x + @x.setter + def x(self, x): + self._x = mksampler(x) + + @property + def y(self): + "y position sampler" + return self._y + @y.setter + def y(self, y): + self._y = mksampler(y) + + @property + def z(self): + "z position sampler" + return self._z + @z.setter + def z(self, z): + self._z = mksampler(z) + + @property + def t(self): + "Time sampler" + return self._t + @t.setter + def t(self, t): + self._t = mksampler(t) + + def shoot(self): + x = self.x() + y = self.y() + z = self.z() + t = self.t() + #print "POS =", x, y, z, t + return ROOT.TLorentzVector(x, y, z, t) + + +# TODO: Make a 3-Gaussian BeamspotSampler + + +## Momentum sampling + +class MomSampler(Sampler): + """ + Base class for four-momentum sampling. + + There are many ways to unambiguously specify four-momenta. Not all are sensible/useful, + though. 
The following are implemented here: + * M,px,py,pz + * E,M,phi,eta + * E,M,phi,y + * E,M,phi,theta + * pT,M,phi,eta + * pT,M,phi,y + * pT,M,phi,theta + + Possibly the following (not yet implemented) could be useful: let us know if you + need one of these: + * E,px,py,pz + * pT,E,M,phi + """ + pass + + +class NullMomSampler(MomSampler): + "A momentum sampler which just returns a null vector with the given mass." + + def __init__(self, mass=0.0): + self.mass = mass + + @property + def mass(self): + "Mass sampler" + return self._m + @mass.setter + def mass(self, x): + self._m = mksampler(x) + + def shoot(self): + v4 = ROOT.TLorentzVector(0, 0, 0, self.mass) + return v4 + + +class MXYZSampler(MomSampler): + "Create a 4-momentum vector from mass, px, py, pz distributions/samplers." + + def __init__(self, px, py, pz, mass=0.0): + self.mass = mass + self.px = px + self.py = py + self.pz = pz + + @property + def mass(self): + "Mass sampler" + return self._m + @mass.setter + def mass(self, x): + self._m = mksampler(x) + + @property + def px(self): + "px sampler" + return self._px + @px.setter + def px(self, x): + self._px = mksampler(x) + + @property + def py(self): + "py sampler" + return self._py + @py.setter + def py(self, x): + self._py = mksampler(x) + + @property + def pz(self): + "pz sampler" + return self._pz + @pz.setter + def pz(self, x): + self._pz = mksampler(x) + + def shoot(self): + m = self.mass() + px = self.px() + py = self.py() + pz = self.pz() + e = math.sqrt(px**2 + py**2 + pz**2 + m**2) + v4 = ROOT.TLorentzVector(px, py, pz, e) + return v4 + + +class EEtaMPhiSampler(MomSampler): + "Create a 4-momentum vector from E, eta, m and phi distributions/samplers." + + # TODO: ensure that E >= m! 
+ + def __init__(self, energy, eta, mass=0.0, phi=[0, TWOPI]): + self.energy = energy + self.eta = eta + self.mass = mass + self.phi = phi + + @property + def energy(self): + "Energy sampler" + return self._e + @energy.setter + def energy(self, x): + self._e = mksampler(x) + + @property + def eta(self): + "Pseudorapidity sampler" + return self._eta + @eta.setter + def eta(self, x): + self._eta = mksampler(x) + + @property + def mass(self): + "Mass sampler" + return self._m + @mass.setter + def mass(self, x): + self._m = mksampler(x) + + @property + def phi(self): + "Azimuthal angle sampler" + return self._phi + @phi.setter + def phi(self, x): + self._phi = mksampler(x) + + def shoot(self): + """ + eta = - ln(tan(theta/2)) / 2 + => theta = 2 atan( exp(-eta) ) + """ + eta = self.eta() + theta = 2 * math.atan(math.exp(-eta)) + e = self.energy() + m = self.mass() + p = math.sqrt( e**2 - m**2 ) + pz = p * math.cos(theta) + pt = p * math.sin(theta) + phi = self.phi() + px = pt * math.cos(phi) + py = pt * math.sin(phi) + v4 = ROOT.TLorentzVector(px, py, pz, e) + return v4 + + +class ERapMPhiSampler(MomSampler): + "Create a 4-momentum vector from E, y, m and phi distributions." + + # TODO: ensure that E >= m! 
+ + def __init__(self, energy, rap, mass=0.0, phi=[0, TWOPI]): + self.energy = energy + self.rap = rap + self.mass = mass + self.phi = phi + + @property + def energy(self): + "Energy sampler" + return self._e + @energy.setter + def energy(self, x): + self._e = mksampler(x) + + @property + def rap(self): + "Rapidity sampler" + return self._rap + @rap.setter + def rap(self, x): + self._rap = mksampler(x) + + @property + def mass(self): + "Mass sampler" + return self._m + @mass.setter + def mass(self, x): + self._m = mksampler(x) + + @property + def phi(self): + "Azimuthal angle sampler" + return self._phi + @phi.setter + def phi(self, x): + self._phi = mksampler(x) + + def shoot(self): + """ + y = 0.5 * ln((E+pz)/(E-pz)) + -> (E^2 - pz^2) exp(2y) = (E+pz)^2 + & (E^2 - pz^2) exp(-2y) = (E-pz)^2 + -> E = sqrt(pt^2 + m^2) cosh(y) + -> pz = sqrt(pt^2 + m^2) sinh(y) + -> sqrt(pt^2 + m^2) = E / cosh(y) + """ + e = self.energy() + y = self.rap() + sqrt_pt2_m2 = e / math.cosh(y) + pz = sqrt_pt2_m2 * math.sinh(y) + m = self.mass() + pt = math.sqrt( sqrt_pt2_m2**2 - m**2 ) + phi = self.phi() + px = pt * math.cos(phi) + py = pt * math.sin(phi) + v4 = ROOT.TLorentzVector(px, py, pz, e) + return v4 + + +class EThetaMPhiSampler(MomSampler): + "Create a 4-momentum vector from E, theta, m and phi distributions/samplers." + + # TODO: ensure that E >= m! 
+ + def __init__(self, energy, theta, mass=0.0, phi=[0, TWOPI]): + self.energy = energy + self.theta = theta + self.mass = mass + self.phi = phi + + @property + def energy(self): + "Energy sampler" + return self._e + @energy.setter + def energy(self, x): + self._e = mksampler(x) + + @property + def theta(self): + "Polar angle sampler" + return self._theta + @theta.setter + def theta(self, x): + self._theta = mksampler(x) + + @property + def mass(self): + "Mass sampler" + return self._m + @mass.setter + def mass(self, x): + self._m = mksampler(x) + + @property + def phi(self): + "Azimuthal angle sampler" + return self._phi + @phi.setter + def phi(self, x): + self._phi = mksampler(x) + + def shoot(self): + """ + p = sqrt(e^2 - m^2) + pz = p cos(theta) + pt = p sin(theta) + """ + e = self.energy() + m = self.mass() + p = math.sqrt( e**2 - m**2 ) + theta = self.theta() + pz = p * math.cos(theta) + pt = p * math.sin(theta) + phi = self.phi() + px = pt * math.cos(phi) + py = pt * math.sin(phi) + v4 = ROOT.TLorentzVector(px, py, pz, e) + return v4 + + +class PtEtaMPhiSampler(MomSampler): + "Create a 4-momentum vector from pt, eta, m and phi distributions/samplers." 
+ + def __init__(self, pt, eta, mass=0.0, phi=[0, TWOPI]): + self.pt = pt + self.eta = eta + self.mass = mass + self.phi = phi + + @property + def pt(self): + "Transverse momentum sampler" + return self._pt + @pt.setter + def pt(self, x): + self._pt = mksampler(x) + + @property + def eta(self): + "Pseudorapidity sampler" + return self._eta + @eta.setter + def eta(self, x): + self._eta = mksampler(x) + + @property + def mass(self): + "Mass sampler" + return self._m + @mass.setter + def mass(self, x): + self._m = mksampler(x) + + @property + def phi(self): + "Azimuthal angle sampler" + return self._phi + @phi.setter + def phi(self, x): + self._phi = mksampler(x) + + def shoot(self): + """ + eta = - ln(tan(theta/2)) / 2 + => theta = 2 atan( exp(-eta) ) + """ + eta = self.eta() + theta = 2 * math.atan(math.exp(-eta)) + pt = self.pt() + p = pt / math.sin(theta) + phi = self.phi() + px = pt * math.cos(phi) + py = pt * math.sin(phi) + pz = p * math.cos(theta) + m = self.mass() + e = math.sqrt( p**2 + m**2 ) + v4 = ROOT.TLorentzVector(px, py, pz, e) + return v4 + + +class PtRapMPhiSampler(MomSampler): + "Create a 4-momentum vector from pt, y, m and phi distributions/samplers." 
+ + def __init__(self, pt, rap, mass=0.0, phi=[0, TWOPI]): + self.pt = pt + self.rap = rap + self.mass = mass + self.phi = phi + + @property + def pt(self): + "Transverse momentum sampler" + return self._pt + @pt.setter + def pt(self, x): + self._pt = mksampler(x) + + @property + def rap(self): + "Rapidity sampler" + return self._rap + @rap.setter + def rap(self, x): + self._rap = mksampler(x) + + @property + def mass(self): + "Mass sampler" + return self._m + @mass.setter + def mass(self, x): + self._m = mksampler(x) + + @property + def phi(self): + "Azimuthal angle sampler" + return self._phi + @phi.setter + def phi(self, x): + self._phi = mksampler(x) + + def shoot(self): + """ + y = 0.5 * ln((E+pz)/(E-pz)) + -> (E^2 - pz^2) exp(2y) = (E+pz)^2 + & (E^2 - pz^2) exp(-2y) = (E-pz)^2 + -> E = sqrt(pt^2 + m^2) cosh(y) + -> pz = sqrt(pt^2 + m^2) sinh(y) + -> sqrt(pt^2 + m^2) = E / cosh(y) + """ + pt = self.pt() + assert pt >= 0 + m = self.mass() + assert m >= 0 + sqrt_pt2_m2 = math.sqrt( pt**2 + m**2 ) + y = self.rap() + e = sqrt_pt2_m2 * math.cosh(y) + pz = sqrt_pt2_m2 * math.sinh(y) + phi = self.phi() + px = pt * math.cos(phi) + py = pt * math.sin(phi) + v4 = ROOT.TLorentzVector(px, py, pz, e) + return v4 + + +class PtThetaMPhiSampler(MomSampler): + "Create a 4-momentum vector from pt, theta, m and phi distributions/samplers." 
+ + def __init__(self, pt, theta, mass=0.0, phi=[0, TWOPI]): + self.pt = pt + self.theta = theta + self.mass = mass + self.phi = phi + + @property + def pt(self): + "Transverse momentum sampler" + return self._pt + @pt.setter + def pt(self, x): + self._pt = mksampler(x) + + @property + def theta(self): + "Polar angle sampler" + return self._theta + @theta.setter + def theta(self, x): + self._theta = mksampler(x) + + @property + def mass(self): + "Mass sampler" + return self._m + @mass.setter + def mass(self, x): + self._m = mksampler(x) + + @property + def phi(self): + "Azimuthal angle sampler" + return self._phi + @phi.setter + def phi(self, x): + self._phi = mksampler(x) + + def shoot(self): + """ + p = pt / math.sin(theta) + pz = p cos(theta) + pt = p sin(theta) + E = sqrt(p^2 + m^2) + """ + theta = self.theta() + pt = self.pt() + p = pt / math.sin(theta) + phi = self.phi() + px = pt * math.cos(phi) + py = pt * math.sin(phi) + pz = p * math.cos(theta) + m = self.mass() + e = math.sqrt( p**2 + m**2 ) + v4 = ROOT.TLorentzVector(px, py, pz, e) + return v4 + + +# TODO: add the missing ways to specify/sample 4-momenta + + +########################################################### + + +## Combined samplers returning a particle configuration + + +## A default dictionary of particle masses (in MeV) +MASSES = { 22 : 0.0, # photon + 11 : 0.5, # electron + 12 : 0.0, # nu_e + 13 : 105.7, # muon + 14 : 0.0, # nu_mu + 15 : 1777.8, # tau + 16 : 0.0, # nu_tau + 2212 : 938.0, # proton + 2112 : 940.0, # neutron + 111 : 135.0, # pi0 + 211 : 140.0, # pi+- + 221 : 547.0, # eta + 321 : 494.0, # K+- + 311 : 598.0 # K0 + } + + +class SampledParticle(object): + """ + A particle object for use as a return value from the particle samplers. 
+ """ + def __init__(self, pid=None, mom=ROOT.TLorentzVector(0,0,0,0), pos=ROOT.TLorentzVector(0,0,0,0)): + """ + Constructor/initializer: PID is the (int) PDG particle ID code + of this particle, mom is its momentum 4-vector, and pos is + the vertex 4-position (both as ROOT.TLorentzVector, in MeV). + """ + self.pid = pid + self.mom = mom + self.pos = pos + self.mass = None + + +class ParticleSampler(Sampler): + """ + A simple N-independent-particle sampler. + """ + + def __init__(self, pid=999, + mom=NullMomSampler(), + n=1, + pos=PosSampler(0, 0, 0)): + self.pid = pid + self.mom = mom + self.n = n + self.pos = pos + self.massdict = MASSES + self.mass_override = True + + @property + def pid(self): + "Particle ID code sampler" + return self._pid + @pid.setter + def pid(self, x): + self._pid = mksampler(x) + + @property + def n(self): + "Particle number sampler" + return self._n + @n.setter + def n(self, x): + self._n = mksampler(x) + + def shoot(self): + "Return a vector of sampled particles" + numparticles = self.n() + rtn = [] + for i in range(numparticles): + ## Sample the particle ID and create a particle + pid = self.pid() + p = SampledParticle(pid) + ## Pass mass info to the v4 sampler and set same generated mass + if self.mass_override and abs(pid) in self.massdict: + m = self.massdict[abs(pid)] + self.mom.mass = m + p.mass = m + # TODO: Should the particle generated_mass be set from the sampler by default? 
+ ## Sample momentum and vertex positions into the particle + p.mom = self.mom() + p.pos = self.pos() + ## Add particle to output list + rtn.append(p) + return rtn diff --git a/Generators/ParticleGun/share/common/ParticleGun_Common.py b/Generators/ParticleGun/share/common/ParticleGun_Common.py new file mode 100644 index 0000000000000000000000000000000000000000..3fab6fb0b7c4093fe92bd0e34e57bc7093b0281a --- /dev/null +++ b/Generators/ParticleGun/share/common/ParticleGun_Common.py @@ -0,0 +1,4 @@ +## Common setup for ParticleGun +import ParticleGun as PG +genSeq += PG.ParticleGun() +evgenConfig.generators += ["ParticleGun"] diff --git a/Generators/ParticleGun/share/common/ParticleGun_EoverP_Config.py b/Generators/ParticleGun/share/common/ParticleGun_EoverP_Config.py new file mode 100644 index 0000000000000000000000000000000000000000..8b78a953f31c253a9ead42158bf4d8b0dab77ce0 --- /dev/null +++ b/Generators/ParticleGun/share/common/ParticleGun_EoverP_Config.py @@ -0,0 +1,66 @@ +#! -*- python -*- +evgenConfig.description = "Single particle gun for E/p event generation" +evgenConfig.keywords = ["singleParticle",] +evgenConfig.generators = ["ParticleGun"] +evgenConfig.contact = ["zach.marshall@cern.ch"] + +import ParticleGun as PG +import ROOT +from ParticleGun.samplers import * +class PEtaSampler(PG.MomSampler): + "Create a 4-momentum vector from pt, eta, m and phi distributions/samplers." 
+
+    def __init__(self, momentum, eta, pid=211, phi=[0, math.pi*2.]):
+        self.momentum = momentum
+        self.eta = eta
+        pdg_table = ROOT.TDatabasePDG.Instance()
+        mass = pdg_table.GetParticle(pid).Mass()
+        self.mass = mass
+        self.phi = phi
+
+    @property
+    def momentum(self):
+        "Momentum sampler"
+        return self._momentum
+    @momentum.setter
+    def momentum(self, x):
+        self._momentum = mksampler(x)
+
+    @property
+    def eta(self):
+        "Pseudorapidity sampler"
+        return self._eta
+    @eta.setter
+    def eta(self, x):
+        self._eta = mksampler(x)
+
+    @property
+    def mass(self):
+        "Mass sampler"
+        return self._m
+    @mass.setter
+    def mass(self, x):
+        self._m = mksampler(x)
+
+    @property
+    def phi(self):
+        "Azimuthal angle sampler"
+        return self._phi
+    @phi.setter
+    def phi(self, x):
+        self._phi = mksampler(x)
+
+    def shoot(self):
+        v4 = ROOT.TLorentzVector()
+        eta = self.eta() # sample eta once so pt and eta are consistent
+        v4.SetPtEtaPhiM(self.momentum() / math.cosh(eta), eta, self.phi(), self.mass())
+        return v4
+
+a_particle = int(jofile.split('_')[-1].split('.py')[0].replace('m','-'))
+
+pg = PG.ParticleGun()
+pg.sampler.pid = int(a_particle) #PID
+pg.sampler.mom = PEtaSampler(momentum=(500,800,1000,1200,1500,2000,3000,4000,5000,6000,7000,8000,9000,10000,11000,13000,15000,17000,20000,\
+    25000,35000,50000,75000,100000,200000,350000,500000), eta=[-0.3,0.3], pid=int(a_particle))
+genSeq += pg
+
diff --git a/Generators/ParticleGun/share/common/ParticleGun_FastCalo_ChargeFlip_Config.py b/Generators/ParticleGun/share/common/ParticleGun_FastCalo_ChargeFlip_Config.py
new file mode 100644
index 0000000000000000000000000000000000000000..a5399a64019b935aa729244f88c4b5c0ebe5b35f
--- /dev/null
+++ b/Generators/ParticleGun/share/common/ParticleGun_FastCalo_ChargeFlip_Config.py
@@ -0,0 +1,78 @@
+#!
-*- python -*- +evgenConfig.description = "Single particle gun for FastCaloSim event generation" +evgenConfig.keywords = ["singleParticle",] +evgenConfig.generators = ["ParticleGun"] +evgenConfig.contact = ["david.sosa@cern.ch"] + +import ParticleGun as PG +import ROOT + +class MyParticleSampler(PG.ParticleSampler): + def __init__(self,energy,eta,pid,shift_z=0): + self.pid = pid + self.shift_z = shift_z + pdg_table = ROOT.TDatabasePDG.Instance() + mass = pdg_table.GetParticle(self.pid()).Mass() + self.mom1 = PG.EEtaMPhiSampler(energy=energy,eta=eta,mass=mass) + + def shoot(self): + pid = self.pid() + + shift_z = self.shift_z + + mom = self.mom1.shoot() + pos_temp = mom.Vect().Unit() + + # Would it hit the barrel, or the endcap? + if abs(pos_temp.Z())/3550.<pos_temp.Perp()/1148.: # Hit the barrel! + pos_temp *= 1148./pos_temp.Perp() + else: # Hit the endcap! + pos_temp *= 3550./abs(pos_temp.Z()) + + # Shift position of vector in the Z direction + pos_temp_2 = ROOT.TVector3() + pos_temp_2.SetXYZ(pos_temp.X(), pos_temp.Y(), pos_temp.Z()+shift_z) + pos_temp_2 *= 1. / pos_temp_2.Mag(); # reduce magnitude of vector + + # recalculate; Would it hit the barrel, or the endcap? 
+ if abs(pos_temp_2.Z())/3550.<pos_temp_2.Perp()/1148.: + pos_temp_2 *= 1148./pos_temp_2.Perp() + else: + pos_temp_2 *= 3550./abs(pos_temp_2.Z()) + + pos = ROOT.TLorentzVector(pos_temp_2.X(),pos_temp_2.Y(),pos_temp_2.Z(), pos_temp_2.Mag()) + + #print "pid ",pid + + return [ PG.SampledParticle( pid , mom , pos ) ] + +myE = float(jofile.split('_E')[1].split('_')[0]) +myZV = float(jofile.split('_')[-1].split('.py')[0].replace("m","-")) + +myPDGID = jofile.split('_pid')[1].split('_')[0].replace('n','-') +myPDGID = int(float(myPDGID.replace('p',''))) + +eta_li = [] + +if "disj" in jofile: + myLowEta1 = 0.01*float(jofile.split('eta_')[1].split('_')[0].replace('m','-')) + myLowEta2 = 0.01*float(jofile.split('eta_')[1].split('_')[1].replace('m','-')) + myHighEta1 = 0.01*float(jofile.split('eta_')[1].split('_')[2].replace('m','-')) + myHighEta2 = 0.01*float(jofile.split('eta_')[1].split('_')[3].replace('m','-')) + eta_li.extend([myLowEta1,myLowEta2,myHighEta1,myHighEta2]) + +else: + myLowEta = 0.01*float(jofile.split('eta')[1].split('_')[0].replace('m','-')) + myHighEta = 0.01*float(jofile.split('eta')[1].split('_')[1].replace('m','-')) + eta_li.extend([myLowEta,myHighEta]) + + +print "================ SETTTINGS =================" +print ("energy = ", myE) +print ("eta = ", eta_li) +print ("pid = ", myPDGID) +print ("shift_z = ", myZV) +print "============================================" + +genSeq += PG.ParticleGun() +genSeq.ParticleGun.sampler = MyParticleSampler(energy=myE,eta=eta_li,pid=(myPDGID,myPDGID),shift_z=myZV) #unmixed diff --git a/Generators/ParticleGun/share/common/ParticleGun_FastCalo_Config.py b/Generators/ParticleGun/share/common/ParticleGun_FastCalo_Config.py new file mode 100644 index 0000000000000000000000000000000000000000..1b2e9a68bc5e1c1612bf2e294c58dcc472f700fb --- /dev/null +++ b/Generators/ParticleGun/share/common/ParticleGun_FastCalo_Config.py @@ -0,0 +1,101 @@ +#! 
-*- python -*- +evgenConfig.description = "Single particle gun for FastCaloSim event generation" +evgenConfig.keywords = ["singleParticle",] +evgenConfig.generators = ["ParticleGun"] +evgenConfig.contact = ["david.sosa@cern.ch"] + +import ParticleGun as PG +import ROOT + +class MyParticleSampler(PG.ParticleSampler): + def __init__(self,energy,eta,pid,shift_z=0): + self.pid = pid + self.shift_z = shift_z + pdg_table = ROOT.TDatabasePDG.Instance() + mass = pdg_table.GetParticle(self.pid()).Mass() + self.mom1 = PG.EEtaMPhiSampler(energy=energy,eta=eta,mass=mass) + + def shoot(self): + pid = self.pid() + + shift_z = self.shift_z + + mom = self.mom1.shoot() + pos_temp = mom.Vect().Unit() + + # Define geometry + barrelR1 = 1148.0 + barrelR2 = 120.0 + barrelR3 = 41.0 + endcapZ1 = 3550.0 + endcapZ2 = 4587.0 + + # Would it hit the barrel, or the endcap? + tanTheta = pos_temp.Perp() / abs( pos_temp.Z() ); + if tanTheta > barrelR1 / endcapZ1: + pos_temp *= barrelR1 / pos_temp.Perp() + elif tanTheta > barrelR2 / endcapZ1: + pos_temp *= endcapZ1 / abs( pos_temp.Z() ) + elif tanTheta > barrelR2 / endcapZ2: + pos_temp *= barrelR2 / pos_temp.Perp() + elif tanTheta > barrelR3 / endcapZ2: + pos_temp *= endcapZ2 / abs( pos_temp.Z() ) + else: + pos_temp *= barrelR3 / pos_temp.Perp() + + # Shift position of vector in the Z direction + pos_temp_2 = ROOT.TVector3() + pos_temp_2.SetXYZ(pos_temp.X(), pos_temp.Y(), pos_temp.Z()+shift_z) + pos_temp_2 *= 1. / pos_temp_2.Mag(); # reduce magnitude of vector + + # recalculate; Would it hit the barrel, or the endcap? 
+ tanTheta_2 = pos_temp_2.Perp() / abs( pos_temp_2.Z() ); + if tanTheta_2 > barrelR1 / endcapZ1: + pos_temp_2 *= barrelR1 / pos_temp_2.Perp() + elif tanTheta_2 > barrelR2 / endcapZ1: + pos_temp_2 *= endcapZ1 / abs( pos_temp_2.Z() ) + elif tanTheta_2 > barrelR2 / endcapZ2: + pos_temp_2 *= barrelR2 / pos_temp_2.Perp() + elif tanTheta_2 > barrelR3 / endcapZ2: + pos_temp_2 *= endcapZ2 / abs( pos_temp_2.Z() ) + else: + pos_temp_2 *= barrelR3 / pos_temp_2.Perp() + + pos = ROOT.TLorentzVector(pos_temp_2.X(),pos_temp_2.Y(),pos_temp_2.Z(), pos_temp_2.Mag()) + + #print "pid ",pid + + return [ PG.SampledParticle( pid , mom , pos ) ] + +myE = float(jofile.split('_E')[1].split('_')[0]) +myZV = float(jofile.split('_')[-1].split('.py')[0].replace("m","-")) +myPDGID = int(float(jofile.split('_pid')[1].split('_')[0].replace('m','-'))) + +eta_li = [] + +if "disj" in jofile: + myLowEta1 = 0.01*float(jofile.split('eta_')[1].split('_')[0].replace('m','-')) + myLowEta2 = 0.01*float(jofile.split('eta_')[1].split('_')[1].replace('m','-')) + myHighEta1 = 0.01*float(jofile.split('eta_')[1].split('_')[2].replace('m','-')) + myHighEta2 = 0.01*float(jofile.split('eta_')[1].split('_')[3].replace('m','-')) + eta_li.extend([myLowEta1,myLowEta2,myHighEta1,myHighEta2]) + +else: + myLowEta = 0.01*float(jofile.split('eta')[1].split('_')[0].replace('m','-')) + myHighEta = 0.01*float(jofile.split('eta')[1].split('_')[1].replace('m','-')) + eta_li.extend([myLowEta,myHighEta]) + + +print "================ SETTTINGS =================" +print ("energy = ", myE) +print ("eta = ", eta_li) +print ("pid = ", myPDGID) +print ("shift_z = ", myZV) +print "============================================" + +genSeq += PG.ParticleGun() +if myPDGID != 22: + genSeq.ParticleGun.sampler = MyParticleSampler(energy=myE,eta=eta_li,pid=(-myPDGID,myPDGID),shift_z=myZV) +else: + genSeq.ParticleGun.sampler = MyParticleSampler(energy=myE,eta=eta_li,pid=myPDGID,shift_z=myZV) + diff --git 
a/Generators/ParticleGun/share/common/ParticleGun_FastCalo_Config_Erange.py b/Generators/ParticleGun/share/common/ParticleGun_FastCalo_Config_Erange.py new file mode 100644 index 0000000000000000000000000000000000000000..75ebc0621e7ba4e6803ae25c20a6d7438fc45466 --- /dev/null +++ b/Generators/ParticleGun/share/common/ParticleGun_FastCalo_Config_Erange.py @@ -0,0 +1,103 @@ +#! -*- python -*- +evgenConfig.description = "Single particle gun for FastCaloSim event generation" +evgenConfig.keywords = ["singleParticle",] +evgenConfig.generators = ["ParticleGun"] +evgenConfig.contact = ["david.sosa@cern.ch"] + +import ParticleGun as PG +import ROOT + +class MyParticleSampler(PG.ParticleSampler): + def __init__(self,energy,eta,pid,shift_z=0): + self.pid = pid + self.shift_z = shift_z + pdg_table = ROOT.TDatabasePDG.Instance() + mass = pdg_table.GetParticle(self.pid()).Mass() + self.mom1 = PG.EEtaMPhiSampler(energy=energy,eta=eta,mass=mass) + + def shoot(self): + pid = self.pid() + + shift_z = self.shift_z + + mom = self.mom1.shoot() + pos_temp = mom.Vect().Unit() + + # Define geometry + barrelR1 = 1148.0 + barrelR2 = 120.0 + barrelR3 = 41.0 + endcapZ1 = 3550.0 + endcapZ2 = 4587.0 + + # Would it hit the barrel, or the endcap? + tanTheta = pos_temp.Perp() / abs( pos_temp.Z() ); + if tanTheta > barrelR1 / endcapZ1: + pos_temp *= barrelR1 / pos_temp.Perp() + elif tanTheta > barrelR2 / endcapZ1: + pos_temp *= endcapZ1 / abs( pos_temp.Z() ) + elif tanTheta > barrelR2 / endcapZ2: + pos_temp *= barrelR2 / pos_temp.Perp() + elif tanTheta > barrelR3 / endcapZ2: + pos_temp *= endcapZ2 / abs( pos_temp.Z() ) + else: + pos_temp *= barrelR3 / pos_temp.Perp() + + # Shift position of vector in the Z direction + pos_temp_2 = ROOT.TVector3() + pos_temp_2.SetXYZ(pos_temp.X(), pos_temp.Y(), pos_temp.Z()+shift_z) + pos_temp_2 *= 1. / pos_temp_2.Mag(); # reduce magnitude of vector + + # recalculate; Would it hit the barrel, or the endcap? 
+ tanTheta_2 = pos_temp_2.Perp() / abs( pos_temp_2.Z() ); + if tanTheta_2 > barrelR1 / endcapZ1: + pos_temp_2 *= barrelR1 / pos_temp_2.Perp() + elif tanTheta_2 > barrelR2 / endcapZ1: + pos_temp_2 *= endcapZ1 / abs( pos_temp_2.Z() ) + elif tanTheta_2 > barrelR2 / endcapZ2: + pos_temp_2 *= barrelR2 / pos_temp_2.Perp() + elif tanTheta_2 > barrelR3 / endcapZ2: + pos_temp_2 *= endcapZ2 / abs( pos_temp_2.Z() ) + else: + pos_temp_2 *= barrelR3 / pos_temp_2.Perp() + + pos = ROOT.TLorentzVector(pos_temp_2.X(),pos_temp_2.Y(),pos_temp_2.Z(), pos_temp_2.Mag()) + + #print "pid ",pid + + return [ PG.SampledParticle( pid , mom , pos ) ] + +E_li = [] +myLowE = float(jofile.split('_E')[1].split('_')[0]) +myHighE = float(jofile.split('_E')[1].split('_')[1]) +E_li.extend([myLowE,myHighE]) + +myZV = float(jofile.split('_')[-1].split('.py')[0].replace("m","-")) +myPDGID = int(float(jofile.split('_pid')[1].split('_')[0].replace('m','-'))) + +eta_li = [] + +if "disj" in jofile: + myLowEta1 = 0.01*float(jofile.split('eta_')[1].split('_')[0].replace('m','-')) + myLowEta2 = 0.01*float(jofile.split('eta_')[1].split('_')[1].replace('m','-')) + myHighEta1 = 0.01*float(jofile.split('eta_')[1].split('_')[2].replace('m','-')) + myHighEta2 = 0.01*float(jofile.split('eta_')[1].split('_')[3].replace('m','-')) + eta_li.extend([myLowEta1,myLowEta2,myHighEta1,myHighEta2]) + +else: + myLowEta = 0.01*float(jofile.split('eta')[1].split('_')[0].replace('m','-')) + myHighEta = 0.01*float(jofile.split('eta')[1].split('_')[1].replace('m','-')) + eta_li.extend([myLowEta,myHighEta]) + + +print "================ SETTTINGS =================" +print ("energy = ", E_li) +print ("eta = ", eta_li) +print ("pid = ", myPDGID) +print ("shift_z = ", myZV) +print "============================================" + +genSeq += PG.ParticleGun() +print "E_li = ", E_li, ", eta_li = ", eta_li, ", pid = ", myPDGID, ", myZV = ", myZV +genSeq.ParticleGun.sampler = MyParticleSampler(energy=E_li,eta=eta_li,pid=myPDGID,shift_z=myZV) + 
diff --git a/Generators/ParticleGun/share/common/ParticleGun_FastCalo_NoChargeFlip_Config.py b/Generators/ParticleGun/share/common/ParticleGun_FastCalo_NoChargeFlip_Config.py new file mode 100644 index 0000000000000000000000000000000000000000..7ba60ef2bc9d3a9195ada72e9a2232864f173567 --- /dev/null +++ b/Generators/ParticleGun/share/common/ParticleGun_FastCalo_NoChargeFlip_Config.py @@ -0,0 +1,78 @@ +#! -*- python -*- +evgenConfig.description = "Single particle gun for FastCaloSim event generation" +evgenConfig.keywords = ["singleParticle",] +evgenConfig.generators = ["ParticleGun"] +evgenConfig.contact = ["david.sosa@cern.ch"] + +import ParticleGun as PG +import ROOT + +class MyParticleSampler(PG.ParticleSampler): + def __init__(self,energy,eta,pid,shift_z=0): + self.pid = pid + self.shift_z = shift_z + pdg_table = ROOT.TDatabasePDG.Instance() + mass = pdg_table.GetParticle(self.pid()).Mass() + self.mom1 = PG.EEtaMPhiSampler(energy=energy,eta=eta,mass=mass) + + def shoot(self): + pid = self.pid() + + shift_z = self.shift_z + + mom = self.mom1.shoot() + pos_temp = mom.Vect().Unit() + + # Would it hit the barrel, or the endcap? + if abs(pos_temp.Z())/3550.<pos_temp.Perp()/1148.: # Hit the barrel! + pos_temp *= 1148./pos_temp.Perp() + else: # Hit the endcap! + pos_temp *= 3550./abs(pos_temp.Z()) + + # Shift position of vector in the Z direction + pos_temp_2 = ROOT.TVector3() + pos_temp_2.SetXYZ(pos_temp.X(), pos_temp.Y(), pos_temp.Z()+shift_z) + pos_temp_2 *= 1. / pos_temp_2.Mag(); # reduce magnitude of vector + + # recalculate; Would it hit the barrel, or the endcap? 
+        if abs(pos_temp_2.Z())/3550.<pos_temp_2.Perp()/1148.:
+            pos_temp_2 *= 1148./pos_temp_2.Perp()
+        else:
+            pos_temp_2 *= 3550./abs(pos_temp_2.Z())
+
+        pos = ROOT.TLorentzVector(pos_temp_2.X(),pos_temp_2.Y(),pos_temp_2.Z(), pos_temp_2.Mag())
+
+        #print "pid ",pid
+
+        return [ PG.SampledParticle( pid , mom , pos ) ]
+
+myE = float(jofile.split('_E')[1].split('_')[0])
+myZV = float(jofile.split('_')[-1].split('.py')[0].replace("m","-"))
+
+myPDGID = jofile.split('_pid')[1].split('_')[0].replace('n','-')
+myPDGID = int(float(myPDGID.replace('p',''))) # myPDGID already holds the pid token; re-splitting it on '_pid' would raise IndexError
+
+eta_li = []
+
+if "disj" in jofile:
+    myLowEta1 = 0.01*float(jofile.split('eta_')[1].split('_')[0].replace('m','-'))
+    myLowEta2 = 0.01*float(jofile.split('eta_')[1].split('_')[1].replace('m','-'))
+    myHighEta1 = 0.01*float(jofile.split('eta_')[1].split('_')[2].replace('m','-'))
+    myHighEta2 = 0.01*float(jofile.split('eta_')[1].split('_')[3].replace('m','-'))
+    eta_li.extend([myLowEta1,myLowEta2,myHighEta1,myHighEta2])
+
+else:
+    myLowEta = 0.01*float(jofile.split('eta')[1].split('_')[0].replace('m','-'))
+    myHighEta = 0.01*float(jofile.split('eta')[1].split('_')[1].replace('m','-'))
+    eta_li.extend([myLowEta,myHighEta])
+
+
+print "================ SETTTINGS ================="
+print ("energy = ", myE)
+print ("eta = ", eta_li)
+print ("pid = ", myPDGID)
+print ("shift_z = ", myZV)
+print "============================================"
+
+genSeq += PG.ParticleGun()
+genSeq.ParticleGun.sampler = MyParticleSampler(energy=myE,eta=eta_li,pid=(myPDGID,myPDGID),shift_z=myZV) #unmixed
diff --git a/Generators/ParticleGun/share/common/ParticleGun_SamplingFraction.py b/Generators/ParticleGun/share/common/ParticleGun_SamplingFraction.py
new file mode 100644
index 0000000000000000000000000000000000000000..54557f0d5d2afacedb09999acac22e02ad8576ac
--- /dev/null
+++ b/Generators/ParticleGun/share/common/ParticleGun_SamplingFraction.py
@@ -0,0 +1,97 @@
+#!
-*- python -*- +evgenConfig.description = "Single particle gun for Sampling Fraction event generation" +evgenConfig.keywords = ["singleParticle",] +evgenConfig.generators = ["ParticleGun"] +evgenConfig.contact = ["michael.duehrssen@cern.ch"] + +import ParticleGun as PG +import ROOT, math, random + +class MyParticleSampler(PG.ParticleSampler): + """ + Projective showers starting at entrance of calorimeter, flat in eta, constant energy + """ + + def __init__(self,pid=11,momentum=50000.,eta1=0.,eta2=1.4,bec=0,radius=1500.,z=3740.5): + self.pid = pid + self.momentum = momentum + self.eta1 = eta1 + self.eta2 = eta2 + pdg_table = ROOT.TDatabasePDG.Instance() + self.mass = pdg_table.GetParticle(self.pid()).Mass() + self.bec=bec + self.radius=radius + self.z=z + + def shoot(self): + rtn=[] + eta = random.uniform(self.eta1, self.eta2) + phi = random.uniform(0, math.tau) # tau = 2 * pi + v4 = ROOT.TLorentzVector() + pt = self.momentum / math.cosh(eta) + v4.SetPtEtaPhiM(pt, eta, phi, self.mass) + if self.bec==0: + radius= self.radius + x=radius*math.cos(phi) + y=radius*math.sin(phi) + z=radius*math.sinh(eta) + else: + z=self.z + radius=z/math.sinh(eta) + x=radius*math.cos(phi) + y=radius*math.sin(phi) + t=math.sqrt(x*x+y*y+z*z) + vp = ROOT.TLorentzVector(x,y,z,t) + p = PG.SampledParticle(pid=self.pid(),mom=v4,pos=vp) + #print "E,eta,phi,mass ",e,eta,phi,self.mass," position ",x,y,z," pid=",p.pid + rtn.append(p) + return rtn + +##MC15 style with Generate_tf.py +#args=jofile.split('.py')[0] + +##MC16 style with Gen_tf.py +FIRST_DIR = (os.environ['JOBOPTSEARCHPATH']).split(":")[0] +jofiles = [f for f in os.listdir(FIRST_DIR) if (f.startswith('mc') and f.endswith('.py'))] + +print "================ SETTTINGS =================" +print ("jofiles = ", jofiles) + +### parse options from MC job-options filename +args = jofiles[0].split('.py')[0] +print ("args = ", args) + +myMomentum = float(args.split('_Mom')[1].split('_')[0]) +print ("Momentum = ", myMomentum,"MeV") + +myPDGID = 
int(float(args.split('_pid')[1].split('_')[0].replace('m','-'))) +print ("pid = ", myPDGID) + +myLowEta = 0.01*float(args.split('eta_')[1].split('_')[0].replace('m','-')) +print ("etalow = ", myLowEta) + +myHighEta = 0.01*float(args.split('eta_')[1].split('_')[1].replace('m','-')) +print ("etahigh = ", myHighEta) + +if "_Radius" in args: + myRadius = 0.001*float(args.split('_Radius')[1].split('_')[0]) #Argument needs to by in mum, since a "." in the filename is not allowed +else: + myRadius = 1500. +print ("radius = ", myRadius,"mm") + +if "_Z" in args: + myZ = 0.001*float(args.split('_Z')[1].split('_')[0]) #Argument needs to by in mum, since a "." in the filename is not allowed +else: + myZ = 3740.5 +print ("Z = ", myZ,"mm") + +if "bec" in args: + bec=1 +else: + bec=0 +print ("bec = ", bec) +print "============================================" + +genSeq += PG.ParticleGun() +genSeq.ParticleGun.sampler = MyParticleSampler(momentum=myMomentum,eta1=myLowEta,eta2=myHighEta,pid=myPDGID,bec=bec,radius=myRadius,z=myZ) + diff --git a/Generators/ParticleGun/share/common/ParticleGun_SingleHECO.py b/Generators/ParticleGun/share/common/ParticleGun_SingleHECO.py new file mode 100644 index 0000000000000000000000000000000000000000..8ffb36dd28235948f0ec1d298c121902267f27d1 --- /dev/null +++ b/Generators/ParticleGun/share/common/ParticleGun_SingleHECO.py @@ -0,0 +1,74 @@ + +PDG = 10000000 + int(float(charge)*100.0) +loE = (float(mass) + 10.)*1000. +hiE = (float(mass) + 6000.)*1000. +MeVmass=float(mass)*1000. 
+#-------------------------------------------------------------- +# Configuration for EvgenJobTransforms +#-------------------------------------------------------------- +evgenConfig.description = "Single HECO generation for Mass=%s, Charge=%s in MC15" % (mass,charge) +evgenConfig.keywords = ["exotic", "singleParticle","highElectricChargeObject"] +evgenConfig.generators = ["ParticleGun"] +evgenConfig.contact = ["anlionti@cern.ch"] + +evgenConfig.specialConfig = 'MASS=%s;CHARGE=%s;preInclude=SimulationJobOptions/preInclude.Qball.py' % (mass,charge) + + + +#-------------------------------------------------------------- +# Configuration for ParticleGun +#-------------------------------------------------------------- +include("ParticleGun/ParticleGun_Common.py") + +import ParticleGun as PG +PG.MASSES[PDG] = float(MeVmass) +genSeq.ParticleGun.sampler.pid = (-PDG, PDG) +genSeq.ParticleGun.sampler.mom = PG.EEtaMPhiSampler(energy=[loE,hiE], eta=[-2,2]) + + +#-------------------------------------------------------------- +# Edit PDGTABLE.MeV with monopole mass +#-------------------------------------------------------------- +ALINE1="M %s %s.E+03 +0.0E+00 -0.0E+00 Monopole 0" % (PDG,mass) +ALINE2="W %s 0.E+00 +0.0E+00 -0.0E+00 Monopole 0" % (PDG) + +import os +import sys + +pdgmod = os.path.isfile('PDGTABLE.MeV') +if pdgmod is True: + os.remove('PDGTABLE.MeV') +os.system('get_files -data PDGTABLE.MeV') +f=open('PDGTABLE.MeV','a') +f.writelines(str(ALINE1)) +f.writelines('\n') +f.writelines(str(ALINE2)) +f.writelines('\n') +f.close() + +del ALINE1 +del ALINE2 + +#-------------------------------------------------------------- +# Edit G4particle_whitelist.txt with monopole +#-------------------------------------------------------------- + +ALINE1="%s qb %s.E+03 (Mev/c) lepton %s" % (PDG,mass,charge) +ALINE2="-%s qbbar %s.E+03 (Mev/c) lepton -%s" % (PDG,mass,charge) + +import os +import sys + +pdgmod = os.path.isfile('G4particle_whitelist.txt') +if pdgmod is True: + 
os.remove('G4particle_whitelist.txt') +os.system('get_files -data G4particle_whitelist.txt') +f=open('G4particle_whitelist.txt','a') +f.writelines(str(ALINE1)) +f.writelines('\n') +f.writelines(str(ALINE2)) +f.writelines('\n') +f.close() + +del ALINE1 +del ALINE2 diff --git a/Generators/ParticleGun/share/common/ParticleGun_SingleMonopole.py b/Generators/ParticleGun/share/common/ParticleGun_SingleMonopole.py new file mode 100644 index 0000000000000000000000000000000000000000..3a6bf0574561af0f4cab4a20e68248dcc3111bae --- /dev/null +++ b/Generators/ParticleGun/share/common/ParticleGun_SingleMonopole.py @@ -0,0 +1,74 @@ + +PDG = 4110000 +loE = (float(monmass) + 10.)*1000. +hiE = (float(monmass) + 6000.)*1000. +MeVmass=float(monmass)*1000. +#-------------------------------------------------------------- +# Configuration for EvgenJobTransforms +#-------------------------------------------------------------- +evgenConfig.description = "Single magnetic monopole generation for Mass=%s, Gcharge=%s in MC15" % (monmass,gcharge) +evgenConfig.keywords = ["exotic", "magneticMonopole", "singleParticle"] +evgenConfig.generators = ["ParticleGun"] +evgenConfig.contact = ["anlionti@cern.ch"] + +evgenConfig.specialConfig = 'MASS=%s;GCHARGE=%s;preInclude=SimulationJobOptions/preInclude.Monopole.py' % (monmass,gcharge) + + + +#-------------------------------------------------------------- +# Configuration for ParticleGun +#-------------------------------------------------------------- +include("ParticleGun/ParticleGun_Common.py") + +import ParticleGun as PG +PG.MASSES[4110000] = float(MeVmass) +genSeq.ParticleGun.sampler.pid = (-PDG, PDG) +genSeq.ParticleGun.sampler.mom = PG.EEtaMPhiSampler(energy=[loE,hiE], eta=[-2,2]) + + +#-------------------------------------------------------------- +# Edit PDGTABLE.MeV with monopole mass +#-------------------------------------------------------------- +ALINE1="M 4110000 %s.E+03 +0.0E+00 -0.0E+00 Monopole 0" % (monmass) +ALINE2="W 4110000 0.E+00 
+0.0E+00 -0.0E+00 Monopole 0" + +import os +import sys + +pdgmod = os.path.isfile('PDGTABLE.MeV') +if pdgmod is True: + os.remove('PDGTABLE.MeV') +os.system('get_files -data PDGTABLE.MeV') +f=open('PDGTABLE.MeV','a') +f.writelines(str(ALINE1)) +f.writelines('\n') +f.writelines(str(ALINE2)) +f.writelines('\n') +f.close() + +del ALINE1 +del ALINE2 + +#-------------------------------------------------------------- +# Edit G4particle_whitelist.txt with monopole +#-------------------------------------------------------------- + +ALINE1="4110000 mm %s.E+03 (Mev/c) lepton %s" % (monmass,gcharge) +ALINE2="-4110000 mmbar %s.E+03 (Mev/c) lepton -%s" % (monmass,gcharge) + +import os +import sys + +pdgmod = os.path.isfile('G4particle_whitelist.txt') +if pdgmod is True: + os.remove('G4particle_whitelist.txt') +os.system('get_files -data G4particle_whitelist.txt') +f=open('G4particle_whitelist.txt','a') +f.writelines(str(ALINE1)) +f.writelines('\n') +f.writelines(str(ALINE2)) +f.writelines('\n') +f.close() + +del ALINE1 +del ALINE2 diff --git a/Generators/ParticleGun/share/common/ParticleGun_egammaET.py b/Generators/ParticleGun/share/common/ParticleGun_egammaET.py new file mode 100644 index 0000000000000000000000000000000000000000..04ea9b92a3e209afc279d743307dff393f1d8839 --- /dev/null +++ b/Generators/ParticleGun/share/common/ParticleGun_egammaET.py @@ -0,0 +1,51 @@ +__doc__ = "Holds a 4-momentum sampler according to the egamma Et spectrum" + +import ParticleGun as PG +from GaudiKernel.SystemOfUnits import GeV + +def dbnFermiDirac(x,mu,kT): + import math + arg = (x-mu)/kT + if arg < -20 : # avoid numerical underflows + result = 1 + elif arg > 20 : # avoid numerical overflows + result = 0 + else : + div = math.exp(arg)+1 + result = 1/div + return result + +class egammaETSampler(PG.PtEtaMPhiSampler): + "4-momentum sampler according to the egamma Et spectrum." 
+ def __init__(self, pid, eta=[-2.5, 2.5], phi=[0, PG.TWOPI], + mu1 = 0.5, kT1 = 0.1, mu2 = 200, kT2 = 20, y0 = 0.005, PtMin = 0 , PtMax = 3e3, nBins=None): + """ + Parameters for the MVA-shaped spectrum : higher density in the < 100 GeV range + PtMin = 0 # minimum Pt + PtMax = 3000 # maximum Pt (3 TeV) + nBins # number of bins (one every 100 MeV by default) + mu1 = 0.5 # mu1,kT1 : smooth but steep ramp-up from 0 to 1 GeV (requested by TauCP) + kT1 = 0.1 + mu2 = 200 # mu2,kT2 : smooth, slow ramp-down in the 100-300 GeV range + kT2 = 20 + y0 = 0.005 # y0 : baseline for low-density at high ET up to PtMax + """ + self.m = PG.MASSES[abs(pid)] + self.eta = eta + self.phi = phi + + # Create and fill a very fine-grained histogram + from ROOT import TH1D + etSpectrumFullRange = TH1D("ETSpectrumFullRange", + "Reference ET spectrum for egamma MVA calib", + int(nBins or (PtMax - PtMin)*10), PtMin , PtMax) + for i in xrange(etSpectrumFullRange.GetNbinsX()): + x = etSpectrumFullRange.GetBinCenter(i+1) + y1 = dbnFermiDirac(x,mu1,kT1) + y2 = dbnFermiDirac(x,mu2,kT2) + y = y0 - y1 + y2 + etSpectrumFullRange.SetBinContent(i+1,y) + self.hist = PG.TH1(etSpectrumFullRange) #< wrap *after* populating + + def pt(self): + return self.hist.GetRandom() * GeV diff --git a/Generators/ParticleGun/share/examples/jobOption.ParticleGun_constenergy_flateta.py b/Generators/ParticleGun/share/examples/jobOption.ParticleGun_constenergy_flateta.py new file mode 100644 index 0000000000000000000000000000000000000000..3c3cb0321b37e7f3d5ec3fcfc2602b2f1cba2cb0 --- /dev/null +++ b/Generators/ParticleGun/share/examples/jobOption.ParticleGun_constenergy_flateta.py @@ -0,0 +1,17 @@ +#! 
-*- python -*- + +# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration + +include("GeneratorUtils/StdEvgenSetup.py") +theApp.EvtMax = 100 + +import ParticleGun as PG +pg = PG.ParticleGun() +pg.randomSeed = 123456 +pg.sampler.pid = {11,-11,211,111} +pg.sampler.mom = PG.EEtaMPhiSampler(energy=10000, eta=[-2,2]) +topSeq += pg + +include("GeneratorUtils/postJO.CopyWeights.py") +include("GeneratorUtils/postJO.PoolOutput.py") +include("GeneratorUtils/postJO.DumpMC.py") diff --git a/Generators/ParticleGun/share/examples/jobOption.ParticleGun_correlated.py b/Generators/ParticleGun/share/examples/jobOption.ParticleGun_correlated.py new file mode 100644 index 0000000000000000000000000000000000000000..cb0a9437388d3b21fffd744c67cfe590dbddaf4e --- /dev/null +++ b/Generators/ParticleGun/share/examples/jobOption.ParticleGun_correlated.py @@ -0,0 +1,34 @@ +#! -*- python -*- + +# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration + +include("GeneratorUtils/StdEvgenSetup.py") +theApp.EvtMax = 100 + +import ParticleGun as PG + +class MyParticleSampler(PG.ParticleSampler): + "A special sampler with two _correlated_ particles." + + def __init__(self): + self.mom1 = PG.PtEtaMPhiSampler(pt=25000, eta=[-2,2]) + + def shoot(self): + "Return a vector of sampled particles" + p1 = PG.SampledParticle(11, self.mom1.shoot()) + eta1 = p1.mom.Eta() + phi1 = p1.mom.Phi() + # TODO: will phi be properly wrapped into range? 
+ mom2 = PG.PtEtaMPhiSampler(pt=25000, + eta=[eta1-0.5, eta1+0.5], + phi=[phi1-0.5, phi1+0.5]) + p2 = PG.SampledParticle(11, mom2.shoot()) + return [p1, p2] + +topSeq += PG.ParticleGun() +topSeq.ParticleGun.randomSeed = 123456 +topSeq.ParticleGun.sampler = MyParticleSampler() + +include("GeneratorUtils/postJO.CopyWeights.py") +include("GeneratorUtils/postJO.PoolOutput.py") +include("GeneratorUtils/postJO.DumpMC.py") diff --git a/Generators/ParticleGun/share/examples/jobOption.ParticleGun_corrhist.py b/Generators/ParticleGun/share/examples/jobOption.ParticleGun_corrhist.py new file mode 100644 index 0000000000000000000000000000000000000000..7c0cd41b88e35f0290b137e5eebe80296a90b3ca --- /dev/null +++ b/Generators/ParticleGun/share/examples/jobOption.ParticleGun_corrhist.py @@ -0,0 +1,41 @@ +#! -*- python -*- + +# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration + +## ROOT 2D histogram sampling alg (in ParticleGun.histsampling) by Andy Buckley +## Thanks to Alejandro Alonso for the initial Athena example on which this is based. + +include("GeneratorUtils/StdEvgenSetup.py") +theApp.EvtMax = 100 + +import ParticleGun as PG + +class PtEtaHistParticleSampler(PG.ParticleSampler): + "Particle sampler with correlated pT and eta from a 2D histogram." + + def __init__(self, pid, histfile, num=100): + self.pid = PG.mksampler(pid) + self.hist = PG.TH2(histfile, "h_pt_eta") + self.numparticles = num + + def shoot(self): + "Return a vector of sampled particles from the provided pT--eta histogram" + particles = [] + for i in xrange(self.numparticles): + ptrand, etarand = self.hist.GetRandom() + ptrand *= 1000 # NB. This _particular_ histogram is in GeV, but Athena needs MeV! 
+ # TODO: Provide 4-mom construction functions to avoid building this one-time sampler + pid = self.pid() + mom = PG.PtEtaMPhiSampler(pt=ptrand, eta=etarand, mass=PG.MASSES[abs(pid)]) + p = PG.SampledParticle(pid, mom()) + #print p.mom.Pt(), "\t", p.mom.Eta(), "\t", p.mom.Phi(), "\t", p.mom.M() + particles.append(p) + return particles + +topSeq += PG.ParticleGun() +topSeq.ParticleGun.randomSeed = 123456 +topSeq.ParticleGun.sampler = PtEtaHistParticleSampler(11, "data_histos_el_1470pt.root") + +include("GeneratorUtils/postJO.CopyWeights.py") +include("GeneratorUtils/postJO.PoolOutput.py") +include("GeneratorUtils/postJO.DumpMC.py") diff --git a/Generators/ParticleGun/share/examples/jobOption.ParticleGun_flatcurvature_flatip.py b/Generators/ParticleGun/share/examples/jobOption.ParticleGun_flatcurvature_flatip.py new file mode 100644 index 0000000000000000000000000000000000000000..38233563de9673b1d9ebd1ae434b57f6051ae70e --- /dev/null +++ b/Generators/ParticleGun/share/examples/jobOption.ParticleGun_flatcurvature_flatip.py @@ -0,0 +1,41 @@ +#! -*- python -*- + +# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration + +include("GeneratorUtils/StdEvgenSetup.py") +theApp.EvtMax = 100 + +import ParticleGun as PG + +class MyParticleSampler(PG.ParticleSampler): + """ + A special sampler to generate single particles flat in 1/pT and in + impact parameter to the beam, with flat z0. 
+ """ + + def __init__(self): + psamp = PG.PtEtaMPhiSampler(pt=PG.InvSampler(4000, 400000), eta=[0.1,0.3], phi=[0.3, 0.5]) + xsamp = PG.PosSampler(0, 0, [-150,150], 0) + PG.ParticleSampler.__init__(self, pid={13,-13}, mom=psamp, pos=xsamp) + self.ip = PG.mksampler([-2,2]) + + def shoot(self): + "Return a vector of sampled particles" + ps = PG.ParticleSampler.shoot(self) + assert len(ps) == 1 + p = ps[0] + from math import sqrt + m = -p.mom.X() / p.mom.Y() #< gradient of azimuthal IP sampling line, perp to mom + x = self.ip() / sqrt(1 + m**2) #< just decomposing sampled IP into x component... + y = m*x #< ... and y-component + p.pos.SetX(x) + p.pos.SetY(m*x) + return [p] + +topSeq += PG.ParticleGun() +topSeq.ParticleGun.randomSeed = 123456 +topSeq.ParticleGun.sampler = MyParticleSampler() + +include("GeneratorUtils/postJO.CopyWeights.py") +include("GeneratorUtils/postJO.PoolOutput.py") +include("GeneratorUtils/postJO.DumpMC.py") diff --git a/Generators/ParticleGun/share/examples/jobOption.ParticleGun_flatpt_2particle.py b/Generators/ParticleGun/share/examples/jobOption.ParticleGun_flatpt_2particle.py new file mode 100644 index 0000000000000000000000000000000000000000..97ed64f8857e82dbcaeb06085fc84d84eb246c0b --- /dev/null +++ b/Generators/ParticleGun/share/examples/jobOption.ParticleGun_flatpt_2particle.py @@ -0,0 +1,20 @@ +#! 
-*- python -*- + +# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration + +include("GeneratorUtils/StdEvgenSetup.py") +theApp.EvtMax = 100 + +import ParticleGun as PG +pg = PG.ParticleGun() +pg.randomSeed = 123456 +pg.samplers.append(PG.ParticleSampler()) # add a second sampler +pg.samplers[0].pid = (-13, 13) # cycle mu+- +pg.samplers[0].mom = PG.PtEtaMPhiSampler(pt=[4000, 100000], eta=[1.0, 3.2]) # flat in pt and +ve eta +pg.samplers[1].pid = (13, -13) # cycle mu-+ +pg.samplers[1].mom = PG.PtEtaMPhiSampler(pt=[4000, 100000], eta=[-3.2, -1.0]) # flat in pt and -ve eta +topSeq += pg + +include("GeneratorUtils/postJO.CopyWeights.py") +include("GeneratorUtils/postJO.PoolOutput.py") +include("GeneratorUtils/postJO.DumpMC.py") diff --git a/Generators/ParticleGun/share/examples/jobOption.ParticleGun_fwd_sequence.py b/Generators/ParticleGun/share/examples/jobOption.ParticleGun_fwd_sequence.py new file mode 100644 index 0000000000000000000000000000000000000000..d1d4746fb7b7002fb8e096058c1b2b5d1c4eb348 --- /dev/null +++ b/Generators/ParticleGun/share/examples/jobOption.ParticleGun_fwd_sequence.py @@ -0,0 +1,19 @@ +#! 
-*- python -*- + +# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration + +include("GeneratorUtils/StdEvgenSetup.py") +theApp.EvtMax = 100 + +import ParticleGun as PG +pg = PG.ParticleGun() +pg.randomSeed = 123456 +pg.sampler.pid = (2112, 22, 2112, 22) +pg.sampler.mom = PG.EThetaMPhiSampler(energy=(1360000, 500000, 1360000, 500000), + theta=(0, 0, PG.PI, PG.PI)) +pg.sampler.pos = PG.PosSampler(x=[-120,-100], y=[-10,10], z=203950) +topSeq += pg + +include("GeneratorUtils/postJO.CopyWeights.py") +include("GeneratorUtils/postJO.PoolOutput.py") +include("GeneratorUtils/postJO.DumpMC.py") diff --git a/Generators/ParticleGun/share/examples/jobOption.ParticleGun_vtx.py b/Generators/ParticleGun/share/examples/jobOption.ParticleGun_vtx.py new file mode 100644 index 0000000000000000000000000000000000000000..cdacb7ff5a678cb95894a39a14f0ead507c41a4b --- /dev/null +++ b/Generators/ParticleGun/share/examples/jobOption.ParticleGun_vtx.py @@ -0,0 +1,18 @@ +#! -*- python -*- + +# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration + +include("GeneratorUtils/StdEvgenSetup.py") +theApp.EvtMax = 100 + +import ParticleGun as PG +pg = PG.ParticleGun() +pg.randomSeed = 123456 +pg.sampler.pid = 13 +pg.sampler.pos = PG.PosSampler(x=3140.0, y=[-154.134,154.134], z=[4938.76,5121.29], t=5929.7) +pg.sampler.mom = PG.EEtaMPhiSampler(energy=100000, eta=1.25, phi=0.0) +topSeq += pg + +include("GeneratorUtils/postJO.CopyWeights.py") +include("GeneratorUtils/postJO.PoolOutput.py") +include("GeneratorUtils/postJO.DumpMC.py") diff --git a/Scintillator/ScintDetDescr/ScintIdDictFiles/data/IdDictScintillator.xml b/Scintillator/ScintDetDescr/ScintIdDictFiles/data/IdDictScintillator.xml index 13d77e21372822bd19cbdb4c5b7ae3046d27693f..54552a212ae62ae33650c186ea6529c383524518 100644 --- a/Scintillator/ScintDetDescr/ScintIdDictFiles/data/IdDictScintillator.xml +++ b/Scintillator/ScintDetDescr/ScintIdDictFiles/data/IdDictScintillator.xml @@ -4,6 +4,7 @@ <label 
name="Veto" value="1" /> <label name="Trigger" value="2" /> <label name="Preshower" value="3" /> + <label name="VetoNu" value="4" /> </field> <region> @@ -26,4 +27,12 @@ <range field="plate" minvalue="0" maxvalue="1" /> <range field="pmt" minvalue="0" maxvalue="0" /> </region> -</IdDictionary> \ No newline at end of file + + <region> + <range field="part" value="VetoNu" /> + <range field="station" minvalue="0" maxvalue="0" /> + <range field="plate" minvalue="0" maxvalue="1" /> + <range field="pmt" minvalue="0" maxvalue="0" /> + </region> + +</IdDictionary> diff --git a/Scintillator/ScintDetDescr/ScintIdDictFiles/data/IdDictScintillator_TB00.xml b/Scintillator/ScintDetDescr/ScintIdDictFiles/data/IdDictScintillator_TB00.xml index a4b3f0c98f58f58bf64ef490bf6d6f446bb0256b..71fd8298645674dae3bd0d54c411d099742a6416 100644 --- a/Scintillator/ScintDetDescr/ScintIdDictFiles/data/IdDictScintillator_TB00.xml +++ b/Scintillator/ScintDetDescr/ScintIdDictFiles/data/IdDictScintillator_TB00.xml @@ -4,8 +4,8 @@ <label name="Veto" value="1" /> <label name="Trigger" value="2" /> <label name="Preshower" value="3" /> + <label name="VetoNu" value="4" /> </field> - <region> <range field="part" value="Veto" /> <range field="station" minvalue="0" maxvalue="0" /> @@ -24,4 +24,10 @@ <range field="plate" minvalue="0" maxvalue="1" /> <range field="pmt" minvalue="0" maxvalue="0" /> </region> -</IdDictionary> \ No newline at end of file + <region> + <range field="part" value="VetoNu" /> + <range field="station" minvalue="0" maxvalue="0" /> + <range field="plate" minvalue="0" maxvalue="1" /> + <range field="pmt" minvalue="0" maxvalue="0" /> + </region> +</IdDictionary> diff --git a/Scintillator/ScintDetDescr/ScintIdentifier/CMakeLists.txt b/Scintillator/ScintDetDescr/ScintIdentifier/CMakeLists.txt index 6dc9408f8ffb04b6a32144d7d03dc338ab9ad8bb..5874ae0de68035f16d71d47887a95cdfb8f857a6 100644 --- a/Scintillator/ScintDetDescr/ScintIdentifier/CMakeLists.txt +++ 
b/Scintillator/ScintDetDescr/ScintIdentifier/CMakeLists.txt @@ -11,6 +11,7 @@ find_package( ROOT COMPONENTS Core Tree MathCore Hist RIO pthread ) # Component(s) in the package: atlas_add_library( ScintIdentifier src/VetoID.cxx + src/VetoNuID.cxx src/TriggerID.cxx src/PreshowerID.cxx # src/ScintillatorID.cxx diff --git a/Scintillator/ScintDetDescr/ScintIdentifier/ScintIdentifier/ScintIdentifierDict.h b/Scintillator/ScintDetDescr/ScintIdentifier/ScintIdentifier/ScintIdentifierDict.h index 9f3832c16e73859facfe81aa17f4864cf63a9b19..0968f0b4136d48216bcea7305e4933dac7337e2d 100644 --- a/Scintillator/ScintDetDescr/ScintIdentifier/ScintIdentifier/ScintIdentifierDict.h +++ b/Scintillator/ScintDetDescr/ScintIdentifier/ScintIdentifier/ScintIdentifierDict.h @@ -16,5 +16,6 @@ #include "ScintIdentifier/PreshowerID.h" #include "ScintIdentifier/TriggerID.h" #include "ScintIdentifier/VetoID.h" +#include "ScintIdentifier/VetoNuID.h" #endif // SCINTIDENTIFIER_SCINTIDENTIFIERDICT_H diff --git a/Scintillator/ScintDetDescr/ScintIdentifier/ScintIdentifier/VetoNuID.h b/Scintillator/ScintDetDescr/ScintIdentifier/ScintIdentifier/VetoNuID.h new file mode 100644 index 0000000000000000000000000000000000000000..064f9a132378b5cc5740103b402e88d32c48d84d --- /dev/null +++ b/Scintillator/ScintDetDescr/ScintIdentifier/ScintIdentifier/VetoNuID.h @@ -0,0 +1,541 @@ +/* + Copyright (C) 2002-2019 CERN for the benefit of the ATLAS collaboration +*/ + +#ifndef SCINTIDENTIFIER_VETONUID_H +#define SCINTIDENTIFIER_VETONUID_H +/** + * @file VetoNuID.h + * + * @brief This is an Identifier helper class for the VetoNu + * subdetector. This class is a factory for creating compact + * Identifier objects and IdentifierHash or hash ids. And it also + * allows decoding of these ids. 
+ * + */ + +//<<<<<< INCLUDES >>>>>> + +#include "FaserDetDescr/FaserDetectorID.h" +#include "Identifier/Identifier.h" +#include "Identifier/IdentifierHash.h" +#include "Identifier/Range.h" +#include "Identifier/IdHelper.h" +#include "IdDict/IdDictFieldImplementation.h" +#include "AthenaKernel/CLASS_DEF.h" + +#include <string> +#include <assert.h> +#include <algorithm> + +//<<<<<< PUBLIC DEFINES >>>>>> +//<<<<<< PUBLIC CONSTANTS >>>>>> +//<<<<<< PUBLIC TYPES >>>>>> + +class IdDictDictionary; + +//<<<<<< PUBLIC VARIABLES >>>>>> +//<<<<<< PUBLIC FUNCTIONS >>>>>> +//<<<<<< CLASS DECLARATIONS >>>>>> + +/** + ** @class VetoNuID + ** + ** @brief This is an Identifier helper class for the VetoNu + ** subdetector. This class is a factory for creating compact + ** Identifier objects and IdentifierHash or hash ids. And it also + ** allows decoding of these ids. + ** + ** Definition and the range of values for the levels of the + ** identifier are: + ** + ** @verbatim + ** element range meaning + ** ------- ----- ------- + ** + ** station 0 to 1 longitudinal location + ** plate 0 to 1 two plates per station + ** pmt 0 single pmt per plate + ** + ** @endverbatim + ** + */ +class VetoNuID : public FaserDetectorID +{ +public: + + /// @name public typedefs + //@{ + typedef Identifier::size_type size_type; + typedef std::vector<Identifier>::const_iterator const_id_iterator; + typedef MultiRange::const_identifier_factory const_expanded_id_iterator; + //@} + + /// @name strutors + //@{ + VetoNuID(void); + virtual ~VetoNuID(void) = default; + //@} + + /// @name Creators for plate ids and pmt ids + //@{ + /// For a single station + Identifier station_id ( int station ) const; + Identifier station_id ( int station, + bool checks) const; + + /// For a station from a plate id + Identifier station_id ( const Identifier& plate_id ) const; + + /// For a single plate + Identifier plate_id ( int station, + int plate ) const; + Identifier plate_id ( int station, + int plate, + bool checks) 
const; + + /// For a single plate from a pmt id + Identifier plate_id ( const Identifier& pmt_id ) const; + + /// From hash - optimized + Identifier plate_id ( IdentifierHash plate_hash ) const; + + /// For an individual pmt + Identifier pmt_id ( int station, + int plate, + int pmt ) const; + + Identifier pmt_id ( int station, + int plate, + int pmt, + bool check ) const; + + Identifier pmt_id ( const Identifier& plate_id, + int pmt ) const; + + //@} + + + /// @name Hash table maximum sizes + //@{ + size_type plate_hash_max (void) const; + size_type pmt_hash_max (void) const; + //@} + + /// @name Access to all ids + //@{ + /// Iterators over full set of ids. Plate iterator is sorted + const_id_iterator plate_begin (void) const; + const_id_iterator plate_end (void) const; + /// For pmt ids, only expanded id iterators are available. Use + /// following "pmt_id" method to obtain a compact identifier + const_expanded_id_iterator pmt_begin (void) const; + const_expanded_id_iterator pmt_end (void) const; + //@} + + + /// @name Optimized accessors - ASSUMES id IS a vetonu id, i.e. 
NOT other + //@{ + /// wafer hash from id - optimized + IdentifierHash plate_hash (Identifier plate_id) const; + + /// Values of different levels (failure returns 0) + int station (const Identifier& id) const; + int plate (const Identifier& id) const; + int pmt (const Identifier& id) const; + + /// Max/Min values for each field (-999 == failure) + int station_max (const Identifier& id) const; + int plate_max (const Identifier& id) const; + int pmt_max (const Identifier& id) const; + //@} + + /// @name module navigation + //@{ + /// Previous plate in z + int get_prev_in_z(const IdentifierHash& id, IdentifierHash& prev) const; + /// Next plate in z + int get_next_in_z(const IdentifierHash& id, IdentifierHash& next) const; + // /// Previous wafer hash in phi (return == 0 for neighbor found) + // int get_prev_in_phi (const IdentifierHash& id, IdentifierHash& prev) const; + // /// Next wafer hash in phi (return == 0 for neighbor found) + // int get_next_in_phi (const IdentifierHash& id, IdentifierHash& next) const; + // /// Previous wafer hash in eta (return == 0 for neighbor found) + // int get_prev_in_eta (const IdentifierHash& id, IdentifierHash& prev) const; + // /// Next wafer hash in eta (return == 0 for neighbor found) + // int get_next_in_eta (const IdentifierHash& id, IdentifierHash& next) const; + // /// Wafer hash on other side + // int get_other_side (const IdentifierHash& id, IdentifierHash& other) const; + + // // To check for when phi wrap around may be needed, use + // bool is_phi_module_max(const Identifier& id) const; + // /// For the barrel + // bool is_eta_module_min(const Identifier& id) const; + // /// For the barrel + // bool is_eta_module_max(const Identifier& id) const; + //@} + + /// @name contexts to distinguish plate id from pixel id + //@{ + IdContext plate_context (void) const; + IdContext pmt_context (void) const; + //@} + + /// @name methods from abstract interface - slower than opt version + //@{ + /// Create compact id from hash id 
(return == 0 for OK) + virtual int get_id (const IdentifierHash& hash_id, + Identifier& id, + const IdContext* context = 0) const; + + /// Create hash id from compact id (return == 0 for OK) + virtual int get_hash (const Identifier& id, + IdentifierHash& hash_id, + const IdContext* context = 0) const; + //@} + + /// Return the lowest bit position used in the channel id + int base_bit (void) const; + + /// Calculate a channel offset between the two identifiers. + Identifier::diff_type calc_offset(const Identifier& base, + const Identifier& target) const; + + /// Create an identifier with a given base and channel offset + Identifier pmt_id_offset(const Identifier& base, + Identifier::diff_type offset) const; + + /// @name interaction with id dictionary + //@{ + /// Create strip Identifier from expanded id, which is returned by the + /// id_iterators + Identifier pmt_id (const ExpandedIdentifier& pmt_id) const; + + /// Create expanded id from compact id (return == 0 for OK) + void get_expanded_id (const Identifier& id, + ExpandedIdentifier& exp_id, + const IdContext* context = 0) const; + + /// Initialization from the identifier dictionary + virtual int initialize_from_dictionary(const IdDictMgr& dict_mgr); + + /// Tests of packing + void test_plate_packing (void) const; + //@} + +private: + + enum {NOT_VALID_HASH = 64000}; + + typedef std::vector<Identifier> id_vec; + typedef id_vec::const_iterator id_vec_it; + typedef std::vector<unsigned short> hash_vec; + typedef hash_vec::const_iterator hash_vec_it; + + void plate_id_checks ( int station, + int plate ) const; + + void pmt_id_checks ( int station, + int plate, + int pmt ) const; + + + int initLevelsFromDict(void); + + int init_hashes(void); + + int init_neighbors(void); + + // Temporary method for adapting an identifier for the MultiRange + // check - MR is missing the InnerDetector level + // Identifier idForCheck (const Identifier& id) const; + + size_type m_vetonu_region_index; + size_type m_SCINT_INDEX; + 
size_type m_VETONU_INDEX; + size_type m_STATION_INDEX; + size_type m_PLATE_INDEX; + size_type m_PMT_INDEX; + + const IdDictDictionary* m_dict; + MultiRange m_full_plate_range; + MultiRange m_full_pmt_range; + size_type m_plate_hash_max; + size_type m_pmt_hash_max; + // Range::field m_barrel_field; + id_vec m_plate_vec; + hash_vec m_prev_z_plate_vec; + hash_vec m_next_z_plate_vec; + // hash_vec m_prev_phi_wafer_vec; + // hash_vec m_next_phi_wafer_vec; + // hash_vec m_prev_eta_wafer_vec; + // hash_vec m_next_eta_wafer_vec; + // bool m_hasRows ; + + IdDictFieldImplementation m_scint_impl ; + IdDictFieldImplementation m_vetonu_impl ; + IdDictFieldImplementation m_station_impl ; + IdDictFieldImplementation m_plate_impl ; + IdDictFieldImplementation m_pmt_impl ; +}; + + +//<<<<<< INLINE PUBLIC FUNCTIONS >>>>>> + +///////////////////////////////////////////////////////////////////////////// +//<<<<<< INLINE MEMBER FUNCTIONS >>>>>> +///////////////////////////////////////////////////////////////////////////// + +//using the macros below we can assign an identifier (and a version) +//This is required and checked at compile time when you try to record/retrieve +CLASS_DEF(VetoNuID, 247779284, 1) + +//---------------------------------------------------------------------------- +inline Identifier +VetoNuID::station_id ( int station, + bool checks) const +{ + + // Build identifier + Identifier result((Identifier::value_type)0); + + // Pack fields independently + m_scint_impl.pack (scint_field_value(), result); + m_vetonu_impl.pack (vetonu_field_value(), result); + m_station_impl.pack (station, result); + // Do checks + if(checks) + { + plate_id_checks ( station, 0 ); + } + + return result; +} + +inline Identifier +VetoNuID::station_id ( int station ) const +{ + return station_id (station, do_checks()); +} + +//---------------------------------------------------------------------------- +inline Identifier +VetoNuID::station_id ( const Identifier& plate_id ) const +{ + Identifier 
result(plate_id); + // Reset the plate and pmt fields + m_plate_impl.reset(result); + m_pmt_impl.reset(result); + return (result); +} + +//---------------------------------------------------------------------------- +inline Identifier +VetoNuID::plate_id ( int station, + int plate, + bool checks) const +{ + // Build identifier + Identifier result((Identifier::value_type)0); + + // Pack fields independently + m_scint_impl.pack (scint_field_value(), result); + m_vetonu_impl.pack (vetonu_field_value(), result); + m_station_impl.pack (station, result); + m_plate_impl.pack (plate, result); + + // Do checks + if(checks) + { + plate_id_checks ( station, plate ); + } + return result; +} + +inline Identifier +VetoNuID::plate_id ( int station, + int plate ) const +{ + return plate_id (station, plate, do_checks()); +} + +//---------------------------------------------------------------------------- +inline Identifier +VetoNuID::plate_id ( const Identifier& pmt_id ) const +{ + Identifier result(pmt_id); + // reset the pmt field + m_pmt_impl.reset(result); + return (result); +} + +//---------------------------------------------------------------------------- +inline Identifier VetoNuID::plate_id ( IdentifierHash plate_hash ) const +{ + return (m_plate_vec[plate_hash]); +} + +//---------------------------------------------------------------------------- +inline IdentifierHash VetoNuID::plate_hash (Identifier plate_id) const +{ + // MsgStream log(m_msgSvc, "VetoNuID"); + // log << MSG::VERBOSE << "m_plate_vec size: " << m_plate_vec.size() << endmsg; + // log << MSG::VERBOSE << "input id = " << plate_id << endmsg; + // for (size_t i = 0; i < m_plate_vec.size(); i++) + // { + // log << MSG::VERBOSE << "Hash = " << i << " : ID = " << m_plate_vec[i] << endmsg; + // } + id_vec_it it = std::lower_bound(m_plate_vec.begin(), + m_plate_vec.end(), + plate_id); + // Require that plate_id matches the one in vector + if (it != m_plate_vec.end() && plate_id == (*it)) { + return (it - 
m_plate_vec.begin()); + } + IdentifierHash result; + return (result); // return hash in invalid state +} + +//---------------------------------------------------------------------------- +inline Identifier +VetoNuID::pmt_id ( int station, + int plate, + int pmt, + bool checks) const +{ + // Build identifier + Identifier result((Identifier::value_type)0); + + // Pack fields independently + m_scint_impl.pack (scint_field_value(), result); + m_vetonu_impl.pack (vetonu_field_value(),result); + m_station_impl.pack (station, result); + m_plate_impl.pack (plate, result); + m_pmt_impl.pack (pmt, result); + + // Do checks + if(checks) { + pmt_id_checks ( station, plate, pmt ); + } + return result; +} + +inline Identifier +VetoNuID::pmt_id ( int station, + int plate, + int pmt ) const +{ + return pmt_id (station, plate, pmt, do_checks()); +} + +//---------------------------------------------------------------------------- +inline Identifier +VetoNuID::pmt_id (const ExpandedIdentifier& id) const +{ + // Build identifier + Identifier result((Identifier::value_type)0); + + // Pack fields independently + m_scint_impl.pack (scint_field_value(), result); + m_vetonu_impl.pack (vetonu_field_value(), result); + m_station_impl.pack (id[m_STATION_INDEX], result); + m_plate_impl.pack (id[m_PLATE_INDEX], result); + m_pmt_impl.pack (id[m_PMT_INDEX], result); + + // Do checks + if(m_do_checks) + { + pmt_id_checks ( id[m_STATION_INDEX], + id[m_PLATE_INDEX], + id[m_PMT_INDEX]); + } + return result; +} + +//---------------------------------------------------------------------------- +inline Identifier +VetoNuID::pmt_id ( const Identifier& plate_id, int pmt ) const +{ + // Build identifier + Identifier result(plate_id); + + // Reset strip and then add in value + m_pmt_impl.reset (result); + m_pmt_impl.pack (pmt, result); + + if(m_do_checks) + { + pmt_id_checks ( station(result), + plate(result), + pmt ); + } + return result; +} + 
+//---------------------------------------------------------------------------- +inline Identifier::diff_type +VetoNuID::calc_offset(const Identifier& base, const Identifier& target) const +{ + Identifier::diff_type tval = static_cast<Identifier::diff_type>(target.get_compact() >> base_bit()); + Identifier::diff_type bval = static_cast<Identifier::diff_type>(base.get_compact() >> base_bit()); + return (tval - bval); +} + +//---------------------------------------------------------------------------- +inline Identifier +VetoNuID::pmt_id_offset(const Identifier& base, + Identifier::diff_type offset) const +{ + Identifier::value_type bval = base.get_compact() >> base_bit(); + return Identifier((bval + offset) << base_bit()); +} + +//---------------------------------------------------------------------------- +inline int +VetoNuID::base_bit ( void ) const +{ + int base = static_cast<int>(m_pmt_impl.shift()); // lowest field base + return (base > 32) ? 32 : base; + // max base is 32 so we can still read old strip id's and differences + // from non-SLHC releases. 
+} + +//---------------------------------------------------------------------------- +inline IdContext +VetoNuID::plate_context (void) const +{ + ExpandedIdentifier id; + return (IdContext(id, 0, m_PLATE_INDEX)); +} + +//---------------------------------------------------------------------------- +inline IdContext +VetoNuID::pmt_context (void) const +{ + ExpandedIdentifier id; + return (IdContext(id, 0, m_PMT_INDEX)); +} + +//---------------------------------------------------------------------------- +inline int +VetoNuID::station (const Identifier& id) const +{ + return (m_station_impl.unpack(id)); +} + +//---------------------------------------------------------------------------- +inline int +VetoNuID::plate (const Identifier& id) const +{ + return (m_plate_impl.unpack(id)); +} + +//---------------------------------------------------------------------------- +inline int +VetoNuID::pmt (const Identifier& id) const +{ + return (m_pmt_impl.unpack(id)); +} + + +#endif // SCINTIDENTIFIER_VETONUID_H diff --git a/Scintillator/ScintDetDescr/ScintIdentifier/ScintIdentifier/selection.xml b/Scintillator/ScintDetDescr/ScintIdentifier/ScintIdentifier/selection.xml index 543856daa22bebedab8c75cff790f07b7f7468f5..bebeff9ec46d1ef9554a176ba64038033469d21b 100644 --- a/Scintillator/ScintDetDescr/ScintIdentifier/ScintIdentifier/selection.xml +++ b/Scintillator/ScintDetDescr/ScintIdentifier/ScintIdentifier/selection.xml @@ -3,6 +3,7 @@ <class name="ScintillatorID" /> --> <class name="VetoID" /> + <class name="VetoNuID" /> <class name="TriggerID" /> <class name="PreshowerID" /> </lcgdict> diff --git a/Scintillator/ScintDetDescr/ScintIdentifier/src/VetoNuID.cxx b/Scintillator/ScintDetDescr/ScintIdentifier/src/VetoNuID.cxx new file mode 100644 index 0000000000000000000000000000000000000000..837b3c892da1ec7a338bd1e6a7c0da18d9eeeeca --- /dev/null +++ b/Scintillator/ScintDetDescr/ScintIdentifier/src/VetoNuID.cxx @@ -0,0 +1,1030 @@ +/* + Copyright (C) 2002-2019 CERN for the benefit of 
the ATLAS collaboration +*/ + +/*************************************************************************** + Scintillator identifier package + ------------------------------------------- +***************************************************************************/ + +//<<<<<< INCLUDES >>>>>> +#include "GaudiKernel/MsgStream.h" + +#include "ScintIdentifier/VetoNuID.h" +#include "Identifier/IdentifierHash.h" +#include "IdDict/IdDictDefs.h" +#include <set> +#include <algorithm> +#include <iostream> + +//<<<<<< PRIVATE DEFINES >>>>>> +//<<<<<< PRIVATE CONSTANTS >>>>>> +//<<<<<< PRIVATE TYPES >>>>>> +//<<<<<< PRIVATE VARIABLE DEFINITIONS >>>>>> +//<<<<<< PUBLIC VARIABLE DEFINITIONS >>>>>> +//<<<<<< CLASS STRUCTURE INITIALIZATION >>>>>> +//<<<<<< PRIVATE FUNCTION DEFINITIONS >>>>>> +//<<<<<< PUBLIC FUNCTION DEFINITIONS >>>>>> +//<<<<<< MEMBER FUNCTION DEFINITIONS >>>>>> + + +///////////////////////////////////////////////////////////////////////////// +///////////////////////////////////////////////////////////////////////////// +///////////////////////////////////////////////////////////////////////////// + + +VetoNuID::VetoNuID(void) + : + m_vetonu_region_index(0), + m_SCINT_INDEX(0), + m_VETONU_INDEX(1), + m_STATION_INDEX(2), + m_PLATE_INDEX(3), + m_PMT_INDEX(4), + m_dict(0), + m_plate_hash_max(0), + m_pmt_hash_max(0) +{ +} + +void +VetoNuID::plate_id_checks ( int station, + int plate ) const +{ + + // Check that id is within allowed range + + // Fill expanded id + ExpandedIdentifier id; + id << scint_field_value() << vetonu_field_value() + << station << plate; + + if (!m_full_plate_range.match(id)) { // module range check is sufficient + MsgStream log(m_msgSvc, "VetoNuID"); + log << MSG::ERROR << " VetoNuID::plate_id result is NOT ok. 
ID, range " + << (std::string)id << " " << (std::string)m_full_plate_range << endmsg; + } +} + +void +VetoNuID::pmt_id_checks ( int station, + int plate, + int pmt) const +{ + + // Check that id is within allowed range + + // Fill expanded id + ExpandedIdentifier id; + id << scint_field_value() << vetonu_field_value() + << station << plate << pmt; + + if (!m_full_pmt_range.match(id)) { + MsgStream log(m_msgSvc, "VetoNuID"); + log << MSG::ERROR << " VetoNuID::pmt_id result is NOT ok. ID, range " + << (std::string)id << " " << (std::string)m_full_pmt_range << std::endl; + } +} + +int +VetoNuID::station_max(const Identifier& id) const +{ + // get max from dictionary + ExpandedIdentifier expId; + IdContext plate_context1 = plate_context(); + get_expanded_id(id, expId, &plate_context1); + for (unsigned int i = 0; i < m_full_plate_range.size(); ++i) { + const Range& range = m_full_plate_range[i]; + if (range.match(expId)) { + const Range::field& station_field = range[m_STATION_INDEX]; + if (station_field.has_maximum()) { + return (station_field.get_maximum()); + } + } + } + return (-999); // default +} + +int +VetoNuID::pmt_max (const Identifier& id) const +{ + ExpandedIdentifier expId; + IdContext station_context(expId, 0, m_STATION_INDEX); + get_expanded_id(id, expId, &station_context); + int result = -999; + for (unsigned int i = 0; i < m_full_pmt_range.size(); ++i) { + const Range& range = m_full_pmt_range[i]; + if (range.match(expId)) { + const Range::field& pmt_field = range[m_PMT_INDEX]; + if (pmt_field.has_maximum()) { + int pmt = pmt_field.get_maximum(); + if (result < pmt) result = pmt; + } + } + } + return (result); +} + +int +VetoNuID::plate_max(const Identifier& id) const +{ + // get max from dictionary + ExpandedIdentifier expId; + IdContext plate_context1 = plate_context(); + get_expanded_id(id, expId, &plate_context1); + for (unsigned int i = 0; i < m_full_plate_range.size(); ++i) { + const Range& range = m_full_plate_range[i]; + if (range.match(expId)) { 
+ const Range::field& plate_field = range[m_PLATE_INDEX]; + if (plate_field.has_maximum()) { + return (plate_field.get_maximum()); + } + } + } + return -1; +} + +int +VetoNuID::initialize_from_dictionary(const IdDictMgr& dict_mgr) +{ + MsgStream log(m_msgSvc, "VetoNuID"); + log << MSG::INFO << "Initialize from dictionary" << endmsg; + + // Check whether this helper should be reinitialized + if (!reinitialize(dict_mgr)) { + log << MSG::INFO << "Request to reinitialize not satisfied - tags have not changed" << endmsg; + return (0); + } + else { + if (m_msgSvc) { + log << MSG::DEBUG << "(Re)initialize" << endmsg; + } + else { + std::cout << " DEBUG (Re)initialize" << std::endl; + } + } + + // init base object + if(FaserDetectorID::initialize_from_dictionary(dict_mgr)) return (1); + + // Register version of InnerDetector dictionary + if (register_dict_tag(dict_mgr, "Scintillator")) return(1); + + m_dict = dict_mgr.find_dictionary ("Scintillator"); + if(!m_dict) { + log << MSG::ERROR << " VetoNuID::initialize_from_dict - cannot access Scintillator dictionary " << endmsg; + return 1; + } + + // Initialize the field indices + if(initLevelsFromDict()) return (1); + + // + // Build multirange for the valid set of identifiers + // + + + // Find value for the field Scintillator + const IdDictDictionary* faserDict = dict_mgr.find_dictionary ("FASER"); + int scintField = -1; + if (faserDict->get_label_value("subdet", "Scintillator", scintField)) { + log << MSG::ERROR << "Could not get value for label 'Scintillator' of field 'subdet' in dictionary " + << faserDict->m_name + << endmsg; + return (1); + } + + // Find value for the field VetoNu + int vetonuField = -1; + if (m_dict->get_label_value("part", "VetoNu", vetonuField)) { + log << MSG::ERROR << "Could not get value for label 'VetoNu' of field 'part' in dictionary " + << m_dict->m_name + << endmsg; + return (1); + } + if (m_msgSvc) { + log << MSG::DEBUG << " VetoNuID::initialize_from_dict " + << "Found field values: VetoNu " 
+        << vetonuField
+        << endmsg;   // was std::endl: a MsgStream record must be closed with endmsg
+  }
+  else {
+    std::cout << " DEBUG VetoNuID::initialize_from_dict "
+              << "Found field values: VetoNu "
+              << vetonuField
+              << std::endl;
+  }
+
+  // Set up id for region and range prefix
+  ExpandedIdentifier region_id;
+  region_id.add(scintField);
+  region_id.add(vetonuField);
+  Range prefix;
+  m_full_plate_range = m_dict->build_multirange(region_id, prefix, "plate");
+  m_full_pmt_range = m_dict->build_multirange(region_id, prefix);
+
+  // Setup the hash tables
+  if(init_hashes()) return (1);
+
+  // Setup hash tables for finding neighbors
+  if(init_neighbors()) return (1);
+
+  if (m_msgSvc) {
+    log << MSG::INFO << " VetoNuID::initialize_from_dict " << endmsg;
+    log << MSG::DEBUG
+        << "Plate range -> " << (std::string)m_full_plate_range
+        << endmsg;
+    log << MSG::DEBUG
+        << "Pmt range -> " << (std::string)m_full_pmt_range
+        << endmsg;
+  }
+  else {
+    std::cout << " INFO VetoNuID::initialize_from_dict " << std::endl;
+    std::cout << " DEBUG Plate range -> " << (std::string)m_full_plate_range
+              << std::endl;
+    std::cout << " DEBUG Pmt range -> " << (std::string)m_full_pmt_range
+              << std::endl;
+  }
+
+  return 0;
+}
+
+int
+VetoNuID::init_hashes(void)
+{
+
+  //
+  // create a vector(s) to retrieve the hashes for compact ids.
For + // the moment, we implement a hash for plates but NOT for pmts + // + MsgStream log(m_msgSvc, "VetoNuID"); + // plate hash + m_plate_hash_max = m_full_plate_range.cardinality(); + m_plate_vec.resize(m_plate_hash_max); + unsigned int nids = 0; + std::set<Identifier> ids; + for (unsigned int i = 0; i < m_full_plate_range.size(); ++i) { + const Range& range = m_full_plate_range[i]; + Range::const_identifier_factory first = range.factory_begin(); + Range::const_identifier_factory last = range.factory_end(); + for (; first != last; ++first) { + const ExpandedIdentifier& exp_id = (*first); + Identifier id = plate_id(exp_id[m_STATION_INDEX], + exp_id[m_PLATE_INDEX]); + if(!(ids.insert(id)).second) { + log << MSG::ERROR << " VetoNuID::init_hashes " + << " Error: duplicated id for plate id. nid " << nids + << " compact id " << id.getString() + << " id " << (std::string)exp_id << endmsg; + return (1); + } + nids++; + } + } + if(ids.size() != m_plate_hash_max) { + log << MSG::ERROR << " VetoNuID::init_hashes " + << " Error: set size NOT EQUAL to hash max. 
size " << ids.size() + << " hash max " << m_plate_hash_max + << endmsg; + return (1); + } + + nids = 0; + std::set<Identifier>::const_iterator first = ids.begin(); + std::set<Identifier>::const_iterator last = ids.end(); + for (; first != last && nids < m_plate_vec.size(); ++first) { + m_plate_vec[nids] = (*first); + nids++; + } + + // pmt hash - we do not keep a vec for the pmts + m_pmt_hash_max = m_full_pmt_range.cardinality(); + + return (0); +} + + int + VetoNuID::get_prev_in_z(const IdentifierHash& id, IdentifierHash& prev) const + { + unsigned short index = id; + if (index < m_prev_z_plate_vec.size()) + { + if (m_prev_z_plate_vec[index] == NOT_VALID_HASH) return (1); + prev = m_prev_z_plate_vec[index]; + return (0); + } + return (1); + } + + int + VetoNuID::get_next_in_z(const IdentifierHash& id, IdentifierHash& next) const + { + unsigned short index = id; + if (index < m_next_z_plate_vec.size()) + { + if (m_next_z_plate_vec[index] == NOT_VALID_HASH) return (1); + next = m_next_z_plate_vec[index]; + return (0); + } + return (1); + } + +// int +// VetoNuID::get_prev_in_phi(const IdentifierHash& id, IdentifierHash& prev) const +// { +// unsigned short index = id; +// if (index < m_prev_phi_wafer_vec.size()) { +// if (m_prev_phi_wafer_vec[index] == NOT_VALID_HASH) return (1); +// prev = m_prev_phi_wafer_vec[index]; +// return (0); +// } +// return (1); +// } + +// int +// VetoNuID::get_next_in_phi(const IdentifierHash& id, IdentifierHash& next) const +// { +// unsigned short index = id; +// if (index < m_next_phi_wafer_vec.size()) { +// if (m_next_phi_wafer_vec[index] == NOT_VALID_HASH) return (1); +// next = m_next_phi_wafer_vec[index]; +// return (0); +// } +// return (1); +// } + +// int +// VetoNuID::get_prev_in_eta(const IdentifierHash& id, IdentifierHash& prev) const +// { +// unsigned short index = id; +// if (index < m_prev_eta_wafer_vec.size()) { +// if (m_prev_eta_wafer_vec[index] == NOT_VALID_HASH) return (1); +// prev = m_prev_eta_wafer_vec[index]; 
+// return (0); +// } +// return (1); +// } + +// int +// VetoNuID::get_next_in_eta(const IdentifierHash& id, IdentifierHash& next) const +// { +// unsigned short index = id; +// if (index < m_next_eta_wafer_vec.size()) { +// if (m_next_eta_wafer_vec[index] == NOT_VALID_HASH) return (1); +// next = m_next_eta_wafer_vec[index]; +// return (0); +// } +// return (1); +// } + +// int +// VetoNuID::get_other_side (const IdentifierHash& hashId, IdentifierHash& other) const +// { +// if (m_dict) { +// // get max from dictionary +// Identifier id; +// IdContext wafer_context1 = wafer_context(); +// if(!get_id(hashId, id, &wafer_context1)) { +// other = side(id) ? hashId - 1 : hashId + 1; +// return (0); +// } +// } +// return (1); +// } + +int +VetoNuID::init_neighbors(void) +{ + // + // create a vector(s) to retrieve the hashes for compact ids for + // plate neighbors. + // + MsgStream log(m_msgSvc, "VetoNuID"); + + m_prev_z_plate_vec.resize(m_plate_hash_max, NOT_VALID_HASH); + m_next_z_plate_vec.resize(m_plate_hash_max, NOT_VALID_HASH); + for (unsigned int i = 0; i < m_full_plate_range.size(); i++) + { + const Range& range = m_full_plate_range[i]; + const Range::field& station_field = range[m_STATION_INDEX]; + const Range::field& plate_field = range[m_PLATE_INDEX]; + Range::const_identifier_factory first = range.factory_begin(); + Range::const_identifier_factory last = range.factory_end(); + for (; first != last; ++first) + { + const ExpandedIdentifier& exp_id = (*first); + ExpandedIdentifier::element_type previous_plate; + ExpandedIdentifier::element_type next_plate; + ExpandedIdentifier::element_type previous_station; + ExpandedIdentifier::element_type next_station; + bool pplate = plate_field.get_previous(exp_id[m_PLATE_INDEX], previous_plate); + bool nplate = plate_field.get_next (exp_id[m_PLATE_INDEX], next_plate); + bool pstation = station_field.get_previous(exp_id[m_STATION_INDEX], previous_station); + bool nstation = station_field.get_next 
(exp_id[m_STATION_INDEX], next_station); + + IdContext pcontext = plate_context(); + + IdentifierHash hash_id; + Identifier originalId = plate_id(exp_id[m_STATION_INDEX], + exp_id[m_PLATE_INDEX]); + + if (get_hash(originalId, hash_id, &pcontext)) + { + log << MSG::ERROR << " VetoNuID::init_neighbors - unable to get hash, exp/compact " + << show_to_string(originalId, &pcontext) + << " " << (std::string)m_full_plate_range << endmsg; + return (1); + } + + // index for the subsequent arrays + unsigned short index = hash_id; + assert (hash_id < m_prev_z_plate_vec.size()); + assert (hash_id < m_next_z_plate_vec.size()); + + if (pplate) { + // Get previous plate hash id + ExpandedIdentifier expId = exp_id; + expId[m_PLATE_INDEX] = previous_plate; + Identifier id = plate_id(expId[m_STATION_INDEX], + expId[m_PLATE_INDEX]); + + if (get_hash(id, hash_id, &pcontext)) { + log << MSG::ERROR << " VetoNuID::init_neighbors - unable to get previous plate hash, exp/compact " << id.getString() << " " + << endmsg; + return (1); + } + m_prev_z_plate_vec[index] = hash_id; + } + else if (pstation) + { + ExpandedIdentifier expId = exp_id; + expId[m_STATION_INDEX] = previous_station; + ExpandedIdentifier stationId; + stationId.add(expId[m_SCINT_INDEX]); + stationId.add(expId[m_VETONU_INDEX]); + stationId.add(previous_station); + Range prefix; + MultiRange stationPlateRange = m_dict->build_multirange(stationId, prefix, "plate"); + const Range::field& upstream_plate_field = range[m_PLATE_INDEX]; + if (upstream_plate_field.has_maximum()) + { + expId[m_PLATE_INDEX] = upstream_plate_field.get_maximum(); + Identifier id = plate_id(expId[m_STATION_INDEX], + expId[m_PLATE_INDEX]); + if (get_hash(id, hash_id, &pcontext)) { + log << MSG::ERROR << " VetoNuID::init_neighbors - unable to get last plate hash from previous station, exp/compact " << id.getString() << " " + << endmsg; + return (1); + } + m_prev_z_plate_vec[index] = hash_id; + } + else + { + log << MSG::ERROR << "VetoNuID::init_neighbors - 
unable to get plate_max for previous station, exp/compact " << originalId.getString() << " " + << endmsg; + return (1); + } + } + + if (nplate) { + // Get next plate hash id + ExpandedIdentifier expId = exp_id; + expId[m_PLATE_INDEX] = next_plate; + Identifier id = plate_id(expId[m_STATION_INDEX], + expId[m_PLATE_INDEX]); + + if (get_hash(id, hash_id, &pcontext)) { + log << MSG::ERROR << " VetoNuID::init_neighbors - unable to get next plate hash, exp/compact " << id.getString() << " " + << endmsg; + return (1); + } + m_next_z_plate_vec[index] = hash_id; + } + else if (nstation) + { + ExpandedIdentifier expId = exp_id; + expId[m_STATION_INDEX] = next_station; + ExpandedIdentifier stationId; + stationId.add(expId[m_SCINT_INDEX]); + stationId.add(expId[m_VETONU_INDEX]); + stationId.add(next_station); + Range prefix; + MultiRange stationPlateRange = m_dict->build_multirange(stationId, prefix, "plate"); + const Range::field& downstream_plate_field = range[m_PLATE_INDEX]; + if (downstream_plate_field.has_minimum()) + { + expId[m_PLATE_INDEX] = downstream_plate_field.get_minimum(); + Identifier id = plate_id(expId[m_STATION_INDEX], + expId[m_PLATE_INDEX]); + if (get_hash(id, hash_id, &pcontext)) { + log << MSG::ERROR << " VetoNuID::init_neighbors - unable to get previous plate hash from next station, exp/compact " << id.getString() << " " + << endmsg; + return (1); + } + m_next_z_plate_vec[index] = hash_id; + } + else + { + log << MSG::ERROR << "VetoNuID::init_neighbors - unable to get plate_min for next station, exp/compact " << originalId.getString() << " " + << endmsg; + return (1); + } + } + + } + } + + // m_prev_phi_wafer_vec.resize(m_wafer_hash_max, NOT_VALID_HASH); + // m_next_phi_wafer_vec.resize(m_wafer_hash_max, NOT_VALID_HASH); + // m_prev_eta_wafer_vec.resize(m_wafer_hash_max, NOT_VALID_HASH); + // m_next_eta_wafer_vec.resize(m_wafer_hash_max, NOT_VALID_HASH); + + // for (unsigned int i = 0; i < m_full_wafer_range.size(); ++i) { + // const Range& range = 
m_full_wafer_range[i]; + // const Range::field& phi_field = range[m_PHI_MODULE_INDEX]; + // const Range::field& eta_field = range[m_ETA_MODULE_INDEX]; + // Range::const_identifier_factory first = range.factory_begin(); + // Range::const_identifier_factory last = range.factory_end(); + // for (; first != last; ++first) { + // const ExpandedIdentifier& exp_id = (*first); + // ExpandedIdentifier::element_type previous_phi; + // ExpandedIdentifier::element_type next_phi; + // ExpandedIdentifier::element_type previous_eta; + // ExpandedIdentifier::element_type next_eta; + // bool pphi = phi_field.get_previous(exp_id[m_PHI_MODULE_INDEX], previous_phi); + // bool nphi = phi_field.get_next (exp_id[m_PHI_MODULE_INDEX], next_phi); + // bool peta = eta_field.get_previous(exp_id[m_ETA_MODULE_INDEX], previous_eta); + // bool neta = eta_field.get_next (exp_id[m_ETA_MODULE_INDEX], next_eta); + + // IdContext wcontext = wafer_context(); + + // // First get primary hash id + // IdentifierHash hash_id; + // Identifier id = wafer_id(exp_id[m_BARREL_EC_INDEX], + // exp_id[m_LAYER_DISK_INDEX], + // exp_id[m_PHI_MODULE_INDEX], + // exp_id[m_ETA_MODULE_INDEX], + // exp_id[m_SIDE_INDEX]); + // if (get_hash(id, hash_id, &wcontext)) { + // log << MSG::ERROR << " VetoNuID::init_neighbors - unable to get hash, exp/compact " + // << show_to_string(id, &wcontext) + // << " " << (std::string)m_full_wafer_range << endmsg; + // return (1); + // } + + // // index for the subsequent arrays + // unsigned short index = hash_id; + // assert (hash_id < m_prev_phi_wafer_vec.size()); + // assert (hash_id < m_next_phi_wafer_vec.size()); + // assert (hash_id < m_prev_eta_wafer_vec.size()); + // assert (hash_id < m_next_eta_wafer_vec.size()); + + // if (pphi) { + // // Get previous phi hash id + // ExpandedIdentifier expId = exp_id; + // expId[m_PHI_MODULE_INDEX] = previous_phi; + // Identifier id = wafer_id(expId[m_BARREL_EC_INDEX], + // expId[m_LAYER_DISK_INDEX], + // expId[m_PHI_MODULE_INDEX], + // 
expId[m_ETA_MODULE_INDEX], + // expId[m_SIDE_INDEX]); + // if (get_hash(id, hash_id, &wcontext)) { + // log << MSG::ERROR << " VetoNuID::init_neighbors - unable to get previous phi hash, exp/compact " << id.getString() << " " + // << endmsg; + // return (1); + // } + // m_prev_phi_wafer_vec[index] = hash_id; + // } + + // if (nphi) { + // // Get next phi hash id + // ExpandedIdentifier expId = exp_id; + // expId[m_PHI_MODULE_INDEX] = next_phi; + // Identifier id = wafer_id(expId[m_BARREL_EC_INDEX], + // expId[m_LAYER_DISK_INDEX], + // expId[m_PHI_MODULE_INDEX], + // expId[m_ETA_MODULE_INDEX], + // expId[m_SIDE_INDEX]); + // if (get_hash(id, hash_id, &wcontext)) { + // log << MSG::ERROR << " VetoNuID::init_neighbors - unable to get next phi hash, exp/compact " << id.getString() << + // " " << MSG::hex << id.getString() << MSG::dec << endmsg; + // return (1); + // } + // m_next_phi_wafer_vec[index] = hash_id; + // } + + // if (peta) { + // // Get previous eta hash id + // ExpandedIdentifier expId = exp_id; + // expId[m_ETA_MODULE_INDEX] = previous_eta; + // Identifier id = wafer_id(expId[m_BARREL_EC_INDEX], + // expId[m_LAYER_DISK_INDEX], + // expId[m_PHI_MODULE_INDEX], + // expId[m_ETA_MODULE_INDEX], + // expId[m_SIDE_INDEX]); + // if (get_hash(id, hash_id, &wcontext)) { + // log << MSG::ERROR << " VetoNuID::init_neighbors - unable to get previous eta hash, exp/compact " << id.getString() + // << " " << std::endl; + // return (1); + // } + // m_prev_eta_wafer_vec[index] = hash_id; + // } + + // if (neta) { + // // Get next eta hash id + // ExpandedIdentifier expId = exp_id; + // expId[m_ETA_MODULE_INDEX] = next_eta; + // Identifier id = wafer_id(expId[m_BARREL_EC_INDEX], + // expId[m_LAYER_DISK_INDEX], + // expId[m_PHI_MODULE_INDEX], + // expId[m_ETA_MODULE_INDEX], + // expId[m_SIDE_INDEX]); + // if (get_hash(id, hash_id, &wcontext)) { + // log << MSG::ERROR << " VetoNuID::init_neighbors - unable to get next eta hash, exp/compact " << id.getString() + // << " " << 
endmsg; + // return (1); + // } + // m_next_eta_wafer_vec[index] = hash_id; + // } + + +// std::cout << " VetoNuID::init_neighbors " +// << " phi, previous, next " << id[m_PHI_MODULE_INDEX] +// << " " << pphi +// << " " << previous_phi +// << " " << nphi +// << " " << next_phi +// << " eta, previous, next " << id[m_ETA_MODULE_INDEX] +// << " " << peta +// << " " << previous_eta +// << " " << neta +// << " " << next_eta +// << " id " << (std::string)(*first) +// << std::endl; + // } + // } + return (0); +} + + + +int +VetoNuID::initLevelsFromDict() +{ + + + MsgStream log(m_msgSvc, "VetoNuID"); + if(!m_dict) { + log << MSG::ERROR << " VetoNuID::initLevelsFromDict - dictionary NOT initialized " << endmsg; + return (1); + } + + // Find out which identifier field corresponds to each level. Use + // names to find each field/leve. + + m_SCINT_INDEX = 999; + m_VETONU_INDEX = 999; + m_STATION_INDEX = 999; + m_PLATE_INDEX = 999; + m_PMT_INDEX = 999; + + // Save index to a VetoNu region for unpacking + ExpandedIdentifier id; + id << scint_field_value() << vetonu_field_value(); + if (m_dict->find_region(id, m_vetonu_region_index)) { + log << MSG::ERROR << "VetoNuID::initLevelsFromDict - unable to find vetonu region index: id, reg " + << (std::string)id << " " << m_vetonu_region_index + << endmsg; + return (1); + } + + // Find a VetoNu region + IdDictField* field = m_dict->find_field("subdet"); + if (field) { + m_SCINT_INDEX = field->m_index; + } + else { + log << MSG::ERROR << "VetoNuID::initLevelsFromDict - unable to find 'subdet' field " << endmsg; + return (1); + } + field = m_dict->find_field("part"); + if (field) { + m_VETONU_INDEX = field->m_index; + } + else { + log << MSG::ERROR << "VetoNuID::initLevelsFromDict - unable to find 'part' field " << endmsg; + return (1); + } + field = m_dict->find_field("station"); + if (field) { + m_STATION_INDEX = field->m_index; + } + else { + log << MSG::ERROR << "VetoNuID::initLevelsFromDict - unable to find 'station' field " << 
endmsg; + return (1); + } + field = m_dict->find_field("plate"); + if (field) { + m_PLATE_INDEX = field->m_index; + } + else { + log << MSG::ERROR<< "VetoNuID::initLevelsFromDict - unable to find 'plate' field " << endmsg; + return (1); + } + field = m_dict->find_field("pmt"); + if (field) { + m_PMT_INDEX = field->m_index; + } + else { + log << MSG::ERROR << "VetoNuID::initLevelsFromDict - unable to find 'pmt' field " << endmsg; + return (1); + } + + // Set the field implementations + + const IdDictRegion& region = *m_dict->m_regions[m_vetonu_region_index]; + + m_scint_impl = region.m_implementation[m_SCINT_INDEX]; + m_vetonu_impl = region.m_implementation[m_VETONU_INDEX]; + m_station_impl = region.m_implementation[m_STATION_INDEX]; + m_plate_impl = region.m_implementation[m_PLATE_INDEX]; + m_pmt_impl = region.m_implementation[m_PMT_INDEX]; + + if (m_msgSvc) { + log << MSG::DEBUG << "decode index and bit fields for each level: " << endmsg; + log << MSG::DEBUG << "scint " << m_scint_impl.show_to_string() << endmsg; + log << MSG::DEBUG << "vetonu " << m_vetonu_impl.show_to_string() << endmsg; + log << MSG::DEBUG << "station " << m_station_impl.show_to_string() << endmsg; + log << MSG::DEBUG << "plate " << m_plate_impl.show_to_string() << endmsg; + log << MSG::DEBUG << "pmt " << m_pmt_impl.show_to_string() << endmsg; + } + else { + std::cout << " DEBUG decode index and bit fields for each level: " << std::endl; + std::cout << " DEBUG scint " << m_scint_impl.show_to_string() << std::endl; + std::cout << " DEBUG vetonu " << m_vetonu_impl.show_to_string() << std::endl; + std::cout << " DEBUG station " << m_station_impl.show_to_string() << std::endl; + std::cout << " DEBUG plate " << m_plate_impl.show_to_string() << std::endl; + std::cout << " DEBUG pmt " << m_pmt_impl.show_to_string() << std::endl; + } + + std::cout << "scint " << m_scint_impl.decode_index() << " " + << (std::string)m_scint_impl.ored_field() << " " + << std::hex << m_scint_impl.mask() << " " + << 
m_scint_impl.zeroing_mask() << " " + << std::dec << m_scint_impl.shift() << " " + << m_scint_impl.bits() << " " + << m_scint_impl.bits_offset() + << std::endl; + std::cout << "vetonu" << m_vetonu_impl.decode_index() << " " + << (std::string)m_vetonu_impl.ored_field() << " " + << std::hex << m_vetonu_impl.mask() << " " + << m_vetonu_impl.zeroing_mask() << " " + << std::dec << m_vetonu_impl.shift() << " " + << m_vetonu_impl.bits() << " " + << m_vetonu_impl.bits_offset() + << std::endl; + std::cout << "station"<< m_station_impl.decode_index() << " " + << (std::string)m_station_impl.ored_field() << " " + << std::hex << m_station_impl.mask() << " " + << m_station_impl.zeroing_mask() << " " + << std::dec << m_station_impl.shift() << " " + << m_station_impl.bits() << " " + << m_station_impl.bits_offset() + << std::endl; + std::cout << "plate" << m_plate_impl.decode_index() << " " + << (std::string)m_plate_impl.ored_field() << " " + << std::hex << m_plate_impl.mask() << " " + << m_plate_impl.zeroing_mask() << " " + << std::dec << m_plate_impl.shift() << " " + << m_plate_impl.bits() << " " + << m_plate_impl.bits_offset() + << std::endl; + std::cout << "pmt" << m_pmt_impl.decode_index() << " " + << (std::string)m_pmt_impl.ored_field() << " " + << std::hex << m_pmt_impl.mask() << " " + << m_pmt_impl.zeroing_mask() << " " + << std::dec << m_pmt_impl.shift() << " " + << m_pmt_impl.bits() << " " + << m_pmt_impl.bits_offset() + << std::endl; + + return (0); +} + +VetoNuID::size_type +VetoNuID::plate_hash_max (void) const +{ + return m_plate_hash_max; +} + +VetoNuID::size_type +VetoNuID::pmt_hash_max (void) const +{ + return m_pmt_hash_max; +} + +VetoNuID::const_id_iterator VetoNuID::plate_begin (void) const +{ + return (m_plate_vec.begin()); +} + +VetoNuID::const_id_iterator VetoNuID::plate_end (void) const +{ + return (m_plate_vec.end()); +} + +VetoNuID::const_expanded_id_iterator VetoNuID::pmt_begin (void) const +{ + return (m_full_pmt_range.factory_begin()); +} + 
+VetoNuID::const_expanded_id_iterator VetoNuID::pmt_end (void) const +{ + return (m_full_pmt_range.factory_end()); +} + +// From hash get Identifier +int +VetoNuID::get_id (const IdentifierHash& hash_id, + Identifier& id, + const IdContext* context) const +{ + + int result = 1; + id.clear(); + + size_t begin = (context) ? context->begin_index(): 0; + // cannot get hash if end is 0: + size_t end = (context) ? context->end_index() : 0; + if (0 == begin) { + // No hashes yet for ids with prefixes + if (m_PLATE_INDEX == end) { + if (hash_id < (unsigned int)(m_plate_vec.end() - m_plate_vec.begin())) { + id = m_plate_vec[hash_id]; + result = 0; + } + } + else if (m_PMT_INDEX == end) { + // Do not know how to calculate strip id from hash yet!! + std::cout << "Do not know how to calculate pmt id from hash yet!!" << std::endl; + } + } + return (result); +} + +void +VetoNuID::get_expanded_id (const Identifier& id, + ExpandedIdentifier& exp_id, + const IdContext* context) const +{ + exp_id.clear(); + exp_id << scint_field_value() + << vetonu_field_value() + << station(id) + << plate(id); + if(!context || context->end_index() == m_PMT_INDEX) + { + exp_id << pmt(id); + } +} + +int +VetoNuID::get_hash (const Identifier& id, + IdentifierHash& hash_id, + const IdContext* context) const +{ + + // Get the hash code from either a vec (for plate) or calculate + // it (pmts). For the former, we convert to compact and call + // get_hash again. For the latter, we calculate the hash from the + // Identifier. + + int result = 1; + hash_id = 0; + size_t begin = (context) ? context->begin_index(): 0; + size_t end = (context) ? 
context->end_index() : 0; + if (0 == begin) { + // No hashes yet for ids with prefixes + if (m_PLATE_INDEX == end) { + hash_id = plate_hash(id); + if (hash_id.is_valid()) result = 0; + } + else if (context && context->end_index() == m_PMT_INDEX) { + // Must calculate for strip hash + ExpandedIdentifier new_id; + get_expanded_id(id, new_id); + hash_id = m_full_pmt_range.cardinalityUpTo(new_id); + result = 0; + } + } + return (result); +} + + +void +VetoNuID::test_plate_packing (void) const +{ + MsgStream log(m_msgSvc, "VetoNuID"); + + if (m_dict) { + + int nids = 0; + int nerr = 0; + IdContext context = plate_context(); + const_id_iterator first = m_plate_vec.begin(); + const_id_iterator last = m_plate_vec.end(); + for (; first != last; ++first, ++nids) { + Identifier id = (*first); + ExpandedIdentifier exp_id; + get_expanded_id(id, exp_id, &context); + Identifier new_id = plate_id(exp_id[m_STATION_INDEX], + exp_id[m_PLATE_INDEX]); + if (id != new_id) { + log << MSG::ERROR << "VetoNuID::test_plate_packing: new and old compacts not equal. New/old/expanded ids " + << MSG::hex << show_to_string(id) << " " << show_to_string(new_id) << " " << MSG::dec + << (std::string)exp_id << endmsg; + nerr++; + continue; + } + // check station id + if (!exp_id[m_PLATE_INDEX]) { + + Identifier new_id1 = station_id(exp_id[m_STATION_INDEX]); + if (id != new_id1) { + log << MSG::ERROR << "VetoNuID::test_plate_packing: new and old station ids not equal. New/old/expanded ids " + << MSG::hex << show_to_string(id) << " " << show_to_string(new_id1) << " " << MSG::dec + << (std::string)exp_id << endmsg; + nerr++; + continue; + } + } + } + + if (m_msgSvc) { + log << MSG::DEBUG << "VetoNuID::test_plate_packing: tested plate and station ids. nids, errors " + << nids << " " << nerr << endmsg; + } + else { + std::cout << " DEBUG VetoNuID::test_plate_packing: tested plate and station ids. 
nids, errors " + << nids << " " << nerr << std::endl; + } + + nids = 0; + context = pmt_context(); + const_expanded_id_iterator first_vetonu = pmt_begin(); + const_expanded_id_iterator last_vetonu = pmt_end(); + for (; first_vetonu != last_vetonu; ++first_vetonu, ++nids) { + // if (nids%10000 != 1) continue; + const ExpandedIdentifier& exp_id = *first_vetonu; + ExpandedIdentifier new_exp_id; + + Identifier id = plate_id(exp_id[m_STATION_INDEX], + exp_id[m_PLATE_INDEX]); + get_expanded_id(id, new_exp_id, &context); + if (exp_id[0] != new_exp_id[0] || + exp_id[1] != new_exp_id[1] || + exp_id[2] != new_exp_id[2] || + exp_id[3] != new_exp_id[3]) + { + log << MSG::ERROR << "VetoNuID::test_plate_packing: new and old ids not equal. New/old/compact ids " + << (std::string)new_exp_id << " " << (std::string)exp_id + << " " << show_to_string(id) << endmsg; + continue; + } + + Identifier pmtid ; + Identifier pmtid1 ; + pmtid = pmt_id ( + exp_id[m_STATION_INDEX], + exp_id[m_PLATE_INDEX], + exp_id[m_PMT_INDEX]); + + pmtid1 = pmt_id ( + station(pmtid), + plate(pmtid), + pmt(pmtid)); + + if (pmtid != pmtid1) { + log << MSG::ERROR << "VetoNuID::test_plate_packing: new and old pixel ids not equal. New/old ids " + << " " << show_to_string(pmtid1) << " " + << show_to_string(pmtid) << endmsg; + } + } + + if (m_msgSvc) { + log << MSG::DEBUG << "VetoNuID::test_plate_packing: Successful tested " + << nids << " ids. " + << endmsg; + } + else { + std::cout << " DEBUG VetoNuID::test_plate_packing: Successful tested " + << nids << " ids. " + << std::endl; + } + } + else { + log << MSG::ERROR << "VetoNuID::test_plate_packing: Unable to test plate packing - no dictionary has been defined. 
" + << endmsg; + } +} + diff --git a/Scintillator/ScintDetDescrCnv/ScintIdCnv/src/ScintIdCnv_entries.cxx b/Scintillator/ScintDetDescrCnv/ScintIdCnv/src/ScintIdCnv_entries.cxx index 18719ef20c15237f1febd9b268e819ce2b88f5e5..0b99ec46b38a725159e9ae7f914d7df3b99b2178 100644 --- a/Scintillator/ScintDetDescrCnv/ScintIdCnv/src/ScintIdCnv_entries.cxx +++ b/Scintillator/ScintDetDescrCnv/ScintIdCnv/src/ScintIdCnv_entries.cxx @@ -3,6 +3,7 @@ // #include "SiliconIDDetDescrCnv.h" // #include "TRT_IDDetDescrCnv.h" #include "VetoIDDetDescrCnv.h" +#include "VetoNuIDDetDescrCnv.h" #include "TriggerIDDetDescrCnv.h" #include "PreshowerIDDetDescrCnv.h" @@ -11,5 +12,6 @@ // DECLARE_CONVERTER(SiliconIDDetDescrCnv) // DECLARE_CONVERTER(TRT_IDDetDescrCnv) DECLARE_CONVERTER(VetoIDDetDescrCnv) +DECLARE_CONVERTER(VetoNuIDDetDescrCnv) DECLARE_CONVERTER(TriggerIDDetDescrCnv) DECLARE_CONVERTER(PreshowerIDDetDescrCnv) diff --git a/Scintillator/ScintDetDescrCnv/ScintIdCnv/src/VetoNuIDDetDescrCnv.cxx b/Scintillator/ScintDetDescrCnv/ScintIdCnv/src/VetoNuIDDetDescrCnv.cxx new file mode 100644 index 0000000000000000000000000000000000000000..82d3ab337ed23e70f62dd97bd5be675c656d7244 --- /dev/null +++ b/Scintillator/ScintDetDescrCnv/ScintIdCnv/src/VetoNuIDDetDescrCnv.cxx @@ -0,0 +1,239 @@ +/* + Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration +*/ + +/*************************************************************************** + Scint DetDescrCnv package + ----------------------------------------- + ***************************************************************************/ + +//<<<<<< INCLUDES >>>>>> + +#include "VetoNuIDDetDescrCnv.h" + +#include "DetDescrCnvSvc/DetDescrConverter.h" +#include "DetDescrCnvSvc/DetDescrAddress.h" +#include "GaudiKernel/MsgStream.h" +#include "StoreGate/StoreGateSvc.h" + +#include "IdDictDetDescr/IdDictManager.h" +#include "ScintIdentifier/VetoNuID.h" + + +//<<<<<< PRIVATE DEFINES >>>>>> +//<<<<<< PRIVATE CONSTANTS >>>>>> +//<<<<<< PRIVATE TYPES 
>>>>>> +//<<<<<< PRIVATE VARIABLE DEFINITIONS >>>>>> +//<<<<<< PUBLIC VARIABLE DEFINITIONS >>>>>> +//<<<<<< CLASS STRUCTURE INITIALIZATION >>>>>> +//<<<<<< PRIVATE FUNCTION DEFINITIONS >>>>>> +//<<<<<< PUBLIC FUNCTION DEFINITIONS >>>>>> +//<<<<<< MEMBER FUNCTION DEFINITIONS >>>>>> + +//-------------------------------------------------------------------- + +long int +VetoNuIDDetDescrCnv::repSvcType() const +{ + return (storageType()); +} + +//-------------------------------------------------------------------- + +StatusCode +VetoNuIDDetDescrCnv::initialize() +{ + // First call parent init + StatusCode sc = DetDescrConverter::initialize(); + MsgStream log(msgSvc(), "VetoNuIDDetDescrCnv"); + log << MSG::DEBUG << "in initialize" << endmsg; + + if (sc.isFailure()) { + log << MSG::ERROR << "DetDescrConverter::initialize failed" << endmsg; + return sc; + } + + // The following is an attempt to "bootstrap" the loading of a + // proxy for VetoNuID into the detector store. However, + // VetoNuIDDetDescrCnv::initialize is NOT called by the conversion + // service. So for the moment, this cannot be use. Instead the + // DetDescrCnvSvc must do the bootstrap from a parameter list. + + +// // Add Scint_DetDescrManager proxy as entry point to the detector store +// // - this is ONLY needed for the manager of each system +// sc = addToDetStore(classID(), "VetoNuID"); +// if (sc.isFailure()) { +// log << MSG::FATAL << "Unable to add proxy for VetoNuID to the Detector Store!" 
<< endmsg; +// return StatusCode::FAILURE; +// } else {} + + return StatusCode::SUCCESS; +} + +//-------------------------------------------------------------------- + +StatusCode +VetoNuIDDetDescrCnv::finalize() +{ + MsgStream log(msgSvc(), "VetoNuIDDetDescrCnv"); + log << MSG::DEBUG << "in finalize" << endmsg; + + return StatusCode::SUCCESS; +} + +//-------------------------------------------------------------------- + +StatusCode +VetoNuIDDetDescrCnv::createObj(IOpaqueAddress* pAddr, DataObject*& pObj) +{ + //StatusCode sc = StatusCode::SUCCESS; + MsgStream log(msgSvc(), "VetoNuIDDetDescrCnv"); + log << MSG::INFO << "in createObj: creating a VetoNuID helper object in the detector store" << endmsg; + + // Create a new VetoNuID + + DetDescrAddress* ddAddr; + ddAddr = dynamic_cast<DetDescrAddress*> (pAddr); + if(!ddAddr) { + log << MSG::FATAL << "Could not cast to DetDescrAddress." << endmsg; + return StatusCode::FAILURE; + } + + // Get the StoreGate key of this container. + std::string helperKey = *( ddAddr->par() ); + if ("" == helperKey) { + log << MSG::DEBUG << "No Helper key " << endmsg; + } + else { + log << MSG::DEBUG << "Helper key is " << helperKey << endmsg; + } + + + // get DetectorStore service + StoreGateSvc * detStore; + StatusCode status = serviceLocator()->service("DetectorStore", detStore); + if (status.isFailure()) { + log << MSG::FATAL << "DetectorStore service not found !" << endmsg; + return StatusCode::FAILURE; + } else {} + + // Get the dictionary manager from the detector store + const DataHandle<IdDictManager> idDictMgr; + status = detStore->retrieve(idDictMgr, "IdDict"); + if (status.isFailure()) { + log << MSG::FATAL << "Could not get IdDictManager !" << endmsg; + return StatusCode::FAILURE; + } + else { + log << MSG::DEBUG << " Found the IdDictManager. 
" << endmsg; + } + + // Only create new helper if it is the first pass or if there is a + // change in the the file or tag + bool initHelper = false; + + const IdDictMgr* mgr = idDictMgr->manager(); + + // Internal Scint id tag + std::string scintIDTag = mgr->tag(); + + // DoChecks flag + bool doChecks = mgr->do_checks(); + + IdDictDictionary* dict = mgr->find_dictionary("Scintillator"); + if (!dict) { + log << MSG::ERROR + << "unable to find idDict for Scintillator" + << endmsg; + return StatusCode::FAILURE; + } + + // File to be read for Scint ids + std::string scintIDFileName = dict->file_name(); + + // Tag of RDB record for Scint ids + std::string scintIdDictTag = dict->dict_tag(); + + + if (m_vetonuId) { + + // VetoNu id helper already exists - second pass. Check for a + // change + if (scintIDTag != m_scintIDTag) { + // Internal Scint id tag + initHelper = true; + log << MSG::DEBUG << " Changed internal Scint id tag: " + << scintIDTag << endmsg; + } + if (scintIDFileName != m_scintIDFileName) { + // File to be read for Scint ids + initHelper = true; + log << MSG::DEBUG << " Changed ScintFileName:" + << scintIDFileName << endmsg; + } + if (scintIdDictTag != m_scintIdDictTag) { + // Tag of RDB record for Scint ids + initHelper = true; + log << MSG::DEBUG << " Changed ScintIdDictTag: " + << scintIdDictTag + << endmsg; + } + if (doChecks != m_doChecks) { + // DoChecks flag + initHelper = true; + log << MSG::DEBUG << " Changed doChecks flag: " + << doChecks + << endmsg; + } + } + else { + // create the helper + m_vetonuId = new VetoNuID; + initHelper = true; + // add in message service for printout + m_vetonuId->setMessageSvc(msgSvc()); + } + + if (initHelper) { + if (idDictMgr->initializeHelper(*m_vetonuId)) { + log << MSG::ERROR << "Unable to initialize VetoNuID" << endmsg; + return StatusCode::FAILURE; + } + // Save state: + m_scintIDTag = scintIDTag; + m_scintIDFileName = scintIDFileName; + m_scintIdDictTag = scintIdDictTag; + m_doChecks = doChecks; + } + + // 
Pass a pointer to the container to the Persistency service by reference. + pObj = StoreGateSvc::asStorable(m_vetonuId); + + return StatusCode::SUCCESS; + +} + +//-------------------------------------------------------------------- + +long +VetoNuIDDetDescrCnv::storageType() +{ + return DetDescr_StorageType; +} + +//-------------------------------------------------------------------- +const CLID& +VetoNuIDDetDescrCnv::classID() { + return ClassID_traits<VetoNuID>::ID(); +} + +//-------------------------------------------------------------------- +VetoNuIDDetDescrCnv::VetoNuIDDetDescrCnv(ISvcLocator* svcloc) + : + DetDescrConverter(ClassID_traits<VetoNuID>::ID(), svcloc), + m_vetonuId(0), + m_doChecks(false) + +{} + diff --git a/Scintillator/ScintDetDescrCnv/ScintIdCnv/src/VetoNuIDDetDescrCnv.h b/Scintillator/ScintDetDescrCnv/ScintIdCnv/src/VetoNuIDDetDescrCnv.h new file mode 100644 index 0000000000000000000000000000000000000000..784d518879aec982ebcb3a96cb73c7e8aded80c8 --- /dev/null +++ b/Scintillator/ScintDetDescrCnv/ScintIdCnv/src/VetoNuIDDetDescrCnv.h @@ -0,0 +1,71 @@ +/* + Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration +*/ + +/*************************************************************************** + Scint DetDescrCnv package + ----------------------------------------- + ***************************************************************************/ + +#ifndef SCINTIDCNV_VETONUIDDETDESCRCNV_H +#define SCINTIDCNV_VETONUIDDETDESCRCNV_H + +//<<<<<< INCLUDES >>>>>> + +#include "DetDescrCnvSvc/DetDescrConverter.h" + +//<<<<<< PUBLIC DEFINES >>>>>> +//<<<<<< PUBLIC CONSTANTS >>>>>> +//<<<<<< PUBLIC TYPES >>>>>> + +class VetoNuID; + +//<<<<<< PUBLIC VARIABLES >>>>>> +//<<<<<< PUBLIC FUNCTIONS >>>>>> +//<<<<<< CLASS DECLARATIONS >>>>>> + + +/** + ** This class is a converter for the VetoNuID an IdHelper which is + ** stored in the detector store. This class derives from + ** DetDescrConverter which is a converter of the DetDescrCnvSvc. 
+ ** + **/ + +class VetoNuIDDetDescrCnv: public DetDescrConverter { + +public: + virtual long int repSvcType() const; + virtual StatusCode initialize(); + virtual StatusCode finalize(); + virtual StatusCode createObj(IOpaqueAddress* pAddr, DataObject*& pObj); + + // Storage type and class ID (used by CnvFactory) + static long storageType(); + static const CLID& classID(); + + VetoNuIDDetDescrCnv(ISvcLocator* svcloc); + +private: + /// The helper - only will create it once + VetoNuID* m_vetonuId; + + /// File to be read for Scint ids + std::string m_scintIDFileName; + + /// Tag of RDB record for Scint ids + std::string m_scintIdDictTag; + + /// Internal Scint id tag + std::string m_scintIDTag; + + /// Whether or not to perform ID checks + bool m_doChecks; + +}; + + +//<<<<<< INLINE PUBLIC FUNCTIONS >>>>>> +//<<<<<< INLINE MEMBER FUNCTIONS >>>>>> + +#endif // SCINTIDCNV_VETONUIDDETDESCRCNV_H diff --git a/Scintillator/ScintDigiAlgs/CMakeLists.txt b/Scintillator/ScintDigiAlgs/CMakeLists.txt index 43695c5f90d96053a8b44fa3ac41c0f4c159ad99..3ebe3d691decc51518eb6f64ecae03efd76aaf73 100644 --- a/Scintillator/ScintDigiAlgs/CMakeLists.txt +++ b/Scintillator/ScintDigiAlgs/CMakeLists.txt @@ -9,7 +9,7 @@ atlas_subdir( ScintDigiAlgs ) atlas_add_component( ScintDigiAlgs src/*.cxx src/*.h src/components/*.cxx - LINK_LIBRARIES AthenaBaseComps Identifier StoreGateLib WaveRawEvent ScintSimEvent WaveDigiToolsLib) + LINK_LIBRARIES AthenaBaseComps Identifier ScintIdentifier StoreGateLib WaveRawEvent ScintSimEvent WaveDigiToolsLib) atlas_install_python_modules( python/*.py ) diff --git a/Scintillator/ScintDigiAlgs/python/ScintDigiAlgsConfig.py b/Scintillator/ScintDigiAlgs/python/ScintDigiAlgsConfig.py index 243e4493dd117e6758c894a7904115b3d3941ec3..292a19c0890672bb3a1530b080d936c8716a2fff 100644 --- a/Scintillator/ScintDigiAlgs/python/ScintDigiAlgsConfig.py +++ b/Scintillator/ScintDigiAlgs/python/ScintDigiAlgsConfig.py @@ -10,10 +10,17 @@ from OutputStreamAthenaPool.OutputStreamConfig import OutputStreamCfg #
https://indico.cern.ch/event/1099652/contributions/4626975/attachments/2352595/4013927/Faser-Physics-run3933-plots.pdf (20/01/2022) # Parameters are per scintillator source, but not per channel. dict_CB_param = {} -dict_CB_param["Trigger"]=dict(CB_alpha=-0.38, CB_n=25, CB_mean=815, CB_sigma=7.7) -dict_CB_param["Timing"]=dict(CB_alpha=-0.32, CB_n=65, CB_mean=846, CB_sigma=5.3) # copy from Preshower; Timing was not in TestBeam -dict_CB_param["Veto"]=dict(CB_alpha=-0.38, CB_n=25, CB_mean=815, CB_sigma=7.7) # copy from Trigger; Veto was not in TestBeam, but in sim "Veto" is the TestBeam Trigger component -dict_CB_param["Preshower"]=dict(CB_alpha=-0.32, CB_n=65, CB_mean=846, CB_sigma=5.3) +dict_CB_param["Trigger"]=dict(CB_alpha=-0.38, CB_n=25, CB_mean=815, CB_sigma=7.7, CB_norm = 500 ) +dict_CB_param["Timing"]=dict(CB_alpha=-0.32, CB_n=65, CB_mean=846, CB_sigma=5.3, CB_norm = 500) # copy from Preshower; Timing was not in TestBeam +dict_CB_param["Veto"]=dict(CB_alpha=-0.38, CB_n=25, CB_mean=815, CB_sigma=7.7, CB_norm = 1000) # copy from Trigger; Veto was not in TestBeam, but in sim "Veto" is the TestBeam Trigger component +dict_CB_param["Preshower"]=dict(CB_alpha=-0.32, CB_n=65, CB_mean=846, CB_sigma=5.3, CB_norm = 500) + +dict_baseline_params = { + "Trigger" : {"mean" : 15000, "rms" : 3}, + "Timing" : {"mean" : 15000, "rms" : 3}, + "Veto" : {"mean" : 15000, "rms" : 3}, + "Preshower" : {"mean" : 15000, "rms" : 3}, + } # One stop shopping for normal FASER data def ScintWaveformDigitizationCfg(flags): @@ -46,7 +53,11 @@ def ScintWaveformDigiCfg(flags, name="ScintWaveformDigiAlg", source="", **kwargs digiAlg.CB_n = dict_CB_param[source]["CB_n"] digiAlg.CB_mean = dict_CB_param[source]["CB_mean"] digiAlg.CB_sigma = dict_CB_param[source]["CB_sigma"] - + digiAlg.CB_norm = dict_CB_param[source]["CB_norm"] + + digiAlg.base_mean = dict_baseline_params[source]["mean"] + digiAlg.base_rms = dict_baseline_params[source]["rms"] + kwargs.setdefault("WaveformDigitisationTool", tool) 
acc.addEventAlgo(digiAlg) diff --git a/Scintillator/ScintDigiAlgs/src/ScintWaveformDigiAlg.cxx b/Scintillator/ScintDigiAlgs/src/ScintWaveformDigiAlg.cxx index b25080419ad0a7ea64f3933a0eb3acfdfe4c7eac..d5235a92ccdacbc2631e4dafc66a3f9dea48dc9d 100644 --- a/Scintillator/ScintDigiAlgs/src/ScintWaveformDigiAlg.cxx +++ b/Scintillator/ScintDigiAlgs/src/ScintWaveformDigiAlg.cxx @@ -1,9 +1,7 @@ - #include "ScintWaveformDigiAlg.h" -#include "Identifier/Identifier.h" +#include "ScintSimEvent/ScintHitIdHelper.h" -#include <vector> #include <map> @@ -20,24 +18,27 @@ ScintWaveformDigiAlg::initialize() { // Initalize tools ATH_CHECK( m_digiTool.retrieve() ); - // Set key to read waveform from ATH_CHECK( m_scintHitContainerKey.initialize() ); // Set key to write container ATH_CHECK( m_waveformContainerKey.initialize() ); - // Will eventually depend on the type of detector - // TODO: Vary time at which centre it? - // TODO: Better parameters - + // Set up helpers + ATH_CHECK(detStore()->retrieve(m_vetoID, "VetoID")); + ATH_CHECK(detStore()->retrieve(m_triggerID, "TriggerID")); + ATH_CHECK(detStore()->retrieve(m_preshowerID, "PreshowerID")); - - m_kernel = new TF1("PDF", "ROOT::Math::crystalball_pdf(x, [0],[1],[2],[3])", 0, 1200); + // Create CB time kernel and pre-evaluate for number of samples + m_kernel = new TF1("PDF", "[4] * ROOT::Math::crystalball_pdf(x, [0],[1],[2],[3])", 0, 1200); m_kernel->SetParameter(0, m_CB_alpha); m_kernel->SetParameter(1, m_CB_n); m_kernel->SetParameter(2, m_CB_sigma); m_kernel->SetParameter(3, m_CB_mean); + m_kernel->SetParameter(4, m_CB_norm); + + // Pre-evaluate time kernel for each bin + m_timekernel = m_digiTool->evaluate_timekernel(m_kernel); return StatusCode::SUCCESS; } @@ -54,11 +55,9 @@ ScintWaveformDigiAlg::finalize() { StatusCode ScintWaveformDigiAlg::execute(const EventContext& ctx) const { ATH_MSG_DEBUG("Executing"); + ATH_MSG_DEBUG("Run: " << ctx.eventID().run_number() << " Event: " << ctx.eventID().event_number()); - ATH_MSG_DEBUG("Run: 
" << ctx.eventID().run_number() - << " Event: " << ctx.eventID().event_number()); - - // Find the input HIT collection + // Find the input HITS collection SG::ReadHandle<ScintHitCollection> scintHitHandle(m_scintHitContainerKey, ctx); ATH_CHECK( scintHitHandle.isValid() ); @@ -69,15 +68,59 @@ ScintWaveformDigiAlg::execute(const EventContext& ctx) const { ATH_CHECK( waveformContainerHandle.record( std::make_unique<RawWaveformContainer>()) ); ATH_MSG_DEBUG("WaveformsContainer '" << waveformContainerHandle.name() << "' initialized"); - + if (scintHitHandle->size() == 0) { ATH_MSG_DEBUG("ScintHitCollection found with zero length!"); return StatusCode::SUCCESS; } + + // Create structure to store pulse for each channel + std::map<Identifier, std::vector<uint16_t>> waveforms; + + auto first = *scintHitHandle->begin(); + if (first.isVeto()) { + waveforms = m_digiTool->create_waveform_map(m_vetoID); + } else if (first.isTrigger()) { + waveforms = m_digiTool->create_waveform_map(m_triggerID); + } else if (first.isPreshower()) { + waveforms = m_digiTool->create_waveform_map(m_preshowerID); + } + + // Loop over time samples + for (const auto& tk : m_timekernel) { + std::map<unsigned int, float> counts; + + // Convolve hit energy with evaluated kernel and sum for each hit id (i.e. 
channel) + for (const auto& hit : *scintHitHandle) { + counts[hit.identify()] += tk * hit.energyLoss(); + } + + // Subtract count from baseline and add result to correct waveform vector + for (const auto& c : counts) { + + unsigned int baseline = m_digiTool->generate_baseline(m_base_mean, m_base_rms); + int value = baseline - c.second; + + if (value < 0) { + ATH_MSG_WARNING("Found pulse " << c.second << " larger than baseline " << c.first); + value = 0; // Protect against scaling signal above baseline + } + + // Convert hit id to Identifier and store + Identifier id = ScintHitIdHelper::GetHelper()->getIdentifier(c.first); + waveforms[id].push_back(value); + } + } - // Digitise the hits - CHECK( m_digiTool->digitise<ScintHitCollection>(scintHitHandle.ptr(), - waveformContainerHandle.ptr(), m_kernel) ); + // Loop over waveform vectors to make and store raw waveform + unsigned int nsamples = m_digiTool->nsamples(); + for (const auto& w : waveforms) { + RawWaveform* wfm = new RawWaveform(); + wfm->setWaveform(0, w.second); + wfm->setIdentifier(w.first); + wfm->setSamples(nsamples); + waveformContainerHandle->push_back(wfm); + } ATH_MSG_DEBUG("WaveformsHitContainer " << waveformContainerHandle.name() << "' filled with "<< waveformContainerHandle->size() <<" items"); diff --git a/Scintillator/ScintDigiAlgs/src/ScintWaveformDigiAlg.h b/Scintillator/ScintDigiAlgs/src/ScintWaveformDigiAlg.h index af60e02d8169194ccc2459f0b7a69727b2544eba..de03370698620986377c695852af07c7d8c79b18 100644 --- a/Scintillator/ScintDigiAlgs/src/ScintWaveformDigiAlg.h +++ b/Scintillator/ScintDigiAlgs/src/ScintWaveformDigiAlg.h @@ -19,12 +19,21 @@ #include "GaudiKernel/ServiceHandle.h" #include "GaudiKernel/ToolHandle.h" +//Helpers +#include "ScintIdentifier/VetoID.h" +#include "ScintIdentifier/TriggerID.h" +#include "ScintIdentifier/PreshowerID.h" +#include "ScintSimEvent/ScintHitIdHelper.h" +#include "Identifier/Identifier.h" + + // ROOT #include "TF1.h" // STL #include <string> +#include <vector>
class ScintWaveformDigiAlg : public AthReentrantAlgorithm { @@ -50,15 +59,32 @@ class ScintWaveformDigiAlg : public AthReentrantAlgorithm { ScintWaveformDigiAlg &operator=(const ScintWaveformDigiAlg&) = delete; //@} + /// + + /** @name Steerable parameters for crystal ball and baseline **/ + //@{ Gaudi::Property<double> m_CB_alpha {this, "CB_alpha", 0, "Alpha of the crystal ball function"}; Gaudi::Property<double> m_CB_n {this, "CB_n", 0, "n of the crystal ball function"}; Gaudi::Property<double> m_CB_mean {this, "CB_mean", 0, "Mean of the crystal ball function"}; Gaudi::Property<double> m_CB_sigma {this, "CB_sigma", 0, "Sigma of the crystal ball function"}; + Gaudi::Property<double> m_CB_norm {this, "CB_norm", 0, "Norm of the crystal ball function"}; + Gaudi::Property<double> m_base_mean {this, "base_mean", 0, "Mean of the baseline"}; + Gaudi::Property<double> m_base_rms {this, "base_rms", 0, "RMS of the baseline"}; + //@} - /// Kernel PDF - TF1* m_kernel; + /** Kernel PDF and evaluated values **/ + //@{ + TF1* m_kernel; + std::vector<float> m_timekernel; + //@} + + + /// Detector ID helpers + const VetoID* m_vetoID{nullptr}; + const TriggerID* m_triggerID{nullptr}; + const PreshowerID* m_preshowerID{nullptr}; /** * @name Digitisation tool @@ -88,4 +114,5 @@ class ScintWaveformDigiAlg : public AthReentrantAlgorithm { }; + #endif // SCINTDIGIALGS_SCINTDIGIALG_H diff --git a/Scintillator/ScintSimEvent/CMakeLists.txt b/Scintillator/ScintSimEvent/CMakeLists.txt index 27b63871df2aa95ea230c6be47454573a7c14d16..07224a272694548a6c39740e081059d7652a9d91 100644 --- a/Scintillator/ScintSimEvent/CMakeLists.txt +++ b/Scintillator/ScintSimEvent/CMakeLists.txt @@ -18,11 +18,11 @@ atlas_add_library( ScintSimEvent PRIVATE_INCLUDE_DIRS ${ROOT_INCLUDE_DIRS} ${GEANT4_INCLUDE_DIRS} DEFINITIONS ${CLHEP_DEFINITIONS} LINK_LIBRARIES ${CLHEP_LIBRARIES} AthAllocators AthenaKernel CxxUtils GeneratorObjects HitManagement StoreGateLib SGtests - PRIVATE_LINK_LIBRARIES ${ROOT_LIBRARIES}
ScintIdentifier ) + PRIVATE_LINK_LIBRARIES ${ROOT_LIBRARIES} ScintIdentifier Identifier) atlas_add_dictionary( ScintSimEventDict ScintSimEvent/ScintSimEventDict.h ScintSimEvent/selection.xml INCLUDE_DIRS ${ROOT_INCLUDE_DIRS} ${CLHEP_INCLUDE_DIRS} ${GEANT4_INCLUDE_DIRS} - LINK_LIBRARIES ${ROOT_LIBRARIES} ${CLHEP_LIBRARIES} AthAllocators CxxUtils GeneratorObjects HitManagement StoreGateLib SGtests ScintIdentifier ScintSimEvent ) + LINK_LIBRARIES ${ROOT_LIBRARIES} ${CLHEP_LIBRARIES} AthAllocators CxxUtils GeneratorObjects HitManagement StoreGateLib SGtests ScintIdentifier ScintSimEvent Identifier) diff --git a/Scintillator/ScintSimEvent/ScintSimEvent/ScintHitIdHelper.h b/Scintillator/ScintSimEvent/ScintSimEvent/ScintHitIdHelper.h index 2874bac894b15684252de5a531081f338750c10c..233acb37e9a01ff47d8e3f93924e473eebe37f62 100644 --- a/Scintillator/ScintSimEvent/ScintSimEvent/ScintHitIdHelper.h +++ b/Scintillator/ScintSimEvent/ScintSimEvent/ScintHitIdHelper.h @@ -23,6 +23,15 @@ // This class is singleton and static method and variable are used. 
#include "CxxUtils/checker_macros.h" + +//Helpers +#include "ScintIdentifier/VetoID.h" +#include "ScintIdentifier/TriggerID.h" +#include "ScintIdentifier/PreshowerID.h" + +#include "Identifier/Identifier.h" + + ATLAS_NO_CHECK_FILE_THREAD_SAFETY; class ScintHitIdHelper : HitIdHelper { @@ -43,6 +52,9 @@ class ScintHitIdHelper : HitIdHelper { // Layer/Disk int getPlate(const int& hid) const; + // identifier + Identifier getIdentifier(const int& hid) const; + // // Info packing: int buildHitId(const int, const int, const int) const; @@ -54,6 +66,12 @@ class ScintHitIdHelper : HitIdHelper { // // Initialize the helper, only called by the constructor void Initialize(); + + /// Detector ID helpers + const VetoID* m_vetoID{nullptr}; + const TriggerID* m_triggerID{nullptr}; + const PreshowerID* m_preshowerID{nullptr}; + }; #endif // SCINTSIMEVENT_SCINTHITIDHELPER diff --git a/Scintillator/ScintSimEvent/src/ScintHitIdHelper.cxx b/Scintillator/ScintSimEvent/src/ScintHitIdHelper.cxx index cacdb5d720bfdc7759188481346899d48069d856..5f115d47109690e545267e6cbc561fc5b8e7afdf 100644 --- a/Scintillator/ScintSimEvent/src/ScintHitIdHelper.cxx +++ b/Scintillator/ScintSimEvent/src/ScintHitIdHelper.cxx @@ -6,7 +6,6 @@ #include "ScintSimEvent/ScintHitIdHelper.h" #include "StoreGate/StoreGateSvc.h" -#include "ScintIdentifier/VetoID.h" #include "G4Types.hh" #ifdef G4MULTITHREADED @@ -44,7 +43,9 @@ void ScintHitIdHelper::Initialize() { const VetoID* pix; ServiceHandle<StoreGateSvc> detStore ("DetectorStore", "ScitHitIdHelper"); if (detStore.retrieve().isSuccess()) { - if (detStore->retrieve(pix, "VetoID").isFailure()) { pix = 0; } + if (detStore->retrieve(m_vetoID, "VetoID").isFailure()) { m_vetoID = 0; } + if (detStore->retrieve(m_triggerID, "TriggerID").isFailure()) { m_triggerID = 0; } + if (detStore->retrieve(m_preshowerID, "PreshowerID").isFailure()) { m_preshowerID = 0; } } InitializeField("VetoTriggerPreshower", 0, 2); @@ -87,6 +88,20 @@ int ScintHitIdHelper::getPlate(const int& hid) 
const return this->GetFieldValue("Plate", hid); } + +// identifier +Identifier ScintHitIdHelper::getIdentifier(const int& hid) const +{ + if (isVeto(hid)) { + return m_vetoID->pmt_id(getStation(hid), getPlate(hid), 0); + } else if (isTrigger(hid)) { + return m_triggerID->pmt_id(getStation(hid), getPlate(hid), 0); + } else if (isPreshower(hid)) { + return m_preshowerID->pmt_id(getStation(hid), getPlate(hid), 0); + } + return Identifier(); +} + // // Info packing: int ScintHitIdHelper::buildHitId(const int veto_trigger_preshower, diff --git a/Simulation/G4Faser/G4FaserAlg/test/G4FaserAlgConfigNew_Test.py b/Simulation/G4Faser/G4FaserAlg/test/G4FaserAlgConfigNew_Test.py index 709797ae28bec863f565eba2aa662d8bcefc0e2e..3b1aac71e96914107c58558ae96a11eb2c4f7340 100644 --- a/Simulation/G4Faser/G4FaserAlg/test/G4FaserAlgConfigNew_Test.py +++ b/Simulation/G4Faser/G4FaserAlg/test/G4FaserAlgConfigNew_Test.py @@ -12,7 +12,7 @@ if __name__ == '__main__': # Set up logging and config behaviour # from AthenaCommon.Logging import log - from AthenaCommon.Constants import DEBUG + from AthenaCommon.Constants import DEBUG, VERBOSE from AthenaCommon.Configurable import Configurable log.setLevel(DEBUG) Configurable.configurableRun3Behavior = 1 @@ -43,6 +43,8 @@ if __name__ == '__main__': ConfigFlags.Sim.ReleaseGeoModel = False ConfigFlags.Sim.IncludeParentsInG4Event = True # Controls whether BeamTruthEvent is written to output HITS file ConfigFlags.addFlag("Sim.Gun",{"Generator" : "SingleParticle"}) # Property bag for particle gun keyword:argument pairs + ConfigFlags.addFlag("Sim.Beam.xangle", 0) # Potential beam crossing angles + ConfigFlags.addFlag("Sim.Beam.yangle", 0) ConfigFlags.GeoModel.FaserVersion = "FASERNU-02" # Geometry set-up ConfigFlags.IOVDb.GlobalTag = "OFLCOND-FASER-02" # Conditions set-up @@ -53,6 +55,13 @@ if __name__ == '__main__': # import sys ConfigFlags.fillFromArgs(sys.argv[1:]) + +# from math import atan +# from AthenaCommon.SystemOfUnits import GeV, TeV, cm, m +# 
from AthenaCommon.PhysicalConstants import pi +# import ParticleGun as PG +# ConfigFlags.Sim.Gun = {"Generator" : "SingleParticle", "pid" : 11, "energy" : PG.LogSampler(10*GeV, 1*TeV), "theta" : PG.GaussianSampler(0, atan((10*cm)/(7*m)), oneside = True), "phi" : [0, 2*pi], "mass" : 0.511, "radius" : -10*cm, "randomSeed" : 12345} + # # By being a little clever, we can steer the geometry setup from the command line using GeoModel.FaserVersion # @@ -82,11 +91,24 @@ if __name__ == '__main__': # if ConfigFlags.Input.Files and ConfigFlags.Input.Files != ["_CALYPSO_GENERIC_INPUTFILE_NAME_"] : print("Input.Files = ",ConfigFlags.Input.Files) + +# +# If so, and only one file that ends in .events read as HepMC +# + if len(ConfigFlags.Input.Files) == 1 and (ConfigFlags.Input.Files[0].endswith(".events") or ConfigFlags.Input.Files[0].endswith(".hepmc")): + + from HEPMCReader.HepMCReaderConfig import HepMCReaderCfg + cfg.merge(HepMCReaderCfg(ConfigFlags)) + + from McEventSelector.McEventSelectorConfig import McEventSelectorCfg + cfg.merge(McEventSelectorCfg(ConfigFlags)) + # -# If so, set up to read it +# Else, set up to read it as a pool.root file # - from AthenaPoolCnvSvc.PoolReadConfig import PoolReadCfg - cfg.merge(PoolReadCfg(ConfigFlags)) + else: + from AthenaPoolCnvSvc.PoolReadConfig import PoolReadCfg + cfg.merge(PoolReadCfg(ConfigFlags)) # # If not, configure the particle gun as requested, or using defaults # @@ -98,16 +120,31 @@ if __name__ == '__main__': cfg.merge(FaserParticleGunCfg(ConfigFlags)) from McEventSelector.McEventSelectorConfig import McEventSelectorCfg cfg.merge(McEventSelectorCfg(ConfigFlags)) + # # Output file # from AthenaPoolCnvSvc.PoolWriteConfig import PoolWriteCfg cfg.merge(PoolWriteCfg(ConfigFlags)) + +# +# Shift LOS +# + + if ConfigFlags.Sim.Beam.xangle or ConfigFlags.Sim.Beam.yangle: + MCEventKey = "BeamTruthEventShifted" + import McParticleEvent.Pythonizations + from GeneratorUtils.ShiftLOSConfig import ShiftLOSCfg + 
cfg.merge(ShiftLOSCfg(ConfigFlags, OutputMCEventKey = MCEventKey, + xcross = ConfigFlags.Sim.Beam.xangle, ycross = ConfigFlags.Sim.Beam.yangle)) + else: + MCEventKey = "BeamTruthEvent" + # # Add the G4FaserAlg # from G4FaserAlg.G4FaserAlgConfigNew import G4FaserAlgCfg - cfg.merge(G4FaserAlgCfg(ConfigFlags)) + cfg.merge(G4FaserAlgCfg(ConfigFlags, InputTruthCollection = MCEventKey)) # # Dump config # @@ -124,6 +161,8 @@ if __name__ == '__main__': # # Execute and finish # + #cfg.foreach_component("*").OutputLevel = VERBOSE + sc = cfg.run() b = time.time() diff --git a/Tracker/TrackerAlignTools/TrackerAlignGenTools/src/TrackerAlignDBTool.cxx b/Tracker/TrackerAlignTools/TrackerAlignGenTools/src/TrackerAlignDBTool.cxx index 77714ac0974d54b0f6292a74a174cc86a6c7cf29..4483eb5b4cd52b31aa70cb915ba6500641297685 100644 --- a/Tracker/TrackerAlignTools/TrackerAlignGenTools/src/TrackerAlignDBTool.cxx +++ b/Tracker/TrackerAlignTools/TrackerAlignGenTools/src/TrackerAlignDBTool.cxx @@ -1,6 +1,6 @@ /* - Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration -*/ + Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration + */ // TrackerAlignDBTool.cxx // AlgTool to manage the SCT AlignableTransforms in the conditions store @@ -12,6 +12,9 @@ #include "AthenaPoolUtilities/CondAttrListCollection.h" #include "AthenaPoolUtilities/AthenaAttributeList.h" +#include <string> +#include <set> +#include <exception> #include <fstream> #include <iostream> @@ -35,16 +38,16 @@ #define TRACKER_ALIGN "/Tracker/Align" TrackerAlignDBTool::TrackerAlignDBTool(const std::string& type, - const std::string& name, const IInterface* parent) + const std::string& name, const IInterface* parent) : AthAlgTool(type,name,parent), - p_toolsvc("ToolSvc",name), - m_sctid(nullptr), - m_sctman(nullptr), - m_par_scttwoside(false), - m_par_dbroot( TRACKER_ALIGN ), - m_par_dbkey( TRACKER_ALIGN ), - m_dynamicDB(false), - m_forceUserDBConfig(false) + p_toolsvc("ToolSvc",name), + m_sctid(nullptr), 
+ m_sctman(nullptr), + m_par_scttwoside(false), + m_par_dbroot( TRACKER_ALIGN ), + m_par_dbkey( TRACKER_ALIGN ), + m_dynamicDB(false), + m_forceUserDBConfig(false) { declareInterface<ITrackerAlignDBTool>(this); declareProperty("IToolSvc", p_toolsvc); @@ -59,76 +62,76 @@ TrackerAlignDBTool::~TrackerAlignDBTool() StatusCode TrackerAlignDBTool::initialize() { - - ATH_MSG_DEBUG("initializing"); - - m_alignobjs.clear(); - m_alignchans.clear(); - int ndet {0}; - // get storegate access to conditions store - ATH_CHECK(detStore().retrieve()); + ATH_MSG_DEBUG("initializing"); - ATH_CHECK( p_toolsvc.retrieve() ); + m_alignobjs.clear(); + m_alignchans.clear(); + int ndet {0}; - // attempt to get ID helpers from detector store - // (relying on GeoModel to put them) + // get storegate access to conditions store + ATH_CHECK(detStore().retrieve()); - ATH_CHECK(detStore()->retrieve(m_sctman,"SCT")); + ATH_CHECK( p_toolsvc.retrieve() ); - if (m_sctman->m_alignfoldertype == TrackerDD::static_run1 && - !m_forceUserDBConfig) - { - m_par_dbroot = "/Tracker/Align"; - m_dynamicDB = false; - } - else if (m_sctman->m_alignfoldertype == TrackerDD::timedependent_run2 && - !m_forceUserDBConfig) - { - m_par_dbroot = "/Tracker/AlignL3"; - m_dynamicDB = true; - } - m_par_dbkey = m_par_dbroot; + // attempt to get ID helpers from detector store + // (relying on GeoModel to put them) + + ATH_CHECK(detStore()->retrieve(m_sctman,"SCT")); - ATH_CHECK(detStore()->retrieve(m_sctid)); + if (m_sctman->m_alignfoldertype == TrackerDD::static_run1 && + !m_forceUserDBConfig) + { + m_par_dbroot = "/Tracker/Align"; + m_dynamicDB = false; + } + else if (m_sctman->m_alignfoldertype == TrackerDD::timedependent_run2 && + !m_forceUserDBConfig) + { + m_par_dbroot = "/Tracker/AlignL3"; + m_dynamicDB = true; + } + m_par_dbkey = m_par_dbroot; - // setup list of alignable transforms from geometry - int chan[3]; - int TransfLevel_low = 0; // depending on alignfolder sheme; 0 for old, 2 for new - if (m_dynamicDB) 
TransfLevel_low = 2; + ATH_CHECK(detStore()->retrieve(m_sctid)); - for (int i=0;i<3;++i) chan[i]=100*i; + // setup list of alignable transforms from geometry + int chan[3]; + int TransfLevel_low = 0; // depending on alignfolder sheme; 0 for old, 2 for new + if (m_dynamicDB) TransfLevel_low = 2; - std::string man_name; - for (const TrackerDD::SiDetectorElement* element : *(m_sctman->getDetectorElementCollection())) + for (int i=0;i<3;++i) chan[i]=100*i; + + std::string man_name; + for (const TrackerDD::SiDetectorElement* element : *(m_sctman->getDetectorElementCollection())) + { + if (element!=0) { - if (element!=0) - { - const Identifier ident=element->identify(); - int station, layer, eta, phi, side; - if (idToDetSet(ident, station, layer, eta, phi, side)) - { - std::string level[3]; - for (int i=TransfLevel_low; i<3; ++i) - { - m_stations.insert(station); - level[i]=dirkey(station, layer, 1+i, phi); - // add this to list if not seen already - std::vector<std::string>::const_iterator ix = - find(m_alignobjs.begin(), m_alignobjs.end(), level[i]); - if (ix==m_alignobjs.end()) - { - m_alignobjs.push_back(level[i]); - m_alignchans.push_back(chan[i]++); - } - } - ++ndet; - } - else - { - ATH_MSG_ERROR("Error translating element identifier." ); - } - } + const Identifier ident=element->identify(); + int station, layer, eta, phi, side; + if (idToDetSet(ident, station, layer, eta, phi, side)) + { + std::string level[3]; + for (int i=TransfLevel_low; i<3; ++i) + { + m_stations.insert(station); + level[i]=dirkey(station, layer, 1+i, phi); + // add this to list if not seen already + std::vector<std::string>::const_iterator ix = + find(m_alignobjs.begin(), m_alignobjs.end(), level[i]); + if (ix==m_alignobjs.end()) + { + m_alignobjs.push_back(level[i]); + m_alignchans.push_back(chan[i]++); + } + } + ++ndet; + } + else + { + ATH_MSG_ERROR("Error translating element identifier." 
); + } + } } ATH_CHECK(m_outputTool.retrieve()); @@ -136,12 +139,12 @@ StatusCode TrackerAlignDBTool::initialize() { ATH_MSG_DEBUG( "Database root folder " << m_par_dbroot ); ATH_MSG_DEBUG( "Geometry initialisation sees " << ndet << - " SCT modules giving " << m_alignobjs.size() << " alignment keys" ); + " SCT modules giving " << m_alignobjs.size() << " alignment keys" ); ATH_MSG_DEBUG("Keys/channels are:"); - + for (unsigned int i=0; i < m_alignobjs.size(); ++i) ATH_MSG_DEBUG( " " << m_alignobjs[i] << " [" << m_alignchans[i] << "]" ); - + } return StatusCode::SUCCESS; @@ -155,7 +158,7 @@ StatusCode TrackerAlignDBTool::finalize() StatusCode TrackerAlignDBTool::createDB() const { - + ATH_MSG_DEBUG("createDB method called"); AlignableTransform* pat = nullptr; @@ -167,8 +170,8 @@ StatusCode TrackerAlignDBTool::createDB() const if (detStore()->contains<AlignableTransformContainer>(m_par_dbroot)) { - ATH_MSG_WARNING("createDB: AlignableTransformContainer already exists" ); - return StatusCode::FAILURE; + ATH_MSG_WARNING("createDB: AlignableTransformContainer already exists" ); + return StatusCode::FAILURE; } // put them in a collection /Indet/Align @@ -187,7 +190,7 @@ StatusCode TrackerAlignDBTool::createDB() const } // record collection in SG - + ATH_CHECK( detStore()->record(patc, m_par_dbroot) ); ATH_MSG_DEBUG( "Collection has size " << patc->size() ); @@ -195,78 +198,133 @@ StatusCode TrackerAlignDBTool::createDB() const std::vector<std::string> level2; for (const TrackerDD::SiDetectorElement* element : *(m_sctman->getDetectorElementCollection()) ) { - if (element != 0) + if (element != 0) + { + const Identifier ident = element->identify(); + std::string key = dirkey(ident,3); + // do not produce AlignableTrasnforms for SCT side 1 if option set + if (!(m_sctid->side(ident)==1) || m_par_scttwoside) { - const Identifier ident = element->identify(); - std::string key = dirkey(ident,3); - // do not produce AlignableTrasnforms for SCT side 1 if option set - if 
(!(m_sctid->side(ident)==1) || m_par_scttwoside) - { - if ((pat=getTransPtr(key))) - { - pat->add(ident,Amg::EigenTransformToCLHEP( Amg::Transform3D::Identity() ) ); - } - else - { - ATH_MSG_ERROR( "Cannot retrieve AlignableTransform for key " << key ); - } - // add level 2 transform if needed - do this the first time a module - // for this level 3 key is seen - std::vector<std::string>::const_iterator ix= - find(level2.begin(),level2.end(),key); - if (ix==level2.end()) - { - level2.push_back(key); - // construct identifier of level 2 transform - Identifier ident2; - ident2=m_sctid->wafer_id(m_sctid->station(ident), - m_sctid->layer(ident), - 0, 0, 0); - std::string key2 = dirkey(ident, 2); - if ((pat = getTransPtr(key2))) - { - pat->add(ident2, - Amg::EigenTransformToCLHEP( Amg::Transform3D::Identity() ) ); - } - else - { - ATH_MSG_ERROR( "Cannot retrieve AlignableTransform for key " << key2 ); - } - } - } + if ((pat=getTransPtr(key))) + { + const auto iStation = m_sctid->station(ident); + const auto iLayer = m_sctid->layer(ident); + const auto iModuleEta = m_sctid->eta_module(ident); + const auto iModulePhi = m_sctid->phi_module(ident); + int iModule = iModulePhi; + if (iModuleEta < 0) iModule +=4; + + const auto buildKey = [](auto iStation, auto iLayer, auto iModule) { + std::stringstream ss; + ss << iStation << iLayer << iModule; + return ss.str(); + }; + + const auto key = buildKey(iStation, iLayer, iModule); + if (not (m_alignment.find(key) == m_alignment.end())) { + const std::vector<double> c = m_alignment.value().find(key)->second; + ATH_MSG_VERBOSE("Applying correction for " << key); + ATH_MSG_VERBOSE(c[0] << " " << c[1] << " " << c[2] << " " << c[3] << " " << c[4] << " " << c[5]); + Amg::Translation3D newtranslation(c[0], c[1], c[2]); + Amg::Transform3D alignment = newtranslation * Amg::RotationMatrix3D::Identity(); + alignment *= Amg::AngleAxis3D(c[5], Amg::Vector3D(0.,0.,1.)); + alignment *= Amg::AngleAxis3D(c[4], Amg::Vector3D(0.,1.,0.)); + alignment 
*= Amg::AngleAxis3D(c[3], Amg::Vector3D(1.,0.,0.)); + + pat->add(ident, Amg::EigenTransformToCLHEP(alignment)); + } else { + ATH_MSG_VERBOSE("No correction given for " << key); + } + } + else + { + ATH_MSG_ERROR( "Cannot retrieve AlignableTransform for key " << key ); + } + // add level 2 transform if needed - do this the first time a module + // for this level 3 key is seen + std::vector<std::string>::const_iterator ix= + find(level2.begin(),level2.end(),key); + if (ix==level2.end()) + { + level2.push_back(key); + // construct identifier of level 2 transform + Identifier ident2; + ident2=m_sctid->wafer_id(m_sctid->station(ident), + m_sctid->layer(ident), + 0, 0, 0); + std::string key2 = dirkey(ident, 2); + if ((pat = getTransPtr(key2))) + { + auto iStation = m_sctid->station(ident); + auto iLayer = m_sctid->layer(ident); + + const auto buildKey = [](auto iStation, auto iLayer) { + std::stringstream ss; + ss << iStation << iLayer; + return ss.str(); + }; + + const auto key = buildKey(iStation, iLayer); + if (not (m_alignment.find(key) == m_alignment.end())) { + const std::vector<double> c = m_alignment.value().find(key)->second; + ATH_MSG_VERBOSE("Applying correction for " << key); + ATH_MSG_VERBOSE(c[0] << " " << c[1] << " " << c[2] << " " << c[3] << " " << c[4] << " " << c[5]); + Amg::Translation3D newtranslation(c[0], c[1], c[2]); + Amg::Transform3D alignment = newtranslation * Amg::RotationMatrix3D::Identity(); + alignment *= Amg::AngleAxis3D(c[5], Amg::Vector3D(0.,0.,1.)); + alignment *= Amg::AngleAxis3D(c[4], Amg::Vector3D(0.,1.,0.)); + alignment *= Amg::AngleAxis3D(c[3], Amg::Vector3D(1.,0.,0.)); + + pat->add(ident2, Amg::EigenTransformToCLHEP(alignment)); + } else { + ATH_MSG_VERBOSE("No correction given for " << key); + } + } + else + { + ATH_MSG_ERROR( "Cannot retrieve AlignableTransform for key " << key2 ); + } + } } } + } - // create the global object with positions for the stations - Identifier ident1; - std::string key1 = dirkey(ident1, 1); - if ((pat 
= getTransPtr(key1))) - { - Amg::Transform3D globshift; - globshift.setIdentity(); - for (int station : m_stations) - { - ident1 = m_sctid->wafer_id(station, 0, 0, 0, 0); - pat->add(ident1, Amg::EigenTransformToCLHEP(globshift)); - } - } - else + // create the global object with positions for the stations + Identifier ident1; + std::string key1 = dirkey(ident1, 1); + if ((pat = getTransPtr(key1))) + { + // Amg::Translation3D translation(0.1,0.2,0.3); + //Amg::Transform3D globshift=translation*Amg::RotationMatrix3D::Identity(); + //std::cout<<"rotation"<<std::endl; + //std::cout<<globshift.rotation()(0,0)<<" , "<<globshift.rotation()(1,1)<<" , "<<globshift.rotation()(2,2)<<std::endl; + //std::cout<<"translation"<<std::endl; + //std::cout<<globshift.translation()(0,0)<<" , "<<globshift.translation()(1,1)<<" , "<<globshift.translation()(2,2)<<std::endl; + Amg::Transform3D globshift; + globshift.setIdentity(); + for (int station : m_stations) { - ATH_MSG_ERROR( "Cannot retrieve AlignableTransform for key " << key1 ); - } + ident1 = m_sctid->wafer_id(station, 0, 0, 0, 0); + pat->add(ident1, Amg::EigenTransformToCLHEP(globshift)); + } + } + else + { + ATH_MSG_ERROR( "Cannot retrieve AlignableTransform for key " << key1 ); + } - // sort the created objects (in case, usually come out sorted from GeoModel) - ATH_CHECK(sortTrans()); - // list out size of all created objects - ATH_MSG_DEBUG( "Dumping size of created AlignableTransform objects"); - for (unsigned int i = 0; i < m_alignobjs.size(); ++i) - if ((pat = getTransPtr(m_alignobjs[i]))) pat->print(); + // sort the created objects (in case, usually come out sorted from GeoModel) + ATH_CHECK(sortTrans()); + // list out size of all created objects + ATH_MSG_DEBUG( "Dumping size of created AlignableTransform objects"); + for (unsigned int i = 0; i < m_alignobjs.size(); ++i) + if ((pat = getTransPtr(m_alignobjs[i]))) pat->print(); - return StatusCode::SUCCESS; + return StatusCode::SUCCESS; } bool 
TrackerAlignDBTool::idToDetSet(const Identifier ident, int& station, - int& layer, int& eta, int& phi, int& side) const + int& layer, int& eta, int& phi, int& side) const { // transform Identifier to list of integers specifiying station,layer // eta, phi, side @@ -280,7 +338,7 @@ bool TrackerAlignDBTool::idToDetSet(const Identifier ident, int& station, } std::string TrackerAlignDBTool::dirkey(const Identifier& ident, - const int level) const + const int level) const { // given SCT identifier, and level (1, 2 or 3) return // directory key name for associated alignment data @@ -290,9 +348,9 @@ std::string TrackerAlignDBTool::dirkey(const Identifier& ident, } std::string TrackerAlignDBTool::dirkey(const int station, - const int layer, - const int level, - const int ) const + const int layer, + const int level, + const int ) const { // channel info and level (1,2 or 3) return // directory key name for associated alignment data @@ -307,17 +365,17 @@ std::string TrackerAlignDBTool::dirkey(const int station, { result << m_par_dbkey << "/" ; } - + if (level==1) { result << "Stations"; } else { if (level==2) result << "Planes"; if (level==3) { - if (station == 3 ) result << "Downstream"; - if (station == 2 ) result << "Central"; - if (station == 1 ) result << "Upstream"; - if (station == 0 ) result << "Interface"; - result << 1+layer; + if (station == 3 ) result << "Downstream"; + if (station == 2 ) result << "Central"; + if (station == 1 ) result << "Upstream"; + if (station == 0 ) result << "Interface"; + result << 1+layer; } } return result.str(); @@ -327,7 +385,7 @@ std::string TrackerAlignDBTool::dirkey(const int station, // const int layer,const int ring, const int sector, // const float rphidisp, const float rdisp, const float zdisp, // const int syst, const int level, const int skip) const { - + // ATH_MSG_DEBUG( "dispGroup called: level " << level << " syst " << syst); // int nmod=0; // // random number service @@ -438,7 +496,7 @@ std::string 
TrackerAlignDBTool::dirkey(const int station, // } // // update, adding to any existing shift // if (update) { - + // Amg::Transform3D shift = Amg::Translation3D(xd,yd,zd) * Amg::RotationMatrix3D::Identity(); // pat->tweak(ident2,Amg::EigenTransformToCLHEP(shift)); // ATH_MSG_VERBOSE( "Updated module " << mdet << "," << mbec @@ -495,13 +553,13 @@ std::string TrackerAlignDBTool::dirkey(const int station, // bool InDetAlignDBTool::setTrans(const Identifier& ident, const int level, // const Amg::Vector3D& translate, double alpha, double beta, double gamma) const // { - + // Amg::Translation3D newtranslation(translate); // Amg::Transform3D newtrans = newtranslation * Amg::RotationMatrix3D::Identity(); // newtrans *= Amg::AngleAxis3D(gamma, Amg::Vector3D(0.,0.,1.)); // newtrans *= Amg::AngleAxis3D(beta, Amg::Vector3D(0.,1.,0.)); // newtrans *= Amg::AngleAxis3D(alpha, Amg::Vector3D(1.,0.,0.)); - + // return setTrans(ident, level, newtrans); // } @@ -543,13 +601,13 @@ std::string TrackerAlignDBTool::dirkey(const int station, // bool InDetAlignDBTool::tweakTrans(const Identifier& ident, const int level, // const Amg::Vector3D& translate, double alpha, double beta, double gamma) const // { - + // Amg::Translation3D newtranslation(translate); // Amg::Transform3D newtrans = newtranslation * Amg::RotationMatrix3D::Identity(); // newtrans *= Amg::AngleAxis3D(gamma, Amg::Vector3D(0.,0.,1.)); // newtrans *= Amg::AngleAxis3D(beta, Amg::Vector3D(0.,1.,0.)); // newtrans *= Amg::AngleAxis3D(alpha, Amg::Vector3D(1.,0.,0.)); - + // return tweakTrans(ident, level, newtrans); // } @@ -586,7 +644,7 @@ std::string TrackerAlignDBTool::dirkey(const int station, // /** get cumulative L1, L2, L3 trafo for (L3-) module */ // Amg::Transform3D InDetAlignDBTool::getTransL123( const Identifier& ident ) const { - + // Amg::Transform3D result ; // InDetDD::SiDetectorElement* element = m_pixman->getDetectorElement( ident ) ; // if( !element ) { @@ -623,14 +681,14 @@ std::string 
TrackerAlignDBTool::dirkey(const int station, // } StatusCode TrackerAlignDBTool::outputObjs() const { - + ATH_MSG_DEBUG( "Output AlignableTranform objects to stream" << m_outputTool ); // get the AthenaOutputStream tool -// IAthenaOutputStreamTool* optool {nullptr}; + // IAthenaOutputStreamTool* optool {nullptr}; + + // ATH_CHECK(p_toolsvc->retrieveTool("AthenaOutputStreamTool", m_par_condstream, optool)); -// ATH_CHECK(p_toolsvc->retrieveTool("AthenaOutputStreamTool", m_par_condstream, optool)); - -// ATH_CHECK(optool->connectOutput("myAlignFile.pool.root")); + // ATH_CHECK(optool->connectOutput("myAlignFile.pool.root")); ATH_CHECK(m_outputTool->connectOutput()); // construct list of objects to be written out, either @@ -638,13 +696,13 @@ StatusCode TrackerAlignDBTool::outputObjs() const { int npairs=1; IAthenaOutputStreamTool::TypeKeyPairs typekeys(npairs); typekeys[0]= - IAthenaOutputStreamTool::TypeKeyPair("AlignableTransformContainer", - m_par_dbroot); + IAthenaOutputStreamTool::TypeKeyPair("AlignableTransformContainer", + m_par_dbroot); if (!(detStore()->contains<AlignableTransformContainer>(m_par_dbroot))) ATH_MSG_ERROR( - "Expected " << m_par_dbroot << " object not found" ); + "Expected " << m_par_dbroot << " object not found" ); // write objects to stream -// ATH_CHECK(optool->streamObjects(typekeys, "myAlignFile.pool.root")); + // ATH_CHECK(optool->streamObjects(typekeys, "myAlignFile.pool.root")); ATH_CHECK(m_outputTool->streamObjects(typekeys)); // commit output @@ -654,10 +712,10 @@ StatusCode TrackerAlignDBTool::outputObjs() const { } StatusCode TrackerAlignDBTool::fillDB(const std::string tag, - const unsigned int run1, const unsigned int event1, - const unsigned int run2, const unsigned int event2) const + const unsigned int run1, const unsigned int event1, + const unsigned int run2, const unsigned int event2) const { - + ATH_MSG_DEBUG( "fillDB: Data tag " << tag ); ATH_MSG_DEBUG( "Run/evt1 [" << run1 << "," << event1 << "]" ); ATH_MSG_DEBUG( 
"Run/evt2 [" << run2 << "," << event2 << "]" ); @@ -669,7 +727,7 @@ StatusCode TrackerAlignDBTool::fillDB(const std::string tag, // loop over all AlignableTransform objects created earlier and save them ATH_CHECK(regsvc->registerIOV( - "AlignableTransformContainer",m_par_dbroot, tag, run1, run2, event1, event2)); + "AlignableTransformContainer",m_par_dbroot, tag, run1, run2, event1, event2)); ATH_MSG_DEBUG( "Stored AlignableTransform object " << m_par_dbroot ); ATH_MSG_DEBUG( " Wrote one AlignableTransformContainer objects to conditions database" ); @@ -678,12 +736,12 @@ StatusCode TrackerAlignDBTool::fillDB(const std::string tag, StatusCode TrackerAlignDBTool::printDB(const int level) const { - + ATH_MSG_DEBUG("Printout TrackerAlign database contents, detail level" << level ); for (std::vector<std::string>::const_iterator iobj = m_alignobjs.begin(); - iobj != m_alignobjs.end(); - ++iobj) + iobj != m_alignobjs.end(); + ++iobj) { const AlignableTransform* pat; if ((pat = cgetTransPtr(*iobj))) @@ -691,31 +749,31 @@ StatusCode TrackerAlignDBTool::printDB(const int level) const ATH_MSG_DEBUG( "AlignableTransform object " << *iobj ); int nobj = 0; for (AlignableTransform::AlignTransMem_citr cit = pat->begin(); - cit!=pat->end(); - ++cit) + cit!=pat->end(); + ++cit) { - const Identifier& ident = cit->identify(); - const Amg::Transform3D& trans = Amg::CLHEPTransformToEigen( cit->transform() ); - Amg::Vector3D shift = trans.translation(); - //Amg::RotationMatrix3D rot=trans.rotation(); - int station, layer, eta, phi, side; - if (idToDetSet(ident, station, layer, eta, phi, side)) - { - if (level > 1) - { - double alpha, beta, gamma; - extractAlphaBetaGamma(trans, alpha, beta, gamma); - ATH_MSG_DEBUG( "Tracker [" << station << "," << layer << - "," << eta << "," << phi << "," << side << "] Trans:(" << - shift.x() << "," << shift.y() << "," << shift.z() << ") Rot:{" - << alpha << "," << beta << "," << gamma << "}"); - } - ++nobj; - } - else - { - ATH_MSG_ERROR("Unknown 
identifier in AlignableTransform" ); - } + const Identifier& ident = cit->identify(); + const Amg::Transform3D& trans = Amg::CLHEPTransformToEigen( cit->transform() ); + Amg::Vector3D shift = trans.translation(); + //Amg::RotationMatrix3D rot=trans.rotation(); + int station, layer, eta, phi, side; + if (idToDetSet(ident, station, layer, eta, phi, side)) + { + if (level > 1) + { + double alpha, beta, gamma; + extractAlphaBetaGamma(trans, alpha, beta, gamma); + ATH_MSG_DEBUG( "Tracker [" << station << "," << layer << + "," << eta << "," << phi << "," << side << "] Trans:(" << + shift.x() << "," << shift.y() << "," << shift.z() << ") Rot:{" + << alpha << "," << beta << "," << gamma << "}"); + } + ++nobj; + } + else + { + ATH_MSG_ERROR("Unknown identifier in AlignableTransform" ); + } } ATH_MSG_DEBUG( "Object contains " << nobj << " transforms" ); } @@ -731,28 +789,28 @@ StatusCode TrackerAlignDBTool::printDB(const int level) const AlignableTransform* TrackerAlignDBTool::getTransPtr(const std::string key) const { - // look in collection to retrieve pointer to AlignableTransform object of - // given key and return it, return 0 if not collection or key value not found - AlignableTransformContainer* patc; - AlignableTransform* pat {nullptr}; + // look in collection to retrieve pointer to AlignableTransform object of + // given key and return it, return 0 if not collection or key value not found + AlignableTransformContainer* patc; + AlignableTransform* pat {nullptr}; - if(detStore()->retrieve(patc,m_par_dbroot ).isFailure()) - { + if(detStore()->retrieve(patc,m_par_dbroot ).isFailure()) + { ATH_MSG_ERROR("Unable to retrieve alignment container from DetStore."); return nullptr; - } + } - for (DataVector<AlignableTransform>::iterator dva=patc->begin(); - dva!=patc->end();++dva) - { - if ((*dva)->tag()==key) + for (DataVector<AlignableTransform>::iterator dva=patc->begin(); + dva!=patc->end();++dva) { - pat=*dva; - break; + if ((*dva)->tag()==key) + { + pat=*dva; + break; + 
} } + return pat; } - return pat; -} const AlignableTransform* TrackerAlignDBTool::cgetTransPtr(const std::string key) const { @@ -764,18 +822,18 @@ const AlignableTransform* TrackerAlignDBTool::cgetTransPtr(const std::string key if(detStore()->retrieve(patc, m_par_dbroot ).isFailure()) { - ATH_MSG_ERROR("Unable to retrieve alignment container from DetStore."); - return nullptr; + ATH_MSG_ERROR("Unable to retrieve alignment container from DetStore."); + return nullptr; } for (DataVector<AlignableTransform>::const_iterator dva=patc->begin(); - dva!=patc->end(); - ++dva) + dva!=patc->end(); + ++dva) { if ((*dva)->tag() == key) { - pat=*dva; - break; + pat=*dva; + break; } } return pat; @@ -784,7 +842,7 @@ const AlignableTransform* TrackerAlignDBTool::cgetTransPtr(const std::string key StatusCode TrackerAlignDBTool::sortTrans() const { // loop through all the AlignableTransform objects and sort them - + ATH_MSG_DEBUG( "Sorting all AlignableTransforms in TDS" ); AlignableTransform* pat; // use cget and a const cast to allow containers that have been read in @@ -796,7 +854,7 @@ StatusCode TrackerAlignDBTool::sortTrans() const } void TrackerAlignDBTool::extractAlphaBetaGamma(const Amg::Transform3D & trans, - double& alpha, double& beta, double &gamma) const + double& alpha, double& beta, double &gamma) const { double siny = trans(0,2); beta = asin(siny); @@ -832,14 +890,14 @@ void TrackerAlignDBTool::extractAlphaBetaGamma(const Amg::Transform3D & trans, // idToDetSet(ident,det,bec,layer,ring,sector,side); // const unsigned int DBident=det*10000+2*bec*1000+layer*100+ring*10+sector; // // so far not a very fancy DB identifier, but seems elaborate enough for this simple structure - + // if (StatusCode::SUCCESS==detStore()->retrieve(atrlistcol1,key)) { // // loop over objects in collection // //atrlistcol1->dump(); // atrlistcol2 = const_cast<CondAttrListCollection*>(atrlistcol1); // if (atrlistcol2!=0){ // for (CondAttrListCollection::const_iterator 
citr=atrlistcol2->begin(); citr!=atrlistcol2->end();++citr) { - + // const coral::AttributeList& atrlist=citr->second; // coral::AttributeList& atrlist2 = const_cast<coral::AttributeList&>(atrlist); @@ -865,7 +923,7 @@ void TrackerAlignDBTool::extractAlphaBetaGamma(const Amg::Transform3D & trans, // oldtrans *= Amg::AngleAxis3D(atrlist2["Rz"].data<float>()*CLHEP::mrad, Amg::Vector3D(0.,0.,1.)); // oldtrans *= Amg::AngleAxis3D(atrlist2["Ry"].data<float>()*CLHEP::mrad, Amg::Vector3D(0.,1.,0.)); // oldtrans *= Amg::AngleAxis3D(atrlist2["Rx"].data<float>()*CLHEP::mrad, Amg::Vector3D(1.,0.,0.)); - + // // get the new transform // Amg::Transform3D newtrans = trans*oldtrans; @@ -880,7 +938,7 @@ void TrackerAlignDBTool::extractAlphaBetaGamma(const Amg::Transform3D & trans, // atrlist2["Rx"].data<float>() = alpha/CLHEP::mrad ; // atrlist2["Ry"].data<float>() = beta/CLHEP::mrad ; // atrlist2["Rz"].data<float>() = gamma/CLHEP::mrad ; - + // result = true; // msg(MSG::DEBUG) << "Tweak New global DB -- channel: " << citr->first // << " ,det: " << atrlist2["det"].data<int>() @@ -894,7 +952,7 @@ void TrackerAlignDBTool::extractAlphaBetaGamma(const Amg::Transform3D & trans, // << " ,Rx: " << atrlist2["Rx"].data<float>() // << " ,Ry: " << atrlist2["Ry"].data<float>() // << " ,Rz: " << atrlist2["Rz"].data<float>() << endmsg; - + // } // } // } diff --git a/Tracker/TrackerAlignTools/TrackerAlignGenTools/src/TrackerAlignDBTool.h b/Tracker/TrackerAlignTools/TrackerAlignGenTools/src/TrackerAlignDBTool.h index 2c82c20a10baffab0b01d6c00af036b6adabaa11..6ee1a190427d31b683aa946fe54726e8b04079ba 100644 --- a/Tracker/TrackerAlignTools/TrackerAlignGenTools/src/TrackerAlignDBTool.h +++ b/Tracker/TrackerAlignTools/TrackerAlignGenTools/src/TrackerAlignDBTool.h @@ -186,6 +186,8 @@ class TrackerAlignDBTool: virtual public ITrackerAlignDBTool, public AthAlgTool bool m_dynamicDB; bool m_forceUserDBConfig; std::set<int> m_stations; + Gaudi::Property<std::map<std::string, std::vector<double>>> 
m_alignment{ this, "AlignmentConstants", {}, "Alignment constants."}; + mutable ToolHandle<IAthenaOutputStreamTool> m_outputTool { this, "OutputTool", "AthenaOutputStreamTool/CondStream1"} ; diff --git a/Tracker/TrackerRecAlgs/TrackSeedPerformanceWriter/CMakeLists.txt b/Tracker/TrackerRecAlgs/TrackSeedPerformanceWriter/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..f712d3881883fb0cdff9e8186b5b36578d48bf62 --- /dev/null +++ b/Tracker/TrackerRecAlgs/TrackSeedPerformanceWriter/CMakeLists.txt @@ -0,0 +1,12 @@ +atlas_subdir(TrackSeedPerformanceWriter) + +atlas_add_component( + TrackSeedPerformanceWriter + src/TrackSeedPerformanceWriter.h + src/TrackSeedPerformanceWriter.cxx + src/components/TrackSeedPerformanceWriter_entries.cxx + LINK_LIBRARIES AthenaBaseComps StoreGateLib TrkTrack TrackerSimData TrackerPrepRawData TrkRIO_OnTrack TrackerRIO_OnTrack +) + +atlas_install_python_modules(python/*.py) +atlas_install_scripts(test/*.py) diff --git a/Tracker/TrackerRecAlgs/TrackSeedPerformanceWriter/python/TrackSeedPerformanceWriterConfig.py b/Tracker/TrackerRecAlgs/TrackSeedPerformanceWriter/python/TrackSeedPerformanceWriterConfig.py new file mode 100644 index 0000000000000000000000000000000000000000..f8b0eca720b42ed668d7918bac86569007f4d68f --- /dev/null +++ b/Tracker/TrackerRecAlgs/TrackSeedPerformanceWriter/python/TrackSeedPerformanceWriterConfig.py @@ -0,0 +1,30 @@ +""" +Copyright (C) 2022 CERN for the benefit of the FASER collaboration +""" + +from AthenaConfiguration.ComponentFactory import CompFactory +from FaserSCT_GeoModel.FaserSCT_GeoModelConfig import FaserSCT_GeometryCfg +from OutputStreamAthenaPool.OutputStreamConfig import OutputStreamCfg + + + +def TrackSeedPerformanceWriterCfg(flags, **kwargs): + acc = FaserSCT_GeometryCfg(flags) + kwargs.setdefault("TrackCollection", "SegmentFit") + TrackSeedPerformanceWriter = CompFactory.Tracker.TrackSeedPerformanceWriter + acc.addEventAlgo(TrackSeedPerformanceWriter(**kwargs)) + + 
itemList = ["xAOD::EventInfo#*", + "xAOD::EventAuxInfo#*", + "xAOD::FaserTriggerData#*", + "xAOD::FaserTriggerDataAux#*", + "FaserSCT_RDO_Container#*", + "Tracker::FaserSCT_ClusterContainer#*", + "TrackCollection#*" + ] + acc.merge(OutputStreamCfg(flags, "ESD", itemList)) + + thistSvc = CompFactory.THistSvc() + thistSvc.Output += ["HIST1 DATAFILE='TrackSeedPerformanceWriter.root' OPT='RECREATE'"] + acc.addService(thistSvc) + return acc diff --git a/Tracker/TrackerRecAlgs/TrackSeedPerformanceWriter/python/__init__.py b/Tracker/TrackerRecAlgs/TrackSeedPerformanceWriter/python/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/Tracker/TrackerRecAlgs/TrackSeedPerformanceWriter/src/TrackSeedPerformanceWriter.cxx b/Tracker/TrackerRecAlgs/TrackSeedPerformanceWriter/src/TrackSeedPerformanceWriter.cxx new file mode 100644 index 0000000000000000000000000000000000000000..9c43a4e2a1bffc98af9c2fdd49385ddfaac0f2f9 --- /dev/null +++ b/Tracker/TrackerRecAlgs/TrackSeedPerformanceWriter/src/TrackSeedPerformanceWriter.cxx @@ -0,0 +1,124 @@ +#include "TrackSeedPerformanceWriter.h" +#include "TrackerRIO_OnTrack/FaserSCT_ClusterOnTrack.h" +#include "TrackerPrepRawData/FaserSCT_Cluster.h" +#include "TrackerIdentifier/FaserSCT_ID.h" + + +namespace Tracker { + + TrackSeedPerformanceWriter::TrackSeedPerformanceWriter(const std::string &name, ISvcLocator *pSvcLocator) + : AthReentrantAlgorithm(name, pSvcLocator), AthHistogramming(name), + m_idHelper(nullptr), m_histSvc("THistSvc/THistSvc", name) {} + + + StatusCode TrackSeedPerformanceWriter::initialize() { + ATH_CHECK(m_trackCollectionKey.initialize()); + ATH_CHECK(m_simDataCollectionKey.initialize()); + ATH_CHECK(detStore()->retrieve(m_idHelper, "FaserSCT_ID")); + + m_tree = new TTree("tree", "tree"); + m_tree->Branch("run", &m_run, "run/I"); + m_tree->Branch("event", &m_event, "event/I"); + m_tree->Branch("station", &m_station, "station/I"); + 
m_tree->Branch("chi2", &m_chi2, "chi2/D"); + m_tree->Branch("dof", &m_dof, "dof/I"); + m_tree->Branch("nHits", &m_nHits, "nHits/I"); + m_tree->Branch("x", &m_x, "x/D"); + m_tree->Branch("y", &m_y, "y/D"); + m_tree->Branch("z", &m_z, "z/D"); + m_tree->Branch("px", &m_px, "px/D"); + m_tree->Branch("py", &m_py, "py/D"); + m_tree->Branch("pz", &m_pz, "pz/D"); + // m_tree->Branch("barcode", &m_barcode, "barcode/I"); + m_tree->Branch("barcodes", &m_barcodes); + m_tree->Branch("nMajorityHits", &m_nMajorityHits, "nMajorityHits/I"); + m_tree->Branch("nMajorityParticle", &m_majorityParticle, "nMajorityParticle/I"); + // m_tree->Branch("run", &m_run); + // m_tree->Branch("event", &m_event); + // m_tree->Branch("station", &m_station); + // m_tree->Branch("barcodes", &m_barcodes); + ATH_CHECK(histSvc()->regTree("/HIST1/TrackSeedPerformance", m_tree)); + + return StatusCode::SUCCESS; + } + + + StatusCode TrackSeedPerformanceWriter::execute(const EventContext &ctx) const { + m_run = ctx.eventID().run_number(); + m_event = ctx.eventID().event_number(); + + SG::ReadHandle<TrackCollection> trackCollection{m_trackCollectionKey, ctx}; + ATH_CHECK(trackCollection.isValid()); + + SG::ReadHandle<TrackerSimDataCollection> simDataCollection {m_simDataCollectionKey, ctx}; + ATH_CHECK(simDataCollection.isValid()); + + for (const Trk::Track *track: *trackCollection) { + m_chi2 = track->fitQuality()->chiSquared(); + m_dof = track->fitQuality()->numberDoF(); + const Amg::Vector3D trackPosition = track->trackParameters()->front()->position(); + const Amg::Vector3D trackMomentum = track->trackParameters()->front()->momentum(); + m_x = trackPosition.x(); + m_y = trackPosition.y(); + m_z = trackPosition.z(); + m_px = trackMomentum.x(); + m_py = trackMomentum.y(); + m_pz = trackMomentum.z(); + m_nHits = track->measurementsOnTrack()->size(); + m_barcodes = {}; + m_hitCounts = {}; + for (const auto meas: *track->measurementsOnTrack()) { + const auto *clusterOnTrack = dynamic_cast<const 
Tracker::FaserSCT_ClusterOnTrack *>(meas); + if (clusterOnTrack) { + const Tracker::FaserSCT_Cluster *cluster = clusterOnTrack->prepRawData(); + Identifier id = cluster->identify(); + m_barcode = matchHit(id, simDataCollection.get()); + // fill hit map + if (m_hitCounts.count(m_barcode) > 0) { + m_hitCounts[m_barcode] += 1; + } else { + m_hitCounts[m_barcode] = 1; + } + m_barcodes.push_back(m_barcode); + m_station = m_idHelper->station(id); + } + } + + // find majority particle + m_nMajorityHits = 0; + m_majorityParticle = 0; + for (const auto& hit : m_hitCounts) { + if (hit.second > m_nMajorityHits) { + m_majorityParticle = hit.first; + m_nMajorityHits = hit.second; + } + } + m_tree->Fill(); + } + + return StatusCode::SUCCESS; + } + + + StatusCode TrackSeedPerformanceWriter::finalize() { + return StatusCode::SUCCESS; + } + + + int TrackSeedPerformanceWriter::matchHit( + Identifier id, const TrackerSimDataCollection *simDataCollection) const { + int barcode = 0; + if (simDataCollection->count(id) != 0) { + const auto& deposits = simDataCollection->find(id)->second.getdeposits(); + float highestDep = 0; + for (const TrackerSimData::Deposit &deposit : deposits) { + if (deposit.second > highestDep) { + highestDep = deposit.second; + barcode = deposit.first->barcode(); + } + } + } + return barcode; + } + +} // namespace Tracker diff --git a/Tracker/TrackerRecAlgs/TrackSeedPerformanceWriter/src/TrackSeedPerformanceWriter.h b/Tracker/TrackerRecAlgs/TrackSeedPerformanceWriter/src/TrackSeedPerformanceWriter.h new file mode 100644 index 0000000000000000000000000000000000000000..86192d77c69193ee86b41a2123e42819d27051cc --- /dev/null +++ b/Tracker/TrackerRecAlgs/TrackSeedPerformanceWriter/src/TrackSeedPerformanceWriter.h @@ -0,0 +1,61 @@ +#ifndef FASERACTSKALMANFILTER_TRACKSEEDPERFORMANCEWRITER_H +#define FASERACTSKALMANFILTER_TRACKSEEDPERFORMANCEWRITER_H + + +#include "AthenaBaseComps/AthReentrantAlgorithm.h" +#include "AthenaBaseComps/AthHistogramming.h" +#include 
"TrkTrack/TrackCollection.h" +#include "TrackerSimData/TrackerSimDataCollection.h" + +class TTree; +class FaserSCT_ID; + + +namespace Tracker { + class TrackSeedPerformanceWriter : public AthReentrantAlgorithm, AthHistogramming { + public: + TrackSeedPerformanceWriter(const std::string &name, ISvcLocator *pSvcLocator); + virtual ~TrackSeedPerformanceWriter() = default; + virtual StatusCode initialize() override; + virtual StatusCode execute(const EventContext &ctx) const override; + virtual StatusCode finalize() override; + const ServiceHandle<ITHistSvc> &histSvc() const; + + private: + int matchHit(Identifier id, const TrackerSimDataCollection *simDataCollection) const; + SG::ReadHandleKey<TrackCollection> m_trackCollectionKey { + this, "TrackCollection", "SegmentFit", "Input track collection name"}; + SG::ReadHandleKey<TrackerSimDataCollection> m_simDataCollectionKey { + this, "TrackerSimDataCollection", "SCT_SDO_Map"}; + ServiceHandle<ITHistSvc> m_histSvc; + const FaserSCT_ID *m_idHelper; + mutable TTree *m_tree; + + mutable unsigned int m_run; + mutable unsigned int m_event; + mutable unsigned int m_station; + mutable double m_chi2; + mutable int m_dof; + mutable int m_nHits; + mutable double m_x; + mutable double m_y; + mutable double m_z; + mutable double m_px; + mutable double m_py; + mutable double m_pz; + mutable int m_barcode; + mutable int m_majorityParticle; + mutable int m_nMajorityHits; + mutable std::vector<int> m_barcodes; + mutable std::map<int, int> m_hitCounts; + }; + + + inline const ServiceHandle<ITHistSvc> &TrackSeedPerformanceWriter::histSvc() const { + return m_histSvc; + } + +} // namespace Tracker + + +#endif // FASERACTSKALMANFILTER_TRACKSEEDPERFORMANCEWRITER_H diff --git a/Tracker/TrackerRecAlgs/TrackSeedPerformanceWriter/src/components/TrackSeedPerformanceWriter_entries.cxx b/Tracker/TrackerRecAlgs/TrackSeedPerformanceWriter/src/components/TrackSeedPerformanceWriter_entries.cxx new file mode 100644 index 
0000000000000000000000000000000000000000..9e569f003a45ef87af33da385a561d3b6a77c95a --- /dev/null +++ b/Tracker/TrackerRecAlgs/TrackSeedPerformanceWriter/src/components/TrackSeedPerformanceWriter_entries.cxx @@ -0,0 +1,3 @@ +#include "../TrackSeedPerformanceWriter.h" + +DECLARE_COMPONENT(Tracker::TrackSeedPerformanceWriter) diff --git a/Tracker/TrackerRecAlgs/TrackSeedPerformanceWriter/test/TrackSeedPerformanceWriterDbg.py b/Tracker/TrackerRecAlgs/TrackSeedPerformanceWriter/test/TrackSeedPerformanceWriterDbg.py new file mode 100644 index 0000000000000000000000000000000000000000..b00b9a623561d8b7ef3b57bc0f7bca7e824699c0 --- /dev/null +++ b/Tracker/TrackerRecAlgs/TrackSeedPerformanceWriter/test/TrackSeedPerformanceWriterDbg.py @@ -0,0 +1,44 @@ +#!/usr/bin/env python +""" +Copyright (C) 2002-2019 CERN for the benefit of the ATLAS collaboration +""" + +import sys +from AthenaCommon.Logging import log, logging +from AthenaCommon.Constants import DEBUG, VERBOSE, INFO +from AthenaCommon.Configurable import Configurable +from CalypsoConfiguration.AllConfigFlags import ConfigFlags +from AthenaConfiguration.TestDefaults import defaultTestFiles +from CalypsoConfiguration.MainServicesConfig import MainServicesCfg +from AthenaPoolCnvSvc.PoolReadConfig import PoolReadCfg +from AthenaPoolCnvSvc.PoolWriteConfig import PoolWriteCfg +from TrackerPrepRawDataFormation.TrackerPrepRawDataFormationConfig import FaserSCT_ClusterizationCfg +from TrackerSegmentFit.TrackerSegmentFitConfig import SegmentFitAlgCfg +from TrackSeedPerformanceWriter.TrackSeedPerformanceWriterConfig import TrackSeedPerformanceWriterCfg + +log.setLevel(DEBUG) +Configurable.configurableRun3Behavior = True + +ConfigFlags.Input.Files = ['my.RDO.pool.root'] +ConfigFlags.Output.ESDFileName = "seeds.ESD.pool.root" +ConfigFlags.IOVDb.GlobalTag = "OFLCOND-FASER-01" +ConfigFlags.IOVDb.DatabaseInstance = "OFLP200" +ConfigFlags.Input.ProjectName = "data21" +ConfigFlags.Input.isMC = True +ConfigFlags.GeoModel.FaserVersion = 
"FASER-01" +ConfigFlags.Common.isOnline = False +ConfigFlags.GeoModel.Align.Dynamic = False +ConfigFlags.Beam.NumberOfCollisions = 0. +ConfigFlags.lock() + +acc = MainServicesCfg(ConfigFlags) +acc.merge(PoolReadCfg(ConfigFlags)) +acc.merge(PoolWriteCfg(ConfigFlags)) +acc.merge(FaserSCT_ClusterizationCfg(ConfigFlags)) +acc.merge(SegmentFitAlgCfg(ConfigFlags, MaxClusters=20, TanThetaCut=0.1)) +acc.merge(TrackSeedPerformanceWriterCfg(ConfigFlags)) +#acc.getEventAlgo("Tracker::SegmentFitAlg").OutputLevel = DEBUG +acc.getEventAlgo("Tracker::TrackSeedPerformanceWriter").OutputLevel = DEBUG + +sc = acc.run(maxEvents=1000) +sys.exit(not sc.isSuccess()) diff --git a/Tracking/Acts/FaserActsKalmanFilter/FaserActsKalmanFilter/CombinatorialKalmanFilterAlg.h b/Tracking/Acts/FaserActsKalmanFilter/FaserActsKalmanFilter/CombinatorialKalmanFilterAlg.h index b6f33a1953cc834b4cfced4a5d3aaa4eaaf729bd..05cd0db8593b9ab4273cb74d003d64b2269b88b8 100644 --- a/Tracking/Acts/FaserActsKalmanFilter/FaserActsKalmanFilter/CombinatorialKalmanFilterAlg.h +++ b/Tracking/Acts/FaserActsKalmanFilter/FaserActsKalmanFilter/CombinatorialKalmanFilterAlg.h @@ -112,9 +112,11 @@ public: Gaudi::Property<bool> m_resolvePassive {this, "resolvePassive", false}; Gaudi::Property<bool> m_resolveMaterial {this, "resolveMaterial", true}; Gaudi::Property<bool> m_resolveSensitive {this, "resolveSensitive", true}; - Gaudi::Property<double> m_maxSteps {this, "maxSteps", 10000}; + Gaudi::Property<bool> m_noDiagnostics {this, "noDiagnostics", true, "Set ACTS logging level to INFO and do not run performance writer, states writer or summary writer"}; + Gaudi::Property<double> m_maxSteps {this, "maxSteps", 1000}; Gaudi::Property<double> m_chi2Max {this, "chi2Max", 15}; Gaudi::Property<unsigned long> m_nMax {this, "nMax", 10}; + SG::ReadCondHandleKey<FaserFieldCacheCondObj> m_fieldCondObjInputKey {this, "FaserFieldCacheCondObj", "fieldCondObj", "Name of the Magnetic Field conditions object key"}; ToolHandle<ITrackSeedTool> 
m_trackSeedTool {this, "TrackSeed", "ClusterTrackSeedTool"}; ToolHandle<IFaserActsTrackingGeometryTool> m_trackingGeometryTool {this, "TrackingGeometryTool", "FaserActsTrackingGeometryTool"}; diff --git a/Tracking/Acts/FaserActsKalmanFilter/FaserActsKalmanFilter/MyAmbiguitySolver.h b/Tracking/Acts/FaserActsKalmanFilter/FaserActsKalmanFilter/MyAmbiguitySolver.h new file mode 100644 index 0000000000000000000000000000000000000000..47257befa2fcbd70da95d1ee88bd41d0fdabeb2a --- /dev/null +++ b/Tracking/Acts/FaserActsKalmanFilter/FaserActsKalmanFilter/MyAmbiguitySolver.h @@ -0,0 +1,80 @@ +#ifndef FASERACTSKALMANFILTER_AMBIGUITYSOLVER_H +#define FASERACTSKALMANFILTER_AMBIGUITYSOLVER_H + +#include "Acts/TrackFinding/CombinatorialKalmanFilter.hpp" +#include "FaserActsKalmanFilter/FaserActsRecMultiTrajectory.h" + +using CombinatorialKalmanFilterResult = Acts::CombinatorialKalmanFilterResult<IndexSourceLink>; +using TrackFitterResult = Acts::Result<CombinatorialKalmanFilterResult>; +using TrackFinderResult = std::vector<TrackFitterResult>; + + +size_t numberMeasurements(const CombinatorialKalmanFilterResult& ckfResult) { + auto traj = FaserActsRecMultiTrajectory(ckfResult.fittedStates, ckfResult.lastMeasurementIndices, ckfResult.fittedParameters); + const auto& mj = traj.multiTrajectory(); + const auto& trackTips = traj.tips(); + size_t maxMeasurements = 0; + for (const auto& trackTip : trackTips) { + auto trajState = Acts::MultiTrajectoryHelpers::trajectoryState(mj, trackTip); + size_t nMeasurements = trajState.nMeasurements; + if (nMeasurements > maxMeasurements) { + maxMeasurements = nMeasurements; + } + std::cout << "# measurements: " << trajState.nMeasurements << std::endl; + } + return maxMeasurements; +} + +int countSharedHits(const CombinatorialKalmanFilterResult& result1, const CombinatorialKalmanFilterResult& result2) { + int count = 0; + std::vector<size_t> hitIndices {}; + + for (auto measIndex : result1.lastMeasurementIndices) { + 
result1.fittedStates.visitBackwards(measIndex, [&](const auto& state) { + if (not state.typeFlags().test(Acts::TrackStateFlag::MeasurementFlag)) + return; + size_t hitIndex = state.uncalibrated().index(); + hitIndices.emplace_back(hitIndex); + }); + } + + for (auto measIndex : result2.lastMeasurementIndices) { + result2.fittedStates.visitBackwards(measIndex, [&](const auto& state) { + if (not state.typeFlags().test(Acts::TrackStateFlag::MeasurementFlag)) + return; + size_t hitIndex = state.uncalibrated().index(); + if (std::find(hitIndices.begin(), hitIndices.end(), hitIndex) != hitIndices.end()) { + count += 1; + } + }); + } + return count; +} + + +std::pair<int, int> solveAmbiguity(TrackFinderResult& results, size_t minMeasurements = 13) { + std::map<std::pair<size_t, size_t>, size_t> trackPairs {}; + for (size_t i = 0; i < results.size(); ++i) { + // if (not results.at(i).ok()) continue; + // if (numberMeasurements(results.at(i).value()) < minMeasurements) continue; + for (size_t j = i+1; j < results.size(); ++j) { + // if (not results.at(j).ok()) continue; + // if (numberMeasurements(results.at(j).value()) < minMeasurements) continue; + int n = countSharedHits(results.at(i).value(), results.at(j).value()); + trackPairs[std::make_pair(i, j)] = n; + } + } + + std::pair<size_t, size_t> bestTrackPair; + size_t minSharedHits = std::numeric_limits<size_t>::max(); + for (const auto& trackPair : trackPairs) { + if (trackPair.second < minSharedHits) { + minSharedHits = trackPair.second; + bestTrackPair = trackPair.first; + } + } + + return bestTrackPair; +} + +#endif //FASERACTSKALMANFILTER_AMBIGUITYSOLVER_H diff --git a/Tracking/Acts/FaserActsKalmanFilter/FaserActsKalmanFilter/PerformanceWriterTool.h b/Tracking/Acts/FaserActsKalmanFilter/FaserActsKalmanFilter/PerformanceWriterTool.h index 037a7da407d4c09d2e4cd20cba31731af455f52d..198e4515988226e71e76c07b1a812ca00e64c641 100644 --- a/Tracking/Acts/FaserActsKalmanFilter/FaserActsKalmanFilter/PerformanceWriterTool.h +++ 
b/Tracking/Acts/FaserActsKalmanFilter/FaserActsKalmanFilter/PerformanceWriterTool.h @@ -33,6 +33,7 @@ private: this, "McEventCollection", "BeamTruthEvent"}; ToolHandle<IFaserActsExtrapolationTool> m_extrapolationTool { this, "ExtrapolationTool", "FaserActsExtrapolationTool"}; + Gaudi::Property<bool> m_noDiagnostics {this, "noDiagnostics", true, "Set ACTS logging level to INFO and do not run performance writer, states writer or summary writer"}; Gaudi::Property<std::string> m_filePath{this, "FilePath", "performance_ckf.root"}; TFile* m_outputFile{nullptr}; diff --git a/Tracking/Acts/FaserActsKalmanFilter/FaserActsKalmanFilter/RootTrajectoryStatesWriterTool.h b/Tracking/Acts/FaserActsKalmanFilter/FaserActsKalmanFilter/RootTrajectoryStatesWriterTool.h index 60ed86e7d4902e862b0d538d63088b6a39a731d6..ee8ef5d25a9f808d4ed8ae87787b437188969b49 100644 --- a/Tracking/Acts/FaserActsKalmanFilter/FaserActsKalmanFilter/RootTrajectoryStatesWriterTool.h +++ b/Tracking/Acts/FaserActsKalmanFilter/FaserActsKalmanFilter/RootTrajectoryStatesWriterTool.h @@ -40,6 +40,7 @@ private: const FaserSCT_ID* m_idHelper{nullptr}; const TrackerDD::SCT_DetectorManager* m_detMgr {nullptr}; + Gaudi::Property<bool> m_noDiagnostics {this, "noDiagnostics", true, "Set ACTS logging level to INFO and do not run performance writer, states writer or summary writer"}; Gaudi::Property<std::string> m_filePath {this, "FilePath", "track_states_ckf.root", "Output root file"}; Gaudi::Property<std::string> m_treeName {this, "TreeName", "tree", "Tree name"}; Gaudi::Property<bool> m_mc {this, "MC", false}; @@ -127,4 +128,3 @@ private: }; #endif // FASERACTSKALMANFILTER_ROOTTRAJECTORYSTATESWRITERTOOL_H - diff --git a/Tracking/Acts/FaserActsKalmanFilter/FaserActsKalmanFilter/RootTrajectorySummaryWriterTool.h b/Tracking/Acts/FaserActsKalmanFilter/FaserActsKalmanFilter/RootTrajectorySummaryWriterTool.h index 2ce72c2429164d9ff8af8e77cf95aaac36d8df26..33b5ee882b8a5f4358cb8b65203704f4bbd9c9a3 100644 --- 
a/Tracking/Acts/FaserActsKalmanFilter/FaserActsKalmanFilter/RootTrajectorySummaryWriterTool.h +++ b/Tracking/Acts/FaserActsKalmanFilter/FaserActsKalmanFilter/RootTrajectorySummaryWriterTool.h @@ -40,6 +40,7 @@ private: ToolHandle<IFaserActsExtrapolationTool> m_extrapolationTool { this, "ExtrapolationTool", "FaserActsExtrapolationTool"}; + Gaudi::Property<bool> m_noDiagnostics {this, "noDiagnostics", true, "Set ACTS logging level to INFO and do not run performance writer, states writer or summary writer"}; Gaudi::Property<std::string> m_filePath{this, "FilePath", "track_summary_ckf.root", "Output root file"}; Gaudi::Property<std::string> m_treeName{this, "TreeName", "tree", "Tree name"}; diff --git a/Tracking/Acts/FaserActsKalmanFilter/FaserActsKalmanFilter/ThreeStationTrackSeedTool.h b/Tracking/Acts/FaserActsKalmanFilter/FaserActsKalmanFilter/ThreeStationTrackSeedTool.h index 15d20cdfe73007d410cf8966cd1cc843127061b0..3cf450414f20733b873ceda542f7c4ab44cdd573 100644 --- a/Tracking/Acts/FaserActsKalmanFilter/FaserActsKalmanFilter/ThreeStationTrackSeedTool.h +++ b/Tracking/Acts/FaserActsKalmanFilter/FaserActsKalmanFilter/ThreeStationTrackSeedTool.h @@ -81,7 +81,7 @@ private: Gaudi::Property<double> m_origin {this, "origin", 0, "z position of the reference surface"}; static Acts::CurvilinearTrackParameters get_params( - const Amg::Vector3D& position_st1, const Amg::Vector3D& position_st2, const Amg::Vector3D& position_st3, const Acts::BoundSymMatrix& cov, double origin) ; + const Amg::Vector3D& position_st1, const Amg::Vector3D& position_st2, const Amg::Vector3D& position_st3, const Acts::BoundSymMatrix& cov, double origin); static std::pair<double, double> momentum(const std::map<int, Amg::Vector3D>& pos, double B=0.57); }; @@ -126,4 +126,3 @@ ThreeStationTrackSeedTool::spacePoints() const { } #endif // FASERACTSKALMANFILTER_THREESTATIONTRACKSEEDTOOL_H - diff --git a/Tracking/Acts/FaserActsKalmanFilter/python/CombinatorialKalmanFilterConfig.py 
b/Tracking/Acts/FaserActsKalmanFilter/python/CombinatorialKalmanFilterConfig.py index 6c76a8a5fcfd426a78348887b2c285f59ea6989d..b3aa4a82b7b68537b42f8362f160b46fd97d619a 100644 --- a/Tracking/Acts/FaserActsKalmanFilter/python/CombinatorialKalmanFilterConfig.py +++ b/Tracking/Acts/FaserActsKalmanFilter/python/CombinatorialKalmanFilterConfig.py @@ -58,19 +58,23 @@ def CombinatorialKalmanFilterCfg(flags, **kwargs): track_seed_writer_tool.FilePath = "TrackSeeds.root" trajectory_states_writer_tool = CompFactory.RootTrajectoryStatesWriterTool() + trajectory_states_writer_tool.noDiagnostics = kwargs["noDiagnostics"] trajectory_states_writer_tool.MC = True trajectory_summary_writer_tool = CompFactory.RootTrajectorySummaryWriterTool() + trajectory_summary_writer_tool .noDiagnostics = kwargs["noDiagnostics"] actsExtrapolationTool = CompFactory.FaserActsExtrapolationTool("FaserActsExtrapolationTool") actsExtrapolationTool.MaxSteps = 1000 actsExtrapolationTool.TrackingGeometryTool = CompFactory.FaserActsTrackingGeometryTool("TrackingGeometryTool") performance_writer_tool = CompFactory.PerformanceWriterTool("PerformanceWriterTool") + performance_writer_tool.noDiagnostics = kwargs["noDiagnostics"] performance_writer_tool.ExtrapolationTool = actsExtrapolationTool ckf = CompFactory.CombinatorialKalmanFilterAlg(**kwargs) ckf.TrackSeed = track_seed_tool ckf.ActsLogging = "INFO" + ckf.noDiagnostics = kwargs["noDiagnostics"] ckf.RootTrajectoryStatesWriterTool = trajectory_states_writer_tool ckf.RootTrajectorySummaryWriterTool = trajectory_summary_writer_tool ckf.PerformanceWriterTool = performance_writer_tool diff --git a/Tracking/Acts/FaserActsKalmanFilter/src/CombinatorialKalmanFilterAlg.cxx b/Tracking/Acts/FaserActsKalmanFilter/src/CombinatorialKalmanFilterAlg.cxx index ee04249f024e2c17eea42704be16b365cc38e409..2cacbeda0e1113ac7b6e146c4b56ae43b007e467 100644 --- a/Tracking/Acts/FaserActsKalmanFilter/src/CombinatorialKalmanFilterAlg.cxx +++ 
b/Tracking/Acts/FaserActsKalmanFilter/src/CombinatorialKalmanFilterAlg.cxx @@ -51,10 +51,10 @@ StatusCode CombinatorialKalmanFilterAlg::initialize() { ATH_CHECK(detStore()->retrieve(m_idHelper,"FaserSCT_ID")); m_fit = makeTrackFinderFunction(m_trackingGeometryTool->trackingGeometry(), m_resolvePassive, m_resolveMaterial, m_resolveSensitive); - // FIXME fix Acts logging level - if (m_actsLogging == "VERBOSE") { + if (m_actsLogging == "VERBOSE" && !m_noDiagnostics) { m_logger = Acts::getDefaultLogger("KalmanFitter", Acts::Logging::VERBOSE); - } else if (m_actsLogging == "DEBUG") { + } else if (m_actsLogging == "DEBUG" && !m_noDiagnostics) { + m_logger = Acts::getDefaultLogger("KalmanFitter", Acts::Logging::DEBUG); } else { m_logger = Acts::getDefaultLogger("KalmanFitter", Acts::Logging::INFO); } @@ -65,7 +65,7 @@ StatusCode CombinatorialKalmanFilterAlg::initialize() { StatusCode CombinatorialKalmanFilterAlg::execute() { const EventContext& ctx = Gaudi::Hive::currentContext(); - ++m_numberOfEvents; + m_numberOfEvents++; ATH_CHECK(m_trackCollection.initialize()); SG::WriteHandle<TrackCollection> trackContainer{m_trackCollection,ctx}; @@ -173,15 +173,14 @@ StatusCode CombinatorialKalmanFilterAlg::execute() { } // run the performance writer - if (m_statesWriter) { - ATH_CHECK(m_trajectoryStatesWriterTool->write(geoctx, selectedTrajectories)); + if (m_statesWriter && !m_noDiagnostics) { + ATH_CHECK(m_trajectoryStatesWriterTool->write(geoctx, trajectories)); } - if (m_summaryWriter) { - ATH_CHECK(m_trajectorySummaryWriterTool->write(geoctx, selectedTrajectories)); + if (m_summaryWriter && !m_noDiagnostics) { + ATH_CHECK(m_trajectorySummaryWriterTool->write(geoctx, trajectories)); } - if (m_performanceWriter) { - ATH_MSG_DEBUG("?? 
performance writer tool"); - ATH_CHECK(m_performanceWriterTool->write(geoctx, selectedTrajectories)); + if (m_performanceWriter && !m_noDiagnostics) { + ATH_CHECK(m_performanceWriterTool->write(geoctx, trajectories)); } ATH_CHECK(trackContainer.record(std::move(outputTracks))); @@ -190,6 +189,11 @@ StatusCode CombinatorialKalmanFilterAlg::execute() { StatusCode CombinatorialKalmanFilterAlg::finalize() { + ATH_MSG_INFO("CombinatorialKalmanFilterAlg::finalize()"); + ATH_MSG_INFO(m_numberOfEvents << " events processed."); + ATH_MSG_INFO(m_numberOfTrackSeeds << " seeds."); + ATH_MSG_INFO(m_numberOfFittedTracks << " fitted tracks."); + ATH_MSG_INFO(m_numberOfSelectedTracks << " good fitted tracks."); return StatusCode::SUCCESS; } diff --git a/Tracking/Acts/FaserActsKalmanFilter/src/MyAmbiguitySolver.cxx b/Tracking/Acts/FaserActsKalmanFilter/src/MyAmbiguitySolver.cxx new file mode 100644 index 0000000000000000000000000000000000000000..998269a83cf6228092dd3b31987696f929820e4e --- /dev/null +++ b/Tracking/Acts/FaserActsKalmanFilter/src/MyAmbiguitySolver.cxx @@ -0,0 +1,3 @@ +#include "FaserActsKalmanFilter/MyAmbiguitySolver.h" + + diff --git a/Tracking/Acts/FaserActsKalmanFilter/src/PerformanceWriterTool.cxx b/Tracking/Acts/FaserActsKalmanFilter/src/PerformanceWriterTool.cxx index 3f99bea98466b59a4ccaa12c898c1f9660ca27bc..b01d58ed37c095c451c3f453cbbcc4f261945cec 100644 --- a/Tracking/Acts/FaserActsKalmanFilter/src/PerformanceWriterTool.cxx +++ b/Tracking/Acts/FaserActsKalmanFilter/src/PerformanceWriterTool.cxx @@ -11,43 +11,46 @@ PerformanceWriterTool::PerformanceWriterTool( StatusCode PerformanceWriterTool::initialize() { - ATH_CHECK(m_extrapolationTool.retrieve()); - ATH_CHECK(m_mcEventCollectionKey.initialize()); - ATH_CHECK(m_simDataCollectionKey.initialize()); - - std::string filePath = m_filePath; - m_outputFile = TFile::Open(filePath.c_str(), "RECREATE"); - if (m_outputFile == nullptr) { - ATH_MSG_WARNING("Unable to open output file at " << m_filePath); - return 
StatusCode::RECOVERABLE; - } + if (!m_noDiagnostics) { + ATH_CHECK(m_extrapolationTool.retrieve()); + ATH_CHECK(m_mcEventCollectionKey.initialize()); + ATH_CHECK(m_simDataCollectionKey.initialize()); + + std::string filePath = m_filePath; + m_outputFile = TFile::Open(filePath.c_str(), "RECREATE"); + if (m_outputFile == nullptr) { + ATH_MSG_WARNING("Unable to open output file at " << m_filePath); + return StatusCode::RECOVERABLE; + } - // initialize the residual and efficiency plots tool - m_resPlotTool.book(m_resPlotCache); - m_effPlotTool.book(m_effPlotCache); - m_summaryPlotTool.book(m_summaryPlotCache); + // initialize the residual and efficiency plots tool + m_resPlotTool.book(m_resPlotCache); + m_effPlotTool.book(m_effPlotCache); + m_summaryPlotTool.book(m_summaryPlotCache); + } return StatusCode::SUCCESS; } StatusCode PerformanceWriterTool::finalize() { - // fill residual and pull details into additional hists - m_resPlotTool.refinement(m_resPlotCache); - if (m_outputFile) { - m_outputFile->cd(); - m_resPlotTool.write(m_resPlotCache); - m_effPlotTool.write(m_effPlotCache); - m_summaryPlotTool.write(m_summaryPlotCache); - ATH_MSG_VERBOSE("Wrote performance plots to '" << m_outputFile->GetPath() << "'"); - } + if (!m_noDiagnostics) { + // fill residual and pull details into additional hists + m_resPlotTool.refinement(m_resPlotCache); + if (m_outputFile) { + m_outputFile->cd(); + m_resPlotTool.write(m_resPlotCache); + m_effPlotTool.write(m_effPlotCache); + m_summaryPlotTool.write(m_summaryPlotCache); + ATH_MSG_VERBOSE("Wrote performance plots to '" << m_outputFile->GetPath() << "'"); + } - m_resPlotTool.clear(m_resPlotCache); - m_effPlotTool.clear(m_effPlotCache); - m_summaryPlotTool.clear(m_summaryPlotCache); - if (m_outputFile) { - m_outputFile->Close(); + m_resPlotTool.clear(m_resPlotCache); + m_effPlotTool.clear(m_effPlotCache); + m_summaryPlotTool.clear(m_summaryPlotCache); + if (m_outputFile) { + m_outputFile->Close(); + } } - return StatusCode::SUCCESS; } 
diff --git a/Tracking/Acts/FaserActsKalmanFilter/src/RootTrajectoryStatesWriterTool.cxx b/Tracking/Acts/FaserActsKalmanFilter/src/RootTrajectoryStatesWriterTool.cxx index e81f46ff6d2d87c96b9d86fba97982bbf01dc4cb..b857ce4788a0a8b51ad8bcc8080761bb732adac9 100644 --- a/Tracking/Acts/FaserActsKalmanFilter/src/RootTrajectoryStatesWriterTool.cxx +++ b/Tracking/Acts/FaserActsKalmanFilter/src/RootTrajectoryStatesWriterTool.cxx @@ -23,184 +23,187 @@ RootTrajectoryStatesWriterTool::RootTrajectoryStatesWriterTool( : AthAlgTool(type, name, parent) {} StatusCode RootTrajectoryStatesWriterTool::initialize() { - ATH_CHECK(m_mcEventCollectionKey.initialize()); - ATH_CHECK(m_simDataCollectionKey.initialize()); - ATH_CHECK(m_faserSiHitKey.initialize()); - ATH_CHECK(detStore()->retrieve(m_idHelper, "FaserSCT_ID")); - ATH_CHECK(detStore()->retrieve(m_detMgr, "SCT")); - - std::string filePath = m_filePath; - std::string treeName = m_treeName; - m_outputFile = TFile::Open(filePath.c_str(), "RECREATE"); - if (m_outputFile == nullptr) { - ATH_MSG_ERROR("Unable to open output file at " << m_filePath); - return StatusCode::FAILURE; - } - m_outputFile->cd(); - m_outputTree = new TTree(treeName.c_str(), treeName.c_str()); - if (m_outputTree == nullptr) { - ATH_MSG_ERROR("Unable to create TTree"); - return StatusCode::FAILURE; - } - - m_outputTree = new TTree("tree", "tree"); - - m_outputTree->Branch("event_nr", &m_eventNr); - m_outputTree->Branch("multiTraj_nr", &m_multiTrajNr); - m_outputTree->Branch("subTraj_nr", &m_subTrajNr); - - m_outputTree->Branch("t_x", &m_t_x); - m_outputTree->Branch("t_y", &m_t_y); - m_outputTree->Branch("t_z", &m_t_z); - m_outputTree->Branch("t_dx", &m_t_dx); - m_outputTree->Branch("t_dy", &m_t_dy); - m_outputTree->Branch("t_dz", &m_t_dz); - m_outputTree->Branch("t_eLOC0", &m_t_eLOC0); - m_outputTree->Branch("t_eLOC1", &m_t_eLOC1); - m_outputTree->Branch("t_ePHI", &m_t_ePHI); - m_outputTree->Branch("t_eTHETA", &m_t_eTHETA); - m_outputTree->Branch("t_eQOP", 
&m_t_eQOP); - m_outputTree->Branch("t_eT", &m_t_eT); - - m_outputTree->Branch("nStates", &m_nStates); - m_outputTree->Branch("nMeasurements", &m_nMeasurements); - m_outputTree->Branch("volume_id", &m_volumeID); - m_outputTree->Branch("layer_id", &m_layerID); - m_outputTree->Branch("module_id", &m_moduleID); - m_outputTree->Branch("station", &m_station); - m_outputTree->Branch("layer", &m_layer); - m_outputTree->Branch("phi_module", &m_phi_module); - m_outputTree->Branch("eta_module", &m_eta_module); - m_outputTree->Branch("side", &m_side); - m_outputTree->Branch("pathLength", &m_pathLength); - m_outputTree->Branch("l_x_hit", &m_lx_hit); - m_outputTree->Branch("l_y_hit", &m_ly_hit); - m_outputTree->Branch("g_x_hit", &m_x_hit); - m_outputTree->Branch("g_y_hit", &m_y_hit); - m_outputTree->Branch("g_z_hit", &m_z_hit); - m_outputTree->Branch("res_x_hit", &m_res_x_hit); - m_outputTree->Branch("res_y_hit", &m_res_y_hit); - m_outputTree->Branch("err_x_hit", &m_err_x_hit); - m_outputTree->Branch("err_y_hit", &m_err_y_hit); - m_outputTree->Branch("pull_x_hit", &m_pull_x_hit); - m_outputTree->Branch("pull_y_hit", &m_pull_y_hit); - m_outputTree->Branch("dim_hit", &m_dim_hit); - - m_outputTree->Branch("nPredicted", &m_nParams[0]); - m_outputTree->Branch("predicted", &m_hasParams[0]); - m_outputTree->Branch("eLOC0_prt", &m_eLOC0[0]); - m_outputTree->Branch("eLOC1_prt", &m_eLOC1[0]); - m_outputTree->Branch("ePHI_prt", &m_ePHI[0]); - m_outputTree->Branch("eTHETA_prt", &m_eTHETA[0]); - m_outputTree->Branch("eQOP_prt", &m_eQOP[0]); - m_outputTree->Branch("eT_prt", &m_eT[0]); - m_outputTree->Branch("res_eLOC0_prt", &m_res_eLOC0[0]); - m_outputTree->Branch("res_eLOC1_prt", &m_res_eLOC1[0]); - m_outputTree->Branch("res_ePHI_prt", &m_res_ePHI[0]); - m_outputTree->Branch("res_eTHETA_prt", &m_res_eTHETA[0]); - m_outputTree->Branch("res_eQOP_prt", &m_res_eQOP[0]); - m_outputTree->Branch("res_eT_prt", &m_res_eT[0]); - m_outputTree->Branch("err_eLOC0_prt", &m_err_eLOC0[0]); - 
m_outputTree->Branch("err_eLOC1_prt", &m_err_eLOC1[0]); - m_outputTree->Branch("err_ePHI_prt", &m_err_ePHI[0]); - m_outputTree->Branch("err_eTHETA_prt", &m_err_eTHETA[0]); - m_outputTree->Branch("err_eQOP_prt", &m_err_eQOP[0]); - m_outputTree->Branch("err_eT_prt", &m_err_eT[0]); - m_outputTree->Branch("pull_eLOC0_prt", &m_pull_eLOC0[0]); - m_outputTree->Branch("pull_eLOC1_prt", &m_pull_eLOC1[0]); - m_outputTree->Branch("pull_ePHI_prt", &m_pull_ePHI[0]); - m_outputTree->Branch("pull_eTHETA_prt", &m_pull_eTHETA[0]); - m_outputTree->Branch("pull_eQOP_prt", &m_pull_eQOP[0]); - m_outputTree->Branch("pull_eT_prt", &m_pull_eT[0]); - m_outputTree->Branch("g_x_prt", &m_x[0]); - m_outputTree->Branch("g_y_prt", &m_y[0]); - m_outputTree->Branch("g_z_prt", &m_z[0]); - m_outputTree->Branch("px_prt", &m_px[0]); - m_outputTree->Branch("py_prt", &m_py[0]); - m_outputTree->Branch("pz_prt", &m_pz[0]); - m_outputTree->Branch("eta_prt", &m_eta[0]); - m_outputTree->Branch("pT_prt", &m_pT[0]); - - m_outputTree->Branch("nFiltered", &m_nParams[1]); - m_outputTree->Branch("filtered", &m_hasParams[1]); - m_outputTree->Branch("eLOC0_flt", &m_eLOC0[1]); - m_outputTree->Branch("eLOC1_flt", &m_eLOC1[1]); - m_outputTree->Branch("ePHI_flt", &m_ePHI[1]); - m_outputTree->Branch("eTHETA_flt", &m_eTHETA[1]); - m_outputTree->Branch("eQOP_flt", &m_eQOP[1]); - m_outputTree->Branch("eT_flt", &m_eT[1]); - m_outputTree->Branch("res_eLOC0_flt", &m_res_eLOC0[1]); - m_outputTree->Branch("res_eLOC1_flt", &m_res_eLOC1[1]); - m_outputTree->Branch("res_ePHI_flt", &m_res_ePHI[1]); - m_outputTree->Branch("res_eTHETA_flt", &m_res_eTHETA[1]); - m_outputTree->Branch("res_eQOP_flt", &m_res_eQOP[1]); - m_outputTree->Branch("res_eT_flt", &m_res_eT[1]); - m_outputTree->Branch("err_eLOC0_flt", &m_err_eLOC0[1]); - m_outputTree->Branch("err_eLOC1_flt", &m_err_eLOC1[1]); - m_outputTree->Branch("err_ePHI_flt", &m_err_ePHI[1]); - m_outputTree->Branch("err_eTHETA_flt", &m_err_eTHETA[1]); - m_outputTree->Branch("err_eQOP_flt", 
&m_err_eQOP[1]); - m_outputTree->Branch("err_eT_flt", &m_err_eT[1]); - m_outputTree->Branch("pull_eLOC0_flt", &m_pull_eLOC0[1]); - m_outputTree->Branch("pull_eLOC1_flt", &m_pull_eLOC1[1]); - m_outputTree->Branch("pull_ePHI_flt", &m_pull_ePHI[1]); - m_outputTree->Branch("pull_eTHETA_flt", &m_pull_eTHETA[1]); - m_outputTree->Branch("pull_eQOP_flt", &m_pull_eQOP[1]); - m_outputTree->Branch("pull_eT_flt", &m_pull_eT[1]); - m_outputTree->Branch("g_x_flt", &m_x[1]); - m_outputTree->Branch("g_y_flt", &m_y[1]); - m_outputTree->Branch("g_z_flt", &m_z[1]); - m_outputTree->Branch("px_flt", &m_px[1]); - m_outputTree->Branch("py_flt", &m_py[1]); - m_outputTree->Branch("pz_flt", &m_pz[1]); - m_outputTree->Branch("eta_flt", &m_eta[1]); - m_outputTree->Branch("pT_flt", &m_pT[1]); - - m_outputTree->Branch("nSmoothed", &m_nParams[2]); - m_outputTree->Branch("smoothed", &m_hasParams[2]); - m_outputTree->Branch("eLOC0_smt", &m_eLOC0[2]); - m_outputTree->Branch("eLOC1_smt", &m_eLOC1[2]); - m_outputTree->Branch("ePHI_smt", &m_ePHI[2]); - m_outputTree->Branch("eTHETA_smt", &m_eTHETA[2]); - m_outputTree->Branch("eQOP_smt", &m_eQOP[2]); - m_outputTree->Branch("eT_smt", &m_eT[2]); - m_outputTree->Branch("res_eLOC0_smt", &m_res_eLOC0[2]); - m_outputTree->Branch("res_eLOC1_smt", &m_res_eLOC1[2]); - m_outputTree->Branch("res_ePHI_smt", &m_res_ePHI[2]); - m_outputTree->Branch("res_eTHETA_smt", &m_res_eTHETA[2]); - m_outputTree->Branch("res_eQOP_smt", &m_res_eQOP[2]); - m_outputTree->Branch("res_eT_smt", &m_res_eT[2]); - m_outputTree->Branch("err_eLOC0_smt", &m_err_eLOC0[2]); - m_outputTree->Branch("err_eLOC1_smt", &m_err_eLOC1[2]); - m_outputTree->Branch("err_ePHI_smt", &m_err_ePHI[2]); - m_outputTree->Branch("err_eTHETA_smt", &m_err_eTHETA[2]); - m_outputTree->Branch("err_eQOP_smt", &m_err_eQOP[2]); - m_outputTree->Branch("err_eT_smt", &m_err_eT[2]); - m_outputTree->Branch("pull_eLOC0_smt", &m_pull_eLOC0[2]); - m_outputTree->Branch("pull_eLOC1_smt", &m_pull_eLOC1[2]); - 
m_outputTree->Branch("pull_ePHI_smt", &m_pull_ePHI[2]); - m_outputTree->Branch("pull_eTHETA_smt", &m_pull_eTHETA[2]); - m_outputTree->Branch("pull_eQOP_smt", &m_pull_eQOP[2]); - m_outputTree->Branch("pull_eT_smt", &m_pull_eT[2]); - m_outputTree->Branch("g_x_smt", &m_x[2]); - m_outputTree->Branch("g_y_smt", &m_y[2]); - m_outputTree->Branch("g_z_smt", &m_z[2]); - m_outputTree->Branch("px_smt", &m_px[2]); - m_outputTree->Branch("py_smt", &m_py[2]); - m_outputTree->Branch("pz_smt", &m_pz[2]); - m_outputTree->Branch("eta_smt", &m_eta[2]); - m_outputTree->Branch("pT_smt", &m_pT[2]); - - m_outputTree->Branch("chi2", &m_chi2); + if (!m_noDiagnostics) { + ATH_CHECK(m_mcEventCollectionKey.initialize()); + ATH_CHECK(m_simDataCollectionKey.initialize()); + ATH_CHECK(m_faserSiHitKey.initialize()); + ATH_CHECK(detStore()->retrieve(m_idHelper, "FaserSCT_ID")); + ATH_CHECK(detStore()->retrieve(m_detMgr, "SCT")); + + std::string filePath = m_filePath; + std::string treeName = m_treeName; + m_outputFile = TFile::Open(filePath.c_str(), "RECREATE"); + if (m_outputFile == nullptr) { + ATH_MSG_ERROR("Unable to open output file at " << m_filePath); + return StatusCode::FAILURE; + } + m_outputFile->cd(); + m_outputTree = new TTree(treeName.c_str(), treeName.c_str()); + if (m_outputTree == nullptr) { + ATH_MSG_ERROR("Unable to create TTree"); + return StatusCode::FAILURE; + } + m_outputTree = new TTree("tree", "tree"); + + m_outputTree->Branch("event_nr", &m_eventNr); + m_outputTree->Branch("multiTraj_nr", &m_multiTrajNr); + m_outputTree->Branch("subTraj_nr", &m_subTrajNr); + + m_outputTree->Branch("t_x", &m_t_x); + m_outputTree->Branch("t_y", &m_t_y); + m_outputTree->Branch("t_z", &m_t_z); + m_outputTree->Branch("t_dx", &m_t_dx); + m_outputTree->Branch("t_dy", &m_t_dy); + m_outputTree->Branch("t_dz", &m_t_dz); + m_outputTree->Branch("t_eLOC0", &m_t_eLOC0); + m_outputTree->Branch("t_eLOC1", &m_t_eLOC1); + m_outputTree->Branch("t_ePHI", &m_t_ePHI); + m_outputTree->Branch("t_eTHETA", 
&m_t_eTHETA); + m_outputTree->Branch("t_eQOP", &m_t_eQOP); + m_outputTree->Branch("t_eT", &m_t_eT); + + m_outputTree->Branch("nStates", &m_nStates); + m_outputTree->Branch("nMeasurements", &m_nMeasurements); + m_outputTree->Branch("volume_id", &m_volumeID); + m_outputTree->Branch("layer_id", &m_layerID); + m_outputTree->Branch("module_id", &m_moduleID); + m_outputTree->Branch("station", &m_station); + m_outputTree->Branch("layer", &m_layer); + m_outputTree->Branch("phi_module", &m_phi_module); + m_outputTree->Branch("eta_module", &m_eta_module); + m_outputTree->Branch("side", &m_side); + m_outputTree->Branch("pathLength", &m_pathLength); + m_outputTree->Branch("l_x_hit", &m_lx_hit); + m_outputTree->Branch("l_y_hit", &m_ly_hit); + m_outputTree->Branch("g_x_hit", &m_x_hit); + m_outputTree->Branch("g_y_hit", &m_y_hit); + m_outputTree->Branch("g_z_hit", &m_z_hit); + m_outputTree->Branch("res_x_hit", &m_res_x_hit); + m_outputTree->Branch("res_y_hit", &m_res_y_hit); + m_outputTree->Branch("err_x_hit", &m_err_x_hit); + m_outputTree->Branch("err_y_hit", &m_err_y_hit); + m_outputTree->Branch("pull_x_hit", &m_pull_x_hit); + m_outputTree->Branch("pull_y_hit", &m_pull_y_hit); + m_outputTree->Branch("dim_hit", &m_dim_hit); + + m_outputTree->Branch("nPredicted", &m_nParams[0]); + m_outputTree->Branch("predicted", &m_hasParams[0]); + m_outputTree->Branch("eLOC0_prt", &m_eLOC0[0]); + m_outputTree->Branch("eLOC1_prt", &m_eLOC1[0]); + m_outputTree->Branch("ePHI_prt", &m_ePHI[0]); + m_outputTree->Branch("eTHETA_prt", &m_eTHETA[0]); + m_outputTree->Branch("eQOP_prt", &m_eQOP[0]); + m_outputTree->Branch("eT_prt", &m_eT[0]); + m_outputTree->Branch("res_eLOC0_prt", &m_res_eLOC0[0]); + m_outputTree->Branch("res_eLOC1_prt", &m_res_eLOC1[0]); + m_outputTree->Branch("res_ePHI_prt", &m_res_ePHI[0]); + m_outputTree->Branch("res_eTHETA_prt", &m_res_eTHETA[0]); + m_outputTree->Branch("res_eQOP_prt", &m_res_eQOP[0]); + m_outputTree->Branch("res_eT_prt", &m_res_eT[0]); + 
m_outputTree->Branch("err_eLOC0_prt", &m_err_eLOC0[0]); + m_outputTree->Branch("err_eLOC1_prt", &m_err_eLOC1[0]); + m_outputTree->Branch("err_ePHI_prt", &m_err_ePHI[0]); + m_outputTree->Branch("err_eTHETA_prt", &m_err_eTHETA[0]); + m_outputTree->Branch("err_eQOP_prt", &m_err_eQOP[0]); + m_outputTree->Branch("err_eT_prt", &m_err_eT[0]); + m_outputTree->Branch("pull_eLOC0_prt", &m_pull_eLOC0[0]); + m_outputTree->Branch("pull_eLOC1_prt", &m_pull_eLOC1[0]); + m_outputTree->Branch("pull_ePHI_prt", &m_pull_ePHI[0]); + m_outputTree->Branch("pull_eTHETA_prt", &m_pull_eTHETA[0]); + m_outputTree->Branch("pull_eQOP_prt", &m_pull_eQOP[0]); + m_outputTree->Branch("pull_eT_prt", &m_pull_eT[0]); + m_outputTree->Branch("g_x_prt", &m_x[0]); + m_outputTree->Branch("g_y_prt", &m_y[0]); + m_outputTree->Branch("g_z_prt", &m_z[0]); + m_outputTree->Branch("px_prt", &m_px[0]); + m_outputTree->Branch("py_prt", &m_py[0]); + m_outputTree->Branch("pz_prt", &m_pz[0]); + m_outputTree->Branch("eta_prt", &m_eta[0]); + m_outputTree->Branch("pT_prt", &m_pT[0]); + + m_outputTree->Branch("nFiltered", &m_nParams[1]); + m_outputTree->Branch("filtered", &m_hasParams[1]); + m_outputTree->Branch("eLOC0_flt", &m_eLOC0[1]); + m_outputTree->Branch("eLOC1_flt", &m_eLOC1[1]); + m_outputTree->Branch("ePHI_flt", &m_ePHI[1]); + m_outputTree->Branch("eTHETA_flt", &m_eTHETA[1]); + m_outputTree->Branch("eQOP_flt", &m_eQOP[1]); + m_outputTree->Branch("eT_flt", &m_eT[1]); + m_outputTree->Branch("res_eLOC0_flt", &m_res_eLOC0[1]); + m_outputTree->Branch("res_eLOC1_flt", &m_res_eLOC1[1]); + m_outputTree->Branch("res_ePHI_flt", &m_res_ePHI[1]); + m_outputTree->Branch("res_eTHETA_flt", &m_res_eTHETA[1]); + m_outputTree->Branch("res_eQOP_flt", &m_res_eQOP[1]); + m_outputTree->Branch("res_eT_flt", &m_res_eT[1]); + m_outputTree->Branch("err_eLOC0_flt", &m_err_eLOC0[1]); + m_outputTree->Branch("err_eLOC1_flt", &m_err_eLOC1[1]); + m_outputTree->Branch("err_ePHI_flt", &m_err_ePHI[1]); + m_outputTree->Branch("err_eTHETA_flt", 
&m_err_eTHETA[1]); + m_outputTree->Branch("err_eQOP_flt", &m_err_eQOP[1]); + m_outputTree->Branch("err_eT_flt", &m_err_eT[1]); + m_outputTree->Branch("pull_eLOC0_flt", &m_pull_eLOC0[1]); + m_outputTree->Branch("pull_eLOC1_flt", &m_pull_eLOC1[1]); + m_outputTree->Branch("pull_ePHI_flt", &m_pull_ePHI[1]); + m_outputTree->Branch("pull_eTHETA_flt", &m_pull_eTHETA[1]); + m_outputTree->Branch("pull_eQOP_flt", &m_pull_eQOP[1]); + m_outputTree->Branch("pull_eT_flt", &m_pull_eT[1]); + m_outputTree->Branch("g_x_flt", &m_x[1]); + m_outputTree->Branch("g_y_flt", &m_y[1]); + m_outputTree->Branch("g_z_flt", &m_z[1]); + m_outputTree->Branch("px_flt", &m_px[1]); + m_outputTree->Branch("py_flt", &m_py[1]); + m_outputTree->Branch("pz_flt", &m_pz[1]); + m_outputTree->Branch("eta_flt", &m_eta[1]); + m_outputTree->Branch("pT_flt", &m_pT[1]); + + m_outputTree->Branch("nSmoothed", &m_nParams[2]); + m_outputTree->Branch("smoothed", &m_hasParams[2]); + m_outputTree->Branch("eLOC0_smt", &m_eLOC0[2]); + m_outputTree->Branch("eLOC1_smt", &m_eLOC1[2]); + m_outputTree->Branch("ePHI_smt", &m_ePHI[2]); + m_outputTree->Branch("eTHETA_smt", &m_eTHETA[2]); + m_outputTree->Branch("eQOP_smt", &m_eQOP[2]); + m_outputTree->Branch("eT_smt", &m_eT[2]); + m_outputTree->Branch("res_eLOC0_smt", &m_res_eLOC0[2]); + m_outputTree->Branch("res_eLOC1_smt", &m_res_eLOC1[2]); + m_outputTree->Branch("res_ePHI_smt", &m_res_ePHI[2]); + m_outputTree->Branch("res_eTHETA_smt", &m_res_eTHETA[2]); + m_outputTree->Branch("res_eQOP_smt", &m_res_eQOP[2]); + m_outputTree->Branch("res_eT_smt", &m_res_eT[2]); + m_outputTree->Branch("err_eLOC0_smt", &m_err_eLOC0[2]); + m_outputTree->Branch("err_eLOC1_smt", &m_err_eLOC1[2]); + m_outputTree->Branch("err_ePHI_smt", &m_err_ePHI[2]); + m_outputTree->Branch("err_eTHETA_smt", &m_err_eTHETA[2]); + m_outputTree->Branch("err_eQOP_smt", &m_err_eQOP[2]); + m_outputTree->Branch("err_eT_smt", &m_err_eT[2]); + m_outputTree->Branch("pull_eLOC0_smt", &m_pull_eLOC0[2]); + 
m_outputTree->Branch("pull_eLOC1_smt", &m_pull_eLOC1[2]); + m_outputTree->Branch("pull_ePHI_smt", &m_pull_ePHI[2]); + m_outputTree->Branch("pull_eTHETA_smt", &m_pull_eTHETA[2]); + m_outputTree->Branch("pull_eQOP_smt", &m_pull_eQOP[2]); + m_outputTree->Branch("pull_eT_smt", &m_pull_eT[2]); + m_outputTree->Branch("g_x_smt", &m_x[2]); + m_outputTree->Branch("g_y_smt", &m_y[2]); + m_outputTree->Branch("g_z_smt", &m_z[2]); + m_outputTree->Branch("px_smt", &m_px[2]); + m_outputTree->Branch("py_smt", &m_py[2]); + m_outputTree->Branch("pz_smt", &m_pz[2]); + m_outputTree->Branch("eta_smt", &m_eta[2]); + m_outputTree->Branch("pT_smt", &m_pT[2]); + + m_outputTree->Branch("chi2", &m_chi2); + } return StatusCode::SUCCESS; } StatusCode RootTrajectoryStatesWriterTool::finalize() { - m_outputFile->cd(); - m_outputTree->Write(); - m_outputFile->Close(); + if (!m_noDiagnostics) { + m_outputFile->cd(); + m_outputTree->Write(); + m_outputFile->Close(); + } return StatusCode::SUCCESS; } diff --git a/Tracking/Acts/FaserActsKalmanFilter/src/RootTrajectorySummaryWriterTool.cxx b/Tracking/Acts/FaserActsKalmanFilter/src/RootTrajectorySummaryWriterTool.cxx index 15a955ffa77007274a15d8449077592ddde227bf..2d8765e3163930e003b0d1a6eeaeb999256b9432 100644 --- a/Tracking/Acts/FaserActsKalmanFilter/src/RootTrajectorySummaryWriterTool.cxx +++ b/Tracking/Acts/FaserActsKalmanFilter/src/RootTrajectorySummaryWriterTool.cxx @@ -32,96 +32,98 @@ RootTrajectorySummaryWriterTool::RootTrajectorySummaryWriterTool( StatusCode RootTrajectorySummaryWriterTool::initialize() { - ATH_CHECK(m_simDataCollectionKey.initialize()); - ATH_CHECK(m_mcEventCollectionKey.initialize()); - ATH_CHECK(detStore()->retrieve(m_idHelper, "FaserSCT_ID")); - - std::string filePath = m_filePath; - std::string treeName = m_treeName; - m_outputFile = TFile::Open(filePath.c_str(), "RECREATE"); - if (m_outputFile == nullptr) { - ATH_MSG_WARNING("Unable to open output file at " << m_filePath); - return StatusCode::RECOVERABLE; - } else { - 
std::cout << "(SummaryWriter) create file " << filePath << std::endl; - } - m_outputFile->cd(); - m_outputTree = new TTree(treeName.c_str(), treeName.c_str()); - if (m_outputTree == nullptr) { - ATH_MSG_ERROR("Unable to create TTree"); - return StatusCode::FAILURE; - } + if (!m_noDiagnostics) { + ATH_CHECK(m_simDataCollectionKey.initialize()); + ATH_CHECK(m_mcEventCollectionKey.initialize()); + ATH_CHECK(detStore()->retrieve(m_idHelper, "FaserSCT_ID")); + + std::string filePath = m_filePath; + std::string treeName = m_treeName; + m_outputFile = TFile::Open(filePath.c_str(), "RECREATE"); + if (m_outputFile == nullptr) { + ATH_MSG_WARNING("Unable to open output file at " << m_filePath); + return StatusCode::RECOVERABLE; + } + m_outputFile->cd(); + m_outputTree = new TTree(treeName.c_str(), treeName.c_str()); + if (m_outputTree == nullptr) { + ATH_MSG_ERROR("Unable to create TTree"); + return StatusCode::FAILURE; + } - m_outputTree = new TTree("tree", "tree"); - - m_outputTree->Branch("event_nr", &m_eventNr); - m_outputTree->Branch("multiTraj_nr", &m_multiTrajNr); - m_outputTree->Branch("subTraj_nr", &m_subTrajNr); - - m_outputTree->Branch("nStates", &m_nStates); - m_outputTree->Branch("nMeasurements", &m_nMeasurements); - m_outputTree->Branch("nOutliers", &m_nOutliers); - m_outputTree->Branch("nHoles", &m_nHoles); - m_outputTree->Branch("nSharedHits", &m_nSharedHits); - m_outputTree->Branch("chi2Sum", &m_chi2Sum); - m_outputTree->Branch("NDF", &m_NDF); - m_outputTree->Branch("measurementChi2", &m_measurementChi2); - m_outputTree->Branch("outlierChi2", &m_outlierChi2); - m_outputTree->Branch("measurementVolume", &m_measurementVolume); - m_outputTree->Branch("measurementLayer", &m_measurementLayer); - m_outputTree->Branch("outlierVolume", &m_outlierVolume); - m_outputTree->Branch("outlierLayer", &m_outlierLayer); - - m_outputTree->Branch("nMajorityHits", &m_nMajorityHits); - m_outputTree->Branch("majorityParticleId", &m_majorityParticleId); - 
m_outputTree->Branch("t_charge", &m_t_charge); - m_outputTree->Branch("t_time", &m_t_time); - m_outputTree->Branch("t_vx", &m_t_vx); - m_outputTree->Branch("t_vy", &m_t_vy); - m_outputTree->Branch("t_vz", &m_t_vz); - m_outputTree->Branch("t_px", &m_t_px); - m_outputTree->Branch("t_py", &m_t_py); - m_outputTree->Branch("t_pz", &m_t_pz); - m_outputTree->Branch("t_theta", &m_t_theta); - m_outputTree->Branch("t_phi", &m_t_phi); - m_outputTree->Branch("t_eta", &m_t_eta); - m_outputTree->Branch("t_p", &m_t_p); - m_outputTree->Branch("t_pT", &m_t_pT); - - m_outputTree->Branch("hasFittedParams", &m_hasFittedParams); - m_outputTree->Branch("eLOC0_fit", &m_eLOC0_fit); - m_outputTree->Branch("eLOC1_fit", &m_eLOC1_fit); - m_outputTree->Branch("ePHI_fit", &m_ePHI_fit); - m_outputTree->Branch("eTHETA_fit", &m_eTHETA_fit); - m_outputTree->Branch("eQOP_fit", &m_eQOP_fit); - m_outputTree->Branch("eT_fit", &m_eT_fit); - m_outputTree->Branch("err_eLOC0_fit", &m_err_eLOC0_fit); - m_outputTree->Branch("err_eLOC1_fit", &m_err_eLOC1_fit); - m_outputTree->Branch("err_ePHI_fit", &m_err_ePHI_fit); - m_outputTree->Branch("err_eTHETA_fit", &m_err_eTHETA_fit); - m_outputTree->Branch("err_eQOP_fit", &m_err_eQOP_fit); - m_outputTree->Branch("err_eT_fit", &m_err_eT_fit); - m_outputTree->Branch("res_eLOC0_fit", &m_res_eLOC0_fit); - m_outputTree->Branch("res_eLOC1_fit", &m_res_eLOC1_fit); - m_outputTree->Branch("res_ePHI_fit", &m_res_ePHI_fit); - m_outputTree->Branch("res_eTHETA_fit", &m_res_eTHETA_fit); - m_outputTree->Branch("res_eQOP_fit", &m_res_eQOP_fit); - m_outputTree->Branch("res_eT_fit", &m_res_eT_fit); - m_outputTree->Branch("pull_eLOC0_fit", &m_pull_eLOC0_fit); - m_outputTree->Branch("pull_eLOC1_fit", &m_pull_eLOC1_fit); - m_outputTree->Branch("pull_ePHI_fit", &m_pull_ePHI_fit); - m_outputTree->Branch("pull_eTHETA_fit", &m_pull_eTHETA_fit); - m_outputTree->Branch("pull_eQOP_fit", &m_pull_eQOP_fit); - m_outputTree->Branch("pull_eT_fit", &m_pull_eT_fit); + m_outputTree = new TTree("tree", 
"tree"); + + m_outputTree->Branch("event_nr", &m_eventNr); + m_outputTree->Branch("multiTraj_nr", &m_multiTrajNr); + m_outputTree->Branch("subTraj_nr", &m_subTrajNr); + + m_outputTree->Branch("nStates", &m_nStates); + m_outputTree->Branch("nMeasurements", &m_nMeasurements); + m_outputTree->Branch("nOutliers", &m_nOutliers); + m_outputTree->Branch("nHoles", &m_nHoles); + m_outputTree->Branch("nSharedHits", &m_nSharedHits); + m_outputTree->Branch("chi2Sum", &m_chi2Sum); + m_outputTree->Branch("NDF", &m_NDF); + m_outputTree->Branch("measurementChi2", &m_measurementChi2); + m_outputTree->Branch("outlierChi2", &m_outlierChi2); + m_outputTree->Branch("measurementVolume", &m_measurementVolume); + m_outputTree->Branch("measurementLayer", &m_measurementLayer); + m_outputTree->Branch("outlierVolume", &m_outlierVolume); + m_outputTree->Branch("outlierLayer", &m_outlierLayer); + + m_outputTree->Branch("nMajorityHits", &m_nMajorityHits); + m_outputTree->Branch("majorityParticleId", &m_majorityParticleId); + m_outputTree->Branch("t_charge", &m_t_charge); + m_outputTree->Branch("t_time", &m_t_time); + m_outputTree->Branch("t_vx", &m_t_vx); + m_outputTree->Branch("t_vy", &m_t_vy); + m_outputTree->Branch("t_vz", &m_t_vz); + m_outputTree->Branch("t_px", &m_t_px); + m_outputTree->Branch("t_py", &m_t_py); + m_outputTree->Branch("t_pz", &m_t_pz); + m_outputTree->Branch("t_theta", &m_t_theta); + m_outputTree->Branch("t_phi", &m_t_phi); + m_outputTree->Branch("t_eta", &m_t_eta); + m_outputTree->Branch("t_p", &m_t_p); + m_outputTree->Branch("t_pT", &m_t_pT); + + m_outputTree->Branch("hasFittedParams", &m_hasFittedParams); + m_outputTree->Branch("eLOC0_fit", &m_eLOC0_fit); + m_outputTree->Branch("eLOC1_fit", &m_eLOC1_fit); + m_outputTree->Branch("ePHI_fit", &m_ePHI_fit); + m_outputTree->Branch("eTHETA_fit", &m_eTHETA_fit); + m_outputTree->Branch("eQOP_fit", &m_eQOP_fit); + m_outputTree->Branch("eT_fit", &m_eT_fit); + m_outputTree->Branch("err_eLOC0_fit", &m_err_eLOC0_fit); + 
m_outputTree->Branch("err_eLOC1_fit", &m_err_eLOC1_fit); + m_outputTree->Branch("err_ePHI_fit", &m_err_ePHI_fit); + m_outputTree->Branch("err_eTHETA_fit", &m_err_eTHETA_fit); + m_outputTree->Branch("err_eQOP_fit", &m_err_eQOP_fit); + m_outputTree->Branch("err_eT_fit", &m_err_eT_fit); + m_outputTree->Branch("res_eLOC0_fit", &m_res_eLOC0_fit); + m_outputTree->Branch("res_eLOC1_fit", &m_res_eLOC1_fit); + m_outputTree->Branch("res_ePHI_fit", &m_res_ePHI_fit); + m_outputTree->Branch("res_eTHETA_fit", &m_res_eTHETA_fit); + m_outputTree->Branch("res_eQOP_fit", &m_res_eQOP_fit); + m_outputTree->Branch("res_eT_fit", &m_res_eT_fit); + m_outputTree->Branch("pull_eLOC0_fit", &m_pull_eLOC0_fit); + m_outputTree->Branch("pull_eLOC1_fit", &m_pull_eLOC1_fit); + m_outputTree->Branch("pull_ePHI_fit", &m_pull_ePHI_fit); + m_outputTree->Branch("pull_eTHETA_fit", &m_pull_eTHETA_fit); + m_outputTree->Branch("pull_eQOP_fit", &m_pull_eQOP_fit); + m_outputTree->Branch("pull_eT_fit", &m_pull_eT_fit); + } return StatusCode::SUCCESS; } StatusCode RootTrajectorySummaryWriterTool::finalize() { - m_outputFile->cd(); - m_outputTree->Write(); - m_outputFile->Close(); + if (!m_noDiagnostics) { + m_outputFile->cd(); + m_outputTree->Write(); + m_outputFile->Close(); + } return StatusCode::SUCCESS; } diff --git a/Tracking/Acts/FaserActsKalmanFilter/test/CombinatorialKalmanFilterAlg.py b/Tracking/Acts/FaserActsKalmanFilter/test/CombinatorialKalmanFilterAlg.py index 56bd49e668a0cf5e57affff046b5d4db0e5821b1..102eb3b37ab68b07aa516d5e2ffdf5d7076bea18 100644 --- a/Tracking/Acts/FaserActsKalmanFilter/test/CombinatorialKalmanFilterAlg.py +++ b/Tracking/Acts/FaserActsKalmanFilter/test/CombinatorialKalmanFilterAlg.py @@ -43,5 +43,5 @@ acc.getEventAlgo("CombinatorialKalmanFilterAlg").OutputLevel = VERBOSE # acc.printConfig(withDetails=True) # ConfigFlags.dump() -sc = acc.run(maxEvents=100) +sc = acc.run(maxEvents=-1) sys.exit(not sc.isSuccess()) diff --git a/Waveform/WaveDigiTools/CMakeLists.txt 
b/Waveform/WaveDigiTools/CMakeLists.txt index 692fdb69bc14451ba5a6a660d011bad5b14b66e5..d7e9fd857b273c764dad780aceebd0d3f58acc3a 100644 --- a/Waveform/WaveDigiTools/CMakeLists.txt +++ b/Waveform/WaveDigiTools/CMakeLists.txt @@ -13,13 +13,12 @@ atlas_add_library( WaveDigiToolsLib WaveDigiTools/*.h src/*.cxx src/*.h PUBLIC_HEADERS WaveDigiTools PRIVATE_INCLUDE_DIRS ${ROOT_INCLUDE_DIRS} - LINK_LIBRARIES AthenaBaseComps AthenaKernel GeoPrimitives WaveRawEvent + LINK_LIBRARIES AthenaBaseComps AthenaKernel GeoPrimitives WaveRawEvent Identifier PRIVATE_LINK_LIBRARIES ${ROOT_LIBRARIES} ) atlas_add_component( WaveDigiTools src/components/*.cxx INCLUDE_DIRS ${ROOT_INCLUDE_DIRS} - LINK_LIBRARIES ${ROOT_LIBRARIES} AthenaBaseComps GaudiKernel WaveDigiToolsLib ) - + LINK_LIBRARIES ${ROOT_LIBRARIES} AthenaBaseComps GaudiKernel WaveDigiToolsLib) diff --git a/Waveform/WaveDigiTools/WaveDigiTools/IWaveformDigitisationTool.h b/Waveform/WaveDigiTools/WaveDigiTools/IWaveformDigitisationTool.h index 3e85ff839a04f6d02c794fa74567841450c9cfeb..c30351902a1b79e9a58ad33d5f58a5d04ed13faa 100644 --- a/Waveform/WaveDigiTools/WaveDigiTools/IWaveformDigitisationTool.h +++ b/Waveform/WaveDigiTools/WaveDigiTools/IWaveformDigitisationTool.h @@ -23,7 +23,15 @@ #include "WaveRawEvent/RawWaveformContainer.h" #include "WaveRawEvent/RawWaveform.h" +#include "Identifier/Identifier.h" + #include "TF1.h" +#include "TRandom3.h" + +#include <utility> +#include <map> +#include <vector> + ///Interface for waveform digitisation tools class IWaveformDigitisationTool : virtual public IAlgTool @@ -39,14 +47,25 @@ public: virtual ~IWaveformDigitisationTool() = default; - // Digitise HITS to Raw waveform - template<class CONT> - StatusCode digitise(const CONT* hitCollection, - RawWaveformContainer* waveContainer, TF1* kernel) const; + /// Evaluate time kernel over time samples + virtual std::vector<float> evaluate_timekernel(TF1* kernel) const = 0; + + /// Generate random baseline + virtual unsigned int 
generate_baseline(int mean, int rms) const = 0; + + /// Create structure to store pulse for each channel + template <class T> + std::map<Identifier, std::vector<uint16_t>> create_waveform_map(const T* idHelper) const; + + /// Number of time samples + unsigned int nsamples() const { return m_nsamples; } private: ServiceHandle<IMessageSvc> m_msgSvc; +protected: + TRandom3* m_random; + unsigned int m_nsamples; }; #include "WaveDigiTools/IWaveformDigitisationTool.icc" diff --git a/Waveform/WaveDigiTools/WaveDigiTools/IWaveformDigitisationTool.icc b/Waveform/WaveDigiTools/WaveDigiTools/IWaveformDigitisationTool.icc index 57d4839bda5f2d30286ec412e3f01c92ce353b11..41b8c2650319a448df63d11e098fc8d0784dc056 100644 --- a/Waveform/WaveDigiTools/WaveDigiTools/IWaveformDigitisationTool.icc +++ b/Waveform/WaveDigiTools/WaveDigiTools/IWaveformDigitisationTool.icc @@ -1,51 +1,17 @@ -#include <vector> -#include <map> +#include "Identifier/Identifier.h" +#include "Identifier/ExpandedIdentifier.h" -template<class CONT> -StatusCode IWaveformDigitisationTool::digitise(const CONT* hitCollection, - RawWaveformContainer* container, TF1* kernel) const { +template <class ID> +std::map<Identifier, std::vector<uint16_t>> IWaveformDigitisationTool::create_waveform_map(const ID* idHelper) const { + std::map<Identifier, std::vector<uint16_t>> waveforms; - // Check the container - if (!container) { - MsgStream log(&(*m_msgSvc), name()); - log << MSG::ERROR << "HitCollection passed to digitise() is null!" << endmsg; - return StatusCode::FAILURE; + for (auto itr = idHelper->pmt_begin(); itr != idHelper->pmt_end(); ++itr) { + const ExpandedIdentifier& extId = *itr; + Identifier id = idHelper->pmt_id(extId); + waveforms[id] = std::vector<uint16_t>(); + waveforms[id].reserve(m_nsamples); } - unsigned int size = 600; // TODO: how know the correct number of time samples? 
- std::vector<float> time(size); - for (unsigned int i=0; i<size; i++) time[i] = 2.*i; - - std::map<unsigned int, std::vector<uint16_t>> waveforms; - unsigned int baseline = 8000; // TODO: vary this + add noise - - // Loop over time samples - for (const auto& t : time) { - std::map<unsigned int, float> counts; - - // Convolve hit energy with kernel and sum for each ID (i.e. channel) - for (const auto& hit : *hitCollection) { - counts[hit.identify()] += kernel->Eval(t) * hit.energyLoss(); - //std::cout << "HIT " << hit.identify() << " @ " << t << ": " << kernel->Eval(t) << " " << hit.energyLoss() << " -> " << counts[hit.identify()] << std::endl; - } - - // Add count to correct waveform vec - for (const auto& c : counts) { - waveforms[c.first].push_back(baseline - c.second); - //std::cout << "ADC " << c.first << " @ " << t << ": " << baseline - c.second << std::endl; - } - } - - // Loop over wavefrom vecs to make and store waveform - for (const auto& w : waveforms) { - RawWaveform* wfm = new RawWaveform(); - wfm->setWaveform(0, w.second); - wfm->setIdentifier(Identifier(w.first)); - wfm->setSamples(size); - container->push_back(wfm); - } - - - return StatusCode::SUCCESS; + return waveforms; } diff --git a/Waveform/WaveDigiTools/src/WaveformDigitisationTool.cxx b/Waveform/WaveDigiTools/src/WaveformDigitisationTool.cxx index e4776da3bf1b140fa1c614642d939f46557b0ba6..c62d7f8a753490907bd1e8a513cad5341a2a46be 100644 --- a/Waveform/WaveDigiTools/src/WaveformDigitisationTool.cxx +++ b/Waveform/WaveDigiTools/src/WaveformDigitisationTool.cxx @@ -20,7 +20,28 @@ WaveformDigitisationTool::WaveformDigitisationTool(const std::string& type, cons StatusCode WaveformDigitisationTool::initialize() { ATH_MSG_INFO( name() << "::initalize()" ); + + m_nsamples = 600; + m_random = new TRandom3(); + return StatusCode::SUCCESS; } +std::vector<float> +WaveformDigitisationTool::evaluate_timekernel(TF1* kernel) const { + + std::vector<float> timekernel; + timekernel.reserve(m_nsamples); + + for 
(unsigned int i=0; i<m_nsamples; i++) { + timekernel.push_back(kernel->Eval(2.*i)); + } + + return timekernel; +} + +unsigned int +WaveformDigitisationTool::generate_baseline(int mean, int rms) const { + return m_random->Gaus(mean, rms); +} diff --git a/Waveform/WaveDigiTools/src/WaveformDigitisationTool.h b/Waveform/WaveDigiTools/src/WaveformDigitisationTool.h index 8a5ba71f3dd124fcdd2c6b4b8124ee96591512da..e2dd5169152845824927baeeae7ce8fc36ab46f8 100644 --- a/Waveform/WaveDigiTools/src/WaveformDigitisationTool.h +++ b/Waveform/WaveDigiTools/src/WaveformDigitisationTool.h @@ -28,6 +28,13 @@ class WaveformDigitisationTool: public extends<AthAlgTool, IWaveformDigitisation /// Retrieve the necessary services in initialize StatusCode initialize(); + /// Evaluate time kernel over samples + std::vector<float> evaluate_timekernel(TF1* kernel) const; + + /// Generate random baseline + unsigned int generate_baseline(int mean, int rms) const; + + private: // None diff --git a/Waveform/WaveEventCnv/WaveByteStream/python/WaveByteStreamConfig.py b/Waveform/WaveEventCnv/WaveByteStream/python/WaveByteStreamConfig.py index b3e1e73042c8d608c68865d1328572b1e5b98955..ad6bfcd1519674674ff7d101e320bc47176198ed 100644 --- a/Waveform/WaveEventCnv/WaveByteStream/python/WaveByteStreamConfig.py +++ b/Waveform/WaveEventCnv/WaveByteStream/python/WaveByteStreamConfig.py @@ -2,8 +2,11 @@ from AthenaConfiguration.ComponentAccumulator import ComponentAccumulator from WaveformConditionsTools.WaveformCableMappingConfig import WaveformCableMappingCfg +from WaveformConditionsTools.WaveformRangeConfig import WaveformRangeCfg def WaveByteStreamCfg(configFlags, **kwargs): - acc = WaveformCableMappingCfg(configFlags, **kwargs) + acc = ComponentAccumulator() + acc.merge(WaveformCableMappingCfg(configFlags, **kwargs)) + acc.merge(WaveformRangeCfg(configFlags, **kwargs)) return acc diff --git a/Waveform/WaveEventCnv/WaveByteStream/src/RawWaveformDecoderTool.cxx 
b/Waveform/WaveEventCnv/WaveByteStream/src/RawWaveformDecoderTool.cxx index f3a9f9991fd0d926584727bc7546fb00ea52732d..852ae084edac08380e302ec3026548d667da70c2 100644 --- a/Waveform/WaveEventCnv/WaveByteStream/src/RawWaveformDecoderTool.cxx +++ b/Waveform/WaveEventCnv/WaveByteStream/src/RawWaveformDecoderTool.cxx @@ -43,7 +43,8 @@ StatusCode RawWaveformDecoderTool::convert(const DAQFormats::EventFull* re, RawWaveformContainer* container, const std::string key, - WaveformCableMap cable_map + WaveformCableMap cable_map, + WaveformRangeMap range_map ) { ATH_MSG_DEBUG("RawWaveformDecoderTool::convert("+key+")"); @@ -90,6 +91,8 @@ RawWaveformDecoderTool::convert(const DAQFormats::EventFull* re, det_type = std::string("calo"); } else if (key == std::string("VetoWaveforms")) { det_type = std::string("veto"); + } else if (key == std::string("VetoNuWaveforms")) { + det_type = std::string("vetonu"); } else if (key == std::string("TriggerWaveforms")) { det_type = std::string("trigger"); } else if (key == std::string("PreshowerWaveforms")) { @@ -141,10 +144,13 @@ RawWaveformDecoderTool::convert(const DAQFormats::EventFull* re, } // Set ID if one exists (clock, for instance, doesn't have an identifier) - if (cable_map[channel].second != -1) { // Identifier doesn't have operator>= + if (cable_map.at(channel).second != -1) { // Identifier doesn't have operator>= wfm->setIdentifier(cable_map[channel].second); } + // Set ADC range + wfm->setRange(range_map.at(channel)); + container->push_back(wfm); // Sanity check diff --git a/Waveform/WaveEventCnv/WaveByteStream/src/RawWaveformDecoderTool.h b/Waveform/WaveEventCnv/WaveByteStream/src/RawWaveformDecoderTool.h index 1609d3ebacd3940bdfcd89f3c1af39b2a0a9c648..5d3ff24600e76a6d37c7e5bb60e0729e36bb71ac 100644 --- a/Waveform/WaveEventCnv/WaveByteStream/src/RawWaveformDecoderTool.h +++ b/Waveform/WaveEventCnv/WaveByteStream/src/RawWaveformDecoderTool.h @@ -14,6 +14,7 @@ #include "WaveRawEvent/RawWaveformContainer.h" #include 
"WaveformConditionsTools/IWaveformCableMappingTool.h" +#include "WaveformConditionsTools/IWaveformRangeTool.h" // This class provides conversion between bytestream and Waveform objects @@ -30,7 +31,7 @@ class RawWaveformDecoderTool : public AthAlgTool { virtual StatusCode initialize(); virtual StatusCode finalize(); - StatusCode convert(const DAQFormats::EventFull* re, RawWaveformContainer* wfm, std::string key, WaveformCableMap cable_map); + StatusCode convert(const DAQFormats::EventFull* re, RawWaveformContainer* wfm, std::string key, WaveformCableMap cable_map, WaveformRangeMap range_map); private: }; diff --git a/Waveform/WaveEventCnv/WaveByteStream/src/WaveByteStreamCnv.cxx b/Waveform/WaveEventCnv/WaveByteStream/src/WaveByteStreamCnv.cxx index b40fc3bd8dda5469e52be5d297ad29cf4dc2e98b..38d1cc20c5240277d6a985c2ba8e57d064baa49f 100644 --- a/Waveform/WaveEventCnv/WaveByteStream/src/WaveByteStreamCnv.cxx +++ b/Waveform/WaveEventCnv/WaveByteStream/src/WaveByteStreamCnv.cxx @@ -27,6 +27,7 @@ WaveByteStreamCnv::WaveByteStreamCnv(ISvcLocator* svcloc) , m_name("WaveByteStreamCnv") , m_tool("RawWaveformDecoderTool") , m_mappingTool("WaveformCableMappingTool") + , m_rangeTool("WaveformRangeTool") , m_rdpSvc("FaserROBDataProviderSvc", m_name) { ATH_MSG_DEBUG(m_name+"::initialize() called"); @@ -49,7 +50,7 @@ StatusCode WaveByteStreamCnv::initialize() CHECK(m_rdpSvc.retrieve()); CHECK(m_tool.retrieve()); CHECK(m_mappingTool.retrieve()); - + CHECK(m_rangeTool.retrieve()); return StatusCode::SUCCESS; } @@ -96,8 +97,11 @@ StatusCode WaveByteStreamCnv::createObj(IOpaqueAddress* pAddr, DataObject*& pObj auto mapping = m_mappingTool->getCableMapping(); ATH_MSG_DEBUG("Cable mapping contains " << mapping.size() << " entries"); + auto range = m_rangeTool->getRangeMapping(); + ATH_MSG_DEBUG("Range contains " << range.size() << " entries"); + // Convert selected channels - CHECK( m_tool->convert(re, wfmCont, key, mapping) ); + CHECK( m_tool->convert(re, wfmCont, key, mapping, range) 
); pObj = SG::asStorable(wfmCont); diff --git a/Waveform/WaveEventCnv/WaveByteStream/src/WaveByteStreamCnv.h b/Waveform/WaveEventCnv/WaveByteStream/src/WaveByteStreamCnv.h index ce373326fd61a3ab9f8552b5a6f561b449cc6850..960b8759e2f72633eae58efb882d67848cdfee07 100644 --- a/Waveform/WaveEventCnv/WaveByteStream/src/WaveByteStreamCnv.h +++ b/Waveform/WaveEventCnv/WaveByteStream/src/WaveByteStreamCnv.h @@ -15,6 +15,7 @@ #include "AthenaBaseComps/AthMessaging.h" #include "FaserByteStreamCnvSvcBase/FaserByteStreamAddress.h" #include "WaveformConditionsTools/IWaveformCableMappingTool.h" +#include "WaveformConditionsTools/IWaveformRangeTool.h" class RawWaveformDecoderTool; class IFaserROBDataProviderSvc; @@ -41,6 +42,7 @@ private: std::string m_name; ToolHandle<RawWaveformDecoderTool> m_tool; ToolHandle<IWaveformCableMappingTool> m_mappingTool; + ToolHandle<IWaveformRangeTool> m_rangeTool; ServiceHandle<IFaserROBDataProviderSvc> m_rdpSvc; }; diff --git a/Waveform/WaveRawEvent/WaveRawEvent/RawWaveform.h b/Waveform/WaveRawEvent/WaveRawEvent/RawWaveform.h index 3770e05513a4d6f81ca16aa08785ea0c36a459cb..a1f42f6e86db846ea544daedb482ba8aba8fbdf5 100644 --- a/Waveform/WaveRawEvent/WaveRawEvent/RawWaveform.h +++ b/Waveform/WaveRawEvent/WaveRawEvent/RawWaveform.h @@ -62,11 +62,17 @@ public: // Waveform data unsigned int channel() const; const std::vector<unsigned int>& adc_counts() const; + size_t size() const {return m_adc_counts.size();} // Return channel identifier Identifier identify() const; Identifier32 identify32() const; + // Full-scale range (in V) of 14-bit ADC reading + // mV per bit is given by range() / 16.384 + float range() const; + float mv_per_bit() const {return m_range / 16.384;} + // some print-out: void print() const; @@ -89,6 +95,8 @@ public: void setSamples(unsigned int samp) {m_samples = samp;} void setCounts(const std::vector<unsigned int>& counts) {m_adc_counts = counts;} + void setRange(float range) {m_range = range;} + 
/////////////////////////////////////////////////////////////////// // Private data: /////////////////////////////////////////////////////////////////// @@ -105,6 +113,8 @@ private: std::vector<unsigned int> m_adc_counts; Identifier32 m_ID; + + float m_range; }; @@ -145,6 +155,9 @@ RawWaveform::identify() const { return Identifier(m_ID); } inline Identifier32 RawWaveform::identify32() const { return m_ID; } +inline float +RawWaveform::range() const { return m_range; } + std::ostream &operator<<(std::ostream &out, const RawWaveform &wfm); diff --git a/Waveform/WaveRawEvent/src/RawWaveform.cxx b/Waveform/WaveRawEvent/src/RawWaveform.cxx index d6ef63493018955a79d42eb2c4f7fe2eebc40de2..64c401dfad6a9fe9794737a953eca31eff110bc1 100644 --- a/Waveform/WaveRawEvent/src/RawWaveform.cxx +++ b/Waveform/WaveRawEvent/src/RawWaveform.cxx @@ -17,7 +17,8 @@ RawWaveform::RawWaveform( ) : m_samples(0), m_channel(0), m_adc_counts(), - m_ID(0xffff) + m_ID(0xffff), + m_range(2.) { } diff --git a/Waveform/WaveRecAlgs/python/WaveRecAlgsConfig.py b/Waveform/WaveRecAlgs/python/WaveRecAlgsConfig.py index d38eb4fd0a7ce0c26ffa1a78fd52a91f0261ac94..7d3219eb0da3e1026fae15754bb9f52bd1618c65 100644 --- a/Waveform/WaveRecAlgs/python/WaveRecAlgsConfig.py +++ b/Waveform/WaveRecAlgs/python/WaveRecAlgsConfig.py @@ -6,6 +6,7 @@ from AthenaConfiguration.ComponentAccumulator import ComponentAccumulator from AthenaConfiguration.ComponentFactory import CompFactory from OutputStreamAthenaPool.OutputStreamConfig import OutputStreamCfg +from WaveformConditionsTools.WaveformTimingConfig import WaveformTimingCfg WaveformReconstructionTool = CompFactory.WaveformReconstructionTool ClockReconstructionTool = CompFactory.ClockReconstructionTool @@ -21,6 +22,7 @@ def WaveformReconstructionCfg(flags, naive = False): if flags.Input.isMC and naive: if "TB" not in flags.GeoModel.FaserVersion: acc.merge(PseudoScintHitToWaveformRecCfg(flags, "PseudoTriggerHitWaveformRecAlg", "Trigger")) + 
acc.merge(PseudoScintHitToWaveformRecCfg(flags, "PseudoVetoHitToWaveformRecAlg", "Veto")) acc.merge(PseudoScintHitToWaveformRecCfg(flags, "PseudoPresehowerHitWaveformRecAlg", "Preshower")) acc.merge(PseudoCaloHitToWaveformRecCfg(flags, "PseudoCaloHitWaveformRecAlg")) @@ -28,9 +30,15 @@ def WaveformReconstructionCfg(flags, naive = False): acc.merge(WaveformHitRecCfg(flags, "TriggerWaveformRecAlg", "Trigger")) acc.merge(WaveformHitRecCfg(flags, "VetoWaveformRecAlg", "Veto")) + if flags.Input.isMC: + print("Turning off VetoNu reco in MC!") + else: + acc.merge(WaveformHitRecCfg(flags, "VetoNuWaveformRecAlg", "VetoNu")) acc.merge(WaveformHitRecCfg(flags, "PreshowerWaveformRecAlg", "Preshower")) acc.merge(WaveformHitRecCfg(flags, "CaloWaveformRecAlg", "Calo")) + acc.merge(WaveformTimingCfg(flags)) + return acc # Return configured WaveformClock reconstruction algorithm @@ -49,13 +57,13 @@ def WaveformClockRecCfg(flags, name="ClockRecAlg", **kwargs): return acc # Return configured WaveformHit reconstruction algorithm -# Specify data source (Veto, Trigger, Preshower, Calo, Test) +# Specify data source (Veto, VetoNu, Trigger, Preshower, Calo, Test) def WaveformHitRecCfg(flags, name="WaveformRecAlg", source="", **kwargs): acc = ComponentAccumulator() - if flags.Input.isMC: - kwargs.setdefault("PeakThreshold", 5) + #if flags.Input.isMC: + # kwargs.setdefault("PeakThreshold", 5) tool = WaveformReconstructionTool(name=source+"WaveformRecTool", **kwargs) diff --git a/Waveform/WaveRecAlgs/src/RawWaveformRecAlg.cxx b/Waveform/WaveRecAlgs/src/RawWaveformRecAlg.cxx index 97f122e50f51c17f2ac3f0e0ff3174cf2f337347..8f8ac480969aea1f424021e3985e695d21318783 100644 --- a/Waveform/WaveRecAlgs/src/RawWaveformRecAlg.cxx +++ b/Waveform/WaveRecAlgs/src/RawWaveformRecAlg.cxx @@ -31,9 +31,10 @@ RawWaveformRecAlg::finalize() { ATH_MSG_INFO( m_numberOfEvents << " events processed" ); if ( m_numberOfEvents > 0) { - ATH_MSG_INFO( m_numberOfWaveforms << " waveforms found" ); - ATH_MSG_INFO( 
m_numberOfOverflows << " overflows" ); - ATH_MSG_INFO( m_numberOfFitErrors << " fit errors" ); + ATH_MSG_INFO( m_numberOfWaveforms << " waveforms found over threshold" ); + ATH_MSG_INFO( m_numberOfSecondaries << " secondary waveforms found" ); + ATH_MSG_INFO( m_numberOfOverflows << " overflows" ); + ATH_MSG_INFO( m_numberOfFitErrors << " fit errors" ); } return StatusCode::SUCCESS; @@ -43,6 +44,9 @@ StatusCode RawWaveformRecAlg::execute(const EventContext& ctx) const { ATH_MSG_DEBUG("Executing"); + // Keep track of some statistics + m_numberOfEvents++; + ATH_MSG_DEBUG("Run: " << ctx.eventID().run_number() << " Event: " << ctx.eventID().event_number()); @@ -52,41 +56,56 @@ RawWaveformRecAlg::execute(const EventContext& ctx) const { ATH_CHECK( waveformHandle.isValid() ); ATH_MSG_DEBUG("Found ReadHandle for RawWaveformContainer " << m_waveformContainerKey); + // Find the output waveform container + SG::WriteHandle<xAOD::WaveformHitContainer> hitContainerHandle(m_waveformHitContainerKey, ctx); + ATH_CHECK( hitContainerHandle.record( std::make_unique<xAOD::WaveformHitContainer>(), + std::make_unique<xAOD::WaveformHitAuxContainer>() ) ); + + ATH_MSG_DEBUG("WaveformsHitContainer '" << hitContainerHandle.name() << "' initialized"); + if (waveformHandle->size() == 0) { ATH_MSG_DEBUG("Waveform container found with zero length!"); return StatusCode::SUCCESS; } + // First reconstruct the primary hit (based on trigger time) + for( const auto& wave : *waveformHandle) { + ATH_MSG_DEBUG("Reconstruct primary waveform for channel " << wave->channel()); + CHECK( m_recoTool->reconstructPrimary(*wave, hitContainerHandle.ptr()) ); + } + + // Second, reconstruct any additional out of time hits + if (m_findMultipleHits) { + for( const auto& wave : *waveformHandle) { + ATH_MSG_DEBUG("Reconstruct secondary waveform for channel " << wave->channel()); + CHECK( m_recoTool->reconstructSecondary(*wave, hitContainerHandle.ptr()) ); + } + } + // Also find the clock information 
SG::ReadHandle<xAOD::WaveformClock> clockHandle(m_clockKey, ctx); const xAOD::WaveformClock* clockptr = NULL; + // Fix timing for all hits // Can survive without this, but make a note if ( clockHandle.isValid() ) { ATH_MSG_DEBUG("Found ReadHandle for WaveformClock"); clockptr = clockHandle.ptr(); + CHECK( m_recoTool->setLocalTime(clockptr, hitContainerHandle.ptr()) ); } else { ATH_MSG_WARNING("Didn't find ReadHandle for WaveformClock!"); } - // Find the output waveform container - SG::WriteHandle<xAOD::WaveformHitContainer> hitContainerHandle(m_waveformHitContainerKey, ctx); - ATH_CHECK( hitContainerHandle.record( std::make_unique<xAOD::WaveformHitContainer>(), - std::make_unique<xAOD::WaveformHitAuxContainer>() ) ); - - ATH_MSG_DEBUG("WaveformsHitContainer '" << hitContainerHandle.name() << "' initialized"); - - // Reconstruct all waveforms - CHECK( m_recoTool->reconstructAll(*waveformHandle, clockptr, hitContainerHandle.ptr()) ); - ATH_MSG_DEBUG("WaveformsHitContainer '" << hitContainerHandle.name() << "' filled with "<< hitContainerHandle->size() <<" items"); // Keep track of some statistics - m_numberOfEvents++; for (const auto& hit : *(hitContainerHandle.ptr())) { if (hit->status_bit(xAOD::WaveformStatus::THRESHOLD_FAILED)) continue; + m_numberOfWaveforms++; if (hit->status_bit(xAOD::WaveformStatus::WAVE_OVERFLOW)) m_numberOfOverflows++; + if (hit->status_bit(xAOD::WaveformStatus::SECONDARY)) m_numberOfSecondaries++; + if (hit->status_bit(xAOD::WaveformStatus::GFIT_FAILED)) { m_numberOfFitErrors++; } else if (hit->status_bit(xAOD::WaveformStatus::CBFIT_FAILED)) { diff --git a/Waveform/WaveRecAlgs/src/RawWaveformRecAlg.h b/Waveform/WaveRecAlgs/src/RawWaveformRecAlg.h index e57501a730dd5f67c38435cb9594b0a7def2ec75..4de415d21d162a3aa2b64fe7f1ef1bb14e0237d9 100644 --- a/Waveform/WaveRecAlgs/src/RawWaveformRecAlg.h +++ b/Waveform/WaveRecAlgs/src/RawWaveformRecAlg.h @@ -42,6 +42,10 @@ class RawWaveformRecAlg : public AthReentrantAlgorithm { virtual StatusCode 
finalize() override; //@} + // + // Look for more than one hit in each channel + BooleanProperty m_findMultipleHits{this, "FindMultipleHits", true}; + private: /** @name Disallow default instantiation, copy, assignment */ @@ -90,6 +94,7 @@ class RawWaveformRecAlg : public AthReentrantAlgorithm { //@{ mutable std::atomic<int> m_numberOfEvents{0}; mutable std::atomic<int> m_numberOfWaveforms{0}; + mutable std::atomic<int> m_numberOfSecondaries{0}; mutable std::atomic<int> m_numberOfOverflows{0}; mutable std::atomic<int> m_numberOfFitErrors{0}; //@} diff --git a/Waveform/WaveRecTools/CMakeLists.txt b/Waveform/WaveRecTools/CMakeLists.txt index d8f3e6f053232477a1b1501a26e3a88efe9272c1..f7f9672886ef12ab2fd9441447bf0f0a0f583418 100644 --- a/Waveform/WaveRecTools/CMakeLists.txt +++ b/Waveform/WaveRecTools/CMakeLists.txt @@ -13,13 +13,16 @@ atlas_add_library( WaveRecToolsLib WaveRecTools/*.h src/*.cxx src/*.h PUBLIC_HEADERS WaveRecTools PRIVATE_INCLUDE_DIRS ${ROOT_INCLUDE_DIRS} - LINK_LIBRARIES AthenaBaseComps AthenaKernel GeoPrimitives WaveRawEvent xAODFaserWaveform + LINK_LIBRARIES AthenaBaseComps AthenaKernel GeoPrimitives + WaveformConditionsToolsLib WaveRawEvent xAODFaserWaveform PRIVATE_LINK_LIBRARIES ${ROOT_LIBRARIES} ) atlas_add_component( WaveRecTools src/components/*.cxx INCLUDE_DIRS ${ROOT_INCLUDE_DIRS} - LINK_LIBRARIES ${ROOT_LIBRARIES} AthenaBaseComps GaudiKernel WaveRecToolsLib ) + LINK_LIBRARIES ${ROOT_LIBRARIES} + WaveformConditionsToolsLib AthenaBaseComps GaudiKernel + WaveRecToolsLib) diff --git a/Waveform/WaveRecTools/WaveRecTools/IWaveformReconstructionTool.h b/Waveform/WaveRecTools/WaveRecTools/IWaveformReconstructionTool.h index cc10197b262f0695327f7304a84998d198300678..c72c2502c9391861a3aec0696f47d568a7d7aa2b 100644 --- a/Waveform/WaveRecTools/WaveRecTools/IWaveformReconstructionTool.h +++ b/Waveform/WaveRecTools/WaveRecTools/IWaveformReconstructionTool.h @@ -32,15 +32,17 @@ class IWaveformReconstructionTool : virtual public IAlgTool virtual 
~IWaveformReconstructionTool() = default; - // Reconstruct all waveforms - virtual StatusCode reconstructAll(const RawWaveformContainer& waveContainer, - const xAOD::WaveformClock* clock, - xAOD::WaveformHitContainer* container) const = 0; - - // Reconstruct all peaks in a raw waveform - virtual StatusCode reconstruct(const RawWaveform& wave, - const xAOD::WaveformClock* clock, - xAOD::WaveformHitContainer* container) const = 0; + // Reconstruct hits in trigger window + virtual StatusCode reconstructPrimary(const RawWaveform& wave, + xAOD::WaveformHitContainer* container) const = 0; + + // Reconstruct secondary hits anywhere in the waveform + virtual StatusCode reconstructSecondary(const RawWaveform& wave, + xAOD::WaveformHitContainer* container) const = 0; + + // Set local hit times from LHC clock + virtual StatusCode setLocalTime(const xAOD::WaveformClock* clock, + xAOD::WaveformHitContainer* container) const = 0; }; diff --git a/Waveform/WaveRecTools/src/ClockReconstructionTool.cxx b/Waveform/WaveRecTools/src/ClockReconstructionTool.cxx index c45ea51007362eecd0ab7827f50123c05ddebab7..825cc755862200c0dd008eaf3c273cb23b37f83a 100644 --- a/Waveform/WaveRecTools/src/ClockReconstructionTool.cxx +++ b/Waveform/WaveRecTools/src/ClockReconstructionTool.cxx @@ -102,10 +102,10 @@ ClockReconstructionTool::reconstruct(const RawWaveform& raw_wave, ATH_MSG_DEBUG("Index: " << i << " Freq: " << i*freqmult << " Mag: " << magnitude[i]); } - // Store results - clockdata->set_dc_offset(magnitude[0]); + // Store results (amplitudes in mV) + clockdata->set_dc_offset(raw_wave.mv_per_bit()*magnitude[0]); + clockdata->set_amplitude(raw_wave.mv_per_bit()*magnitude[imax]); clockdata->set_frequency(imax * freqmult); - clockdata->set_amplitude(magnitude[imax]); clockdata->set_phase(atan2(im_full[imax], re_full[imax])); // Not a bug, atan2(y,x)!
ATH_MSG_DEBUG("Before correcting for finite resolution:"); @@ -133,7 +133,7 @@ ClockReconstructionTool::reconstruct(const RawWaveform& raw_wave, clockdata->set_frequency( (imax+dm) * freqmult ); clockdata->set_phase (phase); - clockdata->set_amplitude( 2*M_PI*dm*magnitude[imax] / sin(M_PI * dm) ); + clockdata->set_amplitude( raw_wave.mv_per_bit() * 2*M_PI*dm*magnitude[imax] / sin(M_PI * dm) ); ATH_MSG_DEBUG("After correcting for finite resolution:"); ATH_MSG_DEBUG(*clockdata); diff --git a/Waveform/WaveRecTools/src/ClockReconstructionTool.h b/Waveform/WaveRecTools/src/ClockReconstructionTool.h index ea7ec2ec7e2a28e49d587f9a7f072678ee4bdbc5..78e6ea1770c069314ee15a068458f88623b63bda 100644 --- a/Waveform/WaveRecTools/src/ClockReconstructionTool.h +++ b/Waveform/WaveRecTools/src/ClockReconstructionTool.h @@ -51,8 +51,8 @@ class ClockReconstructionTool: public extends<AthAlgTool, IClockReconstructionTo void checkResult(const RawWaveform& raw_wave, xAOD::WaveformClock* clockdata) const; - // Limits to print warnings - FloatProperty m_amplitude_min{this, "AmplitudeMin", 1000.}; + // Limits to print warnings (amplitude in mV) + FloatProperty m_amplitude_min{this, "AmplitudeMin", 500.}; FloatProperty m_frequency_min{this, "FrequencyMin", 40.0}; FloatProperty m_frequency_max{this, "FrequencyMax", 40.1}; diff --git a/Waveform/WaveRecTools/src/WaveformReconstructionTool.cxx b/Waveform/WaveRecTools/src/WaveformReconstructionTool.cxx index 7e564dae08c401bf51130dfb0813046e62cb4251..25cdc0a3928b08ef0956698d9f2667ce89286e75 100644 --- a/Waveform/WaveRecTools/src/WaveformReconstructionTool.cxx +++ b/Waveform/WaveRecTools/src/WaveformReconstructionTool.cxx @@ -39,187 +39,290 @@ WaveformReconstructionTool::initialize() { } else { ATH_MSG_INFO("Will use fit to determine baseline"); } + + ATH_CHECK( m_timingTool.retrieve() ); + return StatusCode::SUCCESS; } -// Reconstruction step +// +// Form primary hits using trigger time +// StatusCode -WaveformReconstructionTool::reconstructAll( - 
const RawWaveformContainer& waveContainer, - const xAOD::WaveformClock* clock, +WaveformReconstructionTool::reconstructPrimary( + const RawWaveform& wave, xAOD::WaveformHitContainer* hitContainer) const { - ATH_MSG_DEBUG(" reconstructAll called "); + ATH_MSG_DEBUG(" reconstructPrimary called"); - // Reconstruct each waveform - for( const auto& wave : waveContainer) { + xAOD::WaveformHit* newhit = new xAOD::WaveformHit(); + hitContainer->push_back(newhit); - ATH_MSG_DEBUG("Reconstruct waveform for channel " << wave->channel()); + // Set digitizer channel and identifier + newhit->set_channel(wave.channel()); + newhit->set_id(wave.identify32().get_compact()); + + // Make sure we have ADC counts + if (wave.adc_counts().size() == 0) { + ATH_MSG_WARNING( "Found waveform for channel " << wave.channel() + << " with size " << wave.adc_counts().size() << "!"); + + newhit->set_status_bit(xAOD::WaveformStatus::WAVEFORM_MISSING); + return StatusCode::SUCCESS; + } - // Reconstruct the hits, may be more than one, so pass container - CHECK( this->reconstruct(*wave, clock, hitContainer) ); + if (wave.adc_counts().size() != wave.n_samples()) { + ATH_MSG_WARNING( "Found waveform for channel " << wave.channel() + << " with size " << wave.adc_counts().size() + << " not equal to number of samples " << wave.n_samples()); + + newhit->set_status_bit(xAOD::WaveformStatus::WAVEFORM_INVALID); + return StatusCode::SUCCESS; } - if (m_ensureChannelHits) { - ATH_MSG_DEBUG("Ensure all channels have hits at peak time"); - ensureHits(waveContainer, clock, hitContainer); + // Find the baseline for this waveform + findBaseline(wave, newhit); + + // Check for problems + if (newhit->status_bit(xAOD::WaveformStatus::BASELINE_FAILED)) + return StatusCode::SUCCESS; + + // Get the nominal trigger time (in ns) from config + float trigger_time = m_timingTool->nominalTriggerTime(); + + // Set range for windowed data in digitizer samples + float offset = m_timingTool->triggerTimeOffset(wave.channel()); + + int 
lo_edge = int((trigger_time+offset)/2.) + m_windowStart; + int hi_edge = int((trigger_time+offset)/2.) + m_windowStart + m_windowWidth; + + // Fill raw hit values + fillRawHitValues(wave, lo_edge, hi_edge, newhit); + + // Check if this is over threshold + if (newhit->peak() < newhit->baseline_rms() * m_primaryPeakThreshold) { + ATH_MSG_DEBUG("Primary hit failed threshold"); + newhit->set_status_bit(xAOD::WaveformStatus::THRESHOLD_FAILED); + } else { + // Reconstruct hit in this range + reconstructHit(newhit); } return StatusCode::SUCCESS; } // -// Make sure we have a hit for each channel at the time when -// there is a significant pulse found in the detector +// Form secondary hits outside the trigger window // -void -WaveformReconstructionTool::ensureHits( - const RawWaveformContainer& waveContainer, - const xAOD::WaveformClock* clock, +StatusCode +WaveformReconstructionTool::reconstructSecondary( + const RawWaveform& wave, xAOD::WaveformHitContainer* hitContainer) const { - ATH_MSG_DEBUG(" ensureHits called "); + ATH_MSG_DEBUG(" reconstructSecondary called"); - // Find peak time (most significant hit) - xAOD::WaveformHit* peakHit = NULL; + // Find existing hit for this channel to get baseline + xAOD::WaveformHit* primaryHit = NULL; for( const auto& hit : *hitContainer) { - if (peakHit == NULL) { - peakHit = hit; - } else { - if ( hit->peak() > peakHit->peak() ) peakHit = hit; + // Use id rather than channel to make sure this works on MC + if (hit->identify() == wave.identify()) { + ATH_MSG_DEBUG("Found primary hit in channel "<< hit->channel() + << " with id 0x" << std::hex << hit->identify() << std::dec ); + primaryHit = hit; + break; } + } + // Did we find the primary hit for this channel? + if (!primaryHit) { + ATH_MSG_ERROR("found no primary hit for channel " << wave.channel() << "!"); + return StatusCode::FAILURE; } - // Didn't find anything?
- if (peakHit == NULL) return; - if (peakHit->status_bit(xAOD::WaveformStatus::THRESHOLD_FAILED)) return; + if (primaryHit->status_bit(xAOD::WaveformStatus::WAVEFORM_MISSING)) { + ATH_MSG_DEBUG("Found primary hit with waveform missing"); + return StatusCode::SUCCESS; + } - ATH_MSG_DEBUG("Found peak hit in channel " << peakHit->channel() << " at time " << peakHit->localtime()); + if (primaryHit->status_bit(xAOD::WaveformStatus::WAVEFORM_INVALID)) { + ATH_MSG_DEBUG("Found primary hit with waveform invalid"); + return StatusCode::SUCCESS; + } - // Now go through all of the channels and check if there is a hit - // close in time to the peakHit - for( const auto& wave : waveContainer) { + WaveformBaselineData baseline; - // Don't worry about the peak channel, we know this has a hit... - if (wave->channel() == peakHit->channel()) continue; + baseline.mean = primaryHit->baseline_mean(); + baseline.rms = primaryHit->baseline_rms(); + + // Find the secondary peak position + int ipeak; - ATH_MSG_DEBUG("Checking for hit in channel " << wave->channel()); + // Is there already a peak in the primary? + if (primaryHit->threshold()) { - bool found = false; - // Look for a baseline-only hit that we can update - xAOD::WaveformHit* baselineHit = NULL; + ATH_MSG_DEBUG("Looking for secondary hit with primary hit above threshold"); - // There aren't so many hits, just loop over container - for( const auto& hit : *hitContainer) { - if (hit->channel() != wave->channel()) continue; + // Look before and after window + int lo_edge = int(primaryHit->time_vector().front()/2.); + int hi_edge = int(primaryHit->time_vector().back()/2.); - // Is this above threshold? 
- if (hit->status_bit(xAOD::WaveformStatus::THRESHOLD_FAILED)) { - baselineHit = hit; - continue; - } + std::vector<float> wwave_lo(lo_edge); + std::vector<float> wwave_hi(wave.adc_counts().size() - hi_edge - 1); - // OK, this is the right channel, check the time - float dtime = abs(hit->localtime() - peakHit->localtime()); - if (dtime > m_hitTimeDifference) continue; + int ipeak_lo = -1.; + int ipeak_hi = -1.; - // We have found a hit in the right channel at the right time - found = true; - ATH_MSG_DEBUG("Found hit in channel " << hit->channel() - << " at time " << hit->localtime()); - break; - } + // Look before + if (m_findSecondaryBefore) { + for (int i=0; i<lo_edge; i++) { + wwave_lo[i] = baseline.mean - wave.mv_per_bit() * wave.adc_counts()[i]; + } - // Is there a hit? If so, go to next waveform/channel - if (found) continue; + ipeak_lo = findPeak(baseline, m_secondaryPeakThreshold, wwave_lo); - ATH_MSG_DEBUG("No hit found for channel " << wave->channel() - << " at time " << peakHit->localtime()); + if (ipeak_lo < 0) { + ATH_MSG_DEBUG("No hit found before " << lo_edge); + } else { + ATH_MSG_DEBUG("Hit found at " << ipeak_lo << " before " << lo_edge); + } + } - // Do we have a baseline-only hit we can use? 
- xAOD::WaveformHit* newhit = NULL; - if (baselineHit == NULL) { - // No, make a new hit here - newhit = new xAOD::WaveformHit(); - hitContainer->push_back(newhit); + // Look after + if (m_findSecondaryAfter) { + for (unsigned int i=(hi_edge+1); i<wave.adc_counts().size(); i++) { + wwave_hi[(i-(hi_edge+1))] = baseline.mean - wave.mv_per_bit() * wave.adc_counts()[i]; + } - // Mark this as a secondary hit - newhit->set_status_bit(xAOD::WaveformStatus::THRESHOLD_FAILED); - newhit->set_status_bit(xAOD::WaveformStatus::SECONDARY); + ipeak_hi = findPeak(baseline, m_secondaryPeakThreshold, wwave_hi); - // Set digitizer channel and identifier - newhit->set_channel(wave->channel()); - newhit->set_id(wave->identify32().get_compact()); + // Is this too close to the primary hit? + if (ipeak_hi < 5) { + ATH_MSG_DEBUG("Found hit after at " << (ipeak_hi + hi_edge + 1)<< " but too close to edge"); + ipeak_hi = -1; + } - // Make sure we have ADC counts - if (wave->adc_counts().size() == 0) { - ATH_MSG_WARNING( "Found waveform for channel " << wave->channel() - << " with size " << wave->adc_counts().size() << "!"); - - newhit->set_status_bit(xAOD::WaveformStatus::WAVEFORM_MISSING); - continue; - } - - if (wave->adc_counts().size() != wave->n_samples()) { - ATH_MSG_WARNING( "Found waveform for channel " << wave->channel() - << " with size " << wave->adc_counts().size() - << " not equal to number of samples " << wave->n_samples()); - - newhit->set_status_bit(xAOD::WaveformStatus::WAVEFORM_INVALID); - continue; + if (ipeak_hi < 0) { + ATH_MSG_DEBUG("No hit found after " << hi_edge); + } else { + ATH_MSG_DEBUG("Hit found at " << ipeak_hi << " after " << hi_edge); } + } - findBaseline(*wave, newhit); + // Nothing found + if (ipeak_lo < 0 && ipeak_hi < 0) + return StatusCode::SUCCESS; + + // Both? 
+ if (ipeak_lo >= 0 && ipeak_hi >= 0) { + + // Pick the largest signal + if (wwave_lo[ipeak_lo] >= wwave_hi[ipeak_hi]) { + ipeak = ipeak_lo; + ATH_MSG_DEBUG("Picked before as " << wwave_lo[ipeak_lo] + << " > " << wwave_hi[ipeak_hi]); + } else { + ipeak = ipeak_hi + hi_edge + 1; + ATH_MSG_DEBUG("Picked after as " << wwave_lo[ipeak_lo] + << " < " << wwave_hi[ipeak_hi]); + } + + } else if (ipeak_lo > 0) { + ipeak = ipeak_lo; + ATH_MSG_DEBUG("Peak before with " << wwave_lo[ipeak_lo]); } else { - // Use the existing baseline hit - newhit = baselineHit; + ATH_MSG_DEBUG("Peak after with " << wwave_hi[ipeak_hi]); + ipeak = ipeak_hi+hi_edge+1; } - // Check for problems - if (newhit->status_bit(xAOD::WaveformStatus::BASELINE_FAILED)) continue; - - // Set range for windowed data - unsigned int lo_edge = peakHit->time_vector().front()/2.; - unsigned int hi_edge = peakHit->time_vector().back()/2.; - - ATH_MSG_DEBUG("Windowing waveform from " << lo_edge << " to " << hi_edge); - std::vector<float> wtime(hi_edge-lo_edge+1); - std::vector<float> wwave(hi_edge-lo_edge+1); - for (unsigned int i=lo_edge; i<=hi_edge; i++) { - unsigned int j = i-lo_edge; - wtime[j] = 2.*i; - wwave[j] = newhit->baseline_mean() - wave->adc_counts()[i]; - //ATH_MSG_DEBUG(" Time: " << wtime[j] << " Wave: " << wwave[j]); + } else { + + ATH_MSG_DEBUG("Looking for secondary hit without primary hit above threshold"); + std::vector<float> wwave(wave.adc_counts().size()); + for (unsigned int i=0; i<wave.adc_counts().size(); i++) { + wwave[i] = baseline.mean - wave.mv_per_bit() * wave.adc_counts()[i]; } - newhit->set_time_vector(wtime); - newhit->set_wave_vector(wwave); + ipeak = findPeak(baseline, m_secondaryPeakThreshold, wwave); - // - // Find some raw values - WaveformFitResult raw = findRawHitValues(wtime, wwave); - newhit->set_peak(raw.peak); - newhit->set_mean(raw.mean); - newhit->set_width(raw.sigma); - newhit->set_integral(raw.integral); - newhit->set_localtime(raw.mean); - newhit->set_raw_peak(raw.peak); 
- newhit->set_raw_integral(raw.integral); + // Nothing found + if (ipeak < 0) + return StatusCode::SUCCESS; + + ATH_MSG_DEBUG("Found secondary peak with no primary " << wwave[ipeak]); + } + + // We seem to have a secondary hit + xAOD::WaveformHit* newhit = new xAOD::WaveformHit(); + hitContainer->push_back(newhit); + + // Fill values + newhit->set_channel(wave.channel()); + newhit->set_id(wave.identify32().get_compact()); + newhit->set_status_bit(xAOD::WaveformStatus::SECONDARY); + newhit->set_baseline_mean(baseline.mean); + newhit->set_baseline_rms(baseline.rms); + + // Set range for windowed data in digitizer samples + int lo_edge = ipeak + m_windowStart; + int hi_edge = ipeak + m_windowStart + m_windowWidth; + + // Fill raw hit values + fillRawHitValues(wave, lo_edge, hi_edge, newhit); + + // Must be over threshold, so reconstruct here + reconstructHit(newhit); + + return StatusCode::SUCCESS; +} + +StatusCode +WaveformReconstructionTool::setLocalTime(const xAOD::WaveformClock* clock, + xAOD::WaveformHitContainer* container) const { + + ATH_MSG_DEBUG(" setLocalTime called "); + + // Check the container + if (!container) { + ATH_MSG_ERROR("WaveformHitCollection passed to setLocalTime() is null!"); + return StatusCode::FAILURE; + } + + bool clock_valid; + + // + // Find time from clock + if (!clock || (clock->frequency() <= 0.)) { + clock_valid = false; + } else { + clock_valid = true; + } + + float trigger_time = m_timingTool->nominalTriggerTime(); + float offset; + + // Should actually find the time of the trigger here + // and set bcid time offset from that + // Loop through hits and set local time + for( const auto& hit : *container) { // // Find time from clock - if (!clock || (clock->frequency() <= 0.)) { - newhit->set_status_bit(xAOD::WaveformStatus::CLOCK_INVALID); - newhit->set_bcid_time(-1.); + if (clock_valid) { + hit->set_bcid_time(clock->time_from_clock(hit->localtime())); } else { - newhit->set_bcid_time(clock->time_from_clock(newhit->localtime())); + 
hit->set_status_bit(xAOD::WaveformStatus::CLOCK_INVALID); + hit->set_bcid_time(-1.); } - } // End of loop over waveContainer + // Also set time with respect to nominal trigger + offset = m_timingTool->triggerTimeOffset(hit->channel()); + hit->set_trigger_time(hit->localtime() - (trigger_time + offset)); + } + + return StatusCode::SUCCESS; } // Find the baseline @@ -245,187 +348,131 @@ WaveformReconstructionTool::findBaseline(const RawWaveform& raw_wave, } else { // Save baseline to hit collection object - hit->set_baseline_mean(baseline.mean); - hit->set_baseline_rms(baseline.rms); + hit->set_baseline_mean(raw_wave.mv_per_bit()*baseline.mean); + hit->set_baseline_rms(raw_wave.mv_per_bit()*baseline.rms); + ATH_MSG_DEBUG("Baseline found with mean = " << hit->baseline_mean() + << " mV and rms = " << hit->baseline_rms() + << " mV"); } return baseline; } -StatusCode -WaveformReconstructionTool::reconstruct(const RawWaveform& raw_wave, - const xAOD::WaveformClock* clock, - xAOD::WaveformHitContainer* container) const { - - ATH_MSG_DEBUG(" reconstruct called "); - - // Check the container - if (!container) { - ATH_MSG_ERROR("WaveformHitCollection passed to reconstruct() is null!"); - return StatusCode::FAILURE; - } - - // - // We always want to create at least one hit, so create it here - xAOD::WaveformHit* hit = new xAOD::WaveformHit(); - container->push_back(hit); - - // Set digitizer channel and identifier - hit->set_channel(raw_wave.channel()); - hit->set_id(raw_wave.identify32().get_compact()); +// Fill the raw hit parameters +void +WaveformReconstructionTool::fillRawHitValues(const RawWaveform& wave, + int lo_edge, int hi_edge, + xAOD::WaveformHit* hit) const { - // Make sure we have ADC counts - if (raw_wave.adc_counts().size() == 0) { - ATH_MSG_WARNING( "Found waveform for channel " << raw_wave.channel() - << " with size " << raw_wave.adc_counts().size() << "!"); + // First, make sure we don't overflow the waveform range + if (lo_edge < 0) lo_edge = 0; + if 
(hi_edge >= int(wave.size())) hi_edge = wave.size() - 1; - hit->set_status_bit(xAOD::WaveformStatus::WAVEFORM_MISSING); - return StatusCode::SUCCESS; - } + ATH_MSG_DEBUG("Fill channel " << wave.channel() + << " waveform from sample " << lo_edge << " to " << hi_edge); - if (raw_wave.adc_counts().size() != raw_wave.n_samples()) { - ATH_MSG_WARNING( "Found waveform for channel " << raw_wave.channel() - << " with size " << raw_wave.adc_counts().size() - << " not equal to number of samples " << raw_wave.n_samples()); + // Fill hit window with data from wave + std::vector<float> wtime(hi_edge-lo_edge+1); + std::vector<float> wwave(hi_edge-lo_edge+1); - hit->set_status_bit(xAOD::WaveformStatus::WAVEFORM_INVALID); - return StatusCode::SUCCESS; + for (int i=lo_edge; i<=hi_edge; i++) { + unsigned int j = i-lo_edge; + wtime[j] = 2.*i; // 2ns per sample at 500 MHz + wwave[j] = hit->baseline_mean() - wave.mv_per_bit() * wave.adc_counts()[i]; } - // Find the baseline - WaveformBaselineData baseline = findBaseline(raw_wave, hit); - - // Check that we have data to work with - // If any status bits are set, this is bad - if (hit->status()) return StatusCode::SUCCESS; - - // - // Create baseline-subtracted data array for both time and signal - // Time in ns from start of readout - unsigned int size = raw_wave.adc_counts().size(); - std::vector<float> time(size); - for (unsigned int i=0; i<size; i++) - time[i] = 2.*i; - - // Baseline subtracted (and inverted) ADC waveform values - std::vector<float> wave(raw_wave.adc_counts().begin(), raw_wave.adc_counts().end()); - for (auto& element : wave) - element = baseline.mean - element; - - bool first = true; - - // Now we iteratively find peaks and fit - while(true) { - - // - // Find peak in array and return time and value arrays - // This range of data is also *removed* from original arrays - std::vector<float> wtime; - std::vector<float> wwave; - - // All done if we don't have any peaks above threshold - // If we do find a significant 
peak, fill the window - if (! findPeak(baseline, time, wave, wtime, wwave) ) { - if (first) hit->set_status_bit(xAOD::WaveformStatus::THRESHOLD_FAILED); - break; - } - - // - // Create new hit to fill - if (!first) { - hit = new xAOD::WaveformHit(); - container->push_back(hit); - hit->set_status_bit(xAOD::WaveformStatus::SECONDARY); - } - first = false; + hit->set_time_vector(wtime); + hit->set_wave_vector(wwave); - // - // Save windowed waveform to Hit object - hit->set_channel(raw_wave.channel()); - hit->set_baseline_mean(baseline.mean); - hit->set_baseline_rms(baseline.rms); - hit->set_time_vector(wtime); - hit->set_wave_vector(wwave); + // Set raw values + WaveformFitResult raw = findRawHitValues(wtime, wwave); + hit->set_peak(raw.peak); + hit->set_mean(raw.mean); + hit->set_width(raw.sigma); + hit->set_integral(raw.integral); + hit->set_localtime(raw.mean); + hit->set_raw_peak(raw.peak); + hit->set_raw_integral(raw.integral); - // - // Find some raw values - WaveformFitResult raw = findRawHitValues(wtime, wwave); - hit->set_peak(raw.peak); - hit->set_mean(raw.mean); - hit->set_width(raw.sigma); - hit->set_integral(raw.integral); - hit->set_localtime(raw.mean); - hit->set_raw_peak(raw.peak); - hit->set_raw_integral(raw.integral); +} - // - // Perform Gaussian fit to waveform - WaveformFitResult gfit = fitGaussian(raw, wtime, wwave); - if (! 
gfit.valid) { - // Lets try again with a more restricted width - ATH_MSG_WARNING( " Gaussian waveform fit failed with width " << raw.sigma << " try reducing width to 1 " ); - raw.sigma = 1.; - gfit = fitGaussian(raw, wtime, wwave); - if (!gfit.valid) { - hit->set_status_bit(xAOD::WaveformStatus::GFIT_FAILED); - } - } +// Reconstruct a hit from the RawWaveform in the range specified +// Range is in units digitizer samples (not ns) +void +WaveformReconstructionTool::reconstructHit(xAOD::WaveformHit* hit) const { - // Fit results (or raw if it failed) - hit->set_peak(gfit.peak); - hit->set_mean(gfit.mean); - hit->set_width(gfit.sigma); - hit->set_integral(gfit.integral); - hit->set_localtime(gfit.time); + // Time and waveform vectors + // Don't use reference as we may modify this below + std::vector<float> wtime = hit->time_vector(); + std::vector<float> wwave = hit->wave_vector(); - // - // Check for overflow - if (m_removeOverflow && findOverflow(baseline, wtime, wwave)) { - ATH_MSG_INFO("Found waveform overflow"); - hit->set_status_bit(xAOD::WaveformStatus::WAVE_OVERFLOW); - } + ATH_MSG_DEBUG("Reconstruct channel " << hit->channel() + << " waveform from " << wtime.front() + << " to " << wtime.back()); - // - // Perform CB fit - WaveformFitResult cbfit = fitCBall(gfit, wtime, wwave); - if (! 
cbfit.valid) { - ATH_MSG_WARNING("CrystalBall fit failed!"); - // Still have gaussian parameters as an estimate - hit->set_status_bit(xAOD::WaveformStatus::CBFIT_FAILED); - } else { - hit->set_peak(cbfit.peak); - hit->set_mean(cbfit.mean); - hit->set_width(cbfit.sigma); - hit->set_integral(cbfit.integral); - hit->set_localtime(cbfit.time); - - hit->set_alpha(cbfit.alpha); - hit->set_nval(cbfit.nval); - } + // Fill values needed for fit (peak, mean, and sigma) + WaveformFitResult raw; + raw.peak = hit->peak(); + raw.mean = hit->mean(); + raw.sigma = hit->width(); - // - // Find time from clock - if (!clock || (clock->frequency() <= 0.)) { - hit->set_status_bit(xAOD::WaveformStatus::CLOCK_INVALID); - hit->set_bcid_time(-1.); - } else { - hit->set_bcid_time(clock->time_from_clock(hit->localtime())); + // + // Perform Gaussian fit to waveform + WaveformFitResult gfit = fitGaussian(raw, wtime, wwave); + if (! gfit.valid) { + // Lets try again with a more restricted width + ATH_MSG_WARNING( " Gaussian waveform fit failed with width " << raw.sigma << " try reducing width to 1 " ); + raw.sigma = 1.; + gfit = fitGaussian(raw, wtime, wwave); + if (!gfit.valid) { + hit->set_status_bit(xAOD::WaveformStatus::GFIT_FAILED); } + } - if (! m_findMultipleHits) break; - - } // End of loop over waveform data + // Fit results (or raw if it failed) + hit->set_peak(gfit.peak); + hit->set_mean(gfit.mean); + hit->set_width(gfit.sigma); + hit->set_integral(gfit.integral); + hit->set_localtime(gfit.time); + + // + // Check for overflow + if (m_removeOverflow && findOverflow(hit->baseline_mean(), wtime, wwave)) { + ATH_MSG_INFO("Found waveform overflow"); + hit->set_status_bit(xAOD::WaveformStatus::WAVE_OVERFLOW); + } + + // + // Perform CB fit + WaveformFitResult cbfit = fitCBall(gfit, wtime, wwave); + if (! 
cbfit.valid) { + ATH_MSG_WARNING("CrystalBall fit failed for channel " << hit->channel() << "!"); + // Still have gaussian parameters as an estimate + hit->set_status_bit(xAOD::WaveformStatus::CBFIT_FAILED); + } else { + hit->set_peak(cbfit.peak); + hit->set_mean(cbfit.mean); + hit->set_width(cbfit.sigma); + hit->set_integral(cbfit.integral); + hit->set_localtime(cbfit.time); + + hit->set_alpha(cbfit.alpha); + hit->set_nval(cbfit.nval); + } - ATH_MSG_DEBUG( "WaveformReconstructionTool finished for channel " - << raw_wave.channel() << " container size= " << container->size()); + ATH_MSG_DEBUG("Done reconstructing channel " << hit->channel() + << " waveform from " << wtime.front() << " to " << wtime.back()); - return StatusCode::SUCCESS; } -bool +// Returns location of peak in array wave +// Return value is -1 if peak is below threshold +int WaveformReconstructionTool::findPeak(WaveformBaselineData& baseline, - std::vector<float>& time, std::vector<float>& wave, - std::vector<float>& windowed_time, std::vector<float>& windowed_wave) const { + float threshold, + std::vector<float>& wave) const +{ ATH_MSG_DEBUG("findPeak called"); @@ -435,44 +482,31 @@ WaveformReconstructionTool::findPeak(WaveformBaselineData& baseline, ATH_MSG_DEBUG( "Found peak value " << maxval << " at position " << imax ); // Check if this is over threshold (in sigma) - if (maxval < m_peakThreshold*baseline.rms) { + if (maxval < threshold*baseline.rms) { ATH_MSG_DEBUG("Failed threshold"); - return false; + return -1; } - // Make a window around this peak, values are in bins, so units of 2ns - // Ensure our window is within the vector range - int lo_edge = ((int(imax) + m_windowStart) >= 0 ? (imax + m_windowStart) : 0); - int hi_edge = ((imax + m_windowStart + m_windowWidth) < wave.size() ? 
(imax + m_windowStart + m_windowWidth) : wave.size()); - - ATH_MSG_DEBUG("Windowing waveform from " << lo_edge << " to " << hi_edge); - windowed_time = std::vector<float> (time.begin()+lo_edge, time.begin()+hi_edge); - windowed_wave = std::vector<float> (wave.begin()+lo_edge, wave.begin()+hi_edge); - - // Remove these values from the original arrays so we can iterate - time.erase(time.begin()+lo_edge, time.begin()+hi_edge); - wave.erase(wave.begin()+lo_edge, wave.begin()+hi_edge); - - return true; + return imax; } bool -WaveformReconstructionTool::findOverflow(const WaveformBaselineData& base, +WaveformReconstructionTool::findOverflow(float baseline, std::vector<float>& time, std::vector<float>& wave) const { auto peakloc = std::max_element(wave.begin(), wave.end()); // If peak value is less than baseline, we have no overflow - if (*peakloc < int(base.mean)) return false; + if (*peakloc < baseline) return false; ATH_MSG_DEBUG("Removing overflows from waveform with length " << wave.size()); // We have an overflow, remove all elements that are overflowing unsigned int i = peakloc - wave.begin(); for (; i<wave.size(); i++) { - if (wave[i] < int(base.mean)) continue; + if (wave[i] < baseline) continue; - ATH_MSG_DEBUG("Removing position "<< i<< " with value " << wave[i] << " > " << int(base.mean)); + ATH_MSG_DEBUG("Removing position "<< i<< " with value " << wave[i] << " > " << baseline); // This is an overflow, remove elements time.erase(time.begin() + i); wave.erase(wave.begin() + i); @@ -646,6 +680,7 @@ WaveformReconstructionTool::findRawHitValues(const std::vector<float> time, cons double sum = 0.; double sum2 = 0.; for (unsigned int i=0; i<time.size(); i++) { + //ATH_MSG_DEBUG("findRawHitValues Time: " << time[i] << " Wave: " << wave[i]); tot += wave[i]; sum += time[i] * wave[i]; sum2 += time[i] * time[i] * wave[i]; @@ -770,7 +805,7 @@ WaveformReconstructionTool::fitCBall(const WaveformFitResult& gfit, TFitResultPtr cbfitptr = tg.Fit(&cbfunc, "QNS", ""); if 
(!cbfitptr->IsValid()) { - ATH_MSG_WARNING( " First Crystal Ball waveform fit failed! "); + ATH_MSG_DEBUG( " First Crystal Ball waveform fit failed! "); } // Now try releasing the tail parameter @@ -785,7 +820,7 @@ WaveformReconstructionTool::fitCBall(const WaveformFitResult& gfit, cbfit.valid = (cbfit.fit_status == 0); if (!cbfitptr->IsValid()) { - ATH_MSG_WARNING( " Crystal Ball waveform fit failed! "); + ATH_MSG_DEBUG( " Full Crystal Ball waveform fit failed! "); } else { // Improve estimation with fit results cbfit.peak = cbfitptr->Parameter(0); diff --git a/Waveform/WaveRecTools/src/WaveformReconstructionTool.h b/Waveform/WaveRecTools/src/WaveformReconstructionTool.h index 7a39f883434051173099d3702e9643fb64cc9f51..b23b60e9491ff2989601c436f4723ca7fa76dfc2 100644 --- a/Waveform/WaveRecTools/src/WaveformReconstructionTool.h +++ b/Waveform/WaveRecTools/src/WaveformReconstructionTool.h @@ -20,6 +20,9 @@ #include "WaveformBaselineData.h" #include "WaveformFitResult.h" +// Tool classes +#include "WaveformConditionsTools/IWaveformTimingTool.h" + //Gaudi #include "GaudiKernel/ToolHandle.h" @@ -37,16 +40,18 @@ class WaveformReconstructionTool: public extends<AthAlgTool, IWaveformReconstruc /// Retrieve the necessary services in initialize StatusCode initialize(); - /// Reconstruct all hits from waveform container - virtual StatusCode reconstructAll(const RawWaveformContainer& waveContainer, - const xAOD::WaveformClock* clock, - xAOD::WaveformHitContainer* hitContainer) const; + /// Reconstruct primary hits from waveform (in trigger window) + virtual StatusCode reconstructPrimary(const RawWaveform& wave, + xAOD::WaveformHitContainer* hitContainer) const; + + /// Reconstruct secondary hits from waveform (outside trigger window) + virtual StatusCode reconstructSecondary(const RawWaveform& wave, + xAOD::WaveformHitContainer* hitContainer) const; + + /// Set local hit times from LHC clock + virtual StatusCode setLocalTime(const xAOD::WaveformClock* clock, + 
xAOD::WaveformHitContainer* container) const; - /// Reconstruct hits from waveform - - virtual StatusCode reconstruct(const RawWaveform& wave, - const xAOD::WaveformClock* clock, - xAOD::WaveformHitContainer* hitContainer) const; private: @@ -54,6 +59,9 @@ class WaveformReconstructionTool: public extends<AthAlgTool, IWaveformReconstruc // Baseline Estimation Parameters BooleanProperty m_useSimpleBaseline{this, "UseSimpleBaseline", false}; + ToolHandle<IWaveformTimingTool> m_timingTool + {this, "WaveformTimingTool", "WaveformTimingTool"}; + // Minimum number of samples needed to calculate simple baseline // Just average these first n values IntegerProperty m_samplesForBaselineAverage{this, "SamplesForBaselineAverage", 40}; @@ -79,32 +87,40 @@ class WaveformReconstructionTool: public extends<AthAlgTool, IWaveformReconstruc FloatProperty m_baselineFitWindow{this, "BaselineFitWindow", 2.}; // - // Peak threshold (in sigma of baseline RMS) to find a hit - FloatProperty m_peakThreshold{this, "PeakThreshold", 10.}; + // Peak threshold (in sigma of baseline RMS) + // Primary threshold is requirement to try a fit for the in-time window + // Secondary threshold is requirement to produce a secondary hit + // from a local maximum + FloatProperty m_primaryPeakThreshold{this, "PrimaryPeakThreshold", 5.}; + FloatProperty m_secondaryPeakThreshold{this, "SecondaryPeakThreshold", 10.}; // // Window to define fitting range, in samples (2ns/sample) - IntegerProperty m_windowStart{this, "FitWindowStart", -15}; + IntegerProperty m_windowStart{this, "FitWindowStart", -20}; IntegerProperty m_windowWidth{this, "FitWindowWidth", 60}; // // Remove overflow values from CB fit BooleanProperty m_removeOverflow{this, "RemoveOverflow", true}; - // - // Look for more than one hit in each channel - BooleanProperty m_findMultipleHits{this, "FindMultipleHits", false}; - // // Fraction of peak to set local hit time - FloatProperty m_timingPeakFraction{this, "TimingPeakFraction", 0.45}; + FloatProperty 
m_timingPeakFraction{this, "TimingPeakFraction", 0.5}; // - // Ensure each channel has a waveform hit at time of most significant - // hit in the event - BooleanProperty m_ensureChannelHits{this, "EnsureChannelHits", true}; - // Max Time difference in ns to say a hit exists in a different channel - FloatProperty m_hitTimeDifference{this, "HitTimeDifference", 10.}; + // When looking for secondary hits with a primary found above threshold + // should we look before or after the primary hit? + BooleanProperty m_findSecondaryBefore{this, "FindSecondaryBefore", true}; + BooleanProperty m_findSecondaryAfter{this, "FindSecondaryAfter", false}; + + // Reco algorithms + // Fill hit with raw data from waveform + void fillRawHitValues(const RawWaveform& wave, + int lo_edge, int hi_edge, + xAOD::WaveformHit* hit) const; + + // Perform fits to WaveformHit data + void reconstructHit(xAOD::WaveformHit* hit) const; // Baseline algorithms WaveformBaselineData& findSimpleBaseline(const RawWaveform& wave) const; @@ -113,12 +129,10 @@ class WaveformReconstructionTool: public extends<AthAlgTool, IWaveformReconstruc xAOD::WaveformHit* hit) const; - // Find peak in wave, return windowed region in windowed_time and windowed_wave - // Windowed region is removed from original vectors - // Returns true if peak found, false if not - bool findPeak(WaveformBaselineData& baseline, - std::vector<float>& time, std::vector<float>& wave, - std::vector<float>& windowed_time, std::vector<float>& windowed_wave) const; + // Find peak in wave, return index to peak position, or -1 if + // peak isn't greater than threshold + int findPeak(WaveformBaselineData& baseline, float threshold, + std::vector<float>& wave) const; // Get estimate from waveform data itself WaveformFitResult& findRawHitValues(const std::vector<float> time, @@ -130,7 +144,7 @@ class WaveformReconstructionTool: public extends<AthAlgTool, IWaveformReconstruc const std::vector<float> wave) const; // Find overflows and remove points from 
arrays - bool findOverflow(const WaveformBaselineData& baseline, + bool findOverflow(float baseline, std::vector<float>& time, std::vector<float>& wave) const; // Fit windowed data to CrystalBall function @@ -138,12 +152,6 @@ class WaveformReconstructionTool: public extends<AthAlgTool, IWaveformReconstruc const std::vector<float> time, const std::vector<float> wave) const; - - /// Create hit in all channels at time of peak signal - void ensureHits(const RawWaveformContainer& waveContainer, - const xAOD::WaveformClock* clock, - xAOD::WaveformHitContainer* hitContainer) const; - }; #endif // WAVERECTOOLS_WAVEFORMRECONSTRUCTIONTOOL_H diff --git a/Waveform/WaveformConditions/WaveCondUtils/CMakeLists.txt b/Waveform/WaveformConditions/WaveCondUtils/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..1c295bd9cab84b213f7ec0227a3cb4f95486edc8 --- /dev/null +++ b/Waveform/WaveformConditions/WaveCondUtils/CMakeLists.txt @@ -0,0 +1,9 @@ +################################################################################ +# Package: WaveCondUtils +################################################################################ + +# Declare the package name: +atlas_subdir( WaveCondUtils ) + +atlas_install_scripts( scripts/*.sh scripts/*.py ) + diff --git a/Waveform/WaveformConditions/WaveCondUtils/scripts/makeTimingDB.py b/Waveform/WaveformConditions/WaveCondUtils/scripts/makeTimingDB.py new file mode 100755 index 0000000000000000000000000000000000000000..b3a1c64a23ad23ae70eaa000e3cbd76064bebfcd --- /dev/null +++ b/Waveform/WaveformConditions/WaveCondUtils/scripts/makeTimingDB.py @@ -0,0 +1,221 @@ +#!/bin/env python + +# Requires python 3.8 or higher +# +# Can test results with +# AtlCoolConsole.py "sqlite://;schema=waveform_reco.db;dbname=OFLP200" + +filename = 'waveform_reco.db' + +# Nominal trigger time in ns +nominal_data = { + 0: 820., + 4272: 830., + 6525: 820. 
+} + +offset_channels = 16 + +# Run +# 0 - initial data +# 3395 - Testbeam +# + +ehn1_offsets = [ -20., -20., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0. ] +ti12_offsets = [ -20., -20., -20., -20., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0. ] + +offset_data = { + 0: [ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0. ], +# Initial TI12 + 1324: [ -10., -10., -10., -10., 0., 0., 0., 0., 0., 0., 0., 0., 18., 18., 0., 0. ], +# Testbeam geometry + 3247: [ -10., -10., -10., -10., -10., -10., 15., 15., -20., -20., 0., 0., 0., 0., 0., 0. ], +# TI12 + 4272: ti12_offsets, +# EHN1 (interleaved with TI12 running) + 4360: ehn1_offsets, + 4399: ti12_offsets, + 4409: ehn1_offsets, + 4411: ti12_offsets, + 4429: ehn1_offsets, + 4439: ti12_offsets, + 4876: ehn1_offsets, + 4892: ti12_offsets, + 4904: ehn1_offsets, + 4912: ti12_offsets, + 4954: ehn1_offsets, + 4989: ti12_offsets, + 4991: ehn1_offsets, + 4993: ti12_offsets, + 4996: ehn1_offsets, + 4997: ti12_offsets, + 5042: ehn1_offsets, + 5050: ti12_offsets, +# IFT and VetoNu installed + 6525: [ -10., -10., -10., -10., -25., -25., 0., 0., 0., 0., 0., 0., 18., 18., 0., 0. 
] +} + +attr_list_desc = '<timeStamp>run-lumi</timeStamp><addrHeader><address_header service_type="71" clid="40774348" /></addrHeader><typeName>AthenaAttributeList</typeName>' + +cond_attr_list_desc = '<timeStamp>run-lumi</timeStamp><addrHeader><address_header clid="1238547719" service_type="71" /></addrHeader><typeName>CondAttrListCollection</typeName>' + +maxInt32 = 0xFFFFFFFF + + +# Look for data entry errors + +print('Validating nominal data') + +lastRun = -1 +for run, data in nominal_data.items(): + assert isinstance(run, int), 'Run number is not integer' + assert isinstance(data, float), 'Time is not float' + assert run > lastRun, 'Run numbers out of order' + assert run <= maxInt32, 'Run number out of range' + lastRun = run + +print('Validating offset data') +lastRun = -1 +for run, data in offset_data.items(): + assert isinstance(run, int), 'Run number is not integer' + assert run > lastRun, 'Run numbers out of order' + assert run <= maxInt32, 'Run number out of range' + lastRun = run + assert len(data) == offset_channels, 'Offset data does not have '+str(offset_channels)+' entries' + for i in range(offset_channels): + assert isinstance(data[i], float), 'Offset time is not float' + +# Data looks OK + + +from PyCool import cool + +dbSvc = cool.DatabaseSvcFactory.databaseService() +connectString = f'sqlite://;schema={filename};dbname=CONDBR3' + +print('Creating database') + +dbSvc.dropDatabase( connectString ) +db = dbSvc.createDatabase( connectString ) + +# Nominal trigger times +nominalSpec = cool.RecordSpecification() +nominalSpec.extend( 'NominalTriggerTime', cool.StorageType.Float ) + +nominalFolderSpec = cool.FolderSpecification(cool.FolderVersioning.SINGLE_VERSION, nominalSpec) +nominalFolder = db.createFolder('/WAVE/DAQ/Timing', nominalFolderSpec, attr_list_desc, True) + +# There should be one record entered per IOV +lastValid = cool.ValidityKeyMax +for firstValidRun, time in reversed(nominal_data.items()): + firstValid = (firstValidRun << 32) + 
nominalRecord = cool.Record(nominalSpec) + nominalRecord[ 'NominalTriggerTime' ] = float(time) + nominalFolder.storeObject( firstValid, lastValid, nominalRecord, cool.ChannelId(0)) + lastValid = ((firstValidRun - 1) << 32) | (cool.ValidityKeyMax & 0x00000000FFFFFFFF) + + +# Trigger offset times + +offsetSpec = cool.RecordSpecification() +offsetSpec.extend( 'TriggerOffset', cool.StorageType.Float ) + +offsetFolderSpec = cool.FolderSpecification(cool.FolderVersioning.SINGLE_VERSION, offsetSpec) +offsetFolder = db.createFolder('/WAVE/DAQ/TimingOffset', offsetFolderSpec, cond_attr_list_desc, True) + +# There should be one record entered per IOV +lastValid = cool.ValidityKeyMax +for firstValidRun, offset_list in reversed(offset_data.items()): + firstValid = (firstValidRun << 32) + for channel in range(offset_channels): + offsetRecord = cool.Record(offsetSpec) + offsetRecord[ 'TriggerOffset' ] = float(offset_list[channel]) + offsetFolder.storeObject( firstValid, lastValid, offsetRecord, cool.ChannelId(channel) ) + + lastValid = ((firstValidRun - 1) << 32) | (cool.ValidityKeyMax & 0x00000000FFFFFFFF) + + +db.closeDatabase() + +print('Database completed') + +print('Working on MC database') + +# Nominal data +nominal_data = { + 0: 820. +} +# No offsets by default +offset_data = { + 0: [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.] 
+} + +# Validate again just in case +print('Validating nominal data') + +lastRun = -1 +for run, data in nominal_data.items(): + assert isinstance(run, int), 'Run number is not integer' + assert isinstance(data, float), 'Time is not float' + assert run > lastRun, 'Run numbers out of order' + assert run <= maxInt32, 'Run number out of range' + lastRun = run + +print('Validating offset data') +lastRun = -1 +for run, data in offset_data.items(): + assert isinstance(run, int), 'Run number is not integer' + assert run > lastRun, 'Run numbers out of order' + assert run <= maxInt32, 'Run number out of range' + lastRun = run + assert len(data) == offset_channels, 'Offset data does not have '+str(offset_channels)+' entries' + for i in range(offset_channels): + assert isinstance(data[i], float), 'Offset time is not float' + +# Data looks OK + +connectString = f'sqlite://;schema={filename};dbname=OFLP200' + +dbSvc.dropDatabase( connectString ) +db = dbSvc.createDatabase( connectString ) + +# Nominal trigger times +nominalSpec = cool.RecordSpecification() +nominalSpec.extend( 'NominalTriggerTime', cool.StorageType.Float ) + +nominalFolderSpec = cool.FolderSpecification(cool.FolderVersioning.SINGLE_VERSION, nominalSpec) +nominalFolder = db.createFolder('/WAVE/DAQ/Timing', nominalFolderSpec, attr_list_desc, True) + +# There should be one record entered per IOV +lastValid = cool.ValidityKeyMax +for firstValidRun, time in reversed(nominal_data.items()): + firstValid = (firstValidRun << 32) + nominalRecord = cool.Record(nominalSpec) + nominalRecord[ 'NominalTriggerTime' ] = float(time) + nominalFolder.storeObject( firstValid, lastValid, nominalRecord, cool.ChannelId(0)) + lastValid = ((firstValidRun - 1) << 32) | (cool.ValidityKeyMax & 0x00000000FFFFFFFF) + + +# Trigger offset times + +offsetSpec = cool.RecordSpecification() +offsetSpec.extend( 'TriggerOffset', cool.StorageType.Float ) + +offsetFolderSpec = cool.FolderSpecification(cool.FolderVersioning.SINGLE_VERSION, offsetSpec) 
+offsetFolder = db.createFolder('/WAVE/DAQ/TimingOffset', offsetFolderSpec, cond_attr_list_desc, True) + +# There should be one record entered per IOV +lastValid = cool.ValidityKeyMax +for firstValidRun, offset_list in reversed(offset_data.items()): + firstValid = (firstValidRun << 32) + for channel in range(offset_channels): + offsetRecord = cool.Record(offsetSpec) + offsetRecord[ 'TriggerOffset' ] = float(offset_list[channel]) + offsetFolder.storeObject( firstValid, lastValid, offsetRecord, cool.ChannelId(channel) ) + + lastValid = ((firstValidRun - 1) << 32) | (cool.ValidityKeyMax & 0x00000000FFFFFFFF) + + +db.closeDatabase() + +print('Database completed') diff --git a/Waveform/WaveformConditions/WaveCondUtils/scripts/wave_timing_check.py b/Waveform/WaveformConditions/WaveCondUtils/scripts/wave_timing_check.py new file mode 100755 index 0000000000000000000000000000000000000000..6c8f6332ff42e1ffc06fb168d290539c1db89208 --- /dev/null +++ b/Waveform/WaveformConditions/WaveCondUtils/scripts/wave_timing_check.py @@ -0,0 +1,337 @@ +#!/usr/bin/env python3 +# +import os +import sys +import math +import array +import itertools + +# Triggers: 0x01 - calo, 0x02 - veto, 0x03 - timing, 0x10 - random + +def usage(): + print("Usage: timing_check.py <filename>|<dirname> [triggermask]") + +if len(sys.argv) == 1: + usage() + sys.exit(-1) + +# Extract trigger mask from the command line if given +if len(sys.argv) == 3: + trigmask = int(sys.argv[2]) + extra = f"_{trigmask}" +else: + trigmask = 0xFF + extra = '' + +from pathlib import Path + +import ROOT +ROOT.xAOD.Init().ignore() +ROOT.xAOD.AuxContainerBase() +os.environ["XAOD_ACCESSTRACER_FRACTION"] = "0.0" + +# +# Open file or files +pathname = Path(sys.argv[1]) + +# Is this a directory? 
+if pathname.is_dir(): + print(f"Opening files in directory {pathname.name}") + + t2 = ROOT.TChain("CollectionTree") + nfiles = t2.Add(str(pathname)+'/Faser-Physics*.root') + + if (nfiles == 0): + print(f"TChain found no files!") + usage() + sys.exit(0) + + # Make transient tree + t1 = ROOT.xAOD.MakeTransientTree(t2) + + # Make output file name + outfile = pathname.name + "_timing"+extra+".pdf" + + print(f"TChain found {nfiles} files with {t2.GetEntries()} events") + + avperfile = t2.GetEntries() / nfiles + +# Is this a file? +elif pathname.is_file(): + print(f"Opening file {pathname.name}") + + t2 = ROOT.TChain("CollectionTree") + nfiles = t2.Add(str(pathname)) + + if (nfiles != 1): + print(f"TChain error opening file!") + usage() + sys.exit(0) + + print(f"Opened file with {t2.GetEntries()} events") + + avperfile = t2.GetEntries() + + # Make transient tree + t1 = ROOT.xAOD.MakeTransientTree(t2) + + # Make outfile name from input + outfile = pathname.stem + "_timing"+extra+".pdf" + +# Neither? +else: + print(f"Can't understand {pathname.name}") + usage() + sys.exit(-1) + +class ClockPlots: + + def __init__(self): + + # Ranges for plots + self.freq_bins = 80 + self.freq_lo = 40.0 + self.freq_hi = 40.2 + + self.th_bins = 100 + + def init(self, tree): + + self.h_freq = ROOT.TH1I("", "Clock Frequency", self.freq_bins, self.freq_lo, self.freq_hi) + self.h_freq.GetXaxis().SetTitle("Clock Frequency (MHz)") + self.h_freq.GetYaxis().SetTitle("Events") + #self.h_freq.Sumw2() + + self.h_phase = ROOT.TH1I("", "Clock Phase", 60, 2*(-3.1416), 2*3.1416) + self.h_phase.GetXaxis().SetTitle("Clock Phase") + self.h_phase.GetYaxis().SetTitle("Events") + + self.h_amp = ROOT.TH1I("", "Amplitude", 50, 0, 2000.) + self.h_amp.GetXaxis().SetTitle("Clock Amplitude (mV)") + self.h_amp.GetYaxis().SetTitle("Events") + + self.h_off = ROOT.TH1I("", "Offset", 50, 0, 2000.) 
+ self.h_off.GetXaxis().SetTitle("Clock Offset (mV)") + self.h_off.GetYaxis().SetTitle("Events") + + def fill(self, tree): + + # First, create the histograms + self.init(tree) + + # Iterate over all entries + nev = tree.GetEntries() + iev = 0 + for ev in tree: + self.h_freq.Fill(ev.WaveformClock.frequency()) + self.h_phase.Fill(ev.WaveformClock.phase()) + self.h_amp.Fill(ev.WaveformClock.amplitude()) + self.h_off.Fill(ev.WaveformClock.dc_offset()) + + # Protect against reading off the end + iev += 1 + if iev == nev: break + + def draw(self, canvas, outfile): + + # Under/overflows, mean, rms, and entries + ROOT.gStyle.SetOptStat(111110) + + canvas.Clear() + canvas.Divide(2,2) + canvas.cd(1) + self.h_freq.Draw() + canvas.cd(2) + self.h_phase.Draw() + canvas.cd(3) + self.h_amp.Draw() + canvas.cd(4) + self.h_off.Draw() + canvas.Update() + canvas.Print(outfile) + + def print_stats(self): + + freq_mean = self.h_freq.GetMean() + freq_rms = self.h_freq.GetStdDev() + freq_n = self.h_freq.GetEntries() + print(f"LHC Clock: {freq_mean:.6} +/- {freq_rms/math.sqrt(freq_n):.6}") + +class WavePlots: + + def __init__(self, triggerMask=0xFF): + + # Number of waveforms channels + self.nchan = 15 + + # Trigger mask + self.mask = triggerMask + + self.chan_hist_list = [] + self.log_list = [] + + # Maaximum peak value + self.peak_max = 16000. 
+ + def init(self, tree): + + # Keyed by channel + self.createChannelHist('h_localtime', 40, 750, 950, "Local Time") + self.createChannelHist('h_triggertime', 40, -80, 80, "Trigger Time") + self.createChannelHist('h_bcidtime', 50, -10, 40, "BCID Time") + + def createChannelHist(self, name, nbins, xlo, xhi, xtitle='', ytitle='Waveforms', stats=True, log=False): + + setattr(self, name, dict()) + x = getattr(self, name) + for chan in range(self.nchan): + x[chan] = ROOT.TH1I("", "", nbins, xlo, xhi) + if len(xtitle) > 0: + x[chan].GetXaxis().SetTitle(f"Ch {chan} {xtitle}") + if len(ytitle) > 0: + x[chan].GetYaxis().SetTitle(ytitle) + x[chan].SetStats(stats) + + self.chan_hist_list.append(name) + if log: + self.log_list.append(name) + + def fill(self, tree): + + # First, create the histograms + self.init(tree) + + # Iterate over all entries + nev = tree.GetEntries() + iev = 0 + for ev in tree: + + time = ev.EventInfo.timeStamp() + trig = ev.FaserTriggerData.tap() + + if not (trig & self.mask): + iev += 1 + if iev == nev: + break + else: + continue + + # Process waveforms + try: + wave_list = itertools.chain(ev.CaloWaveformHits, ev.PreshowerWaveformHits, ev.TriggerWaveformHits, ev.VetoWaveformHits, ev.VetoNuWaveformHits) + except: + wave_list = itertools.chain(ev.CaloWaveformHits, ev.PreshowerWaveformHits) + + for wave in wave_list: + + channel = wave.channel() + + # Check if failed threshold + if wave.status_bit(0): continue + + # Fill fit parameters + self.h_localtime[channel].Fill(wave.localtime()) + self.h_triggertime[channel].Fill(wave.trigger_time()) + self.h_bcidtime[channel].Fill(wave.bcid_time()) + + # End of loop over waveforms + + # Protect against reading off the end + iev+=1 + if iev == nev: break + + # End of loop over events + + # Put overflows in last bin of plots + self.fixOverflow(self.h_localtime) + self.fixOverflow(self.h_triggertime) + + def fixOverflow(self, hdict): + + for h in hdict.values(): + + if h.GetNbinsY() == 1: + self.fixOverflow1D(h) + 
else: + self.fixOverflow2D(h) + + def fixOverflow1D(self, hist): + nbins = hist.GetNbinsX() + nlast = hist.GetBinContent(nbins) + nover = hist.GetBinContent(nbins+1) + hist.SetBinContent(nbins, nlast+nover) + + def fixOverflow2D(self, hist): + nbx = hist.GetNbinsX() + nby = hist.GetNbinsY() + + for ibinx in range(nbx+1): + nlast = hist.GetBinContent(ibinx, nby) + nover = hist.GetBinContent(ibinx, nby+1) + hist.SetBinContent(ibinx, nby, nlast+nover) + + for ibiny in range(nby+1): + nlast = hist.GetBinContent(nbx, ibiny) + nover = hist.GetBinContent(nbx+1, ibiny) + hist.SetBinContent(nbx, ibiny, nlast+nover) + + # Also the double overflow + nlast = hist.GetBinContent(nbx, nby) + nover = hist.GetBinContent(nbx+1, nby+1) + hist.SetBinContent(nbx, nby, nlast+nover) + + + def draw(self, canvas, outfile): + + # + # Plot channel plots + for name in self.chan_hist_list: + canvas.Clear() + canvas.Divide(4,4) + + if name in self.log_list: + setlog = True + else: + setlog = False + + for chan in range(self.nchan): + canvas.cd(chan+1) + x = getattr(self, name) + x[chan].Draw() + if setlog: + ROOT.gPad.SetLogy(True) + else: + ROOT.gPad.SetLogy(False) + + canvas.Print(outfile) + + def print_stats(self): + + for chan in range(self.nchan): + local_mean = self.h_localtime[chan].GetMean() + trig_mean = self.h_triggertime[chan].GetMean() + bcid_mean = self.h_bcidtime[chan].GetMean() + print(f"Chan {chan:2}: Entries {int(self.h_localtime[chan].GetEntries()):8} Local {local_mean:6.1f} Trigger {trig_mean:6.2f} BCID {bcid_mean:6.2f}") + +#print("xAOD tree") +#t1.Print() +#print("non xAOD tree") +#t2.Print() + +cp = ClockPlots() +cp.fill(t1) + +# Triggers: 0x01 - calo, 0x02 - veto, 0x03 - timing, 0x10 - random +wp = WavePlots(triggerMask=trigmask) +wp.fill(t1) + +c = ROOT.TCanvas() +c.Print(outfile+"[") + +cp.draw(c, outfile) +wp.draw(c, outfile) + +c.Print(outfile+"]") + +cp.print_stats() +wp.print_stats() diff --git 
a/Waveform/WaveformConditions/WaveformConditionsTools/WaveformConditionsTools/IWaveformCableMappingTool.h b/Waveform/WaveformConditions/WaveformConditionsTools/WaveformConditionsTools/IWaveformCableMappingTool.h index 8886506e5a04efe269bb75ca91fc36c8fe283d85..461c087e0fb3957e90c1a1e50723e87a2baff996 100644 --- a/Waveform/WaveformConditions/WaveformConditionsTools/WaveformConditionsTools/IWaveformCableMappingTool.h +++ b/Waveform/WaveformConditions/WaveformConditionsTools/WaveformConditionsTools/IWaveformCableMappingTool.h @@ -2,7 +2,7 @@ Copyright (C) 2002-2019 CERN for the benefit of the ATLAS and FAsER collaborations */ -/** @file ISCT_CableMappingTool.h Interface file for SCT_CableMappingTool. +/** @file IWaveformCableMappingTool.h Interface file for WaveformCableMappingTool. */ // Multiple inclusion protection @@ -38,6 +38,10 @@ class IWaveformCableMappingTool: virtual public IAlgTool { virtual WaveformCableMap getCableMapping(const EventContext& ctx) const = 0; virtual WaveformCableMap getCableMapping(void) const = 0; + virtual int getChannelMapping(const EventContext& ctx, const Identifier id) const = 0; + virtual int getChannelMapping(const Identifier id) const = 0; + + }; //---------------------------------------------------------------------- diff --git a/Waveform/WaveformConditions/WaveformConditionsTools/WaveformConditionsTools/IWaveformRangeTool.h b/Waveform/WaveformConditions/WaveformConditionsTools/WaveformConditionsTools/IWaveformRangeTool.h new file mode 100644 index 0000000000000000000000000000000000000000..316d9c4953a100e83c4ec1a0a052714372e1fdaf --- /dev/null +++ b/Waveform/WaveformConditions/WaveformConditionsTools/WaveformConditionsTools/IWaveformRangeTool.h @@ -0,0 +1,41 @@ +/* + Copyright (C) 2002-2019 CERN for the benefit of the ATLAS and FAsER collaborations +*/ + +/** @file IWaveformRangeTool.h Interface file for WaveformRangeTool. 
+ */ + +// Multiple inclusion protection +#ifndef IWAVEFORMRANGETOOL +#define IWAVEFORMRANGETOOL + +//STL includes +#include <map> + +//Gaudi Includes +#include "GaudiKernel/IAlgTool.h" +#include "GaudiKernel/EventContext.h" + +// ADC range in volts indexed by digitizer channel number +typedef std::map<int, float> WaveformRangeMap; + +class IWaveformRangeTool: virtual public IAlgTool { + + public: + + //----------Public Member Functions----------// + // Structors + virtual ~IWaveformRangeTool() = default; //!< Destructor + + /// Creates the InterfaceID and interfaceID() method + DeclareInterfaceID(IWaveformRangeTool, 1, 0); + + // Methods to return cable-mapping data + // Key is digitizer channel, pair is <type, identifier> + virtual WaveformRangeMap getRangeMapping(const EventContext& ctx) const = 0; + virtual WaveformRangeMap getRangeMapping(void) const = 0; + +}; + +//---------------------------------------------------------------------- +#endif // WAVEFORMRANGETOOL diff --git a/Waveform/WaveformConditions/WaveformConditionsTools/WaveformConditionsTools/IWaveformTimingTool.h b/Waveform/WaveformConditions/WaveformConditionsTools/WaveformConditionsTools/IWaveformTimingTool.h new file mode 100644 index 0000000000000000000000000000000000000000..de2a2dcbd1dde01c8a48e2a6a67822efc13e7519 --- /dev/null +++ b/Waveform/WaveformConditions/WaveformConditionsTools/WaveformConditionsTools/IWaveformTimingTool.h @@ -0,0 +1,50 @@ +/* + Copyright (C) 2002-2019 CERN for the benefit of the ATLAS and FAsER collaborations +*/ + +/** @file IWaveformTimingTool.h Interface file for WaveformTimingTool. + * + * Provides times and offsets (in ns) for different channels in the + * waveform digitizer. This aligns the input signals for different + * path lengths and cable delays. 
+ * + */ + +// Multiple inclusion protection +#ifndef IWAVEFORMTIMINGTOOL +#define IWAVEFORMTIMINGTOOL + +//STL includes +#include <map> + +//Gaudi Includes +#include "GaudiKernel/IAlgTool.h" +#include "GaudiKernel/EventContext.h" + + +class IWaveformTimingTool: virtual public IAlgTool { + + public: + + //----------Public Member Functions----------// + // Structors + virtual ~IWaveformTimingTool() = default; //!< Destructor + + /// Creates the InterfaceID and interfaceID() method + DeclareInterfaceID(IWaveformTimingTool, 1, 0); + + // Methods to return timing data + + // Nominal trigger time (in ns) in the digitizer readout + virtual float nominalTriggerTime(void) const = 0; + virtual float nominalTriggerTime(const EventContext& ctx) const = 0; + + // Channel-by-channel corrections to the nominal trigger time (in ns) + // A given channel should be centered at nominal + offset + virtual float triggerTimeOffset(int channel) const = 0; + virtual float triggerTimeOffset(const EventContext& ctx, int channel) const = 0; + +}; + +//---------------------------------------------------------------------- +#endif // WAVEFORMTIMINGTOOL diff --git a/Waveform/WaveformConditions/WaveformConditionsTools/python/WaveformRangeConfig.py b/Waveform/WaveformConditions/WaveformConditionsTools/python/WaveformRangeConfig.py new file mode 100644 index 0000000000000000000000000000000000000000..6450bcec88b3cb8c7194b01edeb59aa6b97e242f --- /dev/null +++ b/Waveform/WaveformConditions/WaveformConditionsTools/python/WaveformRangeConfig.py @@ -0,0 +1,27 @@ +""" Define methods to configure WaveformRangeTool + +Copyright (C) 2022 CERN for the benefit of the FASER collaboration +""" +from AthenaConfiguration.ComponentAccumulator import ComponentAccumulator +from AthenaConfiguration.ComponentFactory import CompFactory +from IOVDbSvc.IOVDbSvcConfig import addFolders +WaveformRangeTool=CompFactory.WaveformRangeTool + +def WaveformRangeToolCfg(flags, name="WaveformRangeTool", **kwargs): + """ Return a 
configured WaveformRangeTool""" + return WaveformRangeTool(name, **kwargs) + +def WaveformRangeCfg(flags, **kwargs): + """ Return configured ComponentAccumulator and tool for Waveform Range + + WaveformRangeTool may be provided in kwargs + """ + + acc = ComponentAccumulator() + # tool = kwargs.get("WaveformRangeTool", WaveformRangeTool(flags)) + # Probably need to figure this out! + dbInstance = kwargs.get("dbInstance", "TDAQ_OFL") + dbFolder = kwargs.get("dbFolder", "/WAVE/DAQ/Range") + acc.merge(addFolders(flags, dbFolder, dbInstance, className="CondAttrListCollection")) + return acc + diff --git a/Waveform/WaveformConditions/WaveformConditionsTools/python/WaveformTimingConfig.py b/Waveform/WaveformConditions/WaveformConditionsTools/python/WaveformTimingConfig.py new file mode 100644 index 0000000000000000000000000000000000000000..3b95ed388208d7d4ab51a3d5621ab868d438c433 --- /dev/null +++ b/Waveform/WaveformConditions/WaveformConditionsTools/python/WaveformTimingConfig.py @@ -0,0 +1,32 @@ +""" Define methods to configure WaveformTimingTool + +Copyright (C) 2022 CERN for the benefit of the FASER collaboration +""" +from AthenaConfiguration.ComponentAccumulator import ComponentAccumulator +from AthenaConfiguration.ComponentFactory import CompFactory +from IOVDbSvc.IOVDbSvcConfig import addFolders +WaveformTimingTool=CompFactory.WaveformTimingTool + +def WaveformTimingToolCfg(flags, name="WaveformTimingTool", **kwargs): + """ Return a configured WaveformTimingTool""" + return WaveformTimingTool(name, **kwargs) + +def WaveformTimingCfg(flags, **kwargs): + """ Return configured ComponentAccumulator and tool for Waveform Timing + + WaveformTimingTool may be provided in kwargs + """ + + acc = ComponentAccumulator() + # tool = kwargs.get("WaveformTimingTool", WaveformTimingTool(flags)) + # Probably need to figure this out! 
+ dbInstance = kwargs.get("dbInstance", "TRIGGER_OFL") + if flags.Input.isMC: + dbname = "OFLP200" + else: + dbname = "CONDBR3" + + acc.merge(addFolders(flags, "/WAVE/DAQ/Timing", dbInstance, className="AthenaAttributeList", db=dbname)) + acc.merge(addFolders(flags, "/WAVE/DAQ/TimingOffset", dbInstance, className="CondAttrListCollection", db=dbname)) + return acc + diff --git a/Waveform/WaveformConditions/WaveformConditionsTools/src/WaveformCableMappingTool.cxx b/Waveform/WaveformConditions/WaveformConditionsTools/src/WaveformCableMappingTool.cxx index a846fe8da6a51bd3293f02b73cd960439d8d77b5..530857354d1e1e82f5c522a72e8b6e0c7d6b5269 100644 --- a/Waveform/WaveformConditions/WaveformConditionsTools/src/WaveformCableMappingTool.cxx +++ b/Waveform/WaveformConditions/WaveformConditionsTools/src/WaveformCableMappingTool.cxx @@ -27,6 +27,7 @@ WaveformCableMappingTool::initialize() { // Set up helpers ATH_CHECK(detStore()->retrieve(m_ecalID, "EcalID")); ATH_CHECK(detStore()->retrieve(m_vetoID, "VetoID")); + ATH_CHECK(detStore()->retrieve(m_vetoNuID, "VetoNuID")); ATH_CHECK(detStore()->retrieve(m_triggerID, "TriggerID")); ATH_CHECK(detStore()->retrieve(m_preshowerID, "PreshowerID")); @@ -98,6 +99,9 @@ WaveformCableMappingTool::getCableMapping(const EventContext& ctx) const { else if (det_type == "veto") { identifier = m_vetoID->pmt_id(stationVal, plateVal, pmtVal); } + else if (det_type == "vetonu") { + identifier = m_vetoNuID->pmt_id(stationVal, plateVal, pmtVal); + } else if (det_type == "trigger") { identifier = m_triggerID->pmt_id(stationVal, plateVal, pmtVal); } @@ -135,5 +139,98 @@ WaveformCableMappingTool::getCableMapping(void) const { return getCableMapping(ctx); } +//---------------------------------------------------------------------- +int +WaveformCableMappingTool::getChannelMapping(const EventContext& ctx, const Identifier id) const { + // Print where you are + ATH_MSG_DEBUG("in getChannelMapping()"); + int channel = -1; + + // Read Cond Handle + 
SG::ReadCondHandle<CondAttrListCollection> readHandle{m_readKey, ctx}; + const CondAttrListCollection* readCdo{*readHandle}; + if (readCdo==nullptr) { + ATH_MSG_FATAL("Null pointer to the read conditions object"); + return channel; + } + // Get the validitiy range + EventIDRange rangeW; + if (not readHandle.range(rangeW)) { + ATH_MSG_FATAL("Failed to retrieve validity range for " << readHandle.key()); + return channel; + } + ATH_MSG_DEBUG("Size of CondAttrListCollection " << readHandle.fullKey() << " readCdo->size()= " << readCdo->size()); + ATH_MSG_DEBUG("Range of input is " << rangeW); + + // Read mapping info + CondAttrListCollection::const_iterator attrList{readCdo->begin()}; + CondAttrListCollection::const_iterator end{readCdo->end()}; + // CondAttrListCollection doesn't support C++11 type loops, no generic 'begin' + for (; attrList!=end; ++attrList) { + // A CondAttrListCollection is a map of ChanNum and AttributeList + CondAttrListCollection::ChanNum channelNumber{attrList->first}; + const CondAttrListCollection::AttributeList &payload{attrList->second}; + if (payload.exists("type") and not payload["type"].isNull()) { + + std::string det_type{payload["type"].data<std::string>()}; + int stationVal{payload["station"].data<int>()}; + int plateVal {payload["plate"].data<int>()}; + int rowVal {payload["row"].data<int>()}; + int moduleVal {payload["module"].data<int>()}; + int pmtVal {payload["pmt"].data<int>()}; + Identifier identifier; + + // Ugh, cant use switch statement with strings + // Must do this using an if ladder + if (det_type == "calo") { + identifier = m_ecalID->pmt_id(rowVal, moduleVal, pmtVal); + } + else if (det_type == "veto") { + identifier = m_vetoID->pmt_id(stationVal, plateVal, pmtVal); + } + else if (det_type == "vetonu") { + identifier = m_vetoNuID->pmt_id(stationVal, plateVal, pmtVal); + } + else if (det_type == "trigger") { + identifier = m_triggerID->pmt_id(stationVal, plateVal, pmtVal); + } + else if (det_type == "preshower") { + 
identifier = m_preshowerID->pmt_id(stationVal, plateVal, pmtVal); + } + else if (det_type == "clock") { + // No valid identifiers for these + identifier = -1; + } + else if (det_type == "none") { + identifier = -1; + } + else { + ATH_MSG_WARNING("Detector type " << det_type << " not known for channel " << channelNumber << "!"); + det_type = std::string("none"); + identifier = -1; + } + + // Is this the identifier we are looking for? + if (id != identifier) continue; + + ATH_MSG_DEBUG("Mapped identifier " << det_type << " ID: " << identifier << " to digitizer channel " << channelNumber); + + channel = channelNumber; + break; + } + + } // End of loop over attributes + + if (channel < 0) + ATH_MSG_WARNING("No channel found for identifier " << id << "!"); + + return channel; +} + +int +WaveformCableMappingTool::getChannelMapping(const Identifier id) const { + const EventContext& ctx{Gaudi::Hive::currentContext()}; + return getChannelMapping(ctx, id); +} diff --git a/Waveform/WaveformConditions/WaveformConditionsTools/src/WaveformCableMappingTool.h b/Waveform/WaveformConditions/WaveformConditionsTools/src/WaveformCableMappingTool.h index 19e6e6d9b5f294692ba0496b1594741494ef223a..8443cb1815e4db7a9c0181515b5fd405cd27d48c 100644 --- a/Waveform/WaveformConditions/WaveformConditionsTools/src/WaveformCableMappingTool.h +++ b/Waveform/WaveformConditions/WaveformConditionsTools/src/WaveformCableMappingTool.h @@ -21,6 +21,7 @@ #include "Identifier/Identifier.h" #include "FaserCaloIdentifier/EcalID.h" #include "ScintIdentifier/VetoID.h" +#include "ScintIdentifier/VetoNuID.h" #include "ScintIdentifier/TriggerID.h" #include "ScintIdentifier/PreshowerID.h" @@ -56,6 +57,11 @@ class WaveformCableMappingTool: public extends<AthAlgTool, IWaveformCableMapping virtual WaveformCableMap getCableMapping(const EventContext& ctx) const override; virtual WaveformCableMap getCableMapping(void) const override; + // Reverse mapping, reads idenfifier and returns digitizer channel + // Returns -1 if 
match not found for given identifier + virtual int getChannelMapping(const EventContext& ctx, const Identifier id) const override; + virtual int getChannelMapping(const Identifier id) const override; + private: // Read Cond Handle SG::ReadCondHandleKey<CondAttrListCollection> m_readKey{this, "ReadKey", "/WAVE/DAQ/CableMapping", "Key of input cabling folder"}; @@ -65,6 +71,7 @@ class WaveformCableMappingTool: public extends<AthAlgTool, IWaveformCableMapping // ID helpers const EcalID* m_ecalID{nullptr}; const VetoID* m_vetoID{nullptr}; + const VetoNuID* m_vetoNuID{nullptr}; const TriggerID* m_triggerID{nullptr}; const PreshowerID* m_preshowerID{nullptr}; diff --git a/Waveform/WaveformConditions/WaveformConditionsTools/src/WaveformRangeTool.cxx b/Waveform/WaveformConditions/WaveformConditionsTools/src/WaveformRangeTool.cxx new file mode 100644 index 0000000000000000000000000000000000000000..5c7670e3cbc2dcc9e967107b9f304743695987fc --- /dev/null +++ b/Waveform/WaveformConditions/WaveformConditionsTools/src/WaveformRangeTool.cxx @@ -0,0 +1,90 @@ +/* + Copyright (C) 2002-2019 CERN for the benefit of the ATLAS and FASER collaborations +*/ + +/** @file WaveformRangeTool.cxx Implementation file for WaveformRangeTool. 
+ @author Eric Torrence (05/02/22) +*/ + +#include "WaveformRangeTool.h" + +//---------------------------------------------------------------------- +WaveformRangeTool::WaveformRangeTool (const std::string& type, const std::string& name, const IInterface* parent) : + base_class(type, name, parent) +{ +} + +//---------------------------------------------------------------------- +StatusCode +WaveformRangeTool::initialize() { + // Read Cond Handle Key + + ATH_MSG_DEBUG("WaveformRangeTool::initialize()"); + + ATH_CHECK(m_readKey.initialize()); + + return StatusCode::SUCCESS; +} + +//---------------------------------------------------------------------- +StatusCode +WaveformRangeTool::finalize() { + // Print where you are + return StatusCode::SUCCESS; +} + +//---------------------------------------------------------------------- +WaveformRangeMap +WaveformRangeTool::getRangeMapping(const EventContext& ctx) const { + // Print where you are + ATH_MSG_DEBUG("in getRangeMapping()"); + WaveformRangeMap mappingData; + + // Read Cond Handle + SG::ReadCondHandle<CondAttrListCollection> readHandle{m_readKey, ctx}; + const CondAttrListCollection* readCdo{*readHandle}; + if (readCdo==nullptr) { + ATH_MSG_FATAL("Null pointer to the read conditions object"); + return mappingData; + } + // Get the validitiy range + EventIDRange rangeW; + if (not readHandle.range(rangeW)) { + ATH_MSG_FATAL("Failed to retrieve validity range for " << readHandle.key()); + return mappingData; + } + ATH_MSG_DEBUG("Size of CondAttrListCollection " << readHandle.fullKey() << " readCdo->size()= " << readCdo->size()); + ATH_MSG_DEBUG("Range of input is " << rangeW); + + // Read range info + + CondAttrListCollection::const_iterator attrList{readCdo->begin()}; + CondAttrListCollection::const_iterator end{readCdo->end()}; + // CondAttrListCollection doesn't support C++11 type loops, no generic 'begin' + for (; attrList!=end; ++attrList) { + // A CondAttrListCollection is a map of ChanNum and AttributeList + 
CondAttrListCollection::ChanNum channelNumber{attrList->first}; + const CondAttrListCollection::AttributeList &payload{attrList->second}; + if (payload.exists("range") and not payload["range"].isNull()) { + + float range {payload["range"].data<float>()}; + + ATH_MSG_DEBUG("Found digitizer channel " << channelNumber << " range as " << range); + + mappingData.emplace(std::make_pair(channelNumber, range)); + + } + + } // End of loop over attributes + + return mappingData; +} + +WaveformRangeMap +WaveformRangeTool::getRangeMapping(void) const { + const EventContext& ctx{Gaudi::Hive::currentContext()}; + return getRangeMapping(ctx); +} + + + diff --git a/Waveform/WaveformConditions/WaveformConditionsTools/src/WaveformRangeTool.h b/Waveform/WaveformConditions/WaveformConditionsTools/src/WaveformRangeTool.h new file mode 100644 index 0000000000000000000000000000000000000000..e33123f243a41e286f32cf09059e748306a1348c --- /dev/null +++ b/Waveform/WaveformConditions/WaveformConditionsTools/src/WaveformRangeTool.h @@ -0,0 +1,60 @@ +// -*- C++ -*- + +/* + Copyright (C) 2002-2019 CERN for the benefit of the ATLAS and CERN collaborations +*/ + +/** @file WaveformRangeTool.h Header file for WaveformRangeTool. + @author Eric Torrence, 20/04/22 +*/ + +// Multiple inclusion protection +#ifndef WAVEFORM_RANGE_TOOL +#define WAVEFORM_RANGE_TOOL + +// Include interface class +#include "AthenaBaseComps/AthAlgTool.h" +#include "WaveformConditionsTools/IWaveformRangeTool.h" + +// Include Athena stuff +#include "AthenaPoolUtilities/CondAttrListCollection.h" +#include "StoreGate/ReadCondHandleKey.h" + +#include "GaudiKernel/ICondSvc.h" +#include "Gaudi/Property.h" + +// Include Gaudi classes +#include "GaudiKernel/EventContext.h" + +/** This class contains a Tool that reads Waveform range data and makes it available to + other algorithms. The current implementation reads the data from a COOL database. 
+*/ + +class WaveformRangeTool: public extends<AthAlgTool, IWaveformRangeTool> { + + public: + //----------Public Member Functions----------// + // Structors + WaveformRangeTool(const std::string& type, const std::string& name, const IInterface* parent); //!< Constructor + virtual ~WaveformRangeTool() = default; //!< Destructor + + // Standard Gaudi functions + virtual StatusCode initialize() override; //!< Gaudi initialiser + virtual StatusCode finalize() override; //!< Gaudi finaliser + + // Methods to return calibration data + // Map indexed by digitizer channel number + // Returns full-scale ADC range as float + virtual WaveformRangeMap getRangeMapping(const EventContext& ctx) const override; + virtual WaveformRangeMap getRangeMapping(void) const override; + + private: + // Read Cond Handle + SG::ReadCondHandleKey<CondAttrListCollection> m_readKey{this, "ReadKey", "/WAVE/DAQ/Range", "Key of range folder"}; + + ServiceHandle<ICondSvc> m_condSvc{this, "CondSvc", "CondSvc"}; + +}; + +//---------------------------------------------------------------------- +#endif // WAVEFORM_RANGE_TOOL diff --git a/Waveform/WaveformConditions/WaveformConditionsTools/src/WaveformTimingTool.cxx b/Waveform/WaveformConditions/WaveformConditionsTools/src/WaveformTimingTool.cxx new file mode 100644 index 0000000000000000000000000000000000000000..f163c65620a107ea8a5812818e5f8c5ba4049bac --- /dev/null +++ b/Waveform/WaveformConditions/WaveformConditionsTools/src/WaveformTimingTool.cxx @@ -0,0 +1,130 @@ +/* + Copyright (C) 2002-2019 CERN for the benefit of the ATLAS and FASER collaborations +*/ + +/** @file WaveformTimingTool.cxx Implementation file for WaveformTimingTool. 
+ @author Eric Torrence (04/05/22) +*/ + +#include "WaveformTimingTool.h" + +//---------------------------------------------------------------------- +WaveformTimingTool::WaveformTimingTool (const std::string& type, const std::string& name, const IInterface* parent) : + base_class(type, name, parent) +{ +} + +//---------------------------------------------------------------------- +StatusCode +WaveformTimingTool::initialize() { + // Read Cond Handle Key + + ATH_MSG_DEBUG("WaveformTimingTool::initialize()"); + + ATH_CHECK(m_timingReadKey.initialize()); + ATH_CHECK(m_offsetReadKey.initialize()); + + return StatusCode::SUCCESS; +} + +//---------------------------------------------------------------------- +StatusCode +WaveformTimingTool::finalize() { + // Print where you are + return StatusCode::SUCCESS; +} + +//---------------------------------------------------------------------- +float +WaveformTimingTool::nominalTriggerTime(const EventContext& ctx) const { + // Print where you are + ATH_MSG_DEBUG("in nominalTriggerTime()"); + + float time=-1.; + + // Read Cond Handle + SG::ReadCondHandle<AthenaAttributeList> readHandle{m_timingReadKey, ctx}; + const AthenaAttributeList* readCdo(*readHandle); + + if (readCdo==nullptr) { + ATH_MSG_FATAL("Null pointer to the read conditions object"); + return time; + } + + // Get the validity range + EventIDRange rangeW; + if (not readHandle.range(rangeW)) { + ATH_MSG_FATAL("Failed to retrieve validity range for " << readHandle.key()); + return time; + } + ATH_MSG_DEBUG("Range of input is " << rangeW); + + // Read time info + + const CondAttrListCollection::AttributeList &payload{*readCdo}; + if (payload.exists("NominalTriggerTime") and not payload["NominalTriggerTime"].isNull()) { + time = payload["NominalTriggerTime"].data<float>(); + ATH_MSG_DEBUG("Found nominal trigger time "<<time<<" ns"); + } else { + ATH_MSG_WARNING("No valid nominal trigger time found!"); + } + + return time; +} + 
+//---------------------------------------------------------------------- +float +WaveformTimingTool::triggerTimeOffset(const EventContext& ctx, int channel) const { + + ATH_MSG_DEBUG("in triggerTimeOffset("<<channel<<")"); + + float time=0.; + + // Read Cond Handle + SG::ReadCondHandle<CondAttrListCollection> readHandle{m_offsetReadKey, ctx}; + const CondAttrListCollection* readCdo{*readHandle}; + if (readCdo==nullptr) { + ATH_MSG_FATAL("Null pointer to the read conditions object"); + return time; + } + // Get the validity range + EventIDRange rangeW; + if (not readHandle.range(rangeW)) { + ATH_MSG_FATAL("Failed to retrieve validity range for " << readHandle.key()); + return time; + } + ATH_MSG_DEBUG("Size of CondAttrListCollection " << readHandle.fullKey() << " readCdo->size()= " << readCdo->size()); + ATH_MSG_DEBUG("Range of input is " << rangeW); + + // Read offset for specific channel + const CondAttrListCollection::AttributeList& payload{readCdo->attributeList(channel)}; + + if (payload.exists("TriggerOffset") and not payload["TriggerOffset"].isNull()) { + time = payload["TriggerOffset"].data<float>(); + ATH_MSG_DEBUG("Found digitizer channel " << channel << " trigger offset as " << time); + } else { + ATH_MSG_WARNING("No valid trigger offset found for channel "<<channel<<"!"); + } + + return time; + +} + +//---------------------------------------------------------------------- +float +WaveformTimingTool::nominalTriggerTime(void) const { + const EventContext& ctx(Gaudi::Hive::currentContext()); + return nominalTriggerTime(ctx); +} + +float +WaveformTimingTool::triggerTimeOffset(int channel) const { + const EventContext& ctx(Gaudi::Hive::currentContext()); + return triggerTimeOffset(ctx, channel); +} + + + + + + diff --git a/Waveform/WaveformConditions/WaveformConditionsTools/src/WaveformTimingTool.h b/Waveform/WaveformConditions/WaveformConditionsTools/src/WaveformTimingTool.h new file mode 100644 index 
0000000000000000000000000000000000000000..b69e52b200907a369865f85b5b709aec5341d0dd --- /dev/null +++ b/Waveform/WaveformConditions/WaveformConditionsTools/src/WaveformTimingTool.h @@ -0,0 +1,69 @@ +// -*- C++ -*- + +/* + Copyright (C) 2002-2019 CERN for the benefit of the ATLAS and CERN collaborations +*/ + +/** @file WaveformTimingTool.h Header file for WaveformTimingTool. + @author Eric Torrence, 20/04/22 +*/ + +// Multiple inclusion protection +#ifndef WAVEFORM_TIMING_TOOL +#define WAVEFORM_TIMING_TOOL + +// Include interface class +#include "AthenaBaseComps/AthAlgTool.h" +#include "WaveformConditionsTools/IWaveformTimingTool.h" + +// Include Athena stuff +#include "AthenaPoolUtilities/CondAttrListCollection.h" +#include "StoreGate/ReadCondHandleKey.h" + +#include "GaudiKernel/ICondSvc.h" +#include "Gaudi/Property.h" + +// Include Gaudi classes +#include "GaudiKernel/EventContext.h" + +/** This class contains a Tool that reads Waveform timing data and makes it available + to other algorithms. 
+*/ + +class WaveformTimingTool: public extends<AthAlgTool, IWaveformTimingTool> { + + public: + //----------Public Member Functions----------// + // Structors + WaveformTimingTool(const std::string& type, const std::string& name, const IInterface* parent); //!< Constructor + virtual ~WaveformTimingTool() = default; //!< Destructor + + // Standard Gaudi functions + virtual StatusCode initialize() override; //!< Gaudi initialiser + virtual StatusCode finalize() override; //!< Gaudi finaliser + + // Methods to return timing data + // Channels indexed by digitizer channel number + // All times are in ns + + // Nominal trigger time (in ns) in the digitizer readout + virtual float nominalTriggerTime(void) const override; + virtual float nominalTriggerTime(const EventContext& ctx) const override; + + // Channel-by-channel corrections to the nominal trigger time (in ns) + // A given channel should be centered at nominal + offset + virtual float triggerTimeOffset(int channel) const override; + virtual float triggerTimeOffset(const EventContext& ctx, int channel) const override; + + private: + + // Read Cond Handle + SG::ReadCondHandleKey<AthenaAttributeList> m_timingReadKey{this, "TimingReadKey", "/WAVE/DAQ/Timing", "Key of timing folder"}; + SG::ReadCondHandleKey<CondAttrListCollection> m_offsetReadKey{this, "OffsetReadKey", "/WAVE/DAQ/TimingOffset", "Key of timing offset folder"}; + + ServiceHandle<ICondSvc> m_condSvc{this, "CondSvc", "CondSvc"}; + +}; + +//---------------------------------------------------------------------- +#endif // WAVEFORM_TIMING_TOOL diff --git a/Waveform/WaveformConditions/WaveformConditionsTools/src/components/WaveformConditionsTools_entries.cxx b/Waveform/WaveformConditions/WaveformConditionsTools/src/components/WaveformConditionsTools_entries.cxx index 1f6a41ac923beab0533fcd60066b86143f813b90..f694fef647428802bfb98ab393e4f784180ae2b0 100644 --- 
a/Waveform/WaveformConditions/WaveformConditionsTools/src/components/WaveformConditionsTools_entries.cxx +++ b/Waveform/WaveformConditions/WaveformConditionsTools/src/components/WaveformConditionsTools_entries.cxx @@ -1,3 +1,7 @@ +#include "../WaveformRangeTool.h" +#include "../WaveformTimingTool.h" #include "../WaveformCableMappingTool.h" +DECLARE_COMPONENT( WaveformRangeTool ) +DECLARE_COMPONENT( WaveformTimingTool ) DECLARE_COMPONENT( WaveformCableMappingTool ) diff --git a/faser-common b/faser-common index 5124b0e78dbcc4a05c22f511700d5dbcdb4808df..69a90ec95da88a00097fb809bede6c2bae8c02d6 160000 --- a/faser-common +++ b/faser-common @@ -1 +1 @@ -Subproject commit 5124b0e78dbcc4a05c22f511700d5dbcdb4808df +Subproject commit 69a90ec95da88a00097fb809bede6c2bae8c02d6 diff --git a/xAOD/xAODFaserWaveform/Root/WaveformHit_v1.cxx b/xAOD/xAODFaserWaveform/Root/WaveformHit_v1.cxx index df91c67cf77ef79b749934687ee910146ee3e309..59562e632c5704d53ca09eec67b0d036c6ac45b0 100644 --- a/xAOD/xAODFaserWaveform/Root/WaveformHit_v1.cxx +++ b/xAOD/xAODFaserWaveform/Root/WaveformHit_v1.cxx @@ -27,6 +27,8 @@ namespace xAOD { AUXSTORE_PRIMITIVE_SETTER_AND_GETTER( WaveformHit_v1, float, bcid_time, set_bcid_time ) + AUXSTORE_PRIMITIVE_SETTER_AND_GETTER( WaveformHit_v1, float, trigger_time, set_trigger_time ) + AUXSTORE_PRIMITIVE_SETTER_AND_GETTER( WaveformHit_v1, float, raw_peak, set_raw_peak ) AUXSTORE_PRIMITIVE_SETTER_AND_GETTER( WaveformHit_v1, float, raw_integral, set_raw_integral ) diff --git a/xAOD/xAODFaserWaveform/xAODFaserWaveform/versions/WaveformHit_v1.h b/xAOD/xAODFaserWaveform/xAODFaserWaveform/versions/WaveformHit_v1.h index 9ea7b6dc573d53a533df44d02ff3edbf92c8a9ce..8e497633d1d673e6bb6220552874e5dbea1ce0e1 100644 --- a/xAOD/xAODFaserWaveform/xAODFaserWaveform/versions/WaveformHit_v1.h +++ b/xAOD/xAODFaserWaveform/xAODFaserWaveform/versions/WaveformHit_v1.h @@ -55,6 +55,8 @@ namespace xAOD { return Identifier(this->id()); } + /// All values are in units of ns and mV + /// 
Best results float localtime() const; void set_localtime(float value); @@ -72,6 +74,10 @@ namespace xAOD { float bcid_time() const; void set_bcid_time(float value); + /// Time with respect to nominal trigger time (including known offsets) + float trigger_time() const; + void set_trigger_time(float value); + /// Raw values from waveform float raw_peak() const; void set_raw_peak(float value); @@ -102,7 +108,7 @@ namespace xAOD { float nval() const; void set_nval(float value); - /// Raw time and waveform data + /// Raw time and waveform data (in ns and mV) std::vector<float> time_vector() const; void set_time_vector(std::vector<float> value);