diff --git a/Simulation/SimulationJobOptions/share/hive/postInclude.GaudiHive.py b/Simulation/SimulationJobOptions/share/hive/postInclude.GaudiHive.py
index 8b31b2bf3fd846ca24a7db486de76f602eba1326..41c0f0ab8ec5a93002484239f0bc34e2aebb21ad 100644
--- a/Simulation/SimulationJobOptions/share/hive/postInclude.GaudiHive.py
+++ b/Simulation/SimulationJobOptions/share/hive/postInclude.GaudiHive.py
@@ -1,18 +1,19 @@
-# TODO: make this declaration more automatic
-
-topSeq.G4AtlasAlg.ExtraInputs = [('McEventCollection','GEN_EVENT')]
-topSeq.G4AtlasAlg.ExtraOutputs = [('SiHitCollection','SCT_Hits'),('RecoTimingObj','EVNTtoHITS_timings')]
+#
+# For post-include MT configuration, we need to add some explicit data
+# dependencies for the AthenaMT scheduler.
+#
+# I'm not sure if we need this timing setting here,
+# so leaving this older code commented out for now.
+#topSeq.G4AtlasAlg.ExtraOutputs = [('SiHitCollection','SCT_Hits'),('RecoTimingObj','EVNTtoHITS_timings')]
+topSeq.G4AtlasAlg.ExtraInputs = [('McEventCollection','StoreGateSvc+BeamTruthEvent')]
+topSeq.G4AtlasAlg.ExtraOutputs = [('SiHitCollection','StoreGateSvc+SCT_Hits')]
 topSeq.StreamHITS.ExtraInputs += topSeq.G4AtlasAlg.ExtraOutputs
+
 # Disable alg filtering - doesn't work in multi-threading
 topSeq.StreamHITS.AcceptAlgs = []
 
-algCardinality = jp.ConcurrencyFlags.NumThreads()
-if (algCardinality != 1):
-    for alg in topSeq:
-        name = alg.name()
-        if name in ["StreamHITS"]:
-            # suppress INFO message about Alg unclonability
-            alg.Cardinality = 1
-        else:
-            alg.Cardinality = algCardinality
+# Override algorithm cloning settings
+nThreads = jp.ConcurrencyFlags.NumThreads()
+topSeq.BeamEffectsAlg.Cardinality = nThreads
+topSeq.G4AtlasAlg.Cardinality = nThreads
diff --git a/Simulation/SimulationJobOptions/share/hive/preInclude.GaudiHive.py b/Simulation/SimulationJobOptions/share/hive/preInclude.GaudiHive.py
index cb2f2a6cc55e882c8a05512af1e6179191aa9770..cf30cfd853c6e81dc60b4d7b1eda57c42a88c249 100644
--- a/Simulation/SimulationJobOptions/share/hive/preInclude.GaudiHive.py
+++ b/Simulation/SimulationJobOptions/share/hive/preInclude.GaudiHive.py
@@ -1,14 +1,18 @@
+#
+# To configure multi-threaded simulation (AtlasG4) we set up some
+# AthenaMT-related infrastructure and disable some features that do not
+# work yet in MT.
+#
-## Detector flags
+# Detector flags
 from AthenaCommon.DetFlags import DetFlags
 DetFlags.ID_setOn()
-DetFlags.Calo_setOff()
+DetFlags.Calo_setOn()
 DetFlags.Muon_setOn()
 DetFlags.Lucid_setOff()
 DetFlags.Truth_setOn()
 
-
-
+# Check that we correctly configured number of threads from command line
 from AthenaCommon.ConcurrencyFlags import jobproperties as jp
 nThreads = jp.ConcurrencyFlags.NumThreads()
 if (nThreads < 1) :
@@ -16,16 +20,16 @@ if (nThreads < 1) :
     msg.fatal('numThreads must be >0. Did you set the --threads=N option?')
     sys.exit(AthenaCommon.ExitCodes.CONFIGURATION_ERROR)
 
+# Update message stream format to include slot number
+msgFmt = "% F%40W%S%5W%e%s%7W%R%T %0W%M"
+svcMgr.MessageSvc.Format = msgFmt
+
 # Thread pool service and initialization
 from GaudiHive.GaudiHiveConf import ThreadPoolSvc
 svcMgr += ThreadPoolSvc("ThreadPoolSvc")
 svcMgr.ThreadPoolSvc.ThreadInitTools = ["G4InitTool"]
 
-# Algorithm resource pool
-from GaudiHive.GaudiHiveConf import AlgResourcePool
-svcMgr += AlgResourcePool( OutputLevel = INFO );
-
-from AthenaCommon.AlgSequence import AlgSequence
+# Performance monitoring probably not yet thread-safe
 from PerfMonComps.PerfMonFlags import jobproperties
 jobproperties.PerfMonFlags.doMonitoring.set_Value_and_Lock(False)
 jobproperties.PerfMonFlags.doDsoMonitoring.set_Value_and_Lock(False)
@@ -35,52 +39,18 @@ rec.doPerfMon.set_Value_and_Lock( False )
 rec.doDetailedPerfMon.set_Value_and_Lock( False )
 rec.doSemiDetailedPerfMon.set_Value_and_Lock( False )
 
-# check to see if we're running hybrid mp/mt
-nProc = jp.ConcurrencyFlags.NumProcs()
-if (nProc > 0) :
-
-    #
-    ## For MP/Hive we need to set the chunk size
-    #
-
-    from AthenaCommon.Logging import log as msg
-    if (evtMax == -1) :
-        msg.fatal('EvtMax must be >0 for hybrid configuration')
-        sys.exit(AthenaCommon.ExitCodes.CONFIGURATION_ERROR)
-
-    if ( evtMax % nProc != 0 ) :
-        msg.warning('EvtMax[%s] is not divisible by nProcs[%s]: ' +
-                    'MP Workers will not process all requested events',
-                    evtMax, nProc)
-
-    chunkSize = int (evtMax / nProc)
-
-    from AthenaMP.AthenaMPFlags import jobproperties as jps
-    jps.AthenaMPFlags.ChunkSize = chunkSize
-
-    msg.info('AthenaMP workers will process %s events each', chunkSize)
-
 ## Simulation flags
 from G4AtlasApps.SimFlags import simFlags
 from G4AtlasApps import callbacks
 simFlags.load_atlas_flags()
+# Disable the EtaPhi, VertexSpread and VertexRange checks
+simFlags.EventFilter.set_Off()
 
-## No magnetic field
-simFlags.MagneticField.set_Off()
-
-# Currently, Hive requires an algorithm to load the initial data into the
-# whiteboard and kickstart the data dependency chain. This alg must be at the
-# front of the AlgSequence.
+# Setup the algorithm sequence
+from AthenaCommon.AlgSequence import AlgSequence
 topSeq = AlgSequence()
-from AthenaCommon import CfgMgr
-topSeq += CfgMgr.SGInputLoader(OutputLevel = INFO, ShowEventDump=False)
-
-# SGInputLoader is a module in SGComps that will do a typeless StoreGate read
-# of data on disk, to preload it in the Whiteboard for other Alorithms to use.
-# Is uses the same syntax as Algorithmic dependency declarations
-topSeq.SGInputLoader.Load = [('McEventCollection','GEN_EVENT')]
-
-## Add the G4 simulation service
-#from G4AtlasApps.PyG4Atlas import PyG4AtlasSvc
-#svcMgr += PyG4AtlasSvc()
+# Instruct the input loader to populate the whiteboard with the GEN_EVENT
+# data from the input file.
+from AthenaCommon import CfgMgr
+CfgMgr.SGInputLoader().Load += [('McEventCollection', 'StoreGateSvc+GEN_EVENT')]
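For readers less familiar with old-style AthenaMT job options: the post-include above relies on two ingredients, explicit (type, StoreGate key) dependency declarations so the scheduler can order algorithms, and a per-algorithm Cardinality that controls how many clones may run concurrently. The fragment below is an illustrative sketch only, not part of this change; "MyAlg" is a placeholder for an algorithm already added to the sequence, and the usual athena job-options environment is assumed.

# Illustrative sketch, not part of this merge request. "MyAlg" is a
# placeholder for an algorithm that is already in the sequence.
from AthenaCommon.AlgSequence import AlgSequence
from AthenaCommon.ConcurrencyFlags import jobproperties as jp

topSeq = AlgSequence()

# Declare what the algorithm reads and writes as (type, StoreGate key)
# pairs so the AthenaMT scheduler can resolve the data dependencies.
topSeq.MyAlg.ExtraInputs  = [('McEventCollection', 'StoreGateSvc+BeamTruthEvent')]
topSeq.MyAlg.ExtraOutputs = [('SiHitCollection', 'StoreGateSvc+SCT_Hits')]

# Allow one clone of the algorithm per scheduler thread; the thread count
# comes from the --threads=N command-line option.
topSeq.MyAlg.Cardinality = jp.ConcurrencyFlags.NumThreads()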
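The pre-include's MT scaffolding is easier to follow when the added lines are read together rather than interleaved with the removals. The sketch below simply gathers them in order, with comments; it assumes the standard job-options globals (svcMgr) provided by athena.

# Consolidated restatement of the pre-include additions, for readability.
# Assumes the usual job-options globals such as svcMgr.

# Include the scheduler slot number in every log line.
msgFmt = "% F%40W%S%5W%e%s%7W%R%T %0W%M"
svcMgr.MessageSvc.Format = msgFmt

# Create the thread pool and run G4InitTool on each worker thread
# (per-thread Geant4 initialization).
from GaudiHive.GaudiHiveConf import ThreadPoolSvc
svcMgr += ThreadPoolSvc("ThreadPoolSvc")
svcMgr.ThreadPoolSvc.ThreadInitTools = ["G4InitTool"]

# Preload the generator event from the input file into the whiteboard so
# the first algorithm in the chain finds its input.
from AthenaCommon import CfgMgr
CfgMgr.SGInputLoader().Load += [('McEventCollection', 'StoreGateSvc+GEN_EVENT')]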