From a652ac4ed34c4d07812e6f61f6c489e866ac1aad Mon Sep 17 00:00:00 2001
From: Alex Pearce <alex.pearce@cern.ch>
Date: Tue, 4 Jul 2017 09:32:15 +0200
Subject: [PATCH] Juggle raw event to mimic Brunel behaviour.

Makes Tesla output more closely resemble Stripping output.

Closes LHCBPS-1721.
---
 Phys/Tesla/python/Tesla/Configuration.py | 78 +++++++++++++++++-------
 1 file changed, 56 insertions(+), 22 deletions(-)

diff --git a/Phys/Tesla/python/Tesla/Configuration.py b/Phys/Tesla/python/Tesla/Configuration.py
index f6ec46131..bb19849b1 100644
--- a/Phys/Tesla/python/Tesla/Configuration.py
+++ b/Phys/Tesla/python/Tesla/Configuration.py
@@ -59,6 +59,7 @@ from GaudiConf import IOExtension, IOHelper
 from GaudiConf import PersistRecoConf
 import GaudiKernel.ProcessJobOptions
 from LHCbKernel.Configuration import *
+from RawEventCompat.Configuration import ReverseDict as RawEventLocationsToBanks
 
 class Tesla(LHCbConfigurableUser):
@@ -88,7 +89,8 @@ class Tesla(LHCbConfigurableUser):
         # "Online" means running on data from the pit
         # "Offline" is used on DSTs, i.e. the output of Brunel
         , 'Pack' : True # Do we want to pack the objects?
-        , 'RawFormatVersion' : 0.4 # Which banks form the Turbo stream
+        , 'SplitRawEventInput' : 0.4
+        , 'SplitRawEventOutput' : 4.3
         , 'Monitors' : [] # Histogram monitors to run in the job
         , 'Histogram' : "" # Name of histogram file
         , 'VetoJuggle' : True # True if raw event removal not required in online mode
@@ -134,7 +136,8 @@ class Tesla(LHCbConfigurableUser):
         , 'TriggerLines' : 'Which trigger line to process'
         , 'Mode' : '"Online" (strip unnecessary banks and run lumi algorithms) or "Offline"?'
         , 'Pack' : 'Do we want to pack the object?'
-        , 'RawFormatVersion': 'Which banks form the Turbo stream'
+        , 'SplitRawEventInput': "How is the event split up in the input? Propagated to RawEventJuggler() and DecodeRawEvent()."
+        , 'SplitRawEventOutput': "How should the event be split up in the output?"
         , 'Monitors' : 'Histogram monitors to run in the job'
         , 'Histogram' : 'File name for histogram file'
         , 'VetoJuggle' : 'Do we want to stop raw bank removal (assume happened further upstream)'
@@ -289,7 +292,7 @@ class Tesla(LHCbConfigurableUser):
         # DecodeRawEvent().DataOnDemand=False
 
         writer=InputCopyStream( self.writerName+stream )
-        DstConf().setProp("SplitRawEventOutput", self.getProp("RawFormatVersion"))
+        self._safeSet(DstConf(), ['SplitRawEventOutput'])
 
         # we can only do this if we are setting one output writer
         if not self.getProp("Streams"):
@@ -298,7 +301,7 @@ class Tesla(LHCbConfigurableUser):
             TurboBanksSeq=GaudiSequencer("TurboBanksSeq")
             RawEventJuggler().TCK=tck
             RawEventJuggler().Input="Moore"
-            RawEventJuggler().Output=self.getProp("RawFormatVersion")
+            RawEventJuggler().Output=self.getProp("SplitRawEventOutput")
             RawEventJuggler().Sequencer=TurboBanksSeq
             RawEventJuggler().WriterOptItemList=writer
             RawEventJuggler().KillExtraNodes=True
@@ -736,9 +739,10 @@ class Tesla(LHCbConfigurableUser):
         datatype = self.getProp('DataType')
         mode = self.getProp('Mode')
         online = mode == 'Online'
-        raw_format = self.getProp('RawFormatVersion')
         vertex_report_location = self.getProp('VertRepLoc')
         enable_line_checker = self.getProp('EnableLineChecker')
+        raw_format_input = self.getProp('SplitRawEventInput')
+        raw_format_output = self.getProp('SplitRawEventOutput')
 
         ok = (trigger_lines and not streams) or (streams and not trigger_lines)
         assert ok, ('Must define at least one of Tesla().Streams and '
@@ -764,7 +768,7 @@ class Tesla(LHCbConfigurableUser):
             decoders_seq.Members.append(decoder)
 
         DecodeRawEvent().DataOnDemand = False
-        DstConf().setProp('SplitRawEventOutput', raw_format)
+        self._safeSet(DstConf(), ['SplitRawEventOutput'])
 
         # This decoder is setup by TurboConf
         packed_data_decoder = HltPackedDataDecoder('Hlt2PackedDataDecoder')
@@ -792,16 +796,11 @@ class Tesla(LHCbConfigurableUser):
                 stream_name,
                 streams[stream_name]['lines'],
                 prpacking,
-                online
+                online,
+                raw_format_output
             )
             stream_sequences.append(stream_seq)
 
-        # Kill any links the output file might have had to the input file
-        if online:
-            address_killer = [AddressKillerAlg()]
-        else:
-            address_killer = []
-
         if enable_line_checker:
             all_lines = sum([d['lines'] for d in streams.values()], [])
             ignored_lines = self.getProp('IgnoredLines')
@@ -825,21 +824,28 @@ class Tesla(LHCbConfigurableUser):
                 Nodes=['PersistReco']
             )]
 
+        # Kill any links the output file might have had to the input file
+        if online:
+            address_killer = [AddressKillerAlg()]
+        else:
+            address_killer = []
+
         streaming_seq = GaudiSequencer(
             'TeslaStreamsSequence',
             Members=(
                 [decoders_seq, unpackers_seq] +
                 line_checker +
                 stream_sequences +
-                address_killer +
-                dstdata_killer
+                dstdata_killer +
+                address_killer
             ),
             IgnoreFilterPassed=True
         )
 
         return streaming_seq
 
-    def _configureOutputTurboSPStream(self, name, lines, packing, online):
+    def _configureOutputTurboSPStream(self, name, lines, packing, online,
+                                      raw_format_output):
         """Return a sequence for streaming the lines into the named location.
 
         Copy line outputs for this stream to a stream-specific location.
@@ -849,6 +855,7 @@ class Tesla(LHCbConfigurableUser):
         lines -- List of HLT2 line names that belong in the stream
         packing -- Instance of PersistRecoConf.PersistRecoPacking
         online -- Configure in online mode (if True) or offline
+        raw_format_output -- Output format of the raw event
         """
         output_prefix = name.title()
         decisions = [l + 'Decision' for l in lines]
@@ -908,13 +915,8 @@ class Tesla(LHCbConfigurableUser):
         # /Event/<stream name>/Turbo
         turbo_base = os.path.join(stream_base, 'Turbo')
         required_output_locations = [
-            os.path.join(tes_root, 'DAQ/ODIN#1'),
             os.path.join(tes_root, 'Rec/Summary#1')
         ]
-        if online:
-            required_output_locations += [
-                os.path.join(tes_root, 'DAQ/RawEvent#1')
-            ]
         optional_output_locations = []
         if pack:
             datatype = self.getProp('DataType')
@@ -983,6 +985,9 @@ class Tesla(LHCbConfigurableUser):
         if online:
             writer = OutputStream(namer(self.writerName))
             writer.AcceptAlgs += ['LumiSeq', 'PhysFilter']
+            # In online mode we perform the raw event juggling (otherwise
+            # Brunel does it for us), so must save the locations after juggling
+            optional_output_locations += self._output_raw_event_locations(raw_format_output)
         else:
             # In offline mode, e.g. after Brunel, propagate everything from the
             # input file to the output
@@ -1037,6 +1042,28 @@ class Tesla(LHCbConfigurableUser):
 
         return kill_seq
 
+    def _raw_event_juggler(self, input_format, output_format):
+        # Load the raw event format dictionaries
+        RawEventFormatConf().loadIfRequired()
+
+        j = RawEventJuggler()
+        j.Input = input_format
+        j.Output = output_format
+        j.KillExtraNodes = True
+        j.KillExtraBanks = True
+        j.KillExtraDirectories = True
+        j.Sequencer = GaudiSequencer('TeslaRawEventJugglerSequence')
+
+        return j.Sequencer
+
+    def _output_raw_event_locations(self, output_format):
+        """Return the list of raw event locations for the output format."""
+        # Load the raw event format dictionaries
+        RawEventFormatConf().loadIfRequired()
+
+        locs = RawEventLocationsToBanks(output_format).keys()
+        return [os.path.join('/Event', l) + '#1' for l in locs]
+
     def __apply_configuration__(self):
         ############## Set other properties ###########
         self._safeSet( LHCbApp(), ['EvtMax','SkipEvents','Simulation', 'DataType' , 'CondDBtag','DDDBtag'] )
@@ -1058,7 +1085,7 @@ class Tesla(LHCbConfigurableUser):
                 self._configureLumi()
             else:
                 DecodeRawEvent().DataOnDemand=True
-                RecombineRawEvent(Version=self.getProp('RawFormatVersion'))
+                RecombineRawEvent(Version=self.getProp('SplitRawEventInput'))
                 if self.getProp('Simulation')==True:
                     self._unpackMC()
             TurboConf().setProp("PersistReco",True)
@@ -1092,4 +1119,11 @@ class Tesla(LHCbConfigurableUser):
             kill_selreports = self._configureHlt2SelReportsKill()
             self.teslaSeq.Members += [kill_selreports]
 
+        if self.getProp('SplitRawEventInput') != self.getProp('SplitRawEventOutput'):
+            raw_event_juggler = self._raw_event_juggler(
+                self.getProp('SplitRawEventInput'),
+                self.getProp('SplitRawEventOutput')
+            )
+            self.teslaSeq.Members += [raw_event_juggler]
+
         ApplicationMgr().TopAlg+=[self.teslaSeq]
-- 
GitLab
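
Usage sketch (not part of the patch): with the renamed properties, a Tesla options file can steer the raw event splitting explicitly. The trigger line name below is hypothetical and the numeric values simply restate the defaults introduced above; because SplitRawEventInput (0.4) differs from SplitRawEventOutput (4.3), __apply_configuration__ schedules the new juggler sequence.

# Hypothetical Tesla options file exercising the new raw event splitting
# properties; the line name is made up for illustration only.
from Tesla.Configuration import Tesla

tesla = Tesla()
tesla.TriggerLines = ['Hlt2CharmHadDpToKmPipPipTurbo']  # hypothetical Turbo line
tesla.Mode = 'Online'
tesla.SplitRawEventInput = 0.4    # raw event layout of the Moore/Turbo input
tesla.SplitRawEventOutput = 4.3   # Stripping-like layout written to the output
# The juggler sequence is only added when the two formats differ.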