Skip to content
Snippets Groups Projects

make packing configuration simpler

Merged Sevda Esen requested to merge sevda-simpler-packing into master
Compare and
2 files
+ 76
55
Compare changes
  • Side-by-side
  • Inline
Files
2
@@ -20,7 +20,7 @@ from RecoConf.data_from_file import unpacked_mc_locations
from PyConf import configurable
from PyConf.control_flow import CompositeNode, NodeLogic
from PyConf.components import get_output
from PyConf.location_prefix import prefix, unpacked_prefix, packed_prefix
from PyConf.location_prefix import prefix
from PyConf.application import register_encoding_dictionary
from GaudiConf.reading import type_map
from GaudiConf.PersistRecoConf import PersistRecoPacking
@@ -28,7 +28,6 @@ from GaudiConf.PersistRecoConf import PersistRecoPacking
from .cloning import clone_line_outputs
from .packing import pack_stream_objects, pack_stream_mc, pack_stream_mc_locations
from .persistreco import persistreco_line_outputs, persistreco_line_outputs_packed
from .serialisation import serialise_packed_containers
from .truth_matching import truth_match_lines, CHARGED_PP2MC_LOC, NEUTRAL_PP2MC_LOC
log = logging.getLogger(__name__)
@@ -163,7 +162,6 @@ def persist_line_outputs(
#add the locations from reco objects to the dictionary
prdict = persistreco_line_outputs()
prdict_packed = persistreco_line_outputs_packed(stream, reco_stream)
for val in prdict.values():
name = get_type(val) #find type of object for this DH
@@ -200,29 +198,8 @@ def persist_line_outputs(
pformat(output_cloner_locations))
cf.append(output_cloner_cf)
#Make a dictionary for output packer locations
#For line outputs, "stream+/p" added to input locations
#For reco objects, there are pre-defined output locations
#This is to be able to find reco objects regardless of their producer
outputs = {}
for key, value in inputs.items():
outputs[key] = []
for v in value:
if v in prdict_packed.keys():
outputs[key] += [prdict_packed[v]] #reco
else:
outputs[key] += [packed_prefix(v, stream)] #line
prpacking = PersistRecoPacking(
stream=stream,
unpacked=inputs,
packed=outputs,
data_type=data_type,
)
### TODO: reduce the set of encoding keys to the smallest possible one...
locations = set([ unpacked_prefix(i, stream) for i in prpacking.packedLocations() ]) | \
set([ i for i in prpacking.unpackedLocations()]) | \
locations = set([ i for ilist in inputs.values() for i in ilist]) | \
set([ i.location for i in itertools.chain( *_referenced_locations(lines).values()) ])
if clone_mc:
@@ -234,10 +211,6 @@ def persist_line_outputs(
register_encoding_dictionary("PackedObjectLocations",
sorted(locations)), 16)
packer_cf, packer_locations = pack_stream_objects(stream, prpacking,
encoding_key)
cf.append(packer_cf)
packer_mc_locations = []
if clone_mc:
@@ -245,15 +218,17 @@ def persist_line_outputs(
cf.append(mc_packer_cf)
if log.isEnabledFor(logging.DEBUG):
log.debug('packer_locations: ' + pformat(packer_locations))
log.debug('packer_locations: ' + pformat(inputs.values()))
log.debug('packer_mc_locations: ' + pformat(packer_mc_locations))
serialisation_cf, output_raw_data = serialise_packed_containers(
packer_locations, source_id)
packer_cf, serialisation_cf, output_raw_data = pack_stream_objects(
stream, inputs, encoding_key, source_id)
cf.append(packer_cf)
cf.append(serialisation_cf)
if log.isEnabledFor(logging.DEBUG):
log.debug('output_raw_data: %s', pformat(output_raw_data))
cf.append(serialisation_cf)
unpacked_mc_loc = [
prefix(l, reco_stream) for l in unpacked_mc_locations().values()
Loading