Add RetinaClusters to digi files

Merged Giovanni Bassi requested to merge add_retina_clusters_to_digi into master
All threads resolved!
1 file changed: +129 additions, 0 deletions
###############################################################################
# (c) Copyright 2022 CERN for the benefit of the LHCb Collaboration           #
#                                                                             #
# This software is distributed under the terms of the GNU General Public      #
# Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING".   #
#                                                                             #
# In applying this licence, CERN does not waive the privileges and immunities #
# granted to it by virtue of its status as an Intergovernmental Organization  #
# or submit itself to any jurisdiction.                                       #
###############################################################################
# Script to dump larger MC samples into multiple MDF files for Allen standalone processing
#
# Variables that may need to be modified:
# key: to choose entry from TestfileDB
# scifi_v6: True if the sample uses scifi raw bank version 6, False otherwise
# n_files_per_chunk: number of input files combined into one output MDF file (output files are named with increasing index)
# base_dir: output directory
# n_jobs_parallel: number of jobs (writing a single MDF file) launched in parallel
#
# author: Dorothea vom Bruch (dorothea.vom.bruch@cern.ch)
# date: 07/2021
#
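# Note: the script shells out to gaudirun.py with a Moore options file and
# (for TestFileDB keys) imports PRConfig, so it needs to be run from an
# environment where the Moore stack is available.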
import os
from multiprocessing import Process
#key = 'Upgrade_BsPhiPhi_MD_FTv4_DIGI'
#key = 'Upgrade_Ds2KKPi_MD_FTv4_DIGI'
#key = 'Upgrade_JPsiMuMu_MD_FTv4_DIGI'
#key = 'Upgrade_KstEE_MD_FTv4_DIGI'
#key = 'Upgrade_KstMuMu_MD_FTv4_DIGI'
#key = 'MiniBrunel_2018_MinBias_FTv4_DIGI'
# use Scifi v6 format for the following samples
#key = 'upgrade_DC19_01_Bs2PhiPhiMD'
#key = 'upgrade_DC19_01_MinBiasMD'
key = 'SMOG2_pppHe'
scifi_v6 = True # set to True when using v6 samples
base_dir = "/eos/lhcb/wg/rta/WP6/Allen/mdf_input" #os.getcwd()
n_files_per_chunk = 20


class FileMerger(object):
    """Merge one chunk of input files into a single MDF file in a subprocess."""

    def __init__(self, input_files, output_file):
        self.__process = Process(target=self.run)
        self.__input_files = input_files
        self.__output_file = output_file

    def run(self):
        # Hand the configuration to the gaudirun.py options file through
        # the environment of the subprocess.
        os.environ["TESTFILE_KEY"] = key
        os.environ["OUTPUT_FILE"] = self.__output_file
        input_file_string = "!".join(self.__input_files)
        os.environ["INPUT_FILES"] = input_file_string
        os.environ["BASE_DIR"] = base_dir
        if scifi_v6:
            os.environ["SCIFI_VERSION"] = "6"
        else:
            os.environ["SCIFI_VERSION"] = ""
        os.system(
            "gaudirun.py Moore/Hlt/RecoConf/options/mdf_for_standalone_Allen_retinacluster.py"
        )

    def process(self):
        return self.__process

    def start(self):
        self.__process.start()

    def join(self):
        self.__process.join()
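
# Assumption based on run() above (the options file is not part of this
# script): mdf_for_standalone_Allen_retinacluster.py recovers its
# configuration from these environment variables, e.g. along the lines of
#   input_files = os.environ["INPUT_FILES"].split("!")
#   scifi_v6 = os.environ["SCIFI_VERSION"] == "6"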


def chunks(l, n):
    """Yield successive n-sized chunks from l, paired with a 1-based chunk index."""
    for i, j in zip(range(0, len(l), n), range(1, len(l) + 1)):
        yield l[i:i + n], j
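
# Illustration (not executed): for five files and n = 2 this yields
#   ([f1, f2], 1), ([f3, f4], 2), ([f5], 3)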


if key == 'SMOG2_pppHe':
    file_list = [
        "/eos/lhcb/wg/IonPhysics/Simulations/SMOGHepp8MB/digi/" + string
        for string in os.listdir(
            "/eos/lhcb/wg/IonPhysics/Simulations/SMOGHepp8MB/digi/")
    ]
    files = sorted(file_list)
    print(files)
else:
    from PRConfig.TestFileDB import test_file_db
    qualifiers = test_file_db[key].qualifiers
    files = sorted(test_file_db[key].filenames)

output_dir = base_dir + "/" + key + "/mdf"
if not os.path.exists(base_dir + "/" + key):
    os.makedirs(base_dir + "/" + key)
if not os.path.exists(output_dir):
    os.makedirs(output_dir)
outfile_pat = output_dir + "/" + key + "_%02d"
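# e.g. with key = 'SMOG2_pppHe' the first output file is
# <base_dir>/SMOG2_pppHe/mdf/SMOG2_pppHe_01 (1-based, zero-padded index)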

for input_files, i in chunks(files, n_files_per_chunk):
    print("at i = " + str(i))
    for file_name in input_files:
        print(file_name)

mergers = [
    FileMerger(input_files, outfile_pat % i)
    for input_files, i in chunks(files, n_files_per_chunk)
]
#mergers[0].start()
#mergers[0].join()
n_jobs = len(mergers)
print("Total number of jobs = " + str(n_jobs))
n_jobs_parallel = 5
# Ceiling division: number of batches needed to run all jobs,
# n_jobs_parallel at a time.
n_jobs_parallel_chunks = (n_jobs + n_jobs_parallel - 1) // n_jobs_parallel
print("Number of batches of " + str(n_jobs_parallel) + " parallel jobs = " +
      str(n_jobs_parallel_chunks))
for i in range(n_jobs_parallel_chunks):
    start = i * n_jobs_parallel
    stop = min(start + n_jobs_parallel, n_jobs)
    print("Launching jobs " + str(start) + " to " + str(stop))
    for m in mergers[start:stop]:
        m.start()
    for m in mergers[start:stop]:
        m.join()
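
# Once all batches have finished, each chunk of n_files_per_chunk input files
# should have produced one MDF file under <base_dir>/<key>/mdf, ready for
# standalone Allen processing (see the header comment above).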