Skip to content
Snippets Groups Projects
Commit 83951c3d authored by Jan Lukas Spah's avatar Jan Lukas Spah :leaves:
Browse files

Flow outside syst loop

parent 6a109e3e
No related branches found
No related tags found
No related merge requests found
......@@ -207,7 +207,10 @@ class ZmmyProcessor(HggBaseProcessor):
eve_sel.add("n_dimuon", n_good_dimuon > 0)
# select photons
photons = events.Photon
<<<<<<< HEAD
=======
>>>>>>> master
good_photons = photons[select_photons_zmmy(self, photons)]
n_good_photon = ak.sum(ak.ones_like(good_photons.pt) > 0, axis=1)
eve_sel.add("n_photon", n_good_photon > 0)
......@@ -399,6 +402,34 @@ class ZmmyProcessor(HggBaseProcessor):
ntuple["mmy_phi"] = events.mmy.obj_mmy.phi
ntuple["mmy_mass"] = events.mmy.obj_mmy.mass
<<<<<<< HEAD
# Making the photon selection
photons = photons[eve_sel.all(*(eve_sel.names))]
photons["trkSumPtSolidConeDR04"] = ntuple["photon_trkSumPtSolidConeDR04"]
photons["trkSumPtHollowConeDR03"] = ntuple["photon_trkSumPtHollowConeDR03"]
# Performing per photon corrections using normalizing flows
if self.data_kind == "mc" and self.doFlow_corrections:
# Applying the Flow corrections to all photons before pre-selection
counts = ak.num(photons)
corrected_inputs,var_list = calculate_flow_corrections(photons, events, self.meta["flashggPhotons"]["flow_inputs"], self.meta["flashggPhotons"]["Isolation_transform_order"], year=self.year[dataset_name][0])
# Store the raw nanoAOD value and update photon ID MVA value for preselection
photons["mvaID_run3"] = ak.unflatten(self.add_photonid_mva_run3(photons, events), counts)
photons["mvaID_nano"] = photons["mvaID"]
# Store the raw values of the inputs and update the input values with the corrections since some variables are used in the preselection
for i in range(len(var_list)):
photons["raw_" + str(var_list[i])] = ak.unflatten(np.ascontiguousarray(corrected_inputs[:, i]), counts) #photons[str(var_list[i])]
photons[str(var_list[i])] = ak.unflatten(corrected_inputs[:,i] , counts)
# Re-evaluate mvaID after corrections
photons["mvaID"] = ak.unflatten(self.add_photonid_mva_run3(photons, events), counts)
=======
>>>>>>> master
if self.data_kind == "mc":
# annotate diphotons with dZ information (difference between z position of GenVtx and PV) as required by flashggfinalfits
ntuple["dZ"] = events.GenVtx.z - events.PV.z
......
......@@ -394,6 +394,25 @@ class HggBaseProcessor(processor.ProcessorABC): # type: ignore
)
continue
# Computing the normalizing flow correction
if self.data_kind == "mc" and self.doFlow_corrections:
# Applying the Flow corrections to all photons before pre-selection
counts = awkward.num(original_photons)
corrected_inputs,var_list = calculate_flow_corrections(original_photons, events, self.meta["flashggPhotons"]["flow_inputs"], self.meta["flashggPhotons"]["Isolation_transform_order"], year=self.year[dataset_name][0])
# Store the raw nanoAOD value and update photon ID MVA value for preselection
original_photons["mvaID_run3"] = awkward.unflatten(self.add_photonid_mva_run3(original_photons, events), counts)
original_photons["mvaID_nano"] = original_photons["mvaID"]
# Store the raw values of the inputs and update the input values with the corrections since some variables are used in the preselection
for i in range(len(var_list)):
original_photons["raw_" + str(var_list[i])] = original_photons[str(var_list[i])]
original_photons[str(var_list[i])] = awkward.unflatten(corrected_inputs[:,i] , counts)
original_photons["mvaID"] = awkward.unflatten(self.add_photonid_mva_run3(original_photons, events), counts)
# Applying systematic variations
photons_dct = {}
photons_dct["nominal"] = original_photons
logger.debug(original_photons.systematics.fields)
......@@ -432,24 +451,6 @@ class HggBaseProcessor(processor.ProcessorABC): # type: ignore
if self.photonid_mva_EB and self.photonid_mva_EE:
photons = self.add_photonid_mva(photons, events)
# Computing the normalizing flow correction
if self.data_kind == "mc" and self.doFlow_corrections:
# Applying the Flow corrections to all photons before pre-selection
counts = awkward.num(photons)
corrected_inputs,var_list = calculate_flow_corrections(photons, events, self.meta["flashggPhotons"]["flow_inputs"], self.meta["flashggPhotons"]["Isolation_transform_order"], year=self.year[dataset_name][0])
# Store the raw nanoAOD value and update photon ID MVA value for preselection
photons["mvaID_run3"] = awkward.unflatten(self.add_photonid_mva_run3(photons, events), counts)
photons["mvaID_nano"] = photons["mvaID"]
# Store the raw values of the inputs and update the input values with the corrections since some variables are used in the preselection
for i in range(len(var_list)):
photons["raw_" + str(var_list[i])] = photons[str(var_list[i])]
photons[str(var_list[i])] = awkward.unflatten(corrected_inputs[:,i] , counts)
photons["mvaID"] = awkward.unflatten(self.add_photonid_mva_run3(photons, events), counts)
# photon preselection
photons = photon_preselection(self, photons, events, year=self.year[dataset_name][0])
# sort photons in each event descending in pt
......
......@@ -172,6 +172,26 @@ class TagAndProbeProcessor(HggBaseProcessor):
continue
original_photons = events.Photon
# Performing per photon corrections using normalizing flows
if self.data_kind == "mc" and self.doFlow_corrections:
# Applying the Flow corrections to all photons before pre-selection
counts = ak.num(original_photons)
corrected_inputs,var_list = calculate_flow_corrections(original_photons, events, self.meta["flashggPhotons"]["flow_inputs"], self.meta["flashggPhotons"]["Isolation_transform_order"], year=self.year[dataset_name][0])
# Store the raw nanoAOD value and update photon ID MVA value for preselection
original_photons["mvaID_run3"] = ak.unflatten(self.add_photonid_mva_run3(original_photons, events), counts)
original_photons["mvaID_nano"] = original_photons["mvaID"]
# Store the raw values of the inputs and update the input values with the corrections since some variables are used in the preselection
for i in range(len(var_list)):
original_photons["raw_" + str(var_list[i])] = original_photons[str(var_list[i])]
original_photons[str(var_list[i])] = ak.unflatten(corrected_inputs[:,i] , counts)
# Re-evaluate mvaID after corrections
original_photons["mvaID"] = ak.unflatten(self.add_photonid_mva_run3(original_photons, events), counts)
# systematic object variations
for systematic_name in systematic_names:
if systematic_name in available_object_systematics.keys():
......@@ -222,25 +242,6 @@ class TagAndProbeProcessor(HggBaseProcessor):
if self.photonid_mva_EB and self.photonid_mva_EE:
photons = self.add_photonid_mva(photons, events)
# Performing per photon corrections using normalizing flows
# The corrections are made before pre-selection so it enables us to recalculate pre-selection SFs
if self.data_kind == "mc" and self.doFlow_corrections:
# Applying the Flow corrections to all photons before pre-selection
counts = ak.num(photons)
corrected_inputs,var_list = calculate_flow_corrections(photons, events, self.meta["flashggPhotons"]["flow_inputs"], self.meta["flashggPhotons"]["Isolation_transform_order"], year=self.year[dataset_name][0])
# Store the raw nanoAOD value and update photon ID MVA value for preselection
photons["mvaID_run3"] = ak.unflatten(self.add_photonid_mva_run3(photons, events), counts)
photons["mvaID_nano"] = photons["mvaID"]
# Store the raw values of the inputs and update the input values with the corrections since some variables are used in the preselection
for i in range(len(var_list)):
photons["raw_" + str(var_list[i])] = photons[str(var_list[i])]
photons[str(var_list[i])] = ak.unflatten(corrected_inputs[:,i] , counts)
photons["mvaID"] = ak.unflatten(self.add_photonid_mva_run3(photons, events), counts)
# photon preselection
photons = photon_preselection(
self, photons, events, apply_electron_veto=False, year=self.year[dataset_name][0]
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment