diff --git a/dhi/plots/likelihoods.py b/dhi/plots/likelihoods.py
index 28ebf6dc356da83f566c9b63aa98f37142f1eeab..b06db3acdffa2e024bd6f41397929cb3b66f1edf 100644
--- a/dhi/plots/likelihoods.py
+++ b/dhi/plots/likelihoods.py
@@ -1194,13 +1194,14 @@ def plot_likelihood_scans_2d(
     for i, d in enumerate(data):
         # join values for contour calculation
         smoothContour_temp = smoothContour[0] if len(smoothContour) == 1 else smoothContour[i]
-        print(smoothContour_temp, "!!!!!", d["name"])
+        if smoothContour_temp is None:
+            smoothContour_temp = ('None',)
         contours.append(
             get_contours(
                 d["values"][poi1],
                 d["values"][poi2],
                 d["values"]["dnll2"],
-                levels=[chi2_levels[2][1], chi2_levels[2][2]],
+                levels=[chi2_levels[2][1], chi2_levels[2][2], chi2_levels[2][3], chi2_levels[2][4], chi2_levels[2][5]],
                 frame_kwargs=[{"mode": "edge"}],
                 interpolation=interpolation_method,
                 smooth=smoothContour_temp,
@@ -1242,7 +1243,7 @@ def plot_likelihood_scans_2d(
         _color_sequence = [br_hh_colors.root[d["name"]] for d in data]
 
     # loop through data entries
-    for d, (cont1, cont2), col in zip(data, contours, _color_sequence[:len(data)]):
+    for d, (cont1, cont2, cont3, cont4, cont5), col in zip(data, contours, _color_sequence[:len(data)]):
         # evaluate the scan
         scan = evaluate_likelihood_scan_2d(
             d["values"][poi1],
@@ -1255,7 +1256,7 @@ def plot_likelihood_scans_2d(
             warn("2D likelihood evaluation failed for entry '{}'".format(d["name"]))
 
         # plot selected contours
-        g1, g2 = None, None
+        g1, g2, g3, g4, g5 = None, None, None, None, None
         for g1 in cont1:
             r.setup_graph(g1, props={"LineWidth": 2, "LineStyle": 1, "LineColor": colors[col]})
             if 1 in show_significances:
@@ -1266,6 +1267,18 @@ def plot_likelihood_scans_2d(
             r.setup_graph(g2, props={"LineWidth": 2, "LineStyle": 1, "LineColor": colors[col]})
             if 2 in show_significances:
                 draw_objs.append((g2, "SAME,L"))
+        for g3 in cont3:
+            r.setup_graph(g3, props={"LineWidth": 2, "LineStyle": 3, "LineColor": colors[col]})
+            if 3 in show_significances:
+                draw_objs.append((g3, "SAME,L"))
+        for g4 in cont4:
+            r.setup_graph(g4, props={"LineWidth": 2, "LineStyle": 3, "LineColor": colors[col]})
+            if 4 in show_significances:
+                draw_objs.append((g4, "SAME,L"))
+        for g5 in cont5:
+            r.setup_graph(g5, props={"LineWidth": 2, "LineStyle": 4, "LineColor": colors[col]})
+            if 5 in show_significances:
+                draw_objs.append((g5, "SAME,L"))
         name = expand_hh_channel_label(d["name"])
         if g1:
             legend_entries.append((g1, name, "L"))
@@ -1294,6 +1307,24 @@ def plot_likelihood_scans_2d(
                 legend_entries.append((g2_style, "#pm 2 #sigma", "L"))
             else:
                 warn("no secondary contour found, no line will be visible")
+            if g3 and 3 in show_significances:
+                g3_style = g3.Clone()
+                r.apply_properties(g3_style, {"LineColor": colors.black})
+                legend_entries.append((g3_style, "#pm 3 #sigma", "L"))
+            else:
+                warn("no 3 contour found, no line will be visible")
+            if g4 and 4 in show_significances:
+                g4_style = g4.Clone()
+                r.apply_properties(g4_style, {"LineColor": colors.black})
+                legend_entries.append((g4_style, "#pm 4 #sigma", "L"))
+            else:
+                warn("no 4 contour found, no line will be visible")
+            if g5 and 5 in show_significances:
+                g5_style = g5.Clone()
+                r.apply_properties(g5_style, {"LineColor": colors.black})
+                legend_entries.append((g5_style, "#pm 5 #sigma", "L"))
+            else:
+                warn("no 5 contour found, no line will be visible")
 
         # campaign label
         if campaign:
diff --git a/dhi/tasks/limits.py b/dhi/tasks/limits.py
index e26b9e59f25e05bdd6439e09288c5d68482e8a47..7297625a63a670d55c7bc4126978cf6fdf083793 100644
--- a/dhi/tasks/limits.py
+++ b/dhi/tasks/limits.py
@@ -10,6 +10,8 @@ import re
 
 import law
 import luigi
+import hashlib
+
 from dhi.tasks.base import BoxPlotTask, ModelParameters, view_output_plots
 from dhi.tasks.remote import HTCondorWorkflow
 from dhi.tasks.combine import (
@@ -352,6 +354,34 @@ class MergeUpperLimitsGrid(UpperLimitsScanBase):
         name = self.join_postfix(["limitgrid", self.get_output_postfix()]) + ".root"
         return self.target(name)
 
+    # generate a unique id for intermediate outputs, derived from the input paths
+    def generate_unique_id_from_inputs(self, inputs):
+        hasher = hashlib.md5()
+        for inp in inputs:
+            hasher.update(inp.path.encode("utf-8"))
+        return hasher.hexdigest()[:8]
+
+    # merge inputs in batches (default 100 files) into intermediate outputs
+    def batch_hadd(self, inputs, output, batch_size=100):
+        intermediate_outputs = []
+
+        unique_id = self.generate_unique_id_from_inputs(inputs)
+
+        for i in range(0, len(inputs), batch_size):
+            batch = inputs[i:i + batch_size]
+            intermediate_output_path = os.path.join(output.parent.path, f"intermediate_{unique_id}_{i}.root")
+            intermediate_output = law.LocalFileTarget(intermediate_output_path)
+
+            intermediate_dir = intermediate_output.parent
+            if not intermediate_dir.exists():
+                intermediate_dir.touch()
+
+            intermediate_outputs.append(intermediate_output)
+
+            law.root.hadd_task(self, batch, intermediate_output, local=True)
+
+        law.root.hadd_task(self, intermediate_outputs, output, local=True)
+
     @law.decorator.log
     @law.decorator.safe_output
     def run(self):
@@ -374,8 +404,13 @@ class MergeUpperLimitsGrid(UpperLimitsScanBase):
             input_paths.append(target.path)
             inputs.append(target)
 
-        # hadd using a helper
-        law.root.hadd_task(self, inputs, output, local=True)
+        # hadd in batches when there are more than 100 inputs
+        if len(inputs) > 100:
+            self.logger.info(f"More than 100 input files ({len(inputs)}), using batch processing.")
+            self.batch_hadd(inputs, output, batch_size=100)
+        else:
+            self.logger.info(f"Less than or equal to 100 input files ({len(inputs)}), merging directly.")
+            law.root.hadd_task(self, inputs, output, local=True)
 
 
 class PlotUpperLimits(UpperLimitsScanBase, POIPlotTask):