Commit 16a9cdc3 authored by Marcel Rieger's avatar Marcel Rieger
Browse files

Remove debug lines.

parent 96bfb25c
......@@ -56,7 +56,7 @@ def remove_shape_bins(datacard, rules, directory=None, skip_shapes=False, mass="
interpreted:
1. Colon-separated bin indices to remove, starting at 1. Values in the format 'A-B' refer to a
range from A to B (inclusive).
range from A to B (inclusive). Omitting B will select all bins equal to and above A.
2. An expression 'PROCESS(<|>)THRESHOLD', with special processes 'S', 'B', 'SB', 'SOB', and
'STN' being interpreted as combined signal, background, signal+background, signal/background
and signal/sqrt(background). Process names support patterns where a leading '!' negates their
......@@ -108,8 +108,11 @@ def remove_shape_bins(datacard, rules, directory=None, skip_shapes=False, mass="
indices = set()
for part in expr.split(":"):
if "-" in part:
start, stop = map(int, part.split("-", 1))
indices |= set(range(start, stop + 1))
start, stop = part.split("-", 1)
# the stop value is optional
if not stop:
stop = 1000
indices |= set(range(int(start), int(stop) + 1))
else:
indices.add(int(part))
rule.append((INDICES, list(indices)))
......@@ -347,7 +350,8 @@ def remove_shape_bins(datacard, rules, directory=None, skip_shapes=False, mass="
# apply the comparison function
_indices = [(i + 1) for i, v in enumerate(bin_values) if comp_fn(v)]
indices.append(set(_indices))
# limit and store the indices
indices.append({i for i in _indices if (1 <= i <= bin_nums[bin_name])})
# AND concatenate indices to drop by finding those existing in all lists
joined_indices = set(
......@@ -362,7 +366,7 @@ def remove_shape_bins(datacard, rules, directory=None, skip_shapes=False, mass="
new_rates = {}
new_observations = {}
for bin_name, _shapes in shapes.items():
indices = {b for b in remove_bin_indices[bin_name] if b > 0}
indices = list(sorted({b for b in remove_bin_indices[bin_name] if b > 0}))
if not indices:
continue
......@@ -377,7 +381,6 @@ def remove_shape_bins(datacard, rules, directory=None, skip_shapes=False, mass="
new_hist = drop_shape_bins(hist, name, indices, owner)
if new_hist:
cache.write_tobj(tfile, new_hist, towner=owner, name=name)
print("old bins {}, new bins {}".format(hist.GetNbinsX(), new_hist.GetNbinsX()))
# remember rate or observation
if proc_name == "data_obs":
......
......@@ -552,26 +552,29 @@ class TFileCache(object):
ROOT.gROOT.ProcessLine("gErrorIgnoreLevel = kFatal;")
for abs_path, data in self._w_cache.items():
if data["tfile"] and data["tfile"].IsOpen():
if not skip_write:
data["tfile"].cd()
for tobj, towner, name in data["objects"]:
if towner:
towner.cd()
args = (name,) if name else ()
tobj.Write(*args)
data["tfile"].Close()
if not skip_write:
shutil.move(data["tmp_path"], abs_path)
self.logger.debug(
"moving back temporary file {} to {}".format(data["tmp_path"], abs_path)
)
if not data["tfile"] or not data["tfile"].IsOpen():
continue
if not skip_write:
data["tfile"].cd()
self.logger.debug("going to write {} objects".format(len(data["objects"])))
for tobj, towner, name in data["objects"]:
if towner:
towner.cd()
args = (name,) if name else ()
tobj.Write(*args)
self.logger.debug("written object '{}'".format(tobj.GetName()))
data["tfile"].Close()
if not skip_write:
shutil.move(data["tmp_path"], abs_path)
self.logger.debug(
"moving back temporary file {} to {}".format(data["tmp_path"], abs_path)
)
self.logger.debug(
"closed {} cached file(s) opened for writing".format(len(self._w_cache))
)
self.logger.debug("closed {} cached file(s) opened for writing".format(
len(self._w_cache)))
ROOT.gROOT.ProcessLine("gErrorIgnoreLevel = {};".format(ignore_level_orig))
# clear
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment