Commit cd618769 authored by Javier Duarte

merge

parents d9456382 ab95f16c
@@ -118,21 +118,18 @@ campaign_labels.update(br_hh_names)
# poi defaults (value, range, points, taken from physics model) and labels
# note: C2V and CV do not follow kappa notation and are upper case to be consistent with the model
poi_data = DotDict(
r=DotDict(range=(-20.0, 20.0), label="r", sm_value=1.0),
r_gghh=DotDict(range=(-20.0, 20.0), label="r_{gghh}", sm_value=1.0),
r_qqhh=DotDict(range=(-20.0, 20.0), label="r_{qqhh}", sm_value=1.0),
r_vhh=DotDict(range=(-20.0, 20.0), label="r_{vhh}", sm_value=1.0),
kl=DotDict(range=(-30.0, 30.0), label=r"\kappa_{\lambda}", sm_value=1.0),
kt=DotDict(range=(-10.0, 10.0), label=r"\kappa_{t}", sm_value=1.0),
C2V=DotDict(range=(-10.0, 10.0), label=r"\kappa_{2V}", sm_value=1.0),
CV=DotDict(range=(-10.0, 10.0), label=r"\kappa_{V}", sm_value=1.0),
C2=DotDict(range=(-2.0, 3.0), label=r"c_{2}", sm_value=0.0),
CG=DotDict(range=(-2.0, 2.0), label=r"c_{g}", sm_value=0.0),
C2G=DotDict(range=(-2.0, 2.0), label=r"c_{2g}", sm_value=0.0),
r=DotDict(range=(-20.0, 20.0), label=r"$r$", sm_value=1.0),
r_gghh=DotDict(range=(-20.0, 20.0), label=r"$r_{gghh}$", sm_value=1.0),
r_qqhh=DotDict(range=(-20.0, 20.0), label=r"$r_{qqhh}$", sm_value=1.0),
r_vhh=DotDict(range=(-20.0, 20.0), label=r"$r_{vhh}$", sm_value=1.0),
kl=DotDict(range=(-30.0, 30.0), label=r"$\kappa_{\lambda}$", sm_value=1.0),
kt=DotDict(range=(-10.0, 10.0), label=r"$\kappa_{t}$", sm_value=1.0),
C2V=DotDict(range=(-10.0, 10.0), label=r"$\kappa_{2V}$", sm_value=1.0),
CV=DotDict(range=(-10.0, 10.0), label=r"$\kappa_{V}$", sm_value=1.0),
C2=DotDict(range=(-2.0, 3.0), label=r"$C_{2}$", sm_value=0.0),
CG=DotDict(range=(-2.0, 2.0), label=r"$C_{g}$", sm_value=0.0),
C2G=DotDict(range=(-2.0, 2.0), label=r"$C_{2g}$", sm_value=0.0),
)
# add "$"-embedded labels (skip the wrapping when a label above already contains "$")
for poi, data in poi_data.items():
    data["label_math"] = data.label if data.label.startswith("$") else "${}$".format(data.label)
# colors
colors = DotDict(
......
@@ -16,7 +16,9 @@ from collections import OrderedDict, defaultdict
import law
import six
from dhi.util import import_ROOT, real_path, multi_match, copy_no_collisions, TFileCache
from dhi.util import (
import_ROOT, real_path, multi_match, copy_no_collisions, TFileCache, prepare_output,
)
#: Parameter directives excluding groups, autoMCStats and nuisance edit lines.
@@ -593,10 +595,7 @@ def manipulate_datacard(datacard, target_datacard=None, read_only=False, read_st
# prepare the target location when given
if target_datacard:
target_datacard = real_path(target_datacard)
target_dirname = os.path.dirname(target_datacard)
if not os.path.exists(target_dirname):
os.makedirs(target_dirname)
target_datacard = prepare_output(target_datacard)
# prepare the writer
if writer == "simple":
@@ -862,13 +861,9 @@ def bundle_datacard(datacard, directory, shapes_directory=".", skip_shapes=False
datacard is returned.
"""
# prepare the directories
directory = real_path(directory)
if not os.path.exists(directory):
os.makedirs(directory)
directory = prepare_output(directory, is_dir=True)
shapes_directory_relative = not shapes_directory.startswith("/")
shapes_directory = real_path(os.path.join(directory, shapes_directory or "."))
if not os.path.exists(shapes_directory):
os.makedirs(shapes_directory)
shapes_directory = prepare_output(os.path.join(directory, shapes_directory or "."), is_dir=True)
# copy the card itself
src_datacard = real_path(datacard)
......
# coding: utf-8
"""
Collection of lightweight, functional helpers to create HEPData entries.
"""
from collections import OrderedDict
import yaml
from dhi.util import DotDict, import_ROOT, prepare_output, create_tgraph
from dhi.plots.util import get_graph_points
#
# setup and general helpers
#
# configure yaml to transparently encode OrderedDict and DotDict instances like normal dicts
representer = lambda dumper, data: dumper.represent_mapping("tag:yaml.org,2002:map", data.items())
yaml.add_representer(OrderedDict, representer)
yaml.add_representer(DotDict, representer)
class Dumper(yaml.Dumper):
"""
    Custom dumper class ensuring that sequence items are indented.
"""
def increase_indent(self, *args, **kwargs):
kwargs["indentless"] = False
return super(Dumper, self).increase_indent(*args, **kwargs)
def save_hep_data(data, path, **kwargs):
# default configs
kwargs.setdefault("indent", 2)
# forced configs
kwargs["Dumper"] = Dumper
# prepare the output
path = prepare_output(path)
# dump
with open(path, "w") as f:
yaml.dump(data, f, **kwargs)
    print("wrote HEPData file to {}".format(path))
return path
#
# structured data creators
#
def create_hist_data(independent_variables=None, dependent_variables=None):
# create the entry
data = OrderedDict()
# add placeholders for in/dependent variables
data["independent_variables"] = independent_variables or []
data["dependent_variables"] = dependent_variables or []
return data
def create_independent_variable(label, unit=None, values=None, parent=None):
v = OrderedDict()
# header
v["header"] = OrderedDict({"name": label})
if unit is not None:
v["header"]["units"] = unit
# values
v["values"] = values or []
# add to parent hist data
if parent is not None:
parent["independent_variables"].append(v)
return v
def create_dependent_variable(label, unit=None, qualifiers=None, values=None, parent=None):
v = OrderedDict()
# header
v["header"] = OrderedDict({"name": label})
if unit is not None:
v["header"]["units"] = unit
# qualifiers
if qualifiers is not None:
v["qualifiers"] = qualifiers
# values
v["values"] = values or []
# add to parent hist data
if parent is not None:
parent["dependent_variables"].append(v)
return v
def create_qualifier(name, value, unit=None, parent=None):
q = OrderedDict()
# name and value
q["name"] = name
q["value"] = value
# unit
if unit is not None:
q["units"] = unit
# add to parent dependent variable
if parent is not None:
parent.setdefault("qualifiers", []).append(q)
return q
def create_range(start, stop, parent=None):
r = OrderedDict([("low", start), ("high", stop)])
# add to parent values
if parent is not None:
parent.append(r)
return r
def create_value(value, errors=None, parent=None):
v = OrderedDict()
v["value"] = value
if errors is not None:
v["errors"] = errors
# add to parent values
if parent is not None:
parent.append(v)
return v
def create_error(value, label=None, parent=None):
e = OrderedDict()
# error values
if isinstance(value, (list, tuple)) and len(value) == 2:
e["asymerror"] = OrderedDict([("plus", value[0]), ("minus", value[1])])
else:
e["symerror"] = value
# label
if label is not None:
e["label"] = label
# add to parent value
if parent is not None:
parent.setdefault("errors", []).append(e)
return e
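# A minimal usage sketch (illustrative values and file name): the creators above
# nest via their "parent" arguments to build a complete HEPData table, which can
# then be serialized with save_hep_data, e.g.
#
#   data = create_hist_data()
#   create_independent_variable(
#       "$m_{HH}$", unit="GeV",
#       values=[create_range(250.0, 300.0), create_range(300.0, 400.0)],
#       parent=data,
#   )
#   dep = create_dependent_variable("Expected limit", unit="fb", parent=data)
#   create_qualifier("SQRT(S)", 13, unit="TeV", parent=dep)
#   create_value(10.2, errors=[create_error((1.1, -0.9), label="68% CL")], parent=dep["values"])
#   create_value(4.5, errors=[create_error(0.8, label="68% CL")], parent=dep["values"])
#   save_hep_data(data, "hepdata_table.yaml")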
#
# adapters
#
def create_independent_variable_from_x_axis(x_axis, label=None, unit=None, parent=None,
transform=None):
# default transform
if not callable(transform):
transform = lambda bin_number, low, high: (low, high)
# default label
if label is None:
label = x_axis.GetTitle()
# extract bin ranges
values = []
for b in range(1, x_axis.GetNbins() + 1):
low, high = x_axis.GetBinLowEdge(b), x_axis.GetBinUpEdge(b)
# transform
low, high = transform(b, low, high)
# add a new value
values.append(create_range(low, high))
return create_independent_variable(label, unit=unit, values=values, parent=parent)
def create_dependent_variable_from_hist(hist, label=None, unit=None, qualifiers=None, parent=None,
error_label=None, transform=None):
# default transform
if not callable(transform):
transform = lambda bin_number, value, err: (value, err)
# default label
if label is None:
label = hist.GetTitle()
# get values
values = []
for b in range(1, hist.GetXaxis().GetNbins() + 1):
v = hist.GetBinContent(b)
# extract the error
err = None
if error_label:
err_u = hist.GetBinErrorUp(b)
err_d = hist.GetBinErrorLow(b)
err = abs(err_u) if err_u == err_d else (err_u, -err_d)
# transform
v, err = transform(b, v, err)
errors = None if err is None else [create_error(err, label=error_label)]
# add a new value
values.append(create_value(v, errors=errors))
return create_dependent_variable(label, unit=unit, qualifiers=qualifiers, values=values,
parent=parent)
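# Illustrative sketch (assuming a ROOT histogram "h" exists): the x-axis and bin
# contents of a TH1 map onto one independent and one dependent variable, e.g.
#
#   data = create_hist_data()
#   create_independent_variable_from_x_axis(h.GetXaxis(), label="$m_{HH}$", unit="GeV", parent=data)
#   create_dependent_variable_from_hist(h, label="Events", error_label="stat", parent=data)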
def create_dependent_variable_from_graph(graph, label=None, unit=None, qualifiers=None, parent=None,
coord="y", values_x=None, error_label=None, transform=None):
ROOT = import_ROOT()
# checks
    if coord not in ("x", "y"):
        raise Exception("coord must be 'x' or 'y', got {}".format(coord))
# default transform
if not callable(transform):
transform = lambda index, x, y, err: (x, y, err)
# default label
if label is None:
label = graph.GetTitle()
# helper to create splines for interpolation
def make_spline(x, y):
# delete repeated horizontal endpoints which lead to interpolation failures
x, y = list(x), list(y)
if len(x) > 1 and x[0] == x[1]:
x, y = x[1:], y[1:]
if len(x) > 1 and x[-1] == x[-2]:
x, y = x[:-1], y[:-1]
return ROOT.TSpline3("spline", create_tgraph(len(x), x, y), "", x[0], x[-1])
    # build values dependent on the coordinate to extract
values = []
if coord == "x":
# when x coordinates are requested, just get graph values and optionally obtain errors
gx, gy = get_graph_points(graph)
for i, (x, y) in enumerate(zip(gx, gy)):
x, y = float(x), float(y)
# extract the error
err = None
if error_label:
err_u = graph.GetErrorXhigh(i)
err_d = graph.GetErrorXlow(i)
err = abs(err_u) if err_u == err_d else (err_u, -err_d)
# transform
x, y, err = transform(i, x, y, err)
errors = None if err is None else [create_error(err, label=error_label)]
# add a new value
values.append(create_value(x, errors=errors))
else: # coord == "y"
        # when y coordinates are requested, consider custom x values and use interpolation splines
# for both nominal values and errors
points = get_graph_points(graph, errors=True)
gx, gy, errors = points[0], points[1], points[2:]
has_errors, has_asym_errors = len(errors) > 0, len(errors) > 2
spline = make_spline(gx, gy)
if error_label and has_errors:
if has_asym_errors:
spline_err_u = make_spline(gx, errors[3])
spline_err_d = make_spline(gx, errors[2])
else:
spline_err = make_spline(gx, errors[1])
# determine x values to scan
if values_x is None:
values_x = gx
for i, x in enumerate(values_x):
x = float(x)
y = spline.Eval(x)
# extract the error
err = None
if error_label and has_errors:
if has_asym_errors:
err = (spline_err_u.Eval(x), -spline_err_d.Eval(x))
else:
err = spline_err.Eval(x)
# transform
x, y, err = transform(i, x, y, err)
errors = None if err is None else [create_error(err, label=error_label)]
# add a new value
values.append(create_value(y, errors=errors))
return create_dependent_variable(label, unit=unit, qualifiers=qualifiers, values=values,
parent=parent)
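# Illustrative sketch (assuming a TGraphAsymmErrors "g" from a kl scan and the
# "data" object from above): with the default coord="y", the graph is evaluated
# through interpolation splines at custom x values, so the table can be given a
# regular grid, e.g.
#
#   values_x = [-30.0 + 0.5 * i for i in range(121)]
#   create_dependent_variable_from_graph(g, label="Expected limit", unit="fb",
#       values_x=values_x, error_label="68% CL", parent=data)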
@@ -19,7 +19,7 @@ import scipy.interpolate
from dhi.tasks.combine import DatacardTask
from dhi.tasks.limits import UpperLimits
from dhi.util import real_path, get_dcr2_path
from dhi.util import real_path, get_dcr2_path, round_digits
def _is_r2c_bbbb_boosted_ggf(task):
@@ -121,13 +121,6 @@ def _get_limit_grid_interps(poi, scan_name):
return _limit_grid_interps[key]
def round_digits(v, n, round_fn=round):
if not v:
return v
exp = int(math.floor(math.log(abs(v), 10)))
return round_fn(v / 10.0**(exp - n + 1)) * 10**(exp - n + 1)
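# For illustration (assuming dhi.util.round_digits matches the helper removed
# above): the function keeps the n most significant digits of a value, e.g.
#
#   round_digits(0.03456, 2)           # -> 0.035
#   round_digits(1234.0, 2)            # -> 1200.0
#   round_digits(987.0, 1, math.ceil)  # -> 1000.0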
def define_limit_grid(task, scan_parameter_values, approx_points, debug=False):
"""
Hook called by :py:class:`tasks.limits.UpperLimit` to define a scan-parameter-dependent grid of
......
# coding: utf-8
"""
Reorganization of https://github.com/cms-analysis/HiggsAnalysis-CombinedLimit/blob/102x/python/TrilinearCouplingModels.py
for HH combination.
"""
import os
import re
from HiggsAnalysis.CombinedLimit.SMHiggsBuilder import SMHiggsBuilder
from HiggsAnalysis.CombinedLimit.PhysicsModel import SM_HIGG_DECAYS, SM_HIGG_PROD
# single Higgs production scaling coefficients
# WH and ZH coefficients are very similar --> build the VH coefficient as the average of the two
energy = "13TeV"
cXSmap_13 = {
"ggH": 0.66e-2,
"qqH": 0.64e-2,
"WH": 1.03e-2,
"ZH": 1.19e-2,
"ttH": 3.51e-2,
"VH": (0.5 * (1.03e-2 + 1.19e-2)),
}
EWKmap_13 = {
"ggH": 1.049,
"qqH": 0.932,
"WH": 0.93,
"ZH": 0.947,
"ttH": 1.014,
"VH": (0.5 * (0.93 + 0.947)),
}
dZH = -1.536e-3
# BR scaling vs kl (https://arxiv.org/abs/1709.08649 Eq 22)
cGammap = {
"hgg": 0.49e-2,
"hzz": 0.83e-2,
"hww": 0.73e-2,
"hgluglu": 0.66e-2,
"htt": 0,
"hbb": 0,
"hcc": 0,
"hss": 0,
"hmm": 0,
}
# ensure hss in the SM_HIGG_DECAYS
if "hss" not in SM_HIGG_DECAYS:
    # do not change it in place so as not to interfere with upstream code
SM_HIGG_DECAYS = SM_HIGG_DECAYS + ["hss"]
# single H production modes that are supported in the scaling
SM_SCALED_SINGLE_HIGG_PROD = ["ggZH", "tHq", "tHW", "ggH", "qqH", "ZH", "WH", "VH", "ttH"]
class HBRscaler(object):
"""
Produce single H and BR scalings for anomalous couplings, and produce XS*BR scalings for both H
and HH.
"""
def __init__(self, modelBuilder, doBRscaling, doHscaling):
super(HBRscaler, self).__init__()
self.modelBuilder = modelBuilder
# use SMHiggsBuilder to build single H XS and BR scalings
datadir = None
if "CMSSW_BASE" in os.environ:
datadir = os.path.expandvars("$CMSSW_BASE/src/HiggsAnalysis/CombinedLimit/data/lhc-hxswg")
elif "DHI_SOFTWARE" in os.environ:
datadir = os.path.expandvars("$DHI_SOFTWARE/HiggsAnalysis/CombinedLimit/data/lhc-hxswg")
self.SMH = SMHiggsBuilder(self.modelBuilder, datadir=datadir)
self.doBRscaling = doBRscaling
self.doHscaling = doHscaling
self.f_BR_scalings = []
self.f_H_scalings = []
if self.doBRscaling:
self.buildBRScalings()
if self.doHscaling:
self.buildHScalings()
def buildBRScalings(self):
for d in SM_HIGG_DECAYS:
self.SMH.makeBR(d)
        # FIXME: check how to deal with BR uncertainties; for now keep them frozen
for d in SM_HIGG_DECAYS:
self.modelBuilder.factory_("HiggsDecayWidth_UncertaintyScaling_%s[1.0]" % d)
# fix to have all BRs add up to unity
self.modelBuilder.factory_("sum::c7_SMBRs(%s)" % (",".join("SM_BR_" + d for d in SM_HIGG_DECAYS)))
# self.modelBuilder.out.function("c7_SMBRs").Print("")
# define resolved loops
self.SMH.makeScaling("hgluglu", Cb="1", Ctop="kt")
self.SMH.makeScaling("hgg", Cb="1", Ctop="kt", CW="CV", Ctau="1")
self.SMH.makeScaling("hzg", Cb="1", Ctop="kt", CW="CV", Ctau="1")
# BR scaling vs kl (https://arxiv.org/abs/1709.08649 Eq 22)
for dec, valC1 in cGammap.items():
self.modelBuilder.factory_("expr::kl_scalBR_%s('(@0-1) * %g', kl)" % (dec, valC1))
# partial widths as a function of kl, kt, and CV
self.modelBuilder.factory_("expr::CVktkl_Gscal_Z('(@0*@0+@3) * @1 * @2', CV, SM_BR_hzz, HiggsDecayWidth_UncertaintyScaling_hzz, kl_scalBR_hzz)")
self.modelBuilder.factory_("expr::CVktkl_Gscal_W('(@0*@0+@3) * @1 * @2', CV, SM_BR_hww, HiggsDecayWidth_UncertaintyScaling_hww, kl_scalBR_hww)")
self.modelBuilder.factory_("expr::CVktkl_Gscal_tau('(1+@4) * @0 * @2 + (1+@5)*@1*@3', SM_BR_htt, SM_BR_hmm, HiggsDecayWidth_UncertaintyScaling_htt, HiggsDecayWidth_UncertaintyScaling_hmm,kl_scalBR_htt, kl_scalBR_hmm)")
self.modelBuilder.factory_("expr::CVktkl_Gscal_top('(1+@2) * @0 * @1', SM_BR_hcc, HiggsDecayWidth_UncertaintyScaling_hcc, kl_scalBR_hcc)")
self.modelBuilder.factory_("expr::CVktkl_Gscal_bottom('(1+@3) * (@0*@2+@1)', SM_BR_hbb, SM_BR_hss, HiggsDecayWidth_UncertaintyScaling_hbb, kl_scalBR_hbb)")
self.modelBuilder.factory_("expr::CVktkl_Gscal_gluon('(@0+@3) * @1 * @2', Scaling_hgluglu, SM_BR_hgluglu, HiggsDecayWidth_UncertaintyScaling_hgluglu, kl_scalBR_hgluglu)")
        # no kl dependence for H->zg known yet?
self.modelBuilder.factory_("expr::CVktkl_Gscal_gamma('(@0+@6)*@1*@4 + @2*@3*@5', Scaling_hgg, SM_BR_hgg, Scaling_hzg, SM_BR_hzg, HiggsDecayWidth_UncertaintyScaling_hgg, HiggsDecayWidth_UncertaintyScaling_hzg, kl_scalBR_hgg)")
# fix to have all BRs add up to unity
self.modelBuilder.factory_("sum::CVktkl_SMBRs(%s)" % (",".join("SM_BR_" + d for d in SM_HIGG_DECAYS)))
# self.modelBuilder.out.function("CVktkl_SMBRs").Print("")
        # total width, normalized to the SM one (just the sum over the partial widths / SM total BR)
self.modelBuilder.factory_("expr::CVktkl_Gscal_tot('(@0+@1+@2+@3+@4+@5+@6)/@7', CVktkl_Gscal_Z, CVktkl_Gscal_W, CVktkl_Gscal_tau, CVktkl_Gscal_top, CVktkl_Gscal_bottom, CVktkl_Gscal_gluon, CVktkl_Gscal_gamma, CVktkl_SMBRs)")
# BRs, normalized to the SM ones: they scale as (partial/partial_SM) / (total/total_SM)
self.modelBuilder.factory_("expr::CVktkl_BRscal_hww('(@0*@0+@3)*@2/@1', CV, CVktkl_Gscal_tot, HiggsDecayWidth_UncertaintyScaling_hww, kl_scalBR_hww)")
self.modelBuilder.factory_("expr::CVktkl_BRscal_hzz('(@0*@0+@3)*@2/@1', CV, CVktkl_Gscal_tot, HiggsDecayWidth_UncertaintyScaling_hzz, kl_scalBR_hzz)")
self.modelBuilder.factory_("expr::CVktkl_BRscal_htt('(1+@2)*@1/@0', CVktkl_Gscal_tot, HiggsDecayWidth_UncertaintyScaling_htt, kl_scalBR_htt)")
self.modelBuilder.factory_("expr::CVktkl_BRscal_hmm('(1+@2)*@1/@0', CVktkl_Gscal_tot, HiggsDecayWidth_UncertaintyScaling_hmm, kl_scalBR_hmm)")
self.modelBuilder.factory_("expr::CVktkl_BRscal_hbb('(1+@2)*@1/@0', CVktkl_Gscal_tot, HiggsDecayWidth_UncertaintyScaling_hbb, kl_scalBR_hbb)")
self.modelBuilder.factory_("expr::CVktkl_BRscal_hcc('(1+@2)*@1/@0', CVktkl_Gscal_tot, HiggsDecayWidth_UncertaintyScaling_hcc, kl_scalBR_hcc)")
self.modelBuilder.factory_("expr::CVktkl_BRscal_hss('(1+@2)*@1/@0', CVktkl_Gscal_tot, HiggsDecayWidth_UncertaintyScaling_hss, kl_scalBR_hss)")
self.modelBuilder.factory_("expr::CVktkl_BRscal_hgg('(@0+@3)*@2/@1', Scaling_hgg, CVktkl_Gscal_tot, HiggsDecayWidth_UncertaintyScaling_hgg,kl_scalBR_hgg)")
self.modelBuilder.factory_("expr::CVktkl_BRscal_hzg('@0*@2/@1', Scaling_hzg, CVktkl_Gscal_tot, HiggsDecayWidth_UncertaintyScaling_hzg)")
self.modelBuilder.factory_("expr::CVktkl_BRscal_hgluglu('(@0+@3)*@2/@1', Scaling_hgluglu, CVktkl_Gscal_tot, HiggsDecayWidth_UncertaintyScaling_hgluglu, kl_scalBR_hgluglu)")
for d in SM_HIGG_DECAYS:
self.f_BR_scalings.append("CVktkl_BRscal_%s" % d)
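    # Schematically, the factory expressions above implement the partial width scalings
    # (cf. https://arxiv.org/abs/1709.08649, Eq. 22, with first-order coefficients C1
    # taken from cGammap)
    #   Gamma_x / Gamma_x^SM = kappa_x^2 + (kl - 1) * C1_x
    # and the BR scalings as the double ratio
    #   BR_x / BR_x^SM = (Gamma_x / Gamma_x^SM) / (Gamma_tot / Gamma_tot^SM)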
def buildHScalings(self):
# get VBF, tHq, tHW, ggZH cross section and resolved loops
self.SMH.makeScaling("qqH", CW="CV", CZ="CV")
self.SMH.makeScaling("tHq", CW="CV", Ctop="kt")
self.SMH.makeScaling("tHW", CW="CV", Ctop="kt")
self.SMH.makeScaling("ggZH", CZ="CV", Ctop="kt", Cb="1")
self.SMH.makeScaling("ggH", Cb="1", Ctop="kt", Cc="1")
for production in SM_HIGG_PROD:
if production in ["ggZH", "tHq", "tHW"]:
self.modelBuilder.factory_("expr::CVktkl_pos_XSscal_%s_%s('0.+@0*(@0>0)', Scaling_%s_%s)"
% (production, energy, production, energy))
self.f_H_scalings.append("CVktkl_pos_XSscal_%s_%s" % (production, energy))
elif production in ["ggH", "qqH"]:
EWK = EWKmap_13[production]
self.modelBuilder.factory_("expr::CVktkl_XSscal_%s_%s('(@1+(@0-1)*%g/%g)/((1-(@0*@0-1)*%g))', kl, Scaling_%s_%s)"
                    % (production, energy, cXSmap_13[production], EWK, dZH, production, energy))
self.modelBuilder.factory_("expr::CVktkl_pos_XSscal_%s_%s('0.+@0*(@0>0)', CVktkl_XSscal_%s_%s)"
% (production, energy, production, energy))
                self.f_H_scalings.append("CVktkl_pos_XSscal_%s_%s" % (production, energy))
elif production in ["ZH", "WH", "VH"]:
EWK = EWKmap_13[production]
self.modelBuilder.factory_("expr::CVktkl_XSscal_%s_%s('(@1*@1+(@0-1)*%g/%g)/((1-(@0*@0-1)*%g))', kl, CV)"
% (production, energy, cXSmap_13[production], EWK, dZH))
self.modelBuilder.factory_("expr::CVktkl_pos_XSscal_%s_%s('0.+@0*(@0>0)', CVktkl_XSscal_%s_%s)"
% (production, energy, production, energy))
self.f_H_scalings.append("CVktkl_pos_XSscal_%s_%s" % (production, energy))
elif production == "ttH":
EWK = EWKmap_13[production]
self.modelBuilder.factory_("expr::CVktkl_XSscal_%s_%s('(@1*@1+(@0-1)*%g/%g)/((1-(@0*@0-1)*%g))', kl, kt)"
% (production, energy, cXSmap_13[production], EWK, dZH))
self.modelBuilder.factory_("expr::CVktkl_pos_XSscal_%s_%s('0.+@0*(@0>0)', CVktkl_XSscal_%s_%s)"
% (production, energy, production, energy))
self.f_H_scalings.append("CVktkl_pos_XSscal_%s_%s" % (production, energy))
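    # Schematically, the kl-dependent cross section scalings above implement
    #   sigma / sigma_SM = (kappa_i^2 + (kl - 1) * C1 / EWK) / (1 - (kl^2 - 1) * dZH)
    # with C1 from cXSmap_13, the EWK factors from EWKmap_13, dZH defined above, and
    # kappa_i^2 the quadratic coupling dependence of the respective production mode
    # (CV^2 for WH/ZH/VH, kt^2 for ttH, the resolved loop Scaling_* for ggH and qqH);
    # the "pos" expressions then clip negative scalings to zero.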
def findBRScalings(self, process):
        # to automatically parse the Higgs decay from the process name, we need a fixed format,
        # "*_hXX[hYY[...]]Z" where hXX, hYY, etc. must be in the official SM_HIGG_DECAYS, and Z can be
        # an additional sub decay identifier that does not contain underscores
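        # (e.g. a hypothetical process name "ggHH_kl_1_kt_1_hbbhtt" would yield the
        # decays "hbb" and "htt"; the exact convention depends on the analysis datacards)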
BRstr = process.split("_")[-1]