From 16a4b222c054a1fd193ead642ce30b2608387529 Mon Sep 17 00:00:00 2001 From: Marcel R Date: Wed, 25 Nov 2020 19:54:47 +0100 Subject: [PATCH 01/11] Update get_ggf_xsec in model file for plotting purposes. --- dhi/models/HHModelPinv.py | 137 ++++++++++++++++---------------------- dhi/tasks/nlo/base.py | 6 +- 2 files changed, 58 insertions(+), 85 deletions(-) diff --git a/dhi/models/HHModelPinv.py b/dhi/models/HHModelPinv.py index abe1ce0..e5663a0 100644 --- a/dhi/models/HHModelPinv.py +++ b/dhi/models/HHModelPinv.py @@ -15,7 +15,6 @@ from numpy import matrix from numpy import linalg from sympy import Matrix from collections import OrderedDict -import functools class VBFHHSample: @@ -438,14 +437,15 @@ ggf_samples = OrderedDict([ ]) # vbf samples with keys (CV, C2V, kl), SM point first, then ordered by kl, then C2V, then CV +br_hh_4b = 0.33919 vbf_samples = OrderedDict([ - ((1, 1, 1), VBFHHSample(1, 1, 1, val_xs=0.00054 / 0.3364, label="qqHH_CV_1_C2V_1_kl_1")), - ((1, 1, 0), VBFHHSample(1, 1, 0, val_xs=0.00145 / 0.3364, label="qqHH_CV_1_C2V_1_kl_0")), - ((1, 1, 2), VBFHHSample(1, 1, 2, val_xs=0.00044 / 0.3364, label="qqHH_CV_1_C2V_1_kl_2")), - ((1, 0, 1), VBFHHSample(1, 0, 1, val_xs=0.00880 / 0.3364, label="qqHH_CV_1_C2V_0_kl_1")), - ((1, 2, 1), VBFHHSample(1, 2, 1, val_xs=0.00472 / 0.3364, label="qqHH_CV_1_C2V_2_kl_1")), - ((0.5, 1, 1), VBFHHSample(0.5, 1, 1, val_xs=0.00353 / 0.3364, label="qqHH_CV_0p5_C2V_1_kl_1")), - ((1.5, 1, 1), VBFHHSample(1.5, 1, 1, val_xs=0.02149 / 0.3364, label="qqHH_CV_1p5_C2V_1_kl_1")), + ((1, 1, 1), VBFHHSample(1, 1, 1, val_xs=0.00054 / br_hh_4b, label="qqHH_CV_1_C2V_1_kl_1")), + ((1, 1, 0), VBFHHSample(1, 1, 0, val_xs=0.00145 / br_hh_4b, label="qqHH_CV_1_C2V_1_kl_0")), + ((1, 1, 2), VBFHHSample(1, 1, 2, val_xs=0.00044 / br_hh_4b, label="qqHH_CV_1_C2V_1_kl_2")), + ((1, 0, 1), VBFHHSample(1, 0, 1, val_xs=0.00880 / br_hh_4b, label="qqHH_CV_1_C2V_0_kl_1")), + ((1, 2, 1), VBFHHSample(1, 2, 1, val_xs=0.00472 / br_hh_4b, label="qqHH_CV_1_C2V_2_kl_1")), + ((0.5, 1, 1), VBFHHSample(0.5, 1, 1, val_xs=0.00353 / br_hh_4b, label="qqHH_CV_0p5_C2V_1_kl_1")), + ((1.5, 1, 1), VBFHHSample(1.5, 1, 1, val_xs=0.02149 / br_hh_4b, label="qqHH_CV_1p5_C2V_1_kl_1")), ]) @@ -528,6 +528,7 @@ model_no_vbf_C2V2 = create_model("model_no_vbf_C2V2", skip_ggf=[(0, 1)], skip_ model_no_vbf_CV0p5 = create_model("model_no_vbf_CV0p5", skip_ggf=[(0, 1)], skip_vbf=[(0.5, 1, 1)]) model_no_vbf_CV1p5 = create_model("model_no_vbf_CV1p5", skip_ggf=[(0, 1)], skip_vbf=[(1.5, 1, 1)]) + # legacy objects for development GGF_sample_list = [ # GGFHHSample(0, 1, val_xs=0.06007, label="ggHH_kl_0_kt_1"), @@ -536,13 +537,13 @@ GGF_sample_list = [ GGFHHSample(5, 1, val_xs=0.07903, label="ggHH_kl_5_kt_1"), ] VBF_sample_list = [ - VBFHHSample(1, 1, 1, val_xs=0.00054 / 0.3364, label="qqHH_CV_1_C2V_1_kl_1"), - VBFHHSample(1, 1, 0, val_xs=0.00145 / 0.3364, label="qqHH_CV_1_C2V_1_kl_0"), - VBFHHSample(1, 1, 2, val_xs=0.00044 / 0.3364, label="qqHH_CV_1_C2V_1_kl_2"), - # VBFHHSample(1, 0, 1, val_xs=0.00880 / 0.3364, label="qqHH_CV_1_C2V_0_kl_1"), - VBFHHSample(1, 2, 1, val_xs=0.00472 / 0.3364, label="qqHH_CV_1_C2V_2_kl_1"), - VBFHHSample(0.5, 1, 1, val_xs=0.00353 / 0.3364, label="qqHH_CV_0p5_C2V_1_kl_1"), - VBFHHSample(1.5, 1, 1, val_xs=0.02149 / 0.3364, label="qqHH_CV_1p5_C2V_1_kl_1"), + VBFHHSample(1, 1, 1, val_xs=0.00054 / br_hh_4b, label="qqHH_CV_1_C2V_1_kl_1"), + VBFHHSample(1, 1, 0, val_xs=0.00145 / br_hh_4b, label="qqHH_CV_1_C2V_1_kl_0"), + VBFHHSample(1, 1, 2, val_xs=0.00044 / br_hh_4b, label="qqHH_CV_1_C2V_1_kl_2"), + # 
VBFHHSample(1, 0, 1, val_xs=0.00880 / br_hh_4b, label="qqHH_CV_1_C2V_0_kl_1"), + VBFHHSample(1, 2, 1, val_xs=0.00472 / br_hh_4b, label="qqHH_CV_1_C2V_2_kl_1"), + VBFHHSample(0.5, 1, 1, val_xs=0.00353 / br_hh_4b, label="qqHH_CV_0p5_C2V_1_kl_1"), + VBFHHSample(1.5, 1, 1, val_xs=0.02149 / br_hh_4b, label="qqHH_CV_1p5_C2V_1_kl_1"), ] HHdefault = HHModel( ggf_sample_list=GGF_sample_list, @@ -551,49 +552,48 @@ HHdefault = HHModel( ) -def create_ggf_xsec_func(formula=None, nnlo=True): +def get_ggf_xsec(kl=1.0, unc=None): """ - Creates and returns a function that can be used to calculate numeric ggF cross section values - given an appropriate *formula*, which defaults to *HHdefault.ggf_formula*. The returned function - has the signature ``(kl=1.0, kt=1.0)``. When *nnlo* is *True*, a k-factor for the conversion - from NLO to NNLO is applied. Example: - - .. code-block:: python + Calculates the numeric ggF cross section value at NNLO for a certain *kl* value. When *unc* is + "down" ("up"), the down (up) variation of the cross section is returned instead, composed of a + *kl* dependent scale uncertainty and an independent PDF uncertainty of 3%. - get_ggf_xec = create_ggf_xsec_func() - - print(get_ggf_xec(2.)) - # -> 0.0133045... (or similar) + Formulas taken from https://twiki.cern.ch/twiki/bin/view/LHCPhysics/LHCHXSWGHH?rev=60. """ - if formula is None: - formula = HHdefault.ggf_formula - - # create the lambdify'ed evaluation function - func = lambdify(symbols("kl kt s0 s1 s2"), formula.sigma) - - # scaling functions in case nnlo is set - scale_nlo = lambda kl: 62.5339 - 44.3231 * kl + 9.6340 * kl**2. - scale_nnlo = lambda kl: 70.3874 - 50.4111 * kl + 11.0595 * kl**2. - nlo2nnlo = lambda xsec, kl: xsec * scale_nnlo(kl) / (1.115 * scale_nlo(kl)) - - # wrap into another function to apply defaults and nlo-to-nnlo scaling - @functools.wraps(func) - def wrapper(kl=1., kt=1.): - xsec = func(kl, kt, *(formula.sample_list[i].val_xs for i in range(3)))[0, 0] - - if nnlo: - xsec = nlo2nnlo(xsec, kl) - - return xsec - - return wrapper + # compute the nominal cross section + xsec_nom = 70.3874 - 50.4111 * kl + 11.0595 * kl**2. + + if unc is None: + # nothing to do when no uncertainty is requested + return xsec_nom + else: + # compute absolute values of the scale uncertainties, preserving signs + if unc.lower() == "up": + xsec_unc = max( + 72.0744 - 51.7362 * kl + 11.3712 * kl**2., + 70.9286 - 51.5708 * kl + 11.4497 * kl**2., + ) - xsec_nom + elif unc.lower() == "down": + xsec_unc = min( + 66.0621 - 46.7458 * kl + 10.1673 * kl**2., + 66.7581 - 47.7210 * kl + 10.4535 * kl**2., + ) - xsec_nom + else: + raise ValueError("when set, unc must be 'up' or 'down', got '{}'".format(unc)) + + # combine with flat 3% PDF uncertainty + scale_unc_sign = 1 if xsec_unc > 0 else -1 + xsec_unc = scale_unc_sign * ((0.03 * xsec_nom)**2. + (xsec_unc)**2.)**0.5 + + # add signed uncertainty back to nominal value and return + return xsec_nom + xsec_unc def create_vbf_xsec_func(formula=None): """ Creates and returns a function that can be used to calculate numeric VBF cross section values - given an appropriate *formula*, which defaults to *HHdefault.vbf_formula*. The returned function - has the signature ``(c2v=1.0, cv=1.0, kl=1.0)``. + given an appropriate *formula*, which defaults to *model_default.vbf_formula*. The returned + function has the signature ``(c2v=1.0, cv=1.0, kl=1.0)``. .. code-block:: python @@ -603,45 +603,20 @@ def create_vbf_xsec_func(formula=None): # -> 0.0156445... 
(or similar) """ if formula is None: - formula = HHdefault.vbf_formula + formula = model_default.vbf_formula + n_samples = len(formula.sample_list) # create the lambdify'ed evaluation function - func = lambdify(symbols("C2V CV kl s0 s1 s2 s3 s4 s5"), formula.sigma) + symbol_names = ["C2V", "CV", "kl"] + list(map("s{}".format, range(n_samples))) + func = lambdify(symbols(symbol_names), formula.sigma) # wrap into another function to apply defaults - @functools.wraps(func) def wrapper(c2v=1., cv=1., kl=1.): - xsec = func(c2v, cv, kl, *(formula.sample_list[i].val_xs for i in range(6)))[0, 0] + xsec = func(c2v, cv, kl, *(formula.sample_list[i].val_xs for i in range(n_samples)))[0, 0] return xsec return wrapper -#: Default function for getting ggF cross sections using the formula of the HHdefault model. -get_ggf_xsec = create_ggf_xsec_func() - -#: Default function for getting VBF cross sections using the formula of the HHdefault model. +#: Default function for getting VBF cross sections using the formula of the *model_default* model. get_vbf_xsec = create_vbf_xsec_func() - - -# debug -#bbZZ_model = False - -#if(bbZZ_model): - -# GGF_sample_list_bbZZ = [ -# GGFHHSample(1,1, val_xs = 0.02675, label = 'ggHH_hbbhzz_process' ), -# GGFHHSample(2,1, val_xs = 0.01238, label = 'klambda2_process' ), - #GGFHHSample(0,1, val_xs = 0.06007, label = 'klambda0_process' ), -# GGFHHSample(5,1, val_xs = 0.07903, label = 'klambda5_process' ), - # GGFHHSample(2.45,1, val_xs = 0.01133, label = 'ggHH_kl_2p45_kt_1' ), -# ] - -# HHbbZZ4l = HHModel( -# ggf_sample_list = GGF_sample_list_bbZZ, -# vbf_sample_list = VBF_sample_list, -# name = 'HH_bbZZ_default' -# ) - -# f = GGFHHFormula(GGF_sample_list) -# f = VBFHHFormula(VBF_sample_list) diff --git a/dhi/tasks/nlo/base.py b/dhi/tasks/nlo/base.py index 038a7d7..496cecf 100644 --- a/dhi/tasks/nlo/base.py +++ b/dhi/tasks/nlo/base.py @@ -79,11 +79,9 @@ class HHModelTask(AnalysisTask): # get the proper xsec getter for the formula of the current model module, model = self.load_hh_model() if poi == "kl": - formula = model.ggf_formula - get_xsec = module.create_ggf_xsec_func(formula) + get_xsec = module.get_ggf_xsec else: # C2V - formula = model.vbf_formula - get_xsec = module.create_vbf_xsec_func(formula) + get_xsec = module.create_vbf_xsec_func(model.vbf_formula) # create and return the function including scaling return lambda *args, **kwargs: get_xsec(*args, **kwargs) * scale -- GitLab From 9090a798cc281f8bc98ced3ea2b2d0832f35ef5b Mon Sep 17 00:00:00 2001 From: Marcel R Date: Wed, 25 Nov 2020 19:56:52 +0100 Subject: [PATCH 02/11] More verbose errors in getYieldScale of physics model. 
--- dhi/models/HHModelPinv.py | 78 ++++++++++++++++++--------------------- 1 file changed, 35 insertions(+), 43 deletions(-) diff --git a/dhi/models/HHModelPinv.py b/dhi/models/HHModelPinv.py index e5663a0..edbc441 100644 --- a/dhi/models/HHModelPinv.py +++ b/dhi/models/HHModelPinv.py @@ -376,53 +376,45 @@ class HHModel(PhysicsModel): self.f_r_vbf_names.append(f_prod_name) #bookkeep the scaling that has been created - def getYieldScale(self,bin,process): - - ## my control to verify for a unique association between process <-> scaling function - try: - self.scalingMap - except AttributeError: - self.scalingMap = {} - - if not self.DC.isSignal[process]: return 1 - # match the process name in the datacard to the input sample of the calculation - # this is the only point where the two things must be matched - - if not process in self.scalingMap: - self.scalingMap[process] = [] - - imatched_ggf = [] - imatched_vbf = [] - - for isample, sample in enumerate(self.ggf_formula.sample_list): - if process.startswith(sample.label): - # print self.name, ": {:>40} ===> {:>40}".format(process, sample.label) - imatched_ggf.append(isample) - - for isample, sample in enumerate(self.vbf_formula.sample_list): - if process.startswith(sample.label): - # print self.name, ": {:>40} ===> {:>40}".format(process, sample.label) - imatched_vbf.append(isample) - - ## this checks that a process finds a unique scaling - if len(imatched_ggf) + len(imatched_vbf) != 1: - print("ERROR: in HH model {} for process {} in bin {}: {} GGF and {} VBF name matches".format( - self.name, process, bin, len(imatched_ggf), len(imatched_vbf))) - # print "[ERROR] : in HH model named", self.name, "there are", len(imatched_ggf), "GGF name matches and", len(imatched_vbf), "VBF name matches" - # raise RuntimeError('HHModel : could not uniquely match the process %s to the expected sample list' % process) - - if len(imatched_ggf) == 1: - isample = imatched_ggf[0] - self.scalingMap[process].append((isample, 'GGF')) + def getYieldScale(self, bin, process): + # only deal with signal processes here + if not self.DC.isSignal[process]: + return 1. + + def find_matches(samples, kind): + # get the matching ggf sample index + matching_indices = [] + for i, sample in enumerate(samples): + if process.startswith(sample.label): + matching_indices.append(i) + + # complain when there is more than one hit + if len(matching_indices) > 1: + raise Exception("found {} matches for {} signal process {} in bin {}".format( + len(matching_indices), kind, process, bin)) + + # return the index when there is only one hit + if len(matching_indices) == 1: + return matching_indices[0] + + # otherwise, return nothing + return None + + # ggf match? + isample = find_matches(self.ggf_formula.sample_list, "GGF") + if isample is not None: + self.scalingMap[process].append((isample, "GGF")) return self.f_r_ggf_names[isample] - if len(imatched_vbf) == 1: - isample = imatched_vbf[0] - self.scalingMap[process].append((isample, 'VBF')) + # vbf match? 
+ isample = find_matches(self.vbf_formula.sample_list, "VBF") + if isample is not None: + self.scalingMap[process].append((isample, "VBF")) return self.f_r_vbf_names[isample] - return 1 ## for signal processes that are not ggHH or qqHH -> to implement here the single H scaling - # raise RuntimeError('HHModel : fatal error in getYieldScale - this should never happen') + # complain when neither a ggf nor a vbf match was found + raise Exception("singal process {} did not match any GGF or VBF samples in bin {}".format( + process, bin)) ######################################### -- GitLab From f40f99023b2a846731f154b35aab5ebe825fe23b Mon Sep 17 00:00:00 2001 From: Marcel R Date: Thu, 26 Nov 2020 09:51:08 +0100 Subject: [PATCH 03/11] Add note on cross section values. --- dhi/models/HHModelPinv.py | 50 ++++++++++++++++++++++----------------- 1 file changed, 28 insertions(+), 22 deletions(-) diff --git a/dhi/models/HHModelPinv.py b/dhi/models/HHModelPinv.py index edbc441..83d0580 100644 --- a/dhi/models/HHModelPinv.py +++ b/dhi/models/HHModelPinv.py @@ -420,6 +420,7 @@ class HHModel(PhysicsModel): ######################################### # ggf samples with keys (kl, kt), ordered by kl +# (the cross section values are not used in the model, they are listed just for completeness) ggf_samples = OrderedDict([ ((0, 1), GGFHHSample(0, 1, val_xs=0.06007, label="ggHH_kl_0_kt_1")), ((1, 1), GGFHHSample(1, 1, val_xs=0.02675, label="ggHH_kl_1_kt_1")), @@ -429,6 +430,7 @@ ggf_samples = OrderedDict([ ]) # vbf samples with keys (CV, C2V, kl), SM point first, then ordered by kl, then C2V, then CV +# (the cross section values are not used in the model, but only in create_vbf_xsec_func below) br_hh_4b = 0.33919 vbf_samples = OrderedDict([ ((1, 1, 1), VBFHHSample(1, 1, 1, val_xs=0.00054 / br_hh_4b, label="qqHH_CV_1_C2V_1_kl_1")), @@ -443,38 +445,38 @@ vbf_samples = OrderedDict([ def get_ggf_samples(keys): """ - Returns a list of :py:class:`GGFHHSample` instances mapped to certain *keys* in the ordered - *ggf_samples* dictionary above. A key can either be a tuple of (kl, kt) values as used in dict, - or an index. Example: + Returns a list of :py:class:`GGFHHSample` instances that are mapped to certain *keys* in the + ordered *ggf_samples* dictionary above. A key can either be a tuple of (kl, kt) values as used + in the dict, or a numeric index. Example: .. code-block:: python - get_ggf_samples([2, (5, 1)]) + get_ggf_samples([(2.45, 1), 3]) # -> [GGFHHSample:ggHH_kl_2p45_kt_1, GGFHHSample:ggHH_kl_5_kt_1] """ - keys = [ - (key if isinstance(key, tuple) else ggf_samples.keys()[key]) + all_keys = list(ggf_samples.keys()) + return [ + ggf_samples[key if isinstance(key, tuple) else all_keys[key]] for key in keys ] - return [ggf_samples[key] for key in keys] def get_vbf_samples(keys): """ - Returns a list of :py:class:`VBFHHSample` instances mapped to certain *keys* in the ordered - *vbf_samples* dictionary above. A key can either be a tuple of (CV, C2V, kl) values as used in - dict, or an index. Example: + Returns a list of :py:class:`VBFHHSample` instances that are mapped to certain *keys* in the + ordered *vbf_samples* dictionary above. A key can either be a tuple of (CV, C2V, kl) values as + used in the dict, or a numeric index. Example: .. 
code-block:: python get_vbf_samples([2, (1, 2, 1)]) # -> [VBFHHSample:qqHH_CV_1_C2V_1_kl_2, VBFHHSample:qqHH_CV_1_C2V_2_kl_1] """ - keys = [ - (key if isinstance(key, tuple) else vbf_samples.keys()[key]) + all_keys = list(ggf_samples.keys()) + return [ + vbf_samples[key if isinstance(key, tuple) else all_keys[key]] for key in keys ] - return [vbf_samples[key] for key in keys] def create_model(name, skip_ggf=None, skip_vbf=None): @@ -484,13 +486,17 @@ def create_model(name, skip_ggf=None, skip_vbf=None): through :py:func:`get_ggf_samples` and :py:func:`get_vbf_samples`. The order of passed keys to be skipped does not matter. """ - # get all use sample keys - ggf_keys = list(ggf_samples.keys()) - vbf_keys = list(vbf_samples.keys()) - for key in skip_ggf or []: - ggf_keys.remove(key) - for key in skip_vbf or []: - vbf_keys.remove(key) + # get all unskipped ggf keys + ggf_keys = [] + for i, key in enumerate(ggf_samples): + if not skip_ggf or not any(skip_key in (i, key) for skip_key in skip_ggf): + ggf_keys.append(key) + + # get all unskipped vbf keys + vbf_keys = [] + for i, key in enumerate(vbf_samples): + if not skip_vbf or not any(skip_key in (i, key) for skip_key in skip_vbf): + vbf_keys.append(key) # create the return the model return HHModel( @@ -500,7 +506,7 @@ def create_model(name, skip_ggf=None, skip_vbf=None): ) -# standard of models +# some named, default models model_all = create_model("model_all") model_default = create_model("model_default", skip_ggf=[(0, 1)], skip_vbf=[(1, 0, 1)]) model_bbgg = create_model("model_bbgg", skip_ggf=[(0, 1)], skip_vbf=[(0.5, 1, 1)]) @@ -575,7 +581,7 @@ def get_ggf_xsec(kl=1.0, unc=None): # combine with flat 3% PDF uncertainty scale_unc_sign = 1 if xsec_unc > 0 else -1 - xsec_unc = scale_unc_sign * ((0.03 * xsec_nom)**2. + (xsec_unc)**2.)**0.5 + xsec_unc = scale_unc_sign * ((0.03 * xsec_nom)**2. + xsec_unc**2.)**0.5 # add signed uncertainty back to nominal value and return return xsec_nom + xsec_unc -- GitLab From 6d05f49353dcf9c8e9dd7a70d701344106fe28c9 Mon Sep 17 00:00:00 2001 From: Marcel R Date: Thu, 26 Nov 2020 11:37:06 +0100 Subject: [PATCH 04/11] Add back option to calculate ggf nlo cross section, update sample cross sections to nlo * k. 
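The helper now returns a configurable function rather than a fixed NNLO-only
implementation. A minimal usage sketch (illustrative only; names and behavior
follow the docstring added in this patch, nothing beyond the patched module is
assumed):

    # sketch: evaluate the new ggF cross section helper from dhi/models/HHModelPinv.py
    from dhi.models.HHModelPinv import create_ggf_xsec_func

    get_ggf_xsec = create_ggf_xsec_func()  # defaults to model_default.ggf_formula
    get_ggf_xsec(kl=2.)                    # NNLO cross section in pb
    get_ggf_xsec(kl=2., nnlo=False)        # NLO cross section times the constant k-factor
    get_ggf_xsec(kl=2., unc="up")          # NNLO value with scale and 3% PDF uncertainty added
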
--- dhi/models/HHModelPinv.py | 135 ++++++++++++++++++++++++++------------ dhi/tasks/nlo/base.py | 2 +- 2 files changed, 93 insertions(+), 44 deletions(-) diff --git a/dhi/models/HHModelPinv.py b/dhi/models/HHModelPinv.py index 83d0580..5feb632 100644 --- a/dhi/models/HHModelPinv.py +++ b/dhi/models/HHModelPinv.py @@ -417,20 +417,17 @@ class HHModel(PhysicsModel): process, bin)) -######################################### - # ggf samples with keys (kl, kt), ordered by kl -# (the cross section values are not used in the model, they are listed just for completeness) +# cross section values are NLO with k-factor applied and only used in create_ggf_xsec_func below ggf_samples = OrderedDict([ - ((0, 1), GGFHHSample(0, 1, val_xs=0.06007, label="ggHH_kl_0_kt_1")), - ((1, 1), GGFHHSample(1, 1, val_xs=0.02675, label="ggHH_kl_1_kt_1")), - ((2.45, 1), GGFHHSample(2.45, 1, val_xs=0.01133, label="ggHH_kl_2p45_kt_1")), - ((5, 1), GGFHHSample(5, 1, val_xs=0.07903, label="ggHH_kl_5_kt_1")), - + ((0, 1), GGFHHSample(0, 1, val_xs=0.069725, label="ggHH_kl_0_kt_1")), + ((1, 1), GGFHHSample(1, 1, val_xs=0.031047, label="ggHH_kl_1_kt_1")), + ((2.45, 1), GGFHHSample(2.45, 1, val_xs=0.013124, label="ggHH_kl_2p45_kt_1")), + ((5, 1), GGFHHSample(5, 1, val_xs=0.091172, label="ggHH_kl_5_kt_1")), ]) # vbf samples with keys (CV, C2V, kl), SM point first, then ordered by kl, then C2V, then CV -# (the cross section values are not used in the model, but only in create_vbf_xsec_func below) +# cross section values are LO and only used in create_vbf_xsec_func below br_hh_4b = 0.33919 vbf_samples = OrderedDict([ ((1, 1, 1), VBFHHSample(1, 1, 1, val_xs=0.00054 / br_hh_4b, label="qqHH_CV_1_C2V_1_kl_1")), @@ -550,71 +547,123 @@ HHdefault = HHModel( ) -def get_ggf_xsec(kl=1.0, unc=None): +def create_ggf_xsec_func(formula=None): """ - Calculates the numeric ggF cross section value at NNLO for a certain *kl* value. When *unc* is - "down" ("up"), the down (up) variation of the cross section is returned instead, composed of a - *kl* dependent scale uncertainty and an independent PDF uncertainty of 3%. + Creates and returns a function that can be used to calculate numeric ggF cross section values in + pb given an appropriate *formula*, which defaults to *model_default.ggf_formula*. The returned + function has the signature ``(kl=1.0, kt=1.0, nnlo=True, unc=None)``. When *nnlo* is *True*, a + the return value is in next-to-next-to-leading order. In this case, *unc* can be set to eiher + "up" or "down" to return the up / down varied cross section instead where the uncertainty is + composed of a *kl* dependent scale uncertainty and an independent PDF uncertainty of 3%. + + Example: + + .. code-block:: python + + get_ggf_xec = create_ggf_xsec_func() + + print(get_ggf_xec(kl=2.)) + # -> 0.013803... - Formulas taken from https://twiki.cern.ch/twiki/bin/view/LHCPhysics/LHCHXSWGHH?rev=60. + print(get_ggf_xec(kl=2., nnlo=False)) + # -> 0.013852... + + print(get_ggf_xec(kl=2., unc="up")) + # -> 0.014305... + + Formulas are taken from https://twiki.cern.ch/twiki/bin/view/LHCPhysics/LHCHXSWGHH?rev=60. """ - # compute the nominal cross section - xsec_nom = 70.3874 - 50.4111 * kl + 11.0595 * kl**2. 
- - if unc is None: - # nothing to do when no uncertainty is requested - return xsec_nom - else: - # compute absolute values of the scale uncertainties, preserving signs + if formula is None: + formula = model_default.ggf_formula + + # create the lambdify'ed evaluation function + n_samples = len(formula.sample_list) + symbol_names = ["kl", "kt"] + list(map("s{}".format, range(n_samples))) + func = lambdify(symbols(symbol_names), formula.sigma) + + # nlo-to-nnlo scaling functions in case nnlo is set + xsec_nlo = lambda kl: 0.001 * 1.115 * (62.5339 - 44.3231 * kl + 9.6340 * kl**2.) + xsec_nnlo = lambda kl: 0.001 * (70.3874 - 50.4111 * kl + 11.0595 * kl**2.) + nlo2nnlo = lambda xsec, kl: xsec * xsec_nnlo(kl) / xsec_nlo(kl) + + # uncertainty application in case unc is set + xsec_nnlo_scale_up = lambda kl: 0.001 * max( + 72.0744 - 51.7362 * kl + 11.3712 * kl**2., + 70.9286 - 51.5708 * kl + 11.4497 * kl**2., + ) + xsec_nnlo_scale_down = lambda kl: 0.001 * min( + 66.0621 - 46.7458 * kl + 10.1673 * kl**2., + 66.7581 - 47.7210 * kl + 10.4535 * kl**2., + ) + + def apply_uncertainty_nnlo(kl, xsec_nom, unc): + # compute absolute values of the scale uncertainties if unc.lower() == "up": - xsec_unc = max( - 72.0744 - 51.7362 * kl + 11.3712 * kl**2., - 70.9286 - 51.5708 * kl + 11.4497 * kl**2., - ) - xsec_nom + xsec_unc = xsec_nnlo_scale_up(kl) - xsec_nom elif unc.lower() == "down": - xsec_unc = min( - 66.0621 - 46.7458 * kl + 10.1673 * kl**2., - 66.7581 - 47.7210 * kl + 10.4535 * kl**2., - ) - xsec_nom + xsec_unc = xsec_nnlo_scale_down(kl) - xsec_nom else: - raise ValueError("when set, unc must be 'up' or 'down', got '{}'".format(unc)) + raise ValueError("unc must be 'up' or 'down', got '{}'".format(unc)) + + # combine with flat 3% PDF uncertainty, preserving the sign + unc_sign = 1 if xsec_unc > 0 else -1 + xsec_unc = unc_sign * ((0.03 * xsec_nom)**2. + xsec_unc**2.)**0.5 + + # add signed uncertainty back to nominal value + xsec = xsec_nom + xsec_unc + + return xsec - # combine with flat 3% PDF uncertainty - scale_unc_sign = 1 if xsec_unc > 0 else -1 - xsec_unc = scale_unc_sign * ((0.03 * xsec_nom)**2. + xsec_unc**2.)**0.5 + # wrap into another function to apply defaults and nlo-to-nnlo scaling + def wrapper(kl=1., kt=1., nnlo=True, unc=None): + xsec = func(kl, kt, *(sample.val_xs for sample in formula.sample_list))[0, 0] - # add signed uncertainty back to nominal value and return - return xsec_nom + xsec_unc + # nnlo scaling? + if nnlo: + xsec = nlo2nnlo(xsec, kl) + + # apply uncertainties? + if unc: + if not nnlo: + raise NotImplementedError("NLO ggF cross section uncertainties are not implemented") + xsec = apply_uncertainty_nnlo(kl, xsec, unc) + + return xsec + + return wrapper def create_vbf_xsec_func(formula=None): """ - Creates and returns a function that can be used to calculate numeric VBF cross section values - given an appropriate *formula*, which defaults to *model_default.vbf_formula*. The returned - function has the signature ``(c2v=1.0, cv=1.0, kl=1.0)``. + Creates and returns a function that can be used to calculate numeric VBF cross section values in + pb given an appropriate *formula*, which defaults to *model_default.vbf_formula*. The returned + function has the signature ``(c2v=1.0, cv=1.0, kl=1.0)``. Example: .. code-block:: python get_vbf_xsec = create_vbf_xsec_func() - print(get_vbf_xsec(2.)) - # -> 0.0156445... (or similar) + print(get_vbf_xsec(c2v=2.)) + # -> 0.013916... 
(or similar) """ if formula is None: formula = model_default.vbf_formula - n_samples = len(formula.sample_list) # create the lambdify'ed evaluation function + n_samples = len(formula.sample_list) symbol_names = ["C2V", "CV", "kl"] + list(map("s{}".format, range(n_samples))) func = lambdify(symbols(symbol_names), formula.sigma) # wrap into another function to apply defaults def wrapper(c2v=1., cv=1., kl=1.): - xsec = func(c2v, cv, kl, *(formula.sample_list[i].val_xs for i in range(n_samples)))[0, 0] + xsec = func(c2v, cv, kl, *(sample.val_xs for sample in formula.sample_list))[0, 0] return xsec return wrapper +#: Default function for getting ggF cross sections using the formula of the *model_default* model. +get_ggf_xsec = create_ggf_xsec_func() + #: Default function for getting VBF cross sections using the formula of the *model_default* model. get_vbf_xsec = create_vbf_xsec_func() diff --git a/dhi/tasks/nlo/base.py b/dhi/tasks/nlo/base.py index 496cecf..90551f4 100644 --- a/dhi/tasks/nlo/base.py +++ b/dhi/tasks/nlo/base.py @@ -79,7 +79,7 @@ class HHModelTask(AnalysisTask): # get the proper xsec getter for the formula of the current model module, model = self.load_hh_model() if poi == "kl": - get_xsec = module.get_ggf_xsec + get_xsec = module.create_ggf_xsec_func(model.ggf_formula) else: # C2V get_xsec = module.create_vbf_xsec_func(model.vbf_formula) -- GitLab From 705b36a15f44f3e1122244413473e7c4705dc29e Mon Sep 17 00:00:00 2001 From: Marcel R Date: Thu, 26 Nov 2020 11:49:35 +0100 Subject: [PATCH 05/11] Add note on applied k-factor in nlo case. --- dhi/models/HHModelPinv.py | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/dhi/models/HHModelPinv.py b/dhi/models/HHModelPinv.py index 5feb632..9e9d814 100644 --- a/dhi/models/HHModelPinv.py +++ b/dhi/models/HHModelPinv.py @@ -526,10 +526,10 @@ model_no_vbf_CV1p5 = create_model("model_no_vbf_CV1p5", skip_ggf=[(0, 1)], skip_ # legacy objects for development GGF_sample_list = [ - # GGFHHSample(0, 1, val_xs=0.06007, label="ggHH_kl_0_kt_1"), - GGFHHSample(1, 1, val_xs=0.02675, label="ggHH_kl_1_kt_1"), - GGFHHSample(2.45, 1, val_xs=0.01133, label="ggHH_kl_2p45_kt_1"), - GGFHHSample(5, 1, val_xs=0.07903, label="ggHH_kl_5_kt_1"), + # GGFHHSample(0, 1, val_xs=0.069725, label="ggHH_kl_0_kt_1"), + GGFHHSample(1, 1, val_xs=0.031047, label="ggHH_kl_1_kt_1"), + GGFHHSample(2.45, 1, val_xs=0.013124, label="ggHH_kl_2p45_kt_1"), + GGFHHSample(5, 1, val_xs=0.091172, label="ggHH_kl_5_kt_1"), ] VBF_sample_list = [ VBFHHSample(1, 1, 1, val_xs=0.00054 / br_hh_4b, label="qqHH_CV_1_C2V_1_kl_1"), @@ -551,10 +551,11 @@ def create_ggf_xsec_func(formula=None): """ Creates and returns a function that can be used to calculate numeric ggF cross section values in pb given an appropriate *formula*, which defaults to *model_default.ggf_formula*. The returned - function has the signature ``(kl=1.0, kt=1.0, nnlo=True, unc=None)``. When *nnlo* is *True*, a - the return value is in next-to-next-to-leading order. In this case, *unc* can be set to eiher - "up" or "down" to return the up / down varied cross section instead where the uncertainty is - composed of a *kl* dependent scale uncertainty and an independent PDF uncertainty of 3%. + function has the signature ``(kl=1.0, kt=1.0, nnlo=True, unc=None)``. When *nnlo* is *False*, + the constant k-factor is still applied. Otherwise, the returned value is in full + next-to-next-to-leading order. 
In this case, *unc* can be set to eiher "up" or "down" to return + the up / down varied cross section instead where the uncertainty is composed of a *kl* dependent + scale uncertainty and an independent PDF uncertainty of 3%. Example: @@ -637,7 +638,9 @@ def create_vbf_xsec_func(formula=None): """ Creates and returns a function that can be used to calculate numeric VBF cross section values in pb given an appropriate *formula*, which defaults to *model_default.vbf_formula*. The returned - function has the signature ``(c2v=1.0, cv=1.0, kl=1.0)``. Example: + function has the signature ``(c2v=1.0, cv=1.0, kl=1.0)``. + + Example: .. code-block:: python -- GitLab From 53828e741f57542e9786b7b6395db04aeac1e6f3 Mon Sep 17 00:00:00 2001 From: Marcel R Date: Thu, 26 Nov 2020 16:30:29 +0100 Subject: [PATCH 06/11] Forward nlo/nnlo flag to text2workspace command. --- dhi/models/HHModelPinv.py | 13 +++++++------ dhi/tasks/nlo/base.py | 19 +++++++++++++++++-- dhi/tasks/nlo/inference.py | 2 ++ 3 files changed, 26 insertions(+), 8 deletions(-) diff --git a/dhi/models/HHModelPinv.py b/dhi/models/HHModelPinv.py index 9e9d814..b2c38ff 100644 --- a/dhi/models/HHModelPinv.py +++ b/dhi/models/HHModelPinv.py @@ -191,12 +191,13 @@ class HHModel(PhysicsModel): #self.dump_inputs() - def setPhysicsOptions(self,physOptions): - for po in physOptions: - if po.startswith("doNNLOscaling="): - self.doNNLOscaling = (po.replace("doNNLOscaling=","") in [ "yes", "1", "Yes", "True", "true" ]) - #print "[DEBUG]","[setPhysicsOptions]","Uncertainties are set to be",self.doNNLOscaling - + def setPhysicsOptions(self, physOptions): + opts = [opt.split("=", 1) for opt in physOptions if "=" in opt] + for key, value in physOptions: + if key == "doNNLOscaling": + self.doNNLOscaling = value.lower() in ["yes", "true", "1"] + print("[INFO] set doNNLOscaling of model {} to {}".format( + self.name, self.doNNLOscaling)) def check_validity_ggf(self, ggf_sample_list): if len(ggf_sample_list) < 3: diff --git a/dhi/tasks/nlo/base.py b/dhi/tasks/nlo/base.py index 90551f4..d1a45a7 100644 --- a/dhi/tasks/nlo/base.py +++ b/dhi/tasks/nlo/base.py @@ -9,6 +9,7 @@ import sys import re import glob import importlib +import functools from collections import defaultdict import law @@ -41,10 +42,18 @@ class HHModelTask(AnalysisTask): description="the name of the HH model relative to dhi.models in the format " "module:model_name; default: HHModelPinv:HHdefault", ) + hh_nlo = luigi.BoolParameter( + default=False, + description="disable the automatic nlo-to-nnlo scaling of the model and cross-section " + "calculation; default: False", + ) def store_parts(self): parts = super(HHModelTask, self).store_parts() - parts["hh_model"] = "model_" + self.hh_model.replace(".", "_").replace(":", "_") + part = "model_" + self.hh_model.replace(".", "_").replace(":", "_") + if self.hh_nlo: + part += "__nlo" + parts["hh_model"] = part return parts def load_hh_model(self, hh_model=None): @@ -61,7 +70,11 @@ class HHModelTask(AnalysisTask): with law.util.patch_object(sys, "stdout", null_stream): mod = importlib.import_module(module_id) - return mod, getattr(mod, model_name) + # get the model and set the doNNLOscaling flag correctly + model = getattr(mod, model_name) + model.doNNLOscaling = not self.hh_nlo + + return mod, model def create_xsec_func(self, poi, unit, br=None): if poi not in ["kl", "C2V"]: @@ -79,7 +92,9 @@ class HHModelTask(AnalysisTask): # get the proper xsec getter for the formula of the current model module, model = self.load_hh_model() if poi == "kl": + # get the 
cross section function and make it a partial with the nnlo value set properly get_xsec = module.create_ggf_xsec_func(model.ggf_formula) + get_xsec = functools.partial(get_xsec, nnlo=not self.hh_nlo) else: # C2V get_xsec = module.create_vbf_xsec_func(model.vbf_formula) diff --git a/dhi/tasks/nlo/inference.py b/dhi/tasks/nlo/inference.py index db53a4d..8366e71 100644 --- a/dhi/tasks/nlo/inference.py +++ b/dhi/tasks/nlo/inference.py @@ -81,11 +81,13 @@ class CreateWorkspace(DatacardTask, CombineCommandTask): " -o {workspace}" " -m {self.mass}" " -P dhi.models.{self.hh_model}" + " --PO doNNLOscaling={nnlo}" " {self.custom_args}" ).format( self=self, datacard=self.input().path, workspace=self.output().path, + nnlo=not self.hh_nlo, ) -- GitLab From 8be74d20105adba39d52cb152178bd9ea43138c8 Mon Sep 17 00:00:00 2001 From: Marcel R Date: Thu, 26 Nov 2020 16:31:57 +0100 Subject: [PATCH 07/11] Remove hh datasets not used by the model upon datacard combination. --- dhi/tasks/nlo/inference.py | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/dhi/tasks/nlo/inference.py b/dhi/tasks/nlo/inference.py index 8366e71..5e8e525 100644 --- a/dhi/tasks/nlo/inference.py +++ b/dhi/tasks/nlo/inference.py @@ -19,10 +19,11 @@ from dhi.tasks.nlo.base import ( from dhi.util import linspace from dhi.config import poi_data from dhi.datacard_tools import get_workspace_parameters, bundle_datacard -from dhi.scripts.prettify_datacard import prettify_datacard +from dhi.scripts.remove_processes import remove_processes as remove_processes_script class CombineDatacards(DatacardTask, CombineCommandTask): + def output(self): return self.local_target_dc("datacard.txt") @@ -60,15 +61,25 @@ class CombineDatacards(DatacardTask, CombineCommandTask): output.parent.touch() self.run_command(self.build_command(datacards), cwd=tmp_dir.path) - # prettify the output card - prettify_datacard(output.path) - - # finally, copy shape files to output location + # remove ggf and vbf processes that are not covered by the physics model + mod, model = self.load_hh_model() + all_hh_processes = {sample.label for sample in mod.ggf_samples.values()} + all_hh_processes |= {sample.label for sample in mod.vbf_samples.values()} + model_hh_processes = {sample.label for sample in model.ggf_formula.sample_list} + model_hh_processes |= {sample.label for sample in model.vbf_formula.sample_list} + to_remove = all_hh_processes - model_hh_processes + if to_remove: + self.logger.info("trying to remove processes {} from the combined datacard as they are " + "not part of the phyics model {}".format(",".join(to_remove), self.hh_model)) + remove_processes_script(output.path, map("{}*".format, to_remove)) + + # copy shape files to output location for basename in tmp_dir.listdir(pattern="*.root", type="f"): tmp_dir.child(basename).copy_to(output.parent) class CreateWorkspace(DatacardTask, CombineCommandTask): + def requires(self): return CombineDatacards.req(self) -- GitLab From c651dda03033dc372acf1ff6afc5d97545b986f9 Mon Sep 17 00:00:00 2001 From: Marcel R Date: Thu, 26 Nov 2020 16:50:07 +0100 Subject: [PATCH 08/11] Small fixes. 
--- dhi/models/HHModelPinv.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/dhi/models/HHModelPinv.py b/dhi/models/HHModelPinv.py index b2c38ff..4907d4c 100644 --- a/dhi/models/HHModelPinv.py +++ b/dhi/models/HHModelPinv.py @@ -14,7 +14,7 @@ from sympy import * from numpy import matrix from numpy import linalg from sympy import Matrix -from collections import OrderedDict +from collections import OrderedDict, defaultdict class VBFHHSample: @@ -189,11 +189,13 @@ class HHModel(PhysicsModel): self.ggf_formula = GGFHHFormula(ggf_sample_list) self.vbf_formula = VBFHHFormula(vbf_sample_list) + self.scalingMap = defaultdict(list) + #self.dump_inputs() def setPhysicsOptions(self, physOptions): opts = [opt.split("=", 1) for opt in physOptions if "=" in opt] - for key, value in physOptions: + for key, value in opts: if key == "doNNLOscaling": self.doNNLOscaling = value.lower() in ["yes", "true", "1"] print("[INFO] set doNNLOscaling of model {} to {}".format( -- GitLab From 8c3a56a9075e5a6696dd9b634d07925768de280b Mon Sep 17 00:00:00 2001 From: Luca Cadamuro Date: Fri, 27 Nov 2020 16:40:19 +0000 Subject: [PATCH 09/11] Update VBF cross section values --- dhi/models/HHModelPinv.py | 31 +++++++++++++++---------------- 1 file changed, 15 insertions(+), 16 deletions(-) diff --git a/dhi/models/HHModelPinv.py b/dhi/models/HHModelPinv.py index 4907d4c..b0541e0 100644 --- a/dhi/models/HHModelPinv.py +++ b/dhi/models/HHModelPinv.py @@ -430,16 +430,15 @@ ggf_samples = OrderedDict([ ]) # vbf samples with keys (CV, C2V, kl), SM point first, then ordered by kl, then C2V, then CV -# cross section values are LO and only used in create_vbf_xsec_func below -br_hh_4b = 0.33919 +# cross section values are LO (from 2017/2018 gridpacks) x SM k-factor for N3LO (1.0781) and only used in create_vbf_xsec_func below vbf_samples = OrderedDict([ - ((1, 1, 1), VBFHHSample(1, 1, 1, val_xs=0.00054 / br_hh_4b, label="qqHH_CV_1_C2V_1_kl_1")), - ((1, 1, 0), VBFHHSample(1, 1, 0, val_xs=0.00145 / br_hh_4b, label="qqHH_CV_1_C2V_1_kl_0")), - ((1, 1, 2), VBFHHSample(1, 1, 2, val_xs=0.00044 / br_hh_4b, label="qqHH_CV_1_C2V_1_kl_2")), - ((1, 0, 1), VBFHHSample(1, 0, 1, val_xs=0.00880 / br_hh_4b, label="qqHH_CV_1_C2V_0_kl_1")), - ((1, 2, 1), VBFHHSample(1, 2, 1, val_xs=0.00472 / br_hh_4b, label="qqHH_CV_1_C2V_2_kl_1")), - ((0.5, 1, 1), VBFHHSample(0.5, 1, 1, val_xs=0.00353 / br_hh_4b, label="qqHH_CV_0p5_C2V_1_kl_1")), - ((1.5, 1, 1), VBFHHSample(1.5, 1, 1, val_xs=0.02149 / br_hh_4b, label="qqHH_CV_1p5_C2V_1_kl_1")), + ((1, 1, 1), VBFHHSample(1, 1, 1, val_xs=0.0017260, label="qqHH_CV_1_C2V_1_kl_1")), + ((1, 1, 0), VBFHHSample(1, 1, 0, val_xs=0.0045915, label="qqHH_CV_1_C2V_1_kl_0")), + ((1, 1, 2), VBFHHSample(1, 1, 2, val_xs=0.0014306, label="qqHH_CV_1_C2V_1_kl_2")), + ((1, 0, 1), VBFHHSample(1, 0, 1, val_xs=0.0272322, label="qqHH_CV_1_C2V_0_kl_1")), + ((1, 2, 1), VBFHHSample(1, 2, 1, val_xs=0.0143923, label="qqHH_CV_1_C2V_2_kl_1")), + ((0.5, 1, 1), VBFHHSample(0.5, 1, 1, val_xs=0.0108778, label="qqHH_CV_0p5_C2V_1_kl_1")), + ((1.5, 1, 1), VBFHHSample(1.5, 1, 1, val_xs=0.0663340, label="qqHH_CV_1p5_C2V_1_kl_1")), ]) @@ -535,13 +534,13 @@ GGF_sample_list = [ GGFHHSample(5, 1, val_xs=0.091172, label="ggHH_kl_5_kt_1"), ] VBF_sample_list = [ - VBFHHSample(1, 1, 1, val_xs=0.00054 / br_hh_4b, label="qqHH_CV_1_C2V_1_kl_1"), - VBFHHSample(1, 1, 0, val_xs=0.00145 / br_hh_4b, label="qqHH_CV_1_C2V_1_kl_0"), - VBFHHSample(1, 1, 2, val_xs=0.00044 / br_hh_4b, label="qqHH_CV_1_C2V_1_kl_2"), - # VBFHHSample(1, 0, 1, val_xs=0.00880 / 
br_hh_4b, label="qqHH_CV_1_C2V_0_kl_1"), - VBFHHSample(1, 2, 1, val_xs=0.00472 / br_hh_4b, label="qqHH_CV_1_C2V_2_kl_1"), - VBFHHSample(0.5, 1, 1, val_xs=0.00353 / br_hh_4b, label="qqHH_CV_0p5_C2V_1_kl_1"), - VBFHHSample(1.5, 1, 1, val_xs=0.02149 / br_hh_4b, label="qqHH_CV_1p5_C2V_1_kl_1"), + VBFHHSample(1, 1, 1, val_xs=0.0017260, label="qqHH_CV_1_C2V_1_kl_1"), + VBFHHSample(1, 1, 0, val_xs=0.0045915, label="qqHH_CV_1_C2V_1_kl_0"), + VBFHHSample(1, 1, 2, val_xs=0.0014306, label="qqHH_CV_1_C2V_1_kl_2"), + # VBFHHSample(1, 0, 1, val_xs=0.0272322, label="qqHH_CV_1_C2V_0_kl_1"), + VBFHHSample(1, 2, 1, val_xs=0.0143923, label="qqHH_CV_1_C2V_2_kl_1"), + VBFHHSample(0.5, 1, 1, val_xs=0.0108778, label="qqHH_CV_0p5_C2V_1_kl_1"), + VBFHHSample(1.5, 1, 1, val_xs=0.0663340, label="qqHH_CV_1p5_C2V_1_kl_1"), ] HHdefault = HHModel( ggf_sample_list=GGF_sample_list, -- GitLab From 4e94bd551463d515e5baa43fe58be285a9efb15a Mon Sep 17 00:00:00 2001 From: Luca Cadamuro Date: Fri, 27 Nov 2020 17:23:48 +0000 Subject: [PATCH 10/11] Cross sections fixed to use values from 2017/2018 pdf sets --- dhi/models/HHModelPinv.py | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/dhi/models/HHModelPinv.py b/dhi/models/HHModelPinv.py index b0541e0..7c0b2c4 100644 --- a/dhi/models/HHModelPinv.py +++ b/dhi/models/HHModelPinv.py @@ -433,12 +433,12 @@ ggf_samples = OrderedDict([ # cross section values are LO (from 2017/2018 gridpacks) x SM k-factor for N3LO (1.0781) and only used in create_vbf_xsec_func below vbf_samples = OrderedDict([ ((1, 1, 1), VBFHHSample(1, 1, 1, val_xs=0.0017260, label="qqHH_CV_1_C2V_1_kl_1")), - ((1, 1, 0), VBFHHSample(1, 1, 0, val_xs=0.0045915, label="qqHH_CV_1_C2V_1_kl_0")), - ((1, 1, 2), VBFHHSample(1, 1, 2, val_xs=0.0014306, label="qqHH_CV_1_C2V_1_kl_2")), - ((1, 0, 1), VBFHHSample(1, 0, 1, val_xs=0.0272322, label="qqHH_CV_1_C2V_0_kl_1")), - ((1, 2, 1), VBFHHSample(1, 2, 1, val_xs=0.0143923, label="qqHH_CV_1_C2V_2_kl_1")), - ((0.5, 1, 1), VBFHHSample(0.5, 1, 1, val_xs=0.0108778, label="qqHH_CV_0p5_C2V_1_kl_1")), - ((1.5, 1, 1), VBFHHSample(1.5, 1, 1, val_xs=0.0663340, label="qqHH_CV_1p5_C2V_1_kl_1")), + ((1, 1, 0), VBFHHSample(1, 1, 0, val_xs=0.0046089, label="qqHH_CV_1_C2V_1_kl_0")), + ((1, 1, 2), VBFHHSample(1, 1, 2, val_xs=0.0014228, label="qqHH_CV_1_C2V_1_kl_2")), + ((1, 0, 1), VBFHHSample(1, 0, 1, val_xs=0.0270800, label="qqHH_CV_1_C2V_0_kl_1")), + ((1, 2, 1), VBFHHSample(1, 2, 1, val_xs=0.0142178, label="qqHH_CV_1_C2V_2_kl_1")), + ((0.5, 1, 1), VBFHHSample(0.5, 1, 1, val_xs=0.0108237, label="qqHH_CV_0p5_C2V_1_kl_1")), + ((1.5, 1, 1), VBFHHSample(1.5, 1, 1, val_xs=0.0660185, label="qqHH_CV_1p5_C2V_1_kl_1")), ]) @@ -535,12 +535,12 @@ GGF_sample_list = [ ] VBF_sample_list = [ VBFHHSample(1, 1, 1, val_xs=0.0017260, label="qqHH_CV_1_C2V_1_kl_1"), - VBFHHSample(1, 1, 0, val_xs=0.0045915, label="qqHH_CV_1_C2V_1_kl_0"), - VBFHHSample(1, 1, 2, val_xs=0.0014306, label="qqHH_CV_1_C2V_1_kl_2"), - # VBFHHSample(1, 0, 1, val_xs=0.0272322, label="qqHH_CV_1_C2V_0_kl_1"), - VBFHHSample(1, 2, 1, val_xs=0.0143923, label="qqHH_CV_1_C2V_2_kl_1"), - VBFHHSample(0.5, 1, 1, val_xs=0.0108778, label="qqHH_CV_0p5_C2V_1_kl_1"), - VBFHHSample(1.5, 1, 1, val_xs=0.0663340, label="qqHH_CV_1p5_C2V_1_kl_1"), + VBFHHSample(1, 1, 0, val_xs=0.0046089, label="qqHH_CV_1_C2V_1_kl_0"), + VBFHHSample(1, 1, 2, val_xs=0.0014228, label="qqHH_CV_1_C2V_1_kl_2"), + # VBFHHSample(1, 0, 1, val_xs=0.0270800, label="qqHH_CV_1_C2V_0_kl_1"), + VBFHHSample(1, 2, 1, val_xs=0.0142178, 
label="qqHH_CV_1_C2V_2_kl_1"), + VBFHHSample(0.5, 1, 1, val_xs=0.0108237, label="qqHH_CV_0p5_C2V_1_kl_1"), + VBFHHSample(1.5, 1, 1, val_xs=0.0660185, label="qqHH_CV_1p5_C2V_1_kl_1"), ] HHdefault = HHModel( ggf_sample_list=GGF_sample_list, -- GitLab From 5d5ac4ac09561e4723d81323b444911ebf5700d6 Mon Sep 17 00:00:00 2001 From: Luca Cadamuro Date: Fri, 27 Nov 2020 17:25:42 +0000 Subject: [PATCH 11/11] Update VBF k factor in comment --- dhi/models/HHModelPinv.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dhi/models/HHModelPinv.py b/dhi/models/HHModelPinv.py index 7c0b2c4..42f24df 100644 --- a/dhi/models/HHModelPinv.py +++ b/dhi/models/HHModelPinv.py @@ -430,7 +430,7 @@ ggf_samples = OrderedDict([ ]) # vbf samples with keys (CV, C2V, kl), SM point first, then ordered by kl, then C2V, then CV -# cross section values are LO (from 2017/2018 gridpacks) x SM k-factor for N3LO (1.0781) and only used in create_vbf_xsec_func below +# cross section values are LO (from 2017/2018 gridpacks) x SM k-factor for N3LO (1.03477) and only used in create_vbf_xsec_func below vbf_samples = OrderedDict([ ((1, 1, 1), VBFHHSample(1, 1, 1, val_xs=0.0017260, label="qqHH_CV_1_C2V_1_kl_1")), ((1, 1, 0), VBFHHSample(1, 1, 0, val_xs=0.0046089, label="qqHH_CV_1_C2V_1_kl_0")), -- GitLab