diff --git a/Evolutions.txt b/Evolutions.txt deleted file mode 100644 index 2ecb1f6c3fca04911d83b876e5023f8aebe6ca17..0000000000000000000000000000000000000000 --- a/Evolutions.txt +++ /dev/null @@ -1,3 +0,0 @@ -BaseEvolution libbaseevolution_xfitter.dylib -APFELxx libapfelxx_xfitter.dylib -QCDNUM libqcdnum_xfitter.so diff --git a/Makefile.am b/Makefile.am index cdf7c31294a59e68f55e87f87b4f000ecea57f40..ed9c5b65bb410a7c91c2fddd334b534f9b699292 100644 --- a/Makefile.am +++ b/Makefile.am @@ -1,11 +1,11 @@ ACLOCAL_AMFLAGS=-I m4 AUTOMAKE_OPTIONS = foreign -SUBDIRS = minuit/src interfaces/src DY/src DIPOLE/src RT/src EW/src common common/linalg \ +SUBDIRS = minuit/src DY/src DIPOLE/src RT/src EW/src common common/linalg \ common/num_utils pdf2yaml tools/process\ evolutions/QCDNUM/src\ tools/draw tools/MakeLHAPDF FastNLO/src DiffDIS/src ACOT/src SACOT/src ABM/src FONLL/src Cascade/src \ genetic/mixmax_r004 genetic/src QEDevol/src \ - include interfaces/include FastNLO/include FastNLO/include/fastnlotk DiffDIS/include \ + include FastNLO/include FastNLO/include/fastnlotk DiffDIS/include \ DY/include tools/draw/include \ reactions/KFactor/src \ reactions/Fractal_DISNC/src \ @@ -26,12 +26,16 @@ SUBDIRS = minuit/src interfaces/src DY/src DIPOLE/src RT/src EW/src common commo reactions/AFB/src \ reactions/KMatrix/src \ pdfparams/BasePdfParam/src \ + pdfparams/Expression/src\ pdfparams/HERAPDF_PdfParam/src \ pdfparams/PolySqrtPdfParam/src \ - pdfdecompositions/BasePdfDecomposition/src pdfparams/NegativeGluonPdfParam/src \ + pdfparams/NegativeGluonPdfParam/src\ + pdfdecompositions/BasePdfDecomposition/src\ pdfdecompositions/UvDvUbarDbarS/src \ pdfdecompositions/SU3_PionPdfDecomposition/src \ evolutions/BaseEvolution/src \ + evolutions/FlipUD/src\ + evolutions/FlipCharge/src\ evolutions/APFELxx/src \ minimizers/BaseMinimizer/src minimizers/CERESMinimizer/src minimizers/MINUITMinimizer/src @@ -63,14 +67,11 @@ include $(top_srcdir)/aminclude.am reactiondir = $(prefix)/lib/ reaction_DATA = Reactions.txt -evolutiondir = $(prefix)/lib/ -evolution_DATA = Evolutions.txt - # Tell which program should run the .sh scripts. 
#SH_LOG_COMPILER = $(SHELL) -ex #TESTS_ENVIRONMENT = $(SHELL) #TESTS= ./tools/check.sh EXTRA_DIST= README INSTALLATION LICENCE REFERENCES \ - minuit.in.txt steering.txt testPythonInterface.py parameters.yaml Reactions.txt Evolutions.txt + minuit.in.txt steering.txt testPythonInterface.py parameters.yaml Reactions.txt bin_SCRIPTS = xfitter-config tools/xfitter-getdata.sh diff --git a/Reactions.txt b/Reactions.txt index 979e5742cbb41bc90f1c6969728077660f32e66c..a62c8ebeb2859c4adaeb31ab63ce344fcf1835e4 100644 --- a/Reactions.txt +++ b/Reactions.txt @@ -15,6 +15,8 @@ KFactor libkfactor_xfitter.so FONLL_DISNC libfonll_disnc_xfitter.so FONLL_DISCC libfonll_discc_xfitter.so BaseEvolution libbaseevolution_xfitter.so +FlipUD libFlipUD_Evol_xfitter.so +FlipCharge libFlipChargeEvol_xfitter.so APFELxx libapfelxx_xfitter.so QCDNUM libqcdnum_xfitter.so UvDvubardbars libuvdvubardbars_xfitter.so @@ -23,6 +25,7 @@ MINUIT libMINUITMinimizer_xfitter.so CERES libCERESMinimizer_xfitter.so LHAPDF liblhapdf_xfitter.so HERAPDF libHERAPDF_PdfParam_xfitter.so +Expression libExpressionPdfParam_xfitter.so NegativeGluon libNegativeGluonPdfParam_xfitter.so PolySqrt libPolySqrtPdfParam_xfitter.so AFB libafb_xfitter.so diff --git a/configure.ac b/configure.ac index 62957adaa2eccbfcb095b7de5b8243f94c7b7b82..9975dad408d6c8a60dc9ff511a55ab9a69ca31ab 100644 --- a/configure.ac +++ b/configure.ac @@ -435,28 +435,28 @@ AM_CONDITIONAL(ENABLE_HVQMNR, [test x$enable_hvqmnr == xyes]) AC_ARG_ENABLE([root],[AS_HELP_STRING([--enable-root],[use ROOT libraries])]): ${enable_root=yes} AS_IF([test "$enable_root" = "yes"], [ - AC_MSG_CHECKING([for ROOT installation]) - root_config=`which root-config` - if test x$root_config == x; then - AC_MSG_ERROR([Unable to find root-config, install ROOT or configure with --disable-root]) - else - AC_MSG_RESULT([Using $root_config]) - root_ok=1 - ROOT_CFLAGS=`root-config --cflags` - ROOT_LDFLAGS=`root-config --ldflags` - ROOT_LIBS=`root-config --libs` - AC_SUBST(ROOT_CFLAGS) - AC_SUBST(ROOT_LDFLAGS) - AC_SUBST(ROOT_LIBS) - AC_DEFINE([ROOT_ENABLED],[1],[Define if ROOT is enabled]) - fi - ], - [ - AC_MSG_WARN([ROOT libraries are disabled, xfitter-draw not available]) - if test x$enable_applgrid == xyes; then - AC_MSG_ERROR([Root is required for APPLGRID]) - fi - ]) + AC_MSG_CHECKING([for ROOT installation]) + root_config=`which root-config` + if test x$root_config == x; then + AC_MSG_ERROR([Unable to find root-config, install ROOT or configure with --disable-root]) + else + AC_MSG_RESULT([Using $root_config]) + root_ok=1 + ROOT_CFLAGS=`root-config --cflags` + ROOT_LDFLAGS=`root-config --ldflags` + ROOT_LIBS=`root-config --libs` + AC_SUBST(ROOT_CFLAGS) + AC_SUBST(ROOT_LDFLAGS) + AC_SUBST(ROOT_LIBS) + AC_DEFINE([ROOT_ENABLED],[1],[Define if ROOT is enabled]) + fi + ], + [ + AC_MSG_WARN([ROOT libraries are disabled, xfitter-draw not available]) + if test x$enable_applgrid == xyes; then + AC_MSG_ERROR([Root is required for APPLGRID]) + fi + ]) AM_CONDITIONAL([HAVE_ROOT],test $root_ok) @@ -578,7 +578,7 @@ AC_CONFIG_FILES([include/Makefile SACOT/src/Makefile SACOT/Makefile ABM/src/Makefile - FONLL/src/Makefile + FONLL/src/Makefile FONLL/Makefile Cascade/src/Makefile genetic/mixmax_r004/Makefile @@ -598,39 +598,42 @@ AC_CONFIG_FILES([include/Makefile doc/logo/Makefile examples/Makefile python/Makefile - xfitter-config - pdfparams/NegativeGluonPdfParam/src/Makefile - evolutions/LHAPDF/src/Makefile - minimizers/CERESMinimizer/src/Makefile - minimizers/MINUITMinimizer/src/Makefile - evolutions/QCDNUM/src/Makefile - 
pdfparams/BasePdfParam/src/Makefile - pdfparams/HERAPDF_PdfParam/src/Makefile - pdfparams/PolySqrtPdfParam/src/Makefile - pdfdecompositions/LHAPDFDecomposition/src/Makefile - pdfdecompositions/UvDvUbarDbarS/src/Makefile - pdfdecompositions/SU3_PionPdfDecomposition/src/Makefile - pdfdecompositions/BasePdfDecomposition/src/Makefile - reactions/AFB/src/Makefile - reactions/KFactor/src/Makefile - reactions/BaseDISCC/src/Makefile - reactions/FFABM_DISCC/src/Makefile - reactions/BaseDISNC/src/Makefile - reactions/FFABM_DISNC/src/Makefile - reactions/Hathor/src/Makefile - reactions/Fractal_DISNC/src/Makefile - reactions/RT_DISNC/src/Makefile - reactions/APPLgrid/src/Makefile - reactions/BaseHVQMNR/src/Makefile - reactions/HVQMNR_LHCb_7TeV_beauty/src/Makefile - reactions/HVQMNR_LHCb_7TeV_charm/src/Makefile - reactions/testZMVFNS/src/Makefile - reactions/fastNLO/src/Makefile - reactions/FONLL_DISCC/src/Makefile - reactions/FONLL_DISNC/src/Makefile - evolutions/BaseEvolution/src/Makefile - evolutions/APFELxx/src/Makefile - minimizers/BaseMinimizer/src/Makefile + xfitter-config + pdfparams/NegativeGluonPdfParam/src/Makefile + evolutions/LHAPDF/src/Makefile + minimizers/CERESMinimizer/src/Makefile + minimizers/MINUITMinimizer/src/Makefile + evolutions/QCDNUM/src/Makefile + pdfparams/BasePdfParam/src/Makefile + pdfparams/Expression/src/Makefile + pdfparams/HERAPDF_PdfParam/src/Makefile + pdfparams/PolySqrtPdfParam/src/Makefile + pdfdecompositions/LHAPDFDecomposition/src/Makefile + pdfdecompositions/UvDvUbarDbarS/src/Makefile + pdfdecompositions/SU3_PionPdfDecomposition/src/Makefile + pdfdecompositions/BasePdfDecomposition/src/Makefile + reactions/AFB/src/Makefile + reactions/KFactor/src/Makefile + reactions/BaseDISCC/src/Makefile + reactions/FFABM_DISCC/src/Makefile + reactions/BaseDISNC/src/Makefile + reactions/FFABM_DISNC/src/Makefile + reactions/Hathor/src/Makefile + reactions/Fractal_DISNC/src/Makefile + reactions/RT_DISNC/src/Makefile + reactions/APPLgrid/src/Makefile + reactions/BaseHVQMNR/src/Makefile + reactions/HVQMNR_LHCb_7TeV_beauty/src/Makefile + reactions/HVQMNR_LHCb_7TeV_charm/src/Makefile + reactions/testZMVFNS/src/Makefile + reactions/fastNLO/src/Makefile + reactions/FONLL_DISCC/src/Makefile + reactions/FONLL_DISNC/src/Makefile + evolutions/BaseEvolution/src/Makefile + evolutions/FlipUD/src/Makefile + evolutions/FlipCharge/src/Makefile + evolutions/APFELxx/src/Makefile + minimizers/BaseMinimizer/src/Makefile reactions/KMatrix/src/Makefile Makefile]) diff --git a/doxygen.cfg b/doxygen.cfg index 6fdb1376aa7e1d0fbb56d25c3e8a64b4cb1cbd9f..44dfa39edd7b68fb3ef0ddf4f1bea047580a6f85 100644 --- a/doxygen.cfg +++ b/doxygen.cfg @@ -570,11 +570,39 @@ WARN_LOGFILE = INPUT = include src \ RT/src ACOT/src ABM/src DIPOLE/src \ - DiffDIS/include DiffDIS/src \ - Hathor/interface Hathor/src EW/src \ - NNPDF/include NNPDF/src Cascade/src \ + DiffDIS/include DiffDIS/src \ + Hathor/interface Hathor/src EW/src \ + NNPDF/include NNPDF/src Cascade/src \ DY/src DY/include \ - reactions/APPLgrid/include evolutions/LHAPDF/src evolutions/LHAPDF/include evolutions/QCDNUM/src evolutions/QCDNUM/include reactions/FONLL_DISCC/src reactions/FONLL_DISCC/include reactions/FONLL_DISNC/src reactions/FONLL_DISNC/include reactions/KFactor/src reactions/KFactor/include reactions/FFABM_DISCC/src reactions/FFABM_DISCC/include reactions/Fractal_DISNC/src reactions/Fractal_DISNC/include reactions/BaseDISCC/src reactions/BaseDISCC/include reactions/FFABM_DISNC/src reactions/FFABM_DISNC/include reactions/Hathor/src 
reactions/Hathor/include reactions/RT_DISNC/src reactions/RT_DISNC/include reactions/BaseDISNC/src reactions/BaseDISNC/include \ + evolutions/LHAPDF/src\ + evolutions/LHAPDF/include\ + evolutions/QCDNUM/src\ + evolutions/QCDNUM/include\ + evolutions/FlipUD/src\ + evolutions/FlipUD/include\ + evolutions/FlipCharge/src\ + evolutions/FlipCharge/include\ + reactions/FONLL_DISCC/src\ + reactions/FONLL_DISCC/include\ + reactions/FONLL_DISNC/src\ + reactions/FONLL_DISNC/include\ + reactions/KFactor/src\ + reactions/KFactor/include\ + reactions/FFABM_DISCC/src\ + reactions/FFABM_DISCC/include\ + reactions/Fractal_DISNC/src\ + reactions/Fractal_DISNC/include\ + reactions/BaseDISCC/src\ + reactions/BaseDISCC/include\ + reactions/FFABM_DISNC/src\ + reactions/FFABM_DISNC/include\ + reactions/Hathor/src\ + reactions/Hathor/include\ + reactions/RT_DISNC/src\ + reactions/RT_DISNC/include\ + reactions/BaseDISNC/src\ + reactions/BaseDISNC/include\ + reactions/APPLgrid/include reactions/APPLgrid/src \ reactions/fastNLO/include \ reactions/fastNLO/src \ @@ -582,16 +610,18 @@ INPUT = include src \ reactions/BaseHVQMNR/src reactions/HVQMNR_LHCb_7TeV_beauty/include \ reactions/HVQMNR_LHCb_7TeV_beauty/src \ - pdfparams/BasePdfParam/include pdfparams/NegativeGluonPdf/include \ - pdfparams/HERAPDF_PdfParam/include \ - pdfparams/PolySqrtPdfParam/include \ - pdfdecompositions/BasePdfDecomposition/include \ - pdfdecompositions/LHAPDFDecomposition/include \ - pdfdecompositions/UvDvUbarDbarS/include \ - pdfdecompositions/GRV_PionPdfDecomposition/include \ - minimizers/BaseMinimizer/include \ - minimizers/CERESMinimizer/include \ - minimizers/MINUITMinimizer/include \ + pdfparams/BasePdfParam/include\ + pdfparams/Expression/include\ + pdfparams/NegativeGluonPdf/include \ + pdfparams/HERAPDF_PdfParam/include \ + pdfparams/PolySqrtPdfParam/include \ + pdfdecompositions/BasePdfDecomposition/include \ + pdfdecompositions/LHAPDFDecomposition/include \ + pdfdecompositions/UvDvUbarDbarS/include \ + pdfdecompositions/GRV_PionPdfDecomposition/include \ + minimizers/BaseMinimizer/include \ + minimizers/CERESMinimizer/include \ + minimizers/MINUITMinimizer/include \ # This tag can be used to specify the character encoding of the source files # that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is # also the default input encoding. Doxygen uses libiconv (or the iconv built diff --git a/evolutions/BaseEvolution/include/BaseEvolution.h b/evolutions/BaseEvolution/include/BaseEvolution.h index a1feedfe30075ddd85bc4320129b12c38fd60b02..ebe80d4d7dd487e016a99f1ccd8f96c02dd72909 100644 --- a/evolutions/BaseEvolution/include/BaseEvolution.h +++ b/evolutions/BaseEvolution/include/BaseEvolution.h @@ -21,8 +21,8 @@ namespace xfitter class BaseEvolution { public: - /// Unique name of instance - const std::string _name; + /// Unique name of instance + const std::string _name; /** * @brief The BaseEvolution default constructor. * @param name: the unique name used to identify the instance @@ -60,16 +60,24 @@ namespace xfitter /** * @brief Function that returns a std::function that in turn - * returns a double as a function of the pdf index i, x and Q. + * i indexes flavor (QCDNUM convention): + * + * i -6 -5 -4 -3 -2 -1 0 1 2 3 4 5 6 + * tb bb cb sb ub db g d u s c b t + * * @return double-valued function of i, x and Q. 
*/ - virtual std::function<double(int const& i, double const& x, double const& Q)> xfxQDouble() = 0; + virtual std::function<double(int const& i, double const& x, double const& Q)> xfxQDouble() = 0;//why would you pass int and double by reference??? --Ivan /** * @brief Function that returns a std::function that in turn * returns a void as a function of the pdf index x, Q, and pdfs, - * where pdfs is the array of PDFs. - * @return void-valued function of x, Q and pdfs. + * where pdfs is the array of PDFs, of size 13 (C++ QCDNUM convention): + * + * i 0 1 2 3 4 5 6 7 8 9 10 11 12 + * pdfs[i] tb bb cb sb ub db g d u s c b t + * + * @return void-valued function of x, Q, which writes PDF values by pointer pdfs. */ virtual std::function<void(double const& x, double const& Q, double* pdfs)> xfxQArray() = 0; diff --git a/evolutions/FlipCharge/include/FlipChargeEvol.h b/evolutions/FlipCharge/include/FlipChargeEvol.h new file mode 100644 index 0000000000000000000000000000000000000000..1e1d81e9a7876a7992da6f631d4c4914840271e5 --- /dev/null +++ b/evolutions/FlipCharge/include/FlipChargeEvol.h @@ -0,0 +1,29 @@ +//Automatically generated by ./tools/AddEvolution.py on 2018-12-25 +#pragma once +#include"BaseEvolution.h" + +namespace xfitter{ +/** + @class FlipChargeEvol + + @brief A class for FlipCharge modifier evolution + + Takes as input another evolution and switches each quark with antiquark. + This turns a particle into an antiparticle. + + In YAML configuration, provide another evolution's name as "input" +*/ +class FlipChargeEvol:public BaseEvolution{ + public: + FlipChargeEvol(const char*name):BaseEvolution(name){} + virtual const char*getClassName()const override final{return"FlipCharge";}; + virtual void atStart()override final; + virtual void atConfigurationChange()override final; + virtual std::function<std::map<int,double>(double const&x,double const&Q)>xfxQMap()override final; + virtual std::function<void(double const&x,double const&Q,double*pdfs)>xfxQArray()override final; + virtual std::function<double(int const&i,double const&x,double const&Q)>xfxQDouble()override final; + virtual std::function<double(double const&Q)>AlphaQCD()override final; + private: + BaseEvolution*input; +}; +} diff --git a/evolutions/FlipCharge/src/FlipChargeEvol.cc b/evolutions/FlipCharge/src/FlipChargeEvol.cc new file mode 100644 index 0000000000000000000000000000000000000000..7f78107e70b4de42d0cc76a48988e7d89c0ff3bf --- /dev/null +++ b/evolutions/FlipCharge/src/FlipChargeEvol.cc @@ -0,0 +1,55 @@ +#include"FlipChargeEvol.h" +#include"xfitter_cpp_base.h" +#include"xfitter_pars.h" +#include"xfitter_steer.h" +#include"iostream" +#include<utility> //for swap +using namespace std; +namespace xfitter{ +extern"C" FlipChargeEvol*create(const char*s){return new FlipChargeEvol(s);}//for dynamic loading +void FlipChargeEvol::atStart(){atConfigurationChange();} +void FlipChargeEvol::atConfigurationChange(){ + const YAML::Node node=XFITTER_PARS::getEvolutionNode(_name)["input"]; + try{ + input=get_evolution(node.as<string>()); + }catch(const YAML::InvalidNode&ex){ + if(node.IsNull()){ + cerr<<"[ERROR] In evolution "<<_name<<" of class "<<getClassName()<<": no input evolution"<<endl; + hf_errlog(18122500,"F: No input to evolution FlipCharge, see stderr"); + }else throw ex; + }catch(const YAML::BadConversion&ex){ + cerr<<"[ERROR] In evolution "<<_name<<" of class "<<getClassName()<<": failed to convert input to string, YAML node:"<<node<<endl; + hf_errlog(18122501,"F: Bad input to evolution FlipCharge, see stderr"); + } +} 
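Editor's aside (not part of the patch): the index swaps in the FlipCharge and FlipUD evolutions below rely on the 13-element QCDNUM-style flavour ordering documented in BaseEvolution.h above (index 0..12 = tb bb cb sb ub db g d u s c b t, gluon at 6). The following minimal C++ sketch shows how a caller could read PDFs through that convention; the evolution name "proton-QCDNUM" is taken from the parameters.yaml example further down, and the helper function name is hypothetical.

// Illustrative sketch only, assuming BaseEvolution.h and xfitter_steer.h are on the include path.
#include <cstdio>
#include "BaseEvolution.h"
#include "xfitter_steer.h"

void printSomePDFs(double x, double Q) {
  // Get (or create) the evolution by its name from the YAML configuration
  xfitter::BaseEvolution* ev = xfitter::get_evolution("proton-QCDNUM");
  double pdfs[13];               // index 0..12 = tb bb cb sb ub db g d u s c b t
  ev->xfxQArray()(x, Q, pdfs);   // fill the array at this (x,Q)
  std::printf("x*g(x,Q)    = %g\n", pdfs[6]);   // gluon sits at index 6
  std::printf("x*u(x,Q)    = %g\n", pdfs[8]);   // u-quark at index 6+2
  std::printf("x*ubar(x,Q) = %g\n", pdfs[4]);   // ubar at index 6-2
  // The same information through the single-flavour form, i in [-6,6] as documented above (1 = d)
  std::printf("x*d(x,Q)    = %g\n", ev->xfxQDouble()(1, x, Q));
}
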
+function<map<int,double>(double const&,double const&)>FlipChargeEvol::xfxQMap(){ + auto f=input->xfxQMap(); + return [f](double const&x,double const&Q)->map<int,double>{ + map<int,double>M=f(x,Q);//I don't know how to avoid copying a map with current interface --Ivan + swap(M.at(1),M.at(-1)); + swap(M.at(2),M.at(-2)); + swap(M.at(3),M.at(-3)); + swap(M.at(4),M.at(-4)); + swap(M.at(5),M.at(-5)); + swap(M.at(6),M.at(-6)); + return M; + }; +} +function<void(double const&x,double const&Q,double*pdfs)>FlipChargeEvol::xfxQArray(){ + auto f=input->xfxQArray(); + return [f](double const&x,double const&Q,double*p){ + f(x,Q,p); + swap(p[0],p[12]); + swap(p[1],p[11]); + swap(p[2],p[10]); + swap(p[3],p[ 9]); + swap(p[4],p[ 8]); + swap(p[5],p[ 7]); + }; +} +function<double(int const&i,double const&x,double const&Q)>FlipChargeEvol::xfxQDouble(){ + auto f=input->xfxQDouble(); + return [f](const int&i,const double&x,const double&Q)->double{return f(-i,x,Q);}; +} +function<double(double const&Q)>FlipChargeEvol::AlphaQCD(){return input->AlphaQCD();} +} diff --git a/evolutions/FlipCharge/src/Makefile.am b/evolutions/FlipCharge/src/Makefile.am new file mode 100644 index 0000000000000000000000000000000000000000..134ce997dfe855c45f30b3a858f7348f94e606a8 --- /dev/null +++ b/evolutions/FlipCharge/src/Makefile.am @@ -0,0 +1,4 @@ +AM_CXXFLAGS=-I$(srcdir)/../include -I$(srcdir)/../../BaseEvolution/include -I$(srcdir)/../../../include -Wall -fPIC -Wno-deprecated +lib_LTLIBRARIES=libFlipChargeEvol_xfitter.la +libFlipChargeEvol_xfitter_la_SOURCES=FlipChargeEvol.cc +dist_noinst_HEADERS=../include diff --git a/evolutions/FlipUD/include/FlipUD_Evol.h b/evolutions/FlipUD/include/FlipUD_Evol.h new file mode 100644 index 0000000000000000000000000000000000000000..b0a77834affde502c6823c472fdb4b9f34f681a6 --- /dev/null +++ b/evolutions/FlipUD/include/FlipUD_Evol.h @@ -0,0 +1,28 @@ +#pragma once +#include"BaseEvolution.h" + +namespace xfitter{ +/** + @class FlipUD_Evol + + @brief A class for FlipUD evolution + + Takes as input another evolution and switches up-quark with down-quark, ubar with dbar + This turns a proton into neutron and vice versa, assuming isospin symmetry + + In YAML configuration, provide another evolution's name as "input" +*/ +class FlipUD_Evol:public BaseEvolution{ + public: + FlipUD_Evol(const char*name):BaseEvolution(name){} + virtual const char*getClassName()const override final{return"FlipUD";}; + virtual void atStart()override final; + virtual void atConfigurationChange()override final; + virtual std::function<std::map<int,double>(double const&x,double const&Q)>xfxQMap()override final; + virtual std::function<void(double const&x,double const&Q,double*pdfs)>xfxQArray()override final; + virtual std::function<double(int const&i,double const&x,double const&Q)>xfxQDouble()override final; + virtual std::function<double(double const&Q)>AlphaQCD()override final; + private: + BaseEvolution*input; +}; +} diff --git a/evolutions/FlipUD/src/FlipUD_Evol.cc b/evolutions/FlipUD/src/FlipUD_Evol.cc new file mode 100644 index 0000000000000000000000000000000000000000..e5c3bf805dc896bfb0b6fc1382cba76266ae2fe5 --- /dev/null +++ b/evolutions/FlipUD/src/FlipUD_Evol.cc @@ -0,0 +1,55 @@ +#include"FlipUD_Evol.h" +#include"xfitter_cpp_base.h" +#include"xfitter_pars.h" +#include"xfitter_steer.h" +#include"iostream" +#include<utility> //for swap +using namespace std; +namespace xfitter{ +extern"C" FlipUD_Evol*create(const char*s){return new FlipUD_Evol(s);}//for dynamic loading +void 
FlipUD_Evol::atStart(){atConfigurationChange();} +void FlipUD_Evol::atConfigurationChange(){ + const YAML::Node node=XFITTER_PARS::getEvolutionNode(_name)["input"]; + try{ + input=get_evolution(node.as<string>()); + }catch(const YAML::InvalidNode&ex){ + if(node.IsNull()){ + cerr<<"[ERROR] In evolution "<<_name<<" of class "<<getClassName()<<": no input evolution"<<endl; + hf_errlog(18122510,"F: No input to evolution FlipUD, see stderr"); + }else throw ex; + }catch(const YAML::BadConversion&ex){ + cerr<<"[ERROR] In evolution "<<_name<<" of class "<<getClassName()<<": failed to convert input to string, YAML node:"<<node<<endl; + hf_errlog(18122511,"F: Bad input to evolution FlipUD, see stderr"); + } +} +function<map<int,double>(double const&x,double const&Q)>FlipUD_Evol::xfxQMap(){ + auto f=input->xfxQMap(); + return [f](double const&x,double const&Q)->map<int,double>{ + map<int,double>M=f(x,Q); + swap(M.at( 1),M.at( 2)); + swap(M.at(-1),M.at(-2)); + return M; + }; +} +function<void(double const&x,double const&Q,double*pdfs)>FlipUD_Evol::xfxQArray(){ + auto f=input->xfxQArray(); + return [f](double const&x,double const&Q,double*p){ + f(x,Q,p); + swap(p[4],p[5]); + swap(p[7],p[8]); + }; +} +function<double(int const&i,double const&x,double const&Q)>FlipUD_Evol::xfxQDouble(){ + auto f=input->xfxQDouble(); + return [f](const int&i,const double&x,const double&Q)->double{ + switch(i){ + case 1:return f( 2,x,Q); + case -1:return f(-2,x,Q); + case 2:return f( 1,x,Q); + case -2:return f(-1,x,Q); + default:return f(i,x,Q); + } + }; +} +function<double(double const&Q)>FlipUD_Evol::AlphaQCD(){return input->AlphaQCD();} +} diff --git a/evolutions/FlipUD/src/Makefile.am b/evolutions/FlipUD/src/Makefile.am new file mode 100644 index 0000000000000000000000000000000000000000..9571d8f925857b8cb552030fd283497ed65ff3ae --- /dev/null +++ b/evolutions/FlipUD/src/Makefile.am @@ -0,0 +1,4 @@ +AM_CXXFLAGS=-I$(srcdir)/../include -I$(srcdir)/../../BaseEvolution/include -I$(srcdir)/../../../include -Wall -fPIC -Wno-deprecated +lib_LTLIBRARIES=libFlipUD_Evol_xfitter.la +libFlipUD_Evol_xfitter_la_SOURCES=FlipUD_Evol.cc +dist_noinst_HEADERS=../include diff --git a/include/CheckForPDF.h b/include/CheckForPDF.h index 0633a86354f195a4b5e5c711091675dcb423281d..488ea3a5b3282bc13efa971eb8ecedefcf87bc70 100644 --- a/include/CheckForPDF.h +++ b/include/CheckForPDF.h @@ -1,7 +1,5 @@ #ifndef __CHECKFORPDF_H #define __CHECKFORPDF_H - /// for Fortran calling, check presence of PDF file. 
void CheckForPDF(char const*pdfname); - #endif diff --git a/include/Makefile.am b/include/Makefile.am index 6d87a80162f2088418e42bdf38b730a006b8adee..ab095428c7a8c591c78d2864b702bdfee616518a 100644 --- a/include/Makefile.am +++ b/include/Makefile.am @@ -8,6 +8,6 @@ AUTOMAKE_OPTIONS = foreign dist_noinst_HEADERS = alphas.inc endmini.inc indata.inc ntot.inc theo.inc \ APFELgridGeneration.h couplings.inc extrapars.inc iofnames.inc pdf2yaml.h TheorEval.h \ applgrid_fastpdf.inc covar.inc fcn.inc list.h pdflength.inc ReactionTheory.h theorexpr.inc \ -c2yaml.h datasets.inc for_debug.inc pdfparam.inc scales.inc xfitter_steer.h xfitter_pars.h thresholds.inc \ +c2yaml.h datasets.inc for_debug.inc scales.inc xfitter_steer.h xfitter_pars.h thresholds.inc \ chi2scan.inc dimensions.h fractal.inc polarity.inc steering.inc \ c_interface.inc dipole.inc g_offset.inc Makefile.in qcdnumhelper.inc systematics.inc xfitter_cpp.h xfitter_cpp_base.h CheckForPDF.h Profiler.h diff --git a/include/ReactionTheory.h b/include/ReactionTheory.h index 9dd901ea503883510db66a64e9f064d8efbbbb16..3bc13a6d3d91e18b8b5d3bee6ee1dbf35ccd4c9f 100644 --- a/include/ReactionTheory.h +++ b/include/ReactionTheory.h @@ -36,15 +36,22 @@ typedef void (*pXFXlike)(const double&x,const double&Q,double*results); @brief A base class manages for reaction theories - It provides an interface which must present in the derived classes + It provides an interface which must be present in the derived classes + Each concrete instance of ReactionTheory is a singleton + ReactionTheory is responsible for: + 1. Calculating theory predictions for given dataset + 2. Keeping dataset-specific parameters and other information by datasetID + 3. Reading and checking sanity of reaction parameters. @author A.Sapronov <sapronov@ifh.de> + Others also contributed??? - @version 0.1 + @version 0.1, but nobody keeps track of versions anymore @date 2016/01/21 */ //class Evolution; +//why is this not in namespace xfitter? --Ivan class ReactionTheory { @@ -58,10 +65,12 @@ class ReactionTheory public: using super = ReactionTheory; - - virtual string getReactionName() const =0; ///< Should return expected reaction name. Normally generated automatically by AddReaction.py - //A better name would be atStart - virtual int initAtStart(const string &) =0; ///< Initialization first time ReactionTheory implementation is called + virtual string getReactionName() const =0; ///< Returns expected reaction name. Normally generated automatically by AddReaction.py + /** Called once at start, used by concrete class for initialization + @param string ??? + @return 0 on success, some error code otherwise + */ + virtual int atStart(const string &) =0; virtual void setxFitterParameters(map<string,double*> &xfitter_pars) {_xfitter_pars = xfitter_pars; }; ///< Set environment map for doubles virtual void setxFitterParametersI(map<string,int> &xfitter_pars) {_xfitter_pars_i = xfitter_pars; }; ///< Set environment map for integers @@ -76,7 +85,7 @@ class ReactionTheory virtual void setExtraFunctions(map<string, pZeroParFunc>, map<string, pOneParFunc>, map<string, pTwoParFunc>) { }; //! 
Set XFX function for different hadrons (proton: p, neutron: n, anti-proton: pbar) - virtual void setXFX(pXFXlike xfx, string type="p" ){ _xfx[type] = xfx; }; + virtual void setXFX(pXFXlike xfx, string type="p" ){ _xfx[type] = xfx; };//DEPRECATED virtual void setBinning(int dataSetID, map<string,valarray<double> > *dsBins){ _dsIDs.push_back(dataSetID); _dsBins[dataSetID] = dsBins; } ; @@ -100,7 +109,7 @@ class ReactionTheory virtual void printInfo(){}; //! Helper function to emmulate LHAPDF6 calls to get PDFs - void xfx(const double& x, const double& q, double* results) const { (_xfx.at("p"))(x,q,results); }; + void xfx(const double& x, const double& q, double* results) const;//Currently accesses default evolution, to be replaced later --Ivan //! Helper function to emmulate LHAPDF6 calls to get PDFs double xfx(double x, double q, int iPDF) const { double pdfs[13]; xfx(x,q,pdfs); return pdfs[iPDF+6];}; diff --git a/include/TermData.h b/include/TermData.h new file mode 100644 index 0000000000000000000000000000000000000000..99b0f721cbeae8825760d54572418e1037a8f668 --- /dev/null +++ b/include/TermData.h @@ -0,0 +1,47 @@ +#pragma once +#include"ReactionTheory.h" +#include"Variant.h" +/* +TermData is a class that provides interface to all parameters for a given reaction term +TermData is responsible for: +*Providing interface to reaction parameters, resolving overwrites and global vs specific parameters +*Reading special parameters "evolution", "evolution1" and "evolution2", get pointer[s] to correct evolution[s] +*Switch XFXlike and other wrappers + +One instance of TermData exists for each reaction term. +The class TheorEval manages all instances of TermData and passes them to ReactionTheory + +It is intended to be used by ReactionTheory +*/ +namespace XFITTER_PARS{ +class TermData{ +public: + TermData(unsigned id,ReactionTheory*);//add any other parameters TheorEval needs to pass + //Unique id of this term + //In the past this was calculated as 1000*datasetID+number_of_term (see TheorEval::initReactionTerm) + //In ReactionTheory called it was also incorrectly called dataSetID + const unsigned id; + //Get reaction parameter by its name, taking into account that term-specific parameters overshadow global etc. + Variant getParam(string)const; + ReactionTheory*reaction; + void actualizeWrappers();//see wrappers below + BaseEvolution*getEvolution()const;//returns first evolution for this term + BaseEvolution*getEvolution(int i)const;//i is either 0 for evolution1, or 1 for evolution2 + //The following pointer can be used by ReactionTheory to store some additional data + //for each reaction term. 
It should be managed by ReactioTheory only, do not touch it from elsewhere + void*reactionData=nullptr; + //Dataset*dataset()const; //for future, after we decide on a Dataset class + //We should consider adding any other getters that could be useful to ReactionTheory +private: + //TODO: Implement TermData +} +/* Wrappers + If a ReacationTheory uses these wrapper, it must call TermData::actualizeWrappers + for each term at each iteration, to make sure the wrappers wrap the correct evolution +*/ +} +extern "C"{ +void PDF_xfx_wrapper (const double&x,const double&Q,double*results); +void PDF_xfx_wrapper1(const double&x,const double&Q,double*results); +void AlphaS_wrapper (const double&Q); +} diff --git a/include/Variant.h b/include/Variant.h new file mode 100644 index 0000000000000000000000000000000000000000..b2f589a15d64baa1c610e3df15def82c069a8d54 --- /dev/null +++ b/include/Variant.h @@ -0,0 +1,59 @@ +#pragma once +#include<sstream> +#include<vector> +#include<stdexcept> +namespace XFITTER_PARS{ +/* + Variant is a class used for transferring parameters to reations + It can encapsulate one of several types: + *const double* + *std::string + *int + *vector<const double*> + + We use pointers for double-typed parameters because it is assumed + that any of them can change during the minimization + + Variant is used mainly as return type of TermData::getParam +*/ +class Variant{ +public: + enum Type{None=0,DoublePtr=1,String=2,Int=3,Array=4}; + Variant(); + Variant(const Variant&); + Variant(const double*); + Variant(const std::string&); + Variant(const char*); + Variant(int); + Variant(const std::vector<const double*>&); + ~Variant(); + Variant&operator=(const Variant&); + operator const double*()const; + operator std::string()const; + operator int()const; + operator const std::vector<const double*>&()const; + Type type()const; + bool isNone()const; + bool isDoublePtr()const; + bool isString()const; + bool isInt()const; + bool isArray()const; + friend std::ostream&operator<<(std::ostream&,const Variant&); + class bad_cast; +private: + Type _type; + union{ + const double*_ptr; + std::string _string; + int _int; + std::vector<const double*>_array; + }; +}; +std::ostream&operator<<(std::ostream&,const Variant&); +std::ostream&operator<<(std::ostream&,Variant::Type); +const char*to_string(Variant::Type); +class Variant::bad_cast:public std::runtime_error{ +public: + bad_cast(const Variant&,Variant::Type); +}; +} diff --git a/include/expression_utils.h b/include/expression_utils.h new file mode 100644 index 0000000000000000000000000000000000000000..eb93f0c3d4223277600d99aa138e29d2d02b57db --- /dev/null +++ b/include/expression_utils.h @@ -0,0 +1,6 @@ +#pragma once +#include<string> +#include<vector> +namespace xfitter{ +void extractParameterNames(const std::string&s,std::vector<std::string>&ret); +} diff --git a/include/ntot.inc b/include/ntot.inc index d68712f07faf6d79e55e268013f8a66cd74336e4..4f78741d674122aefcaddebab6435d0e8bfdccbc 100644 --- a/include/ntot.inc +++ b/include/ntot.inc @@ -1,28 +1,24 @@ #include "dimensions.h" -C -C Basic definitions for -C - integer NTOT !> Total number of data points + integer NTOT !> Maximal number of data points parameter(NTOT = NTOT_C) - - integer NCovarMax !> Total number of data points, treated by covariance matrix + integer NCovarMax !> Maximal number of data points, treated by covariance matrix parameter (NCovarMax = 1000) - integer nset !> Total number of datasets + integer nset !> Maximal number of datasets parameter (NSET = NSET_C) integer NKFactMax !> 
Max. number of k-factors parameter (NKFactMax = 10) integer NapplgridMax !> Max. number of applgrids - parameter (NapplgridMax = 10) - + parameter (NapplgridMax = 10) + integer NBinDimensionMax !> MAx. number of abstract bins - parameter (NBinDimensionMax=10) + parameter (NBinDimensionMax=10) integer NPlotMax !> Max. number of plots to be prepared parameter (NPlotMax = 500) - integer NXGridMax - parameter (NXGridMax = 500) \ No newline at end of file + integer NXGridMax + parameter (NXGridMax = 500) diff --git a/include/pdfparam.inc b/include/pdfparam.inc deleted file mode 100644 index f4b077271d84cb307df746960a75cd2cf83c4e0b..0000000000000000000000000000000000000000 --- a/include/pdfparam.inc +++ /dev/null @@ -1,65 +0,0 @@ - !> PDF parameterisations - - -C !> Common parameters for sumrules - double precision::uvalSum - double precision::dvalSum - common/sumrule_sums_common/uvalSum,dvalSum -C !> Standard parameterisation - - double precision paruval(1:10), pardval(1:10) - $ ,parubar(1:10), pardbar(1:10) - $ ,parglue(1:10),paru(1:10), pard(1:10) - $ ,parstr(1:10),parsea(1:10),parother(1:10) - $ ,parphoton(1:10) - common/stpar/paruval,pardval,parubar,pardbar, - $ parglue,paru,pard,parstr,parsea,parother,parphoton - -C--- - double precision fcharm,fstrange !> charm, strange fractions - common/pdfparam/fcharm,fstrange - -C -C !> Chebyshev polynoms for the gluon, sea -C - integer nchebmax - parameter (nchebmax=100) -C Gluon: - double precision chebpars(nchebmax),polypars(nchebmax) -C Sea, for ZEUS param. only: - double precision chebparsSea(nchebmax),polyparsSea(nchebmax) - double precision chebxminlog - - - common/pdfcheb/chebpars,polypars,chebxminlog, - $ chebparsSea,polyparsSea - - -C !> 25 Jan 2011: add poly param. for valence quarks - integer MaxPolyVal - parameter (MaxPolyVal=50) - -C !> internal order of the polynom: - integer NPOLYVALINT - - double precision PolyUval(MaxPolyVal),PolyDval(MaxPolyVal) - common/pdfpolyval/PolyUval,PolyDval,NPOLYVALINT - -C !> CTEQ-like parameterisation - double precision ctuval(1:9), ctdval(1:9),ctubar(1:9),ctdbar(1:9), - $ ctglue(1:9),ctother(1:9),ctstr(1:9), ctphoton(1:9) - common/ctpar/ctuval,ctdval,ctubar,ctdbar,ctglue,ctother,ctstr,ctphoton - -C !> Add AS parameterisation - double precision asuval(1:5), asdval(1:5), asubar(1:5), asdbar(1:5), - $ asglue(1:5),asother(1:5) - common/aspar/asuval,asdval,asubar,asdbar, - $ asglue,asother - -C !> Results of sum-rule integrals - double precision uv_sum, dv_sum, p_sum - common/csumrules/uv_sum, dv_sum, p_sum - -C !> Temperature paramter - double precision Temperature - Common/CTemperature/Temperature diff --git a/include/steering.inc b/include/steering.inc index 85a1d35dce7be1d47ecd6c1748bdd2ab7e1e2ebd..9b87cad7780b7366b6a648ff3e07a920150b92ff 100644 --- a/include/steering.inc +++ b/include/steering.inc @@ -4,7 +4,6 @@ integer Itheory !> Theory type $ ,I_FIT_ORDER !> Fit order (LO=0, NLO=1, NNLO=2) - $ ,IPARAM !> Internal flag for parameterisation type $ ,EWFIT !> EWfit (=0 NO EWFIT) $ ,iDH_MOD !> Correction factor to the chi2, scaling !> of statistical errors @@ -15,8 +14,9 @@ integer STATYPE,SYSTYPE !> For MC method, type of errors. logical lDEBUG !> Debug flag - logical DOBANDS !> Produce PDF error bands - logical DoBandsSym !> Produce symmetric bands +! DOBANDS moved to Minimizer class since 2.2.0 +! logical DOBANDS !> Produce PDF error bands +! 
logical DoBandsSym !> Produce symmetric bands logical lRAND !> MC method activation flag logical lrandData !> MC method using data (true) or prediction (false) logical H1QCDFUNC !> flag to store functions for grids in h1pdfs @@ -46,16 +46,16 @@ C------------------------------------------------- C Flag for PDF length, chebyshev param. for gluon integer ILENPDF real WMNLEN,WMXLEN ! W range used for pdf length calculation - -C Number of checbyshev parameters for gluon and sea (0 - standard param.) - integer nchebGLU,nchebSEA,IOFFSETCHEBSEA -C Type of cheb. parameterization: -C 0 - default -C 1 - (1-x) suppression. -C - integer ichebtypeGlu, ichebtypeSea - - real chebxmin !> low X-range to map with Cheb. polynoms +!Chebyshev parameterisation broken since 2.2.0 +!C Number of checbyshev parameters for gluon and sea (0 - standard param.) +! integer nchebGLU,nchebSEA,IOFFSETCHEBSEA +!C Type of cheb. parameterization: +!C 0 - default +!C 1 - (1-x) suppression. +!C +! integer ichebtypeGlu, ichebtypeSea +! +! real chebxmin !> low X-range to map with Cheb. polynoms C X-dependent fstrange: integer ifsttype !> 0 - default, 1 - hermes-like @@ -92,7 +92,8 @@ C integer ICHECK_QCDNUM !> Flag to perform out-of-grid check logical lead !> Flag to trigger lead PDF logical deuteron !> Flag to trigger deuteron PDF - logical CachePDFs !> Cache PDF calls to speedup +! Caching PDF calls is currently broken +! logical CachePDFs !> Cache PDF calls to speedup logical FLAGRW logical DORWONLY logical FlexibleGluon !> Flexible gluon parameterisation @@ -116,14 +117,13 @@ C common/STEERING/ & Q2VAL,starting_scale,strange_frac, Chi2MaxError, $ HF_MASS,charm_frac, - & lDEBUG,DOBANDS,UseGridLHAPDF5,WriteLHAPDF6,h1qcdfunc,WriteAlphaSToMemberPDF, - & Itheory,I_FIT_ORDER,IPARAM,HFSCHEME, + & lDEBUG,UseGridLHAPDF5,WriteLHAPDF6,h1qcdfunc,WriteAlphaSToMemberPDF, + & Itheory,I_FIT_ORDER,HFSCHEME, $ lRAND,STATYPE,SYSTYPE, - $ outxrange, outnx,ILENPDF,nchebGLU,chebxmin, - $ nchebSEA,wmnlen,wmxlen - $ ,ichebtypeGlu, ichebtypeSea + $ outxrange,outnx,ILENPDF, + $ wmnlen,wmxlen $ ,ifsttype,iSeeDmc - $ ,IOFFSETCHEBSEA,EWFIT + $ ,EWFIT $ ,npolyval, lead $ ,IZPOPOLY,IPOLYSQR, useapplg, napplgrids,lfitdy $ ,LFastAPPLGRID,Lranddata,iDH_MOD @@ -133,12 +133,11 @@ C $ ,StatScale, UncorSysScale, CorSysScale,UncorChi2Type, $ CorChi2Type, hf_scheme, AsymErrorsIterations, $ LUseAPPLgridCKM, pdfrotate, ExtraPdfs, WriteLHAPDF5, - $ DoBandsSym - $ , steering_check ! Keep this at the end of the common block + $ steering_check ! 
Keep this at the end of the common block common/chi2Options/Chi2PoissonCorr, Chi2FirstIterationRescale $ , Chi2ExtraSystRescale - common/STEERING2/FLAGRW, DORWONLY, CachePDFs, FlexibleGluon, + common/STEERING2/FLAGRW,DORWONLY,FlexibleGluon, $ read_qcdnum_tables, FreeStrange c parameters for QCDNUM x grid (unified with lower x values in applgrid) @@ -148,8 +147,8 @@ c parameters for QCDNUM x grid (unified with lower x values in applgrid) common/GRIDS/xmin_grid - character*64 PDF_DECOMPOSITION !> Style of PDF decompisition, e.g U_D_Ubar_Dbar - common/STEERING3/PDF_DECOMPOSITION +c character*64 PDF_DECOMPOSITION !> Style of PDF decompisition, e.g U_D_Ubar_Dbar +c common/STEERING3/PDF_DECOMPOSITION common/heavyflav/ @@ -168,11 +167,11 @@ C Value for selection of a dipole model fit common/DipCsModelType/DipCsModel - character*32 PDFStyle !> PDF style definition - common/CPdfStyle/PDFStyle +c character*32 PDFStyle !> PDF style definition +c common/CPdfStyle/PDFStyle - character*32 PDFType !> PDF type definition - common/CPdfStyle/PDFType +c character*32 PDFType !> PDF type definition +c common/CPdfStyle/PDFType character*128 LHAPDFSET !> LHAPDF set name character*128 LHAPDFVARSET !> LHAPDF VAR set name diff --git a/include/xfitter_cpp.h b/include/xfitter_cpp.h index 31c52e8d5f17b400c94e4df64c2d01c62ae7fe1c..4dfe290a8b5464861a7a2cd8113240d8b949838f 100644 --- a/include/xfitter_cpp.h +++ b/include/xfitter_cpp.h @@ -166,14 +166,12 @@ extern"C" { float hf_mass[3]; float charm_frac; int ldebug; - int dobands; int usegridlhapdf5; int writelhapdf6; int h1qcdfunc; int writealphastomemberpdf; int itheory; int i_fit_order; - int iparam; int hfscheme; int lrand; int statype; @@ -181,16 +179,10 @@ extern"C" { float outxrange[2]; int outnx; int ilenpdf; - int nchebglu; - float chebxmin; - int nchebsea; float wmnlen; float wmxlen; - int ichebtypeglu; - int ichebtypesea; int ifsttype; int iseedmc; - int ioffsetchebsea; int ewfit; int npolyval; int lead; @@ -220,7 +212,6 @@ extern"C" { int pdfrotate; int ExtraPdfs; int WriteLHAPDF5; - int DoBandsSym; int steering_check; // Keep this always last } steering_; diff --git a/include/xfitter_cpp_base.h b/include/xfitter_cpp_base.h index 582e0ddd5dcdfaa0bffedaa17af42b5e7525cb26..9cc0353c26efdd7c2a2300625a2a70a0f2398483 100644 --- a/include/xfitter_cpp_base.h +++ b/include/xfitter_cpp_base.h @@ -13,4 +13,4 @@ extern"C" { // Some basic other functions int OrderMap(std::string ord); void hf_errlog(int id, const std::string& message); - +std::string stringFromFortran(char*fortran_string,size_t size); diff --git a/include/xfitter_pars.h b/include/xfitter_pars.h index baba52b5744691fb5839f90e5276184d6c451bbe..285d5234048326f5e0ee61b7c27bd85e9f131cf6 100644 --- a/include/xfitter_pars.h +++ b/include/xfitter_pars.h @@ -58,10 +58,13 @@ namespace XFITTER_PARS { extern map<string,xfitter::EvolvedPDFfunction> gXfxQArrays; /// Global map to store evolutions + /// Do not access it directly, use xfitter::get_evolution extern map<string,xfitter::BaseEvolution*> gEvolutions; /// Global map to store decompositions + /// Do not access it directly, use xfitter::get_pdfDecomposition extern map<string,xfitter::BasePdfDecomposition*> gPdfDecompositions; /// Global map to store parameterisations + /// Do not access it directly, use xfitter::getParameterisation extern map<string,xfitter::BasePdfParam*>gParameterisations; /// Helper function to get input function from a yaml node /// diff --git a/include/xfitter_steer.h b/include/xfitter_steer.h index 
387ea8eb3476b5284ff03f9a38c188d01bd8167c..34f63ea602859abfbc0d864da98d81f45ec4c9ac 100644 --- a/include/xfitter_steer.h +++ b/include/xfitter_steer.h @@ -17,11 +17,20 @@ namespace xfitter class BasePdfParam; class BaseMinimizer; - /// Load named evolution code. + ///Get evolution by its name, creating it if needed + //If name=="", returns default evolution BaseEvolution* get_evolution(std::string name=""); - /// Load named pdfDecomposition code. + ///Get decomposition by its name, creating it if needed BasePdfDecomposition* get_pdfDecomposition(std::string name=""); BasePdfParam*getParameterisation(const std::string&name=""); - /// Load the minimizer - BaseMinimizer* get_minimizer(); + ///Get minimizer + BaseMinimizer*get_minimizer(); + + //Call atConfiguration change for all evolutions and decompositions + //Call this after changing some of configuration parameters + //Used by Profiler + void updateAtConfigurationChange(); + + //When fortran code accesses pdfs, it accesses this default evolution + extern BaseEvolution*defaultEvolution; } diff --git a/interfaces/src/THIS_CODE_IS_DISABLED b/interfaces/src/THIS_CODE_IS_DISABLED new file mode 100644 index 0000000000000000000000000000000000000000..d12c5cb9f8412368548032db73a57bdeab4c5c53 --- /dev/null +++ b/interfaces/src/THIS_CODE_IS_DISABLED @@ -0,0 +1,8 @@ +This old code will not be built, and is left for reference + +There is some sort of PDF caching and some fortran interfaces +As of xfitter version 2.2.0, they no longer work +Some functions have been reimplemented for 2.2.0, see src/fortran_interface.cc + +I leave the files here for now, but remove this directory from Makefiles. +--Ivan Novikov diff --git a/minimizers/BaseMinimizer/include/BaseMinimizer.h b/minimizers/BaseMinimizer/include/BaseMinimizer.h index 30ee110f4c70e20825cedb6d86e9543e46fdd447..8616386920fa72fdc2851e08f3d84db0544f9019 100644 --- a/minimizers/BaseMinimizer/include/BaseMinimizer.h +++ b/minimizers/BaseMinimizer/include/BaseMinimizer.h @@ -28,22 +28,17 @@ namespace xfitter class BaseMinimizer { public: /// default constructor - BaseMinimizer(const std::string& name) : _name(name), - _allParameterNames() - {} + BaseMinimizer(const std::string& name):_name(name),_allParameterNames(){} /// Initialization virtual void atStart() = 0; - /// Provide some information - virtual void printInfo(){}; - - /// Add parameter blocks with Npar parameters. Optional steps bounds and priors can be used for fixed/bounded parameters. + /// Add parameter block with Npar parameters virtual void addParameterBlock(int Npar, double const* pars - , std::string const* names - , double const* steps = nullptr - , double const* const* bounds = nullptr - , double const* const* priors = nullptr ); + , std::string const* names + , double const* steps = nullptr + , double const* const* bounds = nullptr + , double const* const* priors = nullptr ); /// Add single parameter. Optional flags and bounds can be used for fixed/bounded parameters. 
virtual void addParameter(double par, std::string const &name, double step = 0.01, double const* bounds = nullptr, double const* priors = nullptr ); diff --git a/parameters.yaml b/parameters.yaml index 73d7821614e5f39ced708301f89409104d5bf697..12cde859c7029b4f73f3b8a6573b840e845623a4 100644 --- a/parameters.yaml +++ b/parameters.yaml @@ -57,6 +57,10 @@ Parameterisations: par_g: class: NegativeGluon parameters: [Ag,Bg,Cg,ZERO,ZERO,Agp,Bgp,Cgp] + #Example for Expression parameterisation + #par_g: + # class: Expression + # expression: "Ag*(x^Bg*(1-x)^Cg-Agp*x^Bgp*(1-x)^Cgp)" DefaultDecomposition: proton Decompositions: @@ -81,6 +85,12 @@ Evolutions: # proton-APFEL: # ? !include yaml/evolutions/APFELxx/parameters.yaml # decomposition: proton +# antiproton: +# class: FlipCharge +# input: proton-QCDNUM +# neutron: +# class: FlipUD +# input: proton-QCDNUM Q0 : 1.378404875209 # Initial scale =sqrt(1.9) diff --git a/pdfparams/BasePdfParam/include/BasePdfParam.h b/pdfparams/BasePdfParam/include/BasePdfParam.h index 2d4a8217073f60f921fa064d891e2aebd749d012..bf18cb88a397ae699c12b279605246cf181cdd92 100644 --- a/pdfparams/BasePdfParam/include/BasePdfParam.h +++ b/pdfparams/BasePdfParam/include/BasePdfParam.h @@ -48,7 +48,7 @@ protected: const std::string _name; //!Array of pointers to some global locations where minimization parameters are stored double**pars{nullptr}; - //!Number of parameters, which is also the size of the array **parameters defined above + //!Number of parameters, which is also the size of the array **pars defined above unsigned int Npars{0}; }; } diff --git a/pdfparams/BasePdfParam/src/BasePdfParam.cc b/pdfparams/BasePdfParam/src/BasePdfParam.cc index 468cf3883d8eceb5d27306d363f5fb8c50a4e34c..c5e7561a8ea01785a8bbbac7f425ef9adfc520ab 100644 --- a/pdfparams/BasePdfParam/src/BasePdfParam.cc +++ b/pdfparams/BasePdfParam/src/BasePdfParam.cc @@ -7,21 +7,10 @@ #include<memory> #include<iostream> -/// TEMPORARY XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX -extern "C" { - /// Interface to minuit parameters - void addexternalparam_(const char name[], const double &val, - const double &step, - const double &min, const double &max, - const double &prior, const double &priorUnc, - const int &add, - map<std::string,double*> *map, - int len); -} namespace xfitter{ -/// Implement numeric integration BasePdfParam::~BasePdfParam(){if(pars)delete[]pars;} double BasePdfParam::moment(int iMoment)const{ + /// Numeric integration /// Simple rule, split log/lin spacing at xsplit=0.1 const double xsplit = 0.1; @@ -68,7 +57,6 @@ void BasePdfParam::atStart(){ } Npars=parsNode.size(); pars=new double*[Npars]; - //TODO: destructor for(unsigned int i=0;i<Npars;++i){ try{ pars[i]=XFITTER_PARS::gParameters.at(parsNode[i].as<string>()); @@ -83,30 +71,5 @@ void BasePdfParam::atStart(){ } } } - /* - using uint=unsigned int; - //cout<<"DEBUG["<<_name<<"]: initFromYaml: value="<<value<<endl; - if(value.IsSequence()){ - Npars=value.size(); - //cout<<Npars<<endl; - pars=new double*[Npars]; - // HARDWIRE old-way for now: XXXXXXXXXXXXXXXXXXXXXXXXXXXXX - for(uint i=0;i<Npars;++i){ - // get a good name - const std::string pnam=_name+"_p"+std::to_string(i); - double val =value[i].as<double>(); - double step =fabs(val)/100.; /// if 0, parameter is fixed !!! 
- //double minv =0; - //double maxv =0; - //double priorVal=0; - //double priorUnc=0; - //int add = true; - // addexternalparam_(pnam.c_str(),val,step,minv,maxv,priorVal,priorUnc,add,&XFITTER_PARS::gParameters,pnam.size()); - - } - }else{ - cout<<"ERROR["<<_name<<"]: initFromYaml: parameter is not a sequence!"<<endl; - } - */ } } diff --git a/pdfparams/Expression/include/ExpressionPdfParam.h b/pdfparams/Expression/include/ExpressionPdfParam.h new file mode 100644 index 0000000000000000000000000000000000000000..d3e219099b6b0663a818d37d7d7aaea0099fd8cc --- /dev/null +++ b/pdfparams/Expression/include/ExpressionPdfParam.h @@ -0,0 +1,42 @@ +#pragma once +#include"BasePdfParam.h" +#include"tinyexpr.h" + +/** + @class ExpressionPdfParam + + @brief A class for Expression pdf parameterisation + + Write parameterisation as an expression directly in input YAML file. + Based on TinyExpr parser. + Expression may contain math functions supported by TinyExpr (sin, log, exp, ...) + , minimization parameters and 'x' + Note that '^' is exponentiation. + + For sumrules, numerical integration as implemented in BasePdfParam is used + + If given expression has form "PAR*rest_of_expression", + parameter PAR will be scaled to enforce sumrules (to set moment) + Otherwise moment of expression cannot be set, and trying to do so will raise an error + + Example: + Parameterisations: + example: + class: Expression + expression: "Av*x^Bv*(1-x)^Cv" +*/ + +namespace xfitter{ +class ExpressionPdfParam:public BasePdfParam{ + public: + ExpressionPdfParam(const std::string&name):BasePdfParam(name){} + virtual double operator()(double x)const override final; + virtual void setMoment(int nMoment,double value)override final; + virtual void atStart()override final; + private: + te_expr*expr=nullptr; + double*normPar=nullptr;//normalization parameter, will be modified to set moment + //normPar==nullptr <=> cannot set moment + mutable double x; +}; +} diff --git a/pdfparams/Expression/src/ExpressionPdfParam.cc b/pdfparams/Expression/src/ExpressionPdfParam.cc new file mode 100644 index 0000000000000000000000000000000000000000..1b63b9dcbdd3ae583b0d0843773b9583f76725ab --- /dev/null +++ b/pdfparams/Expression/src/ExpressionPdfParam.cc @@ -0,0 +1,102 @@ +#include"ExpressionPdfParam.h" +#include"xfitter_pars.h" +#include"xfitter_cpp_base.h" +#include"expression_utils.h" +#include<iostream> +using namespace std; +namespace xfitter{ +//returns true if expression is not a sum +bool isProduct(const string&s){ + size_t d=0;//current bracket depth + for(const char*p=s.c_str();*p;++p){ + if(*p=='(')d++; + else if(*p==')')d--; + else if(d==0&&(*p=='+'||*p=='-'))return false; + } + return true; +} +//if expression has form parname*other_stuff, return parname +//otherwise return "" +string getNormalizationParameter(const string&s){ + const char*p=s.c_str(); + while(*p==' '){//skip leading whitespace + if(!*p)return""; + ++p; + } + const char*b=p;//start of found substring + while(isalnum(*p)||*p=='_')++p;//find end of parameter name + size_t n=p-b;//substring size + if(n==1&&*b=='x')return ""; + //make sure normalization factor is followed by '*' or '/' + while(*p==' ')++p;//skip whitespace + if(!(*p=='*'||*p=='/'))return ""; + return string(b,n); +} +//for dynamic loading +extern"C" ExpressionPdfParam*create(const char*s){return new ExpressionPdfParam(s);} +double ExpressionPdfParam::operator()(double _x)const{ + x=_x; + return te_eval(expr); +} +void ExpressionPdfParam::atStart(){ + YAML::Node 
node=XFITTER_PARS::getParameterisationNode(_name)["expression"]; + string expression; + try{ + expression=node.as<string>(); + //}catch(const YAML::InvalidNode&ex){ + }catch(const YAML::BadConversion&ex){ + if(!node){ + cerr<<"[ERROR] No expression given for parameterisation \""<<_name<<"\""<<endl; + hf_errlog(19022600,"F: No expression given for parameterisation, see stderr"); + }else{ + cerr<<"[ERROR] Failed to convert expression given for decomposition \""<<_name<<"\" to string"<<endl; + hf_errlog(19022601,"F: Invalid expression parameter for parameterisation, see stderr"); + } + } + vector<string>pars; + extractParameterNames(expression,pars); + size_t Npars=pars.size(); + te_variable*vars=new te_variable[Npars+1];//+1 because of x + for(size_t i=0;i<Npars;++i){ + const string&parname=pars[i]; + vars[i].name=parname.c_str(); + try{ + vars[i].address=XFITTER_PARS::gParameters.at(parname); + }catch(const std::out_of_range&e){ + cerr<<"[ERROR] Unknown parameter \""<<parname<<"\" in expression \""<<expression<<"\" for parameterisation \""<<_name<<"\""<<endl; + hf_errlog(19022610,"F: Unknown parameter in parameterisation expression, see stderr"); + } + vars[i].type=TE_VARIABLE; + vars[i].context=nullptr; + } + vars[Npars].name="x"; + vars[Npars].address=&x; + vars[Npars].type=TE_VARIABLE; + vars[Npars].context=nullptr; + int err; + expr=te_compile(expression.c_str(),vars,Npars+1,&err); + if(!expr){ + cerr<<"[ERROR] TinyExpr error while parsing expression \""<<expression<<"\" for parameterisation \""<<_name<<"\"; error at position "<<err<<endl; + hf_errlog(19022611,"F: Failed to parse expression for parameterisation, see stderr"); + } + delete[]vars; + if(isProduct(expression)){ + //Try to guess which parameter should be used to set moment + string normParName=getNormalizationParameter(expression); + if(normParName=="")goto cant_moment; + normPar=XFITTER_PARS::gParameters.at(normParName); + return; + }//else + cant_moment: + normPar=nullptr; + hf_errlog(19022612,"W: Expression parameterization: cannot set moment"); +} +void ExpressionPdfParam::setMoment(int n,double val){ + if(!normPar){ + cerr<<"[ERROR] Do not know which parameter to scale to set moment of parameterisation \""<<_name<<"\""<<endl; + hf_errlog(19022613,"F: Expression parameterisation cannot set moment, see stderr"); + } + *normPar=1; + *normPar=val/moment(n); +} +} diff --git a/pdfparams/Expression/src/Makefile.am b/pdfparams/Expression/src/Makefile.am new file mode 100644 index 0000000000000000000000000000000000000000..e5161e580cc49e9027d223a17ac5598d46aa1c29 --- /dev/null +++ b/pdfparams/Expression/src/Makefile.am @@ -0,0 +1,7 @@ +#Automatically generated by ./tools/AddPdfParam.py on 2019-02-26 +AM_CXXFLAGS=-I$(srcdir)/../include -I$(srcdir)/../../../include -I$(srcdir)/../../BasePdfParam/include -Wall -fPIC -Wno-deprecated + +lib_LTLIBRARIES=libExpressionPdfParam_xfitter.la +libExpressionPdfParam_xfitter_la_SOURCES=ExpressionPdfParam.cc +dist_noinst_HEADERS=../include +libExpressionPdfParam_xfitter_la_LDFLAGS=-lBasePdfParam_xfitter -L$(libdir) diff --git a/pdfparams/HERAPDF_PdfParam/include/HERAPDF_PdfParam.h b/pdfparams/HERAPDF_PdfParam/include/HERAPDF_PdfParam.h index b406f9bddb5bb4acd26808b436d30ca6317843de..dadba0450c2677b07bf4498105b00d94b5831527 100644 --- a/pdfparams/HERAPDF_PdfParam/include/HERAPDF_PdfParam.h +++ b/pdfparams/HERAPDF_PdfParam/include/HERAPDF_PdfParam.h @@ -27,5 +27,6 @@ class HERAPDF_PdfParam:public BasePdfParam{ HERAPDF_PdfParam(const std::string&inName):BasePdfParam(inName){} virtual double 
operator()(double x)const override final; virtual double moment(int nMoment=-1)const override final; + virtual void atStart()override final; }; } diff --git a/pdfparams/HERAPDF_PdfParam/src/HERAPDF_PdfParam.cc b/pdfparams/HERAPDF_PdfParam/src/HERAPDF_PdfParam.cc index 44ea567e620e1b7a6f539c84ca5799dbfe65bb33..491b89ce1776bde98bba128b2fac07085b3888c3 100644 --- a/pdfparams/HERAPDF_PdfParam/src/HERAPDF_PdfParam.cc +++ b/pdfparams/HERAPDF_PdfParam/src/HERAPDF_PdfParam.cc @@ -6,23 +6,31 @@ */ #include "HERAPDF_PdfParam.h" +#include "xfitter_cpp_base.h" #include <cmath> +#include <iostream> namespace xfitter{ //for dynamic loading extern"C" HERAPDF_PdfParam*create(const char*name){ return new HERAPDF_PdfParam(name); } +void HERAPDF_PdfParam::atStart(){ + using namespace std; + BasePdfParam::atStart(); + const size_t n=getNPar(); + if(n<3){ + cerr<<"[ERROR] Too few parameters given to parameterisation \""<<_name<<"\", expected at least 3, got "<<n<<endl; + hf_errlog(18120700,"F: Wrong number of parameters for a parameterisation, see stderr"); + } +} // Main function to compute PDF double HERAPDF_PdfParam::operator()(double x)const{ - const int npar = getNPar(); - if (npar<3) { - return NAN; - } + const unsigned int npar=getNPar(); double power=(*pars[0])*pow(x,(*pars[1]))*pow((1-x),(*pars[2])); double poly = 1; double xx = 1; - for (int i = 3; i<npar; i++) { + for (unsigned int i = 3; i<npar; i++) { xx *= x; poly+=(*pars[i])*xx; } @@ -41,9 +49,9 @@ double HERAPDF_PdfParam::moment(int n)const{ //=> moment=A*beta(B,C)*(1+sum_{i=3}^N{P[i]*product_{k=0}^{i-3}{(B+k)/(B+C+k)}})= //=> moment=A*beta(B,C)*(1+P[3]*B/(B+C)+P[4]*B/(B+C)*(B+1)/(B+C+1)+...) //beta(B,C)=exp(lgamma(B)+lgamma(C)-lgamma(B+C)) - using uint=unsigned int; const double B=(*pars[1])+(n+1),C=(*pars[2])+1; - const uint N=getNPar(); + if(B<=0.||C<=0.)return NAN;// integral does not converge + const size_t N=getNPar(); double sum=1; double prod=1; double a=B; diff --git a/pdfparams/NegativeGluonPdfParam/include/NegativeGluonPdfParam.h b/pdfparams/NegativeGluonPdfParam/include/NegativeGluonPdfParam.h index e03125ee0ed72dfc0fc17bc1e996856d91496ed2..e9ac79c2ea31bbec4af6a03c83799c37f243105c 100644 --- a/pdfparams/NegativeGluonPdfParam/include/NegativeGluonPdfParam.h +++ b/pdfparams/NegativeGluonPdfParam/include/NegativeGluonPdfParam.h @@ -19,5 +19,6 @@ namespace xfitter { //Evaluate xf(x) at given x with current parameters virtual double operator()(double x)const override final; virtual double moment(int nMoment=-1)const override final; + virtual void atStart()override final; }; }; diff --git a/pdfparams/NegativeGluonPdfParam/src/NegativeGluonPdfParam.cc b/pdfparams/NegativeGluonPdfParam/src/NegativeGluonPdfParam.cc index 687ae3367febf6fad9a1cc6fbdacbc9522418999..6d7b278809e1f81299e2961dfb2f4a695f3e776a 100644 --- a/pdfparams/NegativeGluonPdfParam/src/NegativeGluonPdfParam.cc +++ b/pdfparams/NegativeGluonPdfParam/src/NegativeGluonPdfParam.cc @@ -7,7 +7,9 @@ */ #include "NegativeGluonPdfParam.h" +#include "xfitter_cpp_base.h" #include <cmath> +#include <iostream> namespace xfitter{ //for dynamic loading @@ -15,12 +17,16 @@ namespace xfitter{ return new NegativeGluonPdfParam(name); } // Main function to compute PDF - double NegativeGluonPdfParam::operator()(double x)const{ - //Your code here - const int npar = getNPar(); - if (npar !=8) { - return NAN; + void NegativeGluonPdfParam::atStart(){ + using namespace std; + BasePdfParam::atStart(); + const size_t n=getNPar(); + if(n!=8){ + cerr<<"[ERROR] Wrong number of parameters given to 
parameterisation \""<<_name<<"\", expected 8, got "<<n<<endl; + hf_errlog(18120700,"F: Wrong number of parameters for a parameterisation, see stderr"); } + } + double NegativeGluonPdfParam::operator()(double x)const{ double Pos = pow(x,(*pars[1]))*pow((1-x),(*pars[2])) * ( 1 + x * (*pars[3]) + x*x * (*pars[4])*(*pars[4])); double Neg = (*pars[5])*pow(x,(*pars[6]))*pow((1-x),(*pars[7])) ; return (*pars[0])*(Pos-Neg); @@ -30,6 +36,7 @@ namespace xfitter{ // Positive part: const double B=(*pars[1])+(n+1) , C=(*pars[2])+1; + if(B<=0.||C<=0.)return NAN;// integral does not converge double sum=1; double prod=1; double a=B; @@ -42,8 +49,8 @@ namespace xfitter{ } // Negative part: const double Bn=(*pars[6])+(n+1) , Cn=(*pars[7])+1; + if(Bn<=0.||Cn<=0.)return NAN;// integral does not converge return (*pars[0])*( exp(lgamma(B)+lgamma(C)-lgamma(B+C))*sum - (*pars[5])*exp(lgamma(Bn)+lgamma(Cn)-lgamma(Bn+Cn)) ) ; } - } diff --git a/pdfparams/PolySqrtPdfParam/include/PolySqrtPdfParam.h b/pdfparams/PolySqrtPdfParam/include/PolySqrtPdfParam.h index 203a2ebb84ef97ed8c0f227c4315bd7f6063d0cd..94b44fc3902c78806b8e82e1e0f4758457d841b3 100644 --- a/pdfparams/PolySqrtPdfParam/include/PolySqrtPdfParam.h +++ b/pdfparams/PolySqrtPdfParam/include/PolySqrtPdfParam.h @@ -27,5 +27,6 @@ class PolySqrtPdfParam:public BasePdfParam{ PolySqrtPdfParam(const std::string&inName):BasePdfParam(inName){} virtual double operator()(double x)const override final; virtual double moment(int nMoment=-1)const override final; + virtual void atStart()override final; }; } diff --git a/pdfparams/PolySqrtPdfParam/src/PolySqrtPdfParam.cc b/pdfparams/PolySqrtPdfParam/src/PolySqrtPdfParam.cc index e1258bd7cd1eef20594d8b06974d0d3210d02050..ad4ebe1274ec8a7ed6d7c553826f1d6c6916a357 100644 --- a/pdfparams/PolySqrtPdfParam/src/PolySqrtPdfParam.cc +++ b/pdfparams/PolySqrtPdfParam/src/PolySqrtPdfParam.cc @@ -7,7 +7,9 @@ */ #include"PolySqrtPdfParam.h" +#include"xfitter_cpp_base.h" #include<cmath> +#include<iostream> using namespace std; using uint=unsigned int; namespace xfitter{ @@ -15,6 +17,15 @@ namespace xfitter{ extern"C" PolySqrtPdfParam*create(const char*name){ return new PolySqrtPdfParam(name); } +void PolySqrtPdfParam::atStart(){ + using namespace std; + BasePdfParam::atStart(); + const size_t n=getNPar(); + if(n<3){ + cerr<<"[ERROR] Too few parameters given to parameterisation \""<<_name<<"\", expected at least 3, got "<<n<<endl; + hf_errlog(18120700,"F: Wrong number of parameters for a parameterisation, see stderr"); + } +} double PolySqrtPdfParam::operator()(double x)const{ const uint N=getNPar(); double pol=1; @@ -42,6 +53,7 @@ double PolySqrtPdfParam::moment(int n)const{ //=> moment=A*(beta(B,C)*(1+sum(k=1;2+2k<=N;k++){P[2+2k]*product_{i=0}^{k-1}{(B+i)/(B+C+i)}})+beta(B+1/2,C)*sum(k=0;3+2k<=N;k++){P[3+2k]*product_{i=0}^{k-1}{(B+1/2+i)/(B+C+1/2+i)}}) using uint=unsigned int; const double B=(*pars[1])+(n+1),C=(*pars[2])+1; + if(B<=0.||C<=0.)return NAN;// integral does not converge const uint N=getNPar(); double sum=1; double prod=1; diff --git a/reactions/AFB/include/ReactionAFB.h b/reactions/AFB/include/ReactionAFB.h index 7b9a74b0f72d8bc1f6b813ea6fd3f5955b859b92..b3cd2970e5ba8f0489bc6380e99b9a954d304580 100644 --- a/reactions/AFB/include/ReactionAFB.h +++ b/reactions/AFB/include/ReactionAFB.h @@ -6,7 +6,7 @@ /** @class' ReactionAFB - @brief A wrapper class for AFB reaction + @brief A wrapper class for AFB reaction Based on the ReactionTheory class. Reads options produces 3d cross section. 
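A note on the moment() guards introduced in the parameterisation changes above: each of these parameterisations is built from terms of the form A*x^B*(1-x)^C times a polynomial, so their analytic moments reduce to Euler beta functions and the integrals only converge for B+n+1 > 0 and C+1 > 0, which is exactly what the new "if(B<=0.||C<=0.)return NAN;" checks enforce. Below is a minimal, stand-alone illustration of the leading term (polynomial corrections omitted); it is a sketch for orientation, not code taken from the patch.

    #include <cmath>
    #include <cstdio>
    // n-th moment of the leading term A*x^B*(1-x)^C, with the convention implied
    // by the code above (moment(n) = integral_0^1 x^n * xf(x) dx):
    //   A * Beta(B+n+1, C+1), evaluated via lgamma to avoid overflow.
    double leadingMoment(double A, double B, double C, int n) {
      const double b = B + n + 1, c = C + 1;
      if (b <= 0. || c <= 0.) return NAN;   // integral does not converge
      return A * std::exp(std::lgamma(b) + std::lgamma(c) - std::lgamma(b + c));
    }
    int main() {
      // e.g. A=1, B=0.5, C=3, n=0  ->  Beta(1.5,4) = 6/(1.5*2.5*3.5*4.5) ~ 0.1016
      std::printf("%.4f\n", leadingMoment(1.0, 0.5, 3.0, 0));
      return 0;
    }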
@@ -26,7 +26,7 @@ class ReactionAFB : public ReactionTheory public: virtual string getReactionName() const { return "AFB" ;}; - int initAtStart(const string &); + int atStart(const string &); virtual int compute(int dataSetID, valarray<double> &val, map<string, valarray<double> > &err); protected: virtual int parseOptions(){ return 0;}; @@ -35,7 +35,7 @@ class ReactionAFB : public ReactionTheory // Define a structure to pass the parameters struct integration_params { double Minv; - ReactionTheory* ptr; + ReactionTheory* ptr; }; static size_t calls; @@ -50,77 +50,77 @@ class ReactionAFB : public ReactionTheory static double Z_Vu, Z_Au, Z_Vd, Z_Ad, Z_Vl, Z_Al, Z_Vnu, Z_Anu; static double even_foton_up, even_foton_down, even_interf_up, even_interf_down, even_Z_up, even_Z_down; static double odd_foton_up, odd_foton_down, odd_interf_up, odd_interf_down, odd_Z_up, odd_Z_down; - + static double *propagators (double); - - + + static double uubarEF_funct (double, void *); static double integration_uubarEF_y (double, void *); double integration_uubarEF (double, double, void *); - + static double uubarEB_funct (double, void *); static double integration_uubarEB_y (double, void *); double integration_uubarEB (double, double, void *); - + static double uubarOF_funct (double, void *); static double integration_uubarOF_y (double, void *); double integration_uubarOF (double, double, void *); - + static double uubarOB_funct (double, void *); static double integration_uubarOB_y (double, void *); double integration_uubarOB (double, double, void *); - - + + static double ubaruEF_funct (double, void *); static double integration_ubaruEF_y (double, void *); double integration_ubaruEF (double, double, void *); - + static double ubaruEB_funct (double, void *); static double integration_ubaruEB_y (double, void *); double integration_ubaruEB (double, double, void *); - + static double ubaruOF_funct (double, void *); static double integration_ubaruOF_y (double, void *); double integration_ubaruOF (double, double, void *); - + static double ubaruOB_funct (double, void *); static double integration_ubaruOB_y (double, void *); double integration_ubaruOB (double, double, void *); - - + + static double ddbarEF_funct (double, void *); static double integration_ddbarEF_y (double, void *); double integration_ddbarEF (double, double, void *); - + static double ddbarEB_funct (double, void *); static double integration_ddbarEB_y (double, void *); double integration_ddbarEB (double, double, void *); - + static double ddbarOF_funct (double, void *); static double integration_ddbarOF_y (double, void *); double integration_ddbarOF (double, double, void *); - + static double ddbarOB_funct (double, void *); static double integration_ddbarOB_y (double, void *); double integration_ddbarOB (double, double, void *); - - + + static double dbardEF_funct (double, void *); static double integration_dbardEF_y (double, void *); double integration_dbardEF (double, double, void *); - + static double dbardEB_funct (double, void *); static double integration_dbardEB_y (double, void *); double integration_dbardEB (double, double, void *); - + static double dbardOF_funct (double, void *); static double integration_dbardOF_y (double, void *); double integration_dbardOF (double, double, void *); - + static double dbardOB_funct (double, void *); static double integration_dbardOB_y (double, void *); double integration_dbardOB (double, double, void *); - + double AFB (double, double); }; diff --git a/reactions/AFB/src/ReactionAFB.cc 
b/reactions/AFB/src/ReactionAFB.cc index a8808fe489dad28acbdd46d44b97de174e4f888e..f178960e0c3f9aeb9b3f673875156fa9cf990dc2 100644 --- a/reactions/AFB/src/ReactionAFB.cc +++ b/reactions/AFB/src/ReactionAFB.cc @@ -1,4 +1,4 @@ - + /* @file ReactionAFB.cc @date 2018-07-16 @@ -36,24 +36,24 @@ double *ReactionAFB::propagators (double Minv) double foton_squared = 1.0/pow(Minv,4); double interference = 2.0*(-pow(Minv,2)*(pow(MZ_param,2)-pow(Minv,2)))/(pow(Minv,4)*((pow(pow(MZ_param,2)-pow(Minv,2),2))+pow(MZ_param,2)*pow(GammaZ_param,2))); double Z_squared = 1.0/(pow(pow(MZ_param,2)-pow(Minv,2),2)+pow(MZ_param,2)*pow(GammaZ_param,2)); - - static double propagators[4]; + + static double propagators[4]; propagators[0] = (even_foton_up * foton_squared) + (even_interf_up * interference) + (even_Z_up * Z_squared); propagators[1] = (odd_foton_up * foton_squared) + (odd_interf_up * interference) + (odd_Z_up * Z_squared); propagators[2] = (even_foton_down * foton_squared) + (even_interf_down * interference) + (even_Z_down * Z_squared); propagators[3] = (odd_foton_down * foton_squared) + (odd_interf_down * interference) + (odd_Z_down * Z_squared); - + return propagators; } ////UUBAR EVEN FORWARD Matrix element double ReactionAFB::uubarEF_funct (double yreduced, void * params) { - + // Pointer to access PDFs - ReactionTheory* ptr = (ReactionTheory*) ((integration_params*)params)->ptr; + ReactionTheory* ptr = (ReactionTheory*) ((integration_params*)params)->ptr; // Pass the invariant mass as parameter double Minv = ((integration_params*)params)-> Minv; - + // Partonic cross section parameters double Q = Minv; double z = pow(Minv,2)/pow(energy_param,2); @@ -62,7 +62,7 @@ double ReactionAFB::uubarEF_funct (double yreduced, void * params) { double x2 = sqrt(z)*exp(-y); double dsigma_temp = pow(Minv,2)/(96*PI); double dsigma = GeVtofb_param*dsigma_temp*(2*Minv/pow(energy_param,2))*(-(1.0/2.0)*log(z)); - + // Partons PDFs std::valarray<double> pdfx1(13); std::valarray<double> pdfx2(13); @@ -73,41 +73,41 @@ double ReactionAFB::uubarEF_funct (double yreduced, void * params) { double f2ubar = pdfx2[4] / x2; double f2cbar = pdfx2[2] / x2; - // PDF combinations + // PDF combinations double uubar_PDF = f1u*f2ubar + f1c*f2cbar; - + // Angular integration limits double qqbar_cos_theta_max = min(cos(2*atan(exp(-eta_cut_param-y))),sqrt(1-4*(pow(pT_cut_param,2)/pow(Minv,2)))); double qqbar_cos_theta_min = 0; - + double angular_integration_EF = (qqbar_cos_theta_max-qqbar_cos_theta_min)+(1.0/3.0)*(pow(qqbar_cos_theta_max,3)-pow(qqbar_cos_theta_min,3)); - + // Combination with angular integration (Forward - Backward for q-qbar) double dsigma_EF = dsigma*angular_integration_EF; // Covolution with PDFs (flipping direction for q-qbar & qbar-q) // U-UBAR double uubarEF = uubar_PDF*dsigma_EF; - + double *propagator = propagators (Minv); - + return uubarEF * propagator[0]; // Multiply the PDFs combination with the correct propagator. 
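    // Reading aid for the index convention, read off from ReactionAFB::propagators(Minv)
    // above and reused by all sixteen *_funct matrix elements in this file:
    //   propagator[0] : parity-even couplings, up-type quarks   (uubar/ubaru, EF & EB)
    //   propagator[1] : parity-odd  couplings, up-type quarks   (uubar/ubaru, OF & OB)
    //   propagator[2] : parity-even couplings, down-type quarks (ddbar/dbard, EF & EB)
    //   propagator[3] : parity-odd  couplings, down-type quarks (ddbar/dbard, OF & OB)
    // Each entry combines the pure-photon (1/Minv^4), photon-Z interference and
    // Z-squared (Breit-Wigner) pieces weighted by the corresponding coupling products.
    // Here E/O labels the parity-even/odd angular integrand and F/B the forward/backward
    // integration range (cos(theta) > 0 versus cos(theta) < 0), as set by the
    // qqbar/qbarq angular limits in each matrix element.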
} ////UUBAR EVEN FORWARD Integration in rapidity double ReactionAFB::integration_uubarEF_y (double Minv, void * ptr) { - + // Pass the necessary parameters (pointer to the PDFs and Minv) integration_params integrationParams; integrationParams.Minv = Minv; integrationParams.ptr = (ReactionTheory*) ptr; - + double result, error; gsl_function F; F.function = &(ReactionAFB::uubarEF_funct); F.params = &integrationParams; - + double inf = y_min_param / log(energy_param/Minv); double sup; if (y_max_param == 0.0) { @@ -115,15 +115,15 @@ double ReactionAFB::integration_uubarEF_y (double Minv, void * ptr) { } else { sup = y_max_param / log(energy_param/Minv); } - - gsl_integration_qng (&F, inf, sup, epsabs, epsrel, &result, &error, &calls); + + gsl_integration_qng (&F, inf, sup, epsabs, epsrel, &result, &error, &calls); return 2*result; } ////UUBAR EVEN FORWARD Integration in invariant mass double ReactionAFB::integration_uubarEF (double Minv_inf, double Minv_sup, void* ptr) { - + double result, error; gsl_function F; @@ -132,20 +132,20 @@ double ReactionAFB::integration_uubarEF (double Minv_inf, double Minv_sup, void* double inf = Minv_inf; double sup = Minv_sup; - - gsl_integration_qng (&F, inf, sup, epsabs, epsrel, &result, &error, &calls); + + gsl_integration_qng (&F, inf, sup, epsabs, epsrel, &result, &error, &calls); return result; } ////UUBAR EVEN BACKWARD Matrix element double ReactionAFB::uubarEB_funct (double yreduced, void * params) { - + // Pointer to access PDFs - ReactionTheory* ptr = (ReactionTheory*) ((integration_params*)params)->ptr; + ReactionTheory* ptr = (ReactionTheory*) ((integration_params*)params)->ptr; // Pass the invariant mass as parameter double Minv = ((integration_params*)params)-> Minv; - + // Partonic cross section parameters double Q = Minv; double z = pow(Minv,2)/pow(energy_param,2); @@ -154,7 +154,7 @@ double ReactionAFB::uubarEB_funct (double yreduced, void * params) { double x2 = sqrt(z)*exp(-y); double dsigma_temp = pow(Minv,2)/(96*PI); double dsigma = GeVtofb_param*dsigma_temp*(2*Minv/pow(energy_param,2))*(-(1.0/2.0)*log(z)); - + // Partons PDFs std::valarray<double> pdfx1(13); std::valarray<double> pdfx2(13); @@ -165,40 +165,40 @@ double ReactionAFB::uubarEB_funct (double yreduced, void * params) { double f2ubar = pdfx2[4] / x2; double f2cbar = pdfx2[2] / x2; - // PDF combinations + // PDF combinations double uubar_PDF = f1u*f2ubar + f1c*f2cbar; - + // Angular integration limits double qbarq_cos_theta_max = 0; - double qbarq_cos_theta_min = max(cos(PI - 2*atan(exp(-eta_cut_param-y))),-sqrt(1-4*(pow(pT_cut_param,2)/pow(Minv,2)))); - + double qbarq_cos_theta_min = max(cos(PI - 2*atan(exp(-eta_cut_param-y))),-sqrt(1-4*(pow(pT_cut_param,2)/pow(Minv,2)))); + double angular_integration_EB = (qbarq_cos_theta_max-qbarq_cos_theta_min)+(1.0/3.0)*(pow(qbarq_cos_theta_max,3)-pow(qbarq_cos_theta_min,3)); - + // Combination with angular integration (Forward - Backward for q-qbar) double dsigma_EB = dsigma*angular_integration_EB; // Covolution with PDFs (flipping direction for q-qbar & qbar-q) // U-UBAR double uubarEB = uubar_PDF*dsigma_EB; - + double *propagator = propagators (Minv); return uubarEB * propagator[0]; } ////UUBAR EVEN BACKWARD Integration in rapidity double ReactionAFB::integration_uubarEB_y (double Minv, void * ptr) { - + // Pass the necessary parameters (pointer to the PDFs and Minv) integration_params integrationParams; integrationParams.Minv = Minv; integrationParams.ptr = (ReactionTheory*) ptr; - + double result, error; gsl_function F; F.function = 
&(ReactionAFB::uubarEB_funct); F.params = &integrationParams; - + double inf = y_min_param / log(energy_param/Minv); double sup; if (y_max_param == 0.0) { @@ -206,37 +206,37 @@ double ReactionAFB::integration_uubarEB_y (double Minv, void * ptr) { } else { sup = y_max_param / log(energy_param/Minv); } - - gsl_integration_qng (&F, inf, sup, epsabs, epsrel, &result, &error, &calls); + + gsl_integration_qng (&F, inf, sup, epsabs, epsrel, &result, &error, &calls); return 2*result; } ////UUBAR EVEN BACKWARD Integration in invariant mass double ReactionAFB::integration_uubarEB (double Minv_inf, double Minv_sup, void* ptr) { - + double result, error; gsl_function F; F.function = &(ReactionAFB::integration_uubarEB_y); F.params = ptr; - + double inf = Minv_inf; double sup = Minv_sup; - - gsl_integration_qng (&F, inf, sup, epsabs, epsrel, &result, &error, &calls); + + gsl_integration_qng (&F, inf, sup, epsabs, epsrel, &result, &error, &calls); return result; } ////UUBAR ODD FORWARD Matrix element double ReactionAFB::uubarOF_funct (double yreduced, void * params) { - + // Pointer to access PDFs - ReactionTheory* ptr = (ReactionTheory*) ((integration_params*)params)->ptr; + ReactionTheory* ptr = (ReactionTheory*) ((integration_params*)params)->ptr; // Pass the invariant mass as parameter double Minv = ((integration_params*)params)-> Minv; - + // Partonic cross section parameters double Q = Minv; double z = pow(Minv,2)/pow(energy_param,2); @@ -245,7 +245,7 @@ double ReactionAFB::uubarOF_funct (double yreduced, void * params) { double x2 = sqrt(z)*exp(-y); double dsigma_temp = pow(Minv,2)/(96*PI); double dsigma = GeVtofb_param*dsigma_temp*(2*Minv/pow(energy_param,2))*(-(1.0/2.0)*log(z)); - + // Partons PDFs std::valarray<double> pdfx1(13); std::valarray<double> pdfx2(13); @@ -256,41 +256,41 @@ double ReactionAFB::uubarOF_funct (double yreduced, void * params) { double f2ubar = pdfx2[4] / x2; double f2cbar = pdfx2[2] / x2; - // PDF combinations + // PDF combinations double uubar_PDF = f1u*f2ubar + f1c*f2cbar; - + // Angular integration limits double qqbar_cos_theta_max = min(cos(2*atan(exp(-eta_cut_param-y))),sqrt(1-4*(pow(pT_cut_param,2)/pow(Minv,2)))); - double qqbar_cos_theta_min = 0; - + double qqbar_cos_theta_min = 0; + double angular_integration_OF = pow(qqbar_cos_theta_max,2) - pow(qqbar_cos_theta_min,2); - + // Combination with angular integration (Forward - Backward for q-qbar) double dsigma_OF = dsigma*angular_integration_OF; // Covolution with PDFs (flipping direction for q-qbar & qbar-q) // U-UBAR double uubarOF = uubar_PDF*dsigma_OF; - + double *propagator = propagators (Minv); - + return uubarOF * propagator[1]; } ////UUBAR ODD FORWARD Integration in rapidity double ReactionAFB::integration_uubarOF_y (double Minv, void * ptr) { - + // Pass the necessary parameters (pointer to the PDFs and Minv) integration_params integrationParams; integrationParams.Minv = Minv; integrationParams.ptr = (ReactionTheory*) ptr; - + double result, error; gsl_function F; F.function = &(ReactionAFB::uubarOF_funct); F.params = &integrationParams; - + double inf = y_min_param / log(energy_param/Minv); double sup; if (y_max_param == 0.0) { @@ -298,37 +298,37 @@ double ReactionAFB::integration_uubarOF_y (double Minv, void * ptr) { } else { sup = y_max_param / log(energy_param/Minv); } - - gsl_integration_qng (&F, inf, sup, epsabs, epsrel, &result, &error, &calls); + + gsl_integration_qng (&F, inf, sup, epsabs, epsrel, &result, &error, &calls); return 2*result; } ////UUBAR ODD FORWARD Integration in invariant mass 
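    // The same two-level quadrature pattern repeats for every partonic channel in
    // this file: integration_<channel>_y wraps the matrix element in a gsl_function
    // and integrates it over the reduced rapidity with the non-adaptive Gauss-Kronrod
    // routine gsl_integration_qng (the last three arguments return the result, the
    // error estimate and the number of function evaluations); the factor 2 in
    // "return 2*result" presumably accounts for the y <-> -y symmetry of the rapidity
    // range. integration_<channel> then integrates that result over the invariant
    // mass between Minv_inf and Minv_sup with the same routine.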
double ReactionAFB::integration_uubarOF (double Minv_inf, double Minv_sup, void* ptr) { - + double result, error; gsl_function F; F.function = &(ReactionAFB::integration_uubarOF_y); F.params = ptr; - + double inf = Minv_inf; double sup = Minv_sup; - - gsl_integration_qng (&F, inf, sup, epsabs, epsrel, &result, &error, &calls); + + gsl_integration_qng (&F, inf, sup, epsabs, epsrel, &result, &error, &calls); return result; } ////UUBAR ODD BACKWARD Matrix element double ReactionAFB::uubarOB_funct (double yreduced, void * params) { - + // Pointer to access PDFs - ReactionTheory* ptr = (ReactionTheory*) ((integration_params*)params)->ptr; + ReactionTheory* ptr = (ReactionTheory*) ((integration_params*)params)->ptr; // Pass the invariant mass as parameter double Minv = ((integration_params*)params)-> Minv; - + // Partonic cross section parameters double Q = Minv; double z = pow(Minv,2)/pow(energy_param,2); @@ -337,7 +337,7 @@ double ReactionAFB::uubarOB_funct (double yreduced, void * params) { double x2 = sqrt(z)*exp(-y); double dsigma_temp = pow(Minv,2)/(96*PI); double dsigma = GeVtofb_param*dsigma_temp*(2*Minv/pow(energy_param,2))*(-(1.0/2.0)*log(z)); - + // Partons PDFs std::valarray<double> pdfx1(13); std::valarray<double> pdfx2(13); @@ -348,41 +348,41 @@ double ReactionAFB::uubarOB_funct (double yreduced, void * params) { double f2ubar = pdfx2[4] / x2; double f2cbar = pdfx2[2] / x2; - // PDF combinations + // PDF combinations double uubar_PDF = f1u*f2ubar + f1c*f2cbar; - + // Angular integration limits double qbarq_cos_theta_max = 0; - double qbarq_cos_theta_min = max(cos(PI - 2*atan(exp(-eta_cut_param-y))),-sqrt(1-4*(pow(pT_cut_param,2)/pow(Minv,2)))); - + double qbarq_cos_theta_min = max(cos(PI - 2*atan(exp(-eta_cut_param-y))),-sqrt(1-4*(pow(pT_cut_param,2)/pow(Minv,2)))); + double angular_integration_OB = pow(qbarq_cos_theta_max,2) - pow(qbarq_cos_theta_min,2); - + // Combination with angular integration (Forward - Backward for q-qbar) double dsigma_OB = dsigma*angular_integration_OB; // Covolution with PDFs (flipping direction for q-qbar & qbar-q) // U-UBAR double uubarOB = uubar_PDF*dsigma_OB; - + double *propagator = propagators (Minv); - + return uubarOB * propagator[1]; } ////UUBAR ODD BACKWARD Integration in rapidity double ReactionAFB::integration_uubarOB_y (double Minv, void * ptr) { - + // Pass the necessary parameters (pointer to the PDFs and Minv) integration_params integrationParams; integrationParams.Minv = Minv; integrationParams.ptr = (ReactionTheory*) ptr; - + double result, error; gsl_function F; F.function = &(ReactionAFB::uubarOB_funct); F.params = &integrationParams; - + double inf = y_min_param / log(energy_param/Minv); double sup; if (y_max_param == 0.0) { @@ -390,37 +390,37 @@ double ReactionAFB::integration_uubarOB_y (double Minv, void * ptr) { } else { sup = y_max_param / log(energy_param/Minv); } - - gsl_integration_qng (&F, inf, sup, epsabs, epsrel, &result, &error, &calls); + + gsl_integration_qng (&F, inf, sup, epsabs, epsrel, &result, &error, &calls); return 2*result; } ////UUBAR ODD BACKWARD Integration in invariant mass double ReactionAFB::integration_uubarOB (double Minv_inf, double Minv_sup, void* ptr) { - + double result, error; gsl_function F; F.function = &(ReactionAFB::integration_uubarOB_y); F.params = ptr; - + double inf = Minv_inf; double sup = Minv_sup; - - gsl_integration_qng (&F, inf, sup, epsabs, epsrel, &result, &error, &calls); + + gsl_integration_qng (&F, inf, sup, epsabs, epsrel, &result, &error, &calls); return result; } ////UBARU EVEN 
FORWARD Matrix element double ReactionAFB::ubaruEF_funct (double yreduced, void * params) { - + // Pointer to access PDFs - ReactionTheory* ptr = (ReactionTheory*) ((integration_params*)params)->ptr; + ReactionTheory* ptr = (ReactionTheory*) ((integration_params*)params)->ptr; // Pass the invariant mass as parameter double Minv = ((integration_params*)params)-> Minv; - + // Partonic cross section parameters double Q = Minv; double z = pow(Minv,2)/pow(energy_param,2); @@ -429,7 +429,7 @@ double ReactionAFB::ubaruEF_funct (double yreduced, void * params) { double x2 = sqrt(z)*exp(-y); double dsigma_temp = pow(Minv,2)/(96*PI); double dsigma = GeVtofb_param*dsigma_temp*(2*Minv/pow(energy_param,2))*(-(1.0/2.0)*log(z)); - + // Partons PDFs std::valarray<double> pdfx1(13); std::valarray<double> pdfx2(13); @@ -440,41 +440,41 @@ double ReactionAFB::ubaruEF_funct (double yreduced, void * params) { double f2u = pdfx2[8] / x2; double f2c = pdfx2[10] / x2; - // PDF combinations + // PDF combinations double ubaru_PDF = f1ubar*f2u + f1cbar*f2c; - + // Angular integration limits double qbarq_cos_theta_max = 0; - double qbarq_cos_theta_min = max(cos(PI - 2*atan(exp(-eta_cut_param-y))),-sqrt(1-4*(pow(pT_cut_param,2)/pow(Minv,2)))); - + double qbarq_cos_theta_min = max(cos(PI - 2*atan(exp(-eta_cut_param-y))),-sqrt(1-4*(pow(pT_cut_param,2)/pow(Minv,2)))); + double angular_integration_EB = (qbarq_cos_theta_max-qbarq_cos_theta_min)+(1.0/3.0)*(pow(qbarq_cos_theta_max,3)-pow(qbarq_cos_theta_min,3)); - + // Combination with angular integration (Forward - Backward for q-qbar) double dsigma_EB = dsigma*angular_integration_EB; // Covolution with PDFs (flipping direction for q-qbar & qbar-q) // UBAR-U double ubaruEF = ubaru_PDF*dsigma_EB; - + double *propagator = propagators (Minv); - + return ubaruEF * propagator[0]; } ////UBARU EVEN FORWARD Integration in rapidity double ReactionAFB::integration_ubaruEF_y (double Minv, void * ptr) { - + // Pass the necessary parameters (pointer to the PDFs and Minv) integration_params integrationParams; integrationParams.Minv = Minv; integrationParams.ptr = (ReactionTheory*) ptr; - + double result, error; gsl_function F; F.function = &(ReactionAFB::ubaruEF_funct); F.params = &integrationParams; - + double inf = y_min_param / log(energy_param/Minv); double sup; if (y_max_param == 0.0) { @@ -482,37 +482,37 @@ double ReactionAFB::integration_ubaruEF_y (double Minv, void * ptr) { } else { sup = y_max_param / log(energy_param/Minv); } - - gsl_integration_qng (&F, inf, sup, epsabs, epsrel, &result, &error, &calls); + + gsl_integration_qng (&F, inf, sup, epsabs, epsrel, &result, &error, &calls); return 2*result; } ////UBARU EVEN FORWARD Integration in invariant mass double ReactionAFB::integration_ubaruEF (double Minv_inf, double Minv_sup, void* ptr) { - + double result, error; gsl_function F; F.function = &(ReactionAFB::integration_ubaruEF_y); F.params = ptr; - + double inf = Minv_inf; double sup = Minv_sup; - - gsl_integration_qng (&F, inf, sup, epsabs, epsrel, &result, &error, &calls); + + gsl_integration_qng (&F, inf, sup, epsabs, epsrel, &result, &error, &calls); return result; } ////UBARU EVEN BACKWARD Matrix element double ReactionAFB::ubaruEB_funct (double yreduced, void * params) { - + // Pointer to access PDFs - ReactionTheory* ptr = (ReactionTheory*) ((integration_params*)params)->ptr; + ReactionTheory* ptr = (ReactionTheory*) ((integration_params*)params)->ptr; // Pass the invariant mass as parameter double Minv = ((integration_params*)params)-> Minv; - + // Partonic cross 
section parameters double Q = Minv; double z = pow(Minv,2)/pow(energy_param,2); @@ -521,7 +521,7 @@ double ReactionAFB::ubaruEB_funct (double yreduced, void * params) { double x2 = sqrt(z)*exp(-y); double dsigma_temp = pow(Minv,2)/(96*PI); double dsigma = GeVtofb_param*dsigma_temp*(2*Minv/pow(energy_param,2))*(-(1.0/2.0)*log(z)); - + // Partons PDFs std::valarray<double> pdfx1(13); std::valarray<double> pdfx2(13); @@ -532,41 +532,41 @@ double ReactionAFB::ubaruEB_funct (double yreduced, void * params) { double f2u = pdfx2[8] / x2; double f2c = pdfx2[10] / x2; - // PDF combinations + // PDF combinations double ubaru_PDF = f1ubar*f2u + f1cbar*f2c; - + // Angular integration limits double qqbar_cos_theta_max = min(cos(2*atan(exp(-eta_cut_param-y))),sqrt(1-4*(pow(pT_cut_param,2)/pow(Minv,2)))); - double qqbar_cos_theta_min = 0; - + double qqbar_cos_theta_min = 0; + double angular_integration_EF = (qqbar_cos_theta_max-qqbar_cos_theta_min)+(1.0/3.0)*(pow(qqbar_cos_theta_max,3)-pow(qqbar_cos_theta_min,3)); - + // Combination with angular integration (Forward - Backward for q-qbar) double dsigma_EF = dsigma*angular_integration_EF; // Covolution with PDFs (flipping direction for q-qbar & qbar-q) // UBAR-U double ubaruEB = ubaru_PDF*dsigma_EF; - + double *propagator = propagators (Minv); - + return ubaruEB * propagator[0]; } ////UBARU EVEN BACKWARD Integration in rapidity double ReactionAFB::integration_ubaruEB_y (double Minv, void * ptr) { - + // Pass the necessary parameters (pointer to the PDFs and Minv) integration_params integrationParams; integrationParams.Minv = Minv; integrationParams.ptr = (ReactionTheory*) ptr; - + double result, error; gsl_function F; F.function = &(ReactionAFB::ubaruEB_funct); F.params = &integrationParams; - + double inf = y_min_param / log(energy_param/Minv); double sup; if (y_max_param == 0.0) { @@ -574,37 +574,37 @@ double ReactionAFB::integration_ubaruEB_y (double Minv, void * ptr) { } else { sup = y_max_param / log(energy_param/Minv); } - - gsl_integration_qng (&F, inf, sup, epsabs, epsrel, &result, &error, &calls); + + gsl_integration_qng (&F, inf, sup, epsabs, epsrel, &result, &error, &calls); return 2*result; } ////UBARU EVEN BACKWARD Integration in invariant mass double ReactionAFB::integration_ubaruEB (double Minv_inf, double Minv_sup, void* ptr) { - + double result, error; gsl_function F; F.function = &(ReactionAFB::integration_ubaruEB_y); F.params = ptr; - + double inf = Minv_inf; double sup = Minv_sup; - - gsl_integration_qng (&F, inf, sup, epsabs, epsrel, &result, &error, &calls); + + gsl_integration_qng (&F, inf, sup, epsabs, epsrel, &result, &error, &calls); return result; } ////UBARU ODD FORWARD Matrix element double ReactionAFB::ubaruOF_funct (double yreduced, void * params) { - + // Pointer to access PDFs - ReactionTheory* ptr = (ReactionTheory*) ((integration_params*)params)->ptr; + ReactionTheory* ptr = (ReactionTheory*) ((integration_params*)params)->ptr; // Pass the invariant mass as parameter double Minv = ((integration_params*)params)-> Minv; - + // Partonic cross section parameters double Q = Minv; double z = pow(Minv,2)/pow(energy_param,2); @@ -613,7 +613,7 @@ double ReactionAFB::ubaruOF_funct (double yreduced, void * params) { double x2 = sqrt(z)*exp(-y); double dsigma_temp = pow(Minv,2)/(96*PI); double dsigma = GeVtofb_param*dsigma_temp*(2*Minv/pow(energy_param,2))*(-(1.0/2.0)*log(z)); - + // Partons PDFs std::valarray<double> pdfx1(13); std::valarray<double> pdfx2(13); @@ -624,41 +624,41 @@ double ReactionAFB::ubaruOF_funct (double 
yreduced, void * params) { double f2u = pdfx2[8] / x2; double f2c = pdfx2[10] / x2; - // PDF combinations + // PDF combinations double ubaru_PDF = f1ubar*f2u + f1cbar*f2c; - + // Angular integration limits double qbarq_cos_theta_max = 0; - double qbarq_cos_theta_min = max(cos(PI - 2*atan(exp(-eta_cut_param-y))),-sqrt(1-4*(pow(pT_cut_param,2)/pow(Minv,2)))); - + double qbarq_cos_theta_min = max(cos(PI - 2*atan(exp(-eta_cut_param-y))),-sqrt(1-4*(pow(pT_cut_param,2)/pow(Minv,2)))); + double angular_integration_OB = pow(qbarq_cos_theta_max,2) - pow(qbarq_cos_theta_min,2); - + // Combination with angular integration (Forward - Backward for q-qbar) double dsigma_OB = dsigma*angular_integration_OB; // Covolution with PDFs (flipping direction for q-qbar & qbar-q) // UBAR-U double ubaruOF = ubaru_PDF*dsigma_OB; - + double *propagator = propagators (Minv); - + return ubaruOF * propagator[1]; } ////UBARU ODD FORWARD Integration in rapidity double ReactionAFB::integration_ubaruOF_y (double Minv, void * ptr) { - + // Pass the necessary parameters (pointer to the PDFs and Minv) integration_params integrationParams; integrationParams.Minv = Minv; integrationParams.ptr = (ReactionTheory*) ptr; - + double result, error; gsl_function F; F.function = &(ReactionAFB::ubaruOF_funct); F.params = &integrationParams; - + double inf = y_min_param / log(energy_param/Minv); double sup; if (y_max_param == 0.0) { @@ -666,37 +666,37 @@ double ReactionAFB::integration_ubaruOF_y (double Minv, void * ptr) { } else { sup = y_max_param / log(energy_param/Minv); } - - gsl_integration_qng (&F, inf, sup, epsabs, epsrel, &result, &error, &calls); + + gsl_integration_qng (&F, inf, sup, epsabs, epsrel, &result, &error, &calls); return 2*result; } ////UBARU ODD FORWARD Integration in invariant mass double ReactionAFB::integration_ubaruOF (double Minv_inf, double Minv_sup, void* ptr) { - + double result, error; gsl_function F; F.function = &(ReactionAFB::integration_ubaruOF_y); F.params = ptr; - + double inf = Minv_inf; double sup = Minv_sup; - - gsl_integration_qng (&F, inf, sup, epsabs, epsrel, &result, &error, &calls); + + gsl_integration_qng (&F, inf, sup, epsabs, epsrel, &result, &error, &calls); return result; } ////UBARU ODD BACKWARD Matrix element double ReactionAFB::ubaruOB_funct (double yreduced, void * params) { - + // Pointer to access PDFs - ReactionTheory* ptr = (ReactionTheory*) ((integration_params*)params)->ptr; + ReactionTheory* ptr = (ReactionTheory*) ((integration_params*)params)->ptr; // Pass the invariant mass as parameter double Minv = ((integration_params*)params)-> Minv; - + // Partonic cross section parameters double Q = Minv; double z = pow(Minv,2)/pow(energy_param,2); @@ -705,7 +705,7 @@ double ReactionAFB::ubaruOB_funct (double yreduced, void * params) { double x2 = sqrt(z)*exp(-y); double dsigma_temp = pow(Minv,2)/(96*PI); double dsigma = GeVtofb_param*dsigma_temp*(2*Minv/pow(energy_param,2))*(-(1.0/2.0)*log(z)); - + // Partons PDFs std::valarray<double> pdfx1(13); std::valarray<double> pdfx2(13); @@ -716,41 +716,41 @@ double ReactionAFB::ubaruOB_funct (double yreduced, void * params) { double f2u = pdfx2[8] / x2; double f2c = pdfx2[10] / x2; - // PDF combinations + // PDF combinations double ubaru_PDF = f1ubar*f2u + f1cbar*f2c; - + // Angular integration limits double qqbar_cos_theta_max = min(cos(2*atan(exp(-eta_cut_param-y))),sqrt(1-4*(pow(pT_cut_param,2)/pow(Minv,2)))); - double qqbar_cos_theta_min = 0; - + double qqbar_cos_theta_min = 0; + double angular_integration_OF = pow(qqbar_cos_theta_max,2) 
- pow(qqbar_cos_theta_min,2); - + // Combination with angular integration (Forward - Backward for q-qbar) double dsigma_OF = dsigma*angular_integration_OF; // Covolution with PDFs (flipping direction for q-qbar & qbar-q) // UBAR-U double ubaruOB = ubaru_PDF*dsigma_OF; - + double *propagator = propagators (Minv); - + return ubaruOB * propagator[1]; } ////UBARU ODD BACKWARD Integration in rapidity double ReactionAFB::integration_ubaruOB_y (double Minv, void * ptr) { - + // Pass the necessary parameters (pointer to the PDFs and Minv) integration_params integrationParams; integrationParams.Minv = Minv; integrationParams.ptr = (ReactionTheory*) ptr; - + double result, error; gsl_function F; F.function = &(ReactionAFB::ubaruOB_funct); F.params = &integrationParams; - + double inf = y_min_param / log(energy_param/Minv); double sup; if (y_max_param == 0.0) { @@ -758,37 +758,37 @@ double ReactionAFB::integration_ubaruOB_y (double Minv, void * ptr) { } else { sup = y_max_param / log(energy_param/Minv); } - - gsl_integration_qng (&F, inf, sup, epsabs, epsrel, &result, &error, &calls); + + gsl_integration_qng (&F, inf, sup, epsabs, epsrel, &result, &error, &calls); return 2*result; } ////UBARU ODD BACKWARD Integration in invariant mass double ReactionAFB::integration_ubaruOB (double Minv_inf, double Minv_sup, void* ptr) { - + double result, error; gsl_function F; F.function = &(ReactionAFB::integration_ubaruOB_y); F.params = ptr; - + double inf = Minv_inf; double sup = Minv_sup; - - gsl_integration_qng (&F, inf, sup, epsabs, epsrel, &result, &error, &calls); + + gsl_integration_qng (&F, inf, sup, epsabs, epsrel, &result, &error, &calls); return result; } ////DDBAR EVEN FORWARD Matrix element double ReactionAFB::ddbarEF_funct (double yreduced, void * params) { - + // Pointer to access PDFs - ReactionTheory* ptr = (ReactionTheory*) ((integration_params*)params)->ptr; + ReactionTheory* ptr = (ReactionTheory*) ((integration_params*)params)->ptr; // Pass the invariant mass as parameter double Minv = ((integration_params*)params)-> Minv; - + // Partonic cross section parameters double Q = Minv; double z = pow(Minv,2)/pow(energy_param,2); @@ -797,7 +797,7 @@ double ReactionAFB::ddbarEF_funct (double yreduced, void * params) { double x2 = sqrt(z)*exp(-y); double dsigma_temp = pow(Minv,2)/(96*PI); double dsigma = GeVtofb_param*dsigma_temp*(2*Minv/pow(energy_param,2))*(-(1.0/2.0)*log(z)); - + // Partons PDFs std::valarray<double> pdfx1(13); std::valarray<double> pdfx2(13); @@ -810,41 +810,41 @@ double ReactionAFB::ddbarEF_funct (double yreduced, void * params) { double f2sbar = pdfx2[3] / x2; double f2bbar = pdfx2[1] / x2; - // PDF combinations + // PDF combinations double ddbar_PDF = f1d*f2dbar + f1s*f2sbar + f1b*f2bbar; - + // Angular integration limits double qqbar_cos_theta_max = min(cos(2*atan(exp(-eta_cut_param-y))),sqrt(1-4*(pow(pT_cut_param,2)/pow(Minv,2)))); double qqbar_cos_theta_min = 0; - + double angular_integration_EF = (qqbar_cos_theta_max-qqbar_cos_theta_min)+(1.0/3.0)*(pow(qqbar_cos_theta_max,3)-pow(qqbar_cos_theta_min,3)); - + // Combination with angular integration (Forward - Backward for q-qbar) double dsigma_EF = dsigma*angular_integration_EF; // Covolution with PDFs (flipping direction for q-qbar & qbar-q) // D-DBAR double ddbarEF = ddbar_PDF*dsigma_EF; - + double *propagator = propagators (Minv); - + return ddbarEF * propagator[2]; } ////DDBAR EVEN FORWARD Integration in rapidity double ReactionAFB::integration_ddbarEF_y (double Minv, void * ptr) { - + // Pass the necessary parameters 
(pointer to the PDFs and Minv) integration_params integrationParams; integrationParams.Minv = Minv; integrationParams.ptr = (ReactionTheory*) ptr; - + double result, error; gsl_function F; F.function = &(ReactionAFB::ddbarEF_funct); F.params = &integrationParams; - + double inf = y_min_param / log(energy_param/Minv); double sup; if (y_max_param == 0.0) { @@ -852,37 +852,37 @@ double ReactionAFB::integration_ddbarEF_y (double Minv, void * ptr) { } else { sup = y_max_param / log(energy_param/Minv); } - - gsl_integration_qng (&F, inf, sup, epsabs, epsrel, &result, &error, &calls); + + gsl_integration_qng (&F, inf, sup, epsabs, epsrel, &result, &error, &calls); return 2*result; } ////DDBAR EVEN FORWARD Integration in invariant mass double ReactionAFB::integration_ddbarEF (double Minv_inf, double Minv_sup, void* ptr) { - + double result, error; gsl_function F; F.function = &(ReactionAFB::integration_ddbarEF_y); F.params = ptr; - + double inf = Minv_inf; double sup = Minv_sup; - - gsl_integration_qng (&F, inf, sup, epsabs, epsrel, &result, &error, &calls); + + gsl_integration_qng (&F, inf, sup, epsabs, epsrel, &result, &error, &calls); return result; } ////DDBAR EVEN BACKWARD Matrix element double ReactionAFB::ddbarEB_funct (double yreduced, void * params) { - + // Pointer to access PDFs - ReactionTheory* ptr = (ReactionTheory*) ((integration_params*)params)->ptr; + ReactionTheory* ptr = (ReactionTheory*) ((integration_params*)params)->ptr; // Pass the invariant mass as parameter double Minv = ((integration_params*)params)-> Minv; - + // Partonic cross section parameters double Q = Minv; double z = pow(Minv,2)/pow(energy_param,2); @@ -891,7 +891,7 @@ double ReactionAFB::ddbarEB_funct (double yreduced, void * params) { double x2 = sqrt(z)*exp(-y); double dsigma_temp = pow(Minv,2)/(96*PI); double dsigma = GeVtofb_param*dsigma_temp*(2*Minv/pow(energy_param,2))*(-(1.0/2.0)*log(z)); - + // Partons PDFs std::valarray<double> pdfx1(13); std::valarray<double> pdfx2(13); @@ -904,41 +904,41 @@ double ReactionAFB::ddbarEB_funct (double yreduced, void * params) { double f2sbar = pdfx2[3] / x2; double f2bbar = pdfx2[1] / x2; - // PDF combinations + // PDF combinations double ddbar_PDF = f1d*f2dbar + f1s*f2sbar + f1b*f2bbar; - + // Angular integration limits double qbarq_cos_theta_max = 0; - double qbarq_cos_theta_min = max(cos(PI - 2*atan(exp(-eta_cut_param-y))),-sqrt(1-4*(pow(pT_cut_param,2)/pow(Minv,2)))); - + double qbarq_cos_theta_min = max(cos(PI - 2*atan(exp(-eta_cut_param-y))),-sqrt(1-4*(pow(pT_cut_param,2)/pow(Minv,2)))); + double angular_integration_EB = (qbarq_cos_theta_max-qbarq_cos_theta_min)+(1.0/3.0)*(pow(qbarq_cos_theta_max,3)-pow(qbarq_cos_theta_min,3)); - + // Combination with angular integration (Forward - Backward for q-qbar) double dsigma_EB = dsigma*angular_integration_EB; // Covolution with PDFs (flipping direction for q-qbar & qbar-q) // D-DBAR double ddbarEB = ddbar_PDF*dsigma_EB; - + double *propagator = propagators (Minv); - + return ddbarEB * propagator[2]; } ////DDBAR EVEN BACKWARD Integration in rapidity double ReactionAFB::integration_ddbarEB_y (double Minv, void * ptr) { - + // Pass the necessary parameters (pointer to the PDFs and Minv) integration_params integrationParams; integrationParams.Minv = Minv; integrationParams.ptr = (ReactionTheory*) ptr; - + double result, error; gsl_function F; F.function = &(ReactionAFB::ddbarEB_funct); F.params = &integrationParams; - + double inf = y_min_param / log(energy_param/Minv); double sup; if (y_max_param == 0.0) { @@ -946,37 +946,37 
@@ double ReactionAFB::integration_ddbarEB_y (double Minv, void * ptr) { } else { sup = y_max_param / log(energy_param/Minv); } - - gsl_integration_qng (&F, inf, sup, epsabs, epsrel, &result, &error, &calls); + + gsl_integration_qng (&F, inf, sup, epsabs, epsrel, &result, &error, &calls); return 2*result; } ////DDBAR EVEN BACKWARD Integration in invariant mass double ReactionAFB::integration_ddbarEB (double Minv_inf, double Minv_sup, void* ptr) { - + double result, error; gsl_function F; F.function = &(ReactionAFB::integration_ddbarEB_y); F.params = ptr; - + double inf = Minv_inf; double sup = Minv_sup; - - gsl_integration_qng (&F, inf, sup, epsabs, epsrel, &result, &error, &calls); + + gsl_integration_qng (&F, inf, sup, epsabs, epsrel, &result, &error, &calls); return result; } ////DDBAR ODD FORWARD Matrix element double ReactionAFB::ddbarOF_funct (double yreduced, void * params) { - + // Pointer to access PDFs - ReactionTheory* ptr = (ReactionTheory*) ((integration_params*)params)->ptr; + ReactionTheory* ptr = (ReactionTheory*) ((integration_params*)params)->ptr; // Pass the invariant mass as parameter double Minv = ((integration_params*)params)-> Minv; - + // Partonic cross section parameters double Q = Minv; double z = pow(Minv,2)/pow(energy_param,2); @@ -985,7 +985,7 @@ double ReactionAFB::ddbarOF_funct (double yreduced, void * params) { double x2 = sqrt(z)*exp(-y); double dsigma_temp = pow(Minv,2)/(96*PI); double dsigma = GeVtofb_param*dsigma_temp*(2*Minv/pow(energy_param,2))*(-(1.0/2.0)*log(z)); - + // Partons PDFs std::valarray<double> pdfx1(13); std::valarray<double> pdfx2(13); @@ -998,41 +998,41 @@ double ReactionAFB::ddbarOF_funct (double yreduced, void * params) { double f2sbar = pdfx2[3] / x2; double f2bbar = pdfx2[1] / x2; - // PDF combinations + // PDF combinations double ddbar_PDF = f1d*f2dbar + f1s*f2sbar + f1b*f2bbar; - + // Angular integration limits double qqbar_cos_theta_max = min(cos(2*atan(exp(-eta_cut_param-y))),sqrt(1-4*(pow(pT_cut_param,2)/pow(Minv,2)))); - double qqbar_cos_theta_min = 0; - + double qqbar_cos_theta_min = 0; + double angular_integration_OF = pow(qqbar_cos_theta_max,2) - pow(qqbar_cos_theta_min,2); - + // Combination with angular integration (Forward - Backward for q-qbar) double dsigma_OF = dsigma*angular_integration_OF; // Covolution with PDFs (flipping direction for q-qbar & qbar-q) // D-DBAR double ddbarOF = ddbar_PDF*dsigma_OF; - + double *propagator = propagators (Minv); - + return ddbarOF * propagator[3]; } ////DDBAR ODD FORWARD Integration in rapidity double ReactionAFB::integration_ddbarOF_y (double Minv, void * ptr) { - + // Pass the necessary parameters (pointer to the PDFs and Minv) integration_params integrationParams; integrationParams.Minv = Minv; integrationParams.ptr = (ReactionTheory*) ptr; - + double result, error; gsl_function F; F.function = &(ReactionAFB::ddbarOF_funct); F.params = &integrationParams; - + double inf = y_min_param / log(energy_param/Minv); double sup; if (y_max_param == 0.0) { @@ -1040,37 +1040,37 @@ double ReactionAFB::integration_ddbarOF_y (double Minv, void * ptr) { } else { sup = y_max_param / log(energy_param/Minv); } - - gsl_integration_qng (&F, inf, sup, epsabs, epsrel, &result, &error, &calls); + + gsl_integration_qng (&F, inf, sup, epsabs, epsrel, &result, &error, &calls); return 2*result; } ////DDBAR ODD FORWARD Integration in invariant mass double ReactionAFB::integration_ddbarOF (double Minv_inf, double Minv_sup, void* ptr) { - + double result, error; gsl_function F; F.function = 
&(ReactionAFB::integration_ddbarOF_y); F.params = ptr; - + double inf = Minv_inf; double sup = Minv_sup; - - gsl_integration_qng (&F, inf, sup, epsabs, epsrel, &result, &error, &calls); + + gsl_integration_qng (&F, inf, sup, epsabs, epsrel, &result, &error, &calls); return result; } ////DDBAR ODD BACKWARD Matrix element double ReactionAFB::ddbarOB_funct (double yreduced, void * params) { - + // Pointer to access PDFs - ReactionTheory* ptr = (ReactionTheory*) ((integration_params*)params)->ptr; + ReactionTheory* ptr = (ReactionTheory*) ((integration_params*)params)->ptr; // Pass the invariant mass as parameter double Minv = ((integration_params*)params)-> Minv; - + // Partonic cross section parameters double Q = Minv; double z = pow(Minv,2)/pow(energy_param,2); @@ -1079,7 +1079,7 @@ double ReactionAFB::ddbarOB_funct (double yreduced, void * params) { double x2 = sqrt(z)*exp(-y); double dsigma_temp = pow(Minv,2)/(96*PI); double dsigma = GeVtofb_param*dsigma_temp*(2*Minv/pow(energy_param,2))*(-(1.0/2.0)*log(z)); - + // Partons PDFs std::valarray<double> pdfx1(13); std::valarray<double> pdfx2(13); @@ -1092,41 +1092,41 @@ double ReactionAFB::ddbarOB_funct (double yreduced, void * params) { double f2sbar = pdfx2[3] / x2; double f2bbar = pdfx2[1] / x2; - // PDF combinations + // PDF combinations double ddbar_PDF = f1d*f2dbar + f1s*f2sbar + f1b*f2bbar; - + // Angular integration limits double qbarq_cos_theta_max = 0; - double qbarq_cos_theta_min = max(cos(PI - 2*atan(exp(-eta_cut_param-y))),-sqrt(1-4*(pow(pT_cut_param,2)/pow(Minv,2)))); - + double qbarq_cos_theta_min = max(cos(PI - 2*atan(exp(-eta_cut_param-y))),-sqrt(1-4*(pow(pT_cut_param,2)/pow(Minv,2)))); + double angular_integration_OB = pow(qbarq_cos_theta_max,2) - pow(qbarq_cos_theta_min,2); - + // Combination with angular integration (Forward - Backward for q-qbar) double dsigma_OB = dsigma*angular_integration_OB; // Covolution with PDFs (flipping direction for q-qbar & qbar-q) // D-DBAR double ddbarOB = ddbar_PDF*dsigma_OB; - + double *propagator = propagators (Minv); - + return ddbarOB * propagator[3]; } ////DDBAR ODD BACKWARD Integration in rapidity double ReactionAFB::integration_ddbarOB_y (double Minv, void * ptr) { - + // Pass the necessary parameters (pointer to the PDFs and Minv) integration_params integrationParams; integrationParams.Minv = Minv; integrationParams.ptr = (ReactionTheory*) ptr; - + double result, error; gsl_function F; F.function = &(ReactionAFB::ddbarOB_funct); F.params = &integrationParams; - + double inf = y_min_param / log(energy_param/Minv); double sup; if (y_max_param == 0.0) { @@ -1134,37 +1134,37 @@ double ReactionAFB::integration_ddbarOB_y (double Minv, void * ptr) { } else { sup = y_max_param / log(energy_param/Minv); } - - gsl_integration_qng (&F, inf, sup, epsabs, epsrel, &result, &error, &calls); + + gsl_integration_qng (&F, inf, sup, epsabs, epsrel, &result, &error, &calls); return 2*result; } ////DDBAR ODD BACKWARD Integration in invariant mass double ReactionAFB::integration_ddbarOB (double Minv_inf, double Minv_sup, void* ptr) { - + double result, error; gsl_function F; F.function = &(ReactionAFB::integration_ddbarOB_y); F.params = ptr; - + double inf = Minv_inf; double sup = Minv_sup; - - gsl_integration_qng (&F, inf, sup, epsabs, epsrel, &result, &error, &calls); + + gsl_integration_qng (&F, inf, sup, epsabs, epsrel, &result, &error, &calls); return result; } ////DBARD EVEN FORWARD Matrix element double ReactionAFB::dbardEF_funct (double yreduced, void * params) { - + // Pointer to access PDFs - 
ReactionTheory* ptr = (ReactionTheory*) ((integration_params*)params)->ptr; + ReactionTheory* ptr = (ReactionTheory*) ((integration_params*)params)->ptr; // Pass the invariant mass as parameter double Minv = ((integration_params*)params)-> Minv; - + // Partonic cross section parameters double Q = Minv; double z = pow(Minv,2)/pow(energy_param,2); @@ -1173,7 +1173,7 @@ double ReactionAFB::dbardEF_funct (double yreduced, void * params) { double x2 = sqrt(z)*exp(-y); double dsigma_temp = pow(Minv,2)/(96*PI); double dsigma = GeVtofb_param*dsigma_temp*(2*Minv/pow(energy_param,2))*(-(1.0/2.0)*log(z)); - + // Partons PDFs std::valarray<double> pdfx1(13); std::valarray<double> pdfx2(13); @@ -1186,41 +1186,41 @@ double ReactionAFB::dbardEF_funct (double yreduced, void * params) { double f2s = pdfx2[9] / x2; double f2b = pdfx2[11] / x2; - // PDF combinations + // PDF combinations double dbard_PDF = f1dbar*f2d + f1sbar*f2s + f1bbar*f2b; - + // Angular integration limits double qbarq_cos_theta_max = 0; - double qbarq_cos_theta_min = max(cos(PI - 2*atan(exp(-eta_cut_param-y))),-sqrt(1-4*(pow(pT_cut_param,2)/pow(Minv,2)))); - + double qbarq_cos_theta_min = max(cos(PI - 2*atan(exp(-eta_cut_param-y))),-sqrt(1-4*(pow(pT_cut_param,2)/pow(Minv,2)))); + double angular_integration_EB = (qbarq_cos_theta_max-qbarq_cos_theta_min)+(1.0/3.0)*(pow(qbarq_cos_theta_max,3)-pow(qbarq_cos_theta_min,3)); - + // Combination with angular integration (Forward - Backward for q-qbar) double dsigma_EB = dsigma*angular_integration_EB; // Covolution with PDFs (flipping direction for q-qbar & qbar-q) // DBAR-D double dbardEF = dbard_PDF*dsigma_EB; - + double *propagator = propagators (Minv); - + return dbardEF * propagator[2]; } ////DBARD EVEN FORWARD Integration in rapidity double ReactionAFB::integration_dbardEF_y (double Minv, void * ptr) { - + // Pass the necessary parameters (pointer to the PDFs and Minv) integration_params integrationParams; integrationParams.Minv = Minv; integrationParams.ptr = (ReactionTheory*) ptr; - + double result, error; gsl_function F; F.function = &(ReactionAFB::dbardEF_funct); F.params = &integrationParams; - + double inf = y_min_param / log(energy_param/Minv); double sup; if (y_max_param == 0.0) { @@ -1228,37 +1228,37 @@ double ReactionAFB::integration_dbardEF_y (double Minv, void * ptr) { } else { sup = y_max_param / log(energy_param/Minv); } - - gsl_integration_qng (&F, inf, sup, epsabs, epsrel, &result, &error, &calls); + + gsl_integration_qng (&F, inf, sup, epsabs, epsrel, &result, &error, &calls); return 2*result; } ////DBARD EVEN FORWARD Integration in invariant mass double ReactionAFB::integration_dbardEF (double Minv_inf, double Minv_sup, void* ptr) { - + double result, error; gsl_function F; F.function = &(ReactionAFB::integration_dbardEF_y); F.params = ptr; - + double inf = Minv_inf; double sup = Minv_sup; - - gsl_integration_qng (&F, inf, sup, epsabs, epsrel, &result, &error, &calls); + + gsl_integration_qng (&F, inf, sup, epsabs, epsrel, &result, &error, &calls); return result; } ////DBARD EVEN BACKWARD Matrix element double ReactionAFB::dbardEB_funct (double yreduced, void * params) { - + // Pointer to access PDFs - ReactionTheory* ptr = (ReactionTheory*) ((integration_params*)params)->ptr; + ReactionTheory* ptr = (ReactionTheory*) ((integration_params*)params)->ptr; // Pass the invariant mass as parameter double Minv = ((integration_params*)params)-> Minv; - + // Partonic cross section parameters double Q = Minv; double z = pow(Minv,2)/pow(energy_param,2); @@ -1267,7 +1267,7 @@ double 
ReactionAFB::dbardEB_funct (double yreduced, void * params) { double x2 = sqrt(z)*exp(-y); double dsigma_temp = pow(Minv,2)/(96*PI); double dsigma = GeVtofb_param*dsigma_temp*(2*Minv/pow(energy_param,2))*(-(1.0/2.0)*log(z)); - + // Partons PDFs std::valarray<double> pdfx1(13); std::valarray<double> pdfx2(13); @@ -1280,41 +1280,41 @@ double ReactionAFB::dbardEB_funct (double yreduced, void * params) { double f2s = pdfx2[9] / x2; double f2b = pdfx2[11] / x2; - // PDF combinations + // PDF combinations double dbard_PDF = f1dbar*f2d + f1sbar*f2s + f1bbar*f2b; - + // Angular integration limits double qqbar_cos_theta_max = min(cos(2*atan(exp(-eta_cut_param-y))),sqrt(1-4*(pow(pT_cut_param,2)/pow(Minv,2)))); - double qqbar_cos_theta_min = 0; - + double qqbar_cos_theta_min = 0; + double angular_integration_EF = (qqbar_cos_theta_max-qqbar_cos_theta_min)+(1.0/3.0)*(pow(qqbar_cos_theta_max,3)-pow(qqbar_cos_theta_min,3)); - + // Combination with angular integration (Forward - Backward for q-qbar) double dsigma_EF = dsigma*angular_integration_EF; // Covolution with PDFs (flipping direction for q-qbar & qbar-q) // DBAR-D double dbardEB = dbard_PDF*dsigma_EF; - + double *propagator = propagators (Minv); - + return dbardEB * propagator[2]; } ////DBARD EVEN BACKWARD Integration in rapidity double ReactionAFB::integration_dbardEB_y (double Minv, void * ptr) { - + // Pass the necessary parameters (pointer to the PDFs and Minv) integration_params integrationParams; integrationParams.Minv = Minv; integrationParams.ptr = (ReactionTheory*) ptr; - + double result, error; gsl_function F; F.function = &(ReactionAFB::dbardEB_funct); F.params = &integrationParams; - + double inf = y_min_param / log(energy_param/Minv); double sup; if (y_max_param == 0.0) { @@ -1322,37 +1322,37 @@ double ReactionAFB::integration_dbardEB_y (double Minv, void * ptr) { } else { sup = y_max_param / log(energy_param/Minv); } - - gsl_integration_qng (&F, inf, sup, epsabs, epsrel, &result, &error, &calls); + + gsl_integration_qng (&F, inf, sup, epsabs, epsrel, &result, &error, &calls); return 2*result; } ////DBARD EVEN BACKWARD Integration in invariant mass double ReactionAFB::integration_dbardEB (double Minv_inf, double Minv_sup, void* ptr) { - + double result, error; gsl_function F; F.function = &(ReactionAFB::integration_dbardEB_y); F.params = ptr; - + double inf = Minv_inf; double sup = Minv_sup; - - gsl_integration_qng (&F, inf, sup, epsabs, epsrel, &result, &error, &calls); + + gsl_integration_qng (&F, inf, sup, epsabs, epsrel, &result, &error, &calls); return result; } ////DBARD ODD FORWARD Matrix element double ReactionAFB::dbardOF_funct (double yreduced, void * params) { - + // Pointer to access PDFs - ReactionTheory* ptr = (ReactionTheory*) ((integration_params*)params)->ptr; + ReactionTheory* ptr = (ReactionTheory*) ((integration_params*)params)->ptr; // Pass the invariant mass as parameter double Minv = ((integration_params*)params)-> Minv; - + // Partonic cross section parameters double Q = Minv; double z = pow(Minv,2)/pow(energy_param,2); @@ -1361,7 +1361,7 @@ double ReactionAFB::dbardOF_funct (double yreduced, void * params) { double x2 = sqrt(z)*exp(-y); double dsigma_temp = pow(Minv,2)/(96*PI); double dsigma = GeVtofb_param*dsigma_temp*(2*Minv/pow(energy_param,2))*(-(1.0/2.0)*log(z)); - + // Partons PDFs std::valarray<double> pdfx1(13); std::valarray<double> pdfx2(13); @@ -1374,41 +1374,41 @@ double ReactionAFB::dbardOF_funct (double yreduced, void * params) { double f2s = pdfx2[9] / x2; double f2b = pdfx2[11] / x2; - // PDF 
combinations + // PDF combinations double dbard_PDF = f1dbar*f2d + f1sbar*f2s + f1bbar*f2b; - + // Angular integration limits double qbarq_cos_theta_max = 0; - double qbarq_cos_theta_min = max(cos(PI - 2*atan(exp(-eta_cut_param-y))),-sqrt(1-4*(pow(pT_cut_param,2)/pow(Minv,2)))); - + double qbarq_cos_theta_min = max(cos(PI - 2*atan(exp(-eta_cut_param-y))),-sqrt(1-4*(pow(pT_cut_param,2)/pow(Minv,2)))); + double angular_integration_OB = pow(qbarq_cos_theta_max,2) - pow(qbarq_cos_theta_min,2); - + // Combination with angular integration (Forward - Backward for q-qbar) double dsigma_OB = dsigma*angular_integration_OB; // Covolution with PDFs (flipping direction for q-qbar & qbar-q) // DBAR-D double dbardOF = dbard_PDF*dsigma_OB; - + double *propagator = propagators (Minv); - + return dbardOF * propagator[3]; } ////DBARD ODD FORWARD Integration in rapidity double ReactionAFB::integration_dbardOF_y (double Minv, void * ptr) { - + // Pass the necessary parameters (pointer to the PDFs and Minv) integration_params integrationParams; integrationParams.Minv = Minv; integrationParams.ptr = (ReactionTheory*) ptr; - + double result, error; gsl_function F; F.function = &(ReactionAFB::dbardOF_funct); F.params = &integrationParams; - + double inf = y_min_param / log(energy_param/Minv); double sup; if (y_max_param == 0.0) { @@ -1416,37 +1416,37 @@ double ReactionAFB::integration_dbardOF_y (double Minv, void * ptr) { } else { sup = y_max_param / log(energy_param/Minv); } - - gsl_integration_qng (&F, inf, sup, epsabs, epsrel, &result, &error, &calls); + + gsl_integration_qng (&F, inf, sup, epsabs, epsrel, &result, &error, &calls); return 2*result; } ////DBARD ODD FORWARD Integration in invariant mass double ReactionAFB::integration_dbardOF (double Minv_inf, double Minv_sup, void* ptr) { - + double result, error; gsl_function F; F.function = &(ReactionAFB::integration_dbardOF_y); F.params = ptr; - + double inf = Minv_inf; double sup = Minv_sup; - - gsl_integration_qng (&F, inf, sup, epsabs, epsrel, &result, &error, &calls); + + gsl_integration_qng (&F, inf, sup, epsabs, epsrel, &result, &error, &calls); return result; } ////DBARD ODD BACKWARD Matrix element double ReactionAFB::dbardOB_funct (double yreduced, void * params) { - + // Pointer to access PDFs - ReactionTheory* ptr = (ReactionTheory*) ((integration_params*)params)->ptr; + ReactionTheory* ptr = (ReactionTheory*) ((integration_params*)params)->ptr; // Pass the invariant mass as parameter double Minv = ((integration_params*)params)-> Minv; - + // Partonic cross section parameters double Q = Minv; double z = pow(Minv,2)/pow(energy_param,2); @@ -1455,7 +1455,7 @@ double ReactionAFB::dbardOB_funct (double yreduced, void * params) { double x2 = sqrt(z)*exp(-y); double dsigma_temp = pow(Minv,2)/(96*PI); double dsigma = GeVtofb_param*dsigma_temp*(2*Minv/pow(energy_param,2))*(-(1.0/2.0)*log(z)); - + // Partons PDFs std::valarray<double> pdfx1(13); std::valarray<double> pdfx2(13); @@ -1468,41 +1468,41 @@ double ReactionAFB::dbardOB_funct (double yreduced, void * params) { double f2s = pdfx2[9] / x2; double f2b = pdfx2[11] / x2; - // PDF combinations + // PDF combinations double dbard_PDF = f1dbar*f2d + f1sbar*f2s + f1bbar*f2b; - + // Angular integration limits double qqbar_cos_theta_max = min(cos(2*atan(exp(-eta_cut_param-y))),sqrt(1-4*(pow(pT_cut_param,2)/pow(Minv,2)))); - double qqbar_cos_theta_min = 0; - + double qqbar_cos_theta_min = 0; + double angular_integration_OF = pow(qqbar_cos_theta_max,2) - pow(qqbar_cos_theta_min,2); - + // Combination with angular 
integration (Forward - Backward for q-qbar) double dsigma_OF = dsigma*angular_integration_OF; // Covolution with PDFs (flipping direction for q-qbar & qbar-q) // DBAR-D double dbardOB = dbard_PDF*dsigma_OF; - + double *propagator = propagators (Minv); - + return dbardOB * propagator[3]; } ////DBARD ODD BACKWARD Integration in rapidity double ReactionAFB::integration_dbardOB_y (double Minv, void * ptr) { - + // Pass the necessary parameters (pointer to the PDFs and Minv) integration_params integrationParams; integrationParams.Minv = Minv; integrationParams.ptr = (ReactionTheory*) ptr; - + double result, error; gsl_function F; F.function = &(ReactionAFB::dbardOB_funct); F.params = &integrationParams; - + double inf = y_min_param / log(energy_param/Minv); double sup; if (y_max_param == 0.0) { @@ -1510,25 +1510,25 @@ double ReactionAFB::integration_dbardOB_y (double Minv, void * ptr) { } else { sup = y_max_param / log(energy_param/Minv); } - - gsl_integration_qng (&F, inf, sup, epsabs, epsrel, &result, &error, &calls); + + gsl_integration_qng (&F, inf, sup, epsabs, epsrel, &result, &error, &calls); return 2*result; } ////DBARD ODD BACKWARD Integration in invariant mass double ReactionAFB::integration_dbardOB (double Minv_inf, double Minv_sup, void* ptr) { - + double result, error; gsl_function F; F.function = &(ReactionAFB::integration_dbardOB_y); F.params = ptr; - + double inf = Minv_inf; double sup = Minv_sup; - - gsl_integration_qng (&F, inf, sup, epsabs, epsrel, &result, &error, &calls); + + gsl_integration_qng (&F, inf, sup, epsabs, epsrel, &result, &error, &calls); return result; } @@ -1539,29 +1539,29 @@ double ReactionAFB::AFB (double Minv_inf, double Minv_sup) double uubarEB = integration_uubarEB (Minv_inf, Minv_sup, this); double uubarOF = integration_uubarOF (Minv_inf, Minv_sup, this); double uubarOB = integration_uubarOB (Minv_inf, Minv_sup, this); - + double ubaruEF = integration_ubaruEF (Minv_inf, Minv_sup, this); double ubaruEB = integration_ubaruEB (Minv_inf, Minv_sup, this); double ubaruOF = integration_ubaruOF (Minv_inf, Minv_sup, this); double ubaruOB = integration_ubaruOB (Minv_inf, Minv_sup, this); - + double ddbarEF = integration_ddbarEF (Minv_inf, Minv_sup, this); double ddbarEB = integration_ddbarEB (Minv_inf, Minv_sup, this); double ddbarOF = integration_ddbarOF (Minv_inf, Minv_sup, this); double ddbarOB = integration_ddbarOB (Minv_inf, Minv_sup, this); - - double dbardEF = integration_dbardEF (Minv_inf, Minv_sup, this); + + double dbardEF = integration_dbardEF (Minv_inf, Minv_sup, this); double dbardEB = integration_dbardEB (Minv_inf, Minv_sup, this); double dbardOF = integration_dbardOF (Minv_inf, Minv_sup, this); double dbardOB = integration_dbardOB (Minv_inf, Minv_sup, this); - + // Reconstructed Forward and Backward double Forward = uubarEF+ubaruEF+ddbarEF+dbardEF+uubarOF+ubaruOF+ddbarOF+dbardOF; double Backward = uubarEB+ubaruEB+ddbarEB+dbardEB+uubarOB+ubaruOB+ddbarOB+dbardOB; - + // Reconstructed AFB double AFB = (Forward - Backward) / (Forward + Backward); - + return AFB; } @@ -1571,8 +1571,8 @@ extern "C" ReactionAFB* create() { } // Initialize at the start of the computation -int ReactionAFB::initAtStart(const string &s) -{ +int ReactionAFB::atStart(const string &s) +{ // Parameters from "/reactions/AFB/yaml/parameters.yaml" // Check energy parameter: std::cout << checkParam("energy") << std::endl; @@ -1598,17 +1598,17 @@ int ReactionAFB::initAtStart(const string &s) std::cout << "\n\n FATAL ERROR: di-lepton rapidity lower cut (y_min) is not defined !!! 
\n\n" <<std::endl; return 1; } - + // Check rapidity upper cut parameter: std::cout << checkParam("y_max") << std::endl; if ( ! checkParam("y_max") ) { std::cout << "\n\n FATAL ERROR: di-lepton rapidity upper cut (y_max) is not defined !!! \n\n" <<std::endl; return 1; } - + // Constant PI = 3.14159265; - + // Read default parameters GeVtofb_param = pow(10, -3) * GetParam("convFac"); alphaEM_param = GetParam("alphaem"); @@ -1622,12 +1622,12 @@ int ReactionAFB::initAtStart(const string &s) pT_cut_param = GetParam("pT_cut"); y_min_param = GetParam("y_min"); // not implemented yet y_max_param = GetParam("y_max"); // not implemented yet - + // Calculate fixed parameters e_param = sqrt(4*PI*alphaEM_param); gsm_param = (e_param/(sqrt(stheta2W_param)*sqrt(1-stheta2W_param)))*sqrt(1+pow(stheta2W_param,2)); smangle_param = atan(-stheta2W_param); - + // Foton couplings foton_Vu = e_param*(2.0/3.0); foton_Au = 0; @@ -1647,7 +1647,7 @@ int ReactionAFB::initAtStart(const string &s) Z_Al = (1.0/2.0)*gsm_param*(-cos(smangle_param)/2.0); Z_Vnu = (1.0/2.0)*gsm_param*(cos(smangle_param)/2.0); Z_Anu = (1.0/2.0)*gsm_param*(cos(smangle_param)/2.0); - + // Even combination of couplings even_foton_up = (pow(foton_Vu,2)+pow(foton_Au,2))*(pow(foton_Vl,2)+pow(foton_Al,2)); even_foton_down = (pow(foton_Vd,2)+pow(foton_Ad,2))*(pow(foton_Vl,2)+pow(foton_Al,2)); @@ -1670,7 +1670,7 @@ int ReactionAFB::initAtStart(const string &s) // Main function to compute results at an iteration int ReactionAFB::compute(int dataSetID, valarray<double> &val, map<string, valarray<double> > &err) { - auto *Minv_min = GetBinValues(dataSetID,"Minv_min"), *Minv_max = GetBinValues(dataSetID,"Minv_max"); + auto *Minv_min = GetBinValues(dataSetID,"Minv_min"), *Minv_max = GetBinValues(dataSetID,"Minv_max"); if (Minv_min == nullptr || Minv_max == nullptr) { std::cout << "\n\nFATAL ERROR: AFB code requires Invariant mass bins to be present !!!" << std::endl; std::cout << "CHECK THE DATAFILE !!!" << std::endl; @@ -1686,8 +1686,8 @@ int ReactionAFB::compute(int dataSetID, valarray<double> &val, map<string, valar std::cout << "\n\nFATAL ERROR: uneven number of Invariant mass min and max !!!" << std::endl; std::cout << "CHECK THE DATAFILE !!!" << std::endl; return 1; - } - + } + // Fill the array "val[i]" with the result of the AFB function for (int i = 0; i < Npnt_min; i++) { double AFB_result = AFB (min[i], max[i]); diff --git a/reactions/APPLgrid/include/ReactionAPPLgrid.h b/reactions/APPLgrid/include/ReactionAPPLgrid.h index d9df059fe43837040a3033d3456ab3022ee2642d..e860f7e4ab0ab83e27cbcbeb4a67a2977aab1eeb 100644 --- a/reactions/APPLgrid/include/ReactionAPPLgrid.h +++ b/reactions/APPLgrid/include/ReactionAPPLgrid.h @@ -9,7 +9,7 @@ /** @class' ReactionAPPLgrid - @brief A wrapper class for APPLgrid reaction + @brief A wrapper class for APPLgrid reaction Based on the ReactionTheory class. Reads options produces 3d cross section. 
@@ -33,7 +33,7 @@ class ReactionAPPLgrid : public ReactionTheory ReactionAPPLgrid(); ~ReactionAPPLgrid(); virtual string getReactionName() const { return "APPLgrid" ;}; - int initAtStart(const string &); + int atStart(const string &); virtual void setDatasetParameters( int dataSetID, map<string,string> pars, map<string,double> parsDataset) override ; virtual int compute(int dataSetID, valarray<double> &val, map<string, valarray<double> > &err); protected: diff --git a/reactions/APPLgrid/include/appl_grid/appl_grid.h b/reactions/APPLgrid/include/appl_grid/appl_grid.h deleted file mode 100644 index bc74a2050b09d2d0caa53b8fb56e4dd69b879f9e..0000000000000000000000000000000000000000 --- a/reactions/APPLgrid/include/appl_grid/appl_grid.h +++ /dev/null @@ -1,696 +0,0 @@ -// emacs: this is -*- c++ -*- - -// appl_grid.h - -// grid class header - all the functions needed to create and -// fill the grid from an NLO calculation program -// -// Copyright (C) 2007 Mark Sutton (sutt@hep.ucl.ac.uk) - -// $Id: appl_grid.h, v1.00 2007/10/16 17:01:39 sutt - -// Fixme: this needs to be tidied up. eg there are too many different, -// and too many version of, accessors for x/y, Q2/tau etc there -// should be only one set, for x and Q2 *or* y and tau, but -// not both. In fact they should be for x and Q2, since y and tau -// should be purely an internal grid issue of no concern for the -// user. - -#ifndef __APPL_GRID_H -#define __APPL_GRID_H - -#include <vector> -#include <iostream> -#include <sstream> -#include <cmath> -#include <string> -#include <exception> - -#include "TH1D.h" - - -double _fy(double x); -double _fx(double y); -double _fun(double y); - - -#include "correction.h" - -namespace appl { - - -/// forward declarations - full definitions included -/// from appl_grid.cxx -class igrid; -class appl_pdf; - - -const int MAXGRIDS = 5; - - -/// externally visible grid class -class grid { - -public: - - // grid error exception - class exception : public std::exception { - public: - exception(const std::string& s) { std::cerr << what() << " " << s << std::endl; }; - //exception(std::ostream& s) { std::cerr << what() << " " << s << std::endl; }; - exception(std::ostream& s) { std::stringstream ss; ss << s.rdbuf(); std::cerr << what() << " " << ss.str() << std::endl; }; - virtual const char* what() const throw() { return "appl::grid::exception"; } - }; - - typedef enum { STANDARD=0, AMCATNLO=1, SHERPA=2, LAST_TYPE=3 } CALCULATION; - -public: - - grid(int NQ2=50, double Q2min=10000.0, double Q2max=25000000.0, int Q2order=5, - int Nx=50, double xmin=1e-5, double xmax=0.9, int xorder=5, - int Nobs=20, double obsmin=100.0, double obsmax=7000.0, - std::string genpdf="mcfm_pdf", - int leading_order=0, int nloops=1, - std::string transform="f2"); - - grid( int Nobs, const double* obsbins, - int NQ2=50, double Q2min=10000.0, double Q2max=25000000.0, int Q2order=5, - int Nx=50, double xmin=1e-5, double xmax=0.9, int xorder=5, - std::string genpdf="mcfm_pdf", - int leading_order=0, int nloops=1, - std::string transform="f2" ); - - grid( const std::vector<double>& obs, - int NQ2=50, double Q2min=10000.0, double Q2max=25000000.0, int Q2order=5, - int Nx=50, double xmin=1e-5, double xmax=0.9, int xorder=5, - std::string genpdf="mcfm_pdf", - int leading_order=0, int nloops=1, - std::string transform="f2" ); - - // build a grid but don't build the internal igrids - these can be added later - grid( const std::vector<double>& obs, - std::string genpdf="nlojet_pdf", - int leading_order=0, int nloops=1, - std::string 
transform="f2" ); - - // copy constructor - grid(const grid& g); - - // read from a file - grid(const std::string& filename="./grid.root", const std::string& dirname="grid"); - - // add an igrid for a given bin and a given order - void add_igrid(int bin, int order, igrid* g); - - virtual ~grid(); - - // update grid with one set of event weights - void fill(const double x1, const double x2, const double Q2, - const double obs, - const double* weight, const int iorder); - - - void fill_phasespace(const double x1, const double x2, const double Q2, - const double obs, - const double* weight, const int iorder); - - - void fill_grid(const double x1, const double x2, const double Q2, - const double obs, - const double* weight, const int iorder) { - if (isOptimised()) fill(x1, x2, Q2, obs, weight, iorder); - else fill_phasespace(x1, x2, Q2, obs, weight, iorder); - } - - - void fill_index(const int ix1, const int ix2, const int iQ2, - const int iobs, - const double* weight, const int iorder); - - - // trim/untrim the grid to reduce memory footprint - void trim(); - void untrim(); - - // formatted output - std::ostream& print(std::ostream& s=std::cout) const; - - // don't do anything anymore - // void setuppdf(void (*pdf)(const double& , const double&, double* ) ); - - // get the interpolated pdf's - // void pdfinterp(double x1, double Q2, double* f); - - - // perform the convolution to a specified number of loops - // nloops=-1 gives the nlo part only - std::vector<double> vconvolute(void (*pdf)(const double& , const double&, double* ), - double (*alphas)(const double& ), - int nloops, - double rscale_factor=1, - double fscale_factor=1, - double Escale=1 ); - - std::vector<double> vconvolute(void (*pdf1)(const double& , const double&, double* ), - void (*pdf2)(const double& , const double&, double* ), - double (*alphas)(const double& ), - int nloops, - double rscale_factor=1, - double fscale_factor=1, - double Escale=1 ); - - - // perform the convolution to a specified number of loops - // nloops=-1 gives the nlo part only - std::vector<double> vconvolute(double Escale, - void (*pdf)(const double& , const double&, double* ), - double (*alphas)(const double& ), - int nloops, - double rscale_factor=1, - double fscale_factor=1 ) { - return vconvolute(pdf, alphas, nloops, rscale_factor, fscale_factor, Escale); - } - - - // perform the convolution to the max number of loops in grid - std::vector<double> vconvolute(void (*pdf)(const double& , const double&, double* ), - double (*alphas)(const double& ) ) { - return vconvolute( pdf, alphas, m_order-1 ); - } - - - // perform the convolution to the max number of loops in grid - std::vector<double> vconvolute(double Escale, - void (*pdf)(const double& , const double&, double* ), - double (*alphas)(const double& ) ) { - return vconvolute( Escale, pdf, alphas, m_order-1 ); - } - - - // perform the convolution to a specified number of loops - // for a single sub process, nloops=-1 gives the nlo part only - std::vector<double> vconvolute_subproc(int subproc, - void (*pdf)(const double& , const double&, double* ), - double (*alphas)(const double& ), - int nloops, - double rscale_factor=1, double Escale=1 ); - - - // perform the convolution to a specified number of loops - // for a single sub process, nloops=-1 gives the nlo part only - std::vector<double> vconvolute_subproc(int subproc, - double Escale, - void (*pdf)(const double& , const double&, double* ), - double (*alphas)(const double& ), - int nloops, - double rscale_factor=1 ) { - return 
vconvolute_subproc(subproc, pdf, alphas, nloops, rscale_factor, Escale); - } - - - // perform the convolution to the max number of loops in grid - // for a single sub process - std::vector<double> vconvolute_subproc(int subproc, - void (*pdf)(const double& , const double&, double* ), - double (*alphas)(const double& ) ) { - return vconvolute_subproc( subproc, pdf, alphas, m_order-1 ); - } - - // perform the convolution to the max number of loops in grid - // for a single sub process - std::vector<double> vconvolute_subproc(int subproc, - double Escale, - void (*pdf)(const double& , const double&, double* ), - double (*alphas)(const double& ) ) { - return vconvolute_subproc( subproc, Escale, pdf, alphas, m_order-1 ); - } - - - double vconvolute_bin( int bin, - void (*pdf)(const double& , const double&, double* ), - double (*alphas)(const double&) ); - - - // perform the convolution to a specified number of loops - // nloops=-1 gives the nlo part only - TH1D* convolute(void (*pdf)(const double& , const double&, double* ), - double (*alphas)(const double& ), - int nloops, - double rscale_factor=1, - double fscale_factor=1, - double Escale=1 ); - - // perform the convolution to a specified number of loops - // nloops=-1 gives the nlo part only - TH1D* convolute(void (*pdf1)(const double& , const double&, double* ), - void (*pdf2)(const double& , const double&, double* ), - double (*alphas)(const double& ), - int nloops, - double rscale_factor=1, - double fscale_factor=1, - double Escale=1 ); - - - TH1D* convolute(double Escale, - void (*pdf)(const double& , const double&, double* ), - double (*alphas)(const double& ), - int nloops, - double rscale_factor=1, - double fscale_factor=1 ) { - return convolute(pdf, alphas, nloops, rscale_factor, fscale_factor, Escale); - } - - - // perform the convolution to the max number of loops in grid - TH1D* convolute(void (*pdf)(const double& , const double&, double* ), - double (*alphas)(const double& ) ) { - return convolute( pdf, alphas, m_order-1 ); - } - - // perform the convolution to the max number of loops in grid - TH1D* convolute(double Escale, - void (*pdf)(const double& , const double&, double* ), - double (*alphas)(const double& ) ) { - return convolute( Escale, pdf, alphas, m_order-1 ); - } - - - // perform the convolution to a specified number of loops - // for a single sub process, nloops=-1 gives the nlo part only - TH1D* convolute_subproc(int subproc, - void (*pdf)(const double& , const double&, double* ), - double (*alphas)(const double& ), - int nloops, - double rscale_factor=1, double Escale=1 ); - - TH1D* convolute_subproc(int subproc, - double Escale, - void (*pdf)(const double& , const double&, double* ), - double (*alphas)(const double& ), - int nloops, - double rscale_factor=1 ) { - return convolute_subproc( subproc, pdf, alphas, nloops, rscale_factor, Escale); - } - - // perform the convolution to the max number of loops in grid - // for a single sub process - TH1D* convolute_subproc(int subproc, - void (*pdf)(const double& , const double&, double* ), - double (*alphas)(const double& ) ) { - return convolute_subproc( subproc, pdf, alphas, m_order-1 ); - } - - TH1D* convolute_subproc(int subproc, - double Escale, - void (*pdf)(const double& , const double&, double* ), - double (*alphas)(const double& ) ) { - return convolute_subproc( subproc, Escale, pdf, alphas, m_order-1 ); - } - - - // optimise the bin limits - void optimise(bool force=false); - void optimise(int NQ2, int Nx); - void optimise(int NQ2, int Nx1, int Nx2); - - // 
redefine the limits by hand - void redefine(int iobs, int iorder, - int NQ2, double Q2min, double Q2max, - int Nx, double xmin, double xmax); - - bool setNormalised(bool t=true) { return m_normalised=t; } - bool getNormalised() const { return m_normalised; } - - - // set the filling to be symmetric and test status - bool symmetrise(bool t=true) { return m_symmetrise=t; } - bool isSymmetric() const { return m_symmetrise; } - - bool reweight(bool t=false); - - // access to internal grids if need be - const igrid* weightgrid(int iorder, int iobs) const { return m_grids[iorder][iobs]; } - - // save grid to specified file - void Write(const std::string& filename, const std::string& dirname="grid", const std::string& pdfname="" ); - - // accessors for the observable after possible bin combination - int Nobs() const { return m_obs_bins_combined->GetNbinsX(); } - double obs(int iobs) const { return m_obs_bins_combined->GetBinCenter(iobs+1); } - int obsbin(double obs) const { return m_obs_bins_combined->FindBin(obs)-1; } - double obslow(int iobs) const { return m_obs_bins_combined->GetBinLowEdge(iobs+1); } - double obsmin() const { return obslow(0); } - double obsmax() const { return obslow(Nobs()); } - double deltaobs(int iobs) const { return m_obs_bins_combined->GetBinWidth(iobs+1); } - - const TH1D* getReference() const { return m_obs_bins_combined; } - TH1D* getReference() { return m_obs_bins_combined; } - - - // TH1D* getXReference() { - // combineReference(); - // return m_obs_bins_combined; - // } - - - // accessors for the observable befor any bin combination - int Nobs_internal() const { return m_obs_bins->GetNbinsX(); } - double obs_internal(int iobs) const { return m_obs_bins->GetBinCenter(iobs+1); } - int obsbin_internal(double obs) const { return m_obs_bins->FindBin(obs)-1; } - double obslow_internal(int iobs) const { return m_obs_bins->GetBinLowEdge(iobs+1); } - double deltaobs_internal(int iobs) const { return m_obs_bins->GetBinWidth(iobs+1); } - double obsmin_internal() const { return obslow_internal(0); } - double obsmax_internal() const { return obslow_internal(Nobs_internal()); } - - const TH1D* getReference_internal() const { return m_obs_bins; } - TH1D* getReference_internal() { return m_obs_bins; } - - - - - // number of subprocesses - int subProcesses(int i) const; - - // general status accessors - double& run() { return m_run; } - - // accessors for the status information - bool isOptimised() const { return m_optimised; } - bool isTrimmed() const { return m_trimmed; } - - // lowest order of process - int leadingOrder() const { return m_leading_order; } - - /// maximum number of orders ( lo=1, nlo=2, nnlo=3 ) - /// but aMC@NLO uses 4 grids for the NLO, so m_order - /// will be 4, but really it is still only available - /// 1 loop, so take account of this - int nloops() const { - if ( m_type!=AMCATNLO ) return m_order-1; - else if ( m_order>0 ) return 1; - else return 0; - } - - // find out which transform and which pdf combination are being used - std::string getTransform() const { return m_transform; } - - static double transformvar(); - static double transformvar(double v); - - std::string getGenpdf() const { return m_genpdfname; } - - std::string version() const { return m_version; } - std::string appl_version() const; - - double getCMSScale() const { return m_cmsScale; } - void setCMSScale(double cmsScale) { m_cmsScale=cmsScale; } - - double getDynamicScale() const { return m_dynamicScale; } - void setDynamicScale(double dynamicScale) { m_dynamicScale=dynamicScale; } - - 
- // set optimise flag on all sub grids - bool setOptimised(bool t=true) { - return m_optimised=t; - // for ( int iorder=0 ; iorder<2 ; iorder++ ) { - // for ( int iobs=0 ; iobs<Nobs() ; iobs++ ) m_grids[iorder][iobs]->setOptimised(t); - // } - } - - // find the number of words used for storage - int size() const; - - // get the cross sections - double& crossSection() { return m_total; } - double& crossSectionError() { return m_totalerror; } - - // double Lambda() const { return m_Lambda2; } - - // very lovely algebraic operators - grid& operator=(const grid& g); - grid& operator*=(const double& d); - grid& operator+=(const grid& g); - - /// test if grids have the same limits etc - bool operator==(const grid& g) const; - - // shouldn't have these, the grid is too large a structure - // to be passed in a return - // grid operator*(const double& d) const { return grid(*this)*=d; } - // grid operator+(const grid& g) const { return grid(*this)+=g; } - - void setDocumentation(const std::string& s); - void addDocumentation(const std::string& s); - - std::string getDocumentation() const { return m_documentation; } - std::string& getDocumentation() { return m_documentation; } - - - /// set the range of the observable bins, with an optional - /// scaling of the observable valuesfor channging units - void setBinRange(int ilower, int iupper, double xScaleFactor=1); - void setRange(double lower, double upper, double xScaleFactor=1); - - - /// add a correction as a std::vector - void addCorrection( std::vector<double>& v, const std::string& label="", bool combine=false ); - - - /// add a correction by histogram - void addCorrection(TH1D* h, const std::string& label="", double scale=1, bool combine=false ); - - - /// access the corrections - // const std::vector<std::vector<double> >& corrections() const { - const std::vector<correction>& corrections() const { - return m_corrections; - } - - /// get the correction labels - const std::vector<std::string >& correctionLabels() const { - return m_correctionLabels; - } - - /// will the corrections be applied? - bool getApplyCorrections() const { return m_applyCorrections; } - - bool setApplyCorrections(bool b) { - std::cout << "appl::grid bin-by-bin corrections will " - << ( b ? "" : "not " ) << "be applied" << std::endl; - return m_applyCorrections=b; - } - - /// apply corrections to a std::vector - void applyCorrections(std::vector<double>& v, std::vector<bool>& applied); - - - /// will a specific correction be applied? - bool getApplyCorrection(unsigned i) const { - if ( m_applyCorrections ) return true; - else if ( i<m_applyCorrection.size() ) return m_applyCorrection.at(i); - return false; - } - - bool setApplyCorrection(unsigned i, bool b) { - if ( i>=m_corrections.size() ) return false; - std::cout << "appl::grid bin-by-bin correction will " - << ( b ? 
"" : "not " ) << "be applied for correction " << i; - if ( m_correctionLabels[i]!="" ) std::cout << " (" << m_correctionLabels[i] << ")"; - std::cout << std::endl; - return m_applyCorrection[i]=b; - } - - /// apply corrections to a std::vector - bool applyCorrection(unsigned i, std::vector<double>& v); - - - /// set the ckm matrix values if need be - /// takes a 3x3 matrix with the format { { Vud, Vus, Vub }, { Vcd, Vcs, Vcb }, { Vtd, Vts, Vtb } } - void setckm( const std::vector<std::vector<double> >& ckm ); - - /// takes a flat 9 element vector (or c array) with the format { Vud, Vus, Vub, Vcd, Vcs, Vcb, Vtd, Vts, Vtb } - void setckm( const std::vector<double>& ckm ); - void setckm( const double* ckm ); - - - /// set the squared ckm matrix values if need be - /// the squared terms for eihter W+ or W- production - you probably should use setckm() - void setckm2( const std::vector<std::vector<double> >& ckm2 ); - - /// set the ckm matrix and squared ckm matrix values if need be - const std::vector<std::vector<double> >& getckm() const; - const std::vector<std::vector<double> >& getckm2() const; - - - /// flag custom convolution routines - - void sherpa() { m_type = SHERPA; std::cout << "appl::grid::sherpa() using SHERPA convolution" << std::endl; } - void amcatnlo() { m_type = AMCATNLO; std::cout << "appl::grid::amcatnlo() using aMC@NLO convolution" << std::endl; } - void standard() { m_type = STANDARD; std::cout << "appl::grid::standard() using standard convolution" << std::endl; } - - CALCULATION calculation() const { return m_type; } - - static std::string _calculation(CALCULATION C) { - switch (C) { - case STANDARD: - return "standard"; - case SHERPA: - return "sherpa"; - case AMCATNLO: - return "amcatnlo"; - case LAST_TYPE: - return "last_type"; // NB: shouldn't ever be used - } - return "unknown"; - } - - /// reduce number of subprocesses if possible - void shrink(const std::string& name, int ckmcharge=0); - - /// set bins to be combined after the convolution - void combine( std::vector<int>& v) { if ( (m_combine=v).size() ) combineReference(true); } - - /// set combine the be combined after the convolution - void combineReference(bool force=false); - - void combineBins(std::vector<double>& v, int power=1 ) const; - - double fx(double x) const; - double fy(double x) const; - - const appl_pdf* genpdf(int i) const { return m_genpdf[i]; } - - std::vector<double>& userdata() { return m_userdata; } - const std::vector<double>& userdata() const { return m_userdata; } - -protected: - - // internal common construct for the different types of constructor - void construct(int Nobs, - int NQ2=50, double Q2min=10000.0, double Q2max=25000000.0, int Q2order=4, - int Nx=50, double xmin=1e-5, double xmax=0.9, int xorder=3, - int order=2, - std::string transform="f" ); - -protected: - - /// std::string manipulators to parse the pdf names - - /// return chomped std::string - static std::string chomptoken(std::string& s1, const std::string& s2) - { - std::string s3 = ""; - std::string::size_type pos = s1.find(s2); - if ( pos != std::string::npos ) { - s3 = s1.substr(0, pos); - s1.erase(0, pos+1); - } - else { - s3 = s1.substr(0, s1.size()); - s1.erase(0, s1.size()+1); - } - return s3; - } - - static std::vector<std::string> parse(std::string s, const std::string& key) { - std::vector<std::string> clauses; - while ( s.size() ) clauses.push_back( chomptoken(s, key) ); - return clauses; - } - - /// get the required pdf combinations from those registered - void findgenpdf( std::string s ); - - /// add a 
generic pdf to the data base of registered pdfs - void addpdf( const std::string& s, const std::vector<int>& combinations=std::vector<int>() ); - - appl_pdf* genpdf(int i) { return m_genpdf[i]; } - -public: - - int subproc() const { return m_subproc; } - -protected: - - // histograms for saving the observable - TH1D* m_obs_bins; - TH1D* m_obs_bins_combined; - - // order in alpha_s of tree level contribution - int m_leading_order; - - // how many orders in the calculation, lo, nlo, nnlo etc - int m_order; - - // the actual weight grids themselves - igrid** m_grids[MAXGRIDS]; /// up to MAXGRIDS grids LO, NLO, NNLO, Real virtual, etc - - // total cross section qand uncertainty - double m_total; - double m_totalerror; - - // state variables - double m_run; - bool m_optimised; - bool m_trimmed; - - bool m_normalised; - - bool m_symmetrise; - - // transform and pdf combination tags - std::string m_transform; - std::string m_genpdfname; - - // pdf combination class - appl_pdf* m_genpdf[MAXGRIDS]; - - static const std::string m_version; - - double m_cmsScale; - - double m_dynamicScale; - - /// bin by bin correction factors - // std::vector<std::vector<double> > m_corrections; - std::vector<correction> m_corrections; - std::vector<std::string> m_correctionLabels; - - - /// should we apply the corrections? - bool m_applyCorrections; - - /// flag vector to determine whether each individual - /// correction should be applied - std::vector<bool> m_applyCorrection; - - std::string m_documentation; - - std::vector<double> m_ckmsum; - std::vector<std::vector<double> > m_ckm2; - std::vector<std::vector<double> > m_ckm; - - CALCULATION m_type; - - bool m_read; - - std::vector<int> m_combine; - - int m_subproc; - int m_bin; - - std::vector<double> m_userdata; - -}; - - -}; - -// shouldn't have this, grid is too large a structure -// grid operator*(const double& d, const appl::grid& g) { return g*d; } - -std::ostream& operator<<(std::ostream& s, const appl::grid& mygrid); - - - -#endif // __APPL_GRID_H diff --git a/reactions/APPLgrid/include/appl_grid/correction.h b/reactions/APPLgrid/include/appl_grid/correction.h deleted file mode 100644 index 647bbf86df3684e43fb86d9dec1ec9680efb2ad4..0000000000000000000000000000000000000000 --- a/reactions/APPLgrid/include/appl_grid/correction.h +++ /dev/null @@ -1,67 +0,0 @@ -// emacs: this is -*- c++ -*- -// -// @file correction.h -// class to store the multipliciative post processing -// corrections to be applied, only basic for the time -// but will be extended as appropriate -// -// Copyright (C) 2014 M.Sutton (sutt@cern.ch) -// -// $Id: correction.h, v0.0 Sun 23 Mar 2014 09:08:46 CET sutt $ - - -#ifndef CORRECTION_H -#define CORRECTION_H - -#include <iostream> -#include <vector> -#include <string> - - -// typedef std::vector<double> correction; - - -class correction { - -public: - - correction(const std::vector<double>& v, const std::string& s="" ) : mlabel(s), mv(v) { } - - virtual ~correction() { } - - std::string label() const { return mlabel; } - - unsigned size() const { return mv.size(); } - - double& operator[](int i) { return mv[i]; } - double operator[](int i) const { return mv[i]; } - - operator std::vector<double>&() { return mv; } - - correction operator=(const std::vector<double>& v) { mv=v; return *this; } - -private: - - std::string mlabel; - std::vector<double> mv; - -}; - - -// inline std::ostream& operator<<( std::ostream& s, const correction& /* _c */ ) { -// return s; -// } - - - -#endif // CORRECTION_H - - - - - - - - - - diff --git 
a/reactions/APPLgrid/src/ReactionAPPLgrid.cc b/reactions/APPLgrid/src/ReactionAPPLgrid.cc index 37386070f01da427578627f205ca16ff6420c59f..132a1dad4f9deb854565d207b32a756a09eb86d1 100644 --- a/reactions/APPLgrid/src/ReactionAPPLgrid.cc +++ b/reactions/APPLgrid/src/ReactionAPPLgrid.cc @@ -21,7 +21,7 @@ void xfxWrapper1(const double&x,const double&Q,double*results){active_xfxQ_funct ReactionAPPLgrid::ReactionAPPLgrid(){} ReactionAPPLgrid::~ReactionAPPLgrid(){} // Initialize at the start of the computation -int ReactionAPPLgrid::initAtStart(const string &s){return 0;} +int ReactionAPPLgrid::atStart(const string &s){return 0;} // Initialize for a given dataset: void ReactionAPPLgrid::setDatasetParameters(int dataSetID, map<string,string> pars, map<string, double> parsDataset) { @@ -75,11 +75,11 @@ void ReactionAPPLgrid::setDatasetParameters(int dataSetID, map<string,string> pa order = localOrder>order ? order : localOrder; } data.order=order; -// Determine MuR and MuF. Use default +// Determine MuR and MuF. Use default data.muR=pars.find("muR") == pars.end() ? GetParam("muR") : stod(pars["muR"]); data.muF=pars.find("muF") == pars.end() ? GetParam("muF") : stod(pars["muF"]); - if(data.muR==0)data.muR=1.0; + if(data.muR==0)data.muR=1.0; if(data.muF==0)data.muF=1.0; // bin width normalisation (by default no rescaling) data.flagNorm=false; @@ -192,7 +192,7 @@ int ReactionAPPLgrid::compute(int dataSetID, valarray<double> &val, map<string, for (std::size_t i=0; i<gridVals.size(); i++) gridVals[i] *= grid->deltaobs(i); - + // insert values from this grid into output array //val.resize(val.size() + grid->Nobs()); std::copy_n(gridVals.begin(), gridVals.size(), &val[pos]); diff --git a/reactions/BaseDISCC/include/ReactionBaseDISCC.h b/reactions/BaseDISCC/include/ReactionBaseDISCC.h index cfa0fae8bbcfa8eb23ec8d600241f5c5bb81bbe0..6948ba0c7877030ad1cfb72b2aa1b0169939f1e3 100644 --- a/reactions/BaseDISCC/include/ReactionBaseDISCC.h +++ b/reactions/BaseDISCC/include/ReactionBaseDISCC.h @@ -6,7 +6,7 @@ /** @class' ReactionBaseDISCC - @brief A wrapper class for BaseDISCC reaction + @brief A wrapper class for BaseDISCC reaction Based on the ReactionTheory class. Reads options produces 3d cross section. @@ -30,10 +30,10 @@ class ReactionBaseDISCC : public ReactionTheory public: virtual string getReactionName() const { return "BaseDISCC" ;}; - int initAtStart(const string &); + int atStart(const string &); virtual void setDatasetParameters( int dataSetID, map<string,string> pars, map<string,double> parsDataset) override ; virtual void initAtIteration() override; - + virtual int compute(int dataSetID, valarray<double> &valExternal, map<string, valarray<double> > &errExternal); protected: enum class dataFlav { incl, c} ; //!< Define final state. 
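A change that recurs throughout this patch is the rename of the per-reaction initialisation hook from initAtStart() to atStart(), as seen in ReactionAFB, ReactionAPPLgrid and the DIS base classes. The sketch below shows a minimal reaction module written against the renamed hook; the class name ReactionExample, the header name "ReactionTheory.h" and the parameter "exampleParam" are hypothetical, while atStart(), compute(), checkParam(), GetParam() and the create() factory follow the declarations visible in these hunks.

// Hypothetical module for illustration only; signatures mirror the surrounding hunks.
#include <string>
#include <valarray>
#include <map>
#include "ReactionTheory.h"   // assumed base-class header name

class ReactionExample : public ReactionTheory
{
 public:
  virtual std::string getReactionName() const { return "Example"; }

  // Renamed hook (was initAtStart()): called once before the fit starts.
  int atStart(const std::string &s)
  {
    if (!checkParam("exampleParam"))   // refuse to run if a required parameter is missing
      return 1;
    _p = GetParam("exampleParam");     // cache the value for use at each iteration
    return 0;
  }

  virtual int compute(int dataSetID, std::valarray<double> &val,
                      std::map<std::string, std::valarray<double> > &err)
  {
    val = _p;                          // placeholder: fill the predictions for this dataset
    return 0;
  }

 private:
  double _p = 0.0;
};

// Factory entry point, following the pattern used by the existing reactions.
extern "C" ReactionExample* create() { return new ReactionExample(); }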
@@ -64,14 +64,14 @@ class ReactionBaseDISCC : public ReactionTheory virtual void GetF2d( int dataSetID, valarray<double>& f2d); virtual void GetFLd( int dataSetID, valarray<double>& fld); virtual void GetxF3d( int dataSetID, valarray<double>& xf3d ); - + private: // Some buffering mechanism to avoid double calls map <int,valarray<double> > _f2u; //!< F2 for u-type quarks map <int,valarray<double> > _f2d; //!< F2 for d-type quarks map <int,valarray<double> > _flu; //!< FL for u-type quarks map <int,valarray<double> > _fld; //!< FL for d-type quarks - map <int,valarray<double> > _xf3u; + map <int,valarray<double> > _xf3u; map <int,valarray<double> > _xf3d; protected: diff --git a/reactions/BaseDISCC/src/ReactionBaseDISCC.cc b/reactions/BaseDISCC/src/ReactionBaseDISCC.cc index af70d1f251ea5900bf6fe85525a13b346b63abdd..4b287ca0edc050e91a8e5f87b3281dda81e9ac44 100644 --- a/reactions/BaseDISCC/src/ReactionBaseDISCC.cc +++ b/reactions/BaseDISCC/src/ReactionBaseDISCC.cc @@ -1,4 +1,4 @@ - + /* @file ReactionBaseDISCC.cc @date 2017-10-05 @@ -12,7 +12,7 @@ #include <IntegrateDIS.h> -// Helpers for QCDNUM (CC): +// Helpers for QCDNUM (CC): //! full const double CCEP2F[] = {0.,0.,1.,0.,1.,0., 0. ,1.,0.,1.,0.,0.,0.} ; @@ -59,7 +59,7 @@ extern "C" ReactionBaseDISCC* create() { // Initialize at the start of the computation -int ReactionBaseDISCC::initAtStart(const string &s) +int ReactionBaseDISCC::atStart(const string &s) { // This we do not want to fit: _Gf = GetParam("gf"); @@ -96,7 +96,7 @@ int ReactionBaseDISCC::compute(int dataSetID, valarray<double> &valExternal, map double polarity = GetPolarisation(dataSetID); - + if ( GetCharge(dataSetID) > 0) { val = 0.5*(yplus*f2 - yminus*xf3 - y*y*fl); val *= (1+polarity); @@ -133,17 +133,17 @@ int ReactionBaseDISCC::compute(int dataSetID, valarray<double> &valExternal, map // no idea how error could be treated: for now do nothing errExternal = err; } - + return 0; } void ReactionBaseDISCC::initAtIteration() { // Make sure to call the parent class initialization: - super::initAtIteration(); + super::initAtIteration(); // Get some basic parameters: _MW = GetParam("Mw"); - + // Re-set internal maps (faster access): for ( auto ds : _dsIDs) { (_f2u[ds])[0] = -100.; @@ -155,8 +155,8 @@ void ReactionBaseDISCC::initAtIteration() { } } -// -void ReactionBaseDISCC::setDatasetParameters( int dataSetID, map<string,string> pars, map<string,double> parsDataset) +// +void ReactionBaseDISCC::setDatasetParameters( int dataSetID, map<string,string> pars, map<string,double> parsDataset) { _polarisation[dataSetID] = (parsDataset.find("epolarity") != parsDataset.end()) ? parsDataset["epolarity"] : 0; _charge[dataSetID] = (parsDataset.find("echarge") != parsDataset.end()) ? 
parsDataset["echarge"] : 0; @@ -363,7 +363,7 @@ void ReactionBaseDISCC::GetF2u(int dataSetID, valarray<double>& f2u) // Get x,Q2 arrays: auto *q2p = GetBinValues(dataSetID,"Q2"), *xp = GetBinValues(dataSetID,"x"); auto q2 = *q2p, x = *xp; - + // Call QCDNUM const int id = 2; const int flag = 0; int Npnt = GetNpoint(dataSetID); switch ( GetDataFlav(dataSetID) ) @@ -388,7 +388,7 @@ void ReactionBaseDISCC::GetFLu(int dataSetID, valarray<double>& flu) // Get x,Q2 arrays: auto *q2p = GetBinValues(dataSetID,"Q2"), *xp = GetBinValues(dataSetID,"x"); auto q2 = *q2p, x = *xp; - + // Call QCDNUM const int id = 1; const int flag = 0; int Npnt = GetNpoint(dataSetID); switch ( GetDataFlav(dataSetID) ) @@ -412,7 +412,7 @@ void ReactionBaseDISCC::GetxF3u( int dataSetID, valarray<double>& xf3u ) // Get x,Q2 arrays: auto *q2p = GetBinValues(dataSetID,"Q2"), *xp = GetBinValues(dataSetID,"x"); auto q2 = *q2p, x = *xp; - + // Call QCDNUM const int id = 3; const int flag = 0; int Npnt = GetNpoint(dataSetID); switch ( GetDataFlav(dataSetID) ) @@ -440,7 +440,7 @@ void ReactionBaseDISCC::GetF2d(int dataSetID, valarray<double>& f2d) // Get x,Q2 arrays: auto *q2p = GetBinValues(dataSetID,"Q2"), *xp = GetBinValues(dataSetID,"x"); auto q2 = *q2p, x = *xp; - + // Call QCDNUM const int id = 2; const int flag = 0; int Npnt = GetNpoint(dataSetID); switch ( GetDataFlav(dataSetID) ) @@ -463,7 +463,7 @@ void ReactionBaseDISCC::GetFLd(int dataSetID, valarray<double>& fld) // Get x,Q2 arrays: auto *q2p = GetBinValues(dataSetID,"Q2"), *xp = GetBinValues(dataSetID,"x"); auto q2 = *q2p, x = *xp; - + // Call QCDNUM const int id = 1; const int flag = 0; int Npnt = GetNpoint(dataSetID); switch ( GetDataFlav(dataSetID) ) @@ -487,7 +487,7 @@ void ReactionBaseDISCC::GetxF3d( int dataSetID, valarray<double>& xf3d ) // Get x,Q2 arrays: auto *q2p = GetBinValues(dataSetID,"Q2"), *xp = GetBinValues(dataSetID,"x"); auto q2 = *q2p, x = *xp; - + // Call QCDNUM const int id = 3; const int flag = 0; int Npnt = GetNpoint(dataSetID); switch ( GetDataFlav(dataSetID) ) diff --git a/reactions/BaseDISNC/include/ReactionBaseDISNC.h b/reactions/BaseDISNC/include/ReactionBaseDISNC.h index d6d455d73fabe3aca81ef64e58a557015ce852c6..d05b1461701e8f051be6e7e833f1721a268784ff 100644 --- a/reactions/BaseDISNC/include/ReactionBaseDISNC.h +++ b/reactions/BaseDISNC/include/ReactionBaseDISNC.h @@ -5,7 +5,7 @@ /** @class' ReactionBaseDISNC - @brief A wrapper class for BaseDISNC reaction + @brief A wrapper class for BaseDISNC reaction Based on the ReactionTheory class. @@ -24,20 +24,20 @@ class ReactionBaseDISNC : public ReactionTheory ReactionBaseDISNC(){}; public: virtual string getReactionName() const { return "BaseDISNC" ;}; - int initAtStart(const string &); + int atStart(const string &); virtual void setDatasetParameters( int dataSetID, map<string,string> pars, map<string,double> parsDataset) override ; //!< Initialize all EWK couplings here: - virtual void initAtIteration() override; + virtual void initAtIteration() override; virtual int compute(int dataSetID, valarray<double> &val, map<string, valarray<double> > &err) override ; protected: enum class dataType { signonred, sigred, f2, fl} ; //!< Define compute output. enum class dataFlav { incl, c, b} ; //!< Define final state. - /* - A few methods specific for DIS NC process. + /* + A few methods specific for DIS NC process. 
*/ - + virtual void F2gamma BASE_PARS ; virtual void F2gammaZ BASE_PARS ; virtual void F2Z BASE_PARS ; @@ -54,7 +54,7 @@ class ReactionBaseDISNC : public ReactionTheory virtual void xF3gammaZ BASE_PARS ; virtual void xF3Z BASE_PARS ; - + //!< compute full xF3 virtual void xF3 BASE_PARS; @@ -102,8 +102,8 @@ class ReactionBaseDISNC : public ReactionTheory map <int,valarray<double> > _f2d; //!< F2 for d-type quarks map <int,valarray<double> > _flu; //!< FL for u-type quarks map <int,valarray<double> > _fld; //!< FL for d-type quarks - map <int,valarray<double> > _xf3u; - map <int,valarray<double> > _xf3d; + map <int,valarray<double> > _xf3u; + map <int,valarray<double> > _xf3d; protected: // for integrated cross sections diff --git a/reactions/BaseDISNC/src/ReactionBaseDISNC.cc b/reactions/BaseDISNC/src/ReactionBaseDISNC.cc index 961b25f5455984ba51449aaf2901b6817224d685..eff9897a6152c0879c23283701b76d56d535cf85 100644 --- a/reactions/BaseDISNC/src/ReactionBaseDISNC.cc +++ b/reactions/BaseDISNC/src/ReactionBaseDISNC.cc @@ -1,4 +1,4 @@ - + /* @file ReactionBaseDISNC.cc @date 2017-04-08 @@ -47,7 +47,7 @@ extern "C" ReactionBaseDISNC* create() { // Initialize at the start of the computation -int ReactionBaseDISNC::initAtStart(const string &s) +int ReactionBaseDISNC::atStart(const string &s) { /// int nwords; @@ -110,20 +110,20 @@ int ReactionBaseDISNC::compute(int dataSetID, valarray<double> &valExternal, map void ReactionBaseDISNC::initAtIteration() { // Make sure to call the parent class initialization: - super::initAtIteration(); + super::initAtIteration(); _alphaem = GetParam("alphaem"); _Mz = GetParam("Mz"); _Mw = GetParam("Mw"); _sin2thetaW = GetParam("sin2thW"); - + _ve = -0.5 + 2.*_sin2thetaW; // ! _ae = -0.5; // ! _au = 0.5; _ad = -0.5; _vu = _au - (4./3.)*_sin2thetaW; _vd = _ad + (2./3.)*_sin2thetaW; - + // print (_Mz); // Re-set internal maps (faster access): @@ -134,8 +134,8 @@ void ReactionBaseDISNC::initAtIteration() { } } -// -void ReactionBaseDISNC::setDatasetParameters( int dataSetID, map<string,string> pars, map<string,double> parsDataset) +// +void ReactionBaseDISNC::setDatasetParameters( int dataSetID, map<string,string> pars, map<string,double> parsDataset) { _polarisation[dataSetID] = (parsDataset.find("epolarity") != parsDataset.end()) ? parsDataset["epolarity"] : 0; _charge[dataSetID] = (parsDataset.find("echarge") != parsDataset.end()) ? 
parsDataset["echarge"] : 0; @@ -337,21 +337,21 @@ void ReactionBaseDISNC::F2 BASE_PARS { valarray<double> f2g(_npoints[dataSetID]); F2gamma(dataSetID, f2g, err); - + valarray<double> f2gZ(_npoints[dataSetID]); F2gammaZ(dataSetID, f2gZ, err); - + valarray<double> f2Z(_npoints[dataSetID]); - F2Z(dataSetID, f2Z, err); + F2Z(dataSetID, f2Z, err); valarray<double> k(_npoints[dataSetID]); kappa(dataSetID, k); // combine together: - + double pol = GetPolarisation(dataSetID); double charge = GetCharge(dataSetID); - - val = f2g - (_ve + charge*pol*_ae)*k*f2gZ + (_ae*_ae + _ve*_ve + 2*charge*pol*_ae*_ve)*k * k * f2Z; + + val = f2g - (_ve + charge*pol*_ae)*k*f2gZ + (_ae*_ae + _ve*_ve + 2*charge*pol*_ae*_ve)*k * k * f2Z; } void ReactionBaseDISNC::FLgamma BASE_PARS @@ -380,44 +380,44 @@ void ReactionBaseDISNC::FL BASE_PARS { valarray<double> flg(_npoints[dataSetID]); FLgamma(dataSetID, flg, err); - + valarray<double> flgZ(_npoints[dataSetID]); FLgammaZ(dataSetID, flgZ, err); - + valarray<double> flZ(_npoints[dataSetID]); - FLZ(dataSetID, flZ, err); + FLZ(dataSetID, flZ, err); valarray<double> k(_npoints[dataSetID]); kappa(dataSetID, k); // combine together: - + double pol = GetPolarisation(dataSetID); double charge = GetCharge(dataSetID); - - val = flg - (_ve + charge*pol*_ae)*k*flgZ + (_ae*_ae + _ve*_ve + 2*charge*pol*_ae*_ve)*k * k * flZ; + + val = flg - (_ve + charge*pol*_ae)*k*flgZ + (_ae*_ae + _ve*_ve + 2*charge*pol*_ae*_ve)*k * k * flZ; } -void ReactionBaseDISNC::xF3gammaZ BASE_PARS +void ReactionBaseDISNC::xF3gammaZ BASE_PARS { valarray<double> xf3u, xf3d; GetxF3ud(dataSetID, xf3u, xf3d); val = 2.*(2./3. * _au * xf3u - 1./3. * _ad * xf3d) ; } -void ReactionBaseDISNC::xF3Z BASE_PARS +void ReactionBaseDISNC::xF3Z BASE_PARS { valarray<double> xf3u, xf3d; GetxF3ud(dataSetID, xf3u, xf3d); val = 2.*(_vu * _au * xf3u + _vd * _ad * xf3d) ; } -void ReactionBaseDISNC::xF3 BASE_PARS +void ReactionBaseDISNC::xF3 BASE_PARS { valarray<double> xf3gZ(_npoints[dataSetID]); xF3gammaZ(dataSetID, xf3gZ, err); - + valarray<double> xf3Z(_npoints[dataSetID]); - xF3Z(dataSetID, xf3Z, err); + xF3Z(dataSetID, xf3Z, err); valarray<double> k(_npoints[dataSetID]); kappa(dataSetID, k); @@ -447,7 +447,7 @@ void ReactionBaseDISNC::sred BASE_PARS valarray<double> yplus = 1.0+(1.0-y)*(1.0-y); valarray<double> yminus = 1.0-(1.0-y)*(1.0-y); - val = f2 - y*y/yplus*fl + (yminus/yplus)*xf3 ; + val = f2 - y*y/yplus*fl + (yminus/yplus)*xf3 ; } @@ -458,14 +458,14 @@ void ReactionBaseDISNC::GetF2ud(int dataSetID, valarray<double>& f2u, valarray<d // Get x,Q2 arrays: auto *q2p = GetBinValues(dataSetID,"Q2"), *xp = GetBinValues(dataSetID,"x"); auto q2 = *q2p, x = *xp; - + // Call QCDNUM const int id = 2; const int flag = 0; int Npnt = GetNpoint(dataSetID); switch ( GetDataFlav(dataSetID) ) { case dataFlav::incl : zmstfun_(id,CNEP2F[0], x[0], q2[0], (_f2u[dataSetID])[0], Npnt, flag); - zmstfun_(id,CNEM2F[0], x[0], q2[0], (_f2d[dataSetID])[0], Npnt, flag); + zmstfun_(id,CNEM2F[0], x[0], q2[0], (_f2d[dataSetID])[0], Npnt, flag); break ; case dataFlav::c : zmstfun_(id,CNEP2Fc[0], x[0], q2[0], (_f2u[dataSetID])[0], Npnt, flag); @@ -486,21 +486,21 @@ void ReactionBaseDISNC::GetFLud(int dataSetID, valarray<double>& flu, valarray<d // Get x,Q2 arrays: auto *q2p = GetBinValues(dataSetID,"Q2"), *xp = GetBinValues(dataSetID,"x"); auto q2 = *q2p, x = *xp; - + // Call QCDNUM const int id = 1; const int flag = 0; int Npnt = GetNpoint(dataSetID); switch ( GetDataFlav(dataSetID) ) { case dataFlav::incl : zmstfun_(id,CNEP2F[0], x[0], q2[0], 
(_flu[dataSetID])[0], Npnt, flag); - zmstfun_(id,CNEM2F[0], x[0], q2[0], (_fld[dataSetID])[0], Npnt, flag); + zmstfun_(id,CNEM2F[0], x[0], q2[0], (_fld[dataSetID])[0], Npnt, flag); break ; case dataFlav::c : zmstfun_(id,CNEP2Fc[0], x[0], q2[0], (_flu[dataSetID])[0], Npnt, flag); - break ; + break ; case dataFlav::b : - zmstfun_(id,CNEM2Fb[0], x[0], q2[0], (_fld[dataSetID])[0], Npnt, flag); - break ; + zmstfun_(id,CNEM2Fb[0], x[0], q2[0], (_fld[dataSetID])[0], Npnt, flag); + break ; } } flu = _flu[dataSetID]; @@ -515,14 +515,14 @@ void ReactionBaseDISNC::GetxF3ud( int dataSetID, valarray<double>& xf3u, valarra // Get x,Q2 arrays: auto *q2p = GetBinValues(dataSetID,"Q2"), *xp = GetBinValues(dataSetID,"x"); auto q2 = *q2p, x = *xp; - + // Call QCDNUM const int id = 3; const int flag = 0; int Npnt = GetNpoint(dataSetID); // OZ 19.10.2017 TODO: F3 is 0 in VFNS for heavy quarks? //if ( GetDataType(dataSetID) == dataType::sigred ) { if ( GetDataFlav(dataSetID) == dataFlav::incl ) { zmstfun_(id,CNEP3F[0], x[0], q2[0], (_xf3u[dataSetID])[0], Npnt, flag); - zmstfun_(id,CNEM3F[0], x[0], q2[0], (_xf3d[dataSetID])[0], Npnt, flag); + zmstfun_(id,CNEM3F[0], x[0], q2[0], (_xf3d[dataSetID])[0], Npnt, flag); } else { @@ -542,5 +542,5 @@ void ReactionBaseDISNC::kappa(int dataSetID, valarray<double>& k) { auto *q2p = GetBinValues(dataSetID,"Q2"); double cos2thetaW = 1-_sin2thetaW; - k= 1./(4*_sin2thetaW*cos2thetaW) * (*q2p)/( (*q2p)+_Mz*_Mz); + k= 1./(4*_sin2thetaW*cos2thetaW) * (*q2p)/( (*q2p)+_Mz*_Mz); } diff --git a/reactions/BaseHVQMNR/include/MNR.h b/reactions/BaseHVQMNR/include/MNR.h index d2690c4afa16c0d9c8c8547cfd268b6b6c5f93c0..462cc804185d7ff177fbff0e3b3ed6b422385f35 100644 --- a/reactions/BaseHVQMNR/include/MNR.h +++ b/reactions/BaseHVQMNR/include/MNR.h @@ -19,10 +19,10 @@ namespace MNR public: // Constructor MNR(ReactionBaseHVQMNR* ptrReactionTheory); - + // Destructor ~MNR(); - + // Set perturbative scale coefficients // // Scales are parametrised as: @@ -31,20 +31,20 @@ namespace MNR // where mu_f, mu_r are factorisation and renormalisation, respectively, // pT is transverse momentum and xm is the heavy-quark mass. 
void SetScaleCoef(double mf_a, double mf_b, double mf_c, double mr_a, double mr_b, double mr_c); - + // Set debug flag void SetDebug(int debug) { bDebug = debug; }; // Calculate constants void CalcConstants(); - + // Calculate binning void CalcBinning(); // Calculate cross sections for provided grid and heavy-quark mass xm void CalcXS(Grid* grid, double xm); - + // Private members private: // Get factorisation scale @@ -68,7 +68,7 @@ namespace MNR // Public fields public: // Centre-of-mass energy squared - double fC_sh; + double fC_sh; // Number of light flavours int fC_nl; @@ -85,7 +85,7 @@ namespace MNR // Contrbution flags bool bFS_Q; // particle final state bool bFS_A; // antiparticle final state - + // Private fields private: // Constants @@ -96,7 +96,7 @@ namespace MNR const static double fC_vtf; double fC_b0; // Centre-of-mass energy - double fC_sqrt_sh; + double fC_sqrt_sh; // Normalisation factor double fC_xnorm; // Perturbative scale coefficients @@ -127,7 +127,7 @@ namespace MNR // t3 bins (3 body variable) double* fBc_x4; double* fBw_x4; - + // Precalcuated grid variables int fNRecalc; // Pointer to all allocated memory @@ -186,7 +186,7 @@ namespace MNR // Flags bool bFirst; // first run bool bDebug; // verbose output - + // pointer to instance inherited from ReactionTheory (allow access to alphas and PDF routines) ReactionBaseHVQMNR* _reactionTheory; }; diff --git a/reactions/BaseHVQMNR/include/MNRFrag.h b/reactions/BaseHVQMNR/include/MNRFrag.h index 30bf20549727c7df7cd7b91909d85b69cdc29588..2f120ae167b211b58c801e8706bc18f238a664d7 100644 --- a/reactions/BaseHVQMNR/include/MNRFrag.h +++ b/reactions/BaseHVQMNR/include/MNRFrag.h @@ -14,7 +14,7 @@ namespace MNR // // Class for non-perturbative heavy quark to hadron fragmentation. // Kartvelishvili, Peterson and BCFY fragmentation functions are provided. - // Specific ground state mesons can be treated separately + // Specific ground state mesons can be treated separately // (e.g. for contribution to D^0 from D*+ decays). class Frag { // Public methods @@ -24,20 +24,20 @@ namespace MNR // Destructor ~Frag(); - + // Set number of z (rescaling variable) bins void SetNz(int nz); - + // Add final state with fragmentation function ffrag and mass M. // If M < 0, heavy-quark mass is used instead. void AddOut(TF1* ffrag, double M); - + // Get fragmentation function for specified final state TF1* GetFF(int f) { return fFFrag[f]; }; - + // Calculate cross sections for provided grid and heavy-quark mass xm and fill histograms hcs void CalcCS(Grid* grid, double xm, std::vector<TH2D*> hcs); - + // Set debug flag void SetDebug(bool debug) { bDebug=debug; }; @@ -63,7 +63,7 @@ namespace MNR static double kar_dch(double* x, double* p); // Peterson function [Phys.Rev. D27 (1983) 105] static double pet(double* x, double* p); - // Kartvelishvili with two parameters ("Misha-style" parametrisation, + // Kartvelishvili with two parameters ("Misha-style" parametrisation, // see DESY-THESIS-2011-033, Section 2.2 for description) static double karw(double* x, double* p); // Kartvelishvili with two parameters (rescaled) @@ -90,11 +90,11 @@ namespace MNR // ff = 20: Kartvelishvili with three W bins // If mean pointer is provided, for 1D function it is set to mean value. 
// - // WARNING: it is known feature that the fit might not converge if fragmentation depends - // on energy in parton-parton rest frame, especially if the heavy-quark mass + // WARNING: it is known feature that the fit might not converge if fragmentation depends + // on energy in parton-parton rest frame, especially if the heavy-quark mass // is released, use ff = 10 and ff = 20 with great caution! static TF1* GetFragFunction(int ff, const char* meson, double par, double* mean = 0); - + // Private methods private: // Precalculate variables for provided grid and heavy-quark mass xm @@ -102,10 +102,10 @@ namespace MNR // Clear z binning void ClearZ(); - + // Clear precalculated variables void ClearPrecalc(); - + // Public fields public: // Charm and beauty hadron masses @@ -117,20 +117,20 @@ namespace MNR static const double fM_bzero; static const double fM_bch; static const double fM_bs; - + // Private fields private: // Final states int fNout; // number of final states std::vector<TF1*> fFFrag; // fragmentation functions std::vector<double> fMh2; // hadron masses squared - + // Precalculated variables int fBnz; double* fZc; double* fZw; - std::vector<double*> fWz; - + std::vector<double*> fWz; + // Flags bool bFirst; // first run bool bDebug; // verbose output diff --git a/reactions/BaseHVQMNR/include/MNRGrid.h b/reactions/BaseHVQMNR/include/MNRGrid.h index 45f498d7e59c61f2924a23020ecd66b7d9cd6d24..2f17859d5f34e1ea79573a7fac3c2dfec43fec05 100644 --- a/reactions/BaseHVQMNR/include/MNRGrid.h +++ b/reactions/BaseHVQMNR/include/MNRGrid.h @@ -42,31 +42,31 @@ namespace MNR public: // Default constructor (one full contrbution: LO+NLO gg+q+qg) Grid(); - + // Construct with specified array of contributions Grid(int ncontr, MNRContribution** contr); - + // Destructor ~Grid(); - + // Set pT binning (n bins from minpt to maxpt, xm is heavy-quark mass) // (internally binning in L = xm^2 / (xm^2 + pT^2)) void SetL(int n, double minpt, double maxpt, double xm); - + // Set y binning (n bins from min to max) void SetY(int n, double min, double max); - + // Set W (squared energy in parton-parton rest frame), default binning - // WARNING: it is known feature that the fit might not converge if n != 1 (i.e. if fragmentation depends - // on energy in parton-parton rest frame), especially if the heavy-quark mass is released, + // WARNING: it is known feature that the fit might not converge if n != 1 (i.e. if fragmentation depends + // on energy in parton-parton rest frame), especially if the heavy-quark mass is released, // use this with great caution! void SetW(int n = 1, double min = 0.0, double max = 500.0); // Set W (squared energy in parton-parton rest frame) three bins, separated by b1 and b2 values // (corresponds to the fragmentation set-up used for charm in arXiv:1211.1182) - // WARNING: it is known feature that the fit might not converge if this option is used, + // WARNING: it is known feature that the fit might not converge if this option is used, // especially if the heavy-quark mass is released, use this with great caution! 
void SetW(double b1, double b2); - + // Get cross section (by reference) in specified bin inline double& CS(int contr, int bl, int by, int bw=0) { return fCS[contr][bl][by][bw]; }; @@ -78,7 +78,7 @@ namespace MNR // Get number of W bins inline int NW() { return fNW; }; - + // Get L binning inline double* LPtr() { return fL; }; @@ -87,13 +87,13 @@ namespace MNR // Get W binning inline double* WPtr() { return fW; }; - + // Fill array with pT values for the specified heavy-quark mass xm void FillPt(double* ptall, double xm); // Get number of contributions inline int GetNContr() { return fNContr; }; - + // Get specified contribution inline MNRContribution* GetContr(int c) { return fContr[c]; }; @@ -102,13 +102,13 @@ namespace MNR // Set cross sections in specified pT bin to non-physical values void NonPhys(int bpt); - + // Set all cross sections to zero void Zero(); // Print grid (to console) for the specified heavy-quark mass void Print(double xm); - + // Find W bin that matches the specified value w int FindWBin(double w); diff --git a/reactions/BaseHVQMNR/include/ReactionBaseHVQMNR.h b/reactions/BaseHVQMNR/include/ReactionBaseHVQMNR.h index b0550a4dce842fc8f9cb9e58bfbde73c9d2efbea..b75626275bc1bc229944ce4d063ba860da7d6ad9 100644 --- a/reactions/BaseHVQMNR/include/ReactionBaseHVQMNR.h +++ b/reactions/BaseHVQMNR/include/ReactionBaseHVQMNR.h @@ -10,11 +10,11 @@ /** @class' ReactionBaseHVQMNR - @brief A wrapper class for BaseHVQMNR reaction + @brief A wrapper class for BaseHVQMNR reaction Based on the ReactionTheory class. Reads options produces 3d cross section. - This is abstract class from which implementations of HVQMNR + This is abstract class from which implementations of HVQMNR calculations for particular datasets should be derived. @version 0.1 @@ -32,14 +32,14 @@ class ReactionBaseHVQMNR : public ReactionTheory public: virtual string getReactionName() const { return "BaseHVQMNR" ;}; - virtual int initAtStart(const string &) = 0; + virtual int atStart(const string &) = 0; virtual int compute(int dataSetID, valarray<double> &val, map<string, valarray<double> > &err) = 0; virtual void initAtIteration() = 0; virtual void setDatasetParameters(int dataSetID, map<string,string> pars, map<string,double> dsPars) override; protected: virtual int parseOptions(){ return 0;}; - -// ********** common stuff for MNR calculation ********** + +// ********** common stuff for MNR calculation ********** protected: // structure for particular dataset struct DataSet @@ -55,7 +55,7 @@ class ReactionBaseHVQMNR : public ReactionTheory std::valarray<double>* BinsYMinRef; std::valarray<double>* BinsYMaxRef; }; - + // structure to store theory parameters struct Parameters { @@ -85,7 +85,7 @@ class ReactionBaseHVQMNR : public ReactionTheory int nx4; int nbz; }; - + // all datasets std::map<int, DataSet> _dataSets; // theory parameters @@ -108,20 +108,20 @@ class ReactionBaseHVQMNR : public ReactionTheory // read and update theory parameters void UpdateParameters(); - + // print theory parameters void PrintParameters() const; // initialise calculation with default parameters void DefaultInit(const Steering& steer, const double mq, MNR::MNR& mnr, MNR::Frag& frag, MNR::Grid& grid, MNR::Grid& grid_smoothed); - + // return cross section in provided pT-y bin double FindXSecPtYBin(const TH2* histXSec, const double ymin, const double ymax, const double ptmin, const double ptmax, const bool diff_pt, const bool diff_y); - private: + private: // check equality of float numbers with tolerance bool IsEqual(const double val1, 
const double val2, const double eps = 1e-6); - + // TODO this old commented out code to be removed one day /*// read values from terminfo in format key1=value1:key2=value2:... int readFromTermInfo(const std::string& str, const std::string& key, int& value); @@ -133,6 +133,6 @@ class ReactionBaseHVQMNR : public ReactionTheory void GetMuPar(const char mu, const char q, double& A, double& B, double& C); // read fragmentation parameter from MINUIT extra parameters - double GetFragPar(const char q); + double GetFragPar(const char q); }; diff --git a/reactions/BaseHVQMNR/src/MNR.cc b/reactions/BaseHVQMNR/src/MNR.cc index 6f8fc73f8938e0db846ebe81d15837225e391a9b..c2884d2091dcb905449a60392731667ba70daab8 100644 --- a/reactions/BaseHVQMNR/src/MNR.cc +++ b/reactions/BaseHVQMNR/src/MNR.cc @@ -6,6 +6,7 @@ #include <TF1.h> // Interface to xFitter FORTRAN routines +/* extern "C" { void hf_stop_(); @@ -13,6 +14,7 @@ extern "C" //void hf_get_pdfs_(double *x, double *q2, double* pdf); //double hf_get_alphas_(double* q2); } +*/ namespace MNR { @@ -60,7 +62,7 @@ namespace MNR fNRecalc = 0; } - MNR::~MNR() + MNR::~MNR() { //printf("OZ MNR::~MNR()\n"); if(fSF_pdf) delete fSF_pdf; @@ -71,14 +73,14 @@ namespace MNR if(fBw_x4) delete fBw_x4; } - void MNR::CalcConstants() + void MNR::CalcConstants() { fC_sqrt_sh = TMath::Sqrt(fC_sh); fC_xnorm = fC_hc2 * fC_pi / fC_sh; fC_b0 = (11 * fC_vca - 4 * fC_vtf * fC_nl) / (12 * fC_pi); } - void MNR::CalcBinning() + void MNR::CalcBinning() { // PDFs in x for certain factorisation scale fSF_pdf = new double[fSF_nb * fSF_npart]; @@ -95,7 +97,7 @@ namespace MNR double minx3 = -6.0; double maxx3 = 6.0; double stepx3 = (maxx3 - minx3) / fBn_x3; - for(int b = 0; b < fBn_x3 + 1; b++) + for(int b = 0; b < fBn_x3 + 1; b++) { bb_x3[b] = minx3 + stepx3 * b; if(b == 0) continue; @@ -110,7 +112,7 @@ namespace MNR double bb_x4[fBn_x4+1]; fBc_x4 = new double[fBn_x4]; fBw_x4 = new double[fBn_x4]; - for(int b = 0; b < fBn_x4 + 1; b++) + for(int b = 0; b < fBn_x4 + 1; b++) { bb_x4[b] = 1. * b / fBn_x4; if(bb_x4[b] <= fbx4_12) bb_x4[b] = fbx4_1.Eval(bb_x4[b]); @@ -121,9 +123,9 @@ namespace MNR } } - void MNR::SetScaleCoef(double mf_a, double mf_b, double mf_c, double mr_a, double mr_b, double mr_c) + void MNR::SetScaleCoef(double mf_a, double mf_b, double mf_c, double mr_a, double mr_b, double mr_c) { - // Check for possible nan + // Check for possible nan // (it happens if corresponding parameters were not provided in steering.txt via ExtraMinimisationParameters) if(mf_a != mf_a || mf_b != mf_b || mf_c != mf_c || mr_a != mr_a || mr_b != mr_b || mr_c != mr_c) { @@ -138,38 +140,38 @@ namespace MNR fMr_C = mr_c; } - double MNR::GetMf2(double xm2, double pt2) + double MNR::GetMf2(double xm2, double pt2) { return fMf_A * pt2 + fMf_B * xm2 + fMf_C; } - double MNR::GetMr2(double xm2, double pt2) + double MNR::GetMr2(double xm2, double pt2) { return fMr_A * pt2 + fMr_B * xm2 + fMr_C; } - void MNR::PrecalculatePDF(double mf2) + void MNR::PrecalculatePDF(double mf2) { - if(mf2 < fSF_min_mf2 || mf2 > fSF_max_mf2) + if(mf2 < fSF_min_mf2 || mf2 > fSF_max_mf2) { printf("WARNING in MNR::PrecalculatePDF(): mf2 %e out of range %e .. 
%e\n", mf2, fSF_min_mf2, fSF_max_mf2); printf("PDFs are set to 0\n"); - for(int b = 0; b < fSF_nb; b++) + for(int b = 0; b < fSF_nb; b++) for(int nf = -fC_nl; nf <= fC_nl; nf++) fSF_pdf[b*fSF_npart+nf] = 0.0; return; } - for(int b = 0; b < fSF_nb; b++) + for(int b = 0; b < fSF_nb; b++) { double log10x = fSF_log10_min_x + b * fSF_step_log10_x; this->GetPDF(mf2, TMath::Power(10., log10x), fSF_pdf + fSF_npart * b); } } - int MNR::GetSF(double& pdf_gg, double& pdf_qq, double& pdf_qq_a, double& pdf_qg, double& pdf_qg_a, - double& pdf_qg_r, double& pdf_qg_a_r, double adoptx1, double adoptx2, double mf2/* = -1.0*/) + int MNR::GetSF(double& pdf_gg, double& pdf_qq, double& pdf_qq_a, double& pdf_qg, double& pdf_qg_a, + double& pdf_qg_r, double& pdf_qg_a_r, double adoptx1, double adoptx2, double mf2/* = -1.0*/) { pdf_gg = pdf_qq = pdf_qq_a = pdf_qg = pdf_qg_a = pdf_qg_r = pdf_qg_a_r = 0.0; - if(mf2 > 0.0 && (mf2 < fSF_min_mf2 || mf2 > fSF_max_mf2)) + if(mf2 > 0.0 && (mf2 < fSF_min_mf2 || mf2 > fSF_max_mf2)) { printf("WARNING in MNR::GetSF(): mf2 %e out of range %e .. %e\n", mf2, fSF_min_mf2, fSF_max_mf2); return 1; @@ -178,7 +180,7 @@ namespace MNR if(adoptx1<fSF_min_adoptx) adoptx1 = fSF_min_adoptx; if(adoptx2<fSF_min_adoptx) adoptx2 = fSF_min_adoptx; double pdf1[fSF_npart], pdf2[fSF_npart]; - if(mf2 < 0.0) + if(mf2 < 0.0) { double part1, part2; double delta1 = modf(adoptx1, &part1); @@ -189,14 +191,14 @@ namespace MNR int offset2 = int(part2) * fSF_npart + 6; int one_p_offset1 = offset1 + fSF_npart; int one_p_offset2 = offset2 + fSF_npart; - for(int nf = -fC_nl; nf <= fC_nl; nf++) + for(int nf = -fC_nl; nf <= fC_nl; nf++) { int six_p_nf = 6 + nf; pdf1[six_p_nf] = fSF_pdf[offset1+nf] * one_m_delta1 + fSF_pdf[one_p_offset1+nf] * delta1; pdf2[six_p_nf] = fSF_pdf[offset2+nf] * one_m_delta2 + fSF_pdf[one_p_offset2+nf] * delta2; } } - else + else { double x1 = TMath::Power(10.0, fSF_step_log10_x * adoptx1 + fSF_log10_min_x); double x2 = TMath::Power(10.0, fSF_step_log10_x * adoptx2 + fSF_log10_min_x); @@ -206,7 +208,7 @@ namespace MNR // Calculate gg pdf_gg = pdf1[6] * pdf2[6]; // ... and the rest - for(int nf = 1; nf <= fC_nl; nf++) + for(int nf = 1; nf <= fC_nl; nf++) { int six_p_nf = 6 + nf; int six_m_nf = 6 - nf; @@ -238,7 +240,7 @@ namespace MNR pdf[i] = pdfV[i]; } - double MNR::GetAs(double mr2) + double MNR::GetAs(double mr2) { //return hf_get_alphas_(&mr2); //return _reactionTheory->alpha_S(&mr2); @@ -246,7 +248,7 @@ namespace MNR return _reactionTheory->alphaS(q); } - void MNR::Precalc(Grid* grid) + void MNR::Precalc(Grid* grid) { fNRecalc++; printf("MNR::Precalc(): recalculation NO %d\n", fNRecalc); @@ -259,7 +261,7 @@ namespace MNR double mb = sizeof(double) * ndouble / (1024. 
* 1024.); printf("MNR::Precalc(): required %.0f MB\n", mb); - // Allocate memory in one place, because + // Allocate memory in one place, because // (1) one call to new is faster than multiple calls // (2) it is faster to access memory allocated in one place fC_mem = new double[ndouble]; @@ -356,7 +358,7 @@ namespace MNR mem_offset += fBn_x3; // Calculate x3 (binning in parton CMS rapidity) variables - for(int c_x3 = 0; c_x3 < fBn_x3; c_x3++) + for(int c_x3 = 0; c_x3 < fBn_x3; c_x3++) { double yprim = fBc_x3[c_x3]; fCk_t1t[c_x3] = 0.5 * (1 - TMath::TanH(yprim)); @@ -367,7 +369,7 @@ namespace MNR fCk_sum_lntx_o_tx = 0.0; // Calculate x4 (t3, 3 body variable) variables - for(int c_x4 = 0; c_x4 < fBn_x4; c_x4++) + for(int c_x4 = 0; c_x4 < fBn_x4; c_x4++) { double t3 = fBc_x4[c_x4]; double t32 = t3 * t3; @@ -385,12 +387,12 @@ namespace MNR } // Loop over L = xm^2 / (xm^2 + pT^2) bins - for(int c_l = 0; c_l < n_l; c_l++) + for(int c_l = 0; c_l < n_l; c_l++) { if(bDebug) if(c_l % 10 == 0) printf("MNR::Precalc(): 1st dimension: %3d from %3d\n", c_l, n_l); double l2 = p_l[c_l]; // Loop over x3 bins - for(int c_x3 = 0; c_x3 < fBn_x3; c_x3++) + for(int c_x3 = 0; c_x3 < fBn_x3; c_x3++) { double yprim = fBc_x3[c_x3]; double t1t = 0.5 * (1 - TMath::TanH(yprim)); @@ -421,7 +423,7 @@ namespace MNR fCh3c_hqhlqa[n2] = hqhlqa_(&tz, &t1t, &rot); fCh3c_a_ashpqa[n2] = ashpqa_(&tz, &t1t, &rot); // Loop over x4 bins - for(int c_x4 = 0; c_x4 < fBn_x4; c_x4++) + for(int c_x4 = 0; c_x4 < fBn_x4; c_x4++) { double t3 = fBc_x4[c_x4]; double t32 = t3 * t3; @@ -456,7 +458,7 @@ namespace MNR } } - void MNR::CalcXS(Grid* grid, double xm) + void MNR::CalcXS(Grid* grid, double xm) { // First call: precalculate variables if(bFirst) { @@ -464,13 +466,13 @@ namespace MNR bFirst = false; } grid->Zero(); - + // Check heavy-quark mass for nan if(xm != xm) { grid->NonPhys(); return; } - + int ncontr = grid->GetNContr(); double xm2 = xm * xm; int n_l = grid->NL(); @@ -479,7 +481,7 @@ namespace MNR double* p_y = grid->YPtr(); int nbw = grid->NW(); // Loop over pT (internally L) - for(int c_l = 0; c_l < n_l; c_l++) + for(int c_l = 0; c_l < n_l; c_l++) { if(bDebug) if( c_l % 10 == 0) printf("MNR::CalcXS(): 1st dimension: %3d from %3d\n", c_l, n_l); double l2 = p_l[c_l]; @@ -488,7 +490,7 @@ namespace MNR double pt = TMath::Sqrt(pt2); // Factorisation scale double mf2 = this->GetMf2(xm2, pt2); - if(mf2 < fSF_min_mf2 || mf2 > fSF_max_mf2) + if(mf2 < fSF_min_mf2 || mf2 > fSF_max_mf2) { grid->NonPhys(c_l); continue; @@ -497,7 +499,7 @@ namespace MNR this->PrecalculatePDF(mf2); // Renormalisation scale double mr2 = this->GetMr2(xm2, pt2); - if(mr2 <= 0.0) + if(mr2 <= 0.0) { grid->NonPhys(c_l); continue; @@ -510,11 +512,11 @@ namespace MNR double xmf = TMath::Log(mf2 / xm2); double xmr = 4 * fC_pi * fC_b0 * TMath::Log(mr2 / mf2); // Loop over rapidity - for(int c_y = 0; c_y < n_y; c_y++) + for(int c_y = 0; c_y < n_y; c_y++) { double y = p_y[c_y]; // Loop over rapidity in parton CMS - for(int c_x3 = 0; c_x3 < fBn_x3; c_x3++) + for(int c_x3 = 0; c_x3 < fBn_x3; c_x3++) { double yprim = fBc_x3[c_x3]; double chyprim2 = fCk_chyprim2[c_x3]; @@ -541,18 +543,18 @@ namespace MNR double w_lo_qq = N * fCh0_hqh0qa[n2] * pdf_qq_t; // X-section int bw_t = 0; - if(nbw != 1) + if(nbw != 1) { double what_t = taut * fC_sh - 4 * xm2; bw_t = grid->FindWBin(what_t); } // Add all LO contributions - for(int c = 0; c < ncontr; c++) + for(int c = 0; c < ncontr; c++) { double& cs = grid->CS(c, c_l, c_y, bw_t); MNRContribution* contr = grid->GetContr(c); 
if(contr->fActive == 0) continue; - if(contr->fLO) + if(contr->fLO) { if(contr->fgg) cs += w_lo_gg; if(contr->fqq) cs += w_lo_qq; @@ -572,7 +574,7 @@ namespace MNR if(bFS_A&&!bFS_Q) pdf_qq_a_t *= -1; double w_nlo_qq_c = NN * ((me_qq_h2 - me_qq_h3c) * pdf_qq_t + (me_qq_h2_a - me_qq_h3c_a) * pdf_qq_a_t); // X-section - for(int c = 0; c < ncontr; c++) + for(int c = 0; c < ncontr; c++) { double& cs_c = grid->CS(c, c_l, c_y, bw_t); MNRContribution* contr = grid->GetContr(c); @@ -583,7 +585,7 @@ namespace MNR } } // Loop over t3 (3 body variable) - for(int c_x4 = fBn_x4 - 1; c_x4 >= 0; c_x4--) + for(int c_x4 = fBn_x4 - 1; c_x4 >= 0; c_x4--) { double px1 = px1t - fCk_pxtcor[c_x4]; double px2 = px2t - fCk_pxtcor[c_x4]; @@ -607,7 +609,7 @@ namespace MNR double me_qg_h3_r = kinok ? 0.0 : (fCh3_r_hqhpqg[n3] + xmf * fCh3_r_hqbpqg[n3]) / tx + fCh3_r_hqhlqg[n3] * lntx_o_tx; double me_qg_h3_a = (kinok || (bFS_Q && bFS_A)) ? 0.0 : fCh3_a_ashpqg[n3] / tx; double me_qg_h3_a_r = (kinok || (bFS_Q && bFS_A)) ? 0.0 : fCh3_a_r_ashpqg[n3] / tx; - if(bFS_A && !bFS_Q) + if(bFS_A && !bFS_Q) { pdf_qg_a *= -1; pdf_qg_a_r *= -1; @@ -616,14 +618,14 @@ namespace MNR // X-section int bw = 0; // Determine W bins, if cross section in mupltiple W bins is needed - if(nbw != 1) + if(nbw != 1) { double tau = taut / t32; double what = tau * fC_sh - 4 * xm2; bw = grid->FindWBin(what); } // Add all NLO contributions - for(int c = 0; c < ncontr; c++) + for(int c = 0; c < ncontr; c++) { double& cs = grid->CS(c, c_l, c_y, bw); MNRContribution* contr = grid->GetContr(c); @@ -647,7 +649,7 @@ namespace MNR for(int bw = 0; bw < n_w; bw++) grid->CS(c, bl, by, bw) *= 2; } - + // Constants const double MNR::fC_pi = 3.14159265359e0; const double MNR::fC_2pi = 6.28318530718e0; diff --git a/reactions/BaseHVQMNR/src/MNRFrag.cc b/reactions/BaseHVQMNR/src/MNRFrag.cc index 1ded5423b3a94a7668caf055c86177b7671244bf..b4923917cd1bb030b3bca1150080d281fbb2cc39 100644 --- a/reactions/BaseHVQMNR/src/MNRFrag.cc +++ b/reactions/BaseHVQMNR/src/MNRFrag.cc @@ -32,7 +32,7 @@ namespace MNR fNRecalc = 0; } - Frag::~Frag() + Frag::~Frag() { //printf("OZ Frag::~Frag()\n"); this->ClearZ(); @@ -40,15 +40,15 @@ namespace MNR for(unsigned int f = 0; f < fFFrag.size(); f++) if(fFFrag[f]) delete fFFrag[f]; } - void Frag::ClearZ() + void Frag::ClearZ() { - if(fBnz) + if(fBnz) { delete fZc; fZc = NULL; delete fZw; fZw = NULL; - for(int f = 0; f < fNout; f++) + for(int f = 0; f < fNout; f++) { delete fWz[f]; fWz[f] = NULL; @@ -57,9 +57,9 @@ namespace MNR fBnz = 0; } - void Frag::ClearPrecalc() + void Frag::ClearPrecalc() { - if(fCGnpt && fCGny) + if(fCGnpt && fCGny) { delete fCGpt; fCGpt = NULL; @@ -71,7 +71,7 @@ namespace MNR fCGptf[bpt] = NULL; for(int by = 0; by < fCGny; by++) { - for(int bz = 0; bz < fBnz; bz++) + for(int bz = 0; bz < fBnz; bz++) { delete fCGyf[bpt][by][bz]; fCGyf[bpt][by][bz] = NULL; @@ -90,9 +90,9 @@ namespace MNR fCGnpt = fCGny = 0; } - void Frag::SetNz(int nz) + void Frag::SetNz(int nz) { - if(nz < 1) + if(nz < 1) { char str[256]; sprintf(str, "F: ERROR in Frag::SetNz(): nz %d < 1\n", nz); @@ -104,7 +104,7 @@ namespace MNR fZw = new double[fBnz]; double zprev, znext; zprev = 0.0; - for(int bz = 0; bz < fBnz+1; bz++) + for(int bz = 0; bz < fBnz+1; bz++) { znext = 1. 
* bz / fBnz; znext = TMath::Power(znext, 0.75); @@ -115,9 +115,9 @@ namespace MNR } } - void Frag::AddOut(TF1* ffrag, double M) + void Frag::AddOut(TF1* ffrag, double M) { - if(!fBnz) + if(!fBnz) { std::string str = "F: ERROR in Frag::AddOut(): first call Frag::SetNz()\n"; hf_errlog_(16123010, str.c_str(), str.length()); @@ -130,7 +130,7 @@ namespace MNR fNout++; } - void Frag::Precalc(Grid* grid, double xm) + void Frag::Precalc(Grid* grid, double xm) { fNRecalc++; // Fast variables @@ -145,19 +145,19 @@ namespace MNR fCGyf = new double***[fCGnpt]; double xm2 = xm * xm; double M2[fNout]; - for(int f = 0; f < fNout; f++) + for(int f = 0; f < fNout; f++) { M2[f] = fMh2[f]; if(fMh2[f] < 0.0) M2[f]=xm2; } double shy[fCGny]; - for(int by = 0; by < fCGny; by++) + for(int by = 0; by < fCGny; by++) { fCGy[by] = p_y[by]; shy[by] = TMath::SinH(fCGy[by]); } // Loop over pT (internally L) - for(int bpt = 0; bpt < fCGnpt; bpt++) + for(int bpt = 0; bpt < fCGnpt; bpt++) { double l2 = p_l[bpt]; double mt2 = xm2 / l2; @@ -168,18 +168,18 @@ namespace MNR fCGptf[bpt] = new double[fBnz]; fCGyf[bpt] = new double**[fCGny]; // Loop over y - for(int by = 0; by < fCGny; by++) + for(int by = 0; by < fCGny; by++) { double pl = mt * shy[by]; fCGyf[bpt][by] = new double*[fBnz]; // Loop over z - for(int bz = 0; bz < fBnz; bz++) + for(int bz = 0; bz < fBnz; bz++) { if(by == 0) fCGptf[bpt][bz] = fCGpt[bpt] * fZc[bz]; fCGyf[bpt][by][bz] = new double[fNout]; double plf = pl * fZc[bz]; // Loop over final states - for(int f = 0; f < fNout; f++) + for(int f = 0; f < fNout; f++) { double Mt = TMath::Sqrt(M2[f] + fCGptf[bpt][bz] * fCGptf[bpt][bz]); fCGyf[bpt][by][bz][f] = TMath::ASinH(plf / Mt); @@ -189,16 +189,16 @@ namespace MNR } } - void Frag::CalcCS(Grid* grid, double xm, std::vector<TH2D*> hcs) + void Frag::CalcCS(Grid* grid, double xm, std::vector<TH2D*> hcs) { // If it is first run or heavy-quark mass changed, recalculation is needed - if(bFirst || fLastxm != xm) + if(bFirst || fLastxm != xm) { this->Precalc(grid, xm); fLastxm = xm; } if(bFirst) bFirst=0; - + // Prepare output histograms and fast variables int ncontr = grid->GetNContr(); MNRContribution* contr[ncontr]; @@ -206,10 +206,10 @@ namespace MNR double* p_w = grid->WPtr(); double wgrid[ncontr][fCGnpt][fCGny][nw]; double* harray[ncontr][fNout]; - for(int c = 0; c < ncontr; c++) + for(int c = 0; c < ncontr; c++) { contr[c] = grid->GetContr(c); - for(int f = 0; f < fNout; f++) + for(int f = 0; f < fNout; f++) { TH2D* h = hcs[f * ncontr + c]; h->Reset(); @@ -220,19 +220,19 @@ namespace MNR for(int bw = 0; bw < nw; bw++) wgrid[c][bpt][by][bw] = grid->CS(c, bpt, by, bw); } - + // Check heavy-quark mass for nan if(xm != xm) return; double wfz[fNout][fBnz][nw]; - for(int f = 0; f < fNout; f++) + for(int f = 0; f < fNout; f++) { // Parton level if(fFFrag[f] == 0) continue; // 1D fragmentation function - if(fFFrag[f]->ClassName() == TString("TF1")) + if(fFFrag[f]->ClassName() == TString("TF1")) { double norm = 0.0; - for(int bz = 0; bz < fBnz; bz++) + for(int bz = 0; bz < fBnz; bz++) { double z = fZc[bz]; double wz = fZw[bz] * fFFrag[f]->Eval(z); @@ -244,13 +244,13 @@ namespace MNR wfz[f][bz][bw] /= norm; } // 2D fragmentation function - else if(fFFrag[f]->ClassName() == TString("TF2")) + else if(fFFrag[f]->ClassName() == TString("TF2")) { - for(int bw = 0; bw < nw; bw++) + for(int bw = 0; bw < nw; bw++) { double w = p_w[bw]; double norm = 0.0; - for(int bz = 0; bz < fBnz; bz++) + for(int bz = 0; bz < fBnz; bz++) { double z = fZc[bz]; double zw = fZw[bz]; @@ -260,7 
+260,7 @@ namespace MNR for(int bz = 0; bz < fBnz; bz++) wfz[f][bz][bw] /= norm; } } - else + else { char str[256]; sprintf(str, "F: ERROR in Frag::CalcCS(): ff[%d] does not belong to TF1 or TF2\n", f); @@ -278,7 +278,7 @@ namespace MNR double pt2 = fCGpt[bptn]; double pt_w = pt2 - pt1; // Loop over y - for(int by = 0; by < fCGny - 1; by++) + for(int by = 0; by < fCGny - 1; by++) { int byn = by + 1; double y1 = fCGy[by]; @@ -286,13 +286,13 @@ namespace MNR double y_w = y2 - y1; double pty_w = pt_w * y_w; // Loop over z - for(int bz = 0; bz < fBnz; bz++) + for(int bz = 0; bz < fBnz; bz++) { // Loop over final states - for(int f = 0; f < fNout; f++) + for(int f = 0; f < fNout; f++) { double wf, pth1, pth2, yh11, yh12, yh21, yh22; - if(!fFFrag[f]) + if(!fFFrag[f]) { // parton level: no rescaling if(bz != 0) continue; wf = 1.0; @@ -301,7 +301,7 @@ namespace MNR yh11 = yh21 = y1; yh12 = yh22 = y2; } - else + else { wf = 0.0; pth1 = fCGptf[bpt][bz]; @@ -312,7 +312,7 @@ namespace MNR yh22 = fCGyf[bptn][byn][bz][f]; } // Loop over contributions - for(int c = 0; c < ncontr; c++) + for(int c = 0; c < ncontr; c++) { if(contr[c]->fActive == 0) continue; int offset = f * ncontr + c; @@ -326,7 +326,7 @@ namespace MNR int baynb = ay->GetNbins(); double xw = pth2 - pth1; // Loop over hadron pT bins - for(int bax = bax1; bax <= bax2; bax++) + for(int bax = bax1; bax <= bax2; bax++) { double xl = pth1; double xh = pth2; @@ -345,7 +345,7 @@ namespace MNR int bay2 = ay->FindBin(yh2); if(bay2 == 0 || bay1 == baynb + 1) continue; // Loop over hadron y bins - for(int bay = bay1; bay <= bay2; bay++) + for(int bay = bay1; bay <= bay2; bay++) { double yl = yh1; double yh = yh2; @@ -357,13 +357,13 @@ namespace MNR double dyr = (yh2 - yc) * ywcoef; int binxy = h->GetBin(bax, bay); // Loop over W - for(int bw = 0; bw < nw; bw++) + for(int bw = 0; bw < nw; bw++) { if(fFFrag[f]) wf = wfz[f][bz][bw]; // not parton level // Smooth final contrbution as linear 2D function - double w_parton = pty_w * (wgrid[c][bpt][by][bw] * dxr * dyr + - wgrid[c][bpt][byn][bw] * dxr * dy + - wgrid[c][bptn][by][bw] * dx * dyr + + double w_parton = pty_w * (wgrid[c][bpt][by][bw] * dxr * dyr + + wgrid[c][bpt][byn][bw] * dxr * dy + + wgrid[c][bptn][by][bw] * dx * dyr + wgrid[c][bptn][byn][bw] * dx * dy); harray[c][f][binxy] += w_parton * wf; } // W @@ -383,7 +383,7 @@ namespace MNR int f2 = f % 10; if(f1 == 0) { - if(f2 == 0) + if(f2 == 0) { // Kartvelishvili if(TString(meson) == TString("dzero")) f_meson = new TF1("f_kar_dzero", Frag::kar_dzero, 0., 1., 2); else if(TString(meson) == TString("dch")) f_meson = new TF1("f_kar_dch", Frag::kar_dch, 0., 1., 2); @@ -395,30 +395,30 @@ namespace MNR else if(TString(meson)==TString("dch")) f_meson = new TF1("f_bcfy_dch", Frag::bcfy_dch, 0., 1., 2); else f_meson = new TF1("f_bcfy", Frag::bcfy_v, 0., 1., 2); } - else if(f2==2) + else if(f2==2) { // Peterson f_meson = new TF1("f_pet", Frag::pet, 0., 1., 2); } } - else if(f1 == 1) + else if(f1 == 1) { - if(f2 == 0) + if(f2 == 0) { // Kartvelishvili "Misha-style" if(TString(meson) == TString("dzero")) f_meson = new TF2("f_karw_dzero", Frag::karw_dzero, 0., 1., 0., 10000., 3); else if(TString(meson) == TString("dch")) f_meson = new TF2("f_karw_dch", Frag::karw_dch, 0., 1., 0., 10000., 3); else f_meson = new TF2("f_karw", Frag::karw, 0., 1., 0., 10000., 3); } } - else if(f1 == 2) + else if(f1 == 2) { - if(f2 == 0) + if(f2 == 0) { // Kartvelishvili step if(TString(meson) == TString("dzero")) f_meson = new TF2("f_karstep_dzero", Frag::karstep_dzero, 0., 1., 0., 
10000., 3); else if(TString(meson) == TString("dch")) f_meson = new TF2("f_karstep_dch", Frag::karstep_dch, 0., 1., 0., 10000., 3); else f_meson = new TF2("f_karstep", Frag::karstep, 0., 1., 0., 10000., 3); } } - if(!f_meson) + if(!f_meson) { char str[256]; sprintf(str, "F: ERROR in Frag::GetFragFunction(): unknown f %d\n", f); @@ -435,56 +435,56 @@ namespace MNR } return f_meson; } - - double Frag::bcfy_v(double* x, double* p) + + double Frag::bcfy_v(double* x, double* p) { - return 3.0 * p[0] * (p[1] * x[0] * TMath::Power((1. - x[0]), 2.) * TMath::Power((1. - (1. - p[1]) * x[0]), -6.) * - (2. - 2. * (3. - 2 * p[1]) * x[0] + 3. * (3. - 2. * p[1] + 4. * p[1] * p[1]) * x[0] * x[0] - 2. * (1. - p[1]) * - (4. - p[1] + 2 * p[1] * p[1]) * x[0] * x[0] * x[0] + (1. - p[1]) * (1. - p[1]) * (3. - 2. * p[1] + + return 3.0 * p[0] * (p[1] * x[0] * TMath::Power((1. - x[0]), 2.) * TMath::Power((1. - (1. - p[1]) * x[0]), -6.) * + (2. - 2. * (3. - 2 * p[1]) * x[0] + 3. * (3. - 2. * p[1] + 4. * p[1] * p[1]) * x[0] * x[0] - 2. * (1. - p[1]) * + (4. - p[1] + 2 * p[1] * p[1]) * x[0] * x[0] * x[0] + (1. - p[1]) * (1. - p[1]) * (3. - 2. * p[1] + 2. * p[1] * p[1]) * x[0] * x[0] * x[0] * x[0])); } - double Frag::bcfy_v_prim(double* x, double* p) + double Frag::bcfy_v_prim(double* x, double* p) { if(x[0] > (fM_dzero / fM_dstar)) return 0.; double newx = fM_dstar /fM_dzero * x[0]; return (fM_dstar / fM_dzero) * bcfy_v(&newx, p); } - double Frag::bcfy_p(double* x, double* p) + double Frag::bcfy_p(double* x, double* p) { - return p[0] * (p[1] *x[0] * TMath::Power((1. - x[0]), 2.) * TMath::Power((1. - (1. - p[1]) * x[0]), -6.) * - (6. - 18. * (1. - 2 * p[1]) * x[0] + (21. - 74. * p[1] + 68. * p[1] * p[1]) * x[0] * x[0] - 2. * - (1. - p[1]) * (6. - 19. * p[1] + 18. * p[1] *p[1]) * x[0] * x[0] * x[0] + 3. * (1. - p[1]) * (1. - p[1]) * + return p[0] * (p[1] *x[0] * TMath::Power((1. - x[0]), 2.) * TMath::Power((1. - (1. - p[1]) * x[0]), -6.) * + (6. - 18. * (1. - 2 * p[1]) * x[0] + (21. - 74. * p[1] + 68. * p[1] * p[1]) * x[0] * x[0] - 2. * + (1. - p[1]) * (6. - 19. * p[1] + 18. * p[1] *p[1]) * x[0] * x[0] * x[0] + 3. * (1. - p[1]) * (1. - p[1]) * (1. - 2. * p[1] + 2. 
* p[1] * p[1]) * x[0] * x[0] * x[0] * x[0])); } - double Frag::bcfy_dzero(double* x, double* p) + double Frag::bcfy_dzero(double* x, double* p) { return p[0] * (0.168 * bcfy_p(x, p) + 0.390 * bcfy_v_prim(x, p)); } - double Frag::kar_dzero(double* x, double* p) + double Frag::kar_dzero(double* x, double* p) { return p[0] * (0.168 * kar(x, p) + 0.390 * kar_prim(x, p)); } - double Frag::karw_dzero(double* x, double* p) + double Frag::karw_dzero(double* x, double* p) { return p[0] * (0.168 * karw(x, p) + 0.390 * karw_prim(x, p)); } - double Frag::karstep_dzero(double* x, double* p) + double Frag::karstep_dzero(double* x, double* p) { return p[0] * (0.168 * karstep(x, p) + 0.390 * karstep_prim(x, p)); } - double Frag::bcfy_dch(double* x, double* p) + double Frag::bcfy_dch(double* x, double* p) { return p[0] * (0.162 * bcfy_p(x, p) + 0.07153 * bcfy_v_prim(x, p)); } - double Frag::kar_dch(double* x, double* p) + double Frag::kar_dch(double* x, double* p) { return p[0] * (0.162 * kar(x, p) + 0.07153 * kar_prim(x, p)); } @@ -494,24 +494,24 @@ namespace MNR return p[0] * (0.162 * karw(x, p) + 0.07153 * karw_prim(x,p)); } - double Frag::karstep_dch(double* x, double* p) + double Frag::karstep_dch(double* x, double* p) { return p[0] * (0.162 * karstep(x, p) + 0.07153 * karstep_prim(x, p)); } - double Frag::kar(double* x, double* p) + double Frag::kar(double* x, double* p) { return p[0] * TMath::Power(x[0], p[1]) * (1 - x[0]); } - double Frag::kar_prim(double* x, double* p) + double Frag::kar_prim(double* x, double* p) { if(x[0] > (fM_dzero / fM_dstar)) return 0.; double newx = fM_dstar / fM_dzero * x[0]; return (fM_dstar / fM_dzero) * kar(&newx, p); } - double Frag::karw(double* x, double* p) + double Frag::karw(double* x, double* p) { double alpha = p[1] + p[2] / x[1]; // prevent very hard form (may lead to numerical problems) @@ -522,7 +522,7 @@ namespace MNR return kar(x, newp); } - double Frag::karw_prim(double* x, double* p) + double Frag::karw_prim(double* x, double* p) { if(x[0] > (fM_dzero / fM_dstar)) return 0.; double newx[2] = { fM_dstar / fM_dzero * x[0], x[1] }; diff --git a/reactions/BaseHVQMNR/src/MNRGrid.cc b/reactions/BaseHVQMNR/src/MNRGrid.cc index e377b3df763cd157c4fc4ad79fb0822851fffac2..839e682937eefe96b5f1e50b41777e849cc92dc3 100644 --- a/reactions/BaseHVQMNR/src/MNRGrid.cc +++ b/reactions/BaseHVQMNR/src/MNRGrid.cc @@ -5,7 +5,7 @@ namespace MNR { - Grid::Grid() + Grid::Grid() { fNL = 0; fNY = 0; @@ -22,7 +22,7 @@ namespace MNR for(int i = 0; i < fNContr; i++) fCS[i] = NULL; } - Grid::Grid(int ncontr, MNRContribution** contr) + Grid::Grid(int ncontr, MNRContribution** contr) { fNL = 0; fNY = 0; @@ -39,7 +39,7 @@ namespace MNR for(int i = 0; i < fNContr; i++) fCS[i] = NULL; } - Grid::~Grid() + Grid::~Grid() { //printf("OZ Grid::~Grid()\n"); if(fL) delete fL; @@ -47,11 +47,11 @@ namespace MNR if(fW) delete fW; if(fBW) delete fBW; if(fCS) { - for(int c = 0; c < fNContr; c++) + for(int c = 0; c < fNContr; c++) { - for(int i = 0; i < fNL; i++) + for(int i = 0; i < fNL; i++) { - for(int j = 0; j < fNY; j++) + for(int j = 0; j < fNY; j++) { delete fCS[c][i][j]; } @@ -61,7 +61,7 @@ namespace MNR } delete fCS; } - if(fContr) + if(fContr) { for(int i = 0; i < fNContr; i++) { @@ -76,7 +76,7 @@ namespace MNR for(int i = 0; i < fNL; i++) this->NonPhys(i); } - void Grid::NonPhys(int bpt) + void Grid::NonPhys(int bpt) { for(int i = 0; i < fNY; i++) for(int j = 0; j < fNW; j++) @@ -84,7 +84,7 @@ namespace MNR this->CS(k, bpt, i, j) = -1.0; // negative non-physical value } - void Grid::Zero() + 
void Grid::Zero() { for(int bpt = 0; bpt < fNL; bpt++) for(int i = 0; i < fNY; i++) @@ -93,7 +93,7 @@ namespace MNR this->CS(k, bpt, i, j) = 0; } - void Grid::SetL(int n, double minpt, double maxpt, double xm) + void Grid::SetL(int n, double minpt, double maxpt, double xm) { double power = 0.25; fNL = n; @@ -103,14 +103,14 @@ namespace MNR double minpower = TMath::Power(minpt,power); double maxpower = TMath::Power(maxpt,power); double steppower = (maxpower - minpower) / fNL; - for(int i = 0; i < fNL; i++) + for(int i = 0; i < fNL; i++) { double pt = TMath::Power(minpower + i * steppower, 1.0 / power); fL[i] = xm2 / (xm2 + pt * pt); } } - void Grid::FillPt(double* ptall, double xm) + void Grid::FillPt(double* ptall, double xm) { double xm2 = xm * xm; for(int i = 0; i < fNL; i++) ptall[i] = TMath::Sqrt(xm2 / fL[i] - xm2); @@ -132,14 +132,14 @@ namespace MNR fY = new double[fNY]; double step = (max - min) / (n - 1); for(int i = 0; i < n; i++) fY[i] = min + step * i; - for(int c = 0; c < fNContr; c++) + for(int c = 0; c < fNContr; c++) { if(fCS[c]) { for(int i = 0; i < fNL; i++) if(fCS[c][i]) delete fCS[c][i]; delete fCS[c]; } fCS[c] = new double**[fNL]; - for(int i = 0; i < fNL; i++) + for(int i = 0; i < fNL; i++) { fCS[c][i] = new double*[fNY]; for(int j = 0; j < fNY; j++) fCS[c][i][j] = NULL; @@ -147,9 +147,9 @@ namespace MNR } } - void Grid::SetW(int n, double min/* = 0.0*/, double max/* = 500.0*/) + void Grid::SetW(int n, double min/* = 0.0*/, double max/* = 500.0*/) { - if(!fNY || !fY) + if(!fNY || !fY) { std::string str = "F: ERROR in Grid::SetW(): first call Grid::SetY(), then Grid::SetW()\n"; hf_errlog_(16123010, str.c_str(), str.length()); @@ -175,7 +175,7 @@ namespace MNR } } - void Grid::SetW(double b1, double b2) + void Grid::SetW(double b1, double b2) { if(!fNY || !fY) { @@ -202,18 +202,18 @@ namespace MNR fCS[c][i][j] = new double[fNW]; } } - + int Grid::FindWBin(double w) { - for(int i = 0; i < fNW; i++) + for(int i = 0; i < fNW; i++) if(w < fBW[i+1] && w > fBW[i]) return i; return fNW - 1; } - void Grid::Print(double xm) + void Grid::Print(double xm) { double xm2 = xm * xm; - for(int c = 0; c < fNContr; c++) + for(int c = 0; c < fNContr; c++) { for(int bpt = 0; bpt < fNL; bpt++) { @@ -240,14 +240,14 @@ namespace MNR int nlorig = gridorig->NL(); double* lorig = gridorig->LPtr(); double spline_x[nlorig], spline_y[nlorig]; - for(int i = 0; i < nlorig; i++) + for(int i = 0; i < nlorig; i++) spline_x[nlorig-1-i] = lorig[i]; // Loop over contributions - for(int c = 0; c < gridorig->GetNContr(); c++) + for(int c = 0; c < gridorig->GetNContr(); c++) // Loop over y bins - for(int y = 0; y < gridorig->NY(); y++) + for(int y = 0; y < gridorig->NY(); y++) // Loop over W bins - for(int w = 0; w < gridorig->NW(); w++) + for(int w = 0; w < gridorig->NW(); w++) { // For spline: prepare X-section array of original grid in reversed order for(int l = 0; l < nlorig; l++) spline_y[nlorig-1-l] = gridorig->CS(c,l,y,w); diff --git a/reactions/FFABM_DISCC/include/ReactionFFABM_DISCC.h b/reactions/FFABM_DISCC/include/ReactionFFABM_DISCC.h index b0888e4e3747b7ef92949f236a065eb3b9ea3369..4f375590f0b29d491016d81c16b8b0f01cab10e8 100644 --- a/reactions/FFABM_DISCC/include/ReactionFFABM_DISCC.h +++ b/reactions/FFABM_DISCC/include/ReactionFFABM_DISCC.h @@ -6,7 +6,7 @@ /** @class' ReactionFFABM_DISCC - @brief A wrapper class for FFABM_DISCC reaction + @brief A wrapper class for FFABM_DISCC reaction Based on the ReactionTheory class. Reads options produces 3d cross section. 
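Aside: the Grid::SetL and Grid::FillPt hunks above parametrise the pT grid through the variable L = m^2 / (m^2 + pT^2) and then invert it to recover pT. A minimal standalone sketch of that mapping, with hypothetical helper names that are not part of this patch:

    #include <cmath>
    #include <cstdio>

    // Forward map used in Grid::SetL: pT -> L = m^2 / (m^2 + pT^2)
    double ptToL(double pt, double m) { return m * m / (m * m + pt * pt); }

    // Inverse map used in Grid::FillPt: L -> pT = sqrt(m^2 / L - m^2)
    double lToPt(double L, double m) { return std::sqrt(m * m / L - m * m); }

    int main() {
      const double m = 4.75;  // heavy-quark mass in GeV, illustrative value only
      for (double pt = 1.0; pt <= 16.0; pt += 5.0) {
        double L = ptToL(pt, m);
        std::printf("pt=%6.2f  L=%8.5f  pt(back)=%6.2f\n", pt, L, lToPt(L, m));
      }
      return 0;
    }

Round-tripping a few pT values this way is a quick consistency check of the grid variable used in the interpolation code above.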
@@ -21,7 +21,7 @@ class ReactionFFABM_DISCC : public ReactionBaseDISCC public: ReactionFFABM_DISCC(){}; virtual string getReactionName() const { return "FFABM_DISCC" ;}; - int initAtStart(const string &); + int atStart(const string &); virtual void setDatasetParameters( int dataSetID, map<string,string> pars, map<string,double> parsDataset) override ; virtual void initAtIteration() override; diff --git a/reactions/FFABM_DISCC/src/ReactionFFABM_DISCC.cc b/reactions/FFABM_DISCC/src/ReactionFFABM_DISCC.cc index 334cd8645d4807a37ae8ea26c5249365fe4c33f8..7fbfc7055183de0aaff1ed556a34ad4f188866a0 100644 --- a/reactions/FFABM_DISCC/src/ReactionFFABM_DISCC.cc +++ b/reactions/FFABM_DISCC/src/ReactionFFABM_DISCC.cc @@ -1,4 +1,4 @@ - + /* @file ReactionFFABM_DISCC.cc @date 2017-10-09 @@ -47,9 +47,9 @@ extern "C" { // Initialize at the start of the computation -int ReactionFFABM_DISCC::initAtStart(const string &s) +int ReactionFFABM_DISCC::atStart(const string &s) { - int isout = Super::initAtStart(s); + int isout = Super::atStart(s); // scales mu^2 = scalea1 * Q^2 + scaleb1 * 4*m_h^2 (default scalea1 = scaleb1 = 1.0) double hqscale1in = 1.0; diff --git a/reactions/FFABM_DISNC/include/ReactionFFABM_DISNC.h b/reactions/FFABM_DISNC/include/ReactionFFABM_DISNC.h index c81ded02c3c3b7236374d61ef6277381ef2a5da6..707eaf795d49bdea0e80d067ef9f7443032600b3 100644 --- a/reactions/FFABM_DISNC/include/ReactionFFABM_DISNC.h +++ b/reactions/FFABM_DISNC/include/ReactionFFABM_DISNC.h @@ -6,7 +6,7 @@ /** @class' ReactionFFABM_DISNC - @brief A wrapper class for FFABM_DISNC reaction + @brief A wrapper class for FFABM_DISNC reaction Based on the ReactionTheory class. Reads options produces 3d cross section. @@ -22,7 +22,7 @@ class ReactionFFABM_DISNC : public ReactionBaseDISNC ReactionFFABM_DISNC(){}; public: virtual string getReactionName() const { return "FFABM_DISNC" ;}; - int initAtStart(const string &); + int atStart(const string &); virtual void setDatasetParameters( int dataSetID, map<string,string> pars, map<string,double> parsDataset) override ; virtual void initAtIteration() override; diff --git a/reactions/FFABM_DISNC/src/ReactionFFABM_DISNC.cc b/reactions/FFABM_DISNC/src/ReactionFFABM_DISNC.cc index 6c8210104db4e92ea8f41291ee161211252b610a..f4c6f722bc45d3c693f2e49798990326b292a39b 100644 --- a/reactions/FFABM_DISNC/src/ReactionFFABM_DISNC.cc +++ b/reactions/FFABM_DISNC/src/ReactionFFABM_DISNC.cc @@ -1,4 +1,4 @@ - + /* @file ReactionFFABM_DISNC.cc @date 2017-09-29 @@ -43,9 +43,9 @@ extern "C" { // Initialize at the start of the computation -int ReactionFFABM_DISNC::initAtStart(const string &s) +int ReactionFFABM_DISNC::atStart(const string &s) { - //int isout = Super::initAtStart(s); + //int isout = Super::atStart(s); int isout = 0; // scales mu^2 = scalea1 * Q^2 + scaleb1 * 4*m_h^2 (default scalea1 = scaleb1 = 1.0) diff --git a/reactions/FONLL_DISCC/include/ReactionFONLL_DISCC.h b/reactions/FONLL_DISCC/include/ReactionFONLL_DISCC.h index 75072730637aea9c8fb48b21d7e4b26aba8f32b3..24f6af20f0e77b6818727484cf8559455de5baa3 100644 --- a/reactions/FONLL_DISCC/include/ReactionFONLL_DISCC.h +++ b/reactions/FONLL_DISCC/include/ReactionFONLL_DISCC.h @@ -5,7 +5,7 @@ /* * @class' ReactionFONLL_DISCC * - * @brief A wrapper class for FONLL_DISCC reaction + * @brief A wrapper class for FONLL_DISCC reaction * * Based on the ReactionTheory class. Reads options produces 3d cross section. 
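Aside: many hunks in this patch are the mechanical initAtStart -> atStart interface rename, where a derived reaction chains to its base class via Super::atStart(s). A minimal sketch of that override pattern, assuming hypothetical class names that are not part of this patch:

    #include <string>
    #include <iostream>

    // Hypothetical stand-ins for a ReactionBaseDISCC-style base/derived pair.
    class BaseReaction {
    public:
      virtual ~BaseReaction() = default;
      // one-time initialisation hook, renamed from initAtStart in this patch
      virtual int atStart(const std::string&) { return 0; }
    };

    class DerivedReaction : public BaseReaction {
      using Super = BaseReaction;
    public:
      int atStart(const std::string& s) override {
        int isout = Super::atStart(s);  // chain to base-class initialisation first
        // reaction-specific setup would go here
        return isout;                   // 0 signals success, as in the patched code
      }
    };

    int main() {
      DerivedReaction r;
      std::cout << r.atStart("") << std::endl;
      return 0;
    }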
* @@ -22,14 +22,14 @@ class ReactionFONLL_DISCC : public ReactionBaseDISCC //ReactionFONLL_DISCC & operator = (const ReactionAFONLL_DISCC &r) { return *(new ReactionFONLL_DISCC(r)); }; virtual string getReactionName() const { return "FONLL_DISCC"; }; - int initAtStart(const string &); + int atStart(const string &); virtual void initAtIteration() override; protected: virtual void F2 BASE_PARS override; virtual void FL BASE_PARS override; virtual void xF3 BASE_PARS override; - + private: map <int,valarray<double>> _f2fonll; map <int,valarray<double>> _flfonll; diff --git a/reactions/FONLL_DISCC/src/ReactionFONLL_DISCC.cc b/reactions/FONLL_DISCC/src/ReactionFONLL_DISCC.cc index fef06788a34c1a0ed1a5fa4c27f1aedb30d57402..faa67e397d4058b216c4a1523ac703825b8ea353 100644 --- a/reactions/FONLL_DISCC/src/ReactionFONLL_DISCC.cc +++ b/reactions/FONLL_DISCC/src/ReactionFONLL_DISCC.cc @@ -23,7 +23,7 @@ extern "C" { // Initialize at the start of the computation -int ReactionFONLL_DISCC::initAtStart(const string &s) +int ReactionFONLL_DISCC::atStart(const string &s) { // Initialize relevant parameters to be used by the mother class. _Gf = GetParam("gf"); @@ -153,7 +153,7 @@ void ReactionFONLL_DISCC::initAtIteration() // Also make sure that proper PDFs are taken by external1 function which is located in FONLL/src directory APFEL_set_pdfs( getXFX() ); - + APFEL::SetProcessDIS("CC"); // Loop over the data sets. for ( auto dataSetID : _dsIDs) @@ -170,7 +170,7 @@ void ReactionFONLL_DISCC::initAtIteration() auto *xp = GetBinValues(dataSetID,"x"); auto q2 = *q2p; auto x = *xp; - + const size_t Np = GetNpoint(dataSetID); // Resize arrays. _f2fonll[dataSetID].resize(Np); @@ -214,17 +214,17 @@ void ReactionFONLL_DISCC::initAtIteration() } // FONLL structure functions -void ReactionFONLL_DISCC::F2 BASE_PARS +void ReactionFONLL_DISCC::F2 BASE_PARS { val = _f2fonll[dataSetID]; } -void ReactionFONLL_DISCC::FL BASE_PARS +void ReactionFONLL_DISCC::FL BASE_PARS { val = _flfonll[dataSetID]; } -void ReactionFONLL_DISCC::xF3 BASE_PARS +void ReactionFONLL_DISCC::xF3 BASE_PARS { val = _f3fonll[dataSetID]; } diff --git a/reactions/FONLL_DISNC/include/ReactionFONLL_DISNC.h b/reactions/FONLL_DISNC/include/ReactionFONLL_DISNC.h index 5663b8988dcd89c3b9e2cf69ff91f89ed22ddf50..adddfc2f8f95991e3f386ba3a2c2f92fe5573c73 100644 --- a/reactions/FONLL_DISNC/include/ReactionFONLL_DISNC.h +++ b/reactions/FONLL_DISNC/include/ReactionFONLL_DISNC.h @@ -5,7 +5,7 @@ /* * @class' ReactionFONLL_DISNC * - * @brief A wrapper class for FONLL_DISNC reaction + * @brief A wrapper class for FONLL_DISNC reaction * * Based on the ReactionTheory class. Reads options produces 3d cross section. 
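Aside: the FONLL reactions above evaluate the structure functions once per iteration and the F2/FL/xF3 accessors only copy cached valarrays keyed by dataset ID. A minimal sketch of that caching scheme, with hypothetical names and placeholder numbers instead of APFEL output:

    #include <map>
    #include <valarray>
    #include <cstdio>

    // Hypothetical cache mirroring _f2fonll[dataSetID] in the hunks above.
    class F2Cache {
      std::map<int, std::valarray<double>> _f2;  // one entry per dataset ID
    public:
      // filled once per iteration, here with dummy values
      void atIteration(int dataSetID, std::size_t np) {
        std::valarray<double> v(np);
        for (std::size_t i = 0; i < np; ++i) v[i] = 0.1 * i;  // placeholder numbers
        _f2[dataSetID] = v;
      }
      // the accessor only copies the cached result, as F2 BASE_PARS does
      void F2(int dataSetID, std::valarray<double>& val) const { val = _f2.at(dataSetID); }
    };

    int main() {
      F2Cache c;
      c.atIteration(42, 3);
      std::valarray<double> out;
      c.F2(42, out);
      std::printf("%.1f %.1f %.1f\n", out[0], out[1], out[2]);
      return 0;
    }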
* @@ -22,14 +22,14 @@ class ReactionFONLL_DISNC : public ReactionBaseDISNC //ReactionFONLL_DISNC & operator = (const ReactionAFONLL_DISNC &r) { return *(new ReactionFONLL_DISNC(r)); }; virtual string getReactionName() const { return "FONLL_DISNC"; }; - int initAtStart(const string &); + int atStart(const string &); virtual void initAtIteration() override; protected: virtual void F2 BASE_PARS override; virtual void FL BASE_PARS override; virtual void xF3 BASE_PARS override; - + private: map <int,valarray<double>> _f2fonll; map <int,valarray<double>> _flfonll; diff --git a/reactions/FONLL_DISNC/src/ReactionFONLL_DISNC.cc b/reactions/FONLL_DISNC/src/ReactionFONLL_DISNC.cc index 67c6b0ea095b48a415bf256f23b12bf8aa17e121..5b8c463bb42d98c0eeda1fcc782ca809b759bde5 100644 --- a/reactions/FONLL_DISNC/src/ReactionFONLL_DISNC.cc +++ b/reactions/FONLL_DISNC/src/ReactionFONLL_DISNC.cc @@ -24,7 +24,7 @@ extern "C" { } // Initialize at the start of the computation -int ReactionFONLL_DISNC::initAtStart(const string &s) +int ReactionFONLL_DISNC::atStart(const string &s) { // Retrieve parameters needed to initialize APFEL. const double MCharm = GetParam("mch"); @@ -219,17 +219,17 @@ void ReactionFONLL_DISNC::initAtIteration() } // FONLL structure functions -void ReactionFONLL_DISNC::F2 BASE_PARS +void ReactionFONLL_DISNC::F2 BASE_PARS { val = _f2fonll[dataSetID]; } -void ReactionFONLL_DISNC::FL BASE_PARS +void ReactionFONLL_DISNC::FL BASE_PARS { val = _flfonll[dataSetID]; } -void ReactionFONLL_DISNC::xF3 BASE_PARS +void ReactionFONLL_DISNC::xF3 BASE_PARS { val = _f3fonll[dataSetID]; } diff --git a/reactions/Fractal_DISNC/include/ReactionFractal_DISNC.h b/reactions/Fractal_DISNC/include/ReactionFractal_DISNC.h index ea4a6f3f2ba639603a9330e69d3b56d3d40b27ae..9c58fac2f93325cadec146974909676e772d9006 100644 --- a/reactions/Fractal_DISNC/include/ReactionFractal_DISNC.h +++ b/reactions/Fractal_DISNC/include/ReactionFractal_DISNC.h @@ -6,7 +6,7 @@ /** @class' ReactionFractal_DISNC - @brief A wrapper class for Fractal_DISNC reaction + @brief A wrapper class for Fractal_DISNC reaction Based on the ReactionTheory class. Reads options produces 3d cross section. @@ -25,7 +25,7 @@ class ReactionFractal_DISNC : public ReactionTheory public: virtual string getReactionName() const { return "Fractal_DISNC" ;}; - int initAtStart(const string &){return 0;}; + int atStart(const string &){return 0;}; virtual int compute(int dataSetID, valarray<double> &val, map<string, valarray<double> > &err); protected: // virtual int parseOptions(){ return 0;}; diff --git a/reactions/Fractal_DISNC/src/ReactionFractal_DISNC.cc b/reactions/Fractal_DISNC/src/ReactionFractal_DISNC.cc index e20e56494d07bccab62846d215f246b52ac7a2d9..e7b516e9fe266804a480b624adf9c665f1cf6d52 100644 --- a/reactions/Fractal_DISNC/src/ReactionFractal_DISNC.cc +++ b/reactions/Fractal_DISNC/src/ReactionFractal_DISNC.cc @@ -1,4 +1,4 @@ - + /* @file ReactionFractal_DISNC.cc @date 2017-05-15 @@ -20,7 +20,7 @@ int ReactionFractal_DISNC::compute(int dataSetID, valarray<double> &val, map<str { // Get bin arrays, check that Q2, x and y are present: - auto *q2p = GetBinValues(dataSetID,"Q2"), *xp = GetBinValues(dataSetID,"x"), *yp = GetBinValues(dataSetID,"y"); + auto *q2p = GetBinValues(dataSetID,"Q2"), *xp = GetBinValues(dataSetID,"x"), *yp = GetBinValues(dataSetID,"y"); if (q2p == nullptr || xp == nullptr || yp == nullptr ) { std::cout << "\n\nFATAL ERROR: DIS NC requires x,Q2 and y bins to be present !!!" << std::endl; std::cout << "CHECK THE DATAFILE !!!" 
<< std::endl; @@ -41,7 +41,7 @@ int ReactionFractal_DISNC::compute(int dataSetID, valarray<double> &val, map<str double f_R = GetParam("R"); for (size_t i=0; i<Npnt; i++) { - + f2[i] = f_D0 * f_Q02* pow( ( q2[i] / ( q2[i] + f_Q02) ), f_D2-1.0) * ( pow(x[i], -f_D2+1.0) ) / ( 1.0 +f_D3 - f_D1 * log (x[i] )) * (pow(x[i],-f_D1*log(q2[i]/f_Q02+1.0)) @@ -51,7 +51,7 @@ int ReactionFractal_DISNC::compute(int dataSetID, valarray<double> &val, map<str fl = f2*f_R/(1.0+f_R); val = f2 - y*y/(1.0+(1.0-y)*(1.0-y))*fl; - + return 0; } diff --git a/reactions/HVQMNR_LHCb_7TeV_beauty/include/ReactionHVQMNR_LHCb_7TeV_beauty.h b/reactions/HVQMNR_LHCb_7TeV_beauty/include/ReactionHVQMNR_LHCb_7TeV_beauty.h index 3b21d778c1627d41a43e86bb96b04675f7f8413e..322982798d4a3ea38ceebd1c09487720548fe893 100644 --- a/reactions/HVQMNR_LHCb_7TeV_beauty/include/ReactionHVQMNR_LHCb_7TeV_beauty.h +++ b/reactions/HVQMNR_LHCb_7TeV_beauty/include/ReactionHVQMNR_LHCb_7TeV_beauty.h @@ -6,12 +6,12 @@ /** @class' ReactionHVQMNR_LHCb_7TeV_beauty - @brief A wrapper class for HVQMNR_LHCb_7TeV_beauty reaction + @brief A wrapper class for HVQMNR_LHCb_7TeV_beauty reaction Based on the ReactionTheory class. Reads options produces 3d cross section. - * + * Derived from ReactionBaseHVQMNR where basic stuff for HVQMNR calculation is implemented. - This class implements calculation for LHCb beauty measurement at 7 TeV + This class implements calculation for LHCb beauty measurement at 7 TeV [JHEP 1308 (2013) 117] [arXiv:1306.3663] @version 0.1 @@ -25,7 +25,7 @@ class ReactionHVQMNR_LHCb_7TeV_beauty : public ReactionBaseHVQMNR public: virtual string getReactionName() const { return "HVQMNR_LHCb_7TeV_beauty" ;}; - virtual int initAtStart(const string &); + virtual int atStart(const string &); virtual void initAtIteration(); virtual int compute(int dataSetID, valarray<double> &val, map<string, valarray<double> > &err); protected: diff --git a/reactions/HVQMNR_LHCb_7TeV_beauty/src/ReactionHVQMNR_LHCb_7TeV_beauty.cc b/reactions/HVQMNR_LHCb_7TeV_beauty/src/ReactionHVQMNR_LHCb_7TeV_beauty.cc index 9c2bfaad80572196ace106ad983cfa756cf23462..ea10469304661e03bdead956b8d4bc151bb515bb 100644 --- a/reactions/HVQMNR_LHCb_7TeV_beauty/src/ReactionHVQMNR_LHCb_7TeV_beauty.cc +++ b/reactions/HVQMNR_LHCb_7TeV_beauty/src/ReactionHVQMNR_LHCb_7TeV_beauty.cc @@ -1,4 +1,4 @@ - + /* @file ReactionHVQMNR_LHCb_7TeV_beauty.cc @date 2017-01-02 @@ -6,7 +6,7 @@ Created by AddReaction.py on 2017-01-02 Derived from ReactionBaseHVQMNR where basic stuff for HVQMNR calculation is implemented. 
- This class implements calculation for LHCb beauty measurement at 7 TeV + This class implements calculation for LHCb beauty measurement at 7 TeV [JHEP 1308 (2013) 117] [arXiv:1306.3663] */ @@ -23,11 +23,11 @@ extern "C" ReactionHVQMNR_LHCb_7TeV_beauty* create() { // initialize at the start of the computation -int ReactionHVQMNR_LHCb_7TeV_beauty::initAtStart(const string &s) +int ReactionHVQMNR_LHCb_7TeV_beauty::atStart(const string &s) { - // ignore provided terminfo (s): all needed information has been set already + // ignore provided terminfo (s): all needed information has been set already // via setDatasetParameters(int dataSetID, map<string,string> pars) - + // ****************************************************************** // perform initialisation and pre-calculation // ****************************************************************** @@ -35,11 +35,11 @@ int ReactionHVQMNR_LHCb_7TeV_beauty::initAtStart(const string &s) if(_isInitAtStart) return 0; _isInitAtStart = true; - //printf("ReactionHVQMNR_LHCb_7TeV_beauty::initAtStart()\n"); + //printf("ReactionHVQMNR_LHCb_7TeV_beauty::atStart()\n"); // check HF scheme CheckHFScheme(); - + // read needed theory parameters UpdateParameters(); PrintParameters(); @@ -57,10 +57,10 @@ int ReactionHVQMNR_LHCb_7TeV_beauty::initAtStart(const string &s) steer.nx3 = 125; steer.nx4 = 125; steer.nbz = 100; - + DefaultInit(steer, _pars.mb, _mnr, _frag, _grid, _gridSmoothed); //if(_debug) - printf("ReactionHVQMNR_LHCb_7TeV_beauty::initAtStart(): at initialisation mb = %f\n", _pars.mb); + //printf("ReactionHVQMNR_LHCb_7TeV_beauty::atStart(): at initialisation mb = %f\n", _pars.mb); // MNR (parton-level calculation) _mnr.SetDebug(_debug); _mnr.fC_sh = TMath::Power(7000.0, 2.0); // centre-of-mass energy squared @@ -90,13 +90,13 @@ int ReactionHVQMNR_LHCb_7TeV_beauty::initAtStart(const string &s) int nbin_pt_bs = 15; double bin_pt_bs[16] = {0.0,1.0,2.0,3.0,4.0,5.0,6.0,7.0,8.0,9.0,10.0,11.0,13.0,15.0,19.0,40.0}; _hCalculatedXSec[2]->SetBins(nbin_pt_bs, bin_pt_bs, nbin_y, bin_y); - + return 0; } // perform calculation (this is done once per iteration) -void ReactionHVQMNR_LHCb_7TeV_beauty::initAtIteration() +void ReactionHVQMNR_LHCb_7TeV_beauty::initAtIteration() { // protection against overdoing // TODO: remove this trick @@ -127,7 +127,7 @@ int ReactionHVQMNR_LHCb_7TeV_beauty::compute(int dataSetID, valarray<double> &va // TODO move to core xFitter //initAtIteration(); //printf("ReactionHVQMNR_LHCb_7TeV_beauty::compute() %d\n", dataSetID); - + // get histogramm with cross sections for needed dataset DataSet& ds = _dataSets[dataSetID]; TH2D* histXSec = NULL; @@ -153,6 +153,6 @@ int ReactionHVQMNR_LHCb_7TeV_beauty::compute(int dataSetID, valarray<double> &va else val[i] = val[i] * ds.FragFraction; } - + return 0; } diff --git a/reactions/HVQMNR_LHCb_7TeV_charm/include/ReactionHVQMNR_LHCb_7TeV_charm.h b/reactions/HVQMNR_LHCb_7TeV_charm/include/ReactionHVQMNR_LHCb_7TeV_charm.h index 61e71336f8e2b75404cf19bd37184113921976e1..c95acdf61776e76e355f34b5d997dcc822908a53 100644 --- a/reactions/HVQMNR_LHCb_7TeV_charm/include/ReactionHVQMNR_LHCb_7TeV_charm.h +++ b/reactions/HVQMNR_LHCb_7TeV_charm/include/ReactionHVQMNR_LHCb_7TeV_charm.h @@ -6,12 +6,12 @@ /** @class' ReactionHVQMNR_LHCb_7TeV_charm - @brief A wrapper class for HVQMNR_LHCb_7TeV_charm reaction + @brief A wrapper class for HVQMNR_LHCb_7TeV_charm reaction Based on the ReactionTheory class. Reads options produces 3d cross section. 
- * + * Derived from ReactionBaseHVQMNR where basic stuff for HVQMNR calculation is implemented. - This class implements calculation for LHCb charm measurement at 7 TeV + This class implements calculation for LHCb charm measurement at 7 TeV [Nucl. Phys. B 871 (2013), 1] [arXiv:1302.2864] @version 0.1 @@ -25,7 +25,7 @@ class ReactionHVQMNR_LHCb_7TeV_charm : public ReactionBaseHVQMNR public: virtual string getReactionName() const { return "HVQMNR_LHCb_7TeV_charm" ;}; - virtual int initAtStart(const string &); + virtual int atStart(const string &); virtual void initAtIteration(); virtual int compute(int dataSetID, valarray<double> &val, map<string, valarray<double> > &err); protected: diff --git a/reactions/HVQMNR_LHCb_7TeV_charm/src/ReactionHVQMNR_LHCb_7TeV_charm.cc b/reactions/HVQMNR_LHCb_7TeV_charm/src/ReactionHVQMNR_LHCb_7TeV_charm.cc index 88288808de857ac9f0fac06a0f5e8c19bf866847..f2051a3b261627ed0f67152d6ad9cf7b16b49444 100644 --- a/reactions/HVQMNR_LHCb_7TeV_charm/src/ReactionHVQMNR_LHCb_7TeV_charm.cc +++ b/reactions/HVQMNR_LHCb_7TeV_charm/src/ReactionHVQMNR_LHCb_7TeV_charm.cc @@ -1,4 +1,4 @@ - + /* @file ReactionHVQMNR_LHCb_7TeV_charm.cc @date 2017-01-02 @@ -6,7 +6,7 @@ Created by AddReaction.py on 2017-01-02 Derived from ReactionBaseHVQMNR where basic stuff for HVQMNR calculation is implemented - This class implements calculation for LHCb charm measurement at 7 TeV + This class implements calculation for LHCb charm measurement at 7 TeV [Nucl. Phys. B 871 (2013), 1] [arXiv:1302.2864] */ @@ -23,11 +23,11 @@ extern "C" ReactionHVQMNR_LHCb_7TeV_charm* create() { // initialize at the start of the computation -int ReactionHVQMNR_LHCb_7TeV_charm::initAtStart(const string &s) +int ReactionHVQMNR_LHCb_7TeV_charm::atStart(const string &s) { - // ignore provided terminfo (s): all needed information has been set already + // ignore provided terminfo (s): all needed information has been set already // via setDatasetParameters(int dataSetID, map<string,string> pars) - + // ****************************************************************** // perform initialisation and pre-calculation // ****************************************************************** @@ -35,11 +35,11 @@ int ReactionHVQMNR_LHCb_7TeV_charm::initAtStart(const string &s) if(_isInitAtStart) return 0; _isInitAtStart = true; - //printf("ReactionHVQMNR_LHCb_7TeV_charm::initAtStart()\n"); + //printf("ReactionHVQMNR_LHCb_7TeV_charm::atStart()\n"); // check HF scheme CheckHFScheme(); - + // read needed theory parameters UpdateParameters(); PrintParameters(); @@ -57,10 +57,10 @@ int ReactionHVQMNR_LHCb_7TeV_charm::initAtStart(const string &s) steer.nx3 = 25; steer.nx4 = 125; steer.nbz = 50; - + DefaultInit(steer, _pars.mc, _mnr, _frag, _grid, _gridSmoothed); //if(_debug) - printf("ReactionHVQMNR_LHCb_7TeV_charm::initAtStart(): at initialisation mc = %f\n", _pars.mc); + //printf("ReactionHVQMNR_LHCb_7TeV_charm::atStart(): at initialisation mc = %f\n", _pars.mc); // MNR (parton-level calculation) _mnr.SetDebug(_debug); _mnr.fC_sh = TMath::Power(7000.0, 2.0); // centre-of-mass energy squared @@ -84,7 +84,7 @@ int ReactionHVQMNR_LHCb_7TeV_charm::initAtStart(const string &s) double bin_y[6] = { 2.0, 2.5, 3.0, 3.5, 4.0, 4.5 }; int nbin_pt = 8; double bin_pt[9] = { 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0 }; - for(int f = 0; f < 4; f++) + for(int f = 0; f < 4; f++) _hCalculatedXSec[f]->SetBins(nbin_pt, bin_pt, nbin_y, bin_y); // Lambda_c rapidity differenential (for normalised cross section) int nbin_pt_lambdac = 1; @@ -94,13 +94,13 @@ int 
ReactionHVQMNR_LHCb_7TeV_charm::initAtStart(const string &s) int nbin_y_lambdac = 1; double bin_y_lambdac[2] = { 2.0, 4.5 }; _hCalculatedXSec[5]->SetBins(nbin_pt, bin_pt, nbin_y_lambdac, bin_y_lambdac); - + return 0; } // perform calculation (this is done once per iteration) -void ReactionHVQMNR_LHCb_7TeV_charm::initAtIteration() +void ReactionHVQMNR_LHCb_7TeV_charm::initAtIteration() { // protection against overdoing // TODO: remove this trick @@ -131,7 +131,7 @@ int ReactionHVQMNR_LHCb_7TeV_charm::compute(int dataSetID, valarray<double> &val // TODO move to core xFitter //initAtIteration(); //printf("ReactionHVQMNR_LHCb_7TeV_charm::compute() %d\n", dataSetID); - + // get histogramm with cross sections for needed dataset DataSet& ds = _dataSets[dataSetID]; TH2D* histXSec = NULL; @@ -163,6 +163,6 @@ int ReactionHVQMNR_LHCb_7TeV_charm::compute(int dataSetID, valarray<double> &val else val[i] = val[i] * ds.FragFraction; } - + return 0; } diff --git a/reactions/Hathor/include/HathorPdfxFitter.h b/reactions/Hathor/include/HathorPdfxFitter.h index 1057d2dae5f17885ef274dd338f3a25117c0fe63..c3f53d092b88f0c564683b6f2ebb75939d8a450f 100644 --- a/reactions/Hathor/include/HathorPdfxFitter.h +++ b/reactions/Hathor/include/HathorPdfxFitter.h @@ -30,4 +30,4 @@ private: ReactionHathor* _reactionTheory; }; -#endif +#endif diff --git a/reactions/Hathor/include/ReactionHathor.h b/reactions/Hathor/include/ReactionHathor.h index ec872bec88444c3a431b461a6452f74af302ae47..80c03208466c60e3d60475350eb202ab79a24313 100644 --- a/reactions/Hathor/include/ReactionHathor.h +++ b/reactions/Hathor/include/ReactionHathor.h @@ -6,7 +6,7 @@ /** @class' ReactionHathor - @brief A wrapper class for Hathor reaction + @brief A wrapper class for Hathor reaction Based on the ReactionTheory class. Reads options produces 3d cross section. 
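Aside: the LHCb charm and beauty hunks above configure the cross-section histograms with variable-width pT and y bins via TH2D::SetBins. A minimal ROOT-based sketch of that call, with illustrative bin edges only:

    #include "TH2D.h"

    int main() {
      // variable-width bin edges, in the spirit of bin_pt / bin_y in the patch
      const int nbin_pt = 3;
      double bin_pt[4] = {0.0, 2.0, 5.0, 8.0};
      const int nbin_y = 2;
      double bin_y[3] = {2.0, 3.0, 4.5};

      TH2D h("hXSec", "cross section;p_{T} [GeV];y", 1, 0., 1., 1, 0., 1.);
      h.SetBins(nbin_pt, bin_pt, nbin_y, bin_y);  // re-bin with explicit edges
      h.Reset();                                  // start from empty contents
      return 0;
    }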
@@ -30,7 +30,7 @@ class ReactionHathor : public ReactionTheory public: virtual string getReactionName() const { return "Hathor" ;}; - virtual int initAtStart(const string &); + virtual int atStart(const string &); virtual void setDatasetParameters(int dataSetID, map<string,string> pars, map<string,double> dsPars) override; virtual int compute(int dataSetID, valarray<double> &val, map<string, valarray<double> > &err); protected: diff --git a/reactions/Hathor/src/ReactionHathor.cc b/reactions/Hathor/src/ReactionHathor.cc index f12eb6df7ab19e82a7b888e1fb4b52abe3ede807..55146a3a83888e3bb9c8f0332e6310293bc30976 100644 --- a/reactions/Hathor/src/ReactionHathor.cc +++ b/reactions/Hathor/src/ReactionHathor.cc @@ -51,7 +51,7 @@ ReactionHathor::~ReactionHathor() // delete item.second; } -int ReactionHathor::initAtStart(const string &s) +int ReactionHathor::atStart(const string &s) { // PDFs for Hathor _pdf = new HathorPdfxFitter(this); diff --git a/reactions/KFactor/include/ReactionKFactor.h b/reactions/KFactor/include/ReactionKFactor.h index 59e1726b8a23f1efd181c38ee05bb5b3d368237c..ae861ae2c91b1caea0b3b2b4c8c165fdf5c7eb1b 100644 --- a/reactions/KFactor/include/ReactionKFactor.h +++ b/reactions/KFactor/include/ReactionKFactor.h @@ -25,7 +25,7 @@ class ReactionKFactor : public ReactionTheory public: virtual string getReactionName() const { return "KFactor" ;}; - int initAtStart(const string &); + int atStart(const string &); virtual void setDatasetParameters( int dataSetID, map<string,string> pars, map<string,double> parsDataset) override ; virtual void initAtIteration() override; virtual int compute(int dataSetID, valarray<double> &val, map<string, valarray<double> > &err); diff --git a/reactions/KFactor/src/ReactionKFactor.cc b/reactions/KFactor/src/ReactionKFactor.cc index 5e8b51dfe36c671778c09285fa5d0d40f0cccc3a..76c57db5a7c4ba01c20be7a9c16cbd3180749f23 100644 --- a/reactions/KFactor/src/ReactionKFactor.cc +++ b/reactions/KFactor/src/ReactionKFactor.cc @@ -19,7 +19,7 @@ extern "C" ReactionKFactor* create() // Initialize at the start of the computation -int ReactionKFactor::initAtStart(const string &s) +int ReactionKFactor::atStart(const string &s) { return 0; } diff --git a/reactions/KMatrix/include/ReactionKMatrix.h b/reactions/KMatrix/include/ReactionKMatrix.h index 0ee7f79494e2f874b128ebee7f033754930c2f30..58011b91c5cd23a2ae5b9be92ffd418aaa05810c 100644 --- a/reactions/KMatrix/include/ReactionKMatrix.h +++ b/reactions/KMatrix/include/ReactionKMatrix.h @@ -6,7 +6,7 @@ /** @class' ReactionKMatrix - @brief A wrapper class for KMatrix reaction + @brief A wrapper class for KMatrix reaction Based on the ReactionTheory class. Reads options produces 3d cross section. 
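Aside: the DIS NC reactions in this patch (Fractal_DISNC above, RT_DISNC and testZMVFNS below) assemble the reduced cross section as sigma_red = F2 - y^2/Y+ * FL with Y+ = 1 + (1-y)^2. A one-function sketch of that combination, with illustrative input numbers:

    #include <cstdio>

    // Reduced NC cross section from F2 and FL, as in the compute() hunks:
    // sigma_red = F2 - y^2 / (1 + (1-y)^2) * FL
    double reducedNC(double f2, double fl, double y) {
      double yplus = 1.0 + (1.0 - y) * (1.0 - y);
      return f2 - y * y / yplus * fl;
    }

    int main() {
      std::printf("%.4f\n", reducedNC(1.2, 0.15, 0.5));  // illustrative values
      return 0;
    }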
@@ -25,7 +25,7 @@ class ReactionKMatrix : public ReactionTheory public: virtual string getReactionName() const { return "KMatrix" ;}; - int initAtStart(const string &); + int atStart(const string &); virtual int compute(int dataSetID, valarray<double> &val, map<string, valarray<double> > &err); virtual void setDatasetParameters(int dataSetID, map<string,string> pars, map<string, double> parsDataset) override; protected: @@ -35,6 +35,6 @@ class ReactionKMatrix : public ReactionTheory map<int, std::vector<double> > _values; map<int, std::pair<std::string, double> > _parameterNames; - + }; diff --git a/reactions/KMatrix/src/ReactionKMatrix.cc b/reactions/KMatrix/src/ReactionKMatrix.cc index 35eb3bac4e8f646f623b2f9723c634ea1c658949..ad59e37d793b436f5de78de367719bfd7f302682 100644 --- a/reactions/KMatrix/src/ReactionKMatrix.cc +++ b/reactions/KMatrix/src/ReactionKMatrix.cc @@ -1,4 +1,4 @@ - + /* @file ReactionKMatrix.cc @date 2018-08-03 @@ -19,7 +19,7 @@ extern "C" ReactionKMatrix* create() // Initialize at the start of the computation -int ReactionKMatrix::initAtStart(const string &s) +int ReactionKMatrix::atStart(const string &s) { return 0; } diff --git a/reactions/RT_DISNC/include/ReactionRT_DISNC.h b/reactions/RT_DISNC/include/ReactionRT_DISNC.h index 30c6930c13057f932e60b3a4978759d3b0b67c29..ee5e233ffd2d8cd1dc9aba3534de9e36e3701fd1 100644 --- a/reactions/RT_DISNC/include/ReactionRT_DISNC.h +++ b/reactions/RT_DISNC/include/ReactionRT_DISNC.h @@ -6,10 +6,10 @@ /** @class' ReactionRT_DISNC - @brief A wrapper class for RT_DISNC reaction + @brief A wrapper class for RT_DISNC reaction Based on the ReactionTheory class. Reads options produces 3d cross section. - + @version 0.1 @date 2017-04-10 */ @@ -22,9 +22,9 @@ class ReactionRT_DISNC : public ReactionBaseDISNC ReactionRT_DISNC(){}; public: virtual string getReactionName() const { return "RT_DISNC" ;}; - int initAtStart(const string &); + int atStart(const string &); virtual void setDatasetParameters( int dataSetID, map<string,string> pars, map<string,double> parsDataset) override ; - virtual void initAtIteration() override; + virtual void initAtIteration() override; protected: virtual void F2 BASE_PARS override; @@ -32,7 +32,7 @@ class ReactionRT_DISNC : public ReactionBaseDISNC virtual void F2gamma_RT BASE_PARS; virtual void FLgamma_RT BASE_PARS; - + private: map <int,valarray<double> > _f2rt; diff --git a/reactions/RT_DISNC/src/ReactionRT_DISNC.cc b/reactions/RT_DISNC/src/ReactionRT_DISNC.cc index 3f1097b4dfe7957430328e49b74874299fa0237b..db878910b632475f1f1d7515a6f22c6dcc2298ab 100644 --- a/reactions/RT_DISNC/src/ReactionRT_DISNC.cc +++ b/reactions/RT_DISNC/src/ReactionRT_DISNC.cc @@ -15,8 +15,8 @@ extern "C" ReactionRT_DISNC* create() { // RT wrappers from RT/src/mstw2008_wrap.f: extern "C" { - void mstwnc_wrap_(const double& x, const double& q2, const int& ipn, - double& f2, double& f2c, double& f2b, double& fl, double& flc, double& flb, + void mstwnc_wrap_(const double& x, const double& q2, const int& ipn, + double& f2, double& f2c, double& f2b, double& fl, double& flc, double& flb, const int& iflag, const int& index, const double& f2QCDNUM, const double& flQCDNUM, const int& usekfactors = 0); void rt_setalphas_(const double& alphaSzero); @@ -29,11 +29,11 @@ extern "C" { // Initialize at the start of the computation -int ReactionRT_DISNC::initAtStart(const string &s) +int ReactionRT_DISNC::atStart(const string &s) { - int isout = Super::initAtStart(s); + int isout = Super::atStart(s); // Basic init: - + return isout; } @@ -44,7 +44,7 @@ 
void ReactionRT_DISNC::setDatasetParameters( int dataSetID, map<string,string> p _flrt[dataSetID].resize(GetNpoint(dataSetID)); } -// +// void ReactionRT_DISNC::initAtIteration() { Super::initAtIteration (); @@ -56,7 +56,7 @@ void ReactionRT_DISNC::initAtIteration() { const double qs0 = 1.0; const double as_q0 = alphaS(sqrt(qs0)); const double as_MZ = alphaS(mZ); - + const string order = GetParamS("Order"); const int iord = OrderMap( order) - 1; @@ -77,7 +77,7 @@ void ReactionRT_DISNC::initAtIteration() { } // RT -void ReactionRT_DISNC::F2 BASE_PARS +void ReactionRT_DISNC::F2 BASE_PARS { valarray<double> f2base, f2gamma_base; valarray<double> f2gamma_RT(GetNpoint(dataSetID)); @@ -98,7 +98,7 @@ void ReactionRT_DISNC::F2 BASE_PARS val = f2gamma_RT; } -void ReactionRT_DISNC::FL BASE_PARS +void ReactionRT_DISNC::FL BASE_PARS { valarray<double> flbase, flgamma_base; valarray<double> flgamma_RT(GetNpoint(dataSetID)); @@ -120,13 +120,13 @@ void ReactionRT_DISNC::FL BASE_PARS val = flgamma_RT; } -void ReactionRT_DISNC::F2gamma_RT BASE_PARS +void ReactionRT_DISNC::F2gamma_RT BASE_PARS { calcF2FL(dataSetID); val = _f2rt[dataSetID]; } -void ReactionRT_DISNC::FLgamma_RT BASE_PARS +void ReactionRT_DISNC::FLgamma_RT BASE_PARS { calcF2FL(dataSetID); val = _flrt[dataSetID]; @@ -139,7 +139,7 @@ void ReactionRT_DISNC::calcF2FL(int dataSetID) { // Get x,Q2 arrays: auto *q2p = GetBinValues(dataSetID,"Q2"), *xp = GetBinValues(dataSetID,"x"); auto q2 = *q2p, x = *xp; - + const size_t Np = GetNpoint(dataSetID); int iflag = 1; @@ -147,26 +147,26 @@ void ReactionRT_DISNC::calcF2FL(int dataSetID) { for (size_t i=0; i<Np; i++) { if (q2[i]>1.0) { - + mstwnc_wrap_(x[i], q2[i], 1, f2, f2c, f2b, fl, flc, flb, iflag, i+1, 1., 0.1, 0 ); } - - + + switch ( GetDataFlav(dataSetID) ) { case dataFlav::incl : - _f2rt[dataSetID][i] = f2; - _flrt[dataSetID][i] = fl; + _f2rt[dataSetID][i] = f2; + _flrt[dataSetID][i] = fl; break; case dataFlav::c : - _f2rt[dataSetID][i] = f2c; + _f2rt[dataSetID][i] = f2c; _flrt[dataSetID][i] = flc; break ; case dataFlav::b : - _f2rt[dataSetID][i] = f2b; - _flrt[dataSetID][i] = flb; + _f2rt[dataSetID][i] = f2b; + _flrt[dataSetID][i] = flb; break ; } } diff --git a/reactions/fastNLO/include/ReactionfastNLO.h b/reactions/fastNLO/include/ReactionfastNLO.h index 68b10e69e665bc3d52693e48caaf28a47c1ea708..0ce32783b53b1a4b5cdd29e0dc5378e390967d94 100644 --- a/reactions/fastNLO/include/ReactionfastNLO.h +++ b/reactions/fastNLO/include/ReactionfastNLO.h @@ -5,15 +5,15 @@ #include "ReactionTheory.h" //#include "fastNLOReaction.h" -//#include <memory> -#include "fastnlotk/fastNLOReader.h" +//#include <memory> +#include "fastnlotk/fastNLOReader.h" #include <map> #include <vector> /** @class' ReactionfastNLO - @brief A wrapper class for fastNLO reaction + @brief A wrapper class for fastNLO reaction Based on the ReactionTheory class @@ -27,7 +27,7 @@ public: protected: fastNLOReaction(string name) : fastNLOReader(name) {}; // not public! 
ReactionTheory* fReaction=NULL; - double EvolveAlphas(double Q ) const { return fReaction->alphaS(Q); } //!< provide alpha_s to fastNLO + double EvolveAlphas(double Q ) const { return fReaction->alphaS(Q); } //!< provide alpha_s to fastNLO bool InitPDF() { return true;}; //!< required by fastNLO vector<double> GetXFX(double xp, double muf) const { return fReaction->xfx(xp,muf); }//!< provide PDFs to fastNLO }; @@ -42,13 +42,13 @@ public: public: virtual string getReactionName() const { return "fastNLO" ;}; - int initAtStart(const string &) { return 0; } //< nothing todo + int atStart(const string &) { return 0; } //< nothing todo virtual void setDatasetParameters( int dataSetID, map<string,string> pars, map<string,double> parsDataset) override ; virtual int compute(int dataSetID, valarray<double> &val, map<string, valarray<double> > &err); protected: virtual int parseOptions(){ return 0;}; //std::unique_ptr<FastNLOxFitter> fnlo; - // fastNLO inherited functions + // fastNLO inherited functions std::map<int,std::vector<fastNLOReaction*> > ffnlo; }; diff --git a/reactions/fastNLO/src/ReactionfastNLO.cc b/reactions/fastNLO/src/ReactionfastNLO.cc index b6dfe30a087f2d680a544959d8f234899f6acc7e..cf7cf24b31000f686b61afdbce8bc69d2233d95b 100644 --- a/reactions/fastNLO/src/ReactionfastNLO.cc +++ b/reactions/fastNLO/src/ReactionfastNLO.cc @@ -1,4 +1,4 @@ -// DB 08/2017 +// DB 08/2017 /* @file ReactionfastNLO.cc @date 2016-12-06 @@ -44,10 +44,10 @@ void ReactionfastNLO::setDatasetParameters(int ID, map<string,string> pars, map< // fastNLOTable::ffilename = filename; // this->ReadTable(); // this->SetFilename(filename); - + for ( fastNLOReaction* fnlo : ffnlo[ID] ) { - // --- Set order of calculation - if ( pars.count("Order") ) { // Local order + // --- Set order of calculation + if ( pars.count("Order") ) { // Local order hf_errlog(17090510,"W: Ignoring key 'Order' in .dat file. Only global parameter 'Order' is used."); } string order = GetParamS("Order"); // Global order @@ -66,13 +66,13 @@ void ReactionfastNLO::setDatasetParameters(int ID, map<string,string> pars, map< success &= fnlo->SetContributionON(fastNLO::kThresholdCorrection, iThr, true); } if (!success) hf_errlog(17090503,"W: fastNLO. Requested order "+order+" cannot be set."); - + // --- Set Units - if ( pars.count("Units") ) { // Local order + if ( pars.count("Units") ) { // Local order string units = pars["Units"] ; - if ( units=="absolute" ) + if ( units=="absolute" ) fnlo->SetUnits(fastNLO::kAbsoluteUnits); - else if ( units=="publication" ) + else if ( units=="publication" ) fnlo->SetUnits(fastNLO::kPublicationUnits); else hf_errlog(17090514,"E: fastNLO. Unrecognized parameter for key Units"); @@ -88,48 +88,48 @@ void ReactionfastNLO::setDatasetParameters(int ID, map<string,string> pars, map< hf_errlog(17090505,"I: Setting fastNLO scale factor mu_F: "+pars["ScaleFacMuF"]); cmuf=std::stod(pars["ScaleFacMuF"]);//GetParam("ScaleFacMuF"); } - if ( cmur!=1 || cmuf!=1 ) + if ( cmur!=1 || cmuf!=1 ) fnlo->SetScaleFactorsMuRMuF(cmur,cmuf); // --- Set scale choice - if ( !fnlo->GetIsFlexibleScaleTable() && + if ( !fnlo->GetIsFlexibleScaleTable() && (pars.count("ScaleChoiceMuR") || pars.count("ScaleChoiceMuF") ) ) { hf_errlog(17090508,"W: fastNLO. 
Scale choice requested, but this is not a flexible scale table."); } else { const std::map<std::string,fastNLO::EScaleFunctionalForm> sclmap{ {"kScale1" ,fastNLO::kScale1}, - {"kScale2" ,fastNLO::kScale2}, - {"kQuadraticSum" ,fastNLO::kQuadraticSum}, - {"kQuadraticMean" ,fastNLO::kQuadraticMean}, - {"kQuadraticSumOver4" ,fastNLO::kQuadraticSumOver4}, - {"kLinearMean" ,fastNLO::kLinearMean}, - {"kLinearSum" ,fastNLO::kLinearSum}, - {"kScaleMax" ,fastNLO::kScaleMax}, - {"kScaleMin" ,fastNLO::kScaleMin}, - {"kProd" ,fastNLO::kProd}, - // {"kS2plusS1half" ,fastNLO::kS2plusS1half}, - // {"kPow4Sum" ,fastNLO::kPow4Sum}, - // {"kWgtAvg" ,fastNLO::kWgtAvg}, - // {"kS2plusS1fourth" ,fastNLO::kS2plusS1fourth}, - // {"kExpProd2" ,fastNLO::kExpProd2}, - // {"kExtern" ,fastNLO::kExtern}, + {"kScale2" ,fastNLO::kScale2}, + {"kQuadraticSum" ,fastNLO::kQuadraticSum}, + {"kQuadraticMean" ,fastNLO::kQuadraticMean}, + {"kQuadraticSumOver4" ,fastNLO::kQuadraticSumOver4}, + {"kLinearMean" ,fastNLO::kLinearMean}, + {"kLinearSum" ,fastNLO::kLinearSum}, + {"kScaleMax" ,fastNLO::kScaleMax}, + {"kScaleMin" ,fastNLO::kScaleMin}, + {"kProd" ,fastNLO::kProd}, + // {"kS2plusS1half" ,fastNLO::kS2plusS1half}, + // {"kPow4Sum" ,fastNLO::kPow4Sum}, + // {"kWgtAvg" ,fastNLO::kWgtAvg}, + // {"kS2plusS1fourth" ,fastNLO::kS2plusS1fourth}, + // {"kExpProd2" ,fastNLO::kExpProd2}, + // {"kExtern" ,fastNLO::kExtern}, // {"kConst" ,fastNLO::kConst}, - }; + }; // set mu_r - if ( pars.count("ScaleChoiceMuR") ) { // + if ( pars.count("ScaleChoiceMuR") ) { // hf_errlog(17090504,"I: Setting fastNLO scale choice mu_R: "+pars["ScaleChoiceMuR"]); if ( sclmap.count(pars["ScaleChoiceMuR"])==0 ) hf_errlog(17090522,"F: fastNLO. Scale choice for mu_R not available: "+pars["ScaleChoiceMuR"]); - else + else fnlo->SetMuRFunctionalForm(sclmap.at(pars["ScaleChoiceMuR"])); } // set mu_f - if ( pars.count("ScaleChoiceMuF") ) { // Local order + if ( pars.count("ScaleChoiceMuF") ) { // Local order hf_errlog(17090506,"I: Setting fastNLO scale choice mu_F: "+pars["ScaleChoiceMuF"]); - if ( sclmap.count(pars["ScaleChoiceMuF"])==0 ) + if ( sclmap.count(pars["ScaleChoiceMuF"])==0 ) hf_errlog(17090523,"F: fastNLO. Scale choice for mu_F not available: "+pars["ScaleChoiceMuF"]); - else + else fnlo->SetMuFFunctionalForm(sclmap.at(pars["ScaleChoiceMuF"])); } } diff --git a/reactions/testZMVFNS/include/ReactiontestZMVFNS.h b/reactions/testZMVFNS/include/ReactiontestZMVFNS.h index 0009acf8c29e9cf15a68970f45221cbf83d88dd8..6174eb4fc2c0a594202c06d9070085061038a4bc 100644 --- a/reactions/testZMVFNS/include/ReactiontestZMVFNS.h +++ b/reactions/testZMVFNS/include/ReactiontestZMVFNS.h @@ -7,7 +7,7 @@ /** @class' ReactiontestZMVFNS - @brief A wrapper class for testZMVFNS reaction + @brief A wrapper class for testZMVFNS reaction Based on the ReactionTheory class. Reads options produces 3d cross section. 
@@ -26,7 +26,7 @@ class ReactiontestZMVFNS : public ReactionTheory public: virtual string getReactionName() const { return "testZMVFNS" ;}; - int initAtStart(const string &); + int atStart(const string &); virtual int compute(int dataSetID, valarray<double> &val, map<string, valarray<double> > &err); virtual void initAtIteration() override { std::cout << " FROM INIT AT ITERATION " << std::endl; }; protected: diff --git a/reactions/testZMVFNS/src/ReactiontestZMVFNS.cc b/reactions/testZMVFNS/src/ReactiontestZMVFNS.cc index 63f73a19916ae317c9ddceab7b49a69814982aca..5f27042f343a4daefdd934f48091cafe3ed4587b 100644 --- a/reactions/testZMVFNS/src/ReactiontestZMVFNS.cc +++ b/reactions/testZMVFNS/src/ReactiontestZMVFNS.cc @@ -1,4 +1,4 @@ - + /* @file ReactiontestZMVFNS.cc @date 2016-12-08 @@ -22,7 +22,7 @@ extern "C" { // Initialize at the start of the computation -int ReactiontestZMVFNS::initAtStart(const string &s) +int ReactiontestZMVFNS::atStart(const string &s) { // Required for FL tests: std::cout << checkParam("FL_fudge") << std::endl; @@ -42,7 +42,7 @@ int ReactiontestZMVFNS::compute(int dataSetID, valarray<double> &val, map<string // Get bin arrays, check that Q2, x and y are present: - auto *q2p = GetBinValues(dataSetID,"Q2"), *xp = GetBinValues(dataSetID,"x"), *yp = GetBinValues(dataSetID,"y"); + auto *q2p = GetBinValues(dataSetID,"Q2"), *xp = GetBinValues(dataSetID,"x"), *yp = GetBinValues(dataSetID,"y"); if (q2p == nullptr || xp == nullptr || yp == nullptr ) { std::cout << "\n\nFATAL ERROR: DIS NC requires x,Q2 and y bins to be present !!!" << std::endl; std::cout << "CHECK THE DATAFILE !!!" << std::endl; @@ -86,7 +86,7 @@ int ReactiontestZMVFNS::compute(int dataSetID, valarray<double> &val, map<string double q = 100; //std::cout << " alpha_S(100.) = " << (*alpha_S)(&q) << std::endl; std::cout << " alpha_S(Q = 100 GeV) = " << alphaS(q) << std::endl; - + // look at gluon: double xx = 0.001; pTwoParFunc xg = (*PDFs)["xg"]; @@ -99,14 +99,14 @@ int ReactiontestZMVFNS::compute(int dataSetID, valarray<double> &val, map<string xfx(xx,10.,&pdfV[0]); std::cout << " xg(100,0.001) (meth 2) = " << pdfV[6] << std::endl; std::cout << " xg(100,0.001) (meth 3) = " << xfx(xx,10.,0) << std::endl; - + // compute reduced x-section: for (int i = 0; i<Npnt; i++) { double yplus = 1+(1-y[i])*(1-y[i]); double y2 = y[i]*y[i]; val[i] = f2[i] - y2/yplus * fl[i]; - } + } return 0; } diff --git a/src/GetChisquare.f b/src/GetChisquare.f index 94e543440d550dd4cda5a166ce41ce38dba16388..7065038f60131b617e631ed434f8d040420adbdf 100644 --- a/src/GetChisquare.f +++ b/src/GetChisquare.f @@ -1,4 +1,4 @@ -C-------------------------------------------------------- +C-------------------------------------------------------- C> @Brief Calculate chisquare C C> - first get error matrix @@ -16,7 +16,7 @@ C--------------------------------------------------------- #include "steering.inc" #include "systematics.inc" #include "indata.inc" - + integer n0_in, flag_in double precision fchi2_in, ERSYS_in(NSYSMax), RSYS_in(NSYSMax) double precision pchi2_in(nset), fcorchi2_in @@ -28,7 +28,7 @@ C--------------------------------------------------------- double precision ScaledGammaSav(NSysMax,Ntot) ! Scaled Gamma matrix, saved double precision ScaledOmega(NSysMax,Ntot) ! Scaled Omega matrix - + double precision ScaledErrors(Ntot) ! uncorrelated uncertainties, diagonal double precision ScaledErrorMatrix(NCovarMax,NCovarMax) ! 
stat+uncor error matrix @@ -42,22 +42,15 @@ C--------------------------------------------------------- logical LFirst data LFirst /.true./ - integer omegaIteration + integer omegaIteration Logical doMatrix, doNuisance, doExternal, LStop - -C---------------------------------------------------------------------------- - - ! --> WS debug - if(LDEBUG) print*,'GetNewChisquare flag_in=',flag_in - -c Global initialisation +c Global initialisation if (LFirst) then LFirst = .false. - C !> Determine which mechanisms for syst. errors should be used: - Call Init_Chi2_calc(doMatrix, doNuisance, doExternal) + Call Init_Chi2_calc(doMatrix, doNuisance, doExternal) C !> Determine which errors are diagonal and which are using covariance matrix Call init_chi2_stat(NDiag, NCovar, List_Diag, List_Covar, @@ -72,8 +65,8 @@ C !> Determine which errors are diagonal and which are using covariance matri enddo enddo endif ! LFirst - -C + +C do jsys=1,nsys rsys_in(jsys) = 0.d0 ersys_in(jsys) = 0.d0 @@ -82,7 +75,7 @@ C do i=1,nset pchi2_in(i)=0.d0 enddo - + fchi2_in = 0.d0 fcorchi2_in = 0.d0 @@ -107,34 +100,22 @@ C !> Read external (minuit) systematic sources if present: if (.not. Chi2FirstIterationRescale .or. flag_in.eq.1) then C !> Calculated scaled syst. uncertainties: call Chi2_calc_GetGamma(ScaledGamma, ScaledOmega) - + C !> Store rescaled gamma (important for asymmetric errors ): do k=1,nsys do i=1,n_syst_meas(k) j = syst_meas_idx(i,k) ScaledGammaSav(k,j) = ScaledGamma(k,j) enddo - enddo - - - ! print *,' --- ScaledGamma' - ! do i=1,n0_in - ! print *,(ScaledGamma(j,i),j=1,nsys) - ! enddo - - -C !> Rebuild syst. covariance matrix + enddo +C !> Rebuild syst. covariance matrix if ( doMatrix ) then Call Chi2_calc_covar(ScaledGamma $ ,ScaledSystMatrix $ ,List_Covar_Inv,n0_in) endif - ! print *,' --- ScaledSystMatrix' - ! do i=1,6 - ! print *,(ScaledSystMatrix(j,i),j=1,6) - ! enddo - else + else C !> Restore saved gamma: do k=1,nsys do i=1,n_syst_meas(k) @@ -154,58 +135,35 @@ c !> First recalc. stat. and bin-to-bin uncorrelated uncertainties: Call Chi2_calc_stat_uncor(ScaledErrors $ ,ScaledErrorMatrix $ ,rsys_in,n0_in, NCovar, List_Covar, Iterate) - - ! print *,' --- ScaledErrors' - ! print *,(ScaledErrors(j),j=1,6) - ! print *,' --- ScaledErrorMatrix' - ! do i=1,6 - ! print *,(ScaledErrorMatrix(j,i),j=1,6) - ! enddo C !> Sum covariance matricies and invert the total: if ( doMatrix .or. NCovar .gt. 0 ) then - Call Chi2_calc_SumCovar(ScaledErrorMatrix, - $ ScaledSystMatrix, + Call Chi2_calc_SumCovar(ScaledErrorMatrix, + $ ScaledSystMatrix, $ ScaledTotMatrix, NCovar) - - ! print *,' --- ScaledTotMatrix Inv.' - ! do i=1,6 - ! print *,(ScaledTotMatrix(j,i),j=1,6) - ! enddo endif C !> same for diagonal part: do i=1,n0_in - - if(NCovar.eq.0.and.ScaledErrors(i).eq.0.0d0) then -c no cov matrix and no ScaledErrors errors, break - print*, - $ 'GetNewChisquare: no stat and unc errors in data!' - print*,'(possibly cov matrix forgot to be included?)' - call hf_stop - endif - - if ( .not. is_covariance(i) ) then - ScaledErrors(i) = 1.D0 - $ / (ScaledErrors(i)*ScaledErrors(i)) + if(ScaledErrors(i)/=0d0)then + ScaledErrors(i)=ScaledErrors(i)**(-2) else - if (ScaledErrors(i).eq.0.0D0) then - ScaledErrors(i) = 1.D0 - else - ScaledErrors(i) = 1.D0 - $ / (ScaledErrors(i)*ScaledErrors(i)) - endif + ScaledErrors(i)=1d0 !When is this necessary? --Ivan + if(NCovar==0)then + !no cov matrix and no ScaledErrors errors, break + print*,'GetNewChisquare: no stat and unc errors in + $ data! 
(possibly cov matrix forgot to be included?)' + call hf_stop + endif endif enddo - endif - C !> Next determine nuisance parameter shifts omegaIteration = 1 - do - if ( LConvertCovToNui .and. do_reduce + do + if ( LConvertCovToNui .and. do_reduce $ .and. flag_in .ne. 3 ) then ! use simplified (slightly) faster version of the code call chi2_calc_syst_shifts_simple( @@ -230,10 +188,10 @@ C !> Asymmetric errors loop: $ ,ScaledOmega $ ,rsys_in $ ,omegaIteration, - $ LStop) - if (LStop) Exit + $ LStop) + if (LStop) Exit omegaIteration = omegaIteration + 1 - enddo + enddo C !> See if we want to use asymmetric errors @@ -273,7 +231,7 @@ C endif - return + return end @@ -286,7 +244,7 @@ C> @param doNuisance switch on hessian method C> @param doExternal switch on external (minuit) method C C------------------------------------------------------------------ - subroutine Init_Chi2_calc(doMatrix, doNuisance, doExternal) + subroutine Init_Chi2_calc(doMatrix, doNuisance, doExternal) implicit none logical doMatrix, doNuisance, doExternal @@ -371,7 +329,7 @@ C> @param List_Covar_inv inverted list of covariance input data C> @param n0_in total number of input data C C------------------------------------------------------------------------------------------ - subroutine Init_chi2_stat(NDiag, NCovar, List_Diag, List_Covar, + subroutine Init_chi2_stat(NDiag, NCovar, List_Diag, List_Covar, $ List_Covar_inv, n0_in) implicit none @@ -379,7 +337,7 @@ C------------------------------------------------------------------------------- #include "indata.inc" #include "systematics.inc" #include "steering.inc" - integer NDiag, NCovar, List_Diag(NTOT), List_Covar(NTOT), + integer NDiag, NCovar, List_Diag(NTOT), List_Covar(NTOT), $ List_Covar_inv(NTOT), n0_in integer i,k,l logical isCov @@ -391,7 +349,7 @@ C----------------------------------------------------------------------- List_Covar_Inv(i) = 0 ! Reset inverted list isCov = .false. -C Check if already requested to be +C Check if already requested to be if ( is_covariance(i) ) then isCov = .true. else @@ -403,7 +361,7 @@ C Check systematic sources, if a matrix source point to point i isCov = .true. endif enddo - endif + endif enddo endif @@ -426,7 +384,7 @@ C Check systematic sources, if a matrix source point to point i NDiag = NDiag + 1 List_Diag(NDiag) = i endif - + enddo if (lDebug) then @@ -438,7 +396,7 @@ C Check systematic sources, if a matrix source point to point i C---------------------------------------------------------- C -C> @brief Get external (minuit) parameters +C> @brief Get external (minuit) parameters C C> @param rsys_in C> @param ersys_in @@ -468,7 +426,7 @@ C------------------------------------------------- print *,'External systematics ',system(i),' not found' print *,'on the list of external parameters' print *,'Contact herafiter-help@desy.de with ! Stop.' - print *,' ' + print *,' ' call HF_stop endif rsys_in(i) = parminuitsave( iExtraParamMinuit(idx) ) @@ -511,7 +469,7 @@ c#include "steering.inc" logical lfirstPass/.true./ C----------------------------------------------------- do k=1,NSYS - scaling_type = SysScalingType(k) + scaling_type = SysScalingType(k) do i1=1,n_syst_meas(k) i = syst_meas_idx(i1,k) @@ -577,19 +535,19 @@ C---------------------------------------------------------------------- enddo do k=1,nsys - if (SysForm(k).eq.isMatrix) then + if (SysForm(k).eq.isMatrix) then ! The covariance matrix: do i2=1,n_syst_meas(k) i1 = syst_meas_idx(i2,k) ! data point index - i = list_covar_inv(i1) ! cov. matrix index + i = list_covar_inv(i1) ! 
cov. matrix index do j2=i2,n_syst_meas(k) j1 = syst_meas_idx(j2,k) ! data point idx j = list_covar_inv(j1) ! cov. matrix idx - ScaledSystMatrix(i,j) = - $ ScaledSystMatrix(i,j) + ScaledSystMatrix(i,j) = + $ ScaledSystMatrix(i,j) $ + ScaledGamma(k,i1)*ScaledGamma(k,j1) enddo @@ -600,7 +558,7 @@ C---------------------------------------------------------------------- C---------------------------------------------------------------------- end - + C=========================================================== C C> @brief Get uncertainties @@ -655,7 +613,7 @@ C> @param ScaledTotMatrix C> @param NCovar C C---------------------------------------------------------------------------- - Subroutine Chi2_calc_SumCovar(ScaledErrorMatrix, ScaledSystMatrix, + Subroutine Chi2_calc_SumCovar(ScaledErrorMatrix, ScaledSystMatrix, $ ScaledTotMatrix, NCovar) implicit none @@ -672,29 +630,29 @@ C---------------------------------------------------------------------------- C----------------------------- do i=1,NCovar do j=i,NCovar - ScaledTotMatrix(i,j) = ScaledErrorMatrix(i,j) + ScaledTotMatrix(i,j) = ScaledErrorMatrix(i,j) $ + ScaledSystMatrix(i,j) ScaledTotMatrix(j,i) = ScaledTotMatrix(i,j) enddo enddo - + ! print *,' --- ScaledTotMatrix' ! do i=1,6 ! print *,(ScaledTotMatrix(j,i),j=1,6) ! enddo -C----------------------------- +C----------------------------- Call DInv(NCovar,ScaledTotMatrix,NCovarMax,Array,IFail) C print *,IFail,NCovar - + end C----------------------------------------------------------------------- C C> @brief Scale covariance matrix and/or diagonal uncertainties -C -C> @param ScaledErrors -C> @param ScaledErrorMatrix +C +C> @param[out] ScaledErrors +C> @param[out] ScaledErrorMatrix C> @param rsys_in C> @param n0_in C> @param NCovar @@ -702,7 +660,7 @@ C> @param List_Covar C> @param Iterate C C----------------------------------------------------------------------- - subroutine chi2_calc_stat_uncor(ScaledErrors, ScaledErrorMatrix, + subroutine chi2_calc_stat_uncor(ScaledErrors, ScaledErrorMatrix, $ rsys_in,n0_in, NCovar, List_Covar, Iterate) implicit none @@ -720,8 +678,8 @@ C----------------------------------------------------------------------- integer i,j,i1,j1 double precision Stat, StatConst, Unc, Sum -c - +c + #include "indata.inc" double precision Offs C------------------------------------------------------- @@ -731,9 +689,9 @@ C Start with diagonal part C do i=1,n0_in Call GetPointErrors(i, Stat, StatConst, Unc) - sum = 1. + sum=0. if (Chi2ExtraSystRescale .and. Iterate.eq.0) then -C Re-scale for systematic shifts: +C Re-scale for systematic shifts: do j=1,NSYS if ( (SysForm(j) .eq. isNuisance) $ .and. (SysScalingType(j) .eq. isLinear ) ) then @@ -750,12 +708,10 @@ C Re-scale for systematic shifts: enddo endif -c protection against negative sum term for ExtraSystRescale case - Sum = exp(Sum-1.) 
- - ScaledErrors(i) = sqrt((Stat*sqrt(Sum))**2+StatConst**2+Unc**2+Offs*daten(i)**2) - ScaledErrorsStat(i) = sqrt((Stat*sqrt(Sum))**2+StatConst**2) - ScaledErrorsSyst(i) = sqrt(Unc**2+Offs*daten(i)**2) + sum=exp(sum) + ScaledErrorsStat(i)=sqrt(Stat**2*sum+StatConst**2) + ScaledErrorsSyst(i)=sqrt(Unc**2+Offs*daten(i)**2) + ScaledErrors(i)=sqrt(ScaledErrorsStat(i)**2+ScaledErrorsSyst(i)**2) enddo C @@ -764,13 +720,13 @@ C do i1=1,NCovar i = List_Covar(i1) do j1=i1,NCovar - j = List_Covar(j1) + j = List_Covar(j1) - ScaledErrorMatrix(i1,j1) = + ScaledErrorMatrix(i1,j1) = $ ScaledErrorsStat(i)*ScaledErrorsStat(j)*corr_stat(i,j) + $ ScaledErrorsSyst(i)*ScaledErrorsSyst(j)*corr_syst(i,j) + $ ScaledErrors(i)*ScaledErrors(j)*corr(i,j) + - $ cov(i,j) + $ cov(i,j) enddo enddo C-------------------------------------------------------- @@ -793,7 +749,7 @@ C------------------------------------------------- do l=1,nsys n = n_syst_meas(l) j1 = 1 - + do while ( j1 .le. n) flag = .false. C loop over all data, find non-zero correlations @@ -826,14 +782,14 @@ c print *,'EXPAND LIST',l,i endif enddo - n_syst_meas(l) = n + n_syst_meas(l) = n enddo C------------------------------------------------- end - + subroutine chi2_calc_syst_shifts_simple( $ ScaledErrors $ ,ScaledGamma @@ -849,11 +805,11 @@ C double precision ScaledErrors(NTOT) double precision ScaledGamma(NSysMax,Ntot) !> Scaled Gamma matrix double precision rsys_in(NSYSMax) - double precision A(NSYSMax,NSYSMax), C(NSysMax) + double precision A(NSYSMax,NSYSMax), C(NSysMax) double precision AS(n0_in,NSysMax) ! automatic, scaled sys. - double precision ASp(n0_in*(NsysMax+1)/2), + double precision ASp(n0_in*(NsysMax+1)/2), $ SGp(n0_in*(NsysMax+1)/2) double precision d_minus_t1 @@ -874,12 +830,12 @@ C Reset the matricies: do i=1,n0_in do j=1,nsys - AS(i,j) = + AS(i,j) = $ ScaledErrors(i) $ *ScaledGamma(j,i) enddo enddo - + do l=1,nsys C Start with "C" @@ -887,13 +843,13 @@ C Start with "C" do i1=1,n_syst_meas(l) ! loop over all data affected by this source i = syst_meas_idx(i1,l) ! i -> index of the data c do i=1,n0_in - + d_minus_t1 = daten(i) - theo(i) C Diagonal error: C(l) = C(l) + AS(i,l) $ *( d_minus_t1 ) - + enddo enddo @@ -903,7 +859,7 @@ C Diagonal error: if ( .not. UseBlas ) then !$OMP PARALLEL DO - + do i=1,n0_in do l=1,nsys do k=l,NSys @@ -937,12 +893,12 @@ C call cublas_dgemm('N','N',nsys,nsys, n0_in, 1.0D0 $ , 0.D0, A, nsysmax) endif - + C Penalty term, unity by default do i=1,nsys A(i,i) = A(i,i) + SysPriorScale(i) enddo - + c print *,A(1,1),A(nsys,nsys) call cpu_time(time2) @@ -958,9 +914,9 @@ C C Ready to invert if (nsys.gt.0) then - + Call DEQN(Nsys,A,NsysMax,IR,IFail,1,C) - + do l=1,nsys rsys_in(l) = - C(l) enddo @@ -1013,14 +969,14 @@ C double precision, allocatable :: AA(:,:) double precision, allocatable :: AA2(:,:) - double precision, allocatable :: RR(:,:) + double precision, allocatable :: RR(:,:) double precision d_minus_t1, d_minus_t2,add double precision ShiftExternal(NTOT) - + integer com_list(NTot),n_com_list !> List of affected data, common for two sources. integer IR(2*NSysMax), Ifail, Npdf - + integer nsystheo, itheoisys(NSysMax) logical lfirst @@ -1035,7 +991,7 @@ C Determine pairs of syst. uncertainties which share data if (LFirst .or. ResetCommonSyst) then LFirst = .false. - ResetCommonSyst = .false. + ResetCommonSyst = .false. call expand_syst_lists(scaledtotmatrix,list_covar_inv,n0_in) @@ -1063,10 +1019,10 @@ C Get extra piece, from external systematics: i = syst_meas_idx(i1,l) ! 
Consider asymmetric uncertainties: if (AsymErrorsIterations.eq.0) then - ShiftExternal(i) = ShiftExternal(i) + ShiftExternal(i) = ShiftExternal(i) $ + ScaledGamma(l,i)*rsys_in(l) else - ShiftExternal(i) = ShiftExternal(i) + ShiftExternal(i) = ShiftExternal(i) $ + ScaledGamma(l,i)*rsys_in(l) $ + ScaledOmega(l,i)*rsys_in(l)*rsys_in(l) endif @@ -1087,7 +1043,7 @@ C Reset the matricies: A(i,j) = 0.0D0 enddo C Penalty term, unity by default - A(i,i) = SysPriorScale(i) + A(i,i) = SysPriorScale(i) enddo !$OMP PARALLEL DO @@ -1100,7 +1056,7 @@ C Start with "C" i = syst_meas_idx(i1,l) ! i -> index of the data c do i=1,n0_in if (FitSample(i) ) then - + d_minus_t1 = daten(i) - theo(i) + ShiftExternal(i) if ( list_covar_inv(i) .eq. 0) then @@ -1109,18 +1065,18 @@ C Diagonal error: $ *ScaledGamma(l,i)*( d_minus_t1 ) else C Covariance matrix, need more complex sum: - i2 = list_covar_inv(i) ! i2 -> covar. matrix index for i. - do j1=1,n_syst_meas(l) - j = syst_meas_idx(j1,l) ! j -> index of the data + i2 = list_covar_inv(i) ! i2 -> covar. matrix index for i. + do j1=1,n_syst_meas(l) + j = syst_meas_idx(j1,l) ! j -> index of the data c do j = 1, n0_in if (j.ge.i) then if (FitSample(j)) then - d_minus_t2 = daten(j) - theo(j) + d_minus_t2 = daten(j) - theo(j) $ + ShiftExternal(j) - j2 = list_covar_inv(j) + j2 = list_covar_inv(j) if (j2 .gt. 0) then add = ScaledTotMatrix(i2,j2) - $ *( ScaledGamma(l,i)*d_minus_t2 + $ *( ScaledGamma(l,i)*d_minus_t2 $ + ScaledGamma(l,j)*d_minus_t1 ) if (i.ne.j) then C(l) = C(l) + add @@ -1162,7 +1118,7 @@ C do j=i,n0_in if ( j.ge.i .and. FitSample(j) ) then j2 = list_covar_inv(j) if (j2 .gt. 0) then - add = + add = $ ScaledTotMatrix(i2,j2) $ *( ScaledGamma(l,i)*ScaledGamma(k,j) $ +ScaledGamma(l,j)*ScaledGamma(k,i)) @@ -1179,7 +1135,7 @@ C do j=i,n0_in endif c enddo - endif + endif enddo enddo endif @@ -1209,7 +1165,7 @@ C Ready to invert enddo endif - + if (iflag.eq.3) then Call DEQInv(Nsys,A,NsysMax,IR, IFail, 1, C) else @@ -1241,18 +1197,18 @@ C Loop over all sources, find theory sources, count them. if (nsystheo.gt.0) then npdf = nsystheo - + Allocate(AA(npdf,npdf)) open (52,file=trim(OutDirName)//'/pdf_shifts.dat', $ status='unknown') - write (52,'(''LHAPDF set='',A32)') + write (52,'(''LHAPDF set='',A32)') $ trim(adjustl(LHAPDFSET)) write (52,'(i3)') npdf - + do l=1,npdf write (52,'(i3,2F8.4)') l, - $ rsys_in(itheoisys(l)), + $ rsys_in(itheoisys(l)), $ ersys_in(itheoisys(l)) enddo close (52) @@ -1275,7 +1231,7 @@ C Loop over all sources, find theory sources, count them. 
C Also rotation matrix: Call MyDSYEVD(Npdf,AA,Npdf,C,ifail) - + C scale to take into account error reduction do i=1,Npdf do j=1,Npdf @@ -1301,7 +1257,7 @@ C We want to preserve original directions as much as possible enddo enddo - + Call MyDSYEVD(k,RR,Npdf,C,ifail) C rotate rotation matrix: @@ -1312,7 +1268,7 @@ C rotate rotation matrix: AA2(i,j) = AA2(i,j) + AA(i,l)*RR(l,j) enddo enddo - enddo + enddo do i=1,k @@ -1334,7 +1290,7 @@ C Last loop to keep the direction of the original vectors open (52,file=trim(OutDirName)//'/pdf_rotate.dat' $ ,status='unknown') - write (52,'(''LHAPDF set='',A32)') + write (52,'(''LHAPDF set='',A32)') $ trim(adjustl(LHAPDFSET)) write (52,'(i4)') Npdf do i=1,Npdf @@ -1343,9 +1299,9 @@ C print *,'haha',i,C(i),ifail $ (AA(j,i),j=1,Npdf) enddo close (52) - + DeAllocate(AA,AA2,RR) - + endif endif endif @@ -1373,10 +1329,10 @@ C--------------------------------------------------------------- n_list = 0 do i1=1,n_syst_meas(isys1) i = syst_meas_idx(i1,isys1) - do j1=1,n_syst_meas(isys2) + do j1=1,n_syst_meas(isys2) j = syst_meas_idx(j1,isys2) if ( i.eq.j ) then - n_list = n_list + 1 + n_list = n_list + 1 i_list(n_list) = i goto 17 endif @@ -1386,7 +1342,7 @@ C--------------------------------------------------------------- C--------------------------------------------------------------- end - + C---------------------------------------------------------------------- C @@ -1418,7 +1374,7 @@ C---------------------------------------------------------------------- #include "steering.inc" double precision ScaledGamma(NSysMax,Ntot) ! Scaled Gamma matrix - double precision ScaledErrors(Ntot) ! uncorrelated uncertainties, diagonal + double precision ScaledErrors(Ntot) !1/d^2, where d is scaled uncorrelated uncertainty, for each datapoint double precision ScaledTotMatrix(NCovarMax,NCovarMax) ! stat+uncor+syst covar matrix double precision rsys_in(NSysMax) integer NDiag, list_covar(NTot), NCovar, list_diag(NTot) @@ -1427,7 +1383,7 @@ C---------------------------------------------------------------------- integer i,j, i1, j1, k double precision d,t, chi2, sum integer offdiag - + double precision SumCov(NCovarMax) C--------------------------------------------------------------------------- @@ -1447,8 +1403,8 @@ C Diagonal part: Sum = Sum + ScaledGamma(k,i)*rsys_in(k) enddo C Chi2 per point: - chi2 = (d - t + Sum)**2 * ScaledErrors(i) - residuals(i) = (d - t + Sum)*sqrt(ScaledErrors(i)) + residuals(i)=(d-t+Sum)*sqrt(ScaledErrors(i)) + chi2=residuals(i)**2 C Sums: if ( FitSample(i) ) then chi2_fit = chi2_fit + chi2 @@ -1460,16 +1416,16 @@ C Sums: enddo ! print*,'chi2_calc1: ',fchi2_in - + C Covariance matrix part C 1) Pre-compute sums of systematic shifts: do i1=1,NCovar - i = list_covar(i1) + i = list_covar(i1) sumcov(i1) = Daten(i) - Theo(i) do k = 1,NSys SumCov(i1) = SumCov(i1) + ScaledGamma(k,i)*rsys_in(k) - enddo + enddo enddo C 2) Actual chi2 calculation: @@ -1480,7 +1436,7 @@ C 2) Actual chi2 calculation: do j1 = 1, NCovar j = list_covar(j1) Chi2 = Chi2 + SumCov(i1)*SumCov(j1)*ScaledTotMatrix(i1,j1) - if ( ( JSET(i) .ne. JSET(j) ) + if ( ( JSET(i) .ne. JSET(j) ) $ .and. (ScaledTotMatrix(i1,j1) .ne. 0d0 ) ) then if ( offdiag .eq. 
0 ) then call hf_errlog(15090916, @@ -1497,7 +1453,7 @@ C Sums: pchi2_in(JSET(i)) = pchi2_in(JSET(i)) + chi2 else chi2_cont = chi2_cont + chi2 - endif + endif enddo @@ -1515,7 +1471,7 @@ c partial chisq are not reasonably defined C Correlated chi2 part: fcorchi2_in = 0.d0 do k=1,NSys - fcorchi2_in = fcorchi2_in + fcorchi2_in = fcorchi2_in $ + rsys_in(k)**2 * SysPriorScale(k) C Also, store as residuals: residuals(ndiag+k) = rsys_in(k)*sqrt(SysPriorScale(k)) @@ -1531,9 +1487,10 @@ C-------------------------------------------------------------------------- C C> @brief Calculate additional log correction factor C> @param ScaledErrors uncertainties -C> @param chi2_log log correction +C> @param[out] chi2_log log correction C> @param n0_in number of points C +C Fills vector chi2_poi -- log corrections for each dataset C-------------------------------------------------------------------------- subroutine chi2_calc_PoissonCorr(ScaledErrors, chi2_log, n0_in) @@ -1542,7 +1499,7 @@ C-------------------------------------------------------------------------- double precision ScaledErrors(Ntot) double precision chi2_log - integer n0_in + integer n0_in !isn't number of points npoints? Why is this needed? --Ivan #include "indata.inc" @@ -1559,11 +1516,7 @@ C------------------------------------------------------------------------- do i=1,n0_in if (FitSample(i)) then if ( alpha(i).gt.0 ) then - dchi2 = - log( alpha(i)*alpha(i) - $ * ScaledErrors(i)) - - - + dchi2=-log(alpha(i)*alpha(i)*ScaledErrors(i)) chi2_log = chi2_log + dchi2 chi2_poi(JSET(i)) = chi2_poi(JSET(i)) + dchi2 endif @@ -1598,8 +1551,8 @@ C------------------------------------------------------------------ do i=1,n0_in if(ScaledErrors(i).ne.1.0D0) then ALPHA_MOD(i) = 1.D0/sqrt(ScaledErrors(i)) - else -c special case if no scaled errors given, i.e. given total cov matrix + else +c special case if no scaled errors given, i.e. given total cov matrix ALPHA_MOD(i) = 0.0D0 endif THEO_MOD(i) = THEO(i) @@ -1607,17 +1560,17 @@ c special case if no scaled errors given, i.e. given total cov matrix THEO_MOD(i) = THEO_MOD(i) - ScaledGamma(k,i)*RSys_in(k) enddo enddo - + ! CALL cvfillgamma(nsys,n0_in,ScaledGamma,NSYSMAX) ! CALL cvfillgamma(ScaledGamma,nsys,n0_in,NTOT) C------------------------------------------------------------------ end - + C--------------------------------------------------------------------- C -C> @brief +C> @brief C> @param ScaledGamma scaled Gamma matrix C> @param ScaledGammaSav scaled Gamma matrix C> @param ScaledOmega scaled Omega matrix @@ -1627,8 +1580,8 @@ C> @param LStop C C--------------------------------------------------------------------- subroutine UseOmegaScale(ScaledGamma,ScaledGammaSav,ScaledOmega, - $ rsys_in,Iteration,LStop) - + $ rsys_in,Iteration,LStop) + implicit none #include "ntot.inc" #include "steering.inc" @@ -1640,7 +1593,7 @@ C--------------------------------------------------------------------- double precision rsys_save(NSYSMax) integer Iteration logical LStop - integer i,j,k, iter + integer i,j,k, iter double precision shift integer iterMax @@ -1679,12 +1632,12 @@ C recalulate if (SysForm(k) .ne. isExternal) then do i=1,n_syst_meas(k) j = syst_meas_idx(i,k) - ScaledGamma(k,j) = ScaledGammaSav(k,j) + ScaledGamma(k,j) = ScaledGammaSav(k,j) $ + ScaledOmega(k,j)*rsys_in(k) enddo endif - enddo - + enddo + if (LDebug) then print *,'shift',iteration, shift,nsys endif @@ -1730,7 +1683,7 @@ C---- if ( SysForm(l) .eq. 
isExternal) then do i=1,n_syst_meas(l) j = syst_meas_idx(i,l) - ScaledGamma(l,j) = ScaledGamma(l,j) + ScaledGamma(l,j) = ScaledGamma(l,j) $ + ScaledOmega(l,j)*rsys_in(l) enddo endif @@ -1748,12 +1701,12 @@ C> @param Covar -- Input covariance matrix. Output: nuisance parameters. C> @param ANuisance -- Output nuisance parameter representation C> @param Tolerance -- fractional sum of eigenvalues for the sourced treated as uncorrelated uncertainty. 0: NCorrelated = NCovar, 1: NCorrelated = 0. C> @param Ncorrelated -- Output number of correlated nuisance parameters -C> @param Uncor -- Output uncorrelated uncertainty +C> @param Uncor -- Output uncorrelated uncertainty C @param LSepDiag -- Separate diagonal part C C-------------------------------------------------------------------------------- subroutine GetNuisanceFromCovar( NDimCovar, NDimSyst, NCovar, - $ Covar, ANuisance, Tolerance, + $ Covar, ANuisance, Tolerance, $ Ncorrelated, Uncor, LSepDiag) implicit none C-------------------------------------------------------------------------------- @@ -1764,7 +1717,7 @@ C------------------------------------------------------------------------------- integer Ncorrelated double precision Uncor(NDimCovar) logical LSepDiag - + double precision Eigenvalues(NDimCovar) integer ifail @@ -1775,7 +1728,7 @@ C------------------------------------------------------------------------------- integer i,j,k C-------------------------------------------------------------------------------- - + C Try to remove diagonal term first: if ( LSepDiag ) then @@ -1787,7 +1740,7 @@ C Try to remove diagonal term first: facMin = 0.0D0 C First check if the matrix positive definite - + do i=1,NCovar do j=1,NCovar testm(i,j) = Covar(i,j) @@ -1796,7 +1749,7 @@ C First check if the matrix positive definite Call MyDSYEVD(NCovar,testm,NCovar, EigenValues,IFail) if (EigenValues(1).lt.0) then - print + print $ '(''Negative eigenvalue for the covariance matrix '',G12.3)', $ Eigenvalues(1) print *,'List of eigenvalues' @@ -1842,9 +1795,9 @@ C First check if the matrix positive definite DeAllocate(testm) endif - + Call MyDSYEVD(NCovar,Covar,NDimCovar, EigenValues,IFail) - + Sum = 0 do i=1,NCovar c print *,'Eig',i,EigenValues(i) @@ -1951,9 +1904,9 @@ C call hf_errlog(1,'I:Read covariance matrix from file') enddo print '(''Maximum deviation from the original correlation = '' $ ,F10.2,''%'')',devmax - + return - 91 call hf_errlog(1,'F:Can not open covar.in file') + 91 call hf_errlog(1,'F:Can not open covar.in file') 92 call hf_errlog(2,'F:Can not read Covar namelist') 93 call hf_errlog(3,'F:Can not find Covar namelist') 99 call hf_errlog(3,'F:Error reading cov. matrix') @@ -1980,7 +1933,7 @@ C double precision, allocatable :: C(:,:) ! covariance matrix double precision, allocatable :: S(:,:,:) ! nuisance param representation of it. - double precision uncor_loc(NTOT, 0:n_sys_scaling_max-1) + double precision uncor_loc(NTOT, 0:n_sys_scaling_max-1) integer nui_cor(0:n_sys_scaling_max-1) logical l_present(0:n_sys_scaling_max-1) @@ -1995,7 +1948,7 @@ C logical lfirst data lfirst/.true./ namelist/ReduceSyst/do_reduce,tolerance, useBlas - + C------------------------------------------------ useBlas = .false. if (lfirst) then @@ -2034,11 +1987,11 @@ C------------------------------------------------ enddo endif enddo - ! translate to + ! 
translate to if (l_present(isys_scaling)) then - call GetNuisanceFromCovar(NPoints,NPoints,NPoints, - $ C, S(1,1,isys_scaling+1), tolerance, - $ Nui_cor(isys_scaling), + call GetNuisanceFromCovar(NPoints,NPoints,NPoints, + $ C, S(1,1,isys_scaling+1), tolerance, + $ Nui_cor(isys_scaling), $ Uncor_loc(1, isys_scaling), .false.) else Nui_cor(isys_scaling) = 0 @@ -2052,9 +2005,9 @@ C------------------------------------------------ N_Syst_Meas = 0 UncorNew = sqrt( UncorNew**2 + Uncor_loc(:,isLinear)**2) - UncorPoissonNew = sqrt( UncorPoissonNew**2 + UncorPoissonNew = sqrt( UncorPoissonNew**2 $ + Uncor_loc(:,isPoisson)**2) - UncorConstNew = sqrt( UncorConstNew**2 + UncorConstNew = sqrt( UncorConstNew**2 $ + Uncor_loc(:,isNoRescale)**2) @@ -2076,16 +2029,16 @@ c endif do i=1,NPoints n_syst_meas(NSYS) = n_syst_meas(NSYS) + 1 syst_meas_idx(n_syst_meas(NSYS),NSYS) = i - beta(NSYS,i) = S(isys,i,isys_scaling+1) /daten(i) + beta(NSYS,i) = S(isys,i,isys_scaling+1) /daten(i) enddo enddo enddo Deallocate(C) Deallocate(S) - + goto 18 - 17 continue + 17 continue call hf_errlog(1, $ 'F:Error reading ReduceSyst Namelist ! Stop') 18 continue @@ -2142,7 +2095,7 @@ C--------------------------------------------------------------- character*8 sys_prefix(NCovTypeMax) data sys_prefix/'CTot_','CTot_','CSyst_','CStat_','CSyst_'/ - + character*80 name_s character*3 name_n, name_t integer imaskSta, imaskUnc @@ -2166,10 +2119,10 @@ C-------------------------------------------------------- endif - + do i=1,Npoints - theo(i) = daten(i) ! Set theory = data for error scaling + theo(i) = daten(i) ! Set theory = data for error scaling call GetPointErrors(i, Stat, StatConst, Unc(i)) Sta(i) = sqrt(Stat**2+StatConst**2) @@ -2202,7 +2155,7 @@ c print *,iCovType,Icovbit,NCovar cycle ! nothing to be done endif -C Generate compact matrix: +C Generate compact matrix: do i1=1,NCovar i = List_Covar(i1) do j1=1,NCovar @@ -2211,9 +2164,9 @@ C Use proper source: if (iCovBit .eq. iCovSyst) then cov_loc(i1,j1) = cov(i,j) elseif (iCovBit .eq. iCovSystCorr) then - cov_loc(i1,j1) = corr_syst(i,j)*Unc(i)*Unc(j) + cov_loc(i1,j1) = corr_syst(i,j)*Unc(i)*Unc(j) elseif (iCovBit .eq. iCovStatCorr) then - cov_loc(i1,j1) = corr_stat(i,j)*Sta(i)*Sta(j) + cov_loc(i1,j1) = corr_stat(i,j)*Sta(i)*Sta(j) elseif (iCovBit .eq. iCovTotal) then cov_loc(i1,j1) = cov(i,j) @@ -2221,18 +2174,18 @@ C Use proper source: tot1 = sqrt(sta(i)**2+unc(i)**2) tot2 = sqrt(sta(j)**2+unc(j)**2) cov_loc(i1,j1) = corr(i,j)*tot1*tot2 - + endif enddo enddo c endif - + do j=1,NCovar Uncor(j) = 0.D0 UncorSt(j) = 0.D0 enddo - if ( (iCovBit.eq.iCovSyst) .or. (iCovBit.eq.iCovSystCorr) ) + if ( (iCovBit.eq.iCovSyst) .or. (iCovBit.eq.iCovSystCorr) ) $ then C Direct diagonalisation: Call GetNuisanceFromCovar(NCovarMax, NCovarMax,NCovar, @@ -2261,7 +2214,7 @@ C Subtract diagonal as much as possible: endif C print *,'hihi',Nui_cor,tolerance, ncovar - + C Define the scaling property based on the first point: imaskSta = iStatTypesBitMask(JSet(List_Covar(1))) @@ -2277,7 +2230,7 @@ C Define the scaling property based on the first point: name_t = ':M' elseif (iAnd(imaskUnc,ibConst).eq.imaskUnc) then name_t = ':A' - else + else Call hf_errlog(14090401, $ 'W: inconsistent use of uncor and '// $ 'uncor const for dataset: "'// @@ -2286,7 +2239,7 @@ C Define the scaling property based on the first point: endif elseif ( (iCovBit.eq.iCovStatCorr) ) then - ! Poisson is default for stat. + ! Poisson is default for stat. name_t = ':P' ! 
Check bits @@ -2294,7 +2247,7 @@ C Define the scaling property based on the first point: name_t = ':P' elseif (iAnd(imaskSta,ibConst).eq.imaskSta) then name_t = ':A' - else + else Call hf_errlog(14090402, $ 'W: inconsistent use of stat and '// $ 'stat const for dataset "'// @@ -2307,7 +2260,7 @@ C Define the scaling property based on the first point: endif do i=1,NSET - if ( DataName(i).eq.DataSetLabel(JSet(List_Covar(1)))) + if ( DataName(i).eq.DataSetLabel(JSet(List_Covar(1)))) $ then name_t = DataSystType(i) endif @@ -2322,7 +2275,7 @@ C Define the scaling property based on the first point: write (name_n,'(i3)') j1 endif name_s = trim(Sys_prefix(icovtype)) - $ // name_n // '_' + $ // name_n // '_' $ // DataSetLabel( JSet(List_Covar(1)) ) @@ -2334,12 +2287,12 @@ C Define the scaling property based on the first point: beta(NSYS,i) = anui_loc(j1,i1)/daten(i) enddo enddo - + do i1=1,NCovar i = List_Covar(i1) - if ( (iCovBit.eq.iCovSyst) .or. (iCovBit.eq.iCovSystCorr) ) + if ( (iCovBit.eq.iCovSyst) .or. (iCovBit.eq.iCovSystCorr) ) $ then -C Re-set uncorrelated systematics: +C Re-set uncorrelated systematics: if (name_t .eq. ':M') then UncorNew(i) = Uncor(i1)/daten(i) UncorPoissonNew(i) = 0.0D0 @@ -2390,7 +2343,7 @@ C Re-set uncorrelated systematics: enddo goto 18 - 17 continue + 17 continue call hf_errlog(1, $ 'F:Error reading CovarToNuisance Namelist ! Stop') 18 continue @@ -2436,8 +2389,8 @@ C First check if this is possible: end -C-------------------------------------------------------- -C> @Brief Interface to lapack, to dynamically allocate work arrays +C-------------------------------------------------------- +C> @Brief Interface to lapack, to dynamically allocate work arrays subroutine MyDSYEVD(NCovar,Covar,NDimCovar, EigenValues,ifail) implicit none integer NCovar, NDimCovar @@ -2447,7 +2400,7 @@ C> @Brief Interface to lapack, to dynamically allocate work arrays integer IWork Character*80 msg C--------------------------------------------------------------- -C Determine optimal size of the work array: +C Determine optimal size of the work array: Call DSYEVD('V','U',NCovar,Covar,NDimCovar, EigenValues, Work, $ -1, IWork, -1, IFail) @@ -2467,10 +2420,10 @@ C Determine optimal size of the work array: integer NCovar, NDimCovar double precision Covar(NDimCovar,NDimCovar), EigenValues(NCovar) integer nwork, nlwork - double precision Work(nwork) ! Dynamic array - integer IWork(nlwork) ! Dynamic array + double precision Work(nwork) ! Dynamic array + integer IWork(nlwork) ! Dynamic array integer IFail -C--------------------------------------------------------------------- +C--------------------------------------------------------------------- Call DSYEVD('V','U',NCovar,Covar,NDimCovar, EigenValues, Work, $ nwork, IWork, nlwork, IFail) diff --git a/src/GetPDFUncType.f b/src/GetPDFUncType.f index 5bea3b5922829d20aac476b1154ecac4d7785396..5f61b73b192a7d58fc57b7a9aa0fe04d84e585b6 100644 --- a/src/GetPDFUncType.f +++ b/src/GetPDFUncType.f @@ -41,7 +41,7 @@ c Adapted from LHAPDF uncertainties.f character*(*) name logical lMonteCarlo,lAsymhess,lSymmhess - ! logical variables for lhapdf interface + ! logical variables for lhapdf interface logical lhapdf_mc, lhapdf_symmetric integer nset lMonteCarlo = .false. @@ -56,7 +56,7 @@ c Adapted from LHAPDF uncertainties.f call getnset(nset) call getpdfunctypem(nset, lhapdf_mc, lhapdf_symmetric) - if(lhapdf_symmetric) then + if(lhapdf_symmetric) then lMonteCarlo=.false. lSymmhess=.true. lAsymhess=.false. 
@@ -73,10 +73,10 @@ c Adapted from LHAPDF uncertainties.f lAsymhess=.false. endif #endif - + end subroutine GetPDFUncType_HERAF_lhapdf6 - + @@ -92,28 +92,28 @@ c Adapted from LHAPDF uncertainties.f lMonteCarlo = .false. lAsymhess = .false. lSymmhess = .false. - if(PDF_DECOMPOSITION.eq."LHAPDF") then -#ifndef LHAPDF_ENABLED - call hf_errlog(26061547, "S: Call to lhapdf function but"// - $ "xFitter compiled without --enable-lhapdf switch") -#else - call getlhapdfversion(version) - if(index(version, '5.').eq.1) then - call GetPDFUncType_HERAF_lhapdf5(lMonteCarlo, - $ lAsymhess, lSymmhess, name) - else if(index(version, '6.').eq.1) then - call GetPDFUncType_HERAF_lhapdf6(lMonteCarlo, - $ lAsymhess, lSymmhess, name) - else - call hf_errlog(26061518, "S: lhapdf can not"// - $ "determine error type") - endif -#endif - else - if ( DoBandsSym ) then - lSymmhess=.true. - else - lAsymhess=.true. - endif - endif +C this part is broken since 2.2.0 +c if(PDF_DECOMPOSITION.eq."LHAPDF") then +c#ifndef LHAPDF_ENABLED +c call hf_errlog(26061547, "S: Call to lhapdf function but"// +c $ "xFitter compiled without --enable-lhapdf switch") +c#else +c call getlhapdfversion(version) +c if(index(version, '5.').eq.1) then +c call GetPDFUncType_HERAF_lhapdf5(lMonteCarlo, +c $ lAsymhess, lSymmhess, name) +c else if(index(version, '6.').eq.1) then +c call GetPDFUncType_HERAF_lhapdf6(lMonteCarlo, +c $ lAsymhess, lSymmhess, name) +c else +c call hf_errlog(26061518, "S: lhapdf can not"// +c $ "determine error type") +c endif +c#endif +c else +c if ( DoBandsSym ) then +c lSymmhess=.true. +c else +c lAsymhess=.true. +c endif end subroutine GetPDFUncType_HERAF diff --git a/src/Makefile.am b/src/Makefile.am index 6bceefdc17ee95ca570b999bec690a30a4588a61..ded37f43e77384acb1d4e231a5141e4507b1484b 100644 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -19,13 +19,14 @@ dist_noinst_HEADERS = offset.h lib_LTLIBRARIES = libxfmain.la libxfmain_la_SOURCES = hfbanner.cc init_theory.f fcn.f GetChisquare.f minuit_ini.f read_data.f store_output.f\ -sumrules.f read_steer.f error_bands_pumplin.f pdf_param.f systematics.f\ -evolution.f mc_errors.f difftop_fastnlo.f store_h1qcdfunc.f\ -dataset_tools.f theory_dispatcher.f error_logging.f \ -GetDummyXsection.f main.f \ -prep_corr.f GetPDFUncType.f rediagonalize.f qedevol.f \ +read_steer.f error_bands_pumplin.f systematics.f\ +mc_errors.f difftop_fastnlo.f store_h1qcdfunc.f\ +dataset_tools.f theory_dispatcher.f error_logging.f\ +GetDummyXsection.f main.f\ +prep_corr.f GetPDFUncType.f rediagonalize.f\ ExtraParConstr.cc TheorEval.cc ftheor_eval.cc xfitter_cpp_base.cc xfitter_pars.cc xfitter_steer.cc Profiler.cc\ -tinyexpr.c dependent_pars.cc +tinyexpr.c expression_utils.cc dependent_pars.cc\ +fortran_interface.cc # WS 2013-01-19 for the Offset method libxfmain_la_SOURCES += offset_fns.f g_offset.cc @@ -43,16 +44,17 @@ libxfmain_la_SOURCES += ReactionTheory.cc libxfmain_la_SOURCES += IntegrateDIS.cc # optional sources # these should be moved to indiviual shared libraries in the future -if ENABLE_APFEL - libxfmain_la_SOURCES+= apfel.f -else - libxfmain_la_SOURCES+= apfel_dum.f -endif -if ENABLE_MELA - libxfmain_la_SOURCES+= mela.f -else - libxfmain_la_SOURCES+= mela_dum.f -endif +##APFEL and MELA commented out because they need to be updated for the new evolutions interface +#if ENABLE_APFEL +# libxfmain_la_SOURCES+= apfel.f +#else +# libxfmain_la_SOURCES+= apfel_dum.f +#endif +#if ENABLE_MELA +# libxfmain_la_SOURCES+= mela.f +#else +# libxfmain_la_SOURCES+= mela_dum.f +#endif @@ -97,14 +99,13 @@ 
LDADD = libxfitter.a # libraries to be linked to the main target LDADD += $(QCDNUMLIBS) $(LAPACK_LIBS) $(BLAS_LIBS) - if ENABLE_LHAPDF LDADD+=$(LHAPDF_LDFLAGS) endif if HAVE_ROOT LDADD+=$(ROOT_LIBS) - libxfmain_la_CPPFLAGS+=$(ROOT_CFLAGS) + libxfmain_la_CXXFLAGS+=$(ROOT_CFLAGS) endif # libraries to be linked to the main target @@ -120,7 +121,6 @@ if ENABLE_LHAPDF libxfmain_la_FFLAGS+=$(DEFS) $(LHAPDF_CPPFLAGS) endif - XFLIBS = ../EW/src/libmyew.a \ ../interfaces/src/libinterfaces.a\ ../minuit/src/libmyminuit.a $(CERNLIBS) @FLIBS@ \ @@ -129,7 +129,6 @@ XFLIBS = ../EW/src/libmyew.a \ ../common/num_utils/libnum_utils.a \ ../tools/draw/src/pdferrors.o - LDADD += $(XFLIBS) GFRTLIB = -lgfortran @@ -138,12 +137,10 @@ APPLGRID_1 = $(subst -m64,,$(APPLGRID_LDFLAGS)) APPLGRID_2 = $(subst -pthread,,$(APPLGRID_1)) APPLGRID_3 = $(subst -rdynamic,,$(APPLGRID_2)) - -if ENABLE_MELA - LDADD+=$(MELA_LDFLAGS) -endif +#if ENABLE_MELA +# LDADD+=$(MELA_LDFLAGS) +#endif if ENABLE_GENETIC LDADD+= ../genetic/mixmax_r004/libmixmax.a endif - diff --git a/src/Profiler.cc b/src/Profiler.cc index 856c36078ac55eb2ddc7754da091dd7833081410..67ab53699a16bbcce5710bf01d4630fc0f8249ea 100644 --- a/src/Profiler.cc +++ b/src/Profiler.cc @@ -106,6 +106,7 @@ namespace xfitter for ( size_t i=0; i<len; i++) { double val = node[i].as<double>(); *ppar = val; + updateAtConfigurationChange(); preds.push_back( evaluatePredictions() ); } *ppar = save; diff --git a/src/ReactionTheory.cc b/src/ReactionTheory.cc index 5a8f6d49dbdf28caf31ec0a7b21abcb6dcba7770..571c3cd1b1881832914ec66bc75cbc703a3ddc4e 100644 --- a/src/ReactionTheory.cc +++ b/src/ReactionTheory.cc @@ -11,6 +11,9 @@ #include <string> #include "ReactionTheory.h" +//The following two includes are to access the default evolution, remove later +#include "xfitter_steer.h" +#include "BaseEvolution.h" using std::list; using std::string; @@ -23,12 +26,14 @@ void protonPDF(double const& x, double const& Q, double* pdfs) { gProtonPdf(x,Q,pdfs); } +/* Unused since 2.2.0 // Also fortran interface extern "C" { void proton_pdf_(double const& x, double const& Q, double* pdfs) { gProtonPdf(x,Q,pdfs); }; } +*/ ReactionTheory::ReactionTheory(const ReactionTheory &rt) @@ -79,18 +84,9 @@ const pXFXlike ReactionTheory::getXFX(const string& type) { std::cerr<<ex.what(); hf_errlog(18091400,"F: Exception in retrieveXfxQArray, details written to stderr"); } - - // return _xfx[type]; - // double dd[13]; - // gProtonPdf(0.01,5.,dd); - // for (int i=1; i<12; i++) { - // std::cout << " pdf at x=0.01, q=5: " << i << " val =" << dd[i] << std::endl; - //} - - return &protonPDF; } - +void ReactionTheory::xfx(const double&x,const double&q,double*r)const{xfitter::defaultEvolution->xfxQArray()(x,q,r);};//To be replaced after TermData rewrite bool ReactionTheory::notMasked(int DSID, int Bin) { auto bins = _dsBins[DSID]; auto flag = bins->find("binFlag"); diff --git a/src/TheorEval.cc b/src/TheorEval.cc index 13c7b2e81b961814dfcd26f1e84c91c57a9e2027..e42a01fc4015f32995b9eab182d4007575500888 100644 --- a/src/TheorEval.cc +++ b/src/TheorEval.cc @@ -18,7 +18,7 @@ #include "ReactionTheory.h" #include "xfitter_cpp.h" #include "get_pdfs.h" -#include <string.h> +#include <string.h> #include <yaml-cpp/yaml.h> #include "xfitter_pars.h" @@ -27,34 +27,25 @@ using namespace std; -// extern struct ord_scales { -// double datasetmur[150]; -// double datasetmuf[150]; -// int datasetiorder[150]; -// } cscales_; - // Global variable to hold current alphaS std::function<double(double const& Q)> gAlphaS; double alphaS(double const& 
Q) { - return gAlphaS(Q); + return gAlphaS(Q); } // also fortran interface extern "C" { double alphasdef_(double const& Q) { - return gAlphaS(Q); + return gAlphaS(Q); } } -TheorEval::TheorEval(const int dsId, const int nTerms, const std::vector<string> stn, const std::vector<string> stt, +TheorEval::TheorEval(const int dsId, const int nTerms, const std::vector<string> stn, const std::vector<string> stt, const std::vector<string> sti, const std::vector<string> sts, const string& expr) : _dsId(dsId), _nTerms(nTerms) { - // _iOrd = cscales_.datasetiorder[_dsId-1]; - // _xmur = cscales_.datasetmur[_dsId-1]; - // _xmuf = cscales_.datasetmuf[_dsId-1]; for (int it= 0 ; it<nTerms; it++ ){ _termNames.push_back(stn[it]); _termTypes.push_back(stt[it]); @@ -93,7 +84,7 @@ TheorEval::initTheory() this->convertToRPN(sl); } -int +int TheorEval::assignTokens(list<tToken> &sl) { stringstream strexpr(_expr); @@ -105,29 +96,29 @@ TheorEval::assignTokens(list<tToken> &sl) while (1){ strexpr.get(c); if ( strexpr.eof() ) break; - if ( isspace(c) ) continue; // skip whitespaces. + if ( isspace(c) ) continue; // skip whitespaces. // Oh noes! doesn't work after fortran reading expression with spaces :(. if ( isdigit(c) ) { // process numbers term.assign(1,c); do { strexpr.get(c); if ( strexpr.eof() ) break; - if ( isdigit(c) || c=='.' ) { + if ( isdigit(c) || c=='.' ) { term.append(1,c); - } else if ( c=='E' || c=='e' ) { // read mantissa including sign in scientific notation - term.append(1,c); - strexpr.get(c); + } else if ( c=='E' || c=='e' ) { // read mantissa including sign in scientific notation + term.append(1,c); + strexpr.get(c); if ( strexpr.eof() ) break; - if ( isdigit(c) || c == '-' ){ - term.append(1,c); - } else { - cout << "Theory expression syntax error: " << _expr << endl; - return -1; - } - } else { - strexpr.putback(c); - break; - } + if ( isdigit(c) || c == '-' ){ + term.append(1,c); + } else { + cout << "Theory expression syntax error: " << _expr << endl; + return -1; + } + } else { + strexpr.putback(c); + break; + } } while (1); double dterm = atof(term.c_str()); @@ -140,44 +131,44 @@ TheorEval::assignTokens(list<tToken> &sl) term.assign(1,c); while (strexpr.get(c) ) { if ( isalnum(c) ) term.append(1,c); - else { - strexpr.putback(c); - break; - } + else { + strexpr.putback(c); + break; + } } if ( term == string("sum") ) { // special case for sum() function t.opr = 4; t.name = "sum"; - t.val = new valarray<double>(0., nb); - sl.push_back(t); - continue; + t.val = new valarray<double>(0., nb); + sl.push_back(t); + continue; } - + /* if ( term == string("avg") ) { // special case for avg() function t.opr = 4; t.name = "avg"; - t.val = new valarray<double>(0., nb); - sl.push_back(t); - continue; + t.val = new valarray<double>(0., nb); + sl.push_back(t); + continue; } */ - + vector<string>::iterator found_term = find(_termNames.begin(), _termNames.end(), term); - if ( found_term == _termNames.end() ) { + if ( found_term == _termNames.end() ) { cout << "Undeclared term " << term << " in expression " << _expr << endl; - return -1; + return -1; } else { t.opr = 0; t.name = term; - if ( _mapInitdTerms.find(term) != _mapInitdTerms.end()){ - t.val = _mapInitdTerms[term]; - } else { - t.val = new valarray<double>(0.,nb); - this->initTerm(int(found_term-_termNames.begin()), t.val); - _mapInitdTerms[term] = t.val; - } - sl.push_back(t); + if ( _mapInitdTerms.find(term) != _mapInitdTerms.end()){ + t.val = _mapInitdTerms[term]; + } else { + t.val = new valarray<double>(0.,nb); + 
this->initTerm(int(found_term-_termNames.begin()), t.val); + _mapInitdTerms[term] = t.val; + } + sl.push_back(t); } term.clear(); continue; @@ -218,7 +209,7 @@ TheorEval::convertToRPN(list<tToken> &sl) if ( t.opr >0 ) { while ( tknstk.size() > 0 && t.opr <= tknstk.top().opr ) { _exprRPN.push_back(tknstk.top()); - tknstk.pop(); + tknstk.pop(); } tknstk.push(t); @@ -226,7 +217,7 @@ TheorEval::convertToRPN(list<tToken> &sl) if ( t.opr == -1 ){ tknstk.push(t); delete t.val;} // left parenthesis if ( t.opr == -2 ){ // right parenthesis while ( tknstk.top().opr != -1 ) { - if ( tknstk.size() == 0 ) cout << "ERROR: Wrong syntax in theoretical expression: "<< _expr << endl; + if ( tknstk.size() == 0 ) cout << "ERROR: Wrong syntax in theoretical expression: "<< _expr << endl; _exprRPN.push_back(tknstk.top()); tknstk.pop(); } @@ -239,8 +230,8 @@ TheorEval::convertToRPN(list<tToken> &sl) _exprRPN.push_back(tknstk.top()); tknstk.pop(); } - - + + /* vector<tToken>::iterator it= _exprRPN.begin(); for (;it!=_exprRPN.end(); it++){ @@ -248,22 +239,19 @@ TheorEval::convertToRPN(list<tToken> &sl) } cout << endl; */ - + } int TheorEval::initTerm(int iterm, valarray<double> *val) { - + string term_type = _termTypes.at(iterm); if ( term_type == string("reaction")) { this->initReactionTerm(iterm, val); } else { - int id = 15102301; - char text[] = "S: Unknown term type in expression for term"; - std::cout << "Unknown term type in expression for term " << _termNames[iterm] << std::endl; - int textlen = strlen(text); - hf_errlog_(id, text, textlen); + std::cerr<<"[ERROR] Unknown term_type=\""<<term_type<<"\" in expression for term \""<<_termNames[iterm]<<'\"'<<std::endl; + hf_errlog(15102301,"S: Unknown term type, see stderr"); return -1; } } @@ -306,8 +294,6 @@ TheorEval::initReactionTerm(int iterm, valarray<double> *val) string term_source = _termSources.at(iterm); string term_type = _termTypes.at(iterm); string term_info = _termInfos.at(iterm); -// ReactionTheory *rt = ReactionTheoryDispatcher::getInstance().getReactionTheory(_termSources.at(iterm)); - // Re-define term-source if "use:" string is found: if ( term_source.find("use:") != std::string::npos ) { term_source = GetParamDS(term_source.substr(4),GetDSname(),_dsPars["FileIndex"]); @@ -321,22 +307,23 @@ TheorEval::initReactionTerm(int iterm, valarray<double> *val) ReactionTheory * rt; if ( gNameReaction.find(term_source) == gNameReaction.end()) { - void *theory_handler = dlopen((PREFIX+string("/lib/")+libname).c_str(), RTLD_NOW); - if (theory_handler == NULL) { - std::cout << dlerror() << std::endl; - string text = "F: Reaction shared library ./lib/" + libname + " not present for " +term_source + ". 
Check Reactions.txt file" ; - hf_errlog_(16120502,text.c_str(),text.size()); + string path_to_lib=PREFIX+string("/lib/")+libname; + void *theory_handler = dlopen(path_to_lib.c_str(), RTLD_NOW); + if (theory_handler == NULL) { + std::cerr<<"Failed to open shared library "<<path_to_lib<<" for "<<term_source<<"; error:\n" + <<dlerror()<<"\n Check that the correct library is given in Reactions.txt"<<std::endl; + hf_errlog(16120502,"F: Failed to open reaction shared library, see stderr for details"); } - + // reset errors dlerror(); - + create_t *dispatch_theory = (create_t*) dlsym(theory_handler, "create"); rt = dispatch_theory(); gNameReaction[term_source] = rt; - // First make sure the name matches: + // First make sure the name matches: if ( rt->getReactionName() == term_source) { string msg = "I: Use reaction "+ rt->getReactionName(); hf_errlog_(17041610+_dsId,msg.c_str(),msg.size()); @@ -355,7 +342,7 @@ TheorEval::initReactionTerm(int iterm, valarray<double> *val) rt->setxFitterParametersS(XFITTER_PARS::gParametersS); rt->setxFitterparametersVec(XFITTER_PARS::gParametersV); rt->setxFitterparametersYaml(XFITTER_PARS::gParametersY); - + // Override some global pars for reaction specific: if ( XFITTER_PARS::gParametersY[term_source] ) { rt->resetParameters(XFITTER_PARS::gParametersY[term_source]); @@ -367,24 +354,28 @@ TheorEval::initReactionTerm(int iterm, valarray<double> *val) //Retrieve evolution - xfitter::BaseEvolution* evo = xfitter::get_evolution(evoName); + xfitter::BaseEvolution* evo = xfitter::get_evolution(evoName); // rt->setEvolFunctions( &HF_GET_ALPHASQ_WRAP, &g2Dfunctions); - /// XXX + //This is not how we should pass PDFs and alphas + //pending TermData rewrite + //--Ivan gAlphaS = evo-> AlphaQCD(); rt->setEvolFunctions( &alphaS, &g2Dfunctions); + /* broken since 2.2.0 // simplify interfaces to LHAPDF: rt->setXFX(&HF_GET_PDFSQ_WRAP); // proton rt->setXFX(&HF_GET_PDFSQ_BAR_WRAP,"pbar"); // anti-proton rt->setXFX(&HF_GET_PDFSQ_N_WRAP,"n"); // neutron + */ // initialize - if (rt->initAtStart("") != 0) { + if (rt->atStart("") != 0) { // failed to init, somehow ... string text = "F:Failed to init reaction " +term_source ; hf_errlog_(16120803,text.c_str(),text.size()); }; - + } else { rt = gNameReaction[term_source]; } @@ -394,7 +385,7 @@ TheorEval::initReactionTerm(int iterm, valarray<double> *val) // Set bins rt->setBinning(_dsId*1000+iterm, &gDataBins[_dsId]); - + // split term_info into map<string, string> according to key1=value1:key2=value2:key=value3... 
map<string, string> pars = SplitTermInfo(term_info); LoadParametersFromYAML(pars,rt->getReactionName()); @@ -413,10 +404,10 @@ TheorEval::setBins(int nBinDim, int nPoints, int *binFlags, double *allBins) _binFlags.push_back(binFlags[ip]); } - for(int ibd = 0; ibd < nBinDim; ibd++){ - vector<double> bins; - bins.clear(); - for(int ip = 0; ip<nPoints; ip++){ + for(int ibd = 0; ibd < nBinDim; ibd++){ + vector<double> bins; + bins.clear(); + for(int ip = 0; ip<nPoints; ip++){ bins.push_back(allBins[ip*10 + ibd]); } _dsBins.push_back(bins); @@ -469,61 +460,54 @@ TheorEval::Evaluate(valarray<double> &vte ) stk.top() /= a; } else if ( it->name == string(".") ){ - valarray<double> temp; - valarray<double> result; - - valarray<double> a(stk.top()); - int size_a = a.size(); - stk.pop(); - valarray<double> b(stk.top()); - int size_b = b.size(); - - if(size_a % size_b == 0){ // Matrix * Vector - int size_return = size_a / size_b; - result.resize(size_return); - for ( int n = 0; n < size_b; n++){ - temp.resize(size_return); - temp = a[std::slice(n*size_return, size_return, 1)]; //creating nth colum vector - temp *= b[n]; - result += temp; - } - stk.top() = result; - }else if(size_b % size_a == 0){ // Transposed(Vector)*Matrix -> Transposed(Matrix) vector - int size_return = size_b / size_a; - result.resize(size_return); - for ( int n = 0; n < size_a; n++){ - temp.resize(size_return); - temp = b[std::slice(n, size_return, size_a)]; // creating nth row vector -> nth colum vector - temp *= a[n]; - result += temp; - } - stk.top() = result; - }else{ - char error[] = "ERROR: Dimensions do not match "; - cout<<error<<endl;} - - - /*if(it + 1 ->name == string("kmatrix")){//possible matrix matrix multiplication - int nb1 = ?;//TODO find dimensions of matrices for check and multiplication - int mb1 = ?; - int nb2 = ?; - int mb2 = ?; - result.resize(mb1*nb2); - for(int m = 0; m < mb1; m++){ - for(int n = 0; n < nb2; n++){ - temp.resize(nb1); - temp = M.slize(m*nb1,1, nb); - temp *= M2.slize(n, mb2, nb2); - result[m*nb1 + n] = temp.sum(); - } - } - }*/ - - - + valarray<double> temp; + valarray<double> result; + valarray<double> a(stk.top()); + int size_a = a.size(); + stk.pop(); + valarray<double> b(stk.top()); + int size_b = b.size(); + + if(size_a % size_b == 0){ // Matrix * Vector + int size_return = size_a / size_b; + result.resize(size_return); + for ( int n = 0; n < size_b; n++){ + temp.resize(size_return); + temp = a[std::slice(n*size_return, size_return, 1)]; //creating nth colum vector + temp *= b[n]; + result += temp; + } + stk.top() = result; + }else if(size_b % size_a == 0){ // Transposed(Vector)*Matrix -> Transposed(Matrix) vector + int size_return = size_b / size_a; + result.resize(size_return); + for ( int n = 0; n < size_a; n++){ + temp.resize(size_return); + temp = b[std::slice(n, size_return, size_a)]; // creating nth row vector -> nth colum vector + temp *= a[n]; + result += temp; + } + stk.top() = result; + }else{ + char error[] = "ERROR: Dimensions do not match "; + cout<<error<<endl;} + /*if(it + 1 ->name == string("kmatrix")){//possible matrix matrix multiplication + int nb1 = ?;//TODO find dimensions of matrices for check and multiplication + int mb1 = ?; + int nb2 = ?; + int mb2 = ?; + result.resize(mb1*nb2); + for(int m = 0; m < mb1; m++){ + for(int n = 0; n < nb2; n++){ + temp.resize(nb1); + temp = M.slize(m*nb1,1, nb); + temp *= M2.slize(n, mb2, nb2); + result[m*nb1 + n] = temp.sum(); + } + } + }*/ } - it++; } @@ -535,13 +519,13 @@ TheorEval::Evaluate(valarray<double> &vte ) 
//Normalised cross section if (_normalised) { - double integral = 0; - for (int bin = 0; bin < _binFlags.size(); bin++) - if (!(vte[bin] != vte[bin])) //protection against nan - integral += (_dsBins.at(1).at(bin) - _dsBins.at(0).at(bin)) * vte[bin]; - if (integral != 0) - for (int bin = 0; bin < _binFlags.size(); bin++) - vte[bin] /= integral; + double integral = 0; + for (int bin = 0; bin < _binFlags.size(); bin++) + if (!(vte[bin] != vte[bin])) //protection against nan + integral += (_dsBins.at(1).at(bin) - _dsBins.at(0).at(bin)) * vte[bin]; + if (integral != 0) + for (int bin = 0; bin < _binFlags.size(); bin++) + vte[bin] /= integral; } //vte /= _units; } @@ -556,15 +540,15 @@ TheorEval::getReactionValues() ReactionTheory* rt = (itm->first).first; int idTerm = (itm->first).second; map<string, valarray<double> > errors; - + int result = rt->compute(_dsId*1000+idTerm, *(itm->second), errors); - + if (result != 0) { string text = "F:(from TheorEval::getReactionValues) Failed to compute theory"; hf_errlog_(16081202,text.c_str(),text.size()); } } - + return 1; } @@ -664,17 +648,17 @@ const std::string GetParamDS(const std::string& ParName, const std::string& DSna std::string Val = Node["defaultValue"].as<string>(); if (Node[DSname]) { - Val = Node[DSname].as<string>(); + Val = Node[DSname].as<string>(); } if (Node[DSindex]) { - Val = Node[DSindex].as<string>(); + Val = Node[DSindex].as<string>(); } return Val; } else { string text = "F: missing value field for parameter " + ParName; - hf_errlog_(17041101,text.c_str(),text.size()); + hf_errlog_(17041101,text.c_str(),text.size()); return ""; } } diff --git a/src/Variant.cc b/src/Variant.cc new file mode 100644 index 0000000000000000000000000000000000000000..8fc8f3dee6ac4514d24581de84850b246826e3dd --- /dev/null +++ b/src/Variant.cc @@ -0,0 +1,108 @@ +#ifndef XFITTER_VARIANT +#define XFITTER_VARIANT +#include"Variant.h" +namespace XFITTER_PARS{ +Variant::Variant(): _type{None}{} +Variant::Variant(const double*p): _type{DoublePtr},_ptr{p}{} +Variant::Variant(const std::string&s): _type{String} ,_string{s}{} +Variant::Variant(const char*s): _type{String} ,_string{s}{} +Variant::Variant(int i): _type{Int} ,_int{i}{} +Variant::Variant(const std::vector<const double*>&v):_type{Array} ,_array{v}{} +Variant::Variant(const Variant&o):_type{o._type}{ + switch(_type){ + case None:break; + case DoublePtr:_ptr=o._ptr;break; + //One can't simply assign to initialize a string in union + //because assignment requires left value to be valid + //That's why we have this weird-looking construct-in-place + case String:new(&_string)std::string(o._string);break; + case Int:_int=o._int;break; + case Array:new(&_array)std::vector<const double*>(o._array);break; + } +} +Variant::~Variant(){ +switch(_type){ + case String:_string.std::string::~string();break; + case Array:_array.std::vector<const double*>::~vector<const double*>();break; + default:break; +} +} +Variant&Variant::operator=(const Variant&o){ + + if(_type==String){ + if(o._type==String){ + _string=o._string; + return *this; + //When type changes from string to something, we need to destroy the old string + }else _string.std::string::~string(); + }else if(_type==Array){ + if(o._type==Array){ + _array=o._array; + return *this; + //When type changes from array to something, we need to destroy the old array + }else _array.std::vector<const double*>::~vector<const double*>(); + } + _type=o._type; + switch(_type){ + case DoublePtr:_ptr=o._ptr;break; + case String:new(&_string)std::string(o._string);break; + case 
Int:_int=o._int;break; + case Array:new(&_array)std::vector<const double*>(o._array);break; + default:break; + } + return *this; +} +std::string bad_cast_message(const Variant&v,Variant::Type castTo){ + std::ostringstream ss; + ss<<"Failed to cast Variant "<<v<<" from "<<to_string(v.type())<<" to "<<to_string(castTo); + return ss.str(); +} +Variant::bad_cast::bad_cast(const Variant&v,Variant::Type to):std::runtime_error(bad_cast_message(v,to)){} +Variant::operator const double*()const{ + if(_type!=DoublePtr)throw bad_cast(*this,DoublePtr); + return _ptr; +} +Variant::operator std::string()const{ + if(_type==String)return _string; + if(_type==Int)return std::to_string(_int); + throw bad_cast(*this,String); +}; +Variant::operator int()const{ + switch(_type){ + case Int:return _int; + case String: + try{return std::stoi(_string); + }catch(std::invalid_argument&ex){ + break; + }catch(std::out_of_range&ex){ + break; + } + default:break; + } + throw bad_cast(*this,Int); +} +Variant::operator const std::vector<const double*>&()const{ + if(_type!=Array)throw bad_cast(*this,Array); + return _array; +} +const char*to_string(Variant::Type t){ + static const char*a[]={"None","DoublePtr","String","Int","Array"}; + return a[int(t)]; +} +std::ostream&operator<<(std::ostream&os,const Variant&v){ + if(v._type==Variant::String)return os<<v._string; + if(v._type==Variant::Int)return os<<v._int; + if(v._type==Variant::DoublePtr)return os<<v._ptr; + return os<<to_string(v._type); +} +std::ostream&operator<<(std::ostream&os,Variant::Type t){ + return os<<to_string(t); +} +Variant::Type Variant::type()const{return _type;} +bool Variant::isNone() const{return _type==None;} +bool Variant::isDoublePtr()const{return _type==DoublePtr;} +bool Variant::isString() const{return _type==String;} +bool Variant::isInt() const{return _type==Int;} +bool Variant::isArray() const{return _type==Array;} +} +#endif diff --git a/src/c_interface.f b/src/c_interface.f index de3b09993ea46ea405bbf23246b19c1bf6128523..954970216676d43ee1c5a010cdd27905216ca371 100644 --- a/src/c_interface.f +++ b/src/c_interface.f @@ -22,7 +22,7 @@ c_itheory = itheory - c_dobands= dobands + !c_dobands= dobands Broken since 2.2.0 c_hf_mass(1)= hf_mass(1) c_hf_mass(2)= hf_mass(2) c_hf_mass(3)= hf_mass(3) @@ -47,7 +47,8 @@ integer npari, nparx, istat get_nmembers=0 - +C Broken since 2.2.0 +#if 0 if(PDF_DECOMPOSITION.eq."LHAPDF") then #ifndef LHAPDF_ENABLED call hf_errlog(29061521, "S: Call to lhapdf function but"// @@ -72,4 +73,5 @@ get_nmembers= npari + 1 ! 
central + vars endif endif +#endif end diff --git a/src/chi2scan.cc b/src/chi2scan.cc index b8f5815b0f821f2b5f4e8f2ce462901db99c8125..92e55c8466e5b579c1acdebd3a5da49eb5eb0589 100644 --- a/src/chi2scan.cc +++ b/src/chi2scan.cc @@ -1,3 +1,10 @@ +/* +The whole chi2scan seems to be broken since 2.2.0 +I commented everything out, maybe we could recover this code later +Chi2scan should probably work similar to Profiler +--Ivan +*/ +#if 0 #include "xfitter_cpp.h" #include "dimensions.h" @@ -1131,3 +1138,4 @@ void chi2_scan_() cout << endl; } #endif +#endif diff --git a/src/dependent_pars.cc b/src/dependent_pars.cc index 40b8d0ed12f149b73f0c6d16a02bcbb4cc628805..6656d4953a8511f539cda8e3d4ef6f553cd23a85 100644 --- a/src/dependent_pars.cc +++ b/src/dependent_pars.cc @@ -2,11 +2,9 @@ #include"tinyexpr.h" #include"xfitter_pars.h" #include"xfitter_cpp_base.h" +#include"expression_utils.h" #include<iostream> -#include<vector> #include<map> -#include<string> -#include<algorithm> using namespace std; namespace xfitter{ //The Constraint class is used for dependent parameters @@ -49,37 +47,6 @@ Constraint::~Constraint(){ void Constraint::atIteration(){ *parameter=te_eval(expr); } -//Return a list of all parameter names present in expression, given as string s, excluding duplicates -//Parameter is defined as string of alphanumeric characters and '_', not beginning with a digit, and not a builtin -void extractParameterNames(const string&s,vector<string>&ret){ - //fills ret - static const vector<string>builtins={"abs","acos","asin","atan","atan2","ceil","cos","cosh","e","exp","fac","floor","ln","log","log10","ncr","npr","pi","pow","sin","sinh","sqrt","tan","tanh"}; - const char*cstr=s.c_str(); - const char*p=cstr; - //p is pointer to next character to be read - while(true){ - while(true){ - char c=*p; - if((c>='a'&&c<='z')||(c>='A'&&c<='Z')||(c=='_'))break; - if(c==0)return; - ++p; - } - size_t p0=size_t(p-cstr);//position of first character of parameter name - ++p; - while((*p>='a'&&*p<='z')||(*p>='A'&&*p<='Z')||(*p=='_')||(*p>='0'&&*p<='9'))++p; - string name=s.substr(p0,size_t(p-cstr)-p0); - //check if this name is a builtin - if(binary_search(builtins.begin(),builtins.end(),name))goto skip_append; - //check if this name is already in list - for(const string&e:ret)if(e==name)goto skip_append; - //else append - ret.push_back(name); - skip_append: - if(*p==0)return; - ++p; - } - //unreachable -} //array of all constraints, sorted in correct order vector<Constraint*>constraints; /* diff --git a/src/error_bands_pumplin.f b/src/error_bands_pumplin.f index 6d1acf92e1bac5790a8b95c328b4ad4ac6404cd9..f2d1fca2266d67a3d8d684b3be903231c0863a3b 100644 --- a/src/error_bands_pumplin.f +++ b/src/error_bands_pumplin.f @@ -4,7 +4,6 @@ implicit none #include "steering.inc" -#include "pdfparam.inc" #include "endmini.inc" #include "alphas.inc" #include "thresholds.inc" @@ -38,7 +37,7 @@ C SG: x-dependent fs: double precision fs0 double precision fshermes - + character*20 parname integer ind,ind2,jext,iint @@ -47,23 +46,21 @@ C SG: x-dependent fs: integer iunint(nparmax) ! internal param. number integer iexint(nparmax) ! external param. number - double precision - $ parval, parerr,parlolim,parhilim + double precision parval,parerr,parlolim,parhilim integer mpar double precision chichi - double precision chi2data_theory ! function + double precision chi2data_theory !function double precision shift -C Function - double precision GetUmat + double precision GetUmat !function ! 
double precision DecorVarShift C for theory errors: double precision, allocatable :: TheoVars(:,:,:) C--------------------------------------------------------------- - + C C Fix relation between internal and external params. C @@ -100,23 +97,17 @@ C npar = MNE !> npar runs over external parameters. -C -C Allocate -C allocate(TheoVars(NTOT,2,mpar)) - - - C C Loop over de-correlated (diagonalised) errors: C do j=1,mpar - + jext = iexint(j) base = TRIM(OutDirName)//'/pdfs_q2val_'//tag(j) idx = index(base,' ')-1 - + base2 = TRIM(OutDirName)//'/pdfs_'//tag(j) idx2 = index(base2,' ')-1 @@ -141,10 +132,10 @@ C C -C Shift variable paramters by the j-th de-correlated error: +C Shift parameters by the j-th de-correlated error: C do i=1,npar - a(i) = pkeep(i) + a(i) = pkeep(i) iint = iunint(i) if (iint.gt.0) then if(doOffset) then @@ -156,23 +147,7 @@ C endif enddo ! i - -C -C Decode "a". 2 stands for IFLag = 2, which is a normal iteration. -C - call PDF_param_iteration(a,2) - -C -C Fix some pars by sum-rules: -C - -C 23 Apr 2017: replace by chi2data_theory(2) (needs checks, potentially) -C -c kflag = 0 -c call SumRules(kflag) -c call Evolution -c -C end replace 23 Apr 2017 + call copy_minuit_extrapars(a) !Set shifted parameters ifcncount = ifcncount+1 chichi = chi2data_theory(2) @@ -192,8 +167,8 @@ C call save_data_lhapdf6(j*2) call error_band_action(j*2) endif - - + + enddo ! shift_dir enddo ! j @@ -204,24 +179,24 @@ C write out once more (with theory errors filled) Theo_mod = TheoModFCN3 ALPHA_Mod = ALphaModFCN3 - call GetTheoErrorsAsym(TheoVars,ntot,mpar) + call GetTheoErrorsAsym(TheoVars,ntot,mpar) call writefittedpoints deallocate(TheoVars) - + return end C----------------------------------------------------------- C> @brief Compute asymmetric uncertainties for theory predictions based on eigenvector variations, asymmetric hessian -C +C C @param TheoVars 3-D array of theory variations shaped as ndata, 2, nvector C @param nd number of data points, ndata C @param nv number of eigenvectors, nvector C----------------------------------------------------------- subroutine GetTheoErrorsAsym(TheoVars,nd,nv) implicit none - integer nd,nv + integer nd,nv double precision TheoVars(nd,2,nv) #include "ntot.inc" #include "theo.inc" @@ -248,14 +223,14 @@ C----------------------------------------- C----------------------------------------------------------- C> @brief Compute asymmetric uncertainties for theory predictions based on eigenvector variations, symmetric hessian -C +C C @param TheoVars 3-D array of theory variations shaped as ndata, nvector C @param nd number of data points, ndata C @param nv number of eigenvectors, nvector C----------------------------------------------------------- subroutine GetTheoErrorsSym(TheoVars,nd,nv) implicit none - integer nd,nv + integer nd,nv double precision TheoVars(nd,nv) #include "ntot.inc" #include "theo.inc" @@ -294,9 +269,9 @@ C----------------------------------------- C integer iunint(MNE) ! internal param. number integer iexint(MNE) ! external param. number - double precision + double precision $ parval, parerr,parlolim,parhilim - + double precision a(MNE) integer idx,idx2,iint,kflag @@ -335,7 +310,7 @@ C call MNCOMD(fcn,'ITERATE 10',icond,0) C Check the covariance matrix: call MNSTAT(fmin, fedm, errdef, npari, nparx, istat) print *,'Covariance matrix status =',istat,npari - + if (istat .ne. 
3) then call hf_errlog(16042702, $ 'S:Problems with error matrix, can not produce bands') @@ -366,24 +341,24 @@ C Check the covariance matrix: Allocate(Amat(Npari, Npari)) Allocate(Eigenvalues(Npari)) - + if (ReadParsFromFile) then call ReadParCovMatrix(CovFileName, Amat, Npari) else call MNEMAT( Amat, Npari) endif - + C Diagonalize: call MyDSYEVD( Npari, Amat, Npari, Eigenvalues, ifail) C scale the matirx do i=1,npari do j=1,npari - Amat(j,i) = Amat(j,i) * sqrt(Eigenvalues(i)) + Amat(j,i) = Amat(j,i) * sqrt(Eigenvalues(i)) enddo enddo - + allocate(TheoVars(NTOT,Npari)) C @@ -393,10 +368,10 @@ C jext = iexint(j) base = TRIM(OutDirName)//'/pdfs_q2val_'//tag(j) idx = index(base,' ')-1 - + base2 = TRIM(OutDirName)//'/pdfs_'//tag(j) idx2 = index(base2,' ')-1 - + if (idx.gt.0) then name = base(1:idx)//'s_' name2 = base2(1:idx2)//'s.lhgrid' @@ -404,33 +379,28 @@ C name = base//'s_' name2 = base2//'s.lhgrid' endif - - -C -C Shift variable paramters by the j-th de-correlated error: + + +C +C Shift parameters by the j-th de-correlated error: C do i=1,MNE - a(i) = pkeep(i) + a(i) = pkeep(i) iint = iunint(i) if (iint.gt.0) then a(i) = a(i) + Amat(iint,j) -C a(i) = a(i) + GetUmat(iint,j) +C a(i) = a(i) + GetUmat(iint,j) endif - enddo ! i + enddo + call copy_minuit_extrapars(a) !Set shifted parameters -C -C Decode "a". 2 stands for IFLag = 2, which is a normal iteration. -C - call PDF_param_iteration(a,2) - ifcncount = ifcncount+1 chichi = chi2data_theory(2) ! sum-rules and evolution are inside - - TheoVars(:,j) = THEO ! save for error calc. + TheoVars(:,j) = THEO ! save for error calc. -C +C C Write results out: C open (76,file=name2,status='unknown') @@ -449,13 +419,14 @@ C write out once more (with theory errors filled) Theo_mod = TheoModFCN3 ALPHA_Mod = ALphaModFCN3 - call GetTheoErrorsSym(TheoVars,ntot,Npari) + call GetTheoErrorsSym(TheoVars,ntot,Npari) call writefittedpoints deallocate(TheoVars) end !> read parameter values from the pars out file +! Probably broken since 2.2.0 --Ivan subroutine ReadPars(FileName, pvals) implicit none character*(*) FileName @@ -469,9 +440,9 @@ C write out once more (with theory errors filled) double precision parval, parerr, parlolim, parhilim C------------------------------------------------ print *,'Reading parameter values from '//trim(FileName) - open (51,file=FileName, status='old',err=3) + open (51,file=FileName, status='old',err=3) 1 read (51,'(A120)',end=2, err=4) buff - + call MNPARS(buff,IStatus) goto 1 C Decode @@ -484,7 +455,7 @@ C Decode $ parhilim,ii) pvals(ind) = parval enddo - + return @@ -502,12 +473,12 @@ C Decode integer i,j C--------------------------------------------------- open (51, file=FileName, status='old', err=1) - + print *,npars do i=1,NPars read (51,*,err=2,end=3) ( Cov(j,i),j=1,NPars ) - print '(20E10.2)' ,( Cov(j,i),j=1,NPars ) - enddo + print '(20E10.2)' ,( Cov(j,i),j=1,NPars ) + enddo print *,'Read covariance matrix from '//trim(FileName) close (51) return diff --git a/src/evolution.f b/src/evolution.f deleted file mode 100644 index 3976d5b97021348cf79ae5707ddf2f10a92d389f..0000000000000000000000000000000000000000 --- a/src/evolution.f +++ /dev/null @@ -1,580 +0,0 @@ - subroutine Evolution - -* -* DOALL = true : also evolves the Uplus and Dplus distribution. -* This is not needed to calculate the DIS cross-sections, -* but needed when one store all pdfs.
-* - -c implicit double precision (a-h,o-z) - implicit none -#include "steering.inc" -#include "pdfparam.inc" -#include "thresholds.inc" -#include "alphas.inc" -#include "couplings.inc" -#include "ntot.inc" -#include "datasets.inc" -c common/thresholds/q0,qc,qb - - double precision func0,func1,func24,func22 - - external func0 !input parton dists: iparam=0 - external func1 !input parton dists: iparam=1 - external func24 !input parton dists: iparam=24 - external func22 !input parton dists: iparam=22 - external func22text ! text input - external func30 - - double precision def0, def1, def24, def22,pdfv,glu,glu1,x - double precision def30 - - dimension pdfv(-6:6) - dimension def22(-6:6,12) !flavor composition - dimension def1(-6:6,12) !flavor composition - dimension def0(-6:6,12) !flavor composition - dimension def24(-6:6,12) !flavor composition - dimension def30(-6:6,12) !flavor composition - - integer iq0, iqfrmq - double precision eps -cjt test - integer nfin - double precision q2c,q2b - - data nfin/0/ - data q2c/3.D0/, q2b/25.D0/, q2b/200.D0/ !thresh and mu20 - -* --------------------------------------------------------- -* declaration related to alphas -* for RT code, transfer alpha S -* --------------------------------------------------------- - double precision alphaszero - -* --------------------------------------------------------- -* Subroutine of the external evolution codes -* --------------------------------------------------------- - external LHAPDFsubr - external APFELsubr - external APFELsubrPhoton - external QEDEVOLsubr - - double precision epsi - double precision hf_get_alphas,asRef - -* --------------------------------------------------------- -* Save scale in a common to avoid APFEL to evolve if the -* scale does not change. -* --------------------------------------------------------- - double precision q2p - common / PrevoiusQ / q2p - - integer NextraSets - -cjt test -cv====== -cv Remark: -cv need to make it working for h12k parametrisation -cv ---- - - data def0 / ! just a copy of def22 -C-- tb bb cb sb ub db g d u s c b t -C-- -6 -5 -4 -3 -2 -1 0 1 2 3 4 5 6 - + 0., 0., 0., 0., 0.,-1., 0., 1., 0., 0., 0., 0., 0., !dval - + 0., 0., 0., 0.,-1., 0., 0., 0., 1., 0., 0., 0., 0., !uval - + 0., 0., 0., 1., 0., 0., 0., 0., 0., 1., 0., 0., 0., !s+sbar - + 0., 0., 1., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., !Ubar - + 0., 0., 0., 1., 0., 1., 0., 0., 0., 0., 0., 0., 0., ! Dbar - + 0., 0., 0., -1., 0., 0., 0., 0., 0., 1., 0., 0., 0., !s-sbar - + 78*0. / - - - - - data def1 / -C-- tb bb cb sb ub db g d u s c b t -C-- -6 -5 -4 -3 -2 -1 0 1 2 3 4 5 6 - + 0., 0., 0., 0., 0., 0., 0., 1., 0., 1., 0., 0., 0., !D - + 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 1., 0., 0., !U - + 0., 0., 1., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., !Ubar - + 0., 0., 0., 1., 0., 1., 0., 0., 0., 0., 0., 0., 0., !Dbar - + 0., 0., 0., -1., 0., 0., 0., 0., 0., 1., 0., 0., 0., !s-sbar - + 0., 0., 0., 1., 0., 0., 0., 0., 0., 1., 0., 0., 0., !s+sbar - + 78*0. / - - - data def24 / -cccvC-- tb bb cb sb ub db g d u s c b t -cvC-- -6 -5 -4 -3 -2 -1 0 1 2 3 4 5 6 - + 0., 0., 0., 0., 0.,-1., 0., 1., 0., 0., 0., 0., 0., !dval - + 0., 0., 0., 0.,-1., 0., 0., 0., 1., 0., 0., 0., 0., !uval - + 0., 2., 2., 2., 2., 2., 0., 0., 0., 0., 0., 0., 0., !sea - + 0., 0., 0., 0., -1., 1., 0., 0., 0., 0., 0., 0., 0., !delta - + 0., 0., 0., -1., 0., 0., 0., 0., 0., 1., 0., 0., 0., !s-sbar - + 0., 0., 0., 1., 0., 0., 0., 0., 0., 1., 0., 0., 0., !s+sbar - + 78*0. 
/ - - data def22 / -C-- tb bb cb sb ub db g d u s c b t -C-- -6 -5 -4 -3 -2 -1 0 1 2 3 4 5 6 - + 0., 0., 0., 0., 0.,-1., 0., 1., 0., 0., 0., 0., 0., !dval - + 0., 0., 0., 0.,-1., 0., 0., 0., 1., 0., 0., 0., 0., !uval - + 0., 0., 0., 1., 0., 0., 0., 0., 0., 1., 0., 0., 0., !s+sbar - + 0., 0., 1., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., !Ubar - + 0., 0., 0., 1., 0., 1., 0., 0., 0., 0., 0., 0., 0., ! Dbar - + 0., 0., 0., -1., 0., 0., 0., 0., 0., 1., 0., 0., 0., !s-sbar - + 78*0. / - - - - data def30 / -C-- tb bb cb sb ub db g d u s c b t -C-- -6 -5 -4 -3 -2 -1 0 1 2 3 4 5 6 - + 0., 0., 0., 0., 0., 1., 0., 1., 0., 0., 0., 0., 0., !d+ - + 0., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 0., !u+ - + 0., 0., 0., 1., 0., 0., 0., 0., 0., 1., 0., 0., 0., !s+ - + 0., 0., 0., 0., 0., -1., 0., 1., 0., 0., 0., 0., 0., !d- - + 0., 0., 0., 0., -1., 0., 0., 0., 1., 0., 0., 0., 0., !u- - + 0., 0., 0., -1., 0., 0., 0., 0., 0., 1., 0., 0., 0., !s- - + 78*0. / - - integer i -* -* Set parameters of the initial scale PDFs to be used by MELA -* - if (mod(hfscheme,10).eq.6) then - call SetxFitterParametersMELA(parubar,pardbar, - 1 paruval,pardval, - 2 parglue, - 3 fstrange,fcharm) - endif - -c call grpars(nx,xmi,xma,nq,qmi,qma,nord) - - q0=starting_scale - -c iqc = iqfrmq(qc) !charm threshold -c iqb = iqfrmq(qb) !bottom threshold -c call setcbt(nfin,iqc,iqb,999) !thesholds in the vfns - - iq0 = iqfrmq(q0) !starting scale - -* -* Initialize alphas -* - if (itheory.eq.0.or.itheory.eq.11) then - call setalf(dble(alphas),Mz*Mz) - else -* Make sure that alphas is correctly set at the Z mass - call SetMaxFlavourPDFs(5) - call SetMaxFlavourAlpha(5) - call SetAlphaQCDRef(dble(alphas),dble(Mz)) - asref = HF_Get_alphas(dble(starting_scale)) - call SetAlphaQCDRef(asref,dsqrt(dble(starting_scale))) - endif - alphaSzero = hf_get_alphas(1D0) - call RT_SetAlphaS(alphaSzero) - - NextraSets = 0 - if (ExtraPdfs) then - NextraSets = 1 - endif - -C ---- LHAPDF ---- - if(IPDFSET.eq.5) then - call EXTPDF(LHAPDFsubr,IPDFSET,NextraSets,dble(0.001),epsi) - return -C ---- APFEL ---- - elseif (IPDFSET.eq.7) then - if(itheory.eq.35)then - q2p = starting_scale - call SetPDFSet("external") - call EXTPDF(APFELsubrPhoton,IPDFSET,1,dble(0.001),epsi) - else -* 6 flavours (default) - q2p = starting_scale - call SetPDFSet("external") - call SetMaxFlavourPDFs(6) - call SetMaxFlavourAlpha(6) - call EXTPDF(APFELsubr,IPDFSET,0,dble(0.001),epsi) -C -C If the use of the H-VFNS is required, enable the evolutions -C with different values of NFmax -C - if(UseHVFNS)then -* 5 flavours - q2p = starting_scale - call SetPDFSet("external") - call SetMaxFlavourPDFs(5) - call SetMaxFlavourAlpha(5) - call EXTPDF(APFELsubr,IPDFSET+1,0,dble(0.001),epsi) -* 4 flavours (redefine alphas) - q2p = starting_scale - call SetPDFSet("external") - call SetMaxFlavourPDFs(4) - call SetMaxFlavourAlpha(4) - call EXTPDF(APFELsubr,IPDFSET+2,0,dble(0.001),epsi) -* 3 flavours (redefine alphas) - q2p = starting_scale - call SetPDFSet("external") - call SetMaxFlavourPDFs(3) - call SetMaxFlavourAlpha(3) - call EXTPDF(APFELsubr,IPDFSET+3,0,dble(0.001),epsi) - endif - endif - return -C ---- QEDEVOL ---- - elseif (IPDFSET.eq.8) then - call qedevol_main -CC call EXTPDF(QEDEVOLsubr,IPDFSET,1,dble(0.001),epsi) - return - endif - -cv === - if (PDF_DECOMPOSITION.eq.'LHAPDF') then - call evolfg(1,func0,def0,iq0,eps) !evolve all pdf's: LHAPDF - elseif (PDF_DECOMPOSITION.eq.'QCDNUM_GRID') then - call evolfg(1,func22text,def22,iq0,eps) - - elseif (Index(PDF_DECOMPOSITION,'D_U_Dbar_Ubar').gt.0) then ! 
D,U,Dbar,Ubar - call evolfg(1,func1,def1,iq0,eps) !evolve all pdf's: H1 - - elseif (Index(PDF_DECOMPOSITION,'Sea').gt.0) then - call evolfg(1,func24,def24,iq0,eps) !evolve all pdf's: ZEUS - - elseif (PDF_DECOMPOSITION.eq.'Diffractive') then - call evolfg(1,func30,def30,iq0,eps) !evolve all pdf's: ZEUS diffractive (hard Pomeron) - - elseif (Index(PDF_DECOMPOSITION,'Dbar_Ubar').gt.0) then - call evolfg(1,func22,def22,iq0,eps) ! uv, dv, Ubar, Dbar (and also strange) - - else - print *,'Unknown PDF Decomposition: '//PDF_DECOMPOSITION - print *,'Stop in evolution' - call HF_Stop - endif - - return - end - -* ---------------------------------------------------- - double precision function func0(id,x) -* ---------------------------------------------------- - implicit double precision (a-h,o-z) -#include "steering.inc" - - double precision pdfval, q0 - dimension pdfval(-6:6) - - q0=sqrt(starting_scale) - - call evolvePDF(x, q0, pdfval) - - if (id.eq.0) func0=pdfval(0) - if (id.eq.1) func0=pdfval(1)-pdfval(-1) - if (id.eq.2) func0=pdfval(2)-pdfval(-2) - if (id.eq.3) func0=pdfval(3)+pdfval(-3) - if (id.eq.4) func0=pdfval(-2)+pdfval(-4) - if (id.eq.5) func0=pdfval(-3)+pdfval(-1) - if (id.eq.6) func0=pdfval(3)-pdfval(-3) - - return - end - - -* ---------------------------------------------------- - double precision function func1(id,x) -* ---------------------------------------------------- - implicit double precision (a-h,o-z) -#include "pdfparam.inc" - - if (id.eq.0) func1=gluon(x) - if (id.eq.1) func1=H1D(x) - if (id.eq.2) func1=H1U(x) - if (id.eq.3) func1=Ubar(x) - if (id.eq.4) func1=Dbar(x) - if (id.eq.6) func1=2*qstrange(x) - if (id.eq.5) func1=0.d0 - - return - end - - -* ---------------------------------------------------- - double precision function func22(id,x) -* ---------------------------------------------------- - implicit double precision (a-h,o-z) -#include "pdfparam.inc" -#include "steering.inc" - - func22 = 0.D0 - if (id.eq.0) func22=gluon(x) - if (id.eq.1) func22=dval(x) - if (id.eq.2) func22=uval(x) - if (id.eq.3) func22=2*qstrange(x) - if (id.eq.4) func22=ubar(x) - if (id.eq.5) func22=dbar(x) - if (id.eq.6) func22=0.d0 - - return - end - - -* ---------------------------------------------------- - double precision function func22text(id,x) -* ---------------------------------------------------- - implicit none - integer id - double precision x - double precision pdf_from_text -C---------------------------- - func22text = pdf_from_text(x,id) - - return - end - - -* ---------------------------------------------------- - double precision function func24(id,x) -* ---------------------------------------------------- - - implicit double precision (a-h,o-z) -#include "pdfparam.inc" - - - if (id.eq.0) func24=gluon(x) - if (id.eq.1) func24=dval(x) - if (id.eq.2) func24=uval(x) - if (id.eq.3) func24=sea(x) - if (id.eq.4) func24=dbmub(x) - if (id.eq.5) func24=0.d0 - if (id.eq.6) func24=0.d0 - - return - end - - - -* ---------------------------------------------------- - double precision function func30(id,x) -* ---------------------------------------------------- - - implicit double precision (a-h,o-z) -#include "pdfparam.inc" - - PARAMETER(ParDumpFactor=1.d-3) - - dfac = dexp(-ParDumpFactor/(1.00001d0-x)) - func30 = 0.D0 - if (id.eq.0) then -*ws: nchebglu in pdf_param.f must be 0 -*ws: initialized to 0 in read_steer.f - func30 = gluon(x)*dfac - elseif (id.eq.1.or.id.eq.2.or.id.eq.3) then -*ws: NPOLYVAL in pdf_param.f must be 0 -*ws: initialized to 0 in read_steer.f - func30 = 2*Uval(x)*dfac 
- else - endif - return - end -* -************************************************************************ -* -* Subroutine that defines the PDFs to be evolved with APFEL. -* (predefined name) -* -************************************************************************ - subroutine ExternalSetAPFEL(x,q0,xf) -* - implicit none -#include "steering.inc" -** -* Input Variables -* - double precision x - double precision q0 -** -* Internal Variables -* - integer ipdf - double precision gluon - double precision pdf_from_text - double precision qstrange,Ubar,Dbar,H1U,H1D - double precision sea,dbmub,dval,uval - double precision photon - double precision dfac,ParDumpFactor - parameter(ParDumpFactor=1.d-3) -** -* Output Variables -* - double precision xf(-6:7) -* -* Set PDFs to zero -* - do ipdf=-6,7 - xf(ipdf) = 0d0 - enddo - if(x.gt.1d0) x = 1d0 -* -* Construct PDFs addording to the PDF decomposition -* - if(PDF_DECOMPOSITION.eq.'LHAPDF')then -c q0 = sqrt(starting_scale) - call evolvePDF(x, q0, xf) - - elseif(PDF_DECOMPOSITION.eq.'QCDNUM_GRID')then - xf(-3) = ( pdf_from_text(x,3) - pdf_from_text(x,6) ) / 2d0 - xf(-2) = pdf_from_text(x,4) - xf(-1) = pdf_from_text(x,5) - xf(0) = pdf_from_text(x,0) - xf(1) = pdf_from_text(x,1) - pdf_from_text(x,5) - xf(2) = pdf_from_text(x,2) - pdf_from_text(x,4) - xf(3) = ( pdf_from_text(x,3) + pdf_from_text(x,6) ) / 2d0 - - elseif(Index(PDF_DECOMPOSITION,'D_U_Dbar_Ubar').gt.0)then ! D,U,Dbar,Ubar - xf(-3) = qstrange(x) - xf(-2) = Ubar(x) - xf(-1) = Dbar(x) - xf(0) = gluon(x) - xf(1) = H1D(x) - xf(-3) - xf(2) = H1U(x) - xf(3) = xf(-3) - - elseif(Index(PDF_DECOMPOSITION,'Sea').gt.0)then - xf(-2) = sea(x) / 4d0 - dbmub(x) / 2d0 - xf(-1) = sea(x) / 4d0 + dbmub(x) / 2d0 - xf(0) = gluon(x) - xf(1) = dval(x) + xf(-1) - xf(2) = uval(x) + xf(-2) - - elseif(PDF_DECOMPOSITION.eq.'Diffractive')then - dfac = dexp(-ParDumpFactor/(1.00001d0-x)) -* - xf(-3) = dfac * Uval(x) - xf(-2) = xf(-3) - xf(-1) = xf(-3) - xf(0) = dfac * gluon(x) - xf(1) = xf(-3) - xf(2) = xf(-3) - xf(3) = xf(-3) - - elseif(Index(PDF_DECOMPOSITION,'Dbar_Ubar').gt.0)then - xf(-3) = qstrange(x) - xf(-2) = ubar(x) - xf(-1) = dbar(x) - xf(-3) - xf(0) = gluon(x) - xf(1) = dval(x) + xf(-1) - xf(2) = uval(x) + xf(-2) - xf(3) = xf(-3) - - else - print *,'Unknown PDF Decomposition: '//PDF_DECOMPOSITION - print *,'Stop in evolution' - call HF_Stop - endif -* -* Photon PDF -* - if(itheory.eq.35) xf(7) = photon(x) -* - return - end - - - double precision function LHAPDFsubr(ipdf,x, qmu2,first) -C--------H-A----------------------------------------------- -C -C External PDF reading for QCDNUM -C -C-------------------------------------------------------- - - implicit double precision (a-h,o-z) - dimension xf(-6:7) -#include "steering.inc" - logical first - if ( ExtraPdfs ) then - call evolvePDFphoton(x, sqrt(qmu2), xf, xf(7)) - else - call evolvePDF(x, sqrt(qmu2), xf) -! print*,"test PDF",x, sqrt(qmu2), xf(0) - endif - if(first) LHAPDFsubr = 0.D0 - if(ipdf.eq. 0) LHAPDFsubr = xf(0) - if(ipdf.eq. 1) LHAPDFsubr = xf(1) - if(ipdf.eq. 2) LHAPDFsubr = xf(2) - if(ipdf.eq. 3) LHAPDFsubr = xf(3) - if(ipdf.eq. 4) LHAPDFsubr = xf(4) - if(ipdf.eq. 5) LHAPDFsubr = xf(5) - if(ipdf.eq. 6) LHAPDFsubr = xf(6) - if(ipdf.eq. -1) LHAPDFsubr = xf(-1) - if(ipdf.eq. -2) LHAPDFsubr = xf(-2) - if(ipdf.eq. -3) LHAPDFsubr = xf(-3) - if(ipdf.eq. -4) LHAPDFsubr = xf(-4) - if(ipdf.eq. -5) LHAPDFsubr = xf(-5) - if(ipdf.eq. -6) LHAPDFsubr = xf(-6) - -! 
end if - return - end - -c -------------------------- - Subroutine APFELsubr(x, qmu2, xf) -C------------------------------------------------------- -C -C External PDF reading for APFEL -C -C-------------------------------------------------------- - implicit none -* -#include "steering.inc" -* - integer i - double precision x,qmu2 - double precision xf(-6:6) - - double precision q2p - common / PrevoiusQ / q2p -* -* Perform evolution with APFEL only if the final scale has changed -* - if(qmu2.ne.q2p)then - call EvolveAPFEL(dsqrt(q2p),dsqrt(qmu2)) - call SetPDFSet("apfel") - endif -* - call xPDFall(x,xf) - q2p = qmu2 -* - return - end -* - Subroutine APFELsubrPhoton(x, qmu2, xf) -C------------------------------------------------------- -C -C External PDF reading for APFEL (including the photon) -C -C-------------------------------------------------------- - implicit none -* -#include "steering.inc" -* - double precision x,qmu2 - double precision xf(-6:7) - - double precision q2p - common / PrevoiusQ / q2p -* -* Perform evolution with APFEL only if the final scale has changed -* - if(qmu2.ne.q2p)then -c call EvolveAPFEL(dsqrt(q2p),dsqrt(qmu2)) -c call SetPDFSet("apfel") - call EvolveAPFEL(dsqrt(dble(starting_scale)),dsqrt(qmu2)) - endif -* - call xPDFallPhoton(x,xf) - q2p = qmu2 -* - return - end diff --git a/src/expression_utils.cc b/src/expression_utils.cc new file mode 100644 index 0000000000000000000000000000000000000000..eebc5a5e91bd911d9aa88b7dc83862e89c89a79c --- /dev/null +++ b/src/expression_utils.cc @@ -0,0 +1,35 @@ +#include"expression_utils.h" +#include<algorithm> +using namespace std; +namespace xfitter{ +//Return a list of all parameter names present in expression, given as string s, excluding duplicates +//Parameter is defined as string of alphanumeric characters and '_', not beginning with a digit, and not a builtin or "x" +void extractParameterNames(const string&s,vector<string>&ret){ + //fills ret + static const vector<string>ignored={"abs","acos","asin","atan","atan2","ceil","cos","cosh","e","exp","fac","floor","ln","log","log10","ncr","npr","pi","pow","sin","sinh","sqrt","tan","tanh","x"}; + const char*p=s.c_str(); + //p is pointer to next character to be read + while(true){ + while(true){ + char c=*p; + if((c>='a'&&c<='z')||(c>='A'&&c<='Z')||(c=='_'))break; + if(c==0)return; + ++p; + } + const char*b=p;//beginning of substring + ++p; + while((*p>='a'&&*p<='z')||(*p>='A'&&*p<='Z')||(*p=='_')||(*p>='0'&&*p<='9'))++p; + string name=string(b,p-b); + //check if this name should be skipped + if(binary_search(ignored.begin(),ignored.end(),name))goto skip_append; + //check if this name is already in list + for(const string&e:ret)if(e==name)goto skip_append; + //else append + ret.push_back(name); + skip_append: + if(*p==0)return; + ++p; + } + //unreachable +} +} diff --git a/src/fcn.f b/src/fcn.f index 7fbb92eb3130a24766320a2853fd7b77b6b6c35a..c3041b8f18e0158b9ecb5153e1df72a164515cbf 100644 --- a/src/fcn.f +++ b/src/fcn.f @@ -24,15 +24,8 @@ C--------------------------------------------------------- #include "endmini.inc" #include "for_debug.inc" integer i + double precision chi2data_theory !function -C function: - double precision chi2data_theory -! [--- WS 2015-10-10 - double precision XParValueByName -! 
---] - -C----------------------------------------------------------------- - C Store FCN flag in a common block: IFlagFCN = IFlag @@ -58,7 +51,7 @@ C Count number of FCN calls: C Store only if IFlag eq 3: if (iflag.eq.3) then do i=1,MNE - pkeep(i) = parminuit(i) + pkeep(i) = parminuit(i) C !> Also store for each fcn=3 call: pkeep3(i,nfcn3) = parminuit(i) enddo @@ -67,28 +60,21 @@ C !> Also store for each fcn=3 call: call HF_errlog(12020515,'I: FCN is called') C Print MINUIT extra parameters +c which are actually all parameters call printminuitextrapars +C Copy new parameter values from MINUIT to wherever parameterisations +c will take them from + call copy_minuit_extrapars(parminuit) -* --------------------------------------------------------- -* PDF parameterisation at the starting scale -* --------------------------------------------------------- - - call PDF_Param_Iteration(parminuit,iflag) - -! [--- WS 2015-10-10 #ifdef TRACE_CHISQ call MntInpGetparams ! calls MInput.GetMinuitParams(); #endif -! if(doHiTwist) then -! print '(''HiTwist:'',6F10.4)',XParValueByName('HT_x0'), XParValueByName('HT_sig0'), XParValueByName('HT_lambda') -! endif -! ---] - + * * Evaluate the chi2: -* +* chi2out = chi2data_theory(iflag) - + #ifdef TRACE_CHISQ if (iflag.eq.1) then ! print *,'INIT' @@ -97,34 +83,34 @@ C Print MINUIT extra parameters endif call MntShowVValues(chi2out) #endif - + return end C------------------------------------------------------ C> @brief Helper for C++ -C------------------------------------------------------ +C------------------------------------------------------ subroutine update_theory_iteration implicit none #include "ntot.inc" #include "datasets.inc" integer idataset character*128 Msg - - call gettheoryiteration + + call init_at_iteration do idataset=1,Ndatasets if(NDATAPOINTS(idataset).gt.0) then call GetTheoryForDataset(idataset) - else + else write (Msg, $ '(''W: Data set '',i2 - $,'' contains no data points, will be ignored'')') + $,'' contains no data points, will be ignored'')') $ idataset call hf_errlog(29052013,Msg) endif enddo end - + C------------------------------------------------------------------------------ C> @brief Calculate predictions for the data samples and return total chi2. C> @details Created by splitting original fcn() function C-------------------------------------------------------------- integer iflag #include "steering.inc" -#include "pdfparam.inc" #include "for_debug.inc" #include "ntot.inc" #include "datasets.inc" @@ -157,7 +142,7 @@ C-------------------------------------------------------------- * --------------------------------------------------------- double precision chi2out double precision fchi2, fcorchi2 - double precision DeltaLength +! double precision DeltaLength double precision BSYS(NSYSMax), RSYS(NSYSMax) double precision EBSYS(NSYSMax),ERSYS(NSYSMax) double precision pchi2(nset),chi2_log @@ -179,21 +164,21 @@ C-------------------------------------------------------------- character*300 base_pdfname integer npts(nset) double precision f2SM,f1SM,flSM - integer i,j,kflag,jsys,ndf,n0,h1iset,jflag,k,pr,nwds + integer i,j,jsys,ndf,n0,h1iset,jflag,k,pr,nwds logical refresh integer isys,ipoint,jpoint integer idataset double precision TempChi2 double precision GetTempChi2 ! Temperature penalty for D, E... params. double precision OffsDchi2 !
correction for final Offset calculation - + C x-dependent fs: double precision fs0 double precision fshermes c updf stuff logical firsth - double precision auh + double precision auh common/f2fit/auh(50),firsth Logical Firstd,Fccfm1,Fccfm2 Common/ myfirst/Firstd,Fccfm1,Fccfm2 @@ -211,7 +196,7 @@ c updf stuff character*2 TypeC, FormC, TypeD character*64 Msg - + double precision rmass,rmassp,rcharge COMMON /MASSES/ rmass(150),rmassp(50),rcharge(150) @@ -233,16 +218,14 @@ C--OZ 21.04.2016 Increment IfcnCount here instead of fcn routine endif C-------------------------------------------------------------- * --------------------------------------------------------- -* initialise variables +* initialise variables * --------------------------------------------------------- chi2out = 0.d0 fchi2 = 0.d0 - ndf = -nparFCN - n0 = 0 iflagfcn = iflag - itheory_ca = itheory + itheory_ca = itheory do jsys=1,nsys bsys(jsys) = 0.d0 @@ -252,10 +235,10 @@ C-------------------------------------------------------------- enddo - do i=1,ntot + do i=1,ntot !why for both used and unused points? --Ivan THEO(i) = 0.d0 THEO_MOD(i) = 0.d0 - enddo + enddo ! on second thought, why clear these anyway? @@ -276,23 +259,7 @@ C-------------------------------------------------------------- c write(6,*) ' fcn npoint ',npoints firsth=.true. Fccfm1=.true. - - endif -* --------------------------------------------------------- -* Extra constraints on input PDF due to momentum and quark -* counting sum rules: -* --------------------------------------------------------- - - kflag=0 - if (Itheory.eq.0.or.Itheory.eq.10.or.itheory.eq.11 - $.or.itheory.eq.35) then - call SumRules(kflag) - endif - if (kflag.eq.1) then - write(6,*) ' --- problem in SumRules, kflag = 1' - call HF_errlog(12020516, - + 'F: FCN - problem in SumRules, kflag = 1') endif if (iflag.eq.1) then @@ -306,42 +273,17 @@ c write(6,*) ' fcn npoint ',npoints enddo endif - -* --------------------------------------------------------- -* Call evolution -* --------------------------------------------------------- -cc if (Debug) then -cc print*,'before evolution' -cc endif -cc if (itheory.eq.0.or.itheory.eq.10.or.itheory.eq.11.or. -cc 1 itheory.eq.35) then -cc call Evolution -cc elseif(Itheory.ge.100) then -cc firsth=.false. 
-cc endif - -cc if (Debug) then -cc print*,'after evolution' -cc endif - - - -* --------------------------------------------------------- -* Initialise theory calculation per iteration -* --------------------------------------------------------- - call GetTheoryIteration - - if (Debug) then - print*,'after GetTheoryIteration' - endif - -* --------------------------------------------------------- +* --------------------------------------------------------- +* Initialise various c++ code per iteration +* --------------------------------------------------------- + call init_at_iteration +* --------------------------------------------------------- * Calculate theory for datasets: -* --------------------------------------------------------- +* --------------------------------------------------------- do idataset=1,NDATASETS if(NDATAPOINTS(idataset).gt.0) then call GetTheoryForDataset(idataset) - else + else write (Msg, $ '(''W: Data set '',i2, $ '' contains no data points, will be ignored'')') idataset @@ -353,40 +295,24 @@ cc endif print*,'after GetTheoryfordataset' endif - call cpu_time(time1) -* --------------------------------------------------------- -* Start of loop over data points: -* --------------------------------------------------------- - - do 100 i=1,npoints - - h1iset = JSET(i) - - if (iflag.eq.3) npts(h1iset) = npts(h1iset) + 1 - - n0 = n0 + 1 - ndf = ndf + 1 - - - - 100 continue -* --------------------------------------------------------- -* end of data loop -* --------------------------------------------------------- - if (Debug) then - print*,'after data loop' + !Count datapoints in each dataset? + if(iflag.eq.3)then + do i=1,npoints + h1iset = JSET(i) + npts(h1iset)=npts(h1iset)+1 + enddo endif - - + ndf=npoints-nparFCN !degrees of freedom + n0 =npoints * ----------------------------------------------------------- * Toy MC samples: * ----------------------------------------------------------- if (IFlag.eq.1 .and. lrand) then - call MC_Method() - endif + call MC_Method() + endif if (IFlag.eq.1) then NSysData = 0 @@ -398,9 +324,18 @@ cc endif endif if ( (IFlag.eq.1).and.(DataToTheo)) then - do i=1,npoints - daten(i) = theo(i) - enddo + !Copy theory to data + do i=1,npoints + daten(i)=theo(i) + !Update total uncorrelated uncertainty + alpha(i)=daten(i)*sqrt( + & e_stat_poisson(i)**2+ + & e_stat_const(i)**2+ !Should I use e_stat_const or e_sta_const or e_sta? I am not sure... --Ivan + & e_uncor_poisson(i)**2+ + & e_uncor_const(i)**2+ !or e_unc_const? + & e_uncor_mult(i)**2+ + & e_uncor_logNorm(i)**2) + enddo endif * --------------------------------------------------------- @@ -411,7 +346,7 @@ cc endif Chi2OffsRecalc = .true. Chi2OffsFinal = .true. call GetNewChisquare(iflag,n0,OffsDchi2,rsys,ersys, - $ pchi2offs,fcorchi2) + $ pchi2offs,fcorchi2) else Chi2OffsRecalc = .false. endif @@ -437,11 +372,12 @@ cc endif if (iflag.eq.1) close(87) if (iflag.eq.3) then - if (dobands) then - print *,'SAVE PDF values' - endif +C Broken since 2.2.0 +! if (dobands) then +! print *,'SAVE PDF values' +! endif - TheoFCN3 = Theo ! save + TheoFCN3 = Theo ! save TheoModFCN3 = Theo_Mod ALphaModFCN3 = ALPHA_Mod @@ -459,27 +395,16 @@ cc endif endif -* --------------------------------------------------------- -* pdf lenght term -- used for Chebyshev Polynomial -* --------------------------------------------------------- - if (ILenPdf.gt.0) then - call PDFLength(DeltaLength) - print *,'Chi2 from PDF length:',DeltaLength,fchi2 - else - DeltaLength = 0. 
- endif - - fchi2 = fchi2 + DeltaLength - +C Broken since 2.2.0 ! Temperature regularisation: - if (Temperature.ne.0) then - TempChi2 = GetTempChi2() - print *,'Temperature chi2=',TempChi2 - fchi2 = fchi2 + TempChi2 - endif - - -c Penalty from MINUIT extra parameters constraints (only for fits) +c if (Temperature.ne.0) then +c TempChi2 = GetTempChi2() +c print *,'Temperature chi2=',TempChi2 +c fchi2 = fchi2 + TempChi2 +c endif + + +c Penalty from MINUIT extra parameters constraints (only for fits) C However when/if LHAPDFErrors mode will be combined with minuit, this will need modification. if (.not. LHAPDFErrors) then call getextraparsconstrchi2(extraparsconstrchi2) @@ -490,21 +415,18 @@ C However when/if LHAPDFErrors mode will be combined with minuit, this will need $ shift_polRHp**2+shift_polRHm**2+ $ shift_polLHp**2+shift_polLHm**2+ $ shift_polL**2+shift_polT**2 +c If for any reason we got chi2==NaN, set it to +inf so that +c a minimizer would treat it as very bad + if(chi2out/=chi2out)then !if chi2out is NaN + chi2out=transfer(z'7FF0000000000000',1d0) !+infinity + endif - - - - ! if (lprint) then +c Print time, number of calls, chi2 call cpu_time(time3) - print '(''cpu_time'',3F10.2)', time1, time3, time3-time1 + print '(''cpu_time'',3F10.2)', time1, time3, time3-time1 write(6,'(A20,i6,F12.2,i6,F12.2)') ' - $ xfitter chi2out,ndf,chi2out/ndf ',ifcncount, chi2out, + $ xfitter chi2out,ndf,chi2out/ndf ',ifcncount, chi2out, $ ndf, chi2out/ndf - - - - ! endif ! end lprint - ! ---------------- RESULTS OUTPUT --------------------------------- ! Reopen "Results.txt" file if it is not open ! It does not get opened by this point when using CERES @@ -523,7 +445,7 @@ C However when/if LHAPDFErrors mode will be combined with minuit, this will need if (doOffset) $ write(85,'('' Offset corrected '',F10.2,I6,F10.3)'),chi2out+OffsDchi2,ndf,(chi2out+OffsDchi2)/ndf write(85,*) - + write(6,*) write(6,'(''After minimisation '',F10.2,I6,F10.3)'),chi2out,ndf,chi2out/ndf ! if (doOffset .and. iflag.eq.3) @@ -536,16 +458,16 @@ C However when/if LHAPDFErrors mode will be combined with minuit, this will need call write_pars(nfcn3) if (ControlFitSplit) then - print + print $ '(''Fit chi2/Npoint, after fit = '',F10.4,I4,F10.4)' $ ,chi2_fit $ , NFitPoints,chi2_fit/NFitPoints - print + print $ '(''Control chi2/Npoint, after fit = '',F10.4,I4,F10.4)' $ ,chi2_cont - $ , NControlPoints,chi2_cont/NControlPoints + $ , NControlPoints,chi2_cont/NControlPoints -c write (71,'(4F10.4)') +c write (71,'(4F10.4)') c $ paruval(4),paruval(5),chi2_fit/NFitPoints c $ ,chi2_cont/NControlPoints @@ -559,7 +481,7 @@ c $ ,chi2_cont/NControlPoints if (iflag.eq.3) then - + if (doOffset) then fcorchi2 = 0d0 do h1iset=1,nset @@ -568,7 +490,7 @@ c $ ,chi2_cont/NControlPoints enddo ! fcorchi2 = chi2out+OffsDchi2 endif - + ! ---------------- RESULTS OUTPUT --------------------------------- write(85,*) ' Partial chi2s ' chi2_log = 0 @@ -627,20 +549,16 @@ c $ ,chi2_cont/NControlPoints open(91,file=TRIM(OutDirName)//'/params.txt') write(91,*) auh(1),auh(2),auh(3),auh(4),auh(5),auh(6),auh(7),auh(8),auh(9) - - - else -C Hardwire: - if ( ReadParsFromFile .and. DoBandsSym) then - call ReadPars(ParsFileName, pkeep) - call PDF_param_iteration(pkeep,2) - kflag = 0 - call SumRules(kflag) - endif -C XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX -c call Evolution + + else +C Broken since 2.2.0 +!C Hardwire: +! if ( ReadParsFromFile .and. DoBandsSym) then +! call ReadPars(ParsFileName, pkeep) +!!
call PDF_param_iteration(pkeep,2)!broken since 2.2.0 +! endif C LHAPDF output: c WS: for the Offset method save central fit only @@ -686,7 +604,7 @@ C !> Store also type of systematic source info endif write(85,'(I5,'' '',A35,'' '',F9.4,'' +/-'',F9.4,A8,3A2)') - $ jsys,SYSTEM(jsys),rsys(jsys),ersys(jsys),' ',FormC, + $ jsys,SYSTEM(jsys),rsys(jsys),ersys(jsys),' ',FormC, $ TypeC,TypeD enddo @@ -696,45 +614,42 @@ C Trigger reactions: call cpu_time(time2) print '(''cpu_time'',3F10.2)', time1, time2, time2-time1 - + endif - C Return the chi2 value: chi2data_theory = chi2out - end - +C Broken since 2.2.0 C--------------------------------------------------------------------- -!> @brief Calculate penalty term for higher oder parameters using "temperature" +!> @brief Calculate penalty term for higher oder parameters using "temperature" !> @details Currently works only for standard param-types (10p-13p-like) C--------------------------------------------------------------------- - double precision function GetTempChi2() +c double precision function GetTempChi2() - implicit none -#include "pdfparam.inc" - integer i - double precision chi2 - double precision xscale(3) - data xscale/0.01,0.01,0.01/ - chi2 = 0. +c implicit none +c integer i +c double precision chi2 +c double precision xscale(3) +c data xscale/0.01,0.01,0.01/ +c chi2 = 0. C Over d,e and F - do i=1,3 - chi2 = chi2 + (paruval(i+3)*xscale(i))**2 - chi2 = chi2 + (pardval(i+3)*xscale(i))**2 - chi2 = chi2 + (parubar(i+3)*xscale(i))**2 - chi2 = chi2 + (pardbar(i+3)*xscale(i))**2 - if (i.le.2) then - chi2 = chi2 + (parglue(i+3)*xscale(i))**2 - endif - enddo - - - GetTempChi2 = chi2*Temperature +c do i=1,3 +c chi2 = chi2 + (paruval(i+3)*xscale(i))**2 +c chi2 = chi2 + (pardval(i+3)*xscale(i))**2 +c chi2 = chi2 + (parubar(i+3)*xscale(i))**2 +c chi2 = chi2 + (pardbar(i+3)*xscale(i))**2 +c if (i.le.2) then +c chi2 = chi2 + (parglue(i+3)*xscale(i))**2 +c endif +c enddo + + +c GetTempChi2 = chi2*Temperature C--------------------------------------------------------------------- - end +c end C--------------------------------------------------------- @@ -799,4 +714,14 @@ C--------------------------------------------------------- mtp=par endif end - +C copy parameters from minuit + subroutine copy_minuit_extrapars(p) + implicit none + double precision p(*) +#include "extrapars.inc" + integer i + integer GetParameterIndex !function + do i=1,nExtraParam + ExtraParamValue(i)=p(iExtraParamMinuit(GetParameterIndex(trim(ExtraParamNames(i))))) + enddo + end diff --git a/src/fortran_interface.cc b/src/fortran_interface.cc new file mode 100644 index 0000000000000000000000000000000000000000..a95115bf63fca3adbd4de6c985021bb08340bb5b --- /dev/null +++ b/src/fortran_interface.cc @@ -0,0 +1,20 @@ +#include"xfitter_steer.h" +#include"BaseEvolution.h" +#include<cmath> +//Functions to access various things from fortran +//PDFs and alpha_s are taken from default evolution +using namespace std; +//PDFs +extern "C" void hf_get_pdfsq_(double const&x,double const&Q,double*pdfs){ + xfitter::defaultEvolution->xfxQArray()(x,Q,pdfs);//returns through *pdfs +} +extern "C" void hf_get_pdfs_(double const&x,double const&Q2,double*pdfs){ + xfitter::defaultEvolution->xfxQArray()(x,sqrt(Q2),pdfs);//returns through *pdfs +} +//alpha_s +extern "C" double hf_get_alphasq_(double const&Q){ + return xfitter::defaultEvolution->AlphaQCD()(Q); +} +extern "C" double hf_get_alphas_(double const&Q2){ + return xfitter::defaultEvolution->AlphaQCD()(sqrt(Q2)); +} diff --git a/src/ftheor_eval.cc 
b/src/ftheor_eval.cc index be9c9f8025a6d769388b009fa46ed58f84ad3670..37f0f2f051e654f171bbe77754ee3a52a5ea85ee 100644 --- a/src/ftheor_eval.cc +++ b/src/ftheor_eval.cc @@ -16,7 +16,6 @@ #include "xfitter_cpp.h" #include "TheorEval.h" -//#include "datasets.icc" #include <yaml-cpp/yaml.h> #include "ReactionTheory.h" #include "xfitter_pars.h" @@ -44,14 +43,14 @@ extern "C" { int set_theor_eval_(int *dsId);//, int *nTerms, char **TermName, char **TermType, // char **TermSource, char *TermExpr); int set_theor_bins_(int *dsId, int *nBinDimension, int *nPoints, int *binFlags, - double *allBins, char binNames[10][80]); + double *allBins, char binNames[10][80]); // int set_theor_units_(int *dsId, double *units); int init_theor_eval_(int *dsId); int update_theor_ckm_(); int get_theor_eval_(int *dsId, int* np, int* idx); int read_reactions_(); int close_theor_eval_(); - void init_func_map_(); + //void init_func_map_(); Broken since 2.2.0 void init_at_iteration_(); ///< Loop over reactions, initialize them void fcn3action_(); ///< Loop over reactions, call actionAtFCN3 void error_band_action_(const int& i); ///< Loop over rections, call error_band_action @@ -159,7 +158,7 @@ int set_theor_eval_(int *dsId)//, int *nTerms, char **TermName, char **TermType, write details on argumets */ int set_theor_bins_(int *dsId, int *nBinDimension, int *nPoints, int *binFlags, - double *allBins, char binNames[10][80]) + double *allBins, char binNames[10][80]) { tTEmap::iterator it = gTEmap.find(*dsId); if (it == gTEmap.end() ) { @@ -237,24 +236,23 @@ int get_theor_eval_(int *dsId, int *np, int*idx) exit(1); } - valarray<double> vte; TheorEval *te = gTEmap.at(*dsId); - vte.resize(te->getNbins()); - te->Evaluate(vte); - - // Get bin flags, and abandon bins flagged 0 - const vector<int> *binflags = te->getBinFlags(); - int ip = 0; - vector<int>::const_iterator ibf = binflags->begin(); - for (; ibf!=binflags->end(); ibf++){ - if ( 0 != *ibf ) { - c_theo_.theo[*idx+ip-1]=vte[int(ibf-binflags->begin())]; - ip++; + valarray<double>vte(te->getNbins());//vector of theory predictions for this dataset + te->Evaluate(vte);//writes into vte + + // write the predictions to THEO array + const vector<int>*te_binflags=te->getBinFlags(); + const int*binflags=te_binflags->data();//get pointer to array of bin flags + size_t ip=0; + size_t offset=*idx-1; + size_t endi=te_binflags->size(); + for(size_t i=0;i<endi;++i){ + if(binflags[i]!=0){//skip bins flagged 0 + c_theo_.theo[ip+offset]=vte[i]; + ++ip; } - //cout << *ibf << "\t" << vte[int(ibf-binflags->begin())] << endl; } - // write the predictions to THEO array if( ip != *np ){ cout << "ERROR in get_theor_eval_: number of points mismatch" << endl; return -1; @@ -283,10 +281,9 @@ int read_reactions_() frt >> rname >> lib; if (frt.eof()) break; if (gReactionLibs.find(rname) == gReactionLibs.end() ) { - // possible check + // possible check } gReactionLibs[rname] = lib; - } } else { @@ -296,18 +293,17 @@ int read_reactions_() return 1; } - -// a bunch of functions +/* Broken since 2.2.0 double xg(const double& x, const double& q2) { double pdfs[20]; HF_GET_PDFS_WRAP(x,q2,pdfs); return pdfs[6+0]; } double xu(const double& x, const double& q2) { double pdfs[20]; HF_GET_PDFS_WRAP(x,q2,pdfs); return pdfs[6+1]; } double xub(const double& x, const double& q2) { double pdfs[20]; HF_GET_PDFS_WRAP(x,q2,pdfs); return pdfs[6-1]; } - void init_func_map_() { g2Dfunctions["xg"] = &xg; g2Dfunctions["xu"] = &xu; g2Dfunctions["xub"] = &xub; } +*/ void init_at_iteration_() { 
xfitter::updateDependentParameters(); @@ -331,7 +327,8 @@ void init_at_iteration_() { } } - +//This is called after minimization, after result output +//Could be named atEnd or something --Ivan void fcn3action_() { // Minimizer action: diff --git a/src/init_theory.f b/src/init_theory.f index 92e10ca7f2c51261400f5b2d060329badacc0375..c3f10ba94dbabfb0825def9a69155e2c0820af6f 100644 --- a/src/init_theory.f +++ b/src/init_theory.f @@ -1,49 +1,3 @@ - subroutine init_theory_modules -* ------------------------------------------------ - - implicit none -#include "ntot.inc" -#include "steering.inc" - -* ------------------------------------------------ -* Initialise EW parameters -* ------------------------------------------------ - - call Init_EW_parameters - -* ------------------------------------------------ -* Initialise qcdnum and APFEL -* ------------------------------------------------ - if(itheory.eq.0.or.itheory.eq.10.or.itheory.eq.11 - $.or.itheory.eq.35) then -C Init evolution code: -ccCC call qcdnum_ini -C Init APFEL if needed - if(itheory.eq.10.or.itheory.eq.35) call apfel_ini -C Init QEDEVOL if needed - if(itheory.eq.11) call qedevol_ini - -ccxxx call Init_heavy_flavours - - if (ewfit.gt.0) call eprc_init(.true.) - elseif(itheory.ge.100) then -cc write(6,*) ' in ini_theory for itheory =',itheory - endif - -* ------------------------------------------------ -* Initialise calculations for each dataset: -* ------------------------------------------------ - if(Itheory.ge.100) then -ccc write(6,*) ' ini_theory: no data sets initialised for theory ',itheory - else -c call Init_theory_datasets - endif - - return - end - - - Subroutine Init_heavy_flavours() *----------------------------------------------------- * @@ -156,7 +110,7 @@ C Reduce the Q2 interval if small-x resummation through APFEL is included. if(HFSCHEME.eq.3005.or. 1 HFSCHEME.eq.3055.or. 2 HFSCHEME.eq.3555)then - QARR(1) = starting_scale + !QARR(1) = starting_scale !starting_scale is broken since 2.2.0 QARR(2) = 2.025D7 ! needed for lhapdf grid endif c QARR(2) = 64000000. ! enough for 8 TeV LHC. 
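Note on the get_theor_eval_ rewrite above (src/ftheor_eval.cc): bins flagged 0 are skipped and the surviving predictions are packed contiguously into the global THEO array, starting at the 0-based offset *idx-1 derived from the 1-based Fortran start index. A minimal standalone C++ sketch of that packing pattern follows; the arrays and values here are illustrative stand-ins, not the xFitter globals.
// Standalone sketch (not part of the patch) of the bin-flag packing used by
// get_theor_eval_: predictions for bins flagged 0 are dropped, the rest are
// written contiguously starting at a 0-based offset. All names are illustrative.
#include <cstddef>
#include <cstdio>
#include <valarray>
#include <vector>
int main() {
  std::valarray<double> vte = {1.1, 2.2, 3.3, 4.4}; // theory predictions, one per bin
  std::vector<int> binflags = {1, 0, 1, 1};         // bins flagged 0 are excluded
  double theo[16] = {0};                            // stand-in for the global THEO array
  int idx = 3;                                      // 1-based start index, as passed from Fortran
  std::size_t offset = idx - 1;                     // convert to a 0-based C index
  std::size_t ip = 0;                               // number of accepted points so far
  for (std::size_t i = 0; i < binflags.size(); ++i) {
    if (binflags[i] != 0) {
      theo[ip + offset] = vte[i];
      ++ip;
    }
  }
  // get_theor_eval_ compares ip with the expected number of points (*np)
  std::printf("accepted %zu points: %g %g %g\n", ip, theo[2], theo[3], theo[4]);
  return 0;
}
Compared with the previous iterator arithmetic, indexing with an explicit offset keeps the 1-based/0-based conversion in one place.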
diff --git a/src/lhapdf6_output.c b/src/lhapdf6_output.c index 4b20b688aff6d952cae0e0175c894f728cb055b4..20b02b628f645cb702e9f6a71d6cef50d860906b 100644 --- a/src/lhapdf6_output.c +++ b/src/lhapdf6_output.c @@ -13,7 +13,7 @@ extern struct { //{{{ int nx; int read_xgrid; int dobands; - float hf_mass[3]; + float hf_mass[3]; int i_fit_order, ipdfset; int lead, useGridLHAPDF5, writeLHAPDF6, WriteAlphaSToMemberPDF, c_itheory, c_extrapdfs; @@ -29,7 +29,7 @@ typedef struct GridQX_s { //{{{ double *q2,*x; enum {QCDNUM_GRID, EXTERNAL_GRID, LHA5_GRID} type; //main interface (should include pdf modifications, like lead) - double (*pdf_ij)(struct GridQX_s grid, int pid, int ix, int iq2); + double (*pdf_ij)(struct GridQX_s grid, int pid, int ix, int iq2); double (*raw_pdf_ij)(struct GridQX_s grid, int pid, int ix, int iq2); //pdf from qcdnum grid } GridQX; //}}} @@ -140,20 +140,20 @@ void delete_grid(GridQX grid){ //{{{ -// pdf in grid point (qcdnum grid) +// pdf in grid point (qcdnum grid) double raw_qcdnum_pdf_ij(GridQX grid, int pid, int ix, int iq2) { //{{{ int inull; ix+=1; iq2+=1; - if ( fabs(pid)<=6) { - return fvalij_(&ccommoninterface_.ipdfset,&pid,&ix,&iq2,&inull); - } - else { - // Hardwire photon for now: - pid = 13; - double val = bvalij_(&ccommoninterface_.ipdfset,&pid,&ix,&iq2,&inull); - return val; - } + if ( fabs(pid)<=6) { + return fvalij_(&ccommoninterface_.ipdfset,&pid,&ix,&iq2,&inull); + } + else { + // Hardwire photon for now: + pid = 13; + double val = bvalij_(&ccommoninterface_.ipdfset,&pid,&ix,&iq2,&inull); + return val; + } } @@ -163,19 +163,19 @@ double raw_external_pdf_ij(GridQX grid, int pid, int ix, int iq2) { double x,q2; x=grid.x[ix]; q2=grid.q2[iq2]; - if ( fabs(pid)<=6) { - return fvalxq_(&ccommoninterface_.ipdfset,&pid,&x,&q2,&inull); - } - else { - // Hardwire photon for now: - pid = 13; - return bvalxq_(&ccommoninterface_.ipdfset,&pid,&x,&q2,&inull); - } + if ( fabs(pid)<=6) { + return fvalxq_(&ccommoninterface_.ipdfset,&pid,&x,&q2,&inull); + } + else { + // Hardwire photon for now: + pid = 13; + return bvalxq_(&ccommoninterface_.ipdfset,&pid,&x,&q2,&inull); + } } //}}} -//wrappers +//wrappers // direct interface double qcdnum_pdf_ij(GridQX grid, int pid, int ix, int iq2) { @@ -210,7 +210,7 @@ void print_lhapdf6(char *pdf_dir){ //{{{ save_data_lhapdf6_(¢ral_set); free(outdir); free(path); -} +} @@ -219,7 +219,7 @@ void print_lhapdf6_(){ char *pdf_dir=sfix(ccommoninterface_.LHAPDF6OutDir,128); print_lhapdf6(pdf_dir); free(pdf_dir); -} +} @@ -236,33 +236,33 @@ void print_lhapdf6_opt_(){ -void print_q2subgrid(GridQX grid, FILE *fp, int iqmin, int iqmax, int iSubGrid, int AddTop) { //{{{ +void print_q2subgrid(GridQX grid, FILE *fp, int iqmin, int iqmax, int iSubGrid, int AddTop) { //{{{ // iSubGrid = 4, 5, 6, 7: below charm, bottom, top, above all: first flavour to cut off. - int ix, iq2, i; + int ix, iq2, i; double val; const double OFFSET=1e-3; // see call PDFINP in fcn.f - // Add top too: - int PDG_NoTop[] = { -5,-4,-3,-2,-1,1,2,3,4,5,21,22}; - int PDG_Top[] = {-6, -5,-4,-3,-2,-1,1,2,3,4,5,6, 21,22}; - int QCDNUM_NoTop[] = {-5,-4,-3,-2,-1,1,2,3,4,5,0,7}; - int QCDNUM_Top[] = {-6,-5,-4,-3,-2,-1,1,2,3,4,5,6,0,7}; - + // Add top too: + int PDG_NoTop[] = { -5,-4,-3,-2,-1,1,2,3,4,5,21,22}; + int PDG_Top[] = {-6, -5,-4,-3,-2,-1,1,2,3,4,5,6, 21,22}; + int QCDNUM_NoTop[] = {-5,-4,-3,-2,-1,1,2,3,4,5,0,7}; + int QCDNUM_Top[] = {-6,-5,-4,-3,-2,-1,1,2,3,4,5,6,0,7}; + int *pdg_flavours = (AddTop >0 ) ? PDG_Top : PDG_NoTop; - int *qcdnum_flavours = (AddTop>0) ? 
QCDNUM_Top : QCDNUM_NoTop; - int NPDFToStore = (AddTop>0) ? (int) (sizeof(QCDNUM_Top)/sizeof(int)) : (sizeof(QCDNUM_NoTop)/sizeof(int)) ; + int *qcdnum_flavours = (AddTop>0) ? QCDNUM_Top : QCDNUM_NoTop; + int NPDFToStore = (AddTop>0) ? (int) (sizeof(QCDNUM_Top)/sizeof(int)) : (sizeof(QCDNUM_NoTop)/sizeof(int)) ; - if ( ! ccommoninterface_.c_extrapdfs ) { - NPDFToStore--; - } + if ( ! ccommoninterface_.c_extrapdfs ) { + NPDFToStore--; + } - double (*swap_fun)(struct GridQX_s grid, int pid, int ix, int iq2); + double (*swap_fun)(struct GridQX_s grid, int pid, int ix, int iq2); - for(ix=0;ix<grid.nx;ix++) + for(ix=0;ix<grid.nx;ix++) fprintf(fp, "%e ", grid.x[ix]); fprintf(fp, "\n"); @@ -270,48 +270,48 @@ void print_q2subgrid(GridQX grid, FILE *fp, int iqmin, int iqmax, int iSubGrid, fprintf(fp, "%e ", sqrt(grid.q2[iq2])); fprintf(fp, "\n"); - for(i=0;i<NPDFToStore ;i++) + for(i=0;i<NPDFToStore ;i++) fprintf(fp, "%i ", pdg_flavours[i]); fprintf(fp, "\n"); for(ix=0; ix<grid.nx; ix++) { - for(iq2=iqmin; iq2<iqmax; iq2++){ - for(i=0;i< NPDFToStore;i++) { - - - val= ( (fabs(qcdnum_flavours[i]) >= iSubGrid) && (fabs(pdg_flavours[i])<7) ) ? 0: grid.pdf_ij(grid, qcdnum_flavours[i], ix, iq2); - - if( fabs(val)>2*DBL_EPSILON) - fprintf(fp, "%e ", val); - else - fprintf(fp, "%e ", 0.0); - } - - - fprintf(fp, "\n"); - } - - //near the threshold - swap_fun=grid.raw_pdf_ij; - grid.raw_pdf_ij=raw_external_pdf_ij; - grid.q2[iqmax]-=OFFSET; - for(i=0;i<NPDFToStore;i++) { - val= ( (fabs(qcdnum_flavours[i]) >= iSubGrid) && (fabs(pdg_flavours[i])<7) ) ? 0 : grid.pdf_ij(grid, qcdnum_flavours[i], ix, iqmax); - - - - if( fabs(val)>2*DBL_EPSILON) - fprintf(fp, "%e ", val); - else - fprintf(fp, "%e ", 0.0); - } - grid.raw_pdf_ij=swap_fun; - grid.q2[iqmax]+=OFFSET; - - fprintf(fp, "\n"); + for(iq2=iqmin; iq2<iqmax; iq2++){ + for(i=0;i< NPDFToStore;i++) { + + + val= ( (fabs(qcdnum_flavours[i]) >= iSubGrid) && (fabs(pdg_flavours[i])<7) ) ? 0: grid.pdf_ij(grid, qcdnum_flavours[i], ix, iq2); + + if( fabs(val)>2*DBL_EPSILON) + fprintf(fp, "%e ", val); + else + fprintf(fp, "%e ", 0.0); + } + + + fprintf(fp, "\n"); + } + + //near the threshold + swap_fun=grid.raw_pdf_ij; + grid.raw_pdf_ij=raw_external_pdf_ij; + grid.q2[iqmax]-=OFFSET; + for(i=0;i<NPDFToStore;i++) { + val= ( (fabs(qcdnum_flavours[i]) >= iSubGrid) && (fabs(pdg_flavours[i])<7) ) ? 
0 : grid.pdf_ij(grid, qcdnum_flavours[i], ix, iqmax); + + + + if( fabs(val)>2*DBL_EPSILON) + fprintf(fp, "%e ", val); + else + fprintf(fp, "%e ", 0.0); + } + grid.raw_pdf_ij=swap_fun; + grid.q2[iqmax]+=OFFSET; + + fprintf(fp, "\n"); } - + fprintf(fp, "---\n"); } //}}} @@ -337,35 +337,35 @@ void save_data_lhapdf6(int *pdf_set,char *pdf_dir){ //{{{ double mbt2=ccommoninterface_.hf_mass[1]*ccommoninterface_.hf_mass[1]; double mtp2=ccommoninterface_.hf_mass[2]*ccommoninterface_.hf_mass[2]; - double kmuc=ccommoninterface_.c_kmuc; - double kmub=ccommoninterface_.c_kmub; - double kmut=ccommoninterface_.c_kmut; + double kmuc=ccommoninterface_.c_kmuc; + double kmub=ccommoninterface_.c_kmub; + double kmut=ccommoninterface_.c_kmut; + + double tiny = 1e-3; - double tiny = 1e-3; - // print_q2subgrid(grid, fp, qfrmiq(0) , qfrmiq(grid.nq2-1)); - int AddTop = (mtp2 < qfrmiq(grid.nq2-1)); + int AddTop = (mtp2 < qfrmiq(grid.nq2-1)); print_q2subgrid(grid, fp, 0, iqfrmq(mch2*kmuc*kmuc+tiny), 4, AddTop); print_q2subgrid(grid, fp, iqfrmq(mch2*kmuc*kmuc+tiny), iqfrmq(mbt2*kmub*kmub+tiny), 5, AddTop); if(AddTop>0) { - print_q2subgrid(grid, fp, iqfrmq(mbt2*kmub*kmub+tiny), iqfrmq(mtp2*kmut*kmut+tiny), 6, AddTop); - print_q2subgrid(grid, fp, iqfrmq(mtp2*kmut*kmut+tiny), grid.nq2-1, 7, AddTop); + print_q2subgrid(grid, fp, iqfrmq(mbt2*kmub*kmub+tiny), iqfrmq(mtp2*kmut*kmut+tiny), 6, AddTop); + print_q2subgrid(grid, fp, iqfrmq(mtp2*kmut*kmut+tiny), grid.nq2-1, 7, AddTop); } else { - print_q2subgrid(grid, fp, iqfrmq(mbt2*kmub*kmub+tiny), grid.nq2-1, 6, AddTop); + print_q2subgrid(grid, fp, iqfrmq(mbt2*kmub*kmub+tiny), grid.nq2-1, 6, AddTop); } fclose(fp); delete_grid(grid); free(outdir); free(path); -} +} // save to LHAPDF6OutDir, fortran interface -void save_data_lhapdf6_(int *pdf_set){ +void save_data_lhapdf6_(int *pdf_set){ char *pdf_dir=sfix(ccommoninterface_.LHAPDF6OutDir,128); save_data_lhapdf6(pdf_set, pdf_dir); free(pdf_dir); @@ -374,7 +374,7 @@ void save_data_lhapdf6_(int *pdf_set){ // use opt_$LHAPDF6OutDir directory, fortran interface -void save_data_lhapdf6_opt_(int *pdf_set){ +void save_data_lhapdf6_opt_(int *pdf_set){ char *pdf_dir=sfix(ccommoninterface_.LHAPDF6OutDir,128); char *opt_pdf_dir=malloc((strlen(pdf_dir)+strlen("opt_")+1)*sizeof(char)); @@ -406,24 +406,24 @@ void save_info(char *pdf_dir) { //{{{ fprintf(fp,"NumMembers: %i\n",get_nmembers_()); double mtp2=ccommoninterface_.hf_mass[2]*ccommoninterface_.hf_mass[2]; - int addTop = (mtp2 < qfrmiq(grid.nq2-1)); - - if ( ccommoninterface_.c_extrapdfs ) { - if (addTop>0) { - fprintf(fp,"Flavors: [-6, -5, -4, -3, -2, -1, 1, 2, 3, 4, 5, 6, 21, 22]\n"); - } - else { - fprintf(fp,"Flavors: [-5, -4, -3, -2, -1, 1, 2, 3, 4, 5, 21, 22]\n"); - } - } - else { - if (addTop>0) { - fprintf(fp,"Flavors: [-6, -5, -4, -3, -2, -1, 1, 2, 3, 4, 5, 6, 21]\n"); - } - else { - fprintf(fp,"Flavors: [-5, -4, -3, -2, -1, 1, 2, 3, 4, 5, 21]\n"); - } - } + int addTop = (mtp2 < qfrmiq(grid.nq2-1)); + + if ( ccommoninterface_.c_extrapdfs ) { + if (addTop>0) { + fprintf(fp,"Flavors: [-6, -5, -4, -3, -2, -1, 1, 2, 3, 4, 5, 6, 21, 22]\n"); + } + else { + fprintf(fp,"Flavors: [-5, -4, -3, -2, -1, 1, 2, 3, 4, 5, 21, 22]\n"); + } + } + else { + if (addTop>0) { + fprintf(fp,"Flavors: [-6, -5, -4, -3, -2, -1, 1, 2, 3, 4, 5, 6, 21]\n"); + } + else { + fprintf(fp,"Flavors: [-5, -4, -3, -2, -1, 1, 2, 3, 4, 5, 21]\n"); + } + } fprintf(fp,"OrderQCD: %i\n", ccommoninterface_.i_fit_order-1); // qcdnum notation LO=1,...; LHAPDF6 LO=0,... 
fprintf(fp,"FlavorScheme: %s\n", get_flavor_scheme()); fprintf(fp,"ErrorType: %s\n", get_error_type()); @@ -453,12 +453,12 @@ void save_alphas_info(FILE* fp, GridQX grid) { //{{{ double q2; double mz2=ccommoninterface_.mz*ccommoninterface_.mz; - // Also get thresholds info: - double ct = ccommoninterface_.hf_mass[0]*ccommoninterface_.c_kmuc; - double bt = ccommoninterface_.hf_mass[1]*ccommoninterface_.c_kmub; - double tt = ccommoninterface_.hf_mass[2]*ccommoninterface_.c_kmut; - - + // Also get thresholds info: + double ct = ccommoninterface_.hf_mass[0]*ccommoninterface_.c_kmuc; + double bt = ccommoninterface_.hf_mass[1]*ccommoninterface_.c_kmub; + double tt = ccommoninterface_.hf_mass[2]*ccommoninterface_.c_kmut; + + getord_(&as_order); @@ -470,43 +470,43 @@ void save_alphas_info(FILE* fp, GridQX grid) { //{{{ fprintf(fp,"AlphaS_Qs: ["); for(iq2=0;iq2<grid.nq2;iq2++) { - double q = sqrt(grid.q2[iq2]); - if ( fabs(q-ct) < 0.0001 ) { - fprintf(fp,"%g,",q); // print threshold twice - } - if ( fabs(q-bt) < 0.0001 ) { - fprintf(fp,"%g,",q); // print threshold twice - } - if ( fabs(q-tt) < 0.0001 ) { - fprintf(fp,"%g,",q); // print threshold twice - } - - fprintf(fp,"%g",q); - if(iq2!=grid.nq2-1) fprintf(fp,", "); + double q = sqrt(grid.q2[iq2]); + if ( fabs(q-ct) < 0.0001 ) { + fprintf(fp,"%g,",q); // print threshold twice + } + if ( fabs(q-bt) < 0.0001 ) { + fprintf(fp,"%g,",q); // print threshold twice + } + if ( fabs(q-tt) < 0.0001 ) { + fprintf(fp,"%g,",q); // print threshold twice + } + + fprintf(fp,"%g",q); + if(iq2!=grid.nq2-1) fprintf(fp,", "); } fprintf(fp,"]\n"); fprintf(fp,"AlphaS_Vals: ["); for(iq2=0;iq2<grid.nq2;iq2++) { - double q = sqrt(grid.q2[iq2]); - double epsilon = 0; - - // Thresholds: - if ( fabs(q-ct) < 0.0001 ) { - epsilon = 0.0001; - double q2l = q*q-epsilon; - fprintf(fp,"%g,",hf_get_alphas_(&q2l)); - } - if ( fabs(q-bt) < 0.0001 ) { - epsilon = 0.0001; - double q2l = q*q-epsilon; - fprintf(fp,"%g,",hf_get_alphas_(&q2l)); - } - if ( fabs(q-tt) < 0.0001 ) { - epsilon = 0.0001; - double q2l = q*q-epsilon; - fprintf(fp,"%g,",hf_get_alphas_(&q2l)); - } + double q = sqrt(grid.q2[iq2]); + double epsilon = 0; + + // Thresholds: + if ( fabs(q-ct) < 0.0001 ) { + epsilon = 0.0001; + double q2l = q*q-epsilon; + fprintf(fp,"%g,",hf_get_alphas_(&q2l)); + } + if ( fabs(q-bt) < 0.0001 ) { + epsilon = 0.0001; + double q2l = q*q-epsilon; + fprintf(fp,"%g,",hf_get_alphas_(&q2l)); + } + if ( fabs(q-tt) < 0.0001 ) { + epsilon = 0.0001; + double q2l = q*q-epsilon; + fprintf(fp,"%g,",hf_get_alphas_(&q2l)); + } q2=grid.q2[iq2]+epsilon; fprintf(fp,"%g",hf_get_alphas_(&q2)); diff --git a/src/main.f b/src/main.f index 8f03c8fb216e0a6353dbcdb532a91e1a5fe39f6c..0687772541e007f3efaeb5b48b443052df01b25e 100644 --- a/src/main.f +++ b/src/main.f @@ -71,9 +71,8 @@ C----------------------------------------------------- call read_reactions() call parse_params() !read parameters.yaml -* -* Init evolution -* +*This makes sure that the default evolution exists and is acessible from +*fortran using hf_get_pdfs(x,Q,pdfs) etc call init_evolution() * ------------------------------------------------ @@ -84,26 +83,18 @@ C----------------------------------------------------- + 'I: data tables have been read successfully') * - call init_func_map() - - -* ------------------------------------------------ -* Initialise theory modules -* ------------------------------------------------ - - call init_theory_modules - call hf_errlog(12020503, - + 'I: theory modules initialised successfully') +! 
call init_func_map() + call Init_EW_parameters if (LHAPDFErrors) then ! PDF errors call get_lhapdferrors goto 36 endif - - if (SCAN) then ! chi2 scan - call chi2_scan - goto 36 - endif +C chi2scan is broken since 2.2.0 +C if (SCAN) then ! chi2 scan +C call chi2_scan +C goto 36 +C endif * ------------------------------------------------ * Do the fit @@ -157,17 +148,18 @@ c .......................................................... call flush(6) if(icond .ne. 0) goto 36 Call RecovCentrPars - if (DOBANDS) then - write(6,*) - $ ' --- Calculating error bands from Offset errors...' - ! --- set the scale by defining delta chi2 value - ! --- for MNCOMD(fcn,'ITERATE 10',...) it is set by SET ERRDEF dchi2 - ! --- with default value of 5 - Call DecorDiag(5.d0) - call Error_Bands_Pumplin - ! Error_Bands_Pumplin calls GetUmat(i,j) which needs only umat from common /umatco/ - ! See minuit/src/iterate.F - endif +! Broken since 2.2.0 +! if (DOBANDS) then +! write(6,*) +! $ ' --- Calculating error bands from Offset errors...' +! ! --- set the scale by defining delta chi2 value +! ! --- for MNCOMD(fcn,'ITERATE 10',...) it is set by SET ERRDEF dchi2 +! ! --- with default value of 5 +! Call DecorDiag(5.d0) +! call Error_Bands_Pumplin +! ! Error_Bands_Pumplin calls GetUmat(i,j) which needs only umat from common /umatco/ +! ! See minuit/src/iterate.F +! endif else if (ControlFitSplit) then Call FindBestFCN3 !> Overfitting protection. @@ -178,21 +170,22 @@ c .......................................................... call write_pars(0) endif - if (DOBANDS) then - write(6,*) ' --- Calculate error bands ...' - lprint = .false. - call hf_errlog - $ (12020506, 'I: Calculation of error bands required') - call MNCOMD(fcn,'ITERATE 10',icond,0) - call MNCOMD(fcn,'MYSTUFF 1000',icond,0) - call MNCOMD(fcn,'MYSTUFF 2000',icond,0) - - call write_pars(0) - - call Error_Bands_Pumplin - elseif (DoBandsSym) then - call ErrBandsSym - endif +! Broken since 2.2.0 +! if (DOBANDS) then +! write(6,*) ' --- Calculate error bands ...' +! lprint = .false. +! call hf_errlog +! $ (12020506, 'I: Calculation of error bands required') +! call MNCOMD(fcn,'ITERATE 10',icond,0) +! call MNCOMD(fcn,'MYSTUFF 1000',icond,0) +! call MNCOMD(fcn,'MYSTUFF 2000',icond,0) + +! call write_pars(0) + +! call Error_Bands_Pumplin +! elseif (DoBandsSym) then +! call ErrBandsSym +! endif close (24) close (25) diff --git a/src/mc_errors.f b/src/mc_errors.f index 4f8921991da84c198340b2e49c4754720cef85dc..7a814780248eac6ce8e12c6d9c640ef297c65aa2 100644 --- a/src/mc_errors.f +++ b/src/mc_errors.f @@ -1,6 +1,6 @@ C------------------------------------------------------------ C -!> MC method for propagating of the data uncertainties. +!> MC method for propagating of the data uncertainties. !> Creat a replica of the data, which fluctuates accoding to their uncertainteis. C C------------------------------------------------------------ @@ -26,17 +26,17 @@ C To be used as a seed: C Single precision here: real rndsh(3) ! additive, poisson, linear $ ,ranflat -C + double precision rand_shift(NSYS) double precision r_sh_fl(NSYS) double precision f_un - parameter (f_un = 2.0) ! translate 0.:-1 to -1.:1. - + parameter (f_un = 2.0) ! translate 0.:-1 to -1.:1. + real amu integer npoi, ierr C For log normal random shifts: real lsig, lmu,lrunif - + double precision epsilon ! 
estimated acceptance/lumi correction double precision data_in double precision estat_in, ecor_in, euncor_in, etot_in !> Input uncertainites @@ -44,14 +44,13 @@ C For log normal random shifts: double precision scaleF integer scaling_type - + C functions: real logshift double precision alnorm - C------------------------------------------------------------ - + cv initialise the random shifts do isys=1,nsys @@ -62,7 +61,7 @@ cv initialise the random shifts C C Loop over systematic sources: -C +C do isys=1,nsys call rnorml(rndsh,1) ! gauss random number call ranlux(ranflat,1) ! uniform random number @@ -79,7 +78,7 @@ C C Loop over the data: C do n0=1,npoints - call rnorml(rndsh,3) + call rnorml(rndsh,3) call ranlux(ranflat,1) if (lrandData) then @@ -92,7 +91,7 @@ C do isys=1,nsys cv test different distributions -cv first for systematic uncert, then for stat. +cv first for systematic uncert, then for stat. if (systype.eq.1) then ! gauss syst C ! Introduce asymmetric errors, for Gaussian case only: @@ -105,41 +104,35 @@ C ! Introduce asymmetric errors, for Gaussian case only: s = s*(1.+ BetaAsym(isys,2,n0) * rand_shift(isys)) endif endif - + elseif (systype.eq.2) then ! uniform s = s*(1. + beta(isys,n0) * r_sh_fl(isys)) - + elseif (systype.eq.3) then ! lognormal if (beta(isys,n0).ne.0) then - lsig=beta(isys,n0) + lsig=beta(isys,n0) lmu=1. lrunif=r_sh_fl(isys)/f_un + 0.5 ! Expect random number between 0 and 1. s=s*logshift(lmu,lsig,lrunif) c print*,'log...', n0,isys, -c $ lrunif, beta(isys,n0), +c $ lrunif, beta(isys,n0), c $ s,logshift(lmu,lsig,lrunif) endif endif ! endif (sys for systematic shifts) enddo ! end loop over the systematic shifts - + voica=s ! save cross section before the stat shift -CV now choose sta (advised gauss OR poisson) - - if (statype.eq.1) then ! gauss +CV now choose sta (advised gauss OR poisson) + if (statype.eq.1) then ! gauss C do separate fluctuations for stat-const, stat-poisson and stat-linear pieces - s = s + s = s $ + sqrt( e_uncor_const(n0)**2 + e_stat_const(n0)**2) - $ * daten(n0)*rndsh(1) + $ * daten(n0)*rndsh(1) $ + sqrt( e_uncor_poisson(n0)**2 + e_stat_poisson(n0)**2) $ * sqrt(abs(daten(n0)*sorig))*rndsh(2) $ + e_uncor_mult(n0)*sorig*rndsh(3) - -c if (alpha(n0).eq.0) then -c s = 0.1 -c alpha(n0) = 1.e6 -c endif elseif (statype.eq.3.) then ! lognormal lsig = alpha(n0) lmu=1. @@ -177,7 +170,7 @@ C Reset uncor: C Get acceptance/lumi correction, called "epsilon" epsilon = data_in/estat_in**2 - + C Expected number of events: amu = epsilon*theo(n0) call RNPSSN(amu, Npoi, Ierr) @@ -185,9 +178,9 @@ C Expected number of events: s = (s/THEO(n0)) * Npoi/epsilon C Also apply fluctuations due to uncorrelated systematics: - + C New absolute uncor: - euncor_out = euncor_in / data_in * s ! rescale to new value + euncor_out = euncor_in / data_in * s ! rescale to new value if (statype.eq.14) then s = s + rndsh(1)*euncor_in @@ -217,9 +210,9 @@ C Store uncor in %: e_tot(n0) = sqrt(euncor_out**2+estat_out**2+ecor_in**2) $ /s*100.0 endif - - - print + + + print $ '(''Original, systematics and stat. shifted data:'',i4,5E12.4)' $ , n0,sorig, voica,s,alpha(n0),e_unc(n0)/100.*s @@ -229,13 +222,13 @@ C Store uncor in %: $ 'S: ToyMC cross section with exact ZERO value, stopOB') endif -C Re-scale relative error sources, depending on scaling rule define in chi2 or data files. 
+C Scale relative error sources, depending on scaling rule defined in chi2-related section of steering or in data files C For : -C - addivie ("NoRescale") errors keep absolute errors unmodified -C - multiplicaiive ("Linear") errors keep relative errors unmodified -C - poisson ("Poisson") keep error * sqrt(old/newVal) unmodified +C - additive ("NoRescale") keep absolute errors unmodified +C - multiplicative ("Linear") keep relative errors unmodified +C - poisson ("Poisson") keep (relative error)*sqrt(value) unmodified - scaleF = DATEN(n0)/s + scaleF = DATEN(n0)/s !=oldValue/newValue if (s .lt. 0) then call hf_errlog(1302201901, @@ -254,29 +247,28 @@ C - poisson ("Poisson") keep error * sqrt(old/newVal) unmodified e_uncor_const(n0) = e_uncor_const(n0) * scaleF e_stat_const(n0) = e_stat_const(n0) * scaleF e_tot(n0) = e_tot(n0) * scaleF - + C Also correlated systematicss: do isys=1,nsys - scaling_type = SysScalingType(isys) - + scaling_type = SysScalingType(isys) + if ( $ (scaling_type .eq. isNoRescale) $ .or. (LForceAdditiveData(n0) ) - $ ) then ! additive, keep absolute + $ ) then ! additive, keep absolute beta(isys,n0) = beta(isys,n0) * scaleF omega(isys,n0) = omega(isys,n0) * scaleF elseif (scaling_type.eq. isLinear) then ! mult, do nothing - beta(isys,n0) = beta(isys,n0) - omega(isys,n0) = omega(isys,n0) - elseif (scaling_type.eq. isPoisson) then + beta(isys,n0) = beta(isys,n0) + omega(isys,n0) = omega(isys,n0) + elseif (scaling_type.eq. isPoisson) then beta(isys,n0) = beta(isys,n0) * sqrt(scaleF) - omega(isys,n0) = omega(isys,n0) * sqrt(scaleF) + omega(isys,n0) = omega(isys,n0) * sqrt(scaleF) endif enddo - - DATEN(n0) = s + DATEN(n0) = s C update alpha: alpha(n0) = sqrt(e_uncor_mult(n0)**2 $ +e_stat_poisson(n0)**2 @@ -284,20 +276,9 @@ C update alpha: $ +e_stat_const(n0)**2 $ +e_uncor_poisson(n0)**2) $ *daten(n0) - - enddo -C call HF_stop - - -C------------------------------------------------------------ + enddo end - - - -* --------------------------------------------- - -cv Program voica C--------------------------------------------------- C Created by SG, 23 Apr 2008 following C @@ -307,7 +288,7 @@ C !> @param[in] am mean value !> @param[in] as RMS C -C Input: am -- mean value +C Input: am -- mean value C as -- RMS C---------------------------------------------------- function alnorm(am,as) @@ -323,29 +304,29 @@ C---------------------------------------------------- COMMON/SLATE/IS(40) cv am=1 cv as=1 - + C SG: Comment out initialization of the seed, already done in read_data !
Csg call datime(ndate,ntime) Csg ntime = ntime*100+is(6) Csg isrnd = ntime - + Csg call rmarin(isrnd,0,0) cv call rnorml(normrnd1,1) cv call rnorml(normrnd2,1) call ranmar(normrnd1,1) - call ranmar(normrnd2,1) + call ranmar(normrnd2,1) cv r1 = rand() cv r2 = rand() r1 = normrnd1 r2 = normrnd2 - + rr = sqrt(-2*log(r1))*sin(2*pi*r2) stdlog = sqrt(log(1+(as/am)**2 ) ) amlog = log(am) - 0.5 * log(1+(as/am)**2) - + cv stdlog=0.548662 cv amlog =-0.150515 @@ -353,16 +334,16 @@ cv amlog =-0.150515 alnorm = dble(exp(rr)) - + cv print*,'voica gets the lognorml distribution....',alnorm end -c real function logshift(mu,sig,runif) +c real function logshift(mu,sig,runif) C----------------------------------------------------------------------- C- -C- Purpose and Methods: +C- Purpose and Methods: C- C- Inputs : C- Outputs : @@ -372,12 +353,12 @@ C- Created 12-JUN-2008 Voica Radescu C- C----------------------------------------------------------------------- * ------------------------------------------- - real function logshift(mmu,ssig,rrunif) + real function logshift(mmu,ssig,rrunif) * ------------------------------------------- IMPLICIT NONE - + real zeroth, ANS,ex2,runif real mu, sig,x2, mu2, sig2,z1,z2 external zeroth @@ -401,7 +382,7 @@ C----------------------------------------------------------------------- C----------------------------------------------------------------------- C- -C- Purpose and Methods: +C- Purpose and Methods: C- C- Inputs : C- Outputs : @@ -426,7 +407,7 @@ C---------------------------------------------------------------------- COMMON/PARAM/mu,sig,runif -cv transform the formula from mean, std of x to log(x) +cv transform the formula from mean, std of x to log(x) stdlog = sqrt(log(1+(sig/mu)**2 ) ) amlog = log(mu) - 0.5 * log(1+(sig/mu)**2) @@ -458,7 +439,7 @@ C Common from CERNLIB datime: C------------------------------------------- if (iseedmc.ne.0) then C Seed from the steering: - icount = iseedmc + icount = iseedmc else C Seed from current time call datime(ndate,ntime) diff --git a/src/pdf_param.f b/src/pdf_param.f deleted file mode 100644 index 4ebb0bb209f0c589bf6d6f9ccf37f29a7766821d..0000000000000000000000000000000000000000 --- a/src/pdf_param.f +++ /dev/null @@ -1,1616 +0,0 @@ - Subroutine PDF_param_iteration(p,iflag) -C------------------------------------------------------- -C -C Created 5 June 2011. 
Move PDF parameterisation setting from FCN -C -C Input: p(*) -- input minuit parameters -C iflag -- minuit flag -C -C-------------------------------------------------------- - implicit none - - - double precision p(*) - integer iflag -#include "pdfparam.inc" -#include "steering.inc" -#include "alphas.inc" -#include "thresholds.inc" -#include "extrapars.inc" -#include "polarity.inc" -#include "couplings.inc" - integer i,idx - - double precision fs,rs - double precision fshermes - double precision alphasPDF, StepAlphaS - -C------------------------------------------------------- - integer idxAlphaS !> index for alphas - integer GetParameterIndex !> function to read parameter index -C------------------------------------------------------- - - double precision getParamD - -C------------------------------------------------------- - -C make sure that par values are updated - do i=1,nExtraParam - idx = iExtraParamMinuit( - $ GetParameterIndex(trim(ExtraParamNames(i)))) - ExtraParamValue(i) = p(idx) - enddo - - if(ITheory.ge.100) return - - if (ITheory.eq.11.or.ITheory.eq.35) then - - parphoton(1) = GetParamD('Aph') - parphoton(2) = GetParamD('Bph') - parphoton(3) = GetParamD('Cph') - parphoton(4) = GetParamD('Dph') - parphoton(5) = GetParamD('Eph') - - if (parphoton(1).eq.0) then - if( PDFStyle.ne.'LHAPDF'.and.PDFStyle.ne.'LHAPDFQ0') then - - print *,'Did not find photon parameters' - print *,'Add to parameters.yanl: Aph,Bph,Cph,Dph,Eph' - Call HF_errlog(15052700, - $ 'W: Add to parameters.yaml: Aph,Bph,Cph,Dph,Eph') - endif - endif - else - do i=1,5 - parphoton(i) = 0. - enddo - endif - -C Get from extra pars: - alphas=getParamD('alphas') - - if (alphas.eq.0) then - call hf_errlog(2018031901, - $ 'S: AlphaS is not set or set to zero. Check parameters.yaml') - endif - - idxAlphaS = GetParameterIndex('alphas') - if (idxAlphaS.ne.0) then - StepAlphaS=ExtraParamStep(idxAlphaS) - idxAlphaS = iExtraParamMinuit(idxAlphaS) - else - StepAlphaS = 0.0 - endif - - fstrange=GetParamD('fs') - if (fstrange.eq.0) then - rs = GetParamD('rs') - fstrange=rs/(rs+1) - endif - - if (fstrange.eq.0) then - print *,'Did not find fs nor rs parameter' - print *,'Add to parameters.yaml with the name rs or fs' - Call HF_errlog(13050800, - $ 'S: Add to parameters.yaml with the name rs or fs') - endif -! Temperature - fcharm = GetParamD('fcharm') - temperature = GetParamD('Temperature') - if (temperature.ne.0) then - print *,'Temperature=',Temperature - endif - -! EW parameters - cau_ew=GetParamD('auEW') - cad_ew=GetParamD('adEW') - cvu_ew=GetParamD('vuEW') - cvd_ew=GetParamD('vdEW') - -! Update EWK / QCD parameters: - call update_pars_fortran() - - ! special for polarisation fits: - - shift_polRHp=GetParamD('shiftpolLHp') - shift_polLHp=GetParamD('shiftpolRHp') - shift_polLHm=GetParamD('shiftpolLHm') - shift_polRHm=GetParamD('shiftpolRHm') - shift_polL=GetParamD('shiftpolL') - shift_polT=GetParamD('shiftpolT') - -C In case PDF and alphas needs to be read from LHAPDF (iparam=0, ipdfset=5) -C maybe instead warning message should be issued - - if( PDFStyle.eq.'LHAPDF'.or.PDFStyle.eq.'LHAPDFQ0') then - if (StepAlphaS.eq.0.) 
then - alphas=alphasPDF(Mz) - Call HF_errlog(13051401, - $ 'W: alphas is fixed and taken from LHAPDF file') - else - Call HF_errlog(13051402, - $ 'W: alphas is free and taken from steering.txt ') - endif - endif - -C Hermes strange prepare: - if (ifsttype.eq.0) then - fs = fstrange - else - fs = fshermes(0.D0) - endif - - Call DecodePara(p) - - -C 25 Jan 2011: Poly params for valence: - if (NPOLYVAL.gt.0) then - Call StorePoly(p,iflag) - endif - -C 22 Apr 2011: CT parameterisation: - if (PDFStyle.eq.'CTEQ'.or.PDFStyle.eq.'CTEQHERA') then - Call DecodeCtPara(p) - ctphoton(1)= GetParamD('Aph') - ctphoton(2)= GetParamD('Bph') - ctphoton(3)= GetParamD('Cph') - ctphoton(4)= GetParamD('Dph') - ctphoton(5)= GetParamD('Eph') - endif - -C 22 Sep 2011: AS parameterisation: - if ((PDFStyle.eq.'AS').or.(PDFStyle.eq.'BiLog')) then - Call DecodeASPara(p) - endif - - - -C -C Chebyshev for the gluon: -C - if (NCHEBGLU.gt.0) then - do i=1,NCHEBGLU - ChebPars(i) = p(30+i) - enddo - endif -C -C Chebyshev param. for the sea: -C - if (NCHEBSea.gt.0) then - do i=1,NCHEBSea -C Offset is now steering parameter (default = 70, params start from 41) - ChebParsSea(i) = p(30+IOFFSETCHEBSEA+i) - enddo - endif - - if (NChebGlu.gt.0 .or. NChebSea.gt.0) then - call ChebToPoly - endif - - end - - -* ------------------------------------------------------- - double precision function flav_number(q) -* ------------------------------------------------------- - - implicit none -#include "thresholds.inc" - - double precision q - - flav_number = 3.d0 - if (q.ge.qc) flav_number = 4.d0 - if (q.ge.qb) flav_number = 5.d0 - - return - end - - - subroutine DecodePara(pars) -C------------------------------------------------------- -C Created 22 Apr 11 by SG. Decode minuit input for CTEQ-like param. -C pars(1-10) - gluon -C pars(11-20) - Uv -C pars(21-30) - Dv -C pars(31-40) - Ubar, U -C pars(41-50) - Dbar, D -C pars(51-60) - sea, delta -C pars(91-100) - others -C------------------------------------------------------ - implicit none -#include "pdfparam.inc" -#include "steering.inc" -#include "for_debug.inc" - double precision pars(*) - integer i,j - logical lfirstt - data lfirstt /.true./ - - double precision fs - double precision fshermes -C--------------------------------------------------------- - if (lfirstt) then - lfirstt = .false. - print *,'DecodePara INFO: First time call' - endif - -C Hermes strange prepare: - if (ifsttype.eq.0) then - fs = fstrange - else - fs = fshermes(0.D0) - endif - -C simple copy first: - do i=1,10 - parglue(i) = pars(i) - paruval(i) = pars(10+i) - pardval(i) = pars(20+i) - parubar(i) = pars(30+i) - pardbar(i) = pars(40+i) - paru(i) = pars(50+i) - pard(i) = pars(60+i) - parsea(i) = pars(70+i) - parstr(i) = pars(80+i) - parother(i) = pars(90+i) - enddo - - if (PDF_DECOMPOSITION.eq.'D_U_Dbar_Ubar') then ! H1PDF2k like - - if (pard(2).eq.0) pard(2)=paru(2) - if (parubar(2).eq.0) parubar(2)=paru(2) - if (pardbar(2).eq.0) pardbar(2)=paru(2) - if (pardbar(1).eq.0) pardbar(1)=pard(1) - if (paru(1).eq.0) parU(1)=pard(1)*(1.D0-fs)/(1.D0-fcharm) - if (parUbar(1).eq.0) parUbar(1)=parU(1) -cv elseif (iparam.eq.2) then -cv if (pardval(2).eq.0) pardval(2)=paruval(2) ! Bud = Buv -cv if (parubar(2).eq.0) parubar(2)=pardbar(2) ! Bubar = Bdbar -cv pardval(2)=paruval(2) -cv parubar(2)=pardbar(2) -cv parUbar(1)=pardbar(1)*(1.D0-fs)/(1.D0-fcharm) - - -!> this style is common to HERAPDF, ATLASPDF: - elseif (index(PDF_DECOMPOSITION,'Dv_Uv_Dbar_Ubar_Str').ne.0) then - - if (pardval(2).eq.0) pardval(2)=paruval(2) ! 
Bud = Buv - if (parubar(2).eq.0) parubar(2)=pardbar(2) ! Bubar = Bdbar - if (parstr(1).eq.0.and. - $ parstr(2).eq.0.and. - $ parstr(3).eq.0) then - -!> use coupled strange to Dbar - FreeStrange=.false. - - else - FreeStrange=.true. - endif -!> couple Bstr and Cstr to dbar when zero: - if (FreeStrange) then - if (parstr(2).eq.0.and.parstr(3).ne.0) parstr(2)=pardbar(2) - if (parstr(3).eq.0.and.parstr(2).ne.0) parstr(3)=pardbar(3) - - endif - - if (fs.ne.-10000.and.(FreeStrange)) then -!> then use ubar and dbar (not Dbar and Ubar) - parstr(1)=fs/(1.-fs)*pardbar(1) - if (parubar(1).eq.0) parubar(1) = pardbar(1) - else -!> then use Dbar and Ubar - if (parubar(1).eq.0) parubar(1)=pardbar(1)*(1.D0-fs) - $ /(1.D0-fcharm) !then use Ubar=Dbar - endif - - - -c elseif (iparam.eq.3) then ! g,uval,dval,sea as in ZEUS-S 2002 fit -c -c paruval(2)=0.5 -c pardval(2)=0.5 -c parstr(2)=0.5 -c parstr(3)=parsea(3)+2. - - elseif (PDFStyle.eq.'CHEB'.or.PDFStyle.eq.'ZEUS Jet') then - pardval(2)=paruval(2) - - -* dbar-ubar (not Ubar - Dbar), Adel fixed to output of ZEUS-S fit - parstr(1)=0.27 - parstr(2)=0.5 - parstr(3)=parsea(3)+2. - -c elseif (iparam.eq.24) then ! g,uval,dval,sea as in ZEUS-JET fit -c pardval(2)=paruval(2) -c parstr(1)=0.27 -c parstr(2)=0.5 -c parstr(3)=parsea(3)+2. - endif - - if (debug) then - print '(''1uv:'',11F10.4)',(paruval(i),i=1,10) - print '(''1dv:'',11F10.4)',(pardval(i),i=1,10) - print '(''1Ub:'',11F10.4)',(parubar(i),i=1,10) - print '(''1Db:'',11F10.4)',(pardbar(i),i=1,10) - print '(''1GL:'',11F10.4)',(parglue(i),i=1,10) - print '(''1ST:'',11F10.4)',(parstr(i),i=1,10) - if (ITheory.eq.11) then - print '(''1PH:'',11F10.4)',(parphoton(i),i=1,10) - endif - endif - -C--------------------------------------------------------- - end - -C--------------------------------------------------------- - - double precision function para(x,a) -C---------------------------------------------------- -C -C standard-like parameterisation: -C AF = (a*x**b)*(1 - x)**c*(1 + d*x + e*x**2+f*x**3)- -C - (ap*x**bp)*(1-x)**cp -C -C----------------------------------------------------- - implicit none - double precision x,a(1:10) - double precision AF - - AF = a(1)*x**a(2)*(1 - x)**a(3)*(1 + a(4)*x - $ + a(5)*x**2+a(6)*x**3+a(10)*x**0.5)-a(7)*x**a(8)*(1-x)**a(9) - - para = AF - - end - - - - subroutine DecodeCtPara(pars) -C------------------------------------------------------- -C Created 22 Apr 11 by SG. Decode minuit input for CTEQ-like param. -C pars(1-6) - gluon -C pars(11-16) - Uv -C pars(21-26) - Dv -C pars(31-36) - Ubar -C pars(41-46) - Dbar -C pars(81-86) - Str -C pars(95-100) - alphas, fstrange, fcharm -C------------------------------------------------------ - implicit none -#include "pdfparam.inc" -#include "steering.inc" - double precision pars(*) - integer i - logical lfirstt - data lfirstt /.true./ - - double precision fs - double precision fshermes -C--------------------------------------------------------- - if (lfirstt) then - lfirstt = .false. - print *,'DecodeCtPara INFO: First time call' - endif - -C Hermes strange prepare: - if (ifsttype.eq.0) then - fs = fstrange - else - fs = fshermes(0.D0) - endif - -C simple copy first: - do i=1,9 - ctglue(i) = pars(i) - ctuval(i) = pars(10+i) - ctdval(i) = pars(20+i) - ctubar(i) = pars(30+i) - ctdbar(i) = pars(40+i) -cv add str - ctstr(i) = pars(80+i) - ctother(i)= pars(90+i) - enddo - -c UF = a(1)*exp(a(4)*x)*(1 - x)**a(3)*x**(a(2))*(1 + exp(a(5))*x -c $ + exp(a(6))*x**2) - - -C Extra constrains: - if (ctubar(2).eq.0) ctubar(2) = ctdbar(2) ! 
Bubar = Bdbar - if (ctuval(2).eq.0) ctuval(2) = ctdval(2) ! Buv = Bdv - -!> use ubar and dbar (not Dbar and Ubar) - ctstr(1)=fs/(1.-fs)*ctdbar(1) - if (ctubar(1).eq.0) ctubar(1) = ctdbar(1) - -!> use coupled strange to Dbar ! - if (ctstr(2).eq.0) ctstr(2)=ctdbar(2) - if (ctstr(3).eq.0) ctstr(3)=ctdbar(3) - if (ctstr(4).eq.0) ctstr(4)=ctdbar(4) - - -C (other constraints from sum-rules) - - -C--------------------------------------------------------- - end - - double precision function ctpara(x,a) -C---------------------------------------------------- -C -C cteq-like parameterisation: -C UF = a0*E**(a3*x)*(1 - x)**a2*x**(a1 + n)*(1 + E**a4*x + E**a5*x**2) -C -C----------------------------------------------------- - implicit none - double precision x,a(1:9) - double precision UF - UF = a(1)*exp(a(4)*x)*(1 - x)**a(3)*x**(a(2))*(1 + exp(a(5))*x - $ + exp(a(6))*x**2)-a(7)*x**a(8)*(1-x)**a(9) - - ctpara = UF - - end - - double precision function ctherapara(x,a) -C---------------------------------------------------- -C -C hybrid cteq-hera parameterisation: -c UF = (A*x**B)*(1 - x)**C * exp(A4*x) * (1 + D*x + E*x**2) -c -AP*x**BP*(1-x)**CP -C -C----------------------------------------------------- - implicit none - double precision x,a(1:9) - double precision UF - UF = a(1)*(1 - x)**a(3)*x**(a(2))*exp(a(6)*x)*(1 + a(4)*x - $ + a(5)*x**2)-a(7)*x**a(8)*(1-x)**a(9) - - ctherapara = UF - - end - - subroutine DecodeASPara(pars) -C------------------------------------------------------- -C Created 20 Jul 11 by VR. Decode minuit input for AS param. -C pars(21-25) - Uv -C pars(32-35) - Dv -C pars(43-45) - Ubar -C pars(51-55) - Dbar -C pars(1-5) - gluon -C------------------------------------------------------ - implicit none -#include "pdfparam.inc" -#include "steering.inc" - double precision pars(*) - integer i - logical lfirstt - data lfirstt /.true./ - double precision fs - double precision fshermes -C--------------------------------------------------------- - if (lfirstt) then - lfirstt = .false. - print *,'DecodeASPara INFO: First time call' - endif - - -C Hermes strange prepare: - if (ifsttype.eq.0) then - fs = fstrange - else - fs = fshermes(0.D0) - endif - -C simple copy first: - do i=1,5 - asglue(i) = pars(i) - asuval(i) = pars(10+i) - asdval(i) = pars(20+i) - asubar(i) = pars(30+i) - asdbar(i) = pars(40+i) - asother(i) = pars(94+i) - enddo - -C Extra constrains: - if (pars(31).eq.0) then - asubar(1) = asdbar(1) * (1.D0-fs)/(1.D0-fcharm) ! - asubar(2) = asdbar(2) ! Bubar = Bdbar - asubar(3) = asdbar(3) ! Bubar = Bdbar - endif - -C Impose Buv = Bdv if parameter for Buv = 0. -c if (pars(12).eq.0) then -c asuval(2) = asdval(2) ! 
Buv = Bdv -c endif - -c print '(''2uv:'',5F10.4)',(asuval(i),i=1,5) -c print '(''2dv:'',5F10.4)',(asdval(i),i=1,5) -c print '(''2Ub:'',5F10.4)',(asubar(i),i=1,5) -c print '(''2Db:'',5F10.4)',(asdbar(i),i=1,5) -c print '(''2GL:'',5F10.4)',(asglue(i),i=1,5) - - - -C--------------------------------------------------------- - end - - - double precision function splogn(x,a) -C---------------------------------------------------- -c Special lognormal function -c -c -c A.Schoening, University Heidelberg, Physikalisches Institut -c Creation: 12.6.2011 -c A1*x**(A2-A3*log(x))*(1-x)**(A4-A5*log(1-x)) -C----------------------------------------------------- - implicit none - double precision x,a(1:5) - double precision splogn1 - - splogn1=0.0d0 - if (x.gt.0.d0.and.x.lt.1.d0) then - splogn1=A(1)*x**(A(2)-A(3)*log(x))* - $ (1.d0-x)**(A(4)-A(5)*log(1.d0-x)) - endif - - - if (abs(splogn1).lt.1d30 .and. abs(splogn1).gt.1d-30) then -c value in allowed range - splogn=splogn1 - else - splogn=0.0d0 - endif - - return - end - - - - - - -* ------------------------------------------------------- - double precision function gluon(x) -* ------------------------------------------------------- -* x *g(x,Q2) - - implicit none -#include "pdfparam.inc" -#include "steering.inc" - double precision x - integer i -C External function: - double precision PolyParam,ctpara,ctherapara,para,splogn -C------------------------------------------------- - - -C 22 Apr 11, SG, Add CTEQ-like - if (PDFStyle.eq.'CTEQ') then - gluon = ctpara(x,ctglue) - return - endif - - if (PDFStyle.eq.'CTEQHERA') then - gluon = ctherapara(x,ctglue) - return - endif - -C 22 Sept 11, VR, Add AS - if ((PDFStyle.eq.'AS').or.(PDFStyle.eq.'BiLog')) then - gluon = splogn(x,asglue) - return - endif - if (nchebglu.eq.0) then - -!> HERAPDF style goes in here: - gluon=para(x,parglue) - else -C -C Use polynomial representation of cheb. -C - gluon = parglue(1) * PolyParam(x,nchebGlu,polyPars,chebxminlog) - if (ichebtypeGlu.eq.0) then -C Do nothing - else if (ichebtypeGlu.eq.1) then - gluon = gluon * (1 - x) ! force PDFs=0 for x=1 - endif - - endif - end - -* ------------------------------------------------------- - double precision function photon(x) -* ------------------------------------------------------- -* x *photon(x,Q2) - - implicit none -#include "pdfparam.inc" -#include "steering.inc" - double precision x - integer i -C External function: - double precision ctpara,ctherapara,para -C------------------------------------------------- - - -C 22 Apr 11, SG, Add CTEQ-like - if (PDFStyle.eq.'CTEQ') then - photon = ctpara(x,ctphoton) - return - elseif (PDFStyle.eq.'CTEQHERA') then - photon = ctherapara(x,ctphoton) - return - else - photon=para(x,parphoton) - - endif - - end - - -C------------------------------------------------- - Subroutine ChebToPoly() -C------------------------------------------------- -C -C Utility to convert chebyshev to standard polynomial expansion. 
-C - implicit none -#include "pdfparam.inc" -#include "steering.inc" - integer i -C--------------------------------- - if (nchebGlu.gt.0) then - do i=1,nchebmax - polyPars(i) = 0 - enddo - call DCHPWS(nchebGlu,ChebPars,polyPars) - endif - if (nchebSea.gt.0) then - do i=1,nchebmax - polyParsSea(i) = 0 - enddo - call DCHPWS(nchebSea,ChebParsSea,polyParsSea) - endif - end - - -* ------------------------------------------------------- - double precision function H1U(x) -* ------------------------------------------------------- - implicit none -#include "steering.inc" -#include "pdfparam.inc" - double precision x,para - - - - H1U=para(x,paru) - - - return - end - -* ------------------------------------------------------- - double precision function H1D(x) -* ------------------------------------------------------- - implicit none -#include "steering.inc" -#include "pdfparam.inc" - double precision x,para - - - H1D=para(x,pard) - - - return - end - -* ------------------------------------------------------- - double precision function Uval(x) -* ------------------------------------------------------- - implicit none -#include "steering.inc" -#include "pdfparam.inc" - double precision x,x23 - double precision PolyVal,ctpara,ctherapara,para,splogn -C--------------------------------------------------- - -C 22 Apr 11, SG, Add CTEQ-like - if (PDFStyle.eq.'CTEQ') then - UVal = ctpara(x,ctuval) - return - endif - - if (PDFStyle.eq.'CTEQHERA') then - UVal = ctherapara(x,ctuval) - return - endif - -C 22 Sep 11, VR, Add AS - if ((PDFStyle.eq.'AS').or.(PDFStyle.eq.'BiLog')) then - UVal = splogn(x,asuval) - return - endif - -C -C 25 Jan 2011: add polynomial param -C - - if (NPOLYVAL.eq.0) then -!> HERAPDF style goes in here - Uval=para(x,paruval) - else -C -C PDFs are parameterised as a function of x23 = x^{2/3} -C - x23 = x**(2.D0/3.D0) - Uval = paruval(1) * PolyVal(x23,NPOLYVALINT,PolyUval) - endif - - return - end - - -* ------------------------------------------------------- - double precision function Dval(x) -* ------------------------------------------------------- - implicit none -#include "steering.inc" -#include "pdfparam.inc" - double precision x,x23 - double precision PolyVal,ctpara,ctherapara,para,splogn -C-------------------------------------------------------- - -C 22 Apr 11, SG, Add CTEQ-like - if (PDFStyle.eq.'CTEQ') then - DVal = ctpara(x,ctdval) - return - endif - if (PDFStyle.eq.'CTEQHERA') then - DVal = ctherapara(x,ctdval) - return - endif -C 22 Sep 11, VR, Add AS - if ((PDFStyle.eq.'AS').or.(PDFStyle.eq.'BiLog')) then - DVal = splogn(x,asdval) - return - endif - - -C -C 25 Jan 2011: add polynomial param -C - - if (NPOLYVAL.eq.0) then -!> HERAPDF style goes in here - Dval=para(x,pardval) - else -C -C PDFs are parameterised as a function of x23 = x^{2/3} -C - x23 = x**(2.D0/3.D0) - Dval = pardval(1) * PolyVal(x23,NPOLYVALINT,PolyDval) - endif - - return - end - -* ------------------------------------------------------- - double precision function PolyVal(x,NPOLY,Poly) -C----------------------------------------------------- -C 25 Jan 2011 -C -C Evaluate polynomial sum fast -C - implicit none - integer NPOLY - double precision x,Poly(NPOLY) - integer i - double precision sum -C------------------------------------------------ - sum = 0 - do i=NPoly,1,-1 - sum = x*(sum + Poly(i)) - enddo - PolyVal = sum - end - - -* ------------------------------------------------------- - double precision function sea(x) -* ------------------------------------------------------- - implicit none -#include 
"steering.inc" -#include "pdfparam.inc" - double precision x,Ubar,Dbar -C External function: - double precision PolyParam,para -C-------------------------------------------------- - - if (Index(PDF_DECOMPOSITION,'Dbar_Ubar').gt.0) then - - sea = Ubar(x) + Dbar(x) - elseif (Index(PDF_DECOMPOSITION,'Sea').gt.0) then -! print*,'heeeere' -* warning for iparam = 3 or 4, the sea is 2 * sum (ubar +dbar + sbar + cbar) - - if (nchebSea.eq.0) then - sea=para(x,parsea) - else - sea = PolyParam(x,nchebSea,polyParsSea,chebxminlog) - if (ichebtypeSea.eq.0) then -C Do nothing - else if (ichebtypeSea.eq.1) then - sea = sea * (1 - x) ! force PDFs=0 for x=1 - endif - endif - endif - - return - end -* ------------------------------------------------------- - double precision Function PolyParam(x,ncheb,poly,xminlog) -* ------------------------------------------------------- -C -C SG: Use polynomial representation of cheb. -C - implicit none - integer ncheb - double precision x,poly(ncheb),xminlog - double precision xx,sum - integer i -C------------------------------------------------- - xx = (2.D0*log(x)-xminlog)/(-xminlog) - sum = poly(ncheb) - do i=ncheb-1,1,-1 - sum = sum*xx + poly(i) - enddo - - - PolyParam = sum - end - - -* ------------------------------------------------------- - double precision function dbmub(x) -* ------------------------------------------------------- -* new jf , added to fit a la ZEUS, dbmub = dbar-ubar (not Dbar - Ubar) - implicit none -#include "steering.inc" -#include "pdfparam.inc" - double precision x,para - - - if (PDFStyle.eq.'CHEB'.or.PDFStyle.eq.'ZEUS Jet') then - dbmub=para(x,parstr) - endif - return - end - -* ------------------------------------------------------- - double precision function qstrange (x) -* ------------------------------------------------------- -* new jf , added to fit a la ZEUS, qstrange = 0.1 (i.e. fstrange *.5) * sea - implicit none -#include "steering.inc" -#include "pdfparam.inc" - double precision x,sea,Dbar, para, ctpara,ctherapara -C SG: x-dependent fs: - double precision fs - double precision fshermes -C---------------------------------------------------- - if (ifsttype.eq.0) then - fs = fstrange - else - fs = fshermes(x) - endif - - if (PDFStyle.eq.'CTEQ') then - qstrange = ctpara(x, ctstr) - return - endif - - if (PDFStyle.eq.'CTEQHERA') then - qstrange = ctherapara(x, ctstr) - return - endif - - if (PDFStyle.eq.'CHEB'.or.PDFStyle.eq.'ZEUS Jet') then - qstrange = 0.5 * fs * sea(x) - return - - endif - - if (FreeStrange) then - qstrange = para(x, parstr) - else - qstrange = fs * Dbar(x) - endif - - -c elseif (iparam.eq.222222.or.iparam.eq.222223) then -c qstrange = fs * Dbar(x)/(1-fs) -cv else -cv endif - - return - end - -* ------------------------------------------------------- - double precision function cbar(x) -* ------------------------------------------------------- -* new2 jf - implicit none -#include "steering.inc" -#include "pdfparam.inc" -#include "thresholds.inc" - double precision x,pdf,q2 - dimension pdf(-N_CHARGE_PDF:N_CHARGE_PDF+N_NEUTRAL_PDF) - - double precision sing,flav_number,QPDFXQ,vcplus,vicplus,cplus - integer iflag,iq0,iqc,iqfromq - - if (PDFStyle.eq.'CHEB'.or.PDFStyle.eq.'ZEUS Jet') then - if (x.eq.1) goto 999 - call hf_get_pdfs(x,q2,pdf) -* charm a la ZEUS - if (q0.lt.qc) then - cbar = 0. 
- else - - cbar=pdf(-4) - endif - - endif - 999 continue - return - end - -* ------------------------------------------------------- - double precision function Ubar(x) -* ------------------------------------------------------- -* new2 jf -* corrected for iparam=2 and iparam = 3 or 4 - implicit none -#include "steering.inc" -#include "pdfparam.inc" -#include "thresholds.inc" - double precision x,sea,dbmub,qstrange,cbar - double precision sing,flav_number,QPDFXQ - integer iflag,iq0,iqb,iqc,iqfromq,jtest - double precision ctpara,ctherapara,para, splogn -C---------------------------------------------- -* new2 jf SPECIAL TEST with dubar - -C 22 Sep 11, VR, Add AS - if ((PDFStyle.eq.'AS').or.(PDFStyle.eq.'BiLog')) then - Ubar = splogn(x,asubar) - return - endif - - -C 22 Apr 11, SG, Add CTEQ-like - if (PDFStyle.eq.'CTEQ') then - Ubar=ctpara(x,ctubar)/(1-fcharm) - return - endif - - if (PDFStyle.eq.'CTEQHERA') then - Ubar=ctherapara(x,ctubar)/(1-fcharm) - return - endif - - - if (PDFStyle.eq.'CHEB'.or.PDFStyle.eq.'ZEUS Jet') then - Ubar = (0.5d0 * sea(x) - dbmub(x) - qstrange (x) + cbar(x))/2.d0 - return - endif - -cv elseif (iparam.eq.222222.or.iparam.eq.222223 -cv $ .or.iparam.eq.2011) then -! Ubar=ubar/(1-fc) --> ubar=Ubar*(1-fc) - - - if (FreeStrange) then - Ubar=para(x,parubar) - else - Ubar=para(x,parubar)/(1-fcharm) - endif - - - - return - end - -* ------------------------------------------------------- - double precision function Dbar(x) -* ------------------------------------------------------- -* new2 jf - implicit none -#include "steering.inc" -#include "pdfparam.inc" - double precision x,sea,Ubar - double precision ctpara,ctherapara,para,splogn -C SG: x-dependent fs: - double precision fs - double precision fshermes -C---------------------------------------------------- - if (ifsttype.eq.0) then - fs = fstrange - else - fs = fshermes(x) - endif -C------------------------------------------------------------ - -C 22 Sep 11, VR, Add AS - - if ((PDFStyle.eq.'AS').or.(PDFStyle.eq.'BiLog')) then - Dbar = splogn(x,asdbar) - return - endif - -C 22 Apr 11, SG, Add CTEQ-like - if (PDFStyle.eq.'CTEQ') then - Dbar=ctpara(x,ctdbar)+ctpara(x,ctstr) - return - endif - - if (PDFStyle.eq.'CTEQHERA') then - Dbar=ctherapara(x,ctdbar)+ctherapara(x,ctstr) - return - endif - - if (PDFStyle.eq.'CHEB'.or.PDFStyle.eq.'ZEUS Jet') then - Dbar = sea(x) * 0.5d0 - Ubar(x) - return - endif - -! elseif(iparam.eq.222222.or.iparam.eq.222223) then -! Dbar=para(x,pardbar)/(1-fstrange) - if (FreeStrange) then - Dbar=para(x,pardbar)+para(x,parstr) - else - Dbar=para(x,pardbar) - endif - - - - end - - -C--------------------------------------------------- - double precision function fshermes(x) -C--------------------------------------------------- -C -C X-dependent strange fraction, inspired by HERMES data -C Created 31 Oct 2009 by SG. -C - implicit none -#include "steering.inc" -#include "pdfparam.inc" - double precision x - double precision hermes_xcent,hermes_xrise - parameter (hermes_xcent= 0.07) - parameter (hermes_xrise=20.0) - logical lfirstlocal - data lfirstlocal/.true./ -C--------------------------------------------------- - if (lfirstlocal) then - lfirstlocal = .false. - print *,'----------------------------------------------------' - print *,' Called FSHERMES. 
Hermes-inspired strange density ' - print *,' Amp,Xmean,Xslope=',fstrange,hermes_xcent - $ ,hermes_xrise - print *,'----------------------------------------------------' - endif - - if (x.lt.1.0D-8) then - fshermes = fstrange*( - $ 0.5D0*(1.D0+tanh(-(1.0D-8-hermes_xcent)*hermes_xrise))) - else - fshermes = fstrange*( - $ 0.5D0*(1.D0+tanh(-(x-hermes_xcent)*hermes_xrise))) - endif -C print *,'DEBUG:',x,fshermes,fstrange -C--------------------------------------------------- - end - - - - -C--------------------------------------------------- - subroutine StorePoly(p,iflag) -C--------------------------------------------------- -C -C Created 27 Jan 2011 by SG. Transfer parameters from MINUIT array p to -C internal arrays PolyUval and PolyDval -C - implicit none -#include "steering.inc" -#include "pdfparam.inc" - double precision p(*) - integer iflag,i,nn - double precision ptmp(100) - -C----------------------------------- - if (iflag.eq.1) then - print *, - $ 'INFO: USING POLY parameterisation for valence quarks' - print '(''Require PDFs at x=1 to vanish as (1-x)**'',I1)' - $ ,IZPOPOLY - if (IPOLYSQR.eq.1) then - print *, - $ 'Square valence parameterisation, enforce positivity' - else if (IPOLYSQR.eq.0) then - else - print *,'Invalid IPOLYSQR=',IPOLYSQR - print *,'Can be 1 or 0, stop' - call HF_stop - endif - - endif - - if (IPOLYSQR.eq.0) then - Call DecodePoly(p(21),PolyUVal,NPOLYVAL,IZPOPOLY) - Call DecodePoly(p(31),PolyDVal,NPOLYVAL,IZPOPOLY) - NPOLYVALINT = NPOLYVAL+IZPOPOLY - else if (IPOLYSQR.eq.1) then - Call DecodePoly(p(21),Ptmp,NPOLYVAL,IZPOPOLY) - Call SquarePoly(NPOLYVAL+IZPOPOLY,PTmp,NPOLYVALINT,PolyUVal) - Call DecodePoly(p(31),Ptmp,NPOLYVAL,IZPOPOLY) - Call SquarePoly(NPOLYVAL+IZPOPOLY,PTmp,NPOLYVALINT,PolyDVal) - else - print *,'Invalid IPOLYSQR=',IPOLYSQR - print *,'Can be 1 or 0, stop' - call HF_stop - endif - - end - -C--------------------------------------------------- - subroutine DecodePoly(pars,poly,np,iz) -C--------------------------------------------------- -C -C Created 29 Jan 2011. Decode input minuit parameters to internal PolyVal -C arrays for different order of (1-x) -C - implicit none - integer np,iz - double precision pars(*),poly(*) - integer i -C---------------------------------------------- - if (iz.eq.1) then -* \times (1-x) - Poly(1) = pars(1) - do i=2,np - Poly(i) = pars(i)-pars(i-1) - enddo - Poly(np+1) = -pars(np) - elseif (iz.eq.2) then -* \times (1-x)^2 - Poly(1) = pars(1) - Poly(2) = pars(2)-2*pars(1) - do i=3,np - Poly(i) = pars(i)-2*pars(i-1)+pars(i-2) - enddo - Poly(np+1) = -2*pars(np)+pars(np-1) - Poly(np+2) = pars(np) - endif - - end -C--------------------------------------------------- - subroutine squarepoly(Npar,PolyIn,Npar2,PolyOut) -C----------------------------------------- -C -C 1 Feb 2011 by SG. Square a polynom. -C -C----------------------------------------- - implicit none - integer Npar, Npar2 - double precision PolyIn(Npar), PolyOut(*) - integer j,k,i -C-------------------------------------------------- - NPar2 = NPar*2-1 - - do k=1,NPar2 - PolyOut(k) = 0. 
- do j=1,k - i = k-j+1 - if (i.le.NPar.and.j.le.NPar) then - PolyOut(k) = PolyOut(k) + PolyIn(i)*PolyIn(j) - endif - enddo - enddo - - end - - - - -C----------------------------------------------------- - Subroutine PDFLength(DeltaChi2) -C--------------------------------------------------- -C -C Created 27 June 2009 by SG -C Add extra constraint for the PDF "length" = int_wmin^wmax \sqrt{1+pdf'(W)**2} dw -C - implicit none - -#include "steering.inc" -#include "pdfparam.inc" -#include "pdflength.inc" - double precision DeltaChi2 - double precision pdflen(5) - double precision zero - integer i - integer ngrid - parameter(ngrid=500) - double precision grid(ngrid+1),d0,dst,val,wmin,wmax -C External functions - double precision glulen,sealen - double precision ssdint - external glulen,sealen - logical LFirstIn - data LFirstIn /.true./ -C---------------------------------------------------- -C -C - if (LFirstIn) then - Wmin = WMNLEN - Wmax = WMXLEN - LFirstIn = .false. - print '(''PDFLENGTH INITIALIZATION: Set Wmin,Wmax to '',2F8.2)', - $ Wmin,Wmax - endif - - DeltaChi2 = 0 - - if (pdfLenWeight(1).gt.0) then - pdflen(1) = ssdint(Wmin,glulen,Wmax) - $ -(Wmax-Wmin) - - DeltaChi2 = DeltaChi2 + pdflen(1)*pdfLenWeight(1) - else - pdflen(1) = 0. - endif - - if (pdfLenWeight(2).gt.0) then - pdflen(2) = ssdint(Wmin,sealen,Wmax) - $ -(Wmax-Wmin) - - DeltaChi2 = DeltaChi2 + pdflen(2)*pdfLenWeight(2) - else - pdflen(2) = 0. - endif - - - print *,'Gluon length=',pdflen(1) - print *,'Sea length=',pdflen(2) - - -C--------------------------------------------------------- - end - - -C--------------------------------------------------- - double precision function powerLen(W,a,b,c,d,e,f,ap,bp,cp) -C--------------------------------------------------- -C -C Utility to calculate pdf length element in W for power parameterization. -C - implicit none -#include "steering.inc" - double precision W,a,b,c,d,e,f,q2,x,der,derw,p,ap,bp,cp -C---------------------------------------------------- -C Assume Q2=4 -! Q2 = 4.D0 - Q2 = starting_scale - X = Q2/(Q2 + W*W) - - p = (1.D0+d*x+e*x*x+f*x*x*x) - der = a*x**b*(1.D0-x)**c*p* - $ (b/x - $ - c/(1.0D0-x) - $ + (d+2.D0*e*x+3.D0*f*x*x)/p) - $ -ap*x**bp*(1.D0-x)**cp* - $ (bp/x - $ -cp/(1.0D0-x)) - -C W derrivative: - derw = - der* (2*W*Q2)/((W*W+Q2)*(W*W+Q2)) - - PowerLen = sqrt(1.D0+derw*derw) -C---------------------------------------------------- - end - -C--------------------------------------------------- - double precision function ChebLen(W,ncheb,poly,a,xminlog,iType) -C--------------------------------------------------- -C -C Utility to calculate pdf length element in W for chebyshev parameterization. -C - implicit none -#include "steering.inc" - integer ncheb,iType - double precision W,poly(ncheb),a,xminlog - double precision Q2,X,XX,Sum,der,derw,sum2 - integer i - logical LFirst - data LFirst /.true./ -C------------------------------------------------------ - if (LFirst) then - print *,'First time in ChebLen. IType=',itype - LFirst = .false. 
- endif -C Assume Q2=4 - Q2 = starting_scale - X = Q2/(Q2 + W*W) -C -C get derrivative: -C - xx = (2.D0*log(x)-xminlog)/(-xminlog) - sum = poly(ncheb)*(ncheb-1) - - do i=ncheb-1,2,-1 - sum = sum*xx + poly(i)*(i-1.D0) - enddo - -C SG: Fix for (1-x) dumping: - if (iType.eq.0) then -C do nothing - else if (iType.eq.1) then -C subract term corresponding to -x: - sum2 = poly(ncheb)*ncheb - do i=ncheb-1,1,-1 - sum2 = sum2*xx + poly(i)*i - enddo - sum = sum - sum2 - endif - - der = sum * a * 2.D0/(-xminlog) / x -C W derrivative: - derw = - der* (2*W*Q2)/((W*W+Q2)*(W*W+Q2)) - - ChebLen = sqrt(1.D0 + derw*derw) -C------------------------------------------------------- - end - - -C--------------------------------------------------- - double precision function glulen(W) -C--------------------------------------------------- - implicit none - double precision W -#include "pdfparam.inc" -#include "steering.inc" -C - double precision PowerLen,ChebLen -C---------------------------------------------------- - - - if (nchebglu.eq.0) then - glulen = powerlen(W,parglue(1) - $ ,parglue(2),parglue(3),parglue(4),parglue(5),parglue(6), - $ parglue(7),parglue(8),parglue(9)) - else - glulen = cheblen(W,nchebGlu,polyPars,parglue(1),chebxminlog, - $ ichebtypeGlu) - endif - end - -C--------------------------------------------------- - double precision function Sealen(W) -C--------------------------------------------------- - implicit none - double precision W -#include "pdfparam.inc" -#include "steering.inc" -C - double precision PowerLen,ChebLen -C---------------------------------------------------- - - - if (nchebsea.eq.0) then - Sealen = powerlen(W,parsea(1),parsea(2) - $ ,parsea(3),parsea(4),parsea(5),parsea(6), - $ parsea(7),parsea(8),parsea(9)) - else - Sealen = cheblen(W,nchebSea,polyParsSea,1.D0,chebxminlog, - $ ichebtypeSea) - - endif - end -C--------------------------------------------------- - subroutine SaveRestorePdfs(imode) -C------------------------------------------------------- -C Leave only the contribution of the valence quarks -C for DGLAP+Dipole model fits. -C This subroutine ius called by subroutine fcn -C------------------------------------------------------- - implicit none - -#include "steering.inc" -#include "pdfparam.inc" - integer imode - - double precision savglue(10) - double precision savsea(10) - double precision savdv(10) - double precision savuv(10) - integer i -C------------------------------ - - if (imode.eq.0) then -C save all - do i=1,10 - savglue(i) = parglue(i) - savsea(i) = parsea(i) - savdv(i) = pardval(i) - savuv(i) = paruval(i) - enddo - elseif (imode.eq.1) then - do i=1,10 -C Restore all: - parglue(i) = savglue(i) - parsea(i) = savsea(i) - pardval(i) = savdv(i) - paruval(i) = savuv(i) - -C For dglap fit, reset gluon and sea, keep valence: - parglue(i) = 0. - parsea(i) = 0. - enddo - elseif (imode.eq.2) then - do i=1,10 -C restore - parglue(i) = savglue(i) - parsea(i) = savsea(i) - pardval(i) = savdv(i) - paruval(i) = savuv(i) - -C For dipole, reset valence: - pardval(i) = 0.0 - paruval(i) = 0.0 - enddo - - elseif (imode.eq.3) then - do i=1,10 -C restore - parglue(i) = savglue(i) - parsea(i) = savsea(i) - pardval(i) = savdv(i) - paruval(i) = savuv(i) - - enddo - endif - - print *,'in save restore',imode,parglue(1),parglue(2),parglue(3) - - end - -C !> Read PDF from a text file. 
- - double precision function pdf_from_text(x,id) - implicit none -#include "steering.inc" -#include "ntot.inc" -#include "pdfparam.inc" - double precision x - integer id - logical lfirst - save lfirst - data lfirst/.true./ - - integer NXgrid - double precision Q20 - namelist/XGrid/NXgrid, Q20 - - double precision xx(NxgridMax) - double precision xuv(NxgridMax) - double precision xdv(NxgridMax) - double precision xUbar(NxgridMax) - double precision xDbar(NxgridMax) - double precision xg(NxgridMax) - save xx, xuv, xdv, xUbar, xDbar, xg - integer i,ix - - integer ixfrmx - logical XXATIX -C------------------------------------------------------ - if (lfirst) then - lfirst = .false. - open (51,file=LHAPDFSET,status='old',err=91) - read (51,nml=XGrid,err=92,end=93) -C Read the data - read (51,*,err=94,end=95) (xx(i),i=1,NXgrid) ! x -C .... Add check .... - do i=1,NXgrid - ix = ixfrmx(xx(i)) - if (.not. xxatix(xx(i),ix)) then - call hf_errlog(6,'F:Mis-match of the QCDNUM and text - $ file grid in file '//Trim(LHAPDFSET)) - endif - enddo -C ... read the tables ... - read (51,*,err=94,end=95) (xuv(i),i=1,NXgrid) - read (51,*,err=94,end=95) (xdv(i),i=1,NXgrid) - read (51,*,err=94,end=95) (xubar(i),i=1,NXgrid) - read (51,*,err=94,end=95) (xdbar(i),i=1,NXgrid) - read (51,*,err=94,end=95) (xg(i),i=1,NXgrid) - -c print '(4F12.6)',(xx(i),xg(i),xdbar(i),xubar(i),i=1,nxgrid) -c stop - - call hf_errlog(301213,'I:Read PDF data from ' - $ //Trim(LHAPDFSET)) - - close (51) - endif -C------------------------------------------------------ - pdf_from_text = 0.0D0 - -C Get grid point: - ix = ixfrmx(x) - if (id.eq.0) then - pdf_from_text = xg(ix) - elseif (id.eq.1) then - pdf_from_text = xdv(ix) - elseif (id.eq.2) then - pdf_from_text = xuv(ix) - elseif (id.eq.3) then - pdf_from_text = 2*xdbar(ix) * fstrange ! /(1-fstrange) - elseif (id.eq.4) then - pdf_from_text = xubar(ix) - elseif (id.eq.5) then - pdf_from_text = xdbar(ix) ! * 1/(1-fstrange) - elseif (id.eq.6) then - pdf_from_text = 0.d0 - endif - -C print *,ix,xx(ix),x,fstrange -C stop - - - return - 91 call hf_errlog(1,'F:pdf_from_text: Can not open file ' - $ //Trim(LHAPDFSET)) - 92 call hf_errlog(2, - $ 'F:pdf_from_text: Error reading namelist XGrid in ' - $ //Trim(LHAPDFSET)) - 93 call hf_errlog(3, - $ 'F:pdf_from_text: Can not find namelist XGrid in ' - $ //Trim(LHAPDFSET)) - 94 call hf_errlog(4,'F:pdf_from_text: Error reading PDF data in ' - $ //Trim(LHAPDFSET)) - 95 call hf_errlog(5,'F:pdf_from_text: End of file for PDF data in ' - $ //Trim(LHAPDFSET)) - end - -CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC - Subroutine DecodeFractal(pars) - -#include "fractal.inc" -#include "extrapars.inc" - - - integer GetParameterIndex - double precision pars(*) - integer idpx - - write(*,*) ' read parameters for fractal model >>' - -! idpx = GetParameterIndex('frac_1') -! 
idpx = iExtraParamMinuit(idpx) - f_D0 = pars(1) - f_Q02 = pars(2) - f_D2 = pars(3) - f_D3 = pars(4) - f_D1 = pars(5) - f_R = pars(6) - end - - diff --git a/src/qedevol.f b/src/qedevol.f deleted file mode 100644 index d604fbad5a84620c1fda7d9e84107bd74ef1c7e9..0000000000000000000000000000000000000000 --- a/src/qedevol.f +++ /dev/null @@ -1,701 +0,0 @@ -************************************************************************ -* -* Initialization routine for QEDEVOL -* -************************************************************************ - subroutine qedevol_ini - - implicit double precision (a-h,o-z) - -#include "steering.inc" -#include "alphas.inc" -#include "couplings.inc" -#include "thresholds.inc" - - dimension xf(-6:7) - -C Evolution parameters - data mxord/4/ !maximum order of evolution (QCD+QED) - data iordqcd/2/ !QCD order: 1 - LO, 2 - NLO, 3 - NNLO - data iordqed/1/ !QED order: 0 - no QED, 1 - LO - data aem0/0.00119306554042/,rem20/3.157729D0/ !alphaem/(2*pi) - - common /aem/ aem0,rem20,q2b,q2t - -C ------------------------------------------------------------------ -C Declarations for the nxn evolution toolbox -C ------------------------------------------------------------------ - parameter (nstoru = 1000000) !size of local store - dimension storu(nstoru) !local store - dimension iqlim(2) - - dimension idPiju(28,4),idAiju(5),idAlfa(4) !identifier arrays - dimension idw1(4,4,4),idf1(4),ida1(4,4,4) !identifier arrays - dimension idw2(2,2,4),idf2(2),ida2(2,2,4) - dimension idw3(1,1,4),idf3(1),ida3(1,1,4) - dimension idw4(1,1,4),idf4(1) - dimension idw5(1,1,4),idf5(1) - dimension idw6(1,1,4),idf6(1) - dimension idw7(1,1,4),idf7(1) - dimension idw8(1,1,4),idf8(1) - dimension idw9(1,1,4),idf9(1) - dimension idw10(1,1,4),idf10(1) - dimension itypes(6) !table types - dimension itypes1(6) !table types - dimension start1(4,1000) - dimension start2(2,1000) - dimension start3(1,1000) - dimension start4(1,1000) - dimension start5(1,1000) - dimension start6(1,1000) - dimension start7(1,1000) - dimension start8(1,1000) - dimension start9(1,1000) - dimension start10(1,1000) - - data itypes/6*0/ !initialise types - data itypes1/6*0/ !initialise types - - external AsVal1,AsVal2,AsVal3,AemVal1 - common /qcdqedord/ iordqcd,iordqed - - common /nnevol/ storu,idw1,ida1,idf1,idw2,ida2,idf2, - $idw3,ida3,idf3,idw4,idf4,idw5,idf5,idw6,idf6,idw7,idf7,idw8,idf8, - $idw9,idf9,idw10,idf10 - - lun = 6 !stdout, -6 stdout w/out banner page - -C Order should not exceed that of the nxn weight calculation - if(iordqcd+iordqed.gt.mxord) stop 'Evolution order too large' - -C Set evolution parameters - call setord(iordqcd) - call setint('nopt',444) - alphas = hf_get_alphas(mz*mz) - call setalf(0.1176d0,mz*mz) !input alphas - call grpars(nx, xmi, xma, nq, qmi, qma, iord) - - q0 = starting_scale - -C also add threshold values: - - q2c = qc - q2b = qb - q2t = qt - - iqc = iqfrmq(q2c) !charm threshold - iqb = iqfrmq(q2b) !bottom threshold - iqt = iqfrmq(q2t) !top threshold - iq0 = iqfrmq(q0) !starting scale - -C ------------------------------------------------------------------ -C Do evolution with the nxn evolution toolbox -C ------------------------------------------------------------------ - -C Weight tables - itypes(1) = 5 - itypes(2) = 84 - itypes1(2) = 28 -C Put 14 pdf and 4 alpha tables in the store - itypes(5) = 14 - itypes(6) = 4 - -C isetw is the table set identifier assigned by QCDNUM - call MakeTab(storu,nstoru,itypes,0,0,isetw,nwordsu) - call MakeTab(storu,nstoru,itypes1,0,0,isetw1,nwordsu) - - print 
*,'+-------------------------------------------------------' - print *,'Starting scale and flavour thresholds indexes: ', - $iq0,iqc,iqb,iqt - print *,'Starting scale and flavour thresholds values: ', - $qfrmiq(iq0),qfrmiq(iqc),qfrmiq(iqb),qfrmiq(iqt) - print *,'--------------------------------------------------------' - -C Calculate evolution weigths - call FilWTqcd(storu,isetw,idpiju,idaiju,iordqcd) - call FilWTqed(storu,isetw1,idpiju,iordqed) - -C Fill tables of alphas and alphaem values - idAlfa(1) = 1000*isetw+601 - idAlfa(2) = 1000*isetw+602 - idAlfa(3) = 1000*isetw+603 - idAlfa(4) = 1000*isetw+604 - - call EvFillA(storu,idAlfa(1),AsVal1) !LO QCD - call EvFillA(storu,idAlfa(2),AsVal2) !NLO QCD - call EvFillA(storu,idAlfa(3),AsVal3) !NNLO QCD - call EvFillA(storu,idAlfa(4),AemVal1) !LO QED - -C Setup the identifiers for nxn evolution - ityp = 0 - do i = 1,4 - do j = 1,4 - ityp = ityp+1 - do k = 1,mxord - ida1(i,j,k) = idAlfa(k) - idw1(i,j,k) = idPiju(ityp,k) - enddo - enddo - enddo - - do i = 1,2 - do j = 1,2 - ityp = ityp+1 - do k = 1,mxord - ida2(i,j,k) = idAlfa(k) - idw2(i,j,k) = idPiju(ityp,k) - enddo - enddo - enddo - - do k = 1,mxord - ida3(1,1,k) = idAlfa(k) - idw3(1,1,k) = idPiju(21,k) - idw4(1,1,k) = idPiju(22,k) - idw5(1,1,k) = idPiju(23,k) - idw6(1,1,k) = idPiju(24,k) - idw7(1,1,k) = idPiju(25,k) - idw8(1,1,k) = idPiju(26,k) - idw9(1,1,k) = idPiju(27,k) - idw10(1,1,k) = idPiju(28,k) - enddo - -C PDF table identifiers - idf1(1) = 1000*isetw+501 !Delta_S - idf1(2) = 1000*isetw+502 !Sigma - idf1(3) = 1000*isetw+503 !gluon - idf1(4) = 1000*isetw+504 !photon - idf2(1) = 1000*isetw+505 !Delta_V - idf2(2) = 1000*isetw+506 !V - idf3(1) = 1000*isetw+507 !Delta_ds - idf4(1) = 1000*isetw+508 !Delta_uc - idf5(1) = 1000*isetw+509 !Delta_sb - idf6(1) = 1000*isetw+510 !Delta_ct - idf7(1) = 1000*isetw+511 !V_ds - idf8(1) = 1000*isetw+512 !V_uc - idf9(1) = 1000*isetw+513 !V_sb - idf10(1) = 1000*isetw+514 !V_ct - - do ix = 1,nx - x = xfrmix(ix) - call ExternalSetQEDEVOL(x,q0,xf) - start1(1,ix)=xf(-2)+xf(2)-((xf(-1)+xf(1))+(xf(-3)+xf(3))) - start1(2,ix)=xf(-2)+xf(2)+((xf(-1)+xf(1))+(xf(-3)+xf(3))) - start1(3,ix)=xf(0) - start2(1,ix)=xf(2)-xf(-2)-((xf(1)-xf(-1))+(xf(3)-xf(-3))) - start2(2,ix)=xf(2)-xf(-2)+((xf(1)-xf(-1))+(xf(3)-xf(-3))) - start3(1,ix)=xf(1)+xf(-1)-(xf(3)+xf(-3)) - start7(1,ix)=xf(1)-xf(-1)-(xf(3)-xf(-3)) - start1(4,ix)=xf(7) - enddo - - - - iqlim(1) = iq0 - iqlim(2) = iq0 - nf = 1 - do while (nf.gt.0) - iqlim(1) = iqlim(2) - iqlim(2) = 99999 - call EvDglap(storu,idw1,ida1,idf1,start1,4,4,iqlim,nf,eps) - call EvDglap(storu,idw2,ida2,idf2,start2,2,2,iqlim,nf,eps) - call EvDglap(storu,idw3,ida3,idf3,start3,1,1,iqlim,nf,eps) - call EvDglap(storu,idw7,ida3,idf7,start7,1,1,iqlim,nf,eps) - enddo - - do ix = 1,nx - start4(1,ix) = 0.5D0*EvPdfij(storu,idf1(1),ix,iqc,1) - $ +0.5D0*EvPdfij(storu,idf1(2),ix,iqc,1) - start8(1,ix) = 0.5D0*EvPdfij(storu,idf2(1),ix,iqc,1) - $ +0.5D0*EvPdfij(storu,idf2(2),ix,iqc,1) - start5(1,ix) = -0.25D0*EvPdfij(storu,idf1(1),ix,iqb,1) - $ +0.25D0*EvPdfij(storu,idf1(2),ix,iqb,1) - $ -0.5D0*EvPdfij(storu,idf3(1),ix,iqb,1) - start9(1,ix) = -0.25D0*EvPdfij(storu,idf2(1),ix,iqb,1) - $ +0.25D0*EvPdfij(storu,idf2(2),ix,iqb,1) - $ -0.5D0*EvPdfij(storu,idf7(1),ix,iqb,1) - enddo - - iqlim(1) = iqc - iqlim(2) = iqc - nf = 1 - do while (nf.gt.0) - iqlim(1) = iqlim(2) - iqlim(2) = 99999 - call EvDglap(storu,idw4,ida3,idf4,start4,1,1,iqlim,nf,eps) - call EvDglap(storu,idw8,ida3,idf8,start8,1,1,iqlim,nf,eps) - enddo - - if (iqt.gt.0) then - do ix = 1,nx - start6(1,ix) = 
+0.25D0*EvPdfij(storu,idf1(1),ix,iqt,1) - $ +0.25D0*EvPdfij(storu,idf1(2),ix,iqt,1) - $ -0.5D0*EvPdfij(storu,idf4(1),ix,iqt,1) - start10(1,ix) = +0.25D0*EvPdfij(storu,idf2(1),ix,iqt,1) - $ +0.25D0*EvPdfij(storu,idf2(2),ix,iqt,1) - $ -0.5D0*EvPdfij(storu,idf8(1),ix,iqt,1) - enddo - endif - - iqlim(1) = iqb - iqlim(2) = iqb - nf = 1 - do while (nf.gt.0) - iqlim(1) = iqlim(2) - iqlim(2) = 99999 - call EvDglap(storu,idw5,ida3,idf5,start5,1,1,iqlim,nf,eps) - call EvDglap(storu,idw9,ida3,idf9,start9,1,1,iqlim,nf,eps) - enddo - - - if (iqt.gt.0) then - iqlim(1) = iqt - iqlim(2) = iqt - nf = 1 - do while (nf.gt.0) - iqlim(1) = iqlim(2) - iqlim(2) = 99999 - call EvDglap(storu,idw6,ida3,idf6,start6,1,1,iqlim,nf,eps) - call EvDglap(storu,idw10,ida3,idf10,start10,1,1,iqlim,nf,eps) - enddo - endif - -c call dumptab(storu,isetw,11,'qcdweights.wt','') -c call dumptab(storu,isetw1,12,'qedweights.wt','') - - return - end - -************************************************************************ -* -* Evolution routine for QEDEVOL -* -************************************************************************ - subroutine qedevol_main - - implicit double precision (a-h,o-z) - -#include "steering.inc" -#include "alphas.inc" -#include "couplings.inc" -#include "thresholds.inc" - - dimension xf(-6:7) - -C evolution parameters - data mxord/4/ !maximum order of evolution - data iordqcd/2/ !QCD order: 1 - LO, 2 - NLO, 3 - NNLO - data iordqed/1/ !QED order: 0 - no QED, 1 - LO - data aem0/0.00119306554042/,rem20/3.157729D0/ !alphaem/(2*pi) - - common /aem/ aem0,rem20,q2b,q2t - -C Pdf output -c data ichk/1/ !yes/no check limits - -C ------------------------------------------------------------------ -C Declarations for the nxn evolution toolbox -C ------------------------------------------------------------------ - parameter (nstoru = 1000000) !size of local store - dimension storu(nstoru) !local store - dimension iqlim(2) - - dimension idPiju(28,4),idAiju(5),idAlfa(4) !identifier arrays - dimension idw1(4,4,4),idf1(4),ida1(4,4,4) !identifier arrays - dimension idw2(2,2,4),idf2(2),ida2(2,2,4) - dimension idw3(1,1,4),idf3(1),ida3(1,1,4) - dimension idw4(1,1,4),idf4(1) - dimension idw5(1,1,4),idf5(1) - dimension idw6(1,1,4),idf6(1) - dimension idw7(1,1,4),idf7(1) - dimension idw8(1,1,4),idf8(1) - dimension idw9(1,1,4),idf9(1) - dimension idw10(1,1,4),idf10(1) - dimension idf(0:13) - dimension itypes(6) !table types - dimension itypes1(6) !table types - dimension start1(4,1000) - dimension start2(2,1000) - dimension start3(1,1000) - dimension start4(1,1000) - dimension start5(1,1000) - dimension start6(1,1000) - dimension start7(1,1000) - dimension start8(1,1000) - dimension start9(1,1000) - dimension start10(1,1000) - dimension def(-6:6,12) - data def / -C-- tb bb cb sb ub db g d u s c b t -C-- -6 -5 -4 -3 -2 -1 0 1 2 3 4 5 6.. 
- + 1.,-1., 1.,-1., 1.,-1., 0.,-1., 1.,-1., 1.,-1., 1., !Delta_S - + 1., 1., 1., 1., 1., 1., 0., 1., 1., 1., 1., 1., 1., !Sigma - +-1., 1.,-1., 1.,-1., 1., 0.,-1., 1.,-1., 1.,-1., 1., !Delta_V - +-1.,-1.,-1.,-1.,-1.,-1., 0., 1., 1., 1., 1., 1., 1., !V - + 0., 0., 0.,-1., 0., 1., 0., 1., 0.,-1., 0., 0., 0., !Delta_ds - + 0., 0., 0., 1., 0.,-1., 0., 1., 0.,-1., 0., 0., 0., !V_ds - + 0., 0.,-1., 0., 1., 0., 0., 0., 1., 0.,-1., 0., 0., !Delta_uc - + 0., 0., 1., 0.,-1., 0., 0., 0., 1., 0.,-1., 0., 0., !V_uc - + 0.,-1., 0., 1., 0., 0., 0., 0., 0., 1., 0.,-1., 0., !Delta_sb - + 0., 1., 0.,-1., 0., 0., 0., 0., 0., 1., 0.,-1., 0., !V_sb - +-1., 0., 1., 0., 0., 0., 0., 0., 0., 0., 1., 0.,-1., !Delta_ct - + 1., 0.,-1., 0., 0., 0., 0., 0., 0., 0., 1., 0.,-1. / !V_ct - data itypes/6*0/ !initialise types - data itypes1/6*0/ !initialise types - - external AsVal1,AsVal2,AsVal3,AemVal1 - common /qcdqedord/ iordqcd,iordqed - - common /nnevol/ storu,idw1,ida1,idf1,idw2,ida2,idf2, - $idw3,ida3,idf3,idw4,idf4,idw5,idf5,idw6,idf6,idw7,idf7,idw8,idf8, - $idw9,idf9,idw10,idf10 - - idf(0) = idf1(3) !gluon - idf(1) = idf1(1) !Delta_S - idf(2) = idf1(2) !Sigma - idf(3) = idf2(1) !Delta_V - idf(4) = idf2(2) !V - idf(5) = idf3(1) !Delta_ds - idf(6) = idf7(1) !V_ds - idf(7) = idf4(1) !Delta_uc - idf(8) = idf8(1) !V_uc - idf(9) = idf5(1) !Delta_sb - idf(10) = idf9(1) !V_sb - idf(11) = idf6(1) !Delta_ct - idf(12) = idf10(1) !V_ct - idf(13) = idf1(4) !photon - - lun = 6 !stdout, -6 stdout w/out banner page - -C Order should not exceed that of the nxn weight calculation - if(iordqcd+iordqed.gt.mxord) stop 'Evolution order too large' - -C Set evolution parameters -c call setord(iordqcd) -c call setint('nopt',444) -c alphas = hf_get_alphas(mz*mz) - call grpars(nx, xmi, xma, nq, qmi, qma, iord) -c call setalf(alphas,mz*mz) - q0 = starting_scale - q2c = qc - q2b = qb - q2t = qt - iqc = iqfrmq(q2c) !charm threshold - iqb = iqfrmq(q2b) !bottom threshold - iqt = iqfrmq(q2t) !top threshold - iq0 = iqfrmq(q0) !starting scale - -C ------------------------------------------------------------------ -C Do evolution with the nxn evolution toolbox -C ------------------------------------------------------------------ - -C Weight tables - itypes(1) = 5 - itypes(2) = 84 - itypes1(2) = 28 -C Put 14 pdf and 4 alpha tables in the store - itypes(5) = 14 - itypes(6) = 4 - - - do ix = 1,nx - x = xfrmix(ix) - if (x.ne.0) then - call ExternalSetQEDEVOL(x,q0,xf) - start1(1,ix)=xf(-2)+xf(2)-((xf(-1)+xf(1))+(xf(-3)+xf(3))) - start1(2,ix)=xf(-2)+xf(2)+((xf(-1)+xf(1))+(xf(-3)+xf(3))) - start1(3,ix)=xf(0) - start2(1,ix)=xf(2)-xf(-2)-((xf(1)-xf(-1))+(xf(3)-xf(-3))) - start2(2,ix)=xf(2)-xf(-2)+((xf(1)-xf(-1))+(xf(3)-xf(-3))) - start3(1,ix)=xf(1)+xf(-1)-(xf(3)+xf(-3)) - start7(1,ix)=xf(1)-xf(-1)-(xf(3)-xf(-3)) - start1(4,ix)=xf(7) - endif - enddo - - iqlim(1) = iq0 - iqlim(2) = iq0 - nf = 1 - do while (nf.gt.0) - iqlim(1) = iqlim(2) - iqlim(2) = 99999 - call EvDglap(storu,idw1,ida1,idf1,start1,4,4,iqlim,nf,eps) - call EvDglap(storu,idw2,ida2,idf2,start2,2,2,iqlim,nf,eps) - call EvDglap(storu,idw3,ida3,idf3,start3,1,1,iqlim,nf,eps) - call EvDglap(storu,idw7,ida3,idf7,start7,1,1,iqlim,nf,eps) - enddo - - - do ix = 1,nx - start4(1,ix) = 0.5D0*EvPdfij(storu,idf1(1),ix,iqc,1) - $ +0.5D0*EvPdfij(storu,idf1(2),ix,iqc,1) - start8(1,ix) = 0.5D0*EvPdfij(storu,idf2(1),ix,iqc,1) - $ +0.5D0*EvPdfij(storu,idf2(2),ix,iqc,1) - start5(1,ix) = -0.25D0*EvPdfij(storu,idf1(1),ix,iqb,1) - $ +0.25D0*EvPdfij(storu,idf1(2),ix,iqb,1) - $ -0.5D0*EvPdfij(storu,idf3(1),ix,iqb,1) - 
start9(1,ix) = -0.25D0*EvPdfij(storu,idf2(1),ix,iqb,1) - $ +0.25D0*EvPdfij(storu,idf2(2),ix,iqb,1) - $ -0.5D0*EvPdfij(storu,idf7(1),ix,iqb,1) - enddo - - iqlim(1) = iqc - iqlim(2) = iqc - nf = 1 - do while (nf.gt.0) - iqlim(1) = iqlim(2) - iqlim(2) = 99999 - call EvDglap(storu,idw4,ida3,idf4,start4,1,1,iqlim,nf,eps) - call EvDglap(storu,idw8,ida3,idf8,start8,1,1,iqlim,nf,eps) - enddo - - - if (iqt.gt.0) then - do ix = 1,nx - start6(1,ix) = +0.25D0*EvPdfij(storu,idf1(1),ix,iqt,1) - $ +0.25D0*EvPdfij(storu,idf1(2),ix,iqt,1) - $ -0.5D0*EvPdfij(storu,idf4(1),ix,iqt,1) - start10(1,ix) = +0.25D0*EvPdfij(storu,idf2(1),ix,iqt,1) - $ +0.25D0*EvPdfij(storu,idf2(2),ix,iqt,1) - $ -0.5D0*EvPdfij(storu,idf8(1),ix,iqt,1) - enddo - endif - - iqlim(1) = iqb - iqlim(2) = iqb - nf = 1 - do while (nf.gt.0) - iqlim(1) = iqlim(2) - iqlim(2) = 99999 - call EvDglap(storu,idw5,ida3,idf5,start5,1,1,iqlim,nf,eps) - call EvDglap(storu,idw9,ida3,idf9,start9,1,1,iqlim,nf,eps) - enddo - - - if (iqt.gt.0) then - iqlim(1) = iqt - iqlim(2) = iqt - nf = 1 - do while (nf.gt.0) - iqlim(1) = iqlim(2) - iqlim(2) = 99999 - call EvDglap(storu,idw6,ida3,idf6,start6,1,1,iqlim,nf,eps) - call EvDglap(storu,idw10,ida3,idf10,start10,1,1,iqlim,nf,eps) - enddo - endif - - call EVPCOPY (storu, idf, def, 1, 8) - - return - end - - Subroutine QEDEVOLsubr(x, qmu2, xf) -C------------------------------------------------------- -C -C External PDF reading for QEDEVOL -C -C-------------------------------------------------------- - implicit double precision (a-h,o-z) -* -#include "steering.inc" -#include "thresholds.inc" - double precision x,qmu2 - double precision xdelta,xsigma,xgluon,xphoton,xdeltav,xv,xdeltads, - $ xdeltauc,xdeltasb,xvds,xvuc,xvsb - dimension xf(-6:7) - - parameter (nstoru = 1000000) !size of local store - dimension storu(nstoru) - dimension idw1(4,4,4),idf1(4),ida1(4,4,4) - dimension idw2(2,2,4),idf2(2),ida2(2,2,4) - dimension idw3(1,1,4),idf3(1),ida3(1,1,4) - dimension idw4(1,1,4),idf4(1) - dimension idw5(1,1,4),idf5(1) - dimension idw6(1,1,4),idf6(1) - dimension idw7(1,1,4),idf7(1) - dimension idw8(1,1,4),idf8(1) - dimension idw9(1,1,4),idf9(1) - dimension idw10(1,1,4),idf10(1) - - common /nnevol/ storu,idw1,ida1,idf1,idw2,ida2,idf2,idw3, - $ida3,idf3,idw4,idf4,idw5,idf5,idw6,idf6,idw7,idf7,idw8,idf8, - $idw9,idf9,idw10,idf10 - - call evtable(storu,idf1(1),x,1,qmu2,1,xdelta,1) - call evtable(storu,idf1(2),x,1,qmu2,1,xsigma,1) - call evtable(storu,idf1(3),x,1,qmu2,1,xgluon,1) - call evtable(storu,idf1(4),x,1,qmu2,1,xphoton,1) - call evtable(storu,idf2(1),x,1,qmu2,1,xdeltav,1) - call evtable(storu,idf2(2),x,1,qmu2,1,xv,1) - call evtable(storu,idf3(1),x,1,qmu2,1,xdeltads,1) - call evtable(storu,idf4(1),x,1,qmu2,1,xdeltauc,1) - call evtable(storu,idf5(1),x,1,qmu2,1,xdeltasb,1) - - - iqt = iqfrmq(qt) !top threshold - - if (iqt.gt.0) then - call evtable(storu,idf6(1),x,1,qmu2,1,xdeltact,1) - endif - call evtable(storu,idf7(1),x,1,qmu2,1,xvds,1) - call evtable(storu,idf8(1),x,1,qmu2,1,xvuc,1) - call evtable(storu,idf9(1),x,1,qmu2,1,xvsb,1) - - - if (iqt.gt.0) then - call evtable(storu,idf10(1),x,1,qmu2,1,xvct,1) - endif - - do i = -6,7 - xf(i) = 0d0 - enddo - - xf(0) = xgluon - xf(7) = xphoton - xf(1) = 0.125d0*(xsigma-xdelta+xv-xdeltav+2.d0*xdeltads+2.d0*xvds) - xf(-1)= 0.125d0*(xsigma-xdelta-xv+xdeltav+2.d0*xdeltads-2.d0*xvds) - xf(2) = 0.25d0*(xsigma+xdelta+xv+xdeltav) - xf(-2) = 0.25d0*(xsigma+xdelta-xv-xdeltav) - xf(3) = 0.125d0*(xsigma-xdelta+xv-xdeltav-2.d0*xdeltads-2.d0*xvds) - xf(-3)= 
0.125d0*(xsigma-xdelta-xv+xdeltav-2.d0*xdeltads+2.d0*xvds) - - if (qmu2.gt.hf_mass(1)**2) then - xf(1) = 0.125d0*(xsigma-xdelta+xv-xdeltav+2.d0*xdeltads+2.d0*xvds) - xf(-1)= 0.125d0*(xsigma-xdelta-xv+xdeltav+2.d0*xdeltads-2.d0*xvds) - xf(2) = 0.125d0*(xsigma+xdelta+xv+xdeltav+2.d0*xdeltauc+2.d0*xvuc) - xf(-2) =0.125d0*(xsigma+xdelta-xv-xdeltav+2.d0*xdeltauc-2.d0*xvuc) - xf(3) = 0.125d0*(xsigma-xdelta+xv-xdeltav-2.d0*xdeltads-2.d0*xvds) - xf(-3)= 0.125d0*(xsigma-xdelta-xv+xdeltav-2.d0*xdeltads+2.d0*xvds) - xf(4) = 0.125d0*(xsigma+xdelta+xv+xdeltav-2.d0*xdeltauc-2.d0*xvuc) - xf(-4) =0.125d0*(xsigma+xdelta-xv-xdeltav-2.d0*xdeltauc+2.d0*xvuc) - endif - - if (qmu2.gt.hf_mass(2)**2) then - xf(1) = 1.d0/12.d0*(xsigma-xdelta+xv-xdeltav - $+4.d0*xdeltads+4.d0*xvds+2.d0*xdeltasb+2.d0*xvsb) - xf(-1)= 1.d0/12.d0*(xsigma-xdelta-xv+xdeltav - $+4.d0*xdeltads-4.d0*xvds+2.d0*xdeltasb-2.d0*xvsb) - xf(3) = 1.d0/12.d0*(xsigma-xdelta+xv-xdeltav - $-2.d0*xdeltads-2.d0*xvds+2.d0*xdeltasb+2.d0*xvsb) - xf(-3)= 1.d0/12.d0*(xsigma-xdelta-xv+xdeltav - $-2.d0*xdeltads+2.d0*xvds+2.d0*xdeltasb-2.d0*xvsb) - xf(5) = 1.d0/12.d0*(xsigma-xdelta+xv-xdeltav - $-2.d0*xdeltads-2.d0*xvds-4.d0*xdeltasb-4.d0*xvsb) - xf(-5)= 1.d0/12.d0*(xsigma-xdelta-xv+xdeltav - $-2.d0*xdeltads+2.d0*xvds-4.d0*xdeltasb+4.d0*xvsb) - endif - - if (qmu2.gt.hf_mass(3)**2) then - xf(2) = 1.d0/12.d0*(xsigma+xdelta+xv+xdeltav - $+4.d0*xdeltauc+4.d0*xvuc+2.d0*xdeltact+2.d0*xvct) - xf(-2)= 1.d0/12.d0*(xsigma+xdelta-xv-xdeltav - $+4.d0*xdeltauc-4.d0*xvuc+2.d0*xdeltact-2.d0*xvct) - xf(4) = 1.d0/12.d0*(xsigma+xdelta+xv+xdeltav - $-2.d0*xdeltauc-2.d0*xvuc+2.d0*xdeltact+2.d0*xvct) - xf(-4)= 1.d0/12.d0*(xsigma+xdelta-xv-xdeltav - $-2.d0*xdeltauc+2.d0*xvuc+2.d0*xdeltact-2.d0*xvct) - xf(6) = 1.d0/12.d0*(xsigma+xdelta+xv+xdeltav - $-2.d0*xdeltauc-2.d0*xvuc-4.d0*xdeltact-4.d0*xvct) - xf(-6)= 1.d0/12.d0*(xsigma+xdelta-xv-xdeltav - $-2.d0*xdeltauc+2.d0*xvuc-4.d0*xdeltact+4.d0*xvct) - endif - - return - end - - subroutine ExternalSetQEDEVOL(x,q0,xf) -* - implicit none -#include "steering.inc" -** -* Input Variables -* - double precision x - double precision q0 -** -* Internal Variables -* - integer ipdf - double precision gluon - double precision photon - double precision pdf_from_text - double precision qstrange,Ubar,Dbar,H1U,H1D - double precision sea,dbmub,dval,uval - double precision dfac,ParDumpFactor - parameter(ParDumpFactor=1.d-3) -** -* Output Variables -* - double precision xf(-6:7) -* -* Set PDFs to zero -* - do ipdf=-6,7 - xf(ipdf) = 0d0 - enddo - if(x.gt.1d0) x = 1d0 -c print *,photon(x) -* -* Construct PDFs addording to the PDF decomposition -* - - if(PDF_DECOMPOSITION.eq.'LHAPDF')then -c q0 = sqrt(starting_scale) - call evolvePDF(x, q0, xf) - - elseif(PDF_DECOMPOSITION.eq.'QCDNUM_GRID')then - xf(-3) = ( pdf_from_text(x,3) - pdf_from_text(x,6) ) / 2d0 - xf(-2) = pdf_from_text(x,4) - xf(-1) = pdf_from_text(x,5) - xf(0) = pdf_from_text(x,0) - xf(1) = pdf_from_text(x,1) - pdf_from_text(x,5) - xf(2) = pdf_from_text(x,2) - pdf_from_text(x,4) - xf(3) = ( pdf_from_text(x,3) + pdf_from_text(x,6) ) / 2d0 - - elseif(Index(PDF_DECOMPOSITION,'D_U_Dbar_Ubar').gt.0)then ! D,U,Dbar,Ubar. 
- xf(-3) = qstrange(x) - xf(-2) = Ubar(x) - xf(-1) = Dbar(x) - xf(0) = gluon(x) - xf(7) = photon(x) - xf(1) = H1D(x) - xf(-3) - xf(2) = H1U(x) - xf(3) = xf(-3) - - elseif(Index(PDF_DECOMPOSITION,'Sea').gt.0)then - xf(-2) = sea(x) / 4d0 - dbmub(x) / 2d0 - xf(-1) = sea(x) / 4d0 + dbmub(x) / 2d0 - xf(0) = gluon(x) - xf(7) = photon(x) - xf(1) = dval(x) + xf(-1) - xf(2) = uval(x) + xf(-2) - - elseif(PDF_DECOMPOSITION.eq.'Diffractive')then - dfac = dexp(-ParDumpFactor/(1.00001d0-x)) -* - xf(-3) = dfac * Uval(x) - xf(-2) = xf(-3) - xf(-1) = xf(-3) - xf(0) = dfac * gluon(x) - xf(1) = xf(-3) - xf(2) = xf(-3) - xf(3) = xf(-3) - - elseif(Index(PDF_DECOMPOSITION,'Dbar_Ubar').gt.0)then - xf(-3) = qstrange(x) - xf(-2) = ubar(x) - xf(-1) = dbar(x) - xf(-3) - xf(0) = gluon(x) - xf(7) = photon(x) - xf(1) = dval(x) + xf(-1) - xf(2) = uval(x) + xf(-2) - xf(3) = xf(-3) - - else - print *,'Unknown PDF Decomposition: '//PDF_DECOMPOSITION - print *,'Stop in evolution' - call HF_Stop - endif -* - return - end diff --git a/src/read_data.f b/src/read_data.f index e72be26a5c0c7d9e09eb9f83a45c22018bc07cd9..3af5056c6a1c4aba8de0942f597bc7a11b4edb61 100644 --- a/src/read_data.f +++ b/src/read_data.f @@ -218,6 +218,7 @@ C------------------------------------------------------------------------ #include "systematics.inc" #include "theorexpr.inc" #include "scales.inc" +#include "for_debug.inc" character *(*) CFile C Namelist variables: @@ -257,7 +258,7 @@ C Reference table integer IndexDataset double precision SystScales(nsystMax) C Extra info about k-factors, applegrid file(s): - character*1000 TheoryInfoFile(NKFactMax) + character*1000 TheoryInfoFile(NKFactMax) !Is this used anymore? --Ivan character*80 TheoryType(2) character*80 KFactorNames(NKFactMax) integer NKFactor @@ -389,8 +390,10 @@ C Reset scales to 1.0 open(51,file=CFile,status='old',err=99) - print *,'Reading data file ...' - print *,CFile + if(DEBUG)then + print *,'Reading data file ...' + print *,CFile + endif read(51,NML=Data,err=98) PlotN = -1 @@ -1061,15 +1064,14 @@ c endif close (53) endif - - print '(''Read'',i8,'' data points for '',A80)',NData,Name - print '(''Printing first'',i5,'' data points'')',min(Ndata,5) - print '(20A14)',(BinName(i),i=1,NBinDimension),' sigma' - - do j=1,min(NData,5) - print '(20E14.4)',(Allbins(i,j),i=1,NBinDimension),XSections(j) - - enddo + if(DEBUG)then + print '(''Read'',i8,'' data points for '',A80)',NData,Name + print '(''Printing first'',i5,'' data points'')',min(Ndata,5) + print '(20A14)',(BinName(i),i=1,NBinDimension),' sigma' + do j=1,min(NData,5) + print '(20E14.4)',(Allbins(i,j),i=1,NBinDimension),XSections(j) + enddo + endif return 97 continue diff --git a/src/read_steer.f b/src/read_steer.f index 6d5861f20b736f1ca47eb0ef51578f67f0708d78..c184fbc5b4836dca19869f4766e27b21f2bffd5a 100644 --- a/src/read_steer.f +++ b/src/read_steer.f @@ -1,5 +1,5 @@ C--------------------------------------------------- -C +C !> Read steering file steering.txt C C--------------------------------------------------- @@ -21,7 +21,7 @@ C Read various namelists: call read_hfitternml ! main steering FIRST call read_systematicsnml ! Read (optional) systematics namelist SECOND -C Special branch for rotation +C Special branch for rotation if (pdfrotate) then call read_theoryfilesNML call rediagonalize(NPoints,NSys) @@ -30,31 +30,20 @@ C Special branch for rotation call read_infilesnml ! Read data file names THIRD call read_outputnml ! output options - call read_outdirnml ! output dir + call read_outdirnml ! 
output dir - if(Itheory.lt.100) then - call read_lhapdfnml ! read lhapdf - call read_chi2scan ! read chi2scan -C -C Decode PDF type: -C - call SetPDFType -C -C Decode PDF style: -C - call SetPDFStyle - endif ! Itheory < 100 +! if(Itheory.lt.100) then + call read_lhapdfnml ! read lhapdf +! call read_chi2scan ! read chi2scan +! endif ! Itheory < 100 - call read_sumrules call read_mcerrorsnml ! MC uncertainties - call read_chebnml ! chebyshev parameterisation extra pars - call read_polynml call read_hqscalesnml ! read HQ scales if (itheory.ge.100) then call read_ccfmfilesnml endif - + call Read_InCorrNml ! Covariance matrix call read_scalesnml ! Read scales namelist c WS 2013-01-07 always read CSOffsetNML @@ -64,16 +53,6 @@ c WS 2013-01-07 always read CSOffsetNML C 30/08/2015 KK - Twist analyses call read_HigherTwists - - if(Itheory.lt.100) then -C -C Also read extra minuit parameters: -C - call readextraparam - endif ! Itheory > 100 - - - C 09/01/2013 Check consistency of the input call CheckInputs @@ -91,7 +70,6 @@ C ------------------------------------------- #include "steering.inc" #include "couplings.inc" #include "pdflength.inc" -#include "pdfparam.inc" #include "datasets.inc" #include "systematics.inc" #include "scales.inc" @@ -116,11 +94,6 @@ C------------------------------------------------------ iDH_MOD = 0 ! no Dieter Heidt modifications to stat. errros. - PDFStyle = 'HERAPDF' - PDFType = 'proton' - uvalSum = 2D0 - dvalSum = 1D0 - H1QCDFUNC= .False. C================================================= @@ -132,18 +105,6 @@ C PDF length weight factor: do i=1,5 pdfLenWeight(i) = 0. enddo -C Chebyshev param. of the gluon: - NCHEBGLU = 0 - -C Chebyshev param. of the Sea: - NCHEBSEA = 0 - -C Offset for the Sea chebyshev parameters (default:20) - IOFFSETCHEBSEA = 20 - -C Type of Chebyshev parameterization: - ichebtypeGlu = 0 - ichebtypeSea = 0 Chi2MaxError = 1.E10 ! turn off. @@ -163,25 +124,24 @@ C Add option to change Z of valence PDFs at x=1 from (1-x) to (1-x)^2 C Square polynom before calculating dv,uv. This forces positivity IPOLYSQR = 0 -C Key for W range +C Key for W range WMNlen = 20. WMXlen = 320. - chebxmin = 1.E-5 - C Hermes-like strange (off by default): ifsttype = 0 C Cache PDF calls - CachePDFs = .false. +! are currently broken +! CachePDFs = .false. ! Do not split the data into fit and control sub-samples: ControlFitSplit = .false. -C Fast applgrid: +C Fast applgrid: LFastAPPLGRID = .false. LUseAPPLgridCKM = .true. -* +* C MC Errors defaults: lRAND = .false. lRandData = .true. @@ -190,10 +150,6 @@ C MC Errors defaults: SYSTYPE = 1 C PDF output options: - -c 2012-11-08 WS: set default for DoBands - DoBands = .false. - DoBandsSym = .false. outnx = 101 do i=1,NBANDS Q2VAL(i) = -1. @@ -233,12 +189,12 @@ C Check variables for common blocks: C ============================================= C -!> Read the main steering namelisit +!> Read the main steering namelist C---------------------------------------------- - subroutine Read_HFitternml + subroutine Read_HFitternml implicit none - + #include "ntot.inc" #include "datasets.inc" #include "steering.inc" @@ -252,27 +208,27 @@ C----------------------------------------------- character*32 Chi2SettingsName(5) character*32 Chi2Settings(5) character*32 Chi2ExtraParam(8) - - real*8 Q02 ! Starting scale - integer IOrder ! Evolution order - character*8 Order ! +!Starting scale and order were moved to YAML since 2.2.0 +! real*8 Q02 ! Starting scale +! integer IOrder ! Evolution order +! character*8 Order ! 
character*16 TheoryType integer i - + C Main steering parameters namelist namelist/xFitter/ - $ ITheory, IOrder, ! keep for backward compatibility - $ Q02, HF_SCHEME, PDFStyle, PDFType, + $ ITheory, ! keep for backward compatibility + $ HF_SCHEME, $ LDebug, ifsttype, LFastAPPLGRID, LUseAPPLgridCKM, - $ Chi2MaxError, EWFIT, iDH_MOD, H1qcdfunc, CachePDFs, - $ ControlFitSplit,Order,TheoryType, + $ Chi2MaxError, EWFIT, iDH_MOD, H1qcdfunc, + $ ControlFitSplit,TheoryType, $ Chi2SettingsName, Chi2Settings, Chi2ExtraParam, $ AsymErrorsIterations, pdfRotate, RunningMode C-------------------------------------------------------------- C Some defaults - Order = ' ' +! Order = ' ' TheoryType = ' ' RunningMode = ' ' HF_SCHEME = 'ZMVFNS' @@ -286,16 +242,9 @@ C Read the main xFitter namelist: C open (51,file='steering.txt',status='old') read (51,NML=xFitter,END=141,ERR=42) - close (51) - - - goto 142 141 continue close (51) - 142 continue - -C if (AsymErrorsIterations .gt. 0) then call hf_errlog(13080601,'I: Use asymmetric uncertainties') else @@ -318,35 +267,27 @@ C Decode Running Mode: else if ( RunningMode .eq. 'PDF Rotate') then scan = .false. lhapdferrors = .false. - pdfrotate = .true. - else if ( RunningMode .eq. 'LHAPDF Analysis') then - scan = .false. - lhapdferrors = .true. - if ( index(pdfstyle,'LHAPDF').eq.0) then - Call hf_errlog(15072203,'I: Set LHAPDF Style.') - PDFSTYLE = 'LHAPDF' - endif - pdfrotate = .false. + pdfrotate = .true. else if ( RunningMode .eq. 'Chi2 Scan') then scan = .true. lhapdferrors = .false. - pdfrotate = .false. + pdfrotate = .false. else call hf_errlog(15072202,'F:Running mode unknonw value: '// $ trim(RunningMode)) endif C Decode computation order: - if (Order.ne.' ') then - Call DecodeOrder(Order) - else - I_FIT_ORDER = IOrder - endif - +! if (Order.ne.' ') then +! Call DecodeOrder(Order) +! else +! I_FIT_ORDER = IOrder +! endif + C Decode theory type: if (TheoryType.ne.' ') then Call DecodeTheoryType(TheoryType) - endif + endif C set debug flag used elsewhere according to steering Debug = lDebug @@ -354,17 +295,17 @@ C C Decode Chi2 style: C - call SetChi2Style(Chi2SettingsName, Chi2Settings, + call SetChi2Style(Chi2SettingsName, Chi2Settings, $ Chi2ExtraParam) if (itheory.lt.100) then C C Decode HFSCHEME: -C +C call SetHFSCHEME endif - starting_scale = Q02 +! 
starting_scale = Q02 !broken since 2.2.0 if (LDebug) then C Print the namelist: @@ -394,7 +335,7 @@ C------------------------------------------------------- C Namelist for statistical correlations to read namelist/InCorr/NCorrFiles,CorrFileNames C---------------------------------------------------------- -C +C C Read statistical correlations namelist: C open (51,file='steering.txt',status='old') @@ -402,7 +343,7 @@ C print '(''Read '',I4,'' correlation files'')',NCorrFiles 136 continue close (51) - + if (LDebug) then C Print the namelist: print InCorr @@ -439,30 +380,30 @@ C scale for HQ C C Read the HQScale namelist: C - open (51,file='steering.txt',status='old') - read (51,NML=HQScale,ERR=70,end=69) + open (51,file='steering.txt',status='old') + read (51,NML=HQScale,ERR=70,end=69) 69 continue close (51) C C asign mc or mb to hq scale -C +C call SetMHSCALE(MassHQ) aq2 = 1/scalea1 bq2 = -4*scaleb1/scalea1 - hqscale1in = scalea1 - hqscale2in = scaleb1 + hqscale1in = scalea1 + hqscale2in = scaleb1 if(mod(HFSCHEME,10).eq.1) then if(massh.eq.1) then - print*,'factorisation scale for heavy quarks is set to sqrt(', hqscale1in,'*Q^2 + ',hqscale2in , '* 4m_c^2 )' + print*,'factorisation scale for heavy quarks is set to sqrt(', hqscale1in,'*Q^2 + ',hqscale2in , '* 4m_c^2 )' elseif(massh.eq.2) then - print*,'factorisation scale for heavy quarks is set to sqrt(', hqscale1in,'*Q^2 + ',hqscale2in , '* 4m_b^2 )' - endif + print*,'factorisation scale for heavy quarks is set to sqrt(', hqscale1in,'*Q^2 + ',hqscale2in , '* 4m_b^2 )' + endif endif if (LDebug) then C Print the namelist: print HQScale endif - + return 70 continue @@ -479,7 +420,7 @@ C---------------------------------------- #include "steering.inc" C------------------------------------ C (Optional) LHAPDF steering card - namelist/lhapdf/LHAPDFSET,ILHAPDFSET, + namelist/lhapdf/ $ LHAPDFErrors,Scale68,LHAPDFVARSET,NPARVAR, $ WriteAlphaSToMemberPDF,DataToTheo,nremovepriors, $ lhapdfprofile,lhascaleprofile @@ -492,7 +433,7 @@ C LHAPDFErrors default lhapdfprofile = .true. lhascaleprofile = .false. - + Scale68 = .false. NPARVAR = 0 LHAPDFVARSET = '' @@ -513,16 +454,6 @@ C lhapdferrors = lhapdferrors_save endif -C check if the PDFstyle is indeed Ok - if ( RunningMode .eq. 'LHAPDF Analysis') then - if (PDFStyle.ne.'LHAPDF' .and. PDFStyle.ne.'LHAPDFQ0' - $ .and. 
PDFStyle.ne.'LHAPDFNATIVE') then - call HF_Errlog(12032303, - $ 'W:WARNING: Setting PDF style to LHAPDFQ0') - PDFStyle = 'LHAPDFQ0' - endif - endif - if (LDebug) then C Print the namelist: print lhapdf @@ -587,22 +518,6 @@ C--- end C -!> Read number of valence up and down quarks for sum rules -C------------------------------------------------------- - subroutine read_sumrules - implicit none -#include "pdfparam.inc" - namelist/sumrule_sums/uvalSum,dvalSum - open(51,file='steering.txt',status='old') - read(51,nml=sumrule_sums,ERR=1718,end=1717) - 1717 continue - close(51) - return - 1718 continue - print '(''Error reading namelist &sumrule_sums, STOP'')' - call HF_stop - end -C !> Read MC errors namelist C------------------------------------------------------- subroutine read_mcerrorsnml @@ -610,7 +525,7 @@ C------------------------------------------------------- implicit none #include "steering.inc" C (Optional) MC method namelist - namelist/MCErrors/LRand, ISeeDMC, StaType, SysType, LRandData + namelist/MCErrors/LRand, ISeeDMC, StaType, SysType, LRandData C------------------------------------------------------ C C Read the MC method namelist: @@ -630,82 +545,6 @@ C----------------------------------------------- print '(''Error reading namelist &MCErrors, STOP'')' call HF_stop end - - -C -!> Read optional chebyshev namelist -C-------------------------------------------------------- - subroutine read_chebnml - - implicit none -#include "steering.inc" -#include "pdflength.inc" -#include "pdfparam.inc" -C--------------------------------------------- -C (Optional) Chebyshev namelist - namelist/Cheb/ILENPDF,pdfLenWeight,NCHEBGLU,NCHEBSEA - $ ,IOFFSETCHEBSEA,ichebtypeGlu,ichebtypeSea - $ ,WMNlen,WMXlen, ChebXMin -C------------------------------------------------- -C -C Read the Chebyshev namelist: -C - open (51,file='steering.txt',status='old') - read (51,NML=Cheb,ERR=64,end=63) - - 63 continue - close (51) - - chebxminlog = log(chebxmin) - if (NCHEBGLU.ne.0) then - print *,'Use Chebyshev polynoms for gluon with N=',NCHEBGLU - endif - - if (NCHEBSEA.ne.0) then - print *,'Use Chebyshev polynoms for sea with N=',NCHEBSEA - print *,'Offset for minuit parameters is',IOFFSETCHEBSEA - endif - - if (LDebug) then - print Cheb - endif - - return -C----------------- - 64 continue - print '(''Error reading namelist &Cheb, STOP'')' - call HF_stop - end - - -C -!> Optional polynomial parametrisation for valence quarks -C------------------------------------------------------------- - subroutine read_polynml - - implicit none -#include "steering.inc" -C (Optional) Polynomial parameterisation for valence - namelist/Poly/NPOLYVAL,IZPOPOLY,IPOLYSQR -C------------------------------------------- -C - open (51,file='steering.txt',status='old') - read (51,NML=Poly,ERR=66,end=65) - 65 continue - close (51) - - if (LDebug) then - print Poly - endif - - return -C-------------------------------------------------------- - 66 continue - print '(''Error reading namelist &Poly, STOP'')' - call HF_stop - - end - C !> Read InFiles namelist C------------------------------------------------------- @@ -722,7 +561,7 @@ C Namelist for datafiles to read namelist/InFiles/NInputFiles,InputFileNames character*(80) cMsg - + C reset defaults: NInputFiles = 0 do i = 1,NSET @@ -752,7 +591,7 @@ C Determine how many files to process. 
First count them: $ ,'' exceeds actual number of files='',i4,'', reset'')') $ NInputFiles, nf call hf_errlog(18030601,cMsg) - $ + $ NInputFiles = nf endif endif @@ -795,7 +634,7 @@ C------------------------------------------------------ implicit none C updf stuff -C Namelist for datafiles +C Namelist for datafiles #include "steering.inc" character*132 CCFMfilename !> Names of input files @@ -816,7 +655,7 @@ C Read the CCFM data file name endif return - + 71 continue print '(''Namelist &CCFMFiles NOT found'')' call HF_stop @@ -914,10 +753,9 @@ C------------------------------------------------ integer i, ilastq2 C Output style namelist - namelist/Output/DoBands, Q2VAL, OutNX, OutXRange, - $ UseGridLHAPDF5, WriteLHAPDF6, - $ WriteLHAPDF5, DoBandsSym - $ ,ReadParsFromFile, ParsFileName, CovFileName + namelist/Output/Q2VAL, OutNX, OutXRange,UseGridLHAPDF5, + $ WriteLHAPDF5,WriteLHAPDF6, + $ ReadParsFromFile, ParsFileName, CovFileName C-------------------------------------------------------- C Read the output namelist: @@ -973,12 +811,7 @@ C 152 continue close (51) - if (DoBands .and. DoBandsSym) then - Call hf_errlog(16042701, - $ 'F: Both DoBands and DoBandsSym are set: chose one') - endif - -C check if limit of 22 char is not exceeded: +C check if limit of 22 char is not exceeded: if(LEN(TRIM(OutDirName)).gt.256) then call hf_errlog(09092013, $ 'F: Name of result directory is too long (max is 256 char) ') @@ -1034,151 +867,13 @@ C check if limit of 22 char is not exceeded: call hf_errlog(250420132, $ 'I: Creating directory to store results: '//TRIM(OutDirName)) CALL system('mkdir -p '//TRIM(OutDirName)) - + return 56 continue print '(''Error reading namelist &OutDir, STOP'')' call HF_stop end - -C--------------------------------------- -C -!> Set PDF parameterisation type -C -C--------------------------------------- - Subroutine SetPDFType() - - implicit none -#include "steering.inc" - - - if (PDFType.eq.'proton'.or. PDFType.eq.'PROTON') then - lead = .false. - deuteron = .false. - print *,'Fitting for PROTON PDFs, PDFType=', PDFType - elseif (PDFType.eq.'lead'.or. PDFType.eq.'LEAD') then - lead = .true. - deuteron = .false. - print *,'Fitting for LEAD PDFs, PDFType=', PDFType - elseif (PDFType.eq.'DEUTERON'.or. PDFType.eq.'deuteron') then - lead = .true. - deuteron = .true. - print *,'Fitting for DEUTERON PDFs, PDFType=', PDFType - else - call hf_errlog(300920131, - $ 'F: Unsupported PDFType used!') - endif - end -C--------------------------------- - - -C--------------------------------------- -C -!> Set PDF parameterisation style -C -C--------------------------------------- - Subroutine SetPDFStyle() - - - implicit none - external CheckForPDF - logical lhapdffile_exists - integer*1 has_photon -#include "steering.inc" -C--------------------------------- - - ! --- FlexibleGluon is used in SumRules - FlexibleGluon = .false. - - if ( - $ PDFStyle.eq. 'HERAPDF'.or. - $ PDFStyle.eq. 'strange') then - FlexibleGluon = .true. - PDF_DECOMPOSITION = 'Dv_Uv_Dbar_Ubar_Str' - - elseif (PDFStyle.eq.'CTEQHERA') then - FlexibleGluon = .true. - PDF_DECOMPOSITION = 'Dv_Uv_Dbar_Ubar_Str' - - elseif (PDFStyle.eq.'CTEQ') then -! FreeStrange=.false. - PDF_DECOMPOSITION = 'Dv_Uv_Dbar_Ubar_Str' - - - elseif ((PDFStyle.eq.'AS').or.(PDFStyle.eq.'BiLog')) then - FreeStrange=.false. 
- PDF_DECOMPOSITION = 'Dv_Uv_Dbar_Ubar' - - elseif (PDFStyle.eq.'CHEB'.or.PDFStyle.eq.'ZEUS Jet') then - PDF_DECOMPOSITION = 'Dv_Uv_Sea_Delta' - - elseif (PDFStyle.eq.'LHAPDFQ0') then - iparam = 0 - PDF_DECOMPOSITION = 'LHAPDF' - - elseif (PDFStyle.eq.'LHAPDF') then - iparam = 0 - PDF_DECOMPOSITION = 'LHAPDF' - - elseif (PDFStyle.eq.'LHAPDFNATIVE') then - iparam = 0 - PDF_DECOMPOSITION = 'LHAPDF' - - elseif (PDFStyle.eq.'DDIS') then -cv iparam = 301 - PDF_DECOMPOSITION = 'Diffractive' - elseif (PDFStyle.eq.'QCDNUM_GRID') then - PDF_DECOMPOSITION = 'QCDNUM_GRID' - else - print *,'Unsupported PDFStyle =',PDFStyle - print *,'Check value in steering.txt' - call HF_stop - endif - - - if ((PDFStyle.eq.'LHAPDF').or.(PDFStyle.eq.'LHAPDFQ0') - $ .or.(PDFStyle.eq.'LHAPDFNATIVE')) then - - call checkforpdf(LHAPDFSET) - - - INQUIRE(FILE=LHAPDFSET, EXIST=lhapdffile_exists) - if(lhapdffile_exists) then - call InitPDFset(LHAPDFSET) - else - call InitPDFsetByName(LHAPDFSET) - endif - - ! Get number of sets: - call numberPDF(nLHAPDF_Sets) - call InitPDF(ILHAPDFSET) - - ! avoid extra printout from LHAPDF: - call set_verbosity(0) - - if(has_photon().eq.1.) then - ExtraPdfs = .true. - else - ExtraPdfs = .false. - endif - - if(PDFStyle.eq.'LHAPDFQ0'.and.ExtraPdfs) then - call hf_errlog(16060101, - $ 'S: LHAPDFQ0 option cannot be used with QED (photon) PDFs') - endif - - - if(PDFStyle.eq.'LHAPDF'.or.PDFStyle.eq.'LHAPDFNATIVE') then - IPDFSET = 5 - vIPDFSET = IPDFSET - endif - endif - - end - - - C--------------------------------------- C !> Set Heavy Flavour Scheme @@ -1192,13 +887,13 @@ C--------------------------------------- #include "steering.inc" C--------------------------------- - + if (HF_SCHEME.eq.'ZMVFNS') then HFSCHEME = 0 elseif (HF_SCHEME.eq.'ZMVFNS MELA') then HFSCHEME = 6 elseif (HF_SCHEME.eq.'ACOT ZM') then - HFSCHEME = 1 + HFSCHEME = 1 nordAcot=1 elseif (HF_SCHEME.eq.'ACOT ZM +N2LO') then HFSCHEME = 1 @@ -1207,7 +902,7 @@ C--------------------------------- HFSCHEME = 1 nordAcot=3 elseif (HF_SCHEME.eq.'ACOT Full') then - HFSCHEME = 11 + HFSCHEME = 11 nordAcot=1 elseif (HF_SCHEME.eq.'ACOT Full +N2LO') then HFSCHEME = 11 @@ -1216,7 +911,7 @@ C--------------------------------- HFSCHEME = 11 nordAcot=3 elseif (HF_SCHEME.eq.'ACOT Chi') then - HFSCHEME = 111 + HFSCHEME = 111 nordAcot=1 elseif (HF_SCHEME.eq.'ACOT Chi +N2LO') then HFSCHEME = 111 @@ -1227,17 +922,17 @@ C--------------------------------- elseif (HF_SCHEME.eq.'RT') then HFSCHEME = 2 elseif (HF_SCHEME.eq.'RT FAST') then - HFSCHEME = 22 + HFSCHEME = 22 elseif (HF_SCHEME.eq.'RT OPT') then HFSCHEME = 202 elseif (HF_SCHEME.eq.'RT OPT FAST') then - HFSCHEME = 222 + HFSCHEME = 222 elseif (HF_SCHEME.eq.'FF') then - HFSCHEME = 3 + HFSCHEME = 3 elseif (HF_SCHEME.eq.'FF ABM') then - HFSCHEME = 4 + HFSCHEME = 4 elseif (HF_SCHEME.eq.'BMSN ABM') then - HFSCHEME = 44 + HFSCHEME = 44 elseif (HF_SCHEME.eq.'FF ABM RUNM') then HFSCHEME = 444 elseif (HF_SCHEME.eq.'FONLL-A') then @@ -1290,7 +985,7 @@ C--------------------------------------- C--------------------------------- if (MassHQ.eq.'mc') then - MASSH = 1 + MASSH = 1 elseif (MassHQ.eq.'mb') then MASSH = 2 else @@ -1306,17 +1001,17 @@ C--------------------------------- C--------------------------------------- C !> Set Chi2 style -!> @param Chi2SettingsName bias corrections for uncertainties and treatment of systematics in chi2 +!> @param Chi2SettingsName bias corrections for uncertainties and treatment of systematics in chi2 !> @param Chi2Settings values corresponding to each of Chi2SettingsName 
parameters !> @param Chi2ExtraParam extra corrections in chi2 C--------------------------------------- - Subroutine SetChi2Style(Chi2SettingsName, Chi2Settings, + Subroutine SetChi2Style(Chi2SettingsName, Chi2Settings, $ Chi2ExtraParam) implicit none character*32 Chi2SettingsName(5) character*32 Chi2Settings(5) - character*32 Chi2ExtraParam(8) + character*32 Chi2ExtraParam(8) integer i #include "steering.inc" C--------------------------------- @@ -1324,7 +1019,7 @@ C--------------------------------- if (Chi2SettingsName(1).eq.'undefined') then C C Reset defaults if Chi2SettingsName parameter is not set. -C +C CorrSystByOffset=.false. CorSysScale = 'Linear' StatScale = 'Poisson' @@ -1362,7 +1057,7 @@ C some defaults Chi2FirstIterationRescale = .true. elseif(Chi2ExtraParam(i).eq.'ExtraSystRescale') then Chi2ExtraSystRescale = .true. -c switch on the log poisson correction if ExtraSysRescale was called +c switch on the log poisson correction if ExtraSysRescale was called Chi2PoissonCorr = .true. call HF_errlog(15012601, $ 'I: extra log corr (Poisson) activated with ExtraSystRescale') @@ -1373,69 +1068,7 @@ c switch on the log poisson correction if ExtraSysRescale was called enddo endif end - - -C -!> @brief Read ExtraMinimisationParameters namelists. -!> @details Read as many instances of the namelist as exist. -C------------------------------------- - Subroutine ReadExtraParam - implicit none -#include "extrapars.inc" -#include "alphas.inc" - integer maxExtra - parameter (maxExtra=50) - character*32 name(maxExtra) - double precision Value(maxExtra),Step(maxExtra) - $ ,Min(maxExtra),Max(maxExtra) - $ ,ConstrVal(maxExtra),ConstrUnc(maxExtra) - - namelist/ExtraMinimisationParameters/Name,Value,Step,Min,Max - $ ,ConstrVal,ConstrUnc - integer i - double precision getparamd -C---------------------------------------- - - open (51,file='steering.txt',status='old') -C -C Read as many instances of the namelist as exists: -C - do while (.true.) -C -C Reset names -C - do i=1,maxExtra - name(i) = ' ' - enddo - read (51,NML=ExtraMinimisationParameters,END=71,ERR=72) - - call hf_errlog(18031501, - $ 'W: Reading parameters from'//achar(27) - $ //'[31m obsolete ExtraMinimisationParameters' - $ //' namelist. Consider using parameters.yaml instead' - $ //achar(27)//'[34m') - - do i=1,maxExtra - if (name(i).ne.' ') then - call AddExternalParam(name(i),value(i), step(i), min(i), max(i) - $ ,ConstrVal(i),ConstrUnc(i),.true.,0.0D0) - endif - enddo - enddo - 71 continue - print '(''Got '',i5,'' extra minuit parameters'')',nExtraParam - close (51) -C --- Set value of alphas - alphas = getParamD('alphas') - return - 72 continue - print *,'Problem reading namelist ExtraMinimisationParameters' - call HF_stop -C---------------------------------------- - end - -C !> Add extra fitting parameters !> @param name of extra parameter !> @param value of extra parameter @@ -1445,7 +1078,10 @@ C !> @param construnc uncertainty on constrain in case of fitting !> @param to_gparam send to gParameters or not C----------------------------------------------- - Subroutine AddExternalParam(name, value, step, min, max, +C As far as I understand, currently ALL parameters are treated as extra +C This routine registers a parameter in some array where MINUIT will +C find them (???) + Subroutine AddExternalParam(name, value, step, min, max, $ constrval, construnc, to_gParam $ ,gParam) @@ -1485,7 +1121,7 @@ C C Also add it to c++ map ... 
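C     (i.e. when to_gParam is true, the value stored in
C      ExtraParamValue(nExtraParam) is registered once more, under its
C      null-terminated name, in the C++ parameter map via
C      add_To_Param_Map; presumably this lets the C++ side look the
C      parameter up by name while MINUIT keeps working off the Fortran
C      ExtraParam arrays.)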
if (to_gParam) then - call add_To_Param_Map( gParam, ExtraParamValue(nExtraParam) + call add_To_Param_Map( gParam, ExtraParamValue(nExtraParam) $ , iglobal, ExtraParamNames(nExtraParam)//char(0)) endif @@ -1559,7 +1195,7 @@ C Initialisation: endif enddo - + open (51,file='steering.txt',status='old') read (51,NML=Systematics,END=123,ERR=124) @@ -1623,7 +1259,7 @@ C----------------------------------------- ! --- Initialisation: UsePrevFit = 0 ! Do not use previous fit results CorSysIndex = NSYSMAX+1 ! trick to calculate all offsets in one job - + open (51,file='steering.txt',status='old') read (51,NML=CSOffset,END=123,ERR=124) @@ -1713,10 +1349,10 @@ C------------------------------------------------ end C -!> Check if the systematic source is already on the list. +!> Check if the systematic source is already on the list. !> Takes care of asymmetric errors and : modifier. -C - integer Function SystematicsExist(SourceName) +C + integer Function SystematicsExist(SourceName) implicit none character*(*) SourceName @@ -1727,7 +1363,7 @@ C C---------------------------------------------------------------- SystematicsExist = 0 -C Check for +- signs: +C Check for +- signs: if ( SourceName( len_trim(Sourcename):len_trim(Sourcename)) $ .eq.'+' .or. $ SourceName( len_trim(Sourcename):len_trim(Sourcename)) @@ -1744,12 +1380,12 @@ C Check for : Name = Name(1:i-1) endif - do j=1,NSYS + do j=1,NSYS if ( system(j) .eq. Name ) then SystematicsExist = j Return endif - enddo + enddo C---------------------------------------------------------------- end @@ -1779,7 +1415,7 @@ C !> :T - "theory" (not data), default for theory files !> C -!> @param SName name of added systematic source. +!> @param SName name of added systematic source. C----------------------------------------------------------------------------- Subroutine AddSystematics(SName) @@ -1789,15 +1425,15 @@ C----------------------------------------------------------------------------- character*(*) SName character*64 SourceName - + integer ii,iasym C----------------------------------------- - + SourceName = SName nsys = nsys + 1 if (NSYS.gt.NSysMax) then - print + print $ '(''ReadDataFile Error: exceeding NSysMax'')' print '(''Current NSysMax='',i6)',NSysMax print '(''Increase NSYSMAX_C in include/dimensions.h'')' @@ -1830,7 +1466,7 @@ C ISystType(nsys) = iDataSyst ! Default - do while (ii.gt.0) + do while (ii.gt.0) if ( SourceName(ii+1:ii+1) .eq.'A' ) then SysScalingType(nsys) = isNoRescale Call HF_errlog(12090001, @@ -1848,7 +1484,7 @@ C elseif ( SourceName(ii+1:ii+1) .eq.'E' ) then SysForm(nsys) = isExternal elseif ( SourceName(ii+1:ii+1) .eq.'D' ) then - ISystType(nsys) = iDataSyst + ISystType(nsys) = iDataSyst elseif ( SourceName(ii+1:ii+1) .eq.'T' ) then ISystType(nsys) = iTheorySyst else @@ -1857,7 +1493,7 @@ C Call HF_errlog(12090002, $'W:WARNING: wrong form or bias correction for a systematic source') endif - + SourceName = SourceName(ii+2:) ii = index(SourceName,':') enddo @@ -1887,7 +1523,7 @@ C-------------------------------------------------------- HiTwistSubType = 'lam-sig-x0' open (51,file='steering.txt',status='old') read (51,NML=HighTwist,ERR=134,end=131) - + 131 continue close (51) @@ -1904,34 +1540,10 @@ C----------------- call HF_stop end -!> +!> DEPRECATED !> Check consistency of the data input, abort for unsupported combinations -!> +!> TODO: remove me Subroutine CheckInputs - implicit none -#include "steering.inc" - character*48 CMess -C---------------------------------------------------------- -! if ( I_Fit_order .eq. 1 ) then -! 
if ( index(HF_SCHEME,'RT').gt.0 ) then -! CMess = 'RT scheme does not support LO evolution' -! goto 998 -! endif -! endif - - - if (LHAPDFErrors) then - if(PDFStyle.ne.'LHAPDF'.and.PDFStyle.ne.'LHAPDFQ0' - $ .and.PDFStyle.ne.'LHAPDFNATIVE') then - call HF_Errlog(03062013, - $ 'W:WARRNING PDFstyle is not LHAPDF, setting PDFErrors to False') - LHAPDFErrors = .false. - endif - endif - return - 998 continue - call HF_ERRLOG(13010901,'F: Inconsistent steering: '//CMess) - call hf_stop end diff --git a/src/store_output.f b/src/store_output.f index c4035277dffdbae481453f1f6a699cbbfe36a606..2f847d705b414ce4cc6d6a6a31f5863095c1e7ab 100644 --- a/src/store_output.f +++ b/src/store_output.f @@ -10,7 +10,6 @@ C-------------------------------------------------------------- implicit none #include "steering.inc" -#include "pdfparam.inc" integer i,ix,idx,iq2,iflag double precision q2,x,gval,sing,umin,dmin @@ -404,26 +403,26 @@ c RP write (fname,'(''output/parsout_'',i1)') ifcn3 endif open (71,file=fname,status='unknown') - - if (DoBands .and. ifcn3.eq.0) then - Allocate(errIterate(MNE,MNE)) - call GetErrMatScaled(errIterate) - endif +! Broken since 2.2.0 +! if (DoBands .and. ifcn3.eq.0) then +! Allocate(errIterate(MNE,MNE)) +! call GetErrMatScaled(errIterate) +! endif do i=1,mne call mnpout(i,parname,val,err,xlo,xhi,ipar) C C For bands, replace by "iterate" estimate, if present -C - if ( Dobands .and. ipar.gt.0 .and. ifcn3.eq.0 ) then - if ( errIterate(ipar,ipar).gt.0 ) then - err = sqrt(errIterate(ipar,ipar)) - val = pkeep(i) - call hf_errlog(1060402016, - $ 'I: Write uncertainties to parsout_0 using Iterate method') - endif - endif +C Broken since 2.2.0 +! if ( Dobands .and. ipar.gt.0 .and. ifcn3.eq.0 ) then +! if ( errIterate(ipar,ipar).gt.0 ) then +! err = sqrt(errIterate(ipar,ipar)) +! val = pkeep(i) +! call hf_errlog(1060402016, +! $ 'I: Write uncertainties to parsout_0 using Iterate method') +! endif +! endif if (Trim(parname).ne.'undefined') then if (xlo.eq.0.and.xhi.eq.0) then @@ -433,10 +432,10 @@ C endif endif enddo - - if (DoBands .and. ifcn3.eq.0) then - deallocate(errIterate) - endif +! Broken since 2.2.0 +! if (DoBands .and. ifcn3.eq.0) then +! deallocate(errIterate) +! endif 72 format (I5,' ','''',A,'''',4F12.6) close(71) @@ -453,7 +452,7 @@ C-------------------------------------------------------------------- implicit none #include "endmini.inc" #include "steering.inc" - integer i,iminCont, kflag + integer i,iminCont double precision aminCont @@ -485,13 +484,14 @@ C------------------------------------------------------------------- print *,' ' ! Dump PDFs for this: - call PDF_param_iteration(pkeep3(1,iminCont),2) !Decode params. +!Broken since 2.2.0 +! call PDF_param_iteration(pkeep3(1,iminCont),2) !Decode params. C C Fix some pars by sum-rules: C - kflag = 0 - call SumRules(kflag) - call Evolution +C call Evolution !I'm commenting this out because it conflicts with +C the new evolution interface, but I do not know why it was here +C --Ivan C ! 
Ready to store: cv open (76,file='output/lhapdf.block.txt',status='unknown') diff --git a/src/sumrules.f b/src/sumrules.f deleted file mode 100644 index 65ae8521f706e23a16250cf0946926ff7c46946b..0000000000000000000000000000000000000000 --- a/src/sumrules.f +++ /dev/null @@ -1,1553 +0,0 @@ - -* -------------------------------------------------------------- - subroutine SumRules(kflag) -* -------------------------------------------------------------- - - implicit none - -#include "steering.inc" -#include "pdfparam.inc" -#include "for_debug.inc" - - integer kflag - double precision t1,t2,t3,t4,term - double precision t1mt3,t1mt4 - double precision tu,tg,td,tubar,tdbar - double precision tsbar, tcbar,tsmalldb, tsmallub - double precision CalcIntegral,CalcIntegralCheb - - double precision CalcIntXpdf,CalcIntXpdfFixN,CalcIntPdf - - double precision SSDINT - double precision ToInteg - external ToInteg - double precision zero,tgMRST - - double precision btmp,c1,c2 - common/For_Integ/btmp,c1,c2 -*new joel feltesse - double precision tuv,tdv,tub,tdb,tsea,tdel - double precision polyvalint,polyvalint0 - double precision para,x - double precision ubar,dbar,uval,dval,gluon - integer i - - double precision fs - double precision fshermes - double precision tstr,tNoGlue,tPho -*add for mixed CTEQHERA - double precision SumRuleCTEQ, SumRuleCTEQhera -C----------------------------------------- - - kflag=0 - zero = 1d-10 - -C========================================================= -C Nothing to do for LHAPDF or Diffractive: -C - if (PDF_DECOMPOSITION.eq.'LHAPDF' - $ .or. PDF_DECOMPOSITION.eq.'Diffractive' - $ .or. PDF_DECOMPOSITION.eq.'QCDNUM_GRID' ) then - Return - endif - -C========================================================== -C CTEQ-like parameterisation: -C - if (PDFStyle.eq.'CTEQ') then - Call SumRulesCTeq - Return - endif - - if (PDFStyle.eq.'CTEQHERA') then - Call SumRulesCTEQHera - Return - endif - -C 22 Sep 11, VR, Add AS parametrisation - if ((PDFStyle.eq.'AS').or.(PDFStyle.eq.'BiLog')) then - Call SumRulesAS - return - endif - - - -C========================================================== -C Standard parameterisation. -C - -C-------------- -C Valence: - if (Index(PDF_DECOMPOSITION,'Dv_Uv').gt.0) then - -C********************************************************** -C* -- sum rule : D - Dbar = 1 : gives ADval -C* - - if (pardval(1).eq.0) then - pardval(1)=dvalSum/CalcIntPdf(pardval) - else - dv_sum = pardval(1)*CalcIntPdf(pardval) - endif - -C********************************************************** -C* -- sum rule : U - Ubar = 2 : gives AUval -C* - if (paruval(1).eq.0) then - paruval(1)=uvalSum/CalcIntPdf(paruval) - else - uv_sum = paruval(1)*CalcIntPdf(paruval)/2. - endif -C* --TODO: cvalSum sumrule here? 
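Spelled out, the two branches above impose (or merely record) the same number sum rule. Writing the valence shape with its normalisation A_v = par(1) factored out, the relation is

      A_v = N / int_0^1 f_v(x) dx ,   so that   int_0^1 [ q_v(x) - qbar_v(x) ] dx = N ,

with N = dvalSum (1 by default) for d-valence and N = uvalSum (2 by default) for u-valence. When par(1) is already fixed by the user, the else branches evaluate the integral instead and keep it in dv_sum and uv_sum (the latter divided by 2), which are printed at the end of SumRules.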
- -C Also integrate momenta, for momentum sum rule: - tUv = paruval(1)*CalcIntXpdf(paruval) - tDv = pardval(1)*CalcIntXpdf(pardval) -cv print*,'sumrules......', tuv, tdv - - else - - print *,'Un-implemented valence decomposition '//PDF_DECOMPOSITION - print *,'Stop in sumrules' - call HF_STOP - endif - - -C********************************************************** -C* -- sum rule : x ( gluon + Sigma) = 1 : gives Ag -C* - -C---------------- -C Gluon: - -C Check chebyshev and flexible gluon: - if (nchebglu.eq.0) then - if (FlexibleGluon) then - tg = CalcIntXpdfFixN(parglue,6) - tgMRST=CalcIntegral(parglue(8),parglue(9)) - else - tg = CalcIntXpdf(parglue) - tgMRST=0.0 - endif - else - tg = CalcIntegralCheb(nchebglu, - $ polyPars,chebxminlog, ichebtypeGlu) - endif - -C---------------- -C Sea: - if (Index(PDF_DECOMPOSITION,'Dbar_Ubar').gt.0) then - - tUb = parubar(1)*CalcIntXpdf(parubar) - tDb = pardbar(1)*CalcIntXpdf(pardbar) - if (iTheory.eq.11.or.iTheory.eq.35) then - tPho = parphoton(1)*CalcIntXpdf(parphoton) - else - tPho = 0 !> activate it only when QED is needed - endif - - if (Index(PDF_DECOMPOSITION,'Str').gt.0) then - tStr = parstr(1)*CalcIntXpdf(parstr) - else - tStr = 0 !> Strange already included in Dbar - endif - -C Total sea integral: - tsea = 2.0d0 * (tUb + tDb + tStr) - - elseif (Index(PDF_DECOMPOSITION,'Sea').gt.0) then - - if (nchebsea.eq.0) then - tsea = CalcIntXpdf(parsea) - else - tsea = CalcIntegralCheb(nchebsea, - $ polyParsSea,chebxminlog,ichebtypeSea ) - Parsea(1) = 1.d0 - endif - - else - print *,'Un-implemented sea decomposition '//PDF_DECOMPOSITION - print *,'Stop in sumrules' - call HF_STOP - endif - -C -C 1 - (valence + sea momentum): -C - tNoGlue = 1.D0 - ( tUv + tDv + tSea + tPho) - -C******************************************************************* - - if (tg.le.0) then - tg=0.001 - endif - -C Calculate gluon normalisation, taking into account flexible piece: - if (parglue(1).eq.0) then !> Impose sum rule - parglue(1)=(tNoGlue+parglue(7)*tgMRST)/tg - else - p_sum = parglue(1)*tg + tUv + tDv + tSea - $ + tPho - parglue(7)*tgMRST - endif - -C******************************************************************* - -C propagate the normalizations and other parameters to -C standard parametrisation - -cVR print*,'........................................tphoton', tPho - - if (NCHEBGLU.eq.0) then - if (lprint) then - print '(''uv:'',11F10.4)',(paruval(i),i=1,10) - print '(''dv:'',11F10.4)',(pardval(i),i=1,10) - print '(''Ub:'',11F10.4)',(parubar(i),i=1,10) - print '(''Db:'',11F10.4)',(pardbar(i),i=1,10) - print '(''GL:'',11F10.4)',(parglue(i),i=1,10) - print '(''ST:'',11F10.4)',(parstr(i),i=1,10) - if (iTheory.eq.11.or.iTheory.eq.35) then - print '(''PH:'',11F10.4)',(parphoton(i),i=1,10) - endif - if (uv_sum.ne.0.or. dv_sum.ne.0 .or. 
p_sum.ne.0) then - print '(''Sum rules, uv, dv, p:'',3F10.4)' - $ ,uv_sum, dv_sum, p_sum - endif - endif - endif - - 999 continue - return - end - - -* -------------------------------------------------------------- - double precision function ToInteg(x) -* -------------------------------------------------------------- - - implicit none - - double precision x - double precision b,c1,c2 - common/For_Integ/b,c1,c2 - double precision xloc - - xloc = x - - ToInteg = xloc**(b-1) * ( (1.-xloc)**c1 - (1.-xloc)**c2) - - return - end - - -* -------------------------------------------------------------- - double precision function CalcIntXpdf(pdfpars) -C--------------------------------------------------------------- -C Calculated \int xpdf(x) dx using the standard PDF -C parameterisation -C--------------------------------------------------------------- - implicit none - double precision pdfpars(10) - integer i - double precision sum - - double precision CalcIntegral -C--------------------------------------------------------------- - sum = CalcIntegral(pdfpars(2),pdfpars(3)) - ! WS 2015-10-08 - ! do i=1,7 - do i=1,3 - if ( pdfpars(3+i).ne.0 ) then - sum = sum + pdfpars(3+i) - $ * CalcIntegral(pdfpars(2)+i,pdfpars(3)) - endif - enddo - -C Also espsilon times sqrt x: - if ( pdfpars(10).ne.0) then - sum = sum + pdfpars(10)*CalcIntegral(pdfpars(2)+0.5,pdfpars(3)) - endif - - CalcIntXpdf = sum -C--------------------------------------------------------------- - end - - -* -------------------------------------------------------------- - double precision function CalcIntPdf(pdfpars) -C--------------------------------------------------------------- -C Calculated \int pdf(x) dx using the standard PDF -C parameterisation -C--------------------------------------------------------------- - implicit none - double precision pdfpars(10) - integer i - double precision sum - - double precision CalcIntegral -C--------------------------------------------------------------- - sum = CalcIntegral(pdfpars(2)-1,pdfpars(3)) - ! WS 2015-10-08 - ! do i=1,7 - do i=1,3 - if ( pdfpars(3+i).ne.0 ) then - sum = sum + pdfpars(3+i) - $ * CalcIntegral(pdfpars(2)+i-1,pdfpars(3)) - endif - enddo - -C Also espsilon times sqrt x: - if ( pdfpars(10).ne.0) then - sum = sum + pdfpars(10)*CalcIntegral(pdfpars(2)-0.5,pdfpars(3)) - endif - - CalcIntPdf = sum -C--------------------------------------------------------------- - end - -* -------------------------------------------------------------- - double precision function CalcIntXpdfFixN(pdfpars,n) -C--------------------------------------------------------------- -C Calculated \int xpdf(x) dx using the standard PDF -C parameterisation. 
Sum up to N-th term (max N=6) -C--------------------------------------------------------------- - implicit none - double precision pdfpars(10) - integer N -C--- - integer i - double precision sum - - double precision CalcIntegral -C--------------------------------------------------------------- - sum = CalcIntegral(pdfpars(2),pdfpars(3)) - do i=1,N-3 - if ( pdfpars(3+i).ne.0 ) then - sum = sum + pdfpars(3+i) - $ * CalcIntegral(pdfpars(2)+i,pdfpars(3)) - endif - enddo - CalcIntXpdfFixN = sum -C--------------------------------------------------------------- - end - -* -------------------------------------------------------------- - double precision function CalcIntegral(alpha,beta) -* -------------------------------------------------------------- - -* Calculates int_0^1 dx x^(alpha) (1-x)^(beta) -* Requires alpha > -1 and beta > -1 -* Note: DGamma(x) infinite for x above ~ 170 - - implicit none - - double precision alpha,beta - double precision eps,aa,bb,u,v,uv,DGAMMF - - eps = 1d-5 - aa = alpha+1.d0 - bb = beta+1.d0 - if (aa.le.0d0) aa =eps - if (bb.le.0d0) bb =eps - u = DGAMMF(aa) - v = DGAMMF(bb) - uv = DGAMMF(aa+bb) - CalcIntegral = u*v / uv - - return - end - - double precision function CalcIntegralCheb(ncheb,poly,xminlog - $ ,iflag) -C -C Created by SG 15 July 2009. -C -C Modified 30 Oct 2009: add a new argument "iflag" -C -C iflag = 0: default param. -C iflag = 1: default * (1-x) param. -C - implicit none -C -C Keep both simple numerical and analytic formula. -C - integer ncheb - double precision poly(ncheb),xminlog - integer iflag - double precision zero,one,result,xx,temp - external gluon - integer i - double precision PolyParam,chebint,chebint2,chebint2big -C----------------------------------------------------- - if (ncheb.le.15) then - if (iflag.eq.0) then - result = ChebInt(-2.D0/xminlog,poly) - elseif (iflag.eq.1) then - result = ChebInt2(-2.D0/xminlog,poly) - endif - elseif (ncheb.le.30 .and. iflag.eq.1 ) then - result = ChebInt2big(-2.D0/xminlog,poly) - else - zero = 1.0D-6 - one = 1.0D0 - zero - - result = 0. - -C ssdint(zero,gluon,one) - do i=1,100 - xx = i/100.D0 - 1./200D0 - temp = PolyParam(xx,ncheb,Poly,xminlog) - if (iflag.eq.0) then - else if (iflag.eq.1) then - temp = temp * ( 1 - xx ) ! new 30 oct 2009, SG. - endif - result = result + temp - enddo - result = result/100. - - endif - - print *,'Cheb integral=',result,' flag=',iflag - - CalcIntegralCheb = result - -C----------------------------------------------------- - - end - - - double precision function PolyValInt(NPOLY,Poly) -C----------------------------------------------------- -C 25 Jan 2011 -C -C Evaluate integral \int_0^1 dx ( xf(x)/x ) for PolyVal param of xf(x) -C - implicit none - integer NPOLY - double precision Poly(NPOLY) - integer i - double precision sum -C------------------------------------------------ - sum = 0 - do i=NPoly,1,-1 - sum = sum + Poly(i)/i - enddo - -C Jackbian gives factor 3./2. : - - PolyValInt = 3.D0/2.D0 * sum - - end - - - double precision function PolyValInt0(NPOLY,Poly) -C----------------------------------------------------- -C 25 Jan 2011 -C -C Evaluate integral \int_0^1 dx ( xf(x) ) for PolyVal param of xf(x) -C - implicit none - integer NPOLY - double precision Poly(NPOLY) - integer i - double precision sum -C------------------------------------------------ - sum = 0 - do i=NPoly,1,-1 - sum = sum + Poly(i)/(i+1+0.5) - enddo - -C Jackbian gives factor 3./2. 
: - - PolyValInt0 = 3.D0/2.D0 * sum - - end - - - - double precision function ChebInt(a,c) -C-------------------------------------- -C -C Automaticaly generated by Maple. Up to 15 polynomials -C -C-------------------------------------- - implicit double precision (t) - double precision c(15),a - t1 = c(1) - t2 = c(12) - t3 = a ** 2 - t4 = t3 ** 2 - t5 = t4 * a - t8 = c(2) - t9 = c(5) - t10 = c(4) - t11 = c(3) - t12 = c(7) - t13 = c(6) - t14 = c(8) - t15 = c(15) - t16 = t4 ** 2 - t17 = t16 * a - t20 = c(13) - t21 = t16 * t4 - t24 = t3 * a - t25 = t4 * t24 - t30 = t1 - 0.55440D5 * t2 * t5 + t8 + t9 + t10 + t11 + t12 + t13 + - # t14 + t15 - 0.726485760D9 * t15 * t17 + 0.479001600D9 * t20 * t21 - # - 0.1663200D7 * t2 * t25 + t20 + 0.182D3 * t15 * t3 - t33 = c(10) - t50 = c(14) - t57 = 0.121080960D9 * t15 * t16 + t33 + 0.110D3 * t2 * t3 + 0.3024 - #D4 * t33 * t4 - 0.15120D5 * t33 * t5 + 0.30D2 * t12 * t3 - 0.4D1 * - # t9 * a + t2 - 0.504D3 * t33 * t24 - 0.181440D6 * t33 * t25 - 0.99 - #0D3 * t2 * t24 + t50 + 0.51891840D8 * t50 * t16 + 0.360D3 * t12 * - #t4 - 0.3991680D7 * t20 * t25 - t61 = t16 * t3 - t70 = t16 * t5 - t73 = t4 * t3 - t80 = c(9) - t87 = c(11) - t92 = 0.6227020800D10 * t50 * t21 + 0.1037836800D10 * t50 * t61 - - #0.8648640D7 * t50 * t25 - 0.6D1 * t12 * a - 0.259459200D9 * t50 * - #t17 - 0.8717829120D11 * t15 * t70 + 0.1235520D7 * t50 * t73 - 0.12 - #0D3 * t12 * t24 + 0.720D3 * t12 * t73 + t80 + 0.24D2 * t9 * t4 - 0 - #.120D3 * t13 * t5 - 0.5D1 * t13 * a + 0.151200D6 * t87 * t73 + 0.2 - #0D2 * t13 * t3 - t123 = -0.720D3 * t12 * t5 - 0.2D1 * t11 * a + 0.17160D5 * t50 * t - #4 + 0.90D2 * t87 * t3 + 0.42D2 * t14 * t3 + 0.665280D6 * t20 * t73 - # - 0.154440D6 * t50 * t5 - 0.8D1 * t80 * a + 0.56D2 * t80 * t3 + 0 - #.40320D5 * t80 * t16 - 0.336D3 * t80 * t24 + 0.19958400D8 * t20 * - #t16 + 0.5040D4 * t14 * t73 + 0.1680D4 * t80 * t4 + 0.2162160D7 * t - #15 * t73 - t144 = t16 * t24 - t157 = -0.9D1 * t33 * a - 0.6720D4 * t80 * t5 + 0.20160D5 * t80 * - #t73 - 0.11D2 * t2 * a - 0.2520D4 * t14 * t5 + 0.11880D5 * t20 * t4 - # - 0.10D2 * t87 * a + 0.4358914560D11 * t15 * t21 - 0.240240D6 * t - #15 * t5 - 0.1452971520D11 * t15 * t144 - 0.24D2 * t9 * t24 - 0.210 - #D3 * t14 * t24 + 0.156D3 * t50 * t3 - 0.1716D4 * t50 * t24 - 0.622 - #7020800D10 * t50 * t70 - t188 = 0.12D2 * t9 * t3 - 0.39916800D8 * t2 * t144 - 0.6D1 * t10 * - # t24 + 0.6D1 * t10 * t3 - 0.14D2 * t15 * a - 0.3D1 * t10 * a + 0.6 - #652800D7 * t2 * t16 - 0.95040D5 * t20 * t5 + 0.2D1 * t11 * t3 - 0. 
- #3628800D7 * t87 * t17 + 0.7920D4 * t2 * t4 + 0.39916800D8 * t2 * t - #61 - 0.5040D4 * t14 * t25 - 0.7D1 * t14 * a - 0.40320D5 * t80 * t2 - #5 - t219 = -0.1D1 * t8 * a - 0.604800D6 * t87 * t25 + 0.24024D5 * t15 - #* t4 - 0.79833600D8 * t20 * t17 - 0.362880D6 * t33 * t17 - 0.12D2 - #* t20 * a + 0.60480D5 * t33 * t73 - 0.1320D4 * t20 * t24 + 0.36288 - #0D6 * t33 * t16 - 0.60D2 * t13 * t24 + 0.120D3 * t13 * t4 + 0.5040 - #D4 * t87 * t4 + 0.239500800D9 * t20 * t61 + t87 + 0.8717829120D11 - #* t15 * t16 * t73 - t250 = -0.479001600D9 * t20 * t144 + 0.3628800D7 * t87 * t61 + 0.3 - #632428800D10 * t15 * t61 + 0.72D2 * t33 * t3 + 0.132D3 * t20 * t3 - #- 0.30240D5 * t87 * t5 - 0.13D2 * t50 * a - 0.17297280D8 * t15 * t - #25 + 0.1814400D7 * t87 * t16 + 0.840D3 * t14 * t4 + 0.332640D6 * t - #2 * t73 - 0.3113510400D10 * t50 * t144 - 0.720D3 * t87 * t24 - 0.2 - #184D4 * t15 * t24 - 0.19958400D8 * t2 * t17 - t253 = t30 + t57 + t92 + t123 + t157 + t188 + t219 + t250 - - ChebInt = t253 - - end - - double precision function chebint2(a,c) - implicit double precision (t) - double precision c(15),a -C -C Output of -C -C with(CodeGeneration); -C Fortran(int((1-x)*sum(c[i]*(a*log(x)+1)^(i-1),i=1..15),x=0..1.0),optimize); -C -C-------------------------- - t1 = a ** 2 - t2 = t1 ** 2 - t3 = t2 ** 2 - t4 = t3 * t2 - t7 = t1 * a - t14 = t2 * a - t15 = t3 * t14 - t20 = t2 * t1 - t23 = t2 * t7 - t40 = 0.6226260666D10 * c(14) * t4 - 0.675D3 * c(11) * t7 + 0.1991 - #941875D8 * c(13) * t3 - 0.1050000000D2 * c(15) * a - 0.8717297026D - #11 * c(15) * t15 + 0.1208444738D9 * c(15) * t3 + 0.7143750000D3 * - #c(7) * t20 - 0.8614856250D7 * c(14) * t23 + 0.1225867500D7 * c(14) - # * t20 + 0.1050000000D2 * c(5) * t1 - 0.9750000000D1 * c(14) * a - - # 0.1520268750D6 * c(14) * t14 + 0.1662375000D5 * c(14) * t2 - 0.22 - #50000000D2 * c(5) * t7 - 0.1656703125D7 * c(12) * t23 - t51 = t3 * t1 - t62 = t3 * a - t73 = -0.5625000000D2 * c(6) * t7 - 0.2976750000D5 * c(11) * t14 + - # 0.3675000000D2 * c(8) * t1 + 0.1592500000D3 * c(15) * t1 - 0.1237 - #500000D4 * c(13) * t7 + 0.3989730938D8 * c(12) * t51 - 0.9D1 * c(1 - #3) * a + 0.2000250000D5 * c(9) * t20 - 0.93555D5 * c(13) * t14 - 0 - #.3976087500D7 * c(13) * t23 - 0.3625256250D6 * c(10) * t62 - 0.112 - #5000000D3 * c(7) * t7 + 0.2325000000D2 * c(5) * t2 + 0.2145268125D - #7 * c(15) * t20 + 0.3627028125D7 * c(11) * t51 - t83 = t3 * t7 - t106 = -0.7975563750D8 * c(13) * t62 - 0.1181250000D3 * c(6) * t14 - # + 0.9625000000D2 * c(12) * t1 + 0.4024125000D5 * c(9) * t3 - 0.39 - #90705469D8 * c(12) * t83 - 0.1452616791D11 * c(15) * t83 - 0.75000 - #00000D1 * c(11) * a + 0.4882500000D4 * c(11) * t2 - 0.5625000000D1 - # * c(4) * t7 + 0.3621712500D6 * c(10) * t3 + 0.7875000000D2 * c(11 - #) * t1 - 0.7257763012D9 * c(15) * t62 - 0.2480625000D4 * c(8) * t1 - #4 - 0.1488375000D5 * c(10) * t14 - 0.4788846562D9 * c(13) * t83 - t130 = 0.2929500000D4 * c(10) * t2 - 0.6D1 * c(9) * a + 0.49D2 * c - #(9) * t1 - 0.1500000000D1 * c(3) * a - 0.9281250000D3 * c(12) * t7 - # - 0.315D3 * c(9) * t7 + 0.5000000000D0 * c(9) + 0.5000000000D0 * - #c(15) - 0.4016250000D5 * c(9) * t23 + 0.5000000000D0 * c(8) + 0.50 - #00000000D0 * c(13) + 0.5000000000D0 * c(10) + 0.5000000000D0 * c(4 - #) + 0.5000000000D0 * c(11) + 0.3630655153D10 * c(15) * t51 - t158 = 0.5000000000D0 * c(6) + 0.5250000000D1 * c(4) * t1 + 0.5000 - #000000D0 * c(14) + 0.5000000000D0 * c(12) - 0.6226640733D10 * c(14 - #) * t15 + 0.3300412500D6 * c(12) * t20 - 0.3625256250D7 * c(11) * - #t62 - 0.1608750000D4 * c(14) * t7 + 0.1162500000D3 * c(6) * t2 + 0 
- #.6600825000D6 * c(13) * t20 + 0.5000000000D0 * c(2) - 0.1807312500 - #D6 * c(10) * t23 - 0.1722971250D8 * c(15) * t23 + 0.2327325000D5 * - # c(15) * t2 + 0.5000000000D0 * c(7) - t187 = 0.2625000000D2 * c(7) * t1 - 0.3750000000D1 * c(6) * a - 0. - #4500000000D1 * c(7) * a - 0.5457375000D5 * c(12) * t14 + 0.2393838 - #562D9 * c(13) * t51 - 0.1993890938D8 * c(12) * t62 + 0.1750000000D - #2 * c(6) * t1 + 0.5000000000D0 * c(5) + 0.5000000000D0 * c(3) - 0. - #3112750266D10 * c(14) * t83 + 0.5179048875D8 * c(14) * t3 + 0.4789 - #431281D9 * c(13) * t4 - 0.4725000000D3 * c(10) * t7 - 0.2047500000 - #D4 * c(15) * t7 - 0.2364862500D6 * c(15) * t14 - t218 = -0.7500000000D0 * c(2) * a - 0.1968750000D3 * c(8) * t7 - 0 - #.5020312500D4 * c(8) * t23 + 0.1810856250D7 * c(11) * t3 - 0.52500 - #00000D1 * c(8) * a + 0.1037330044D10 * c(14) * t51 - 0.2250000000D - #1 * c(4) * a - 0.3D1 * c(5) * a + 0.5000625000D4 * c(8) * t20 + 0. - #8137500000D3 * c(8) * t2 + 0.5000000000D0 * c(1) - 0.7087500000D3 - #* c(7) * t14 + 0.3487500000D3 * c(7) * t2 + 0.1365000000D3 * c(14) - # * t1 - 0.6615D4 * c(9) * t14 - t250 = 0.63D2 * c(10) * t1 - 0.6750000000D1 * c(10) * a + 0.435838 - #2466D11 * c(15) * t4 + 0.6000750000D5 * c(10) * t20 + 0.6639806250 - #D7 * c(12) * t3 - 0.2592058219D9 * c(14) * t62 + 0.8717563073D11 * - # c(15) * t3 * t20 + 0.1500187500D6 * c(11) * t20 + 0.1155000000D3 - #* c(13) * t1 + 0.7672500000D4 * c(12) * t2 + 0.1627500000D4 * c(9) - # * t2 + 0.1150875000D5 * c(13) * t2 - 0.6024375000D6 * c(11) * t23 - # - 0.8250000000D1 * c(12) * a + 0.1750000000D1 * c(3) * t1 - t253 = t40 + t73 + t106 + t130 + t158 + t187 + t218 + t250 - ChebInt2 = t253 - end - - - double precision function chebint2big(a,c) - implicit double precision (t) - double precision c(30),a -C -C Output of -C -C with(CodeGeneration); -C Fortran(int((1-x)*sum(c[i]*(a*log(x)+1)^(i-1),i=1..30),x=0..1.0),optimize); -C -C-------------------------- - - - t1 = a ** 2 - t2 = t1 ** 2 - t3 = t2 ** 2 - t4 = t3 ** 2 - t5 = t4 * a - t8 = t1 * a - t9 = t3 * t8 - t12 = t2 * a - t17 = t3 * a - t22 = t4 * t8 - t25 = t2 * t1 - t28 = t4 * t1 - t31 = t2 * t8 - t32 = t3 * t31 - t43 = -0.2128781136D19 * c(22) * t5 - 0.4788846562D9 * c(13) * t9 - #- 0.6275981250D7 * c(26) * t12 + 0.1050000000D2 * c(5) * t1 - 0.18 - #14440753D10 * c(16) * t17 + 0.8137500000D3 * c(8) * t2 - 0.1077166 - #337D22 * c(24) * t22 + 0.1326165750D8 * c(19) * t25 + 0.8515140787 - #D19 * c(22) * t28 - 0.1014200549D21 * c(30) * t32 + 0.2625000000D2 - # * c(7) * t1 - 0.5625000000D2 * c(6) * t8 - 0.1968750000D3 * c(8) - #* t8 - 0.5250000000D1 * a * c(8) - t44 = t4 * t3 - t57 = t3 * t2 - t58 = t4 * t57 - t67 = t4 * t2 - t74 = t3 * t1 - t79 = 0.6204483832D24 * c(25) * t44 + 0.1662375000D5 * c(14) * t2 - #+ 0.1592500000D3 * c(15) * t1 + 0.1225867500D7 * c(14) * t25 - 0.1 - #050000000D2 * a * c(15) - 0.8614856250D7 * c(14) * t31 + 0.8841761 - #977D31 * c(30) * t58 - 0.3974788125D7 * c(24) * t12 + 0.5069190262 - #D10 * c(21) * t3 + 0.238D3 * c(18) * t1 + 0.2432900848D19 * c(21) - #* t67 + 0.8325880287D16 * c(28) * t57 - 0.9281250000D3 * c(12) * t - #8 + 0.4149559559D13 * c(24) * t74 + 0.4308667402D22 * c(24) * t67 - t85 = t3 * t25 - t94 = t4 * t74 - t101 = t3 * t12 - t112 = -0.2529635062D9 * c(20) * t31 - 0.4896140581D20 * c(29) * t - #32 + 0.7123905368D17 * c(24) * t85 - 0.6615D4 * c(9) * t12 - 0.140 - #7585670D14 * c(22) * t9 - 0.5457375000D5 * c(12) * t12 + 0.1088886 - #937D29 * c(28) * t94 - 0.6082231818D17 * c(20) * t5 - 0.2047500000 - #D4 * c(15) * t8 - 0.1248958278D18 * c(28) * t101 - 
0.1125000000D2 - #* a * c(16) - 0.1722971250D8 * c(15) * t31 + 0.2904524122D11 * c(1 - #7) * t74 - 0.4938897088D12 * c(18) * t9 - t121 = t4 * t25 - t126 = t4 * t17 - t145 = 0.3630655153D10 * c(15) * t74 + 0.90117D5 * c(20) * t2 + 0. - #2179191233D12 * c(16) * t57 + 0.2490647949D16 * c(26) * t57 + 0.16 - #80380888D26 * c(27) * t121 - 0.18D2 * a * c(25) - 0.1551120981D26 - #* c(26) * t126 - 0.2976750000D5 * c(11) * t12 + 0.4882500000D4 * c - #(11) * t2 + 0.7368134775D29 * c(30) * t44 + 0.1837500000D3 * c(16) - # * t1 + 0.3575446875D7 * c(16) * t25 - 0.7638066718D22 * c(29) * t - #5 - 0.2230126145D18 * c(23) * t32 - 0.1762599589D11 * c(19) * t17 - t160 = t4 * t31 - t177 = 0.1216448684D19 * c(21) * t28 - 0.2027387404D17 * c(21) * t - #32 - 0.2055375000D5 * c(30) * t8 - 0.9961875000D4 * c(24) * t8 - 0 - #.3230571094D8 * c(16) * t31 - 0.11385D5 * c(25) * t8 - 0.775560455 - #9D25 * c(26) * t160 - 0.1216449844D18 * c(20) * t22 - 0.4274408444 - #D19 * c(26) * t32 - 0.4225642174D18 * c(30) * t101 - 0.3990705469D - #8 * c(12) * t9 - 0.7309575000D6 * c(18) * t12 + 0.1754317647D28 * - #c(30) * t121 + 0.2470545000D6 * c(25) * t2 - t194 = t4 * t9 - t197 = t4 * t12 - t210 = 0.7113530672D13 * c(25) * t74 - 0.1181250000D3 * c(6) * t12 - # + 0.6639806250D7 * c(12) * t3 + 0.5179048875D8 * c(14) * t3 - 0.7 - #123687957D16 * c(24) * t101 - 0.4740037048D12 * c(25) * t17 + 0.20 - #16457246D27 * c(27) * t44 + 0.7143750000D3 * c(7) * t25 - 0.442088 - #0980D31 * c(30) * t194 - 0.2192896797D27 * c(30) * t197 - 0.362525 - #6250D6 * c(10) * t17 - 0.6463002644D24 * c(26) * t197 - 0.21543326 - #74D23 * c(26) * t22 - 0.7095855915D17 * c(22) * t32 - 0.2273208127 - #D20 * c(28) * t32 - t240 = 0.1644705562D9 * c(27) * t25 + 0.5129329267D19 * c(24) * t4 - # + 0.3201162430D16 * c(19) * t4 + 0.1473626988D31 * c(30) * t94 + - #0.2787615144D17 * c(23) * t85 - 0.2503928239D13 * c(29) * t17 + 0. 
- #1208444738D9 * c(15) * t3 + 0.483D3 * c(25) * t1 + 0.4080375000D6 - #* c(28) * t2 + 0.525D3 * c(26) * t1 + 0.2325000000D2 * c(5) * t2 - - # 0.1292600529D23 * c(24) * t197 - 0.5447312965D11 * c(16) * t9 + 0 - #.1587114967D12 * c(19) * t74 - t271 = 0.2727870565D21 * c(28) * t4 + 0.1013701436D18 * c(21) * t4 - # + 0.210D3 * c(17) * t1 - 0.4537028667D27 * c(28) * t160 - 0.30162 - #55007D13 * c(20) * t9 - 0.2403725625D7 * c(22) * t12 - 0.137355750 - #0D7 * c(20) * t12 + 0.1810856250D7 * c(11) * t3 + 0.9074056794D26 - #* c(28) * t121 + 0.1279310852D13 * c(22) * t74 + 0.6226260666D10 * - # c(14) * t57 - 0.3150D4 * c(17) * t8 - 0.2025000000D2 * a * c(28) - #+ 0.38764845D8 * c(22) * t25 + 0.1295136934D16 * c(25) * t57 - t303 = 0.9625000000D2 * c(12) * t1 + 0.1365000000D3 * c(14) * t1 + - # 0.3300412500D6 * c(12) * t25 + 0.1155000000D3 * c(13) * t1 + 0.17 - #50000000D1 * c(3) * t1 - 0.1500000000D1 * a * c(3) + 0.7672500000D - #4 * c(12) * t2 + 0.7264940961D14 * c(30) * t74 + 0.1407757536D15 * - # c(22) * t57 - 0.93555D5 * c(13) * t12 - 0.1402793438D8 * c(30) * - #t12 - 0.2554544672D20 * c(22) * t22 + 0.5601267623D24 * c(27) * t6 - #7 + 0.2145268125D7 * c(15) * t25 - t332 = 0.2677500000D3 * c(19) * t1 - 0.15D2 * a * c(21) + 0.332500 - #0000D3 * c(21) * t1 + 0.3675000000D3 * c(22) * t1 - 0.1575000000D2 - # * a * c(22) - 0.1350000000D2 * a * c(19) + 0.6142500000D3 * c(28) - # * t1 + 0.4427500000D3 * c(24) * t1 - 0.5020312500D4 * c(8) * t31 - #- 0.315D3 * c(9) * t8 - 0.12D2 * a * c(17) + 0.1750000000D2 * c(6) - # * t1 - 0.3750000000D1 * a * c(6) + 0.5000000000D0 * c(12) + 0.500 - #0000000D0 * c(13) - t348 = 0.5000000000D0 * c(14) + 0.5000000000D0 * c(15) + 0.5000000 - #000D0 * c(16) + 0.5000000000D0 * c(17) + 0.5000000000D0 * c(18) + - #0.5000000000D0 * c(19) + 0.5000000000D0 * c(20) + 0.5000000000D0 * - # c(21) + 0.5000000000D0 * c(22) + 0.5000000000D0 * c(23) + 0.50000 - #00000D0 * c(24) + 0.5000000000D0 * c(25) + 0.5000000000D0 * c(26) - #+ 0.5000000000D0 * c(27) - t375 = 0.5000000000D0 * c(1) + 0.5000000000D0 * c(2) + 0.500000000 - #0D0 * c(28) + 0.5000000000D0 * c(29) + 0.5000000000D0 * c(30) - 0. 
- #5944250812D10 * c(29) * t31 - 0.1650000000D2 * a * c(23) + 0.40241 - #25000D5 * c(9) * t3 - 0.7500000000D0 * c(2) * a + 0.4683327433D20 - #* c(23) * t28 - 0.9961375512D14 * c(25) * t9 - 0.8841761986D31 * c - #(30) * t4 * t101 + 0.3675000000D2 * c(8) * t1 + 0.3885766564D18 * - #c(26) * t85 + 0.6402361494D16 * c(19) * t28 - t406 = -0.856184175D9 * c(23) * t31 + 0.4759788906D14 * c(29) * t7 - #4 - 0.1034080423D24 * c(25) * t197 - 0.1488375000D5 * c(10) * t12 - #+ 0.6000750000D5 * c(10) * t25 + 0.5109091781D20 * c(22) * t67 + 0 - #.6761233822D19 * c(30) * t85 - 0.6702788905D13 * c(21) * t9 + 0.63 - #65031317D21 * c(29) * t4 - 0.5837619375D9 * c(22) * t31 - 0.936663 - #7000D19 * c(23) * t5 - 0.7087500000D3 * c(7) * t12 - 0.5625000000D - #1 * c(4) * t8 + 0.63D2 * c(10) * t1 - t437 = 0.1500187500D6 * c(11) * t25 - 0.1293750000D5 * c(26) * t8 - #+ 0.3041514158D10 * c(20) * t3 + 0.3059864297D14 * c(28) * t74 - 0 - #.6537972769D12 * c(16) * t101 + 0.1124000594D22 * c(23) * t121 + 0 - #.5522107500D6 * c(30) * t2 - 0.11609325D8 * c(29) * t12 - 0.544443 - #4644D28 * c(28) * t126 - 0.389174625D9 * c(21) * t31 + 0.170073750 - #0D6 * c(23) * t2 + 0.1524441712D30 * c(29) * t94 + 0.1037330044D10 - # * c(14) * t74 + 0.1419891602D22 * c(30) * t4 - 0.1875000000D2 * a - # * c(26) - t467 = -0.1656703125D7 * c(12) * t31 + 0.4042500000D3 * c(23) * t1 - # + 0.1046107569D14 * c(17) * t85 - 0.3486918810D13 * c(17) * t101 - #- 0.3048883435D30 * c(29) * t194 - 0.1111358914D22 * c(27) * t5 + - #0.1089196546D11 * c(16) * t74 + 0.3097066580D15 * c(23) * t57 - 0. - #1270002108D13 * c(19) * t9 - 0.3630695947D13 * c(30) * t17 - 0.160 - #8750000D4 * c(14) * t8 + 0.3627028125D7 * c(11) * t74 - 0.31127502 - #66D10 * c(14) * t9 + 0.1265158125D9 * c(26) * t25 - t498 = 0.4760437500D6 * c(29) * t2 + 0.2691336375D9 * c(29) * t25 - #- 0.9763503750D8 * c(18) * t31 + 0.7875000000D2 * c(11) * t1 + 0.7 - #053844298D11 * c(18) * t74 - 0.6411612666D18 * c(24) * t32 - 0.825 - #0000000D1 * a * c(12) - 0.1125000000D3 * c(7) * t8 - 0.1512342619D - #26 * c(28) * t197 - 0.1520268750D6 * c(14) * t12 - 0.8662500000D4 - #* c(23) * t8 - 0.7257763012D9 * c(15) * t17 - 0.1689412164D15 * c( - #20) * t101 - 0.3684067442D30 * c(30) * t126 - 0.1950000000D2 * a * - # c(27) - t531 = 0.6286789884D11 * c(27) * t3 - 0.2250000000D2 * c(5) * t8 + - # 0.27689175D8 * c(21) * t25 + 0.7105000000D3 * c(30) * t1 - 0.3D1 - #* a * c(5) + 0.3000674791D23 * c(28) * t28 + 0.2585201366D23 * c(2 - #4) * t121 + 0.1727212800D12 * c(30) * t3 + 0.1627500000D4 * c(9) * - # t2 - 0.4725000000D3 * c(10) * t8 + 0.1709737288D18 * c(25) * t85 - #- 0.2480625000D4 * c(8) * t12 - 0.8401897427D24 * c(29) * t22 + 0. - #1286794451D11 * c(23) * t3 - t562 = -0.4500000000D1 * a * c(7) - 0.2559375000D4 * c(16) * t8 - - #0.5170398417D22 * c(25) * t22 + 0.2485520145D17 * c(30) * t57 - 0. 
- #1228022426D29 * c(30) * t160 + 0.5720715D7 * c(17) * t25 + 0.42575 - #46032D18 * c(22) * t4 + 0.1551120958D26 * c(26) * t44 - 0.33023615 - #62D10 * c(27) * t31 + 0.2959627238D11 * c(25) * t3 + 0.3487500000D - #3 * c(7) * t2 + 0.9782647875D9 * c(18) * t3 + 0.3621712500D6 * c(1 - #0) * t3 + 0.1126462500D6 * c(21) * t2 - 0.2540736054D28 * c(29) * - #t160 - t592 = 0.2160488940D25 * c(28) * t67 + 0.4234559837D27 * c(29) * t - #121 + 0.2589524438D9 * c(16) * t3 - 0.5334985780D14 * c(19) * t101 - # - 0.4147293150D10 * c(17) * t17 - 0.8569712756D15 * c(29) * t9 + - #0.9615201750D8 * c(25) * t25 + 0.2000250000D5 * c(9) * t25 - 0.296 - #2523155D12 * c(24) * t17 + 0.1000224930D23 * c(27) * t28 + 0.42315 - #D5 * c(17) * t2 + 0.5250000000D1 * c(4) * t1 - 0.2250000000D1 * a - #* c(4) - 0.3556860713D15 * c(18) * t5 - t623 = -0.159766425D9 * c(19) * t31 + 0.8891100231D13 * c(19) * t5 - #7 - 0.2092247063D14 * c(17) * t32 - 0.1831410D7 * c(21) * t12 + 0. - #4032914581D27 * c(27) * t94 - 0.1554259191D17 * c(25) * t101 + 0.3 - #378927447D16 * c(21) * t85 - 0.2413264219D10 * c(26) * t31 + 0.111 - #1354674D21 * c(27) * t4 - 0.1452616791D11 * c(15) * t9 - 0.2364862 - #500D6 * c(15) * t12 + 0.4352392997D11 * c(26) * t3 + 0.2585200441D - #23 * c(25) * t67 - 0.1065571570D12 * c(22) * t17 + 0.5620000959D21 - # * c(23) * t67 - t654 = 0.8419160889D18 * c(27) * t85 - 0.5068468510D16 * c(20) * t - #32 - 0.1307654414D13 * c(16) * t32 + 0.1013678234D16 * c(20) * t85 - # - 0.7406307887D12 * c(26) * t17 + 0.55335D5 * c(18) * t2 - 0.5450 - #625000D4 * c(20) * t8 + 0.2992500000D3 * c(20) * t1 - 0.1425000000 - #D2 * a * c(20) + 0.1991941875D8 * c(13) * t3 + 0.3173625000D5 * c( - #16) * t2 - 0.7770262500D7 * c(27) * t12 + 0.2345403229D13 * c(23) - #* t74 + 0.1926581224D14 * c(27) * t74 - t685 = 0.3556847144D15 * c(18) * t4 + 0.5000625000D4 * c(8) * t25 - #+ 0.8188691962D10 * c(22) * t3 - 0.3238039980D17 * c(26) * t101 + - #0.2092263026D14 * c(17) * t4 + 0.4358382466D11 * c(15) * t57 + 0.5 - #179048875D9 * c(17) * t3 - 0.3097255633D16 * c(23) * t101 - 0.5743 - #2375D8 * c(17) * t31 + 0.8716764932D12 * c(17) * t57 - 0.243655025 - #4D25 * c(30) * t22 - 0.1845866123D23 * c(30) * t5 - 0.7975563750D8 - # * c(13) * t17 - 0.1088886941D29 * c(28) * t194 - 0.2700609887D24 - #* c(28) * t22 - t715 = -0.1807312500D6 * c(10) * t31 - 0.675D3 * c(11) * t8 + 0.12 - #70368065D29 * c(29) * t44 + 0.1973084825D11 * c(24) * t3 - 0.21D2 - #* a * c(29) + 0.2929500000D4 * c(10) * t2 - 0.3000669068D22 * c(28 - #) * t5 - 0.1709763378D19 * c(25) * t32 + 0.2215043573D24 * c(30) * - # t28 - 0.1067046002D16 * c(19) * t32 + 0.6475684668D15 * c(24) * t - #57 - 0.2175000000D2 * a * c(30) - 0.2432899688D19 * c(21) * t22 - - #0.2592058219D9 * c(14) * t17 - t746 = -0.2331388786D18 * c(29) * t101 + 0.1938242250D8 * c(20) * - #t25 + 0.1162500000D3 * c(6) * t2 + 0.4789431281D9 * c(13) * t57 - - #0.9D1 * a * c(13) + 0.8617322477D21 * c(25) * t28 - 0.4458188109D1 - #0 * c(28) * t31 - 0.3825D4 * c(18) * t8 - 0.9536231250D7 * c(28) * - # t12 - 0.1699094162D13 * c(28) * t17 + 0.6600825000D6 * c(13) * t2 - #5 + 0.1391512500D6 * c(22) * t2 - 0.7481250000D4 * c(22) * t8 - 0. 
- #6402349283D16 * c(19) * t5 - 0.6049370475D26 * c(29) * t197 - t778 = 0.1457029050D17 * c(29) * t57 - 0.5081472334D29 * c(29) * t - #126 + 0.6615000000D3 * c(29) * t1 - 0.1842750000D5 * c(29) * t8 + - #0.1760876618D10 * c(19) * t3 + 0.1013678234D17 * c(22) * t85 - 0.3 - #348939219D11 * c(20) * t17 + 0.8717563073D11 * c(15) * t85 - 0.520 - #3039888D15 * c(28) * t9 - 0.3976087500D7 * c(13) * t31 + 0.6701152 - #083D12 * c(21) * t74 + 0.2667574300D15 * c(19) * t85 - 0.336076137 - #5D25 * c(27) * t197 - 0.7500000000D1 * a * c(11) - t809 = 0.1748594954D19 * c(28) * t85 + 0.2114621438D9 * c(28) * t2 - #5 + 0.8933859309D11 * c(28) * t3 - 0.3625256250D7 * c(11) * t17 - - #0.6088980398D11 * c(21) * t17 - 0.1743140149D12 * c(17) * t9 - 0.5 - #15970D6 * c(17) * t12 + 0.5330166188D8 * c(23) * t25 - 0.602437500 - #0D6 * c(11) * t31 + 0.1292600221D24 * c(26) * t67 - 0.6750000000D1 - # * a * c(10) - 0.3110703750D7 * c(23) * t12 + 0.2154330619D21 * c( - #24) * t28 + 0.1561100212D19 * c(23) * t4 - 0.2815171340D14 * c(23) - # * t9 - t839 = -0.1267059123D16 * c(22) * t101 + 0.3102241639D24 * c(25) * - # t121 - 0.4016250000D5 * c(9) * t31 + 0.4625489048D16 * c(27) * t5 - #7 + 0.8401889415D23 * c(29) * t28 + 0.2413298634D14 * c(20) * t57 - #+ 0.6033246585D14 * c(21) * t57 - 0.1481940494D14 * c(18) * t101 + - # 0.3350576041D12 * c(20) * t74 + 0.1814811521D28 * c(28) * t44 - 0 - #.4032914551D27 * c(27) * t126 + 0.7561711290D25 * c(29) * t67 + 0. - #8841105D7 * c(18) * t25 - 0.6226640733D10 * c(14) * t101 - t861 = -0.1230764752D10 * c(24) * t31 + 0.5000000000D0 * c(3) + 0. - #5000000000D0 * c(4) + 0.5000000000D0 * c(5) + 0.5000000000D0 * c(6 - #) + 0.5000000000D0 * c(7) + 0.5000000000D0 * c(8) + 0.5000000000D0 - # * c(9) + 0.5000000000D0 * c(10) + 0.5000000000D0 * c(11) + 0.3077 - #615170D22 * c(26) * t28 - 0.6D1 * a * c(9) - 0.1778817056D15 * c(2 - #6) * t9 + 0.2327325000D5 * c(15) * t2 - 0.1993890938D8 * c(12) * t - #17 - t892 = -0.14625D5 * c(27) * t8 + 0.4274441055D20 * c(26) * t4 + 0. - #2941125000D6 * c(26) * t2 - 0.2585201520D23 * c(24) * t160 - 0.127 - #5000000D2 * a * c(18) + 0.71145D5 * c(19) * t2 + 0.2027402872D17 * - # c(20) * t4 - 0.1132729442D13 * c(27) * t17 - 0.4054821212D18 * c( - #21) * t5 + 0.1307634461D13 * c(16) * t85 - 0.3547293750D6 * c(16) - #* t12 - 0.1737550238D10 * c(25) * t31 + 0.1250740303D12 * c(29) * - #t3 + 0.3048883440D30 * c(29) * t58 - t923 = -0.5109092999D20 * c(22) * t197 - 0.5395745069D14 * c(24) * - # t9 - 0.8001807074D23 * c(27) * t22 + 0.2963700077D13 * c(18) * t5 - #7 + 0.3393424125D9 * c(30) * t25 - 0.1645312500D5 * c(28) * t8 - 0 - #.3083282896D15 * c(27) * t9 - 0.1725000000D2 * a * c(24) + 0.49D2 - #* c(9) * t1 - 0.8812997944D10 * c(18) * t17 - 0.3590544184D20 * c( - #24) * t5 - 0.6204483648D24 * c(25) * t160 - 0.5020785D7 * c(25) * - #t12 - 0.1873332760D21 * c(23) * t22 + 0.1185588445D14 * c(26) * t7 - #4 - t955 = -0.1124000460D22 * c(23) * t197 + 0.5687500000D3 * c(27) * - #t1 + 0.3475875000D6 * c(27) * t2 - 0.6721523951D26 * c(27) * t160 - #- 0.8717297026D11 * c(15) * t101 - 0.9750000000D1 * a * c(14) + 0. - #1538798780D20 * c(25) * t4 + 0.2585201366D25 * c(26) * t121 + 0.24 - #36551416D26 * c(30) * t67 - 0.6476079961D17 * c(27) * t101 + 0.592 - #7942890D14 * c(18) * t85 + 0.2393838562D9 * c(13) * t74 - 0.138067 - #5944D16 * c(30) * t9 - 0.3847011625D21 * c(26) * t5 + 0.2058787500 - #D6 * c(24) * t2 - t986 = 0.7211401312D8 * c(24) * t25 - 0.1231043720D21 * c(25) * t5 - # + 0.3989730938D8 * c(12) * t74 + 0.1150875000D5 * c(13) * t2 - 0. 
- #1237500000D4 * c(13) * t8 - 0.1012095D7 * c(19) * t12 - 0.18032749 - #64D12 * c(23) * t17 - 0.4590D4 * c(19) * t8 - 0.1010314723D20 * c( - #27) * t32 - 0.1778410004D15 * c(18) * t32 - 0.6412500000D4 * c(21) - # * t8 - 0.4826891896D15 * c(21) * t101 + 0.3497189908D19 * c(29) * - # t85 - 0.7835603344D10 * c(30) * t31 + 0.1216448684D18 * c(20) * t - #28 - t991 = t240 + t177 + t145 + t839 + t375 + t592 + t778 + t685 + t46 - #7 + t892 + t303 + t623 + t79 + t562 + t746 + t271 + t861 + t654 + - #t955 + t986 + t112 + t43 + t437 + t923 + t332 + t809 + t406 + t348 - # + t210 + t531 + t715 + t498 - - chebint2big = t991 - end - -CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC - - - Subroutine SumRulesAS -C--------------------------------------------------------------- -C -C Created 20 Jul 2011 by VR. Add sum-rules for A.Schoening parameterisation -C -C--------------------------------------------------------------- - implicit none -#include "pdfparam.inc" - - double precision sumUv, sumDv - double precision sumMom, sumGlue,x - integer i - double precision SumRuleASpar,splogn - double precision ubar,dbar,uval,dval,gluon - integer IDebug - data IDebug/0/ - -C--------------------------------------------------------------- - -C Counting sum-rule for uv: - sumUv = SumRuleASpar(-1,asuval) - asuval(1) = uvalSum / sumUv - -C Counting sum-rule for dv: - sumDv = SumRuleASpar(-1,asdval) - asdval(1) = dvalSum / sumDv - -C Momentum sum rule: - sumMom = 2.D0*asubar(1)*SumRuleASpar(0,asubar) + - $ 2.D0*asdbar(1)*SumRuleASpar(0,asdbar) + - $ asuval(1)*SumRuleASpar(0,asuval) + - $ asdval(1)*SumRuleASpar(0,asdval) - sumGlue = SumRuleASpar(0,asglue) - asglue(1) = (1.0 - SumMom)/sumGlue - - if (IDebug.eq.1) then - print '(''uv:'',5F10.4)',(asuval(i),i=1,5) - print '(''dv:'',5F10.4)',(asdval(i),i=1,5) - print '(''Ub:'',5F10.4)',(asubar(i),i=1,5) - print '(''Db:'',5F10.4)',(asdbar(i),i=1,5) - print '(''GL:'',5F10.4)',(asglue(i),i=1,5) - endif - - - if (IDebug.eq.10) then - do i=1,8 - x = 10**(-i/2.) - print '(6F12.5)',x,splogn(x,asuval),splogn(x,asdval), - $ splogn(x,asubar),splogn(x,asdbar),splogn(x,asglue) - print '(6F12.5)',x,uval(x),dval(x),ubar(x),dbar(x),gluon(x) - enddo - endif - - return - end - - double precision function SumRuleASpar(n,aas) -C--------------------------------------------------------------- -C A wrapper for A. Schoening sum-rule integral. 
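The A. Schoening parameterisation handled by SumRulesAS above uses the special log-normal form xf(x) = A1 * x^(A2-A3*log(x)) * (1-x)^(A4-A5*log(1-x)) (see splognni below), whose counting and momentum moments have no convenient closed form, so they are integrated numerically with a composite Simpson rule; the momentum sum rule then fixes the gluon normalisation via asglue(1) = (1.0 - SumMom)/sumGlue. A minimal, self-contained sketch of the composite Simpson rule on [0,1] (illustrative only, not the xFitter routine; simpson01 and the toy integrand are made up for the example):

    #include <cmath>
    #include <cstdio>
    #include <functional>

    // Composite Simpson rule on [0,1]: h/3 * [ f(0) + 4*f(odd nodes) + 2*f(even nodes) + f(1) ]
    double simpson01(const std::function<double(double)>& f, int n = 1000) { // n must be even
      const double h = 1.0 / n;
      double sum = f(0.0) + f(1.0);
      for (int i = 1; i < n; ++i) sum += (i % 2 ? 4.0 : 2.0) * f(i * h);
      return sum * h / 3.0;
    }

    int main() {
      // Toy check against a Beta-function moment: int_0^1 x^0.7 (1-x)^3 dx = B(1.7,4) ~ 0.0751
      double m = simpson01([](double x) { return std::pow(x, 0.7) * std::pow(1.0 - x, 3.0); });
      std::printf("moment = %.4f\n", m);
    }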
-C -C parameterisation where n = 0 (momentum sum-rule) or -1 ( counting sum rule) -C--------------------------------------------------------------- - implicit none - integer n - integer i - double precision aas(1:5), aass(1:5) - double precision splognni - - do i=1,5 - aass(i)=aas(i) - enddo - aass(1)=1.d0 - aass(2)=aass(2)+n - - SumRuleASpar=splognni(aass) - - if (SumRuleASpar.eq.0) then - print*, 'sum rule is ZERO---- ERROR' - STOP - endif - return - end - double precision function splognni(as) -C--------------------------------------------------------------- -* Numerical Integration of the -* Special lognormal function -* using the Simpson method -* -* -* A.Schoening, University Heidelberg, Physikalisches Institut -* Creation: 12.6.2011 -* -* ASPDF = A1*x**(A2-A3*log(x))*(1-x)**(A4-A5*log(1-x)) -C--------------------------------------------------------------- - implicit none - - integer np,i - data np /1000/ - - double precision as(1:5) - double precision xas,xnas - double precision splogn - - double precision peak,h,sum,xlmin - data xlmin /-10.0/ - - logical logflag - data logflag /.true./ - - logical falling - data falling /.false./ -c data falling /.true./ - double precision eps - data eps /0.001d0/ - double precision f1,f2,f3 - - -C----------------------------------------------------- - - f1=splogn(eps,as) - f2=splogn(0.5d0,as) - f3=splogn((1.d0-eps),as) - - if (f1.gt.f2+f3) then - logflag=.true. - falling=.true. - elseif (f3.gt.f2+f1) then - logflag=.true. - falling=.false. - else - logflag=.false. - endif -c print *,logflag,falling - -c linear integration - if (.not.logflag) then - h=1.d0/float(np) - sum=0.d0 -c weight 4 - do i=1,np-1,2 - xas=i*h - sum=sum+splogn(xas,as) - enddo - sum=2.d0*sum -c weight 2 - do i=2,np-2,2 - xas=i*h - sum=sum+splogn(xas,as) - enddo - sum=2.d0*sum - sum=sum+splogn(0.d0,as)+splogn(1.d0,as) - splognni=h/3.d0*sum - else -c steeply falling distribution - - if (falling) then - - h=-xlmin/float(np) - sum=0.d0 -c weight 4 - do i=1,np-1,2 - xas=10.d0**(xlmin+i*h) - sum=sum+xas*splogn(xas,as) - enddo - sum=2.d0*sum -c weight 2 - do i=0,np-2,2 - xas=10.d0**(xlmin+i*h) - sum=sum+xas*splogn(xas,as) - enddo - sum=2.d0*sum - xas=1.d0 - sum=sum+splogn(xas,as) - splognni=h/3.d0*sum*log(10.d0) - - - else -c steeply rising distribution - h=-xlmin/float(np) - sum=0.d0 -c weight 4 - do i=1,np-1,2 - xas=10.d0**(xlmin+i*h) - xnas=1.d0-xas - sum=sum+xas*splogn(xnas,as) -* print *,x,x - enddo - sum=2.d0*sum -c weight 2 - do i=0,np-2,2 - xas=10.d0**(xlmin+i*h) - xnas=1.d0-xas - sum=sum+xas*splogn(xnas,as) - enddo - sum=2.d0*sum - xas=0.d0 - sum=sum+splogn(xas,as) - splognni=h/3.d0*sum*log(10.d0) - - endif - - endif - if (splognni.eq.0) then - print*, 'sum rule is ZERO---- Warning', sum, logflag, falling - splognni=1d-10 - endif - return - end - - - - -ccccccccccccccccccccccccccccccccccccccccc - - Subroutine SumRulesCTeq -C--------------------------------------------------------------- -C -C Created 22 Apr 2011 by SG. 
Add sum-rules for CTEQ-like parameterisation -C -C--------------------------------------------------------------- - implicit none -#include "steering.inc" -#include "pdfparam.inc" - double precision sumUv, sumDv - double precision sumMom, sumGlue,x - integer i - double precision SumRuleCTEQ,ctpara, tStr - double precision ubar,dbar,uval,dval,gluon,str - double precision CalcIntegral - - integer IDebug - data IDebug/0/ - -C--------------------------------------------------------------- - -C Counting sum-rule for uv: - sumUv = SumRuleCTEQ(-1,ctuval) - ctuval(1) = uvalSum / sumUv - -C Counting sum-rule for dv: - sumDv = SumRuleCTEQ(-1,ctdval) - ctdval(1) = dvalSum / sumDv - -C Momentum sum rule: -C---------------- -C Sea: - - - if (Index(PDF_DECOMPOSITION,'Str').gt.0) then - tStr = ctstr(1)*SumRuleCTEQ(0,ctstr) - else - tStr = 0 ! Strange already included in Dbar - endif - - sumMom = 2.D0*ctubar(1)*SumRuleCTEQ(0,ctubar) + - $ 2.D0*ctdbar(1)*SumRuleCTEQ(0,ctdbar) + - $ ctuval(1)*SumRuleCTEQ(0,ctuval) + - $ ctdval(1)*SumRuleCTEQ(0,ctdval)+ 2.D0*tStr - sumGlue = SumRuleCTEQ(0,ctglue) - sumMom = sumMom - ctglue(7)*CalcIntegral(ctglue(8),ctglue(9)) - - - - ctglue(1) = (1.0 - SumMom)/sumGlue - - - - if (IDebug.eq.1) then - print '(''uv:'',9F10.4)',(ctuval(i),i=1,9) - print '(''dv:'',9F10.4)',(ctdval(i),i=1,9) - print '(''Ub:'',9F10.4)',(ctubar(i),i=1,9) - print '(''Db:'',9F10.4)',(ctdbar(i),i=1,9) - print '(''GL:'',9F10.4)',(ctglue(i),i=1,9) - print '(''ST:'',x9F10.4)',(ctstr(i),i=1,9) - endif - if (IDebug.eq.10) then - do i=1,8 - x = 10**(-i/2.) - print '(7F12.5)',x,ctpara(x,ctuval),ctpara(x,ctdval), - $ ctpara(x,ctubar),ctpara(x,ctdbar),ctpara(x,ctglue), - $ ctpara(x,ctstr) - print '(7F12.5)',x,uval(x),dval(x),ubar(x),dbar(x), - $ gluon(x),str(x) - enddo - endif - - end - - -ccccccccccccccccccccccccccccccccccccccccc - - Subroutine SumRulesCTEQHera -C--------------------------------------------------------------- -C -C Sum-rules for CTEQ-HERA hybrid parameterisation -C -C--------------------------------------------------------------- - implicit none -#include "steering.inc" -#include "pdfparam.inc" - double precision sumUv, sumDv - double precision sumMom, sumGlue,x - integer i - double precision SumRuleCTEQhera,ctherapara, tStr - double precision ubar,dbar,uval,dval,gluon,str - double precision CalcIntegral - - integer IDebug - data IDebug/0/ - -C--------------------------------------------------------------- - -C Counting sum-rule for uv: - sumUv = SumRuleCTEQhera(-1,ctuval) - ctuval(1) = uvalSum / sumUv - -C Counting sum-rule for dv: - sumDv = SumRuleCTEQhera(-1,ctdval) - ctdval(1) = dvalSum / sumDv - -C Momentum sum rule: -C---------------- -C Sea: - - - if (Index(PDF_DECOMPOSITION,'Str').gt.0) then - tStr = ctstr(1)*SumRuleCTEQhera(0,ctstr) - else - tStr = 0 ! Strange already included in Dbar - endif - - - sumMom = 2.D0*ctubar(1)*SumRuleCTEQhera(0,ctubar) + - $ 2.D0*ctdbar(1)*SumRuleCTEQhera(0,ctdbar) + - $ ctuval(1)*SumRuleCTEQhera(0,ctuval) + - $ ctdval(1)*SumRuleCTEQhera(0,ctdval)+ 2.D0*tStr - sumGlue = SumRuleCTEQhera(0,ctglue) - sumMom = sumMom - ctglue(7)*CalcIntegral(ctglue(8),ctglue(9)) - - - - ctglue(1) = (1.0 - SumMom)/sumGlue - - - - if (IDebug.eq.1) then - print '(''uv:'',9F10.4)',(ctuval(i),i=1,9) - print '(''dv:'',9F10.4)',(ctdval(i),i=1,9) - print '(''Ub:'',9F10.4)',(ctubar(i),i=1,9) - print '(''Db:'',9F10.4)',(ctdbar(i),i=1,9) - print '(''GL:'',9F10.4)',(ctglue(i),i=1,9) - print '(''ST:'',x9F10.4)',(ctstr(i),i=1,9) - endif - if (IDebug.eq.10) then - do i=1,8 - x = 10**(-i/2.) 
- print '(7F12.5)',x,ctherapara(x,ctuval),ctherapara(x,ctdval), - $ ctherapara(x,ctubar),ctherapara(x,ctdbar),ctherapara(x,ctglue), - $ ctherapara(x,ctstr) - print '(7F12.5)',x,uval(x),dval(x),ubar(x),dbar(x), - $ gluon(x),str(x) - enddo - endif - - end - - - - double precision function SumRuleCTEQ(n,acteq) -C--------------------------------------------------------------- -C Sum-rule integral for -C -C UF = a0*E**(a3*x)*(1 - x)**a2*x**(a1 + n)*(1 + E**a4*x + E**a5*x**2) -C -C parameterisation where n = 0 (momentum sum-rule) or -1 ( counting sum rule) -C--------------------------------------------------------------- - implicit none - integer n - double precision acteq(1:6) - double precision YF - double precision DGammF,HypG1F1r, HypG1F1 -C----------------------------------------------------- - YF = ( - & DGammF(1 + acteq(3) )*DGammF(1 + acteq(2) + n)*( - & Hypg1F1(1 + acteq(2) + n,2 + acteq(2) + acteq(3) + n, - $ acteq(4)) + - & (1 + acteq(2) + n)*DGammF(2 + acteq(2) + acteq(3) + n)* - & ( exp(acteq(5))* - & Hypg1F1R(2 + acteq(2) + n,3 + acteq(2) - $ + acteq(3) + n,acteq(4)) + - & exp(acteq(6))*(2 + acteq(2) + n)* - & Hypg1F1R(3 + acteq(2) + n,4 + acteq(2) - $ + acteq(3) + n,acteq(4)) - & ) - & ) - & )/DGammF(2 + acteq(2) + acteq(3) + n) - - SumRuleCTEQ = YF - - end - - double precision function SumRuleCTEQhera(n,acteq) -C--------------------------------------------------------------- -C Sum-rule integral for -C -C UF = a1*exp(a6*x)*(1 - x)**a3*x**(a2 + n)*(1 + a4*x + a5*x**2) -C -C parameterisation where n = 0 (momentum sum-rule) or -1 ( counting sum rule) -C--------------------------------------------------------------- - implicit none - integer n - double precision acteq(1:6) - double precision YF - double precision DGammF,HypG1F1r, HypG1F1 -C----------------------------------------------------- - YF = ( - & DGammF(1 + acteq(3) )*DGammF(1 + acteq(2) + n)*( - & Hypg1F1(1 + acteq(2) + n,2 + acteq(2) + acteq(3) + n, - $ acteq(6)) + - & (1 + acteq(2) + n)*DGammF(2 + acteq(2) + acteq(3) + n)* - & ( acteq(4)* - & Hypg1F1R(2 + acteq(2) + n,3 + acteq(2) - $ + acteq(3) + n,acteq(6)) + - & acteq(5)*(2 + acteq(2) + n)* - & Hypg1F1R(3 + acteq(2) + n,4 + acteq(2) - $ + acteq(3) + n,acteq(6)) - & ) - & ) - & )/DGammF(2 + acteq(2) + acteq(3) + n) - - SumRuleCTEQhera = YF - - end - - double precision function HYPG1F1R(a,b,z) -cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc -c Regularized Confluent Hypergeometric function 1F1 -c -c Implementation: A.Schoening, University Heidelberg -c Date: 08.04.2011 -cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc - implicit none - double precision HYPG1F1 - double precision a,b,z - double precision DGammF - - if (b.lt.0) then - print *,'HYPG1F1R Warning, function not defined for b negative' - HYPG1F1R=0.0 - return - endif - HYPG1F1R=hypg1f1(a,b,z)/DGammF(b) - return - end - - double precision function HYPG1F1(a0,b,z0) -cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc -c Confluent Hypergeometric function 1F1 -c -c Implementation: A.Schoening, University Heidelberg -c Date: 08.04.2011 -cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc - implicit none - double precision a0,a,b,z,z0 - - integer n - double precision prec - data prec /1e-10/ - - double precision abnzfact - double precision factor - double precision old - -c Kummer's transformation - if (z0.lt.0) then - a=b-a0 - z=-z0 - factor=exp(z0) - else - a=a0 - z=z0 - factor=1.0 - endif - - n=1 - abnzfact=1.0 - hypg1f1=1.0 
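The closed-form moments in SumRuleCTEQ and SumRuleCTEQhera above are expressed through the (regularised) confluent hypergeometric function, and the loop below accumulates its defining series term by term (abnzfact is the ratio of consecutive terms); for z < 0 the routine has already applied Kummer's transformation, coded just above, to avoid large cancellations in an alternating series. For reference:

    {}_1F_1(a;b;z) = \sum_{n\ge 0} \frac{(a)_n}{(b)_n}\,\frac{z^n}{n!} ,
    \qquad
    {}_1F_1(a;b;z) = e^{z}\,{}_1F_1(b-a;\,b;\,-z) ,
    \qquad
    {}_1\tilde F_1(a;b;z) = \frac{{}_1F_1(a;b;z)}{\Gamma(b)} ,

where (a)_n = a(a+1)\cdots(a+n-1) is the Pochhammer symbol and the last relation is the regularised form returned by HYPG1F1R.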
- - 10 continue - abnzfact=abnzfact*(a+n-1.0)/(b+n-1.0)/n*z - hypg1f1=hypg1f1+abnzfact - -c precision reached - if (abs(abnzfact).lt.abs(hypg1f1)*prec) goto 99 - n=n+1 -c print *,'HYPG1F1: n=',n,' hpyg1f1=',hypg1f1,abnzfact,z - if (n.lt.1000) goto 10 - print *,'HYPG1F1: no convergence for ',a,b,z - - 99 continue - hypg1f1=hypg1f1*factor - - return - end - - REAL*8 FUNCTION SSDINT(XL,F,XR) -C----------------------------------------------------------------------- -C Integrate REAL*8 F over REAL*8 (XL,XR) -C Note quadrature constants R and W have been converted to explicit -C REAL*8 (.xxxxxDxx) form. -C -C Bisset's XINTH -C----------------------------------------------------------------------- - IMPLICIT NONE -*KEEP,SSLUN. -*KEND. - EXTERNAL F - INTEGER NMAX - REAL*8 TOLABS,TOLREL,XLIMS(200) - REAL*8 R(93),W(93) - INTEGER PTR(4),NORD(4) - INTEGER ICOUNT - REAL*8 XL,XR,F - REAL*8 AA,BB,TVAL,VAL,TOL - INTEGER NLIMS,I,J,KKK -C - DATA PTR,NORD/4,10,22,46, 6,12,24,48/ - DATA (R(KKK),KKK=1,48)/ - + .2386191860D0,.6612093865D0,.9324695142D0,.1252334085D0, - + .3678314990D0,.5873179543D0,.7699026742D0,.9041172563D0, - + .9815606342D0,.0640568929D0,.1911188675D0,.3150426797D0, - + .4337935076D0,.5454214714D0,.6480936519D0,.7401241916D0, - + .8200019860D0,.8864155270D0,.9382745520D0,.9747285560D0, - + .9951872200D0,.0323801710D0,.0970046992D0,.1612223561D0, - + .2247637903D0,.2873624873D0,.3487558863D0,.4086864820D0, - + .4669029048D0,.5231609747D0,.5772247261D0,.6288673968D0, - + .6778723796D0,.7240341309D0,.7671590325D0,.8070662040D0, - + .8435882616D0,.8765720203D0,.9058791367D0,.9313866907D0, - + .9529877032D0,.9705915925D0,.9841245837D0,.9935301723D0, - + .9987710073D0,.0162767488D0,.0488129851D0,.0812974955D0/ - DATA (R(KKK),KKK=49,93)/ - + .1136958501D0,.1459737146D0,.1780968824D0,.2100313105D0, - + .2417431561D0,.2731988126D0,.3043649444D0,.3352085229D0, - + .3656968614D0,.3957976498D0,.4254789884D0,.4547094222D0, - + .4834579739D0,.5116941772D0,.5393881083D0,.5665104186D0, - + .5930323648D0,.6189258401D0,.6441634037D0,.6687183100D0, - + .6925645366D0,.7156768123D0,.7380306437D0,.7596023411D0, - + .7803690438D0,.8003087441D0,.8194003107D0,.8376235112D0, - + .8549590334D0,.8713885059D0,.8868945174D0,.9014606353D0, - + .9150714231D0,.9277124567D0,.9393703398D0,.9500327178D0, - + .9596882914D0,.9683268285D0,.9759391746D0,.9825172636D0, - + .9880541263D0,.9925439003D0,.9959818430D0,.9983643759D0, - + .9996895039/ - DATA (W(KKK),KKK=1,48)/ .4679139346D0,.3607615730D0, - +.1713244924D0,.2491470458D0, .2334925365D0,.2031674267D0, - +.1600783285D0,.1069393260D0, .0471753364D0,.1279381953D0, - +.1258374563D0,.1216704729D0, .1155056681D0,.1074442701D0, - +.0976186521D0,.0861901615D0, .0733464814D0,.0592985849D0, - +.0442774388D0,.0285313886D0, .0123412298D0,.0647376968D0, - +.0644661644D0,.0639242386D0, .0631141923D0,.0620394232D0, - +.0607044392D0,.0591148397D0, .0572772921D0,.0551995037D0, - +.0528901894D0,.0503590356D0, .0476166585D0,.0446745609D0, - +.0415450829D0,.0382413511D0, .0347772226D0,.0311672278D0, - +.0274265097D0,.0235707608D0, .0196161605D0,.0155793157D0, - +.0114772346D0,.0073275539D0, .0031533461D0,.0325506145D0, - +.0325161187D0,.0324471637D0/ - DATA (W(KKK),KKK=49,93)/ - + .0323438226D0,.0322062048D0,.0320344562D0,.0318287589D0, - + .0315893308D0,.0313164256D0,.0310103326D0,.0306713761D0, - + .0302999154D0,.0298963441D0,.0294610900D0,.0289946142D0, - + .0284974111D0,.0279700076D0,.0274129627D0,.0268268667D0, - + .0262123407D0,.0255700360D0,.0249006332D0,.0242048418D0, - + 
.0234833991D0,.0227370697D0,.0219666444D0,.0211729399D0, - + .0203567972D0,.0195190811D0,.0186606796D0,.0177825023D0, - + .0168854799D0,.0159705629D0,.0150387210D0,.0140909418D0, - + .0131282296D0,.0121516047D0,.0111621020D0,.0101607705D0, - + .0091486712D0,.0081268769D0,.0070964708D0,.0060585455D0, - + .0050142027D0,.0039645543D0,.0029107318D0,.0018539608D0, - + .0007967921/ -C -C DATA TOLABS,TOLREL,NMAX/1.D-35,5.D-5,100/ -C DATA TOLABS,TOLREL,NMAX/1.D-30,5.D-4,200/ - - DATA TOLABS,TOLREL,NMAX/1.D-15,2.D-2,100/ - -C - SSDINT=0 - NLIMS=2 - XLIMS(1)=XL - XLIMS(2)=XR - ICOUNT=0 -C - 10 AA=(XLIMS(NLIMS)-XLIMS(NLIMS-1))/2 - BB=(XLIMS(NLIMS)+XLIMS(NLIMS-1))/2 - TVAL=0 - DO 20 I=1,3 - 20 TVAL=TVAL+W(I)*(F(BB+AA*R(I))+F(BB-AA*R(I))) - TVAL=TVAL*AA - DO 40 J=1,4 - VAL=0 - DO 30 I=PTR(J),PTR(J)-1+NORD(J) - ICOUNT=ICOUNT+1 - IF(ICOUNT.GT.1E5) THEN - WRITE(1,*) 'WARNING IN SSDINT: SET SSDINT TO ZERO' - WRITE(6,*) 'WARNING IN SSDINT: SET SSDINT TO ZERO' - SSDINT=0. - RETURN - ENDIF - 30 VAL=VAL+W(I)*(F(BB+AA*R(I))+F(BB-AA*R(I))) - VAL=VAL*AA - TOL=MAX(TOLABS,TOLREL*ABS(VAL)) - IF(ABS(TVAL-VAL).LT.TOL) THEN - SSDINT=SSDINT+VAL - NLIMS=NLIMS-2 - IF (NLIMS.NE.0) GO TO 10 - RETURN - ENDIF - 40 TVAL=VAL - IF(NMAX.EQ.2) THEN - SSDINT=VAL - RETURN - END IF - IF(NLIMS.GT.(NMAX-2)) THEN - WRITE(1,10000) SSDINT,NMAX,BB-AA,BB+AA - WRITE(6,10000) SSDINT,NMAX,BB-AA,BB+AA - RETURN - ENDIF - XLIMS(NLIMS+1)=BB - XLIMS(NLIMS+2)=BB+AA - XLIMS(NLIMS)=BB - NLIMS=NLIMS+2 - GO TO 10 -C -10000 FORMAT (' SSDINT FAILS, SSDINT,NMAX,XL,XR=',G15.7,I5,2G15.7) - END - - diff --git a/src/theory_dispatcher.f b/src/theory_dispatcher.f index 341d80f56217d8449d00fc5632acd8c001be2244..717accb59df78705312832d91d727d6bd5bcf818 100644 --- a/src/theory_dispatcher.f +++ b/src/theory_dispatcher.f @@ -29,20 +29,6 @@ C------------------------------------------------------------------- endif end - - Subroutine GetTheoryIteration -C--------------------------------------------------------------------- -C -C Created 24/06/2011. Get theory calculation per iteration, before going into individual datasets -C -C--------------------------------------------------------------------- - implicit none - -C Also for reactions: - call init_at_iteration - - end - !> Copy theo_fix to theory for a dataset Subroutine UseFixedTheoryXsection(ISet) implicit none diff --git a/src/xfitter_cpp_base.cc b/src/xfitter_cpp_base.cc index cca3d39cc33877153b910dd7f047f068ea4fea69..1dea743196221611d5ff1d15dcc5d4a1443f0112 100644 --- a/src/xfitter_cpp_base.cc +++ b/src/xfitter_cpp_base.cc @@ -21,3 +21,13 @@ int OrderMap(std::string ord) { void hf_errlog(int id,const std::string& message) { hf_errlog_(id,message.c_str(),message.size()); } +std::string stringFromFortran(char*s,size_t size){ + char*p=s+size; + while(p>s){ + if(*(--p)!=' '){ + ++p; + break; + } + } + return std::string(s,p-s); +} diff --git a/src/xfitter_pars.cc b/src/xfitter_pars.cc index bcbedaf72488004cc4a4732d4e77e413b0e6dc21..7533eac426dcd3157ef02fc40c49c14d85a853b2 100644 --- a/src/xfitter_pars.cc +++ b/src/xfitter_pars.cc @@ -254,37 +254,8 @@ void expandIncludes(YAML::Node&node,unsigned int recursionLimit=256){ } catch (const std::exception& e) {} } else { // Potentially this may go to minuit, if step is not zero. 
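The stringFromFortran helper added to xfitter_cpp_base.cc above converts a fixed-width, blank-padded Fortran CHARACTER buffer (no terminating NUL) into a std::string by trimming trailing spaces; further down it is used to turn the Fortran output-directory name into the path of Status.out. A small stand-alone illustration (the buffer contents and main() are made up for the example; the trimming logic mirrors the helper):

    #include <cstring>
    #include <iostream>
    #include <string>

    // Same trimming logic as the new helper: drop trailing blanks of a
    // fixed-size, non-NUL-terminated Fortran character field.
    static std::string stringFromFortran(char* s, size_t size) {
      char* p = s + size;
      while (p > s) { if (*(--p) != ' ') { ++p; break; } }
      return std::string(s, p - s);
    }

    int main() {
      char outdir[8];
      std::memcpy(outdir, "output  ", 8);   // Fortran-style: padded with blanks
      std::cout << stringFromFortran(outdir, sizeof outdir) << "/Status.out\n";
      // prints: output/Status.out
    }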
- if (value.IsMap()) { //This is probably not how it should work --Ivan - // Check if this is a minimisation block, true if step is present - if (value["step"] || value["value"]) { - // Defaults - double val = 0; - double step = 0; - double minv = 0; - double maxv = 0; - double priorVal = 0; - double priorUnc = 0; - int add = true; - - if (value["value"]) { - val = value["value"].as<double>(); - } - else { - string text = "F: missing value field for parameter " + p_name; - hf_errlog_(17032401,text.c_str(),text.size()); - } - if (value["step"]) step = value["step"].as<double>(); - if (value["prior"]) priorVal = value["prior"].as<double>(); - if (value["priorUnc"]) priorUnc = value["priorUnc"].as<double>(); - if (value["min"]) minv = value["min"].as<double>(); - if (value["max"]) maxv = value["max"].as<double>(); - // Goes to fortran - addexternalparam_(p_name.c_str(), val, step, minv, maxv, - priorVal, priorUnc, add, &dMap, p_name.size()); - } else { - // no step or value, store as it is as a yaml node: - yMap[p_name] = value; - } + if (value.IsMap()) { + yMap[p_name]=value; } else if (value.IsSequence() ) { size_t len = value.size(); vector<double> v(len); @@ -399,6 +370,10 @@ void expandIncludes(YAML::Node&node,unsigned int recursionLimit=256){ for(YAML::const_iterator it=parsNode.begin();it!=parsNode.end();++it){ string parameterName=it->first.as<string>(); stripString(parameterName); + if(XFITTER_PARS::gParameters.find(parameterName)!=XFITTER_PARS::gParameters.end()){ + cerr<<"[ERROR] Redefinition of parameter \""<<parameterName<<"\""<<endl; + hf_errlog(18112810,"F: Parameter redefinition, see stderr"); + } double value=nan(""); double step=nan(""); double min=nan(""); @@ -407,7 +382,7 @@ void expandIncludes(YAML::Node&node,unsigned int recursionLimit=256){ double pr_sigma=nan(""); YAML::Node pNode=it->second; switch(pNode.Type()){ - case YAML::NodeType::Scalar:{//Should be a special string DEPENDENT + case YAML::NodeType::Scalar:{ string definition=pNode.as<string>(); stripString(definition); if(definition=="DEPENDENT"||definition=="SUMRULE"){//This means that this parameter will be calculated using sum rules diff --git a/src/xfitter_steer.cc b/src/xfitter_steer.cc index b6f32d1b99a9f183124848b77a9b048b743447eb..590475facffe277a59d0f5b7ef515e1bf530797e 100644 --- a/src/xfitter_steer.cc +++ b/src/xfitter_steer.cc @@ -1,15 +1,18 @@ #include "xfitter_steer.h" #include "xfitter_pars.h" #include "xfitter_cpp_base.h" +#include"xfitter_cpp.h" #include "BaseEvolution.h" #include "BasePdfDecomposition.h" #include "BaseMinimizer.h" #include <dlfcn.h> #include <iostream> +#include<fstream> #include <yaml-cpp/yaml.h> #include <Profiler.h> using std::string; +using std::cerr; extern std::map<string,string> gReactionLibs; @@ -20,13 +23,16 @@ void*createDynamicObject(const string&classname,const string&instanceName){ try{ libpath=PREFIX+string("/lib/")+gReactionLibs.at(classname); }catch(const std::out_of_range&ex){ - std::cerr<<"[ERROR] out_of_range in function "<<__func__<<":\n"<<ex.what()<<"\n[/ERROR]\n"; std::ostringstream s; if(gReactionLibs.count(classname)==0){ - s<<"F: Unknown dynamically loaded class \""<<classname<<"\""; + cerr<<"[ERROR] Unknown dynamically loaded class \""<<classname<<"\"" + "\nMake sure that "<<PREFIX<<"/lib/Reactions.txt has an entry for this class" + "\n[/ERRROR]"<<endl; + s<<"F: Unknown dynamically loaded class \""<<classname<<"\", see stderr"; hf_errlog(18091901,s.str().c_str()); } - s<<"F: Unknown out_of_range exception in "<<__func__; + cerr<<"[ERROR] Unknown 
out_of_range in function "<<__func__<<":\n"<<ex.what()<<"\n[/ERROR]\n"; + s<<"F: Unknown out_of_range exception in "<<__func__<<", see stderr"; hf_errlog(18091902,s.str().c_str()); } void*shared_library=dlopen(libpath.c_str(),RTLD_NOW); @@ -83,7 +89,7 @@ namespace xfitter { const int errcode=18092401; const char*errmsg="F: YAML::InvalidNode exception while creating decomposition, details written to stderr"; using namespace std; - cerr<<"[ERROR]"<<__func__<<'('<<name<<')'<<endl; + cerr<<"[ERROR]"<<__func__<<"(\""<<name<<"\")"<<endl; YAML::Node node=XFITTER_PARS::getDecompositionNode(name); if(!node.IsMap()){ cerr<<"Invalid node Decompositions/"<<name<<"\nnode is not a map\n[/ERROR]"<<endl; @@ -97,7 +103,8 @@ namespace xfitter { hf_errlog(errcode,errmsg); } cerr<<"Unexpected YAML exception\nNode:\n"<<node<<"\n[/ERROR]"<<endl; - hf_errlog(errcode,errmsg); + throw ex; + //hf_errlog(errcode,errmsg); } } BasePdfParam*getParameterisation(const string&name){ @@ -179,9 +186,12 @@ extern "C" { void run_error_analysis_(); } +namespace xfitter{ + BaseEvolution*defaultEvolution=nullptr;//declared in xfitter_steer.h +} +//Make sure default evolution exists void init_evolution_() { - //TODO: reimplement for new interface with multiple evolutions - //auto evol = xfitter::get_evolution(); + xfitter::defaultEvolution=xfitter::get_evolution(); } void init_minimizer_() { @@ -200,9 +210,23 @@ void run_minimizer_() { } void report_convergence_status_(){ - //Get a status code from current minimizer and log a message + //Get a status code from current minimizer and log a message, write status to file Status.out using namespace xfitter; - switch(get_minimizer()->convergenceStatus()){ + auto status=get_minimizer()->convergenceStatus(); + //Write status to Status.out + { + std::ofstream f; + f.open(stringFromFortran(coutdirname_.outdirname,sizeof(coutdirname_.outdirname))+"/Status.out"); + if(!f.is_open()){ + hf_errlog(16042807,"W: Failed to open Status.out for writing"); + return; + } + if(status==ConvergenceStatus::SUCCESS)f<<"OK"; + else f<<"Failed"; + f.close(); + } + //Log status message + switch(status){ case ConvergenceStatus::NORUN: hf_errlog(16042801,"I: No minimization has run"); break; @@ -229,3 +253,14 @@ void run_error_analysis_() { mini->errorAnalysis(); } +namespace xfitter{ +void updateAtConfigurationChange(){ + //Call atConfigurationChange for each evolution and for each decomposition + for(map<string,BaseEvolution*>::const_iterator it=XFITTER_PARS::gEvolutions.begin();it!=XFITTER_PARS::gEvolutions.end();++it){ + it->second->atConfigurationChange(); + } + for(map<string,BasePdfDecomposition*>::const_iterator it=XFITTER_PARS::gPdfDecompositions.begin();it!=XFITTER_PARS::gPdfDecompositions.end();++it){ + it->second->atConfigurationChange(); + } +} +} diff --git a/steering.txt b/steering.txt index 580cc1d21ac3e92db6f860c0fb26dc3e25693190..dedab6d6ff13c48cf5b3639f3e92beab6258e3c9 100644 --- a/steering.txt +++ b/steering.txt @@ -117,11 +117,6 @@ !uPDF3 fit using precalculated grid of sigma_hat !uPDF4 fit calculating kernel on fly, grid of sigma_hat - - Order = 'NNLO' ! 'LO', 'NLO' or 'NNLO', used for DGLAP evolution. - - Q02 = 1.9 ! Evolution starting scale - ! --- Scheme for heavy flavors ! --- HF_SCHEME = 'ZMVFNS' : ZM-VFNS (massless) from QCDNUM, ! --- HF_SCHEME = 'ZMVFNS MELA' : ZM-VFNS (massless) from MELA (N-space), @@ -154,25 +149,6 @@ ! (Any of the FONLL schemes at LO is equivalent to the ZM-VFNS) HF_SCHEME = 'RT OPT' - ! PDF type. Possible types are currently available: - ! 
'proton' -- default (fitting proton data) - ! 'lead' -- fitting ONLY lead data (can't be used in combination with proton data) - - PDFType = 'proton' - - ! PDF parameterisation style. Possible styles are currently available: - ! 'HERAPDF' -- HERAPDF-like with uval, dval, Ubar, Dbar, glu evolved pdfs - ! 'CTEQ' -- CTEQ-like parameterisation - ! 'CTEQHERA' -- Hybrid: valence like CTEQ, rest like HERAPDF - ! 'CHEB' -- CHEBYSHEV parameterisation based on glu,sea, uval,dval evolved pdfs - ! 'LHAPDFQ0' -- use lhapdf library to define pdfs at starting scale and evolve with local qcdnum parameters - ! 'LHAPDF' -- use lhapdf library to define pdfs at all scales - ! 'LHAPDFNATIVE'-- use lhapdf library to access pdfs and alphas - ! 'DDIS' -- use Diffractive DIS - ! 'BiLog' -- bi-lognormal parametrisation - - PDFStyle = 'HERAPDF' - ! XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX ! ! Chi2 definition. Following options are supported: @@ -212,10 +188,6 @@ * Output steering cards * &Output - ! -- Error bands on parton distributions - DoBands = False ! asymmetric bands (J. Pumplin) - DoBandsSym = False ! symmetric bands ( HESSE ) - ! -- Q2 values at which the pdfs & errors are done (up to 20) Q2VAL = 1.9, 3.0, 4.0, 5., 10., 100., 6464, 8317 ! Q2VAL = 1.9, 4., 10., 100., 6464, 8317 @@ -382,8 +354,6 @@ * (Optional) LHAPDF sttering card * &lhapdf - LHAPDFSET = 'CT10nlo' ! LHAPDF grid file - ILHAPDFSET = 0 ! Set a PDF member of the PDF set (use together with LHAPDFPROFILE = False) ! LHAPDFVARSET = 'HERAPDF20_NLO_VAR' ! Add a PDF set with model and parametrisation uncertainties ! NPARVAR = 3 ! Number of parametrisation uncertainties in the LHAPDFVARSET set ! LHAPDFPROFILE = False ! run only on the set specified by ILHAPDFSET diff --git a/tools/AddEvolution.py b/tools/AddEvolution.py old mode 100644 new mode 100755 index 7e5366d55c6367213f7248bb7d1d50b7bb22c13d..d3aaad2333725dd71681dc650f48b479f55ff7eb --- a/tools/AddEvolution.py +++ b/tools/AddEvolution.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python ''' Script to generate templates for a new evolution module ''' @@ -7,169 +7,142 @@ import os import datetime if len(sys.argv)<2: - print ''' - Usage: AddEvolution.py NAME - ''' - exit(0) - -name = sys.argv[1] - -# First check if the name is already used - -with open("Reactions.txt","r+") as f: - for l in f: - a = l.split() - if a[0] == name: - print "Interface for evolution "+name+" already exists, exit" - exit(0) - -# Not present, add new line to the Evolutions.txt file - -with open("Reactions.txt","a") as f: - f.write(name+" "+"lib"+name.lower()+"_xfitter.so\n") - -# Create directory structures: - -print "Creating directories in evolutions/"+name - -os.system("mkdir -p evolutions/"+name+"/include") -os.system("mkdir -p evolutions/"+name+"/src") -os.system("mkdir -p evolutions/"+name+"/yaml") -os.system("touch evolutions/"+name+"/yaml/parameters.yaml") - - -print "Creating header file evolutions/"+name+"/include/Evolution"+name+".h" - -with open("evolutions/"+name+"/include/Evolution"+name+".h","w+") as f: - f.write( -''' + print "Usage: "+__file__+" NAME\n Expects working directory to be xFitter root directory" + exit(1) +if not os.path.isdir("evolutions"): + print "evolutions directory not found" + exit(2) +name=sys.argv[1] +prefix="evolutions/"+name +if os.path.isdir(prefix): + print prefix+" already exists" + exit(3) +classname=None +if name[-1].isupper(): + classname=name+"_Evol" +else: + classname=name+"Evol" + +print "Creating directories in "+prefix +os.makedirs(prefix+"/include") 
+os.makedirs(prefix+"/src") +os.makedirs(prefix+"/yaml") +formatDict={"name":name,"classname":classname,"date":datetime.date.today().isoformat(),"scriptname":__file__} + +filename=prefix+"/yaml/parameters.yaml" +print "Creating YAML file "+filename +with open(filename,"w") as f:f.write( +'''//Automatically generated by {scriptname} on {date} +class: {name} +#YOUR DEFAULT PARAMETERS HERE +'''.format(**formatDict)) + +filename=prefix+"/include/"+classname+".h" +print "Creating header file "+filename + +with open(filename,"w") as f: + f.write('''//Automatically generated by {scriptname} on {date} #pragma once +#include"BaseEvolution.h" -/** - @class' Evolution{NAME:s} - - @brief A wrapper class for {NAME:s} evolution +namespace xfitter{{ - @version 0.1 - @date {DATE:s} - */ +/** + @class {classname} -#include "BaseEvolution.h" + @brief A class for {name} evolution -namespace xfitter -{{ + ADD DESCRIPTION HERE -class Evolution{NAME:s} : BaseEvolution -{{ - public: - /// Empty constructor (needed for the dynamic loading) - Evolution{NAME:s}(): BaseEvolution("{NAME:s}",nullptr) {{}}; +*/ +class {classname}:public BaseEvolution{{ public: - /// Constructor setting the name - virtual std::string getEvolutionName() const {{ return "{NAME:s}" ;}}; - /// Global initialization - virtual void initAtStart() override final; - /// Init at each iteration - virtual void initAtIteration() override final; - - /// Return PDFs as a map <int,double> where int is PDF ID (-6, ... 6, 21) - virtual std::function<std::map<int,double>(double const& x, double const& Q)> xfxQMap() override final; - - /// Returns PDFs as a function of i, x, Q - virtual std::function<double(int const& i, double const& x, double const& Q)> xfxQDouble() override final; - - /// Returns PDFs as double pdfs* --> double[13] from -6 to 6. 
- virtual std::function<void(double const& x, double const& Q, double* pdfs)> xfxQArray() override final; - - /// Returns alphaS - virtual std::function<double(double const& Q)> AlphaQCD() override final; + {classname}(const char*name):BaseEvolution(name){{}} + virtual const char*getClassName()const override final{{return"{name}";}}; + virtual void atStart()override final; + virtual void atIteration()override final; + virtual void atConfigurationChange()override final; + virtual std::function<std::map<int,double>(double const&x,double const&Q)>xfxQMap()override final; + virtual std::function<void(double const&x,double const&Q,double*pdfs)>xfxQArray()override final; + virtual std::function<double(int const&i,double const&x,double const&Q)>xfxQDouble()override final; + virtual std::function<double(double const&Q)>AlphaQCD()override final; }}; - -}}; // namespace xfitter - -'''.format(NAME=name, DATE=datetime.date.today().isoformat()) +}} +'''.format(**formatDict) ) -print "Creating source file evolutions/"+name+"/src/Evolution"+name+".cc" -with open("evolutions/"+name+"/src/Evolution"+name+".cc","w+") as f: - f.write(''' -/* - @file Evolution{NAME:s}.cc - @date {DATE:s} - @author AddEvolution.py - Created by AddEvolution.py on {DATE:s} -*/ +filename=prefix+"/src/"+classname+".cc" +print "Creating source file "+filename +with open(filename,"w") as f: + f.write('''//Automatically generated by {scriptname} on {date} -#include "Evolution{NAME:s}.h" +#include"{classname}.h" +//These might be useful +//#include"xfitter_cpp_base.h" //for hf_errlog +#include"xfitter_pars.h" -namespace xfitter -{{ +namespace xfitter{{ +//for dynamic loading +extern"C" {classname}*create(const char*s){{return new {classname}(s);}} -// the class factories -extern "C" Evolution{NAME:s}* create() {{ - return new Evolution{NAME:s}(); +void {classname}::atStart(){{ + //YOUR CODE HERE }} - -/// Global initialization - void Evolution{NAME:s}::initAtStart() {{ - return ; - }}; - - /// Init at each iteration - void Evolution{NAME:s}::initAtIteration() {{ - return ; - }}; - - /// Return PDFs as a map <int,double> where int is PDF ID (-6, ... 6, 21) - std::function<std::map<int,double>(double const& x, double const& Q)> Evolution{NAME:s}::xfxQMap() {{ - }}; - - /// Returns PDFs as a function of i, x, Q - std::function<double(int const& i, double const& x, double const& Q)> Evolution{NAME:s}::xfxQDouble() {{ - }}; - - /// Returns PDFs as double pdfs* --> double[13] from -6 to 6. 
- std::function<void(double const& x, double const& Q, double* pdfs)> Evolution{NAME:s}::xfxQArray() {{ - }}; - - /// Returns alphaS - std::function<double(double const& Q)> Evolution{NAME:s}::AlphaQCD() {{ - }}; +void {classname}::atIteration(){{ + //YOUR CODE HERE }} - -'''.format(NAME=name,DATE=datetime.date.today().isoformat()) -) - - -print "Creating autoconf file evolutions/"+name+"/src/Makefile.am" -with open("evolutions/"+name+"/src/Makefile.am","w+") as f: - f.write(''' -# Created by AddEvolution.py on ''' + datetime.date.today().isoformat() + ''' - -AM_CXXFLAGS = -I$(srcdir)/../include -I$(srcdir)/../../BaseEvolution/include -I$(srcdir)/../../../include -I$(srcdir)/../../../interfaces/include -Wall -fPIC -Wno-deprecated -lib_LTLIBRARIES = lib'''+ name.lower() + '''_xfitter.la -lib'''+ name.lower()+'''_xfitter_la_SOURCES = Evolution'''+name+'''.cc - -# lib'''+ name.lower()+'''_xfitter_la_LDFLAGS = place_if_needed +void {classname}::atConfigurationChange(){{ + //YOUR CODE HERE +}} -datadir = ${prefix}/yaml/evolutions/'''+name+''' -data_DATA = ../yaml/parameters.yaml +std::function<std::map<int,double>(double const&x,double const&Q)>{classname}::xfxQMap(){{ + //YOUR CODE HERE +}} -dist_noinst_HEADERS = ../include ../yaml - ''') +std::function<void(double const&x,double const&Q,double*pdfs)>{classname}::xfxQArray(){{ + //YOUR CODE HERE +}} +std::function<double(int const&i,double const&x,double const&Q)>{classname}::xfxQDouble(){{ + //YOUR CODE HERE +}} -print "Update configure.ac file" -os.system("sed 's|xfitter-config|xfitter-config\\n evolutions/" +name +"/src/Makefile|' configure.ac >/tmp/configure.ac") -os.system("cp /tmp/configure.ac configure.ac") +std::function<double(double const&Q)>{classname}::AlphaQCD(){{ + //YOUR CODE HERE +}} -print "Update Makefile.am" -os.system("sed 's|tools/process|tools/process evolutions/" +name +"/src|' Makefile.am > /tmp/Makefile.am") -os.system("cp /tmp/Makefile.am Makefile.am") +}} +'''.format(**formatDict) +) -print "Update doxygen.cfg" -os.system("sed 's|reactions/APPLgrid/include|reactions/APPLgrid/include evolutions/" +name +"/src evolutions/" +name +"/include|' doxygen.cfg > /tmp/doxygen.cfg ") -os.system("cp /tmp/doxygen.cfg doxygen.cfg") +filename=prefix+"/src/Makefile.am" +print "Creating autoconf file "+filename +with open(filename,"w") as f: + f.write('''#Automatically generated by {scriptname} on {date} +AM_CXXFLAGS=-I$(srcdir)/../include -I$(srcdir)/../../BaseEvolution/include -I$(srcdir)/../../../include -I$(srcdir)/../../../interfaces/include -Wall -fPIC -Wno-deprecated + +lib_LTLIBRARIES=lib{classname}_xfitter.la +lib{classname}_xfitter_la_SOURCES={classname}.cc + +datadir=${{prefix}}/yaml/evolutions/{name} +data_DATA=../yaml/parameters.yaml + +dist_noinst_HEADERS=../include ../yaml +'''.format(**formatDict)) + +def insertLine(filename,after,line): + after=after.replace('/',r'\/') + if line[0]==' ':line='\\'+line + s="sed -i '/{}/a{}' {}".format(after,line,filename) + os.system(s) +print "Updating configure.ac" +insertLine("configure.ac","evolutions/BaseEvolution/src/Makefile"," {}/src/Makefile".format(prefix)) +print "Updating Makefile.am" +insertLine("Makefile.am","evolutions/BaseEvolution/src"," {}/src\\\\".format(prefix)) +print "Updating doxygen.cfg" +insertLine("doxygen.cfg","evolutions/QCDNUM/include"," {0}/src\\\\\\n {0}/include\\\\".format(prefix)) +print "Updating Reactions.txt" +insertLine("Reactions.txt","BaseEvolution",name+" lib"+classname+"_xfitter.so") diff --git a/tools/AddPdfDecomp.py b/tools/AddPdfDecomp.py 
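Note (editorial, not part of the patch): the AddPdfDecomp.py hunks below mirror the AddEvolution.py rewrite above, and AddPdfParam.py further down follows the same pattern. All three generator scripts now derive the C++ class name from the module name and register the new library by appending one line each to configure.ac, Makefile.am, doxygen.cfg and Reactions.txt through the small sed-based insertLine helper. The sketch that follows is purely illustrative: the module name "Toy" and the function sed_command are hypothetical stand-ins (not names used by the scripts), and the generated command assumes GNU sed, whose -i option takes no suffix argument, which is what the scripts rely on.

    # Hypothetical illustration of the conventions shared by the generator scripts.
    name = "Toy"
    # Class-name rule: an underscore is inserted when the module name already
    # ends in an uppercase letter, e.g. "QCDNUM" -> "QCDNUM_Evol".
    classname = name + "_Evol" if name[-1].isupper() else name + "Evol"  # -> "ToyEvol"

    def sed_command(filename, after, line):
        # Same escaping as the scripts' insertLine(): '/' delimits the sed
        # address, and a leading space must be protected with a backslash.
        after = after.replace('/', r'\/')
        if line[0] == ' ':
            line = '\\' + line
        # The scripts pass this string to os.system(); here we only build it.
        return "sed -i '/{}/a{}' {}".format(after, line, filename)

    print(sed_command("Reactions.txt", "BaseEvolution",
                      name + " lib" + classname + "_xfitter.so"))
    # -> sed -i '/BaseEvolution/aToy libToyEvol_xfitter.so' Reactions.txt

    print(sed_command("Makefile.am", "evolutions/BaseEvolution/src",
                      " evolutions/" + name + r"/src\\"))
    # -> sed -i '/evolutions\/BaseEvolution\/src/a\ evolutions/Toy/src\\' Makefile.am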
old mode 100644 new mode 100755 index 516d12a2bd08ffe8679717f80c457a885e397538..6e0168543e54dc84c57377bbb5993d08de2411b1 --- a/tools/AddPdfDecomp.py +++ b/tools/AddPdfDecomp.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python ''' Script to generate templates for new PDF decomposition ''' @@ -7,176 +7,140 @@ import os import datetime if len(sys.argv)<2: - print ''' - Usage: AddPdfDecomp.py NAME - ''' - exit(0) - -name = sys.argv[1] - + print "Usage: "+__file__+" NAME\n Expects working directory to be xFitter root directory" + exit(1) +# Are we in the correct directory? +if not os.path.isdir("pdfdecompositions"): + print "pdfdecompositions directory not found" + exit(2) +name=sys.argv[1] +prefix="pdfdecompositions/"+name # First check if the name is already used - -with open("Reactions.txt","r+") as f: - for l in f: - a = l.split() - if a[0] == name: - print "Interface for reaction "+name+" already exists, exit" - exit(0) - -# Not present, add new line to the Reactions.txt file - -with open("Reactions.txt","a") as f: - f.write(name+" "+"lib"+name.lower()+"_xfitter.so\n") - - -print "Creating directories in pdfdecompositions/"+name+"PdfDecomposition" - -os.system("mkdir -p pdfdecompositions/"+name+"PdfDecomposition/include") -os.system("mkdir -p pdfdecompositions/"+name+"PdfDecomposition/src") -os.system("mkdir -p pdfdecompositions/"+name+"PdfDecomposition/yaml") -os.system("touch pdfdecompositions/"+name+"PdfDecomposition/yaml/parameters.yaml") - -hFile = "pdfdecompositions/{:s}PdfDecomposition/include/{:s}PdfDecomposition.h".format(name,name) - -print "Creating header file "+hFile - - -with open(hFile,"w+") as f: - f.write( - ''' +if os.path.isdir(prefix): + print prefix+" already exists" + exit(3) +classname=None +if name[-1].isupper(): + classname=name+"_PdfDecomp" +else: + classname=name+"PdfDecomp" + +print "Creating directories in "+prefix +os.makedirs(prefix+"/include") +os.makedirs(prefix+"/src") +#here this script used to create an empty prefix+"/yaml/parameters.yaml", but I do not see why --Ivan + +formatDict={"name":name,"classname":classname,"date":datetime.date.today().isoformat(),"scriptname":__file__} + +hFile=prefix+"/include/"+classname+".h" +print "Creating header file "+hFile + +with open(hFile,"w") as f: + f.write('''//Automatically generated by {scriptname} on {date} #pragma once +#include"BasePdfDecomposition.h" +//you probably want to include this +//#include"BasePdfParam.h" -#include "BasePdfDecomposition.h" - +namespace xfitter{{ /** - @class {:s}PdfDecomposition + @class {classname} - @brief A class for {:s} pdf decomposition + @brief A class for {name} pdf decomposition - @version 0.1 - @date {:s} - */ + ADD DESCRIPTION HERE -namespace xfitter {{ - -class {:s}PdfDecomposition : public BasePdfDecomposition -{{ +*/ +class {classname}:public BasePdfDecomposition{{ public: - /// Default constructor. - {:s}PdfDecomposition (); - - /// Default constructor. 
Name is the PDF name - {:s}PdfDecomposition (const std::string& inName); - - /// Optional initialization at the first call - virtual void initAtStart(const std::string & pars) override final; - - /// Compute PDF in a physical base in LHAPDF format for given x and Q - virtual std::function<std::map<int,double>(const double& x)> f0() const override final; - + {classname}(const char*name):BasePdfDecomposition(name){{}}; + //virtual ~BasePdfDecomposition() + virtual const char*getClassName()const override final{{return"{name}";}}; + virtual std::function<std::map<int,double>(const double&x)>f0()const override final; + virtual void atStart(); //use this to get parameterisations + virtual void atIteration(); //use this to enforce sum rules + //virtual void atConfigurationChange(); }}; }} -'''.format( name, name, datetime.date.today().isoformat(),name,name,name) +'''.format(**formatDict) ) - -sFile = "pdfdecompositions/{:s}PdfDecomposition/src/{:s}PdfDecomposition.cc".format(name,name) - +sFile=prefix+"/src/"+classname+".cc" print "Creating source file "+sFile -with open(sFile,"w+") as f: - f.write(''' -/* - @file {:s}PdfDecomposition.cc - @date {:s} - @author AddPdfDecomposition.py - Created by AddPdfDecomposition.py on {:s} -*/ - -#include "{:s}PdfDecomposition.h" - -namespace xfitter {{ - -/// the class factories, for dynamic loading -extern "C" {:s}PdfDecomposition* create() {{ - return new {:s}PdfDecomposition(); -}} - - -// Constructor - {:s}PdfDecomposition::{:s}PdfDecomposition() : BasePdfDecomposition("{:s}") {{ -}} - -// Constructor -{:s}PdfDecomposition::{:s}PdfDecomposition(const std::string& inName) : BasePdfDecomposition(inName) {{ +with open(sFile,"w") as f: + f.write('''//Automatically generated by {scriptname} on {date} +#include"{classname}.h" +//These might be useful +#include "xfitter_pars.h" +//#include"xfitter_cpp_base.h" //for hf_errlog +//#include<cmath> +//#include<iostream> +namespace xfitter{{ +//for dynamic loading +extern"C" {classname}*create(const char*name){{return new {classname}(name);}} + +void {classname}::atStart(){{ + //YOUR CODE HERE; THE FOLLOWING IS A SUGGESTION + const YAML::Node node=XFITTER_PARS::getDecompositionNode(_name); + //???=getParameterisation(node["???"].as<string>()); + //... 
}} -// Init at start: -void {:s}PdfDecomposition::initAtStart(const std::string & pars) {{ - return; +void {classname}::atIteration(){{ + //YOUR CODE HERE + //Enforce sum rules }} -// Returns a LHAPDF-style function, that returns PDFs in a physical basis for given x -std::function<std::map<int,double>(const double& x)> {:s}PdfDecomposition::f0() const -{{ - const auto f_ = [=](double const& x)->std::map<int, double> {{ - std::map<int, double> res_ = {{ - {{-6,0}}, - {{-5,0}}, - {{-4,0}}, - {{-3,0}}, - {{-2,0}}, - {{-1,0}}, - {{ 1,0}}, - {{ 2,0}}, - {{ 3,0}}, - {{ 4,0}}, - {{ 5,0}}, - {{ 6,0}}, - {{22,0}} - }}; - return res_; +std::function<std::map<int,double>(const double&x)>{classname}::f0()const{{ + return[=](double const&x)->std::map<int,double>{{ + //YOUR CODE HERE + //retrieve and rotate pdf here + return std::map<int, double>{{ + //change zeros to something meaningful + {{-6,0}}, + {{-5,0}}, + {{-4,0}}, + {{-3,0}},//sbar + {{-2,0}},//dbar + {{-1,0}},//ubar + {{ 1,0}},//d + {{ 2,0}},//u + {{ 3,0}},//s + {{ 4,0}}, + {{ 5,0}}, + {{ 6,0}}, + {{21,0}}//gluon + }}; }}; - return f_; }} }} -'''.format(name,datetime.date.today().isoformat(),datetime.date.today().isoformat() - ,name,name,name,name,name,name,name,name,name,name) +'''.format(**formatDict) ) - -aFile = "pdfdecompositions/{:s}PdfDecomposition/src/Makefile.am".format(name) - -print "Creating autoconf file " + aFile - - -with open(aFile,"w+") as f: - f.write(''' -# Created by AddPdfDecomposition.py on {:s} - -AM_CXXFLAGS = -I$(srcdir)/../include -I$(srcdir)/../../../include -I$(srcdir)/../../../pdfparams/BasePdfParam/include/ -I$(srcdir)/../../BasePdfDecomposition/include -Wall -fPIC -Wno-deprecated - -lib_LTLIBRARIES = lib{:s}PdfDecomposition_xfitter.la -lib{:s}PdfDecomposition_xfitter_la_SOURCES = {:s}PdfDecomposition.cc - -datadir = ${{prefix}}/yaml/pdfdecompositions/{:s} -data_DATA = ../yaml/parameters.yaml - -dist_noinst_HEADERS = ../include ../yaml -'''.format(datetime.date.today().isoformat(),name,name,name,name)) - - - -print "Update configure.ac file" -os.system("sed 's|xfitter-config|xfitter-config\\n pdfdecompositions/{:s}PdfDecomposition/src/Makefile|' configure.ac >/tmp/configure.ac".format(name)) -os.system("cp /tmp/configure.ac configure.ac") - -print "Update Makefile.am" -os.system("sed 's|pdfdecompositions/BasePdfDecomposition/src|pdfdecompositions/BasePdfDecomposition/src pdfdecompositions/{:s}PdfDecomposition/src|' Makefile.am > /tmp/Makefile.am".format(name)) -os.system("cp /tmp/Makefile.am Makefile.am") - -print "Update doxygen.cfg" -os.system("sed 's|pdfdecompositions/BasePdfDecomposition/include|pdfdecompositions/BasePdfDecomposition/include pdfdecompositions/{:s}PdfDecomposition/include|' doxygen.cfg > /tmp/doxygen.cfg".format(name)) -os.system("cp /tmp/doxygen.cfg doxygen.cfg") - +aFile=prefix+"/src/Makefile.am" +print "Creating automake file "+aFile + +with open(aFile,"w") as f: + f.write('''#Automatically generated by {scriptname} on {date} +AM_CXXFLAGS=-I$(srcdir)/../include -I$(srcdir)/../../../include -I$(srcdir)/../../../pdfparams/BasePdfParam/include/ -I$(srcdir)/../../BasePdfDecomposition/include -Wall -fPIC -Wno-deprecated + +lib_LTLIBRARIES=lib{classname}_xfitter.la +lib{classname}_xfitter_la_SOURCES={classname}.cc +dist_noinst_HEADERS=../include +'''.format(**formatDict)) + +def insertLine(filename,after,line): + after=after.replace('/',r'\/') + if line[0]==' ':line='\\'+line + s="sed -i '/{}/a{}' {}".format(after,line,filename) + os.system(s) +print "Updating configure.ac" 
+insertLine("configure.ac","pdfdecompositions/BasePdfDecomposition/src/Makefile"," {}/src/Makefile".format(prefix)) +print "Updating Makefile.am" +insertLine("Makefile.am","pdfdecompositions/BasePdfDecomposition/src"," {}/src\\\\".format(prefix)) +print "Updating doxygen.cfg" +insertLine("doxygen.cfg","pdfdecompositions/BasePdfDecomposition/include"," {}/include\\\\".format(prefix)) +print "Updating Reactions.txt" +insertLine("Reactions.txt","UvDvubardbars",name+" lib"+classname+"_xfitter.so") diff --git a/tools/AddPdfParam.py b/tools/AddPdfParam.py old mode 100644 new mode 100755 index a6f4239b34811d7212b53e79172c388fd056ce39..4cd5e52336d423d4136c462374f9b3fc798d48e4 --- a/tools/AddPdfParam.py +++ b/tools/AddPdfParam.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python ''' Script to generate templates for new PDF parameterisation ''' @@ -7,111 +7,120 @@ import os import datetime if len(sys.argv)<2: - print ''' - Usage: AddPdfParam.py NAME - ''' - exit(0) - -name = sys.argv[1] - + print "Usage: "+__file__+" NAME\n Expects working directory to be xFitter root directory" + exit(1) +# Are we in the correct directory? +if not os.path.isdir("pdfparams"): + print "pdfparams directory not found" + exit(2) # First check if the name is already used - -print "Creating directories in pdfparams/"+name - -os.system("mkdir -p pdfparams/"+name+"PdfParam/include") -os.system("mkdir -p pdfparams/"+name+"PdfParam/src") -os.system("mkdir -p pdfparams/"+name+"PdfParam/yaml") -os.system("touch pdfparams/"+name+"PdfParam/yaml/parameters.yaml") - -hFile = "pdfparams/{:s}PdfParam/include/{:s}PdfParam.h".format(name,name) - -print "Creating header file "+hFile - -with open(hFile,"w+") as f: - f.write( - ''' +name=sys.argv[1] +prefix="pdfparams/"+name +if os.path.isdir(prefix): + print prefix+" already exists" + exit(3) +classname=None +if name[-1].isupper(): + classname=name+"_PdfParam" +else: + classname=name+"PdfParam" + +print "Creating directories in "+prefix +os.makedirs(prefix+"/include") +os.makedirs(prefix+"/src") +#here this script used to create an empty prefix+"/yaml/parameters.yaml", but I do not see why --Ivan + +formatDict={"name":name,"classname":classname,"date":datetime.date.today().isoformat(),"scriptname":__file__} + +hFile=prefix+"/include/"+classname+".h" +print "Creating header file "+hFile + +with open(hFile,"w") as f: + f.write('''//Automatically generated by {scriptname} on {date} #pragma once - -#include "BasePdfParam.h" +#include"BasePdfParam.h" /** - @class {name:s}PdfParam + @class {classname} - @brief A class for {name:s} pdf parameterisation + @brief A class for {name} pdf parameterisation - @version 0.1 - @date {date:s} - */ + ADD DESCRIPTION HERE -class {name:s}PdfParam:public BasePdfParam{{ +*/ + +namespace xfitter{{ +class {classname}:public BasePdfParam{{ public: - {name:s}PdfParam(const std::string&inName):BasePdfParam(inName){{}} - //Evaluate xf(x) at given x with current parameters + {classname}(const std::string&name):BasePdfParam(name){{}} virtual double operator()(double x)const override final; - // (Optional) compute moments: // virtual double moment(int nMoment=-1)const override final; - // (Optional) set moments: // virtual void setMoment(int nMoment,double value)override final; - // (Optional) - //Initialize from a yaml node. 
Uses node[getName] as the basis - // virtual void initFromYaml(YAML::Node value)override final; + // virtual void atStart()override final; }}; -'''.format(name=name,date=datetime.date.today().isoformat()) +}} +'''.format(**formatDict) ) - -sFile = "pdfparams/{:s}PdfParam/src/{:s}PdfParam.cc".format(name,name) - +sFile=prefix+"/src/"+classname+".cc" print "Creating source file "+sFile -with open(sFile,"w+") as f: - f.write(''' -/* - @file {name:s}PdfParam.cc - @date {date:s} - @author AddPdfParam.py - Created by AddPdfParam.py on {date:s} -*/ +with open(sFile,"w") as f: + f.write('''//Automatically generated by {scriptname} on {date} + +#include"{classname}.h" +//These might be useful +//#include"xfitter_cpp_base.h" //for hf_errlog +//#include<cmath> +//#include<iostream> -#include "{name:s}PdfParam.h" +namespace xfitter{{ +//for dynamic loading +extern"C" {classname}*create(const char*s){{return new {classname}(s);}} -double {name:s}PdfParam::operator()(double x){{ +double {classname}::operator()(double x)const{{ //Your code here }} -'''.format(name=name,date=datetime.date.today().isoformat()) -) - -aFile = "pdfparams/{:s}PdfParam/src/Makefile.am".format(name) - -print "Creating autoconf file " + aFile - - -with open(aFile,"w+") as f: - f.write(''' -# Created by AddPdfParam.py on {:s} - -AM_CXXFLAGS = -I$(srcdir)/../include -I$(srcdir)/../../../include -I$(srcdir)/../../BasePdfParam/include -Wall -fPIC -Wno-deprecated - -lib_LTLIBRARIES = lib{:s}PdfParam_xfitter.la -lib{:s}PdfParam_xfitter_la_SOURCES = {:s}PdfParam.cc -datadir = ${{prefix}}/yaml/pdfparams/{:s} -data_DATA = ../yaml/parameters.yaml - -dist_noinst_HEADERS = ../include ../yaml -'''.format(datetime.date.today().isoformat(),name,name,name,name)) - - -print "Update configure.ac file" -os.system("sed 's|xfitter-config|xfitter-config\\n pdfparams/{:s}PdfParam/src/Makefile|' configure.ac >/tmp/configure.ac".format(name)) -os.system("cp /tmp/configure.ac configure.ac") - -print "Update Makefile.am" -os.system("sed 's|pdfdecompositions/BasePdfDecomposition/src|pdfdecompositions/BasePdfDecomposition/src pdfparams/{:s}PdfParam/src|' Makefile.am > /tmp/Makefile.am".format(name)) -os.system("cp /tmp/Makefile.am Makefile.am") - -print "Update doxygen.cfg" -os.system("sed 's|pdfparams/BasePdfParam/include|pdfparams/BasePdfParam/include pdfparams/{:s}PdfParam/include|' doxygen.cfg > /tmp/doxygen.cfg".format(name)) -os.system("cp /tmp/doxygen.cfg doxygen.cfg") +//OPTIONAL + +//void {classname}::atStart(){{ +// Your code here +// Check that number of parameters is sane +//}} +//double {classname}::moment(int n)const{{ +// Your code here +//}} +//void {classname}::setMoment(int n,double val){{ +// Your code here +//}} +}} +'''.format(**formatDict) +) -exit(0) +aFile=prefix+"/src/Makefile.am" +print "Creating automake file "+aFile + +with open(aFile,"w") as f: + f.write('''#Automatically generated by {scriptname} on {date} +AM_CXXFLAGS=-I$(srcdir)/../include -I$(srcdir)/../../../include -I$(srcdir)/../../BasePdfParam/include -Wall -fPIC -Wno-deprecated + +lib_LTLIBRARIES=lib{classname}_xfitter.la +lib{classname}_xfitter_la_SOURCES={classname}.cc +dist_noinst_HEADERS=../include +lib{classname}_xfitter_la_LDFLAGS=-lBasePdfParam_xfitter -L$(libdir) +'''.format(**formatDict)) + +def insertLine(filename,after,line): + after=after.replace('/',r'\/') + if line[0]==' ':line='\\'+line + s="sed -i '/{}/a{}' {}".format(after,line,filename) + os.system(s) +print "Updating configure.ac" +insertLine("configure.ac","pdfparams/BasePdfParam/src/Makefile"," 
{}/src/Makefile".format(prefix)) +print "Updating Makefile.am" +insertLine("Makefile.am","pdfparams/BasePdfParam/src"," {}/src\\\\".format(prefix)) +print "Updating doxygen.cfg" +insertLine("doxygen.cfg","pdfparams/BasePdfParam/include"," {}/include\\\\".format(prefix)) +print "Updating Reactions.txt" +insertLine("Reactions.txt","HERAPDF",name+" lib"+classname+"_xfitter.so") diff --git a/tools/draw/src/CommandParser.cc b/tools/draw/src/CommandParser.cc index 1380ea838b4ce94b4ae844fc1f97fa08153a0bf0..cf402c6c8cea3b04fcd3db695a61378523203710 100644 --- a/tools/draw/src/CommandParser.cc +++ b/tools/draw/src/CommandParser.cc @@ -121,344 +121,349 @@ CommandParser::CommandParser(int argc, char **argv): for (vector<string>::iterator it = allargs.begin() + 1; it != allargs.end(); it++) if ((*it).find("--") == 0) { - if (*it == "--help") - { - help(); - exit(0); - } - else if (*it == "--thicklines") - lwidth = 3; - else if (*it == "--largetext") - { - txtsize = 0.05; - lmarg = 0.18; - bmarg = 0.13; - offset = 1.6; - } - else if (*it == "--bw") - bw = true; - else if (*it == "--lowres") - { - resolution = 400; - // pagewidth = 10; - } - else if (*it == "--highres") - { - resolution = 2400; - // pagewidth = 60; - } - else if (*it == "--no-version") - version = false; - else if (*it == "--no-logo") - drawlogo = false; - else if (*it == "--no-data") - nodata = true; - else if (*it == "--no-pdfs") - nopdfs = true; - else if (*it == "--no-shifts") - noshifts = true; - else if (*it == "--no-tables") - notables = true; - else if (*it == "--chi2-nopdf-uncertainties") - chi2nopdf = true; - else if (*it == "--partial-log-penalty") - logpenalty = true; - else if (*it == "--helvet-fonts") - font = "helvet"; - else if (*it == "--cmbright-fonts") - font = "modernbright"; - else if (*it == "--shifts-per-page") - { - adjshift = false; - spp = atoi((*(it+1)).c_str()); - spp = max(1, spp); - spp = min(40, spp); - allargs.erase(it+1); - } - else if (*it == "--shifts-heigth") - { - adjshift = false; - shgth = atoi((*(it+1)).c_str()); - shgth = max(20, shgth); - shgth = min(200, shgth); - allargs.erase(it+1); - } - else if (*it == "--cms") - { - cms = true; - drawlogo = false; - } - else if (*it == "--cms-preliminary") - { - cmspreliminary = true; - drawlogo = false; - } - else if (*it == "--atlas") - { - atlas = true; - drawlogo = false; - } - else if (*it == "--atlas-internal") - { - atlasinternal = true; - drawlogo = false; - } - else if (*it == "--atlas-preliminary") - { - atlaspreliminary = true; - drawlogo = false; - } - else if (*it == "--cdfii-preliminary") - { - cdfiipreliminary = true; - drawlogo = false; - } - else if (*it == "--hidden") - { - cout << endl; - cout << "Hidden options" << endl; - cout << "Please use this options only if you are authorised from your collaboration to do so" << endl; - cout << "--cms" << endl; - cout << "--cms-preliminary" << endl; - cout << "--atlas" << endl; - cout << "--atlas-internal" << endl; - cout << "--atlas-preliminary" << endl; - cout << "--cdfii-preliminary" << endl; - cout << "--no-logo" << endl; - cout << endl; - exit(-1); - } - else if (*it == "--bands") - dobands = true; - else if (*it == "--scale68") - scale68 = true; - else if (*it == "--profile") { - dobands = true; - profile = true; - } - else if (*it == "--reweight-BAY") { - dobands = true; - reweight = true; - BAYweight = true; - } - else if (*it == "--reweight-GK") { - dobands = true; - reweight = true; - GKweight = true; - } - else if (*it == "--asym") - { - dobands = true; - asym = true; - } - else if (*it 
== "--median") - median = true; - else if (*it == "--68cl") - { - if (cl90 == true) - { - cout << "Options --68cl and --90cl are mutually exclusive, cannot use both" << endl; - exit(1); - } - cl68 = true; - median = true; - } - else if (*it == "--90cl") - { - if (cl68 == true) - { - cout << "Options --68cl and --90cl are mutually exclusive, cannot use both" << endl; - exit(1); - } - cl90 = true; - median = true; - } - else if (*it == "--absolute-errors") - { - dobands = true; - abserror = true; - } - else if (*it == "--relative-errors") - { - dobands = true; - relerror = true; - } - else if (*it == "--no-logx") - logx = false; - else if (*it == "--q2all") - q2all = true; - else if (*it == "--plots-per-page") - { - plotsperpage = atoi((*(it+1)).c_str()); - allargs.erase(it+1); - } - else if (*it == "--loose-mc-replica-selection") - looseRepSelection = true; - else if (*it == "--outdir") - { - outdir = *(it+1); - allargs.erase(it+1); - } - else if (*it == "--eps") - format = "eps"; - else if (*it == "--root") - root = true; - else if (*it == "--splitplots-eps") - { - splitplots = true; - ext = "eps"; - } - else if (*it == "--splitplots-pdf") - { - splitplots = true; - ext = "pdf"; - } - else if (*it == "--splitplots-png") - { - splitplots = true; - ext = "png"; - } - else if (*it == "--filledbands") - filledbands = true; - else if (*it == "--ratiorange") - { - rmin = atof((*(it+1)).substr(0, (*(it+1)).find(":")).c_str()); - rmax = atof((*(it+1)).substr((*(it+1)).find(":") + 1, (*(it+1)).size() - (*(it+1)).find(":") - 1).c_str()); - allargs.erase(it+1); - } - else if (*it == "--xrange") - { - xmin = max(0.000000000000001, atof((*(it+1)).substr(0, (*(it+1)).find(":")).c_str())); - xmax = min(1., atof((*(it+1)).substr((*(it+1)).find(":") + 1, (*(it+1)).size() - (*(it+1)).find(":") - 1).c_str())); - allargs.erase(it+1); - } - else if (*it == "--colorpattern") - { - int pattern = atoi((*(it+1)).c_str()); - if (pattern == 1) - { - col[0] = kBlue + 2; - col[1] = kOrange; - col[2] = kGreen - 3; - col[3] = kRed + 1; - col[5] = kOrange + 7; - col[5] = kCyan + 1; - } - else if (pattern == 2) - { - col[0] = kBlue + 2; - col[1] = kRed + 1; - col[2] = kYellow - 7; - col[3] = kOrange + 7; - col[4] = kMagenta + 1; - col[5] = kCyan + 1; - } - else if (pattern == 3) - { - col[0] = kBlue + 2; - col[1] = kMagenta + 1; - col[2] = kCyan + 1; - col[3] = kRed + 1; - col[4] = kGreen + 2; - col[5] = kYellow + 1; - } - else if (pattern == 4) - { - col[0] = kBlue + 1; - col[1] = kAzure - 9; - col[2] = kAzure + 3; - col[3] = kAzure + 4; - col[4] = kAzure + 5; - col[5] = kAzure + 6; - } - else if (pattern == 5) - { - col[0] = kOrange + 7; - col[1] = kYellow; - col[2] = kRed - 1; - col[3] = kOrange + 4; - col[4] = kOrange + 2; - col[5] = kOrange -1; - } - else if (pattern == 6) - { - col[0] = kGreen + 2; - col[1] = kSpring - 9; - col[2] = kGreen + 1; - col[3] = kSpring + 4; - col[4] = kSpring + 2; - col[5] = kSpring + 7; - } - else if (pattern == 7) - { - col[0] = kRed - 2; - col[1] = kAzure - 9; - col[2] = kSpring - 9; - col[3] = kOrange + 7; - col[4] = kSpring + 2; - col[5] = kSpring + 7; - } - else if (pattern == 8) - { - col[0] = kRed - 2; - col[1] = kBlue + 1; - col[2] = kGreen + 1; - col[3] = kOrange + 7; - col[4] = kSpring + 2; - col[5] = kSpring + 7; - } + if (*it == "--help") + { + help(); + exit(0); + } + else if (*it == "--thicklines") + lwidth = 3; + else if (*it == "--largetext") + { + txtsize = 0.05; + lmarg = 0.18; + bmarg = 0.13; + offset = 1.6; + } + else if (*it == "--bw") + bw = true; + else if (*it == 
"--lowres") + { + resolution = 400; + // pagewidth = 10; + } + else if (*it == "--highres") + { + resolution = 2400; + // pagewidth = 60; + } + else if (*it == "--no-version") + version = false; + else if (*it == "--no-logo") + drawlogo = false; + else if (*it == "--no-data") + nodata = true; + else if (*it == "--no-pdfs") + nopdfs = true; + else if (*it == "--no-shifts") + noshifts = true; + else if (*it == "--no-tables") + notables = true; + else if (*it == "--chi2-nopdf-uncertainties") + chi2nopdf = true; + else if (*it == "--partial-log-penalty") + logpenalty = true; + else if (*it == "--helvet-fonts") + font = "helvet"; + else if (*it == "--cmbright-fonts") + font = "modernbright"; + else if (*it == "--shifts-per-page") + { + adjshift = false; + spp = atoi((*(it+1)).c_str()); + spp = max(1, spp); + spp = min(40, spp); + allargs.erase(it+1); + } + else if (*it == "--shifts-heigth") + { + adjshift = false; + shgth = atoi((*(it+1)).c_str()); + shgth = max(20, shgth); + shgth = min(200, shgth); + allargs.erase(it+1); + } + else if (*it == "--cms") + { + cms = true; + drawlogo = false; + } + else if (*it == "--cms-preliminary") + { + cmspreliminary = true; + drawlogo = false; + } + else if (*it == "--atlas") + { + atlas = true; + drawlogo = false; + } + else if (*it == "--atlas-internal") + { + atlasinternal = true; + drawlogo = false; + } + else if (*it == "--atlas-preliminary") + { + atlaspreliminary = true; + drawlogo = false; + } + else if (*it == "--cdfii-preliminary") + { + cdfiipreliminary = true; + drawlogo = false; + } + else if (*it == "--hidden") + { + cout << endl; + cout << "Hidden options" << endl; + cout << "Please use this options only if you are authorised from your collaboration to do so" << endl; + cout << "--cms" << endl; + cout << "--cms-preliminary" << endl; + cout << "--atlas" << endl; + cout << "--atlas-internal" << endl; + cout << "--atlas-preliminary" << endl; + cout << "--cdfii-preliminary" << endl; + cout << "--no-logo" << endl; + cout << endl; + exit(-1); + } + else if (*it == "--bands") + dobands = true; + else if (*it == "--scale68") + scale68 = true; + else if (*it == "--profile") { + dobands = true; + profile = true; + } + else if (*it == "--reweight-BAY") { + dobands = true; + reweight = true; + BAYweight = true; + } + else if (*it == "--reweight-GK") { + dobands = true; + reweight = true; + GKweight = true; + } + else if (*it == "--asym") + { + dobands = true; + asym = true; + } + else if (*it == "--median") + median = true; + else if (*it == "--68cl") + { + if (cl90 == true) + { + cout << "Options --68cl and --90cl are mutually exclusive, cannot use both" << endl; + exit(1); + } + cl68 = true; + median = true; + } + else if (*it == "--90cl") + { + if (cl68 == true) + { + cout << "Options --68cl and --90cl are mutually exclusive, cannot use both" << endl; + exit(1); + } + cl90 = true; + median = true; + } + else if (*it == "--absolute-errors") + { + dobands = true; + abserror = true; + } + else if (*it == "--relative-errors") + { + dobands = true; + relerror = true; + } + else if (*it == "--no-logx") + logx = false; + else if (*it == "--q2all") + q2all = true; + else if (*it == "--plots-per-page") + { + plotsperpage = atoi((*(it+1)).c_str()); + allargs.erase(it+1); + } + else if (*it == "--loose-mc-replica-selection") + looseRepSelection = true; + else if (*it == "--outdir") + { + outdir = *(it+1); + allargs.erase(it+1); + } + else if (*it == "--eps") + format = "eps"; + else if (*it == "--root") + root = true; + else if (*it == "--splitplots-eps") + { + 
splitplots = true; + ext = "eps"; + } + else if (*it == "--splitplots-pdf") + { + splitplots = true; + ext = "pdf"; + } + else if (*it == "--splitplots-png") + { + splitplots = true; + ext = "png"; + } + else if (*it == "--filledbands") + filledbands = true; + else if (*it == "--ratiorange") + { + rmin = atof((*(it+1)).substr(0, (*(it+1)).find(":")).c_str()); + rmax = atof((*(it+1)).substr((*(it+1)).find(":") + 1, (*(it+1)).size() - (*(it+1)).find(":") - 1).c_str()); + allargs.erase(it+1); + } + else if (*it == "--xrange") + { + string&s=*(it+1); + size_t p=s.find(':'); + s[p]=0; + xmin=atof(s.c_str()); + xmin=max(1e-15,xmin); + xmax=atof(s.c_str()+p+1); + xmax=min(1.,xmax); + allargs.erase(it+1); + } + else if (*it == "--colorpattern") + { + int pattern = atoi((*(it+1)).c_str()); + if (pattern == 1) + { + col[0] = kBlue + 2; + col[1] = kOrange; + col[2] = kGreen - 3; + col[3] = kRed + 1; + col[5] = kOrange + 7; + col[5] = kCyan + 1; + } + else if (pattern == 2) + { + col[0] = kBlue + 2; + col[1] = kRed + 1; + col[2] = kYellow - 7; + col[3] = kOrange + 7; + col[4] = kMagenta + 1; + col[5] = kCyan + 1; + } + else if (pattern == 3) + { + col[0] = kBlue + 2; + col[1] = kMagenta + 1; + col[2] = kCyan + 1; + col[3] = kRed + 1; + col[4] = kGreen + 2; + col[5] = kYellow + 1; + } + else if (pattern == 4) + { + col[0] = kBlue + 1; + col[1] = kAzure - 9; + col[2] = kAzure + 3; + col[3] = kAzure + 4; + col[4] = kAzure + 5; + col[5] = kAzure + 6; + } + else if (pattern == 5) + { + col[0] = kOrange + 7; + col[1] = kYellow; + col[2] = kRed - 1; + col[3] = kOrange + 4; + col[4] = kOrange + 2; + col[5] = kOrange -1; + } + else if (pattern == 6) + { + col[0] = kGreen + 2; + col[1] = kSpring - 9; + col[2] = kGreen + 1; + col[3] = kSpring + 4; + col[4] = kSpring + 2; + col[5] = kSpring + 7; + } + else if (pattern == 7) + { + col[0] = kRed - 2; + col[1] = kAzure - 9; + col[2] = kSpring - 9; + col[3] = kOrange + 7; + col[4] = kSpring + 2; + col[5] = kSpring + 7; + } + else if (pattern == 8) + { + col[0] = kRed - 2; + col[1] = kBlue + 1; + col[2] = kGreen + 1; + col[3] = kOrange + 7; + col[4] = kSpring + 2; + col[5] = kSpring + 7; + } - allargs.erase(it+1); - } - else if (*it == "--therr") - therr = true; - else if (*it == "--noupband") - noupband = true; - else if (*it == "--greenband") - errbandcol = kGreen - 3; - else if (*it == "--blueband") - errbandcol = kAzure - 9; - else if (*it == "--points") - points = true; - else if (*it == "--theory") - { - theorylabel = *(it+1); - allargs.erase(it+1); - } - else if (*it == "--only-theory") - { - onlytheory = true; - ratiototheory = true; - } - else if (*it == "--theory-rel-errors") - { - onlytheory = true; - ratiototheory = true; - threlerr = true; - } - else if (*it == "--ratio-to-theory") - ratiototheory = true; - else if (*it == "--diff") - diff = true; - else if (*it == "--2panels") - twopanels = true; - else if (*it == "--3panels") - threepanels = true; - else if (*it == "--multitheory") - multitheory = true; - else if (*it == "--nothshifts") - nothshifts = true; - else - { - cout << endl; - cout << "Invalid option " << *it << endl; - cout << allargs[0] << " --help for help " << endl; - cout << endl; - exit(-1); - } - allargs.erase(it); - it = allargs.begin(); + allargs.erase(it+1); + } + else if (*it == "--therr") + therr = true; + else if (*it == "--noupband") + noupband = true; + else if (*it == "--greenband") + errbandcol = kGreen - 3; + else if (*it == "--blueband") + errbandcol = kAzure - 9; + else if (*it == "--points") + points = true; + else if (*it 
== "--theory") + { + theorylabel = *(it+1); + allargs.erase(it+1); + } + else if (*it == "--only-theory") + { + onlytheory = true; + ratiototheory = true; + } + else if (*it == "--theory-rel-errors") + { + onlytheory = true; + ratiototheory = true; + threlerr = true; + } + else if (*it == "--ratio-to-theory") + ratiototheory = true; + else if (*it == "--diff") + diff = true; + else if (*it == "--2panels") + twopanels = true; + else if (*it == "--3panels") + threepanels = true; + else if (*it == "--multitheory") + multitheory = true; + else if (*it == "--nothshifts") + nothshifts = true; + else + { + cout << endl; + cout << "Invalid option " << *it << endl; + cout << allargs[0] << " --help for help " << endl; + cout << endl; + exit(-1); + } + allargs.erase(it); + it = allargs.begin(); } for (vector<string>::iterator it = allargs.begin() + 1; it != allargs.end(); it++) diff --git a/tools/draw/src/DataPainter.cc b/tools/draw/src/DataPainter.cc index 819389a2cd3c18045bd6561ec6a4f04d5ff5980c..dc73a1216d3af935346bd2900aeca7d753706f86 100644 --- a/tools/draw/src/DataPainter.cc +++ b/tools/draw/src/DataPainter.cc @@ -12,6 +12,7 @@ #include <TLatex.h> #include <iostream> +#include <sstream> #include <math.h> #include <algorithm> @@ -21,7 +22,7 @@ double hmin(TH1F *h) for (int b = h->GetXaxis()->GetFirst(); b <= h->GetXaxis()->GetLast(); b++) if (h->GetBinContent(b) != 0) min0 = min(min0, h->GetBinContent(b)); - return min0; + return min0; } struct range @@ -39,9 +40,9 @@ vector <range> historanges(TH1F *h) for (; b <= h->GetXaxis()->GetLast(); b++) if (h->GetBinContent(b) == 0) { - temp.upedge = h->GetXaxis()->GetBinLowEdge(b - 1); - ranges.push_back(temp); - temp.lowedge = h->GetXaxis()->GetBinUpEdge(b); + temp.upedge = h->GetXaxis()->GetBinLowEdge(b - 1); + ranges.push_back(temp); + temp.lowedge = h->GetXaxis()->GetBinUpEdge(b); } temp.upedge = h->GetXaxis()->GetBinUpEdge(b - 2); ranges.push_back(temp); @@ -53,7 +54,7 @@ void Subplot::Draw(TH1F* histo, string opt) if (bins1.size() == 1) if (opt.find("E3") != string::npos) opt.replace(opt.find("E3"), opt.find("E3")+2, "E2"); - + if (maketgraph) { TGraphAsymmErrors * graph = new TGraphAsymmErrors(histo); @@ -61,32 +62,32 @@ void Subplot::Draw(TH1F* histo, string opt) //Set correct x point vector <double> valy; for (int i = 0; i < graph->GetN(); i++) - valy.push_back(graph->GetY()[i]); + valy.push_back(graph->GetY()[i]); for (int i = 0; i < graph->GetN(); i++) - { - graph->SetPoint(i, valx[i], valy[i]); - graph->SetPointEXhigh(i, 0); - graph->SetPointEXlow(i, 0); - } + { + graph->SetPoint(i, valx[i], valy[i]); + graph->SetPointEXhigh(i, 0); + graph->SetPointEXlow(i, 0); + } graph->Sort(); if (opt.find("same") == string::npos) - opt.insert(0, "A"); + opt.insert(0, "A"); if (opt.find("E1") != string::npos) - opt.erase(opt.find("E1")+1); + opt.erase(opt.find("E1")+1); if (opt.find("][") != string::npos) //this is a pull histo, force drawing as histogram - { - vector <double> valy; - for (int i = 0; i < graph->GetN(); i++) - valy.push_back(graph->GetY()[i]); - for (int i = 0; i < graph->GetN(); i++) - histo->SetBinContent(i+1, valy[i]); - histo->Draw(opt.c_str()); - } + { + vector <double> valy; + for (int i = 0; i < graph->GetN(); i++) + valy.push_back(graph->GetY()[i]); + for (int i = 0; i < graph->GetN(); i++) + histo->SetBinContent(i+1, valy[i]); + histo->Draw(opt.c_str()); + } else - graph->Draw(opt.c_str()); + graph->Draw(opt.c_str()); } else histo->Draw(opt.c_str()); } @@ -95,52 +96,64 @@ TCanvas * DataPainter(int dataindex, int subplotindex) { 
vector <Subplot> datahistos; vector <string> labels; - for (vector<string>::iterator itl = opts.labels.begin(); itl != opts.labels.end(); itl++) - if (datamap[*itl].datamap.find(dataindex) != datamap[*itl].datamap.end()) - if (datamap[*itl].datamap[dataindex].subplots.find(subplotindex) != datamap[*itl].datamap[dataindex].subplots.end()) - if (datamap[*itl].datamap[dataindex].subplots[subplotindex].IsValid()) - { - datahistos.push_back(datamap[*itl].datamap[dataindex].subplots[subplotindex]); - labels.push_back(*itl); - } + //Collect all valid sublots in datahistos + for(auto const&label:opts.labels){ + auto&datasetmap=datamap.at(label).datamap; + auto dataset_it=datasetmap.find(dataindex); + if(dataset_it==datasetmap.end())continue; + auto&subplotsmap=dataset_it->second.subplots; + auto subplot_it=subplotsmap.find(subplotindex); + if(subplot_it==subplotsmap.end())continue; + Subplot&subplot=subplot_it->second; + if(!subplot.IsValid())continue; + datahistos.push_back(subplot); + labels.push_back(label); + } if (datahistos.size() < 1) return 0; //Empty dataset vector - char cnvname[15]; - sprintf(cnvname, "data_%d-%d", dataindex, subplotindex); + string cnvname; + { + ostringstream ss; + ss<<"data_"<<dataindex<<'-'<<subplotindex; + cnvname=ss.str(); + } if (opts.multitheory) { if (datahistos.size() == 2) - { - opts.twopanels = true; - opts.threepanels = false; - } + { + opts.twopanels = true; + opts.threepanels = false; + } if (datahistos.size() == 3) - { - opts.twopanels = false; - opts.threepanels = true; - } + { + opts.twopanels = false; + opts.threepanels = true; + } if (datahistos.size() > 3) - { - cout << "Cannot plot in multitheory mode more than 3 directories" << endl; - return 0; - } + { + cout << "Cannot plot in multitheory mode more than 3 directories" << endl; + return 0; + } } - + TCanvas * cnv; if (opts.twopanels || opts.threepanels) - cnv = new TCanvas(cnvname, "", 0, 0, 2 * opts.resolution, opts.resolution); + cnv = new TCanvas(cnvname.c_str(), "", 0, 0, 2 * opts.resolution, opts.resolution); else - cnv = new TCanvas(cnvname, "", 0, 0, opts.resolution, opts.resolution); + cnv = new TCanvas(cnvname.c_str(), "", 0, 0, opts.resolution, opts.resolution); cnv->cd(); TH1F * data = datahistos[0].getdata(); TH1F * datatot = datahistos[0].getdatatot(); - char dtname[200]; - sprintf (dtname, "data_%d-%d", dataindex, subplotindex); - string dataname = dtname; + string dataname; + { + ostringstream ss; + ss<<"data_"<<dataindex<<'-'<<subplotindex; + dataname=ss.str(); + } //Set the pads geometry float dy; //subpanels height @@ -151,7 +164,7 @@ TCanvas * DataPainter(int dataindex, int subplotindex) dy = (1.-bmarg-tmarg)/3.; else //1 panel dy = (1.-bmarg-tmarg)/4.; - + TPad* Main; TPad* Ratio; @@ -196,7 +209,7 @@ TCanvas * DataPainter(int dataindex, int subplotindex) mb = marg0/my; } - + //Ratio pad geometry if (opts.twopanels || opts.threepanels) { @@ -232,9 +245,9 @@ TCanvas * DataPainter(int dataindex, int subplotindex) { int pullpad; if (opts.twopanels) - pullpad = 2; + pullpad = 2; if (opts.threepanels) - pullpad = 3; + pullpad = 3; Pulls = (TPad*)cnv->GetPad(2)->GetPad(pullpad); Pulls->SetPad(0, 0, 1, bmarg+dy); @@ -271,7 +284,7 @@ TCanvas * DataPainter(int dataindex, int subplotindex) } //create template histogram for axis - TH1F *up_templ = new TH1F(((string) "up_templ_" + cnvname).c_str(), "", nbins, axmin, axmax); + TH1F *up_templ = new TH1F(("up_templ_"+cnvname).c_str(), "", nbins, axmin, axmax); up_templ->GetYaxis()->SetLabelFont(62); up_templ->GetYaxis()->SetTitleFont(62); @@ 
-291,7 +304,7 @@ TCanvas * DataPainter(int dataindex, int subplotindex) { up_templ->GetXaxis()->SetNoExponent(); if (axmax/axmin < 90) - up_templ->GetXaxis()->SetMoreLogLabels(); + up_templ->GetXaxis()->SetMoreLogLabels(); } //Evaluate maximum and minimum @@ -304,11 +317,11 @@ TCanvas * DataPainter(int dataindex, int subplotindex) for (vector <Subplot>::iterator it = datahistos.begin(); it != datahistos.end(); it++) { if (opts.therr && !opts.noupband) - mx = max(mx, (float)((*it).gettherrup()->GetMaximum())); + mx = max(mx, (float)((*it).gettherrup()->GetMaximum())); else - mx = max(mx, (float)((*it).getth()->GetMaximum())); + mx = max(mx, (float)((*it).getth()->GetMaximum())); if (!opts.onlytheory) - mx = max(mx, (float)((*it).getthshift()->GetMaximum())); + mx = max(mx, (float)((*it).getthshift()->GetMaximum())); } float mn = mx; @@ -319,17 +332,17 @@ TCanvas * DataPainter(int dataindex, int subplotindex) for (vector <Subplot>::iterator it = datahistos.begin(); it != datahistos.end(); it++) { if (opts.therr && !opts.noupband) - mn = min(mn, (float)(hmin((*it).gettherrdown()))); + mn = min(mn, (float)(hmin((*it).gettherrdown()))); else - mn= min(mn, (float)(hmin((*it).getth()))); + mn= min(mn, (float)(hmin((*it).getth()))); if (!opts.onlytheory) - mn = min(mn, (float)(hmin((*it).getthshift()))); + mn = min(mn, (float)(hmin((*it).getthshift()))); } if (datahistos[0].getlogy()) { if (mn < 0) - mn = 0.000001; + mn = 0.000001; float ratio = mx / mn; mx = mx * pow(10, log10(ratio) * 0.45/my); mn = mn / pow(10, log10(ratio) * 0.7/my); @@ -362,7 +375,7 @@ TCanvas * DataPainter(int dataindex, int subplotindex) { datatot->SetAxisRange((*r).lowedge, (*r).upedge); if (!opts.onlytheory) - datahistos[0].Draw((TH1F*)datatot->Clone(), "PE3 same"); + datahistos[0].Draw((TH1F*)datatot->Clone(), "PE3 same"); } if (!opts.onlytheory) datahistos[0].Draw(data, "PE1 same"); @@ -379,17 +392,17 @@ TCanvas * DataPainter(int dataindex, int subplotindex) float txtsz; string infolabel; if (opts.atlasinternal || opts.atlaspreliminary || opts.atlas) - { - vertdist = 0.10; - txtsz = 1.; - infolabel = datahistos[0].getextralabel() + "; " + datahistos[0].getlumilabel(); - } + { + vertdist = 0.10; + txtsz = 1.; + infolabel = datahistos[0].getextralabel() + "; " + datahistos[0].getlumilabel(); + } else - { - vertdist = 0.05; - txtsz = 1.; - infolabel = datahistos[0].getextralabel(); - } + { + vertdist = 0.05; + txtsz = 1.; + infolabel = datahistos[0].getextralabel(); + } l.SetTextSize(txtsz*0.04/my); l.DrawLatex(lmarg+0.05, (1-tmarg/my) - vertdist/my, infolabel.c_str()); } @@ -397,18 +410,18 @@ TCanvas * DataPainter(int dataindex, int subplotindex) if (datahistos[0].getlumilabel() != "") if (!(opts.atlasinternal || opts.atlaspreliminary || opts.atlas)) { - TLatex l; - l.SetNDC(); - l.SetTextFont(42); - - float vertdist; - float txtsz; - vertdist = 0.13; - txtsz = 1.; - l.SetTextSize(txtsz*0.04/my); - l.DrawLatex(lmarg+0.05, (1-tmarg/my) - vertdist/my, datahistos[0].getlumilabel().c_str()); + TLatex l; + l.SetNDC(); + l.SetTextFont(42); + + float vertdist; + float txtsz; + vertdist = 0.13; + txtsz = 1.; + l.SetTextSize(txtsz*0.04/my); + l.DrawLatex(lmarg+0.05, (1-tmarg/my) - vertdist/my, datahistos[0].getlumilabel().c_str()); } - + //Main legend TPaveText* leg1; if (opts.onlytheory) @@ -420,18 +433,18 @@ TCanvas * DataPainter(int dataindex, int subplotindex) { TLegend * leg; if (opts.nothshifts) - leg = new TLegend(lmarg+0.04, mb+0.03, lmarg+0.04+0.30, mb+0.03+0.12/my); + leg = new TLegend(lmarg+0.04, mb+0.03, 
lmarg+0.04+0.30, mb+0.03+0.12/my); else - leg = new TLegend(lmarg+0.04, mb+0.03, lmarg+0.04+0.30, mb+0.03+0.2/my); + leg = new TLegend(lmarg+0.04, mb+0.03, lmarg+0.04+0.30, mb+0.03+0.2/my); string datalab = (string) "Data " + datahistos[0].gettitle(); if (datahistos[0].getexperiment() != "") - datalab = datahistos[0].getexperiment() + " " + datalab; + datalab = datahistos[0].getexperiment() + " " + datalab; if (!opts.onlytheory) - { - leg->AddEntry(data, datalab.c_str(), "pl"); - leg->AddEntry(data, "#delta uncorrelated", "pe"); - leg->AddEntry(datatot, "#delta total", "f"); - } + { + leg->AddEntry(data, datalab.c_str(), "pl"); + leg->AddEntry(data, "#delta uncorrelated", "pe"); + leg->AddEntry(datatot, "#delta total", "f"); + } TH1 *mark = (TH1F*)datahistos[0].getth()->Clone(); mark->SetMarkerStyle(opts.markers[labels[0]]); mark->SetMarkerSize(2 * opts.resolution / 1200); @@ -443,23 +456,23 @@ TCanvas * DataPainter(int dataindex, int subplotindex) dash->SetLineStyle(2); dash->SetLineWidth(opts.lwidth); if (datahistos.size() == 1) - { - cont->SetLineColor(opts.colors[labels[0]]); - dash->SetLineColor(opts.colors[labels[0]]); - mark->SetMarkerColor(opts.colors[labels[0]]); - } + { + cont->SetLineColor(opts.colors[labels[0]]); + dash->SetLineColor(opts.colors[labels[0]]); + mark->SetMarkerColor(opts.colors[labels[0]]); + } if (opts.onlytheory) - leg->AddEntry((TObject*)0, opts.theorylabel.c_str(), ""); + leg->AddEntry((TObject*)0, opts.theorylabel.c_str(), ""); else - { - if (opts.nothshifts) - if ((opts.points && !datahistos[0].bincenter()) || datahistos[0].nbins() == 1) - leg->AddEntry(mark, opts.theorylabel.c_str(), "p"); - else - leg->AddEntry(cont, opts.theorylabel.c_str(), "l"); - if (!opts.nothshifts) - leg->AddEntry(dash, (opts.theorylabel + " + shifts").c_str(), "l"); - } + { + if (opts.nothshifts) + if ((opts.points && !datahistos[0].bincenter()) || datahistos[0].nbins() == 1) + leg->AddEntry(mark, opts.theorylabel.c_str(), "p"); + else + leg->AddEntry(cont, opts.theorylabel.c_str(), "l"); + if (!opts.nothshifts) + leg->AddEntry(dash, (opts.theorylabel + " + shifts").c_str(), "l"); + } leg1 = (TPaveText*)leg; } leg1->SetFillColor(0); @@ -489,133 +502,133 @@ TCanvas * DataPainter(int dataindex, int subplotindex) vector <range> thranges = historanges((*it).getthshift()); for (vector<range>::iterator r = thranges.begin(); r != thranges.end(); r++) - { - (*it).getthshift()->SetAxisRange((*r).lowedge, (*r).upedge); - if (!opts.onlytheory) - if (!opts.nothshifts) - if ((!opts.points || (*it).bincenter()) && (*it).nbins() > 1) //plot as continous line - (*it).Draw((TH1F*)(*it).getthshift()->Clone(), "LX same"); - else - (*it).Draw((TH1F*)(*it).getthshift()->Clone(), "hist ][ same"); //plot as histogram in points mode - } + { + (*it).getthshift()->SetAxisRange((*r).lowedge, (*r).upedge); + if (!opts.onlytheory) + if (!opts.nothshifts) + if ((!opts.points || (*it).bincenter()) && (*it).nbins() > 1) //plot as continous line + (*it).Draw((TH1F*)(*it).getthshift()->Clone(), "LX same"); + else + (*it).Draw((TH1F*)(*it).getthshift()->Clone(), "hist ][ same"); //plot as histogram in points mode + } (*it).getthshift()->GetXaxis()->SetRange((*it).getlowrange(), (*it).getuprange()); (*it).getth()->SetLineColor(opts.colors[labels[it-datahistos.begin()]]); (*it).getth()->SetLineWidth(opts.lwidth); if (opts.bw) - (*it).getth()->SetLineStyle(opts.lstyles[labels[it-datahistos.begin()]]); + (*it).getth()->SetLineStyle(opts.lstyles[labels[it-datahistos.begin()]]); if ((!opts.points || (*it).bincenter()) && 
(*it).nbins() > 1) //plot as continous line with dashed error bands - { - for (vector<range>::iterator r = thranges.begin(); r != thranges.end(); r++) - { - (*it).getth()->SetAxisRange((*r).lowedge, (*r).upedge); - (*it).Draw((TH1F*)(*it).getth()->Clone(), "LX same"); - } - (*it).getth()->GetXaxis()->SetRange((*it).getlowrange(), (*it).getuprange()); - - if (opts.therr && !opts.noupband) - { - (*it).gettherr()->SetLineColor(opts.colors[labels[it-datahistos.begin()]]); - if (opts.bw) - (*it).gettherr()->SetLineStyle(opts.lstyles[labels[it-datahistos.begin()]]); - (*it).gettherr()->SetMarkerSize(0); - (*it).gettherr()->SetFillColor(opts.colors[labels[it-datahistos.begin()]]); - (*it).gettherr()->SetFillStyle(opts.styles[labels[it-datahistos.begin()]]); - float toterr = 0; - for (int b = 1; b <= (*it).gettherr()->GetNbinsX(); b++) - toterr += (*it).gettherr()->GetBinError(b); - if (toterr > 0) - { - for (vector<range>::iterator r = thranges.begin(); r != thranges.end(); r++) - { - (*it).gettherr()->SetAxisRange((*r).lowedge, (*r).upedge); - (*it).Draw((TH1F*)(*it).gettherr()->Clone(), "E3L same"); - } - (*it).gettherr()->GetXaxis()->SetRange((*it).getlowrange(), (*it).getuprange()); - } - } - } + { + for (vector<range>::iterator r = thranges.begin(); r != thranges.end(); r++) + { + (*it).getth()->SetAxisRange((*r).lowedge, (*r).upedge); + (*it).Draw((TH1F*)(*it).getth()->Clone(), "LX same"); + } + (*it).getth()->GetXaxis()->SetRange((*it).getlowrange(), (*it).getuprange()); + + if (opts.therr && !opts.noupband) + { + (*it).gettherr()->SetLineColor(opts.colors[labels[it-datahistos.begin()]]); + if (opts.bw) + (*it).gettherr()->SetLineStyle(opts.lstyles[labels[it-datahistos.begin()]]); + (*it).gettherr()->SetMarkerSize(0); + (*it).gettherr()->SetFillColor(opts.colors[labels[it-datahistos.begin()]]); + (*it).gettherr()->SetFillStyle(opts.styles[labels[it-datahistos.begin()]]); + float toterr = 0; + for (int b = 1; b <= (*it).gettherr()->GetNbinsX(); b++) + toterr += (*it).gettherr()->GetBinError(b); + if (toterr > 0) + { + for (vector<range>::iterator r = thranges.begin(); r != thranges.end(); r++) + { + (*it).gettherr()->SetAxisRange((*r).lowedge, (*r).upedge); + (*it).Draw((TH1F*)(*it).gettherr()->Clone(), "E3L same"); + } + (*it).gettherr()->GetXaxis()->SetRange((*it).getlowrange(), (*it).getuprange()); + } + } + } else //plot as displaced points with vertical error line - { - gtherr->SetMarkerStyle(opts.markers[labels[it-datahistos.begin()]]); - gtherr->SetLineColor(opts.colors[labels[it-datahistos.begin()]]); - gtherr->SetMarkerSize(2 * opts.resolution / 1200); - gtherr->SetMarkerColor(opts.colors[labels[it-datahistos.begin()]]); - for (int b = 0; b < gtherr->GetN(); b++) - { - //Set X error to 0 - gtherr->SetPointEXlow(b, 0); - gtherr->SetPointEXhigh(b, 0); - - //displace horizontally - double x, y; - gtherr->GetPoint(b, x, y); - float width = (*it).getth()->GetBinWidth(b + 1); - float lowedge = (*it).getth()->GetBinLowEdge(b + 1); - x = lowedge + (it - datahistos.begin() + 1) * width/(datahistos.size() + 1); - gtherr->SetPoint(b, x, y); - - //Set Y error - float errup, errdown; - if (opts.therr && !opts.noupband) - { - errup = (*it).gettherrup()->GetBinContent(b + 1) - (*it).getth()->GetBinContent(b + 1); - errdown = (*it).getth()->GetBinContent(b + 1) - (*it).gettherrdown()->GetBinContent(b + 1); - } - else - { - errup = 0; - errdown = 0; - } - gtherr->SetPointEYhigh(b, errup); - gtherr->SetPointEYlow(b, errdown); - } - gtherr->Draw("P same"); - } + { + 
gtherr->SetMarkerStyle(opts.markers[labels[it-datahistos.begin()]]); + gtherr->SetLineColor(opts.colors[labels[it-datahistos.begin()]]); + gtherr->SetMarkerSize(2 * opts.resolution / 1200); + gtherr->SetMarkerColor(opts.colors[labels[it-datahistos.begin()]]); + for (int b = 0; b < gtherr->GetN(); b++) + { + //Set X error to 0 + gtherr->SetPointEXlow(b, 0); + gtherr->SetPointEXhigh(b, 0); + + //displace horizontally + double x, y; + gtherr->GetPoint(b, x, y); + float width = (*it).getth()->GetBinWidth(b + 1); + float lowedge = (*it).getth()->GetBinLowEdge(b + 1); + x = lowedge + (it - datahistos.begin() + 1) * width/(datahistos.size() + 1); + gtherr->SetPoint(b, x, y); + + //Set Y error + float errup, errdown; + if (opts.therr && !opts.noupband) + { + errup = (*it).gettherrup()->GetBinContent(b + 1) - (*it).getth()->GetBinContent(b + 1); + errdown = (*it).getth()->GetBinContent(b + 1) - (*it).gettherrdown()->GetBinContent(b + 1); + } + else + { + errup = 0; + errdown = 0; + } + gtherr->SetPointEYhigh(b, errup); + gtherr->SetPointEYlow(b, errdown); + } + gtherr->Draw("P same"); + } if (datahistos.size() == 1) - { - leg2->AddEntry((TObject*)0, (labels[it-datahistos.begin()]).c_str(), ""); - if (opts.therr && !opts.noupband && (*it).HasTherr()) - if ((!opts.points || (*it).bincenter()) && (*it).nbins() > 1) - leg2->AddEntry((*it).gettherr(), "Theory uncertainty", "lf"); - else - leg2->AddEntry((*it).gettherr(), "Theory uncertainty", "pe"); - } + { + leg2->AddEntry((TObject*)0, (labels[it-datahistos.begin()]).c_str(), ""); + if (opts.therr && !opts.noupband && (*it).HasTherr()) + if ((!opts.points || (*it).bincenter()) && (*it).nbins() > 1) + leg2->AddEntry((*it).gettherr(), "Theory uncertainty", "lf"); + else + leg2->AddEntry((*it).gettherr(), "Theory uncertainty", "pe"); + } else - if ((!opts.points || (*it).bincenter()) && (*it).nbins() > 1) - if (opts.therr && !opts.noupband && (*it).HasTherr()) - leg2->AddEntry((*it).gettherr(), (labels[it-datahistos.begin()]).c_str(), "lf"); - else - leg2->AddEntry((*it).getth(), (labels[it-datahistos.begin()]).c_str(), "l"); - else - if (opts.therr && !opts.noupband && (*it).HasTherr()) - leg2->AddEntry(gtherr, (labels[it-datahistos.begin()]).c_str(), "pe"); - else - leg2->AddEntry(gtherr, (labels[it-datahistos.begin()]).c_str(), "p"); + if ((!opts.points || (*it).bincenter()) && (*it).nbins() > 1) + if (opts.therr && !opts.noupband && (*it).HasTherr()) + leg2->AddEntry((*it).gettherr(), (labels[it-datahistos.begin()]).c_str(), "lf"); + else + leg2->AddEntry((*it).getth(), (labels[it-datahistos.begin()]).c_str(), "l"); + else + if (opts.therr && !opts.noupband && (*it).HasTherr()) + leg2->AddEntry(gtherr, (labels[it-datahistos.begin()]).c_str(), "pe"); + else + leg2->AddEntry(gtherr, (labels[it-datahistos.begin()]).c_str(), "p"); } //draw theory error borders if (opts.therr && !opts.noupband) for (vector <Subplot>::iterator it = datahistos.begin(); it != datahistos.end(); it++) { - (*it).gettherrup()->SetLineColor(opts.colors[labels[it-datahistos.begin()]]); - (*it).gettherrdown()->SetLineColor(opts.colors[labels[it-datahistos.begin()]]); - (*it).gettherrup()->SetLineWidth(opts.lwidth); - (*it).gettherrdown()->SetLineWidth(opts.lwidth); - if ((!opts.points || (*it).bincenter()) && (*it).nbins() > 1) - { - vector <range> thranges = historanges((*it).getth()); - for (vector<range>::iterator r = thranges.begin(); r != thranges.end(); r++) - { - (*it).gettherrup()->SetAxisRange((*r).lowedge, (*r).upedge); - (*it).Draw((TH1F*)(*it).gettherrup()->Clone(), "LX 
same"); - (*it).gettherrdown()->SetAxisRange((*r).lowedge, (*r).upedge); - (*it).Draw((TH1F*)(*it).gettherrdown()->Clone(), "LX same"); - } - (*it).gettherrup()->GetXaxis()->SetRange((*it).getlowrange(), (*it).getuprange()); - (*it).gettherrdown()->GetXaxis()->SetRange((*it).getlowrange(), (*it).getuprange()); - } + (*it).gettherrup()->SetLineColor(opts.colors[labels[it-datahistos.begin()]]); + (*it).gettherrdown()->SetLineColor(opts.colors[labels[it-datahistos.begin()]]); + (*it).gettherrup()->SetLineWidth(opts.lwidth); + (*it).gettherrdown()->SetLineWidth(opts.lwidth); + if ((!opts.points || (*it).bincenter()) && (*it).nbins() > 1) + { + vector <range> thranges = historanges((*it).getth()); + for (vector<range>::iterator r = thranges.begin(); r != thranges.end(); r++) + { + (*it).gettherrup()->SetAxisRange((*r).lowedge, (*r).upedge); + (*it).Draw((TH1F*)(*it).gettherrup()->Clone(), "LX same"); + (*it).gettherrdown()->SetAxisRange((*r).lowedge, (*r).upedge); + (*it).Draw((TH1F*)(*it).gettherrdown()->Clone(), "LX same"); + } + (*it).gettherrup()->GetXaxis()->SetRange((*it).getlowrange(), (*it).getuprange()); + (*it).gettherrdown()->GetXaxis()->SetRange((*it).getlowrange(), (*it).getuprange()); + } } data->SetStats(0); @@ -655,47 +668,47 @@ TCanvas * DataPainter(int dataindex, int subplotindex) for (vector <Subplot>::iterator it = datahistos.begin(); it != datahistos.end(); it++) { if (opts.diff) - { - (*it).getrth()->Add(refdata, -1); - (*it).getrthshift()->Add(refdata, -1); - (*it).getrtherr()->Add(refdata, -1); - (*it).getrtherrup()->Add(refdata, -1); - (*it).getrtherrdown()->Add(refdata, -1); - } + { + (*it).getrth()->Add(refdata, -1); + (*it).getrthshift()->Add(refdata, -1); + (*it).getrtherr()->Add(refdata, -1); + (*it).getrtherrup()->Add(refdata, -1); + (*it).getrtherrdown()->Add(refdata, -1); + } else - { - if (opts.onlytheory && opts.threlerr) - { - (*it).getrth()->Divide((*it).getth()); - (*it).getrthshift()->Divide((*it).getth()); - (*it).getrtherr()->Divide((*it).getth()); - (*it).getrtherrup()->Divide((*it).getth()); - (*it).getrtherrdown()->Divide((*it).getth()); - } - else - { - (*it).getrth()->Divide(refdata); - (*it).getrthshift()->Divide(refdata); - (*it).getrtherr()->Divide(refdata); - (*it).getrtherrup()->Divide(refdata); - (*it).getrtherrdown()->Divide(refdata); - } - } + { + if (opts.onlytheory && opts.threlerr) + { + (*it).getrth()->Divide((*it).getth()); + (*it).getrthshift()->Divide((*it).getth()); + (*it).getrtherr()->Divide((*it).getth()); + (*it).getrtherrup()->Divide((*it).getth()); + (*it).getrtherrdown()->Divide((*it).getth()); + } + else + { + (*it).getrth()->Divide(refdata); + (*it).getrthshift()->Divide(refdata); + (*it).getrtherr()->Divide(refdata); + (*it).getrtherrup()->Divide(refdata); + (*it).getrtherrdown()->Divide(refdata); + } + } for (int b = 1; b <= (*it).getrth()->GetNbinsX(); b++) - (*it).getrth()->SetBinError(b, 0); + (*it).getrth()->SetBinError(b, 0); for (int b = 1; b <= (*it).getrthshift()->GetNbinsX(); b++) - (*it).getrthshift()->SetBinError(b, 0); + (*it).getrthshift()->SetBinError(b, 0); for (int b = 1; b <= (*it).getrtherr()->GetNbinsX(); b++) - (*it).getrtherr()->SetBinError(b, ((*it).getrtherrup()->GetBinContent(b) - (*it).getrtherrdown()->GetBinContent(b)) / 2 ); + (*it).getrtherr()->SetBinError(b, ((*it).getrtherrup()->GetBinContent(b) - (*it).getrtherrdown()->GetBinContent(b)) / 2 ); for (int b = 1; b <= (*it).getrtherrup()->GetNbinsX(); b++) - (*it).getrtherrup()->SetBinError(b, 0); + (*it).getrtherrup()->SetBinError(b, 0); for 
(int b = 1; b <= (*it).getrtherrdown()->GetNbinsX(); b++) - (*it).getrtherrdown()->SetBinError(b, 0); + (*it).getrtherrdown()->SetBinError(b, 0); } //create template histogram for axis - TH1F *r_templ = new TH1F(((string) "r_templ_" + cnvname).c_str(), "", nbins, axmin, axmax); + TH1F *r_templ = new TH1F(("r_templ_"+cnvname).c_str(), "", nbins, axmin, axmax); r_templ->GetYaxis()->SetLabelFont(62); r_templ->GetYaxis()->SetTitleFont(62); @@ -707,7 +720,7 @@ TCanvas * DataPainter(int dataindex, int subplotindex) { r_templ->GetXaxis()->SetNoExponent(); if (axmax/axmin < 90) - r_templ->GetXaxis()->SetMoreLogLabels(); + r_templ->GetXaxis()->SetMoreLogLabels(); } r_templ->GetYaxis()->SetLabelSize(txtsize/ry); @@ -717,20 +730,20 @@ TCanvas * DataPainter(int dataindex, int subplotindex) if (opts.diff) { if (opts.onlytheory) - ytitle = "Difference"; + ytitle = "Difference"; else if (opts.ratiototheory) - ytitle = (string) "Data-" + opts.theorylabel; + ytitle = (string) "Data-" + opts.theorylabel; else - ytitle = "Theory-Data"; + ytitle = "Theory-Data"; } else { if (opts.onlytheory) - ytitle = "Ratio"; + ytitle = "Ratio"; else if (opts.ratiototheory) - ytitle = (string) "Data/" + opts.theorylabel; + ytitle = (string) "Data/" + opts.theorylabel; else - ytitle = "Theory/Data"; + ytitle = "Theory/Data"; } r_templ->SetYTitle(ytitle.c_str()); @@ -756,15 +769,15 @@ TCanvas * DataPainter(int dataindex, int subplotindex) for (vector <Subplot>::iterator it = datahistos.begin(); it != datahistos.end(); it++) { if (opts.therr) - { - mx = max(mx, (float)((*it).getrtherrup()->GetMaximum())); - mx = max(mx, (float)((*it).getrtherrdown()->GetMaximum())); - } + { + mx = max(mx, (float)((*it).getrtherrup()->GetMaximum())); + mx = max(mx, (float)((*it).getrtherrdown()->GetMaximum())); + } else - mx = max(mx, (float)((*it).getrth()->GetMaximum())); + mx = max(mx, (float)((*it).getrth()->GetMaximum())); if (!opts.threepanels) - if (!opts.onlytheory) - mx = max(mx, (float)((*it).getrthshift()->GetMaximum())); + if (!opts.onlytheory) + mx = max(mx, (float)((*it).getrthshift()->GetMaximum())); } mn = mx; @@ -775,15 +788,15 @@ TCanvas * DataPainter(int dataindex, int subplotindex) for (vector <Subplot>::iterator it = datahistos.begin(); it != datahistos.end(); it++) { if (opts.therr) - { - mn = min(mn, (float)(hmin((*it).getrtherrdown()))); - mn = min(mn, (float)(hmin((*it).getrtherrup()))); - } + { + mn = min(mn, (float)(hmin((*it).getrtherrdown()))); + mn = min(mn, (float)(hmin((*it).getrtherrup()))); + } else - mn = min(mn, (float)(hmin((*it).getrth()))); + mn = min(mn, (float)(hmin((*it).getrth()))); if (!opts.threepanels) - if (!opts.onlytheory) - mn = min(mn, (float)(hmin((*it).getrthshift()))); + if (!opts.onlytheory) + mn = min(mn, (float)(hmin((*it).getrthshift()))); } float delta = mx - mn; if (datahistos[0].getymaxr() != 0) @@ -807,7 +820,7 @@ TCanvas * DataPainter(int dataindex, int subplotindex) { r_datatot->SetAxisRange((*r).lowedge, (*r).upedge); if (!opts.onlytheory) - datahistos[0].Draw((TH1F*)r_datatot->Clone(), "E3 same"); + datahistos[0].Draw((TH1F*)r_datatot->Clone(), "E3 same"); } r_datatot->GetXaxis()->SetRange(datahistos[0].getlowrange(), datahistos[0].getuprange()); @@ -835,132 +848,132 @@ TCanvas * DataPainter(int dataindex, int subplotindex) (*it).getrth()->SetLineColor(opts.colors[labels[it-datahistos.begin()]]); (*it).getrth()->SetLineWidth(opts.lwidth); if (opts.bw) - (*it).getrth()->SetLineStyle(opts.lstyles[labels[it-datahistos.begin()]]); + 
(*it).getrth()->SetLineStyle(opts.lstyles[labels[it-datahistos.begin()]]); vector <range> rthranges = historanges((*it).getrthshift()); if (!opts.threepanels) - { - for (vector<range>::iterator r = rthranges.begin(); r != rthranges.end(); r++) - { - (*it).getrthshift()->SetAxisRange((*r).lowedge, (*r).upedge); - if (!opts.onlytheory) - if (!opts.multitheory || (it - datahistos.begin() == 0)) //if in multitheory mode, plot only the first theory - if (!opts.nothshifts) - if ((!opts.points || (*it).bincenter()) && (*it).nbins() > 1) - (*it).Draw((TH1F*)(*it).getrthshift()->Clone(), "LX same"); - else - (*it).Draw((TH1F*)(*it).getrthshift()->Clone(), "hist ][ same"); - - } - (*it).getrthshift()->GetXaxis()->SetRange((*it).getlowrange(), (*it).getuprange()); - } - + { + for (vector<range>::iterator r = rthranges.begin(); r != rthranges.end(); r++) + { + (*it).getrthshift()->SetAxisRange((*r).lowedge, (*r).upedge); + if (!opts.onlytheory) + if (!opts.multitheory || (it - datahistos.begin() == 0)) //if in multitheory mode, plot only the first theory + if (!opts.nothshifts) + if ((!opts.points || (*it).bincenter()) && (*it).nbins() > 1) + (*it).Draw((TH1F*)(*it).getrthshift()->Clone(), "LX same"); + else + (*it).Draw((TH1F*)(*it).getrthshift()->Clone(), "hist ][ same"); + + } + (*it).getrthshift()->GetXaxis()->SetRange((*it).getlowrange(), (*it).getuprange()); + } + if ((!opts.points || (*it).bincenter()) && (*it).nbins() > 1) //plot as continous line with dashed error bands - { - for (vector<range>::iterator r = rthranges.begin(); r != rthranges.end(); r++) - { - (*it).getrth()->SetAxisRange((*r).lowedge, (*r).upedge); - if (!opts.multitheory || (it - datahistos.begin() == 0)) //if in multitheory mode, plot only the first theory - (*it).Draw((TH1F*)(*it).getrth()->Clone(), "LX same"); - } - (*it).getrth()->GetXaxis()->SetRange((*it).getlowrange(), (*it).getuprange()); - if (opts.therr) - { - (*it).getrtherr()->SetLineColor(opts.colors[labels[it-datahistos.begin()]]); - (*it).getrtherr()->SetMarkerSize(0); - (*it).getrtherr()->SetFillColor(opts.colors[labels[it-datahistos.begin()]]); - (*it).getrtherr()->SetFillStyle(opts.styles[labels[it-datahistos.begin()]]); - float toterr = 0; - for (int b = 1; b <= (*it).gettherr()->GetNbinsX(); b++) - toterr += (*it).gettherr()->GetBinError(b); - if (toterr > 0) - { - for (vector<range>::iterator r = rthranges.begin(); r != rthranges.end(); r++) - { - (*it).getrtherr()->SetAxisRange((*r).lowedge, (*r).upedge); - if (!opts.multitheory || (it - datahistos.begin() == 0)) //if in multitheory mode, plot only the first theory - (*it).Draw((TH1F*)(*it).getrtherr()->Clone(), "E3L same"); - } - (*it).getrtherr()->GetXaxis()->SetRange((*it).getlowrange(), (*it).getuprange()); - } - } - } + { + for (vector<range>::iterator r = rthranges.begin(); r != rthranges.end(); r++) + { + (*it).getrth()->SetAxisRange((*r).lowedge, (*r).upedge); + if (!opts.multitheory || (it - datahistos.begin() == 0)) //if in multitheory mode, plot only the first theory + (*it).Draw((TH1F*)(*it).getrth()->Clone(), "LX same"); + } + (*it).getrth()->GetXaxis()->SetRange((*it).getlowrange(), (*it).getuprange()); + if (opts.therr) + { + (*it).getrtherr()->SetLineColor(opts.colors[labels[it-datahistos.begin()]]); + (*it).getrtherr()->SetMarkerSize(0); + (*it).getrtherr()->SetFillColor(opts.colors[labels[it-datahistos.begin()]]); + (*it).getrtherr()->SetFillStyle(opts.styles[labels[it-datahistos.begin()]]); + float toterr = 0; + for (int b = 1; b <= (*it).gettherr()->GetNbinsX(); b++) + toterr += 
(*it).gettherr()->GetBinError(b); + if (toterr > 0) + { + for (vector<range>::iterator r = rthranges.begin(); r != rthranges.end(); r++) + { + (*it).getrtherr()->SetAxisRange((*r).lowedge, (*r).upedge); + if (!opts.multitheory || (it - datahistos.begin() == 0)) //if in multitheory mode, plot only the first theory + (*it).Draw((TH1F*)(*it).getrtherr()->Clone(), "E3L same"); + } + (*it).getrtherr()->GetXaxis()->SetRange((*it).getlowrange(), (*it).getuprange()); + } + } + } else //plot as displaced TGraphs - { - TGraphAsymmErrors * r_gtherr = new TGraphAsymmErrors((*it).getrth()); - r_gtherr->SetMarkerStyle(opts.markers[labels[it-datahistos.begin()]]); - r_gtherr->SetLineColor(opts.colors[labels[it-datahistos.begin()]]); - r_gtherr->SetMarkerSize(2 * opts.resolution / 1200); - r_gtherr->SetMarkerColor(opts.colors[labels[it-datahistos.begin()]]); - for (int b = 0; b < r_gtherr->GetN(); b++) - { - //Set X error to 0 - r_gtherr->SetPointEXlow(b, 0); - r_gtherr->SetPointEXhigh(b, 0); - - //displace horizontally - double x, y; - r_gtherr->GetPoint(b, x, y); - float width = (*it).getrth()->GetBinWidth(b + 1); - float lowedge = (*it).getrth()->GetBinLowEdge(b + 1); - x = lowedge + (it - datahistos.begin() + 1) * width/(datahistos.size() + 1); - r_gtherr->SetPoint(b, x, y); - //Set Y error - float errup, errdown; - if (opts.therr) - { - errup = (*it).getrtherrup()->GetBinContent(b + 1) - (*it).getrth()->GetBinContent(b + 1); - errdown = (*it).getrth()->GetBinContent(b + 1) - (*it).getrtherrdown()->GetBinContent(b + 1); - } - else - { - errup = 0; - errdown = 0; - } - r_gtherr->SetPointEYhigh(b, errup); - r_gtherr->SetPointEYlow(b, errdown); - } - if (!opts.multitheory || (it - datahistos.begin() == 0)) //if in multitheory mode, plot only the first theory - r_gtherr->Draw("P same"); - } + { + TGraphAsymmErrors * r_gtherr = new TGraphAsymmErrors((*it).getrth()); + r_gtherr->SetMarkerStyle(opts.markers[labels[it-datahistos.begin()]]); + r_gtherr->SetLineColor(opts.colors[labels[it-datahistos.begin()]]); + r_gtherr->SetMarkerSize(2 * opts.resolution / 1200); + r_gtherr->SetMarkerColor(opts.colors[labels[it-datahistos.begin()]]); + for (int b = 0; b < r_gtherr->GetN(); b++) + { + //Set X error to 0 + r_gtherr->SetPointEXlow(b, 0); + r_gtherr->SetPointEXhigh(b, 0); + + //displace horizontally + double x, y; + r_gtherr->GetPoint(b, x, y); + float width = (*it).getrth()->GetBinWidth(b + 1); + float lowedge = (*it).getrth()->GetBinLowEdge(b + 1); + x = lowedge + (it - datahistos.begin() + 1) * width/(datahistos.size() + 1); + r_gtherr->SetPoint(b, x, y); + //Set Y error + float errup, errdown; + if (opts.therr) + { + errup = (*it).getrtherrup()->GetBinContent(b + 1) - (*it).getrth()->GetBinContent(b + 1); + errdown = (*it).getrth()->GetBinContent(b + 1) - (*it).getrtherrdown()->GetBinContent(b + 1); + } + else + { + errup = 0; + errdown = 0; + } + r_gtherr->SetPointEYhigh(b, errup); + r_gtherr->SetPointEYlow(b, errdown); + } + if (!opts.multitheory || (it - datahistos.begin() == 0)) //if in multitheory mode, plot only the first theory + r_gtherr->Draw("P same"); + } if (opts.multitheory && (it - datahistos.begin() == 0)) - { - if ((!opts.points || (*it).bincenter()) && (*it).nbins() > 1) - if (opts.therr && (*it).HasTherr()) - legr->AddEntry((*it).getrtherr(), (labels[it-datahistos.begin()]).c_str(), "lf"); - else - legr->AddEntry((*it).getrth(), (labels[it-datahistos.begin()]).c_str(), "l"); - else - if (opts.therr && (*it).HasTherr()) - legr->AddEntry((*it).getrth(), (labels[it-datahistos.begin()]).c_str(), 
"pe"); - else - leg2->AddEntry((*it).getrth(), (labels[it-datahistos.begin()]).c_str(), "p"); - } + { + if ((!opts.points || (*it).bincenter()) && (*it).nbins() > 1) + if (opts.therr && (*it).HasTherr()) + legr->AddEntry((*it).getrtherr(), (labels[it-datahistos.begin()]).c_str(), "lf"); + else + legr->AddEntry((*it).getrth(), (labels[it-datahistos.begin()]).c_str(), "l"); + else + if (opts.therr && (*it).HasTherr()) + legr->AddEntry((*it).getrth(), (labels[it-datahistos.begin()]).c_str(), "pe"); + else + leg2->AddEntry((*it).getrth(), (labels[it-datahistos.begin()]).c_str(), "p"); + } } - + //draw theory error borders if (opts.therr) for (vector <Subplot>::iterator it = datahistos.begin(); it != datahistos.end(); it++) { - (*it).getrtherrup()->SetLineColor(opts.colors[labels[it-datahistos.begin()]]); - (*it).getrtherrdown()->SetLineColor(opts.colors[labels[it-datahistos.begin()]]); - (*it).getrtherrup()->SetLineWidth(opts.lwidth); - (*it).getrtherrdown()->SetLineWidth(opts.lwidth); - if ((!opts.points || (*it).bincenter()) && (*it).nbins() > 1) - { - vector <range> rthranges = historanges((*it).getth()); - for (vector<range>::iterator r = rthranges.begin(); r != rthranges.end(); r++) - { - (*it).getrtherrup()->SetAxisRange((*r).lowedge, (*r).upedge); - if (!opts.multitheory || (it - datahistos.begin() == 0)) //if in multitheory mode, plot only the first theory - (*it).Draw((TH1F*)(*it).getrtherrup()->Clone(), "LX same"); - (*it).getrtherrdown()->SetAxisRange((*r).lowedge, (*r).upedge); - if (!opts.multitheory || (it - datahistos.begin() == 0)) //if in multitheory mode, plot only the first theory - (*it).Draw((TH1F*)(*it).getrtherrdown()->Clone(), "LX same"); - } - (*it).getrtherrup()->GetXaxis()->SetRange((*it).getlowrange(), (*it).getuprange()); - (*it).getrtherrdown()->GetXaxis()->SetRange((*it).getlowrange(), (*it).getuprange()); - } + (*it).getrtherrup()->SetLineColor(opts.colors[labels[it-datahistos.begin()]]); + (*it).getrtherrdown()->SetLineColor(opts.colors[labels[it-datahistos.begin()]]); + (*it).getrtherrup()->SetLineWidth(opts.lwidth); + (*it).getrtherrdown()->SetLineWidth(opts.lwidth); + if ((!opts.points || (*it).bincenter()) && (*it).nbins() > 1) + { + vector <range> rthranges = historanges((*it).getth()); + for (vector<range>::iterator r = rthranges.begin(); r != rthranges.end(); r++) + { + (*it).getrtherrup()->SetAxisRange((*r).lowedge, (*r).upedge); + if (!opts.multitheory || (it - datahistos.begin() == 0)) //if in multitheory mode, plot only the first theory + (*it).Draw((TH1F*)(*it).getrtherrup()->Clone(), "LX same"); + (*it).getrtherrdown()->SetAxisRange((*r).lowedge, (*r).upedge); + if (!opts.multitheory || (it - datahistos.begin() == 0)) //if in multitheory mode, plot only the first theory + (*it).Draw((TH1F*)(*it).getrtherrdown()->Clone(), "LX same"); + } + (*it).getrtherrup()->GetXaxis()->SetRange((*it).getlowrange(), (*it).getuprange()); + (*it).getrtherrdown()->GetXaxis()->SetRange((*it).getlowrange(), (*it).getuprange()); + } } //Draw data points @@ -982,7 +995,7 @@ TCanvas * DataPainter(int dataindex, int subplotindex) { Shifts->cd(); if (datahistos[0].getlogx()) - Shifts->SetLogx(); + Shifts->SetLogx(); //Set up template histogram for axis r_templ->GetYaxis()->SetLabelSize(txtsize/sy); @@ -995,19 +1008,19 @@ TCanvas * DataPainter(int dataindex, int subplotindex) //draw data vector <range> rdtranges = historanges(r_datatot); for (vector<range>::iterator r = rdtranges.begin(); r != rdtranges.end(); r++) - { - r_datatot->SetAxisRange((*r).lowedge, (*r).upedge); - 
if (!opts.onlytheory) - datahistos[0].Draw((TH1F*)r_datatot->Clone(), "E3 same"); - } + { + r_datatot->SetAxisRange((*r).lowedge, (*r).upedge); + if (!opts.onlytheory) + datahistos[0].Draw((TH1F*)r_datatot->Clone(), "E3 same"); + } r_datatot->GetXaxis()->SetRange(datahistos[0].getlowrange(), datahistos[0].getuprange()); //plot lines at 1 (or 0 for diff plots) TLine *r_ref; if (opts.diff) - r_ref = new TLine(r_templ->GetBinLowEdge(r_templ->GetXaxis()->GetFirst()), 0, r_templ->GetXaxis()->GetBinUpEdge(r_templ->GetXaxis()->GetLast()), 0); + r_ref = new TLine(r_templ->GetBinLowEdge(r_templ->GetXaxis()->GetFirst()), 0, r_templ->GetXaxis()->GetBinUpEdge(r_templ->GetXaxis()->GetLast()), 0); else - r_ref = new TLine(r_templ->GetBinLowEdge(r_templ->GetXaxis()->GetFirst()), 1, r_templ->GetXaxis()->GetBinUpEdge(r_templ->GetXaxis()->GetLast()), 1); + r_ref = new TLine(r_templ->GetBinLowEdge(r_templ->GetXaxis()->GetFirst()), 1, r_templ->GetXaxis()->GetBinUpEdge(r_templ->GetXaxis()->GetLast()), 1); r_ref->SetLineStyle(2); r_ref->SetLineStyle(1); r_ref->Draw(); @@ -1016,144 +1029,144 @@ TCanvas * DataPainter(int dataindex, int subplotindex) //Draw ratios for (vector <Subplot>::iterator it = datahistos.begin(); it != datahistos.end(); it++) - { - (*it).getrthshift()->SetLineColor(opts.colors[labels[it-datahistos.begin()]]); - (*it).getrthshift()->SetLineStyle(2); - (*it).getrthshift()->SetLineWidth(opts.lwidth); - - (*it).getrth()->SetLineColor(opts.colors[labels[it-datahistos.begin()]]); - (*it).getrth()->SetLineWidth(opts.lwidth); - if (opts.bw) - (*it).getrth()->SetLineStyle(opts.lstyles[labels[it-datahistos.begin()]]); - - vector <range> rthranges = historanges((*it).getrthshift()); - if (!opts.threepanels) - { - for (vector<range>::iterator r = rthranges.begin(); r != rthranges.end(); r++) - { - (*it).getrthshift()->SetAxisRange((*r).lowedge, (*r).upedge); - if (!opts.onlytheory) - if (!opts.multitheory || (it - datahistos.begin() == 1)) //if in multitheory mode, plot only the second theory - if (!opts.nothshifts) - if ((!opts.points || (*it).bincenter()) && (*it).nbins() > 1) - (*it).Draw((TH1F*)(*it).getrthshift()->Clone(), "LX same"); - else - (*it).Draw((TH1F*)(*it).getrthshift()->Clone(), "hist ][ same"); - } - (*it).getrthshift()->GetXaxis()->SetRange((*it).getlowrange(), (*it).getuprange()); - } - - if ((!opts.points || (*it).bincenter()) && (*it).nbins() > 1) //plot as continous line with dashed error bands - { - for (vector<range>::iterator r = rthranges.begin(); r != rthranges.end(); r++) - { - (*it).getrth()->SetAxisRange((*r).lowedge, (*r).upedge); - if (!opts.multitheory || (it - datahistos.begin() == 1)) //if in multitheory mode, plot only the second theory - (*it).Draw((TH1F*)(*it).getrth()->Clone(), "LX same"); - } - (*it).getrth()->GetXaxis()->SetRange((*it).getlowrange(), (*it).getuprange()); - if (opts.therr) - { - (*it).getrtherr()->SetLineColor(opts.colors[labels[it-datahistos.begin()]]); - (*it).getrtherr()->SetMarkerSize(0); - (*it).getrtherr()->SetFillColor(opts.colors[labels[it-datahistos.begin()]]); - (*it).getrtherr()->SetFillStyle(opts.styles[labels[it-datahistos.begin()]]); - float toterr = 0; - for (int b = 1; b <= (*it).gettherr()->GetNbinsX(); b++) - toterr += (*it).gettherr()->GetBinError(b); - if (toterr > 0) - { - for (vector<range>::iterator r = rthranges.begin(); r != rthranges.end(); r++) - { - (*it).getrtherr()->SetAxisRange((*r).lowedge, (*r).upedge); - if (!opts.multitheory || (it - datahistos.begin() == 1)) //if in multitheory mode, plot only the second 
theory - (*it).Draw((TH1F*)(*it).getrtherr()->Clone(), "E3L same"); - } - (*it).getrtherr()->GetXaxis()->SetRange((*it).getlowrange(), (*it).getuprange()); - } - } - } - else //plot as displaced TGraphs - { - TGraphAsymmErrors * r_gtherr = new TGraphAsymmErrors((*it).getrth()); - r_gtherr->SetMarkerStyle(opts.markers[labels[it-datahistos.begin()]]); - r_gtherr->SetLineColor(opts.colors[labels[it-datahistos.begin()]]); - r_gtherr->SetMarkerSize(2 * opts.resolution / 1200); - r_gtherr->SetMarkerColor(opts.colors[labels[it-datahistos.begin()]]); - for (int b = 0; b < r_gtherr->GetN(); b++) - { - //Set X error to 0 - r_gtherr->SetPointEXlow(b, 0); - r_gtherr->SetPointEXhigh(b, 0); - - //displace horizontally - double x, y; - r_gtherr->GetPoint(b, x, y); - float width = (*it).getrth()->GetBinWidth(b + 1); - float lowedge = (*it).getrth()->GetBinLowEdge(b + 1); - x = lowedge + (it - datahistos.begin() + 1) * width/(datahistos.size() + 1); - r_gtherr->SetPoint(b, x, y); - //Set Y error - float errup, errdown; - if (opts.therr) - { - errup = (*it).getrtherrup()->GetBinContent(b + 1) - (*it).getrth()->GetBinContent(b + 1); - errdown = (*it).getrth()->GetBinContent(b + 1) - (*it).getrtherrdown()->GetBinContent(b + 1); - } - else - { - errup = 0; - errdown = 0; - } - r_gtherr->SetPointEYhigh(b, errup); - r_gtherr->SetPointEYlow(b, errdown); - } - if (!opts.multitheory || (it - datahistos.begin() == 1)) //if in multitheory mode, plot only the second theory - r_gtherr->Draw("P same"); - } - if (it - datahistos.begin() == 1) - { - if ((!opts.points || (*it).bincenter()) && (*it).nbins() > 1) - if (opts.therr && (*it).HasTherr()) - legr->AddEntry((*it).getrtherr(), (labels[it-datahistos.begin()]).c_str(), "lf"); - else - legr->AddEntry((*it).getrth(), (labels[it-datahistos.begin()]).c_str(), "l"); - else - if (opts.therr && (*it).HasTherr()) - legr->AddEntry((*it).getrth(), (labels[it-datahistos.begin()]).c_str(), "pe"); - else - leg2->AddEntry((*it).getrth(), (labels[it-datahistos.begin()]).c_str(), "p"); - } - } - + { + (*it).getrthshift()->SetLineColor(opts.colors[labels[it-datahistos.begin()]]); + (*it).getrthshift()->SetLineStyle(2); + (*it).getrthshift()->SetLineWidth(opts.lwidth); + + (*it).getrth()->SetLineColor(opts.colors[labels[it-datahistos.begin()]]); + (*it).getrth()->SetLineWidth(opts.lwidth); + if (opts.bw) + (*it).getrth()->SetLineStyle(opts.lstyles[labels[it-datahistos.begin()]]); + + vector <range> rthranges = historanges((*it).getrthshift()); + if (!opts.threepanels) + { + for (vector<range>::iterator r = rthranges.begin(); r != rthranges.end(); r++) + { + (*it).getrthshift()->SetAxisRange((*r).lowedge, (*r).upedge); + if (!opts.onlytheory) + if (!opts.multitheory || (it - datahistos.begin() == 1)) //if in multitheory mode, plot only the second theory + if (!opts.nothshifts) + if ((!opts.points || (*it).bincenter()) && (*it).nbins() > 1) + (*it).Draw((TH1F*)(*it).getrthshift()->Clone(), "LX same"); + else + (*it).Draw((TH1F*)(*it).getrthshift()->Clone(), "hist ][ same"); + } + (*it).getrthshift()->GetXaxis()->SetRange((*it).getlowrange(), (*it).getuprange()); + } + + if ((!opts.points || (*it).bincenter()) && (*it).nbins() > 1) //plot as continous line with dashed error bands + { + for (vector<range>::iterator r = rthranges.begin(); r != rthranges.end(); r++) + { + (*it).getrth()->SetAxisRange((*r).lowedge, (*r).upedge); + if (!opts.multitheory || (it - datahistos.begin() == 1)) //if in multitheory mode, plot only the second theory + (*it).Draw((TH1F*)(*it).getrth()->Clone(), "LX 
same"); + } + (*it).getrth()->GetXaxis()->SetRange((*it).getlowrange(), (*it).getuprange()); + if (opts.therr) + { + (*it).getrtherr()->SetLineColor(opts.colors[labels[it-datahistos.begin()]]); + (*it).getrtherr()->SetMarkerSize(0); + (*it).getrtherr()->SetFillColor(opts.colors[labels[it-datahistos.begin()]]); + (*it).getrtherr()->SetFillStyle(opts.styles[labels[it-datahistos.begin()]]); + float toterr = 0; + for (int b = 1; b <= (*it).gettherr()->GetNbinsX(); b++) + toterr += (*it).gettherr()->GetBinError(b); + if (toterr > 0) + { + for (vector<range>::iterator r = rthranges.begin(); r != rthranges.end(); r++) + { + (*it).getrtherr()->SetAxisRange((*r).lowedge, (*r).upedge); + if (!opts.multitheory || (it - datahistos.begin() == 1)) //if in multitheory mode, plot only the second theory + (*it).Draw((TH1F*)(*it).getrtherr()->Clone(), "E3L same"); + } + (*it).getrtherr()->GetXaxis()->SetRange((*it).getlowrange(), (*it).getuprange()); + } + } + } + else //plot as displaced TGraphs + { + TGraphAsymmErrors * r_gtherr = new TGraphAsymmErrors((*it).getrth()); + r_gtherr->SetMarkerStyle(opts.markers[labels[it-datahistos.begin()]]); + r_gtherr->SetLineColor(opts.colors[labels[it-datahistos.begin()]]); + r_gtherr->SetMarkerSize(2 * opts.resolution / 1200); + r_gtherr->SetMarkerColor(opts.colors[labels[it-datahistos.begin()]]); + for (int b = 0; b < r_gtherr->GetN(); b++) + { + //Set X error to 0 + r_gtherr->SetPointEXlow(b, 0); + r_gtherr->SetPointEXhigh(b, 0); + + //displace horizontally + double x, y; + r_gtherr->GetPoint(b, x, y); + float width = (*it).getrth()->GetBinWidth(b + 1); + float lowedge = (*it).getrth()->GetBinLowEdge(b + 1); + x = lowedge + (it - datahistos.begin() + 1) * width/(datahistos.size() + 1); + r_gtherr->SetPoint(b, x, y); + //Set Y error + float errup, errdown; + if (opts.therr) + { + errup = (*it).getrtherrup()->GetBinContent(b + 1) - (*it).getrth()->GetBinContent(b + 1); + errdown = (*it).getrth()->GetBinContent(b + 1) - (*it).getrtherrdown()->GetBinContent(b + 1); + } + else + { + errup = 0; + errdown = 0; + } + r_gtherr->SetPointEYhigh(b, errup); + r_gtherr->SetPointEYlow(b, errdown); + } + if (!opts.multitheory || (it - datahistos.begin() == 1)) //if in multitheory mode, plot only the second theory + r_gtherr->Draw("P same"); + } + if (it - datahistos.begin() == 1) + { + if ((!opts.points || (*it).bincenter()) && (*it).nbins() > 1) + if (opts.therr && (*it).HasTherr()) + legr->AddEntry((*it).getrtherr(), (labels[it-datahistos.begin()]).c_str(), "lf"); + else + legr->AddEntry((*it).getrth(), (labels[it-datahistos.begin()]).c_str(), "l"); + else + if (opts.therr && (*it).HasTherr()) + legr->AddEntry((*it).getrth(), (labels[it-datahistos.begin()]).c_str(), "pe"); + else + leg2->AddEntry((*it).getrth(), (labels[it-datahistos.begin()]).c_str(), "p"); + } + } + //draw theory error borders if (opts.therr) - for (vector <Subplot>::iterator it = datahistos.begin(); it != datahistos.end(); it++) - { - (*it).getrtherrup()->SetLineColor(opts.colors[labels[it-datahistos.begin()]]); - (*it).getrtherrdown()->SetLineColor(opts.colors[labels[it-datahistos.begin()]]); - (*it).getrtherrup()->SetLineWidth(opts.lwidth); - (*it).getrtherrdown()->SetLineWidth(opts.lwidth); - if ((!opts.points || (*it).bincenter()) && (*it).nbins() > 1) - { - vector <range> rthranges = historanges((*it).getth()); - for (vector<range>::iterator r = rthranges.begin(); r != rthranges.end(); r++) - { - (*it).getrtherrup()->SetAxisRange((*r).lowedge, (*r).upedge); - if (!opts.multitheory || (it - 
datahistos.begin() == 1)) //if in multitheory mode, plot only the second theory - (*it).Draw((TH1F*)(*it).getrtherrup()->Clone(), "LX same"); - (*it).getrtherrdown()->SetAxisRange((*r).lowedge, (*r).upedge); - if (!opts.multitheory || (it - datahistos.begin() == 1)) //if in multitheory mode, plot only the second theory - (*it).Draw((TH1F*)(*it).getrtherrdown()->Clone(), "LX same"); - } - (*it).getrtherrup()->GetXaxis()->SetRange((*it).getlowrange(), (*it).getuprange()); - (*it).getrtherrdown()->GetXaxis()->SetRange((*it).getlowrange(), (*it).getuprange()); - } - } + for (vector <Subplot>::iterator it = datahistos.begin(); it != datahistos.end(); it++) + { + (*it).getrtherrup()->SetLineColor(opts.colors[labels[it-datahistos.begin()]]); + (*it).getrtherrdown()->SetLineColor(opts.colors[labels[it-datahistos.begin()]]); + (*it).getrtherrup()->SetLineWidth(opts.lwidth); + (*it).getrtherrdown()->SetLineWidth(opts.lwidth); + if ((!opts.points || (*it).bincenter()) && (*it).nbins() > 1) + { + vector <range> rthranges = historanges((*it).getth()); + for (vector<range>::iterator r = rthranges.begin(); r != rthranges.end(); r++) + { + (*it).getrtherrup()->SetAxisRange((*r).lowedge, (*r).upedge); + if (!opts.multitheory || (it - datahistos.begin() == 1)) //if in multitheory mode, plot only the second theory + (*it).Draw((TH1F*)(*it).getrtherrup()->Clone(), "LX same"); + (*it).getrtherrdown()->SetAxisRange((*r).lowedge, (*r).upedge); + if (!opts.multitheory || (it - datahistos.begin() == 1)) //if in multitheory mode, plot only the second theory + (*it).Draw((TH1F*)(*it).getrtherrdown()->Clone(), "LX same"); + } + (*it).getrtherrup()->GetXaxis()->SetRange((*it).getlowrange(), (*it).getuprange()); + (*it).getrtherrdown()->GetXaxis()->SetRange((*it).getlowrange(), (*it).getuprange()); + } + } //Draw data points if (!opts.onlytheory) - datahistos[0].Draw(r_data, "PE1 same"); + datahistos[0].Draw(r_data, "PE1 same"); legr->SetFillColor(0); legr->SetBorderSize(0); @@ -1166,7 +1179,7 @@ TCanvas * DataPainter(int dataindex, int subplotindex) { Shifts->cd(); if (datahistos[0].getlogx()) - Shifts->SetLogx(); + Shifts->SetLogx(); //Set up template histogram for axis r_templ->GetYaxis()->SetLabelSize(txtsize/sy); @@ -1174,19 +1187,19 @@ TCanvas * DataPainter(int dataindex, int subplotindex) r_templ->GetYaxis()->SetTitleOffset((offset+0.3) * sy); string ytitle = ""; if (opts.diff) - { - if (opts.ratiototheory) - ytitle = (string) "Data-" + opts.theorylabel; - else - ytitle = "Theory-Data"; - } + { + if (opts.ratiototheory) + ytitle = (string) "Data-" + opts.theorylabel; + else + ytitle = "Theory-Data"; + } else - { - if (opts.ratiototheory) - ytitle = (string) "Ratio to " + opts.theorylabel; - else - ytitle = "#frac{Theory+shifts}{Data}"; - } + { + if (opts.ratiototheory) + ytitle = (string) "Ratio to " + opts.theorylabel; + else + ytitle = "#frac{Theory+shifts}{Data}"; + } r_templ->SetYTitle(ytitle.c_str()); @@ -1194,26 +1207,26 @@ TCanvas * DataPainter(int dataindex, int subplotindex) mx = 0; TH1F * r_dataerr = (TH1F*) r_data->Clone(); for (int b = 1; b <= r_data->GetNbinsX(); b++) - r_dataerr->SetBinContent(b, r_data->GetBinContent(b) + r_data->GetBinError(b)); + r_dataerr->SetBinContent(b, r_data->GetBinContent(b) + r_data->GetBinError(b)); if (!opts.onlytheory) - mx = r_dataerr->GetBinContent(r_dataerr->GetMaximumBin()); + mx = r_dataerr->GetBinContent(r_dataerr->GetMaximumBin()); for (vector <Subplot>::iterator it = datahistos.begin(); it != datahistos.end(); it++) - mx = max(mx, 
(float)((*it).getrthshift()->GetMaximum())); + mx = max(mx, (float)((*it).getrthshift()->GetMaximum())); mn = mx; for (int b = 1; b <= r_dataerr->GetNbinsX(); b++) - r_dataerr->SetBinContent(b, r_data->GetBinContent(b) - r_data->GetBinError(b)); + r_dataerr->SetBinContent(b, r_data->GetBinContent(b) - r_data->GetBinError(b)); if (!opts.onlytheory) - mn = hmin(r_dataerr); + mn = hmin(r_dataerr); for (vector <Subplot>::iterator it = datahistos.begin(); it != datahistos.end(); it++) - mn = min(mn, (float)(hmin((*it).getrthshift()))); + mn = min(mn, (float)(hmin((*it).getrthshift()))); float delta = mx - mn; if (datahistos[0].getymaxr() != 0) - { - mx = datahistos[0].getymaxr(); - mn = datahistos[0].getyminr(); - delta = 0; - } + { + mx = datahistos[0].getymaxr(); + mn = datahistos[0].getyminr(); + delta = 0; + } r_templ->SetMaximum(mx + delta * 0.2); r_templ->SetMinimum(mn - delta * 0.2); @@ -1222,37 +1235,37 @@ TCanvas * DataPainter(int dataindex, int subplotindex) /* //plot data if (!opts.onlytheory) - datahistos[0].Draw(r_data, "PE1 same"); + datahistos[0].Draw(r_data, "PE1 same"); */ //plot lines at 1 (or 0 for diff plots) TLine *rs_ref; if (opts.diff) - rs_ref = new TLine(r_templ->GetBinLowEdge(r_templ->GetXaxis()->GetFirst()), 0, r_templ->GetXaxis()->GetBinUpEdge(r_templ->GetXaxis()->GetLast()), 0); + rs_ref = new TLine(r_templ->GetBinLowEdge(r_templ->GetXaxis()->GetFirst()), 0, r_templ->GetXaxis()->GetBinUpEdge(r_templ->GetXaxis()->GetLast()), 0); else - rs_ref = new TLine(r_templ->GetBinLowEdge(r_templ->GetXaxis()->GetFirst()), 1, r_templ->GetXaxis()->GetBinUpEdge(r_templ->GetXaxis()->GetLast()), 1); + rs_ref = new TLine(r_templ->GetBinLowEdge(r_templ->GetXaxis()->GetFirst()), 1, r_templ->GetXaxis()->GetBinUpEdge(r_templ->GetXaxis()->GetLast()), 1); rs_ref->SetLineStyle(2); rs_ref->SetLineStyle(1); rs_ref->Draw(); //Draw ratios for (vector <Subplot>::iterator it = datahistos.begin(); it != datahistos.end(); it++) - { - vector <range> rthranges = historanges((*it).getrthshift()); - for (vector<range>::iterator r = rthranges.begin(); r != rthranges.end(); r++) - { - (*it).getrthshift()->SetAxisRange((*r).lowedge, (*r).upedge); - if (!opts.nothshifts) - if ((!opts.points || (*it).bincenter()) && (*it).nbins() > 1) - (*it).Draw((TH1F*)(*it).getrthshift()->Clone(), "LX same"); - else - (*it).Draw((TH1F*)(*it).getrthshift()->Clone(), "hist ][ same"); - } - (*it).getrthshift()->GetXaxis()->SetRange((*it).getlowrange(), (*it).getuprange()); - } + { + vector <range> rthranges = historanges((*it).getrthshift()); + for (vector<range>::iterator r = rthranges.begin(); r != rthranges.end(); r++) + { + (*it).getrthshift()->SetAxisRange((*r).lowedge, (*r).upedge); + if (!opts.nothshifts) + if ((!opts.points || (*it).bincenter()) && (*it).nbins() > 1) + (*it).Draw((TH1F*)(*it).getrthshift()->Clone(), "LX same"); + else + (*it).Draw((TH1F*)(*it).getrthshift()->Clone(), "hist ][ same"); + } + (*it).getrthshift()->GetXaxis()->SetRange((*it).getlowrange(), (*it).getuprange()); + } //plot data if (!opts.onlytheory) - datahistos[0].Draw(r_data, "PE1 same"); + datahistos[0].Draw(r_data, "PE1 same"); } //Theory-Data pulls pad @@ -1261,7 +1274,7 @@ TCanvas * DataPainter(int dataindex, int subplotindex) int nth = datahistos.size(); Pulls->cd(); if (datahistos[0].getlogx()) - Pulls->SetLogx(); + Pulls->SetLogx(); //Set up template histogram for axis r_templ->GetXaxis()->SetLabelSize(txtsize/py); @@ -1277,19 +1290,19 @@ TCanvas * DataPainter(int dataindex, int subplotindex) //draw data vector <range> rdtranges = 
historanges(r_datatot); for (vector<range>::iterator r = rdtranges.begin(); r != rdtranges.end(); r++) - { - r_datatot->SetAxisRange((*r).lowedge, (*r).upedge); - if (!opts.onlytheory) - datahistos[0].Draw((TH1F*)r_datatot->Clone(), "E3 same"); - } + { + r_datatot->SetAxisRange((*r).lowedge, (*r).upedge); + if (!opts.onlytheory) + datahistos[0].Draw((TH1F*)r_datatot->Clone(), "E3 same"); + } r_datatot->GetXaxis()->SetRange(datahistos[0].getlowrange(), datahistos[0].getuprange()); //plot lines at 1 (or 0 for diff plots) TLine *r_ref; if (opts.diff) - r_ref = new TLine(r_templ->GetBinLowEdge(r_templ->GetXaxis()->GetFirst()), 0, r_templ->GetXaxis()->GetBinUpEdge(r_templ->GetXaxis()->GetLast()), 0); + r_ref = new TLine(r_templ->GetBinLowEdge(r_templ->GetXaxis()->GetFirst()), 0, r_templ->GetXaxis()->GetBinUpEdge(r_templ->GetXaxis()->GetLast()), 0); else - r_ref = new TLine(r_templ->GetBinLowEdge(r_templ->GetXaxis()->GetFirst()), 1, r_templ->GetXaxis()->GetBinUpEdge(r_templ->GetXaxis()->GetLast()), 1); + r_ref = new TLine(r_templ->GetBinLowEdge(r_templ->GetXaxis()->GetFirst()), 1, r_templ->GetXaxis()->GetBinUpEdge(r_templ->GetXaxis()->GetLast()), 1); r_ref->SetLineStyle(2); r_ref->SetLineStyle(1); r_ref->Draw(); @@ -1298,144 +1311,144 @@ TCanvas * DataPainter(int dataindex, int subplotindex) //Draw ratios for (vector <Subplot>::iterator it = datahistos.begin(); it != datahistos.end(); it++) - { - (*it).getrthshift()->SetLineColor(opts.colors[labels[it-datahistos.begin()]]); - (*it).getrthshift()->SetLineStyle(2); - (*it).getrthshift()->SetLineWidth(opts.lwidth); - - (*it).getrth()->SetLineColor(opts.colors[labels[it-datahistos.begin()]]); - (*it).getrth()->SetLineWidth(opts.lwidth); - if (opts.bw) - (*it).getrth()->SetLineStyle(opts.lstyles[labels[it-datahistos.begin()]]); - - vector <range> rthranges = historanges((*it).getrthshift()); - if (!opts.threepanels) - { - for (vector<range>::iterator r = rthranges.begin(); r != rthranges.end(); r++) - { - (*it).getrthshift()->SetAxisRange((*r).lowedge, (*r).upedge); - if (!opts.onlytheory) - if (!opts.multitheory || (it - datahistos.begin() == nth-1)) //if in multitheory mode, plot only the second theory - if (!opts.nothshifts) - if ((!opts.points || (*it).bincenter()) && (*it).nbins() > 1) - (*it).Draw((TH1F*)(*it).getrthshift()->Clone(), "LX same"); - else - (*it).Draw((TH1F*)(*it).getrthshift()->Clone(), "hist ][ same"); - } - (*it).getrthshift()->GetXaxis()->SetRange((*it).getlowrange(), (*it).getuprange()); - } - - if ((!opts.points || (*it).bincenter()) && (*it).nbins() > 1) //plot as continous line with dashed error bands - { - for (vector<range>::iterator r = rthranges.begin(); r != rthranges.end(); r++) - { - (*it).getrth()->SetAxisRange((*r).lowedge, (*r).upedge); - if (!opts.multitheory || (it - datahistos.begin() == nth-1)) //if in multitheory mode, plot only the second theory - (*it).Draw((TH1F*)(*it).getrth()->Clone(), "LX same"); - } - (*it).getrth()->GetXaxis()->SetRange((*it).getlowrange(), (*it).getuprange()); - if (opts.therr) - { - (*it).getrtherr()->SetLineColor(opts.colors[labels[it-datahistos.begin()]]); - (*it).getrtherr()->SetMarkerSize(0); - (*it).getrtherr()->SetFillColor(opts.colors[labels[it-datahistos.begin()]]); - (*it).getrtherr()->SetFillStyle(opts.styles[labels[it-datahistos.begin()]]); - float toterr = 0; - for (int b = 1; b <= (*it).gettherr()->GetNbinsX(); b++) - toterr += (*it).gettherr()->GetBinError(b); - if (toterr > 0) - { - for (vector<range>::iterator r = rthranges.begin(); r != rthranges.end(); r++) - { - 
(*it).getrtherr()->SetAxisRange((*r).lowedge, (*r).upedge); - if (!opts.multitheory || (it - datahistos.begin() == nth-1)) //if in multitheory mode, plot only the second theory - (*it).Draw((TH1F*)(*it).getrtherr()->Clone(), "E3L same"); - } - (*it).getrtherr()->GetXaxis()->SetRange((*it).getlowrange(), (*it).getuprange()); - } - } - } - else //plot as displaced TGraphs - { - TGraphAsymmErrors * r_gtherr = new TGraphAsymmErrors((*it).getrth()); - r_gtherr->SetMarkerStyle(opts.markers[labels[it-datahistos.begin()]]); - r_gtherr->SetLineColor(opts.colors[labels[it-datahistos.begin()]]); - r_gtherr->SetMarkerSize(2 * opts.resolution / 1200); - r_gtherr->SetMarkerColor(opts.colors[labels[it-datahistos.begin()]]); - for (int b = 0; b < r_gtherr->GetN(); b++) - { - //Set X error to 0 - r_gtherr->SetPointEXlow(b, 0); - r_gtherr->SetPointEXhigh(b, 0); - - //displace horizontally - double x, y; - r_gtherr->GetPoint(b, x, y); - float width = (*it).getrth()->GetBinWidth(b + 1); - float lowedge = (*it).getrth()->GetBinLowEdge(b + 1); - x = lowedge + (it - datahistos.begin() + 1) * width/(datahistos.size() + 1); - r_gtherr->SetPoint(b, x, y); - //Set Y error - float errup, errdown; - if (opts.therr) - { - errup = (*it).getrtherrup()->GetBinContent(b + 1) - (*it).getrth()->GetBinContent(b + 1); - errdown = (*it).getrth()->GetBinContent(b + 1) - (*it).getrtherrdown()->GetBinContent(b + 1); - } - else - { - errup = 0; - errdown = 0; - } - r_gtherr->SetPointEYhigh(b, errup); - r_gtherr->SetPointEYlow(b, errdown); - } - if (!opts.multitheory || (it - datahistos.begin() == nth-1)) //if in multitheory mode, plot only the second theory - r_gtherr->Draw("P same"); - } - if (it - datahistos.begin() == nth-1) - { - if ((!opts.points || (*it).bincenter()) && (*it).nbins() > 1) - if (opts.therr && (*it).HasTherr()) - legr->AddEntry((*it).getrtherr(), (labels[it-datahistos.begin()]).c_str(), "lf"); - else - legr->AddEntry((*it).getrth(), (labels[it-datahistos.begin()]).c_str(), "l"); - else - if (opts.therr && (*it).HasTherr()) - legr->AddEntry((*it).getrth(), (labels[it-datahistos.begin()]).c_str(), "pe"); - else - leg2->AddEntry((*it).getrth(), (labels[it-datahistos.begin()]).c_str(), "p"); - } - } - + { + (*it).getrthshift()->SetLineColor(opts.colors[labels[it-datahistos.begin()]]); + (*it).getrthshift()->SetLineStyle(2); + (*it).getrthshift()->SetLineWidth(opts.lwidth); + + (*it).getrth()->SetLineColor(opts.colors[labels[it-datahistos.begin()]]); + (*it).getrth()->SetLineWidth(opts.lwidth); + if (opts.bw) + (*it).getrth()->SetLineStyle(opts.lstyles[labels[it-datahistos.begin()]]); + + vector <range> rthranges = historanges((*it).getrthshift()); + if (!opts.threepanels) + { + for (vector<range>::iterator r = rthranges.begin(); r != rthranges.end(); r++) + { + (*it).getrthshift()->SetAxisRange((*r).lowedge, (*r).upedge); + if (!opts.onlytheory) + if (!opts.multitheory || (it - datahistos.begin() == nth-1)) //if in multitheory mode, plot only the second theory + if (!opts.nothshifts) + if ((!opts.points || (*it).bincenter()) && (*it).nbins() > 1) + (*it).Draw((TH1F*)(*it).getrthshift()->Clone(), "LX same"); + else + (*it).Draw((TH1F*)(*it).getrthshift()->Clone(), "hist ][ same"); + } + (*it).getrthshift()->GetXaxis()->SetRange((*it).getlowrange(), (*it).getuprange()); + } + + if ((!opts.points || (*it).bincenter()) && (*it).nbins() > 1) //plot as continous line with dashed error bands + { + for (vector<range>::iterator r = rthranges.begin(); r != rthranges.end(); r++) + { + 
(*it).getrth()->SetAxisRange((*r).lowedge, (*r).upedge); + if (!opts.multitheory || (it - datahistos.begin() == nth-1)) //if in multitheory mode, plot only the second theory + (*it).Draw((TH1F*)(*it).getrth()->Clone(), "LX same"); + } + (*it).getrth()->GetXaxis()->SetRange((*it).getlowrange(), (*it).getuprange()); + if (opts.therr) + { + (*it).getrtherr()->SetLineColor(opts.colors[labels[it-datahistos.begin()]]); + (*it).getrtherr()->SetMarkerSize(0); + (*it).getrtherr()->SetFillColor(opts.colors[labels[it-datahistos.begin()]]); + (*it).getrtherr()->SetFillStyle(opts.styles[labels[it-datahistos.begin()]]); + float toterr = 0; + for (int b = 1; b <= (*it).gettherr()->GetNbinsX(); b++) + toterr += (*it).gettherr()->GetBinError(b); + if (toterr > 0) + { + for (vector<range>::iterator r = rthranges.begin(); r != rthranges.end(); r++) + { + (*it).getrtherr()->SetAxisRange((*r).lowedge, (*r).upedge); + if (!opts.multitheory || (it - datahistos.begin() == nth-1)) //if in multitheory mode, plot only the second theory + (*it).Draw((TH1F*)(*it).getrtherr()->Clone(), "E3L same"); + } + (*it).getrtherr()->GetXaxis()->SetRange((*it).getlowrange(), (*it).getuprange()); + } + } + } + else //plot as displaced TGraphs + { + TGraphAsymmErrors * r_gtherr = new TGraphAsymmErrors((*it).getrth()); + r_gtherr->SetMarkerStyle(opts.markers[labels[it-datahistos.begin()]]); + r_gtherr->SetLineColor(opts.colors[labels[it-datahistos.begin()]]); + r_gtherr->SetMarkerSize(2 * opts.resolution / 1200); + r_gtherr->SetMarkerColor(opts.colors[labels[it-datahistos.begin()]]); + for (int b = 0; b < r_gtherr->GetN(); b++) + { + //Set X error to 0 + r_gtherr->SetPointEXlow(b, 0); + r_gtherr->SetPointEXhigh(b, 0); + + //displace horizontally + double x, y; + r_gtherr->GetPoint(b, x, y); + float width = (*it).getrth()->GetBinWidth(b + 1); + float lowedge = (*it).getrth()->GetBinLowEdge(b + 1); + x = lowedge + (it - datahistos.begin() + 1) * width/(datahistos.size() + 1); + r_gtherr->SetPoint(b, x, y); + //Set Y error + float errup, errdown; + if (opts.therr) + { + errup = (*it).getrtherrup()->GetBinContent(b + 1) - (*it).getrth()->GetBinContent(b + 1); + errdown = (*it).getrth()->GetBinContent(b + 1) - (*it).getrtherrdown()->GetBinContent(b + 1); + } + else + { + errup = 0; + errdown = 0; + } + r_gtherr->SetPointEYhigh(b, errup); + r_gtherr->SetPointEYlow(b, errdown); + } + if (!opts.multitheory || (it - datahistos.begin() == nth-1)) //if in multitheory mode, plot only the second theory + r_gtherr->Draw("P same"); + } + if (it - datahistos.begin() == nth-1) + { + if ((!opts.points || (*it).bincenter()) && (*it).nbins() > 1) + if (opts.therr && (*it).HasTherr()) + legr->AddEntry((*it).getrtherr(), (labels[it-datahistos.begin()]).c_str(), "lf"); + else + legr->AddEntry((*it).getrth(), (labels[it-datahistos.begin()]).c_str(), "l"); + else + if (opts.therr && (*it).HasTherr()) + legr->AddEntry((*it).getrth(), (labels[it-datahistos.begin()]).c_str(), "pe"); + else + leg2->AddEntry((*it).getrth(), (labels[it-datahistos.begin()]).c_str(), "p"); + } + } + //draw theory error borders if (opts.therr) - for (vector <Subplot>::iterator it = datahistos.begin(); it != datahistos.end(); it++) - { - (*it).getrtherrup()->SetLineColor(opts.colors[labels[it-datahistos.begin()]]); - (*it).getrtherrdown()->SetLineColor(opts.colors[labels[it-datahistos.begin()]]); - (*it).getrtherrup()->SetLineWidth(opts.lwidth); - (*it).getrtherrdown()->SetLineWidth(opts.lwidth); - if ((!opts.points || (*it).bincenter()) && (*it).nbins() > 1) - { - vector <range> 
rthranges = historanges((*it).getth()); - for (vector<range>::iterator r = rthranges.begin(); r != rthranges.end(); r++) - { - (*it).getrtherrup()->SetAxisRange((*r).lowedge, (*r).upedge); - if (!opts.multitheory || (it - datahistos.begin() == nth-1)) //if in multitheory mode, plot only the second theory - (*it).Draw((TH1F*)(*it).getrtherrup()->Clone(), "LX same"); - (*it).getrtherrdown()->SetAxisRange((*r).lowedge, (*r).upedge); - if (!opts.multitheory || (it - datahistos.begin() == nth-1)) //if in multitheory mode, plot only the second theory - (*it).Draw((TH1F*)(*it).getrtherrdown()->Clone(), "LX same"); - } - (*it).getrtherrup()->GetXaxis()->SetRange((*it).getlowrange(), (*it).getuprange()); - (*it).getrtherrdown()->GetXaxis()->SetRange((*it).getlowrange(), (*it).getuprange()); - } - } + for (vector <Subplot>::iterator it = datahistos.begin(); it != datahistos.end(); it++) + { + (*it).getrtherrup()->SetLineColor(opts.colors[labels[it-datahistos.begin()]]); + (*it).getrtherrdown()->SetLineColor(opts.colors[labels[it-datahistos.begin()]]); + (*it).getrtherrup()->SetLineWidth(opts.lwidth); + (*it).getrtherrdown()->SetLineWidth(opts.lwidth); + if ((!opts.points || (*it).bincenter()) && (*it).nbins() > 1) + { + vector <range> rthranges = historanges((*it).getth()); + for (vector<range>::iterator r = rthranges.begin(); r != rthranges.end(); r++) + { + (*it).getrtherrup()->SetAxisRange((*r).lowedge, (*r).upedge); + if (!opts.multitheory || (it - datahistos.begin() == nth-1)) //if in multitheory mode, plot only the second theory + (*it).Draw((TH1F*)(*it).getrtherrup()->Clone(), "LX same"); + (*it).getrtherrdown()->SetAxisRange((*r).lowedge, (*r).upedge); + if (!opts.multitheory || (it - datahistos.begin() == nth-1)) //if in multitheory mode, plot only the second theory + (*it).Draw((TH1F*)(*it).getrtherrdown()->Clone(), "LX same"); + } + (*it).getrtherrup()->GetXaxis()->SetRange((*it).getlowrange(), (*it).getuprange()); + (*it).getrtherrdown()->GetXaxis()->SetRange((*it).getlowrange(), (*it).getuprange()); + } + } //Draw data points if (!opts.onlytheory) - datahistos[0].Draw(r_data, "PE1 same"); + datahistos[0].Draw(r_data, "PE1 same"); legr->SetFillColor(0); legr->SetBorderSize(0); @@ -1448,7 +1461,7 @@ TCanvas * DataPainter(int dataindex, int subplotindex) { Pulls->cd(); if (datahistos[0].getlogx()) - Pulls->SetLogx(); + Pulls->SetLogx(); TH1F * pull = datahistos[0].getpull(); @@ -1482,17 +1495,17 @@ TCanvas * DataPainter(int dataindex, int subplotindex) //plot pulls for (vector <Subplot>::iterator it = datahistos.begin(); it != datahistos.end(); it++) - { - if (datahistos.size() == 1) - { - (*it).getpull()->SetFillColor(opts.colors[labels[it-datahistos.begin()]]); - (*it).getpull()->SetFillStyle(1001); - } - (*it).getpull()->SetLineStyle(1); - (*it).getpull()->SetLineWidth(opts.lwidth); - (*it).getpull()->SetLineColor(opts.colors[labels[it-datahistos.begin()]]); - datahistos[0].Draw((TH1F*)(*it).getpull()->Clone(), "same ]["); - } + { + if (datahistos.size() == 1) + { + (*it).getpull()->SetFillColor(opts.colors[labels[it-datahistos.begin()]]); + (*it).getpull()->SetFillStyle(1001); + } + (*it).getpull()->SetLineStyle(1); + (*it).getpull()->SetLineWidth(opts.lwidth); + (*it).getpull()->SetLineColor(opts.colors[labels[it-datahistos.begin()]]); + datahistos[0].Draw((TH1F*)(*it).getpull()->Clone(), "same ]["); + } } //Labels @@ -1501,7 +1514,7 @@ TCanvas * DataPainter(int dataindex, int subplotindex) cnv->cd(1); DrawLabels(); if (opts.drawlogo) - DrawLogo()->Draw(); + DrawLogo()->Draw(); 
cnv->cd(2); DrawLabels(); } @@ -1510,7 +1523,7 @@ TCanvas * DataPainter(int dataindex, int subplotindex) cnv->cd(); DrawLabels(); if (opts.drawlogo) - DrawLogo()->Draw(); + DrawLogo()->Draw(); } return cnv; diff --git a/tools/draw/src/Dataset.cc b/tools/draw/src/Dataset.cc index 6977b62713fa051490e8a2f129c1d7682aa1097e..c05b9c19794a997ebefe038ec1891d517bc4661d 100644 --- a/tools/draw/src/Dataset.cc +++ b/tools/draw/src/Dataset.cc @@ -43,79 +43,79 @@ Subplot::Subplot(string plotoptions) : xmin(0), xmax(0), yminr(0), ymaxr(0), ym //Rewrite in standard C parsing, avoid Root string parsing TString popt(plotoptions.c_str()); TObjArray* array = popt.Tokenize("@"); - + // first loop to detect x axis - for (int i=0; i<array->GetEntries(); i++) + for (int i=0; i<array->GetEntries(); i++) { TString str( ((TObjString*)array->At(i))->GetString().Data()); - if(str.BeginsWith("Xmin:")) - { - str.ReplaceAll("Xmin:",""); - xmin = str.Atof(); - } - else if(str.BeginsWith("Xmax:")) - { - str.ReplaceAll("Xmax:",""); - xmax = str.Atof(); - } + if(str.BeginsWith("Xmin:")) + { + str.ReplaceAll("Xmin:",""); + xmin = str.Atof(); + } + else if(str.BeginsWith("Xmax:")) + { + str.ReplaceAll("Xmax:",""); + xmax = str.Atof(); + } } - for(int i=0; i<array->GetEntries(); i++) + for(int i=0; i<array->GetEntries(); i++) { TString str( ((TObjString*)array->At(i))->GetString().Data()); - if(str.BeginsWith("ExtraLabel:")) - { - str.ReplaceAll("ExtraLabel:",""); - extralabel = str.Data(); - } - if(str.BeginsWith("Experiment:")) - { - str.ReplaceAll("Experiment:",""); - experiment = str.Data(); - } - if(str.BeginsWith("Lumi:")) - { - str.ReplaceAll("Lumi:",""); - lumilabel = str.Data(); - } - if(str.BeginsWith("Title:")) - { - str.ReplaceAll("Title:",""); - title = str.Data(); - } - else if(str.BeginsWith("XTitle:")) - { - str.ReplaceAll("XTitle:",""); - xlabel = str.Data(); - } - else if(str.BeginsWith("YTitle:")) - { - str.ReplaceAll("YTitle:",""); - ylabel = str.Data(); - } - else if(str.BeginsWith("YminR:")) - { - str.ReplaceAll("YminR:",""); - yminr = str.Atof(); - } - else if(str.BeginsWith("YmaxR:")) - { - str.ReplaceAll("YmaxR:",""); - ymaxr = str.Atof(); - } - else if(str.BeginsWith("Ymin:")) - { - str.ReplaceAll("Ymin:",""); - ymin = str.Atof(); - } - else if(str.BeginsWith("Ymax:")) - { - str.ReplaceAll("Ymax:",""); - ymax = str.Atof(); - } + if(str.BeginsWith("ExtraLabel:")) + { + str.ReplaceAll("ExtraLabel:",""); + extralabel = str.Data(); + } + if(str.BeginsWith("Experiment:")) + { + str.ReplaceAll("Experiment:",""); + experiment = str.Data(); + } + if(str.BeginsWith("Lumi:")) + { + str.ReplaceAll("Lumi:",""); + lumilabel = str.Data(); + } + if(str.BeginsWith("Title:")) + { + str.ReplaceAll("Title:",""); + title = str.Data(); + } + else if(str.BeginsWith("XTitle:")) + { + str.ReplaceAll("XTitle:",""); + xlabel = str.Data(); + } + else if(str.BeginsWith("YTitle:")) + { + str.ReplaceAll("YTitle:",""); + ylabel = str.Data(); + } + else if(str.BeginsWith("YminR:")) + { + str.ReplaceAll("YminR:",""); + yminr = str.Atof(); + } + else if(str.BeginsWith("YmaxR:")) + { + str.ReplaceAll("YmaxR:",""); + ymaxr = str.Atof(); + } + else if(str.BeginsWith("Ymin:")) + { + str.ReplaceAll("Ymin:",""); + ymin = str.Atof(); + } + else if(str.BeginsWith("Ymax:")) + { + str.ReplaceAll("Ymax:",""); + ymax = str.Atof(); + } else if(str.BeginsWith("Xlog")) - logx = true; + logx = true; else if(str.BeginsWith("Ylog")) - logy = true; + logy = true; } delete array; } @@ -143,11 +143,11 @@ void Subplot::Init(string label, int dataindex, int 
subplotindex) if (skip) { if (!maketgraph) - { - cout << "bin inconsistency for " << label << ", dataset: " << dataindex << ", subplot: " << subplotindex << ". skipping..." << endl; - cout << "Cannot plot data, skipping" << endl; - return; - } + { + cout << "bin inconsistency for " << label << ", dataset: " << dataindex << ", subplot: " << subplotindex << ". skipping..." << endl; + cout << "Cannot plot data, skipping" << endl; + return; + } } @@ -160,42 +160,42 @@ void Subplot::Init(string label, int dataindex, int subplotindex) //make arbitrary bin edges vector <double> temp; for (vector <float>::iterator it = valx.begin(); it != valx.end(); it++) - temp.push_back(*it); - + temp.push_back(*it); + sort(temp.begin(), temp.end()); //adjust x axis double xaxmin = *(temp.begin()); double xaxmax = *(temp.end()-1); if (valx.size() > 1 && (*(temp.end()-1) - *(temp.begin()) > 0)) - { - if (logx) - { - double axislength = *(temp.end()-1) / *(temp.begin()); - xaxmin = xaxmin / pow(axislength,1./20); - xaxmax = xaxmax * pow(axislength,1./20); - } - else - { - double axislength = *(temp.end()-1) - *(temp.begin()); - xaxmin = xaxmin - axislength/20.; - xaxmax = xaxmax + axislength/20.; - } - } + { + if (logx) + { + double axislength = *(temp.end()-1) / *(temp.begin()); + xaxmin = xaxmin / pow(axislength,1./20); + xaxmax = xaxmax * pow(axislength,1./20); + } + else + { + double axislength = *(temp.end()-1) - *(temp.begin()); + xaxmin = xaxmin - axislength/20.; + xaxmax = xaxmax + axislength/20.; + } + } else - { - xaxmin *= 0.9999; - xaxmax *= 1.0001; - } + { + xaxmin *= 0.9999; + xaxmax *= 1.0001; + } bins.push_back(xaxmin); if (valx.size() > 1) - for (vector<double>::iterator it = temp.begin()+1; it != temp.end(); it++) - bins.push_back((*it + *(it-1))/2); + for (vector<double>::iterator it = temp.begin()+1; it != temp.end(); it++) + bins.push_back((*it + *(it-1))/2); bins.push_back(xaxmax); for (vector<double>::iterator it = bins.begin(); it != bins.end(); it++) - bin[it-bins.begin()] = *it; + bin[it-bins.begin()] = *it; } else { @@ -203,52 +203,52 @@ void Subplot::Init(string label, int dataindex, int subplotindex) int pos = 0; float bmin, bmax; while (pos != -1) - { - pos = -1; - vector<float>::iterator it1 = bins1.begin(); - vector<float>::iterator it2 = bins2.begin(); - for (; (it1+1) != bins1.end(); it1++, it2++) - if (*(it1+1) != *it2 && *it2 < *(it1+1)) - { - pos = (it1 - bins1.begin()) + 1; - bmin = *it2; - bmax = *(it1+1); - } - if (pos != -1) - { - bins1.insert(bins1.begin()+pos, bmin); - bins2.insert(bins2.begin()+pos, bmax); - data.insert(data.begin()+pos, 0); - uncorerr.insert(uncorerr.begin()+pos, 0); - toterr.insert(toterr.begin()+pos, 0); - theory.insert(theory.begin()+pos, 0); - theoryshifted.insert(theoryshifted.begin() +pos, 0); - therrup.insert(therrup.begin()+pos, 0); - therrdown.insert(therrdown.begin()+pos, 0); - pulls.insert(pulls.begin()+pos, 0); - } - } + { + pos = -1; + vector<float>::iterator it1 = bins1.begin(); + vector<float>::iterator it2 = bins2.begin(); + for (; (it1+1) != bins1.end(); it1++, it2++) + if (*(it1+1) != *it2 && *it2 < *(it1+1)) + { + pos = (it1 - bins1.begin()) + 1; + bmin = *it2; + bmax = *(it1+1); + } + if (pos != -1) + { + bins1.insert(bins1.begin()+pos, bmin); + bins2.insert(bins2.begin()+pos, bmax); + data.insert(data.begin()+pos, 0); + uncorerr.insert(uncorerr.begin()+pos, 0); + toterr.insert(toterr.begin()+pos, 0); + theory.insert(theory.begin()+pos, 0); + theoryshifted.insert(theoryshifted.begin() +pos, 0); + therrup.insert(therrup.begin()+pos, 
0); + therrdown.insert(therrdown.begin()+pos, 0); + pulls.insert(pulls.begin()+pos, 0); + } + } //make bins array int i = 0; for (vector<float>::iterator it = bins1.begin(); it != bins1.end(); it++) - { - bin[i] = *it; - i++; - } + { + bin[i] = *it; + i++; + } bin[i] = *(bins2.end()-1); } char hnm[300]; sprintf (hnm, "data_%s_%d-%d", label.c_str(), dataindex, subplotindex); string hname = hnm; - hdata = new TH1F((hname +"_data").c_str(), "", bins1.size(), bin); - hdatatot = new TH1F((hname + "_datatot").c_str(), "", bins1.size(), bin); - hth = new TH1F((hname + "_th").c_str(), "", bins1.size(), bin); - hthshift = new TH1F((hname + "_thshift").c_str(), "", bins1.size(), bin); - htherr = new TH1F((hname + "_therr").c_str(), "", bins1.size(), bin); - htherrup = new TH1F((hname + "_therrup").c_str(), "", bins1.size(), bin); - htherrdown = new TH1F((hname + "_therrdown").c_str(), "", bins1.size(), bin); - hpull = new TH1F((hname + "_pull").c_str(), "", bins1.size(), bin); + hdata = new TH1F((hname + "_data" ).c_str(), "", bins1.size(), bin); + hdatatot = new TH1F((hname + "_datatot" ).c_str(), "", bins1.size(), bin); + hth = new TH1F((hname + "_th" ).c_str(), "", bins1.size(), bin); + hthshift = new TH1F((hname + "_thshift" ).c_str(), "", bins1.size(), bin); + htherr = new TH1F((hname + "_therr" ).c_str(), "", bins1.size(), bin); + htherrup = new TH1F((hname + "_therrup" ).c_str(), "", bins1.size(), bin); + htherrdown = new TH1F((hname + "_therrdown").c_str(), "", bins1.size(), bin); + hpull = new TH1F((hname + "_pull" ).c_str(), "", bins1.size(), bin); if (xmin == 0 && xmax == 0) { @@ -266,7 +266,7 @@ void Subplot::Init(string label, int dataindex, int subplotindex) htherrup->GetXaxis()->SetRange(lowrange, uprange); htherrdown->GetXaxis()->SetRange(lowrange, uprange); hpull->GetXaxis()->SetRange(lowrange, uprange); - + hdata->SetXTitle(xlabel.c_str()); hdata->SetYTitle(ylabel.c_str()); @@ -292,12 +292,12 @@ void Subplot::Init(string label, int dataindex, int subplotindex) htherrdown->SetBinContent(b + 1, theory[b] - therrdown[b]); //invert pulls -> (theory - data) if (!opts.ratiototheory) - hpull->SetBinContent(b + 1, -pulls[b]); + hpull->SetBinContent(b + 1, -pulls[b]); else - hpull->SetBinContent(b + 1, pulls[b]); + hpull->SetBinContent(b + 1, pulls[b]); hpull->SetBinError(b + 1, 0); } - + for (unsigned int b = 0; b < data.size(); b++) if (therrup[b] != 0 || therrdown[b] != 0) hastherr = true; @@ -340,11 +340,11 @@ void getTheoryShift (vector<pdfshift> pdfshifts, vector <vector <double> > cor_m { // Decode string thing int N = ( err == AsymHess ) ? 2*pdfshifts.size()+1 : pdfshifts.size()+1; - + if (N == 1) return; - + vector <double> val; - + for (int i=0; i<N; i++) { istringstream iss(lines[i]); // Hardwire !!! 
// @@ -361,45 +361,45 @@ void getTheoryShift (vector<pdfshift> pdfshifts, vector <vector <double> > cor_m if ( err == AsymHess ) { for ( int i = 0; i<pdfshifts.size(); i++ ) - { - double plus, minus; - if (scale68) - { - plus = (val[i*2+1] - cent) / 1.645; - minus = (val[i*2+2] - cent) / 1.645; - } - else - { - plus = val[i*2+1] - cent; - minus = val[i*2+2] - cent; - } - double valShift = pdfshifts[i].val; - double errShift = pdfshifts[i].err; - //compute shifted central value - double cor = 0.5*(plus - minus)*valShift + 0.5*(plus+minus)*valShift*valShift; - corSum += cor; - //compute reduced uncertainties - xi.push_back(plus*errShift+cent); - xi.push_back(minus*errShift+cent); - } - ahessdeltaasym(xi, errplus, errminus, cor_matrix); + { + double plus, minus; + if (scale68) + { + plus = (val[i*2+1] - cent) / 1.645; + minus = (val[i*2+2] - cent) / 1.645; + } + else + { + plus = val[i*2+1] - cent; + minus = val[i*2+2] - cent; + } + double valShift = pdfshifts[i].val; + double errShift = pdfshifts[i].err; + //compute shifted central value + double cor = 0.5*(plus - minus)*valShift + 0.5*(plus+minus)*valShift*valShift; + corSum += cor; + //compute reduced uncertainties + xi.push_back(plus*errShift+cent); + xi.push_back(minus*errShift+cent); + } + ahessdeltaasym(xi, errplus, errminus, cor_matrix); } else if ( err = SymHess ) { for ( int i = 0; i < pdfshifts.size(); i++ ) - { - double plus = val[i+1] - cent; - double valShift = pdfshifts[i].val; - double errShift = pdfshifts[i].err; - //compute shifted central value - double cor = plus*valShift; - corSum += cor; - //compute reduced uncertainties - xi.push_back(plus*errShift+cent); - } + { + double plus = val[i+1] - cent; + double valShift = pdfshifts[i].val; + double errShift = pdfshifts[i].err; + //compute shifted central value + double cor = plus*valShift; + corSum += cor; + //compute reduced uncertainties + xi.push_back(plus*errShift+cent); + } errplus = errminus = shessdelta(xi, cor_matrix); } -} +} void getTheoryReweight (vector<double> weights, vector <string> lines, double& val, double& err) { @@ -408,12 +408,12 @@ void getTheoryReweight (vector<double> weights, vector <string> lines, double& v // Decode string thing int N = lines.size(); - + if (N == 0) return; - + vector <double> xi; - + for (int i = 0; i < N; i++) { istringstream iss(lines[i]); // Hardwire !!! // @@ -425,7 +425,7 @@ void getTheoryReweight (vector<double> weights, vector <string> lines, double& v val = mean(xi, weights); err = rms(xi, weights); -} +} Data::Data(string dirname, string label) { @@ -449,14 +449,14 @@ Data::Data(string dirname, string label) if (outdirs[label].IsProfiled()) { if (err == AsymHess) - nfiles = pdfmap[label].pdfshifts.size()*2+1; + nfiles = pdfmap[label].pdfshifts.size()*2+1; else if (err == SymHess) - nfiles = pdfmap[label].pdfshifts.size()+1; + nfiles = pdfmap[label].pdfshifts.size()+1; } if (outdirs[label].IsReweighted()) if (err == MC) nfiles = pdfmap[label].mcw.size(); - + // reset if shifts is empty. 
if (nfiles == 1) {nfiles = 0;} @@ -466,26 +466,26 @@ Data::Data(string dirname, string label) sprintf (filename, "%s/fittedresults.txt_set_%04i", dirname.c_str(), i); infiles.push_back(new ifstream(filename)); if (!infiles[i]->is_open()) - { - cout << "Error " << filename << " not found " << endl; - return; - } + { + cout << "Error " << filename << " not found " << endl; + return; + } } - + if (outdirs[label].IsMCreplica()) for (vector <string>::iterator it = outdirs[label].dirlist.begin(); it != outdirs[label].dirlist.end(); it++) { - string fname = (*it) + "/fittedresults.txt"; - infiles.push_back(new ifstream(fname.c_str())); - if (!infiles[infiles.size()-1]->is_open()) - { - cout << "Error " << filename << " not found " << endl; - return; - } + string fname = (*it) + "/fittedresults.txt"; + infiles.push_back(new ifstream(fname.c_str())); + if (!infiles[infiles.size()-1]->is_open()) + { + cout << "Error " << filename << " not found " << endl; + return; + } } //Read datasets - string line; + string line; vector <string> lines; lines.reserve(nfiles); @@ -509,35 +509,35 @@ Data::Data(string dirname, string label) issdi >> nextdtindex; //Dataset index //Loop on datasets - while(!infile.eof()) + while(!infile.eof()) { dtindex = nextdtindex; //Read dataset name getline(infile, name); lines = readLineFiles(infiles); - + //Initialise new dataset Dataset dtset(dtindex, name); getline(infile, line); lines = readLineFiles(infiles); //Read plot options while(!infile.eof()) - { - if (line.find("Plot") == string::npos) - break; - - //Read subplot index - TString temp(line.c_str()); - TObjArray* array = temp.Tokenize("@"); - temp.Form(((TObjString*) array->At(0))->GetString().Data()); - delete array; - temp.ReplaceAll("Plot",""); - int iplot = temp.Atoi(); - //End of reading subplotindex - - dtset.subplots[iplot] = Subplot(line); - getline(infile, line); lines = readLineFiles(infiles); - } + { + if (line.find("Plot") == string::npos) + break; + + //Read subplot index + TString temp(line.c_str()); + TObjArray* array = temp.Tokenize("@"); + temp.Form(((TObjString*) array->At(0))->GetString().Data()); + delete array; + temp.ReplaceAll("Plot",""); + int iplot = temp.Atoi(); + //End of reading subplotindex + + dtset.subplots[iplot] = Subplot(line); + getline(infile, line); lines = readLineFiles(infiles); + } //Read columns tags string col; @@ -545,10 +545,10 @@ Data::Data(string dirname, string label) istringstream iss(line); while (iss >> col) - { - // coltag[i] = col; - // i++; - } + { + // coltag[i] = col; + // i++; + } //hard coded patch, until the fittedresults.txt format is improved coltag[0] = "lbin"; @@ -570,88 +570,88 @@ Data::Data(string dirname, string label) getline(infile, line); lines = readLineFiles(infiles); //Loop on data points while(!infile.eof()) - { - istringstream iss(line); - - //Read a line of data - int iplot; - map <string, float> fline; - int i = 0; - while (iss >> buffer) - { - if (coltag[i] == "iplot") - iplot = (int) buffer; - else - fline[coltag[i]] = buffer; - i++; - //patch, to be cleaned up when the fittedresults.txt format is improved - if (i == 12) - { - string s; - iss >> s; - TString str(s.c_str()); - TObjArray* array = str.Tokenize("/"); - iplot = ((TObjString*) array->At(0))->GetString().Atoi(); - fline["x"] = ((TObjString*) array->At(1))->GetString().Atof(); - - break; - } - //end of patch - } - if (i == 1) //New dataset - { - //Set dataset index for the incoming dataset - nextdtindex = (int) buffer; - break; - } - - //Plain 90cl -> 68cl scaling - if 
(outdirs[label].Scale68()) - { - fline["therr+"] = fline["therr+"]/1.645; - fline["therr-"] = fline["therr-"]/1.645; - } - - //Hessian profile the theory prediction - if (outdirs[label].IsProfiled()) - { - double cor, eplus, eminus; - getTheoryShift(pdfmap[label].pdfshifts, pdfmap[label].cor_matrix, err, lines, outdirs[label].Scale68(), cor, eplus, eminus); - fline["thorig"] += cor; - fline["therr+"] = eplus; - fline["therr-"] = eminus; - } - - //Bayesian reweight the theory prediction - if (outdirs[label].IsReweighted()) - { - double value, error; - getTheoryReweight(pdfmap[label].mcw, lines, value, error); - fline["thorig"] = value; - fline["therr+"] = error; - fline["therr-"] = error; - } - - //Cumulative theory predictions for MC replica runs - if (outdirs[label].IsMCreplica()) - { - double value, error; - vector <double> w; - getTheoryReweight(w, lines, value, error); - fline["thorig"] = value; - fline["therr+"] = error; - fline["therr-"] = error; - fline["thmod"] = value; - fline["pull"] = 0.; - } - - //Add point to subplot - dtset.subplots[iplot].AddPoint(fline); - getline(infile, line); lines = readLineFiles(infiles); - - }//End loop on data points + { + istringstream iss(line); + + //Read a line of data + int iplot; + map <string, float> fline; + int i = 0; + while (iss >> buffer) + { + if (coltag[i] == "iplot") + iplot = (int) buffer; + else + fline[coltag[i]] = buffer; + i++; + //patch, to be cleaned up when the fittedresults.txt format is improved + if (i == 12) + { + string s; + iss >> s; + TString str(s.c_str()); + TObjArray* array = str.Tokenize("/"); + iplot = ((TObjString*) array->At(0))->GetString().Atoi(); + fline["x"] = ((TObjString*) array->At(1))->GetString().Atof(); + + break; + } + //end of patch + } + if (i == 1) //New dataset + { + //Set dataset index for the incoming dataset + nextdtindex = (int) buffer; + break; + } + + //Plain 90cl -> 68cl scaling + if (outdirs[label].Scale68()) + { + fline["therr+"] = fline["therr+"]/1.645; + fline["therr-"] = fline["therr-"]/1.645; + } + + //Hessian profile the theory prediction + if (outdirs[label].IsProfiled()) + { + double cor, eplus, eminus; + getTheoryShift(pdfmap[label].pdfshifts, pdfmap[label].cor_matrix, err, lines, outdirs[label].Scale68(), cor, eplus, eminus); + fline["thorig"] += cor; + fline["therr+"] = eplus; + fline["therr-"] = eminus; + } + + //Bayesian reweight the theory prediction + if (outdirs[label].IsReweighted()) + { + double value, error; + getTheoryReweight(pdfmap[label].mcw, lines, value, error); + fline["thorig"] = value; + fline["therr+"] = error; + fline["therr-"] = error; + } + + //Cumulative theory predictions for MC replica runs + if (outdirs[label].IsMCreplica()) + { + double value, error; + vector <double> w; + getTheoryReweight(w, lines, value, error); + fline["thorig"] = value; + fline["therr+"] = error; + fline["therr-"] = error; + fline["thmod"] = value; + fline["pull"] = 0.; + } + + //Add point to subplot + dtset.subplots[iplot].AddPoint(fline); + getline(infile, line); lines = readLineFiles(infiles); + + }//End loop on data points for (map <int, Subplot>::iterator sit = dtset.subplots.begin(); sit != dtset.subplots.end(); sit++) - (*sit).second.Init(label, dtindex, (*sit).first); + (*sit).second.Init(label, dtindex, (*sit).first); datamap[dtindex] = dtset; }//End loop on datasets diff --git a/tools/draw/src/PdfsPainter.cc b/tools/draw/src/PdfsPainter.cc index cf8c97cb1a9e0c16efc08a24dc5bb8dd52d0f27f..791b9c8660a2f43533ff943998734b9280b41f3f 100644 --- a/tools/draw/src/PdfsPainter.cc +++ 
b/tools/draw/src/PdfsPainter.cc @@ -17,7 +17,7 @@ vector <TCanvas*> PdfsPainter(double q2, pdftype ipdf) { vector <TCanvas*> cnvs; - char q2str[30]; + char q2str[30]; if (q2 < 10) sprintf(q2str, "%.1f", q2); else @@ -34,10 +34,10 @@ vector <TCanvas*> PdfsPainter(double q2, pdftype ipdf) blIs3bands[itl - opts.labels.begin()] = outdirs[*itl].Is3bands(); if (!blIs3bands[itl - opts.labels.begin()]) { pdfgraphs.push_back(pdfmap[*itl].Central[q2].GetPdf(ipdf)); - labels.push_back(*itl); + labels.push_back(*itl); - sprintf(pdfname, "dir%d_q2_%s_pdf_%s", (itl-opts.labels.begin()+1), q2str, pdffiles[ipdf].c_str()); - pdfgraphs.back()->SetName(pdfname); + sprintf(pdfname, "dir%d_q2_%s_pdf_%s", (itl-opts.labels.begin()+1), q2str, pdffiles[ipdf].c_str()); + pdfgraphs.back()->SetName(pdfname); } else { pdfgraphs.push_back(pdfmap[*itl].Central[q2].GetPdfCen(ipdf)); labels.push_back(*itl); @@ -70,43 +70,43 @@ vector <TCanvas*> PdfsPainter(double q2, pdftype ipdf) char cnvname[30]; sprintf(cnvname, "q2_%s_pdf_%s", q2str, pdffiles[ipdf].c_str()); - //Set xmin xmax - for (vector <TGraphAsymmErrors*>::iterator it = pdfgraphs.begin(); it != pdfgraphs.end(); it++) - { - if (*it == 0) - continue; - if (opts.xmin == -1 && opts.xmax == -1) - { - opts.xmin = (*it)->GetX()[0]; - opts.xmax = (*it)->GetX()[(*it)->GetN() - 1]; - } + if(opts.xmin==-1&&opts.xmax==-1){ + //Set xmin xmax + opts.xmin= 1e100; + opts.xmax=-1e100; + for(const auto&graph:pdfgraphs){ + if(!graph)continue; + TAxis*ax=graph->GetXaxis(); + opts.xmin=min(opts.xmin,ax->GetXmin()); + opts.xmax=max(opts.xmax,ax->GetXmax()); } + } //Remove points out of x range for (vector <TGraphAsymmErrors*>::iterator it = pdfgraphs.begin(); it != pdfgraphs.end(); it++) { if (*it == 0) - continue; + continue; bool removed = true; while (removed) - { - removed = false; - for (int i = 0; i < (*it)->GetN(); i++) - { - double xi = (*it)->GetX()[i]; - if (xi <= opts.xmin || xi >= opts.xmax) - { - (*it)->RemovePoint(i); + { + removed = false; + for (int i = 0; i < (*it)->GetN(); i++) + { + double xi = (*it)->GetX()[i]; + if (xi < opts.xmin || xi > opts.xmax) + { + (*it)->RemovePoint(i); if (blIs3bands[it - pdfgraphs.begin()]) { pdfempunc[it - pdfgraphs.begin()][0]->RemovePoint(i); pdfempunc[it - pdfgraphs.begin()][1]->RemovePoint(i); pdfempunc[it - pdfgraphs.begin()][2]->RemovePoint(i); } - removed = true; - break; - } - } - } + removed = true; + break; + } + } + } } //Set colors and styles @@ -125,45 +125,45 @@ vector <TCanvas*> PdfsPainter(double q2, pdftype ipdf) (*it)->SetFillColor(opts.colors[labels[it-pdfgraphs.begin()]]); if (opts.filledbands) - (*it)->SetFillStyle(1001); + (*it)->SetFillStyle(1001); else - (*it)->SetFillStyle(opts.styles[labels[it-pdfgraphs.begin()]]); + (*it)->SetFillStyle(opts.styles[labels[it-pdfgraphs.begin()]]); (*it)->SetLineStyle(1); (*it)->SetLineWidth(opts.lwidth); (*it)->SetLineColor(opts.colors[labels[it-pdfgraphs.begin()]]); colindx++; - } + } //Calculate maximum and minimum of y axis double mx = 0; for (vector <TGraphAsymmErrors*>::iterator it = pdfgraphs.begin(); it != pdfgraphs.end(); it++) for (int i = 0; i < (*it)->GetN(); i++) { - double xi = (*it)->GetX()[i]; - double val = (*it)->GetY()[i]; - double errhigh; + double xi = (*it)->GetX()[i]; + double val = (*it)->GetY()[i]; + double errhigh; if (!blIs3bands[it - pdfgraphs.begin()]) { errhigh = (*it)->GetErrorYhigh(i); } else { errhigh = pdfempunc[it - pdfgraphs.begin()][0]->GetErrorYhigh(i); } - if (xi >= opts.xmin && xi <= opts.xmax) - mx = max(mx, val+errhigh); + if (xi >= 
opts.xmin && xi <= opts.xmax) + mx = max(mx, val+errhigh); } double mn = mx; for (vector <TGraphAsymmErrors*>::iterator it = pdfgraphs.begin(); it != pdfgraphs.end(); it++) for (int i = 0; i < (*it)->GetN(); i++) { - double xi = (*it)->GetX()[i]; - double val = (*it)->GetY()[i]; - double errlow; + double xi = (*it)->GetX()[i]; + double val = (*it)->GetY()[i]; + double errlow; if (!blIs3bands[it - pdfgraphs.begin()]) { errlow = (*it)->GetErrorYlow(i); } else { errlow = pdfempunc[it - pdfgraphs.begin()][0]->GetErrorYlow(i); } - if (xi >= opts.xmin && xi <= opts.xmax) - mn = min(mn, val-errlow); + if (xi >= opts.xmin && xi <= opts.xmax) + mn = min(mn, val-errlow); } //Prepare TGraphs @@ -174,28 +174,28 @@ vector <TCanvas*> PdfsPainter(double q2, pdftype ipdf) for (vector <TGraphAsymmErrors*>::iterator it = pdfgraphs.begin(); it != pdfgraphs.end(); it++) { if (*it == 0) - continue; + continue; //Prepare graph line borders and graph shade int npoints = (*it)->GetN(); - double val_x[npoints], val_y[npoints], val_high_y[npoints], val_low_y[npoints]; + double val_x[npoints], val_y[npoints], val_high_y[npoints], val_low_y[npoints]; double xsh[2*npoints], ysh[2*npoints], yshEMP[3][2*npoints]; for (int i = 0; i < (*it)->GetN(); i++) - { - double val = (*it)->GetY()[i]; - double errhigh = (*it)->GetErrorYhigh(i); - double errlow = (*it)->GetErrorYlow(i); - - val_x[i] = (*it)->GetX()[i]; - val_y[i] = val; - val_high_y[i] = val + errhigh; - val_low_y[i] = val - errlow; - - //shade TGraph - xsh[i] = (*it)->GetX()[i]; - ysh[i] = val + errhigh; - xsh[npoints + i] = (*it)->GetX()[npoints-i-1]; - ysh[npoints + i] = (*it)->GetY()[npoints-i-1] - (*it)->GetErrorYlow(npoints-i-1); + { + double val = (*it)->GetY()[i]; + double errhigh = (*it)->GetErrorYhigh(i); + double errlow = (*it)->GetErrorYlow(i); + + val_x[i] = (*it)->GetX()[i]; + val_y[i] = val; + val_high_y[i] = val + errhigh; + val_low_y[i] = val - errlow; + + //shade TGraph + xsh[i] = (*it)->GetX()[i]; + ysh[i] = val + errhigh; + xsh[npoints + i] = (*it)->GetX()[npoints-i-1]; + ysh[npoints + i] = (*it)->GetY()[npoints-i-1] - (*it)->GetErrorYlow(npoints-i-1); if (blIs3bands[it - pdfgraphs.begin()]) { for (int iun=0; iun<3; iun++) { @@ -203,7 +203,7 @@ vector <TCanvas*> PdfsPainter(double q2, pdftype ipdf) yshEMP[iun][npoints + i] = (*it)->GetY()[npoints-i-1] - pdfempunc[it - pdfgraphs.begin()][iun]->GetErrorYlow(npoints-i-1); } } - } + } TGraph *centr = new TGraph(npoints, val_x, val_y); TGraph *high = new TGraph(npoints, val_x, val_high_y); @@ -259,10 +259,10 @@ vector <TCanvas*> PdfsPainter(double q2, pdftype ipdf) mg_lines->Add(high); mg_lines->Add(low); if (it+1 != pdfgraphs.end()) - { - mg_dotted_lines->Add(high_dot); - mg_dotted_lines->Add(low_dot); - } + { + mg_dotted_lines->Add(high_dot); + mg_dotted_lines->Add(low_dot); + } mg_shade->Add(shade, "f"); if (blIs3bands[it - pdfgraphs.begin()]) { for (int iun=0; iun<3; iun++) { @@ -297,22 +297,19 @@ vector <TCanvas*> PdfsPainter(double q2, pdftype ipdf) mg->SetMaximum(mx); mg->SetMinimum(mn); } - + mg->GetXaxis()->Set(100, opts.xmin, opts.xmax); - mg->GetXaxis()->SetRange(opts.xmin, opts.xmax); mg->GetXaxis()->SetTitleFont(62); mg->GetXaxis()->SetLabelFont(62); mg->GetXaxis()->SetTitleSize(txtsize); mg->GetXaxis()->SetLabelSize(txtsize); - // mg->GetXaxis()->SetTitleOffset(offset); mg->GetYaxis()->SetTitleFont(62); mg->GetYaxis()->SetLabelFont(62); mg->GetYaxis()->SetTitleSize(txtsize); - mg->GetYaxis()->SetLabelSize(txtsize); + mg->GetYaxis()->SetLabelSize(txtsize); 
mg->GetYaxis()->SetTitleOffset(offset); - //mg->Draw("LE3"); mg_shade->Draw(""); if (opts.filledbands) mg_dotted_lines->Draw("l"); @@ -332,11 +329,11 @@ vector <TCanvas*> PdfsPainter(double q2, pdftype ipdf) for (vector <TGraphAsymmErrors*>::iterator it = pdfgraphs.begin(); it != pdfgraphs.end(); it++) { if (*it == 0) - continue; + continue; if (opts.dobands) - leg->AddEntry((*it), labels[it-pdfgraphs.begin()].c_str(), "lf"); + leg->AddEntry((*it), labels[it-pdfgraphs.begin()].c_str(), "lf"); else - leg->AddEntry((*it), labels[it-pdfgraphs.begin()].c_str(), "l"); + leg->AddEntry((*it), labels[it-pdfgraphs.begin()].c_str(), "l"); } leg->Draw(); @@ -358,48 +355,48 @@ vector <TCanvas*> PdfsPainter(double q2, pdftype ipdf) for (vector <TGraphAsymmErrors*>::iterator it = pdfgraphs.begin(); it != pdfgraphs.end(); it++) { if (*it == 0) - { - rlist.push_back(0); - continue; - } + { + rlist.push_back(0); + continue; + } TGraphAsymmErrors *r = (TGraphAsymmErrors*)(*it)->Clone(); r->SetName(((string)(*it)->GetName() + "_ratio").c_str()); for (int i = 0; i < (*it)->GetN(); i++) - { - double ratio, rathigh, ratlow; - - double val = (*it)->GetY()[i]; - double ref = (*fit)->GetY()[i]; - if (ref != 0) - ratio = val/ref; - else - ratio = 1; - - if (opts.relerror) - ratio = 1; - - if (opts.abserror) - ratio = 0; - - double errhigh = (*it)->GetErrorYhigh(i); - double errlow = (*it)->GetErrorYlow(i); - - rathigh = ( ref != 0)? (errhigh/ref) : 0; - ratlow = ( ref != 0)? (errlow/ref) : 0; - if (opts.relerror) - { - rathigh = ( val != 0 )? (errhigh/val) : 0; - ratlow = ( val != 0 )? (errlow/val) : 0; - } - if (opts.abserror) - { - rathigh = errhigh; - ratlow = errlow; - } - r->SetPoint(i, (*it)->GetX()[i], ratio); - r->SetPointError(i, 0, 0, ratlow, rathigh); - } + { + double ratio, rathigh, ratlow; + + double val = (*it)->GetY()[i]; + double ref = (*fit)->GetY()[i]; + if (ref != 0) + ratio = val/ref; + else + ratio = 1; + + if (opts.relerror) + ratio = 1; + + if (opts.abserror) + ratio = 0; + + double errhigh = (*it)->GetErrorYhigh(i); + double errlow = (*it)->GetErrorYlow(i); + + rathigh = ( ref != 0)? (errhigh/ref) : 0; + ratlow = ( ref != 0)? (errlow/ref) : 0; + if (opts.relerror) + { + rathigh = ( val != 0 )? (errhigh/val) : 0; + ratlow = ( val != 0 )? 
(errlow/val) : 0; + } + if (opts.abserror) + { + rathigh = errhigh; + ratlow = errlow; + } + r->SetPoint(i, (*it)->GetX()[i], ratio); + r->SetPointError(i, 0, 0, ratlow, rathigh); + } rlist.push_back(r); } @@ -408,21 +405,21 @@ vector <TCanvas*> PdfsPainter(double q2, pdftype ipdf) for (vector <TGraphAsymmErrors*>::iterator it = rlist.begin(); it != rlist.end(); it++) { if (*it == 0) - continue; + continue; TGraphAsymmErrors* r = *it; if (opts.logx) - { - double xaxlength = opts.xmax / opts.xmin; - xmnforbds = opts.xmin * pow(xaxlength,1./4.); - xmxforbds = opts.xmax / pow(xaxlength,1./5.); - } + { + double xaxlength = opts.xmax / opts.xmin; + xmnforbds = opts.xmin * pow(xaxlength,1./4.); + xmxforbds = opts.xmax / pow(xaxlength,1./5.); + } else - { - double xaxlength = opts.xmax - opts.xmin; - xmnforbds = opts.xmin + xaxlength / 4.; - xmxforbds = opts.xmax - xaxlength / 5.; - } + { + double xaxlength = opts.xmax - opts.xmin; + xmnforbds = opts.xmin + xaxlength / 4.; + xmxforbds = opts.xmax - xaxlength / 5.; + } } //Calculate maximum and minimum of y axis @@ -432,45 +429,45 @@ vector <TCanvas*> PdfsPainter(double q2, pdftype ipdf) for (vector <TGraphAsymmErrors*>::iterator it = rlist.begin(); it != rlist.end(); it++) { if (*it == 0) - continue; + continue; TGraphAsymmErrors* r = *it; if (opts.rmax == 0 && opts.rmin == 0) - for (int i = 0; i < r->GetN(); i++) - { - double xi = r->GetX()[i]; - double yi_h = r->GetY()[i] + fabs(r->GetErrorYhigh(i)); - if (xi >= xmnforbds && xi <= xmxforbds) - mx = max(mx, yi_h); - } + for (int i = 0; i < r->GetN(); i++) + { + double xi = r->GetX()[i]; + double yi_h = r->GetY()[i] + fabs(r->GetErrorYhigh(i)); + if (xi >= xmnforbds && xi <= xmxforbds) + mx = max(mx, yi_h); + } else - mx = opts.rmax; + mx = opts.rmax; } mn = mx; for (vector <TGraphAsymmErrors*>::iterator it = rlist.begin(); it != rlist.end(); it++) { if (*it == 0) - continue; + continue; TGraphAsymmErrors* r = *it; if (opts.rmax == 0 && opts.rmin == 0) - for (int i = 0; i < r->GetN(); i++) - { - double xi = r->GetX()[i]; - double yi_l = r->GetY()[i] - fabs(r->GetErrorYlow(i)); - if (xi >= xmnforbds && xi <= xmxforbds) - mn = min(mn, yi_l); - } + for (int i = 0; i < r->GetN(); i++) + { + double xi = r->GetX()[i]; + double yi_l = r->GetY()[i] - fabs(r->GetErrorYlow(i)); + if (xi >= xmnforbds && xi <= xmxforbds) + mn = min(mn, yi_l); + } else - mn = opts.rmin; + mn = opts.rmin; } if (opts.rmax == 0 && opts.rmin == 0) if ((opts.abserror && (mx != 0 || mn != 0)) || (!opts.abserror && (mx != 1 || mn != 1))) { - double delta = mx - mn; - mx = mx + delta * (0.15 + 0.11 * pdfgraphs.size()); - mn = mn - delta * 0.3; + double delta = mx - mn; + mx = mx + delta * (0.15 + 0.11 * pdfgraphs.size()); + mn = mn - delta * 0.3; } //prepare TGraphs for line borders and graph shade @@ -482,75 +479,75 @@ vector <TCanvas*> PdfsPainter(double q2, pdftype ipdf) for (vector <TGraphAsymmErrors*>::iterator it = rlist.begin(); it != rlist.end(); it++) { if (*it == 0) - continue; + continue; TGraphAsymmErrors* r = *it; int npoints = r->GetN(); - double val_x[npoints], val_y[npoints], val_high_y[npoints], val_low_y[npoints]; + double val_x[npoints], val_y[npoints], val_high_y[npoints], val_low_y[npoints]; double xsh[2*npoints], ysh[2*npoints]; - + for (int i = 0; i < npoints; i++) - { - //Set graphical safety boundaries - double val = r->GetY()[i]; - - double ratio = r->GetY()[i]; - double high = r->GetY()[i] + r->GetErrorYhigh(i); - double low = r->GetY()[i] - r->GetErrorYlow(i); - - double ratio_tol = ratio; - double 
high_tol = high; - double low_tol = low; - - double delta = mx - mn; - if (ratio > (mx + delta * -tolerance)) - ratio = mx + delta * -tolerance; - if (high > (mx + delta * -tolerance)) - high = mx + delta * -tolerance; - if (low > (mx + delta * -tolerance)) - low = mx + delta * -tolerance; - - if (ratio_tol > (mx + delta * tolerance)) - ratio_tol = mx + delta * tolerance; - if (high_tol > (mx + delta * tolerance)) - high_tol = mx + delta * tolerance; - if (low_tol > (mx + delta * tolerance)) - low_tol = mx + delta * tolerance; - - if (ratio < (mn - delta * -tolerance)) - ratio = mn - delta * -tolerance; - if (high < (mn - delta * -tolerance)) - high = mn - delta * -tolerance ; - if (low < (mn - delta * -tolerance)) - low = mn - delta * -tolerance; - - if (ratio_tol < (mn - delta * tolerance)) - ratio_tol = mn - delta * tolerance; - if (high_tol < (mn - delta * tolerance)) - high_tol = mn - delta * tolerance; - if (low_tol < (mn - delta * tolerance)) - low_tol = mn - delta * tolerance; - - double errhigh = high - ratio; - double errlow = ratio - low; - r->SetPoint(i, r->GetX()[i], ratio); - r->SetPointError(i, 0, 0, errlow, errhigh); - - val_x[i] = r->GetX()[i]; - val_y[i] = ratio; - val_high_y[i] = high; - val_low_y[i] = low; - } - + { + //Set graphical safety boundaries + double val = r->GetY()[i]; + + double ratio = r->GetY()[i]; + double high = r->GetY()[i] + r->GetErrorYhigh(i); + double low = r->GetY()[i] - r->GetErrorYlow(i); + + double ratio_tol = ratio; + double high_tol = high; + double low_tol = low; + + double delta = mx - mn; + if (ratio > (mx + delta * -tolerance)) + ratio = mx + delta * -tolerance; + if (high > (mx + delta * -tolerance)) + high = mx + delta * -tolerance; + if (low > (mx + delta * -tolerance)) + low = mx + delta * -tolerance; + + if (ratio_tol > (mx + delta * tolerance)) + ratio_tol = mx + delta * tolerance; + if (high_tol > (mx + delta * tolerance)) + high_tol = mx + delta * tolerance; + if (low_tol > (mx + delta * tolerance)) + low_tol = mx + delta * tolerance; + + if (ratio < (mn - delta * -tolerance)) + ratio = mn - delta * -tolerance; + if (high < (mn - delta * -tolerance)) + high = mn - delta * -tolerance ; + if (low < (mn - delta * -tolerance)) + low = mn - delta * -tolerance; + + if (ratio_tol < (mn - delta * tolerance)) + ratio_tol = mn - delta * tolerance; + if (high_tol < (mn - delta * tolerance)) + high_tol = mn - delta * tolerance; + if (low_tol < (mn - delta * tolerance)) + low_tol = mn - delta * tolerance; + + double errhigh = high - ratio; + double errlow = ratio - low; + r->SetPoint(i, r->GetX()[i], ratio); + r->SetPointError(i, 0, 0, errlow, errhigh); + + val_x[i] = r->GetX()[i]; + val_y[i] = ratio; + val_high_y[i] = high; + val_low_y[i] = low; + } + //shade TGraph for (int i = 0; i < r->GetN(); i++) - { - xsh[i] = r->GetX()[i]; - ysh[i] = r->GetY()[i] + r->GetErrorYhigh(i); - xsh[npoints + i] = r->GetX()[npoints-i-1]; - ysh[npoints + i] = r->GetY()[npoints-i-1] - r->GetErrorYlow(npoints-i-1); - } + { + xsh[i] = r->GetX()[i]; + ysh[i] = r->GetY()[i] + r->GetErrorYhigh(i); + xsh[npoints + i] = r->GetX()[npoints-i-1]; + ysh[npoints + i] = r->GetY()[npoints-i-1] - r->GetErrorYlow(npoints-i-1); + } TGraph *r_centr = new TGraph(npoints, val_x, val_y); TGraph *r_high = new TGraph(npoints, val_x, val_high_y); @@ -598,10 +595,10 @@ vector <TCanvas*> PdfsPainter(double q2, pdftype ipdf) mg_ratio_lines->Add(r_high); mg_ratio_lines->Add(r_low); if (it+1 != pdfgraphs.end()) - { - mg_ratio_dotted_lines->Add(r_high_dot); - 
mg_ratio_dotted_lines->Add(r_low_dot); - } + { + mg_ratio_dotted_lines->Add(r_high_dot); + mg_ratio_dotted_lines->Add(r_low_dot); + } mg_ratio_shade->Add(r_shade, "f"); mg_ratio_shade->Add(r_high_shade, "l"); mg_ratio_shade->Add(r_low_shade, "l"); @@ -627,15 +624,15 @@ vector <TCanvas*> PdfsPainter(double q2, pdftype ipdf) else { if (opts.abserror) - { - mg_ratio->SetMaximum(1); - mg_ratio->SetMinimum(-1); - } + { + mg_ratio->SetMaximum(1); + mg_ratio->SetMinimum(-1); + } else - { - mg_ratio->SetMaximum(2); - mg_ratio->SetMinimum(0); - } + { + mg_ratio->SetMaximum(2); + mg_ratio->SetMinimum(0); + } } mg_ratio->GetXaxis()->SetTitle(" x "); @@ -652,7 +649,7 @@ vector <TCanvas*> PdfsPainter(double q2, pdftype ipdf) mg_ratio->GetXaxis()->SetTitleSize(txtsize); mg_ratio->GetXaxis()->SetLabelSize(txtsize); // mg_ratio->GetXaxis()->SetTitleOffset(offset); - + mg_ratio->GetYaxis()->SetTitleFont(62); mg_ratio->GetYaxis()->SetLabelFont(62); mg_ratio->GetYaxis()->SetTitleSize(txtsize); @@ -683,11 +680,11 @@ vector <TCanvas*> PdfsPainter(double q2, pdftype ipdf) for (vector <TGraphAsymmErrors*>::iterator it = pdfgraphs.begin(); it != pdfgraphs.end(); it++) { if (*it == 0) - continue; + continue; if (opts.dobands) - leg2->AddEntry((*it), labels[it-pdfgraphs.begin()].c_str(), "lf"); + leg2->AddEntry((*it), labels[it-pdfgraphs.begin()].c_str(), "lf"); else - leg2->AddEntry((*it), labels[it-pdfgraphs.begin()].c_str(), "l"); + leg2->AddEntry((*it), labels[it-pdfgraphs.begin()].c_str(), "l"); } leg2->Draw(); diff --git a/tools/draw/src/xfitter-draw.cc b/tools/draw/src/xfitter-draw.cc index ea5d5cef9588c5e9ef175c85a9fbe7a997096d7a..209530a72e7e532272c168f82298da5d383ac4ed 100644 --- a/tools/draw/src/xfitter-draw.cc +++ b/tools/draw/src/xfitter-draw.cc @@ -19,7 +19,7 @@ using namespace std; -int main(int argc, char **argv) +int main(int argc, char **argv) { //-------------------------------------------------- //parse command line arguments @@ -55,7 +55,7 @@ int main(int argc, char **argv) //Set default out directory if (opts.outdir == "") opts.outdir = "plots/"; - + if (opts.outdir.rfind("/") != opts.outdir.size() - 1) opts.outdir.append("/"); @@ -81,7 +81,7 @@ int main(int argc, char **argv) break; } } - + //-------------------------------------------------- //Data plots gStyle->SetEndErrorSize(4); @@ -315,17 +315,17 @@ int main(int argc, char **argv) inputfiles = inputfiles + " " + opts.outdir + "plots_" + pgnum + ".eps"; } - // if (!chi2tab) - if (!opts.notables) + // if (!chi2tab) + if (!opts.notables) inputfiles = inputfiles + " " + opts.outdir + "chi2.pdf"; if (!partab) inputfiles = inputfiles + " " + opts.outdir + "par.pdf"; //A4 is /PageSize [842 595] - string gscommand = "gs -dBATCH -q -sDEVICE=" + format + "write -sOutputFile=" + opts.outdir + "plots." + format - + " -dNOPAUSE -dEPSFitPage -c \"<< /PageSize [595 595] >> setpagedevice\" -f " + string gscommand = "gs -dBATCH -q -sDEVICE=" + format + "write -sOutputFile=" + opts.outdir + "plots." + format + + " -dNOPAUSE -dEPSFitPage -c \"<< /PageSize [595 595] >> setpagedevice\" -f " + inputfiles; - + bool makeplots = system(gscommand.c_str()); if (!makeplots) cout << "Plots saved in: " << (opts.outdir + "plots." 
+ format) << endl; diff --git a/tools/process/Makefile.am b/tools/process/Makefile.am index baf3d89e417e2765d580c7ae5f1ea668031bb857..446f9bf29c835b06d16537ff56c2c9abb093c604 100644 --- a/tools/process/Makefile.am +++ b/tools/process/Makefile.am @@ -14,6 +14,7 @@ dist_noinst_HEADERS = rotate.h utils.h xfitter-process.h xfitter_process_CFLAGS = -I$(srcdir)/../../include xfitter_process_CFLAGS += -Wall xfitter_process_CPPFLAGS = +xfitter_process_CXXFLAGS= if ENABLE_LHAPDF xfitter_process_SOURCES += interpolation.cc @@ -31,9 +32,10 @@ endif if HAVE_ROOT - xfitter_process_SOURCES += rootplot.cc - xfitter_process_CPPFLAGS += -I$(srcdir)/../../include $(ROOT_CFLAGS) -w - xfitter_process_LDADD += $(ROOT_LIBS) + xfitter_process_SOURCES+=rootplot.cc + xfitter_process_CPPFLAGS+=-I$(srcdir)/../../include + xfitter_process_CXXFLAGS+=$(ROOT_CFLAGS) + xfitter_process_LDADD+=$(ROOT_LIBS) else xfitter_process_SOURCES += rootplot_dummy.cc endif diff --git a/tools/process/rootplot.cc b/tools/process/rootplot.cc index 5cc590b94b5fde9360331ab1a81fc5569ef41e2f..8d67202079888b45fc8ecc48dc538c07bfba8f93 100644 --- a/tools/process/rootplot.cc +++ b/tools/process/rootplot.cc @@ -1,7 +1,9 @@ #include <TH1F.h> #include <TCanvas.h> #include <stdlib.h> +#define Info xfitter_Info #include "utils.h" +#undef Info #include <string.h> #include <libgen.h> #include <iostream> diff --git a/xfitter/Makefile.am b/xfitter/Makefile.am index e516d4fe528d4b2afada66c571e9aadbb52343f7..a55062c4e50a5663fdee3b0ebe21dd9e8260fdfd 100644 --- a/xfitter/Makefile.am +++ b/xfitter/Makefile.am @@ -9,7 +9,7 @@ bin_PROGRAMS = xfitter xfitter_SOURCES = cmain.cxx # XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX -xfitter_LDFLAGS = -L${srcdir}/../lib -lxfmain -lxffnlo -lmyrt -lHFcommon -lmyew ../minuit/src/.libs/minuit.o -lmyminuit -lgenetic -lQEDevol -lnum_utils -linterfaces +xfitter_LDFLAGS = -L${srcdir}/../lib -lxfmain -lxffnlo -lmyrt -lHFcommon -lmyew ../minuit/src/.libs/minuit.o -lmyminuit -lgenetic -lQEDevol -lnum_utils AM_CXXFLAGS = -I$(srcdir)/../include -I$(srcdir)/../interfaces/include -I$(srcdir)/../FastNLO/include