diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 0c6c8bebcf1417d43e2721fe7ae0659ffd87d52e..2fad6e219fe5be553558a232cd2d6bd5fa64a6db 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -1,7 +1,7 @@ image: "gitlab-registry.cern.ch/fitters/xfitter" cache: - paths: + paths: - deps stages: @@ -9,7 +9,8 @@ stages: - nightly before_script: - - yum -y install which yaml-cpp-devel libyaml-devel gsl-devel + - yum -y install yum-plugin-ovl + - yum -y install which yaml-cpp-devel libyaml-devel gsl-devel - . ./scripts/setup.sh - ./scripts/install-deps.sh diff --git a/ABM/src/Makefile.am b/ABM/src/Makefile.am index 762210d6af292f7ce4ff206f3faaec0cb2fdba80..6b42757c3ff36af79f2d48b313c780f3f8b0a324 100644 --- a/ABM/src/Makefile.am +++ b/ABM/src/Makefile.am @@ -1,12 +1,5 @@ AUTOMAKE_OPTIONS = foreign -dist_noinst_HEADERS = APSCOM6. CONSTCOM. PDFCOM. PRECCOM. -# Force recompilation if include files are modified: -*.o: APSCOM6. CONSTCOM. PDFCOM. PRECCOM. +lib_LTLIBRARIES = libmyabm.la -noinst_LIBRARIES = libmyabm.a - -libmyabm_a_SOURCES = sf_abkm_wrap.f alphasvfn.f asympcoef.f cq.f dishq.f dislt.f disqpm.f f2ccbmsn.f gauss.f hqcoef.f hqnnlocoef.f lpcoef.f ome.f spline.f grid.F initgridconst.F asy-hpcc.f hplog.f split.f s2nlo.f -AM_FFLAGS = -I$(srcdir)/../../include $(NOAUTOFCFLAG) - -#AM_FFLAGS = -I$(srcdir)/../../include -fno-automatic -finit-local-zero -ffixed-line-length-132 +libmyabm_la_SOURCES = sf_abkm_wrap.f alphasvfn.f asympcoef.f dishq.f dislt.f disqpm.f f2ccbmsn.f gauss.f hqcoef.f hqnnlocoef.f lpcoef.f ome.f spline.f grid.F initgridconst.F asy-hpcc.f hplog.f split.f s2nlo.f diff --git a/ABM/src/cq.f b/ABM/src/cq.f deleted file mode 100644 index 93584192b9fa5a43da1d1057c96dc0340637ff61..0000000000000000000000000000000000000000 --- a/ABM/src/cq.f +++ /dev/null @@ -1,229 +0,0 @@ -c------------- - subroutine fillvfngrid - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - - INCLUDE 'APSCOM6.' - INCLUDE 'CONSTCOM.' - INCLUDE 'PDFCOM.' - - common /forqgspl/ xb0,q2,xlog,an,an2,an3,kn,iqn,ixn,isn,ihqn - -! Set up the boundary conditions for the strong coupling evolution -! using the 3-flavour strong coupling at the scale of 4-flavour matching -! stored in the grid - alphas0=xqg(0,0.1d0,vfnth(4)**2,0) - q20alphas=vfnth(4)**2 - - do is=-nsmgrid,nspgrid -! Take the 3-flavour PDFs as an input for the matching conditions - isch0=0 -! filling the LO 4-flavour PDFs - isch1=-2 - Q2=0.04*exp(exp(sgrid(is,isch1))*log(q2ini(isch1)/0.04)) - AN=ALPHAS_ffn4(q2)/4./PI - an2=an**2 - call fillvfx(is,8,0,isch0,isch1) -! filling the NLO 4-flavour PDFs - if (kordhq.ge.1) then - isch1=-3 - call fillvfx(is,8,1,isch0,isch1) - end if -! Take the LO 4-flavour PDFs as an input for the matching conditions - isch0=-2 -! filling the LO 5-flavour PDFs - isch1=-4 - Q2=0.04*exp(exp(sgrid(is,isch1))*log(q2ini(isch1)/0.04)) - AN=ALPHAS_ffn5(q2)/4./PI - an2=an**2 - call fillvfx(is,10,0,isch0,isch1) - if (kordhq.ge.1) then -! Take the NLO 4-flavour PDFs as an input for the matching conditions - isch0=-3 -! filling the NLO 5-flavour PDFs - isch1=-5 - call fillvfx(is,10,1,isch0,isch1) - end if - end do - - return - end -c------------- - subroutine fillvfx(is,nhq,kome,isch0,isch1) - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - - INCLUDE 'APSCOM6.' - INCLUDE 'CONSTCOM.' - INCLUDE 'PDFCOM.' - - common /forqgspl/ xb0,q2,xlog,an,an2,an3,kn,iqn,ixn,isn,ihqn - - real*8 fsp(nxtot),bs(nxtot),cs(nxtot),ds(nxtot),xx(nxtot) - - an2=an**2 - ischem=isch1 - - do ix=-nxmgrid,nxpgrid - xx(ix+nxmgrid+1)=xgrid(ix) - end do - - do IX=-nxmgrid,nxpgrid-1 -! 
FOPT matching - Y(ischem,0,IX,is)=an*4*pi - do iq=1,nhq - Y(ischem,iq,IX,is)=hqpdf(XGRID(IX),is,iq,nhq,kome,isch0) - end do - Y(ischem,nhq+1,IX,is)=Y(ischem,nhq,IX,is) - end do - Y(ischem,0,nxpgrid,is)=Y(ischem,0,nxpgrid-1,is) - - do iq=1,nhq+1 - Y(ischem,iq,nxpgrid,is)=0D0 - do ix=-nxmgrid,nxpgrid - fsp(ix+nxmgrid+1)=y(ischem,iq,ix,is) - end do - call spline (nxmgrid+nxpgrid+1,xx,fsp,bs,cs,ds) - do ix=-nxmgrid,nxpgrid - bcoeff(ischem,iq,ix,is)=bs(ix+nxmgrid+1) - ccoeff(ischem,iq,ix,is)=cs(ix+nxmgrid+1) - dcoeff(ischem,iq,ix,is)=ds(ix+nxmgrid+1) - end do - end do - - return - end -C------------------ - real*8 function hqpdf(xb,is,iq,ihq,kome,isch0) - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - - INCLUDE 'APSCOM6.' - include 'CONSTCOM.' - include 'PRECCOM.' - - COMMON /FORQGSPL/ XB0,Q2,XLOG,AN,AN2,an3,kn,iqn,ixn,isn,ihqn - common /forhqpdf/ r,kome0 - real*8 q(nflim) - - external hqpdfi1,hqpdfi2 - - xb0=xb - iqn=iq - isn=is - ihqn=ihq - kn=isch0 - - kome0=kome - r=q2/rmass(ihq)**2 - - hqpdf=0. - -c Local terms for the gluon and quark pieces - - if (iq.eq.1) then - hqpdf=1 + an*ome_gg_1_local(xb,r) - if (kome.ge.1) hqpdf=hqpdf + an2*ome_gg_2_local(xb,r) - hqpdf=hqpdf*XQGX1(kn,1,XB,IS) - end if - - if (iq.ge.2.and.iq.le.ihq-1) then - hqpdf=1 - if (kome.ge.1) hqpdf=hqpdf + an2*ome_qqns_2_local(xb,r) - hqpdf=hqpdf*XQGX1(kn,iq,XB,IS) - end if - -c integration of the total regular term - - if (xb.ge.0.1) then - CALL GAUSS1(hqpdfi1 - , ,log(1d-8),log(1.-xb),nmthq,res,EPS) - else - CALL GAUSS1(hqpdfi1 - , ,log(1d-8),log(0.9d0),nmthq,df1,EPS1) - CALL GAUSS1(hqpdfi2,log(xb) - , ,log(0.1d0),nmthq,df2,EPS2) - res=df1+df2 - eps=sqrt(eps1**2+eps2**2) - end if - - hqpdf=hqpdf+res - - return - end -C------------------ - real*8 function hqpdfi1(t) - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - - y=1.-exp(t) - - hqpdfi1=hqpdfi(y)*(1-y) - - return - end -C------------------ - real*8 function hqpdfi2(t) - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - - y=exp(t) - - hqpdfi2=hqpdfi(y)*y - - return - end -C------------------ - real*8 function hqpdfi(z) - IMPLICIT DOUBLE PRECISION (A-H,O-Z) - - include 'CONSTCOM.' - real*8 q(13) - - common /forqgspl/ xb0,q2,xlog,an,an2,an3,kn,iqn,ixn,isn,ihqn - common /forhqpdf/ r,kome0 - - y=xb0/z - hqpdfi=0. - -c gluon distribution - - if (iqn.eq.1) then - if (kome0.ge.1) then - glu0=xqgx1(kn,1,xb0,isn) - glu=xqgx1(kn,1,y,isn) - hqpdfi=hqpdfi+ome_gg_2_singular(z,r)*(glu-glu0) - hqpdfi=hqpdfi+ome_gg_2(z,r)*glu - qps=0. - do k=2,ihqn-1 - qps=qps+xqgx1(kn,k,y,isn) - end do - hqpdfi=hqpdfi+ome_gq_2(z,r)*qps - hqpdfi=hqpdfi*an2 - end if - end if - -c light quark disributions - - if (iqn.ge.2.and.iqn.le.ihqn-1) then - if (kome0.ge.1) then - pdf0=xqgx1(kn,iqn,xb0,isn) - pdfc=xqgx1(kn,iqn,y,isn) - hqpdfi=hqpdfi+ome_qqns_2_singular(z,r)*(pdfc-pdf0) - hqpdfi=hqpdfi+ome_qqns_2(z,r)*pdfc - hqpdfi=hqpdfi*an2 - end if - end if - -c heavy quark distributions - - if (iqn.ge.ihqn) then - glu=xqgx1(kn,1,y,isn) - hqpdfi=hqpdfi+ome_g_1(z,r)*an*glu - if (kome0.ge.1) then - qps=0. - do k=2,ihqn-1 - qps=qps+xqgx1(kn,k,y,isn) - end do - hqpdfi=hqpdfi + an2*(ome_g_2(z,r)*glu + ome_q_2(z,r)*qps) - end if - hqpdfi=hqpdfi/2. 
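As an aside on the integration in hqpdf above: the z-integral over [x, 1] is done in logarithmic variables, t = ln z near z -> 0 and t = ln(1-z) near z -> 1 (split at z = 0.1 when x < 0.1), which is why hqpdfi2 returns hqpdfi(y)*y and hqpdfi1 returns hqpdfi(y)*(1-y) — those factors are the Jacobians dz = z dt and dz = (1-z) dt. A minimal standalone sketch of the same change of variables, with a toy integrand and a plain midpoint rule standing in for GAUSS1 (everything here is illustrative, not xFitter code):

#include <cmath>
#include <cstdio>
#include <functional>

// simple midpoint rule on a uniform grid in t (stand-in for GAUSS1)
static double midpoint(const std::function<double(double)>& f,
                       double a, double b, int n = 2000) {
  double h = (b - a) / n, s = 0.0;
  for (int i = 0; i < n; ++i) s += f(a + (i + 0.5) * h);
  return s * h;
}

int main() {
  // toy integrand standing in for the OME x PDF convolution kernel
  auto f = [](double z) { return std::log(1.0 - z) / z; };
  const double x = 0.01;

  // t = ln z piece: z = e^t, dz = z dt, covers z in [x, 0.1]   (cf. hqpdfi2)
  double lowZ = midpoint([&](double t) {
    const double z = std::exp(t);
    return f(z) * z;
  }, std::log(x), std::log(0.1));

  // t = ln(1-z) piece: z = 1 - e^t, dz = (1-z) dt, covers z in [0.1, 1)   (cf. hqpdfi1)
  double highZ = midpoint([&](double t) {
    const double z = 1.0 - std::exp(t);
    return f(z) * (1.0 - z);
  }, std::log(1e-8), std::log(0.9));

  std::printf("integral over [%g,1) ~ %g\n", x, lowZ + highZ);
  return 0;
}

The lower cut-off log(1d-8) and the split point z = 0.1 mirror the if (xb.ge.0.1) branch in hqpdf above.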
- end if - - return - end diff --git a/ABM/src/sf_abkm_wrap.f b/ABM/src/sf_abkm_wrap.f index 06f38a0c1dc782bf18d6ff3b2781e8c6c706ec19..5a1bf2e58e4cd383d0c44ea31c2f4470cba9c71d 100644 --- a/ABM/src/sf_abkm_wrap.f +++ b/ABM/src/sf_abkm_wrap.f @@ -25,8 +25,15 @@ C New user routines for PDFs and alpha_S have to be provided in this version RETURN END - - + + DOUBLE PRECISION FUNCTION DiLog(X) + double precision x + double precision ddilog + dilog = ddilog(x) + return + end + + subroutine sf_abkm_wrap(x,q2,f2abkm,flabkm,f3abkm,f2cabkm, $ flcabkm,f3cabkm,f2babkm,flbabkm,f3babkm,ncflag,charge, $ polar,sin2thw,cos2thw,MZ) @@ -58,11 +65,11 @@ c f2qcd(nb,nt,ni,xb,q2) nt = 1 -c --------------- Neutral Currents ! ---------------- +c --------------- Neutral Currents ! ---------------- if(ncflag.eq.1) then -c new rewriten version of ABM, now with Z cont available need to calculate full SFs -c we also take polarisation into account +c new rewriten version of ABM, now with Z cont available need to calculate full SFs +c we also take polarisation into account c------- Z-ELECTRON COUPLINGS: eleVec= -0.5d0 + 2.0d0 * sin2thw @@ -86,13 +93,13 @@ C Propagator factor PZ PZ = 4.d0 * sin2thw * cos2thw * (1.+Mz**2/q2) PZ = 1./Pz - f2abkm = f2qcd(3,nt,22,x,q2) + facgz*PZ*f2qcd(3,nt,25,x,q2) + f2abkm = f2qcd(3,nt,22,x,q2) + facgz*PZ*f2qcd(3,nt,25,x,q2) $ + faczz*PZ*PZ*f2qcd(3,nt,23,x,q2) - flabkm = flqcd(3,nt,22,x,q2) + facgz*PZ*flqcd(3,nt,25,x,q2) + flabkm = flqcd(3,nt,22,x,q2) + facgz*PZ*flqcd(3,nt,25,x,q2) $ + faczz*PZ*PZ*flqcd(3,nt,23,x,q2) f3abkm = facgzf3*PZ*f3qcd(3,nt,25,x,q2) + faczzf3*PZ*PZ $ *f3qcd(3,nt,23,x,q2) -c add the negative sign in front of Y_xF3 for neg charge +c add the negative sign in front of Y_xF3 for neg charge if(charge.gt.0) then f3abkm = -1*f3abkm endif @@ -109,15 +116,15 @@ c b quark f3babkm = 0.0d0 -c --------------- Charged Currents ! ---------------- +c --------------- Charged Currents ! ---------------- elseif(ncflag.eq.0) then ni = 24 if(charge.gt.0) then -c W+ +c W+ nb = 6 else -c W- +c W- nb = 7 endif @@ -125,7 +132,7 @@ c divide all SFs by 2 to get e+/- f2abkm = f2qcd(nb,nt,ni,x,q2)/2 flabkm = flqcd(nb,nt,ni,x,q2)/2 f3abkm = f3qcd(nb,nt,ni,x,q2)/2 - + c c quark f2cabkm = f2nucharm(nb,nt,ni,x,q2,8)/2 flcabkm = f2nucharm(nb,nt,ni,x,q2,8)/2 @@ -141,7 +148,7 @@ c b quark end Subroutine ABKM_Set_Input(kschemepdfin,kordpdfin,rmass8in, - $ rmass10in,msbarmin,hqscale1in,hqscale2in,flagthinterface) + $ rmass10in,msbarmin,hqscale1in,hqscale2in,flagthinterface) C--------------------------------------------------------------------------- C Wraper for INPUT common, set parameters C--------------------------------------------------------------------------- @@ -151,7 +158,7 @@ C Input variables: integer kschemepdfin,kordpdfin logical msbarmin double precision hqscale1in,hqscale2in - + C Common variables: integer npdftot,kordpdf,kschemepdf,kpdfset,kordkernel c common /forpdfset/ kschemepdf,kordpdf @@ -164,10 +171,10 @@ c common /forpdfset/ kschemepdf,kordpdf double precision alpsc,alpsb,alpst,tscale,rscale,fscale,hqscale1 double precision hqscale2 integer nfeff,kordalps,kfeff,kordhq,kordf2,kordfl,kordf3 - logical alsmz - + logical alsmz + C OZ 17.10.17 Flags to check that this routine is called only once. 
-C In the future it should be removed, now it is needed to avoid +C In the future it should be removed, now it is needed to avoid C interference with legacy call from init_theory.f integer flagthinterface integer flaginit @@ -184,7 +191,7 @@ C interference with legacy call from init_theory.f double precision ddnnlohq common /forschemedef/ ddnnlohq,msbarm,hqnons , ,bmsnfopt,bmsnnlo,vloop -C------------------------------- +C------------------------------- C OZ 17.10.17 TODO avoid this in the future C (or stop execution if called not from theory interface) @@ -201,51 +208,51 @@ C (or stop execution if called not from theory interface) rmass(8) = rmass8in rmass(10) = rmass10in kschemepdf= kschemepdfin -c set same order for pdf, light, heavy quarks +c set same order for pdf, light, heavy quarks print*,'kordpdfin, rmass8in,rmass10in ', kordpdfin, rmass8in,rmass10in kordpdf = kordpdfin kordhq = kordpdfin kordf2 = kordpdfin -c follow recommendation of Sergey Alekhin for FL +c follow recommendation of Sergey Alekhin for FL kordfl = kordpdfin+1 kordf3 = kordpdfin kordalps = kordpdfin c run in running m scheme - msbarm = msbarmin + msbarm = msbarmin hqscale1 = hqscale1in hqscale2 = hqscale2in - + c 10.10.2017 Discussion with Sergey Alekhin: -c The parameter HQNONS drives the nonsinglet contribution to the charm production. -c It is infrared unsafe in the NNLO therefore there are pro and contra for including it and it is up to user. +c The parameter HQNONS drives the nonsinglet contribution to the charm production. +c It is infrared unsafe in the NNLO therefore there are pro and contra for including it and it is up to user. c In ABMP16 fit it was set to .false. c (makes small difference which reaches few % only at highest Q2 of the charm HERA data and is negligible for practical purposes) hqnons = .false. 
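For reference, the neutral-current branch of sf_abkm_wrap above assembles the full structure functions from the pure-photon, photon-Z interference and pure-Z pieces returned by f2qcd/flqcd/f3qcd (flavour-combination indices 22, 25 and 23), weighted by the propagator factor PZ and the polarisation-dependent coupling combinations facgz/faczz built from eleVec and eleAxial. Written out, with g_{gamma Z} and g_{ZZ} used here as shorthand for those coupling factors:

F_2^{NC} = F_2^{\gamma} + g_{\gamma Z}\,\kappa_Z\,F_2^{\gamma Z} + g_{ZZ}\,\kappa_Z^{2}\,F_2^{Z},
\qquad
\kappa_Z \equiv \mathrm{PZ} = \frac{1}{4\sin^2\theta_W\,\cos^2\theta_W\,\bigl(1 + M_Z^2/Q^2\bigr)}

F_L is combined with the same weights; xF_3 has no pure-photon piece, uses the facgzf3/faczzf3 couplings instead, and its overall sign is fixed according to the lepton charge, as in the code above.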
- + C-------------------------------------------------------------------------- end - + Subroutine ABKM_Set_Input_OrderFl(flordin) C OZ 1.10.2017 set O(alpha_S) for F_L C--------------------------------------------------------------------------- implicit none C Input variables: integer flordin - + C Common variables: double precision q20,q2rep,q2s,q20alphas,alphas0,alpsz,alpss double precision alpsc,alpsb,alpst,tscale,rscale,fscale,hqscale1 double precision hqscale2 integer nfeff,kordalps,kfeff,kordhq,kordf2,kordfl,kordf3 - logical alsmz + logical alsmz common /FORALPSRENORM/ q20,q2rep,q2s,q20alphas,alphas0,alpsz,alpss , ,alpsc,alpsb,alpst,tscale,rscale,fscale,hqscale1,hqscale2 , ,nfeff,kordalps,kfeff , ,kordhq,kordf2,kordfl,kordf3 , ,alsmz - -C------------------------------- + +C------------------------------- kordfl = kordf2+flordin -C------------------------------- +C------------------------------- end diff --git a/Makefile.am b/Makefile.am index ed9c5b65bb410a7c91c2fddd334b534f9b699292..f28ece38eb0f7831f8fe06249964a6157b7c0535 100644 --- a/Makefile.am +++ b/Makefile.am @@ -19,18 +19,25 @@ SUBDIRS = minuit/src DY/src DIPOLE/src RT/src EW/src common common/linalg \ reactions/BaseHVQMNR/src \ reactions/HVQMNR_LHCb_7TeV_beauty/src \ reactions/HVQMNR_LHCb_7TeV_charm/src \ + reactions/cbdiff/src \ reactions/testZMVFNS/src \ reactions/fastNLO/src/ \ reactions/FONLL_DISCC/src \ reactions/FONLL_DISNC/src \ reactions/AFB/src \ reactions/KMatrix/src \ + reactions/KRunning/src \ pdfparams/BasePdfParam/src \ pdfparams/Expression/src\ pdfparams/HERAPDF_PdfParam/src \ pdfparams/PolySqrtPdfParam/src \ pdfparams/NegativeGluonPdfParam/src\ + pdfparams/ABMPgluonPdfParam/src\ + pdfparams/ABMPseaPdfParam/src \ + pdfparams/ABMPvalencePdfParam/src \ pdfdecompositions/BasePdfDecomposition/src\ + pdfdecompositions/UvDvUbarDbarSSbarPdfDecomposition/src \ + pdfdecompositions/LHAPDFDecomposition/src \ pdfdecompositions/UvDvUbarDbarS/src \ pdfdecompositions/SU3_PionPdfDecomposition/src \ evolutions/BaseEvolution/src \ @@ -60,7 +67,7 @@ if BUILD_DOCS DOCS=doc/tex/manual doc/tex/paper endif #$(info ${DOCS}) -SUBDIRS+= src xfitter $(DOCS) +SUBDIRS+= src xfitter $(DOCS) include $(top_srcdir)/aminclude.am @@ -69,7 +76,7 @@ reaction_DATA = Reactions.txt # Tell which program should run the .sh scripts. 
#SH_LOG_COMPILER = $(SHELL) -ex -#TESTS_ENVIRONMENT = $(SHELL) +#TESTS_ENVIRONMENT = $(SHELL) #TESTS= ./tools/check.sh EXTRA_DIST= README INSTALLATION LICENCE REFERENCES \ diff --git a/Reactions.txt b/Reactions.txt index a62c8ebeb2859c4adaeb31ab63ce344fcf1835e4..6307128e3a3bcf16b6eaa1391d6496433ca47a53 100644 --- a/Reactions.txt +++ b/Reactions.txt @@ -30,3 +30,9 @@ NegativeGluon libNegativeGluonPdfParam_xfitter.so PolySqrt libPolySqrtPdfParam_xfitter.so AFB libafb_xfitter.so KMatrix libkmatrix_xfitter.so +UvDvUbarDbarSSbar libuvdvubardbarssbarpdfdecomposition_xfitter.so +ABMPvalence libABMPvalencePdfParam_xfitter.so +ABMPsea libABMPseaPdfParam_xfitter.so +ABMPgluon libABMPgluonPdfParam_xfitter.so +KRunning libkrunning_xfitter.so +cbdiff libcbdiff_xfitter.so diff --git a/common/linalg/spline.h b/common/linalg/spline.h new file mode 100644 index 0000000000000000000000000000000000000000..2e46efa67f5ab03b21b292cb9d92d5f2058f99e6 --- /dev/null +++ b/common/linalg/spline.h @@ -0,0 +1,427 @@ +/* + * spline.h + * + * simple cubic spline interpolation library without external + * dependencies + * + * --------------------------------------------------------------------- + * Copyright (C) 2011, 2014 Tino Kluge (ttk448 at gmail.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
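Since the new common/linalg/spline.h is header-only, a short usage sketch of the interface it declares may help; it is not part of the patch. The include path and the hf_errlog stand-in below are assumptions made only so the snippet compiles on its own — inside xFitter the real error logger is linked in and the header is picked up from the build tree:

#include <cstdio>
#include <cstdlib>
#include <string>
#include <vector>

// stand-in logger, assumption for this sketch only
void hf_errlog(int id, const std::string& msg) {
  std::fprintf(stderr, "[%d] %s\n", id, msg.c_str());
  std::exit(1);
}

#include "spline.h"   // assumed include path to common/linalg/spline.h

int main() {
  std::vector<double> x = {0.0, 0.5, 1.0, 1.5, 2.0};
  std::vector<double> y = {0.0, 0.25, 1.0, 2.25, 4.0};   // y = x^2 sampled

  tk::spline s;
  s.set_points(x, y);           // cubic spline by default; pass false for linear
  double v  = s(1.25);          // interpolated value
  double dv = s(1.25, true);    // first derivative, via the flag added in this version
  std::printf("s(1.25) = %g, s'(1.25) = %g\n", v, dv);
  return 0;
}

The second argument of operator() (flagDerivative) switches the return value from f(x) to f'(x); with fewer than four input points or non-ascending x the modified header aborts through hf_errlog.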
+ * --------------------------------------------------------------------- + * + */ + +// 21.01.2019 Modified by Oleksandr Zenaiev (implemented input checks and derivative calculation) + + +#ifndef TK_SPLINE_H +#define TK_SPLINE_H + +#include <cstdio> +#include <cassert> +#include <vector> +#include <algorithm> + + +// unnamed namespace only because the implementation is in this +// header file and we don't want to export symbols to the obj files +namespace +{ + +namespace tk +{ + +// band matrix solver +class band_matrix +{ +private: + std::vector< std::vector<double> > m_upper; // upper band + std::vector< std::vector<double> > m_lower; // lower band +public: + band_matrix() {}; // constructor + band_matrix(int dim, int n_u, int n_l); // constructor + ~band_matrix() {}; // destructor + void resize(int dim, int n_u, int n_l); // init with dim,n_u,n_l + int dim() const; // matrix dimension + int num_upper() const + { + return m_upper.size()-1; + } + int num_lower() const + { + return m_lower.size()-1; + } + // access operator + double & operator () (int i, int j); // write + double operator () (int i, int j) const; // read + // we can store an additional diogonal (in m_lower) + double& saved_diag(int i); + double saved_diag(int i) const; + void lu_decompose(); + std::vector<double> r_solve(const std::vector<double>& b) const; + std::vector<double> l_solve(const std::vector<double>& b) const; + std::vector<double> lu_solve(const std::vector<double>& b, + bool is_lu_decomposed=false); + +}; + + +// spline interpolation +class spline +{ +public: + enum bd_type { + first_deriv = 1, + second_deriv = 2 + }; + +private: + std::vector<double> m_x,m_y; // x,y coordinates of points + // interpolation parameters + // f(x) = a*(x-x_i)^3 + b*(x-x_i)^2 + c*(x-x_i) + y_i + std::vector<double> m_a,m_b,m_c; // spline coefficients + double m_b0, m_c0; // for left extrapol + bd_type m_left, m_right; + double m_left_value, m_right_value; + bool m_force_linear_extrapolation; + +public: + // set default boundary condition to be zero curvature at both ends + spline(): m_left(second_deriv), m_right(second_deriv), + m_left_value(0.0), m_right_value(0.0), + m_force_linear_extrapolation(false) + { + ; + } + + // optional, but if called it has to come be before set_points() + void set_boundary(bd_type left, double left_value, + bd_type right, double right_value, + bool force_linear_extrapolation=false); + void set_points(const std::vector<double>& x, + const std::vector<double>& y, bool cubic_spline=true); + double operator() (double x, bool flagDerivative = false) const; +}; + + + +// --------------------------------------------------------------------- +// implementation part, which could be separated into a cpp file +// --------------------------------------------------------------------- + + +// band_matrix implementation +// ------------------------- + +band_matrix::band_matrix(int dim, int n_u, int n_l) +{ + resize(dim, n_u, n_l); +} +void band_matrix::resize(int dim, int n_u, int n_l) +{ + assert(dim>0); + assert(n_u>=0); + assert(n_l>=0); + m_upper.resize(n_u+1); + m_lower.resize(n_l+1); + for(size_t i=0; i<m_upper.size(); i++) { + m_upper[i].resize(dim); + } + for(size_t i=0; i<m_lower.size(); i++) { + m_lower[i].resize(dim); + } +} +int band_matrix::dim() const +{ + if(m_upper.size()>0) { + return m_upper[0].size(); + } else { + return 0; + } +} + + +// defines the new operator (), so that we can access the elements +// by A(i,j), index going from i=0,...,dim()-1 +double & band_matrix::operator () (int i, 
int j) +{ + int k=j-i; // what band is the entry + assert( (i>=0) && (i<dim()) && (j>=0) && (j<dim()) ); + assert( (-num_lower()<=k) && (k<=num_upper()) ); + // k=0 -> diogonal, k<0 lower left part, k>0 upper right part + if(k>=0) return m_upper[k][i]; + else return m_lower[-k][i]; +} +double band_matrix::operator () (int i, int j) const +{ + int k=j-i; // what band is the entry + assert( (i>=0) && (i<dim()) && (j>=0) && (j<dim()) ); + assert( (-num_lower()<=k) && (k<=num_upper()) ); + // k=0 -> diogonal, k<0 lower left part, k>0 upper right part + if(k>=0) return m_upper[k][i]; + else return m_lower[-k][i]; +} +// second diag (used in LU decomposition), saved in m_lower +double band_matrix::saved_diag(int i) const +{ + assert( (i>=0) && (i<dim()) ); + return m_lower[0][i]; +} +double & band_matrix::saved_diag(int i) +{ + assert( (i>=0) && (i<dim()) ); + return m_lower[0][i]; +} + +// LR-Decomposition of a band matrix +void band_matrix::lu_decompose() +{ + int i_max,j_max; + int j_min; + double x; + + // preconditioning + // normalize column i so that a_ii=1 + for(int i=0; i<this->dim(); i++) { + assert(this->operator()(i,i)!=0.0); + this->saved_diag(i)=1.0/this->operator()(i,i); + j_min=std::max(0,i-this->num_lower()); + j_max=std::min(this->dim()-1,i+this->num_upper()); + for(int j=j_min; j<=j_max; j++) { + this->operator()(i,j) *= this->saved_diag(i); + } + this->operator()(i,i)=1.0; // prevents rounding errors + } + + // Gauss LR-Decomposition + for(int k=0; k<this->dim(); k++) { + i_max=std::min(this->dim()-1,k+this->num_lower()); // num_lower not a mistake! + for(int i=k+1; i<=i_max; i++) { + assert(this->operator()(k,k)!=0.0); + x=-this->operator()(i,k)/this->operator()(k,k); + this->operator()(i,k)=-x; // assembly part of L + j_max=std::min(this->dim()-1,k+this->num_upper()); + for(int j=k+1; j<=j_max; j++) { + // assembly part of R + this->operator()(i,j)=this->operator()(i,j)+x*this->operator()(k,j); + } + } + } +} +// solves Ly=b +std::vector<double> band_matrix::l_solve(const std::vector<double>& b) const +{ + assert( this->dim()==(int)b.size() ); + std::vector<double> x(this->dim()); + int j_start; + double sum; + for(int i=0; i<this->dim(); i++) { + sum=0; + j_start=std::max(0,i-this->num_lower()); + for(int j=j_start; j<i; j++) sum += this->operator()(i,j)*x[j]; + x[i]=(b[i]*this->saved_diag(i)) - sum; + } + return x; +} +// solves Rx=y +std::vector<double> band_matrix::r_solve(const std::vector<double>& b) const +{ + assert( this->dim()==(int)b.size() ); + std::vector<double> x(this->dim()); + int j_stop; + double sum; + for(int i=this->dim()-1; i>=0; i--) { + sum=0; + j_stop=std::min(this->dim()-1,i+this->num_upper()); + for(int j=i+1; j<=j_stop; j++) sum += this->operator()(i,j)*x[j]; + x[i]=( b[i] - sum ) / this->operator()(i,i); + } + return x; +} + +std::vector<double> band_matrix::lu_solve(const std::vector<double>& b, + bool is_lu_decomposed) +{ + assert( this->dim()==(int)b.size() ); + std::vector<double> x,y; + if(is_lu_decomposed==false) { + this->lu_decompose(); + } + y=this->l_solve(b); + x=this->r_solve(y); + return x; +} + + + + +// spline implementation +// ----------------------- + +void spline::set_boundary(spline::bd_type left, double left_value, + spline::bd_type right, double right_value, + bool force_linear_extrapolation) +{ + assert(m_x.size()==0); // set_points() must not have happened yet + m_left=left; + m_right=right; + m_left_value=left_value; + m_right_value=right_value; + m_force_linear_extrapolation=force_linear_extrapolation; +} + + +void 
spline::set_points(const std::vector<double>& x, + const std::vector<double>& y, bool cubic_spline) +{ + // check input and provide informative error messages + // at least 4 points + if(x.size() < 4) + hf_errlog(18091000, "F: Spline needs at least 4 input points"); + // x in ascending order + for(size_t s = 1; s < x.size(); s++) + if(x[s] <= x[s - 1]) + hf_errlog(18091001, "F: Spline needs x points in accessing order"); + // x and y have same size + if(x.size() != y.size()) + hf_errlog(18091002, "F: Spline needs same number of x and y points"); + + assert(x.size()==y.size()); + assert(x.size()>2); + m_x=x; + m_y=y; + int n=x.size(); + // TODO: maybe sort x and y, rather than returning an error + for(int i=0; i<n-1; i++) { + assert(m_x[i]<m_x[i+1]); + } + + if(cubic_spline==true) { // cubic spline interpolation + // setting up the matrix and right hand side of the equation system + // for the parameters b[] + band_matrix A(n,1,1); + std::vector<double> rhs(n); + for(int i=1; i<n-1; i++) { + A(i,i-1)=1.0/3.0*(x[i]-x[i-1]); + A(i,i)=2.0/3.0*(x[i+1]-x[i-1]); + A(i,i+1)=1.0/3.0*(x[i+1]-x[i]); + rhs[i]=(y[i+1]-y[i])/(x[i+1]-x[i]) - (y[i]-y[i-1])/(x[i]-x[i-1]); + } + // boundary conditions + if(m_left == spline::second_deriv) { + // 2*b[0] = f'' + A(0,0)=2.0; + A(0,1)=0.0; + rhs[0]=m_left_value; + } else if(m_left == spline::first_deriv) { + // c[0] = f', needs to be re-expressed in terms of b: + // (2b[0]+b[1])(x[1]-x[0]) = 3 ((y[1]-y[0])/(x[1]-x[0]) - f') + A(0,0)=2.0*(x[1]-x[0]); + A(0,1)=1.0*(x[1]-x[0]); + rhs[0]=3.0*((y[1]-y[0])/(x[1]-x[0])-m_left_value); + } else { + assert(false); + } + if(m_right == spline::second_deriv) { + // 2*b[n-1] = f'' + A(n-1,n-1)=2.0; + A(n-1,n-2)=0.0; + rhs[n-1]=m_right_value; + } else if(m_right == spline::first_deriv) { + // c[n-1] = f', needs to be re-expressed in terms of b: + // (b[n-2]+2b[n-1])(x[n-1]-x[n-2]) + // = 3 (f' - (y[n-1]-y[n-2])/(x[n-1]-x[n-2])) + A(n-1,n-1)=2.0*(x[n-1]-x[n-2]); + A(n-1,n-2)=1.0*(x[n-1]-x[n-2]); + rhs[n-1]=3.0*(m_right_value-(y[n-1]-y[n-2])/(x[n-1]-x[n-2])); + } else { + assert(false); + } + + // solve the equation system to obtain the parameters b[] + m_b=A.lu_solve(rhs); + + // calculate parameters a[] and c[] based on b[] + m_a.resize(n); + m_c.resize(n); + for(int i=0; i<n-1; i++) { + m_a[i]=1.0/3.0*(m_b[i+1]-m_b[i])/(x[i+1]-x[i]); + m_c[i]=(y[i+1]-y[i])/(x[i+1]-x[i]) + - 1.0/3.0*(2.0*m_b[i]+m_b[i+1])*(x[i+1]-x[i]); + } + } else { // linear interpolation + m_a.resize(n); + m_b.resize(n); + m_c.resize(n); + for(int i=0; i<n-1; i++) { + m_a[i]=0.0; + m_b[i]=0.0; + m_c[i]=(m_y[i+1]-m_y[i])/(m_x[i+1]-m_x[i]); + } + } + + // for left extrapolation coefficients + m_b0 = (m_force_linear_extrapolation==false) ? 
m_b[0] : 0.0; + m_c0 = m_c[0]; + + // for the right extrapolation coefficients + // f_{n-1}(x) = b*(x-x_{n-1})^2 + c*(x-x_{n-1}) + y_{n-1} + double h=x[n-1]-x[n-2]; + // m_b[n-1] is determined by the boundary condition + m_a[n-1]=0.0; + m_c[n-1]=3.0*m_a[n-2]*h*h+2.0*m_b[n-2]*h+m_c[n-2]; // = f'_{n-2}(x_{n-1}) + if(m_force_linear_extrapolation==true) + m_b[n-1]=0.0; +} + +double spline::operator() (double x, bool flagDerivative) const +{ + size_t n=m_x.size(); + // find the closest point m_x[idx] < x, idx=0 even if x<m_x[0] + std::vector<double>::const_iterator it; + it=std::lower_bound(m_x.begin(),m_x.end(),x); + int idx=std::max( int(it-m_x.begin())-1, 0); + + double h=x-m_x[idx]; + double interpol; + if(x<m_x[0]) { + // extrapolation to the left + if(!flagDerivative) + interpol=(m_b0*h + m_c0)*h + m_y[0]; + else + interpol=2*h*m_b0 + m_c0; + } else if(x>m_x[n-1]) { + // extrapolation to the right + if(!flagDerivative) + interpol=(m_b[n-1]*h + m_c[n-1])*h + m_y[n-1]; + else + interpol=2*h*m_b[n-1] + m_c[n-1]; + } else { + // interpolation + if(!flagDerivative) + interpol=((m_a[idx]*h + m_b[idx])*h + m_c[idx])*h + m_y[idx]; + else + interpol=3*h*h*m_a[idx] + 2*h*m_b[idx] + m_c[idx]; + } + return interpol; +} + + +} // namespace tk + + +} // namespace + +#endif /* TK_SPLINE_H */ diff --git a/configure.ac b/configure.ac index 9975dad408d6c8a60dc9ff511a55ab9a69ca31ab..c53697ceb1a93abc8ca80f4666e213677900b994 100644 --- a/configure.ac +++ b/configure.ac @@ -63,7 +63,7 @@ else echo "Disabling mcmodel=medium option" FCFLAGS="$FCFLAGS -fPIC -cpp" FFLAGS="$FFLAGS -fPIC -cpp" -fi +fi # fortran detection: @@ -119,7 +119,7 @@ AC_LANG_POP(C++) AC_ARG_ENABLE([process], [AS_HELP_STRING([--enable-process],[enable xfitter-process tool])]) -: ${enable_process=yes} +: ${enable_process=yes} AS_IF([test "$enable_process" = "yes"], [ AC_MSG_CHECKING([for libyaml]) @@ -141,7 +141,7 @@ AS_IF([test "$enable_process" = "yes"], [ AC_ARG_ENABLE([openmp], [AS_HELP_STRING([--enable-openmp],[enable openmp support])]) -: ${enable_openmp=no} +: ${enable_openmp=no} AS_IF([test "$enable_openmp" = "yes"], [ AX_OPENMP([AM_CONDITIONAL(ENABLE_OPENMP,true)], @@ -205,7 +205,7 @@ AC_MSG_CHECKING([for QCDNUM installation]) qcdnum_config=`which qcdnum-config` if test x$qcdnum_config == x; then AC_MSG_ERROR([Unable to find qcdnum-config.]) -else +else QCDNUMVERS=`qcdnum-config --version` QCDNUMLIBS=`qcdnum-config --ldflags` QCDNUM_CPPFLAGS=`qcdnum-config --cppflags` @@ -223,7 +223,7 @@ if test $enable_lhapdf; then lhapdf_config=`which lhapdf-config` if test x$lhapdf_config == x; then AC_MSG_ERROR([Unable to find lhapdf-config.]) - else + else LHAPDFVERS=`lhapdf-config --version` LHAPDF_CPPFLAGS=`lhapdf-config --cppflags` LHAPDF_LDFLAGS=`lhapdf-config --ldflags` @@ -234,7 +234,7 @@ if test $enable_lhapdf; then AC_DEFINE([LHAPDF_ENABLED],[1],[Define if LHAPDF is enabled]) lhapdf_family=`lhapdf-config --version | sed 's/\..*$//'` - if test "x$lhapdf_family" != "x5"; then + if test "x$lhapdf_family" != "x5"; then AC_DEFINE([LHAPDF_FAMILY],[6],[Define value of LHAPDF family]) fi fi @@ -249,7 +249,7 @@ if test $enable_apfel; then apfel_config=`which apfel-config` if test x$apfel_config == x; then AC_MSG_ERROR([Unable to find apfel-config.]) - else + else APFELVERS=`apfel-config --version` APFEL_CPPFLAGS=`apfel-config --cppflags` APFEL_LDFLAGS=`apfel-config --ldflags` @@ -296,7 +296,7 @@ if test $enable_ceres; then CERES_LDFLAGS="-L${CERES_DIR}/lib -lceres -lglog -lgflags -lpthread -lspqr -lcholmod -lccolamd -lcamd -lcolamd -lamd -llapack 
-lblas -lsuitesparseconfig -lrt -lcxsparse -lrt -lcxsparse -lgomp" AC_SUBST(CERES_CPPFLAGS) AC_SUBST(CERES_LDFLAGS) - AC_DEFINE([CERES_ENABLED],[1],[Define if APFEL++ is enabled]) + AC_DEFINE([CERES_ENABLED],[1],[Define if APFEL++ is enabled]) fi fi AM_CONDITIONAL(ENABLE_CERES, [test $enable_ceres]) @@ -309,7 +309,7 @@ if test $enable_mela; then mela_config=`which mela-config` if test x$mela_config == x; then AC_MSG_ERROR([Unable to find mela-config.]) - else + else MELAVERS=`mela-config --version` MELA_LDFLAGS=`mela-config --ldflags` AC_MSG_RESULT([Using $mela_config version $MELAVERS]) @@ -328,7 +328,7 @@ if test x$enable_applgrid == xyes; then applgrid_config=`which applgrid-config` if test x$applgrid_config == x; then AC_MSG_ERROR([Unable to find applgrid-config.]) - else + else APPLGRIDVERS=`applgrid-config --version` APPLGRID_CPPFLAGS=`applgrid-config --cxxflags` APPLGRID_LDFLAGS=`applgrid-config --ldcflags` @@ -406,7 +406,7 @@ if test x$enable_hathor == xyes; then AC_MSG_CHECKING([for hathor installation]) if test "x$HATHOR_ROOT" = "x"; then AC_MSG_ERROR([HATHOR_ROOT environment variable is not set!.]) - else + else AC_MSG_RESULT([Using $HATHOR_ROOT]) AC_CHECK_FILE([$HATHOR_ROOT/libHathor.a],,[AC_MSG_ERROR([HATHOR_ROOT must contain libHathor.a])]) # check which hathor libraries are available: @@ -470,7 +470,7 @@ AM_CONDITIONAL([HAVE_ROOT],test $root_ok) # if test x$enable_nnpdfWeight == xyes; then # AC_MSG_ERROR([Root is required for NNPDF]) # fi -#else +#else # AC_MSG_RESULT([Using $root_config]) # root_ok=1 # ROOT_CFLAGS=`root-config --cflags` @@ -526,7 +526,7 @@ if test x$enable_doc = xyes; then if test -z "$PDFLATEX"; then AC_MSG_ERROR([Pdflatex is required to create the user manual.]) fi - + # Check for presence of pdfLaTeX AC_CHECK_PROG(BIBTEX, bibtex, bibtex) if test -z "$BIBTEX"; then @@ -549,7 +549,7 @@ DX_PS_FEATURE(OFF) DX_INIT_DOXYGEN([$PACKAGE_NAME],[doxygen.cfg]) # Output -AC_CONFIG_FILES([include/Makefile +AC_CONFIG_FILES([include/Makefile src/Makefile xfitter/Makefile common/Makefile @@ -576,7 +576,7 @@ AC_CONFIG_FILES([include/Makefile ACOT/src/Makefile ACOT/Makefile SACOT/src/Makefile - SACOT/Makefile + SACOT/Makefile ABM/src/Makefile FONLL/src/Makefile FONLL/Makefile @@ -599,6 +599,10 @@ AC_CONFIG_FILES([include/Makefile examples/Makefile python/Makefile xfitter-config + pdfparams/ABMPgluonPdfParam/src/Makefile + pdfparams/ABMPseaPdfParam/src/Makefile + pdfparams/ABMPvalencePdfParam/src/Makefile + pdfdecompositions/UvDvUbarDbarSSbarPdfDecomposition/src/Makefile pdfparams/NegativeGluonPdfParam/src/Makefile evolutions/LHAPDF/src/Makefile minimizers/CERESMinimizer/src/Makefile @@ -612,6 +616,7 @@ AC_CONFIG_FILES([include/Makefile pdfdecompositions/UvDvUbarDbarS/src/Makefile pdfdecompositions/SU3_PionPdfDecomposition/src/Makefile pdfdecompositions/BasePdfDecomposition/src/Makefile + reactions/KRunning/src/Makefile reactions/AFB/src/Makefile reactions/KFactor/src/Makefile reactions/BaseDISCC/src/Makefile @@ -625,6 +630,7 @@ AC_CONFIG_FILES([include/Makefile reactions/BaseHVQMNR/src/Makefile reactions/HVQMNR_LHCb_7TeV_beauty/src/Makefile reactions/HVQMNR_LHCb_7TeV_charm/src/Makefile + reactions/cbdiff/src/Makefile reactions/testZMVFNS/src/Makefile reactions/fastNLO/src/Makefile reactions/FONLL_DISCC/src/Makefile diff --git a/doxygen.cfg b/doxygen.cfg index 44dfa39edd7b68fb3ef0ddf4f1bea047580a6f85..b4c05ac75480f852631896d1682d20ced599462c 100644 --- a/doxygen.cfg +++ b/doxygen.cfg @@ -216,7 +216,7 @@ OPTIMIZE_OUTPUT_VHDL = NO # .inc files as Fortran files (default is 
PHP), and .f files as C (default is Fortran), # use: inc=Fortran f=C. Note that for custom extensions you also need to set FILE_PATTERNS otherwise the files are not read by doxygen. -EXTENSION_MAPPING = +EXTENSION_MAPPING = # If you use STL classes (i.e. std::string, std::vector, etc.) but do not want # to include (a tag file for) the STL sources as input, then you should @@ -607,15 +607,22 @@ INPUT = include src \ reactions/fastNLO/include \ reactions/fastNLO/src \ reactions/BaseHVQMNR/include \ - reactions/BaseHVQMNR/src + reactions/BaseHVQMNR/src\ reactions/HVQMNR_LHCb_7TeV_beauty/include \ reactions/HVQMNR_LHCb_7TeV_beauty/src \ + reactions/APPLgrid/include \ + reactions/cbdiff/src reactions/cbdiff/include \ + reactions/KRunning/src reactions/KRunning/include \ pdfparams/BasePdfParam/include\ pdfparams/Expression/include\ pdfparams/NegativeGluonPdf/include \ + pdfparams/ABMPgluonPdfParam/include \ + pdfparams/ABMPseaPdfParam/include \ + pdfparams/ABMPvalencePdfParam/include \ pdfparams/HERAPDF_PdfParam/include \ pdfparams/PolySqrtPdfParam/include \ pdfdecompositions/BasePdfDecomposition/include \ + pdfdecompositions/UvDvUbarDbarSSbarPdfDecomposition/include \ pdfdecompositions/LHAPDFDecomposition/include \ pdfdecompositions/UvDvUbarDbarS/include \ pdfdecompositions/GRV_PionPdfDecomposition/include \ @@ -637,7 +644,7 @@ INPUT_ENCODING = UTF-8 # *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx # *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.py *.f90 -FILE_PATTERNS = +FILE_PATTERNS = # The RECURSIVE tag can be used to turn specify whether or not subdirectories # should be searched for input files as well. Possible values are YES and NO. diff --git a/evolutions/APFELxx/src/Makefile.am b/evolutions/APFELxx/src/Makefile.am index 35a3381d65b9aea6c1ea55d1d3e10d6fe078027a..3355f204c92abea99a0450f2a607903d49f0425b 100644 --- a/evolutions/APFELxx/src/Makefile.am +++ b/evolutions/APFELxx/src/Makefile.am @@ -1,5 +1,5 @@ if ENABLE_APFELXX - AM_CXXFLAGS = $(APFEL_CPPFLAGS) \ + AM_CPPFLAGS=$(APFELXX_CPPFLAGS)\ -I$(srcdir)/../include \ -I$(srcdir)/../../BaseEvolution/include \ -I$(srcdir)/../../../pdfdecompositions/BasePdfDecomposition/include \ diff --git a/evolutions/QCDNUM/src/EvolutionQCDNUM.cc b/evolutions/QCDNUM/src/EvolutionQCDNUM.cc index f1825158a51008a736ffd7af93c73aeb4e06de52..bbda144246f58e4ec21ff286f7e245257fc64379 100644 --- a/evolutions/QCDNUM/src/EvolutionQCDNUM.cc +++ b/evolutions/QCDNUM/src/EvolutionQCDNUM.cc @@ -1,4 +1,4 @@ - + /* @file EvolutionQCDNUM.cc @date 2018-08-14 @@ -13,7 +13,7 @@ #include <algorithm> // Global var to hold current pdfDecomposition -std::function<std::map<int,double>(double const& x)> gPdfDecomp; +std::function<std::map<int,double>(double const& x)> gPdfDecomp; // Wrapper for QCDNUM double funcPDF(int *ipdf, double *x) { @@ -45,12 +45,12 @@ double static qcdnumDef[] = { 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., // d 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., // u 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., // s - 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., // - 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., // - 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., // - 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., // - 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., // - 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0. 
// + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., // + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., // + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., // + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., // + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., // + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0. // }; @@ -115,9 +115,10 @@ namespace xfitter int nxgrid = yQCDNUM["NXbins"].as<int>(); int nxout = 0; - + QCDNUM::setord(PtOrder); std::cout << "Set evolution order "<<PtOrder <<"\n"; + std::cout << "xGrid[0]: " << xGrid[0] << std::endl; QCDNUM::gxmake(xGrid.data(),xGridW.data(),xGrid.size(),nxgrid,nxout,_splineOrder); // x-grid std::cout << "Requested (actual) number of x grid points: "<<nxgrid << "(" << nxout << ")\n"; @@ -148,14 +149,14 @@ namespace xfitter Q2Grid[i] = tmp[i].first; Q2GridW[i] = tmp[i].second; } - + //for (size_t i = 0; i<Q2Grid.size(); i++) { // std::cout << " Q2= " << Q2Grid[i] << " weight = " << Q2GridW[i] << "\n"; //} int nq2grid = yQCDNUM["NQ2bins"].as<int>(); int nq2out = 0; - + QCDNUM::gqmake(Q2Grid.data(), Q2GridW.data(), Q2Grid.size(), nq2grid, nq2out); std::cout << "Requested (actual) number of Q2 grid points: "<<nq2grid << "(" << nq2out << ")\n"; @@ -164,9 +165,23 @@ namespace xfitter int iqb = QCDNUM::iqfrmq( (*mbt)*(*mbt) + 1.e-6 ); int iqt = 0; // top off for now - // For now VFNS only - QCDNUM::setcbt(0,iqc,iqb,iqt); - + // For now VFNS only and NFlavour = 3 only + int nflavour = XFITTER_PARS::gParametersI.at("NFlavour"); + if(nflavour == 3) + { + std::cout << "Fixed Flavour Number Scheme set with nf=3" << std::endl; + QCDNUM::setcbt(3,iqc,iqb,iqt); + } + else if(nflavour == 5) + { + std::cout << "Variable Flavour Number Scheme set with nf=5" << std::endl; + QCDNUM::setcbt(0,iqc,iqb,iqt); + } + else + { + hf_errlog(280320191, "F: Unsupported NFlavour = " + std::to_string(nflavour)); + } + // Init SF int id1=0; int id2=0; int nw=0; int ierr=1; if (_readTables>0 ) { @@ -177,7 +192,7 @@ namespace xfitter QCDNUM::fillwt(0,id1,id2,nw); QCDNUM::dmpwgt(1,22,"unpolarised.wgt"); } - + //Evolution gets its decomposition from YAML gPdfDecomp=XFITTER_PARS::getInputFunctionFromYaml(yQCDNUM); atConfigurationChange(); @@ -200,7 +215,7 @@ namespace xfitter } std::function<std::map<int,double>(double const& x, double const& Q)> EvolutionQCDNUM::xfxQMap() { - + const auto _f0 = [=] (double const& x, double const& Q) -> std::map<int, double> { std::map<int, double> res; for (int ipdf =-6; ipdf<7; ipdf++) { diff --git a/include/ReactionTheory.h b/include/ReactionTheory.h index 3bc13a6d3d91e18b8b5d3bee6ee1dbf35ccd4c9f..03d240e4d48b998c7acbf776436dab0cf03deadd 100644 --- a/include/ReactionTheory.h +++ b/include/ReactionTheory.h @@ -21,7 +21,7 @@ using std::valarray; typedef double (*pZeroParFunc)(); typedef double (*pOneParFunc)(const double&); typedef double (*pTwoParFunc)(const double&, const double& ); -typedef void (*pThreeParSub)(const double& , const double&, const double&); +typedef void (*pThreeParSub)(const double& , const double&, const double&); // Function to emulate LHAPDF xfx behavior: typedef void (*pXFXlike)(const double&x,const double&Q,double*results); @@ -53,7 +53,7 @@ typedef void (*pXFXlike)(const double&x,const double&Q,double*results); //class Evolution; //why is this not in namespace xfitter? 
--Ivan -class ReactionTheory +class ReactionTheory { public: ReactionTheory() {}; @@ -78,20 +78,20 @@ class ReactionTheory virtual void setxFitterparametersYaml(map<string,YAML::Node> &xfitter_pars) {_xfitter_pars_node = xfitter_pars; }; ///< Set map for complex parameters virtual void setxFitterparametersVec(map<string,vector<double> > &xfitter_pars) {_xfitter_pars_vec = xfitter_pars; }; ///< Set map for vector parameters - virtual void resetParameters(const YAML::Node& node); ///< Reset reaction-specific parameters + virtual void resetParameters(const YAML::Node& node); ///< Reset reaction-specific parameters - virtual void setEvolFunctions(double (*palpha_S)(const double& ), map<string, pTwoParFunc> *func2D ) { _alpha_S = palpha_S; PDFs = func2D; }; + virtual void setEvolFunctions(double (*palpha_S)(const double& ), map<string, pTwoParFunc> *func2D ) { _alpha_S = palpha_S; PDFs = func2D; }; ///< Set alpha_S and PDF maps virtual void setExtraFunctions(map<string, pZeroParFunc>, map<string, pOneParFunc>, map<string, pTwoParFunc>) { }; //! Set XFX function for different hadrons (proton: p, neutron: n, anti-proton: pbar) virtual void setXFX(pXFXlike xfx, string type="p" ){ _xfx[type] = xfx; };//DEPRECATED - + virtual void setBinning(int dataSetID, map<string,valarray<double> > *dsBins){ _dsIDs.push_back(dataSetID); _dsBins[dataSetID] = dsBins; } ; - /// Perform optional re-initialization for a given iteration. Interface for old-style pdf functions + /// Perform optional re-initialization for a given iteration. Interface for old-style pdf functions //A better name would be atIteration - virtual void initAtIteration(); + virtual void initAtIteration(); //! Perform optional action when minuit fcn 3 is called (normally after fit) virtual void actionAtFCN3() {}; @@ -102,21 +102,21 @@ class ReactionTheory //! Set dataset @param dataSetID parameters which can be term- and dataset-specific virtual void setDatasetParameters( int dataSetID, map<string,string> parsReaction, map<string,double> parsDataset) {} ; - //! Main function to compute predictions for @param dataSetID - virtual int compute(int dataSetID, valarray<double> &val, map<string, valarray<double> > &err) = 0; - + //! Main function to compute predictions for @param dataSetID + virtual int compute(int dataSetID, valarray<double> &val, map<string, valarray<double> > &err) = 0; + //! Provide additional optional information about the reaction virtual void printInfo(){}; //! Helper function to emmulate LHAPDF6 calls to get PDFs void xfx(const double& x, const double& q, double* results) const;//Currently accesses default evolution, to be replaced later --Ivan - + //! Helper function to emmulate LHAPDF6 calls to get PDFs double xfx(double x, double q, int iPDF) const { double pdfs[13]; xfx(x,q,pdfs); return pdfs[iPDF+6];}; - + //! Helper function to emmulate LHAPDF6 calls to get PDFs std::vector<double> xfx(double x, double q) const { vector<double> pdfs(13); xfx(x,q,&pdfs[0]); return pdfs;}; - + //! strong coupling at scale q [GeV] double alphaS(double q) const { return _alpha_S(q); } @@ -128,19 +128,19 @@ class ReactionTheory //! Default helper to determine if bin is masked or not virtual bool notMasked(int DSID, int Bin); - - //! Helper function to report not implemented functionality + + //! 
Helper function to report not implemented functionality void NOT_IMPLEMENTED(const string& functionName) { string message = "F: Function "+functionName+" is not implemented for module" + getReactionName(); hf_errlog_(17040701,message.c_str(),message.size()); - } + } /// Set evolution name void setEvolution(std::string& evolution) { _evolution = evolution; } /// Retrieve evolution name //A better name would be "getEvolutionName" -- Ivan - const std::string getEvolution() const { return _evolution; } - + const std::string getEvolution() const { return _evolution; } + protected: map<string, pTwoParFunc> *PDFs; @@ -149,20 +149,20 @@ class ReactionTheory /// Generate report on the state of parameters, local for the given reaction. std::string emitReactionLocalPars() const; - bool checkParam(string name) ///< Check if a parameter is present on one of the global list + bool checkParam(string name) const ///< Check if a parameter is present on one of the global list { - return (_xfitter_pars.find(name) != _xfitter_pars.end()) - || (_xfitter_pars_i.find(name) != _xfitter_pars_i.end()) - || (_xfitter_pars_s.find(name) != _xfitter_pars_s.end()) - || (_xfitter_pars_vec.find(name) != _xfitter_pars_vec.end()) - || (_xfitter_pars_node.find(name) != _xfitter_pars_node.end()) + return (_xfitter_pars.find(name) != _xfitter_pars.end()) + || (_xfitter_pars_i.find(name) != _xfitter_pars_i.end()) + || (_xfitter_pars_s.find(name) != _xfitter_pars_s.end()) + || (_xfitter_pars_vec.find(name) != _xfitter_pars_vec.end()) + || (_xfitter_pars_node.find(name) != _xfitter_pars_node.end()) ; } // Helper function to get a parameter (double) double GetParam(const string& name) const - { - if (_xfitter_pars.find(name) != _xfitter_pars.end() ) + { + if (_xfitter_pars.find(name) != _xfitter_pars.end() ) return *_xfitter_pars.at(name); else return 0; @@ -183,7 +183,7 @@ class ReactionTheory return _xfitter_pars_s.at(name); } else { - return GetParamY(name, dsID); // + return GetParamY(name, dsID); // } } @@ -192,12 +192,12 @@ class ReactionTheory // Helper function to get bin values for a given data set, bin name. Returns null if not found virtual valarray<double> *GetBinValues(int idDS, const string& binName) - { + { map<string, valarray<double> >* mapBins = _dsBins[idDS]; if (mapBins == nullptr ) { return nullptr; } - else { + else { map<string, valarray<double> >::iterator binPair = mapBins->find(binName); if ( binPair == mapBins->end() ) { return nullptr; @@ -228,14 +228,14 @@ class ReactionTheory /// Global vector of double parameters: map< string, vector<double> > _xfitter_pars_vec; - + /// Global YAML node parameters, for complex cases. map< string, YAML::Node > _xfitter_pars_node; private: /// Default evolution: string _evolution; - + /// list of xfx-like PDFs (using pointer function) map<string,pXFXlike> _xfx; /// list of alphaS (using pointer function) diff --git a/include/TheorEval.h b/include/TheorEval.h index 9a7c41e0fbf27840c8d9b4fc2fe7782104cb25d0..ddc00122b709f0638b4e81f041466ed1adcbdc22 100644 --- a/include/TheorEval.h +++ b/include/TheorEval.h @@ -1,7 +1,7 @@ /** @class TheorEval - @brief Theory expression evaluation + @brief Theory expression evaluation This class performs evaluation of expression that describes theoretical prediction for a given dataset. The expression may contain APPLgrid terms, @@ -46,13 +46,14 @@ using std::list; -1, -2 -- brackets (, ) 1 -- operators +, - 3 -- operators *, / - 4 -- functions sum, avg + 4 -- functions sum, avg 5 -- Matrix . Vector / vector . 
Matrix */ struct tToken { short int opr; // operator flag, includes precedence string name; // string token valarray<double> *val; // value token + int narg; }; class TheorEval{ @@ -62,7 +63,7 @@ class TheorEval{ public: ~TheorEval(); //! Proper constructor for TheorEval class - /*! + /*! \param dsID dataset ID for which the expression is evaluated \param nTerms the number of terms in the expression \param stn array of strings with term names @@ -75,7 +76,7 @@ class TheorEval{ */ TheorEval(const int dsID, const int nTerms, const std::vector<string> stn, const std::vector<string> stt, const std::vector<string> sti, const std::vector<string> sts, const string& ste); - + //! Evaluates array of predictions for requested expression /*! \param iorder order for grids evaluation @@ -140,7 +141,7 @@ class TheorEval{ int initReactionTerm(int iterm, valarray<double> *val); //! Update the reaction values into the tokens int getReactionValues(); - //! + //! map<string, string> SplitTermInfo(const string& term_info); private: @@ -177,7 +178,7 @@ class TheorEval{ /// bin-by-bin dynamic scale float _dynamicscale; - /// also keep DS pars, which are valid for all terms + /// also keep DS pars, which are valid for all terms map <string, double> _dsPars; /// Name ! diff --git a/include/appl_grid/appl_grid.h b/include/appl_grid/appl_grid.h deleted file mode 100644 index bc74a2050b09d2d0caa53b8fb56e4dd69b879f9e..0000000000000000000000000000000000000000 --- a/include/appl_grid/appl_grid.h +++ /dev/null @@ -1,696 +0,0 @@ -// emacs: this is -*- c++ -*- - -// appl_grid.h - -// grid class header - all the functions needed to create and -// fill the grid from an NLO calculation program -// -// Copyright (C) 2007 Mark Sutton (sutt@hep.ucl.ac.uk) - -// $Id: appl_grid.h, v1.00 2007/10/16 17:01:39 sutt - -// Fixme: this needs to be tidied up. eg there are too many different, -// and too many version of, accessors for x/y, Q2/tau etc there -// should be only one set, for x and Q2 *or* y and tau, but -// not both. In fact they should be for x and Q2, since y and tau -// should be purely an internal grid issue of no concern for the -// user. 
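On the tToken struct in TheorEval.h above: the opr codes documented in its comment encode operator precedence for the expression parser, and the new narg field presumably carries the number of arguments of function-type tokens. A small illustrative classifier (not the actual TheorEval tokenizer; the 0-for-operand convention is only an assumption of this sketch) spelling out the mapping from that comment:

#include <string>

// Illustration only: maps a token string to the tToken::opr precedence code
// described in TheorEval.h (brackets -1/-2, '+'/'-' 1, '*'/'/' 3,
// sum/avg 4, matrix-vector '.' 5).
short tokenPrecedence(const std::string& t) {
  if (t == "(") return -1;
  if (t == ")") return -2;
  if (t == "+" || t == "-") return 1;
  if (t == "*" || t == "/") return 3;
  if (t == "sum" || t == "avg") return 4;
  if (t == ".") return 5;
  return 0;   // operand: a term name or a number (convention of this sketch)
}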
- -#ifndef __APPL_GRID_H -#define __APPL_GRID_H - -#include <vector> -#include <iostream> -#include <sstream> -#include <cmath> -#include <string> -#include <exception> - -#include "TH1D.h" - - -double _fy(double x); -double _fx(double y); -double _fun(double y); - - -#include "correction.h" - -namespace appl { - - -/// forward declarations - full definitions included -/// from appl_grid.cxx -class igrid; -class appl_pdf; - - -const int MAXGRIDS = 5; - - -/// externally visible grid class -class grid { - -public: - - // grid error exception - class exception : public std::exception { - public: - exception(const std::string& s) { std::cerr << what() << " " << s << std::endl; }; - //exception(std::ostream& s) { std::cerr << what() << " " << s << std::endl; }; - exception(std::ostream& s) { std::stringstream ss; ss << s.rdbuf(); std::cerr << what() << " " << ss.str() << std::endl; }; - virtual const char* what() const throw() { return "appl::grid::exception"; } - }; - - typedef enum { STANDARD=0, AMCATNLO=1, SHERPA=2, LAST_TYPE=3 } CALCULATION; - -public: - - grid(int NQ2=50, double Q2min=10000.0, double Q2max=25000000.0, int Q2order=5, - int Nx=50, double xmin=1e-5, double xmax=0.9, int xorder=5, - int Nobs=20, double obsmin=100.0, double obsmax=7000.0, - std::string genpdf="mcfm_pdf", - int leading_order=0, int nloops=1, - std::string transform="f2"); - - grid( int Nobs, const double* obsbins, - int NQ2=50, double Q2min=10000.0, double Q2max=25000000.0, int Q2order=5, - int Nx=50, double xmin=1e-5, double xmax=0.9, int xorder=5, - std::string genpdf="mcfm_pdf", - int leading_order=0, int nloops=1, - std::string transform="f2" ); - - grid( const std::vector<double>& obs, - int NQ2=50, double Q2min=10000.0, double Q2max=25000000.0, int Q2order=5, - int Nx=50, double xmin=1e-5, double xmax=0.9, int xorder=5, - std::string genpdf="mcfm_pdf", - int leading_order=0, int nloops=1, - std::string transform="f2" ); - - // build a grid but don't build the internal igrids - these can be added later - grid( const std::vector<double>& obs, - std::string genpdf="nlojet_pdf", - int leading_order=0, int nloops=1, - std::string transform="f2" ); - - // copy constructor - grid(const grid& g); - - // read from a file - grid(const std::string& filename="./grid.root", const std::string& dirname="grid"); - - // add an igrid for a given bin and a given order - void add_igrid(int bin, int order, igrid* g); - - virtual ~grid(); - - // update grid with one set of event weights - void fill(const double x1, const double x2, const double Q2, - const double obs, - const double* weight, const int iorder); - - - void fill_phasespace(const double x1, const double x2, const double Q2, - const double obs, - const double* weight, const int iorder); - - - void fill_grid(const double x1, const double x2, const double Q2, - const double obs, - const double* weight, const int iorder) { - if (isOptimised()) fill(x1, x2, Q2, obs, weight, iorder); - else fill_phasespace(x1, x2, Q2, obs, weight, iorder); - } - - - void fill_index(const int ix1, const int ix2, const int iQ2, - const int iobs, - const double* weight, const int iorder); - - - // trim/untrim the grid to reduce memory footprint - void trim(); - void untrim(); - - // formatted output - std::ostream& print(std::ostream& s=std::cout) const; - - // don't do anything anymore - // void setuppdf(void (*pdf)(const double& , const double&, double* ) ); - - // get the interpolated pdf's - // void pdfinterp(double x1, double Q2, double* f); - - - // perform the convolution to a 
specified number of loops - // nloops=-1 gives the nlo part only - std::vector<double> vconvolute(void (*pdf)(const double& , const double&, double* ), - double (*alphas)(const double& ), - int nloops, - double rscale_factor=1, - double fscale_factor=1, - double Escale=1 ); - - std::vector<double> vconvolute(void (*pdf1)(const double& , const double&, double* ), - void (*pdf2)(const double& , const double&, double* ), - double (*alphas)(const double& ), - int nloops, - double rscale_factor=1, - double fscale_factor=1, - double Escale=1 ); - - - // perform the convolution to a specified number of loops - // nloops=-1 gives the nlo part only - std::vector<double> vconvolute(double Escale, - void (*pdf)(const double& , const double&, double* ), - double (*alphas)(const double& ), - int nloops, - double rscale_factor=1, - double fscale_factor=1 ) { - return vconvolute(pdf, alphas, nloops, rscale_factor, fscale_factor, Escale); - } - - - // perform the convolution to the max number of loops in grid - std::vector<double> vconvolute(void (*pdf)(const double& , const double&, double* ), - double (*alphas)(const double& ) ) { - return vconvolute( pdf, alphas, m_order-1 ); - } - - - // perform the convolution to the max number of loops in grid - std::vector<double> vconvolute(double Escale, - void (*pdf)(const double& , const double&, double* ), - double (*alphas)(const double& ) ) { - return vconvolute( Escale, pdf, alphas, m_order-1 ); - } - - - // perform the convolution to a specified number of loops - // for a single sub process, nloops=-1 gives the nlo part only - std::vector<double> vconvolute_subproc(int subproc, - void (*pdf)(const double& , const double&, double* ), - double (*alphas)(const double& ), - int nloops, - double rscale_factor=1, double Escale=1 ); - - - // perform the convolution to a specified number of loops - // for a single sub process, nloops=-1 gives the nlo part only - std::vector<double> vconvolute_subproc(int subproc, - double Escale, - void (*pdf)(const double& , const double&, double* ), - double (*alphas)(const double& ), - int nloops, - double rscale_factor=1 ) { - return vconvolute_subproc(subproc, pdf, alphas, nloops, rscale_factor, Escale); - } - - - // perform the convolution to the max number of loops in grid - // for a single sub process - std::vector<double> vconvolute_subproc(int subproc, - void (*pdf)(const double& , const double&, double* ), - double (*alphas)(const double& ) ) { - return vconvolute_subproc( subproc, pdf, alphas, m_order-1 ); - } - - // perform the convolution to the max number of loops in grid - // for a single sub process - std::vector<double> vconvolute_subproc(int subproc, - double Escale, - void (*pdf)(const double& , const double&, double* ), - double (*alphas)(const double& ) ) { - return vconvolute_subproc( subproc, Escale, pdf, alphas, m_order-1 ); - } - - - double vconvolute_bin( int bin, - void (*pdf)(const double& , const double&, double* ), - double (*alphas)(const double&) ); - - - // perform the convolution to a specified number of loops - // nloops=-1 gives the nlo part only - TH1D* convolute(void (*pdf)(const double& , const double&, double* ), - double (*alphas)(const double& ), - int nloops, - double rscale_factor=1, - double fscale_factor=1, - double Escale=1 ); - - // perform the convolution to a specified number of loops - // nloops=-1 gives the nlo part only - TH1D* convolute(void (*pdf1)(const double& , const double&, double* ), - void (*pdf2)(const double& , const double&, double* ), - double (*alphas)(const 
double& ), - int nloops, - double rscale_factor=1, - double fscale_factor=1, - double Escale=1 ); - - - TH1D* convolute(double Escale, - void (*pdf)(const double& , const double&, double* ), - double (*alphas)(const double& ), - int nloops, - double rscale_factor=1, - double fscale_factor=1 ) { - return convolute(pdf, alphas, nloops, rscale_factor, fscale_factor, Escale); - } - - - // perform the convolution to the max number of loops in grid - TH1D* convolute(void (*pdf)(const double& , const double&, double* ), - double (*alphas)(const double& ) ) { - return convolute( pdf, alphas, m_order-1 ); - } - - // perform the convolution to the max number of loops in grid - TH1D* convolute(double Escale, - void (*pdf)(const double& , const double&, double* ), - double (*alphas)(const double& ) ) { - return convolute( Escale, pdf, alphas, m_order-1 ); - } - - - // perform the convolution to a specified number of loops - // for a single sub process, nloops=-1 gives the nlo part only - TH1D* convolute_subproc(int subproc, - void (*pdf)(const double& , const double&, double* ), - double (*alphas)(const double& ), - int nloops, - double rscale_factor=1, double Escale=1 ); - - TH1D* convolute_subproc(int subproc, - double Escale, - void (*pdf)(const double& , const double&, double* ), - double (*alphas)(const double& ), - int nloops, - double rscale_factor=1 ) { - return convolute_subproc( subproc, pdf, alphas, nloops, rscale_factor, Escale); - } - - // perform the convolution to the max number of loops in grid - // for a single sub process - TH1D* convolute_subproc(int subproc, - void (*pdf)(const double& , const double&, double* ), - double (*alphas)(const double& ) ) { - return convolute_subproc( subproc, pdf, alphas, m_order-1 ); - } - - TH1D* convolute_subproc(int subproc, - double Escale, - void (*pdf)(const double& , const double&, double* ), - double (*alphas)(const double& ) ) { - return convolute_subproc( subproc, Escale, pdf, alphas, m_order-1 ); - } - - - // optimise the bin limits - void optimise(bool force=false); - void optimise(int NQ2, int Nx); - void optimise(int NQ2, int Nx1, int Nx2); - - // redefine the limits by hand - void redefine(int iobs, int iorder, - int NQ2, double Q2min, double Q2max, - int Nx, double xmin, double xmax); - - bool setNormalised(bool t=true) { return m_normalised=t; } - bool getNormalised() const { return m_normalised; } - - - // set the filling to be symmetric and test status - bool symmetrise(bool t=true) { return m_symmetrise=t; } - bool isSymmetric() const { return m_symmetrise; } - - bool reweight(bool t=false); - - // access to internal grids if need be - const igrid* weightgrid(int iorder, int iobs) const { return m_grids[iorder][iobs]; } - - // save grid to specified file - void Write(const std::string& filename, const std::string& dirname="grid", const std::string& pdfname="" ); - - // accessors for the observable after possible bin combination - int Nobs() const { return m_obs_bins_combined->GetNbinsX(); } - double obs(int iobs) const { return m_obs_bins_combined->GetBinCenter(iobs+1); } - int obsbin(double obs) const { return m_obs_bins_combined->FindBin(obs)-1; } - double obslow(int iobs) const { return m_obs_bins_combined->GetBinLowEdge(iobs+1); } - double obsmin() const { return obslow(0); } - double obsmax() const { return obslow(Nobs()); } - double deltaobs(int iobs) const { return m_obs_bins_combined->GetBinWidth(iobs+1); } - - const TH1D* getReference() const { return m_obs_bins_combined; } - TH1D* getReference() { return 
m_obs_bins_combined; } - - - // TH1D* getXReference() { - // combineReference(); - // return m_obs_bins_combined; - // } - - - // accessors for the observable befor any bin combination - int Nobs_internal() const { return m_obs_bins->GetNbinsX(); } - double obs_internal(int iobs) const { return m_obs_bins->GetBinCenter(iobs+1); } - int obsbin_internal(double obs) const { return m_obs_bins->FindBin(obs)-1; } - double obslow_internal(int iobs) const { return m_obs_bins->GetBinLowEdge(iobs+1); } - double deltaobs_internal(int iobs) const { return m_obs_bins->GetBinWidth(iobs+1); } - double obsmin_internal() const { return obslow_internal(0); } - double obsmax_internal() const { return obslow_internal(Nobs_internal()); } - - const TH1D* getReference_internal() const { return m_obs_bins; } - TH1D* getReference_internal() { return m_obs_bins; } - - - - - // number of subprocesses - int subProcesses(int i) const; - - // general status accessors - double& run() { return m_run; } - - // accessors for the status information - bool isOptimised() const { return m_optimised; } - bool isTrimmed() const { return m_trimmed; } - - // lowest order of process - int leadingOrder() const { return m_leading_order; } - - /// maximum number of orders ( lo=1, nlo=2, nnlo=3 ) - /// but aMC@NLO uses 4 grids for the NLO, so m_order - /// will be 4, but really it is still only available - /// 1 loop, so take account of this - int nloops() const { - if ( m_type!=AMCATNLO ) return m_order-1; - else if ( m_order>0 ) return 1; - else return 0; - } - - // find out which transform and which pdf combination are being used - std::string getTransform() const { return m_transform; } - - static double transformvar(); - static double transformvar(double v); - - std::string getGenpdf() const { return m_genpdfname; } - - std::string version() const { return m_version; } - std::string appl_version() const; - - double getCMSScale() const { return m_cmsScale; } - void setCMSScale(double cmsScale) { m_cmsScale=cmsScale; } - - double getDynamicScale() const { return m_dynamicScale; } - void setDynamicScale(double dynamicScale) { m_dynamicScale=dynamicScale; } - - - // set optimise flag on all sub grids - bool setOptimised(bool t=true) { - return m_optimised=t; - // for ( int iorder=0 ; iorder<2 ; iorder++ ) { - // for ( int iobs=0 ; iobs<Nobs() ; iobs++ ) m_grids[iorder][iobs]->setOptimised(t); - // } - } - - // find the number of words used for storage - int size() const; - - // get the cross sections - double& crossSection() { return m_total; } - double& crossSectionError() { return m_totalerror; } - - // double Lambda() const { return m_Lambda2; } - - // very lovely algebraic operators - grid& operator=(const grid& g); - grid& operator*=(const double& d); - grid& operator+=(const grid& g); - - /// test if grids have the same limits etc - bool operator==(const grid& g) const; - - // shouldn't have these, the grid is too large a structure - // to be passed in a return - // grid operator*(const double& d) const { return grid(*this)*=d; } - // grid operator+(const grid& g) const { return grid(*this)+=g; } - - void setDocumentation(const std::string& s); - void addDocumentation(const std::string& s); - - std::string getDocumentation() const { return m_documentation; } - std::string& getDocumentation() { return m_documentation; } - - - /// set the range of the observable bins, with an optional - /// scaling of the observable valuesfor channging units - void setBinRange(int ilower, int iupper, double xScaleFactor=1); - void setRange(double 
lower, double upper, double xScaleFactor=1); - - - /// add a correction as a std::vector - void addCorrection( std::vector<double>& v, const std::string& label="", bool combine=false ); - - - /// add a correction by histogram - void addCorrection(TH1D* h, const std::string& label="", double scale=1, bool combine=false ); - - - /// access the corrections - // const std::vector<std::vector<double> >& corrections() const { - const std::vector<correction>& corrections() const { - return m_corrections; - } - - /// get the correction labels - const std::vector<std::string >& correctionLabels() const { - return m_correctionLabels; - } - - /// will the corrections be applied? - bool getApplyCorrections() const { return m_applyCorrections; } - - bool setApplyCorrections(bool b) { - std::cout << "appl::grid bin-by-bin corrections will " - << ( b ? "" : "not " ) << "be applied" << std::endl; - return m_applyCorrections=b; - } - - /// apply corrections to a std::vector - void applyCorrections(std::vector<double>& v, std::vector<bool>& applied); - - - /// will a specific correction be applied? - bool getApplyCorrection(unsigned i) const { - if ( m_applyCorrections ) return true; - else if ( i<m_applyCorrection.size() ) return m_applyCorrection.at(i); - return false; - } - - bool setApplyCorrection(unsigned i, bool b) { - if ( i>=m_corrections.size() ) return false; - std::cout << "appl::grid bin-by-bin correction will " - << ( b ? "" : "not " ) << "be applied for correction " << i; - if ( m_correctionLabels[i]!="" ) std::cout << " (" << m_correctionLabels[i] << ")"; - std::cout << std::endl; - return m_applyCorrection[i]=b; - } - - /// apply corrections to a std::vector - bool applyCorrection(unsigned i, std::vector<double>& v); - - - /// set the ckm matrix values if need be - /// takes a 3x3 matrix with the format { { Vud, Vus, Vub }, { Vcd, Vcs, Vcb }, { Vtd, Vts, Vtb } } - void setckm( const std::vector<std::vector<double> >& ckm ); - - /// takes a flat 9 element vector (or c array) with the format { Vud, Vus, Vub, Vcd, Vcs, Vcb, Vtd, Vts, Vtb } - void setckm( const std::vector<double>& ckm ); - void setckm( const double* ckm ); - - - /// set the squared ckm matrix values if need be - /// the squared terms for eihter W+ or W- production - you probably should use setckm() - void setckm2( const std::vector<std::vector<double> >& ckm2 ); - - /// set the ckm matrix and squared ckm matrix values if need be - const std::vector<std::vector<double> >& getckm() const; - const std::vector<std::vector<double> >& getckm2() const; - - - /// flag custom convolution routines - - void sherpa() { m_type = SHERPA; std::cout << "appl::grid::sherpa() using SHERPA convolution" << std::endl; } - void amcatnlo() { m_type = AMCATNLO; std::cout << "appl::grid::amcatnlo() using aMC@NLO convolution" << std::endl; } - void standard() { m_type = STANDARD; std::cout << "appl::grid::standard() using standard convolution" << std::endl; } - - CALCULATION calculation() const { return m_type; } - - static std::string _calculation(CALCULATION C) { - switch (C) { - case STANDARD: - return "standard"; - case SHERPA: - return "sherpa"; - case AMCATNLO: - return "amcatnlo"; - case LAST_TYPE: - return "last_type"; // NB: shouldn't ever be used - } - return "unknown"; - } - - /// reduce number of subprocesses if possible - void shrink(const std::string& name, int ckmcharge=0); - - /// set bins to be combined after the convolution - void combine( std::vector<int>& v) { if ( (m_combine=v).size() ) combineReference(true); } - - /// set combine 
the be combined after the convolution - void combineReference(bool force=false); - - void combineBins(std::vector<double>& v, int power=1 ) const; - - double fx(double x) const; - double fy(double x) const; - - const appl_pdf* genpdf(int i) const { return m_genpdf[i]; } - - std::vector<double>& userdata() { return m_userdata; } - const std::vector<double>& userdata() const { return m_userdata; } - -protected: - - // internal common construct for the different types of constructor - void construct(int Nobs, - int NQ2=50, double Q2min=10000.0, double Q2max=25000000.0, int Q2order=4, - int Nx=50, double xmin=1e-5, double xmax=0.9, int xorder=3, - int order=2, - std::string transform="f" ); - -protected: - - /// std::string manipulators to parse the pdf names - - /// return chomped std::string - static std::string chomptoken(std::string& s1, const std::string& s2) - { - std::string s3 = ""; - std::string::size_type pos = s1.find(s2); - if ( pos != std::string::npos ) { - s3 = s1.substr(0, pos); - s1.erase(0, pos+1); - } - else { - s3 = s1.substr(0, s1.size()); - s1.erase(0, s1.size()+1); - } - return s3; - } - - static std::vector<std::string> parse(std::string s, const std::string& key) { - std::vector<std::string> clauses; - while ( s.size() ) clauses.push_back( chomptoken(s, key) ); - return clauses; - } - - /// get the required pdf combinations from those registered - void findgenpdf( std::string s ); - - /// add a generic pdf to the data base of registered pdfs - void addpdf( const std::string& s, const std::vector<int>& combinations=std::vector<int>() ); - - appl_pdf* genpdf(int i) { return m_genpdf[i]; } - -public: - - int subproc() const { return m_subproc; } - -protected: - - // histograms for saving the observable - TH1D* m_obs_bins; - TH1D* m_obs_bins_combined; - - // order in alpha_s of tree level contribution - int m_leading_order; - - // how many orders in the calculation, lo, nlo, nnlo etc - int m_order; - - // the actual weight grids themselves - igrid** m_grids[MAXGRIDS]; /// up to MAXGRIDS grids LO, NLO, NNLO, Real virtual, etc - - // total cross section qand uncertainty - double m_total; - double m_totalerror; - - // state variables - double m_run; - bool m_optimised; - bool m_trimmed; - - bool m_normalised; - - bool m_symmetrise; - - // transform and pdf combination tags - std::string m_transform; - std::string m_genpdfname; - - // pdf combination class - appl_pdf* m_genpdf[MAXGRIDS]; - - static const std::string m_version; - - double m_cmsScale; - - double m_dynamicScale; - - /// bin by bin correction factors - // std::vector<std::vector<double> > m_corrections; - std::vector<correction> m_corrections; - std::vector<std::string> m_correctionLabels; - - - /// should we apply the corrections? 
- bool m_applyCorrections; - - /// flag vector to determine whether each individual - /// correction should be applied - std::vector<bool> m_applyCorrection; - - std::string m_documentation; - - std::vector<double> m_ckmsum; - std::vector<std::vector<double> > m_ckm2; - std::vector<std::vector<double> > m_ckm; - - CALCULATION m_type; - - bool m_read; - - std::vector<int> m_combine; - - int m_subproc; - int m_bin; - - std::vector<double> m_userdata; - -}; - - -}; - -// shouldn't have this, grid is too large a structure -// grid operator*(const double& d, const appl::grid& g) { return g*d; } - -std::ostream& operator<<(std::ostream& s, const appl::grid& mygrid); - - - -#endif // __APPL_GRID_H diff --git a/include/appl_grid/appl_pdf.h b/include/appl_grid/appl_pdf.h deleted file mode 100644 index e0e3c23b48ce4467b6af7dc25881d9966223c963..0000000000000000000000000000000000000000 --- a/include/appl_grid/appl_pdf.h +++ /dev/null @@ -1,198 +0,0 @@ -// emacs: this is -*- c++ -*- -// -// appl_pdf.h -// -// pdf transform functions header -// -// Copyright (C) 2007 M.Sutton (sutt@hep.ucl.ac.uk) -// -// $Id: appl_pdf.h, v Fri Dec 21 22:19:50 GMT 2007 sutt $ - - -#ifndef __APPL_PDF_H -#define __APPL_PDF_H - -#include <iostream> -#include <fstream> -#include <sstream> - -#include <vector> -#include <map> -#include <string> - -#include <exception> - - -namespace appl { - - -class appl_pdf; - -typedef std::map<const std::string, appl_pdf*> pdfmap; - - -// this is a *maybe* nice class, a base class for pdf -// functions -// -// it has a virtual evaluate() method to be definied in -// the derived class, and a static std::map of all the names -// of instances of the derived classes -// -// when a new instance of the class is created, it -// automatically adds it's name to the std::map, so the user -// doesn't need to worry about consistency, and removes -// itself when the derived instance is deleted - -class appl_pdf { - -public: - - // pdf error exception - class exception : public std::exception { - public: - exception(const std::string& s="") { std::cerr << what() << " " << s << std::endl; }; - //exception(std::ostream& s) { std::cerr << what() << " " << s << std::endl; }; - exception(std::ostream& s) { std::stringstream ss; ss << s.rdbuf(); std::cerr << what() << " " << ss.str() << std::endl; }; - const char* what() const throw() { return "appl::appl_pdf::exception "; } - }; - -public: - - /// constructor and destructor - appl_pdf(const std::string& name); - - virtual ~appl_pdf(); - - /// retrieve an instance from the std::map - static appl_pdf* getpdf(const std::string& s, bool printout=true); - - /// print out the pdf std::map - static void printmap(std::ostream& s=std::cout) { - pdfmap::iterator itr = __pdfmap.begin(); - while ( itr!=__pdfmap.end() ) { - s << "pdfmap " << itr->first << "\t\t" << itr->second << std::endl; - itr++; - } - } - - /// initialise the factory - static bool create_map(); - - virtual void evaluate(const double* fA, const double* fB, double* H) = 0; - - virtual int decideSubProcess( const int , const int ) const; - - std::string name() const { return m_name; } - - int Nproc() const { return m_Nproc; } - int size() const { return m_Nproc; } - - - - std::string rename(const std::string& name) { - /// remove my entry from the std::map, and add me again with my new name - if ( __pdfmap.find(m_name)!=__pdfmap.end() ) { - __pdfmap.erase(__pdfmap.find(m_name)); - } - else { - std::cout << "appl_pdf::rename() " << m_name << " not in std::map" << std::endl; - } - m_name = name; - 
addtopdfmap(m_name, this); - return m_name; - } - - - /// code to allow optional std::vector of subprocess contribution names - - const std::vector<std::string>& subnames() const { return m_subnames; } - - void addSubnames( const std::vector<std::string>& subnames ) { m_subnames = subnames; } - - void addSubname( const std::string& subname ) { - if ( int(m_subnames.size())<m_Nproc-1 ) m_subnames.push_back(subname); - } - - - - /// is this a W+ or a W- pdf combination? or neither? - int getckmcharge() const { return m_ckmcharge; } - - /// access the ckm matrices - if no matrices are required these std::vectors have - /// zero size - - const std::vector<double>& getckmsum() const { return m_ckmsum; } - const std::vector<std::vector<double> >& getckm2() const { return m_ckm2; } - const std::vector<std::vector<double> >& getckm() const { return m_ckm; } - - /// set the ckm matrices from external values - void setckm( const std::vector<std::vector<double> >& ckm ); - void setckm2( const std::vector<std::vector<double> >& ckm2 ); - - /// code to create the ckm matrices using the hardcoded default - /// values if required - /// takes a bool input - if true creates the ckm for Wplus, - /// false for Wminus - void make_ckm( bool Wp=true ); - - void SetNProc(int Nsub){ m_Nproc=Nsub; return;}; - - /// set some useful names for the different subprocesses - void setnames( const std::vector<std::string>& names) { m_names = names; } - std::vector<std::string> getnames() const { return m_names; } - -protected: - - /// search the path for configuration files - static std::ifstream& openpdf( const std::string& filename ); - -private: - - static void addtopdfmap(const std::string& s, appl_pdf* f) { - if ( __pdfmap.find(s)==__pdfmap.end() ) { - __pdfmap.insert( pdfmap::value_type( s, f ) ); - // std::cout << "appl_pdf::addtomap() registering " << s << " in std::map addr \t" << f << std::endl; - } - else { - throw exception( std::cerr << "appl_pdf::addtopdfmap() " << s << " already in std::map\t0x" << __pdfmap.find(s)->second ); - } - } - -protected: - - int m_Nproc; - std::string m_name; - - std::vector<std::string> m_subnames; - - /// ckm matrix related information - /// W+, W- or neither? 
- int m_ckmcharge; - - // ckm matrices - std::vector<double> m_ckmsum; - std::vector<std::vector<double> > m_ckm2; /// squared 13x13 matrix - std::vector<std::vector<double> > m_ckm; /// simple 3x3 - - /// some strings for more useful name if required - std::vector<std::string> m_names; - - static pdfmap __pdfmap; - static std::vector<std::string> __pdfpath; -}; - - -} - - -#endif // __APPL_PDF_H - - - - - - - - - - diff --git a/include/appl_grid/correction.h b/include/appl_grid/correction.h deleted file mode 100644 index 647bbf86df3684e43fb86d9dec1ec9680efb2ad4..0000000000000000000000000000000000000000 --- a/include/appl_grid/correction.h +++ /dev/null @@ -1,67 +0,0 @@ -// emacs: this is -*- c++ -*- -// -// @file correction.h -// class to store the multipliciative post processing -// corrections to be applied, only basic for the time -// but will be extended as appropriate -// -// Copyright (C) 2014 M.Sutton (sutt@cern.ch) -// -// $Id: correction.h, v0.0 Sun 23 Mar 2014 09:08:46 CET sutt $ - - -#ifndef CORRECTION_H -#define CORRECTION_H - -#include <iostream> -#include <vector> -#include <string> - - -// typedef std::vector<double> correction; - - -class correction { - -public: - - correction(const std::vector<double>& v, const std::string& s="" ) : mlabel(s), mv(v) { } - - virtual ~correction() { } - - std::string label() const { return mlabel; } - - unsigned size() const { return mv.size(); } - - double& operator[](int i) { return mv[i]; } - double operator[](int i) const { return mv[i]; } - - operator std::vector<double>&() { return mv; } - - correction operator=(const std::vector<double>& v) { mv=v; return *this; } - -private: - - std::string mlabel; - std::vector<double> mv; - -}; - - -// inline std::ostream& operator<<( std::ostream& s, const correction& /* _c */ ) { -// return s; -// } - - - -#endif // CORRECTION_H - - - - - - - - - - diff --git a/include/theorexpr.inc b/include/theorexpr.inc index 99c45b673936a364be0c535a4fc56a6edeb2d8af..6b126ba611220cf20fea62d4b4245257f76ac544 100644 --- a/include/theorexpr.inc +++ b/include/theorexpr.inc @@ -1,14 +1,14 @@ C> Common block for theory expression integer NTermsMax - parameter (NTermsMax = 16) + parameter (NTermsMax = 128) double precision dynscale integer NTerms - character*8 TermName(NTermsMax) + character*32 TermName(NTermsMax) character*80 TermType(NTermsMax) - character*2048 TermInfo(NTermsMax) + character*4096 TermInfo(NTermsMax) character*256 TermSource(NTermsMax) - character*1000 TheorExpr + character*10000 TheorExpr integer ppbar_collisions integer normalised integer murdef @@ -25,7 +25,7 @@ C Also store dataset info here C And some basic vars: - common/theorexpr/ dynscale, NTerms, TermName, TermType, TermInfo, - & TermSource, TheorExpr, ppbar_collisions, normalised, + common/theorexpr/ dynscale, NTerms, TermName, TermType, TermInfo, + & TermSource, TheorExpr, ppbar_collisions, normalised, $ murdef, mufdef, $ ninfo, datainfo, CInfo, dsname, ds_index diff --git a/input_steering/parameters.yaml.abmp16 b/input_steering/parameters.yaml.abmp16 new file mode 100644 index 0000000000000000000000000000000000000000..4535f8ede1b62bf74dd2086002ed21f268cfc918 --- /dev/null +++ b/input_steering/parameters.yaml.abmp16 @@ -0,0 +1,197 @@ +Minimizer: MINUIT # CERES +MINUIT: + Commands: | + call fcn 3 +# doErrors : Hesse # None + +Parameters: + Ag : DEPENDENT + Bg : [ -0.061953] + Cg : [ 5.562367 , 0.55] + Agp : [ 0.07311 ] # negative gluon .... + Bgp : [ -0.383100 ] + Cgp : [ 25.0, 0.] 
# fix C of negative gluon + Auv : DEPENDENT + Buv : [ 0.810476 ] + Cuv : [ 4.823512 ] + Duv : [ 0 ] + Euv : [ 9.921366 ] + Adv : DEPENDENT + Bdv : [ 1.029995 ] + Cdv : [ 4.846279 ] + Aubar: [ 0.1613 ] + Bubar: [ -0.1273 ] + Cubar: [ 7.059694] + Dubar: [ 1.548098] + Adbar: [ 0.1613 ] + Bdbar: [ -0.1273 ] + Cdbar: + value: 9.586246 + step: 1.2345 + #min + #max + #pr_mean + #pr_sigma + As : [ 0.1075 ] + Bs : [ -0.1273 ] + Cs : [ 9.586246 ] + Asbar : [ 0.1075 ] + Bsbar : [ -0.1473 ] + Csbar : [ 8.586246 ] + ZERO : [ 0. ] # zero + + ABMP_uv_A : DEPENDENT + ABMP_uv_a : [ 0.613, 0.033 ] + ABMP_uv_b : [ 3.443, 0.064 ] + ABMP_uv_g1 : [ -0.22, 0.33 ] + ABMP_uv_g2 : [ -2.88, 0.46 ] + ABMP_uv_g3 : [ 2.67, 0.80 ] + + ABMP_dv_A : DEPENDENT + ABMP_dv_a : [ 0.372, 0.068 ] + ABMP_dv_b : [ 4.47, 0.55 ] + ABMP_dv_g1 : [ -3.20, 0.77 ] + ABMP_dv_g2 : [ -0.61, 1.96 ] + ABMP_dv_g3 : [ 0.0, 0.001 ] + + ABMP_us_A : [ 0.0703, 0.0081] + ABMP_us_a : [-0.4155, 0.031 ] + ABMP_us_b : [ 7.75, 0.39 ] + ABMP_us_gm1 : [ 0.0373, 0.0032] + ABMP_us_g1 : [ 4.44, 0.95 ] + + ABMP_ds_A : [ 0.1408, 0.0076] + ABMP_ds_a : [-0.1731, 0.011 ] + ABMP_ds_b : [ 8.41, 0.34 ] + ABMP_ds_g1 : [ 13.3, 1.7 ] + + ABMP_ss_A : [ 0.0594, 0.0042] + ABMP_ss_a : [-0.3445, 0.019 ] + ABMP_ss_b : [ 6.52, 0.27 ] + + #ABMP_ssbar_A: [ 0.0594, 0.0042] + #ABMP_ssbar_a: [-0.3445, 0.019 ] + #ABMP_ss_bbar: [ 6.52, 0.27 ] + + ABMP_g_A : DEPENDENT + ABMP_g_a : [-0.1534, 0.0094] + ABMP_g_b : [ 6.42, 0.83 ] + ABMP_g_g1 : [ -11.8, 3.7 ] + +Parameterisations: + par_uv: + #class: HERAPDF + #parameters: [Auv,Buv,Cuv,Duv,Euv] + class: ABMPvalence + parameters: [ABMP_uv_A, ABMP_uv_a, ABMP_uv_b, ZERO, ABMP_uv_g1, ABMP_uv_g2, ABMP_uv_g3] + par_dv: + #class: HERAPDF + #parameters: [Adv,Bdv,Cdv] + class: ABMPvalence + parameters: [ABMP_dv_A, ABMP_dv_a, ABMP_dv_b, ZERO, ABMP_dv_g1, ABMP_dv_g2, ABMP_dv_g3] + par_ubar: + #class: HERAPDF + #parameters: [Aubar,Bubar,Cubar,Dubar] + class: ABMPsea + parameters: [ABMP_us_A, ABMP_us_a, ABMP_us_b, ABMP_us_gm1, ABMP_us_g1, ZERO, ZERO] + par_dbar: + #class: HERAPDF + #parameters: [Adbar,Bdbar,Cdbar] + class: ABMPsea + parameters: [ABMP_ds_A, ABMP_ds_a, ABMP_ds_b, ZERO, ABMP_ds_g1, ZERO, ZERO] + par_s: + #class: HERAPDF + #parameters: [As,Bs,Cs] + class: ABMPsea + parameters: [ABMP_ss_A, ABMP_ss_a, ABMP_ss_b, ZERO, ZERO, ZERO, ZERO] + par_sbar: + #class: HERAPDF + #parameters: [Asbar,Bsbar,Csbar] + class: ABMPsea + parameters: [ABMP_ss_A, ABMP_ss_a, ABMP_ss_b, ZERO, ZERO, ZERO, ZERO] + #class: ABMPPdfParam + #parameters: [ABMP_ssbar_A, ABMP_ssbar_a, ABMP_ssbar_b, ZERO, ZERO, ZERO, ZERO] + par_g: + #class: NegativeGluon + #parameters: [Ag,Bg,Cg,ZERO,ZERO,Agp,Bgp,Cgp] + class: ABMPgluon + parameters: [ABMP_g_A, ABMP_g_a, ABMP_g_b, ZERO, ABMP_g_g1, ZERO, ZERO] + +DefaultDecomposition: proton +Decompositions: + proton: + #class: UvDvubardbars + class: UvDvUbarDbarSSbar + xuv: par_uv + xdv: par_dv + xubar: par_ubar + xdbar: par_dbar + xs: par_s + xsbar: par_sbar + xg: par_g + +DefaultEvolution: proton-QCDNUM +#DefaultEvolution: proton-LHAPDF +Evolutions: + proton-QCDNUM: + ? !include yaml/evolutions/QCDNUM/parameters.yaml + decomposition: proton #this could be omitted, as the default decomposition is set + proton-LHAPDF: + class: LHAPDF + set: "CT10" + member: 0 +# proton-APFEL: +# ? !include yaml/evolutions/APFELxx/parameters.yaml +# decomposition: proton + +#Q0 : 1.378404875209 # Initial scale =sqrt(1.9) +Q0 : 3.0 # ABMP16 + +? 
!include constants.yaml + +# just for test of ABMP16 parametrisation (in QCDNUM charm quark mass should be above starting scale) +mch : 3.1 + +# AlphaS, incuding options to fit it: +alphas : 0.118 +# value: 0.118 +# step: 0.01 +# prior: 0.118 +# priorUnc: 0.1 +# min: 0.1 +# max: 0.3 + +# Strange and charm fractions: +fs: 0.4 +fcharm: 0. + +# RT DIS scheme settings: +? !include yaml/reactions/RT_DISNC/parameters.yaml +# FONLL scheme settings: +? !include yaml/reactions/FONLL_DISNC/parameters.yaml +? !include yaml/reactions/FONLL_DISCC/parameters.yaml +# FF ABM scheme settings: +? !include yaml/reactions/FFABM_DISNC/parameters.yaml +? !include yaml/reactions/FFABM_DISCC/parameters.yaml +# AFB settings: +? !include yaml/reactions/AFB/parameters.yaml +# APPLgrid settings: +? !include yaml/reactions/APPLgrid/parameters.yaml +# (optional) Fractal module settings: +# ? !include yaml/reactions/Fractal_DISNC/parameters.yaml + +# Specify HF scheme used for DIS NC processes: +hf_scheme_DISNC : + defaultValue : 'RT_DISNC' # global specification +# defaultValue : 'BaseDISNC' # global specification +# defaultValue : 'FONLL_DISNC' # global specification +# defaultValue : 'FFABM_DISNC' +# 'HERA1+2 NCep 920' : 'BaseDISNC' # datafile specific (based on name) +# 1 : BaseDISNC +# 'HERA1+2 NCep 920' : 'Fractal_DISNC' # Fractal model. Add parameters file if you want to try it (see above) + +# Specify HF scheme used for DIS CC processes: +hf_scheme_DISCC : + defaultValue : 'BaseDISCC' # global specification +# defaultValue : 'FONLL_DISCC' # global specification +# defaultValue : 'FFABM_DISCC' # global specification diff --git a/pdfdecompositions/UvDvUbarDbarSSbarPdfDecomposition/include/UvDvUbarDbarSSbarPdfDecomposition.h b/pdfdecompositions/UvDvUbarDbarSSbarPdfDecomposition/include/UvDvUbarDbarSSbarPdfDecomposition.h new file mode 100644 index 0000000000000000000000000000000000000000..65ea8542c7f66da15ae8ed994ce69d5d43049e8e --- /dev/null +++ b/pdfdecompositions/UvDvUbarDbarSSbarPdfDecomposition/include/UvDvUbarDbarSSbarPdfDecomposition.h @@ -0,0 +1,43 @@ + +#pragma once + +#include "BasePdfDecomposition.h" + +/** + @class UvDvUbarDbarSSbarPdfDecomposition + + @brief A class for UvDvUbarDbarSSbar pdf decomposition + + @version 0.1 + @date 2019-02-25 + */ + +namespace xfitter { + + class UvDvUbarDbarSSbarPdfDecomposition : public BasePdfDecomposition + { + public: + /// Default constructor. 
Name is the PDF name + UvDvUbarDbarSSbarPdfDecomposition (const char *inName); + + virtual const char*getClassName()const override final; + + /// Optional initialization at the first call + virtual void atStart() override final; + + /// Compute sum-rules + virtual void atIteration() override final; + + /// Compute PDF in a physical base in LHAPDF format for given x and Q + virtual std::function<std::map<int,double>(const double& x)> f0() const override final; + + private: + BasePdfParam*par_xuv{nullptr}, + *par_xdv{nullptr}, + *par_xubar{nullptr}, + *par_xdbar{nullptr}, + *par_xs{nullptr}, + *par_xsbar{nullptr}, + *par_xg{nullptr}; + }; +} diff --git a/pdfdecompositions/UvDvUbarDbarSSbarPdfDecomposition/src/Makefile.am b/pdfdecompositions/UvDvUbarDbarSSbarPdfDecomposition/src/Makefile.am new file mode 100644 index 0000000000000000000000000000000000000000..110a22006a02306ad1b09ee25ddad0cb0c48bb9d --- /dev/null +++ b/pdfdecompositions/UvDvUbarDbarSSbarPdfDecomposition/src/Makefile.am @@ -0,0 +1,13 @@ + +# Created by AddPdfDecomposition.py on 2019-02-25 + +AM_CXXFLAGS = -I$(srcdir)/../include -I$(srcdir)/../../../include -I$(srcdir)/../../../pdfparams/BasePdfParam/include/ -I$(srcdir)/../../BasePdfDecomposition/include -Wall -fPIC -Wno-deprecated + +#lib_LTLIBRARIES = libUvDvUbarDbarSSbarPdfDecomposition_xfitter.la +lib_LTLIBRARIES = libuvdvubardbarssbarpdfdecomposition_xfitter.la +libuvdvubardbarssbarpdfdecomposition_xfitter_la_SOURCES = UvDvUbarDbarSSbarPdfDecomposition.cc + +datadir = ${prefix}/yaml/pdfdecompositions/UvDvUbarDbarSSbar +data_DATA = ../yaml/parameters.yaml + +dist_noinst_HEADERS = ../include ../yaml diff --git a/pdfdecompositions/UvDvUbarDbarSSbarPdfDecomposition/src/UvDvUbarDbarSSbarPdfDecomposition.cc b/pdfdecompositions/UvDvUbarDbarSSbarPdfDecomposition/src/UvDvUbarDbarSSbarPdfDecomposition.cc new file mode 100644 index 0000000000000000000000000000000000000000..2c69e4e3f03afa842751ff65ace06034979e5098 --- /dev/null +++ b/pdfdecompositions/UvDvUbarDbarSSbarPdfDecomposition/src/UvDvUbarDbarSSbarPdfDecomposition.cc @@ -0,0 +1,93 @@ + +/* + @file UvDvUbarDbarSSbarPdfDecomposition.cc + @date 2019-02-25 + @author AddPdfDecomposition.py + Created by AddPdfDecomposition.py on 2019-02-25 +*/ + +#include "UvDvUbarDbarSSbarPdfDecomposition.h" +#include "xfitter_pars.h" +#include "xfitter_steer.h" +#include <iostream> +#include <iomanip> +#include <cmath> + +namespace xfitter { + + /// the class factories, for dynamic loading + extern "C" UvDvUbarDbarSSbarPdfDecomposition* create(const char*name) { + return new UvDvUbarDbarSSbarPdfDecomposition(name); + } + + // Constructor + UvDvUbarDbarSSbarPdfDecomposition::UvDvUbarDbarSSbarPdfDecomposition(const char* inName) : BasePdfDecomposition(inName) { + } + + const char*UvDvUbarDbarSSbarPdfDecomposition::getClassName()const{return"UvDvUbarDbarSSbar";} + + // Init at start: + void UvDvUbarDbarSSbarPdfDecomposition::atStart() { + const YAML::Node node=XFITTER_PARS::getDecompositionNode(_name); + //TODO: handle errors + par_xuv =getParameterisation(node["xuv"].as<string>()); + par_xdv =getParameterisation(node["xdv"].as<string>()); + par_xubar=getParameterisation(node["xubar"].as<string>()); + par_xdbar=getParameterisation(node["xdbar"].as<string>()); + par_xs =getParameterisation(node["xs"].as<string>()); + par_xsbar=getParameterisation(node["xsbar"].as<string>()); + par_xg =getParameterisation(node["xg"].as<string>()); + } + + void UvDvUbarDbarSSbarPdfDecomposition::atIteration() { + //Enforce sum rules + // counting sum-rules for uv and dv 
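+    // Valence number sum rules: with moment(n) here corresponding to the integral
+    // of x^n * xf(x) over x, moment(-1) is the quark-number integral, so the
+    // valence normalisations below are fixed to int uv dx = 2 and int dv dx = 1.
+    // The gluon normalisation is then set from the momentum sum rule as
+    // 1 - (sum of the quark momentum fractions given by moment(0)).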
+ par_xuv->setMoment(-1,2.0); + par_xdv->setMoment(-1,1.0); + // momentum sum-rule + // quark part + double xsumq=0; + xsumq+= par_xuv ->moment(0); + xsumq+= par_xdv ->moment(0); + xsumq+=2*par_xubar->moment(0); + xsumq+=2*par_xdbar->moment(0); + xsumq+= par_xs ->moment(0); + xsumq+= par_xsbar->moment(0); + // gluon part + par_xg->setMoment(0,1-xsumq); + + printParams(); + } + + // Returns a LHAPDF-style function, that returns PDFs in a physical basis for given x + std::function<std::map<int,double>(const double& x)> UvDvUbarDbarSSbarPdfDecomposition::f0() const + { + // lambda function + return [=] (double const& x)->std::map<int, double> { + double ubar=(*par_xubar)(x); + double dbar=(*par_xdbar)(x); + double u=(*par_xuv)(x)+ubar; + double d=(*par_xdv)(x)+dbar; + double s=(*par_xs)(x); + double sbar=(*par_xsbar)(x); + double g=(*par_xg)(x); + std::map<int, double> res = { + {-6,0}, + {-5,0}, + {-4,0}, + {-3,sbar}, + {-2,ubar}, + {-1,dbar}, + { 1,d}, + { 2,u}, + { 3,s}, + { 4,0}, + { 5,0}, + { 6,0}, + {21,g} + }; + return res; + }; + } + +} diff --git a/pdfdecompositions/UvDvUbarDbarSSbarPdfDecomposition/yaml/parameters.yaml b/pdfdecompositions/UvDvUbarDbarSSbarPdfDecomposition/yaml/parameters.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pdfparams/ABMPgluonPdfParam/include/ABMPgluonPdfParam.h b/pdfparams/ABMPgluonPdfParam/include/ABMPgluonPdfParam.h new file mode 100644 index 0000000000000000000000000000000000000000..8294a5475cac8262f271e034ddc252dab86ba8a7 --- /dev/null +++ b/pdfparams/ABMPgluonPdfParam/include/ABMPgluonPdfParam.h @@ -0,0 +1,31 @@ + +#pragma once + +#include "BasePdfParam.h" + +/** + @class ABMPgluonPdfParam + + @brief A class for ABMPgluon pdf parameterisation according to Eqs. 19-22 from Phys.Rev. D96 (2017) no.1, 014011 + xg(x) = A * (1 - x)^b * x^[a * (1 + gam_m1 * ln(x)) * (1 + gam_1 * x + gam_2 * x^2 + gam_3 * x^3)] + (Note that gam_m1 is zero for gluon in the ABMP16 fit so it appears for generality with other PDFs.) + + @version 0.1 + @date 2019-02-25 + */ + +namespace xfitter{ + class ABMPgluonPdfParam:public BasePdfParam{ + public: + ABMPgluonPdfParam(const std::string&inName):BasePdfParam(inName){} + //Evaluate xf(x) at given x with current parameters + virtual double operator()(double x)const override final; + // (Optional) compute moments: + // virtual double moment(int nMoment=-1)const override final; + // (Optional) set moments: + // virtual void setMoment(int nMoment,double value)override final; + // (Optional) + //Initialize from a yaml node. 
Uses node[getName] as the basis + // virtual void initFromYaml(YAML::Node value)override final; + }; +} diff --git a/pdfparams/ABMPgluonPdfParam/src/ABMPgluonPdfParam.cc b/pdfparams/ABMPgluonPdfParam/src/ABMPgluonPdfParam.cc new file mode 100644 index 0000000000000000000000000000000000000000..6715da10676cbdd632a1391ac5f9f4be916b4228 --- /dev/null +++ b/pdfparams/ABMPgluonPdfParam/src/ABMPgluonPdfParam.cc @@ -0,0 +1,27 @@ + +/* + @file ABMPgluonPdfParam.cc + @date 2019-02-25 + @author AddPdfParam.py + Created by AddPdfParam.py on 2019-02-25 +*/ + +#include "ABMPgluonPdfParam.h" +#include <cmath> + +namespace xfitter{ + //for dynamic loading + extern"C" ABMPgluonPdfParam*create(const char*name){ + return new ABMPgluonPdfParam(name); + } + // Main function to compute PDF + double ABMPgluonPdfParam::operator()(double x)const{ + const int npar = getNPar(); + if (npar<7) { + return NAN; + } + double power = *pars[1] * (1 + *pars[3] * log(x)) * (1 + *pars[4] * x + *pars[5] * x * x + *pars[6] * x * x * x); + double val = *pars[0] * pow(1 - x, *pars[2]) * pow(x, power); + return val; + } +} diff --git a/pdfparams/ABMPgluonPdfParam/src/Makefile.am b/pdfparams/ABMPgluonPdfParam/src/Makefile.am new file mode 100644 index 0000000000000000000000000000000000000000..33c8aeb4f201ce2c8f91e201324f693072177b79 --- /dev/null +++ b/pdfparams/ABMPgluonPdfParam/src/Makefile.am @@ -0,0 +1,13 @@ + +# Created by AddPdfParam.py on 2019-02-25 + +AM_CXXFLAGS = -I$(srcdir)/../include -I$(srcdir)/../../../include -I$(srcdir)/../../BasePdfParam/include -Wall -fPIC -Wno-deprecated + +lib_LTLIBRARIES = libABMPgluonPdfParam_xfitter.la +libABMPgluonPdfParam_xfitter_la_SOURCES = ABMPgluonPdfParam.cc + +datadir = ${prefix}/yaml/pdfparams/ABMPgluon +data_DATA = ../yaml/parameters.yaml + +dist_noinst_HEADERS = ../include ../yaml +libABMPgluonPdfParam_xfitter_la_LDFLAGS=-lBasePdfParam_xfitter -L$(libdir) diff --git a/pdfparams/ABMPgluonPdfParam/yaml/parameters.yaml b/pdfparams/ABMPgluonPdfParam/yaml/parameters.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pdfparams/ABMPseaPdfParam/include/ABMPseaPdfParam.h b/pdfparams/ABMPseaPdfParam/include/ABMPseaPdfParam.h new file mode 100644 index 0000000000000000000000000000000000000000..847278e85b93780968195b6e2b430237ae8936a9 --- /dev/null +++ b/pdfparams/ABMPseaPdfParam/include/ABMPseaPdfParam.h @@ -0,0 +1,30 @@ + +#pragma once + +#include "BasePdfParam.h" + +/** + @class ABMPseaPdfParam + + @brief A class for ABMPsea pdf parameterisation according to Eqs. 19-22 from Phys.Rev. D96 (2017) no.1, 014011 + xv(x) = A * (1 - x)^b * x^[a * (1 + gam_m1 * ln(x)) * (1 + gam_1 * x + gam_2 * x^2 + gam_3 * x^3)] + + @version 0.1 + @date 2019-02-25 + */ + +namespace xfitter{ + class ABMPseaPdfParam:public BasePdfParam{ + public: + ABMPseaPdfParam(const std::string&inName):BasePdfParam(inName){} + //Evaluate xf(x) at given x with current parameters + virtual double operator()(double x)const override final; + // (Optional) compute moments: + // virtual double moment(int nMoment=-1)const override final; + // (Optional) set moments: + // virtual void setMoment(int nMoment,double value)override final; + // (Optional) + //Initialize from a yaml node. 
Uses node[getName] as the basis + // virtual void initFromYaml(YAML::Node value)override final; + }; +} diff --git a/pdfparams/ABMPseaPdfParam/src/ABMPseaPdfParam.cc b/pdfparams/ABMPseaPdfParam/src/ABMPseaPdfParam.cc new file mode 100644 index 0000000000000000000000000000000000000000..574c0439d77ea1afc39e12775b84868287b2279b --- /dev/null +++ b/pdfparams/ABMPseaPdfParam/src/ABMPseaPdfParam.cc @@ -0,0 +1,27 @@ + +/* + @file ABMPseaPdfParam.cc + @date 2019-02-25 + @author AddPdfParam.py + Created by AddPdfParam.py on 2019-02-25 +*/ + +#include "ABMPseaPdfParam.h" +#include <cmath> + +namespace xfitter{ + //for dynamic loading + extern"C" ABMPseaPdfParam*create(const char*name){ + return new ABMPseaPdfParam(name); + } + // Main function to compute PDF + double ABMPseaPdfParam::operator()(double x)const{ + const int npar = getNPar(); + if (npar<7) { + return NAN; + } + double power = *pars[1] * (1 + *pars[3] * log(x)) * (1 + *pars[4] * x + *pars[5] * x * x + *pars[6] * x * x * x); + double val = *pars[0] * pow(1 - x, *pars[2]) * pow(x, power); + return val; + } +} diff --git a/pdfparams/ABMPseaPdfParam/src/Makefile.am b/pdfparams/ABMPseaPdfParam/src/Makefile.am new file mode 100644 index 0000000000000000000000000000000000000000..d48c7c85792f2f287b5a2c1cdef8a29f514b2022 --- /dev/null +++ b/pdfparams/ABMPseaPdfParam/src/Makefile.am @@ -0,0 +1,13 @@ + +# Created by AddPdfParam.py on 2019-02-25 + +AM_CXXFLAGS = -I$(srcdir)/../include -I$(srcdir)/../../../include -I$(srcdir)/../../BasePdfParam/include -Wall -fPIC -Wno-deprecated + +lib_LTLIBRARIES = libABMPseaPdfParam_xfitter.la +libABMPseaPdfParam_xfitter_la_SOURCES = ABMPseaPdfParam.cc + +datadir = ${prefix}/yaml/pdfparams/ABMPsea +data_DATA = ../yaml/parameters.yaml + +dist_noinst_HEADERS = ../include ../yaml +libABMPseaPdfParam_xfitter_la_LDFLAGS=-lBasePdfParam_xfitter -L$(libdir) diff --git a/pdfparams/ABMPseaPdfParam/yaml/parameters.yaml b/pdfparams/ABMPseaPdfParam/yaml/parameters.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pdfparams/ABMPvalencePdfParam/include/ABMPvalencePdfParam.h b/pdfparams/ABMPvalencePdfParam/include/ABMPvalencePdfParam.h new file mode 100644 index 0000000000000000000000000000000000000000..c28e425743b58bcfab37913aa1875e9ae7d25f46 --- /dev/null +++ b/pdfparams/ABMPvalencePdfParam/include/ABMPvalencePdfParam.h @@ -0,0 +1,31 @@ + +#pragma once + +#include "BasePdfParam.h" + +/** + @class ABMPvalencePdfParam + + @brief A class for ABMPvalence pdf parameterisation according to Eqs. 19-22 from Phys.Rev. D96 (2017) no.1, 014011 + xv(x) = A * (1 - x)^b * x^[(1 + gam_m1 * ln(x)) * (a + gam_1 * x + gam_2 * x^2 + gam_3 * x^3)] + (Note that gam_m1 is zero for both u- and d-valence in the ABMP16 fit.) + + @version 0.1 + @date 2019-02-25 + */ + +namespace xfitter{ + class ABMPvalencePdfParam:public BasePdfParam{ + public: + ABMPvalencePdfParam(const std::string&inName):BasePdfParam(inName){} + //Evaluate xf(x) at given x with current parameters + virtual double operator()(double x)const override final; + // (Optional) compute moments: + // virtual double moment(int nMoment=-1)const override final; + // (Optional) set moments: + // virtual void setMoment(int nMoment,double value)override final; + // (Optional) + //Initialize from a yaml node. 
Uses node[getName] as the basis + // virtual void initFromYaml(YAML::Node value)override final; + }; +} diff --git a/pdfparams/ABMPvalencePdfParam/src/ABMPvalencePdfParam.cc b/pdfparams/ABMPvalencePdfParam/src/ABMPvalencePdfParam.cc new file mode 100644 index 0000000000000000000000000000000000000000..10094d4bad6695333dcb02bff29654d55d15e8f2 --- /dev/null +++ b/pdfparams/ABMPvalencePdfParam/src/ABMPvalencePdfParam.cc @@ -0,0 +1,27 @@ + +/* + @file ABMPvalencePdfParam.cc + @date 2019-02-25 + @author AddPdfParam.py + Created by AddPdfParam.py on 2019-02-25 +*/ + +#include "ABMPvalencePdfParam.h" +#include <cmath> + +namespace xfitter{ + //for dynamic loading + extern"C" ABMPvalencePdfParam*create(const char*name){ + return new ABMPvalencePdfParam(name); + } + // Main function to compute PDF + double ABMPvalencePdfParam::operator()(double x)const{ + const int npar = getNPar(); + if (npar<7) { + return NAN; + } + double power = (*pars[1] + *pars[4] * x + *pars[5] * x * x + *pars[6] * x * x * x) * (1 + *pars[3] * log(x)); + double val = *pars[0] * pow(1 - x, *pars[2]) * pow(x, power); + return val; + } +} diff --git a/pdfparams/ABMPvalencePdfParam/src/Makefile.am b/pdfparams/ABMPvalencePdfParam/src/Makefile.am new file mode 100644 index 0000000000000000000000000000000000000000..d98d0a19d7eb17705aa4ca5c44177d3d537dde18 --- /dev/null +++ b/pdfparams/ABMPvalencePdfParam/src/Makefile.am @@ -0,0 +1,13 @@ + +# Created by AddPdfParam.py on 2019-02-25 + +AM_CXXFLAGS = -I$(srcdir)/../include -I$(srcdir)/../../../include -I$(srcdir)/../../BasePdfParam/include -Wall -fPIC -Wno-deprecated + +lib_LTLIBRARIES = libABMPvalencePdfParam_xfitter.la +libABMPvalencePdfParam_xfitter_la_SOURCES = ABMPvalencePdfParam.cc + +datadir = ${prefix}/yaml/pdfparams/ABMPvalence +data_DATA = ../yaml/parameters.yaml + +dist_noinst_HEADERS = ../include ../yaml +libABMPvalencePdfParam_xfitter_la_LDFLAGS=-lBasePdfParam_xfitter -L$(libdir) diff --git a/pdfparams/ABMPvalencePdfParam/yaml/parameters.yaml b/pdfparams/ABMPvalencePdfParam/yaml/parameters.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/reactions/APPLgrid/include/ReactionAPPLgrid.h b/reactions/APPLgrid/include/ReactionAPPLgrid.h index e860f7e4ab0ab83e27cbcbeb4a67a2977aab1eeb..4d4c12da5ded73c931f6a115b19731afe63d557a 100644 --- a/reactions/APPLgrid/include/ReactionAPPLgrid.h +++ b/reactions/APPLgrid/include/ReactionAPPLgrid.h @@ -22,7 +22,8 @@ struct DatasetData{ double muR,muF; // !> renormalisation and factorisation scales bool flagNorm; // !> if true, multiply by bin width bool flagUseReference; // !> if true, prediction will be calculated from reference histogram (for tests and grids validation) - std::vector<TH1D*> references; + std::vector<TH1D*> references; // !> reference predictions + std::vector<int> emptyPoints; // !> optional numbers of empty points for manipulation with bins std::vector<double> eScale; // !> CMS energy double*scaleParameter=nullptr; // !> pointer to a minimization parameter which by which the predicted cross-section will be additionally multiplied. If this pointer is nullptr, no additional scaling is used. 
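+ // !> evolutions providing the PDFs for the two beams; ReactionAPPLgrid::compute()
+ // !> uses the single-PDF convolution when both entries point to the same evolution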
xfitter::BaseEvolution*evolutions[2]; diff --git a/reactions/APPLgrid/src/ReactionAPPLgrid.cc b/reactions/APPLgrid/src/ReactionAPPLgrid.cc index 132a1dad4f9deb854565d207b32a756a09eb86d1..162eb17f6c9ec2df8115d720c4f3e0f47fc57c08 100644 --- a/reactions/APPLgrid/src/ReactionAPPLgrid.cc +++ b/reactions/APPLgrid/src/ReactionAPPLgrid.cc @@ -36,15 +36,28 @@ void ReactionAPPLgrid::setDatasetParameters(int dataSetID, map<string,string> pa std::istringstream ss(it->second); std::string token; while(std::getline(ss, token, ',')){ - std::shared_ptr<appl::grid> g(new appl::grid(token)); - g->trim(); - data.grids.push_back(g); - TFile file(token.c_str()); - references.push_back((TH1D*)file.Get("grid/reference")); - if(!references.back()) - hf_errlog(17033000, "W: no reference histogram grid/reference in " + token); - else - references.back()->SetDirectory(0); + //std::cout << token << '\n'; + // dummy empty points (for bin manipulations etc.) + // GridName=DUMMYX where X is number of bins (e.g. GridName=DUMMY12 for 12 empty bins) + if(std::string(token.c_str(), 5) == std::string("DUMMY")) + { + int nb = atoi(token.c_str() + 5); + data.emptyPoints.push_back(nb); + data.grids.push_back(NULL); + } + else + { + std::shared_ptr<appl::grid> g(new appl::grid(token)); + g->trim(); + data.grids.push_back(g); + TFile file(token.c_str()); + references.push_back((TH1D*)file.Get("grid/reference")); + if(!references.back()) + hf_errlog(17033000, "W: no reference histogram grid/reference in " + token); + else + references.back()->SetDirectory(0); + data.emptyPoints.push_back(-1); + } } } catch ( const std::exception& e ) { @@ -165,44 +178,67 @@ int ReactionAPPLgrid::compute(int dataSetID, valarray<double> &val, map<string, const double muR=data.muR; const double muF=data.muF; BaseEvolution*(&evolutions)[2]=data.evolutions; - unsigned int pos = 0; - for(unsigned int g=0;g<data.grids.size();g++) + // iterate over grids + int pos = 0; + //printf("1val.size() = %d\n", val.size()); + //val.resize(0); + int np = 0; + for(unsigned int g = 0; g < data.grids.size(); g++) { - auto grid=data.grids[g]; - double eScale=data.eScale[g]; - std::vector<double> gridVals(grid->Nobs()); - if(!data.flagUseReference){ - //For some reason we do not take alphaS from evolutions? --Ivan - active_xfxQ_functions[0]=evolutions[0]->xfxQArray(); - if(evolutions[0]==evolutions[1]){ - gridVals=grid->vconvolute(xfxWrapper0,getAlphaS(),order-1,muR,muF,eScale); - }else{ - active_xfxQ_functions[1]=evolutions[1]->xfxQArray(); - gridVals=grid->vconvolute(xfxWrapper0,xfxWrapper1,getAlphaS(),order-1,muR,muF,eScale); - } - } + // dummy points + if(data.emptyPoints[g] > 0) + np += data.emptyPoints[g]; + // grids or reference histograms + else + np += data.grids[g]->Nobs(); + } + val.resize(np); + for(unsigned int g = 0; g < data.grids.size(); g++) + { + std::vector<double> gridVals; + // dummy points + if(data.emptyPoints[g] > 0) + gridVals = std::vector<double>(data.emptyPoints[g], 0.0); + // grids or reference histograms else { - // use reference histogram - for(std::size_t i=0; i<gridVals.size(); i++) - gridVals[i]=data.references[g]->GetBinContent(i + 1); + auto grid = data.grids[g]; + double eScale=data.eScale[g]; + gridVals = std::vector<double>(grid->Nobs()); + if(!data.flagUseReference) + { + //For some reason we do not take alphaS from evolutions? 
--Ivan + active_xfxQ_functions[0]=evolutions[0]->xfxQArray(); + if(evolutions[0]==evolutions[1]){ + gridVals=grid->vconvolute(xfxWrapper0,getAlphaS(),order-1,muR,muF,eScale); + }else{ + active_xfxQ_functions[1]=evolutions[1]->xfxQArray(); + gridVals=grid->vconvolute(xfxWrapper0,xfxWrapper1,getAlphaS(),order-1,muR,muF,eScale); + } + } + else + { + // use reference histogram + for(std::size_t i=0; i<gridVals.size(); i++) + gridVals[i]=data.references[g]->GetBinContent(i + 1); + } + // scale by bin width if requested + if(data.flagNorm) + for (std::size_t i=0; i<gridVals.size(); i++) + gridVals[i] *= grid->deltaobs(i); } - // scale by bin width if requested - if(data.flagNorm) - for (std::size_t i=0; i<gridVals.size(); i++) - gridVals[i] *= grid->deltaobs(i); - // insert values from this grid into output array //val.resize(val.size() + grid->Nobs()); std::copy_n(gridVals.begin(), gridVals.size(), &val[pos]); - pos += grid->Nobs(); - } - if(val.size()!=pos){//TODO: number of data points actually doesn't have to match grid size in some cases, so this check should be replaced by something else - std::ostringstream s; - s<<"F: ReactionAPPLgrid: Number of data points ("<<val.size()<<") in dataset (ID="<<dataSetID<<") does not match total grid size ("<<pos<<")"; - hf_errlog(18072311,s.str().c_str()); + pos += gridVals.size(); } + // SZ 27.03.2019 val.size()!=pos should be allowed for bin manipulations + //if(val.size()!=pos){//TODO: number of data points actually doesn't have to match grid size in some cases, so this check should be replaced by something else + // std::ostringstream s; + // s<<"F: ReactionAPPLgrid: Number of data points ("<<val.size()<<") in dataset (ID="<<dataSetID<<") does not match total grid size ("<<pos<<")"; + // hf_errlog(18072311,s.str().c_str()); + //} if(data.scaleParameter)val*=*data.scaleParameter; return 0; } diff --git a/reactions/BaseHVQMNR/include/MNR.h b/reactions/BaseHVQMNR/include/MNR.h index 462cc804185d7ff177fbff0e3b3ed6b422385f35..69dd1e361eaae1d85aa9c4995d9650b6b8e0f73f 100644 --- a/reactions/BaseHVQMNR/include/MNR.h +++ b/reactions/BaseHVQMNR/include/MNR.h @@ -44,6 +44,9 @@ namespace MNR // Calculate cross sections for provided grid and heavy-quark mass xm void CalcXS(Grid* grid, double xm); + // Get number of light flavours + int GetNl() { return fC_nl; } + // Private members private: @@ -86,6 +89,20 @@ namespace MNR bool bFS_Q; // particle final state bool bFS_A; // antiparticle final state + // PDF range + double fSF_min_x; + double fSF_max_x; + double fSF_min_mf2; + double fSF_max_mf2; + + // Perturbative scale coefficients + double fMf_A; + double fMf_B; + double fMf_C; + double fMr_A; + double fMr_B; + double fMr_C; + // Private fields private: // Constants @@ -99,20 +116,9 @@ namespace MNR double fC_sqrt_sh; // Normalisation factor double fC_xnorm; - // Perturbative scale coefficients - double fMf_A; - double fMf_B; - double fMf_C; - double fMr_A; - double fMr_B; - double fMr_C; // Variables for fast structure functions evaluation const static int fSF_npart; - const static double fSF_min_x; - const static double fSF_max_x; - const static double fSF_min_mf2; - const static double fSF_max_mf2; double fSF_log10_min_x; double fSF_log10_max_x; double fSF_min_adoptx; diff --git a/reactions/BaseHVQMNR/include/MNRFrag.h b/reactions/BaseHVQMNR/include/MNRFrag.h index 2f120ae167b211b58c801e8706bc18f238a664d7..a17d53f6fd1f819e412d3d9484792ad355aed673 100644 --- a/reactions/BaseHVQMNR/include/MNRFrag.h +++ b/reactions/BaseHVQMNR/include/MNRFrag.h @@ -95,6 +95,8 
@@ namespace MNR // is released, use ff = 10 and ff = 20 with great caution! static TF1* GetFragFunction(int ff, const char* meson, double par, double* mean = 0); + static double GetHadronMass(const char* meson); + // Private methods private: // Precalculate variables for provided grid and heavy-quark mass xm diff --git a/reactions/BaseHVQMNR/include/MNRGrid.h b/reactions/BaseHVQMNR/include/MNRGrid.h index 2f17859d5f34e1ea79573a7fac3c2dfec43fec05..01415702413586a3ff4eab5b03f30a21f514557e 100644 --- a/reactions/BaseHVQMNR/include/MNRGrid.h +++ b/reactions/BaseHVQMNR/include/MNRGrid.h @@ -70,6 +70,12 @@ namespace MNR // Get cross section (by reference) in specified bin inline double& CS(int contr, int bl, int by, int bw=0) { return fCS[contr][bl][by][bw]; }; + // Get alpha_s (by reference) in specified bin + inline double& AlphaS(int bl) { return fAs[bl]; }; + + // Get mu_r (by reference) in specified bin + inline double& MuR(int bl) { return fMr[bl]; }; + // Get number of pT (L) bins inline int NL() { return fNL; }; @@ -115,6 +121,10 @@ namespace MNR // Transformation from original grid (gridorig) to new one (gridtrg) // (using cubic spline interpolation) static void InterpolateGrid(Grid* gridorig, Grid* gridtrg, double mq); + static void InterpolateGrid(Grid* gridorig, Grid* gridtrg, double mq, Grid* gridorig_LO_massUp, double mq_masUp, Grid* gridorig_LO_massDown, double mq_masDown, int flag = 0, double* R = 0, int* nf = 0); + + // Transformation from pole mass scaheme into MSbar mass scheme + static void TransformGridToMSbarMassScheme(Grid* grid, Grid* gridLOMassUp, Grid* gridLOMassDown, double mq, double mqDiff); // Private fields private: @@ -138,5 +148,9 @@ namespace MNR int fNContr; // Contributions MNRContribution** fContr; + // alpha_s in pT (L) bins + double* fAs; + // mu_r in pT (L) bins + double* fMr; }; } diff --git a/reactions/BaseHVQMNR/include/ReactionBaseHVQMNR.h b/reactions/BaseHVQMNR/include/ReactionBaseHVQMNR.h index b75626275bc1bc229944ce4d063ba860da7d6ad9..c816e2f41ff43fe1a2593a122868f7ce09843567 100644 --- a/reactions/BaseHVQMNR/include/ReactionBaseHVQMNR.h +++ b/reactions/BaseHVQMNR/include/ReactionBaseHVQMNR.h @@ -60,19 +60,38 @@ class ReactionBaseHVQMNR : public ReactionTheory struct Parameters { // heavy-quark masses - double mc, mb; + double mc = 0.0; + double mb = 0.0; + // flavour (used by cbdiff reaction) + char flav; + // flag that mass is taken from global parameters and should be updated at each iteration + bool flagMassIsGlobal = false; // scale parameters - double mf_A_c, mf_B_c, mf_C_c; - double mr_A_c, mr_B_c, mr_C_c; - double mf_A_b, mf_B_b, mf_C_b; - double mr_A_b, mr_B_b, mr_C_b; + double mf_A_c = 0.0; + double mf_B_c = 0.0; + double mf_C_c = 0.0; + double mr_A_c = 0.0; + double mr_B_c = 0.0; + double mr_C_c = 0.0; + double mf_A_b = 0.0; + double mf_B_b = 0.0; + double mf_C_b = 0.0; + double mr_A_b = 0.0; + double mr_B_b = 0.0; + double mr_C_b = 0.0; // fragmentation parameters - double fragpar_c, fragpar_b; + double fragpar_c = 0.0; + double fragpar_b = 0.0; + // divide by bin width + bool flagDivideBinWidth = false; + // verbose output for debugging + bool debug = false; }; // structure to store steering parameters struct Steering { + int nf; double ptmin; double ptmax; int npt; @@ -84,6 +103,13 @@ class ReactionBaseHVQMNR : public ReactionTheory int nx3; int nx4; int nbz; + double xmin; + double xmax; + double mf2min; + double mf2max; + bool q; + bool a; + char flav; // c, b or t }; // all datasets @@ -102,6 +128,8 @@ class ReactionBaseHVQMNR : 
public ReactionTheory // status flags bool _isInitAtStart; //int _ifcncount_last; + // heavy-quark mass + //std::map // check if appropriate heavy-flavour scheme is used void CheckHFScheme(); @@ -110,15 +138,18 @@ class ReactionBaseHVQMNR : public ReactionTheory void UpdateParameters(); // print theory parameters - void PrintParameters() const; + void PrintParameters(Parameters const* pars = NULL) const; // initialise calculation with default parameters void DefaultInit(const Steering& steer, const double mq, MNR::MNR& mnr, MNR::Frag& frag, MNR::Grid& grid, MNR::Grid& grid_smoothed); + void DefaultInitMNR(const Steering& steer, const double mq, MNR::MNR& mnr); + void DefaultInitGrid(const Steering& steer, const double mq, const int npt, MNR::Grid& grid); + void DefaultInitFrag(const Steering& steer, MNR::Frag& frag); // return cross section in provided pT-y bin double FindXSecPtYBin(const TH2* histXSec, const double ymin, const double ymax, const double ptmin, const double ptmax, const bool diff_pt, const bool diff_y); - private: + //private: // check equality of float numbers with tolerance bool IsEqual(const double val1, const double val2, const double eps = 1e-6); @@ -130,9 +161,45 @@ class ReactionBaseHVQMNR : public ReactionTheory int readFromTermInfo(const std::string& str, const std::string& key, std::string& value);*/ // read parameters for perturbative scales from MINUIT extra parameters - void GetMuPar(const char mu, const char q, double& A, double& B, double& C); + void GetMuPar(const char mu, const char q, double& A, double& B, double& C, const map<string,string> pars = map<string,string>()); // read fragmentation parameter from MINUIT extra parameters - double GetFragPar(const char q); + double GetFragPar(const char q, const map<string,string> pars = map<string,string>()); + + // check parameter respecting priority: (1) supplied map (if supplied), (2) global + bool checkParamInPriority(const string& name, const std::map<string,string> pars = std::map<string,string>()) const + { + if(pars.size() != 0) + return (pars.find(name) != pars.end()); + else + return checkParam(name); + } + + // get parameter respecting priority: (1) supplied map (if supplied), (2) global + double GetParamInPriority(const string& name, const std::map<string,string> pars = std::map<string,string>()) const + { + if(pars.find(name) != pars.end()) + return std::stod(pars.at(name)); + else + return GetParam(name); + } + + // get parameter respecting priority: (1) supplied map (if supplied), (2) global + int GetParamIInPriority(const string& name, const std::map<string,string> pars = std::map<string,string>()) const + { + if(pars.find(name) != pars.end()) + return std::stod(pars.at(name)); + else + return GetParamI(name); + } + + // get parameter respecting priority: (1) supplied map (if supplied), (2) global + std::string GetParamSInPriority(const string& name, const std::map<string,string> pars = std::map<string,string>()) const + { + if(pars.find(name) != pars.end()) + return pars.at(name); + else + return GetParamS(name); + } }; diff --git a/reactions/BaseHVQMNR/src/MNR.cc b/reactions/BaseHVQMNR/src/MNR.cc index c2884d2091dcb905449a60392731667ba70daab8..f3da8279386e5d9255df617e185a39e285f03b77 100644 --- a/reactions/BaseHVQMNR/src/MNR.cc +++ b/reactions/BaseHVQMNR/src/MNR.cc @@ -152,6 +152,7 @@ namespace MNR void MNR::PrecalculatePDF(double mf2) { + //printf("mf2 = %f\n", mf2); if(mf2 < fSF_min_mf2 || mf2 > fSF_max_mf2) { printf("WARNING in MNR::PrecalculatePDF(): mf2 %e out of range %e .. 
%e\n", mf2, fSF_min_mf2, fSF_max_mf2); @@ -499,6 +500,7 @@ namespace MNR this->PrecalculatePDF(mf2); // Renormalisation scale double mr2 = this->GetMr2(xm2, pt2); + double mr = TMath::Sqrt(mr2); if(mr2 <= 0.0) { grid->NonPhys(c_l); @@ -508,6 +510,10 @@ namespace MNR double as = GetAs(mr2); double as2 = as * as; double as3 = as2 * as; + // store alpha_s in grid + grid->AlphaS(c_l) = as; + // store mu_r in grid + grid->MuR(c_l) = mr; // Ratios of scales double xmf = TMath::Log(mf2 / xm2); double xmr = 4 * fC_pi * fC_b0 * TMath::Log(mr2 / mf2); @@ -657,8 +663,8 @@ namespace MNR const double MNR::fC_vca = 3.0e0; const double MNR::fC_vtf = 0.5e0; const int MNR::fSF_npart = 13; - const double MNR::fSF_min_x = 1e-6; - const double MNR::fSF_max_x = 1e0; - const double MNR::fSF_min_mf2 = 1e0; - const double MNR::fSF_max_mf2 = 8e4; + //const double MNR::fSF_min_x = 1e-6; + //const double MNR::fSF_max_x = 1e0; + //const double MNR::fSF_min_mf2 = 1e0; + //const double MNR::fSF_max_mf2 = 8e4; } diff --git a/reactions/BaseHVQMNR/src/MNRFrag.cc b/reactions/BaseHVQMNR/src/MNRFrag.cc index b4923917cd1bb030b3bca1150080d281fbb2cc39..94093e0a52c3b0c8d1fae638ba26bfdaa5b9d97b 100644 --- a/reactions/BaseHVQMNR/src/MNRFrag.cc +++ b/reactions/BaseHVQMNR/src/MNRFrag.cc @@ -557,6 +557,29 @@ namespace MNR return p[0] * TMath::Power(x[0], -1.) * TMath::Power(1. - 1./x[0] - p[1]/(1.-x[0]), -2.); } + double Frag::GetHadronMass(const char* meson) + { + if(std::string(meson) == "dzero") + return fM_dzero; + else if(std::string(meson) == "dch") + return fM_dch; + else if(std::string(meson) == "dstar") + return fM_dstar; + else if(std::string(meson) == "ds") + return fM_ds; + else if(std::string(meson) == "lambdac") + return fM_lambdac; + else if(std::string(meson) == "bzero") + return fM_bzero; + else if(std::string(meson) == "bch") + return fM_bch; + else if(std::string(meson) == "bs") + return fM_bs; + else + return -1.0; + } + + // Values from PDG const double Frag::fM_dzero = 1.865; const double Frag::fM_dch = 1.867; diff --git a/reactions/BaseHVQMNR/src/MNRGrid.cc b/reactions/BaseHVQMNR/src/MNRGrid.cc index 839e682937eefe96b5f1e50b41777e849cc92dc3..c4d8c8b7d58f41488e36e18733dd937e1f54d2d1 100644 --- a/reactions/BaseHVQMNR/src/MNRGrid.cc +++ b/reactions/BaseHVQMNR/src/MNRGrid.cc @@ -11,6 +11,8 @@ namespace MNR fNY = 0; fNW = 0; fL = NULL; + fAs = NULL; + fMr = NULL; fY = NULL; fW = NULL; fBW = NULL; @@ -28,6 +30,8 @@ namespace MNR fNY = 0; fNW = 0; fL = NULL; + fAs = NULL; + fMr = NULL; fY = NULL; fW = NULL; fBW = NULL; @@ -43,6 +47,8 @@ namespace MNR { //printf("OZ Grid::~Grid()\n"); if(fL) delete fL; + if(fAs) delete fAs; + if(fMr) delete fMr; if(fY) delete fY; if(fW) delete fW; if(fBW) delete fBW; @@ -108,6 +114,12 @@ namespace MNR double pt = TMath::Power(minpower + i * steppower, 1.0 / power); fL[i] = xm2 / (xm2 + pt * pt); } + // array for alpha_s values + if(fAs) delete fAs; + fAs = new double[fNL]; + // array for mu_r values + if(fMr) delete fMr; + fMr = new double[fNL]; } void Grid::FillPt(double* ptall, double xm) @@ -255,5 +267,126 @@ namespace MNR TSpline3 spline("", spline_x, spline_y, nlorig); for(int l = 0; l < nltrg; l++) gridtrg->CS(c,l,y,w) = spline.Eval(ltrg[l]); } + // interpolate as and mu_r + double spline_y_as[nlorig], spline_y_mr[nlorig]; + for(int l = 0; l < nlorig; l++) + { + spline_y_as[nlorig-1-l] = gridorig->AlphaS(l); + spline_y_mr[nlorig-1-l] = gridorig->MuR(l); + } + TSpline3 spline_as("", spline_x, spline_y_as, nlorig); + TSpline3 spline_mr("", spline_x, spline_y_mr, nlorig); + for(int 
l = 0; l < nltrg; l++) + { + gridtrg->AlphaS(l) = spline_as.Eval(ltrg[l]); + gridtrg->MuR(l) = spline_mr.Eval(ltrg[l]); + } + } + + // Different mass schemes: + // flag = 0: MSbar mass scheme with mu = mu_R + // flag = 1: MSbar mass scheme with mu = mu_R and l != 0 + // flag = 2: MSR mass scheme with R provided (nf should be provided - number of flavours) + void Grid::InterpolateGrid(Grid *gridorig, Grid *gridtrg, double mq, Grid *gridorig_LO_massUp, double mq_masUp, Grid *gridorig_LO_massDown, double mq_masDown, int flag, double* R, int* nf) + { + double mqDiff = mq_masUp - mq_masDown; + // Get pT array of target grid + double pt[gridtrg->fNL]; + gridtrg->FillPt(pt, mq); + // Transform this pT array into array of L = m^2 / (m^2 + pT^2) + double mq2 = mq * mq; + double mq_masUp2 = mq_masUp * mq_masUp; + double mq_masDown2 = mq_masDown * mq_masDown; + int nltrg = gridtrg->NL(); + double* ltrg = gridtrg->LPtr(); + for(int i = 0; i < nltrg; i++) ltrg[i] = mq2 / (mq2 + pt[i] * pt[i]); + // For spline: prepare L array of original grid in reversed order + int nlorig = gridorig->NL(); + double* lorig = gridorig->LPtr(); + double* lorig_LO_massUp = gridorig_LO_massUp->LPtr(); + double* lorig_LO_massDown = gridorig_LO_massDown->LPtr(); + double spline_x[3][nlorig], spline_y[5][nlorig]; + for(int i = 0; i < nlorig; i++) + { + spline_x[0][i] = mq2 / lorig[i] - mq2; + spline_x[1][i] = mq_masUp2 / lorig_LO_massUp[i] - mq_masUp2; + spline_x[2][i] = mq_masDown2 / lorig_LO_massDown[i] - mq_masDown2; + } + // Loop over contributions + for(int c = 0; c < gridorig->GetNContr(); c++) + // Loop over y bins + for(int y = 0; y < gridorig->NY(); y++) + // Loop over W bins + for(int w = 0; w < gridorig->NW(); w++) + { + // For spline: prepare X-section array of original grid in reversed order + for(int l = 0; l < nlorig; l++) + { + spline_y[0][l] = gridorig->CS(c,l,y,w); + spline_y[1][l] = gridorig_LO_massUp->CS(c,l,y,w); + spline_y[2][l] = gridorig_LO_massDown->CS(c,l,y,w); + spline_y[3][l] = gridorig->AlphaS(l); + spline_y[4][l] = gridorig->MuR(l); + } + // Spline interpolation + TSpline3 spline("", spline_x[0], spline_y[0], nlorig); + TSpline3 spline_LO_massUp("", spline_x[1], spline_y[1], nlorig); + TSpline3 spline_LO_massDown("", spline_x[2], spline_y[2], nlorig); + TSpline3 spline_as("", spline_x[0], spline_y[3], nlorig); + TSpline3 spline_mr("", spline_x[0], spline_y[4], nlorig); + for(int l = 0; l < nltrg; l++) + { + double pt2 = pt[l] * pt[l]; + double xsecOld = spline.Eval(pt2); + double xsecOld_LO_massUp = spline_LO_massUp.Eval(pt2); + double xsecOld_LO_massDown = spline_LO_massDown.Eval(pt2); + double delta = 0.0; + if(flag == 0 || flag == 1) + { + double as = spline_as.Eval(pt2); + double mr = spline_mr.Eval(pt2); + double d1 = 4.0 / 3.0; + if(flag == 1) + d1 += 2.0 * TMath::Log(mr / mq); + delta = as / TMath::Pi() * d1 * mq * (xsecOld_LO_massUp - xsecOld_LO_massDown) / mqDiff; + } + else if(flag == 2) + { + double as = spline_as.Eval(*R); + double b0 = 11.0 - 2.0 / 3.0 * (*nf); + double a1 = 2 * b0 * 0.348; + delta = (*R) * a1 * as / (4.0 * TMath::Pi()); + } + double xsecNew = xsecOld + delta; + gridtrg->CS(c,l,y,w) = xsecNew; + } + } + } + + void Grid::TransformGridToMSbarMassScheme(Grid *grid, Grid *gridLOMassUp, Grid *gridLOMassDown, double mq, double mqDiff) + { + // Loop over contributions + for(int c = 0; c < grid->GetNContr(); c++) + { + // Loop over y bins + for(int y = 0; y < grid->NY(); y++) + { + // Loop over W bins + for(int w = 0; w < grid->NW(); w++) + { + // Loop over pT (L) bins + 
for(int l = 0; l < grid->NL(); l++) + { + double as = grid->AlphaS(l); + //printf("as = %f\n", as); + double d1 = 4.0 / 3.0; + double xsecNew = grid->CS(c,l,y,w) + as / TMath::Pi() * d1 * mq * (gridLOMassUp->CS(c,l,y,w) - gridLOMassDown->CS(c,l,y,w)) / (2 * mqDiff); + if(y == 20) + printf("c,y,w,l,g,u,d: %d %d %d %d %f[%f] %f[%f] %f[%f] --> %f [%.2f]\n", c, y, w, l, grid->CS(c,l,y,w), grid->LPtr()[l], gridLOMassUp->CS(c,l,y,w), gridLOMassUp->LPtr()[l], gridLOMassDown->CS(c,l,y,w), gridLOMassDown->LPtr()[l], xsecNew, xsecNew / grid->CS(c,l,y,w) * 100); + grid->CS(c,l,y,w) = xsecNew; + } + } + } + } } } diff --git a/reactions/BaseHVQMNR/src/Makefile.am b/reactions/BaseHVQMNR/src/Makefile.am index 00b751efcab6831ec1358ff035764bed24848d62..2755e48618f081be390097c401708dc3c4ab989f 100644 --- a/reactions/BaseHVQMNR/src/Makefile.am +++ b/reactions/BaseHVQMNR/src/Makefile.am @@ -3,13 +3,14 @@ if HAVE_ROOT -AM_CXXFLAGS = $(ROOT_CFLAGS) -I$(srcdir)/../include -I$(srcdir)/../../../include -I$(srcdir)/../../../interfaces/include -Wall -fPIC -Wno-deprecated +AM_CXXFLAGS = $(ROOT_CFLAGS) -I$(srcdir)/../include -I$(srcdir)/../../../include -I$(srcdir)/../../../interfaces/include -Wall -fPIC -Wno-deprecated lib_LTLIBRARIES = libbasehvqmnr_xfitter.la libbasehvqmnr_xfitter_la_SOURCES = ReactionBaseHVQMNR.cc MNR.cc MNRFrag.cc MNRGrid.cc hvqcrsx.f -# libbasehvqmnr_xfitter_la_LDFLAGS = place_if_needed +# libbasehvqmnr_xfitter_la_LDFLAGS = place_if_needed +libbasehvqmnr_xfitter_la_LDFLAGS = $(ROOT_LIBS) -endif +endif dist_noinst_HEADERS = ../include ../yaml diff --git a/reactions/BaseHVQMNR/src/ReactionBaseHVQMNR.cc b/reactions/BaseHVQMNR/src/ReactionBaseHVQMNR.cc index ece984d0ef81918c6ec1b974d3f49a51e8d114b4..3a31a37f63f1ba32b6e54a38eba440f8732cc373 100644 --- a/reactions/BaseHVQMNR/src/ReactionBaseHVQMNR.cc +++ b/reactions/BaseHVQMNR/src/ReactionBaseHVQMNR.cc @@ -87,8 +87,8 @@ void ReactionBaseHVQMNR::setDatasetParameters(int dataSetID, map<string,string> ds.BinsYMax = GetBinValues(dataSetID, "ymax"); ds.BinsPtMin = GetBinValues(dataSetID, "pTmin"); ds.BinsPtMax = GetBinValues(dataSetID, "pTmax"); - if (ds.BinsYMin == NULL || ds.BinsYMax == NULL || ds.BinsPtMin == NULL || ds.BinsPtMax == NULL ) - hf_errlog(16123004, "F: No bins ymin or ymax or ptmin or ptmax"); + //if (ds.BinsYMin == NULL || ds.BinsYMax == NULL || ds.BinsPtMin == NULL || ds.BinsPtMax == NULL ) + // hf_errlog(16123004, "F: No bins ymin or ymax or ptmin or ptmax"); // set reference y bins if needed if(ds.NormY == 1) { @@ -116,23 +116,42 @@ bool ReactionBaseHVQMNR::IsEqual(const double val1, const double val2, const dou // initialise calculation with default parameters void ReactionBaseHVQMNR::DefaultInit(const Steering& steer, const double mq, MNR::MNR& mnr, MNR::Frag& frag, MNR::Grid& grid, MNR::Grid& grid_smoothed) { - // MNR (parton level cross sections) - mnr.bFS_Q = true; - mnr.bFS_A = true; + DefaultInitMNR(steer, mq, mnr); + DefaultInitGrid(steer, mq, steer.npt, grid); + DefaultInitGrid(steer, mq, steer.nptsm, grid_smoothed); + DefaultInitFrag(steer, frag); +} + +void ReactionBaseHVQMNR::DefaultInitMNR(const ReactionBaseHVQMNR::Steering &steer, const double mq, MNR::MNR &mnr) +{ + // MNR parton level cross sections, quark-antiquark contributions + mnr.bFS_Q = steer.q; + mnr.bFS_A = steer.a; + // number of light flavours + mnr.fC_nl = steer.nf; // x3 and x4 binning mnr.fBn_x3 = steer.nx3; mnr.fBn_x4 = steer.nx4; mnr.fSF_nb = steer.nsfnb; + // PDF range + mnr.fSF_min_x = steer.xmin; + mnr.fSF_max_x = steer.xmax; + mnr.fSF_min_mf2 = 
steer.mf2min; + mnr.fSF_max_mf2 = steer.mf2max; + // precalculation (memory allocation etc.) mnr.CalcBinning(); - // Number of flavours - mnr.fC_nl = 3; +} + +void ReactionBaseHVQMNR::DefaultInitGrid(const ReactionBaseHVQMNR::Steering &steer, const double mq, const int npt, MNR::Grid &grid) +{ // Parton level pT-y grids - grid.SetL(steer.npt, steer.ptmin, steer.ptmax, mq); + grid.SetL(npt, steer.ptmin, steer.ptmax, mq); grid.SetY(steer.ny, steer.ymin, steer.ymax); grid.SetW(1); - grid_smoothed.SetL(steer.nptsm, steer.ptmin, steer.ptmax, mq); - grid_smoothed.SetY(steer.ny, steer.ymin, steer.ymax); - grid_smoothed.SetW(1); +} + +void ReactionBaseHVQMNR::DefaultInitFrag(const ReactionBaseHVQMNR::Steering &steer, MNR::Frag &frag) +{ // Fragmentation frag.SetNz(steer.nbz); } @@ -191,7 +210,7 @@ void ReactionBaseHVQMNR::CheckHFScheme() } // read parameters for perturbative scales from MINUIT extra parameters -void ReactionBaseHVQMNR::GetMuPar(const char mu, const char q, double& A, double& B, double& C) +void ReactionBaseHVQMNR::GetMuPar(const char mu, const char q, double& A, double& B, double& C, const map<std::string, std::string> pars) { // *********************************************************************************************** // Scales for charm and beauty production are parametrised as: @@ -226,39 +245,39 @@ void ReactionBaseHVQMNR::GetMuPar(const char mu, const char q, double& A, double std::string baseParameterName = "MNRm" + std::string(1, mu); // A and B parameters - if(checkParam(baseParameterName + "_AB")) - A = B = GetParam(baseParameterName + "_AB"); + if(GetParamInPriority(baseParameterName + "_AB", pars)) + A = B = GetParamInPriority(baseParameterName + "_AB", pars); else { - if(checkParam(baseParameterName + "_A") && checkParam(baseParameterName + "_B")) + if(checkParamInPriority(baseParameterName + "_A", pars) && checkParamInPriority(baseParameterName + "_B", pars)) { - A = GetParam(baseParameterName + "_A"); - B = GetParam(baseParameterName + "_B"); + A = GetParamInPriority(baseParameterName + "_A", pars); + B = GetParamInPriority(baseParameterName + "_B", pars); } else { - if(checkParam(baseParameterName + "_AB_" + std::string(1, q))) - A = B = GetParam(baseParameterName + "_AB_" + std::string(1, q)); + if(checkParamInPriority(baseParameterName + "_AB_" + std::string(1, q), pars)) + A = B = GetParamInPriority(baseParameterName + "_AB_" + std::string(1, q), pars); else { - if(checkParam(baseParameterName + "_A_" + std::string(1, q))) - A = GetParam(baseParameterName + "_A_" + std::string(1, q)); + if(checkParamInPriority(baseParameterName + "_A_" + std::string(1, q), pars)) + A = GetParamInPriority(baseParameterName + "_A_" + std::string(1, q), pars); else A = defA; - if(checkParam(baseParameterName + "_B_" + std::string(1, q))) - B = GetParam(baseParameterName + "_B_" + std::string(1, q)); + if(checkParamInPriority(baseParameterName + "_B_" + std::string(1, q), pars)) + B = GetParamInPriority(baseParameterName + "_B_" + std::string(1, q), pars); else B = defB; } } } // C parameter - if(checkParam(baseParameterName + "_C")) - C = GetParam(baseParameterName + "_C"); + if(checkParamInPriority(baseParameterName + "_C", pars)) + C = GetParamInPriority(baseParameterName + "_C", pars); else { - if(checkParam(baseParameterName + "_C_" + std::string(1, q))) - C = GetParam(baseParameterName + "_C_" + std::string(1, q)); + if(checkParamInPriority(baseParameterName + "_C_" + std::string(1, q), pars)) + C = GetParamInPriority(baseParameterName + "_C_" + std::string(1, q), pars); 
else C = defC; } @@ -266,7 +285,7 @@ void ReactionBaseHVQMNR::GetMuPar(const char mu, const char q, double& A, double // read fragmentation parameter from MINUIT extra parameters -double ReactionBaseHVQMNR::GetFragPar(const char q) +double ReactionBaseHVQMNR::GetFragPar(const char q, const map<string,string> pars) { // ********************************************************************* // Parameters for non-perturbative fragmentation can be provided @@ -282,7 +301,7 @@ double ReactionBaseHVQMNR::GetFragPar(const char q) double parvalue = NAN; char parname[16]; sprintf(parname, "MNRfrag_%c", q); - if(!checkParam(parname)) + if(!checkParamInPriority(parname, pars)) { // parameter not in ExtraParamMinuit -> using default value if(q == 'c') @@ -293,7 +312,7 @@ double ReactionBaseHVQMNR::GetFragPar(const char q) hf_errlog(17102103, "F: no default value for q = " + std::string(1, q) + " in ReactionBaseHVQMNR::GetFragPar()"); } else - parvalue = GetParam(parname); + parvalue = GetParamInPriority(parname, pars); // TODO check below whether it is still relevant /* ! parameter in ExtraParamMinuit, but not in MINUIT: this happens, if we are not in 'Fit' mode -> using default value @@ -326,18 +345,26 @@ void ReactionBaseHVQMNR::UpdateParameters() // fragmentation parameters _pars.fragpar_c = GetFragPar('c'); _pars.fragpar_b = GetFragPar('b'); + + // protection against not positive or nan masses + if(_pars.mc <= 0.0 || _pars.mc != _pars.mc) + _pars.mc = 1000.0; + if(_pars.mb <= 0.0 || _pars.mb != _pars.mb) + _pars.mb = 1000.0; } // print theory parameters -void ReactionBaseHVQMNR::PrintParameters() const +void ReactionBaseHVQMNR::PrintParameters(Parameters const* pars) const { + if(pars == NULL) + pars = &(this->_pars); printf("MNR scale parameters:\n"); - printf("%f %f %f\n", _pars.mf_A_c, _pars.mf_B_c, _pars.mf_C_c); - printf("%f %f %f\n", _pars.mr_A_c, _pars.mr_B_c, _pars.mr_C_c); - printf("%f %f %f\n", _pars.mf_A_b, _pars.mf_B_b, _pars.mf_C_b); - printf("%f %f %f\n", _pars.mr_A_b, _pars.mr_B_b, _pars.mr_C_b); + printf("%f %f %f\n", pars->mf_A_c, pars->mf_B_c, pars->mf_C_c); + printf("%f %f %f\n", pars->mr_A_c, pars->mr_B_c, pars->mr_C_c); + printf("%f %f %f\n", pars->mf_A_b, pars->mf_B_b, pars->mf_C_b); + printf("%f %f %f\n", pars->mr_A_b, pars->mr_B_b, pars->mr_C_b); printf("MNR masses:\n"); - printf("mc = %f mb = %f\n", _pars.mc, _pars.mb); + printf("mc = %f mb = %f\n", pars->mc, pars->mb); printf("MNR fragmentation parameters:\n"); - printf("fragpar_c = %f fragpar_b = %f\n", _pars.fragpar_c, _pars.fragpar_b); + printf("fragpar_c = %f fragpar_b = %f\n", pars->fragpar_c, pars->fragpar_b); } diff --git a/reactions/FFABM_DISCC/src/Makefile.am b/reactions/FFABM_DISCC/src/Makefile.am index 59a2e9cadbd79d1795756aef06dda31d6b21a4bf..558892749eceacd7f6cfdcd0f4ad6b7fef36a74e 100644 --- a/reactions/FFABM_DISCC/src/Makefile.am +++ b/reactions/FFABM_DISCC/src/Makefile.am @@ -1,12 +1,12 @@ # Created by AddReaction.py on 2017-10-09 -AM_CXXFLAGS = -I$(srcdir)/../include -I$(srcdir)/../../../include -I$(srcdir)/../../../interfaces/include -I$(srcdir)/../../../reactions/BaseDISCC/include -Wall -fPIC -Wno-deprecated +AM_CXXFLAGS = -I$(srcdir)/../include -I$(srcdir)/../../../include -I$(srcdir)/../../../interfaces/include -I$(srcdir)/../../../reactions/BaseDISCC/include -Wall -fPIC -Wno-deprecated lib_LTLIBRARIES = libffabm_discc_xfitter.la libffabm_discc_xfitter_la_SOURCES = ReactionFFABM_DISCC.cc -libffabm_discc_xfitter_la_LDFLAGS = -lbasediscc_xfitter -L$(top_srcdir)/reactions/BaseDISCC/src/.libs 
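Editor note on the GetMuPar/GetFragPar changes above: scale and fragmentation parameters are now read through checkParamInPriority/GetParamInPriority, so a value attached to the individual dataset takes precedence over the global steering/MINUIT value, with the hard-coded defaults (defA/defB/defC) as the last resort. The xFitter helpers themselves are not reproduced here; the sketch below only illustrates the intended precedence with standard containers, and every name in it (lookupWithPriority, localPars, globalPars) is a hypothetical stand-in.

#include <cstdio>
#include <cstdlib>
#include <map>
#include <string>

// Minimal sketch of the "dataset-local value wins over global value" lookup.
// Illustrative only; not the actual xFitter API.
static double lookupWithPriority(const std::string& name,
                                 const std::map<std::string, std::string>& localPars,
                                 const std::map<std::string, double>& globalPars,
                                 double defaultValue)
{
  auto itLocal = localPars.find(name);
  if (itLocal != localPars.end())        // dataset-level entry wins
    return std::atof(itLocal->second.c_str());
  auto itGlobal = globalPars.find(name);
  if (itGlobal != globalPars.end())      // otherwise fall back to the global parameter
    return itGlobal->second;
  return defaultValue;                   // finally the hard-coded default
}

int main()
{
  std::map<std::string, std::string> localPars  = { {"MNRmr_A_c", "1.0"} };
  std::map<std::string, double>      globalPars = { {"MNRmr_A_c", 0.5}, {"MNRmr_B_c", 0.5} };
  std::printf("A = %f\n", lookupWithPriority("MNRmr_A_c", localPars, globalPars, 1.0)); // 1.0 (local)
  std::printf("B = %f\n", lookupWithPriority("MNRmr_B_c", localPars, globalPars, 1.0)); // 0.5 (global)
  std::printf("C = %f\n", lookupWithPriority("MNRmr_C_c", localPars, globalPars, 0.0)); // 0.0 (default)
  return 0;
}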
+libffabm_discc_xfitter_la_LDFLAGS = -lmyabm -lbasediscc_xfitter -L$(top_srcdir)/reactions/BaseDISCC/src/.libs -L$(top_srcdir)/ABM/src/.libs datadir = ${prefix}/yaml/reactions/FFABM_DISCC data_DATA = ../yaml/parameters.yaml diff --git a/reactions/FFABM_DISCC/src/ReactionFFABM_DISCC.cc b/reactions/FFABM_DISCC/src/ReactionFFABM_DISCC.cc index 7fbfc7055183de0aaff1ed556a34ad4f188866a0..39615448c62b294946b1017ba77a1ff2af754273 100644 --- a/reactions/FFABM_DISCC/src/ReactionFFABM_DISCC.cc +++ b/reactions/FFABM_DISCC/src/ReactionFFABM_DISCC.cc @@ -1,4 +1,4 @@ - + /* @file ReactionFFABM_DISCC.cc @date 2017-10-09 diff --git a/reactions/FFABM_DISNC/src/Makefile.am b/reactions/FFABM_DISNC/src/Makefile.am index f38b30a026bd3ce0a6dc6da96db732f499d2f01d..6fe39d7cfda01cc968069d599c0f3e08e5eea02b 100644 --- a/reactions/FFABM_DISNC/src/Makefile.am +++ b/reactions/FFABM_DISNC/src/Makefile.am @@ -1,12 +1,12 @@ # Created by AddReaction.py on 2017-09-29 -AM_CXXFLAGS = -I$(srcdir)/../include -I$(srcdir)/../../../include -I$(srcdir)/../../../interfaces/include -I$(srcdir)/../../../reactions/BaseDISNC/include -Wall -fPIC -Wno-deprecated +AM_CXXFLAGS = -I$(srcdir)/../include -I$(srcdir)/../../../include -I$(srcdir)/../../../interfaces/include -I$(srcdir)/../../../reactions/BaseDISNC/include -Wall -fPIC -Wno-deprecated lib_LTLIBRARIES = libffabm_disnc_xfitter.la libffabm_disnc_xfitter_la_SOURCES = ReactionFFABM_DISNC.cc -libffabm_disnc_xfitter_la_LDFLAGS = -lbasedisnc_xfitter -L$(top_srcdir)/reactions/BaseDISNC/src/.libs +libffabm_disnc_xfitter_la_LDFLAGS = -lbasedisnc_xfitter -L$(top_srcdir)/reactions/BaseDISNC/src/.libs -l:libmyabm.a -L$(top_srcdir)/ABM/src/.libs datadir = ${prefix}/yaml/reactions/FFABM_DISNC data_DATA = ../yaml/parameters.yaml diff --git a/reactions/FFABM_DISNC/src/ReactionFFABM_DISNC.cc b/reactions/FFABM_DISNC/src/ReactionFFABM_DISNC.cc index f4c6f722bc45d3c693f2e49798990326b292a39b..46202eb42a93c4f363f237a587485bbca71062ea 100644 --- a/reactions/FFABM_DISNC/src/ReactionFFABM_DISNC.cc +++ b/reactions/FFABM_DISNC/src/ReactionFFABM_DISNC.cc @@ -35,7 +35,7 @@ extern "C" { struct COMMON_masses { double rmass[150]; - double rmassp[150]; + double rmassp[50]; double rcharge[150]; }; extern COMMON_masses masses_; @@ -74,9 +74,11 @@ int ReactionFFABM_DISNC::atStart(const string &s) // heavy quark masses double rmass8in = GetParam("mch"); masses_.rmass[7] = rmass8in; + masses_.rcharge[7] = 0.6666666; _mc = rmass8in; double rmass10in = GetParam("mbt"); masses_.rmass[9] = rmass10in; + masses_.rcharge[9] = 0.3333333; _mb = rmass10in; printf("---------------------------------------------\n"); diff --git a/reactions/HVQMNR_LHCb_7TeV_beauty/src/ReactionHVQMNR_LHCb_7TeV_beauty.cc b/reactions/HVQMNR_LHCb_7TeV_beauty/src/ReactionHVQMNR_LHCb_7TeV_beauty.cc index ea10469304661e03bdead956b8d4bc151bb515bb..604a324576cf83561e01d3cf7eda6ef1a0ebe431 100644 --- a/reactions/HVQMNR_LHCb_7TeV_beauty/src/ReactionHVQMNR_LHCb_7TeV_beauty.cc +++ b/reactions/HVQMNR_LHCb_7TeV_beauty/src/ReactionHVQMNR_LHCb_7TeV_beauty.cc @@ -46,6 +46,9 @@ int ReactionHVQMNR_LHCb_7TeV_beauty::atStart(const string &s) // stereing parameters for this calculation (modify only if you understand what you are doing) Steering steer; + steer.q = true; + steer.a = true; + steer.nf = 3; steer.ptmin = 0.001; steer.ptmax = 70.0; steer.npt = 35; @@ -57,6 +60,10 @@ int ReactionHVQMNR_LHCb_7TeV_beauty::atStart(const string &s) steer.nx3 = 125; steer.nx4 = 125; steer.nbz = 100; + steer.xmin = 1e-6; + steer.xmax = 1e0; + steer.mf2min = 1e0; + steer.mf2max = 8e4; 
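Editor note on the new steering fields set above (steer.xmin/xmax and steer.mf2min/mf2max): they replace the PDF-grid limits that used to be fixed static constants in MNR.cc (fSF_min_x = 1e-6, fSF_max_x = 1, fSF_min_mf2 = 1 GeV^2, fSF_max_mf2 = 8e4 GeV^2, now commented out), and the values chosen here reproduce those old defaults. The snippet below is only an illustrative guard, under the assumption that the MNR code warns when the requested factorisation scale falls outside the tabulated range; function and variable names are invented for the example.

#include <cstdio>

// Illustrative range guard (not the actual MNR code): warn when mf^2 lies
// outside the PDF grid range that is now configurable via the steering.
static bool checkMf2Range(double mf2, double mf2min, double mf2max)
{
  if (mf2 < mf2min || mf2 > mf2max)
  {
    std::fprintf(stderr, "Warning: mf2 = %e outside PDF grid [%e, %e]\n", mf2, mf2min, mf2max);
    return false;
  }
  return true;
}

int main()
{
  checkMf2Range(5.0e4, 1.0e0, 8.0e4);  // inside the default range -> no warning
  checkMf2Range(1.0e5, 1.0e0, 8.0e4);  // outside -> warning printed
  return 0;
}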
DefaultInit(steer, _pars.mb, _mnr, _frag, _grid, _gridSmoothed); //if(_debug) diff --git a/reactions/HVQMNR_LHCb_7TeV_charm/src/ReactionHVQMNR_LHCb_7TeV_charm.cc b/reactions/HVQMNR_LHCb_7TeV_charm/src/ReactionHVQMNR_LHCb_7TeV_charm.cc index f2051a3b261627ed0f67152d6ad9cf7b16b49444..cd94eeb0716946ed61435a5921022636e01bcdcb 100644 --- a/reactions/HVQMNR_LHCb_7TeV_charm/src/ReactionHVQMNR_LHCb_7TeV_charm.cc +++ b/reactions/HVQMNR_LHCb_7TeV_charm/src/ReactionHVQMNR_LHCb_7TeV_charm.cc @@ -46,6 +46,9 @@ int ReactionHVQMNR_LHCb_7TeV_charm::atStart(const string &s) // stereing parameters for this calculation (modify only if you understand what you are doing) Steering steer; + steer.q = true; + steer.a = true; + steer.nf = 3; steer.ptmin = 0.001; steer.ptmax = 20.0; steer.npt = 25; @@ -57,6 +60,10 @@ int ReactionHVQMNR_LHCb_7TeV_charm::atStart(const string &s) steer.nx3 = 25; steer.nx4 = 125; steer.nbz = 50; + steer.xmin = 1e-6; + steer.xmax = 1e0; + steer.mf2min = 1e0; + steer.mf2max = 8e4; DefaultInit(steer, _pars.mc, _mnr, _frag, _grid, _gridSmoothed); //if(_debug) diff --git a/reactions/Hathor/include/ReactionHathor.h b/reactions/Hathor/include/ReactionHathor.h index 80c03208466c60e3d60475350eb202ab79a24313..bf50bb79c866e683ffa5617907e5af541d26600e 100644 --- a/reactions/Hathor/include/ReactionHathor.h +++ b/reactions/Hathor/include/ReactionHathor.h @@ -42,9 +42,9 @@ class ReactionHathor : public ReactionTheory HathorPdfxFitter* _pdf; int* _rndStore; - int _scheme; - double _mtop; - double _mr; - double _mf; + //double _mtop; + std::map<int, std::shared_ptr<double> > _mtopPerInstance; + std::map<int, std::shared_ptr<double> > _mrPerInstance; + std::map<int, std::shared_ptr<double> > _mfPerInstance; }; diff --git a/reactions/Hathor/src/HathorPdfxFitter.cc b/reactions/Hathor/src/HathorPdfxFitter.cc index 58b83c252119978896ff7d6dc99ad2e2e02f82eb..13ff51fb57f89fb838e220ea06ea87f8cedb9a34 100644 --- a/reactions/Hathor/src/HathorPdfxFitter.cc +++ b/reactions/Hathor/src/HathorPdfxFitter.cc @@ -15,6 +15,14 @@ HathorPdfxFitter::HathorPdfxFitter(ReactionHathor *ptrReactionTheory) void HathorPdfxFitter::GetPdf(double x, double muf, double h[13]) { + /*static double xmin = 1.0; + static double xmax = 0.0; + if(x < xmin) + xmin = x; + if(x > xmax) + xmax = x; + printf("HATHORGetPdf x = %f [%12.8f%12.8f]\n", x, xmin, xmax);*/ + if(!IsValid) return; diff --git a/reactions/Hathor/src/ReactionHathor.cc b/reactions/Hathor/src/ReactionHathor.cc index 55146a3a83888e3bb9c8f0332e6310293bc30976..e51d83d142a8d2fd125d79bbb5feab41d679395e 100644 --- a/reactions/Hathor/src/ReactionHathor.cc +++ b/reactions/Hathor/src/ReactionHathor.cc @@ -29,10 +29,9 @@ ReactionHathor::ReactionHathor() { _pdf = NULL; _rndStore = NULL; - _scheme = -1; - _mtop = -1.0; - _mr = -1.0; - _mf = -1.0; + //_mtop = -1.0; + //_mr = -1.0; + //_mf = -1.0; } ReactionHathor::~ReactionHathor() @@ -63,44 +62,20 @@ int ReactionHathor::atStart(const string &s) _rndStore = new int[nRnd]; rlxd_get(_rndStore); - // scheme (perturbative order and pole/MSbar mass treatment) - const string order = GetParamS("Order"); - const int pertubOrder = OrderMap(order); - _scheme = Hathor::LO; - if(pertubOrder > 1) - _scheme = _scheme | Hathor::NLO; - if(pertubOrder > 2) - _scheme = _scheme | Hathor::NNLO; - int msMass = 0; // pole mass by default - if(checkParam("MS_MASS")) - msMass = GetParamI("MS_MASS"); - if(msMass) - _scheme = _scheme | Hathor::MS_MASS; - // top quark mass - std::string mtopName = "mtp";// shouldn't we distinguish somehow between pole and running 
masses? - if(!checkParam(mtopName)) - { - std::string str = "F: no top quark mass (\"" + mtopName + "\" parameter) for Hathor"; - hf_errlog_(17081101, str.c_str(), strlen(str.c_str())); - } - _mtop = GetParam("mtp"); - - // renorm. scale - _mr = _mtop; - if(checkParam("muR")) - _mr *= GetParam("muR"); - - // fact. scale - _mf = _mtop; - if(checkParam("muF")) - _mf *= GetParam("muF"); - - std::cout << " Hathor will use:"; - std::cout << " mtop = " << _mtop << "[GeV] "; - std::cout << " renorm. scale = " << _mr << "[GeV] "; - std::cout << " fact. scale = " << _mf << "[GeV]"; - std::cout << std::endl; + //std::string mtopName = "mtp";// shouldn't we distinguish somehow between pole and running masses? + //if(!checkParam(mtopName)) + //{ + // std::string str = "F: no top quark mass (\"" + mtopName + "\" parameter) for Hathor"; + // hf_errlog_(17081101, str.c_str(), strlen(str.c_str())); + //} + //_mtop = GetParam("mtp"); + + // !!!! + //for(map<string, double* >::iterator it = _xfitter_pars.begin(); it != _xfitter_pars.end(); it++) + //{ + // printf("_xfitter_pars[%s] = %f\n", it->first.c_str(), *it->second); + //} return 0; } @@ -115,73 +90,104 @@ void ReactionHathor::setDatasetParameters(int dataSetID, map<std::string, std::s hf_errlog_(17080701, str, strlen(str)); } - // read centre-of-mass energy from provided dataset parameters - // (must be provided) - auto it = pars.find("SqrtS"); - if(it == pars.end()) - { - char str[256]; - sprintf(str, "F: no SqrtS for dataset with id = %d", dataSetID); - hf_errlog_(17080702, str, strlen(str)); - } - double sqrtS = atof(it->second.c_str()); - - // read precision level from provided dataset parameters - // if not specified set to default 2 -> Hathor::MEDIUM - int precisionLevel = Hathor::MEDIUM; - it = pars.find("precisionLevel"); - if(it != pars.end()) - { - precisionLevel = std::pow(10, 2 + atoi(it->second.c_str())); - // check that this setting is allowed - // see in AbstractHathor.h: - // enum ACCURACY { LOW=1000, MEDIUM=10000, HIGH=100000 }; - // and - // precisionLevel = 1 -> Hathor::LOW - // precisionLevel = 2 -> Hathor::MEDIUM - // precisionLevel = 3 -> Hathor::HIGH - if(precisionLevel != Hathor::LOW && precisionLevel != Hathor::MEDIUM && precisionLevel != Hathor::HIGH) - { - char str[256]; - sprintf(str, "F: provided precision level = %d not supported by Hathor", precisionLevel); - hf_errlog_(17081102, str, strlen(str)); - } - } - - // read ppbar from provided dataset parameters - // if not specified assume it is false (pp collisions) - int ppbar = false; - it = pars.find("ppbar"); - if(it != pars.end()) - { - ppbar = atoi(it->second.c_str()); - if(ppbar != 0 && ppbar != 1) - { - char str[256]; - sprintf(str, "F: provided ppbar = %d not recognised (must be 0 or 1)", ppbar); - hf_errlog_(17081103, str, strlen(str)); - } - } - // instantiate Hathor Hathor* hathor = new Hathor(*_pdf); //Hathor* hathor = new Hathor(); // set collision type + // read ppbar (0 for pp, 1 for ppbar) from provided dataset parameters + // if not specified assume it is 0 (pp collisions) + // here local value is preferred over global one (to allow different data sets for different collision types) + int ppbar = false; + if(pars.find("ppbar") != pars.end()) + ppbar = atoi(pars.find("ppbar")->second.c_str()); + else if(checkParam("ppbar")) + ppbar = GetParamI("ppbar"); if(ppbar) hathor->setColliderType(Hathor::PPBAR); else hathor->setColliderType(Hathor::PP); - // set centre-of-mass energy + // read centre-of-mass energy from provided dataset parameters (must be provided) 
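Editor note on the precisionLevel handling that is being reorganised here: both the removed and the added code map the user-facing setting 1/2/3 onto Hathor's internal accuracy value via pow(10, 2 + level), i.e. 1 -> 1000, 2 -> 10000, 3 -> 100000, and reject anything else. The standalone sketch below mirrors that mapping; its enum values are copied from the AbstractHathor.h comment quoted in the diff (LOW=1000, MEDIUM=10000, HIGH=100000) and it does not include the real Hathor headers, so treat it as an illustration rather than the library API.

#include <cmath>
#include <cstdio>

// Values copied from the AbstractHathor.h comment quoted in the diff;
// the real enum lives in the Hathor library, not here.
enum Accuracy { LOW = 1000, MEDIUM = 10000, HIGH = 100000 };

// Map the steering-level setting (1, 2 or 3) onto the internal accuracy value.
static int mapPrecisionLevel(int level)
{
  int value = static_cast<int>(std::pow(10.0, 2 + level));
  if (value != LOW && value != MEDIUM && value != HIGH)
  {
    // the real code raises a fatal hf_errlog here; this sketch just falls back
    std::fprintf(stderr, "precision level %d not supported\n", level);
    return MEDIUM;
  }
  return value;
}

int main()
{
  for (int level = 1; level <= 3; ++level)
    std::printf("precisionLevel = %d -> %d\n", level, mapPrecisionLevel(level));
  return 0;
}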
+ // here local value is preferred over global one (to allow different data sets with difference centre-of-mass energies) + double sqrtS = (pars.find("SqrtS") != pars.end()) ? atof(pars.find("SqrtS")->second.c_str()) : GetParam("SqrtS"); + if(sqrtS == 0.0) + hf_errlog(17080702, "F: no SqrtS for dataset with id = " + std::to_string(dataSetID)); hathor->setSqrtShad(sqrtS); - // set scheme - hathor->setScheme(_scheme); + // set mass + // here local value is preferred over global one (to allow calculations with several mass values, e.g. for translation into MSbar mass scheme) + _mtopPerInstance[dataSetID] = std::shared_ptr<double>(new double(pars.find("mtp") == pars.end() ? GetParam("mtp") : atof(pars.find("mtp")->second.c_str()))); + + // set renorm. scale + _mrPerInstance[dataSetID] = std::shared_ptr<double>(new double(*_mtopPerInstance[dataSetID])); + if(checkParam("muR") || pars.find("muR") != pars.end()) + { + if(pars.find("muR") != pars.end()) + *_mrPerInstance[dataSetID] *= stod(pars["muR"]); + else + *_mrPerInstance[dataSetID] *= GetParam("muR"); + } + + // set fact. scale + _mfPerInstance[dataSetID] = std::shared_ptr<double>(new double(*_mtopPerInstance[dataSetID])); + if(checkParam("muF") || pars.find("muF") != pars.end()) + { + if(pars.find("muF") != pars.end()) + *_mfPerInstance[dataSetID] *= stod(pars["muF"]); + else + *_mfPerInstance[dataSetID] *= GetParam("muF"); + } + + // set perturbative order + // here local value is preferred over global one (to allow LO and NLO calculations in one run, e.g. for translation into MSbar mass scheme) + std::string schemeName = (pars.find("Order") != pars.end()) ? pars.find("Order")->second : GetParamS("Order"); + int scheme = Hathor::LO; + if(schemeName == "NLO") + scheme = scheme | Hathor::NLO; + else if(schemeName == "NNLO") + scheme = scheme | Hathor::NNLO; + // set mass scheme (default is pole mass scheme) + // here local value is preferred over global one + int msMass = 0; + if(pars.find("MS_MASS") != pars.end()) + msMass = atoi(pars.find("MS_MASS")->second.c_str()); + else if(checkParam("MS_MASS")) + msMass = GetParamI("MS_MASS"); + if(msMass) + scheme = scheme | Hathor::MS_MASS; + hathor->setScheme(scheme); // set precision level + // read precision level from provided dataset parameters + // if not specified set to default 2 -> Hathor::MEDIUM + int precisionLevel = 2; + if(checkParam("precisionLevel")) + precisionLevel = GetParamI("precisionLevel"); + else if(pars.find("precisionLevel") != pars.end()) + precisionLevel = atoi(pars.find("precisionLevel")->second.c_str()); + precisionLevel = std::pow(10, 2 + precisionLevel); + // check that this setting is allowed + // see in AbstractHathor.h: + // enum ACCURACY { LOW=1000, MEDIUM=10000, HIGH=100000 }; + // and + // precisionLevel = 1 -> Hathor::LOW + // precisionLevel = 2 -> Hathor::MEDIUM + // precisionLevel = 3 -> Hathor::HIGH + if(precisionLevel != Hathor::LOW && precisionLevel != Hathor::MEDIUM && precisionLevel != Hathor::HIGH) + hf_errlog(17081102, "F: provided precision level = " + std::to_string(precisionLevel) + " not supported by Hathor"); hathor->setPrecision(precisionLevel); + std::cout << " Hathor will use for this instance (" + std::to_string(dataSetID) + "):" << std::endl; + double mt = *_mtopPerInstance[dataSetID]; + std::cout << " mtop = " << mt << "[GeV] " << std::endl; + std::cout << " renorm. scale = " << *_mrPerInstance[dataSetID] << "[GeV] " << std::endl; + std::cout << " factor. 
scale = " << *_mfPerInstance[dataSetID] << "[GeV] " << std::endl; + std::cout << " SqrtS = " << sqrtS << std::endl; + std::cout << " scheme: " << scheme << std::endl; + std::cout << " precisionLevel: " << precisionLevel << std::endl; + std::cout << std::endl; + // done hathor->PrintOptions(); _hathorArray[dataSetID] = hathor; @@ -194,12 +200,17 @@ int ReactionHathor::compute(int dataSetID, valarray<double> &val, map<string, va rlxd_reset(_rndStore); Hathor* hathor = _hathorArray.at(dataSetID); - hathor->getXsection(_mtop, _mr, _mf); + //hathor->getXsection(_mtop, _mr, _mf); + double mt = _mtopPerInstance[dataSetID] ? (*_mtopPerInstance[dataSetID]) : GetParam("mtp"); + double mr = *_mrPerInstance[dataSetID]; + double mf = *_mfPerInstance[dataSetID]; + hathor->getXsection(mt, mr, mf); double dum = 0.0; - val[0] = 0.0; - hathor->getResult(0, val[0], dum); + double xsec = 0.0; + hathor->getResult(0, xsec, dum); + //printf("mt,mr,mf,xsec: %f %f %f %f\n", mt, mr, mf, xsec); + val = xsec; //printf("VAL ************ %f\n", val[0]); return 0; } - diff --git a/reactions/KFactor/include/ReactionKFactor.h b/reactions/KFactor/include/ReactionKFactor.h index ae861ae2c91b1caea0b3b2b4c8c165fdf5c7eb1b..c226129ed57562b4c8f81f84869ca83af6aea7dd 100644 --- a/reactions/KFactor/include/ReactionKFactor.h +++ b/reactions/KFactor/include/ReactionKFactor.h @@ -33,6 +33,13 @@ class ReactionKFactor : public ReactionTheory virtual int parseOptions(){ return 0;}; private: map<int, std::vector<double> > _values; - map<int, std::pair<std::string, double> > _parameterNames; + + struct Parameter + { + std::string Name; + double Value; + int NPoints; + }; + map<int, Parameter> _parameters; }; diff --git a/reactions/KFactor/src/ReactionKFactor.cc b/reactions/KFactor/src/ReactionKFactor.cc index 76c57db5a7c4ba01c20be7a9c16cbd3180749f23..78d77429235796f9eed894181c17c7c1c0859e0d 100644 --- a/reactions/KFactor/src/ReactionKFactor.cc +++ b/reactions/KFactor/src/ReactionKFactor.cc @@ -104,7 +104,7 @@ void ReactionKFactor::setDatasetParameters(int dataSetID, map<string,string> par } file.close(); } - // check if kfactors should be read from data file (soecifying column is mandatory) + // check if kfactors should be read from data file (specifying column is mandatory) else if (pars.find("DataColumn") != pars.end()) { std::string columnName = pars["DataColumn"]; @@ -118,26 +118,31 @@ void ReactionKFactor::setDatasetParameters(int dataSetID, map<string,string> par // check if kfactor is a parameter (possibly free); the value of this parameter will be used for all bins in data set else if (pars.find("Parameter") != pars.end()) { - _parameterNames[dataSetID] = std::make_pair(pars["Parameter"], 0.0); + _parameters[dataSetID].Name = pars["Parameter"]; + _parameters[dataSetID].Value = 0.0; + if (pars.find("N") != pars.end()) + _parameters[dataSetID].NPoints = atoi(pars["N"].c_str()); + else + _parameters[dataSetID].NPoints = _dsBins[dataSetID]->begin()->second.size(); } else hf_errlog(17102804, "F: FileName or DataColumn or Parameter must be provided for KFactor"); } void ReactionKFactor::initAtIteration() { - for(auto& it : _parameterNames) - it.second.second = GetParam(it.second.first); + for(auto& it : _parameters) + it.second.Value = GetParam(it.second.Name); } // Main function to compute results at an iteration int ReactionKFactor::compute(int dataSetID, valarray<double> &val, map<string, valarray<double> > &err) { - const auto& it = _parameterNames.find(dataSetID); - if(it != _parameterNames.end()) + const auto& it = 
_parameters.find(dataSetID); + if(it != _parameters.end()) { // kfactor given as a fit parameter read in initAtIteration() - int np = _dsBins[dataSetID]->begin()->second.size(); // number of data points - val = std::valarray<double>(it->second.second, np); + //int np = _dsBins[dataSetID]->begin()->second.size(); // number of data points + val = std::valarray<double>(it->second.Value, it->second.NPoints); } else // kfactor is constant value read in setDatasetParameters() diff --git a/reactions/KMatrix/src/ReactionKMatrix.cc b/reactions/KMatrix/src/ReactionKMatrix.cc index ad59e37d793b436f5de78de367719bfd7f302682..74faf47419fda7ef0f7d4213ebf32a5cbb33d4ea 100644 --- a/reactions/KMatrix/src/ReactionKMatrix.cc +++ b/reactions/KMatrix/src/ReactionKMatrix.cc @@ -26,97 +26,104 @@ int ReactionKMatrix::atStart(const string &s) // Initialisze for a given dataset: void ReactionKMatrix::setDatasetParameters(int dataSetID, map<string,string> pars, map<string, double> parsDataset){ - std::vector<double> temp; - double var; - // check if KMatrixs should be read from separate file - if (pars.find("FileName") != pars.end()) + std::vector<double> temp; + double var; + // check if KMatrixs should be read from separate file + if (pars.find("FileName") != pars.end()) + { + // file name to read KMatrixs + std::string fileName = pars["FileName"]; + + // TODO: how to get number of data points? + // not very elegant way below + int np = _dsBins[dataSetID]->begin()->second.size(); + + // requested starting column from file (by default 1st) + int column_start = 1; + if(pars.find("FileColumnStart") != pars.end()){ + column_start = atoi(pars["FileColumnStart"].c_str()); + } + // requested finishing column from file (by default 1st) + int column_finish = np; + if(pars.find("FileColumnFinish") != pars.end()){ + column_finish = atoi(pars["FileColumnFinish"].c_str()); + } + + // check that the column is reasonable + if(column_start < 1){ + hf_errlog(18080700, "F: wrong starting column = " + std::to_string(column_start)); + } + if(column_start > column_finish){ + hf_errlog(18080701, "F: starting column greater than finishing column "); + } + + // requested starting line from file (by default 1st) and last line (by default last line) + int lineStart = 1; + if(pars.find("FileLineStart") != pars.end()) + lineStart = atoi(pars["FileLineStart"].c_str()); + int lineFinish = -1; + if(pars.find("FileLineFinish") != pars.end()) + lineFinish = atoi(pars["FileLineFinish"].c_str()); + + // open file + std::ifstream file(fileName.c_str()); + string line; + if (!file.is_open()){ + hf_errlog(18080702, "F: error opening KMatrix file = " + fileName); + } + + // skip lineStart lines + int readLines = 0; + for(int l = 1; l < lineStart; l++){ + readLines++; + getline(file, line); + } + + while(getline(file,line)) { - // file name to read KMatrixs - std::string fileName = pars["FileName"]; - - // TODO: how to get number of data points? 
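Editor note on the rewritten KMatrix reader above: each non-comment line of the input file is parsed column by column and only a configurable 1-based column range (FileColumnStart .. FileColumnFinish) is kept before the matrix is flattened into a single vector. The snippet below is a stripped-down illustration of that per-line column selection using standard C++ streams only; it is not the xFitter reader itself.

#include <cstdio>
#include <sstream>
#include <string>
#include <vector>

// Keep columns column_start..column_finish (1-based) of a whitespace-separated
// line, mirroring the selection logic sketched in ReactionKMatrix::setDatasetParameters.
static std::vector<double> selectColumns(const std::string& line, int column_start, int column_finish)
{
  std::vector<double> row;
  std::istringstream sline(line);
  double var = 0.0;
  int current_col = 1;
  while (sline >> var)
  {
    if (current_col >= column_start && current_col <= column_finish)
      row.push_back(var);
    ++current_col;
  }
  return row;
}

int main()
{
  const std::string line = "0.1 0.2 0.3 0.4 0.5";
  std::vector<double> row = selectColumns(line, 2, 4);  // expect 0.2 0.3 0.4
  for (double v : row) std::printf("%g ", v);
  std::printf("\n");
  return 0;
}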
- // not very elegant way below - int np = _dsBins[dataSetID]->begin()->second.size(); - - // requested starting column from file (by default 1st) - int column_start = 1; - if(pars.find("FileColumnStart") != pars.end()){ - column_start = atoi(pars["FileColumnStart"].c_str()); - } - // requested finishing column from file (by default 1st) - int column_finish = np; - if(pars.find("FileColumnFinish") != pars.end()){ - column_finish = atoi(pars["FileColumnFinish"].c_str()); - } - - // check that the column is reasonable - if(column_start < 1){ - hf_errlog(18080700, "F: wrong starting column = " + std::to_string(column_start)); - } - if(column_start > column_finish){ - hf_errlog(18080701, "F: starting column greater than finishing column "); - } - - // requested starting line from file (by default 1st) - int lineStart = 1; - if(pars.find("FileLine") != pars.end()){ - lineStart = atoi(pars["FileLine"].c_str()); - } - - // open file - std::ifstream file(fileName.c_str()); - string line; - if (!file.is_open()){ - hf_errlog(18080702, "F: error opening KMatrix file = " + fileName); + readLines++; + if(lineFinish != -1 && readLines > lineFinish) + break; + if(line.at(0) == '#') continue; //ignore comments + line.erase(line.find_last_not_of(" \n\r\t")+1); // trim trailing whitespaces + + std::stringstream sline(line); + + int current_col = 1; + while(sline.good()){ + sline >> var; + if(current_col >= column_start && current_col <= column_finish){ //Only using range between spezified columns + temp.push_back(var); + } + current_col++; } - - // skip lineStart lines - for(int l = 1; l < lineStart; l++){ - getline(file, line); + _values2D[dataSetID].push_back(temp); + temp.clear(); + sline.clear(); + + } + file.close(); + + //mapping 2d matrix (m x n) to 1d vector (m*n): list of column vectors, mapping with vec(i*n + j) = mat(j,i) + int m = _values2D[dataSetID].size(); + int n = _values2D[dataSetID].at(0).size(); + _values[dataSetID].resize(n*m); + for(int i = 0; i < m; i++){ + for(int j = 0; j < n; j++){ + _values[dataSetID].at(i*n+j)=_values2D[dataSetID].at(i).at(j); } - - while(getline(file,line)) - { - if(line.at(0) == '#') continue; //ignore comments - line.erase(line.find_last_not_of(" \n\r\t")+1); // trim trailing whitespaces - - std::stringstream sline(line); - - int current_col = 1; - while(sline.good()){ - sline >> var; - if(current_col >= column_start && current_col <= column_finish){ //Only using range between spezified columns - temp.push_back(var); - } - current_col++; - } - _values2D[dataSetID].push_back(temp); - temp.clear(); - sline.clear(); - - } - file.close(); - - //mapping 2d matrix (m x n) to 1d vector (m*n): list of column vectors, mapping with vec(i*n + j) = mat(j,i) - int m = _values2D[dataSetID].size(); - int n = _values2D[dataSetID].at(0).size(); - _values[dataSetID].resize(n*m); - for(int i = 0; i < m; i++){ - for(int j = 0; j < n; j++){ - _values[dataSetID].at(i*n+j)=_values2D[dataSetID].at(i).at(j); - } - } - }else{ - hf_errlog(18080703, "F: FileName must be provided for KMatrix"); + } + }else{ + hf_errlog(18080703, "F: FileName must be provided for KMatrix"); } } // Main function to compute results at an iteration int ReactionKMatrix::compute(int dataSetID, valarray<double> &val, map<string, valarray<double> > &err) { - // kmatrix is constant value read in setDatasetParameters() - val = std::valarray<double>(_values[dataSetID].data(), _values[dataSetID].size()); + // kmatrix is constant value read in setDatasetParameters() + val = 
std::valarray<double>(_values[dataSetID].data(), _values[dataSetID].size()); - return 0; + return 0; } diff --git a/reactions/KRunning/include/ReactionKRunning.h b/reactions/KRunning/include/ReactionKRunning.h new file mode 100644 index 0000000000000000000000000000000000000000..ce5b625f903558dde30febbc848418932a11c247 --- /dev/null +++ b/reactions/KRunning/include/ReactionKRunning.h @@ -0,0 +1,49 @@ + +#pragma once + +#include "ReactionTheory.h" + +/** + @class' ReactionKRunning + + @brief A wrapper class for KRunning reaction + + Based on the ReactionTheory class. Reads options produces 3d cross section. + + @version 0.1 + @date 2019-01-16 + */ + +class ReactionKRunning : public ReactionTheory +{ + public: + ReactionKRunning(){}; + +// ~ReactionKRunning(){}; +// ~ReactionKRunning(const ReactionKRunning &){}; +// ReactionKRunning & operator =(const ReactionKRunning &r){return *(new ReactionKRunning(r));}; + + public: + virtual string getReactionName() const { return "KRunning" ;}; + int atStart(const string &); + virtual void setDatasetParameters(int dataSetID, map<string,string> pars, map<string,double> dsPars) override; + virtual int compute(int dataSetID, valarray<double> &val, map<string, valarray<double> > &err); + protected: + virtual int parseOptions(){ return 0;}; + + std::map<int, std::string> _type; + std::map<int, std::string> _q; + std::map<int, double> _qValue; + std::map<int, std::string> _q0; + std::map<int, int> _NPoints; + + double getAlphaS(double q) { return alphaS(q); } + double getMassMSbar(const double m0, const double q, const double as, const double as0) + { + const double c0 = 4.0 / 9.0; + // m0 is m(m) + double mMsBar = m0 * pow(as / as0, c0); + return mMsBar; + } +}; + diff --git a/reactions/KRunning/src/Makefile.am b/reactions/KRunning/src/Makefile.am new file mode 100644 index 0000000000000000000000000000000000000000..b3f7529d6078399651e2e5c2b3be5cafdc12675f --- /dev/null +++ b/reactions/KRunning/src/Makefile.am @@ -0,0 +1,14 @@ + +# Created by AddReaction.py on 2019-01-16 + +AM_CXXFLAGS = -I$(srcdir)/../include -I$(srcdir)/../../../include -I$(srcdir)/../../../interfaces/include -Wall -fPIC -Wno-deprecated + +lib_LTLIBRARIES = libkrunning_xfitter.la +libkrunning_xfitter_la_SOURCES = ReactionKRunning.cc + +# libkrunning_xfitter_la_LDFLAGS = place_if_needed + +datadir = ${prefix}/yaml/reactions/KRunning +data_DATA = ../yaml/parameters.yaml + +dist_noinst_HEADERS = ../include ../yaml diff --git a/reactions/KRunning/src/ReactionKRunning.cc b/reactions/KRunning/src/ReactionKRunning.cc new file mode 100644 index 0000000000000000000000000000000000000000..26c35a8a50d4d43b5cafcddeb7c7b0178086dd51 --- /dev/null +++ b/reactions/KRunning/src/ReactionKRunning.cc @@ -0,0 +1,72 @@ + +/* + @file ReactionKRunning.cc + @date 2019-01-16 + @author AddReaction.py + Created by AddReaction.py on 2019-01-16 +*/ + +#include "ReactionKRunning.h" + +// the class factories +extern "C" ReactionKRunning* create() { + return new ReactionKRunning(); +} + + +// Initialize at the start of the computation +int ReactionKRunning::atStart(const string &s) +{ + return 0; +} + +void ReactionKRunning::setDatasetParameters(int dataSetID, map<std::string, std::string> pars, map<std::string, double> dsPars) +{ + // check if dataset with provided ID already exists + if(_type.find(dataSetID) != _type.end()) + hf_errlog(19011501, "F: dataset with id " + std::to_string(dataSetID) + " already exists"); + + // read type of running + // presently only alphaS (accesses whatever running implemented in xFitter) 
and mass MSbar running (at NLO) is implemented + auto it = pars.find("type"); + if(it == pars.end()) + hf_errlog(19011502, "F: no type of running for dataset with id " + std::to_string(dataSetID)); + if(it->second != "as" && it->second != "massMSbarNLO") + hf_errlog(19011503, "F: unsupported running type = " + it->second); + _type[dataSetID] = it->second; + + // read scale + it = pars.find("q"); + if(!checkParam(it->second)) // value provided + _qValue[dataSetID] = stod(it->second); + else // parameter name is provided + _q[dataSetID] = it->second; + + // for type=massMSbarNLO read q0: scale at which m(m) is quoted + if(_type[dataSetID] == "massMSbarNLO") + _q0[dataSetID] = pars.find("q0")->second; + + // read optional number of points (if not provided, use the number of data points) + it = pars.find("N"); + if(it == pars.end()) + _NPoints[dataSetID] = _dsBins[dataSetID]->begin()->second.size(); + else + _NPoints[dataSetID] = atoi(pars["N"].c_str()); + //printf("npoints: %d\n", _NPoints[dataSetID]); +} + +// Main function to compute results at an iteration +int ReactionKRunning::compute(int dataSetID, valarray<double> &val, map<string, valarray<double> > &err) +{ + double q = (_qValue.find(dataSetID) != _qValue.end()) ? _qValue[dataSetID] : GetParam(_q[dataSetID]); + if(_type[dataSetID] == "as") + val = valarray<double>(getAlphaS(q), _NPoints[dataSetID]); + else if(_type[dataSetID] == "massMSbarNLO") + { + double q0 = GetParam(_q0[dataSetID]); + val = valarray<double>(getMassMSbar(q0, q, getAlphaS(q0), getAlphaS(q)), _NPoints[dataSetID]); + } + //for(int i = 0; i < val.size(); i++) + // printf("val[%d] = %f\n", i, val[i]); + return 0; +} diff --git a/reactions/KRunning/yaml/parameters.yaml b/reactions/KRunning/yaml/parameters.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/reactions/cbdiff/include/Reactioncbdiff.h b/reactions/cbdiff/include/Reactioncbdiff.h new file mode 100644 index 0000000000000000000000000000000000000000..215a8fbc7eb9f645d200bee771adbc81cac4e6bc --- /dev/null +++ b/reactions/cbdiff/include/Reactioncbdiff.h @@ -0,0 +1,54 @@ + +#pragma once + +#include "ReactionBaseHVQMNR.h" +//#include "ReactionTheory.h" + +/** + @class' Reactioncbdiff + + @brief A wrapper class for cbdiff reaction + + Based on the ReactionTheory class. Reads options produces 3d cross section. 
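Editor note on the mass running used by ReactionKRunning above: for type=massMSbarNLO the scale dependence is evaluated in getMassMSbar as m(q) = m(q0) * (alphaS(q)/alphaS(q0))^(4/9), where 4/9 is the leading-order MSbar running exponent 12/(33 - 2*nf) for nf = 3. The self-contained sketch below reproduces that formula; the one-loop toy coupling is an assumption added purely so the numbers can be checked by hand and is not the xFitter evolution accessed through alphaS(q).

#include <cmath>
#include <cstdio>

// One-loop toy coupling, only for illustrating the running below
// (the real code uses the alphaS(q) provided by the active evolution).
static double alphaSOneLoop(double q, double nf = 3.0, double lambdaQCD = 0.3)
{
  const double kPi = 3.14159265358979323846;
  const double b0 = (33.0 - 2.0 * nf) / (12.0 * kPi);
  return 1.0 / (b0 * 2.0 * std::log(q / lambdaQCD));
}

// Leading-order MSbar mass running, as in ReactionKRunning::getMassMSbar:
// m(q) = m(q0) * (as(q)/as(q0))^c0 with c0 = 4/9 (= 12/(33 - 2*nf) for nf = 3).
static double runMassMSbar(double m0, double asQ, double asQ0)
{
  const double c0 = 4.0 / 9.0;
  return m0 * std::pow(asQ / asQ0, c0);
}

int main()
{
  const double mc  = 1.27;                 // m_c(m_c) in GeV, PDG-like value
  const double as0 = alphaSOneLoop(mc);
  const double as  = alphaSOneLoop(3.0);
  // prints roughly 1.0 GeV with this toy coupling
  std::printf("m_c(3 GeV) ~ %.3f GeV\n", runMassMSbar(mc, as, as0));
  return 0;
}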
+ + @version 0.1 + @date 2019-02-01 + */ + +class Reactioncbdiff : public ReactionBaseHVQMNR +//class Reactioncbdiff : public ReactionTheory +{ + public: + Reactioncbdiff(){}; + +// ~Reactioncbdiff(){}; +// ~Reactioncbdiff(const Reactioncbdiff &){}; +// Reactioncbdiff & operator =(const Reactioncbdiff &r){return *(new Reactioncbdiff(r));}; + + public: + virtual string getReactionName() const { return "cbdiff" ;}; + virtual int atStart(const string &); + virtual void initAtIteration(); + virtual int compute(int dataSetID, valarray<double> &val, map<string, valarray<double> > &err); + virtual void setDatasetParameters(int dataSetID, map<string,string> pars, map<string,double> dsPars) override; + protected: + virtual int parseOptions(){ return 0;}; + + std::map<int, std::shared_ptr<MNR::MNR> > _mapMNR; + std::map<int, std::shared_ptr<MNR::Grid> > _mapGrid; + std::map<int, int> _mapMSbarMass; + std::map<int, double> _mapMassDiff; + std::map<int, std::shared_ptr<MNR::Grid> > _mapGridLOMassUp; + std::map<int, std::shared_ptr<MNR::Grid> > _mapGridLOMassDown; + std::map<int, std::shared_ptr<MNR::Grid> > _mapGridSmLOMassUp; + std::map<int, std::shared_ptr<MNR::Grid> > _mapGridSmLOMassDown; + std::map<int, std::shared_ptr<MNR::Grid> > _mapGridSm; + std::map<int, std::shared_ptr<MNR::Frag> > _mapFrag; + std::map<int, std::shared_ptr<Parameters> > _mapPar; + std::map<int, std::vector<TH2D*> > _mapXSec; + std::map<int, double> _mapFF; + std::map<int, int> _mapN; + std::map<int, double> _mapPrecision; + std::map<int, double> _mapHadronMass; +}; + diff --git a/reactions/cbdiff/src/Makefile.am b/reactions/cbdiff/src/Makefile.am new file mode 100644 index 0000000000000000000000000000000000000000..1abd1707f2529d76c6498bb27c1bf906776b29d3 --- /dev/null +++ b/reactions/cbdiff/src/Makefile.am @@ -0,0 +1,20 @@ + +# Created by AddReaction.py on 2019-02-01 + +if HAVE_ROOT + +AM_CXXFLAGS = $(ROOT_CFLAGS) -I$(srcdir)/../../../reactions/BaseHVQMNR/include -I$(srcdir)/../include -I$(srcdir)/../../../include -I$(srcdir)/../../../interfaces/include -Wall -fPIC -Wno-deprecated + +lib_LTLIBRARIES = libcbdiff_xfitter.la +libcbdiff_xfitter_la_SOURCES = Reactioncbdiff.cc + +# libcbdiff_xfitter_la_LDFLAGS = place_if_needed +libcbdiff_xfitter_la_LDFLAGS = -lbasehvqmnr_xfitter -L$(srcdir)/../../../reactions/BaseHVQMNR/src/.libs + +datadir = ${prefix}/yaml/reactions/cbdiff +data_DATA = ../yaml/parameters.yaml + +endif + +dist_noinst_HEADERS = ../include ../yaml + diff --git a/reactions/cbdiff/src/Reactioncbdiff.cc b/reactions/cbdiff/src/Reactioncbdiff.cc new file mode 100644 index 0000000000000000000000000000000000000000..f5cc69c5f82166259387728b01c05be96cf097ac --- /dev/null +++ b/reactions/cbdiff/src/Reactioncbdiff.cc @@ -0,0 +1,354 @@ + +/* + @file Reactioncbdiff.cc + @date 2019-02-01 + @author AddReaction.py + Created by AddReaction.py on 2019-02-01 +*/ + +#include "Reactioncbdiff.h" +#include <TMath.h> +#include <TF1.h> + +// the class factories +extern "C" Reactioncbdiff* create() { + return new Reactioncbdiff(); +} + +void Reactioncbdiff::setDatasetParameters(int dataSetID, map<string,string> pars, map<string,double> dsPars) +{ + ReactionBaseHVQMNR::setDatasetParameters(dataSetID, pars, dsPars); + //_debug = 1; + Steering steer; + if(checkParam("steer_q") || pars.find("steer_q") != pars.end()) + steer.q = GetParamIInPriority("steer_q", pars); + else + steer.q = true; + if(checkParam("steer_a") || pars.find("steer_a") != pars.end()) + steer.a = GetParamIInPriority("steer_a", pars); + else + steer.a = true; + steer.nf = 
GetParamIInPriority("steer_nf", pars); + steer.ptmin = GetParamInPriority("steer_ptmin", pars); + steer.ptmax = GetParamInPriority("steer_ptmax", pars); + steer.npt = GetParamIInPriority("steer_npt", pars); + steer.nptsm = GetParamIInPriority("steer_nptsm", pars); + steer.ymin = GetParamInPriority("steer_ymin", pars); + steer.ymax = GetParamInPriority("steer_ymax", pars); + steer.ny = GetParamIInPriority("steer_ny", pars); + steer.nsfnb = GetParamIInPriority("steer_nsfnb", pars); + steer.nx3 = GetParamIInPriority("steer_nx3", pars); + steer.nx4 = GetParamIInPriority("steer_nx4", pars); + steer.nbz = GetParamIInPriority("steer_nbz", pars); + steer.xmin = GetParamInPriority("steer_xmin", pars); + steer.xmax = GetParamInPriority("steer_xmax", pars); + steer.mf2min = GetParamInPriority("steer_mf2min", pars); + steer.mf2max = GetParamInPriority("steer_mf2max", pars); + + // precision: 1.0 is default + _mapPrecision[dataSetID] = 1.0; + if(pars.find("precision") != pars.end() || checkParam("precision")) + { + _mapPrecision[dataSetID] = GetParamInPriority("precision", pars); + if(_mapPrecision[dataSetID] != 1.0) + { + printf("Using precision factor %f\n", _mapPrecision[dataSetID]); + steer.npt *= _mapPrecision[dataSetID]; + steer.nptsm *= _mapPrecision[dataSetID]; + steer.ny *= _mapPrecision[dataSetID]; + steer.nsfnb *= _mapPrecision[dataSetID]; + steer.nx3 *= _mapPrecision[dataSetID]; + steer.nx4 *= _mapPrecision[dataSetID]; + steer.nbz *= _mapPrecision[dataSetID]; + } + } + + std::shared_ptr<Parameters>& par = _mapPar[dataSetID]; + par = std::shared_ptr<Parameters>(new Parameters); + // Flavour + if(checkParam("flav") || pars.find("flav") != pars.end()) + par->flav = GetParamSInPriority("flav", pars).c_str()[0]; + else + par->flav = 'c'; + // Order + std::string order = GetParamSInPriority("Order", pars); + MNR::MNRContribution contrNLO = 11111; // NLO + MNR::MNRContribution contrLO = 10111; // NLO + MNR::MNRContribution contr = contrNLO; + if(order == "LO") + contr = contrLO; + else if(order != "NLO") + hf_errlog(19020301, "F: order " + order + " not supported"); + const int ncontr = 1; + MNR::MNRContribution** ptrContr = new MNR::MNRContribution*[ncontr]; + ptrContr[0] = new MNR::MNRContribution(contr); + MNR::MNRContribution** ptrContrLO = new MNR::MNRContribution*[ncontr]; + ptrContrLO[0] = new MNR::MNRContribution(contrLO); + // HQ masses + if(checkParam("mq") || pars.find("mq") != pars.end()) + par->mc = GetParamInPriority("mq", pars); + else + { + par->flagMassIsGlobal = true; + if(par->flav == 'c') + par->mc = GetParam("mch"); + else if(par->flav == 'b') + par->mc = GetParam("mbt"); + else if(par->flav == 't') + par->mc = GetParam("mtp"); + } + _mapMassDiff[dataSetID] = 0.001; // for MSbar mass transformation; 1 MeV should work for c, b and t + //_mapMassDiff[dataSetID] = 0.150; + // scale parameters + //GetMuPar('f', 'q', par->mf_A_c, par->mf_B_c, par->mf_C_c, pars); + //GetMuPar('r', 'q', par->mr_A_c, par->mr_B_c, par->mr_C_c, pars); + par->mf_A_c = GetParamInPriority("mf_A", pars); + par->mf_B_c = GetParamInPriority("mf_B", pars); + par->mr_A_c = GetParamInPriority("mr_A", pars); + par->mr_B_c = GetParamInPriority("mr_B", pars); + // fragmentation parameters + par->fragpar_c = GetParamInPriority("FragPar", pars); + PrintParameters(par.get()); + // pole, MSbar or MSR mass (0 pole, 1 MSbar, 2 MSR) + _mapMSbarMass[dataSetID] = 0; + if(pars.find("MS_MASS") != pars.end() || checkParam("MS_MASS")) + _mapMSbarMass[dataSetID] = GetParamIInPriority("MS_MASS", pars); + printf("MNR: order = %s 
MS_MASS = %d\n", order.c_str(), _mapMSbarMass[dataSetID]); + // divide or not by bin width + if(checkParam("dividebw") || pars.find("dividebw") != pars.end()) + par->flagDivideBinWidth = GetParamIInPriority("dividebw", pars); + // debug mode + if(checkParam("debug") || pars.find("debug") != pars.end()) + par->debug = GetParamIInPriority("debug", pars); + + std::shared_ptr<MNR::MNR>& mnr = _mapMNR[dataSetID]; + mnr = std::shared_ptr<MNR::MNR>(new MNR::MNR(this)); + mnr->SetScaleCoef(par->mf_A_c, par->mf_B_c, par->mf_C_c, par->mr_A_c, par->mr_B_c, par->mr_C_c); + + // main grid (LO or NLO) + std::shared_ptr<MNR::Grid>& grid = _mapGrid[dataSetID]; + grid = std::shared_ptr<MNR::Grid>(new MNR::Grid(ncontr, ptrContr)); + // LO mass up grid (for MSbar mass) + std::shared_ptr<MNR::Grid>& gridLOMassUp = _mapGridLOMassUp[dataSetID]; + gridLOMassUp = std::shared_ptr<MNR::Grid>(new MNR::Grid(ncontr, ptrContrLO)); + std::shared_ptr<MNR::Grid>& gridSmLOMassUp = _mapGridSmLOMassUp[dataSetID]; + gridSmLOMassUp = std::shared_ptr<MNR::Grid>(new MNR::Grid(ncontr, ptrContrLO)); + // LO mass down grid (for MSbar mass) + std::shared_ptr<MNR::Grid>& gridLOMassDown = _mapGridLOMassDown[dataSetID]; + gridLOMassDown = std::shared_ptr<MNR::Grid>(new MNR::Grid(ncontr, ptrContrLO)); + std::shared_ptr<MNR::Grid>& gridSmLOMassDown = _mapGridSmLOMassDown[dataSetID]; + gridSmLOMassDown = std::shared_ptr<MNR::Grid>(new MNR::Grid(ncontr, ptrContrLO)); + // final smoothed grid + std::shared_ptr<MNR::Grid>& gridSm = _mapGridSm[dataSetID]; + gridSm = std::shared_ptr<MNR::Grid>(new MNR::Grid(ncontr, ptrContr)); + + std::shared_ptr<MNR::Frag>& frag = _mapFrag[dataSetID]; + frag = std::shared_ptr<MNR::Frag>(new MNR::Frag); + std::vector<TH2D*>& xsec = _mapXSec[dataSetID]; + + mnr->SetDebug(_debug); + DefaultInitMNR(steer, par->mc, *mnr.get()); + DefaultInitGrid(steer, par->mc, steer.npt, *grid.get()); + DefaultInitGrid(steer, par->mc, steer.npt, *gridLOMassUp.get()); + DefaultInitGrid(steer, par->mc, steer.npt, *gridLOMassDown.get()); + DefaultInitGrid(steer, par->mc, steer.nptsm, *gridSm.get()); + DefaultInitGrid(steer, par->mc, steer.nptsm, *gridSmLOMassUp.get()); + DefaultInitGrid(steer, par->mc, steer.nptsm, *gridSmLOMassDown.get()); + DefaultInitFrag(steer, *frag.get()); + mnr->fC_sh = TMath::Power(stod(pars["energy"]), 2.0); // centre-of-mass energy squared + mnr->CalcConstants(); + std::string finalState = pars["FinalState"]; + if(finalState == "parton") + { + frag->AddOut(NULL, par->mc); + _mapHadronMass[dataSetID] = 0.0; // hadron mass is irrelevant + } + else + { + int fragType = 0; // Kartvelishvili + // read hadron mass (if <0 then PDG values are used) + _mapHadronMass[dataSetID] = -1.0; + if(pars.find("hadronMass") != pars.end() || checkParam("hadronMass")) + _mapHadronMass[dataSetID] = GetParamInPriority("hadronMass", pars); + if(_mapHadronMass[dataSetID] < 0.0) + _mapHadronMass[dataSetID] = MNR::Frag::GetHadronMass(finalState.c_str()); + //frag->AddOut(MNR::Frag::GetFragFunction(fragType, finalState.c_str(), par->fragpar_c), MNR::Frag::GetHadronMass(finalState.c_str())); + printf("MNRFrag: using Kartvelishvili function with par = %.1f and hadronMass = %.3f\n", par->fragpar_c, _mapHadronMass[dataSetID]); + frag->AddOut(MNR::Frag::GetFragFunction(fragType, finalState.c_str(), par->fragpar_c), _mapHadronMass[dataSetID]); + frag->GetFF(0)->SetParameter(1, par->fragpar_c); + } + _mapFF[dataSetID] = stod(pars["FragFrac"]); + + xsec.resize(1); + for(size_t i = 0; i < xsec.size(); i++) + { + xsec[i] = new TH2D; + // pT 
binning + std::vector<double> binsPt; + if(pars.find("pTn") != pars.end() && pars.find("pTmin") != pars.end() && pars.find("pTmax") != pars.end()) + { + int nb = stoi(pars["pTn"]); + binsPt.resize(nb + 1); + double w = (stod(pars["pTmax"]) - stod(pars["pTmin"])) / nb; + for(int b = 0; b < nb + 1; b++) + binsPt[b] = stod(pars["pTmin"]) + w * b; + } + else if(pars.find("pT") != pars.end()) + { + std::istringstream ss(pars["pT"]); + std::string token; + while(std::getline(ss, token, ',')) + binsPt.push_back(stod(token)); + } + else + hf_errlog(19021900, "F: no pT binning provided"); + // y binning + std::vector<double> binsY; + if(pars.find("yn") != pars.end() && pars.find("ymin") != pars.end() && pars.find("ymax") != pars.end()) + { + int nb = stoi(pars["yn"]); + binsY.resize(nb + 1); + double w = (stod(pars["ymax"]) - stod(pars["ymin"])) / nb; + for(int b = 0; b < nb + 1; b++) + binsY[b] = stod(pars["ymin"]) + w * b; + } + else if(pars.find("y") != pars.end()) + { + std::istringstream ss(pars["y"]); + std::string token; + while(std::getline(ss, token, ',')) + binsY.push_back(stod(token)); + } + else + hf_errlog(19021901, "F: no y binning provided"); + xsec[i]->SetBins(binsPt.size() - 1, &binsPt[0], binsY.size() - 1, &binsY[0]); + } + _mapN[dataSetID] = 1; + if(pars.find("N") != pars.end()) + _mapN[dataSetID] = stod(pars["N"]); +} + +// Initialize at the start of the computation +int Reactioncbdiff::atStart(const string &s) +{ + return 0; +} + +void Reactioncbdiff::initAtIteration() +{ + ; +} + +// Main function to compute results at an iteration +int Reactioncbdiff::compute(int dataSetID, valarray<double> &val, map<string, valarray<double> > &err) +{ + //printf("COMPUTE\n"); + std::shared_ptr<MNR::MNR> mnr(_mapMNR[dataSetID]); + std::shared_ptr<Parameters> par(_mapPar[dataSetID]); + std::shared_ptr<MNR::Grid> grid(_mapGrid[dataSetID]); + std::shared_ptr<MNR::Grid> gridSm(_mapGridSm[dataSetID]); + std::shared_ptr<MNR::Frag> frag(_mapFrag[dataSetID]); + std::vector<TH2D*> xsec(_mapXSec[dataSetID]); + + // update mass which may be fitted (scales or frag. par. 
cannot be fitted with this implementation) + if(par->flagMassIsGlobal) + { + if(par->flav == 'c') + par->mc = GetParam("mch"); + else if(par->flav == 'b') + par->mc = GetParam("mbt"); + else if(par->flav == 't') + par->mc = GetParam("mtp"); + } + + // protection against non-positive or NaN mass + if(par->mc < 0.0 || par->mc != par->mc) + par->mc = 1000.0; // there is no heavy quark for which a mass of 1000 GeV would be reasonable + + mnr->CalcXS(grid.get(), par->mc); + + // transformation to the MSbar mass scheme + if(_mapMSbarMass[dataSetID]) + { + // store original scale B parameters which need to be modified for changed mass + const double mfB = mnr->fMf_B; + const double mrB = mnr->fMr_B; + + // LO mass up variation + double massU = par->mc + _mapMassDiff[dataSetID]; + mnr->fMf_B = mfB * pow(par->mc / massU, 2.0); + mnr->fMr_B = mrB * pow(par->mc / massU, 2.0); + std::shared_ptr<MNR::Grid> gridLOMassU(_mapGridLOMassUp[dataSetID]); + mnr->CalcXS(gridLOMassU.get(), massU); + + // LO mass down variation + double massD = par->mc - _mapMassDiff[dataSetID]; + mnr->fMf_B = mfB * pow(par->mc / massD, 2.0); + mnr->fMr_B = mrB * pow(par->mc / massD, 2.0); + std::shared_ptr<MNR::Grid> gridLOMassD(_mapGridLOMassDown[dataSetID]); + mnr->CalcXS(gridLOMassD.get(), massD); + + // restore original scales + mnr->fMf_B = mfB; + mnr->fMr_B = mrB; + + if(_mapMSbarMass[dataSetID] == 1) + { + int flagMSbarTransformation = 0; // d1=4/3 (no ln) + MNR::Grid::InterpolateGrid(grid.get(), gridSm.get(), par->mc, gridLOMassU.get(), massU, gridLOMassD.get(), massD, flagMSbarTransformation); + } + else if(_mapMSbarMass[dataSetID] == 2) + { + double R = 3.0; + int nl = mnr->GetNl(); + MNR::Grid::InterpolateGrid(grid.get(), gridSm.get(), par->mc, gridLOMassU.get(), massU, gridLOMassD.get(), massD, 2, &R, &nl); + } + } + else + MNR::Grid::InterpolateGrid(grid.get(), gridSm.get(), par->mc); + + frag->CalcCS(gridSm.get(), par->mc, xsec); + if(par->debug) + xsec[0]->Print("all"); + + DataSet& ds = _dataSets[dataSetID]; + val.resize(xsec[0]->GetNbinsX() * xsec[0]->GetNbinsY() * _mapN[dataSetID]); + if (ds.BinsYMin == NULL || ds.BinsYMax == NULL || ds.BinsPtMin == NULL || ds.BinsPtMax == NULL ) + { + // fill results array with cross sections bin by bin, optionally repeat N times + for(int in = 0; in < _mapN[dataSetID]; in++) + { + for(int ix = 1; ix <= xsec[0]->GetNbinsX(); ix++) + { + for(int iy = 1; iy <= xsec[0]->GetNbinsY(); iy++) + { + //int ival = (ix - 1) * xsec[0]->GetNbinsY() + iy - 1 + in * xsec[0]->GetNbinsX() * xsec[0]->GetNbinsY(); + int ival = (iy - 1) * xsec[0]->GetNbinsX() + ix - 1 + in * xsec[0]->GetNbinsX() * xsec[0]->GetNbinsY(); + val[ival] = xsec[0]->GetBinContent(ix, iy) * _mapFF[dataSetID]; + if(par->flagDivideBinWidth) + { + val[ival] /= xsec[0]->GetXaxis()->GetBinWidth(ix); + val[ival] /= xsec[0]->GetYaxis()->GetBinWidth(iy); + } + } + } + } + } + else + { + // fill results array with cross sections by matching bins + // make sure number of bins is consistent + if(ds.BinsYMin->size() > (size_t)(xsec[0]->GetNbinsX() * xsec[0]->GetNbinsY())) + hf_errlog(19021900, "F: inconsistent number of bins"); + for(unsigned int i = 0; i < ds.BinsYMin->size(); i++) + val[i] = FindXSecPtYBin(xsec[0], (*ds.BinsYMin)[i], (*ds.BinsYMax)[i], (*ds.BinsPtMin)[i], (*ds.BinsPtMax)[i], par->flagDivideBinWidth, par->flagDivideBinWidth) * _mapFF[dataSetID]; + } + if(par->debug) + { + for(size_t i = 0; i < val.size(); i++) + printf("val[%lu] = %f\n", i, val[i]); + } + + return 0; +} + diff --git 
a/reactions/cbdiff/yaml/parameters.yaml b/reactions/cbdiff/yaml/parameters.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/src/CheckForPDF.cxx b/src/CheckForPDF.cxx index 475ba60693c665882d4772564511719aa30a91d6..da3589760581ad1b39adbf5eed0ce90b958b86f1 100644 --- a/src/CheckForPDF.cxx +++ b/src/CheckForPDF.cxx @@ -2,9 +2,7 @@ #include "LHAPDF/Paths.h" #include <iostream> #include <algorithm> - - - +#include <cstring> using namespace std; diff --git a/src/TheorEval.cc b/src/TheorEval.cc index e42a01fc4015f32995b9eab182d4007575500888..a358a5e013b4684a8b09061f8b04afef47844b17 100644 --- a/src/TheorEval.cc +++ b/src/TheorEval.cc @@ -25,6 +25,10 @@ #include "xfitter_steer.h" #include "BaseEvolution.h" +// ROOT spline can be uncommented (here and below in the code) and used e.g. for cross checks (obviously requires ROOT) +//#include <TSpline.h> +#include <spline.h> + using namespace std; // Global variable to hold current alphaS @@ -143,7 +147,103 @@ TheorEval::assignTokens(list<tToken> &sl) sl.push_back(t); continue; } + if ( term == string("spline") || term == string("splinederivative") ) + { + // special case for natural cubic spline interpolation + if ( term == string("spline")) + { + t.opr = 6; + t.name = "spline"; + } + else if ( term == string("splinederivative")) + { + t.opr = 7; + t.name = "splinederivative"; + } + // push spline + sl.push_back(t); + int& narg_spline = sl.back().narg; + + // process arguments + t.val = new valarray<double>(0., nb); + t.narg = 0; + t.opr = 0; + // format: spline[x1,y1,x2,y2,x3,y3,x4,y4,...,x] + strexpr.get(c); + if(c != '[') + hf_errlog(18090900, "F: Theory expression syntax error: expected ["); + narg_spline = 0; + bool flagDone = false; + while(true) + { + strexpr.get(c); + int nsymbols = 0; + term.assign(1,c); + while(strexpr.get(c)) + { + if(c == ',' || c == ']') + { + if(nsymbols == 0) + hf_errlog(18090903, "F: Theory expression syntax error: error reading arguments"); + if(c == ']') + flagDone = true; + break; + } + if (!isalnum(c)) + hf_errlog(18090904, "F: Theory expression syntax error: error reading arguments"); + term.append(1,c); + nsymbols++; + } + // have read new argument: push it + if(nsymbols > 0) + { + vector<string>::iterator found_term = find(_termNames.begin(), _termNames.end(), term); + if ( found_term == _termNames.end() ) { + cout << "Undeclared term " << term << " in expression " << _expr << endl; + return -1; + } else { + t.opr = 0; + t.name = term; + if ( _mapInitdTerms.find(term) != _mapInitdTerms.end()){ + t.val = _mapInitdTerms[term]; + } else { + t.val = new valarray<double>(0.,nb); + this->initTerm(int(found_term-_termNames.begin()), t.val); + _mapInitdTerms[term] = t.val; + } + } + sl.push_back(t); + narg_spline++; + // finish reading spline arguments + if(flagDone) + break; + } + else + { + if(!flagDone) + // should not be here + //assert(0); + hf_errlog(18090901, "F: Theory expression syntax error reading spline arguments"); + if(narg_spline % 2 == 0) + hf_errlog(18090901, "F: Theory expression syntax error: spline expected odd number of arguments"); + if(narg_spline < 9) + hf_errlog(18090902, "F: Theory expression syntax error: spline expected at least 9 arguments"); + break; + } + } + continue; + } + if ( term == string("norm") ) + { + // special case for normalised expression: norm(A)=A/sum(A) + // (A is computed once) + t.opr = 8; + t.name = "norm"; + t.val = new valarray<double>(0., nb); + sl.push_back(t); + continue; + } /* if ( term 
== string("avg") ) { // special case for avg() function t.opr = 4; @@ -181,7 +281,7 @@ TheorEval::assignTokens(list<tToken> &sl) case '*': t.opr = 3; break; case '/': t.opr = 3; break; - case '.': t.opr = 5; break; //change + case '.': t.opr = 5; break; default: cout << "Unknown operator "<< c << " in expression " << _expr << endl; } @@ -391,6 +491,9 @@ TheorEval::initReactionTerm(int iterm, valarray<double> *val) LoadParametersFromYAML(pars,rt->getReactionName()); // and transfer to the module + //printf("pars\n"); + //for(map<string,string>::iterator it = pars.begin(); it != pars.end(); it++) + // printf("%s = %s\n", it->first.c_str(), it->second.c_str()); rt->setDatasetParameters(_dsId*1000+iterm, pars, _dsPars); _mapReactionToken[ std::pair<ReactionTheory*,int>(rt,iterm) ] = val; @@ -442,6 +545,52 @@ TheorEval::Evaluate(valarray<double> &vte ) } double avg = stk.top().sum()/stk.top().size(); stk.top() = avg;*/ + } else if ( it->name == string("spline") || it->name == string("splinederivative") ) + { + // load all arguments + int narg = it->narg; + std::valarray<double> x0 = stk.top(); + stk.pop(); + int nsections = (it->narg - 1) / 2; + std::valarray<std::valarray<double> > x(nsections); + std::valarray<std::valarray<double> > y(nsections); + for(int sect = nsections - 1; sect >= 0; sect--) + { + y[sect] = stk.top(); + stk.pop(); + x[sect] = stk.top(); + stk.pop(); + } + auto result = x0; + for(int p = 0; p < x0.size(); p++) + { + std::vector<double> xSpline(nsections); + std::vector<double> ySpline(nsections); + for(int sect = 0; sect < nsections; sect++) + { + xSpline[sect] = x[sect][p]; + ySpline[sect] = y[sect][p]; + } + //TSpline3 spline("", &xSpline[0], &ySpline[0], ySpline.size()); + tk::spline spline; + spline.set_points(xSpline, ySpline); + if(it->name == string("spline")) + { + //result[p] = spline.Eval(x0[p]); + result[p] = spline(x0[p]); + } + else if(it->name == string("splinederivative")) + { + //result[p] = spline.Derivative(x0[p]); + result[p] = spline(x0[p], 1); + } + } + stk.push(result); + } + else if ( it->name == string("norm") ) + { + double sum = stk.top().sum(); + stk.top() = stk.top() / sum; } else if ( it->name == string("+") ){ valarray<double> a(stk.top()); stk.pop(); diff --git a/src/ftheor_eval.cc b/src/ftheor_eval.cc index 37f0f2f051e654f171bbe77754ee3a52a5ea85ee..353fc09d9a99cb338085b64c3f3ea55190a001fe 100644 --- a/src/ftheor_eval.cc +++ b/src/ftheor_eval.cc @@ -64,14 +64,15 @@ tDataBins gDataBins; t2Dfunctions g2Dfunctions; +#define NTERMMAX 128 extern struct thexpr_cb { double dynscale; int nterms; - char termname[16][8]; - char termtype[16][80]; - char terminfo[16][2048]; - char termsource[16][256]; - char theorexpr[1000]; + char termname[NTERMMAX][32]; + char termtype[NTERMMAX][80]; + char terminfo[NTERMMAX][4096]; + char termsource[NTERMMAX][256]; + char theorexpr[10000]; int ppbar_collisions; int normalised; int murdef; diff --git a/src/store_output.f b/src/store_output.f index 2f847d705b414ce4cc6d6a6a31f5863095c1e7ab..a4d3c62b9177840e65da65ddb5893e5a9695c1f0 100644 --- a/src/store_output.f +++ b/src/store_output.f @@ -15,7 +15,7 @@ C-------------------------------------------------------------- double precision q2,x,gval,sing,umin,dmin *new jf double precision QPDFXQ,cplus,splus,bplus,uplus,dplus,U,D,sea,DbmUb - double precision d_Ubar,d_Dbar,u_sea,d_sea,str,chm,bot, photon + double precision d_Ubar,d_Dbar,u_sea,d_sea,str,strbar,chm,bot,photon double precision totstr,totDbar,totcha,totUbar,afs,afc,xbelow,delx double precision 
totusea,totdsea,afs_ud @@ -30,7 +30,7 @@ C-------------------------------------------------------------- + rt_flc,rt_f1c,rt_f2b, + rt_flb,rt_f1b - double precision xnu, xrho, Qsimple + double precision xnu, xrho, Qsimple double precision F123(3) character*48 name @@ -64,8 +64,8 @@ C-------------------------------------------------------------- integer iq,jx,j ! Store how many PDFs are written out: - integer NPdfs - parameter (NPdfs = 14) + integer NPdfs + parameter (NPdfs = 15) C--------------------------------------------------------------- @@ -78,7 +78,7 @@ C--------------------------------------------------------------- nf = 5. if (q2.lt.qb) nf=4. if (q2.lt.qc) nf=3. - + if (idx.gt.0) then name =base(1:idx)//tag(iq2)//'.txt' h1name = base(1:idx)//tag(iq2)//'.txt' @@ -92,10 +92,11 @@ c open(82,file=h1name) write (81,*) q2val(iq2),outnx, NPdfs, outxrange(1), outxrange(2) ! Write the names of PDFs - write (81,'(15(2x,A12))') + write (81,'(16(2x,A12))') $ ' x ',' g ',' U ',' D ',' Ubar ', ' Dbar ', $ ' u_val ', ' d_val ', ' sea ' ,' u_sea ', - $ ' d_sea ', ' str ',' chm ',' bot ', ' ph ' + $ ' d_sea ', ' str ',' chm ',' bot ', ' ph ', + $ 'strbar' totstr= 0.d0 totDbar= 0.d0 @@ -135,7 +136,7 @@ c open(82,file=h1name) else D=pdf(1)+pdf(-3) endif - + umin=pdf(2)-pdf(-2) dmin=pdf(1)-pdf(-1) @@ -144,7 +145,7 @@ c open(82,file=h1name) else d_Ubar=pdf(-2) endif - + if (q2.gt.qb) then d_Dbar=pdf(-1)+pdf(-3)+pdf(-5) else @@ -157,13 +158,14 @@ c open(82,file=h1name) u_sea=pdf(-2) d_sea=pdf(-1) str = (pdf(-3)+pdf(3))/2.d0 + strbar = pdf(-3) chm = 0.0d0 if (q2.gt.qc) then chm=pdf(-4) endif - + bot = 0.d0 if (q2.gt.qb) then bot=pdf(-5) @@ -181,15 +183,15 @@ c open(82,file=h1name) write(81,810) + x,gval,U,D,d_Ubar,d_Dbar,umin,dmin,sea,u_sea,d_sea,str, - $ chm,bot,photon - 810 format(15(2x,G12.6)) + $ chm,bot,photon,strbar + 810 format(16(2x,G12.6)) 811 format(I3,2x,23(2x,G12.6)) enddo close(81) - + 999 continue @@ -197,13 +199,13 @@ c open(82,file=h1name) cv store for LHAPDF cv HERAPDF in LHAPDF5 format -cv PDFs are Glue Uval Dval Ubar Dbar Str Chrm Bot +cv PDFs are Glue Uval Dval Ubar Dbar Str Chrm Bot DO Iq=0,160 Q2=10**(8.30103/160D0*Iq ) q2valpdf(iq) = q2 alphas(iq)=hf_get_alphas(q2) enddo - + DO jx=0,160 IF(Jx.LE.80)THEN @@ -213,22 +215,22 @@ cv PDFs are Glue Uval Dval Ubar Dbar Str Chrm Bot ENDIF xvalpdf(jx) = x enddo - + C Prepare LHAPDF output do iq2=1,23 write (76,'(7E12.4)') (q2valpdf((iq2-1)*7+j),j=0,6) enddo - + do jx=1,23 write (76,'(7E12.4)') (xvalpdf((jx-1)*7+j),j=0,6) enddo - + do iq2=1,23 write (76,'(7E12.4)') (alphas((iq2-1)*7+j),j=0,6) enddo - + DO Iq=0,160 Q2=10**(8.30103/160D0*Iq ) @@ -245,7 +247,7 @@ c v grid(1+jx)=x write(76,666) PDFl(0), PDFl(2)-PDFl(-2), PDFl(1)-PDFl(-1), $ PDFl(-2), PDFl(-1), $ PDFl(3), PDFl(4), PDFl(5) - + enddo ENDDO @@ -264,22 +266,22 @@ c v grid(1+jx)=x C-------------------------------------------------- C> \brief Write results of fit -C> \details Write to a text file binning, +C> \details Write to a text file binning, C> data points with uncorrelated and total uncertainties, -C> fitted points and pulls. +C> fitted points and pulls. 
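For reference, a minimal standalone sketch of the tk::spline calls that the new 'spline'/'splinederivative' operators in TheorEval.cc above rely on. This is illustrative only and not part of the patch; it assumes the header-only spline.h included there is on the include path, and uses four knots, the smallest input the expression parser accepts (at least 9 arguments, i.e. four (x,y) pairs plus the evaluation point):

    // Natural cubic spline through four knots, same calls as TheorEval::Evaluate()
    #include <cstdio>
    #include <vector>
    #include <spline.h>

    int main()
    {
      std::vector<double> x = {1.0, 2.0, 3.0, 4.0};
      std::vector<double> y = {1.0, 4.0, 9.0, 16.0};
      tk::spline s;
      s.set_points(x, y);                     // build the interpolant through the knots
      std::printf("s(2.5) = %.3f\n", s(2.5)); // evaluate, as done per data point in Evaluate()
      return 0;
    }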
C-------------------------------------------------- subroutine WriteFittedPoints implicit none - + #include "steering.inc" #include "ntot.inc" #include "datasets.inc" #include "indata.inc" #include "systematics.inc" #include "theo.inc" - + integer i,j,index,PlotVarColIdx,PreviousPlots double precision PlotVar,PullVar @@ -293,7 +295,7 @@ C-------------------------------------------------- do i=1,ndatasets write(90,*)DATASETNUMBER(i) write(90,*) DATASETLABEL(i) - + do j=1,GNPlots(i) PreviousPlots = PreviousPlots + 1 write(90,16) 'Plot',j,'@',TRIM(GPlotOptions(PreviousPlots)) @@ -317,14 +319,14 @@ c & ' +-toterr theory pull dataset ' if(PlotVarColIdx.eq.0.and.GNPlots(i).eq.0) then if(Gplotvarcol(i).eq.'undefined') then call HF_Errlog(13021000, - $ 'W: Plotting options not set for data set: ' + $ 'W: Plotting options not set for data set: ' $ //DATASETLABEL(i)) else call HF_Errlog(13012901, $ 'W: Plotting: Can not find one of the columns') endif PlotVar = 0. - else + else if ( PlotVarColIdx.eq.0) then PlotVar = 0 else @@ -332,14 +334,14 @@ c & ' +-toterr theory pull dataset ' endif endif -c set pull to zero if no unc error +c set pull to zero if no unc error if(ALPHA_MOD(index).gt.0d0) then PullVar = (DATEN(index)-THEO_MOD(index))/ALPHA_MOD(index) - else + else PullVar = 0d0 endif - write(90,'(1X,11(e11.5,1X),i4,i4,A1,E11.5)') + write(90,'(1X,11(e11.5,1X),i4,i4,A1,E11.5)') $ AbstractBins(1,index), $ AbstractBins(2,index),AbstractBins(3,index), & DATEN(index),ALPHA_MOD(index), @@ -355,8 +357,8 @@ cv write(34,*), index,i,DATASETNUMBER(i) enddo 111 format(1X, F10.3, 2X, F12.6, 2X, 3(F12.6,2X)) close(90) - - + + RETURN END @@ -388,7 +390,7 @@ C------------------------------------------------------------- character*300 fname double precision, allocatable :: errIterate(:,:) - + C------------------------------------------------------------- if (ifcn3.lt.10) then @@ -473,7 +475,7 @@ C------------------------------------------------------------------- print *,' ' print *,' ' print *,'======================================================' - print '('' Use NNPDF overfitting method. + print '('' Use NNPDF overfitting method. $ Prepare output PDF files '')' print '('' Best FCN3 call='',i4,'' out of '',i4,'' calls'')', $ iminCont,nfcn3 @@ -493,10 +495,10 @@ C call Evolution !I'm commenting this out because it conflicts with C the new evolution interface, but I do not know why it was here C --Ivan -C ! Ready to store: +C ! Ready to store: cv open (76,file='output/lhapdf.block.txt',status='unknown') cv call store_pdfs('output/pdfs_q2val_') -C store the optimal values +C store the optimal values open (76,file=TRIM(OutDirName)//'/opt_lhapdf.block.txt', & status='unknown') @@ -544,7 +546,7 @@ C---------------------------------------------------- #include "theo.inc" integer i,j,index,k,reacindx - + double precision currEcharge open(90,file='./'//TRIM(OutDirName)//'/heraverager.dat') @@ -554,7 +556,7 @@ C write(90,*)ndatasets write(90,*) '!* ' - write(90,*) + write(90,*) $ '!* Swimming set from XFITTER for the HERAverager' write(90,*) '&Data' write(90,*) ' Name = ''Swimming'' ' @@ -594,8 +596,8 @@ C write(90,*)ndatasets endif do j=1,NDATAPOINTS(i) index = DATASETIDX(i,j) - - write(90,'(1X,i5,1X,4(e11.5,1X),i4)') + + write(90,'(1X,i5,1X,4(e11.5,1X),i4)') $ reacindx, $ AbstractBins(1,index), $ AbstractBins(2,index), @@ -615,7 +617,7 @@ C---------------------------------------------------------------- C> \brief Write theory prediction in format of input data tables. 
C> \param NNuisance number of error sets C> \param Theo_cent central value of theory predction -C> \param SymmetricPDFErr use symmetric or assymmetric errros (beta vs betaasym) +C> \param SymmetricPDFErr use symmetric or assymmetric errros (beta vs betaasym) C---------------------------------------------------------------- Subroutine WriteTheoryFiles(NNuisance,Theo_cent,SymmetricPDFErr) implicit none @@ -629,7 +631,7 @@ C---------------------------------------------------------------- double precision Theo_cent(Ntot) logical SymmetricPDFErr integer iset, ipoint, j, i - + character*2 c C--------------------------------------------------------- @@ -646,8 +648,8 @@ C--------------------------------------------------------- $ ,file=Trim(OutDirName)//'/theo_'//c//'.dat' $ ,status='unknown') - ! Write a header - write (51,'(''* Theory file for '',A)') + ! Write a header + write (51,'(''* Theory file for '',A)') $ Trim(DATASETLABEL(iset)) write (51,'(''&Data '')') @@ -656,7 +658,7 @@ C--------------------------------------------------------- write (51,'('' NData = '',I5)') NDATAPOINTS(iset) if (SymmetricPDFErr) then - write (51,'('' NColumn = '',I5)') NNuisance+1 + write (51,'('' NColumn = '',I5)') NNuisance+1 $ + DATASETBinningDimension(iset) write (51, @@ -668,11 +670,11 @@ C--------------------------------------------------------- $ ( trim(DATASETBinNames(i,iset)), $ i=1,DATASETBinningDimension(iset) ), 'theory', $ ( trim(System(nsys+i)),i=1,NNuisance-1) - write (51,'(A,''"'')') + write (51,'(A,''"'')') $ ( trim(System(nsys+i)),i=NNuisance,NNuisance) write (51,'('' Percent = '',I3,''*True'')') NNuisance else - write (51,'('' NColumn = '',I5)') NNuisance*2+1 + write (51,'('' NColumn = '',I5)') NNuisance*2+1 $ + DATASETBinningDimension(iset) write (51, $'('' ColumnType = '',I1,''*"Bin","Theory",'',i3,''*"Error"'')') @@ -681,12 +683,12 @@ C--------------------------------------------------------- $ ,advance='no' ) $ ( trim(DATASETBinNames(i,iset)), $ i=1,DATASETBinningDimension(iset) ), 'theory', - $ ( trim(System(nsys+i))//'+', + $ ( trim(System(nsys+i))//'+', $ trim(System(nsys+i))//'-',i=1,NNuisance-1) - write (51,'(A,''",'',''"'',A,''"'')') - $ ( trim(System(nsys+i))//'+', + write (51,'(A,''",'',''"'',A,''"'')') + $ ( trim(System(nsys+i))//'+', $ trim(System(nsys+i))//'-',i=NNuisance,NNuisance) - write (51,'('' Percent = '',I3,''*True'')') NNuisance*2 + write (51,'('' Percent = '',I3,''*True'')') NNuisance*2 endif write (51,'(''&End '')') @@ -694,19 +696,19 @@ C--------------------------------------------------------- do i = 1, NDATAPOINTS(iset) ipoint = Datasetidx(iset,i) if (SymmetricPDFErr) then - write (51,'(200E12.4)') + write (51,'(200E12.4)') $ ( AbstractBins(j,ipoint),j=1,DATASETBinningDimension(iset)), $ theo_cent(ipoint), $ ( -Beta(j,ipoint)*100.0, ! 
negative sign, since it is inverted in lhapdferrors.cc $ j=NSys+1 - $ ,NSys+NNuisance) + $ ,NSys+NNuisance) else - write (51,'(200E14.6)') + write (51,'(200E14.6)') $ ( AbstractBins(j,ipoint),j=1,DATASETBinningDimension(iset)), $ theo_cent(ipoint), - $ ( -BetaAsym(j,1,ipoint)*100.0, -BetaAsym(j,2,ipoint)*100., + $ ( -BetaAsym(j,1,ipoint)*100.0, -BetaAsym(j,2,ipoint)*100., $ j=NSys+1 - $ ,NSys+NNuisance) + $ ,NSys+NNuisance) endif enddo close (51) diff --git a/src/xfitter_pars.cc b/src/xfitter_pars.cc index 7b83b0bec7fc3b0577d7dc9537ce3d564142b8a9..287ccb6ba8729e71edc3ff388012d2db337290d7 100644 --- a/src/xfitter_pars.cc +++ b/src/xfitter_pars.cc @@ -3,7 +3,7 @@ @date Sun 16 April 2017 @author SG - Contains functions to read parameters.yaml, + Contains functions to read parameters.yaml, global maps to store parameters, and fortran interface functions. */ @@ -23,18 +23,18 @@ extern "C" { void parse_params_(); //!< Fortran callable parsing /// Interface to minuit parameters - void addexternalparam_(const char name[], const double &val, + void addexternalparam_(const char name[], const double &val, const double &step, - const double &min, const double &max, + const double &min, const double &max, const double &prior, const double &priorUnc, - const int &add, + const int &add, map<std::string,double*> *map, int len); void add_to_param_map_(map<std::string,double*>* map,double &value, int& global, char *name, int len); // Get parameters in fortran, for backward compatibility: double getparamd_(const char* name, int len); // Update of EWK/QCD parameters, can be fitted at each iteration. - void update_pars_fortran_(); + void update_pars_fortran_(); } @@ -48,7 +48,7 @@ namespace XFITTER_PARS { map <string, string> gParametersS; map <string, vector<double> > gParametersV; ///< Vectors of double parameters map <string, YAML::Node > gParametersY; ///< Store complete nodes for complex cases - + map<string,std::function<void(double const& x, double const& Q, double* pdfs)> > gXfxQArrays; // Also keep list of loaded evolutions here: @@ -89,7 +89,7 @@ namespace XFITTER_PARS { name=node.as<string>(); }catch(YAML::TypedBadConversion<string>ex){ ostringstream s;s<<"W: YAML exception: "<<ex.what()<<"; while trying to extract decomposition name from node: "<<node<<"; using default decomposition name"; - hf_errlog(18082930,s.str()); + hf_errlog(18082930,s.str()); name=getDefaultDecompositionName(); } return xfitter::get_pdfDecomposition(name)->f0(); @@ -146,7 +146,7 @@ namespace XFITTER_PARS { return search->second; } else { - hf_errlog(18071301,"W: string parameter "+name+" not found"); + hf_errlog(18071301,"W: string parameter "+name+" not found"); return ""; // not found } } @@ -184,7 +184,7 @@ bool fileExists(const string&fileName){ string text = "F: Problems reading yaml-parameters file: " + name + " : "+e.what(); hf_errlog_(17032503,text.c_str(),text.size()); } - + return; } */ @@ -217,7 +217,7 @@ void expandIncludes(YAML::Node&node,unsigned int recursionLimit=256){ expandIncludes(val,recursionLimit-1); } } - //Load and merge + //Load and merge for(vector<YAML::Node>::const_iterator kit=include_keys.begin();kit!=include_keys.end();++kit){ node.remove(*kit); string filename=(*kit).as<string>(""); @@ -318,9 +318,9 @@ void expandIncludes(YAML::Node&node,unsigned int recursionLimit=256){ void ParsToFortran(){ // helper macros -#define FortAssignD(NameV,Struc) if (gParameters.find(#NameV) != gParameters.end()) Struc.NameV = *gParameters[#NameV]; +#define FortAssignD(NameV,Struc) if (gParameters.find(#NameV) != 
gParameters.end()) Struc.NameV = *gParameters[#NameV]; #define FortAssignS(NameV,Struc) if (gParametersS.find(#NameV) != gParametersS.end()) strcpy(Struc.NameV,gParametersS[#NameV].c_str()); -#define FortAssignI(NameV,Struc) if (gParametersI.find(#NameV) != gParametersI.end()) Struc.NameV = gParametersI[#NameV]; +#define FortAssignI(NameV,Struc) if (gParametersI.find(#NameV) != gParametersI.end()) Struc.NameV = gParametersI[#NameV]; // CKM: FortAssignD(Vud,ckm_matrix_) @@ -355,7 +355,7 @@ void expandIncludes(YAML::Node&node,unsigned int recursionLimit=256){ // OZ 26.04.18 some lines above do not do what expected because it is gf, convFac and alphaem in parameters.yaml, not Gf, ConvFac and Alphaem // as a result CC DIS cross section and integrated NC and CC cross sections are always zero with old interface - // temporary fix: set these parameters manually + // temporary fix: set these parameters manually // (maybe some other parameters are not assigned as well) if(gParameters.find("alphaem") != gParameters.end()) ew_couplings_.Alphaem = *gParameters["alphaem"]; @@ -581,7 +581,7 @@ double getparamd_(const char* name,int len){ char buff[128]; memcpy( buff, &name[0], len); buff[len] = '\0'; - std::string key(buff); + std::string key(buff); if (XFITTER_PARS::gParameters.find(key) != XFITTER_PARS::gParameters.end()) { return *XFITTER_PARS::gParameters[key]; } diff --git a/tools/AddReaction.py b/tools/AddReaction.py index 77480f1a90181c8f09f81a3327bb0a8069040e6c..1e4a34cec8a274f6f959732d4e1f1e18ede1534a 100644 --- a/tools/AddReaction.py +++ b/tools/AddReaction.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python ''' Script to generate templates for a new theory module ''' @@ -7,7 +7,7 @@ import os import datetime if len(sys.argv)<2: - print ''' + print ''' Usage: AddReaction.py NAME ''' exit(0) @@ -25,6 +25,7 @@ with open("Reactions.txt","r+") as f: # Not present, add new line to the Reactions.txt file +print "Update Reactions.txt file" with open("Reactions.txt","a") as f: f.write(name+" "+"lib"+name.lower()+"_xfitter.so\n") @@ -35,7 +36,6 @@ print "Creating directories in reactions/"+name os.system("mkdir -p reactions/"+name+"/include") os.system("mkdir -p reactions/"+name+"/src") os.system("mkdir -p reactions/"+name+"/yaml") -os.system("touch reactions/"+name+"/yaml/parameters.yaml") print "Creating header file reactions/"+name+"/include/Reaction"+name+".h" @@ -50,7 +50,7 @@ with open("reactions/"+name+"/include/Reaction"+name+".h","w+") as f: /** @class' Reaction'''+name+''' - @brief A wrapper class for '''+name+''' reaction + @brief A wrapper class for '''+name+''' reaction Based on the ReactionTheory class. Reads options produces 3d cross section. 
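As an aside, the FortAssignD/FortAssignS/FortAssignI helper macros used in ParsToFortran() above are simple guarded copies from the YAML-driven parameter maps into the Fortran common blocks. A hypothetical, self-contained illustration of what one FortAssignD line expands to (the struct and the numerical value below are stand-ins for illustration, not taken from the patch):

    #include <map>
    #include <string>

    struct ckm_common { double Vud; };                  // stand-in for the Fortran common block
    static std::map<std::string, double*> gParameters;  // same shape as XFITTER_PARS::gParameters
    static ckm_common ckm_matrix_;

    #define FortAssignD(NameV,Struc) \
      if (gParameters.find(#NameV) != gParameters.end()) Struc.NameV = *gParameters[#NameV];

    int main()
    {
      double vud = 0.974;            // illustrative value; normally set via parameters.yaml
      gParameters["Vud"] = &vud;
      FortAssignD(Vud, ckm_matrix_)  // expands to:
      // if (gParameters.find("Vud") != gParameters.end()) ckm_matrix_.Vud = *gParameters["Vud"];
      return ckm_matrix_.Vud > 0.0 ? 0 : 1;
    }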
@@ -69,7 +69,7 @@ class Reaction'''+name+''' : public ReactionTheory public: virtual string getReactionName() const { return "'''+name+ '''" ;}; - int initAtStart(const string &); + int initAtStart(const string &); virtual int compute(int dataSetID, valarray<double> &val, map<string, valarray<double> > &err); protected: virtual int parseOptions(){ return 0;}; @@ -79,7 +79,7 @@ class Reaction'''+name+''' : public ReactionTheory print "Creating source file reactions/"+name+"/src/Reaction"+name+".cc" with open("reactions/"+name+"/src/Reaction"+name+".cc","w+") as f: - f.write(''' + f.write(''' /* @file Reaction'''+name+'''.cc @date ''' + datetime.date.today().isoformat() + ''' @@ -115,12 +115,12 @@ with open("reactions/"+name+"/src/Makefile.am","w+") as f: f.write(''' # Created by AddReaction.py on ''' + datetime.date.today().isoformat() + ''' -AM_CXXFLAGS = -I$(srcdir)/../include -I$(srcdir)/../../../include -I$(srcdir)/../../../interfaces/include -Wall -fPIC -Wno-deprecated +AM_CXXFLAGS = -I$(srcdir)/../include -I$(srcdir)/../../../include -I$(srcdir)/../../../interfaces/include -Wall -fPIC -Wno-deprecated lib_LTLIBRARIES = lib'''+ name.lower() + '''_xfitter.la lib'''+ name.lower()+'''_xfitter_la_SOURCES = Reaction'''+name+'''.cc -# lib'''+ name.lower()+'''_xfitter_la_LDFLAGS = place_if_needed +# lib'''+ name.lower()+'''_xfitter_la_LDFLAGS = place_if_needed datadir = ${prefix}/yaml/reactions/'''+name+''' data_DATA = ../yaml/parameters.yaml @@ -129,6 +129,10 @@ dist_noinst_HEADERS = ../include ../yaml ''') +pFile="reactions/"+name+"/yaml/parameters.yaml" +print "Creating (empty) parameter file "+pFile +os.system("touch "+pFile) + print "Update configure.ac file" os.system("sed 's|xfitter-config|xfitter-config\\n reactions/" +name +"/src/Makefile|' configure.ac >/tmp/configure.ac") os.system("cp /tmp/configure.ac configure.ac") diff --git a/tools/draw/include/CommandParser.h b/tools/draw/include/CommandParser.h index c149b77a5e74b8df43859d9ee41736f661f50923..abbe7de702f72abaef9b4ca8f210dba6322453eb 100644 --- a/tools/draw/include/CommandParser.h +++ b/tools/draw/include/CommandParser.h @@ -46,7 +46,7 @@ class CommandParser bool diff; bool noupband; int errbandcol; - + //shifts options int spp, shgth; bool adjshift; @@ -67,10 +67,14 @@ class CommandParser map <string, int> styles; map <string, int> lstyles; map <string, int> markers; - int col[6]; - int styl[6]; - int lstyl[6]; - int mark[6]; + //int col[6]; + //int styl[6]; + //int lstyl[6]; + //int mark[6]; + int col[12]; + int styl[12]; + int lstyl[12]; + int mark[12]; float lwidth; float resolution, pagewidth; bool nodata; @@ -84,7 +88,7 @@ class CommandParser bool profile, reweight, BAYweight, GKweight; bool bw; bool looseRepSelection; - + private: vector <string> allargs; void help(void) @@ -141,7 +145,7 @@ private: cout << "\t \t Number of rows and columns of PDF and data plots per page, default value is 2" << endl; cout << "\t --loose-mc-replica-selection" <<endl; cout << "\t \t Do not check for fit convergence for MC replica " <<endl; - + cout << "options for pdf plots:" << endl; cout << "\t --no-pdfs" << endl; @@ -150,7 +154,7 @@ private: cout << "\t \t Draw PDF uncertainty bands" << endl; cout << "\t --profile" << endl; cout << "\t \t Draw Profiled PDF (only for Hessian sets)" << endl; - cout << "\t \t To set this option only for one directory, use the syntax profiled:directory[:label]" << endl; + cout << "\t \t To set this option only for one directory, use the syntax profile:directory[:label]" << endl; cout << "\t Example: 
xfitter-draw profile:output:\"profiled\" output:\"not-profiled\"" << endl; cout << "\t --reweight(-BAY/-GK)" << endl; cout << "\t \t Draw Reweighted PDF (only for MC replica sets)" << endl; @@ -264,14 +268,14 @@ private: cout << "\t PlotDefColumn = 'y2' ! Variable used to divide the data in SubPlots" << endl; cout << "\t PlotDefValue = 0., 5.! Ranges of PlotDefColumn used to divide the data in SubPlots" << endl; cout << "\t PlotVarColumn = 'x'! Variable providing bin center information (use only if bin edges are missing)" << endl; - cout << "\t PlotOptions(1) = 'Experiment:ATLAS@Title: pp #rightarrow Z@XTitle: y_{Z} @YTitle: d#sigma/dy_{Z} [pb] @ExtraLabel:#int L = 100 fb^{-1}@Xmin:0.0@Xmax:2.5@Xlog@Ylog@YminR:0.91@YmaxR:1.09'" + cout << "\t PlotOptions(1) = 'Experiment:ATLAS@Title: pp #rightarrow Z@XTitle: y_{Z} @YTitle: d#sigma/dy_{Z} [pb] @ExtraLabel:#int L = 100 fb^{-1}@Xmin:0.0@Xmax:2.5@Xlog@Ylog@YminR:0.91@YmaxR:1.09'" << endl; cout << "\t &End" << endl; cout << endl; cout << "Option for 3-band PDF uncertainty bands (HERAPDF style) in PDF plots." << endl; cout << "\t --bands 3bands:<dir-full-uncert>" << endl; cout << "\t \t draw PDFs with three uncertainty bands: experimental (red), model (yellow) parametrisation (green)." << endl; - cout << "\t The model uncertainty originates from variation of model parameters (e.g. masses of heavy quarks, Q^2 cuts on data, etc.)," << endl; + cout << "\t The model uncertainty originates from variation of model parameters (e.g. masses of heavy quarks, Q^2 cuts on data, etc.)," << endl; cout << "\t parametrisation - from variations of the parameters in the fit and variation of the starting scale Q_0^2."<< endl; cout << " \t Directory <dir-full-uncert> should have fit results for experimental, model and parametrisation variations. " << endl; cout << " \t The file names for experimental variations should follow convention as follows: " << endl; @@ -284,7 +288,7 @@ private: cout << " \t Finally, p14s stands for parametrisation uncertainty and the number should start from N+K+1 (here assuming that K=3 for model errors)." << endl; cout << " \t NOTE: if command '--bands <dir-full-uncert>' is used, the total uncertainty in red is drawn." 
<< endl; cout << endl; - }; + }; }; extern CommandParser opts; diff --git a/tools/draw/include/PdfData.h b/tools/draw/include/PdfData.h index 5df80031e728d1f8539ab6caefd6b1a281086fed..289da36a594e247625502dd1958dcdcce816ed15 100644 --- a/tools/draw/include/PdfData.h +++ b/tools/draw/include/PdfData.h @@ -10,7 +10,8 @@ using namespace std; enum pdferr {None, AsymHess, SymHess, MC}; -enum pdftype{uv=0, dv, g, Sea, ubar, dbar, s, Rs, c, b, dbarminubar, uvmindv, U, D, Ubar, Dbar, goversea, doveru, dbaroverubar, dvoveruv, rs, photon, SeaOverGlue, photonOverGlue}; +//enum pdftype{uv=0, dv, g, Sea, ubar, dbar, s, Rs, c, b, dbarminubar, uvmindv, U, D, Ubar, Dbar, goversea, doveru, dbaroverubar, dvoveruv, rs, photon, SeaOverGlue, photonOverGlue}; +enum pdftype{uv=0, dv, g, Sea, ubar, dbar, s, sbar, Rs, soversbar, c, b, dbarminubar, uvmindv, U, D, Ubar, Dbar, goversea, doveru, dbaroverubar, dvoveruv, rs, photon, SeaOverGlue, photonOverGlue, uvplusdv, uvplusdvplusSea}; extern vector <pdftype> pdfs; extern vector <string> pdflabels; @@ -70,7 +71,7 @@ class Pdf vector <double> xpoints; }; -struct pdfshift +struct pdfshift { double val; double err; @@ -88,7 +89,7 @@ class PdfData //PdfData(const PdfData &Prior, string dirname, string label); void profile(string dirname, string label); //profile PDF uncertainty bands void pdfRotate(string dirname, string label); //rotate PDF - void pdfSet(string dirname, string label); // get single set + void pdfSet(string dirname, string label); // get single set pdferr err; //Type of PDF uncertainties bool model; //Model PDF uncertainties bool par; //Parametrisation PDF uncertainties diff --git a/tools/draw/src/CommandParser.cc b/tools/draw/src/CommandParser.cc index cf402c6c8cea3b04fcd3db695a61378523203710..c5f74f3c4b3a2e24cbbcd6f6ae3b3ee5ced2975e 100644 --- a/tools/draw/src/CommandParser.cc +++ b/tools/draw/src/CommandParser.cc @@ -78,12 +78,29 @@ CommandParser::CommandParser(int argc, char **argv): outdir("") { //initialise colors and styles - col[0] = kRed + 2; + /*col[0] = kRed + 2; col[1] = kBlue + 2; col[2] = kGreen + 3; col[3] = kOrange + 7; col[4] = kAzure + 1; - col[5] = kMagenta + 1; + col[5] = kMagenta + 1;*/ + + //static int cols[NCOLSXYZ] = {kBlack, kBlue, kRed, kMagenta, kGreen + 2, kYellow + 1, kAzure + 4, kSpring + 4, kOrange + 2, kRed - 7, kBlue - 9, kRed + 3, kViolet - 7}; + col[0] = kRed + 2; + col[1] = kBlue + 2; + //col[0] = kBlack; + //col[1] = kBlue; + //col[2] = kRed; + col[2] = kMagenta; + col[3] = kGreen + 2; + col[4] = kYellow + 1; + col[5] = kAzure + 4; + col[6] = kOrange + 2; + col[7] = kRed - 7; + col[8] = kBlue - 9; + col[9] = kRed + 3; + col[10] = kViolet - 7; + col[11] = kBlue - 2; styl[0] = 3354; styl[1] = 3345; @@ -98,6 +115,8 @@ CommandParser::CommandParser(int argc, char **argv): mark[3] = 32; mark[4] = 31; mark[5] = 27; + //for(int i = 6; i < 12; i++) + // mark[i] = 27; lstyl[0] = 1; lstyl[1] = 2; @@ -105,10 +124,12 @@ CommandParser::CommandParser(int argc, char **argv): lstyl[3] = 4; lstyl[4] = 5; lstyl[5] = 6; + //for(int i = 6; i < 12; i++) + // lstyl[i] = 6; // tight MC replica selection by default: looseRepSelection = false; - + //Set Hatches style gStyle->SetHatchesSpacing(2); gStyle->SetHatchesLineWidth(lwidth); @@ -465,7 +486,7 @@ CommandParser::CommandParser(int argc, char **argv): allargs.erase(it); it = allargs.begin(); } - + for (vector<string>::iterator it = allargs.begin() + 1; it != allargs.end(); it++) dirs.push_back(*it); @@ -478,7 +499,8 @@ CommandParser::CommandParser(int argc, char **argv): exit(-1); } - if 
(dirs.size() > 6) + //if (dirs.size() > 6) + if (dirs.size() > 12) { cout << endl; cout << "Maximum number of directories is 6" << endl; @@ -517,6 +539,6 @@ vector<string> Round(double value, double error, bool sign) result.push_back(Numb); sprintf (Numb, ((string)"%." + D + "f").c_str(), error); result.push_back(Numb); - + return result; } diff --git a/tools/draw/src/DataPainter.cc b/tools/draw/src/DataPainter.cc index dc73a1216d3af935346bd2900aeca7d753706f86..d2d0340a263ca7553b4f0fa7204e34be4c340d6c 100644 --- a/tools/draw/src/DataPainter.cc +++ b/tools/draw/src/DataPainter.cc @@ -543,7 +543,8 @@ TCanvas * DataPainter(int dataindex, int subplotindex) for (vector<range>::iterator r = thranges.begin(); r != thranges.end(); r++) { (*it).gettherr()->SetAxisRange((*r).lowedge, (*r).upedge); - (*it).Draw((TH1F*)(*it).gettherr()->Clone(), "E3L same"); + //(*it).Draw((TH1F*)(*it).gettherr()->Clone(), "E3L same"); + (*it).Draw((TH1F*)(*it).gettherr()->Clone(), "E3 same"); } (*it).gettherr()->GetXaxis()->SetRange((*it).getlowrange(), (*it).getuprange()); } @@ -892,7 +893,8 @@ TCanvas * DataPainter(int dataindex, int subplotindex) { (*it).getrtherr()->SetAxisRange((*r).lowedge, (*r).upedge); if (!opts.multitheory || (it - datahistos.begin() == 0)) //if in multitheory mode, plot only the first theory - (*it).Draw((TH1F*)(*it).getrtherr()->Clone(), "E3L same"); + //(*it).Draw((TH1F*)(*it).getrtherr()->Clone(), "E3L same"); + (*it).Draw((TH1F*)(*it).getrtherr()->Clone(), "E3 same"); } (*it).getrtherr()->GetXaxis()->SetRange((*it).getlowrange(), (*it).getuprange()); } diff --git a/tools/draw/src/PdfData.cc b/tools/draw/src/PdfData.cc index 034a3ebe5847cb2c94b0d423c686448d56980ecd..6592866d71749add73a8899d0f713742e3a8ef0a 100644 --- a/tools/draw/src/PdfData.cc +++ b/tools/draw/src/PdfData.cc @@ -20,13 +20,18 @@ #include "FileOpener.h" //pdf type -pdftype pdfts[] = {uv, dv, g, Sea, ubar, dbar, s, Rs, c, b, dbarminubar, uvmindv, U, D, Ubar, Dbar, goversea, doveru, dbaroverubar, dvoveruv,rs,photon,SeaOverGlue, photonOverGlue }; +//pdftype pdfts[] = {uv, dv, g, Sea, ubar, dbar, s, Rs, c, b, dbarminubar, uvmindv, U, D, Ubar, Dbar, goversea, doveru, dbaroverubar, dvoveruv,rs,photon,SeaOverGlue, photonOverGlue }; +pdftype pdfts[] = {uv, dv, g, Sea, ubar, dbar, s, sbar, Rs, soversbar, c, b, dbarminubar, uvmindv, U, D, Ubar, Dbar, goversea, doveru, dbaroverubar, dvoveruv,rs,photon,SeaOverGlue, photonOverGlue, uvplusdv, uvplusdvplusSea }; //pdf labels -string pdflab[] = {"u_{V}", "d_{V}", "g", "#Sigma", "#bar{u}", "#bar{d}", "s", "(s+#bar{s})/(#bar{u}+#bar{d})", "c", "b", "#bar{d}-#bar{u}", "d_{V}-u_{V}", "U", "D", "#bar{U}", "#bar{D}", "g/#Sigma", - "d/u", "#bar{d}/#bar{u}", "d_{V}/u_{V}","rs","#gamma","#Sigma/g","#gamma/g"}; +//string pdflab[] = {"u_{V}", "d_{V}", "g", "#Sigma", "#bar{u}", "#bar{d}", "s", "(s+#bar{s})/(#bar{u}+#bar{d})", "c", "b", "#bar{d}-#bar{u}", "d_{V}-u_{V}", "U", "D", "#bar{U}", "#bar{D}", "g/#Sigma", +// "d/u", "#bar{d}/#bar{u}", "d_{V}/u_{V}","rs","#gamma","#Sigma/g","#gamma/g"}; +string pdflab[] = {"u_{V}", "d_{V}", "g", "#Sigma", "#bar{u}", "#bar{d}", "(s+#bar{s})/2", "#bar{s}", "(s+#bar{s})/(#bar{u}+#bar{d})", "s/#bar{s}", "c", "b", "#bar{d}-#bar{u}", "d_{V}-u_{V}", "U", "D", "#bar{U}", "#bar{D}", "g/#Sigma", + "d/u", "#bar{d}/#bar{u}", "d_{V}/u_{V}","rs","#gamma","#Sigma/g","#gamma/g","u_{V}+d_{V}", "u_{V}+d_{V}+2#Sigma"}; //pdf filenames -string pdffil[] = {"uv", "dv", "g", "Sea", "ubar", "dbar", "s", "Rs", "c", "b", "dbar-ubar", "uv-dv", "U", "D", "UBar", "DBar", "goversea", 
"doveru", "dbaroverubar", "dvoveruv","rs","ph","sg","gg" - }; +//string pdffil[] = {"uv", "dv", "g", "Sea", "ubar", "dbar", "s", "Rs", "c", "b", "dbar-ubar", "uv-dv", "U", "D", "UBar", "DBar", "goversea", "doveru", "dbaroverubar", "dvoveruv","rs","ph","sg","gg" +// }; +string pdffil[] = {"uv", "dv", "g", "Sea", "ubar", "dbar", "s", "sbar", "Rs", "soversbar", "c", "b", "dbar-ubar", "uv-dv", "U", "D", "UBar", "DBar", "goversea", "doveru", "dbaroverubar", "dvoveruv","rs","ph","sg","gg","uv+dv","uv+dv+2Sea" + }; vector <pdftype> pdfs(pdfts, pdfts + sizeof(pdfts) / sizeof(pdftype)); vector <string> pdflabels(pdflab, pdflab + sizeof(pdflab) / sizeof(string)); @@ -60,6 +65,7 @@ Pdf::Pdf(string filename) : Q2value(0), NxValues(0), NPdfs(0), Xmin(0), Xmax(0) else if (var == "u_sea") ipdf = ubar; else if (var == "d_sea") ipdf = dbar; else if (var == "str") ipdf = s; + else if (var == "strbar") ipdf = sbar; else if (var == "chm") ipdf = c; else if (var == "bot") ipdf = b; else if (var == "ph") ipdf = photon; @@ -85,11 +91,29 @@ Pdf::Pdf(string filename) : Q2value(0), NxValues(0), NPdfs(0), Xmin(0), Xmax(0) } } + // for backward compatibility: if no strbar, set strbar=-999 + if(std::find(PdfTypes.begin(), PdfTypes.end(), sbar) == PdfTypes.end()) + { + PdfTypes.push_back(sbar); + for (int ix = 0; ix < NxValues; ix++) + { + tablemap[sbar].push_back(-999); + } + } + + //custom pdf types PdfTypes.push_back(dbarminubar); NPdfs++; for (int ix = 0; ix < NxValues; ix++) tablemap[dbarminubar].push_back(tablemap[dbar][ix] - tablemap[ubar][ix]); + PdfTypes.push_back(soversbar); NPdfs++; + for (int ix = 0; ix < NxValues; ix++) + if (tablemap[sbar][ix] != -999) + tablemap[soversbar].push_back(2*tablemap[s][ix]/tablemap[sbar][ix]-1); + else + tablemap[soversbar].push_back(-999); + PdfTypes.push_back(Rs); NPdfs++; for (int ix = 0; ix < NxValues; ix++) if (tablemap[dbar][ix] != 0) @@ -154,6 +178,27 @@ Pdf::Pdf(string filename) : Q2value(0), NxValues(0), NPdfs(0), Xmin(0), Xmax(0) else tablemap[photonOverGlue].push_back(0); + PdfTypes.push_back(uvplusdv); NPdfs++; + for (int ix = 0; ix < NxValues; ix++) + if (tablemap[g][ix] != 0) + tablemap[uvplusdv].push_back(tablemap[dv][ix]+tablemap[uv][ix]); + else + tablemap[uvplusdv].push_back(0); + + PdfTypes.push_back(uvplusdvplusSea); NPdfs++; + for (int ix = 0; ix < NxValues; ix++) + if (tablemap[g][ix] != 0) + tablemap[uvplusdvplusSea].push_back(tablemap[dv][ix]+tablemap[uv][ix]+2.0*tablemap[Sea][ix]); + else + tablemap[uvplusdvplusSea].push_back(0); + + /*PdfTypes.push_back(uvplusdvminSea); NPdfs++; + for (int ix = 0; ix < NxValues; ix++) + if (tablemap[g][ix] != 0) + tablemap[uvplusdvminSea].push_back(tablemap[dv][ix]+tablemap[uv][ix]-2.0*tablemap[Sea][ix]); + else + tablemap[uvplusdvminSea].push_back(0);*/ + } @@ -268,10 +313,10 @@ PdfData::PdfData(string dirname, string label) : model(false), par(false) Central[temppdf.GetQ2()] = temppdf; - //Get Pdf errors if requested - if (!opts.dobands && !outdirs[label].IsProfiled() && !outdirs[label].IsRotated() && !outdirs[label].IsReweighted() && !outdirs[label].IsSingleSet()) - continue; - + //Get Pdf errors if requested + if (!opts.dobands && !outdirs[label].IsProfiled() && !outdirs[label].IsRotated() && !outdirs[label].IsReweighted() && !outdirs[label].IsSingleSet()) + continue; + //Load PDF error sets int iband = 1; if (err == MC) @@ -386,12 +431,12 @@ PdfData::PdfData(string dirname, string label) : model(false), par(false) getline (mcwfile,line); getline (mcwfile, line); getline (mcwfile, line); - 
line.erase(line.begin(),line.begin()+8); + line.erase(line.begin(),line.begin()+8); ndata=atoi( line.c_str() ); getline (mcwfile, line); while (mcwfile >> n >> chi2 >> w) { - mcchi2.push_back(chi2); - mcw.push_back(w); + mcchi2.push_back(chi2); + mcw.push_back(w); } } //Remake central PDF @@ -404,11 +449,11 @@ PdfData::PdfData(string dirname, string label) : model(false), par(false) { vector <double> xi; for (vector <Pdf>::iterator eit = Errors[q2].begin(); eit != Errors[q2].end(); eit++) - xi.push_back((*eit).GetTable(*pit)[ix]); + xi.push_back((*eit).GetTable(*pit)[ix]); double val; - if (outdirs[label].IsReweighted()) + if (outdirs[label].IsReweighted()) val = mean(xi, mcw); - else if (outdirs[label].IsMedian()) + else if (outdirs[label].IsMedian()) val = median(xi); else val = mean(xi); @@ -449,14 +494,14 @@ PdfData::PdfData(string dirname, string label) : model(false), par(false) for (vector <Pdf>::iterator eit = Errors[q2].begin(); eit != Errors[q2].end(); eit++) xi.push_back((*eit).GetTable(*pit)[ix]); - if (outdirs[label].IsReweighted()) + if (outdirs[label].IsReweighted()) val = mean(xi, mcw); else if (outdirs[label].IsMedian()) val = median(xi); else val = mean(xi); - if (outdirs[label].IsReweighted()) + if (outdirs[label].IsReweighted()) eminus = eplus = rms(xi, mcw); else if (outdirs[label].Is68cl()) { @@ -482,30 +527,30 @@ PdfData::PdfData(string dirname, string label) : model(false), par(false) for (vector <Pdf>::iterator eit = Errors[q2].begin(); eit != Errors[q2].end(); eit++) xi.push_back((*eit).GetTable(*pit)[ix]); - if (!outdirs[label].IsAsym()) //symmetrise errors - eplus = eminus = ahessdelta(xi); - else //asymmetric errors - ahessdeltaasym(xi, eplus, eminus); - if (outdirs[label].Scale68()) - { - eplus = eplus/1.645; - eminus = eminus/1.645; - } - } + if (!outdirs[label].IsAsym()) //symmetrise errors + eplus = eminus = ahessdelta(xi); + else //asymmetric errors + ahessdeltaasym(xi, eplus, eminus); + if (outdirs[label].Scale68()) + { + eplus = eplus/1.645; + eminus = eminus/1.645; + } + } else if (err == SymHess) { vector <double> xi; xi.push_back(val); for (vector <Pdf>::iterator eit = Errors[q2].begin(); eit != Errors[q2].end(); eit++) - xi.push_back((*eit).GetTable(*pit)[ix]); + xi.push_back((*eit).GetTable(*pit)[ix]); - eplus = eminus = shessdelta(xi); - if (outdirs[label].Scale68()) - { - eplus = eplus/1.645; - eminus = eminus/1.645; - } - } + eplus = eminus = shessdelta(xi); + if (outdirs[label].Scale68()) + { + eplus = eplus/1.645; + eminus = eminus/1.645; + } + } UpExp[q2].SetPoint(*pit, ix, val+eplus); DownExp[q2].SetPoint(*pit, ix, val-eminus); @@ -521,7 +566,7 @@ PdfData::PdfData(string dirname, string label) : model(false), par(false) for (vector <Pdf>::iterator eit = ModelErrors[q2].begin(); eit != ModelErrors[q2].end(); eit++) xi.push_back((*eit).GetTable(*pit)[ix]); - + double modeplus, modeminus; if (!outdirs[label].IsAsym()) //symmetrise errors modeplus = modeminus = ahessdelta(xi); @@ -568,7 +613,7 @@ PdfData::PdfData(string dirname, string label) : model(false), par(false) profile(dirname, label); if (outdirs[label].IsRotated() ) pdfRotate(dirname, label); - + if (outdirs[label].IsSingleSet() ) pdfSet(dirname,label); @@ -576,7 +621,7 @@ PdfData::PdfData(string dirname, string label) : model(false), par(false) void PdfData::pdfRotate(string dirname, string label) -{ +{ // Extra rotations from rot.dat file string fname = dirname + "/pdf_rotation.dat"; ifstream f(fname.c_str()); @@ -594,18 +639,18 @@ void PdfData::pdfRotate(string dirname, string label) 
int N; iss >> N; int idx1 = 0; - while ( getline (f,line) ) + while ( getline (f,line) ) { - vector <double> aline; - istringstream iss(line); - int idx2; - iss >> idx2; - for ( int i = 0; i<N; i++) { - double val; - iss >> val; - aline.push_back(val); - } - rotation.push_back(aline); + vector <double> aline; + istringstream iss(line); + int idx2; + iss >> idx2; + for ( int i = 0; i<N; i++) { + double val; + iss >> val; + aline.push_back(val); + } + rotation.push_back(aline); } f.close(); @@ -617,47 +662,47 @@ void PdfData::pdfRotate(string dirname, string label) for ( map<float, Pdf>::iterator pdfit = Central.begin(); pdfit != Central.end(); pdfit++) { float q2 = pdfit->first; Pdf Cent = pdfit->second; - + // loop over pdf types for (vector <pdftype>::iterator pit = pdfs.begin(); pit != pdfs.end(); pit++) { //Loop on x points for (int ix = 0; ix < Cent.GetNx(); ix++) - { - double val = Cent.GetTable(*pit)[ix]; - double corsum = 0; - double eminus = 0; // also errors - double eplus = 0; - - // For now CT10 only: - for ( int id=0; id<N; id++) { - Pdf Up = Errors[q2].at(2*(id)); - Pdf Dn = Errors[q2].at(2*(id)+1); - double plus = Up.GetTable(*pit)[ix] - val; - double minus = Dn.GetTable(*pit)[ix] - val; - - - corsum += 0.5*(plus-minus)*rotation[iRotation][id]; - - // corsum += 0.5*(plus-minus)*rotation[iRotation][id]; - } - - Cent.SetPoint(*pit, ix, val+corsum); - Cent.SetErrUp(*pit, ix, eplus); - Cent.SetErrDn(*pit, ix, eminus); - - - Up[q2].SetPoint(*pit, ix, val+corsum+eplus); - Down[q2].SetPoint(*pit, ix, val+corsum-eminus); - } + { + double val = Cent.GetTable(*pit)[ix]; + double corsum = 0; + double eminus = 0; // also errors + double eplus = 0; + + // For now CT10 only: + for ( int id=0; id<N; id++) { + Pdf Up = Errors[q2].at(2*(id)); + Pdf Dn = Errors[q2].at(2*(id)+1); + double plus = Up.GetTable(*pit)[ix] - val; + double minus = Dn.GetTable(*pit)[ix] - val; + + + corsum += 0.5*(plus-minus)*rotation[iRotation][id]; + + // corsum += 0.5*(plus-minus)*rotation[iRotation][id]; + } + + Cent.SetPoint(*pit, ix, val+corsum); + Cent.SetErrUp(*pit, ix, eplus); + Cent.SetErrDn(*pit, ix, eminus); + + + Up[q2].SetPoint(*pit, ix, val+corsum+eplus); + Down[q2].SetPoint(*pit, ix, val+corsum-eminus); + } } pdfit->second = Cent; } - + } void PdfData::pdfSet(string dirname, string label) -{ +{ int id = outdirs[label].pdfSet()-1; @@ -665,28 +710,28 @@ void PdfData::pdfSet(string dirname, string label) float q2 = pdfit->first; Pdf Cent = pdfit->second; Pdf Pset = Errors[q2].at(id); - + double eminus = 0; // also errors - double eplus = 0; + double eplus = 0; // loop over pdf types for (vector <pdftype>::iterator pit = pdfs.begin(); pit != pdfs.end(); pit++) { //Loop on x points for (int ix = 0; ix < Cent.GetNx(); ix++) - { - double val = Pset.GetTable(*pit)[ix]; - - Cent.SetPoint(*pit, ix, val); - Cent.SetErrUp(*pit, ix, eplus); - Cent.SetErrDn(*pit, ix, eminus); - - Up[q2].SetPoint(*pit, ix, val+eplus); - Down[q2].SetPoint(*pit, ix, val-eminus); - } + { + double val = Pset.GetTable(*pit)[ix]; + + Cent.SetPoint(*pit, ix, val); + Cent.SetErrUp(*pit, ix, eplus); + Cent.SetErrDn(*pit, ix, eminus); + + Up[q2].SetPoint(*pit, ix, val+eplus); + Down[q2].SetPoint(*pit, ix, val-eminus); + } } pdfit->second = Cent; } - + } void PdfData::profile(string dirname, string label) @@ -701,7 +746,7 @@ void PdfData::profile(string dirname, string label) // cout << "File " << fname << " is empty (or io error)" << endl; // return; // } - + InFileOpener_t fo; fo.Add(dirname + "/Results.txt"); fo.Add(dirname + 
"/Results_0.txt"); @@ -715,7 +760,7 @@ void PdfData::profile(string dirname, string label) { getline(f, line); istringstream iss(line); - iss >> buffer; + iss >> buffer; } string systlabel, dummy; float systindex, value, error; @@ -723,7 +768,7 @@ void PdfData::profile(string dirname, string label) while (getline(f, line)) { istringstream iss(line); - iss >> systindex >> systlabel >> value >> dummy >> error; + iss >> systindex >> systlabel >> value >> dummy >> error; if ( systlabel.find("PDF_nuisance_param") == 0 ) { ++counter; pdfshift shift; @@ -747,7 +792,7 @@ void PdfData::profile(string dirname, string label) int N; iss >> N; int idx1 = 0; - while ( getline (ff,line) ) + while ( getline (ff,line) ) { vector <double> aline; istringstream iss(line); @@ -766,7 +811,7 @@ void PdfData::profile(string dirname, string label) for ( map<float, Pdf>::iterator pdfit = Central.begin(); pdfit != Central.end(); pdfit++) { float q2 = pdfit->first; Pdf Cent = pdfit->second; - + // loop over pdf types for (vector <pdftype>::iterator pit = pdfs.begin(); pit != pdfs.end(); pit++) { @@ -778,11 +823,11 @@ void PdfData::profile(string dirname, string label) double t2 = Down[q2].GetTable(*pit)[ix]; double corsum = 0; double eminus = 0; // also errors - double eplus = 0; + double eplus = 0; vector <double> xi; xi.push_back(val); - for ( vector<pdfshift>::iterator shift = pdfshifts.begin(); shift != pdfshifts.end(); shift++) { + for ( vector<pdfshift>::iterator shift = pdfshifts.begin(); shift != pdfshifts.end(); shift++) { int id = shift->id; double valShift = shift->val; @@ -795,10 +840,10 @@ void PdfData::profile(string dirname, string label) double plus = Up.GetTable(*pit)[ix] - val; double minus = Dn.GetTable(*pit)[ix] - val; - + double cor = 0.5*(plus - minus)*valShift + 0.5*(plus+minus)*valShift*valShift; - + xi.push_back(plus*errShift+val); xi.push_back(minus*errShift+val); @@ -807,11 +852,11 @@ void PdfData::profile(string dirname, string label) else if (err == SymHess) { Pdf Up = Errors[q2].at(id-1); double plus = Up.GetTable(*pit)[ix] - val; - double cor = plus*valShift; + double cor = plus*valShift; xi.push_back(plus*errShift+val); corsum += cor; - } + } } if ( err == AsymHess ) { @@ -819,17 +864,17 @@ void PdfData::profile(string dirname, string label) if (!outdirs[label].IsAsym()) //symmetrise errors eplus = eminus = ahessdelta(xi, cor_matrix); else //asymmetric errors - ahessdeltaasym(xi, eplus, eminus, cor_matrix); + ahessdeltaasym(xi, eplus, eminus, cor_matrix); } else if (err == SymHess) { eplus = eminus = shessdelta(xi, cor_matrix ); - } - if (outdirs[label].Scale68()) - { - eplus = eplus/1.645; - eminus = eminus/1.645; - } + } + if (outdirs[label].Scale68()) + { + eplus = eplus/1.645; + eminus = eminus/1.645; + } Cent.SetPoint(*pit, ix, val+corsum); Cent.SetErrUp(*pit, ix, eplus); Cent.SetErrDn(*pit, ix, eminus); diff --git a/tools/install-xfitter b/tools/install-xfitter index 6e92b54ca16d570f8612487ef39af9c503ab71ae..8571e26ef66ed2ee8c1e34a49029ba5b784a2ffb 100755 --- a/tools/install-xfitter +++ b/tools/install-xfitter @@ -5,7 +5,7 @@ ## Programs versions lhapdfver=6.2.1 hoppetver=1.2.0 -applgridver=1.4.70 +applgridver=1.5.9 qcdnumver=17-01-14 apfelver=3.0.0 melaver=2.0.1 @@ -66,14 +66,14 @@ if [[ $mode != "deps" ]] version=$mode else if [[ ! 
-e version ]] - then - echo - echo "could not find file \"version\"" - echo "cannot determine current version" - echo "run first:" - echo "$0 <version>" - echo - exit + then + echo + echo "could not find file \"version\"" + echo "cannot determine current version" + echo "run first:" + echo "$0 <version>" + echo + exit fi version=`cat version` echo "reinstalling dependencies for xFitter version $version" @@ -91,31 +91,31 @@ if [[ $version != "master" ]] exist=0 for ver in `` do - if [[ $version == $ver ]] - then - exist=1 - fi + if [[ $version == $ver ]] + then + exist=1 + fi done vers=`git ls-remote --tags https://gitlab.cern.ch/fitters/xfitter.git | sed 's|/| |g; s|\^| |' | awk '{print $4}' | uniq` - + for ver in $vers do - if [[ $version == $ver ]] - then - exist=1 - fi + if [[ $version == $ver ]] + then + exist=1 + fi done if [[ $exist == 0 ]] then - echo - echo "version $version not found, available versions:" - echo "" - echo "$vers" - echo "master" - echo - exit + echo + echo "version $version not found, available versions:" + echo "" + echo "$vers" + echo "master" + echo + exit fi fi @@ -143,22 +143,22 @@ then which sw_vers >& /dev/null if [[ $? == 0 ]] then - echo "Detected Mac OS X system" - MODE=local + echo "Detected Mac OS X system" + MODE=local else - SYS=$(echo `lsb_release -i |cut -d: -f2`) - ver=$(echo `lsb_release -r |cut -d: -f2`) - if [[ $SYS == Scientific* && $ver == 6.* ]] - then - echo "Detected SL6 Linux distribution" - MODE=cern - gccv=4.9 - echo "Using gcc version = ${gccv}" - os=slc6 - arch=x86_64 - rootversion=5.34.36 - boostver=1.53.0 - pyth=2.7 + SYS=$(echo `lsb_release -i |cut -d: -f2`) + ver=$(echo `lsb_release -r |cut -d: -f2`) + if [[ $SYS == Scientific* && $ver == 6.* ]] + then + echo "Detected SL6 Linux distribution" + MODE=cern + gccv=4.9 + echo "Using gcc version = ${gccv}" + os=slc6 + arch=x86_64 + rootversion=5.34.36 + boostver=1.53.0 + pyth=2.7 elif [[ $SYS == CentOS* && $ver == 7.* ]] then echo "Detected CentOS7 Linux distribution" @@ -169,46 +169,46 @@ then rootversion=6.06.08 boostver=1.53.0 pyth=2.7 - elif [[ $SYS == Scientific* && $ver == 5.* ]] - then - echo "Detected SL5 Linux distribution" - MODE=cern - gccv=4.3 - os=slc5 - arch=x86_64 - rootversion=5.34.00 - boostver=1.48.0 - python=2.6.5 - pyth=2.6 - elif [[ $SYS == "Ubuntu" ]] - then - echo "Detected Ubuntu distribution" - MODE=local - else - echo "Sorry, I don't recognize your system:" - echo "$SYS $ver" - echo "I will assume you have root installed in your system," + elif [[ $SYS == Scientific* && $ver == 5.* ]] + then + echo "Detected SL5 Linux distribution" + MODE=cern + gccv=4.3 + os=slc5 + arch=x86_64 + rootversion=5.34.00 + boostver=1.48.0 + python=2.6.5 + pyth=2.6 + elif [[ $SYS == "Ubuntu" ]] + then + echo "Detected Ubuntu distribution" + MODE=local + else + echo "Sorry, I don't recognize your system:" + echo "$SYS $ver" + echo "I will assume you have root installed in your system," echo "gcc version >= 4.3, python, boost libraries, and wget" - echo "If this doesn't work, and you have /cvmfs/sft.cern.ch mounted" - echo "edit me (I am $0) and try to setup appropriate settings" - echo "in the section: manual configuration" - echo - MODE="local" - fi + echo "If this doesn't work, and you have /cvmfs/sft.cern.ch mounted" + echo "edit me (I am $0) and try to setup appropriate settings" + echo "in the section: manual configuration" + echo + MODE="local" + fi fi fi if [[ $MODE == "cern" ]] then # if [[ ! -e /afs/cern.ch ]] if [[ ! 
-e /cvmfs/sft.cern.ch ]] - then - echo - echo "/cvmfs/sft.cern.ch not mounted, forcing local MODE" - echo "Fasten you seat belt" - echo "I hope you have root, gcc >= 4.8, python and boost libraries" - echo "all installed in your system" - echo - MODE="local" + then + echo + echo "/cvmfs/sft.cern.ch not mounted, forcing local MODE" + echo "Fasten you seat belt" + echo "I hope you have root, gcc >= 4.8, python and boost libraries" + echo "all installed in your system" + echo + MODE="local" fi fi @@ -222,13 +222,13 @@ then if [[ $os == slc5 ]] then echo "LEGACY SL5 ! using afs" - PYTHONBIN=/afs/cern.ch/sw/lcg/external/Python/${python}/${arch}-${os}-${compiler}-opt/bin - PATH=$PYTHONBIN:$PATH - export BOOST=--with-boost=/afs/cern.ch/sw/lcg/external/Boost/${boostver}_python${pyth}/${arch}-${os}-${compiler}-opt + PYTHONBIN=/afs/cern.ch/sw/lcg/external/Python/${python}/${arch}-${os}-${compiler}-opt/bin + PATH=$PYTHONBIN:$PATH + export BOOST=--with-boost=/afs/cern.ch/sw/lcg/external/Boost/${boostver}_python${pyth}/${arch}-${os}-${compiler}-opt fi if [[ $os == slc6 ]] then - export BOOST=--with-boost=/cvmfs/sft.cern.ch/lcg/external/Boost/${boostver}_python${pyth}/${arch}-${os}-${compiler}-opt + export BOOST=--with-boost=/cvmfs/sft.cern.ch/lcg/external/Boost/${boostver}_python${pyth}/${arch}-${os}-${compiler}-opt fi fi @@ -255,13 +255,13 @@ else which curl >& /dev/null if [[ $? == 0 ]] then - http=curl + http=curl else - echo "Error, wget or curl not found" - exit + echo "Error, wget or curl not found" + exit fi fi - + #directory: CURRENTDIR=`pwd` @@ -278,7 +278,7 @@ installDeps=1 if [[ $installDeps == 0 ]] then echo "Skip installation of dependences" -else +else #Make all dependencies rm -rf deps >& /dev/null mkdir deps @@ -287,33 +287,33 @@ else echo "Installing LHAPDF $lhapdfver..." if (( `echo $lhapdfver |cut -d. -f1` >= 6 )) then - lhapdf="LHAPDF" - withboost=$BOOST + lhapdf="LHAPDF" + withboost=$BOOST else - lhapdf="lhapdf" + lhapdf="lhapdf" fi if [[ $http == "curl" ]] then -# curl https://www.hepforge.org/archive/lhapdf/${lhapdf}-${lhapdfver}.tar.gz > ${lhapdf}-${lhapdfver}.tar.gz 2>> $CURRENTDIR/install.log - curl https://lhapdf.hepforge.org/downloads/${lhapdf}-${lhapdfver}.tar.gz > ${lhapdf}-${lhapdfver}.tar.gz 2>> $CURRENTDIR/install.log +# curl https://www.hepforge.org/archive/lhapdf/${lhapdf}-${lhapdfver}.tar.gz > ${lhapdf}-${lhapdfver}.tar.gz 2>> $CURRENTDIR/install.log + curl https://lhapdf.hepforge.org/downloads/${lhapdf}-${lhapdfver}.tar.gz > ${lhapdf}-${lhapdfver}.tar.gz 2>> $CURRENTDIR/install.log else -# wget https://www.hepforge.org/archive/lhapdf/${lhapdf}-${lhapdfver}.tar.gz >> $CURRENTDIR/install.log 2>&1 - wget https://lhapdf.hepforge.org/downloads/${lhapdf}-${lhapdfver}.tar.gz >> $CURRENTDIR/install.log 2>&1 +# wget https://www.hepforge.org/archive/lhapdf/${lhapdf}-${lhapdfver}.tar.gz >> $CURRENTDIR/install.log 2>&1 + wget https://lhapdf.hepforge.org/downloads/${lhapdf}-${lhapdfver}.tar.gz >> $CURRENTDIR/install.log 2>&1 fi tar xfz ${lhapdf}-${lhapdfver}.tar.gz >> $CURRENTDIR/install.log 2>&1 - cd ${lhapdf}-${lhapdfver} + cd ${lhapdf}-${lhapdfver} ./configure --prefix=$CURRENTDIR/deps/lhapdf >> $CURRENTDIR/install.log 2>&1 if [[ $? != 0 ]] then - echo "Error, check install.log for details" - exit + echo "Error, check install.log for details" + exit fi make -j 9 install >> $CURRENTDIR/install.log 2>&1 if [[ $? 
!= 0 ]] then - echo "Error, check install.log for details" - exit + echo "Error, check install.log for details" + exit fi cd - >& /dev/null @@ -321,23 +321,23 @@ else echo "Installing HOPPET $hoppetver..." if [[ $http == "curl" ]] then - curl http://hoppet.hepforge.org/downloads/hoppet-${hoppetver}.tgz > hoppet-${hoppetver}.tgz 2>> $CURRENTDIR/install.log + curl http://hoppet.hepforge.org/downloads/hoppet-${hoppetver}.tgz > hoppet-${hoppetver}.tgz 2>> $CURRENTDIR/install.log else - wget http://hoppet.hepforge.org/downloads/hoppet-${hoppetver}.tgz >> $CURRENTDIR/install.log 2>&1 + wget http://hoppet.hepforge.org/downloads/hoppet-${hoppetver}.tgz >> $CURRENTDIR/install.log 2>&1 fi tar xfz hoppet-${hoppetver}.tgz >> $CURRENTDIR/install.log 2>&1 cd hoppet-${hoppetver} ./configure --prefix=$CURRENTDIR/deps/hoppet >> $CURRENTDIR/install.log 2>&1 if [[ $? != 0 ]] then - echo "Error, check install.log for details" - exit + echo "Error, check install.log for details" + exit fi make -j 9 install >> $CURRENTDIR/install.log 2>&1 if [[ $? != 0 ]] then - echo "Error, check install.log for details" -# exit + echo "Error, check install.log for details" +# exit fi cd - >& /dev/null @@ -353,31 +353,31 @@ else echo "Installing APPLGRID $applgridver..." if [[ $http == "curl" ]] then -# curl https://www.hepforge.org/archive/applgrid/applgrid-$applgridver.tgz > applgrid-$applgridver.tgz 2>> $CURRENTDIR/install.log - curl https://applgrid.hepforge.org/downloads/applgrid-$applgridver.tgz > applgrid-$applgridver.tgz 2>> $CURRENTDIR/install.log +# curl https://www.hepforge.org/archive/applgrid/applgrid-$applgridver.tgz > applgrid-$applgridver.tgz 2>> $CURRENTDIR/install.log + curl https://applgrid.hepforge.org/downloads/applgrid-$applgridver.tgz > applgrid-$applgridver.tgz 2>> $CURRENTDIR/install.log else -# wget https://www.hepforge.org/archive/applgrid/applgrid-$applgridver.tgz >> $CURRENTDIR/install.log 2>&1 - wget https://applgrid.hepforge.org/downloads/applgrid-$applgridver.tgz >> $CURRENTDIR/install.log 2>&1 +# wget https://www.hepforge.org/archive/applgrid/applgrid-$applgridver.tgz >> $CURRENTDIR/install.log 2>&1 + wget https://applgrid.hepforge.org/downloads/applgrid-$applgridver.tgz >> $CURRENTDIR/install.log 2>&1 fi tar xfz applgrid-$applgridver.tgz >> $CURRENTDIR/install.log 2>&1 cd applgrid-$applgridver ./configure --prefix=$CURRENTDIR/deps/applgrid >> $CURRENTDIR/install.log 2>&1 if [[ $? != 0 ]] then - echo "Error, check install.log for details" - exit + echo "Error, check install.log for details" + exit fi make >> $CURRENTDIR/install.log 2>&1 if [[ $? != 0 ]] then - echo "Error, check install.log for details" - exit + echo "Error, check install.log for details" + exit fi make -j 9 install >> $CURRENTDIR/install.log 2>&1 if [[ $? != 0 ]] then - echo "Error, check install.log for details" - exit + echo "Error, check install.log for details" + exit fi cd - >& /dev/null @@ -387,9 +387,9 @@ else echo "Installing APFEL $apfelver..." 
if [[ $http == "curl" ]] then - curl https://github.com/scarrazza/apfel/archive/${apfelver}.tar.gz > ${apfelver}.tar.gz 2 >> $CURRENTDIR/install.log + curl https://github.com/scarrazza/apfel/archive/${apfelver}.tar.gz > ${apfelver}.tar.gz 2 >> $CURRENTDIR/install.log else - wget https://github.com/scarrazza/apfel/archive/${apfelver}.tar.gz >> $CURRENTDIR/install.log 2>&1 + wget https://github.com/scarrazza/apfel/archive/${apfelver}.tar.gz >> $CURRENTDIR/install.log 2>&1 fi mv ${apfelver}.tar.gz apfel-${apfelver}.tar.gz tar xfvz apfel-${apfelver}.tar.gz >> $CURRENTDIR/install.log 2>&1 @@ -398,31 +398,39 @@ else if [[ $? != 0 ]] then - echo "Error, check install.log for details" - exit + echo "Error, check install.log for details" + exit fi make -j 9 install >> $CURRENTDIR/install.log 2>&1 if [[ $? != 0 ]] then - echo "Error, check install.log for details" - exit + echo "Error, check install.log for details" + exit fi cd - >& /dev/null # setup paths for apfel: export PATH=$CURRENTDIR/deps/apfel/bin/:$PATH #apfelgrid - lhapdf get NNPDF30_nlo_as_0118 - echo "Installing APFELgrid $apfelgridver..." + if [ -d /cvmfs ] + then + lhapdf get NNPDF30_nlo_as_0118 >> $CURRENTDIR/install.log 2>&1 + else + wget http://www.hepforge.org/archive/lhapdf/pdfsets/6.2/NNPDF30_nlo_as_0118.tar.gz >> $CURRENTDIR/install.log 2>&1 + tar xvzpf NNPDF30_nlo_as_0118.tar.gz >> $CURRENTDIR/install.log 2>&1 + mv NNPDF30_nlo_as_0118 `lhapdf-config --datadir` >> $CURRENTDIR/install.log 2>&1 + rm NNPDF30_nlo_as_0118.tar.gz >> $CURRENTDIR/install.log 2>&1 + fi + echo "Installing APFELgrid $apfelgridver..." # tmp solution is to use fork @zenaiev apfelgridver=1.0.5 if [[ $http == "curl" ]] then -# curl https://github.com/nhartland/APFELgrid/archive/v${apfelgridver}.tar.gz > v${apfelgridver}.tar.gz 2 >> $CURRENTDIR/install.log - curl https://github.com/zenaiev/APFELgrid/archive/v${apfelgridver}.tar.gz > v${apfelgridver}.tar.gz 2 >> $CURRENTDIR/install.log +# curl https://github.com/nhartland/APFELgrid/archive/v${apfelgridver}.tar.gz > v${apfelgridver}.tar.gz 2 >> $CURRENTDIR/install.log + curl https://github.com/zenaiev/APFELgrid/archive/v${apfelgridver}.tar.gz > v${apfelgridver}.tar.gz 2 >> $CURRENTDIR/install.log else -# wget https://github.com/nhartland/APFELgrid/archive/v${apfelgridver}.tar.gz >> $CURRENTDIR/install.log 2>&1 - wget https://github.com/zenaiev/APFELgrid/archive/v${apfelgridver}.tar.gz >> $CURRENTDIR/install.log 2>&1 +# wget https://github.com/nhartland/APFELgrid/archive/v${apfelgridver}.tar.gz >> $CURRENTDIR/install.log 2>&1 + wget https://github.com/zenaiev/APFELgrid/archive/v${apfelgridver}.tar.gz >> $CURRENTDIR/install.log 2>&1 fi mv v${apfelgridver}.tar.gz APFELgrid-${apfelgridver}.tar.gz tar xfvz APFELgrid-${apfelgridver}.tar.gz >> $CURRENTDIR/install.log 2>&1 @@ -430,8 +438,8 @@ else ./setup.sh >> $CURRENTDIR/install.log 2>&1 if [[ $? 
!= 1 ]] then - echo "Error, check install.log for details" - exit + echo "Error, check install.log for details" + exit fi cd - >& /dev/null @@ -440,9 +448,9 @@ else if [[ $http == "curl" ]] then - curl https://github.com/vbertone/MELA/archive/${melaver}.tar.gz > ${melaver}.tar.gz 2 >> $CURRENTDIR/install.log + curl https://github.com/vbertone/MELA/archive/${melaver}.tar.gz > ${melaver}.tar.gz 2 >> $CURRENTDIR/install.log else - wget https://github.com/vbertone/MELA/archive/${melaver}.tar.gz >> $CURRENTDIR/install.log 2>&1 + wget https://github.com/vbertone/MELA/archive/${melaver}.tar.gz >> $CURRENTDIR/install.log 2>&1 fi mv ${melaver}.tar.gz MELA-${melaver}.tar.gz tar xfvz MELA-${melaver}.tar.gz >> $CURRENTDIR/install.log 2>&1 @@ -451,14 +459,14 @@ else if [[ $? != 0 ]] then - echo "Error, check install.log for details" - exit - fi + echo "Error, check install.log for details" + exit + fi make -j 9 install >> $CURRENTDIR/install.log 2>&1 if [[ $? != 0 ]] then - echo "Error, check install.log for details" - exit + echo "Error, check install.log for details" + exit fi cd - >& /dev/null # setup paths for mela: @@ -469,26 +477,26 @@ else qcdnumstripver=`echo $qcdnumver |sed "s/-//g"` if [[ $http == "curl" ]] then - curl http://www.nikhef.nl/user/h24/qcdnum-files/download/qcdnum${qcdnumstripver}.tar.gz > qcdnum${qcdnumstripver}.tar.gz 2>> $CURRENTDIR/install.log + curl http://www.nikhef.nl/user/h24/qcdnum-files/download/qcdnum${qcdnumstripver}.tar.gz > qcdnum${qcdnumstripver}.tar.gz 2>> $CURRENTDIR/install.log else - wget http://www.nikhef.nl/user/h24/qcdnum-files/download/qcdnum${qcdnumstripver}.tar.gz >> $CURRENTDIR/install.log 2>&1 + wget http://www.nikhef.nl/user/h24/qcdnum-files/download/qcdnum${qcdnumstripver}.tar.gz >> $CURRENTDIR/install.log 2>&1 fi tar xfz qcdnum${qcdnumstripver}.tar.gz >> $CURRENTDIR/install.log 2>&1 cd qcdnum-${qcdnumver} ./configure --prefix=$CURRENTDIR/deps/qcdnum >> $CURRENTDIR/install.log 2>&1 export PATH=$CURRENTDIR/deps/qcdnum/bin/:$PATH - + if [[ $? != 0 ]] then - echo "Error, check install.log for details" - exit - fi + echo "Error, check install.log for details" + exit + fi make -j 9 install >> $CURRENTDIR/install.log 2>&1 if [[ $? != 0 ]] then - echo "Error, check install.log for details" - exit + echo "Error, check install.log for details" + exit fi cd - >& /dev/null fi @@ -500,26 +508,26 @@ if [[ $mode != "deps" ]] then echo "Installing xFitter $version..." 
- + if [[ $version == "master" ]] then - git clone https://gitlab.cern.ch/fitters/xfitter.git >> $CURRENTDIR/install.log 2>&1 - mv xfitter xfitter-master + git clone https://gitlab.cern.ch/fitters/xfitter.git >> $CURRENTDIR/install.log 2>&1 + mv xfitter xfitter-master else - if [[ $http == "curl" ]] - then -# curl https://gitlab.cern.ch/fitters/xfitter/repository/archive.tar.gz\?ref=$version > xfitter-$version.tar.gz -# curl https://www.hepforge.org/archive/xfitter/tar_files/xfitter-$version.tar.gz > xfitter-$version.tar.gz - curl https://xfitter.hepforge.org/downloads/tar_files/xfitter-$version.tar.gz > xfitter-$version.tar.gz - else -# wget https://gitlab.cern.ch/fitters/xfitter/repository/archive.tar.gz\?ref=$version >> $CURRENTDIR/install.log 2>&1 -# mv archive.tar.gz\?ref=$version xfitter-$version.tar.gz -# wget https://www.hepforge.org/archive/xfitter/tar_files/xfitter-$version.tar.gz >> $CURRENTDIR/install.log 2>&1 - wget https://xfitter.hepforge.org/downloads/tar_files/xfitter-$version.tar.gz >> $CURRENTDIR/install.log 2>&1 - fi + if [[ $http == "curl" ]] + then +# curl https://gitlab.cern.ch/fitters/xfitter/repository/archive.tar.gz\?ref=$version > xfitter-$version.tar.gz +# curl https://www.hepforge.org/archive/xfitter/tar_files/xfitter-$version.tar.gz > xfitter-$version.tar.gz + curl https://xfitter.hepforge.org/downloads/tar_files/xfitter-$version.tar.gz > xfitter-$version.tar.gz + else +# wget https://gitlab.cern.ch/fitters/xfitter/repository/archive.tar.gz\?ref=$version >> $CURRENTDIR/install.log 2>&1 +# mv archive.tar.gz\?ref=$version xfitter-$version.tar.gz +# wget https://www.hepforge.org/archive/xfitter/tar_files/xfitter-$version.tar.gz >> $CURRENTDIR/install.log 2>&1 + wget https://xfitter.hepforge.org/downloads/tar_files/xfitter-$version.tar.gz >> $CURRENTDIR/install.log 2>&1 + fi # unpack nicely: - rm -fr xfitter-${version} - mkdir xfitter-${version} ; tar xfz xfitter-${version}.tar.gz -C xfitter-${version} --strip-components 1 + rm -fr xfitter-${version} + mkdir xfitter-${version} ; tar xfz xfitter-${version}.tar.gz -C xfitter-${version} --strip-components 1 fi else make -C xfitter-${version} clean >> $CURRENTDIR/install.log 2>&1 @@ -610,10 +618,10 @@ if [[ ! -e run ]] then mkdir -p run cp xfitter-${version}/steering.txt \ - xfitter-${version}/minuit.in.txt \ - xfitter-${version}/ewparam.txt \ - xfitter-${version}/parameters.yaml \ - run + xfitter-${version}/minuit.in.txt \ + xfitter-${version}/ewparam.txt \ + xfitter-${version}/parameters.yaml \ + run rsync -a --exclude=".*" xfitter-${version}/datafiles run/ rsync -a --exclude=".*" xfitter-${version}/theoryfiles run/ else @@ -640,7 +648,7 @@ then echo "-----------------------------------------------------------" cat quickstart-readme.txt echo "-----------------------------------------------------------" - echo + echo echo "To read again these instructions, see quickstart-readme.txt" echo "Have fun!" fi
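
The hunks above touch several self-contained shell patterns; the sketches that follow restate a few of them outside the diff, purely as illustration under stated assumptions, not as part of the patch. The first is the version check: the script accepts either "master" or a released tag and verifies the latter against the tags of the remote repository. A minimal standalone sketch of that check (the sed expression differs slightly from the one in the diff):

#!/usr/bin/env bash
# Sketch: does the requested version exist as a tag of the xfitter repository?
version="$1"
# List the remote tags, keep only the tag names, drop the peeled "^{}" entries.
vers=$(git ls-remote --tags https://gitlab.cern.ch/fitters/xfitter.git \
        | sed 's|.*refs/tags/||; s|\^{}$||' | sort -u)
exist=0
for ver in $vers
do
    if [[ $version == "$ver" ]]
    then
        exist=1
    fi
done
if [[ $exist == 0 && $version != "master" ]]
then
    echo "version $version not found, available versions:"
    echo "$vers"
    echo "master"
    exit 1
fi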
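
The distribution test (sw_vers for Mac OS X, lsb_release for Linux) decides between the "cern" mode, which relies on /cvmfs/sft.cern.ch and the LCG releases, and the "local" mode, which assumes a system-wide tool chain. A compressed restatement of that decision, with the per-release compiler and ROOT version settings omitted, could look like this sketch:

# Sketch: pick the installation mode from the detected platform.
if which sw_vers >& /dev/null
then
    MODE=local                                   # Mac OS X
else
    SYS=$(lsb_release -i | cut -d: -f2 | xargs)  # distributor, trimmed
    ver=$(lsb_release -r | cut -d: -f2 | xargs)  # release, trimmed
    if [[ $SYS == Scientific* && ( $ver == 5.* || $ver == 6.* ) ]] ||
       [[ $SYS == CentOS* && $ver == 7.* ]]
    then
        MODE=cern                                # LCG software available on /cvmfs
    else
        MODE=local                               # Ubuntu and anything unrecognized
    fi
fi
echo "MODE=$MODE"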
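
Many hunks repeat the same download pattern, branching on whether curl or wget was found earlier in the script and appending all output to install.log. A small wrapper captures that pattern; this is a sketch only, and the function name "fetch" is hypothetical, it does not appear in the script:

# Sketch: choose the download tool once, then reuse it for every tarball.
if which wget >& /dev/null
then
    http=wget
elif which curl >& /dev/null
then
    http=curl
else
    echo "Error, wget or curl not found"
    exit 1
fi

CURRENTDIR=`pwd`

fetch ()
{
    # $1 = URL, $2 = local file name; all output goes to install.log.
    # Note: "2>>" must be written without a space to redirect stderr.
    if [[ $http == "curl" ]]
    then
        curl -L "$1" > "$2" 2>> $CURRENTDIR/install.log   # -L follows redirects (github archives)
    else
        wget -O "$2" "$1" >> $CURRENTDIR/install.log 2>&1
    fi
}

# e.g. fetch https://lhapdf.hepforge.org/downloads/LHAPDF-${lhapdfver}.tar.gz LHAPDF-${lhapdfver}.tar.gz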
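
The configure / make -j 9 install sequence, together with the "check the exit code and point to install.log" error handling, is repeated for LHAPDF, HOPPET, APPLGRID, APFEL, MELA and QCDNUM. A generic helper with the same behaviour is sketched below; the name build_dep is hypothetical, and APPLGRID additionally runs a plain make before make install, which the sketch does not reproduce:

# Sketch: configure and install one dependency under $CURRENTDIR/deps,
# aborting with a pointer to install.log on the first failure.
build_dep ()
{
    # $1 = unpacked source directory, $2 = installation prefix under deps/
    cd "$1"
    ./configure --prefix=$CURRENTDIR/deps/$2 >> $CURRENTDIR/install.log 2>&1
    if [[ $? != 0 ]]
    then
        echo "Error, check install.log for details"
        exit 1
    fi
    make -j 9 install >> $CURRENTDIR/install.log 2>&1
    if [[ $? != 0 ]]
    then
        echo "Error, check install.log for details"
        exit 1
    fi
    cd - >& /dev/null
}

# e.g. build_dep hoppet-${hoppetver} hoppet
#      build_dep qcdnum-${qcdnumver} qcdnum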
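
The one functional change in this part of the patch is the new way of fetching the NNPDF30_nlo_as_0118 set needed by APFELgrid: lhapdf get is used when /cvmfs is mounted, otherwise the tarball is downloaded from the hepforge archive and unpacked into the directory reported by lhapdf-config --datadir. Stripped of the logging redirections, the added block amounts to:

# Fallback installation of a PDF set when "lhapdf get" is not an option.
pdfset=NNPDF30_nlo_as_0118
if [ -d /cvmfs ]
then
    lhapdf get $pdfset
else
    wget http://www.hepforge.org/archive/lhapdf/pdfsets/6.2/${pdfset}.tar.gz
    tar xvzpf ${pdfset}.tar.gz
    mv $pdfset `lhapdf-config --datadir`
    rm ${pdfset}.tar.gz
fi

Placing the set in the LHAPDF data directory makes it visible to the freshly built library without extra environment settings.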
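
After the build, the script seeds a run/ directory with the steering files and with the data and theory tables shipped in the source tree. Reproducing that last step by hand, for instance to set up a second working directory, is a matter of:

# Sketch: populate a fresh working directory next to the build.
version=master                       # or the released tag that was installed
mkdir -p run
cp xfitter-${version}/steering.txt \
   xfitter-${version}/minuit.in.txt \
   xfitter-${version}/ewparam.txt \
   xfitter-${version}/parameters.yaml \
   run
# Copy the data and theory files, skipping hidden (version-control) entries.
rsync -a --exclude=".*" xfitter-${version}/datafiles   run/
rsync -a --exclude=".*" xfitter-${version}/theoryfiles run/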