Commit 05a8e6b1 authored by Charles Leggett

merge master to atlas/v27r1

parents 5c683a78 d7ecf7ef
Tags v27r1.001
Showing 323 additions and 26 deletions
before_script:
  - . /afs/cern.ch/lhcb/software/releases/LBSCRIPTS/dev/InstallArea/scripts/LbLogin.sh
  - unset VERBOSE
  - export PATH=/afs/cern.ch/sw/lcg/contrib/ninja/1.4.0/x86_64-slc6:${PATH}
  - ln -sf Makefile-cmake.mk Makefile
  - echo 'set(CMAKE_USE_CCACHE ON CACHE BOOL "")' >> cache_preload.cmake

doxygen:
  script:
    - make
    - make doc
    - mv build.${CMTCONFIG}/doxygen .
  artifacts:
    paths:
      - doxygen/
File mode changed from 100755 to 100644
#ifndef GAUDIKERNEL_EVENTIDRANGE_H
#define GAUDIKERNEL_EVENTIDRANGE_H 1
/** **************************************************************************
*
* @file EventIDRange.h
* @brief Event Range object. Holds two EventIDBase instances (start and stop)
*
* @author Charles Leggett
*
*****************************************************************************/
#include "GaudiKernel/EventIDBase.h"
#include <iostream>
#include <string>
#include <sstream>
/**
* @class EventIDRange
* @brief Event ID Range object. Holds two EventIDBases (start and stop)
*/
class EventIDRange {
public:
  EventIDRange() {}
  EventIDRange( const EventIDBase& start, const EventIDBase& stop );
  EventIDRange( const EventIDRange& r ) : m_start(r.m_start), m_stop(r.m_stop) {}
  EventIDRange& operator= (const EventIDRange& r);
  EventIDBase start() const { return m_start; }
  EventIDBase stop() const { return m_stop; }
  /// True if t lies in the half-open interval [start, stop):
  /// start is included, stop is excluded.
  bool isInRange(const EventIDBase& t) const {
    return ( t >= m_start && t < m_stop );
  }
  friend bool operator==(const EventIDRange& lhs, const EventIDRange& rhs);
  friend bool operator!=(const EventIDRange& lhs, const EventIDRange& rhs);
  friend std::ostream& operator<<(std::ostream& os, const EventIDRange& rhs);
  operator std::string() const;
private:
  EventIDBase m_start {};
  EventIDBase m_stop {};
};
inline bool operator==(const EventIDRange& lhs, const EventIDRange& rhs) {
  return lhs.m_start == rhs.m_start &&
         lhs.m_stop == rhs.m_stop;
}
inline bool operator!=(const EventIDRange& lhs, const EventIDRange& rhs) {
  return ! (lhs == rhs);
}
inline EventIDRange::operator std::string() const {
  std::ostringstream os;
  os << "{" << m_start << " - " << m_stop << "}";
  return os.str();
}
#endif
#include "GaudiKernel/EventIDRange.h"
/*****************************************************************************
*
* EventIDRange.cpp
*
* Author: Charles Leggett
*
* Validity Range object. Holds two EventIDBases (start and stop)
*
*****************************************************************************/
EventIDRange::EventIDRange( const EventIDBase& start,
                            const EventIDBase& stop )
  : m_start(start), m_stop(stop) {
}

EventIDRange&
EventIDRange::operator= (const EventIDRange& r) {
  if (this != &r) {
    m_start = r.m_start;
    m_stop = r.m_stop;
  }
  return *this;
}

std::ostream&
operator<< (std::ostream& os, const EventIDRange& rhs) {
  os << (std::string) rhs;
  return os;
}
Profiling Gaudi jobs with Jemalloc {#profiling-jemalloc}
===============================================================================
Integration of Jemalloc within Gaudi jobs.
Uses the Jemalloc library from http://www.canonware.com/jemalloc/
It is possible to profile the memory used by Gaudi jobs, using the jemalloc library
@@ -14,12 +14,12 @@ To run the profiler, it is necessary to:
gaudirun.py has been updated to set the environment accordingly (a prerequisite, however,
is that libjemalloc.so is available in the library path).
A Gaudi algorithm has also been developed to perform memory heap dumps at various events;
it is configured using the StartFromEventN, StopAtEventN and DumpPeriod properties, as described
in the example below.
A Gaudi service is also available to provide the same functionality, with the advantage
of being able to profile without modifying the algorithm sequence.
Running with the JemallocProfile algorithm
--------------------------------------------------------------------------------
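A minimal options-file sketch (assuming the JemallocProfile algorithm exposes the
StartFromEventN, StopAtEventN and DumpPeriod properties described above; the values
and the use of TopAlg are illustrative):

~~~~~~~{.py}
#!/usr/bin/env gaudirun.py
from Gaudi.Configuration import *
from Configurables import JemallocProfile

# dump the heap from event 3 to event 7, every 2 events (illustrative values)
jp = JemallocProfile(StartFromEventN=3,
                     StopAtEventN=7,
                     DumpPeriod=2)
ApplicationMgr().TopAlg += [jp]
~~~~~~~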
@@ -48,9 +48,9 @@ $> gaudirun.py --profilerName=jemalloc --run-info-file=runinfo.json myoptions.py
Please note the `--profilerName=jemalloc` option that enables the profiling, and the `--run-info-file` option that produces
a file containing information useful to interpret the results (the process id of the Gaudi job, and the absolute path
of the executable, necessary to run the jeprof analysis tool).
Running with the JemallocProfileSvc service
--------------------------------------------------------------------------------
### Change Options File
@@ -60,20 +60,22 @@ Simple example of using the JemallocProfileSvc service in a Gaudi configurable:
~~~~~~~{.py}
#!/usr/bin/env gaudirun.py
from Configurables import JemallocProfileSvc
from Gaudi.Configuration import DEBUG
#...
jps = JemallocProfileSvc(StartFromEventN=3,
                         StopAtEventN=7,
                         OutputLevel=DEBUG)
ApplicationMgr().ExtSvc.append(jps)
~~~~~~~
It is also possible to trigger the profiling using incidents:
~~~~~~~{.py}
from Configurables import JemallocProfileSvc
jps = JemallocProfileSvc(StartFromIncidents=['MyStartIncident1',
                                             'MyStartIncident2'],
                         StopAtIncidents=['MyStopIncident'])
ApplicationMgr().ExtSvc.append(jps)
~~~~~~~
### Run the job
@@ -84,17 +86,17 @@ Analyze
--------------------------------------------------------------------------------
### With text output
The jeprof analysis tool from the Google performance tools (http://goog-perftools.sourceforge.net/)
is necessary to analyze the heap files.
It can be used to compare the memory in two heap files in the following way:
~~~~~~~~{.sh}
$> jeprof -text --base=<firstheap>.heap <executable name> <comparewith>.heap
~~~~~~~~
### To produce a postscript file
~~~~~~~~{.sh}
$> jeprof -gv --base=<firstheap>.heap <executable name> <comparewith>.heap
~~~~~~~~
@@ -62,7 +62,7 @@ Release notes of the packages:
# in principle this is not needed, but it allows to run Doxygen and install
# MathJax at the same time
add_custom_target(install-MathJax DEPENDS ${CMAKE_BINARY_DIR}/doxygen/mathjax)
add_dependencies(run-doxygen install-MathJax)
set(MATHJAX_RELPATH "../mathjax")
else()
set(MATHJAX_RELPATH "http://cdn.mathjax.org/mathjax/latest")
@@ -81,7 +81,7 @@ Release notes of the packages:
-P ${CMAKE_CURRENT_SOURCE_DIR}/get_cppreference_tags.cmake
COMMENT "Getting cppreference.com doxygen tags...")
add_custom_target(get-ccpreference-tags DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/cppreference-doxygen-web.tag.xml)
add_dependencies(run-doxygen get-ccpreference-tags)
set(DOXYGEN_TAGFILES
"${DOXYGEN_TAGFILES} \"${CMAKE_CURRENT_BINARY_DIR}/cppreference-doxygen-web.tag.xml=http://en.cppreference.com/w/\"")
endif()
@@ -786,6 +786,7 @@ RECURSIVE = YES
# run.
EXCLUDE = @CMAKE_SOURCE_DIR@/GaudiRelease/doc/GaudiHive-release-notes.md
EXCLUDE += @CMAKE_SOURCE_DIR@/InstallArea
# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
# directories that are symbolic links (a Unix file system feature) are excluded
@@ -5,3 +5,7 @@ Gaudi Manual
* @subpage GaudiPluginService-readme
* @subpage MetaDataSvc-readme
* @subpage profiling_tools
## For Maintainers
* @subpage release-procedure
Procedure to Release Gaudi {#release-procedure}
==========================
## Updated versions and release notes
1. use the script `GaudiRelease/prepare_gaudi_release.py` to update the
versions of all packages and prepare their release notes
2. update `GaudiRelease/doc/release.notes.html` from the release notes
in JIRA (see the [list of versions](https://its.cern.ch/jira/browse/GAUDI/?selectedTab=com.atlassian.jira.jira-projects-plugin:versions-panel))
3. update/correct the release notes of the packages or of the project if
needed
4. push the changes to a branch and create a merge request
## Tagging
1. using the GitLab interface, [create the tag](https://gitlab.cern.ch/gaudi/Gaudi/tags/new)
2. close the corresponding milestone from the [list of milestones](https://gitlab.cern.ch/gaudi/Gaudi/milestones)
3. release the version in JIRA (see the [list of versions](https://its.cern.ch/jira/browse/GAUDI/?selectedTab=com.atlassian.jira.jira-projects-plugin:versions-panel))
## Update the web page
After the release is installed in AFS, run the following commands:

    cd /afs/.cern.ch/sw/Gaudi/www
    ./add_release.sh vXrY
    afs_admin vos_release .
## Synchronizing SVN
1. synchronize the version in the trunk:

        cd /afs/cern.ch/sw/Gaudi/git/gateway/Gaudi
        git checkout master
        git pull --ff-only
        git checkout gw/trunk
        git merge --no-ff -m 'synchronize with Gaudi vXrY' master
        git svn dcommit
        git checkout master
        git svn fetch
        git push
2. create the svn tags:

        cd /afs/cern.ch/sw/Gaudi/git/gateway/Gaudi
        GaudiRelease/cmt/svn_tag_release.py
        git checkout master
        git svn fetch
        git push
@@ -930,7 +930,7 @@ THistSvc::deReg(TObject* obj) {
auto mitr = m_ids.equal_range(rem);
auto itr3 = std::find_if( mitr.first, mitr.second, [&](idMap::const_reference i)
{ return i.second.obj == obj; } ) ;
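// itr3 == mitr.second means obj was not found among the entries registered
// under this id, so the deregistration failed: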
if (itr3 == mitr.second ) {
m_log << MSG::ERROR << "Problems deregistering TObject \""
<< obj->GetName()
<< "\" with id \"" << hid.id << "\"" << endmsg;
@@ -458,13 +458,14 @@ macro(lcg_prepare_paths)
# Required if both Qt3 and Qt4 are available.
if(Qt_config_version)
string(REGEX MATCH "[0-9]+" _qt_major_version ${Qt_config_version})
set(DESIRED_QT_VERSION ${_qt_major_version} CACHE STRING "Pick a version of QT to use: 4 or 5")
mark_as_advanced(DESIRED_QT_VERSION)
if(Qt5_config_version AND NOT CMAKE_VERSION VERSION_LESS "2.12")
# Required if both Qt(4) and Qt5 are available.
if(EXISTS "${Qt_home}/bin/qmake")
set(QT_QMAKE_EXECUTABLE "${Qt_home}/bin/qmake" CACHE INTERNAL "")
endif()
set(CMAKE_PREFIX_PATH ${Qt5_home}/lib/cmake ${CMAKE_PREFIX_PATH})
endif()
endif()
@@ -180,12 +180,18 @@ macro(reflex_generate_dictionary dictionary _headerfile _selectionfile)
add_custom_target(${dictionary}GenDeps)
get_filename_component(GCCXML_home ${GCCXML} PATH)
set(impl_deps)
foreach(hf ${headerfiles})
set(impl_deps ${impl_deps} CXX ${hf})
endforeach()
add_custom_command(
  OUTPUT ${gensrcdict} ${rootmapname} ${gensrcclassdef}
  COMMAND ${ROOT_genreflex_CMD}
          ${headerfiles} -o ${gensrcdict} ${gccxmlopts} ${rootmapopts} --select=${selectionfile}
          --gccxmlpath=${GCCXML_home} ${ARG_OPTIONS} ${include_dirs} ${definitions}
  DEPENDS ${headerfiles} ${selectionfile} ${dictionary}GenDeps
  IMPLICIT_DEPENDS ${impl_deps})
# Creating this target at ALL level makes it possible to generate dictionaries (genreflex step)
# well before the dependent libraries of the dictionary are built
@@ -6,6 +6,18 @@ set(ROOT_ALL_COMPONENTS Core Cling RIO Hist Tree TreePlayer Matrix
# and build tools
set(ROOT_ALL_TOOLS root rootcling genreflex)
option(ROOT_DICT_USE_IMPLICIT_DEPENDS
       "Use CMake IMPLICIT_DEPENDS for dictionary dependencies (if using Makefile generator)"
       ON)
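# Note: with the Makefile generator, the IMPLICIT_DEPENDS option of
# add_custom_command lets make rescan the header includes itself; other
# generators fall back to the scan_dict_deps.py helper, or to a one-time
# warning when Python is not available (see below).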
# checks for computation of dependencies of dictionaries
if (NOT (CMAKE_GENERATOR MATCHES "Makefile" AND ROOT_DICT_USE_IMPLICIT_DEPENDS))
  find_package(PythonInterp QUIET)
  if (PYTHON_EXECUTABLE)
    set(scan_dicts_deps ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_LIST_DIR}/scan_dict_deps.py)
  endif()
endif()
# Helper macro to discover the dependencies between components (needed on Mac)
macro(_root_get_deps libpath var)
# reset output var
@@ -205,12 +217,38 @@ macro(reflex_generate_dictionary dictionary _headerfile _selectionfile)
set(pcmname)
endif()
if (CMAKE_GENERATOR MATCHES "Makefile" AND ROOT_DICT_USE_IMPLICIT_DEPENDS)
  set(impl_deps IMPLICIT_DEPENDS)
  foreach(hf ${headerfiles})
    set(impl_deps ${impl_deps} CXX ${hf})
  endforeach()
  set(${dictionary}GenFileDeps)
  set(deps_scan_cmd)
elseif (scan_dicts_deps)
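  # scan_dict_deps.py takes the -I include directories, the output .cmake
  # file, the name of the variable to set, and the headers to scan (see the
  # script for details).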
  set(deps_scan_cmd COMMAND ${scan_dicts_deps}
      ${include_dirs}
      ${CMAKE_CURRENT_BINARY_DIR}/${dictionary}GenFileDeps.cmake
      ${dictionary}GenFileDeps
      ${headerfiles})
  if(NOT EXISTS ${CMAKE_CURRENT_BINARY_DIR}/${dictionary}GenFileDeps.cmake)
    message(STATUS "scanning dependencies for ${dictionary}Gen")
    execute_process(${deps_scan_cmd})
  endif()
  include(${CMAKE_CURRENT_BINARY_DIR}/${dictionary}GenFileDeps.cmake)
  set(impl_deps)
elseif(NOT _root_dicts_deps_warning)
  message(WARNING "generator is not Makefile and Python not available: the dependencies of the dictionary will be incomplete")
  set(_root_dicts_deps_warning 1 CACHE INTERNAL "")
endif()
add_custom_command(
  OUTPUT ${gensrcdict} ${rootmapname} ${gensrcclassdef} ${pcmname}
  COMMAND ${ROOT_genreflex_CMD}
          ${headerfiles} -o ${gensrcdict} ${rootmapopts} --select=${selectionfile}
          ${ARG_OPTIONS} ${include_dirs} ${definitions}
  ${deps_scan_cmd}
  DEPENDS ${headerfiles} ${selectionfile} ${dictionary}GenDeps ${${dictionary}GenFileDeps}
          ${impl_deps})
# Creating this target at ALL level makes it possible to generate dictionaries (genreflex step)
# well before the dependent libraries of the dictionary are built
@@ -19,13 +19,17 @@ set(PYGRAPHICS_PYTHON_PATH ${pygraphics_home}/lib/python${Python_config_version_
mark_as_advanced(PYGRAPHICS_FOUND PYGRAPHICS_PYTHON_PATH)
if(NOT DEFINED DESIRED_QT_VERSION)
  set(DESIRED_QT_VERSION 4)
endif()
# Provides functions to compile .qrc and .ui files into Python modules.
find_package(PythonInterp QUIET REQUIRED)
find_program(pyrcc_cmd pyrcc${DESIRED_QT_VERSION})
mark_as_advanced(pyrcc_cmd)
set(pyuic_cmd ${PYTHON_EXECUTABLE} -m PyQt${DESIRED_QT_VERSION}.uic.pyuic)
get_filename_component(PYGRAPHICS_BINARY_PATH ${pyrcc_cmd} PATH)
#!/usr/bin/env python
import re
from itertools import imap, ifilter
from os.path import join, exists, isabs

def find_file(filename, searchpath):
    '''
    Return the absolute path to filename in the searchpath.
    If filename is already an absolute path, return it as is, if it exists.
    If filename cannot be found, return None.
    '''
    if isabs(filename):
        return filename if exists(filename) else None
    try:
        return ifilter(exists, imap(lambda x: join(x, filename),
                                    searchpath)).next()
    except StopIteration:
        return None

def find_deps(filename, searchpath, deps=None):
    '''
    Return a set with the absolute paths to the files included (directly and
    indirectly) by filename.
    '''
    if deps is None:
        deps = set()
    filename = find_file(filename, searchpath)
    # Look for all "#include" lines in the file, then consider each of the
    # included files, ignoring those already included in the recursion
    for included in ifilter(lambda f: f and f not in deps,
                            imap(lambda m: m and find_file(m.group(1), searchpath),
                                 imap(re.compile(r'^\s*#\s*include\s*["<]([^">]*)[">]').match,
                                      open(filename)))):
        deps.add(included)
        find_deps(included, searchpath, deps)
    return deps

def main():
    from optparse import OptionParser
    parser = OptionParser(usage='%prog [options] output_file variable_name headers...')
    parser.add_option('-I', action='append', dest='include_dirs')
    opts, args = parser.parse_args()

    if len(args) < 2:
        parser.error('you must specify output file and variable name')

    output, variable = args[:2]
    headers = args[2:]

    old_deps = open(output).read() if exists(output) else None

    # scan for dependencies
    deps = set()
    for filename in headers:
        find_deps(filename, opts.include_dirs, deps)
    deps = sorted(deps)

    # prepare content of output file
    new_deps = 'set({deps_var}\n {deps})\n' \
               .format(deps='\n '.join(deps), deps_var=variable)

    if new_deps != old_deps:  # write it only if it has changed
        open(output, 'w').write(new_deps)
        if old_deps:
            print 'info: dependencies changed: next build will trigger a reconfigure'

if __name__ == '__main__':
    main()
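# Usage sketch (hypothetical paths): the CMake fragment above invokes this
# script as
#   python scan_dict_deps.py -I <include dir> ... <output>.cmake <variable> <header> ...
# It writes "set(<variable> <absolute header paths>)" into <output>.cmake and
# rewrites the file only when the computed include closure changes, which is
# what triggers a reconfigure on the next build.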
PLATFORM: x86_64-slc0-gcc99-opt
VERSION: 123
COMPILER: GNU 4.8.2
# this is a comment
@@ -2,7 +2,7 @@
set(Base_heptools_version 69)
set(Base_heptools_system x86_64-slc6-gcc48)
set(Base_PLATFORM x86_64-slc0-gcc99-opt)
set(Base_VERSION v1r0)
set(Base_VERSION_MAJOR 1)