Commit 6ef68611 authored by Carlos Vazquez Sierra

Merge branch 'updateCMAKE' into 'master'

Update cmake folder from Gaudi/master

See merge request lhcb/Urania!236
parents 553d13e8 42136a44
[lb-checkout "Gaudi.cmake"]
base = 4c31efdcdc07acf6c60229385cafef993161a073
imported = aeb156f0c40571b5753a9e1dab31e331491b2f3e
......@@ -49,6 +49,26 @@
# it from the environment variables ``BINARY_TAG`` and ``CMTCONFIG``, or from
# system inspection.
#
# list of valid x86 architecture names, ordered from the smallest to the most
# inclusive instruction set (e.g. westmere == nehalem + xyz)
set(BTU_KNOWN_x86_ARCHS
x86_64
core2
nehalem
westmere
sandybridge
ivybridge
haswell
broadwell
skylake
skylake_avx512
cannonlake
CACHE STRING "known architectures in order such that any entry can run something compiled for the preceding entries"
)
mark_as_advanced(BTU_KNOWN_x86_ARCHS)
macro(parse_binary_tag)
# parse arguments
if(${ARGC} GREATER 0)
......@@ -211,13 +231,27 @@ function(compatible_binary_tags variable)
# prepare the list of archs as 'main_arch' followed by micro-arch flags
# e.g.: arch+ma1+ma2+ma3 -> arch+ma1+ma2+ma3 arch+ma1+ma2 arch+ma1 arch
# - first add all supported main architectures, up to the requested one
set(archs)
list(FIND BTU_KNOWN_x86_ARCHS ${BINARY_TAG_ARCH} arch_idx)
if (arch_idx GREATER -1)
set(archs)
foreach(_arch IN LISTS BTU_KNOWN_x86_ARCHS)
list(APPEND archs ${_arch})
list(LENGTH archs _archs_len)
if(NOT _archs_len LESS arch_idx)
break()
endif()
endforeach()
endif()
# - then add the optional extra flags one by one
set(subarch)
foreach(ma ${BINARY_TAG_MICROARCH})
list(APPEND archs "${BINARY_TAG_ARCH}${subarch}")
set(subarch "${subarch}+${ma}")
endforeach()
list(APPEND archs "${BINARY_TAG_ARCH}${subarch}")
# - finally reverse the list
list(REVERSE archs)
# prepare the list of build sub-types (if needed)
......
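For orientation, the hunk above first collects every known architecture that precedes the requested BINARY_TAG_ARCH and then appends the requested architecture with its micro-arch flags, one flag at a time. Below is a self-contained sketch of the result, runnable with "cmake -P"; the tag values are assumptions, and list(SUBLIST) (CMake >= 3.12) stands in for the element-by-element loop used in the committed code.

# Illustration only: reproduce the arch list built above for
# BINARY_TAG_ARCH=haswell with micro-arch flags "avx2;fma" (assumed values).
set(BTU_KNOWN_x86_ARCHS x86_64 core2 nehalem westmere sandybridge ivybridge haswell)
set(BINARY_TAG_ARCH haswell)
set(BINARY_TAG_MICROARCH avx2 fma)
list(FIND BTU_KNOWN_x86_ARCHS ${BINARY_TAG_ARCH} arch_idx)
list(SUBLIST BTU_KNOWN_x86_ARCHS 0 ${arch_idx} archs)   # architectures preceding the requested one
set(subarch)
foreach(ma ${BINARY_TAG_MICROARCH})
  list(APPEND archs "${BINARY_TAG_ARCH}${subarch}")
  set(subarch "${subarch}+${ma}")
endforeach()
list(APPEND archs "${BINARY_TAG_ARCH}${subarch}")
list(REVERSE archs)
message(STATUS "${archs}")
# prints: haswell+avx2+fma;haswell+avx2;haswell;ivybridge;sandybridge;westmere;nehalem;core2;x86_64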
......@@ -177,20 +177,21 @@ if(_opt_level_${_up_bt})
message(STATUS "Optimization: ${_opt_level_${_up_bt}} ${_opt_ext_${_up_bt}}")
endif()
# special architecture flags
# extra flags to enable/disable specific instruction sets
set(GAUDI_ARCH_DEFAULT)
if(BINARY_TAG_MICROARCH)
set(GAUDI_ARCH_DEFAULT ${BINARY_TAG_MICROARCH})
elseif(BINARY_TAG_COMP_NAME STREQUAL "gcc" AND BINARY_TAG_COMP_VERSION VERSION_GREATER "5.0" AND
BINARY_TAG_ARCH STREQUAL "x86_64")
# Special case: x86_64-*-gcc6 or higher is equivalent to x86_64+sse4.2-*
set(GAUDI_ARCH_DEFAULT "sse4.2")
else()
if (NOT HOST_BINARY_TAG_ARCH STREQUAL BINARY_TAG_ARCH)
if (HOST_BINARY_TAG_ARCH STREQUAL "x86_64" AND BINARY_TAG_ARCH STREQUAL "i686")
set(GAUDI_ARCH_DEFAULT "32")
else()
message(FATAL_ERROR "Cannot build for ${BINARY_TAG_ARCH} on ${HOST_BINARY_TAG_ARCH}.")
endif()
# if there are no extra flags and no special case applies, compare host and target architectures
# (BTU_KNOWN_x86_ARCHS is ordered such that later entries can build for earlier entries)
list(FIND BTU_KNOWN_x86_ARCHS ${BINARY_TAG_ARCH} _target_idx)
list(FIND BTU_KNOWN_x86_ARCHS ${HOST_BINARY_TAG_ARCH} _host_idx)
if (_host_idx LESS _target_idx)
message(FATAL_ERROR "Cannot build for '${BINARY_TAG_ARCH}' on '${HOST_BINARY_TAG_ARCH}'.")
endif()
endif()
set(GAUDI_ARCH "${GAUDI_ARCH_DEFAULT}"
......@@ -230,10 +231,11 @@ if(NOT GAUDI_FLAGS_SET EQUAL GAUDI_FLAGS_OPTIONS)
else()
# special architecture flags
set(arch_opts)
string(REPLACE "_" "-" _gcc_arch_name ${BINARY_TAG_ARCH})
set(arch_opts "-march=${_gcc_arch_name}")
foreach(_arch_opt ${GAUDI_ARCH})
if(_arch_opt STREQUAL "native")
set(_arch_opt "arch=native")
message(FATAL_ERROR "you must use 'native-${BINARY_TAG_OS}-${BINARY_TAG_COMP}-${BINARY_TAG_TYPE}'' instead of '${BINARY_TAG_ARCH}+native-${BINARY_TAG_OS}-${BINARY_TAG_COMP}-${BINARY_TAG_TYPE}'")
endif()
set(arch_opts "${arch_opts} -m${_arch_opt}")
endforeach()
......
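For reference, a standalone illustration of the -march/-m option string assembled in the second hunk above; the architecture and GAUDI_ARCH values are assumptions, and the fragment runs with "cmake -P".

set(BINARY_TAG_ARCH "skylake_avx512")      # assumed target architecture
set(GAUDI_ARCH "avx512f;avx512dq")         # assumed extra instruction-set flags
string(REPLACE "_" "-" _gcc_arch_name ${BINARY_TAG_ARCH})
set(arch_opts "-march=${_gcc_arch_name}")
foreach(_arch_opt ${GAUDI_ARCH})
  set(arch_opts "${arch_opts} -m${_arch_opt}")
endforeach()
message(STATUS "compiler options: ${arch_opts}")
# prints: compiler options: -march=skylake-avx512 -mavx512f -mavx512dq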
......@@ -55,14 +55,25 @@ find_program(ccache_cmd NAMES ccache ccache-swig)
find_program(distcc_cmd distcc)
find_program(icecc_cmd icecc)
set(CLANG_FORMAT_VERSION "7" CACHE STRING "Version of clang-format to use")
set(CLANG_FORMAT_VERSION "8" CACHE STRING "Version of clang-format to use")
find_program(clang_format_cmd
NAMES lcg-clang-format-${CLANG_FORMAT_VERSION}
lcg-clang-format-${CLANG_FORMAT_VERSION}.0
lcg-clang-format-${CLANG_FORMAT_VERSION}.0.0
clang-format-${CLANG_FORMAT_VERSION})
clang-format-${CLANG_FORMAT_VERSION}
gaudi-clang-format-${CLANG_FORMAT_VERSION})
if(clang_format_cmd)
message(STATUS "found clang-format ${CLANG_FORMAT_VERSION}: ${clang_format_cmd}")
execute_process(COMMAND ${clang_format_cmd} -version
RESULT_VARIABLE _clang_format_working
OUTPUT_VARIABLE _clang_format_reported_version
ERROR_QUIET
OUTPUT_STRIP_TRAILING_WHITESPACE)
if (_clang_format_working EQUAL 0)
message(STATUS "found ${_clang_format_reported_version}: ${clang_format_cmd}")
else()
message(WARNING "could not run ${clang_format_cmd}:
automatic formatting of C++ files will not be possible")
endif()
else()
message(WARNING "could not find clang-format ${CLANG_FORMAT_VERSION}:
automatic formatting of C++ files will not be possible")
......@@ -103,7 +114,7 @@ set(_distributed_compiler)
if(distcc_cmd)
option(CMAKE_USE_DISTCC "Use distcc to speed up compilation." OFF)
if(CMAKE_USE_DISTCC)
set(_distributed_compiler distcc)
set(_distributed_compiler ${distcc_cmd})
endif()
endif()
if(icecc_cmd)
......@@ -112,7 +123,7 @@ if(icecc_cmd)
if(_distributed_compiler)
message(FATAL_ERROR "Cannot use multiple distributed compilers at the same time")
endif()
set(_distributed_compiler icecc)
set(_distributed_compiler ${icecc_cmd})
endif()
endif()
if(_distributed_compiler)
......@@ -300,12 +311,16 @@ macro(gaudi_project project version)
endif()
if(NOT CMAKE_RUNTIME_OUTPUT_DIRECTORY)
set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin CACHE STRING
"Single build output directory for all executables" FORCE)
foreach( _config "" "_DEBUG" "_RELEASE" "_MINSIZEREL" "_RELWITHDEBINFO" )
set(CMAKE_RUNTIME_OUTPUT_DIRECTORY${_config} ${CMAKE_BINARY_DIR}/bin CACHE STRING
"Single build output directory for all executables" FORCE)
endforeach()
endif()
if(NOT CMAKE_LIBRARY_OUTPUT_DIRECTORY)
set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib CACHE STRING
"Single build output directory for all libraries" FORCE)
foreach( _config "" "_DEBUG" "_RELEASE" "_MINSIZEREL" "_RELWITHDEBINFO" )
set(CMAKE_LIBRARY_OUTPUT_DIRECTORY${_config} ${CMAKE_BINARY_DIR}/lib CACHE STRING
"Single build output directory for all libraries" FORCE)
endforeach()
endif()
if(NOT CMAKE_CONFIG_OUTPUT_DIRECTORY)
......@@ -2540,6 +2555,7 @@ function(gaudi_add_test name)
RESULT_VARIABLE qmt_deps_retcode)
if(NOT qmt_deps_retcode EQUAL 0)
message(WARNING "failure computing dependencies of QMTest tests")
return()
endif()
include(${CMAKE_CURRENT_BINARY_DIR}/qmt_deps.cmake)
list(LENGTH qmt_files qmt_count)
......
......@@ -5,12 +5,14 @@ Gaudi cmake process (See Instrument.cmake) and create an other file
understandable by kcachegrind for easy visualization of where we spend time in
cmake.
'''
from __future__ import print_function
import sys
def usage():
print "Invalid arguments\nProper syntax is :\n %s <log file> <callgrind file>" % sys.argv[
0]
print(
"Invalid arguments\nProper syntax is :\n %s <log file> <callgrind file>"
% sys.argv[0])
if len(sys.argv) != 3:
......@@ -45,7 +47,7 @@ for line in open(sys.argv[1]).readlines():
elif key == 'ENDTIME':
sfunc, stime = callStack.pop()
if sfunc != func:
print 'Mismatch START/END for %s/%s' % (sfunc, func)
print('Mismatch START/END for %s/%s' % (sfunc, func))
sys.exit()
deltatime = int(time) - stime
# add time spent to this function
......
......@@ -6,11 +6,16 @@ properties.
'''
__author__ = 'Marco Clemencic <marco.clemencic@cern.ch>'
import os
import platform
import xml.etree.ElementTree as ET
import collections
import re
try:
import os
import platform
import xml.etree.ElementTree as ET
import collections
import re
import six
except ImportError:
import sys
sys.exit(1)
def qmt_filename_to_name(path):
......@@ -111,11 +116,11 @@ def analyze_suites(pkg, rootdir):
# transpose the dictionary of lists
test_labels = collections.defaultdict(set)
for label, tests in labels.iteritems():
for label, tests in six.iteritems(labels):
for test in tests:
test_labels[test].add(label)
for test, labels in test_labels.iteritems():
for test, labels in six.iteritems(test_labels):
print('set_property(TEST {0} APPEND PROPERTY LABELS {1})'.format(
test, ' '.join(labels)))
......
#!/bin/bash
source /cvmfs/sft.cern.ch/lcg/releases/clang/8.0.0-ed577/x86_64-slc6/setup.sh
exec /cvmfs/sft.cern.ch/lcg/releases/clang/8.0.0-ed577/x86_64-slc6/bin/clang-format "$@"
......@@ -11,8 +11,71 @@ Inspired by
import os
import re
import sys
import platform
from subprocess import check_output, STDOUT
from collections import OrderedDict
# available flags per architecture
# source: https://gitlab.cern.ch/lhcb-core/LbPlatformUtils/blob/master/LbPlatformUtils/architectures.py
ARCH_DEFS = OrderedDict([
('cannonlake',
set([
'pclmulqdq', 'avx', 'pku', 'umip', 'avx512dq', 'bmi1', 'fsgsbase',
'avx512ifma', 'avx512bw', 'clflushopt', 'sse4_2', 'sse4_1', 'lm',
'adx', '3dnowprefetch', 'sha_ni', 'fma', 'mmx', 'avx512cd', 'avx512f',
'pni', 'rdseed', 'popcnt', 'sse', 'f16c', 'xsavec', 'aes', 'avx2',
'sse2', 'avx512vbmi', 'bmi2', 'ssse3', 'movbe', 'rdrand', 'avx512vl'
])),
('skylake_avx512',
set([
'pclmulqdq', 'avx', 'pku', 'avx512dq', 'fsgsbase', 'avx512bw',
'clflushopt', 'sse4_2', 'sse4_1', 'lm', 'adx', '3dnowprefetch', 'fma',
'mmx', 'avx512cd', 'avx512f', 'clwb', 'pni', 'rdseed', 'popcnt',
'sse', 'f16c', 'xsavec', 'aes', 'avx2', 'sse2', 'bmi1', 'bmi2',
'ssse3', 'movbe', 'rdrand', 'avx512vl'
])),
('skylake',
set([
'avx', 'fsgsbase', 'clflushopt', 'sse4_2', 'sse4_1', 'lm', 'adx',
'3dnowprefetch', 'fma', 'mmx', 'pclmulqdq', 'pni', 'rdseed', 'popcnt',
'sse', 'f16c', 'xsavec', 'aes', 'avx2', 'sse2', 'bmi1', 'bmi2',
'ssse3', 'movbe', 'rdrand'
])),
('broadwell',
set([
'avx', 'fsgsbase', 'sse4_2', 'sse4_1', 'lm', 'adx', '3dnowprefetch',
'fma', 'mmx', 'pclmulqdq', 'pni', 'rdseed', 'popcnt', 'sse', 'f16c',
'aes', 'avx2', 'sse2', 'bmi1', 'bmi2', 'ssse3', 'movbe', 'rdrand'
])),
('haswell',
set([
'avx', 'fsgsbase', 'sse4_2', 'sse4_1', 'lm', 'fma', 'mmx', 'aes',
'pni', 'popcnt', 'sse', 'f16c', 'pclmulqdq', 'avx2', 'sse2', 'bmi1',
'bmi2', 'ssse3', 'movbe', 'rdrand'
])),
('ivybridge',
set([
'pni', 'aes', 'sse4_2', 'mmx', 'sse2', 'sse4_1', 'lm', 'pclmulqdq',
'ssse3', 'fsgsbase', 'popcnt', 'rdrand', 'sse', 'avx', 'f16c'
])),
('sandybridge',
set([
'pni', 'aes', 'sse4_2', 'mmx', 'sse2', 'sse4_1', 'lm', 'pclmulqdq',
'ssse3', 'popcnt', 'sse', 'avx'
])),
('westmere',
set([
'pni', 'aes', 'sse4_2', 'mmx', 'sse2', 'sse4_1', 'lm', 'pclmulqdq',
'ssse3', 'popcnt', 'sse'
])),
('nehalem',
set([
'pni', 'sse4_2', 'mmx', 'sse2', 'sse4_1', 'lm', 'ssse3', 'popcnt',
'sse'
])), ('core2', set(['pni', 'mmx', 'sse2', 'lm', 'ssse3', 'sse'])),
('x86_64', set([]))
])
def _Linux_os():
......@@ -77,8 +140,25 @@ def compiler_id():
return _compiler_version()
arch = platform.machine()
if arch == 'AMD64': # this is what we get on Windows
arch = 'x86_64'
print('-'.join([arch, os_id(), compiler_id(), 'opt']))
def arch():
# Get the host CPU flags (sysctl on macOS, /proc/cpuinfo elsewhere)
host_flags = set()
if sys.platform == 'darwin':
for l in check_output(['sysctl', '-a']).split('\n'):
if l.startswith('machdep.cpu.features') or l.startswith(
'machdep.cpu.extfeatures') or l.startswith(
'machdep.cpu.leaf7_features'):
host_flags.update([f.lower() for f in l.split()[1:]])
else:
for l in open('/proc/cpuinfo'):
if l.startswith('flags'):
host_flags.update(l.split()[2:])
break
# compare with known architectures
for arch, flags in ARCH_DEFS.items():
if host_flags.issuperset(flags):
return arch
return 'x86_64' # if nothing is found, assume x86_64
print('-'.join([arch(), os_id(), compiler_id(), 'opt']))
......@@ -8,7 +8,7 @@ set(ROOT_ALL_COMPONENTS Core Cling RIO Hist Tree TreePlayer Matrix
set(ROOT_ALL_TOOLS root rootcling genreflex)
# checks for computation of dependencies of dictionaries
if (NOT CMAKE_GENERATOR MATCHES "Makefile")
if (CMAKE_GENERATOR MATCHES "Ninja")
if(CMAKE_VERSION VERSION_LESS 3.12)
find_package(PythonInterp QUIET)
if (PYTHON_EXECUTABLE)
......@@ -237,6 +237,10 @@ macro(reflex_generate_dictionary dictionary _headerfile _selectionfile)
set(_root_dicts_deps_warning 1 CACHE INTERNAL "")
endif()
if (GENREFLEX_JOB_POOL)
set(job_pool JOB_POOL ${GENREFLEX_JOB_POOL})
endif()
add_custom_command(
OUTPUT ${gensrcdict} ${rootmapname} ${gensrcclassdef} ${pcmname}
COMMAND ${ROOT_genreflex_CMD}
......@@ -244,7 +248,8 @@ macro(reflex_generate_dictionary dictionary _headerfile _selectionfile)
${ARG_OPTIONS} ${include_dirs} ${definitions}
${deps_scan_cmd}
DEPENDS ${headerfiles} ${selectionfile}
${impl_deps})
${impl_deps}
${job_pool})
# Creating this target at the ALL level makes it possible to generate dictionaries (genreflex step)
# well before the dependent libraries of the dictionary are built
......
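The new GENREFLEX_JOB_POOL hook above lets genreflex invocations be throttled when building with the Ninja generator (JOB_POOL on add_custom_command requires CMake >= 3.15). A hypothetical way to wire it up from a top-level CMakeLists.txt; the pool name and size are assumptions, not part of the patch.

# declare a named job pool allowing at most two concurrent genreflex jobs
set_property(GLOBAL APPEND PROPERTY JOB_POOLS genreflex_pool=2)
# point the hook introduced above at that pool
set(GENREFLEX_JOB_POOL genreflex_pool)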
# - Locate GSL library
# Defines:
#
# GSL_FOUND
# GSL_INCLUDE_DIR
# GSL_INCLUDE_DIRS (not cached)
# GSL_LIBRARY
# GSL_CBLAS_LIBRARY
# GSL_LIBRARIES (not cached)
find_path(GSL_INCLUDE_DIR /gsl/gsl_version.h
HINTS $ENV{GSL_ROOT_DIR}/include ${GSL_ROOT_DIR}/include)
find_library(GSL_LIBRARY NAMES gsl
HINTS $ENV{GSL_ROOT_DIR}/lib ${GSL_ROOT_DIR}/lib)
find_library(GSL_CBLAS_LIBRARY NAMES gslcblas
HINTS $ENV{GSL_ROOT_DIR}/lib ${GSL_ROOT_DIR}/lib)
set(GSL_LIBRARIES ${GSL_LIBRARY} ${GSL_CBLAS_LIBRARY})
set(GSL_INCLUDE_DIRS ${GSL_INCLUDE_DIR})
# handle the QUIETLY and REQUIRED arguments and set GSL_FOUND to TRUE if
# all listed variables are TRUE
INCLUDE(FindPackageHandleStandardArgs)
FIND_PACKAGE_HANDLE_STANDARD_ARGS(GSL DEFAULT_MSG GSL_INCLUDE_DIR GSL_LIBRARIES)
mark_as_advanced(GSL_FOUND GSL_INCLUDE_DIR GSL_LIBRARY GSL_CBLAS_LIBRARY)
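A hypothetical consumer of the FindGSL.cmake module added above; the target name and source file are placeholders, and GSL_ROOT_DIR (or the environment variable of the same name) is assumed to point at a GSL installation.

find_package(GSL REQUIRED)
include_directories(${GSL_INCLUDE_DIRS})
add_executable(gsl_example gsl_example.cpp)        # placeholder target and source
target_link_libraries(gsl_example ${GSL_LIBRARIES})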
......@@ -112,7 +112,7 @@ IF (TBB_LIBRARY_RELEASE AND TBB_LIBRARY_DEBUG)
ENDIF (NOT TBB_LIBRARY)
ELSEIF (TBB_LIBRARY_RELEASE)
IF (NOT TBB_LIBRARY)
SET (TBB_LIBRARY ${TBB_LIBRARY_RELEASE} CACHE DOC "TBB library" FORCE)
SET (TBB_LIBRARY ${TBB_LIBRARY_RELEASE} CACHE FILEPATH "TBB library" FORCE)
ENDIF (NOT TBB_LIBRARY)
ENDIF (TBB_LIBRARY_RELEASE AND TBB_LIBRARY_DEBUG)
......@@ -287,6 +287,18 @@ IF (CMAKE_VERSION VERSION_GREATER 2.8.2)
LIST (APPEND _TBB_FPHSA_ADDITIONAL_ARGS VERSION_VAR TBB_VERSION)
ENDIF (CMAKE_VERSION VERSION_GREATER 2.8.2)
# Fix for https://github.com/01org/tbb/issues/22
# Inspired by https://github.com/wjakob/tbb/blob/master/CMakeLists.txt
IF (NOT TBB_USE_GLIBCXX_VERSION AND UNIX AND NOT APPLE)
IF (BINARY_TAG_COMP_NAME STREQUAL "clang")
string(REPLACE "." "0" TBB_USE_GLIBCXX_VERSION ${CMAKE_CXX_COMPILER_VERSION})
ENDIF()
ENDIF()
IF (TBB_USE_GLIBCXX_VERSION)
add_definitions(-DTBB_USE_GLIBCXX_VERSION=${TBB_USE_GLIBCXX_VERSION})
ENDIF()
FIND_PACKAGE_HANDLE_STANDARD_ARGS (TBB REQUIRED_VARS TBB_ROOT_DIR
TBB_INCLUDE_DIR TBB_LIBRARY ${_TBB_MISSING_LIBRARIES}
${_TBB_FPHSA_ADDITIONAL_ARGS})
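The string(REPLACE) above turns a dotted clang version into the numeric form used by the TBB_USE_GLIBCXX_VERSION macro (major*10000 + minor*100 + patch, for single-digit components). A small sketch with an assumed compiler version, runnable with "cmake -P".

set(_compiler_version "8.0.0")                     # assumed clang version
string(REPLACE "." "0" TBB_USE_GLIBCXX_VERSION ${_compiler_version})
message(STATUS "TBB_USE_GLIBCXX_VERSION=${TBB_USE_GLIBCXX_VERSION}")   # prints 80000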
#!/usr/bin/env python
import re
from itertools import imap, ifilter
import six
from os.path import join, exists, isabs
......@@ -16,8 +16,10 @@ def find_file(filename, searchpath):
if isabs(filename):
return filename if exists(filename) else None
try:
return ifilter(exists, imap(lambda x: join(x, filename),
searchpath)).next()
return six.next(
six.moves.filter(
exists, six.moves.map(lambda x: join(x, filename),
searchpath)))
except StopIteration:
return None
......@@ -37,11 +39,11 @@ def find_deps(filename, searchpath, deps=None):
# Look for all "#include" lines in the file, then consider each of the
# included files, ignoring those already included in the recursion
for included in ifilter(
for included in six.moves.filter(
lambda f: f and f not in deps,
imap(
six.moves.map(
lambda m: m and find_file(m.group(1), searchpath),
imap(
six.moves.map(
re.compile(r'^\s*#\s*include\s*["<]([^">]*)[">]').match,
open(filename)))):
deps.add(included)
......@@ -90,7 +92,9 @@ def main():
if new_deps != old_deps: # write it only if it has changed
open(output, 'w').write(new_deps)
if old_deps and not opts.for_make:
print 'info: dependencies changed: next build will trigger a reconfigure'
print(
'info: dependencies changed: next build will trigger a reconfigure'
)
if __name__ == '__main__':
......
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import re
import atexit
import cPickle
from six.moves import cPickle
from subprocess import Popen, PIPE
from collections import defaultdict
......@@ -22,7 +23,7 @@ def update_coverage():
else:
data = {'lines': {}}
lines = data['lines']
for filename, linenumbers in coverage.iteritems():
for filename, linenumbers in coverage.items():
lines[filename] = sorted(linenumbers.union(lines.get(filename, [])))
with open(COVERAGE_FILE, 'w') as report:
cPickle.dump(data, report)
......@@ -84,9 +85,9 @@ if __name__ == '__main__':
lines = data['lines']
for filename in sorted(lines):
if not os.path.exists(filename):
print 'Unknown file', filename
print('Unknown file', filename)
continue
print filename
print(filename)
active_lines = set(get_active_lines(filename))
touched_lines = set(lines[filename])
missed_lines = active_lines.difference(touched_lines)
......@@ -97,7 +98,8 @@ if __name__ == '__main__':
touched_count = len(touched_lines)
active_count = len(active_lines)
if touched_count == active_count:
print ' coverage 100%'
print(' coverage 100%')
else:
print(' coverage %3d%%, missed: %s' % (
float(touched_count) / active_count * 100, ', '.join(ranges)))
print(
' coverage %3d%%, missed: %s' %
(float(touched_count) / active_count * 100, ', '.join(ranges)))
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
from cmake_coverage import cmake_script
from os.path import join, curdir
......@@ -21,10 +21,10 @@ class CMakeTestScripts(object):
def run(self, name):
script_name = join(self.scripts_dir, 'test_%s.cmake' % name)
out, err, returncode = cmake_script(script_name, cwd=self.base_dir)
print "---------- stdout ----------"
print out
print "---------- stderr ----------"
print err
print("---------- stdout ----------")
print(out)
print("---------- stderr ----------")
print(err)
assert returncode == 0
def test_scripts(self):
......
......@@ -25,6 +25,11 @@ def build():
for v in ('BINARY_TAG', 'CMTCONFIG'):
if v in os.environ:
del os.environ[v]
import sys
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
from get_host_binary_tag import os_id, compiler_id
os.environ['BINARY_TAG'] = '-'.join(
['x86_64', os_id(), compiler_id(), 'opt'])
build_proc = Popen(['make', 'VERBOSE=1'],
cwd=base_dir,
stdout=PIPE,
......
# -*- coding: utf-8 -*-
from __future__ import print_function
from cmake_coverage import cmake_script
from os.path import join, dirname
......@@ -10,10 +10,10 @@ scripts_dir = join(base_dir, 'cmake_scripts')
def test_loops():
script_name = join(scripts_dir, 'test_loops.cmake')
out, err, returncode = cmake_script(script_name, cwd=base_dir)
print "---------- stdout ----------"
print out
print "---------- stderr ----------"
print err
print("---------- stdout ----------")
print(out)
print("---------- stderr ----------")
print(err)
assert returncode != 0
assert 'Infinite recursion detected at project Loop' in err
......@@ -21,9 +21,9 @@ def test_loops():
def test_missing_base():
script_name = join(scripts_dir, 'test_missing_base.cmake')
out, err, returncode = cmake_script(script_name, cwd=base_dir)
print "---------- stdout ----------"
print out
print "---------- stderr ----------"
print err
print("---------- stdout ----------")
print(out)
print("---------- stderr ----------")
print(err)
assert returncode != 0
assert 'Cannot find project IDoNotExist v0r0' in err
......@@ -2,7 +2,7 @@
set(heptools_version 95)
cmake_minimum_required(VERSION 3.6)
if($ENV{HEPTOOLS_VERSION})
if(NOT "$ENV{HEPTOOLS_VERSION}" STREQUAL "")
set(heptools_version $ENV{HEPTOOLS_VERSION})
endif()
......@@ -14,8 +14,10 @@ if(NOT CMAKE_SOURCE_DIR MATCHES "CMakeTmp")
# it is not needed
include(${CMAKE_SOURCE_DIR}/cmake/GaudiDefaultToolchain.cmake)
# FIXME: make sure we do not pick up ninja from LCG (it requires LD_LIBRARY_PATH set)
# FIXME: make sure we do not pick up unwanted/problematic projects from LCG