Commit 794152b2 authored by Marco Clemencic's avatar Marco Clemencic

Add dynamic dependencies scan for genreflex dictionaries

Closes #152

See merge request gaudi/Gaudi!1183
parents 0c51539d 0e51c1e3
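The new helper cmake/scan_dict_deps.py walks the '#include' graph of the dictionary headers and writes a Makefile-format dependency file that the genreflex custom command consumes through DEPFILE, so touching a transitively included header re-triggers dictionary generation (with the Ninja generator, or Makefiles on CMake >= 3.20). As a sketch, for a hypothetical dictionary FooDict the generated FooDict.d would contain a single rule of the form:

    FooSubdir/FooDict.cxx: /workspace/include/Foo/A.h /workspace/include/Foo/B.h

where the target is the generated source expressed relative to the build directory and the prerequisites are the headers found by the scan.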
@@ -203,6 +203,7 @@ install(EXPORT ${PROJECT_NAME} NAMESPACE ${PROJECT_NAME}::
gaudi_install(CMAKE cmake/GaudiToolbox.cmake
cmake/GaudiDependencies.cmake
cmake/extract_qmtest_metadata.py # used in gaudi_add_tests(QMTest)
cmake/scan_dict_deps.py # used in gaudi_add_dictionary(...)
cmake/DeveloperBuildType.cmake
"${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}Config.cmake"
"${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}ConfigVersion.cmake"
@@ -102,6 +102,8 @@ set(GAUDI_INSTALL_PLUGINDIR "${CMAKE_INSTALL_LIBDIR}" CACHE STRING "Install plug
set(GAUDI_INSTALL_PYTHONDIR "python" CACHE STRING "Install python packages in <prefix>/\${GAUDI_INSTALL_PYTHONDIR}")
set(GAUDI_INSTALL_CONFIGDIR "lib/cmake/${PROJECT_NAME}" CACHE STRING "Install cmake files in <prefix>/\${GAUDI_INSTALL_CONFIGDIR}")
set(scan_dict_deps_command ${CMAKE_CURRENT_LIST_DIR}/scan_dict_deps.py
CACHE INTERNAL "command to use to scan dependencies of dictionary headers")
################################## Functions ##################################
@@ -691,25 +693,59 @@ function(gaudi_add_dictionary dictionary)
# Workaround for rootcling not knowing what nodiscard is
if(ROOT_VERSION MATCHES "^6\.22.*")
set( ARG_OPTIONS ${ARG_OPTIONS} -Wno-unknown-attributes)
list(APPEND ARG_OPTIONS -Wno-unknown-attributes)
endif()
add_custom_command(OUTPUT ${gensrcdict} ${rootmapname} ${pcmfile}
COMMAND run
if(TARGET Python::Interpreter
AND (CMAKE_GENERATOR MATCHES "Ninja"
OR (CMAKE_GENERATOR MATCHES "Makefile" AND CMAKE_VERSION VERSION_GREATER_EQUAL "3.20")))
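# DEPFILE on add_custom_command is honoured by the Ninja generators and, since CMake 3.20, by the Makefile generators
# the depfile rule must name the output as the build tool sees it, i.e. relative to the top-level build directory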
file(RELATIVE_PATH dep_target "${PROJECT_BINARY_DIR}" "${gensrcdict}")
add_custom_command(OUTPUT ${gensrcdict} ${rootmapname} ${pcmfile}
COMMAND run
${ROOT_genreflex_CMD} # comes from ROOTConfig.cmake
${ARG_HEADERFILES}
-o ${gensrcdict}
--rootmap=${rootmapname}
--rootmap-lib=lib${dictionary}
--select=${ARG_SELECTION}
"-I$<JOIN:$<TARGET_PROPERTY:${dictionary},INCLUDE_DIRECTORIES>,;-I>"
"-D$<JOIN:$<TARGET_PROPERTY:${dictionary},COMPILE_DEFINITIONS>,;-D>"
${ARG_OPTIONS}
DEPENDS "${ARG_HEADERFILES};${ARG_SELECTION}"
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
COMMENT "Generating ${dictionary}.cxx and ${dictionary}.rootmap and ${dictionary}_rdict.pcm"
COMMAND_EXPAND_LISTS
${job_pool})
${ARG_HEADERFILES}
-o ${gensrcdict}
--rootmap=${rootmapname}
--rootmap-lib=lib${dictionary}
--select=${ARG_SELECTION}
"-I$<JOIN:$<TARGET_PROPERTY:${dictionary},INCLUDE_DIRECTORIES>,;-I>"
"-D$<JOIN:$<TARGET_PROPERTY:${dictionary},COMPILE_DEFINITIONS>,;-D>"
${ARG_OPTIONS}
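# after genreflex, rescan the include graph of the headers and refresh the depfile used below
# (scan_dict_deps.py rewrites it only when the dependency list actually changes)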
COMMAND run $<TARGET_FILE:Python::Interpreter>
${scan_dict_deps_command}
"-I$<JOIN:$<TARGET_PROPERTY:${dictionary},INCLUDE_DIRECTORIES>,;-I>"
${CMAKE_CURRENT_BINARY_DIR}/${dictionary}.d
${dep_target}
${ARG_HEADERFILES}
DEPENDS ${ARG_HEADERFILES} ${ARG_SELECTION}
DEPFILE ${CMAKE_CURRENT_BINARY_DIR}/${dictionary}.d
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
COMMENT "Generating ${dictionary}.cxx and ${dictionary}.rootmap and ${dictionary}_rdict.pcm"
COMMAND_EXPAND_LISTS
${job_pool})
else()
if(NOT _root_dicts_deps_warning)
message(WARNING "dependencies of ROOT dictionaries are not complete: this feature needs the Ninja generator, or the Makefile generators with CMake >= 3.20")
set(_root_dicts_deps_warning 1 CACHE INTERNAL "")
endif()
add_custom_command(OUTPUT ${gensrcdict} ${rootmapname} ${pcmfile}
COMMAND run
${ROOT_genreflex_CMD} # comes from ROOTConfig.cmake
${ARG_HEADERFILES}
-o ${gensrcdict}
--rootmap=${rootmapname}
--rootmap-lib=lib${dictionary}
--select=${ARG_SELECTION}
"-I$<JOIN:$<TARGET_PROPERTY:${dictionary},INCLUDE_DIRECTORIES>,;-I>"
"-D$<JOIN:$<TARGET_PROPERTY:${dictionary},COMPILE_DEFINITIONS>,;-D>"
${ARG_OPTIONS}
DEPENDS ${ARG_HEADERFILES} ${ARG_SELECTION}
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
COMMENT "Generating ${dictionary}.cxx and ${dictionary}.rootmap and ${dictionary}_rdict.pcm"
COMMAND_EXPAND_LISTS
${job_pool})
endif()
add_custom_target(${dictionary}-gen ALL DEPENDS "${gensrcdict};${rootmapname};${pcmfile}")
# Build the dictionary as a plugin
add_library(${dictionary} MODULE ${gensrcdict})
#!/usr/bin/env python
#####################################################################################
# (c) Copyright 1998-2021 CERN for the benefit of the LHCb and ATLAS collaborations #
# #
# This software is distributed under the terms of the Apache version 2 licence, #
# copied verbatim in the file "LICENSE". #
# #
# In applying this licence, CERN does not waive the privileges and immunities #
# granted to it by virtue of its status as an Intergovernmental Organization #
# or submit itself to any jurisdiction. #
#####################################################################################
from __future__ import print_function
import re
import io
from os.path import join, exists, isabs
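# matches an '#include' directive and captures the header name from either "..." or <...>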
INCLUDE_RE = re.compile(r'^\s*#\s*include\s*["<]([^">]*)[">]')
def find_file(filename, searchpath):
    '''
    Return the absolute path to filename in the searchpath.
    If filename is already an absolute path, return it as is, if it exists.
    If filename cannot be found, return None.
    '''
    if isabs(filename):
        return filename if exists(filename) else None
    for f in (join(d, filename) for d in searchpath):
        if exists(f):
            return f
    return None

def find_deps(filename, searchpath, deps=None):
    '''
    Return a set with the absolute paths to the files included (directly and
    indirectly) by filename.
    '''
    if deps is None:
        deps = set()
    filename = find_file(filename, searchpath)
    if not filename:
        # ignore missing files (useful for generated .h files)
        return deps
    # Look for all "#include" lines in the file, then consider each of the
    # included files, ignoring those already included in the recursion
    for included in [
            f for f in [
                find_file(m.group(1), searchpath)
                for m in [INCLUDE_RE.match(l) for l in io.open(filename, encoding="utf-8")]
                if m
            ] if f and f not in deps
    ]:
        deps.add(included)
        find_deps(included, searchpath, deps)
    return deps

def main():
    from argparse import ArgumentParser
    parser = ArgumentParser()
    parser.add_argument(
        '-I',
        action='append',
        default=[],
        dest='include_dirs',
        help="directories where to look for header files")
    parser.add_argument(
        'output_file',
        help="name of the file to write (it will be updated only if there's a change)")
    parser.add_argument(
        'target', help="build target to be rebuilt if the dependencies change")
    parser.add_argument('headers', help="header files to process", nargs="+")
    args = parser.parse_args()

    if exists(args.output_file):
        with open(args.output_file) as f:
            old_deps = f.read()
    else:
        old_deps = None

    # scan for dependencies
    deps = set()
    for filename in args.headers:
        find_deps(filename, args.include_dirs, deps)
    deps = sorted(deps)

    # prepare content of output file
    new_deps = '{target}: {deps}\n'.format(target=args.target, deps=' '.join(deps))

    if new_deps != old_deps:  # write it only if it has changed
        with open(args.output_file, 'w') as f:
            f.write(new_deps)


if __name__ == '__main__':
    main()
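For reference, the scanner can also be exercised directly from Python; a minimal sketch, assuming the script is importable as scan_dict_deps and that the include directories listed below exist:

    from scan_dict_deps import find_deps
    # absolute paths of every header reachable from FooDict.h via '#include'
    deps = find_deps('FooDict.h', ['include', 'build/include'])
    print(sorted(deps))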