Commit 4b4b8e6a authored by Philipp Gadow

Merge branch mguth-ci-improvements with refs/heads/master into refs/merge-requests/422/train

parents f5becfc8 92783c54
Pipeline #3575611 passed with stages in 23 minutes and 6 seconds
@@ -8,6 +8,7 @@ variables:
   I18NSTATIC_VERSION: '0.17'
   IMAGE_TYPE: umamibase:latest
   SLIM_IMAGE: python:3.8-slim
+  REGISTY_PATH: ${CI_REGISTRY}/${CI_PROJECT_NAMESPACE}/umami

 stages:
   - check_mr
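The new REGISTY_PATH variable (spelled without the second "R" throughout the file, so every reference resolves consistently) centralises the registry prefix that the image jobs below previously spelled out in full. Roughly, for this project on CERN's GitLab, the expansion reads as follows (a sketch with an illustrative job name, not taken from the file):

    variables:
      REGISTY_PATH: ${CI_REGISTRY}/${CI_PROJECT_NAMESPACE}/umami
      # -> gitlab-registry.cern.ch/atlas-flavor-tagging-tools/algorithms/umami

    some_build_job:
      variables:
        IMAGE_DESTINATION: '${REGISTY_PATH}/umamibase:latest'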
@@ -7,16 +7,7 @@
     - test_plotting_umami_dips
     - test_plotting_umami_dl1
     - test_plotting_umami_umami
-    - unittest_classification_tools
-    - unittest_data_tools
-    - unittest_evaluation_tools
-    - unittest_helper_tools
-    - unittest_input_vars_tools
-    - unittest_metrics
-    - unittest_plotting
-    - unittest_preprocessing
-    - unittest_tf_tools
-    - unittest_train_tools
+    - unittest_parallel

 test_coverage:
   stage: coverage_test_stage
@@ -13,7 +13,6 @@
       --destination ${IMAGE_DESTINATION}
     # Print the full registry path of the pushed image
     - echo "Image pushed successfully to ${IMAGE_DESTINATION}"
   image:
     # We recommend using the CERN version of the Kaniko image: gitlab-registry.cern.ch/ci-tools/docker-image-builder
     name: gitlab-registry.cern.ch/ci-tools/docker-image-builder
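For orientation: the build jobs in the rest of this file all extend an image-build template whose script drives Kaniko inside the builder image named here. A minimal sketch of such a template, assuming the standard Kaniko executor flags (the actual template sits in a collapsed part of the diff):

    .image_build_template: &image_build_template
      image:
        name: gitlab-registry.cern.ch/ci-tools/docker-image-builder
        entrypoint: [""]
      script:
        # Build and push without a Docker daemon; BASE carries 'BASE_IMAGE=<image>'
        - /kaniko/executor --context "${CI_PROJECT_DIR}"
          --dockerfile "${DOCKER_FILE}"
          --build-arg "${BASE}"
          --destination "${IMAGE_DESTINATION}"
        # Print the full registry path of the pushed image
        - echo "Image pushed successfully to ${IMAGE_DESTINATION}"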
@@ -33,13 +32,13 @@ build_umamibase_cpu:
   variables:
     BASE: 'BASE_IMAGE=tensorflow/tensorflow:$TFTAG'
     DOCKER_FILE: docker/umamibase/Dockerfile
-    IMAGE_DESTINATION: '${CI_REGISTRY}/${CI_PROJECT_NAMESPACE}/umami/umamibase:latest'
+    IMAGE_DESTINATION: '${REGISTY_PATH}/umamibase:latest'
   rules:
     - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH
       <<: *requirement_changes
     - if: $CI_COMMIT_TAG
       variables:
-        IMAGE_DESTINATION: '${CI_REGISTRY}/${CI_PROJECT_NAMESPACE}/umami/umamibase:$CI_COMMIT_REF_SLUG'
+        IMAGE_DESTINATION: '${REGISTY_PATH}/umamibase:$CI_COMMIT_REF_SLUG'

 build_umamibase_gpu:
   <<: *image_build_template
@@ -47,21 +46,21 @@ build_umamibase_gpu:
   variables:
     BASE: 'BASE_IMAGE=tensorflow/tensorflow:$TFTAG-gpu'
     DOCKER_FILE: docker/umamibase/Dockerfile
-    IMAGE_DESTINATION: '${CI_REGISTRY}/${CI_PROJECT_NAMESPACE}/umami/umamibase:latest-gpu'
+    IMAGE_DESTINATION: '${REGISTY_PATH}/umamibase:latest-gpu'
   rules:
     - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH
       <<: *requirement_changes
     - if: $CI_COMMIT_TAG
       variables:
-        IMAGE_DESTINATION: '${CI_REGISTRY}/${CI_PROJECT_NAMESPACE}/umami/umamibase:$CI_COMMIT_REF_SLUG-gpu'
+        IMAGE_DESTINATION: '${REGISTY_PATH}/umamibase:$CI_COMMIT_REF_SLUG-gpu'

 build_umamibase_plus_cpu:
   <<: *image_build_template
   stage: image_build_umamibase
   variables:
-    BASE: 'BASE_IMAGE=${CI_REGISTRY}/${CI_PROJECT_NAMESPACE}/umami/umamibase:latest'
+    BASE: 'BASE_IMAGE=${REGISTY_PATH}/umamibase:latest'
     DOCKER_FILE: docker/umamibase-plus/Dockerfile
-    IMAGE_DESTINATION: '${CI_REGISTRY}/${CI_PROJECT_NAMESPACE}/umami/umamibase-plus:latest'
+    IMAGE_DESTINATION: '${REGISTY_PATH}/umamibase-plus:latest'
   needs:
     - job: build_umamibase_cpu
       optional: true
@@ -77,16 +76,16 @@ build_umamibase_plus_cpu:
         - .gitlab-ci.yml
     - if: $CI_COMMIT_TAG
       variables:
-        BASE: '${CI_REGISTRY}/${CI_PROJECT_NAMESPACE}/umami/umamibase:$CI_COMMIT_REF_SLUG'
-        IMAGE_DESTINATION: '${CI_REGISTRY}/${CI_PROJECT_NAMESPACE}/umami/umamibase:$CI_COMMIT_REF_SLUG'
+        BASE: '${REGISTY_PATH}/umamibase:$CI_COMMIT_REF_SLUG'
+        IMAGE_DESTINATION: '${REGISTY_PATH}/umamibase:$CI_COMMIT_REF_SLUG'

 build_umamibase_plus_gpu:
   <<: *image_build_template
   stage: builds
   variables:
-    BASE: 'BASE_IMAGE=${CI_REGISTRY}/${CI_PROJECT_NAMESPACE}/umami/umamibase:latest-gpu'
+    BASE: 'BASE_IMAGE=${REGISTY_PATH}/umamibase:latest-gpu'
     DOCKER_FILE: docker/umamibase-plus/Dockerfile
-    IMAGE_DESTINATION: '${CI_REGISTRY}/${CI_PROJECT_NAMESPACE}/umami/umamibase-plus:latest-gpu'
+    IMAGE_DESTINATION: '${REGISTY_PATH}/umamibase-plus:latest-gpu'
   needs:
     - job: build_umamibase_gpu
       optional: true
@@ -100,8 +99,8 @@ build_umamibase_plus_gpu:
         - .gitlab-ci.yml
     - if: $CI_COMMIT_TAG
       variables:
-        BASE: '${CI_REGISTRY}/${CI_PROJECT_NAMESPACE}/umami/umamibase:$CI_COMMIT_REF_SLUG-gpu'
-        IMAGE_DESTINATION: '${CI_REGISTRY}/${CI_PROJECT_NAMESPACE}/umami/umamibase:$CI_COMMIT_REF_SLUG-gpu'
+        BASE: '${REGISTY_PATH}/umamibase:$CI_COMMIT_REF_SLUG-gpu'
+        IMAGE_DESTINATION: '${REGISTY_PATH}/umamibase:$CI_COMMIT_REF_SLUG-gpu'

 build_umamibase_gpu_pytorch:
   <<: *image_build_template
@@ -109,7 +108,7 @@ build_umamibase_gpu_pytorch:
   variables:
     BASE: 'BASE_IMAGE=pytorch/pytorch:$TORCHTAG'
     DOCKER_FILE: docker/umamibase/Dockerfile
-    IMAGE_DESTINATION: '${CI_REGISTRY}/${CI_PROJECT_NAMESPACE}/umami/umamibase:latest-pytorch-gpu'
+    IMAGE_DESTINATION: '${REGISTY_PATH}/umamibase:latest-pytorch-gpu'
   rules:
     - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH
       <<: *requirement_changes
@@ -120,28 +119,28 @@ build_umami_cpu:
   <<: *image_build_template
   stage: image_build_umami
   variables:
-    BASE: 'BASE_IMAGE=${CI_REGISTRY}/${CI_PROJECT_NAMESPACE}/umami/umamibase:latest'
+    BASE: 'BASE_IMAGE=${REGISTY_PATH}/umamibase:latest'
     DOCKER_FILE: docker/umami/Dockerfile
     IMAGE_DESTINATION: '${CI_REGISTRY_IMAGE}:latest'
   rules:
     - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH
     - if: $CI_COMMIT_TAG
       variables:
-        BASE: 'BASE_IMAGE=${CI_REGISTRY}/${CI_PROJECT_NAMESPACE}/umami/umamibase:$CI_COMMIT_REF_SLUG'
+        BASE: 'BASE_IMAGE=${REGISTY_PATH}/umamibase:$CI_COMMIT_REF_SLUG'
         IMAGE_DESTINATION: $CI_REGISTRY_IMAGE:$CI_COMMIT_REF_SLUG

 build_umami_gpu:
   <<: *image_build_template
   stage: image_build_umami
   variables:
-    BASE: 'BASE_IMAGE=${CI_REGISTRY}/${CI_PROJECT_NAMESPACE}/umami/umamibase:latest-gpu'
+    BASE: 'BASE_IMAGE=${REGISTY_PATH}/umamibase:latest-gpu'
     DOCKER_FILE: docker/umami/Dockerfile
     IMAGE_DESTINATION: '${CI_REGISTRY_IMAGE}:latest-gpu'
   rules:
     - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH
     - if: $CI_COMMIT_TAG
       variables:
-        BASE: 'BASE_IMAGE=${CI_REGISTRY}/${CI_PROJECT_NAMESPACE}/umami/umamibase:$CI_COMMIT_REF_SLUG-gpu'
+        BASE: 'BASE_IMAGE=${REGISTY_PATH}/umamibase:$CI_COMMIT_REF_SLUG-gpu'
         IMAGE_DESTINATION: '$CI_REGISTRY_IMAGE:$CI_COMMIT_REF_SLUG-gpu'
@@ -161,7 +160,7 @@ build_umamibase_cpu_MR:
   variables:
     BASE: 'BASE_IMAGE=tensorflow/tensorflow:$TFTAG'
     DOCKER_FILE: docker/umamibase/Dockerfile
-    IMAGE_DESTINATION: '${CI_REGISTRY}/${CI_PROJECT_NAMESPACE}/umami/temporary_images:${CI_MERGE_REQUEST_IID}-base'
+    IMAGE_DESTINATION: '${REGISTY_PATH}/temporary_images:${CI_MERGE_REQUEST_IID}-base'
   rules:
     - if: $CI_PIPELINE_SOURCE == "merge_request_event" && $CI_PROJECT_PATH=="atlas-flavor-tagging-tools/algorithms/umami"
       <<: *requirement_changes
@@ -180,7 +179,7 @@ build_umamibase_gpu_MR:
   variables:
     BASE: 'BASE_IMAGE=tensorflow/tensorflow:$TFTAG-gpu'
     DOCKER_FILE: docker/umamibase/Dockerfile
-    IMAGE_DESTINATION: '${CI_REGISTRY}/${CI_PROJECT_NAMESPACE}/umami/temporary_images:${CI_MERGE_REQUEST_IID}-gpu-base'
+    IMAGE_DESTINATION: '${REGISTY_PATH}/temporary_images:${CI_MERGE_REQUEST_IID}-gpu-base'
   rules:
     - if: $CI_PIPELINE_SOURCE == "merge_request_event" && $CI_PROJECT_PATH=="atlas-flavor-tagging-tools/algorithms/umami"
       when: manual
@@ -190,54 +189,43 @@ build_umamibase_gpu_MR:
 # copies of the images built in gitlab CI/CD will be deployed to Docker Hub
 # ----------------------------------------------------------------------------
-.push_to_hub_template: &push_to_hub_template
+.push_to_hub_template:
   stage: publish
   image: matthewfeickert/skopeo-docker:skopeo0.1.42
   variables:
     USER: btagging
     IMAGE: umami
-
-push_to_hub_cpu:
-  <<: *push_to_hub_template
   script:
     - /home/docker/skopeo copy
       --src-creds ${CI_REGISTRY_USER}:${CI_BUILD_TOKEN}
       --dest-creds ${DH_USERNAME}:${DH_PASSWORD}
-      docker://$CI_REGISTRY_IMAGE:latest
-      docker://${USER}/${IMAGE}:latest
+      docker://$CI_REGISTRY_IMAGE:${IMAGE_TAG}
+      docker://${USER}/${IMAGE}:${IMAGE_TAG}
+
+push_to_hub_cpu:
+  extends: .push_to_hub_template
+  variables:
+    IMAGE_TAG: latest
   only:
     - master@atlas-flavor-tagging-tools/algorithms/umami

 push_to_hub_gpu:
-  <<: *push_to_hub_template
-  script:
-    - /home/docker/skopeo copy
-      --src-creds ${CI_REGISTRY_USER}:${CI_BUILD_TOKEN}
-      --dest-creds ${DH_USERNAME}:${DH_PASSWORD}
-      docker://$CI_REGISTRY_IMAGE:latest-gpu
-      docker://${USER}/${IMAGE}:latest-gpu
+  extends: .push_to_hub_template
+  variables:
+    IMAGE_TAG: latest-gpu
   only:
     - master@atlas-flavor-tagging-tools/algorithms/umami

 push_to_hub_tag:
-  <<: *push_to_hub_template
-  script:
-    - /home/docker/skopeo copy
-      --src-creds ${CI_REGISTRY_USER}:${CI_BUILD_TOKEN}
-      --dest-creds ${DH_USERNAME}:${DH_PASSWORD}
-      docker://$CI_REGISTRY_IMAGE:$CI_COMMIT_REF_SLUG
-      docker://${USER}/${IMAGE}:$CI_COMMIT_REF_SLUG
+  extends: .push_to_hub_template
+  variables:
+    IMAGE_TAG: $CI_COMMIT_REF_SLUG
   only:
     - tags@atlas-flavor-tagging-tools/algorithms/umami

 push_to_hub_gpu_tag:
-  <<: *push_to_hub_template
-  script:
-    - /home/docker/skopeo copy
-      --src-creds ${CI_REGISTRY_USER}:${CI_BUILD_TOKEN}
-      --dest-creds ${DH_USERNAME}:${DH_PASSWORD}
-      docker://$CI_REGISTRY_IMAGE:$CI_COMMIT_REF_SLUG-gpu
-      docker://${USER}/${IMAGE}:$CI_COMMIT_REF_SLUG-gpu
+  extends: .push_to_hub_template
+  variables:
+    IMAGE_TAG: $CI_COMMIT_REF_SLUG-gpu
   only:
     - tags@atlas-flavor-tagging-tools/algorithms/umami
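The rewrite above trades YAML-anchor inheritance (<<: *push_to_hub_template) plus four copies of the skopeo script for a single parameterised script in the hidden template; each concrete job now only supplies IMAGE_TAG. Because extends merges the template's keys into the job, USER and IMAGE still come from the template while the job-level variables block adds IMAGE_TAG. In outline (a sketch; credentials flags omitted):

    .push_to_hub_template:
      variables:
        USER: btagging
        IMAGE: umami
      script:
        # One copy of the copy command; ${IMAGE_TAG} differs per job
        - /home/docker/skopeo copy
          docker://${CI_REGISTRY_IMAGE}:${IMAGE_TAG}
          docker://${USER}/${IMAGE}:${IMAGE_TAG}

    push_to_hub_gpu:
      extends: .push_to_hub_template
      variables:
        IMAGE_TAG: latest-gpu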
@@ -24,7 +24,6 @@ unittest:
   script:
     - pytest --cov=./ --cov-report= ./umami/tests/unit/$UNIT_TEST_MODULE/ -v -s --junitxml=report.xml
     - cp .coverage coverage_files/.coverage.unittest_$UNIT_TEST_MODULE
   artifacts:
     when: always
     paths:
@@ -33,52 +32,18 @@ unittest:
     junit: report.xml
   retry: 2

-unittest_evaluation_tools:
-  <<: *unittest_template
-  variables:
-    UNIT_TEST_MODULE: evaluation_tools
-
-unittest_helper_tools:
-  <<: *unittest_template
-  variables:
-    UNIT_TEST_MODULE: helper_tools
-
-unittest_input_vars_tools:
-  <<: *unittest_template
-  variables:
-    UNIT_TEST_MODULE: input_vars_tools
-
-unittest_preprocessing:
-  <<: *unittest_template
-  variables:
-    UNIT_TEST_MODULE: preprocessing
-
-unittest_tf_tools:
-  <<: *unittest_template
-  variables:
-    UNIT_TEST_MODULE: tf_tools
-
-unittest_train_tools:
-  <<: *unittest_template
-  variables:
-    UNIT_TEST_MODULE: train_tools
-
-unittest_metrics:
-  <<: *unittest_template
-  variables:
-    UNIT_TEST_MODULE: metrics
-
-unittest_classification_tools:
-  <<: *unittest_template
-  variables:
-    UNIT_TEST_MODULE: classification_tools
-
-unittest_data_tools:
-  <<: *unittest_template
-  variables:
-    UNIT_TEST_MODULE: data_tools
-
-unittest_plotting:
-  <<: *unittest_template
-  variables:
-    UNIT_TEST_MODULE: plotting
+unittest_parallel:
+  <<: *unittest_template
+  parallel:
+    matrix:
+      - UNIT_TEST_MODULE:
+          - evaluation_tools
+          - helper_tools
+          - input_vars_tools
+          - preprocessing
+          - tf_tools
+          - train_tools
+          - metrics
+          - classification_tools
+          - data_tools
+          - plotting
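With parallel:matrix, GitLab fans this single job definition out into one child job per listed value; each child runs the unittest template's script with its own $UNIT_TEST_MODULE, and downstream jobs (such as test_coverage earlier in this diff) can reference the single name unittest_parallel instead of ten separate jobs. Schematically (job and module names illustrative):

    unittest_example:
      parallel:
        matrix:
          # spawns 'unittest_example: [evaluation_tools]', 'unittest_example: [helper_tools]', ...
          - UNIT_TEST_MODULE:
              - evaluation_tools
              - helper_tools
      script:
        # each child job sees its own value of the variable
        - pytest ./umami/tests/unit/$UNIT_TEST_MODULE/ -v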
@@ -11,7 +11,7 @@ mlxtend==0.19.0
 netCDF4==1.5.8
 numba==0.55.1
 numpy==1.21.0
-pandas==1.4.1
+pandas==1.3.5
 papermill==2.3.4
 partd==1.2.0
 Pillow==9.0.1
@@ -21,4 +21,4 @@ log_level=INFO
 max-line-length = 88

 [pylint.'MESSAGES CONTROL']
-disable = invalid-name,unspecified-encoding,wrong-import-order,logging-fstring-interpolation,no-name-in-module,too-many-arguments,too-many-locals,too-many-lines,no-member,too-many-statements,too-many-branches,too-many-nested-blocks,too-many-instance-attributes,duplicate-code,fixme
+disable = invalid-name,unspecified-encoding,wrong-import-order,logging-fstring-interpolation,no-name-in-module,too-many-arguments,too-many-locals,too-many-lines,no-member,too-many-statements,too-many-branches,too-many-nested-blocks,too-many-instance-attributes,duplicate-code,fixme,too-few-public-methods
@@ -9,10 +9,8 @@ import pickle
 import h5py
 import pandas as pd
 import tensorflow as tf
-from tensorflow.keras.models import load_model  # pylint: disable=no-name-in-module
-from tensorflow.keras.utils import (
-    CustomObjectScope,  # pylint: disable=no-name-in-module
-)
+from tensorflow.keras.models import load_model  # pylint: disable=import-error
+from tensorflow.keras.utils import CustomObjectScope  # pylint: disable=import-error

 import umami.classification_tools as uct
 import umami.data_tools as udt
@@ -9,8 +9,8 @@ import copy
 from itertools import permutations

 import numpy as np
-from tensorflow.keras.layers import Lambda
-from tensorflow.keras.models import Model
+from tensorflow.keras.layers import Lambda  # pylint: disable=import-error
+from tensorflow.keras.models import Model  # pylint: disable=import-error

 import umami.metrics as umt
@@ -4,9 +4,9 @@ import json
 import h5py
 import tensorflow as tf
-from tensorflow.keras.callbacks import ModelCheckpoint
-from tensorflow.keras.models import load_model
-from tensorflow.keras.optimizers import Adam
+from tensorflow.keras.callbacks import ModelCheckpoint  # pylint: disable=import-error
+from tensorflow.keras.models import load_model  # pylint: disable=import-error
+from tensorflow.keras.optimizers import Adam  # pylint: disable=import-error

 import umami.tf_tools as utf
 import umami.train_tools as utt
@@ -5,16 +5,16 @@ import os
 import h5py
 import tensorflow as tf
-from tensorflow.keras.callbacks import ModelCheckpoint
-from tensorflow.keras.layers import (
+from tensorflow.keras.callbacks import ModelCheckpoint  # pylint: disable=import-error
+from tensorflow.keras.layers import (  # pylint: disable=import-error
     Activation,
     BatchNormalization,
     Dense,
     Dropout,
     Input,
 )
-from tensorflow.keras.models import Model, load_model
-from tensorflow.keras.optimizers import Adam
+from tensorflow.keras.models import Model, load_model  # pylint: disable=import-error
+from tensorflow.keras.optimizers import Adam  # pylint: disable=import-error

 import umami.tf_tools as utf
 import umami.train_tools as utt
@@ -5,11 +5,9 @@ import os
 import h5py
 import tensorflow as tf
-from tensorflow.keras import activations
-from tensorflow.keras.callbacks import (
-    ModelCheckpoint,  # pylint: disable=no-name-in-module
-)
-from tensorflow.keras.layers import (  # pylint: disable=no-name-in-module
+from tensorflow.keras import activations  # pylint: disable=import-error
+from tensorflow.keras.callbacks import ModelCheckpoint  # pylint: disable=import-error
+from tensorflow.keras.layers import (  # pylint: disable=import-error
     Activation,
     BatchNormalization,
     Dense,
@@ -18,8 +16,8 @@ from tensorflow.keras.layers import (  # pylint: disable=no-name-in-module
     Masking,
     TimeDistributed,
 )
-from tensorflow.keras.models import Model, load_model
-from tensorflow.keras.optimizers import Adam
+from tensorflow.keras.models import Model, load_model  # pylint: disable=import-error
+from tensorflow.keras.optimizers import Adam  # pylint: disable=import-error

 import umami.tf_tools as utf
 import umami.train_tools as utt
@@ -6,11 +6,9 @@ import os
 import h5py
 import tensorflow as tf
-from tensorflow.keras import activations
-from tensorflow.keras.callbacks import (
-    ModelCheckpoint,  # pylint: disable=no-name-in-module
-)
-from tensorflow.keras.layers import (  # pylint: disable=no-name-in-module
+from tensorflow.keras import activations  # pylint: disable=import-error
+from tensorflow.keras.callbacks import ModelCheckpoint  # pylint: disable=import-error
+from tensorflow.keras.layers import (  # pylint: disable=import-error
     Activation,
     BatchNormalization,
     Concatenate,
@@ -20,11 +18,8 @@ from tensorflow.keras.layers import (  # pylint: disable=no-name-in-module
     Masking,
     TimeDistributed,
 )
-from tensorflow.keras.models import (  # pylint: disable=no-name-in-module
-    Model,
-    load_model,
-)
-from tensorflow.keras.optimizers import Adam  # pylint: disable=no-name-in-module
+from tensorflow.keras.models import Model, load_model  # pylint: disable=import-error
+from tensorflow.keras.optimizers import Adam  # pylint: disable=import-error

 import umami.tf_tools as utf
 import umami.train_tools as utt
@@ -254,7 +254,6 @@ class test_DeepSet(tf.test.TestCase):
         # Get net output
         out = deepset(inputs=inputs, mask=2)
         logger.warning(out)

         # Test output
         np.testing.assert_almost_equal(expected_output, out)
"""
Implementations by Johnny Raine
"""
from tensorflow.keras import backend as K
from tensorflow.keras.layers import BatchNormalization, Dense, Layer
from tensorflow.keras import backend as K # pylint: disable=import-error
from tensorflow.keras.layers import ( # pylint: disable=import-error
BatchNormalization,
Dense,
Layer,
)
class DenseNet(Layer):
@@ -12,12 +16,29 @@ class DenseNet(Layer):
     def __init__(
         self,
-        nodes,
-        output_nodes=1,
-        activation="relu",
-        batch_norm=False,
+        nodes: list,
+        output_nodes: int = 1,
+        activation: str = "relu",
+        batch_norm: bool = False,
         **kwargs,
     ):
+        """
+        Init the DenseNet layer
+
+        Parameters
+        ----------
+        nodes : list
+            List with the number of neurons per node
+        output_nodes : int
+            Number of outputs in the output node
+        activation : str, optional
+            Activation which is used, by default "relu"
+        batch_norm : bool, optional
+            Use batch normalisation, by default False
+        **kwargs : dict
+            Additional arguments passed.
+        """
+
         # Define the attributes
         self.nodes = nodes
         self.output_nodes = output_nodes
@@ -48,13 +69,34 @@ class DenseNet(Layer):
         assert len(nodes), "No layers in DenseNet"
         super().__init__(**kwargs)

-    def call(self, inputs):  # pylint: disable=arguments-differ
+    def call(self, inputs):
+        """
+        Define what happens when the layer is called
+
+        Parameters
+        ----------
+        inputs : object
+            Input to the network.
+
+        Returns
+        -------
+        output : object
+            Output of the network.
+        """
         out = self.layers[0](inputs)
         for layer in self.layers[1:]:
             out = layer(out)
         return out

-    def get_config(self):
+    def get_config(self) -> dict:
+        """
+        Return the settings of the network.
+
+        Returns
+        -------
+        dict
+            Dict with the config settings.
+        """
         # Get configuration of the network
         config = {
             "nodes": self.nodes,
@@ -75,12 +117,28 @@ class DeepSet(Layer):
     def __init__(
         self,
-        nodes,
-        activation="relu",
-        batch_norm=False,
-        mask_zero=True,
+        nodes: list,
+        activation: str = "relu",
+        batch_norm: bool = False,
+        mask_zero: bool = True,
         **kwargs,
     ):
+        """
+        Init the DeepSet Layer.
+
+        Parameters
+        ----------
+        nodes : list
+            List with the number of neurons per node
+        activation : str, optional
+            Activation which is used, by default "relu"
+        batch_norm : bool, optional
+            Use batch normalisation, by default False
+        mask_zero : bool, optional
+            Use 0 as mask value, by default True
+        **kwargs : dict
+            Additional arguments passed.
+        """
+
         # Define attributes
         self.nodes = nodes
         self.activation = activation
@@ -108,7 +166,22 @@ class DeepSet(Layer):
         assert self.layers, "No layers in DeepSet"
         super().__init__(**kwargs)

-    def call(self, inputs, mask=None):  # pylint: disable=arguments-differ
+    def call(self, inputs, mask: float = None):  # pylint: disable=arguments-differ
+        """
+        Return the output of the network for a given input.
+
+        Parameters
+        ----------
+        inputs : object
+            Input to layer.
+        mask : float, optional
+            Mask value, by default None
+
+        Returns
+        -------
+        output
+            Layer output.
+        """
         # Assert that the tensor shape is at least rank 3
         assert len(inputs.shape) == 3, (
             "DeepSets layer requires tensor of rank 3. Shape of tensor"
@@ -126,12 +199,26 @@ class DeepSet(Layer):
         for layer in self.layers[1:]:
             out = layer(out)

         # if mask is not None:
         #     out *= (1-K.cast(mask,dtype="float32"))

         return out

-    def compute_mask(self, inputs, mask=None):
+    def compute_mask(
+        self, inputs, mask: float = None
+    ):  # pylint: disable=unused-argument
+        """
+        Compute the masking.
+
+        Parameters
+        ----------
+        inputs : object
+            Input to a layer.
+        mask : float