diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 389d8de25b0864f37f8732e403496fa38c57d99a..6f3f5ce5d40a4f426ecc26bf04918991211d160e 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -8,6 +8,8 @@ variables:
   I18NSTATIC_VERSION: '0.17'
   IMAGE_TYPE: umamibase:latest
   SLIM_IMAGE: python:3.8-slim
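+  # Base registry path shared by the Umami image build jobs below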
+  REGISTRY_PATH: ${CI_REGISTRY}/${CI_PROJECT_NAMESPACE}/umami
 
 stages:
   - check_mr
diff --git a/pipelines/.coverage-gitlab-ci.yaml b/pipelines/.coverage-gitlab-ci.yaml
index 5c503982715c43395c6a2e423f1b8f9195c5e6f1..30e4ca291fbd5f91e4185d937f83881619524924 100644
--- a/pipelines/.coverage-gitlab-ci.yaml
+++ b/pipelines/.coverage-gitlab-ci.yaml
@@ -7,16 +7,8 @@
   - test_plotting_umami_dips
   - test_plotting_umami_dl1
   - test_plotting_umami_umami
-  - unittest_classification_tools
-  - unittest_data_tools
-  - unittest_evaluation_tools
-  - unittest_helper_tools
-  - unittest_input_vars_tools
-  - unittest_metrics
-  - unittest_plotting
-  - unittest_preprocessing
-  - unittest_tf_tools
-  - unittest_train_tools
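+  # all unit test modules now run within the single parallel job "unittest_parallel"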
+  - unittest_parallel
 
 test_coverage:
   stage: coverage_test_stage
diff --git a/pipelines/.docker-gitlab-ci.yaml b/pipelines/.docker-gitlab-ci.yaml
index b17fe64d8d69ab92c328d292dd9ffd23e5b7d26c..b531e0a9edbce266c7e5a4466bd0a653f900faa6 100644
--- a/pipelines/.docker-gitlab-ci.yaml
+++ b/pipelines/.docker-gitlab-ci.yaml
@@ -13,7 +13,6 @@
       --destination ${IMAGE_DESTINATION}
     # Print the full registry path of the pushed image
     - echo "Image pushed successfully to ${IMAGE_DESTINATION}"
-
   image:
     # We recommend using the CERN version of the Kaniko image: gitlab-registry.cern.ch/ci-tools/docker-image-builder
     name: gitlab-registry.cern.ch/ci-tools/docker-image-builder
@@ -33,13 +32,13 @@ build_umamibase_cpu:
   variables:
     BASE: 'BASE_IMAGE=tensorflow/tensorflow:$TFTAG'
     DOCKER_FILE: docker/umamibase/Dockerfile
-    IMAGE_DESTINATION: '${CI_REGISTRY}/${CI_PROJECT_NAMESPACE}/umami/umamibase:latest'
+    IMAGE_DESTINATION: '${REGISTRY_PATH}/umamibase:latest'
   rules:
     - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH
       <<: *requirement_changes
     - if: $CI_COMMIT_TAG
       variables:
-        IMAGE_DESTINATION: '${CI_REGISTRY}/${CI_PROJECT_NAMESPACE}/umami/umamibase:$CI_COMMIT_REF_SLUG'
+        IMAGE_DESTINATION: '${REGISTRY_PATH}/umamibase:$CI_COMMIT_REF_SLUG'
 
 build_umamibase_gpu:
   <<: *image_build_template
@@ -47,21 +46,21 @@ build_umamibase_gpu:
   variables:
     BASE: 'BASE_IMAGE=tensorflow/tensorflow:$TFTAG-gpu'
     DOCKER_FILE: docker/umamibase/Dockerfile
-    IMAGE_DESTINATION: '${CI_REGISTRY}/${CI_PROJECT_NAMESPACE}/umami/umamibase:latest-gpu'
+    IMAGE_DESTINATION: '${REGISTRY_PATH}/umamibase:latest-gpu'
   rules:
     - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH
       <<: *requirement_changes
     - if: $CI_COMMIT_TAG
       variables:
-        IMAGE_DESTINATION: '${CI_REGISTRY}/${CI_PROJECT_NAMESPACE}/umami/umamibase:$CI_COMMIT_REF_SLUG-gpu'
+        IMAGE_DESTINATION: '${REGISTRY_PATH}/umamibase:$CI_COMMIT_REF_SLUG-gpu'
 
 build_umamibase_plus_cpu:
   <<: *image_build_template
   stage: image_build_umamibase
   variables:
-    BASE: 'BASE_IMAGE=${CI_REGISTRY}/${CI_PROJECT_NAMESPACE}/umami/umamibase:latest'
+    BASE: 'BASE_IMAGE=${REGISTRY_PATH}/umamibase:latest'
     DOCKER_FILE: docker/umamibase-plus/Dockerfile
-    IMAGE_DESTINATION: '${CI_REGISTRY}/${CI_PROJECT_NAMESPACE}/umami/umamibase-plus:latest'
+    IMAGE_DESTINATION: '${REGISTRY_PATH}/umamibase-plus:latest'
   needs:
     - job: build_umamibase_cpu
       optional: true
@@ -77,16 +76,16 @@ build_umamibase_plus_cpu:
         - .gitlab-ci.yml
     - if: $CI_COMMIT_TAG
       variables:
-        BASE: '${CI_REGISTRY}/${CI_PROJECT_NAMESPACE}/umami/umamibase:$CI_COMMIT_REF_SLUG'
-        IMAGE_DESTINATION: '${CI_REGISTRY}/${CI_PROJECT_NAMESPACE}/umami/umamibase:$CI_COMMIT_REF_SLUG'
+        BASE: '${REGISTRY_PATH}/umamibase:$CI_COMMIT_REF_SLUG'
+        IMAGE_DESTINATION: '${REGISTRY_PATH}/umamibase:$CI_COMMIT_REF_SLUG'
 
 build_umamibase_plus_gpu:
   <<: *image_build_template
   stage: builds
   variables:
-    BASE: 'BASE_IMAGE=${CI_REGISTRY}/${CI_PROJECT_NAMESPACE}/umami/umamibase:latest-gpu'
+    BASE: 'BASE_IMAGE=${REGISTRY_PATH}/umamibase:latest-gpu'
     DOCKER_FILE: docker/umamibase-plus/Dockerfile
-    IMAGE_DESTINATION: '${CI_REGISTRY}/${CI_PROJECT_NAMESPACE}/umami/umamibase-plus:latest-gpu'
+    IMAGE_DESTINATION: '${REGISTRY_PATH}/umamibase-plus:latest-gpu'
   needs:
     - job: build_umamibase_gpu
       optional: true
@@ -100,8 +99,8 @@ build_umamibase_plus_gpu:
         - .gitlab-ci.yml
     - if: $CI_COMMIT_TAG
       variables:
-        BASE: '${CI_REGISTRY}/${CI_PROJECT_NAMESPACE}/umami/umamibase:$CI_COMMIT_REF_SLUG-gpu'
-        IMAGE_DESTINATION: '${CI_REGISTRY}/${CI_PROJECT_NAMESPACE}/umami/umamibase:$CI_COMMIT_REF_SLUG-gpu'
+        BASE: '${REGISTRY_PATH}/umamibase:$CI_COMMIT_REF_SLUG-gpu'
+        IMAGE_DESTINATION: '${REGISTRY_PATH}/umamibase:$CI_COMMIT_REF_SLUG-gpu'
 
 build_umamibase_gpu_pytorch:
   <<: *image_build_template
@@ -109,7 +108,7 @@ build_umamibase_gpu_pytorch:
   variables:
     BASE: 'BASE_IMAGE=pytorch/pytorch:$TORCHTAG'
     DOCKER_FILE: docker/umamibase/Dockerfile
-    IMAGE_DESTINATION: '${CI_REGISTRY}/${CI_PROJECT_NAMESPACE}/umami/umamibase:latest-pytorch-gpu'
+    IMAGE_DESTINATION: '${REGISTRY_PATH}/umamibase:latest-pytorch-gpu'
   rules:
     - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH
       <<: *requirement_changes
@@ -120,28 +119,28 @@ build_umami_cpu:
   <<: *image_build_template
   stage: image_build_umami
   variables:
-    BASE: 'BASE_IMAGE=${CI_REGISTRY}/${CI_PROJECT_NAMESPACE}/umami/umamibase:latest'
+    BASE: 'BASE_IMAGE=${REGISTRY_PATH}/umamibase:latest'
     DOCKER_FILE: docker/umami/Dockerfile
     IMAGE_DESTINATION: '${CI_REGISTRY_IMAGE}:latest'
   rules:
     - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH
     - if: $CI_COMMIT_TAG
       variables:
-        BASE: 'BASE_IMAGE=${CI_REGISTRY}/${CI_PROJECT_NAMESPACE}/umami/umamibase:$CI_COMMIT_REF_SLUG'
+        BASE: 'BASE_IMAGE=${REGISTRY_PATH}/umamibase:$CI_COMMIT_REF_SLUG'
         IMAGE_DESTINATION: $CI_REGISTRY_IMAGE:$CI_COMMIT_REF_SLUG
 
 build_umami_gpu:
   <<: *image_build_template
   stage: image_build_umami
   variables:
-    BASE: 'BASE_IMAGE=${CI_REGISTRY}/${CI_PROJECT_NAMESPACE}/umami/umamibase:latest-gpu'
+    BASE: 'BASE_IMAGE=${REGISTRY_PATH}/umamibase:latest-gpu'
     DOCKER_FILE: docker/umami/Dockerfile
     IMAGE_DESTINATION: '${CI_REGISTRY_IMAGE}:latest-gpu'
   rules:
     - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH
     - if: $CI_COMMIT_TAG
       variables:
-        BASE: 'BASE_IMAGE=${CI_REGISTRY}/${CI_PROJECT_NAMESPACE}/umami/umamibase:$CI_COMMIT_REF_SLUG-gpu'
+        BASE: 'BASE_IMAGE=${REGISTRY_PATH}/umamibase:$CI_COMMIT_REF_SLUG-gpu'
         IMAGE_DESTINATION: '$CI_REGISTRY_IMAGE:$CI_COMMIT_REF_SLUG-gpu'
 
 
@@ -161,7 +160,7 @@ build_umamibase_cpu_MR:
   variables:
     BASE: 'BASE_IMAGE=tensorflow/tensorflow:$TFTAG'
     DOCKER_FILE: docker/umamibase/Dockerfile
-    IMAGE_DESTINATION: '${CI_REGISTRY}/${CI_PROJECT_NAMESPACE}/umami/temporary_images:${CI_MERGE_REQUEST_IID}-base'
+    IMAGE_DESTINATION: '${REGISTRY_PATH}/temporary_images:${CI_MERGE_REQUEST_IID}-base'
   rules:
     - if: $CI_PIPELINE_SOURCE == "merge_request_event" && $CI_PROJECT_PATH=="atlas-flavor-tagging-tools/algorithms/umami"
       <<: *requirement_changes
@@ -180,7 +179,7 @@ build_umamibase_gpu_MR:
   variables:
     BASE: 'BASE_IMAGE=tensorflow/tensorflow:$TFTAG-gpu'
     DOCKER_FILE: docker/umamibase/Dockerfile
-    IMAGE_DESTINATION: '${CI_REGISTRY}/${CI_PROJECT_NAMESPACE}/umami/temporary_images:${CI_MERGE_REQUEST_IID}-gpu-base'
+    IMAGE_DESTINATION: '${REGISTRY_PATH}/temporary_images:${CI_MERGE_REQUEST_IID}-gpu-base'
   rules:
     - if: $CI_PIPELINE_SOURCE == "merge_request_event" && $CI_PROJECT_PATH=="atlas-flavor-tagging-tools/algorithms/umami"
       when: manual
@@ -190,54 +189,44 @@ build_umamibase_gpu_MR:
 # copies of the images built in gitlab CI/CD will be deployed to Docker Hub
 # ----------------------------------------------------------------------------
 
-.push_to_hub_template: &push_to_hub_template
+.push_to_hub_template:
   stage: publish
   image: matthewfeickert/skopeo-docker:skopeo0.1.42
   variables:
     USER: btagging
     IMAGE: umami
-
-push_to_hub_cpu:
-  <<: *push_to_hub_template
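+    # IMAGE_TAG is set by each job that extends this template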
   script:
     - /home/docker/skopeo copy
       --src-creds ${CI_REGISTRY_USER}:${CI_BUILD_TOKEN}
       --dest-creds ${DH_USERNAME}:${DH_PASSWORD}
-      docker://$CI_REGISTRY_IMAGE:latest
-      docker://${USER}/${IMAGE}:latest
+      docker://$CI_REGISTRY_IMAGE:${IMAGE_TAG}
+      docker://${USER}/${IMAGE}:${IMAGE_TAG}
+
+push_to_hub_cpu:
+  extends: .push_to_hub_template
+  variables:
+    IMAGE_TAG: latest
   only:
     - master@atlas-flavor-tagging-tools/algorithms/umami
 
-
 push_to_hub_gpu:
-  <<: *push_to_hub_template
-  script:
-    - /home/docker/skopeo copy
-      --src-creds ${CI_REGISTRY_USER}:${CI_BUILD_TOKEN}
-      --dest-creds ${DH_USERNAME}:${DH_PASSWORD}
-      docker://$CI_REGISTRY_IMAGE:latest-gpu
-      docker://${USER}/${IMAGE}:latest-gpu
+  extends: .push_to_hub_template
+  variables:
+    IMAGE_TAG: latest-gpu
   only:
     - master@atlas-flavor-tagging-tools/algorithms/umami
 
 push_to_hub_tag:
-  <<: *push_to_hub_template
-  script:
-    - /home/docker/skopeo copy
-      --src-creds ${CI_REGISTRY_USER}:${CI_BUILD_TOKEN}
-      --dest-creds ${DH_USERNAME}:${DH_PASSWORD}
-      docker://$CI_REGISTRY_IMAGE:$CI_COMMIT_REF_SLUG
-      docker://${USER}/${IMAGE}:$CI_COMMIT_REF_SLUG
+  extends: .push_to_hub_template
+  variables:
+    IMAGE_TAG: $CI_COMMIT_REF_SLUG
   only:
     - tags@atlas-flavor-tagging-tools/algorithms/umami
 
 push_to_hub_gpu_tag:
-  <<: *push_to_hub_template
-  script:
-    - /home/docker/skopeo copy
-      --src-creds ${CI_REGISTRY_USER}:${CI_BUILD_TOKEN}
-      --dest-creds ${DH_USERNAME}:${DH_PASSWORD}
-      docker://$CI_REGISTRY_IMAGE:$CI_COMMIT_REF_SLUG-gpu
-      docker://${USER}/${IMAGE}:$CI_COMMIT_REF_SLUG-gpu
+  extends: .push_to_hub_template
+  variables:
+    IMAGE_TAG: $CI_COMMIT_REF_SLUG-gpu
   only:
     - tags@atlas-flavor-tagging-tools/algorithms/umami
diff --git a/pipelines/.unit_test-gitlab-ci.yaml b/pipelines/.unit_test-gitlab-ci.yaml
index d95caa66912a67c98cce70ecdfb3b73e44f528ad..c6a2b754e74c07293a1b5b28df26e848eca99d4c 100644
--- a/pipelines/.unit_test-gitlab-ci.yaml
+++ b/pipelines/.unit_test-gitlab-ci.yaml
@@ -24,7 +24,6 @@ unittest:
   script:
     - pytest --cov=./ --cov-report= ./umami/tests/unit/$UNIT_TEST_MODULE/ -v -s --junitxml=report.xml
     - cp .coverage coverage_files/.coverage.unittest_$UNIT_TEST_MODULE
-
   artifacts:
     when: always
     paths:
@@ -33,52 +32,19 @@
       junit: report.xml
   retry: 2
 
-unittest_evaluation_tools:
-  <<: *unittest_template
-  variables:
-    UNIT_TEST_MODULE: evaluation_tools
-
-unittest_helper_tools:
-  <<: *unittest_template
-  variables:
-    UNIT_TEST_MODULE: helper_tools
-
-unittest_input_vars_tools:
-  <<: *unittest_template
-  variables:
-    UNIT_TEST_MODULE: input_vars_tools
-
-unittest_preprocessing:
-  <<: *unittest_template
-  variables:
-    UNIT_TEST_MODULE: preprocessing
-
-unittest_tf_tools:
-  <<: *unittest_template
-  variables:
-    UNIT_TEST_MODULE: tf_tools
-
-unittest_train_tools:
-  <<: *unittest_template
-  variables:
-    UNIT_TEST_MODULE: train_tools
-
-unittest_metrics:
-  <<: *unittest_template
-  variables:
-    UNIT_TEST_MODULE: metrics
-
-unittest_classification_tools:
-  <<: *unittest_template
-  variables:
-    UNIT_TEST_MODULE: classification_tools
-
-unittest_data_tools:
-  <<: *unittest_template
-  variables:
-    UNIT_TEST_MODULE: data_tools
-
-unittest_plotting:
-  <<: *unittest_template
-  variables:
-    UNIT_TEST_MODULE: plotting
+unittest_parallel:
+  <<: *unittest_template
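+  # spawn one unit test job per UNIT_TEST_MODULE entry via a parallel matrix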
+  parallel:
+    matrix:
+      - UNIT_TEST_MODULE:
+        - evaluation_tools
+        - helper_tools
+        - input_vars_tools
+        - preprocessing
+        - tf_tools
+        - train_tools
+        - metrics
+        - classification_tools
+        - data_tools
+        - plotting
diff --git a/requirements.txt b/requirements.txt
index bc3331e7b4f60ec41d077a79d77884db389d6fdd..3f17964862e7712e3d96ed09e91e24fc14b2c2a2 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -11,7 +11,7 @@ mlxtend==0.19.0
 netCDF4==1.5.8
 numba==0.55.1
 numpy==1.21.0
-pandas==1.4.1
+pandas==1.3.5
 papermill==2.3.4
 partd==1.2.0
 Pillow==9.0.1
diff --git a/setup.cfg b/setup.cfg
index 5c3ed95f957824a101b7f3cd41e821c87e8cdbab..d3d185cf7845b358f7b1595d312bdde115d6e0ac 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -21,4 +21,4 @@ log_level=INFO
 max-line-length = 88
 
 [pylint.'MESSAGES CONTROL']
-disable = invalid-name,unspecified-encoding,wrong-import-order,logging-fstring-interpolation,no-name-in-module,too-many-arguments,too-many-locals,too-many-lines,no-member,too-many-statements,too-many-branches,too-many-nested-blocks,too-many-instance-attributes,duplicate-code,fixme
+disable = invalid-name,unspecified-encoding,wrong-import-order,logging-fstring-interpolation,no-name-in-module,too-many-arguments,too-many-locals,too-many-lines,no-member,too-many-statements,too-many-branches,too-many-nested-blocks,too-many-instance-attributes,duplicate-code,fixme,too-few-public-methods
diff --git a/umami/evaluate_model.py b/umami/evaluate_model.py
index 8d5f9ce71e61dfcce1b19b58e64bb453ccbdbef4..837dd65cf0a353491e6fdcafe773d8cdb1358577 100644
--- a/umami/evaluate_model.py
+++ b/umami/evaluate_model.py
@@ -9,10 +9,8 @@ import pickle
 import h5py
 import pandas as pd
 import tensorflow as tf
-from tensorflow.keras.models import load_model  # pylint: disable=no-name-in-module
-from tensorflow.keras.utils import (
-    CustomObjectScope,  # pylint: disable=no-name-in-module
-)
+from tensorflow.keras.models import load_model  # pylint: disable=import-error
+from tensorflow.keras.utils import CustomObjectScope  # pylint: disable=import-error
 
 import umami.classification_tools as uct
 import umami.data_tools as udt
diff --git a/umami/evaluation_tools/eval_tools.py b/umami/evaluation_tools/eval_tools.py
index a751063159e1650e1f7ffd2d448ac5f6423c874a..44b60bc8ebac1d1824a477f8fc6db9fa0f90e6e6 100644
--- a/umami/evaluation_tools/eval_tools.py
+++ b/umami/evaluation_tools/eval_tools.py
@@ -9,8 +9,8 @@ import copy
 from itertools import permutations
 
 import numpy as np
-from tensorflow.keras.layers import Lambda
-from tensorflow.keras.models import Model
+from tensorflow.keras.layers import Lambda  # pylint: disable=import-error
+from tensorflow.keras.models import Model  # pylint: disable=import-error
 
 import umami.metrics as umt
 
diff --git a/umami/models/Model_CADS.py b/umami/models/Model_CADS.py
index 652efb8c3658a738812811a8e9ad4b3c67ef8203..e8f660a721f10433dc95e05a269576c0bc030e5f 100644
--- a/umami/models/Model_CADS.py
+++ b/umami/models/Model_CADS.py
@@ -4,9 +4,9 @@ import json
 
 import h5py
 import tensorflow as tf
-from tensorflow.keras.callbacks import ModelCheckpoint
-from tensorflow.keras.models import load_model
-from tensorflow.keras.optimizers import Adam
+from tensorflow.keras.callbacks import ModelCheckpoint  # pylint: disable=import-error
+from tensorflow.keras.models import load_model  # pylint: disable=import-error
+from tensorflow.keras.optimizers import Adam  # pylint: disable=import-error
 
 import umami.tf_tools as utf
 import umami.train_tools as utt
diff --git a/umami/models/Model_DL1.py b/umami/models/Model_DL1.py
index aa12b74eb90b0489d826c996aaa00a6e821e6ba1..74248cf9d7b7bb31b8934f7a2f45b32763165c7d 100644
--- a/umami/models/Model_DL1.py
+++ b/umami/models/Model_DL1.py
@@ -5,16 +5,16 @@ import os
 
 import h5py
 import tensorflow as tf
-from tensorflow.keras.callbacks import ModelCheckpoint
-from tensorflow.keras.layers import (
+from tensorflow.keras.callbacks import ModelCheckpoint  # pylint: disable=import-error
+from tensorflow.keras.layers import (  # pylint: disable=import-error
     Activation,
     BatchNormalization,
     Dense,
     Dropout,
     Input,
 )
-from tensorflow.keras.models import Model, load_model
-from tensorflow.keras.optimizers import Adam
+from tensorflow.keras.models import Model, load_model  # pylint: disable=import-error
+from tensorflow.keras.optimizers import Adam  # pylint: disable=import-error
 
 import umami.tf_tools as utf
 import umami.train_tools as utt
diff --git a/umami/models/Model_Dips.py b/umami/models/Model_Dips.py
index 52b27c13e5317119307aa4f5fffbb0f6a5a6b040..e76e35353fff8437e7085c14754450b70e49ac79 100755
--- a/umami/models/Model_Dips.py
+++ b/umami/models/Model_Dips.py
@@ -5,11 +5,9 @@ import os
 
 import h5py
 import tensorflow as tf
-from tensorflow.keras import activations
-from tensorflow.keras.callbacks import (
-    ModelCheckpoint,  # pylint: disable=no-name-in-module
-)
-from tensorflow.keras.layers import (  # pylint: disable=no-name-in-module
+from tensorflow.keras import activations  # pylint: disable=import-error
+from tensorflow.keras.callbacks import ModelCheckpoint  # pylint: disable=import-error
+from tensorflow.keras.layers import (  # pylint: disable=import-error
     Activation,
     BatchNormalization,
     Dense,
@@ -18,8 +16,8 @@ from tensorflow.keras.layers import (  # pylint: disable=no-name-in-module
     Masking,
     TimeDistributed,
 )
-from tensorflow.keras.models import Model, load_model
-from tensorflow.keras.optimizers import Adam
+from tensorflow.keras.models import Model, load_model  # pylint: disable=import-error
+from tensorflow.keras.optimizers import Adam  # pylint: disable=import-error
 
 import umami.tf_tools as utf
 import umami.train_tools as utt
diff --git a/umami/models/Model_Umami.py b/umami/models/Model_Umami.py
index 9ffc777ac2060d2a2692e2167d401f47209cbd4d..c766f79e635170f556e2baba8a2f52530956ac92 100644
--- a/umami/models/Model_Umami.py
+++ b/umami/models/Model_Umami.py
@@ -6,11 +6,9 @@ import os
 
 import h5py
 import tensorflow as tf
-from tensorflow.keras import activations
-from tensorflow.keras.callbacks import (
-    ModelCheckpoint,  # pylint: disable=no-name-in-module
-)
-from tensorflow.keras.layers import (  # pylint: disable=no-name-in-module
+from tensorflow.keras import activations  # pylint: disable=import-error
+from tensorflow.keras.callbacks import ModelCheckpoint  # pylint: disable=import-error
+from tensorflow.keras.layers import (  # pylint: disable=import-error
     Activation,
     BatchNormalization,
     Concatenate,
@@ -20,11 +18,8 @@ from tensorflow.keras.layers import (  # pylint: disable=no-name-in-module
     Masking,
     TimeDistributed,
 )
-from tensorflow.keras.models import (  # pylint: disable=no-name-in-module
-    Model,
-    load_model,
-)
-from tensorflow.keras.optimizers import Adam  # pylint: disable=no-name-in-module
+from tensorflow.keras.models import Model, load_model  # pylint: disable=import-error
+from tensorflow.keras.optimizers import Adam  # pylint: disable=import-error
 
 import umami.tf_tools as utf
 import umami.train_tools as utt
diff --git a/umami/tests/unit/tf_tools/test_tf_tools.py b/umami/tests/unit/tf_tools/test_tf_tools.py
index 3af6fd666611776fd518b4f360570e384229afc7..566e9496c442cac971d3e9f777579eb082cace9f 100644
--- a/umami/tests/unit/tf_tools/test_tf_tools.py
+++ b/umami/tests/unit/tf_tools/test_tf_tools.py
@@ -254,7 +254,6 @@ class test_DeepSet(tf.test.TestCase):
 
         # Get net output
         out = deepset(inputs=inputs, mask=2)
-        logger.warning(out)
 
         # Test output
         np.testing.assert_almost_equal(expected_output, out)
diff --git a/umami/tf_tools/layers.py b/umami/tf_tools/layers.py
index 5cf93de01272f925b6a886b324feb64c91476db5..ab40776b9d1c989e919f75ccf0ba312332a7c18c 100644
--- a/umami/tf_tools/layers.py
+++ b/umami/tf_tools/layers.py
@@ -1,8 +1,12 @@
 """
 Implementations by Johnny Raine
 """
-from tensorflow.keras import backend as K
-from tensorflow.keras.layers import BatchNormalization, Dense, Layer
+from tensorflow.keras import backend as K  # pylint: disable=import-error
+from tensorflow.keras.layers import (  # pylint: disable=import-error
+    BatchNormalization,
+    Dense,
+    Layer,
+)
 
 
 class DenseNet(Layer):
@@ -12,12 +16,29 @@ class DenseNet(Layer):
 
     def __init__(
         self,
-        nodes,
-        output_nodes=1,
-        activation="relu",
-        batch_norm=False,
+        nodes: list,
+        output_nodes: int = 1,
+        activation: str = "relu",
+        batch_norm: bool = False,
         **kwargs,
     ):
+        """
+        Init the DenseNet layer.
+
+        Parameters
+        ----------
+        nodes : list
+            List with the number of neurons per layer
+        output_nodes : int
+            Number of outputs in the output node
+        activation : str, optional
+            Activation which is used, by default "relu"
+        batch_norm : bool, optional
+            Use batch normalisation, by default False
+        **kwargs : dict
+            Additional arguments passed.
+        """
+
         # Define the attributes
         self.nodes = nodes
         self.output_nodes = output_nodes
@@ -48,13 +69,34 @@ class DenseNet(Layer):
         assert len(nodes), "No layers in DenseNet"
         super().__init__(**kwargs)
 
-    def call(self, inputs):  # pylint: disable=arguments-differ
+    def call(self, inputs):
+        """
+        Define what happens when the layer is called
+
+        Parameters
+        ----------
+        inputs : object
+            Input to the network.
+
+        Returns
+        -------
+        output : object
+            Output of the network.
+        """
         out = self.layers[0](inputs)
         for layer in self.layers[1:]:
             out = layer(out)
         return out
 
-    def get_config(self):
+    def get_config(self) -> dict:
+        """
+        Return the settings of the network.
+
+        Returns
+        -------
+        dict
+            Dict with the config settings.
+        """
         # Get configuration of the network
         config = {
             "nodes": self.nodes,
@@ -75,12 +117,28 @@ class DeepSet(Layer):
 
     def __init__(
         self,
-        nodes,
-        activation="relu",
-        batch_norm=False,
-        mask_zero=True,
+        nodes: list,
+        activation: str = "relu",
+        batch_norm: bool = False,
+        mask_zero: bool = True,
         **kwargs,
     ):
+        """
+        Init the DeepSet Layer.
+
+        Parameters
+        ----------
+        nodes : list
+            List with the number of neurons per layer
+        activation : str, optional
+            Activation which is used, by default "relu"
+        batch_norm : bool, optional
+            Use batch normalisation, by default False
+        mask_zero : bool, optional
+            Use 0 as mask value, by default True
+        **kwargs : dict
+            Additional arguments passed.
+        """
         # Define attributes
         self.nodes = nodes
         self.activation = activation
@@ -108,7 +166,22 @@ class DeepSet(Layer):
         assert self.layers, "No layers in DeepSet"
         super().__init__(**kwargs)
 
-    def call(self, inputs, mask=None):  # pylint: disable=arguments-differ
+    def call(self, inputs, mask: float = None):  # pylint: disable=arguments-differ
+        """
+        Return the output of the network for a given input.
+
+        Parameters
+        ----------
+        inputs : object
+            Input to layer.
+        mask : float, optional
+            Mask value, by default None
+
+        Returns
+        -------
+        output
+            Layer output.
+        """
         # Assert that the tensor shape is at least rank 3
         assert len(inputs.shape) == 3, (
             "DeepSets layer requires tensor of rank 3. Shape of tensor"
@@ -126,12 +199,26 @@ class DeepSet(Layer):
         for layer in self.layers[1:]:
             out = layer(out)
 
-        # if mask is not None:
-        #    out *= (1-K.cast(mask,dtype="float32"))
-
         return out
 
-    def compute_mask(self, inputs, mask=None):
+    def compute_mask(
+        self, inputs, mask: float = None
+    ):  # pylint: disable=unused-argument
+        """
+        Compute the masking.
+
+        Parameters
+        ----------
+        inputs : object
+            Input to a layer.
+        mask : float
+            Custom mask value (needed in tensorflow).
+
+        Returns
+        -------
+        masking
+            Return correct masking
+        """
 
         # Check if mask zero is true
         if not self.mask_zero:
@@ -141,6 +228,14 @@ class DeepSet(Layer):
         return K.equal(K.sum(inputs ** 2, axis=-1), 0)
 
     def get_config(self):
+        """
+        Return the settings of the network.
+
+        Returns
+        -------
+        dict
+            Dict with the config settings.
+        """
         # Get configuration of the network
         config = {
             "nodes": self.nodes,
@@ -158,17 +253,42 @@ class MaskedSoftmax(Layer):
     """Softmax layer with masking."""
 
     def __init__(self, axis=-1, **kwargs):
+        """
+        Init the masked softmax layer.
+
+        Parameters
+        ----------
+        axis : int, optional
+            Which axis is used for softmax, by default -1
+        **kwargs : dict
+            Additional arguments passed.
+        """
         # Get attributes
         self.axis = axis
         self.supports_masking = True
         super().__init__(**kwargs)
 
-    def call(self, inputs, mask=None):  # pylint: disable=arguments-differ
+    def call(self, inputs, mask: float = None):  # pylint: disable=arguments-differ
+        """
+        Return the output of the softmax layer.
+
+        Parameters
+        ----------
+        inputs : object
+            Layer input.
+        mask : float, optional
+            Masking value, by default None
+
+        Returns
+        -------
+        output
+            Return output of the layer.
+        """
         # Check for masking
         if mask is None:
 
             # Compute masking for not existing inputs
-            mask = self.compute_mask(inputs)
+            mask = self.compute_mask(inputs, mask)
 
         # Calculate Softmax
         inputs = K.exp(inputs) * (1 - K.cast(mask, dtype="float32"))
@@ -176,11 +296,36 @@ class MaskedSoftmax(Layer):
         # Return Masked Softmax
         return inputs / K.sum(inputs, axis=1, keepdims=True)
 
-    def compute_mask(self, inputs, mask=None):
+    def compute_mask(
+        self, inputs, mask: float = None
+    ):  # pylint: disable=no-self-use,unused-argument
+        """
+        Compute mask.
+
+        Parameters
+        ----------
+        inputs : object
+            Layer input.
+        mask : float
+            Custom mask value (needed in tensorflow).
+
+        Returns
+        -------
+        masking
+            Masking for the given input.
+        """
         # Return mask
         return K.equal(inputs, 0)
 
     def get_config(self):
+        """
+        Return the settings of the network.
+
+        Returns
+        -------
+        dict
+            Dict with the config settings.
+        """
         config = {"axis": self.axis}
         base_config = super().get_config()
         return dict(list(base_config.items()) + list(config.items()))
@@ -193,12 +338,28 @@ class Attention(Layer):
 
     def __init__(
         self,
-        nodes,
-        activation="relu",
-        mask_zero=True,
-        apply_softmax=True,
+        nodes: list,
+        activation: str = "relu",
+        mask_zero: bool = True,
+        apply_softmax: bool = True,
         **kwargs,
     ):
+        """
+        Init the Attention layer.
+
+        Parameters
+        ----------
+        nodes : list
+            List with the number of neurons per layer
+        activation : str, optional
+            Activation which is used, by default "relu"
+        mask_zero : bool, optional
+            Use 0 as mask value, by default True
+        apply_softmax : bool, optional
+            Use softmax, by default True
+        **kwargs : dict
+            Additional arguments passed.
+        """
         self.nodes = nodes
         self.activation = activation
         self.mask_zero = mask_zero
@@ -211,7 +372,23 @@ class Attention(Layer):
         assert self.layers, "No layers in DeepSet"
         super().__init__(**kwargs)
 
-    def call(self, inputs, mask=None):  # pylint: disable=arguments-differ
+    def call(self, inputs, mask: float = None):  # pylint: disable=arguments-differ
+        """
+        Return the output of the network for a given input.
+
+        Parameters
+        ----------
+        inputs : object
+            Input to layer.
+        mask : float, optional
+            Mask value, by default None
+
+        Returns
+        -------
+        output
+            Layer output.
+        """
+
         assert len(inputs.shape) == 3, (
             "Attention layer requires tensor of rank 3. Shape of tensor"
             f" received {inputs.shape}"
@@ -235,13 +412,38 @@ class Attention(Layer):
             return attention_out
         return attention
 
-    def compute_mask(self, inputs, mask=None):
+    def compute_mask(
+        self, inputs, mask: float = None
+    ):  # pylint: disable=unused-argument
+        """
+        Compute mask.
+
+        Parameters
+        ----------
+        inputs : object
+            Layer input.
+        mask : float
+            Custom mask value (needed in tensorflow).
+
+        Returns
+        -------
+        masking
+            Masking for the given input.
+        """
         if not self.mask_zero:
             return None
 
         return K.equal(K.sum(inputs ** 2, axis=-1), 0)
 
     def get_config(self):
+        """
+        Return the settings of the network.
+
+        Returns
+        -------
+        dict
+            Dict with the config settings.
+        """
         config = {
             "nodes": self.nodes,
             "activation": self.activation,
@@ -252,15 +454,35 @@ class Attention(Layer):
         return dict(list(base_config.items()) + list(config.items()))
 
 
-class AttentionPooling(Layer):
+class AttentionPooling(Layer):  # pylint: disable=too-few-public-methods
     """
     Define Attention Pooling Layer.
     """
 
-    def __init__(self, **kwargs):
+    def __init__(self, **kwargs):  # pylint: disable=useless-super-delegation
+        """Init Attention Pooling layer
+
+        Parameters
+        ----------
+        **kwargs : dict
+            Additional arguments passed.
+        """
         super().__init__(**kwargs)
 
-    def call(self, inputs):  # pylint: disable=arguments-differ
+    def call(self, inputs):  # pylint: disable=arguments-differ,no-self-use
+        """
+        Return the output of the layer for a given input.
+
+        Parameters
+        ----------
+        inputs : object
+            Input to layer.
+
+        Returns
+        -------
+        output
+            Layer output.
+        """
 
         # Get attention and feature tensor
         attention, features = inputs[:2]
@@ -287,12 +509,28 @@ class ConditionalAttention(Layer):
 
     def __init__(
         self,
-        nodes,
-        activation="relu",
-        mask_zero=True,
-        apply_softmax=True,
+        nodes: list,
+        activation: str = "relu",
+        mask_zero: bool = True,
+        apply_softmax: bool = True,
         **kwargs,
     ):
+        """
+        Init the Conditional Attention Layer.
+
+        Parameters
+        ----------
+        nodes : list
+            List with the number of neurons per layer
+        activation : str, optional
+            Activation which is used, by default "relu"
+        mask_zero : bool, optional
+            Use 0 as mask value, by default True
+        apply_softmax : bool, optional
+            Use softmax, by default True
+        **kwargs : dict
+            Additional arguments passed.
+        """
         # Define attributes
         self.nodes = nodes
         self.activation = activation
@@ -309,6 +547,19 @@ class ConditionalAttention(Layer):
         super().__init__(**kwargs)
 
     def call(self, inputs):  # pylint: disable=arguments-differ
+        """
+        Return the output of the network for a given input.
+
+        Parameters
+        ----------
+        inputs : object
+            Input to layer.
+
+        Returns
+        -------
+        output
+            Layer output.
+        """
 
         # Retrieve repeated vector and condition vector
         repeat, condition = inputs[:2]
@@ -338,7 +589,25 @@ class ConditionalAttention(Layer):
         # Return attention output
         return attention_out
 
-    def compute_mask(self, inputs, mask=None):
+    def compute_mask(
+        self, inputs, mask: float = None
+    ):  # pylint: disable=unused-argument
+        """
+        Compute the masking.
+
+        Parameters
+        ----------
+        inputs : object
+            Input to a layer.
+        mask : float
+            Custom mask value (needed in tensorflow).
+
+        Returns
+        -------
+        masking
+            Return correct masking
+        """
+
         # Check for mask
         if not self.mask_zero:
             return None
@@ -347,6 +616,14 @@ class ConditionalAttention(Layer):
         return K.equal(K.sum(inputs ** 2, axis=-1), 0)
 
     def get_config(self):
+        """
+        Return the settings of the network.
+
+        Returns
+        -------
+        dict
+            Dict with the config settings.
+        """
         # Get the configs of the layer as dict
         config = {
             "nodes": self.nodes,
@@ -365,12 +642,29 @@ class ConditionalDeepSet(Layer):
 
     def __init__(
         self,
-        nodes,
-        activation="relu",
-        batch_norm=False,
-        mask_zero=True,
+        nodes: list,
+        activation: str = "relu",
+        batch_norm: bool = False,
+        mask_zero: bool = True,
         **kwargs,
     ):
+        """
+        Init the ConditionalDeepSet layer.
+
+        Parameters
+        ----------
+        nodes : list
+            List with the number of neurons per layer
+        activation : str, optional
+            Activation which is used, by default "relu"
+        batch_norm : bool, optional
+            Use batch normalisation, by default False
+        mask_zero : bool, optional
+            Use 0 as mask value, by default True
+        **kwargs : dict
+            Additional arguments passed.
+        """
+
         # Get attributes
         self.nodes = nodes
         self.activation = activation
@@ -386,6 +680,19 @@ class ConditionalDeepSet(Layer):
         super().__init__(**kwargs)
 
     def call(self, inputs):  # pylint: disable=arguments-differ
+        """
+        Return the output of the layer for a given input.
+
+        Parameters
+        ----------
+        inputs : object
+            Input to layer.
+
+        Returns
+        -------
+        output
+            Layer output.
+        """
 
         # Get repeated vector and conditions vector
         repeat, condition = inputs[:2]
@@ -415,7 +722,24 @@ class ConditionalDeepSet(Layer):
         # Retrun conditional deep sets output
         return deepsets_out
 
-    def compute_mask(self, inputs, mask=None):
+    def compute_mask(
+        self, inputs, mask: float = None
+    ):  # pylint: disable=unused-argument
+        """
+        Compute the masking.
+
+        Parameters
+        ----------
+        inputs : object
+            Input to a layer.
+        mask : float
+            Custom mask value (needed in tensorflow).
+
+        Returns
+        -------
+        masking
+            Return correct masking
+        """
 
         # Check if masking is zero
         if not self.mask_zero:
@@ -425,6 +749,14 @@ class ConditionalDeepSet(Layer):
         return K.equal(K.sum(inputs ** 2, axis=-1), 0)
 
     def get_config(self):
+        """
+        Return the settings of the network.
+
+        Returns
+        -------
+        dict
+            Dict with the config settings.
+        """
         # Get the configs of the layer as dict
         config = {
             "nodes": self.nodes,
@@ -438,13 +770,37 @@ class ConditionalDeepSet(Layer):
         return dict(list(base_config.items()) + list(config.items()))
 
 
-class MaskedAverage1DPooling(Layer):
+class MaskedAverage1DPooling(Layer):  # pylint: disable=too-few-public-methods
     """Keras layer for masked 1D average pooling."""
 
-    def __init__(self, **kwargs):
+    def __init__(self, **kwargs):  # pylint: disable=useless-super-delegation
+        """Init the masked average 1d pooling layer.
+
+        Parameters
+        ----------
+        **kwargs : dict
+            Additional arguments passed.
+        """
         super().__init__(**kwargs)
 
-    def call(self, inputs, mask=None):  # pylint: disable=arguments-differ
+    def call(
+        self, inputs, mask: float = None
+    ):  # pylint: disable=arguments-differ,no-self-use
+        """
+        Return the output of the layer for a given input.
+
+        Parameters
+        ----------
+        inputs : object
+            Input to layer.
+        mask : float, optional
+            Mask value, by default None
+
+        Returns
+        -------
+        output
+            Layer output.
+        """
         # Check for masking
         if mask is not None:
 
@@ -487,19 +843,77 @@ class Sum(Layer):
     """
 
     def __init__(self, **kwargs):
+        """
+        Init the class.
+
+        Parameters
+        ----------
+        **kwargs : dict
+            Additional arguments passed.
+        """
         super().__init__(**kwargs)
         self.supports_masking = True
 
-    def build(self, input_shape):
-        pass
-
-    def call(self, x, mask=None):  # pylint: disable=arguments-differ
+    def build(self, input_shape):  # pylint: disable=unused-argument
+        """Build step which is skipped.
+
+        Parameters
+        ----------
+        input_shape : object
+            Input shape of the layer (is needed in tensorflow).
+
+        """
+        pass  # pylint: disable=unnecessary-pass
+
+    def call(self, x, mask: float = None):  # pylint: disable=no-self-use
+        """
+        Return the output of the layer.
+
+        Parameters
+        ----------
+        x : object
+            Layer input
+        mask : float, optional
+            Mask value, by default None
+
+        Returns
+        -------
+        output
+            Output of the layer
+        """
         if mask is not None:
             x = x * K.cast(mask, K.dtype(x))[:, :, None]
         return K.sum(x, axis=1)
 
-    def compute_output_shape(self, input_shape):
+    def compute_output_shape(self, input_shape):  # pylint: disable=no-self-use
+        """
+        Compute the output shape.
+
+        Parameters
+        ----------
+        input_shape : object
+            Layer input shape
+
+        Returns
+        -------
+        output
+            Layer output.
+        """
         return input_shape[0], input_shape[2]
 
-    def compute_mask(self, inputs, mask):  # pylint: disable=signature-differs
+    def compute_mask(self, inputs, mask):  # pylint: disable=no-self-use,unused-argument
+        """Compute masking
+
+        Parameters
+        ----------
+        inputs : object
+            Layer input.
+        mask : float
+            Custom mask value (needed in tensorflow).
+
+        Returns
+        -------
+        masking
+            Return the masking
+        """
         return None
diff --git a/umami/tf_tools/models.py b/umami/tf_tools/models.py
index df9430d730bc8035097a64c30fcf02c3d884da4c..84352b2a1ffcfeeaa92fb12e580577d6eee61c51 100644
--- a/umami/tf_tools/models.py
+++ b/umami/tf_tools/models.py
@@ -2,9 +2,14 @@
 Implementations by Johnny Raine
 """
 
-import tensorflow.keras.backend as K
-from tensorflow.keras.layers import Concatenate, GlobalMaxPool1D, Input, Lambda
-from tensorflow.keras.models import Model
+import tensorflow.keras.backend as K  # pylint: disable=import-error
+from tensorflow.keras.layers import (  # pylint: disable=import-error
+    Concatenate,
+    GlobalMaxPool1D,
+    Input,
+    Lambda,
+)
+from tensorflow.keras.models import Model  # pylint: disable=import-error
 
 from .layers import (
     Attention,
diff --git a/umami/tf_tools/tools.py b/umami/tf_tools/tools.py
index 8b8b1da8dc0b3eaabcdeb73de4427fd0da6885f7..6fe0bc9608a12dd761879a2b827cd30830d6dd3f 100644
--- a/umami/tf_tools/tools.py
+++ b/umami/tf_tools/tools.py
@@ -1,5 +1,5 @@
 """Helper tools for tensorflow."""
-from tensorflow.keras.callbacks import ReduceLROnPlateau
+from tensorflow.keras.callbacks import ReduceLROnPlateau  # pylint: disable=import-error
 
 
 def GetLRReducer(
@@ -37,7 +37,9 @@ def GetLRReducer(
         number of epochs to wait before resuming normal operation after
         lr has been reduced, by default 5
     LRR_min_lr : float, optional
-        ower bound on the learning rate, by default 0.000001
+        Lower bound on the learning rate, by default 0.000001
+    **kwargs : dict
+        Additional arguments.
 
     Returns
     -------
diff --git a/umami/train_tools/NN_tools.py b/umami/train_tools/NN_tools.py
index e1c155888e5a93c7c757befaac89283696f0d177..a15aca729612d34af16818bf8b20c5d1ac92d9c9 100644
--- a/umami/train_tools/NN_tools.py
+++ b/umami/train_tools/NN_tools.py
@@ -9,9 +9,9 @@ from shutil import copyfile
 
 import numpy as np
 import tensorflow as tf
-from tensorflow.keras.callbacks import Callback
-from tensorflow.keras.models import load_model
-from tensorflow.keras.utils import CustomObjectScope
+from tensorflow.keras.callbacks import Callback  # pylint: disable=import-error
+from tensorflow.keras.models import load_model  # pylint: disable=import-error
+from tensorflow.keras.utils import CustomObjectScope  # pylint: disable=import-error
 
 import umami.metrics as umt
 import umami.tf_tools as utf