diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 19161ab959d7b2f5c94da788007cb424717c2c8f..f012d6524b6d4d91ce3803bc8582c11db8180fbc 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -110,4 +110,4 @@ build_apidoc:
 #  type: test
 #  script:
 #    - conda install -y pylint
-#    - pylint analysis/*/*.py || echo "========= Pylint run with errorcode > 0 ========"
+#    - pylint analysis/*/*.py || echo "========= Pylint run with error code > 0 ========"
diff --git a/analysis/__init__.py b/analysis/__init__.py
index 5f056ccb0e020255995ad6aeaf92dfe7ab5cdfdc..bb7b88f489b4caff26321d165148705c56c04348 100644
--- a/analysis/__init__.py
+++ b/analysis/__init__.py
@@ -11,7 +11,7 @@ from __future__ import print_function, division, absolute_import
 import os
 from collections import defaultdict
 
-# TODO: automative version and author
+# TODO: automate version and author
 __version__ = '3.0'
 __author__ = 'Albert Piug'
 
diff --git a/analysis/batch/batch_system.py b/analysis/batch/batch_system.py
index f324b1802c945ac173d170e00c8967757143033c..ee1b771e653ec6fd4a4c91cb7776be17f672260f 100644
--- a/analysis/batch/batch_system.py
+++ b/analysis/batch/batch_system.py
@@ -91,7 +91,6 @@ echo "------------------------------------------------------------------------"
         Arguments:
             job_name (str): Job name.
             script (str): Commands to run.
-            script_args (list): List of arguments passed to the script.
             log_file (str): Logfile location.
             extra_config (dict, optional): Extra configuration for 'script'. Defaults
                 to `None`.
diff --git a/analysis/data/hdf.py b/analysis/data/hdf.py
index 5b34dca126b61d799679eba5f3552233d6f91e52..0b485184ad24e5f40cc3a6e2e0b603b4cf9f5e0a 100644
--- a/analysis/data/hdf.py
+++ b/analysis/data/hdf.py
@@ -44,7 +44,7 @@ def modify_hdf(file_name, compress=True):
                 mode = 'a'
             else:
                 logger.info("File %s exists but seems empty -> not construct with pytables?"
-                            "Overwritting existing file!", file_name)
+                            "Overwriting existing file!", file_name)
     with pd.HDFStore(file_name, mode=mode, format='table') as data_file:
         yield data_file
     logger.debug('Compressing...')
diff --git a/analysis/data/mergers.py b/analysis/data/mergers.py
index 68c076fc7ee2e11c96bbdaefe64ac5fb874b0779..da95feab6c035401df00fd1386d2cd4d9a9d9b62 100644
--- a/analysis/data/mergers.py
+++ b/analysis/data/mergers.py
@@ -51,6 +51,7 @@ def merge_root(data_list, name=None, title=None, destruct_data=True):
         name (str): Dataset name.
         title (str): Dataset title.
         data_list (list[ROOT.RooDataSet]): Datasets to merge.
+        destruct_data (bool): Destroy the input ROOT datasets after merging.
 
     Return:
         ROOT.RooDataSet: Merged dataset.
@@ -67,7 +68,7 @@ def merge_root(data_list, name=None, title=None, destruct_data=True):
         raise ValueError("Incompatible observables")
     # Check weights
     if len(set(data.isWeighted() for data in data_list)) > 1:
-        raise ValueError("Input dataset list contains weighted and uneweighted datasets.")
+        raise ValueError("Input dataset list contains weighted and unweighted datasets.")
     # Merge by append, since we don't know the original weight situation
     output_ds = data_list.pop(0)
     for data in data_list:
diff --git a/analysis/efficiency/__init__.py b/analysis/efficiency/__init__.py
index a2ab8acb5230f5ade9ea5b1fb93f12fa8736b5ec..cf86ecfca9f2ff79ce7efc3b6a33aa695b39f5c2 100644
--- a/analysis/efficiency/__init__.py
+++ b/analysis/efficiency/__init__.py
@@ -141,7 +141,7 @@ def load_acceptance(name, **extra_parameters):
         `analysis.efficiency.Acceptance`: Acceptance object.
 
     Raise:
-        OSError: If the efficiecny file does not exist.
+        OSError: If the efficiency file does not exist.
         analysis.utils.config.ConfigError: If there is a problem with the efficiency model.
 
     """
diff --git a/analysis/efficiency/efficiency.py b/analysis/efficiency/efficiency.py
index 53452af2eab6a9df8740f2e5c8c60f62251bd986..8b64a0c0f580ab60b0486b1d8512acaad9971b9c 100644
--- a/analysis/efficiency/efficiency.py
+++ b/analysis/efficiency/efficiency.py
@@ -104,7 +104,7 @@ class Efficiency(object):
         return self._get_efficiency(data[var_list].copy()).clip(lower=0.0)
 
     def get_randomized_efficiency(self, data):
-        """Get the efficiency for the given event or dataset Gaussianly randomized by its uncertainty.
+        """Get the efficiency for the given event or dataset, Gaussian-randomized by its uncertainty.
 
         Arguments:
             data (`pandas.DataFrame` or Sequence): Data to calculate the efficiency of.
diff --git a/analysis/efficiency/legendre.py b/analysis/efficiency/legendre.py
index 052d620c1e06c64adf4d6620c3c41943ecd88649..8bf114a280fd742aead721a896c7153215a6bbd4 100644
--- a/analysis/efficiency/legendre.py
+++ b/analysis/efficiency/legendre.py
@@ -126,7 +126,7 @@ class LegendreEfficiency(Efficiency):
 
         Raise:
             KeyError: On missing coefficients.
-            ValueError: On bad range or bad symmetric variable defintion.
+            ValueError: On bad range or bad symmetric variable definition.
 
         """
         super(LegendreEfficiency, self).__init__(var_list, config)
diff --git a/analysis/fit/__init__.py b/analysis/fit/__init__.py
index dd908cd6c2de6daada11f774cbb07ab2676a44f2..7724e5d488228ef245c96cfd2003b25c01407cc7 100644
--- a/analysis/fit/__init__.py
+++ b/analysis/fit/__init__.py
@@ -113,7 +113,7 @@ def fit(factory, pdf_name, strategy, dataset, verbose=False, **kwargs):
         logger.error("Problem getting the PDF -> %s", error)
         raise
     if kwargs.get('Extended', False) != factory.is_extended():
-        logger.warning("Requested fit with Extended=%s fit on %sextended PDF. Check this is what you want.",
+        logger.warning("Requested Extended=%s fit on %sextended PDF. Check this is what you want.",
                        kwargs.get('Extended', False),
                        'an ' if factory.is_extended() else 'a non-')
     return fit_func(model, dataset, fit_config)
diff --git a/analysis/fit/result.py b/analysis/fit/result.py
index ac9462057775a37ea83772411cac3d91fa9fb31b..786141c1258d5b01d32f534ea4aaa151d048c893 100644
--- a/analysis/fit/result.py
+++ b/analysis/fit/result.py
@@ -322,7 +322,7 @@ class FitResult(object):
 
     @ensure_initialized
     def has_converged(self):
-        """Determine wether the fit has converged properly.
+        """Determine whether the fit has converged properly.
 
         All steps have to have converged and the covariance matrix quality needs to be
         good.
diff --git a/analysis/physics/factory.py b/analysis/physics/factory.py
index 146bee8596aa4dc54983d09f72ee29b265749277..6878c11350be7589b6e59959839d983a3b2be261 100644
--- a/analysis/physics/factory.py
+++ b/analysis/physics/factory.py
@@ -42,7 +42,7 @@ class BaseFactory(object):
             **config (dict): Configuration of the factory.
 
         Raise:
-            KeyError: When parameters or observables are missingo or there is an
+            KeyError: When parameters or observables are missing or there is an
                 inconsistency in the configuration.
 
         """
@@ -198,7 +198,7 @@ class BaseFactory(object):
             key (str): Object identifier.
 
         Return:
-            bool: Wether the object is in the workspace.
+            bool: Whether the object is in the workspace.
 
         """
         return key in self._objects
@@ -366,7 +366,7 @@ class BaseFactory(object):
         return self.get(pdf_name, self.set(pdf_name, self.get_unbound_extended_pdf(name, title)))
 
     def get_unbound_extended_pdf(self, name, title):
-        """Get an extedned physics PDF."""
+        """Get an extended physics PDF."""
         raise NotImplementedError()
 
     def is_extended(self):
@@ -600,7 +600,7 @@ class PhysicsFactory(BaseFactory):
                 elif isinstance(yield_, (float, int)):
                     self['Yield'].setVal(yield_)
             else:
-                logger.warning("Trying to set a yield that cannot be overriden")
+                logger.warning("Trying to set a yield that cannot be overridden")
 
 
 # Product Physics Factory
@@ -716,7 +716,7 @@ class ProductPhysicsFactory(BaseFactory):
                 self['Yield'].SetName(yield_.GetName())
                 self['Yield'].SetTitle(yield_.GetTitle())
             else:
-                logger.warning("Trying to set a yield that cannot be overriden")
+                logger.warning("Trying to set a yield that cannot be overridden")
 
     def transform_dataset(self, dataset):
         """Transform dataset according to the factory configuration.
@@ -911,7 +911,7 @@ class SumPhysicsFactory(BaseFactory):
                 elif isinstance(yield_, (float, int)):
                     self['Yield'].setVal(yield_)
             else:
-                logger.warning("Trying to set a yield that cannot be overriden")
+                logger.warning("Trying to set a yield that cannot be overridden")
 
     def transform_dataset(self, dataset):
         """Transform dataset according to the factory configuration.
@@ -1059,7 +1059,7 @@ class SimultaneousPhysicsFactory(BaseFactory):
     def transform_dataset(self, dataset):
         """Transform dataset according to the factory configuration.
 
-        The category nane is used as column name to determine each of the
+        The category name is used as column name to determine each of the
         samples to transform.
 
         Note:
diff --git a/analysis/physics/pdf_models.py b/analysis/physics/pdf_models.py
index 6278a1a81230dcf799e15554d0b782f9687266da..09d19537bbec90ea29e0801617b429305fff72bd 100644
--- a/analysis/physics/pdf_models.py
+++ b/analysis/physics/pdf_models.py
@@ -209,7 +209,7 @@ class ArgusConvGaussPdfMixin(object):
         usual PDF instantiations.
 
         Note:
-            The Argus and Gaussian PDFs are created new everytime the RooFFTConvPdf is
+            The Argus and Gaussian PDFs are created new every time the RooFFTConvPdf is
             instantiated.
 
         Return:
@@ -336,7 +336,7 @@ class RooWorkspaceMixin(object):
             raise KeyError("PDF name ('workspace-pdf-name') is missing")
         tfile = ROOT.TFile.Open(workspace_path)
         if not tfile:
-            raise KeyError("Cannot open wokspace file -> {}".format(workspace_path))
+            raise KeyError("Cannot open workspace file -> {}".format(workspace_path))
         workspace = tfile.Get(workspace_name)
         if not workspace:
             raise KeyError("Cannot get workspace from file -> {}".format(workspace_name))
diff --git a/analysis/toys/README.md b/analysis/toys/README.md
index c191efa9aee0538cc715d26771d8ea8f96b84563..4afdbc702a18afae1507522af7bd67390a3a4c35 100644
--- a/analysis/toys/README.md
+++ b/analysis/toys/README.md
@@ -31,9 +31,9 @@ pdfs:
         pdf: signal
         parameters:
             - S4: 1.0
-    mass
+    mass:
         pdf: doublecb
-    q2
+    q2:
         pdf: flat
 ```
 
@@ -44,7 +44,7 @@ The `pdfs` key configures the generation PDFs, and details on how to configure t
 ### Trick
 
 If using the `link-from` option, several users can share the same toys if the config file is committed (or we know its location) and the same `link-from` is given.
-When executin `submit_generate_toys.py`, it checks for the existence of the toy file.
+When executing `submit_generate_toys.py`, it checks for the existence of the toy file.
 If it does, it is simply symlinked to the user directory, thus making everything work.
 
 
@@ -102,8 +102,8 @@ These fit strategies should consist of a function that gets the model (PDF), the
 
 The `data` key is used to specify the input data for each toy.
 Each of the entries is used to load a toy generated by `generate_toys.py` using its name (`source`), and configures the number of entries to sample from the input data set (`nevents`).
-If a simultaneous fit is performed, the `category` key has to be specified in the datae with the name of the category.
-This does *not* select the given category from the data but *lables* the given data as having that category. 
+If a simultaneous fit is performed, the `category` key has to be specified in the data with the name of the category.
+This does *not* select the given category from the data but *labels* the given data as having that category. 
 In the end, all sampled datasets are merged and used as the data for the fit.
 Special care needs to be taken when extended fits are involved:
 if the fit model is extended (if more than one is specified, they all need to be either extended or not extended) the number of entries sampled from each input dataset will be varied according to a Poisson distribution;
diff --git a/analysis/utils/README.md b/analysis/utils/README.md
index 829784e1f1b8ac16d2be83437f44cd8c9adec7a0..02dd7ff6c6ee1dbb9b76b883a2f0217cb350fbdf 100644
--- a/analysis/utils/README.md
+++ b/analysis/utils/README.md
@@ -1,6 +1,142 @@
 Utils
 =====
 
-This module contains several utilities used throughout the package.
+This package contains several utilities used throughout the codebase.
+
+config
+------
+
+Several helpers dealing with yaml configuration files are placed
+inside.
+
+- Loading (and interpreting substitutions) as well as dumping a configuration
+can be achieved with `load_config` and `write_config`, respectively.
+
+- Comparing two dictionaries and finding the differences is done by
+`compare_configs`.
+
+- In order to manipulate nested dicts more easily, `unfold_config` and
+`fold_config` convert the dicts to a flat, file-like structure.
+
+- The logic of converting a parameter specified in the configuration
+into a ROOT object is implemented in `configure_parameter`.
+
+- Adding shared variables is done with `get_shared_vars`.
+
+
+decorators
+----------
+
+This module contains the decorators used throughout this library.
+
+- `memoize` caches class instantiation so that repeated calls with
+the same arguments return the previously created instance.
+
+
+fit
+---
+
+Helpers for the fitting and results.
+
+- `fit_parameters` converts fitted RooRealVars into a dict containing
+information about the errors.
+
+- `calculate_pulls` creates pulls taking no error/ the symmetric error/
+asymmetric errors into account.
+
+
+iterators
+---------
+
+Several iterators for large iterations are available.
+
+- `pairwise` converts a sequence s -> (s0, s1), (s1, s2), (s2, s3)
+
+- `chunks` iterator over chunks given a chunksize
+
+
+logging_color
+-------------
+
+Logging related utils.
+
+- To get a new logger given a certain name, use `get_logger`.
+
+
+monitoring
+----------
+
+Functions to monitor the resources needed for execution.
+
+Memory usage can be tracked with `memory_usage`.
+
+Tracking the execution time can be achieved using the context manager of `Timer`.
+
+
+path
+----
+
+The path management of the package happens inside this module.
+
+Several default paths are set:
+
+- toy
+- toy config
+- toy fit
+- toy fit config
+- log
+- efficiency
+- genlevel mc
+- plot style
+- fit result
+
+- Additional paths can be added with `register_path`.
+
+- To save a file, `prepare_path` takes care of the right naming, implicit
+folder creation and more.
+- If, in addition, you want to work with a
+file in a thread-safe manner, the function (or better, context manager)
+`work_on_file` can be used.
+
+ 
+pdf
+---
+
+TODO
+
+
+random
+------
+
+Any randomness related utils.
+
+- In order to retrieve a *really* random integer, use `get_urandom_int`.
+
+
+root
+----
+
+ROOT framework related functions including iterators for ROOT containers.
+
+- `load_library` is used to load a C++ library or to compile it inplace.
+
+- `destruct_object` deletes ROOT objects safely.
+
+- A functional helper is the `execute_and_return_self` function, which 
+executes an object and returns it again.
+
+#### Different Converters
+
+The following converters are implemented:
+
+- Python list to RooAbsCollection
+- Python list to RooArgList
+- Python list to RooArgSet
+
+- RooArgSet to Python set
+- RooArgList to Python list
+
+The following iterators are implemented:
+
+- To iterate over a RooAbsCollection, use `iterate_roocollection`
 
-TODO: Extend this README
diff --git a/analysis/utils/config.py b/analysis/utils/config.py
index 3141a4e933ab40cb805f64b4c11aaa11ad29c01b..91d7482c6dd7487be9110cf1383ec79e7a66e0ed 100644
--- a/analysis/utils/config.py
+++ b/analysis/utils/config.py
@@ -416,7 +416,7 @@ def get_shared_vars(config, external_vars=None):
 
         @id/name/title/config
 
-    where config follows the conventions of `configure_parameter`. In further occurences,
+    where config follows the conventions of `configure_parameter`. In further occurrences,
     `@id` is enough.
 
     Arguments:
@@ -426,12 +426,12 @@ def get_shared_vars(config, external_vars=None):
             over the configuration. Defaults to None.
 
     Return:
-        dict: Shared parameters build in the same parameter hierachy as the model they
+        dict: Shared parameters built in the same parameter hierarchy as the model they
             are included in.
 
     Raise:
         ValueError: If one of the parameters is badly configured.
-        KeyError: If a parameter is refered to but never configured.
+        KeyError: If a parameter is referred to but never configured.
 
     """
     # Create shared vars
diff --git a/analysis/utils/root.py b/analysis/utils/root.py
index e49b13562084ecf0ddef1cdd858c3f1d16644767..a675de9c3ae0583ed38105e88736a2439b101e58 100644
--- a/analysis/utils/root.py
+++ b/analysis/utils/root.py
@@ -120,7 +120,7 @@ def list_to_rooabscollection(iterable, collection_type):
 
 
 def list_to_rooarglist(iterable):
-    """Convert a list into a RooArgSet.
+    """Convert a list into a RooArgList.
 
     Arguments:
         iterable (iterable): Iterable to convert.
diff --git a/ci/static_checker.sh b/ci/static_checker.sh
index 5915cd589da382bd5b9ece9f5d2c558f16f5f7e2..b72fb191fcfe4f7e60e97a344364c9e32a942877 100755
--- a/ci/static_checker.sh
+++ b/ci/static_checker.sh
@@ -10,11 +10,11 @@ tail -n 3 pylint_report.txt
 echo "============================== Pylint check DIFF ==============================="
 diff-quality --violations=pylint --fail-under=95 pylint_report.txt --options="--rcfile=ci/pylintrc"
 
-echo "============================= Codestyle check FULL ============================="
+echo "============================= Code style check FULL ============================="
 pycodestyle --max-line-length=1000 analysis > report_pycodestyle.txt || (exit 0)
 pycodestyle --statistics -qq --max-line-length=100 analysis || (exit 0)
 
-echo "============================ Codestyle check DIFF =============================="
+echo "============================ Code style check DIFF =============================="
 diff-quality --violations=pycodestyle --fail-under=95 report_pycodestyle.txt  --options="--max-line-length=100"
 
 echo "===========================  Finished static checks ============================"
diff --git a/docs/api/conf.py b/docs/api/conf.py
index d81068a99b320eea37fe4acff4e02fd39146df66..97275bf2afe3d3136304e26a279c01e6f6367f5b 100644
--- a/docs/api/conf.py
+++ b/docs/api/conf.py
@@ -8,7 +8,7 @@
 # containing dir.
 #
 # Note that not all possible configuration values are present in this
-# autogenerated file.
+# auto-generated file.
 #
 # All configuration values have a default; values that are commented out
 # serve to show the default.
@@ -30,7 +30,7 @@
 
 
 
-# Manually added tthings
+# Manually added things
 import analysis
 
 autoclass_content = 'both'  # document modules and packages
diff --git a/docs/api/tools/change_headline.py b/docs/api/tools/change_headline.py
index 54a35619e9fc8c9c6faa7b6b63af26fefc2bc07a..bbd76a4af9ca489a5fc71ce6d5c4f5ef4c733044 100644
--- a/docs/api/tools/change_headline.py
+++ b/docs/api/tools/change_headline.py
@@ -13,9 +13,9 @@ for rest_file in parsed_args.files:
             continue
         replacement = first_word.split('.')[-1]
         underline = f.readline()[0] * len(replacement)
-        lower_file = f.read() 
+        lower_file = f.read()
     with open(rest_file, 'w') as f:
         f.write("\n".join((replacement, underline, lower_file)))
     n_files += 1
 
-print("finished sucessfully parsing {} files".format(n_files))
+print("finished successfully parsing {} files".format(n_files))
diff --git a/docs/make_docs.sh b/docs/make_docs.sh
index 510c9b9ad013e7bf9a7f5775c672d01cc33f7eb3..4c66e7fc5db18df61135c8fcadc849c1863bf5b1 100755
--- a/docs/make_docs.sh
+++ b/docs/make_docs.sh
@@ -9,4 +9,4 @@ popd > /dev/null
 sphinx-apidoc -o ${MAKE_DOCS_PATH}/api ${MAKE_DOCS_PATH}/../analysis  -fMeT && \
 python ${MAKE_DOCS_PATH}/api/tools/change_headline.py ${MAKE_DOCS_PATH}/api/analysis.* && \
 make -C ${MAKE_DOCS_PATH}/api clean && make -C ${MAKE_DOCS_PATH}/api html -j4 && \
-echo "Documentation succesfully build!" || echo "FAILED to build Documentation"
+echo "Documentation successfully build!" || echo "FAILED to build Documentation"
diff --git a/tests/test_config.py b/tests/test_config.py
index cb7dac0b18e2f988c4504ae0e707fa28b8423623..1daba60c6f47da6714dc52315124b3d27b69fac3 100644
--- a/tests/test_config.py
+++ b/tests/test_config.py
@@ -33,7 +33,7 @@ def create_tempfile(suffix=None):
     try:
         os_handle, filename = tempfile.mkstemp(suffix=suffix)
     except Exception:  # aiming at interruptions
-        print("Exception occured while creating a temp-file")
+        print("Exception occurred while creating a temp-file")
         raise
     finally:
         atexit.register(cleanup_file, filename)
diff --git a/tests/test_data.py b/tests/test_data.py
index 61a2f3a2d0d49a7d491ebbe0c64e51a0248e68b2..2575b051b35d6068525f8c5fc306b5f8f4b797eb 100644
--- a/tests/test_data.py
+++ b/tests/test_data.py
@@ -5,7 +5,7 @@
 # @author Albert Puig (albert.puig@cern.ch)
 # @date   16.05.2017
 # =============================================================================
-"""Test data-related funcionality."""
+"""Test data-related functionality."""
 from __future__ import print_function, division, absolute_import
 
 import os