diff --git a/examples/Classification.ipynb b/examples/Classification.ipynb
index 3870aa2eadde64bc59da1ba2ae0ab5c5a588bee1..c5ded0705049756f124b1f7870e5cd7aa19e564f 100644
--- a/examples/Classification.ipynb
+++ b/examples/Classification.ipynb
@@ -253,19 +253,38 @@
     "None"
    ]
   },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Export to lwtnn\n",
+    "In order to use the network in lwtnn, we need to export the neural network with the `export()` method. This exports one network per fold. It is the responsibility of the user to implement the cross validation in the analysis framework."
+   ]
+  },
   {
    "cell_type": "code",
    "execution_count": null,
    "metadata": {},
    "outputs": [],
-   "source": []
+   "source": [
+    "net.export(\"lwtnn\")"
+   ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
    "metadata": {},
    "outputs": [],
-   "source": []
+   "source": [
+    "!ls lwtnn*"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The final, manual step is to run lwtnn's converter using the shortcut script `test.sh`."
+   ]
   }
  ],
  "metadata": {
diff --git a/nnfwtbn/model.py b/nnfwtbn/model.py
index 04b2dc6d6d82a2f708b923eb20ab18cfcf963c5a..ee6c55ddf96fce40a78bfe4abdc20f1113ac2895 100644
--- a/nnfwtbn/model.py
+++ b/nnfwtbn/model.py
@@ -3,6 +3,7 @@ from abc import ABC, abstractmethod
 import os
 import sys
 import h5py
+import json
 
 import numpy as np
 import pandas as pd
@@ -269,6 +270,24 @@ class Normalizer(ABC):
         Check if two normalizers are the same.
         """
 
+    @property
+    @abstractmethod
+    def scales(self):
+        """
+        Every normalizer must reduce to a simple (offset + scale * x)
+        normalization to be used with lwtnn. This property returns the scale
+        parameters for all variables.
+        """
+
+    @property
+    @abstractmethod
+    def offsets(self):
+        """
+        Every normalizer must reduce to a simple (offset + scale * x)
+        normalization to be used with lwtnn. This property returns the offset
+        parameters for all variables.
+        """
+
     def save_to_h5(self, path, key, overwrite=False):
         """
         Save normalizer definition to a hdf5 file.
@@ -377,6 +396,14 @@ class EstimatorNormalizer(Normalizer):
         width = pd.read_hdf(path, os.path.join(key, "width"))
         return cls(None, center=center, width=width)
 
+    @property
+    def scales(self):
+        return 1 / self.width
+
+    @property
+    def offsets(self):
+        return -self.center / self. width
+
 def normalize_category_weights(df, categories, weight='weight'):
     """
     The categorical weight normalizer acts on the weight variable only. The
@@ -615,3 +642,43 @@ class HepNet:
                 instance.norms.append(norm)
 
         return instance
+
+    def export(self, path_base, command="converters/keras2json.py"):
+        """
+        Exports the network such that it can be converted to lwtnn's json
+        format. The method generates a set of files for each cross validation
+        fold. For every fold, the architecture, the weights, the input
+        variables and their normalization is exported. To simplify the
+        conversion to lwtnn's json format, the method also creates a bash
+        script which converts all folds.
+
+        The path_base argument should be a path or a name of the network. The
+        names of the generated files are created by appending to path_base.
+        """
+        for fold_i in range(self.cv.k):
+            # get the architecture as a json string
+            arch = self.models[fold_i].to_json()
+            # write the architecture JSON string to a per-fold file
+            with open('%s_arch_%d.json' % (path_base, fold_i), 'w') as arch_file:
+                arch_file.write(arch)
+
+            # now save the weights as an HDF5 file
+            self.models[fold_i].save_weights('%s_wght_%d.h5' % (path_base, fold_i))
+
+            with open("%s_vars_%d.json" % (path_base, fold_i), "w") \
+                    as variable_file:
+                scales = self.norms[fold_i].scales
+                offsets = self.norms[fold_i].offsets
+
+                inputs = [dict(name=v, offset=o, scale=s)
+                          for v, o, s in zip(self.input_list, offsets, scales)]
+  
+                json.dump(dict(inputs=inputs, class_labels=self.output_list),
+                          variable_file)
+
+            mode = "w" if fold_i == 0 else "a"
+            with open("%s.sh" % path_base, mode) as script_file:
+                print(f"{command} {path_base}_arch_{fold_i}.json "
+                      f"{path_base}_vars_{fold_i}.json "
+                      f"{path_base}_wght_{fold_i}.h5 "
+                      f"> {path_base}_{fold_i}.json", file=script_file)