From 28214d4e861a855868a0a5fd841aacc4fd97f21c Mon Sep 17 00:00:00 2001
From: Olga <olga.vladimirovna.datskova@cern.ch>
Date: Sun, 15 Nov 2020 20:35:53 +0100
Subject: [PATCH] Adding VOBox container and CE guides

---
 docs/site/ce_guide.md               | 134 +++++++++++++++
 docs/site/vobox_container.md        | 156 ++++++++++++++++++
 docs/site/vobox_legacy.md           | 246 ++++++++++++++++++++++++++++
 docs/{ => user}/jalien_migration.md |   0
 docs/{ => user}/jalien_tutorial.md  |   0
 mkdocs.yml                          |  14 +-
 6 files changed, 546 insertions(+), 4 deletions(-)
 create mode 100644 docs/site/ce_guide.md
 create mode 100644 docs/site/vobox_container.md
 create mode 100644 docs/site/vobox_legacy.md
 rename docs/{ => user}/jalien_migration.md (100%)
 rename docs/{ => user}/jalien_tutorial.md (100%)

diff --git a/docs/site/ce_guide.md b/docs/site/ce_guide.md
new file mode 100644
index 0000000..3e378dd
--- /dev/null
+++ b/docs/site/ce_guide.md
@@ -0,0 +1,134 @@
+# Computing Element Guide
+
+This guide provides instructions on setting up a JAliEn Computing Element (CE), either by running it directly from CVMFS [(Option 1)](#option-1-running-from-cvmfs) or by cloning the JAliEn Git repository [(Option 2)](#option-2-compile-from-source).
+
+!!! warning "Update from 9/11/20"
+    Due to dependency issues in the build system, we are currently unable to push new JAliEn tags into CVMFS.
+    Consequently, the JAliEn builds currently provided by alienv are __severely outdated__.
+
+    For now, please use [Option 2](#option-2-compile-from-source) when setting up a CE, with this fork: ```https://gitlab.cern.ch/mstoretv/jalien.git```
+    It contains a workaround that will point JobAgents to a different CVMFS directory - one that we are able to update independently of the build system.
+
+## Option 1: Running from CVMFS
+
+This approach only requires CVMFS and a valid Grid certificate or a user/host token in ```~/.globus```.
+
+1. Ensure that the environment is set before starting:
+
+    ```console
+    ~$ export X509_USER_PROXY=<your_path_here>
+    ```
+
+2. Load the JAliEn environment:
+
+    ```console
+    ~$ alienv enter JAliEn
+    ```
+
+3. Run the CE:
+
+    ```console
+    ~$ jalien ComputingElement
+    ```
+
+This will start a JAliEn CE with the default configuration options.
+These can be overridden by placing the desired configuration in ```~/.j/config```.
+An overview of the config files and their default values can be found [here](https://gitlab.cern.ch/jalien/jalien/-/tree/master/config).
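+
+For example, a minimal sketch of overriding the logging setup, assuming a local clone of the repository linked above and that ```logging.properties``` is the file you want to override:
+
+```console
+~$ mkdir -p ~/.j/config
+~$ cp jalien/config/logging.properties ~/.j/config/
+~$ # edit ~/.j/config/logging.properties as needed before (re)starting the CE
+```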
+
+!!! hint "User/Host token"
+    User and host tokens can be generated from within a [JAliEn shell](../../jalien_commands/#token) by using the ```token -t user/host``` command.
+    These can be placed as ```user[cert/key].pem``` in the ```~/.globus``` folder, instead of a full Grid certificate.
+
+## Option 2: Compile From Source
+
+!!! info "Requirements"
+    This approach requires build tools such as JDK 11+ to be available.
+
+This approach gives access to the latest CE changes without waiting for a new tag to be pushed to CVMFS (note, however, that JobAgents will still run from CVMFS).
+It also allows for more overrides, such as of the agent startup script (at your own risk).
+
+1. Clone the JAliEn Git repository:
+
+    ```console
+    ~$ git clone https://gitlab.cern.ch/jalien/jalien.git
+    ```
+
+2. Compile JAliEn using the following commands:
+
+    ```console
+    ~$ cd jalien
+    ~$ ./compile.sh all
+    ```
+ 
+    !!! hint "Configuration overrides"
+        Configuration files can be edited directly in ```jalien/config``` before compiling.
+
+3. Ensure the environment is set up before starting:
+
+    ```console
+    ~$ export X509_USER_PROXY=<your_path_here>
+    ```
+
+4. Run the CE as follows:
+
+    ```console
+    ~$ ./jalien ComputingElement
+    ```
+
+## Managing the CE
+
+A wrapper script can be used as a shortcut for common tasks such as ```start```, ```stop``` and checking the service ```status```:
+
+??? example "Example: jalienCE.sh"
+    ```bash
+    #!/bin/bash
+
+    export X509_USER_PROXY=
+    export ALICE_LOGDIR=  # e.g. ~/ALICE/alien-logs
+    export JALIEN_PATH=   # e.g. ~/jalien (remove if running from CVMFS)
+
+    cd "$ALICE_LOGDIR" || exit 1
+
+    if [ "$1" = "start" ]
+      then
+        echo "Starting JAliEn CE"
+        nohup "$JALIEN_PATH"/jalien ComputingElement & echo $! > CE.pid
+    elif [ "$1" = "stop" ]
+      then
+        echo "Stopping JAliEn CE"
+        pkill -f alien.site.ComputingElement
+    elif [ "$1" = "status" ]
+      then
+       if ps -p "$(cat "$ALICE_LOGDIR"/CE.pid)" > /dev/null 2>&1
+       then
+          echo "JAliEn CE is running"
+       else
+          echo "JAliEn CE is NOT running!"
+       fi
+    else
+        echo "Usage: 'start', 'stop' or 'status'"
+    fi
+    ```
+
+Be aware that the JAliEn CE will terminate once its token expires.
+To avoid downtime, a service can be set up to restart it automatically once that happens.
+A simple solution is to pair the above wrapper script with a restarter, as follows:
+
+??? example "Example: jalien-restarter.sh"
+    ```bash
+    #!/bin/bash
+
+    JALIEN_CE_WRAPPER=~/jalienCE.sh
+
+    while true
+    do
+       if [ "$("$JALIEN_CE_WRAPPER" status)" = "JAliEn CE is running" ]
+       then
+          echo "OK"
+       else
+          echo "Not OK"
+          "$JALIEN_CE_WRAPPER" start
+       fi
+       sleep 1800
+    done
+    ```
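+
+To keep the restarter itself alive across logouts, it can, for instance, be launched in the background (a minimal sketch; the log path is arbitrary):
+
+```console
+~$ chmod +x ~/jalienCE.sh ~/jalien-restarter.sh
+~$ nohup ~/jalien-restarter.sh > ~/jalien-restarter.log 2>&1 &
+```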
diff --git a/docs/site/vobox_container.md b/docs/site/vobox_container.md
new file mode 100644
index 0000000..1f309cd
--- /dev/null
+++ b/docs/site/vobox_container.md
@@ -0,0 +1,156 @@
+# VOBox Container
+
+This guide describes how to create a networked Docker container for VO-Box use.
+
+## Requirements
+
+| | |
+|-|-|
+| __CentOS__ | 7.0 or later |
+| __Docker__ | 1.12 or later (Tested on 17.03.1-CE a.k.a. "1.14") |
+| __CVMFS__  | Installed on the host |
+
+## Setup Networking
+
+Create a new MACVLAN bridge with both IPv4 and IPv6 support, named __docknet__, using the following command:
+
+```console
+~# docker network create -d macvlan \
+	--subnet=137.138.47.192/26 \
+	--gateway=137.138.47.193 \
+	--ipv6 \
+	--subnet=2001:1458:201:b50e::/64 \
+	--gateway=2001:1458:201:b50e::1 \
+	-o parent=eth0 docknet
+```
+
+??? note "Command details"
+    
+    * __subnet/gateway__ must be replaced with the settings applicable to your network, while __parent__ must be set to the host network interface the bridge should attach to.
+    * Why MACVLAN? Docker's normal approach to bridging is simply a NAT scheme, whereas a MACVLAN bridge gives the containers direct access to the network.
+    * Containers on this bridge will be assigned their own MAC addresses, and appear as conventional computers on the local network.
+    * However, due to the way MACVLAN works, the host will not be able to ping the containers (and vice versa).
+
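+Before creating the bridge, the values to use for __subnet__, __gateway__ and __parent__ can typically be read from the host's own network configuration, for example (assuming ```eth0``` as above):
+
+```console
+~# ip -4 route show default   # default gateway and outgoing interface (parent)
+~# ip addr show eth0          # host addresses and prefix lengths on that interface
+```
+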
+## Create Container
+
+1. Clone the repository containing the desired preconfigured setup: 
+
+    | Scheduler | Command |
+    |:----------|:--------|
+    | __HTCondor__  | ```~$ git clone https://gitlab.cern.ch/mstoretv/dockervobox.git``` |
+    | __ARC/Generic__ | ```~$ git clone https://gitlab.cern.ch/mstoretv/dockervobox_arc.git``` |
+
+2. Build container image using the following command:
+
+    ```console
+    ~$ cd dockervobox
+    ~$ docker build -t voboximg .
+    ```
+
+    !!! warning
+        Be sure to change the default root password! This is simply __root__ in the above image.
+
+3. Create and launch a new CentOS container connected to the MACVLAN bridge:
+
+    ```console
+    ~$ docker run -d -v /cvmfs:/cvmfs \
+        -h myalienvo.cern.ch \
+        --name=myvocontainer \
+        --net=docknet \
+        --ip=137.138.47.251 \
+        --ip6=2001:1458:201:b50e::100:3e \
+        voboximg
+    ```
+
+    ??? note "Command details"
+        __-v__ mounts the directory __/cvmfs__ from the host as __/cvmfs__ within the container<br>
+        __-h__ sets the hostname __myalienvo.cern.ch__<br>
+        __--name__ sets the container name (as seen from Docker) to __myvocontainer__<br>
+        __--net__ attaches the container to the previously created MACVLAN bridge __docknet__<br>
+        __--ip/ip6__ sets the desired network IPs for the container
+
+    !!! info
+        Docker gives containers limited access to the host kernel, which __may prevent some tools from working properly__.
+        A possible workaround is to give the container additional privileges (see [advanced topics](#privileged-mode) for details).<br><br>
+        As opposed to mounting CVMFS from the host, it is also possible to have it installed normally within the container.
+        However, this will only work on a container with full access privileges.
+        Be aware that unless otherwise configured, this will give each container its own CVMFS cache.
+
+## Service Management
+
+All services are started automatically alongside the container.
+If needed, you can also manage these manually by using the following syntax:
+
+```console
+~$ service yourservice {start|stop|restart|condrestart|try-restart|reload|force-reload|status}
+```
+
+As an example, the following command may be used to restart rsyslog:
+
+```console
+~$ service rsyslog restart
+```
+
+!!! example "Example Deployment"
+    1. Install a host certificate.
+    2. Start the services using the following commands:
+        ```console
+        ~$ service alice-box-proxyrenewal start < /dev/null
+        ~$ /usr/sbin/edg-mkgridmap --output=/etc/grid-security/grid-mapfile --safe
+        ```
+    3. Adjust ```~/.alien/Environment``` as needed. 
+
+
+## Useful Commands
+
+The following table lists commands that may be of use when working with the VOBox containers.<br>
+Refer to [command line Docker documentation](https://docs.docker.com/engine/reference/commandline/cli/) for more details.
+
+| Command | Description |
+|:--------|:------------|
+| ```~$ docker start myvocontainer``` | Start the container<br>Note: This way, the container will remain active until you type<br> ```~$ docker stop myvocontainer``` |
+| ```~$ docker exec -it myvocontainer /bin/bash``` | Start a bash session within the container |
+| ```~$ ssh root@your.ip.vobox.here``` | Login into the container |
+| ```~$ docker ps``` | List active containers |
+| ```~$ docker ps -a``` | List all containers |
+| ```~$ docker images``` | List all images cached locally |
+| ```~$ docker commit myvocontainer myvoimagename``` | Save the container state as an image,<br>with __myvoimagename__ being the desired image name |
+| ```~$ docker export myvocontainer > /home/myexportedcontainer.tar``` | Save the container as a tar file,<br>with __/home/myexportedcontainer.tar__ being the path and name for the exported container |
+
+??? help "Docker commit vs export"
+    The ```commit``` command will save the container as an image in Docker's internal format.
+    It preserves the container's history, layers and metadata, and creates a new layer on top.
+    Beware that an image will rapidly grow in size if it is repeatedly committed, as a result of all the stacked layers.<br><br>
+    The ```export``` command will save the container to a tar file that can be reimported and used to launch new containers on the same machine, or uploaded and/or transferred elsewhere.
+    However, it does not store any history or metadata, and flattens all previous layers into a single one.
+    Exporting and reimporting can thus be used to __flatten__ a container that has grown large, e.g. as a result of having used ```commit``` several times.
+
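+A minimal sketch of such a flattening round trip, using the container name from above and an arbitrary image name:
+
+```console
+~$ docker export myvocontainer | docker import - myflatimage
+```
+
+The resulting __myflatimage__ can then be used with ```docker run``` as in the [create container](#create-container) step. Note that ```import``` does not carry over metadata such as the entrypoint, so the command to run has to be given explicitly.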
+
+## Advanced Topics
+
+### Privileged Mode
+
+For the sake of isolation, access to the kernel is restricted within the containers. 
+To enable some access for networking tools, such as iptables, add ```--cap-add=NET_ADMIN``` when [creating the container](#create-container). More access can be given through ```--cap-add=SYS_ADMIN```, and full access can be given with ```--privileged```. 
+
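+For instance, a sketch of adding the NET_ADMIN capability to the ```docker run``` command used in the [create container](#create-container) step:
+
+```console
+~$ docker run -d -v /cvmfs:/cvmfs \
+    --cap-add=NET_ADMIN \
+    -h myalienvo.cern.ch \
+    --name=myvocontainer \
+    --net=docknet \
+    --ip=137.138.47.251 \
+    --ip6=2001:1458:201:b50e::100:3e \
+    voboximg
+```
+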
+!!! warning
+    Be aware that this will create pathways for potentially breaking container isolation.
+
+### Open Files Limit
+
+If you plan on running more than one VOBox container on a single host, the system limit for the maximum number of open file descriptors will need to be increased.
+The default ```ulimit``` is otherwise bound to be reached at some point, causing the containers and their processes to become unresponsive and terminate.
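+
+A hedged sketch of raising the relevant limits, with placeholder values to be adjusted to your site:
+
+```console
+~# sysctl -w fs.file-max=1000000                # raise the kernel-wide ceiling; persist it in /etc/sysctl.d/ to survive reboots
+~$ docker run --ulimit nofile=65536:65536 ...   # per-container soft:hard limit, added to the usual run options
+```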
+
+### Access from Host
+
+As mentioned earlier, the host will be unable to reach/ping the containers, and vice versa. A workaround is to create an additional ("normal") Docker bridge and connect the containers to it as well; traffic between the host and the containers can then flow over that second network.
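+
+A minimal sketch, using an arbitrary name for the extra bridge:
+
+```console
+~# docker network create hostaccess               # a regular NAT-ed bridge network
+~# docker network connect hostaccess myvocontainer
+```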
+
+### AutoFS Bug
+
+CVMFS may respond with a <span style="color: red">"too many symbolic links"</span> error when accessed from a new container or after a reboot.
+This is known as an _autofs bug_, and can be avoided by disabling autofs on the host and mounting your CVMFS directory manually. 
+If you prefer to leave autofs enabled, you can clear the error by accessing a directory within CVMFS on the host (e.g. ```/cvmfs/alice.cern.ch```) and then restarting the container.
+
+!!! warning
+    Be aware that this error will likely return on the next reboot.
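+
+If you opt for disabling autofs, a minimal sketch of mounting the ALICE repository manually on the host:
+
+```console
+~# systemctl stop autofs && systemctl disable autofs
+~# mkdir -p /cvmfs/alice.cern.ch
+~# mount -t cvmfs alice.cern.ch /cvmfs/alice.cern.ch
+```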
+
diff --git a/docs/site/vobox_legacy.md b/docs/site/vobox_legacy.md
new file mode 100644
index 0000000..0629d90
--- /dev/null
+++ b/docs/site/vobox_legacy.md
@@ -0,0 +1,246 @@
+# VOBox Container (Legacy)
+
+This guide describes how to create a networked Docker container for VO-Box use.
+If possible, please refer to [this guide](../vobox_container) instead.
+
+!!! warning "Update: 10/01/20"
+    Please be aware that the instructions listed below are outdated.
+    A privileged CentOS 6 container could previously be set up in the same way as a _traditional_ VOBox, as long as it was attached to a MACVLAN bridge.
+    Due to the introduction of systemd in CentOS 7, the setup procedure for VOBoxes in containers is now different.
+    To simplify setup, the following Dockerfiles can instead be used to quickly deploy preconfigured container VOBoxes:
+    
+    * HTCondor: [https://gitlab.cern.ch/mstoretv/dockervobox](https://gitlab.cern.ch/mstoretv/dockervobox)
+    * ARC/Generic: [https://gitlab.cern.ch/mstoretv/dockervobox_arc](https://gitlab.cern.ch/mstoretv/dockervobox_arc)
+
+## Requirements
+
+| | |
+|-|-|
+| __CentOS__ | 7.0 or later |
+| __Docker__ | 1.12 or later (Tested on 17.03.1-CE a.k.a. "1.14") |
+| __CVMFS__  | Installed on the host |
+
+!!! hint
+    The commands listed below must be executed as __root__.
+
+## Setup Networking
+
+Create a new MACVLAN bridge named __docknet__ using the following command:
+
+```console
+~# docker network create -d macvlan \
+	--subnet=137.138.47.192/26 \
+	--gateway=137.138.47.193 \
+	--ipv6 \
+	--subnet=2001:1458:201:b50e::/64 \
+	--gateway=2001:1458:201:b50e::1 \
+	-o parent=eth0 docknet
+```
+
+??? note "Command details"
+    
+    * __subnet/gateway__ must be replaced with the settings applicable to your network, while __parent__ must be set to the host network interface the bridge should attach to.
+    * Why MACVLAN? Docker's normal approach to bridging is simply a NAT scheme, whereas a MACVLAN bridge gives the containers direct access to the network.
+    * Containers on this bridge will be assigned their own MAC addresses, and appear as conventional computers on the local network.
+    * However, due to the way MACVLAN works, the host will not be able to ping the containers (and vice versa).
+
+## Create Container
+
+1. Download the latest CentOS from [Docker Hub](https://hub.docker.com/) 
+(for CentOS 6.9, replace ```centos:latest``` with ```centos:centos6.9```):
+
+    ```console
+    ~$ docker pull centos:latest
+    ```
+
+    !!! warning
+        Be sure to change the default root password! This is simply __root__ in the above image.
+
+2. Create and launch a new CentOS container connected to the MACVLAN bridge:
+
+    ```console
+    ~$ docker run -it -v /cvmfs:/cvmfs \
+        -h myalienvo.cern.ch \
+        --name=myvocontainer \
+        --net=docknet \
+        --ip=137.138.47.251 \
+        --ip6=2001:1458:201:b50e::100:3e \
+        centos /bin/bash
+    ```
+
+    ??? note "Command details"
+        __-v__ mounts the directory __/cvmfs__ from the host as __/cvmfs__ within the container<br>
+        __-h__ sets the hostname __myalienvo.cern.ch__<br>
+        __--name__ sets the container name (as seen from Docker) to __myvocontainer__<br>
+        __--net__ attaches the container to the previously created MACVLAN bridge __docknet__<br>
+        __--ip/ip6__ sets the desired network IPs for the container
+
+    !!! info
+        Docker gives containers limited access to the host kernel, which __may prevent some tools from working properly__.
+        A possible workaround is to give the container additional privileges (see [advanced topics](#privileged-mode) for details).<br><br>
+        As opposed to mounting CVMFS from the host, it is also possible to have it installed normally within the container.
+        However, this will only work on a container with full access privileges.
+        Be aware that unless otherwise configured, this will give each container its own CVMFS cache.
+
+## Container Usage
+
+The following table lists useful container-specific commands:
+
+| Command | Description |
+|:--------|:------------|
+| ```root@myalienvo: ~$ exit``` | Exit running container<br>Note: Docker will stop the container once this bash session is closed |
+| ```~$ docker start myvocontainer``` | Start the container<br>Note: This way, the container will remain active until you type<br> ```~$ docker stop myvocontainer``` |
+| ```~$ docker exec -it myvocontainer /bin/bash``` | Start a bash session within the container |
+
+From this point onward, proceed with configuring the VO-Box as you normally would on a VM, e.g. as a [WLCG VOBOX](https://twiki.cern.ch/twiki/bin/view/LCG/WLCGvoboxDeployment).
+Once again, note that depending on the level of access given to the container, some tools may not function, and using ```service start``` may produce errors (see [advanced topics](#advanced-topics) for more information).
+
+## Useful Commands
+
+The following table lists commands that may be of use when working with the VOBox containers.<br>
+Refer to [command line Docker documentation](https://docs.docker.com/engine/reference/commandline/cli/) for more details.
+
+| Command | Description |
+|:--------|:------------|
+| ```~$ docker ps``` | List active containers |
+| ```~$ docker ps -a``` | List all containers |
+| ```~$ docker images``` | List all images cached locally |
+| ```~$ docker commit myvocontainer myvoimagename``` | Save the container state as an image,<br>with __myvoimagename__ being the desired image name |
+| ```~$ docker export myvocontainer > /home/myexportedcontainer.tar``` | Save the container as a tar file,<br>with __/home/myexportedcontainer.tar__ being the path and name for the exported container |
+
+??? help "Docker commit vs export"
+    The ```commit``` command will save the container as an image in Docker's internal format.
+    It preserves the container's history, layers and metadata, and creates a new layer on top.
+    Beware that an image will rapidly grow in size if it is repeatedly committed, as a result of all the stacked layers.<br><br>
+    The ```export``` command will save the container to a tar file that can be reimported and used to launch new containers on the same machine, or uploaded and/or transferred elsewhere.
+    However, it does not store any history or metadata, and flattens all previous layers into a single one.
+    Exporting and reimporting can thus be used to __flatten__ a container that has grown large, e.g. as a result of having used ```commit``` several times.
+
+## Advanced Topics
+
+### Setting DNS
+
+When a container is restarted, Docker will overwrite its DNS configuration with a default one.<br>
+To avoid having to reapply the DNS settings on every container restart, this can be changed either by adding ```--dns=YOUR_DNS_IP``` to the run command in the [create container step](#create-container), or by changing the default DNS used by Docker.
+The latter can be done by creating the file ```/etc/docker/daemon.json``` with the following contents:
+
+```json
+{
+    "dns": ["YOUR_DNS_IP1", "YOUR_DNS_IP2"]
+}
+```
+
+!!! hint
+    Be aware that Docker must be restarted for the changes to take effect.
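+
+    On a systemd-based host such as CentOS 7, this can be done with, e.g.:
+
+    ```console
+    ~# systemctl restart docker
+    ```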
+
+### Automated Service Start
+
+Unlike a full OS in a virtual machine, Docker containers have no init system that can be used to start services automatically. One possible way to achieve this at container launch is by using a Dockerfile.
+In essence, a ```Dockerfile``` contains a set of commands and arguments that will be applied to a given image. This file can thus contain a full list of commands to execute at launch, or simply point to a bash script, for example:
+
+```docker
+FROM mycontainerimage
+ENTRYPOINT /etc/init.sh && /bin/bash
+```
+
+Here, __mycontainerimage__ is the image to be used, and ```init.sh``` is a bash script to run at launch.
+
+??? example "Example init.sh"
+
+    ```bash
+    #! /bin/bash
+
+    d=`date '+%y%m%d-%H%M%S'`
+    log=/tmp/init-$d.log
+
+    > $log && exec > $log 2>&1 < /dev/null
+
+    f=/etc/sysconfig/iptables
+    [ -r $f ] && iptables-restore < $f
+
+    service dnsmasq start
+
+    #
+    # hack: work around PID file corruptions...
+    #
+
+    service rsyslog stop
+
+    for i in 1 2
+    do
+        echo == rsyslog $i
+        sleep 5
+        service rsyslog status || service rsyslog start
+        perl -ni -e 'print if $. < 2' /var/run/syslogd.pid
+    done
+
+    svcs='
+        crond
+        sshd
+        gsisshd
+        alice-box-proxyrenewal
+        condor
+        autofs
+    '
+
+    #
+    # random ones have been found absent...
+    # we try up to 3 times for now
+    #
+
+    for i in 1 2 3
+    do
+        for svc in $svcs
+        do
+         echo == $svc $i
+         sleep 1
+         service $svc status || service $svc start
+        done
+    done
+
+    service fetch-crl-boot start > /tmp/fetch-crl-$$.log 2>&1 < /dev/null &
+
+    exit 0
+    ```
+!!! info
+    By running ```~$ docker build -t mynewcontainerimage .``` Docker will in this case produce a new image __mynewcontainerimage__ based on __mycontainerimage__, in which the contents of ```/etc/init.sh``` will be run at start.
+    Note that the Dockerfile must be named ```Dockerfile```, and the above build command must be executed in the directory containing it.
+    The entrypoint runs regardless of the container's access privileges, so ```/etc/init.sh``` can contain ```service ... start``` commands even when no additional privileges have been given.
+
+    Alternatively, all commands can be given within the Dockerfile, without having to reference a separate bash script. However, this will require the image to be rebuilt every time changes are added to the Dockerfile.
+
+### Privileged Mode
+
+For the sake of isolation, access to the kernel is restricted within the containers. 
+To enable some access for networking tools, such as iptables, add ```--cap-add=NET_ADMIN``` when [creating the container](#create-container). More access can be given through ```--cap-add=SYS_ADMIN```, and full access can be given with ```--privileged```. 
+
+!!! warning
+    Be aware that this will create pathways for potentially breaking container isolation.
+
+### Size Limits
+
+The maximum container size can be limited by adding ```--storage-opt size=XXG``` when [creating the container](#create-container) (replace "XX" with the desired storage capacity). 
+However, note that Docker will most likely require its storage driver to be changed for this option to work.
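+
+A hedged sketch of selecting a storage driver that supports per-container size limits (e.g. ```devicemapper```) via ```/etc/docker/daemon.json```, with a placeholder default base size; Docker must be restarted afterwards:
+
+```json
+{
+    "storage-driver": "devicemapper",
+    "storage-opts": [
+        "dm.basesize=20G"
+    ]
+}
+```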
+
+!!! warning
+    Be aware that doing this requires deleting all stored containers.
+    Before continuing, back up any containers you want to keep by using ```docker export```.
+
+### Access from Host
+
+As mentioned earlier, the host will be unable to reach/ping the containers, and vice versa. A workaround is to create an additional ("normal") Docker bridge and connect the containers to it as well; traffic between the host and the containers can then flow over that second network.
+
+### AutoFS Bug
+
+CVMFS may respond with a <span style="color: red">"too many symbolic links"</span> error when accessed from a new container or after a reboot.
+This is known as an _autofs bug_, and can be avoided by disabling autofs on the host and mounting your CVMFS directory manually. 
+If you prefer to leave autofs enabled, you can clear the error by accessing a directory within CVMFS on the host (e.g. ```/cvmfs/alice.cern.ch```) and then restarting the container.
+
+!!! warning
+    Be aware that this error will likely return on the next reboot. 
+
+### HTCondor Error
+
+HTCondor may throw the following error: <span style="color: red">Failed DISCARD_SESSION_KEYRING_ON_STARTUP</span>.<br>
+This can be fixed by setting ```DISCARD_SESSION_KEYRING_ON_STARTUP = false``` in the Condor config file.
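+
+For example, assuming the standard HTCondor configuration directory on the VOBox (the file name is a placeholder):
+
+```console
+~# echo 'DISCARD_SESSION_KEYRING_ON_STARTUP = false' >> /etc/condor/config.d/99-local.conf
+~# condor_reconfig
+```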
+
diff --git a/docs/jalien_migration.md b/docs/user/jalien_migration.md
similarity index 100%
rename from docs/jalien_migration.md
rename to docs/user/jalien_migration.md
diff --git a/docs/jalien_tutorial.md b/docs/user/jalien_tutorial.md
similarity index 100%
rename from docs/jalien_tutorial.md
rename to docs/user/jalien_tutorial.md
diff --git a/mkdocs.yml b/mkdocs.yml
index 6976047..e5a457e 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -22,13 +22,19 @@ markdown_extensions:
   - admonition
   - pymdownx.superfences
   - pymdownx.tabbed
+  - pymdownx.details
   - toc:
       permalink: true
 
 nav:
     - Introduction:        index.md
-    - Migration to JAliEn: jalien_migration.md
-    - JAliEn tutorial:     jalien_tutorial.md
+    - User:
+      - Migration to JAliEn: user/jalien_migration.md
+      - JAliEn tutorial: user/jalien_tutorial.md
+    - Site:
+      - VOBox Container:          site/vobox_container.md
+      - VOBox Container (Legacy): site/vobox_legacy.md
+      - Computing Element Guide:  site/ce_guide.md
     - Commands:
-      - alien.py:          alienpy_commands.md
-      - JAliEn:            jalien_commands.md
+      - alien.py: alienpy_commands.md
+      - JAliEn:   jalien_commands.md
-- 
GitLab