From 5484ce3fd9853f39dd67d0745c23c55c0d1f394b Mon Sep 17 00:00:00 2001
From: Wei Sheng Lai <wei.sheng.lai@cern.ch>
Date: Wed, 6 Mar 2024 08:41:06 +0100
Subject: [PATCH 01/30] Moving tutorial from FTAG documentation

---
 docs/index.md        |   2 +-
 docs/tutorial-Xbb.md | 604 +++++++++++++++++++++++++++++++++++++++++++
 mkdocs.yaml          |   1 +
 3 files changed, 606 insertions(+), 1 deletion(-)
 create mode 100644 docs/tutorial-Xbb.md

diff --git a/docs/index.md b/docs/index.md
index e8317532..f5816ee6 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -24,7 +24,7 @@ Below are some helpful links to get you started:
 
 !!! question "There is a [channel](https://mattermost.web.cern.ch/aft-algs/channels/gnns) for the framework in the [FTAG Mattermost workspace](https://mattermost.web.cern.ch/signup_user_complete/?id=1wicts5csjd49kg7uymwwt9aho&md=link&sbr=su)"
 
-!!! abstract "A tutorial on how to use Salt is provided at the [FTAG docs page](https://ftag.docs.cern.ch/software/tutorials/tutorial-salt/)"
+!!! abstract "A tutorial on how to use Salt can be found [here](tutorial.md) and [here](tutorial-Xbb.md)"
 
 !!! note "[Contributions](contributing) are welcome! Check out [existing issues](https://gitlab.cern.ch/atlas-flavor-tagging-tools/algorithms/salt/-/issues) for inspiration, or open your own"
 
diff --git a/docs/tutorial-Xbb.md b/docs/tutorial-Xbb.md
new file mode 100644
index 00000000..68aba298
--- /dev/null
+++ b/docs/tutorial-Xbb.md
@@ -0,0 +1,604 @@
+# Salt framework for $X \rightarrow bb$ tagger tutorial
+
+## Introduction
+
+In this tutorial, you will learn to setup and use the [Salt framework](https://gitlab.cern.ch/atlas-flavor-tagging-tools/algorithms/salt/) in the context of $X \rightarrow bb$ tagging.
+Salt is a high-level framework for training state-of-the-art flavour tagging algorithms.
+In addition, plotting scripts are provided to plot the results of the evaluation using the [`puma`](https://github.com/umami-hep/puma) package.
+
+In this tutorial, we cover the following functionalities of Salt:
+
+1. Training of a subjet-based Xbb tagger
+2. Training of a constituent-based Xbb tagger using tracks
+3. Modification of high-level settings and network hyperparameters
+4. Evaluation of results
+
+!!!info "If you are not studying $X \rightarrow bb$ feel free to skip task 2"
+
+The tutorial is meant to be followed in a self-guided manner. You will be prompted to do certain tasks by telling you what the desired outcome will be, without telling you how to do it. Using the [Salt documentation](index.md), you can find out how to achieve your goal. In case you are stuck, you can click on the "hint" toggle box to get a hint. If you have tried a problem for more than 10 minutes, feel free to also toggle the solution with a working example.
+
+???+ question "What to do if you get stuck"
+    
+    In case you encounter some errors or you are completely stuck, you can reach out to the dedicated [FTAG tutorial mattermost channel](https://mattermost.web.cern.ch/aft-algs/channels/ftag-tutorials) (click [here](https://mattermost.web.cern.ch/signup_user_complete/?id=ektad7hj4fdf5nehdmh1js4zfy) to sign up).
+
+This tutorial has been run a few times, click below for intro slides which give some context on the framework and this tutorial:
+
+- [$X \rightarrow bb$ taskforce meeting](https://indico.cern.ch/event/1248303/).
+- [FTAG Workshop 2023](https://indico.cern.ch/event/1311519/timetable/?view=standard#31-salt-tutorial).
+
+
+## Prerequisites
+
+For this tutorial, you need access to a shell on either CERN's `lxplus` or your local cluster with `/cvmfs` access to retrieve the `singularity` image needed. To set this up, please follow the instructions [here](setup.md) by selecting the "singularity" tab in the ["Create Environment"](setup.md#create-environment).
+
+Efficient training is only possible on resources with GPU access.
+It is highly encouraged to use an institute-managed GPU enabled machine if one is available.
+Otherwise, CERN provides special lxplus nodes with GPU access for interactive computing.
+
+You can log in to a CERN lxplus gpu node with:
+
+```bash
+ssh -Y <username>@lxplus-gpu.cern.ch
+```
+
+You can check that your node is configured with GPU access by running 
+
+```bash
+nvidia-smi
+```
+
+If you see a tabular output with information about one or more GPUs, then you are good to continue.
+
+!!! warning "Check your machine is configured correctly"
+    
+    If you see `No devices were found` your node is badly configured, and you should log in again and hope for a new node.
+    
+
+
+### Training datasets
+
+You should copy the training files before doing the tutorial.
+If you don't, the training will be much slower, but you can compensate for that by reducing the number of training and validation jets as hinted in the tasks below.
+The train/val/test samples for the tutorial each have 2M jets and are stored on EOS in the following directory
+
+- `/eos/user/u/umami/tutorials/salt/2023/inputs/`
+
+=== "Copy to user EOS space"
+    If you are running on lxplus, copying the training files to your private storage on `/eos/user/${USER:0:1}/$USER/` is recommended to avoid overly high concurrent access:
+    ```
+    rsync -vaP /eos/user/u/umami/tutorials/salt/2023/inputs/ /eos/user/${USER:0:1}/$USER/training-samples
+    ```
+
+=== "Local Cluster"
+
+    If you are running on your local cluster, you can copy the files to a directory with fast access:
+
+    ```
+    rsync -vaP <cern username>@lxplus.cern.ch:/eos/user/u/umami/tutorials/salt/2023/inputs/ /fast/disk/training-samples/
+    ```
+
+??? warning "Access to EOS is slow, copying files before the tutorial is highly recommended!"
+
+    The training files are stored on EOS, which is a distributed file system. Accessing files on EOS is slow, so it is recommended to copy the files to a local directory before starting the tutorial. If you attempt to run the tutorial directly from EOS, you will experience very slow training times.
+
+
+??? error "What to do if you don't have access to the EOS folder"
+
+    The training files stored on EOS are only shared with users subscribed to the egroups/mailing lists
+    
+    - `atlas-cp-flavtag-btagging-algorithms`
+    - `atlas-cp-flavtag-jetetmiss-BoostedXbbTagging`
+
+    If you are not yet subscribed, please consider doing so to get access to the training files.
+    You can subscribe using the [CERN egroups webpage](https://e-groups.cern.ch/e-groups/EgroupsSearch.do).
+
+    If you already are subscribed and try to copy from inside a singularity container, it might fail. In that case, copy the files without using the singularity container.
+
+When training a model, [it is possible to specify a local directory](training.md#fast-disk-access) with fast access, e.g. `/tmp` to which the files will be copied.
+This will speed up the training on e.g. `lxplus` significantly (though you will still incur the initial cost of copying the files).
+
+The total size of the training, validation and test files is 17GB, make sure you have sufficient free space.
+Alongside the input h5 are the `norm_dict.yaml` and `class_dict.yaml` which are also used for training.
+
+
+### Singularity image
+
+The FTAG group provides salt-ready singularity images via `/cvmfs/unpacked.cern.ch` on lxplus (or any cluster which has `/cvmfs` mounted). On the node, you can use `singularity` to launch the container from the image on `/cvmfs/unpacked.cern.ch` with the already prepared `salt` framework.
+We'll use the tagged image for version `0.3` of the code.
+
+=== "lxplus (eos access)"
+
+    If you run on lxplus, it is advantageous to also mount the `/afs`, `/eos`, `/tmp` and `/cvmfs` directories:
+
+    ```bash
+    singularity shell -e --nv --bind $PWD,/afs,/eos,/tmp,/cvmfs \
+        /cvmfs/unpacked.cern.ch/gitlab-registry.cern.ch/atlas-flavor-tagging-tools/algorithms/salt:0-3
+    ```
+
+=== "other (cvmfs only)"
+
+    ```
+    singularity shell -e --nv --bind $PWD,/cvmfs \
+        /cvmfs/unpacked.cern.ch/gitlab-registry.cern.ch/atlas-flavor-tagging-tools/algorithms/salt:0-3
+    ```
+
+After running the [`singularity shell`](https://docs.sylabs.io/guides/latest/user-guide/cli/singularity_shell.html#singularity-shell) command, you can re-source your `.bashrc` to get some of the features of your normal terminal back by running 
+
+```bash
+source ~/.bashrc
+```
+
+
+## Tutorial tasks
+
+### 1. Fork, clone and install Salt
+
+Although the singularity images come with salt pre-installed, they do not allow for an editable version of the package.
+It's therefore highly recommended to re-install the package from source to give you full control.
+To do so, you need to do the following steps:
+
+1. Create a personal fork of Salt in Gitlab.
+2. Clone the forked repository to your machine using `git`.
+3. Switch to the `0.3` tag which is used for the tutorial.
+4. (Optional) Run the setup to switch to development mode.
+5. Run the test suite
+
+Go to the GitLab project page of Salt to begin with the task: <https://gitlab.cern.ch/atlas-flavor-tagging-tools/algorithms/salt>
+
+??? info "Hint: Create a personal fork of Salt in Gitlab"
+
+    In case you are stuck how to create your personal fork of the project, you can find some general information on git and the forking concept [here in the GitLab documentation](https://docs.gitlab.com/ee/user/project/repository/forking_workflow.html).
+
+??? info "Hint: Clone the forked repository to your machine using `git`"
+
+    The command `git clone` is the one you need. You can look up the use [here](setup.md). You can use the `--branch` argument to checkout a specific branch, e.g. `--branch 0.3` to checkout the `0.3` tag.
+
+
+??? info "Hint: installing the package in development mode"
+
+    By default, the singularity image comes with salt preinstalled, but this not an editable installation. If you want to make code changes, you can install salt in development mode using `pip` with the `-e` flag.
+
+
+??? info "Hint: Run the test suite"
+
+    You can run the suite of unit tests as outlined in the [salt documentation on ](contributing.md#test-suite). Make sure that you enter the `salt` source code directory before you execute the test suite!
+
+    ```bash
+    cd salt/
+    pytest --cov=salt --show-capture=stdout
+    ```
+
+    Note that, depending on your machine, the test suite may take a while to run. To just run a single test, you can instead use
+    
+    ```bash
+    pytest --cov=salt --show-capture=stdout tests/test_pipeline.py::TestModels::test_GN1
+    ```
+
+??? warning "Solution"
+
+    Open the website <https://gitlab.cern.ch/atlas-flavor-tagging-tools/algorithms/salt> in a browser. You may need to authenticate with your CERN login credentials. In the top right corner of the Salt project you see three buttons which show a bell (notifications), a star (to favourite the project) next to a number, and a forking graph (to fork the project) with the text "Fork" next to a number. Click on the word "Fork" to open a new website, allowing you to specify the namespace of your fork. Click on "Select a namespace", choose your CERN username, and create the fork by clicking on "Fork project".
+
+    Next, you need to clone the project using `git`. Open a fresh terminal on the cluster you are working on, create a new folder and proceed with the cloning. To do so, open your forked project in a browser. The address typically is `https://gitlab.cern.ch/<your CERN username>/salt`. When clicking on the blue "Clone" button at the right hand-side of the page, a drop-down mini-page appears with the ssh path to the forked git project. Let's check out your personal fork and add the original project as upstream:
+
+    ```bash
+    git clone ssh://git@gitlab.cern.ch:7999/<your CERN username>/salt.git
+    cd salt
+    git remote add upstream ssh://git@gitlab.cern.ch:7999/atlas-flavor-tagging-tools/algorithms/salt.git
+    git checkout 0.3
+    ```
+
+    You now forked and cloned Salt and should be ready to go!
+
+    Launch the salt singularity container (make sure to bind the directory containing the cloned git project) and change the directory to the top level directory of the project.
+
+
+    ```bash
+    singularity shell -e --nv --bind $PWD,/afs,/eos,/tmp,/cvmfs \
+    /cvmfs/unpacked.cern.ch/gitlab-registry.cern.ch/atlas-flavor-tagging-tools/algorithms/salt:0-3
+    ```
+
+    If you want to modify the salt code and contribute to development, you need to install the salt package to switch to development mode:
+
+    ```bash
+    python -m pip install -e .
+    ```
+
+    Finally, you can run a test to check if everything works fine:
+    
+    ```bash
+    pytest --cov=salt --show-capture=stdout tests/test_pipeline.py::test_GN1
+    ```
+
+    Make sure you are in the directory containing `tests/` when running the test suite.
+    The full test suite would likely take some time to run, but can be invoked with
+
+    ```bash
+    cd salt/
+    pytest --cov=salt --show-capture=stdout
+    ```
+
+
+### 2. (Optional) Set up logging
+
+[Comet.ml](https://www.comet.com/) is an online ML logging service. It's free for academic use. If you get stuck, consult the [comet documentation](https://www.comet.com/docs/v2/guides/getting-started/quickstart/) and the hints below.
+
+1. Create an account and a project.
+2. Generate an API key.
+3. Save the API key and project name in the relevant environment variables, or add them to your `~/bashrc` file.
+
+??? info "Hint: Creating an account and a project"
+
+    To use it, [create an account](https://www.comet.com/signup), and then create a project using the blue `+ New project` button in the GUI interface.
+
+??? info "Hint: Generating an API key"
+
+    You then need to create and save an API key for your project, which you can use to log your training runs. You can find the API key in the [account settings page](https://www.comet.com/account-settings/apiKeys).
+
+??? info "Hint: Saving info to environment variables"
+
+    See the Salt [logging docs](training.md#setup-logging) for info on which environment variables to use.
+
+??? danger "Warning: If you don't set up logging, you may need to disable it in the training config file"
+
+    Open the `base.yaml` config file and set `logger: False` under the `trainer:` block. Remove the existing sub-blocks under `logger:`. You also need to remove the 
+    ```
+    - class_path: pytorch_lightning.callbacks.LearningRateMonitor
+    ``` 
+    line under the `trainer: callbacks:` block, since this feature requires a logger to work.
+
+### 3. Train subjet-based tagger
+
+In this task, you will train an algorithm based on inputs from variable-radius track sub-jets of the large-radius jet. For the purposes of the tutorial, the training is configured to use the first 5M jets from the training dataset, and to run for 10 epochs.
+
+You can take a look inside the `SubjetXbb.yaml` or `norm_dict.yaml` file to see which variables will be used for training the subjet-based model. The r22 GN2 PFlow scores have been included, along with some other kinematic information about the subjet.
+
+1. Modify the `SubjetXbb.yaml` model config file to use the correct paths to your locally downloaded training files.
+2. Run the training for 10 epochs.
+
+??? info "Hint: Modifying the `SubjetXbb.yaml` config file"
+
+    You'll need to specify the `SubjetXbb.yaml` model config to use. This config file needs to be edited with the correct paths to your locally downloaded training files. You'll need to modify the `train_file`, `val_file` and `scale_dict` keys under the `data:` block.
+    To change the number of epochs the training runs for, you can modify the `max_epochs` key under the `trainer:` block.
+
+??? info "Hint: Warning about the number of workers used in the dataloaders"
+
+    By default the `--data.num_workers` flag is set to 10. On your machine, you might see a warning if this is too many, and the suggested number to use instead. Including the `--data.num_workers` flag in the training command will override the default value.
+
+??? info "Hint: Running the training"
+
+    The command to run a training is described in the Salt documentation [here](training.md#training). Make sure you specify `--config configs/SubjetXbb.yaml` to use the correct config file.
+
+??? info "Hint: Speeding up the training"
+
+    Take a look at the [dataloading](training.md#dataloading) section of the training documentation. You can try increasing the worker count, moving the files to fast storage or RAM, or reducing the overall number of training jets using the `--data.num_jets_train` and `--data.num_jets_val` flags. Finally, you could decrease the number of training epochs with `--trainer.max_epochs` flag.
+
+    Make sure that you really have copied the training files from the `/eos` location to a local path, otherwise reading the input via `/eos` can also slow down the training. You could also experiment with the [`--data.move_files_temp`](training.md#fast-disk-access) flag to transfer the data to a high-speed file reading resource before training. On lxplus, you could do so by adding `--data.move_files_temp /tmp` to the training command.
+
+
+??? warning "Solution"
+
+    After modifying the `SubjetXbb.yaml` config file as described in the first hint, you can run a test training with 
+    ```bash
+    salt fit --config configs/SubjetXbb.yaml --trainer.fast_dev_run 2
+    ```
+    Assuming this completes without any errors, you can run a full training by omitting the `--trainer.fast_dev_run` flag.
+    By default, the training uses the first GPU on the system. If you want to use a different GPU, you can specify it with the `--trainer.devices` flag as described in the [documentation](training.md#choosing-gpus). To run on the CPU, you can use `--trainer.accelerator cpu`.
+
+    For more training options, including tips for how to speed up the training, take a look at the [documentation](training.md).
+
+
+### 4. Train track-based tagger
+
+In this task, you will train an algorithm based directly on the tracks associated with the large-radius jet as inputs. Again, take a look inside the variable config to get an idea of which variables are being used in the track-based case.
+
+1. Modify the `GN2X.yaml` model config file to use the correct paths to your locally downloaded training files.
+2. Run the training for 10 epochs.
+3. Compare the losses of the subjet-based model and the track-based model.
+
+Note that you may encounter _carefully planned_ errors as part of this task, please use the hints below to try and resolve them.
+
+??? info "Hint: See hints for the previous task"
+
+    This task is very similar to Task 2, for a different model config: `GN2X.yaml`.
+
+??? info "Hint: What to do about a `MisconfigurationException`" 
+    
+    You might see the following error if you run on a machine with only one accessible GPU.
+
+    ```
+    lightning.fabric.utilities.exceptions.MisconfigurationException: You requested gpu: [0, 1]
+     But your machine only has: [0]
+    ```
+    
+    This is because the default `GN2X.yaml` config asks for 2 GPUs to speed up training.
+    This is a good opportunity to learn about requesting GPUs. 
+    You can read [here](training.md#choosing-gpus) for hints about what to do.
+
+??? info "Hint: What to do about `ValueError: Variables {...} were not found in dataset`" 
+    
+    This kind of error is quite common if the inputs become out of sync with the config.
+    In our case, the config has been updated to use the new truth label names, but the samples are somewhat older and have not been updated.
+
+    You need to modify the `GN2X.yaml` config to revert to the old label names which do not have the `ftag` prefix, e.g. `ftagTruthOriginLabel` -> `truthOriginLabel`.
+    This needs to be done in the task config, i.e. around L150 and L191.
+
+
+??? warning "Solution"
+
+    The training should run in the same way as the previous task, but will take longer to complete, since we are processing up to 100 tracks per jet, rather than just the info about 3 subjets.
+
+    You should take note of the initial and final values of the losses for the two models so that you can compare them. Which loss decreases faster from its initial value? Which is lower after the training has been completed? Why do you think this is? (Remember the default choice of 2M training jets is a small fraction of the total number of jets used to train the GN2 tagger which is used to produce the subjet scores.)
+
+    In order to fix the `MisconfigurationException`, just add `--trainer.devices=1` as a command line flag to your `salt fit` call.
+
+### 5. Modify network parameters and retrain
+
+In this task, you will modify parameters of the models trained in the previous tasks and retrain the networks. You should consider what effect the changes have on the evolution of the loss, the size of the model, and the training speed. This task is open-ended and you are encouraged to experiment with modifying the different config files.
+
+??? info "Hint: Changing the number of training jets"
+
+    Inside the model config file you wish to change, look at the `num_jets_train` config key inside the `data:` block. You can take a look at how the number of jets affects the final performance of the models. You can also configure this from the CLI using `--data.num_jets_train <num>`.
+
+??? info "Hint: Changing the model architecture"
+
+    Inside the model config file you wish to change, look at the `model:` block. The core of the model is the Graph Network/Transformer configured in the `gnn:` block. You can modify the number of layers, the number of attention heads, or the embedding dimension and see what effect this has on the training.
+
+??? info "Hint: Removing auxiliary tasks"
+
+    The `GN2X.yaml` includes the auxiliary track classification task. To remove it, look in the `tasks:` block for the list item which has `name: track_classification`. Removing the associated block will disable that part of the model and remove the associated track classification loss from the overall training loss function when training.
+
+??? warning "Solution"
+
+    This task is really just about tinkering, but you may notice the following things:
+    
+    - Reducing the number of jets is detrimental to the performance of the model. If you study how the lowest value of the loss changes as a function of the number of training jets, you should come to the conclusion that larger training samples would be beneficial to improving performance.
+    - Increasing the model size (number of layers, embedding dimension) leads to slower training times, and more overtraining (especially visible with such a small number of training jets)
+    - The loss for the subjet-based model initially drops much more quickly than for the track-based model. This is because it has outputs from an already trained model as inputs.
+    - Given enough time and enough input jets, the loss for the track-based model will drop below that of the subjet-based model. This reflects the fact that the constituent-based tagging approach is more powerful than the subjet-based approach in the long run.
+
+
+### 6. Evaluate the models on the test set
+
+After training, the model is evaluated on an independent set of testing jets. The results are used to produce performance plots. The test file you will use for the tutorial is called `pp_output_test.h5`, and contains a mixture of the different jet classes used for training. The jet labels are specified by the `R10TruthLabel_R22v1` jet variable. The classes are specified in the `enum` [here](https://gitlab.cern.ch/atlas/athena/-/blob/master/PhysicsAnalysis/AnalysisCommon/ParticleJetTools/ParticleJetTools/LargeRJetLabelEnum.h#L11).
+
+1. Choose which models you want to evaluate
+2. Run the model evaluation command
+
+??? info "Hint: Comparing to a provided pre-trained model"
+
+    If you had problems with the training, or you just want to compare to a benchmark, you can use one of the provided model checkpoints to evaluate.
+    These models are found in the `/eos/user/u/umami/tutorials/salt/2023/trained-models/` directory.
+    Note that they are not claimed to be especially well performing models (they were only trained for 5 epochs) - you may well find a configuration that outperforms them!
+
+??? info "Hint: Running the evaluation command"
+
+    Take a look at the [relevant page](evaluation.md) in the salt documentation.
+    You might want to choose to evaluate on e.g. 1e5 jets, rather than the full 2M.
+
+??? warning "Solution"
+
+    Find the path to the saved config file of the model you want to evaluate.
+    This will be located in a timestamped directory under `logs/`.
+    Also have handy the path to your `/eos/home-u/umami/tutorials/salt/2023/inputs/pp_output_test.h5` (or run directly on this file).
+    To run the evaluation, use
+
+    ```bash
+    salt test --config logs/<timestamp>/config.yaml --data.test_file path/to/pp_output_test.h5
+    ```
+    
+    If you want to evaluate the pre-trained model on EOS, this command will be for example
+    ```bash
+    salt test \ 
+      --config /eos/home-u/umami/tutorials/salt/2023/trained-models/SubjetXbb_20230920-T192350/config.yaml \
+      --data.test_file /eos/home-u/umami/tutorials/salt/2023/inputs/pp_output_test.h5
+    ```
+
+    Salt automatically evaluates the checkpoint with the lowest associated validation loss for evaluation, but you can use `--ckpt_path` to specify this manually.
+    The resulting evaluation file will be saved in `ckpts/` in the training output directory, alongside the checkpoint that was used to run the evaluation.
+    Read the [salt docs](evaluation.md#running-the-test-loop) for more info.
+
+
+### 7. Create plots which quantify the trained algorithms' performance
+
+In this task, you will create plots of performance metrics using the [`puma`](https://github.com/umami-hep/puma/) python package.
+You can find more information on how to use `puma` for plotting in the [corresponding plotting tutorial](https://ftag-docs.docs.cern.ch/software/tutorials/tutorial-plotting/).
+
+1. Produce a histogram of the jet scores for each class.
+2. Produce ROC curves as a function of signal efficiency.
+
+??? info "Hint: Installing puma"
+
+    Your Salt installation will install puma as a dependency. 
+    You can also follow the quickstart guide in the [puma docs](https://umami-hep.github.io/puma/main/index.html#) to learn how to install it yourself.
+
+??? info "Hint: Plotting histograms"
+
+    Take a look at the [relevant page](https://umami-hep.github.io/puma/main/examples/histograms.html) in the puma docs.
+
+??? info "Hint: Plotting ROCs"
+
+    Take a look at the [relevant page](https://umami-hep.github.io/puma/main/examples/rocs.html) in the puma docs.
+
+??? info "Hint: What to use as a discriminant?"
+
+    Since we have four classes, calculating a discriminant is more complicated than in the single b-tagging case. 
+    One option is to use the score for the signal class directly as the discriminant, but please note, this may lead to a suboptimal trade-off between the different background rejections.
+
+??? info "Hint: What are the truth labels"
+
+    Take a look at their definition [here](https://gitlab.cern.ch/atlas/athena/-/blob/master/PhysicsAnalysis/AnalysisCommon/ParticleJetTools/ParticleJetTools/LargeRJetLabelEnum.h#L34). Hbb is `11`, Hcc is `12`, and top is `1` and QCD is `10`.
+
+??? warning "Solution"
+
+    The prepared script `make_plots.py` provides an implementation example to plot tagger discriminant distributions and ROC curves. 
+    It is located here: `/eos/home-u/umami/tutorials/salt/2023/make_plots.py`.
+    
+    At the beginning of the script, you are invited to fill the `networks` dictionary with one or more trained model paths and dedicated keys. In the current version, the pretrained model paths have been implemented. If you want, you can modify them with your own trained models. 
+    In addition, a `reference` is also requested for the model comparisons. It should correspond to one of the model keys added to `networks`.
+    Finally, `test_path` has to be completed with the path to your `pp_output_test.h5` sample. 
+    
+    Two different tagger discriminants are defined by `Hbb` and `Hcc` signal class probabilities.
+    
+    In a first step, the script extracts the different jet tagging probabilities as well as the needed kinematic information to define the jet selection to be applied. Implemented as a boolean `mask`, the jet selection can be easily modified. Efficiencies and rejections are also computed.
+    
+    For a given tagger discriminant, the distributions corresponding to the different jet flavours and trained models are then plotted on the same figure in order to perform a complete comparison. Puma's `HistogramPlot` and `Histogram` objects offer a lot of configuration variables which can be modified according to cosmetic tastes and needs. Finally, the plotting of the corresponding ROCs follows similarly in another set of figures. 
+
+    Below, the content of `make_plots.py` is shown:
+
+
+    ```python
+
+    import h5py
+    import numpy as np
+    from puma import Histogram, HistogramPlot, Roc, RocPlot
+    from puma.metrics import calc_rej
+    from puma.utils import get_good_colours, get_good_linestyles, logger
+
+    networks = {
+        "SubjetXbb" : "/eos/home-u/umami/tutorials/salt/2023/trained-models/SubjetXbb_20230920-T192350/ckpts/epoch=004-val_loss=0.59234__test_pp_output_test.h5",
+        "GN2X" : "/eos/home-u/umami/tutorials/salt/2023/trained-models/GN2X_20230920-T193158/ckpts/epoch=004-val_loss=1.15303__test_pp_output_test.h5"
+    }
+
+    reference = "SubjetXbb"
+    test_path = '/eos/home-u/umami/tutorials/salt/2023/inputs/pp_output_test.h5'
+    num_jets = 100_000
+
+    # load test data
+    logger.info("Load data")
+    with h5py.File(test_path, 'r') as test_f:
+        jets = test_f['jets'][:num_jets]
+        jet_pt = jets['pt'] / 1000
+        jet_mass = jets['mass'] / 1000
+        jet_eta = np.abs(jets['eta'])
+        flav = jets['R10TruthLabel_R22v1']
+        mask = (jet_pt < 1000) & (jet_pt > 250) & (jet_mass > 50) & (jet_mass < 300)
+        is_QCD = flav == 10
+        is_Hcc = flav == 12
+        is_Hbb = flav == 11
+        is_Top = flav == 1
+        n_jets_QCD = np.sum(is_QCD & mask)
+        n_jets_Top = np.sum(is_Top & mask)
+
+    results = {}
+    logger.info("Calculate rejections")
+    for key, val in networks.items():
+        with h5py.File(val, 'r') as f:
+            jets = f['jets'][:num_jets]
+            pHbb = jets[f'{key}_phbb']
+            pHcc = jets[f'{key}_phcc']
+            pQCD = jets[f'{key}_pqcd']
+            pTop = jets[f'{key}_ptop']
+            disc_Hbb = pHbb
+            disc_Hcc = pHcc
+
+            sig_eff = np.linspace(0.4, 1, 100)
+            Hbb_rej_QCD = calc_rej(disc_Hbb[is_Hbb & mask], disc_Hbb[is_QCD & mask], sig_eff)
+            Hbb_rej_Top = calc_rej(disc_Hbb[is_Hbb & mask], disc_Hbb[is_Top & mask], sig_eff)
+            Hcc_rej_QCD = calc_rej(disc_Hcc[is_Hcc & mask], disc_Hcc[is_QCD & mask], sig_eff)
+            Hcc_rej_Top = calc_rej(disc_Hcc[is_Hcc & mask], disc_Hcc[is_Top & mask], sig_eff)
+            results[key] = {
+                'sig_eff' : sig_eff,
+                'disc_Hbb' : disc_Hbb,
+                'disc_Hcc' : disc_Hcc,
+                'Hbb_rej_QCD' : Hbb_rej_QCD,
+                'Hbb_rej_Top' : Hbb_rej_Top,
+                'Hcc_rej_QCD' : Hcc_rej_QCD,
+                'Hcc_rej_Top' : Hcc_rej_Top
+            }
+
+    logger.info("Plotting Discriminants.")
+    plot_histo = {
+        key : HistogramPlot(
+            n_ratio_panels=1,
+            ylabel="Normalised number of jets",
+            xlabel=f"{key}-jet discriminant",
+            logy=True,
+            leg_ncol=1,
+            figsize=(6.5, 4.5),
+            bins=np.linspace(0, 1, 50),
+            y_scale=1.5,
+            atlas_second_tag="$\\sqrt{s}=13$ TeV, Xbb jets",
+        ) for key in ['Hbb', 'Hcc']}
+    linestyles = get_good_linestyles()[:len(networks.keys())]
+    colours = get_good_colours()[:3]
+    for key, value in plot_histo.items():
+        for network, linestyle in zip(networks.keys(), linestyles):
+            value.add(
+                Histogram(
+                    results[network][f'disc_{key}'][is_QCD],
+                    label="QCD jets" if network == reference else None,
+                    ratio_group="QCD",
+                    colour=colours[0],
+                    linestyle=linestyle,
+                ),
+                reference=(network == reference),
+                )
+            value.add(
+                Histogram(
+                    results[network][f'disc_{key}'][is_Top],
+                    label="Top jets" if network == reference else None,
+                    ratio_group="Top",
+                    colour=colours[1],
+                    linestyle=linestyle,
+                ),
+                reference=(network == reference),
+                )
+            value.add(
+                Histogram(
+                    results[network][f'disc_{key}'][is_Hbb if key == 'Hbb' else is_Hcc],
+                    label=f"{key} jets" if network == reference else None,
+                    ratio_group=f"{key}",
+                    colour=colours[2],
+                    linestyle=linestyle,
+                ),
+                reference=(network == reference),
+                )
+        value.draw()
+        # The lines below create a legend for the linestyles
+        value.make_linestyle_legend(
+            linestyles=linestyles, labels=networks.keys(), bbox_to_anchor=(0.5, 1)
+        )
+        value.savefig(f"disc_{key}.png", transparent=False)
+
+    # here the plotting of the roc starts
+    logger.info("Plotting ROC curves.")
+    plot_roc = {
+        key : RocPlot(
+            n_ratio_panels=2,
+            ylabel="Background rejection",
+            xlabel=f"{key}-jet efficiency",
+            atlas_second_tag="$\\sqrt{s}=13$ TeV, Xbb jets",
+            figsize=(6.5, 6),
+            y_scale=1.4,
+        ) for key in ['Hbb', 'Hcc']}
+
+    for key, value in plot_roc.items():
+        for network in networks.keys():
+            value.add_roc(
+                Roc(
+                    sig_eff,
+                    results[network][f'{key}_rej_QCD'],
+                    n_test=n_jets_QCD,
+                    rej_class="qcd",
+                    signal_class=f"{key}",
+                    label=f"{network}",
+                ),
+                reference=(reference == network),
+            )
+            value.add_roc(
+                Roc(
+                    sig_eff,
+                    results[network][f'{key}_rej_Top'],
+                    n_test=n_jets_Top,
+                    rej_class="top",
+                    signal_class=f"{key}",
+                    label=f"{network}",
+                ),
+                reference=(reference == network),
+            )
+        # setting which flavour rejection ratio is drawn in which ratio panel
+        value.set_ratio_class(1, "qcd")
+        value.set_ratio_class(2, "top")
+        value.draw()
+        value.savefig(f"roc_{key}.png", transparent=False)
+    ```
diff --git a/mkdocs.yaml b/mkdocs.yaml
index 84ac6e08..9440617f 100644
--- a/mkdocs.yaml
+++ b/mkdocs.yaml
@@ -38,6 +38,7 @@ nav:
   - ONNX Export: export.md
   - Contributing: contributing.md
   - Tutorial: tutorial.md
+  - Tutorial (Xbb): tutorial-Xbb.md
   - API Reference:
       - api/data.md
       - api/initialisation.md
-- 
GitLab


From a2244e5060e4cb4dd5a76e6b738c6b831e99c071 Mon Sep 17 00:00:00 2001
From: Wei Sheng Lai <wei.sheng.lai@cern.ch>
Date: Thu, 7 Mar 2024 09:28:54 +0100
Subject: [PATCH 02/30] Adding more pre-commit ruff rules for doc string

---
 pyproject.toml             |  2 +-
 salt/models/task.py        |  2 ++
 salt/utils/mask_utils.py   |  2 ++
 salt/utils/tensor_utils.py | 13 ++++++++-----
 4 files changed, 13 insertions(+), 6 deletions(-)

diff --git a/pyproject.toml b/pyproject.toml
index 0a239924..d4617775 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -32,7 +32,7 @@ build-backend = "setuptools.build_meta"
 [tool.ruff]
 line-length = 100
 preview = true
-lint.select = ["ALL"]
+lint.select = ["ALL", "D212", "D417"]
 lint.ignore = [
     "COM", "D100", "D101", "D102", "D103", "D104", "D105", "D205", "D401", "EM", "FIX", "FBT",
     "S101", "S404", "S602", "PLR2004", "PLR0912", "PLR0913", "PLR0914", "PLR0915", "PLR0917",
diff --git a/salt/models/task.py b/salt/models/task.py
index 8b2645c9..bfb1db74 100644
--- a/salt/models/task.py
+++ b/salt/models/task.py
@@ -296,6 +296,8 @@ class RegressionTask(RegressionTaskBase):
 
         Parameters
         ----------
+        scaler
+            Scaler used to transform the regression targets
         **kwargs
             Keyword arguments for
             [`salt.models.RegressionTaskBase`][salt.models.RegressionTaskBase].
diff --git a/salt/utils/mask_utils.py b/salt/utils/mask_utils.py
index beb197d6..40df58b7 100644
--- a/salt/utils/mask_utils.py
+++ b/salt/utils/mask_utils.py
@@ -15,6 +15,8 @@ def build_target_masks(object_ids, input_ids, shuffle=False):
         The unqiue ids of the truth object labels
     input_ids : Tensor
         The ids of the per-input labels
+    shuffle : bool
+        Whether to shuffle the object ids
 
     Returns
     -------
diff --git a/salt/utils/tensor_utils.py b/salt/utils/tensor_utils.py
index 10fcafb2..cb6a59ee 100644
--- a/salt/utils/tensor_utils.py
+++ b/salt/utils/tensor_utils.py
@@ -22,11 +22,14 @@ def flatten_tensor_dict(
 
     Parameters
     ----------
-        x: Dictionary of tensors to flatten.
-        include: List of keys defining the tensors to be concatenated. If None, all tensors will be
-            concatenated unless defined by 'exclude'. Cannot be used with 'exclude'.
-        exclude: List of keys to exclude from the concatenation. If None, all tensors will be
-            concatenated unless defined by 'include'. Cannot be used with 'include'.
+    x : dict[str, Tensor]
+        Dictionary of tensors to flatten.
+    include : list[str] | None, optional
+        List of keys defining the tensors to be concatenated. If None, all tensors will be
+        concatenated unless defined by 'exclude'. Cannot be used with 'exclude'.
+    exclude : list[str] | None, optional
+        List of keys to exclude from the concatenation. If None, all tensors will be
+        concatenated unless defined by 'include'. Cannot be used with 'include'.
 
     Returns
     -------
-- 
GitLab


From 3fa717241efafa6601241007cdf7267872714cb1 Mon Sep 17 00:00:00 2001
From: Samuel Van Stroud <sam.van.stroud@cern.ch>
Date: Tue, 12 Mar 2024 12:01:07 +0100
Subject: [PATCH 03/30] Add Joss paper

---
 .github/workflows/joss-paper.yml |  23 ++
 .gitlab/.ci-docs.yaml            |   6 +-
 paper/paper.bib                  | 470 +++++++++++++++++++++++++++++++
 paper/paper.md                   | 140 +++++++++
 paper/salt-arch.png              | Bin 0 -> 30980 bytes
 5 files changed, 636 insertions(+), 3 deletions(-)
 create mode 100644 .github/workflows/joss-paper.yml
 create mode 100644 paper/paper.bib
 create mode 100644 paper/paper.md
 create mode 100644 paper/salt-arch.png

diff --git a/.github/workflows/joss-paper.yml b/.github/workflows/joss-paper.yml
new file mode 100644
index 00000000..f85b711e
--- /dev/null
+++ b/.github/workflows/joss-paper.yml
@@ -0,0 +1,23 @@
+on: [push]
+
+jobs:
+  paper:
+    runs-on: ubuntu-latest
+    name: Paper Draft
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v3
+      - name: Build draft PDF
+        uses: openjournals/openjournals-draft-action@master
+        with:
+          journal: joss
+          # This should be the path to the paper within your repo.
+          paper-path: paper/paper.md
+      - name: Upload
+        uses: actions/upload-artifact@v1
+        with:
+          name: paper
+          # This is the output path where Pandoc will write the compiled
+          # PDF. Note, this should be the same directory as the input
+          # paper.md
+          path: paper/paper.pdf
diff --git a/.gitlab/.ci-docs.yaml b/.gitlab/.ci-docs.yaml
index 49bf5334..7b2ff477 100644
--- a/.gitlab/.ci-docs.yaml
+++ b/.gitlab/.ci-docs.yaml
@@ -13,6 +13,6 @@ pages:
   variables:
     GIT_DEPTH: 0
     GIT_STRATEGY: clone
-  #rules:
-  #  - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH && $CI_PROJECT_PATH=="atlas-flavor-tagging-tools/algorithms/salt"
-  #    when: always
+  rules:
+    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH && $CI_PROJECT_PATH=="atlas-flavor-tagging-tools/algorithms/salt"
+      when: always
diff --git a/paper/paper.bib b/paper/paper.bib
new file mode 100644
index 00000000..2731117f
--- /dev/null
+++ b/paper/paper.bib
@@ -0,0 +1,470 @@
+% LHC
+@article{Evans:2008,
+  author       = {Evans, Lyndon and Bryant, Philip},
+  title        = {{LHC Machine}},
+  journal      = {JINST},
+  volume       = {3},
+  pages        = {S08001},
+  doi          = {10.1088/1748-0221/3/08/S08001},
+  year         = {2008},
+  slaccitation = {%%CITATION = JINST,3,S08001;%%}
+}
+
+% ATLAS
+@article{ATLAS:2008,
+  author       = {{ATLAS Collaboration}},
+  title        = {{The ATLAS Experiment at the CERN Large Hadron Collider}},
+  journal      = {JINST},
+  volume       = {3},
+  year         = {2008},
+  pages        = {S08003},
+  doi          = {10.1088/1748-0221/3/08/S08003},
+  primaryclass = {hep-ex}
+}
+
+% ATLAS software
+@booklet{ATLAS:2021,
+  author       = {{ATLAS Collaboration}},
+  title        = {{The ATLAS Collaboration Software and Firmware}},
+  howpublished = {{ATL-SOFT-PUB-2021-001}},
+  url          = {https://cds.cern.ch/record/2767187},
+  year         = {2021}
+}
+
+% Python
+@book{Rossum:2009,
+  author    = {Van Rossum, Guido and Drake, Fred L.},
+  title     = {Python 3 Reference Manual},
+  year      = {2009},
+  isbn      = {1441412697},
+  publisher = {CreateSpace},
+  address   = {Scotts Valley, CA}
+}
+
+% PEP8
+@techreport{PEP8:2001,
+  author = {Guido van Rossum and Barry Warsaw and Nick Coghlan},
+  title  = {Style Guide for {Python} Code},
+  year   = {2001},
+  type   = {PEP},
+  number = {8},
+  url    = {https://www.python.org/dev/peps/pep-0008/}
+}
+
+% YAML
+@misc{YAML:2021,
+  title        = {{YAML} Ain’t Markup Language (YAML™) version 1.2},
+  howpublished = {\url{https://yaml.org/spec/1.2.2/}},
+  year         = 2001,
+  note         = {Accessed: 2023-05-11}
+}
+
+% Setuptools
+@misc{setuptools:2023,
+  title        = {{Setuptools}},
+  howpublished = {\url{https://github.com/pypa/setuptools}},
+  year         = 2013,
+  note         = {Accessed: 2023-05-11}
+}
+
+
+% Flake8
+@misc{flake8:2023,
+  title        = {{Flake8}},
+  howpublished = {\url{https://github.com/PyCQA/flake8}},
+  year         = 2010,
+  note         = {Accessed: 2023-05-11}
+}
+
+% Black
+@misc{black:2023,
+  title        = {{Black}},
+  howpublished = {\url{https://github.com/psf/black}},
+  year         = 2018,
+  note         = {Accessed: 2023-05-11}
+}
+
+% Pytest
+@misc{pytest:2004,
+  title  = {pytest 7.3},
+  author = {Krekel, Holger and Oliveira, Bruno and Pfannschmidt, Ronny and Bruynooghe, Floris and Laugher, Brianna and Bruhin, Florian},
+  year   = {2004},
+  url    = {https://github.com/pytest-dev/pytest}
+}
+
+% mkdocs
+@misc{mkdocs:2023,
+  title        = {{MkDocs}},
+  howpublished = {\url{https://github.com/mkdocs/mkdocs}},
+  year         = 2014,
+  note         = {Accessed: 2023-05-11}
+}
+
+% Pytest
+@misc{sphinx:2023,
+  title  = {Sphinx},
+  author = {Brandl, Georg},
+  year   = {2008},
+  url    = {https://www.sphinx-doc.org}
+}
+
+
+% Docker
+@article{Merkel:2014,
+  title   = {Docker: lightweight linux containers for consistent development and deployment},
+  author  = {Merkel, Dirk},
+  journal = {Linux journal},
+  volume  = {2014},
+  number  = {239},
+  pages   = {2},
+  year    = {2014}
+}
+
+% TensorFlow
+@misc{tensorflow:2015,
+  title  = { {TensorFlow}: Large-Scale Machine Learning on Heterogeneous Systems},
+  url    = {https://www.tensorflow.org/},
+  note   = {Software available from tensorflow.org},
+  author = {
+            Mart\'{i}n~Abadi and
+            Ashish~Agarwal and
+            Paul~Barham and
+            Eugene~Brevdo and
+            Zhifeng~Chen and
+            Craig~Citro and
+            Greg~S.~Corrado and
+            Andy~Davis and
+            Jeffrey~Dean and
+            Matthieu~Devin and
+            Sanjay~Ghemawat and
+            Ian~Goodfellow and
+            Andrew~Harp and
+            Geoffrey~Irving and
+            Michael~Isard and
+            Yangqing Jia and
+            Rafal~Jozefowicz and
+            Lukasz~Kaiser and
+            Manjunath~Kudlur and
+            Josh~Levenberg and
+            Dandelion~Man\'{e} and
+            Rajat~Monga and
+            Sherry~Moore and
+            Derek~Murray and
+            Chris~Olah and
+            Mike~Schuster and
+            Jonathon~Shlens and
+            Benoit~Steiner and
+            Ilya~Sutskever and
+            Kunal~Talwar and
+            Paul~Tucker and
+            Vincent~Vanhoucke and
+            Vijay~Vasudevan and
+            Fernanda~Vi\'{e}gas and
+            Oriol~Vinyals and
+            Pete~Warden and
+            Martin~Wattenberg and
+            Martin~Wicke and
+            Yuan~Yu and
+            Xiaoqiang~Zheng},
+  year   = {2015}
+}
+
+% Keras
+@misc{chollet:2015,
+  title        = {Keras},
+  author       = {Chollet, Fran\c{c}ois},
+  year         = {2015},
+  howpublished = {\url{https://keras.io}}
+}
+
+% LWTNN
+@misc{Guest:2022,
+  doi       = {10.5281/ZENODO.6467676},
+  url       = {https://zenodo.org/record/6467676},
+  author    = {Guest,  Daniel Hay and Smith,  Joshua Wyatt and Paganini,  Michela and Kagan,  Michael and Lanfermann,  Marie and Krasznahorkay,  Attila and Marley,  Daniel Edison and Ghosh,  Aishik and Huth,  Benjamin and Feickert,  Matthew},
+  title     = {lwtnn/lwtnn: Version 2.13},
+  publisher = {Zenodo},
+  year      = {2022},
+  copyright = {MIT License}
+}
+
+% HDF5
+@online{hdf5:2023,
+  author = {{The HDF Group}},
+  title  = {{Hierarchical Data Format, version 5}},
+  year   = {1997},
+  note   = {https://www.hdfgroup.org/HDF5/}
+}
+
+% Puma
+@misc{Birk:2023,
+  doi       = {10.5281/ZENODO.7806395},
+  url       = {https://zenodo.org/record/7806395},
+  author    = {Birk,  Joschka and Froch,  Alexander and VS,  Sam and Guth,  Manuel and Gadow,  Philipp and Schr\"oer, Tomke and Kobylianskii,  Dmitrii and Rettie,  Sébastien and Strebler,  Thomas},
+  title     = {umami-hep/puma: v0.2.4},
+  publisher = {Zenodo},
+  year      = {2023},
+  copyright = {Open Access}
+}
+
+% Matplotlib
+@article{Hunter:2007,
+  author    = {Hunter, J. D.},
+  title     = {Matplotlib: A 2D graphics environment},
+  journal   = {Computing in Science \& Engineering},
+  volume    = {9},
+  number    = {3},
+  pages     = {90--95},
+  abstract  = {Matplotlib is a 2D graphics package used for Python for
+               application development, interactive scripting, and publication-quality
+               image generation across user interfaces and operating systems.},
+  publisher = {IEEE COMPUTER SOC},
+  doi       = {10.1109/MCSE.2007.55},
+  year      = 2007
+}
+
+% Machine learning in HEP
+@article{Guest:2018,
+  doi       = {10.1146/annurev-nucl-101917-021019},
+  url       = {https://doi.org/10.1146/annurev-nucl-101917-021019},
+  year      = {2018},
+  month     = oct,
+  publisher = {Annual Reviews},
+  volume    = {68},
+  number    = {1},
+  pages     = {161--181},
+  author    = {Dan Guest and Kyle Cranmer and Daniel Whiteson},
+  title     = {Deep Learning and Its Application to {LHC} Physics},
+  journal   = {Annual Review of Nuclear and Particle Science}
+}
+
+% Machine learning in CMS
+@article{Cagnotta:2022,
+  doi       = {10.3390/app122010574},
+  url       = {https://doi.org/10.3390/app122010574},
+  year      = {2022},
+  month     = oct,
+  publisher = {{MDPI} {AG}},
+  volume    = {12},
+  number    = {20},
+  pages     = {10574},
+  author    = {Antimo Cagnotta and Francesco Carnevali and Agostino De Iorio},
+  title     = {Machine Learning Applications for Jet Tagging in the {CMS} Experiment},
+  journal   = {Applied Sciences}
+}
+
+% DeepJet
+@article{Bols:2020,
+  doi       = {10.1088/1748-0221/15/12/p12012},
+  url       = {https://doi.org/10.1088/1748-0221/15/12/p12012},
+  year      = {2020},
+  month     = dec,
+  publisher = {{IOP} Publishing},
+  volume    = {15},
+  number    = {12},
+  pages     = {P12012--P12012},
+  author    = {E. Bols and J. Kieseler and M. Verzetti and M. Stoye and A. Stakia},
+  title     = {Jet flavour classification using {DeepJet}},
+  journal   = {Journal of Instrumentation}
+}
+
+% ATLAS Flavour Tagging paper
+@article{ATLAS:2019,
+  author        = {{ATLAS Collaboration}},
+  title         = {{ATLAS flavour-tagging algorithms for the LHC Run~2 \(pp\) collision dataset}},
+  year          = {2022},
+  reportnumber  = {CERN-EP-2022-226},
+  eprint        = {2211.16345},
+  archiveprefix = {arXiv},
+  primaryclass  = {physics.data-an}
+}
+
+
+% ParticleNet
+@article{Qu:2020,
+  doi       = {10.1103/physrevd.101.056019},
+  url       = {https://doi.org/10.1103/physrevd.101.056019},
+  year      = {2020},
+  month     = mar,
+  publisher = {American Physical Society ({APS})},
+  volume    = {101},
+  number    = {5},
+  author    = {Huilin Qu and Loukas Gouskos},
+  title     = {Jet tagging via particle clouds},
+  journal   = {Physical Review D}
+}
+
+% ParT
+@inproceedings{Qu:2022,
+  title     = {Particle Transformer for Jet Tagging},
+  author    = {Qu, Huilin and Li, Congqiao and Qian, Sitian},
+  booktitle = {Proceedings of the 39th International Conference on Machine Learning},
+  pages     = {18281--18292},
+  year      = {2022},
+  editor    = {Chaudhuri, Kamalika and Jegelka, Stefanie and Song, Le and Szepesvari, Csaba and Niu, Gang and Sabato, Sivan},
+  volume    = {162},
+  series    = {Proceedings of Machine Learning Research},
+  month     = {17--23 Jul},
+  publisher = {PMLR},
+  pdf       = {https://proceedings.mlr.press/v162/qu22b/qu22b.pdf},
+  url       = {https://proceedings.mlr.press/v162/qu22b.html},
+  abstract  = {Jet tagging is a critical yet challenging classification task in particle physics. While deep learning has transformed jet tagging and significantly improved performance, the lack of a large-scale public dataset impedes further enhancement. In this work, we present JetClass, a new comprehensive dataset for jet tagging. The JetClass dataset consists of 100 M jets, about two orders of magnitude larger than existing public datasets. A total of 10 types of jets are simulated, including several types unexplored for tagging so far. Based on the large dataset, we propose a new Transformer-based architecture for jet tagging, called Particle Transformer (ParT). By incorporating pairwise particle interactions in the attention mechanism, ParT achieves higher tagging performance than a plain Transformer and surpasses the previous state-of-the-art, ParticleNet, by a large margin. The pre-trained ParT models, once fine-tuned, also substantially enhance the performance on two widely adopted jet tagging benchmarks. The dataset, code and models are publicly available at https://github.com/jet-universe/particle_transformer.}
+}
+
+% ADAM
+@inproceedings{Kingma:2015,
+  author    = {Diederik P. Kingma and
+               Jimmy Ba},
+  editor    = {Yoshua Bengio and
+               Yann LeCun},
+  title     = {Adam: {A} Method for Stochastic Optimization},
+  booktitle = {3rd International Conference on Learning Representations, {ICLR} 2015,
+               San Diego, CA, USA, May 7-9, 2015, Conference Track Proceedings},
+  year      = {2015},
+  url       = {http://arxiv.org/abs/1412.6980},
+  timestamp = {Thu, 25 Jul 2019 14:25:37 +0200},
+  biburl    = {https://dblp.org/rec/journals/corr/KingmaB14.bib},
+  bibsource = {dblp computer science bibliography, https://dblp.org}
+}
+
+
+% JetClass
+@misc{JetClass:2022,
+  doi       = {10.5281/ZENODO.6619768},
+  url       = {https://zenodo.org/record/6619768},
+  author    = {Qu,  Huilin and Li,  Congqiao and Qian,  Sitian},
+  keywords  = {Particle physics,  Jet,  Jet tagging,  Machine learning},
+  title     = {JetClass: A Large-Scale Dataset for Deep Learning in Jet Physics},
+  publisher = {Zenodo},
+  year      = {2022},
+  copyright = {Creative Commons Attribution 4.0 International}
+}
+
+% Deep Learning
+@article{LeCun:2015,
+  doi       = {10.1038/nature14539},
+  url       = {https://doi.org/10.1038/nature14539},
+  year      = {2015},
+  month     = may,
+  publisher = {Springer Science and Business Media {LLC}},
+  volume    = {521},
+  number    = {7553},
+  pages     = {436--444},
+  author    = {Yann LeCun and Yoshua Bengio and Geoffrey Hinton},
+  title     = {Deep learning},
+  journal   = {Nature}
+}
+
+% Deep Sets
+@inproceedings{Zaheer:2017,
+  author    = {Zaheer, Manzil and Kottur, Satwik and Ravanbhakhsh, Siamak and P\'{o}czos, Barnab\'{a}s and Salakhutdinov, Ruslan and Smola, Alexander J},
+  title     = {Deep Sets},
+  year      = {2017},
+  isbn      = {9781510860964},
+  publisher = {Curran Associates Inc.},
+  address   = {Red Hook, NY, USA},
+  booktitle = {Proceedings of the 31st International Conference on Neural Information Processing Systems},
+  pages     = {3394–3404},
+  numpages  = {11},
+  location  = {Long Beach, California, USA},
+  series    = {NIPS'17}
+}
+
+% SHAP
+@incollection{NIPS:2017,
+  title     = {A Unified Approach to Interpreting Model Predictions},
+  author    = {Lundberg, Scott M and Lee, Su-In},
+  booktitle = {Advances in Neural Information Processing Systems 30},
+  editor    = {I. Guyon and U. V. Luxburg and S. Bengio and H. Wallach and R. Fergus and S. Vishwanathan and R. Garnett},
+  pages     = {4765--4774},
+  year      = {2017},
+  publisher = {Curran Associates, Inc.},
+  url       = {http://papers.nips.cc/paper/7062-a-unified-approach-to-interpreting-model-predictions.pdf}
+}
+
+@incollection{pytorch,
+  title     = {PyTorch: An Imperative Style, High-Performance Deep Learning Library},
+  author    = {Paszke, Adam and Gross, Sam and Massa, Francisco and Lerer, Adam and Bradbury, James and Chanan, Gregory and Killeen, Trevor and Lin, Zeming and Gimelshein, Natalia and Antiga, Luca and Desmaison, Alban and Kopf, Andreas and Yang, Edward and DeVito, Zachary and Raison, Martin and Tejani, Alykhan and Chilamkurthy, Sasank and Steiner, Benoit and Fang, Lu and Bai, Junjie and Chintala, Soumith},
+  booktitle = {Advances in Neural Information Processing Systems 32},
+  pages     = {8024--8035},
+  year      = {2019},
+  publisher = {Curran Associates, Inc.},
+  url       = {http://papers.neurips.cc/paper/9015-pytorch-an-imperative-style-high-performance-deep-learning-library.pdf}
+}
+
+@software{lightning,
+  author  = {Falcon, William and {The PyTorch Lightning team}},
+  doi     = {10.5281/zenodo.3828935},
+  license = {Apache-2.0},
+  month   = mar,
+  title   = {{PyTorch Lightning}},
+  url     = {https://github.com/Lightning-AI/lightning},
+  version = {1.4},
+  year    = {2019}
+}
+
+@techreport{GN1,
+  collaboration = {ATLAS},
+  title         = {{Graph Neural Network Jet Flavour Tagging with the ATLAS
+                   Detector}},
+  institution   = {CERN},
+  reportnumber  = {ATL-PHYS-PUB-2022-027},
+  address       = {Geneva},
+  year          = {2022},
+  url           = {https://cds.cern.ch/record/2811135},
+  note          = {All figures including auxiliary figures are available at
+                   https://atlas.web.cern.ch/Atlas/GROUPS/PHYSICS/PUBNOTES/ATL-PHYS-PUB-2022-027}
+}
+
+@techreport{GN2X,
+  collaboration = {ATLAS},
+  title         = {{Transformer Neural Networks for Identifying Boosted Higgs
+                   Bosons decaying into $b\bar{b}$ and $c\bar{c}$ in ATLAS}},
+  institution   = {CERN},
+  reportnumber  = {ATL-PHYS-PUB-2023-021},
+  address       = {Geneva},
+  year          = {2023},
+  url           = {https://cds.cern.ch/record/2866601},
+  note          = {All figures including auxiliary figures are available at
+                   https://atlas.web.cern.ch/Atlas/GROUPS/PHYSICS/PUBNOTES/ATL-PHYS-PUB-2023-021}
+}
+
+@misc{onnx,
+  author       = {Bai, Junjie and Lu, Fang and Zhang, Ke and others},
+  title        = {ONNX: Open Neural Network Exchange},
+  year         = {2019},
+  publisher    = {GitHub},
+  journal      = {GitHub repository},
+  howpublished = {\url{https://github.com/onnx/onnx}},
+  commit       = {94d238d96e3fb3a7ba34f03c284b9ad3516163be}
+}
+
+
+@misc{umami,
+  author       = {Barr, Jackson and others},
+  title        = {Umami: A Python toolkit for jet flavour tagging},
+  year         = {2024},
+  publisher    = {GitHub},
+  journal      = {GitHub repository},
+  howpublished = {\url{https://github.com/umami-hep/umami-preprocessing}},
+  commit       = {640369546e65937db79f0f7bbc86ea4c3114943c}
+}
+
+
+@article{2017arXiv170603762V,
+  author        = {{Vaswani}, Ashish and {Shazeer}, Noam and {Parmar}, Niki and {Uszkoreit}, Jakob and {Jones}, Llion and {Gomez}, Aidan N. and {Kaiser}, Lukasz and {Polosukhin}, Illia},
+  title         = {{Attention Is All You Need}},
+  journal       = {arXiv e-prints},
+  keywords      = {Computer Science - Computation and Language, Computer Science - Machine Learning},
+  year          = 2017,
+  month         = jun,
+  eid           = {arXiv:1706.03762},
+  pages         = {arXiv:1706.03762},
+  doi           = {10.48550/arXiv.1706.03762},
+  archiveprefix = {arXiv},
+  eprint        = {1706.03762},
+  primaryclass  = {cs.CL},
+  adsurl        = {https://ui.adsabs.harvard.edu/abs/2017arXiv170603762V},
+  adsnote       = {Provided by the SAO/NASA Astrophysics Data System}
+}
+
diff --git a/paper/paper.md b/paper/paper.md
new file mode 100644
index 00000000..bc1a8e5b
--- /dev/null
+++ b/paper/paper.md
@@ -0,0 +1,140 @@
+---
+title: 'Salt: Multimodal Multitask Machine Learning for High Energy Physics'
+
+tags:
+  - Python
+  - high energy physics
+  - machine learning
+  - jet physics
+  - flavour tagging
+
+authors:
+  - name: Jackson Barr
+    orcid: 0000-0002-9752-9204 
+    affiliation: 1
+  - name: Diptaparna Biswas
+    orcid: 0000-0002-7543-3471
+    affiliation: 2
+  - name: Maxence Draguet
+    orcid: 0000-0003-1530-0519
+    affiliation: 3
+  - name: Philipp Gadow
+    orcid: 0000-0003-4475-6734
+    affiliation: 4
+  - name: Emil Haines
+    orcid: 0000-0002-5417-2081
+    affiliation: 1
+  - name: Osama Karkout
+    orcid: 0000-0002-4907-9499
+    affiliation: 5
+  - name: Dmitrii Kobylianskii
+    orcid: 0009-0002-0070-5900
+    affiliation: 6
+  - name: Wei Lai
+    orcid: 0009-0001-6726-9851
+    affiliation: 1
+  - name: Matthew Leigh
+    orcid: 0000-0003-1406-1413
+    affiliation: 7
+  - name: Ivan Oleksiyuk
+    orcid: 0000-0002-4784-6340
+    affiliation: 7
+  - name: Nikita Pond
+    orcid: 0000-0002-5966-0332
+    affiliation: 1
+  - name: Sébastien Rettie
+    orcid: 0000-0002-7092-3893
+    affiliation: 4
+  - name: Andrius Vaitkus
+    orcid: 0000-0002-0393-666X
+    affiliation: 1
+  - name: Samuel Van Stroud
+    orcid: 0000-0002-7969-0301
+    affiliation: 1
+  - name: Johannes Wagner
+    orcid: 0000-0002-5588-0020
+    affiliation: 9
+
+affiliations:
+ - name: University College London, United Kingdom
+   index: 1
+ - name: University of Siegen
+   index: 2
+ - name: University of Oxford, United Kingdom
+   index: 3
+ - name: European Laboratory for Particle Physics CERN, Switzerland
+   index: 4
+ - name: Nikhef
+   index: 5
+ - name: Department of Particle Physics and Astrophysics, Weizmann Institute of Science, Israel
+   index: 6
+ - name: Université de Genève, Switzerland
+   index: 7
+ - name: Technical University of Munich, Germany
+   index: 8
+ - name: University of California, Berkeley
+   index: 9
+
+date: 15 January 2024
+bibliography: paper.bib
+
+---
+
+# Summary
+
+High energy physics studies the fundamental particles and forces that constitute the universe, often through experiments conducted in large particle accelerators such as the Large Hadron Collider (LHC) [@Evans:2008].
+`Salt` is a Python application developed for the high energy physics community that streamlines the training and deployment of advanced machine learning (ML) models, making them more accessible and promoting shared best practices.
+`Salt` features a generic multimodal, multitask model skeleton which, coupled with a strong emphasis on modularity, configurability, and ease of use, can be used to tackle a wide variety of high energy physics ML applications.
+
+Some key features of `Salt` are listed below:
+
+- Based on established frameworks: `Salt` is built upon PyTorch [@pytorch] and Lightning [@lightning] for maximum performance and scalability with minimal boilerplate code.
+- Multimodal, multitask models: `Salt` models support multimodal inputs and can be configured to perform various tasks such as classification, regression, segmentation, and edge classification tasks. Any combination of these can be used to flexibly define models for multitask learning problems.
+- Customisable and extensible: `Salt` supports full customisation of training and model configuration through YAML config files. Its modular design allows for the easy integration of custom dataloaders, layers, and models.
+- Train at scale: `Salt` can handle large volumes of data with efficient HDF5 [@hdf5:2023] dataloaders. It also includes multi-GPU support from Lightning, enabling distributed training.
+- Deployment ready: `Salt` facilitates ONNX [@onnx] serialization for integrating models into C++ based software environments.
+
+
+# Statement of need
+
+In high energy physics research the reliance on ML for data analysis and object classification is growing [@Guest:2018; @Cagnotta:2022].
+`Salt` meets this growing need by providing a versatile, performant, and user-friendly tool for developing advanced ML models.
+`Salt` was originally developed to train state of the art flavour tagging models at the ATLAS experiment [@ATLAS:2008] at the LHC.
+Flavour tagging, the identification of jets from bottom and charm quarks, plays a crucial role in analysing ATLAS collision data. This process is key for precision Standard Model measurements, particularly in the characterisation of the Higgs boson, and for investigating new phenomena.
+The unique characteristics of hadrons containing bottom and charm quarks – such as their long lifetimes, high mass, and high decay multiplicity – create distinct signatures in particle detectors that can be effectively exploited by ML algorithms.
+The presence of hadrons containing bottom and charm quarks can be inferred via the identification of approximately 3-5 reconstructed charged particle trajectories from the weak decay of the heavy flavour hadron amidst several more tracks from the primary proton-proton interaction vertex.
+
+While initially developed for flavour tagging, `Salt` has evolved into a flexible tool that can be used for a wide range of tasks, from object and event classification, regression of object properties, to object reconstruction (via edge classification or input segmentation), demonstrating its broad applicability across various data analysis challenges in high energy physics.
+
+
+# Model Architecture
+
+Salt is designed to be fully modular, but ships with a flexible model architecture that can be configured for a variety of use cases.
+This architecture facilitates the training of multimodal and multitask models as depicted in \autoref{fig:salt-arch}, and is designed to take advantage of multiple input modalities.
+In the context of jet classification, these input modalities might include global features of the jet and varying numbers of jet constituents such as charged particle trajectories, calorimeter energy depositions, reconstructed leptons, or inner detector spacepoints.
+The architecture is described briefly below.
+First, any global input features are concatenated with the features of each constituent.
+Next, an initial embedding to a shared representation space is performed separately for each type of constituent.
+The different types of constituents are then projected into a shared representation space by a series of initialisation networks.
+The embedded constituents are then combined and fed into a encoder network that processes constituents of different modalities in a unified way.
+The encoder then outputs to a set of task-specific modules, each tailored to a specific learning objective.
+This architecture allows the model to leverage all the available detector information, leading to improved performance.
+A concrete example of this architecture is in use at ATLAS [@GN1; @GN2X].
+
+![This diagram illustrates the flow of information within a generic model trained using `Salt`. In this example, global object features are provided alongside two types of constituents. The model is configured with three training objectives, each of which may relate to the global object or one of the constituent modalities. Concatenation is denoted by $\oplus$.\label{fig:salt-arch}](salt-arch.png){ width=90% }
+
+
+# Related work
+
+`Umami` [@umami] is a related software package in use at ATLAS. 
+While `Salt` relies on similar preprocessing techniques as those provided by `Umami`, it provides several additional features which make it a more powerful and flexible tool for creating advanced ML models.
+Namely, `Salt` provides support for multimodal and multitask learning, optimised Transformer encoders [@2017arXiv170603762V], and distributed model training.
+
+
+# Acknowledgements
+
+The development of `Salt` is part of the offline software research and development programme of the ATLAS Collaboration, and we thank the collaboration for its support and cooperation.
+This work is funded in part by the UK's Science and Technology Facilities Council via University College London's Centre for Doctoral Training in Data Intensive Science, and the Royal Society.
+
+
+# References
diff --git a/paper/salt-arch.png b/paper/salt-arch.png
new file mode 100644
index 0000000000000000000000000000000000000000..1105b651ff860ad1b9a2392f2a6e511b63bec561
GIT binary patch
literal 30980
zcmd>li9b~9AFzA7H@A(JTPoq+He{Jp$evqKGue)P4@pgS#xldGRFrVDO_o74=~xmP
zVhoie3CXd|7?bS77~9NX4DZ3c_wDyDyl<b6PoHz1dA|GeeZJedZ)$9~ZS(%kA|fK&
zE?zhXh=_=SA|e}p|7`>Orsv`J=kR}jxLp8@L`1MhMMQ4i5fNF0U)>xP5y2c65n){x
z5z%}sA|m0NQDvqrBJ%Surk5<vudJ-_c>MZ?Px#3DaS4wrDYb3w9R)8+y#uhC`pDst
zQNhyECBV+x%&oxvR-0eaa7bpoS6t=w=s1S}$4mCaCnU!UCIvS3^^TW^-Q3%pT`KLI
z;s9%J6u=9AwsNxoZ*}9+xl5DoS68lI#~WJ+B0#~lYrPC6(bCq@4Sfl)=XrP#FXMV$
z+$OQPlbk81doGwVzsP3n13EtL`>lt}o7n0=U(m}s)bXN|`IQ8BH1WFW4I2{R4YS7d
zNw!tgI(JZsboQ(u{P}y=$h}*)f4%<DTm9mDQ&WM5$E`C~wucXsk;uK9HYHwkRhE!e
z-no;7$(Y0DGVSfp9XQ~vq{O;>dG5-UuU1yKR8`4#p^4hsLD*aTg@rQ)Cg(3%j!&?A
z15+;<n1cl`G3Hmp>V2cSakpE8yo%gn`tbG%<~lCwX19?l#!7)z=zVe$+jeg?^w&S4
zeeg=QeMKepymRn{>-Q`lR$dB7vP)}mE1SJS=D8M6n&6)3pzOYUg`A#!yIe2=L>B2=
zUUx1Ux6bT3gULFmXK8Trg=^`I{j>g^e<&&#qc6o!PuPcPT#Ma*Tz}IJ2_X5C!6i#G
z8)vjn5box^7ca?R;mgLR77wpLUrbnfX71G+{={t3ixTp{;E;9Vdxz{UYVEtL#?E24
zqWhTr`WMU|C8b8x2S-6+W*01}#FQ0Snm@Us49|%~|0;C>e%R!D!8SldMETI_-%lYV
zy&(9dXy8So^P;0aZxs{QfI6?*!G`tz;yHbb5E`3tA166N{1%$<jk~kdy(uREb4jE0
z-FCmvsSn#3sa^*Thg?WH_u#2<zK2JVcaNr`eTwX4OIuH1gQ3=a;%OXr(wJk+Y3W-T
z>=au4^lP?AwIxUV4S!bq{>205fph&$`PlIyP#&h?|FwUSshRRFzm)~dwwFgliF^>}
zG}omP)Ateh!((^P-+j`o+_QZC=e-I;`})$i$;H;=W<Sf17=}(Rv?DH?A~HkA3TNYA
zdkU;KU4HnP!f8fCu*aEivTGKK^cKnmeGcV9$^lv_;NcwR{_9z6f5vPceYEy~$Z!4C
z>a~kpPDaDuHPdN8Ozv{uMD^ax+RGIGf&N!OS$PCe<iq>hK*WqAJy}wm8K{=)xuO|$
zl5>X^)c@gLBFmCqYbxUAVhM73lzY2<0UbAigl3W|H-TLf`-oss&qMtF#>U1X!j<CV
zHh6=T#C9G%o?(O*GfNN92TS%|`Rz^V5mJqh7Q9L+j++x}wdGc^t74N0&~3J}{7seP
zO-IRQOv$l;fKF&j`GA_0ae*qm77%gEeu}@W<?S|<eklAJ0IGX7cokOXKvis}{LQvJ
zXze*HI7!^-g@QZ9%WZh#x7z(87tE|4oP6~V>vc!m0%3-(Yl)q&6ghq+{K*yE=KA}V
zX+%k(j(rQRjQuJ$wF}B%JF|`#Z=_$BJUL=8-yvfDscAQzoECUUul*!REfzSN3r%@^
zU9nzSj9M8s`kiB_x74mD2pEw#{$1hVKR>WBWi>=B9-_ZZ^VD0KHLJZW_RqpDA15#Z
zl~*LFIHVhX_@9@F-i%FTHnQJHy(90LSMA<=XF#J|=!+cpsl8Q%H^t0XINESX$K{WI
zVR+ktdSr%N$a3IQ*{x5ZXJ+VYN!WMIpbTC4_pp_#KfY9YzVa^U$@cjevp%8GKTz)|
zfO=(8a=$E-kM-a+w#EVJ2H+Dj)~1T*_-k6Gzb*MDiCdVX+vQYGA$9tHC+!)CLjKS1
z48fsnDEi4Nfp>Bte^c~((=`Ij&>C{8uIt<6$On@4t0e8;Ps;R7vJ*qb?nQ2k1x!vO
zl|)GfTS@l}K-mMNlt|#vy;XVciUXST!5wGU$oT&Se4+yjU1U`dR9QRO%lugL`rhmm
zRDTLB8LOM(w&NeMIc6A*P;u=%^)|-gq^e3wuTyAxtN}*W52r%vD+={AJiQCn(W<aD
z3z`EZ1U7$l0?w>}x5E!3z;PU%{E_FR*m*ap$|)DxyL(j+L`x;c&;;tE1507@cCpj3
zNspL^;UM2WA|m!`i1nYOARnFX{4PQ3Fn*fyS$<u=tisKTTveY&HMRVPYA8xlKQp)h
z7yE4>vhG{UK_nxv_Iulz9&I`N*^GY_+>-wqkP)D2oO)aL?Bf9HR4JxtzceQ|d}a1>
zK~*$BY3knlJk4m!|HV)f`lr<3eOpv`op8gO1!nX2i1KM=vScgG$(-psO49roU`CSm
zl;w$$4BA$?t?~&aET+u9l&m1GP|TNF)jCPMI5VFk-ZSQGPMo&_rp!t2VR*iReX%;7
zfQS62G;V<LU&ZZYYs*&{+W>l9%8)7giovQOhoh9Z(|#PKZx2a9GC}n^R$eZbdQKB)
zS_ai!`%BquYSC337|)Yj-aBoO#LF&~741`=Wz5Svy~WSX=b9tKyQkt}ebo2&e|uRc
zycbazruV3w$nd>}^}T>A=nq$ayNgd;8ExQ1q3AQ-FDR(3wA!)}1l~<=nNa^IV*G8N
z@h7I_>Op}|1HY&Ul0XQ23cDtIA26`jXJ;&ff8)o-^cUR+oU12ZCzpo>Yxfk*1{Xb)
zmmC4Ip2iJ-B|jiwWrL1&d7pG*QAdNQ!Xlr5{Jm<gs1UciE~j{JCgNEgvo~7DyOw+F
zZNfM^V#jOoe1UTvrJ|BB<ETN5Qxe_*a<{A=%t?C`=;IVi<ubPUP+#q%EZnR0JAmNU
z$K_Id1a*+N?LKq07E<4Zo~Lm!$(r5IbS^v;<0GlN1@2K>w$OsVb%p1qmO#x0uGd^`
znvX-V-OZ^esinL9eYm~i_&LHGm#H!R8N+Pog8sVTQ#7}7F)ModVF<@9YGsjHREq{=
zc&`f77jl9xt>E_Q!)qvteC}ISGOfw4PG@iUs8_Ul&KJ!;d%HCqG-xv?LY_-hQwBVX
z)cM@2zMKKb39ruL<|gkdF19b3w8q?sEb32lav%U&CVelg3Jewcrz)}3u?Xw8eZTnd
zDYLsy>%DtXlMEc%wkCTeTeNy1S)pN)?Oj;BsP&2=*QG|ZM=)x}b^fSsLy!slxwD;<
z<#U$&4Z#JJDHV4&2Nt&Y_!uDbzFe&$=S|l@`e!5R-N}2tTHSvK)zuJ-pgwuGw_oFW
zMMXV)yv7leUKneEJU7Toue?~-!8rWT(uVIJsj9tg(hMZ{=)tNB6r{*;Non3MD*j|g
zglUw^_HLUaPi^3MBOs|`Vd`PSq=G#kl45ZnMdy!WWXX}p_szJrpWh`GQ<cWp&&S(T
z#@VB-x12~XKJbzR_FwxtXN{M{A1Dc@JV~9hH`I3u%EfQ%9o^~H8ZWjV*Z5>AX}{m9
zQ(#n_0~t!p=XusSA{6oa$8ZZXu}1vFmk{AaaF0XKp-CSC6Wp!$%=012(P*y19+bTA
z)tq~rI9&w&@db|%ck}RKpReE@;|Aqa7`Bz}8o4*d(QpIn*h6)L%p>-1`?y%o2&@l$
zN3C8F7-j;l7E#~Q>aYG1VMz75;b+Qv@uX(2(mR3X`FMHBS)jq#T{pksFp2?`9dG1@
zY!V*LdWug*rvU2qYSZdDjA6e;{e39njI93roWVT*ZBVC$Icw9}y%9csV)kNSYwW8X
zZu%fYXFy$3X~};gwCjP4xtazkZd+bla!agd)9`ck?bTIZ%LUb5IlmV#;A~M0&Ejk0
zb?JTI7nLw`8?5Z(^rM`cB@N0MvzO4wSb3z<Cir-uK_4kf4z9Yur9&-|$VG+A?%$Gm
zaKFhO<)`1)?15<KT0jl}#~VeDdi%&@y5AL{2hJdr%)x;?+}+ztY9{ZlThFNzN+7cg
zzWE+bAAYZ5h+BjQN%kKvUgc9ns(BSU;y)q$Qmb<M`Z$dkqxs1Rq};QMmknU#@77&a
z0&yp;g%rm*J!#4!j=C5&?#eAouClu^GNP_KjzoirhOZ@ym=S?D>_BU=Ts`@6KF$$}
z`Nub}p7CM_qZ*mT3a6?a4BwH{Ay3tR>!l8{ij13r2$<>9gTpo5tIVK%t~}QLOHwLt
zvH&Wf=bBW!OFbtUE6mp|M!x~n>ujy}$1>0pUjhoDl4i~_iv0?USTxUW6Gp54b9pf;
z25s&7FXc>%aGK|JD%6Va`b2km<0?E2IH|;q4;(!sa7sa!diKJ;HvF_Z!3PZv82+v0
zX&&Wa8XED_xjdC9QS!-TY;NYDa$WlT;N=2ty5v&`rxer0{Ltd`(w*AIz4<RL9S4vT
zazt0`dqyu!H)7WN<*r_ty~BjXDheY^J-n@pj&huMT<WLH#Vc`76_o?UhhwUDitR-8
zg!lb6*%!w<#3vgO{e`Gv0q%Y)4N25CW~=e#$Ph!hztTVO`^sH?ux-b>n{YxGHwU**
zZgQv^?NCo!cp5a2pj8sg4x%OaJiAi`i*CRHUHpoDGW-<*tJPzpj+IrAYQW3p?P_&k
z5$9?p6J)hL_6elCTx5~#W!q9ZH>*yjy}XURV<ZVUGO%Yj#k1DpZ`dWB4FrjNC!te`
z_K5wAk;z3lGJm@06)q58d@tS)#m>we5|c#fScK=Pe)g`~BDLz8dF8tFfo><3fxmj_
z>6-;ZVJt?r;J!UI_S+>=+)<0`t$EARd@OulP$7G=^lbGV6sKGu$oSMqbLim}{JPH+
zt#OWk&2QMeLg++MQ%@&^$-+*ozNGq=OPgoT<kg^0^FG1LUub^cpWPt4HYCMjKx*tq
z>eWxNjey~7l!1J&n1Uolr+41S^B0c0GRA-B*W~0~5&LAP*$yk)L+nBRfM6r<D=Hj1
z@hdH5#qe#Lu_{mfqkfU#j=MKV*lJI-m(z$TDx~8pv(=GGQ4>IG(D)p<Pe6zIua-8@
zcTX|`#zJNv;BQM#DU7(0YLaz1{CZ#Y${^C}`Ex`^oSD<FHj0Y1!-0%5tkCKl6;~&)
z4&@hek((x2+j6(!mLybd4N{Bx<`ticwkh*+y9r|U`{|g7o%AxcbA2flY8?1B$vvmW
zD$=uU+BbZi)H2dxnpyDtQ#HG53pNMx|ESEpL@*^hBTWd!%Htw;mGv@c_l^4P`=6E8
zq*yI>fW=)j<(C)`CTPHs^(+DlsCruJye^%yDutH+C^<`QW{vdJrdat>)FVGyJ#No_
z-ST15;8j=Uh$VtL?jgnU6kP0-95R$#c7JM;V<_{URh?TK3jsL28@W9*XJ0ctPxsLy
z?!*FGMNSqEiqO+=JXDIJ_PIx5k?;J_J|{I_Ck%0E9WTE~4BVpb(A5nJM+cu2u$q&G
z%B5?(wE~I7)5@4r>;`?e;Nl&b{s+`-2`zL?HMPQ$33`wEV?BJNpmKf%%f3&(k>1wa
z5=o@1_3p~8jz#l*Ze^0hGDnr!XM7&u=xvg++(gF_lxO9Aoo*)jp7kg6sq)+CE}jXi
z!;!BzIR^+#PIoi?QUbRNHNLNq+n#rh%L*hL2PW^P(2Te_Y>BPdCu;(Q>_)b+<TsIc
z!(%TTYeT9-7xELG)IW8pA)IF7PItXvQp3Y=?YQ^E6u=p&0h@fv37L8JssV<ALIqvf
zyB>cvpz>sA^3O|>@$TZ>UtC@KgSm&)m!>_OVsR;0Qs(1_iq%Ik6Ca9puUH(bcpvrF
zt8Kyct+l6>R+46z_bbKL&LCP*J%J`X6!w9quu%s-33mwC%Gr(Fq-cQofXa5<`b_Xb
ziXz$Vw`}EC2+0$k^MV2^vZ^gt-3o$iUU{xSoHsW$9x|4uSu5FA<6m6LEvKgo`i&-A
zuIde?BBfT2iJG~MOZGee>}|k*2Sn|<i;IsrdO+v2-Ixt4xo(^A>^XW%R7uuKc0(_-
zGSh^bjVyOo8NCHG{aFna5Y?aOol9C=@;kk&$Fd?X_%@JL8)#VjrxRmN{lNf<mn&`Z
z5?9y3D^5A!m{~E6Ah$OZQ4I#ZWw&)}UECV>!n&ZNswD&05&L2Je$Cw6qlk#v_n|?#
z`)dz$JemCx`=MrKFt>=R!}qgqsiW7FQy$I-c`_D`!P&-)|2})cq?hcn#A!J8T$Ist
zCY4AYaMR15)Qs<;Z>nME@;FSv#c|obocyRp&Rd3?v^L!dB*vnRJ9ff}9Zda-@)2l}
zQD+SLcxisWPYqaL4M|!r?U(U!>YLN64xIkr?okKF9HaZC8d_%Kxb1d(0@@blf;hz~
z#TVsNOFsq2m8#ZH`K%^7r6?We1%$m8b+6<JbRIPD5IEaZ0X3mJd8myHIJ`JasB!>U
z%;4Va%p7PK&WsmQu$db=zQRkt5U^kl^e&>qFB1CzNH+wZ4B4}eSe6JA^n>9rsFIXv
zmS3u=YB}~wU*|m73}@go%4DcQ`WF~2fO5>llxH(k`kIJp&-3KSq5tK!Pe_?g7U;Kv
zYc=f~xxbUt;{VNP|15*Xckj-8XEymC!1atdT9Ld99d21&FD40@C(*S3O^u`81Ff=Z
zPewF7O-;<uv;Sq%Kcm?9G}T(a0lsfEfBm-zuqxSNNa;yX=V+Wb>E9v%%;x*#DkaF#
z^xp&?CjKjuVe(v_?DrqZE~H5XD2aCZKg5LqUjmKvI`D5|e%1o)5^PZ=sm1-r3<wP&
zBc42iKCD+8Rz;nRRZ%->do+o5>OVl)078LGW1f^FU;huA1}W1zycY2zMXpHd3&_8=
z^~n!(KTX0`G25;vBfa+jN5%!PDJK5}gJ1R|ygx}!runrlOz9tgX#f(FgRZ{J>oKN3
z{;xC<Er+ShK}KrF|6AgKW5n%dEL+Pl+^N)<A71`#%37$&)86zWjXxtDU(7`uCI8Q6
zQ<g1R?Msah(o6bZBJ1#=wJ3vcv}Q5u3bsLDLXDcm79D<|unr4KY&^hc=lsC3Cjt9L
z!z4$R{v*-d%G~dl12X4-WVvb8HdnR;ocr-5g`{kK4~xkD0pLUmHZ;2b%KsQu2cR|1
zTKc=Sf>^HdUxnqR9qY-xM57<AObw-z3oV*%`4OICN|l4zmO7<dKgjB(4a?=K?5x=j
z2F63`=nVRn%Ks=8Cy>xVdl~a1w8CU;_`K1Nc2;KxqRwVM8TucgB-3n-2$vM!2hl$K
zS1GEq0*yQYbM(M}hav?@MY|@Z0g~`JhKpCQW?x#cRLP!~<$ttCBG4+1v1R@C!>Mt}
zgRZK9KOP4KQji)Lm0<`O{#zk~%UU+_+?>?^CDs5Wd_F*Ww&qq^!H%#9=W^#0qwqm=
z$%V>}wvd8~%+YmD<|psg9=Mch#k$QdkCix-2Lzo1>sJBFabH!ci@;S+ir2tUhXXS`
zz{)JotHP_$;Uk}V3y4`|!4$ngqHS{#SWNe(hp1p2-aT-KYpSnflPlIO>vw0Czx#a5
zwYz(u8|8F0s~VNBKA8m*%WABQwC35a^J|#kA8A;slow)1H8&{U1B19{2HU&M%sf21
zydGBn&2d2^m1IaMx${7A>H(+fXEp8=Z?{2#My>jYmoWoZAH@9d;qHk4a#`#eO>))H
zv#UDO&GK9+dbp${Q&?WP;}@%>E-1jKTe<hK?_c3)oi64c(w=yr$t(m!-Mgx@NipC0
zB6*+jbITe|bEUl=X_9)G;^)>sNA2Yoo34c{=Ly(G-)$NEVe8uKt4pHA#{$j@J#CBS
zFuYUrj=IosOkR7kwug?N*}uQ<nR{EzRV6ts5373yd%qWc!yyzf=)tq>>H{gs<9!-i
zS%rMWYtN~@mYPT<X_&!pfQiDuuHRFHFc#;!Y$xSWYUTF!rW>2#3OnK(`OLbo99Va?
zKPVa@3VZN3Bt!33t4mzPZaOPBOjuXj@Al}#vdqKNecDNWpH3NUSm)Cd^su;lR3sZZ
zbQ9*<6O3)a{(L!boz%N2So~9j$WLz$tdsEtkj&)%^5$a>^d0t^4-~nJA6swz`eyt3
z>PJ8_3toM`zPecvBUY9vvezuhP8+FYc#D)NIjx}j)A6WvRlI<P9;B5BKW+HqmvuTE
zz>!q$FK43vUA}>S%iAJ;^Y``bE+i-S&Rrrq)-k;VPL6L9`A4bw&^U5;po)$N%)qMU
z$>l?zu|NNDZ~fsp)PAgZi}{AV=hp%0ldi&W;9|ilQUCLxvbM5medT{1U^ndwZ~ABH
zPm(fdOE1<C$@xeBN!T=76OqIJXg3LKt3<;${`B|Vbpo;>_p3TTeYySb6HHt7?sb;W
zfC}gqo`TrT<LgM5*C_CkBZnl}#Vw(zAMS!by;_&1KSC82<~74LMGrc$Y*%5fv-pv+
z#_z9dFdqej1da1s&S<R(>d**4P*_DMwZ0etj=&9#f^9d?tRrk8Ik~}%$**G!L+KR%
z{sR_!)_EAq-~V7CuVMKI&<)VTvXOUV+szB>fHK!8ms%G{Xv(TQ0@sal&<4Fq>ZWx9
z+(3`Vu#!dBcg3ykBChXRNXBNGzY+=jYn?@F)=rp|(K~Bm`&b0EU|)L+?ruDDY@M+R
z00Eo8&A4^StC2cIP(HhBpGe8SFQ{({Wkob|*9_JF*sAbG|6v4#PUgRZB^$VYDh4gq
z2;8<t;B>Bo6~S_q;I-#12~DhbFr2D2CFsxCICieusP!887o-cetb&bOCGpor7fh;E
zM$WI<3abR}IULpD9=9rmHI3bV?Ckhyx2l`q$i_2jYwPcqSSMOx$k^;v3iUr1E3ewS
z!q%H`j{&yK=;@BT@b0-a5^wKyc3hR3$@<b*hr1(qgpN2a0*8Wd&h*DKYl&5gC09Av
z;u?aHUzJSXnjp5<t<m^#?PUC_La9np@C44@JhC<adE@r=Uz8QZ)ElAcKE8)MtB>ZV
zX4+cp{os22*-gUQx!mF!h$%VsNDl7h*(o;1UVJeUD*xCQ{U^U`&3&vcr<SQa8a>8o
zKAK@C9l7boGgOO-KW{<8>iEkoh-&VV4ykjd=DqehasC)THE`qsUM1Os$R8%u@?vNA
z_NwEj$kxgEgTlUym6*2+xoobrXP);~htuIE#7_%DYj$+|v4f+cCjj>aBW9%NBuP<b
zm@1z?{Ik<mes_l2Kr9Ze#7s_I(}c?FDx2=7L4E1Y!dvVQvG+OE8GKwo-<~9F-Fn9_
ztn2E!4!*HmT@tZyEKh?wfAh-)x#7EC&A^xCYUl4Ha;Qqvd`-T?Rub^ltiZA_7pNK3
z=+X%K>E?X+1}f}r3ikeG9aQfcC@1QoBRy177GbZ^*pif>>9*S6!>Hbv<Cj?==TW2t
z^@np}#FBI1uPRn($#m!lSC#H3so;Easn0=?wAc9RS9?}ckF#lfyL{!wtW!yL+ucaz
zaH%Kl`-Ea*&7;{TjbZZjo9qly<3@lk)A$*g`)&_lmFGihY9P^iCrORfv7_&$pXOyP
zxM7$vW?koB0mRR2zprv0#%Q^Uu^qV@R)%`rLA?A6G}7`|jOUD*6VEy>F&9Sf!l0f*
zR%#$Yj_gt#J0*FPq}Ch)GRlr=?VUDXXxK&C^WGFJ`aHkhe)~+oqKnKyYp>d{FLChJ
zXRBM+f)P70(&VY<g;xX}k+A#Tev(vPdeI1sh0_p!IWDAF*JvOLgSr6bB_}_+uomMe
z#RGZb$K1Z|dIa0E0g_tS=~?|Z93dXEVa<H~nlVxgQxuw{i7`!$mGU2rg|X#8DQnnr
ztynW*u0Hvz9Zv@&oY8m&nru7Z1Ebvq`|0xvh{H%P+O_1W>TlH6tXUzP2q<O&6)>*T
zsiwJ(EJ0iaH;>aB;k)AEJ6l(?XYNb*19sMjb0ERfs0RfaNrFJJmc0kxDMDf5)0QOK
z%P=HE7sfGCFBt%DPa80sZ=}aFOm6Un>4NdeaK3zb#C{la&tmRlAYBbfa)LvK`eH*^
zWKAGc6!|H|t2W`wtvf)iV6ZVUnC^}74x7ntI5|MV7hB~*oohTDvS5up&`f`QP?V&m
zlm}F7J*HLfdjdS1w0PgVQP!*CXgNVET5mWU$&TAz!ZlPOsd?RjWj0BDG`bnyQ4Gk6
zCdbYkX=~`cM$jy02;YP+2{@sm&l%~=v2pvoi9l;ldihP5JWZ0a0!FiEH`2?_n-nL0
ztjr%Tdtx6Os8Afd9#Qwv^+PTcG`xcg2gn9_PyovlWew}3GFemi`NRGz{`Zx*!k>`B
zw{Mv5LXxmO-z|ON)m(E{0=9&tkVJd6ge($GZl>Y?d~&jcg!i}!@TaHtxS6qzq+7p*
z)1_Er_)OhRc>!lCGYXhf&nJH0h2L_t9EzL}W=;VJTE;&lTCrl+e&nx+(b#LDb_`L`
zcxp2#DDTMh#%P_8!OT|fg;vbY&tcznzok<6T90R*x(_D?>xNClso?n3l)7$lj-LNy
zNS9QCni!mXpc#Ez?!&yFAUXN8o`A*G0GJe=$hcHvCdUn5Sr+XNwIPct#aBAVT-QO3
z_wNd4MolB8{;1}TMf$->m@D_#X_&W1^4y)I7UxV-=CMjrdGO%YeHY}p1LgMiD4|o~
z2hLHM_9BR%^?~y+Rzq++Xf{$sj$6to__mgny>dY5G~Z!<zbKCp{5gCD%%TqL;rAq7
z-mt)a@75POGk71z7rea>l>kwaEhbs>_JghObiTze(X+H?G8e85qP#Gtd)CwQCNiWZ
z>v~2l7huuaY3=-86KUNzFUbZ2V!cETO8AAIgYRmZ&CQ!GV&!3d=C$5ltzbt)xgZ^#
z%9d8`2A%<cYYs!?9ic@=FCNKb*h1gV5|65U9h6WGZzJ&XiR@)Q$8WDabb-9KPbcqK
z(2Cr%wLEPBK2)zxo#b1uxOih4-@TU|pAE?M!hqRsaj+5a-)TvYGMH2(+Y5;JdkMK5
zG*|Xsu3R+NiG>+A=K-rZ*DD8@5J$NqFTRvmrO8h%J?gmo{^>w2+N&lyLa~yB*Ry5u
zbG7<9(?3&@!Y6&vaVdjJqr|}rQ&BLK^JJUe%_(s818(`J!ddFoHiQw$!e4i0xm6HY
z`S>RUs;^7XXc%$wlkVb0v!WDxR*DzP@H&6)7&r0UcQ<~>hDJAu=@4xiDYFcFj2yKN
zNST|U&Ioo7`as7~25Y&6i004*hxDcuQ`+rW%rY-ePq6S&P|p+s;F7t{2@)Q`&U7N-
zY1rki#LE|p+^B}Q34X%Og}bZSt)`EC2b%klr~c?s4}eInmNty&LyT2q&g6GZ4Q1HF
zIjUjpODWs4c%Io$qB4fhAkI@pdjYf@YNOJx%r#V!q#tr*&7^U|nQ&!gnxu?aSRT(n
zhFcVm(A(~neeDVmZXSNDNbVYYuRBS4Z*LNMwYjG^vnk*?R1HLUXuaGhbX?X_h2ety
zEAp6hL!WY!7~bV1l+F!YZv|X4I7)%W@3);Pa@<!1Md3u>vZ^S__QIoO(9m6;(J-mA
zA~STPf|4>|Ges`h%x#TWPV6;FNCsHtTdZolvLXIS0yb$xE`5i}4tSo!$RG4rx}+D}
z!tMHiqL%G&cB$Z}9Ze6mDHfHfM>-bEq$w4Q{kmU)3!Ddmac3Ti?W1$DWgumiRv4rP
zHhlXH^SRhROVa=e(uak3llT_pY={%<4R$4WEYzHZFA2D)6~xcX#7!svaS*Xr852;9
zNoJ_o>Ug;6%zwNd#8^4>?YY&Bz1s?=e@#2a%>a^lZu#;g9=<6?W5}ad4P?Z!GU6>z
z@)-N0GGhTH_viv5R{nE?5=ojd038$qR_j#nQ>v<;L$eZr=e80Szc91CCww}@$7Lz_
zf@b#5KAkzh4XtN&=&)N?k#G%7?YX&H5&>XhGnAM}UCMaUB94AtbwXwaFPQ=l&0vou
zrbrqkEizsq#!?opH;s5P|LWf;&n?XF*41x13mPTbp=BYyQ?%iGPcYr-xq42?6+@?j
zideQtab7Z(8E6xW_;LYc)Rq9jZ%FW68?KZ!8{e5+>S)!ztjl3AcOr#y?(JP#m!vM)
zod^M$CiG(I@W$>D4y$&H;{I&t<oS$G1h~j%ga${FkauXlAoVz>!lHO6#l@l>$v7@$
zHL&HB8Co40<Lr?_GKTh@wFPR+9jylN;0*Dh<zFB*^1Vr7MfkTq-shE&DL~I#W1qsp
z5jS^y#5okz$*ifeM)yQ6^z5P%<~;GD!TW1qMX&1g;n4KpUc0Q9mdt#$paR*&dsi8H
zS>vNmY2C%0yIPC1;ydGY^1G-mS1>*nw!Rs(g?P$tXa2@ps^ilomm6C%OyGeGv}#b(
zliH`rL{^_SX8d@S8RGbB_l5=Kdm1$FY{)oBi~V`hf|UX2<m#5fT<CrZbtt|A2xQH5
zqM0+tgTMj^sFetZiXd@2i7>s#cBj-zdxwmaHE)xG!~uG>9A+ylm22Qg2X#wh^j;-0
zg(JuwD$k|y`i%EB39n`Jc)vF6WxHZXJ->N(;aaTA-vv;`c-7ffTsd;`wTa+ERn0wH
zbqnh&?u}2kiq+li5X-6ExW6wU2NAH7u~+GfBQhj%Sw%64whfcF!5u|pVp9T6ldpIy
zqKF*Uu^gzcW;CxPr-to_7#IFgz~{#K#=k%p0~M3Y_^ySCy3u})VN;lP8fmh8a(66z
zPG7-+Vi-qJ+<{dG^}Au+EV{mKbJnq9J<~0_p>+YHvwYbHq@t)_OCZ^zEip(c$%pBK
z;JO@Iey`8}F7C{-?Fe1*BmgEuQ%BE&0@)mB`K}^YZ4zg)Yp9x;(54IfXbZy;bb~Ic
zcUs@;6pC8tudDV|6zP@L4b&#PfqZi{(2cchFuUAXlT}-#Ob)1f>)nxc+`W}jS;^K3
z;ap+Hzb@0EbWCM-`Hlh{^<F^*k<Oze(da3`GJ{!X%kxlFGCR(qc#-)?VEC8ot>z68
z={5Vcv(X?FzzA{+Rv=S!E{8NoktVYq0=4YC1z|&cA7y2<gy|(U`6rSsa}z)8{PXNt
zet|Yu{i%>K9?!(h57+TClvt0njj4+6Yei64P(=fcpHr&Z!pqvJTBj=)eeF4pyo#qn
zwnMln{YJscWuMmhdwiFdI#S9-eLDw@NxJ#xooP%9y@z}H6)X3J8{&%Y>a|6fg)r&2
zh(K_y+L;P~66tcax#pf6X5~tf=Ib+oxBIT^TmqS)OlMk|tjUtN^sBJ*#oeual5YX6
zCwoq$eLfEw?SgNcI>-RU2rnpeifO+6{jSPpug5LP$zBn&J;m2vEBXi)eWL+ReQlsB
zvecTz_%>W5_g;zq9oHXRvIFmPo={cYiK%LOLCUmK4gYB~lA-q}2|vpgPaxv;Qkq?m
z!r{7x>3ZD`iN$E4-_#si-M;3IRCuzT)pNrls3I;t-lu2zXcbJxR$J+Oh-jYD`~Af|
zjlzZ?L&5dJd+~8`zB1mwwm;ft#mb#${=9rL-3%OeiOzLz*fXkoaDNj%<izNC!+VNc
zfBuaPfe0>iyI*08DQo$nJh^>9wj}WA#@jejoDnKVJ@#Z0?c$8UY+0sWR{g$r1g|AM
z*Bb5eM4AlvXx4I!=*)A$Fd5h`oLg?wyW_AA$iHV_Ng=-qy?{&7)ZkI};mea>_YiM?
zZY}OBnAE&r`wc&nE)DsToLDM1DTW|X$v;<6E00+jOrrHHg>%|+gPGxTqqoKy-}Nng
zah!}S->Fx<GRiQNsX6)aU_hH8=yB(6RX5;l9IP(biA`B_4SO4`=Hq;9GH=TzPf*Jn
z(`hd@EB(v1!UmRP!Jw6rQil%G(d4McQem1l-bq~j`v9(63-RU6`=PpIY)Jzt6PrY%
zG6%aYVI^E7Wrp;ibP!5VeM(izq(AS-oFexs{hiMq{%j~5yK;?#Cl4#^zgv>o{jvjB
zNG)N~arTBq&QDeV3Voyldw9_p9$je<&J!@}CmVgcXs~i-smfWs1(5j>^3^HTbD&_g
znBc(D@OvV?0(7CR9|(SO+yxKx+>k2%U}hxGwX%2QdT^)IgI^Y()UW|1*@!3u*BwL@
zJ2q^#CWiWFd12Hsu3d3O3>=%PVKd=j4;i=|aRVeIkF_7Y=i$NOV-R0LCxk&VzFzGC
z12dK@MZDLqv!<5rq-a*EYmgwEyg2M#lk2GL=LIe+q6%EzH<FVEEk2W4qB@#va8I5a
z)>>dVr%0a7G*3Hm58%U9hlC<%(xUon`E##WIjtb(^2SitXTZ4ET#R}zBCzO7me)ZP
zmAP}^PGXwwp<{5)Xq9G9F*|ZHRyb*j_W3FH4)y)Psw7&;u5bzZo)35gFRXUdk%dV#
z%!l<3XN0LpVpsC$y;FT8-H1Lq77=LEb1sFZBhov)nNTDgC-b$Uxp8`aw%ZqKkdE;_
zT6sjBloDvtWY6cMa+Pi??sk$BCtUwS9~`ak>($PNgt^e|EAm|Qk>Uc1xFHfmNICcR
z_G;^ThiAhvS}=&%QzO6~M6rnpz3^0lFNMWpzw8V>XFCE=mQpL@7nL!tgElm$g$a_=
zrug(O>y0YsKnX18b&Dy>lJu}yJ}m;&TOLd_kyG_gcmi!f7G12AGrau}kYR4(pKzk=
ztzGc<9gkJ02#n4JD3tQz^3Rc9?O3QJ8o>t?CT82KYY~gu*iyBn^pf<t&;~4lmgN?x
z;0B84<ehix?j6Z%_NQ5c!qoGk>GQZ~z#7YmY%*mj#xx#V0U}sSXU)-rPe)jQvb^O^
z|J0^E2a(|_y5C2zuZuNVuf|!;a_?P0&S*Nh6JYvhIgH-7OSENSk)L_*1}JUko@5e_
zSM5Wx9gPAIXTbr!ApY8&SRI?|UCtmi>oA+AjUu|cokoF#=521OW~|n#^3&$vN}JFz
z!We8Tb}DE>pKlD~XC|1j==c~P&%W2gcwgCT6CK~I^tzWnz%w?%?SC<>Sd>WV9Wvcl
zQfPWIXhe-ARia)?V;+ZCDx^Y_!<^1Zeb0A~(66@!yEG@gyeE4_2Ab<t;9h$ed@^*g
zd)a_{yfWH&OK%S&TIbtS`pwD=B=zqnFKv-j8i%y3sZa{lE)1#@rRVJVU4r<e{X$mF
zu}iJTEO!JXH(Bk3)8@shVNQKbZm<>e%Msn6+;H`+CC;oE@QJqqSCzPY`_eT~xPVm4
z`5+xy_e|W1hOI1<QX^BLj_l^PDJ7p_GoOGkd!I7dcl&v#)GA&_G>#isERLqr9>ifQ
z2#d!n-xSS<_;3VWyGXyAv+B1Jx;_FdP}d-WKoNIos)ZI$V~TjTQ4L|=g+t5s@}x;W
z(?np)R}3cAPwlyf*W`i|yNb6-j$9F`sjVG;2w6h!_WimuxIn-n&6WT-t<OFc2n{n9
zD&4wHc8|wjC^iRr;ib;25KHG4QQzOZUO^xyUbSdKL&H1(C(&_jB1BvleJ=g$+gnq)
zSGCLZABoouVO1F#&%Mr}c*-ZKY@NlA)ITt$vpkJYR(9|WpmsWzwvy}ia?E6-GN!U?
z>B?Ig9e0s5!F-osmoYXI^e6&6R6FLtVrrxY?Qy*}*z<*?Y?(sYw)+`$JJ8lKWlyqQ
znbt4`!mxLlvgm~3G4o8uQRb8`@)Ym;Fni$m`}F>wm7givwubo+l-AOhuy4Ei(jcH1
zqCeYrUoAHMZK^S4di<|oIkFzUDfA1z&N;hQMY@-D)Q+H;ar?8dTaQ%G@3@%)5~>Ff
zPj_U@><$|fE4%+;&+CpE$H2<PAZ3yhkgO>F$&^(&k(hP5DWI=-Pp6hL_!5X{D}iLR
zw#>h~$Rz;q*iw3POE7IO><<DR9IY74@3)7ZT<KG|?)RdOCYudiB>O%~xG~-4{v;bx
zFAn+*L-)+-LhK2Tjk~XedCA^4ZPK<Gph{5OYC(E0K7)4u%E1MF)A(|8Bz2bp*Lbdw
zT{o|KJYLOp%Z*pkEbj%y@;8Gr(veam=^Q9^F%5)L(`1xPF!wRKq-AY(^c+VEc56w(
zN`Ay`BtxS!$?osD4w81o@X|7}v$gpc#!+xMg;v|3b%l|Ju7!wa8SvB)86GsFY?dRN
zS6puD@bgX3_PC+Y>)>@R_l;JZP?r|Jr%a-G+a6DXEK+J163idJ;Gf&)PiC<U=mgl$
z&;J~6JQw^##@+&yfd1l$x*y{P^8R#p0kx_f_bPFHbOoQUPy6U%e4Y#cjOI!#$7Y4|
zEQ@%_vwp}rV)Ga?RI>BxhopT9m?#@_wEox5I~_w0W((~198V}*Rlhk4)nHvs>}=C%
zz+WpqY!DktstKfa`YrF`|72aasa_abTf7ISKYme}OmA8==045Dn)0vZ*|loQkMtXp
z?HKv8=m-lYn5XS)81Gt;mGLy<Ze0<}_(L&!Nu=#(D(Cy*S#_f8H-K<rt6Y;6OZQ$<
zJ(9=^a(M+!zIkb)yJ=<5A}hWxcDb7~+L;vD77-YX7l;<rCu=ILO1HejJ^XERp!<ui
zD>v`T!Q#D=v=|hkvMnp_g*8nQJKIBLzmQ}ZcQKK1!WvAMz4sIA_OvPBkjpo~x^z(0
z9+VJO#L($?r-7B+vazKoW~Jm8{a_)WMTf}7bjUcTcClA0&g<%>)PA6%J`KJoZ5-ip
zeP>m?ikuBNPLq8C5xiebl!$rPhUirHvl!``?`!B|C~qB9Q-r}I^DOv#gbf7E_ItkN
zfQQTy!wSt|A6zteNYLCX!JC#@+BVd^0FlKSKs~Xn+2kiG?!zcHb-_^bI?UT;jIDE8
zx;^h$(H_#3syc;>4R&Y1_&RxGH6IxnSxNk?_p73C!{Dy?;Klxta(D>GVO+)XwM)`|
z+ICXQOT!oNbURKoa`x}fgvDUJc-`d_<{(2fM3Uybf4_MO3S|89i)^ncT0fySXJxon
z$XOO7__(}CCwYHnpXN*<H@hljLj<Z@BLSH7V{9Fkn+<jmkZK0JJ)KP){c&J%V2^dC
zC~}+E7r3cE1o)gxl_o!5j(X@LD}|K$KnsI8@)mk+AI>B#AFCFs-}{g>WXU=X>ukH_
zYe2oMQSZ0q(gC2?hK}Gi+lBoROnW|s>N`GF;1W#~Wj;8nbfd$GH855AD?CBbvU_Ft
zZ^EY;y;NncN73qZuSCE1OxDVhIBdD$rzZ4JAe|U<a+f7bG+tV#%14ks`c9a<6u&ZO
zvNA_lNn9CU;IGU^EQ!n0zie?7RHbxn=Cv{=9R$vW1X?=R-o!5<XgNi_o37Pez3=zM
zpuDpkY*G**gdOGw*SJ)UalgM?L03UpJbUzrpYyLW%ekp4?v*K3C$#j;Ncitd#?q^?
zdbE_HowK9O9NzGrXcC4&A=5Cj0_UL6Bh4)<vg4#6I`&Z2h`${u^csZ+2FvdEv3;Lc
zQK8@-fe~2bhtlaTR%mK3mCkEf)Cuepu-T2OHGDygE=np^HXzLD<cVh!xj#kd-;ZF-
zUBVgi*>GV(c1(Iu;BNRFdNGq9k<fWNI`NfeZ9=u)(Fu;QW~bh8*nwDKjX!_JU5DLB
zyeuAS0gvPfu!1EwFB0!yYxD`ESA)$%xh;l<sd`yet-wm3&S~MrZzm7Aee%~dmNMGt
zod=nq$NXDHqmya7FNL{=-#*b`7bO8`IH+$av^n^3IzF=fx%c8(Dx=jKa+P@dKsvXW
zG^F!Nc+6G__7ZKBzlZ&9=}D6AsV#b3ujh%`A-L8>L;LWTY^6zYS)wTL*NcvhpJu%e
zqIk0M<1_req&tAKzg|s0`(uz=aj~;FF92tl!`UdPD83sX@0hjd>U!AjWB42Hz!$07
zrC?lyVNz9{(cVvQNKPlMS(joq6S`E|&3P!{dHYbj0z8ovfP!0dnnlX9lL)^~p6*n+
z1n~W<6I{BIx)5X+-o?yf$IjLaC(*NbgEV5Zq*I<yt^p}kdlOR%6)8(#ABn-{9o=*8
zq_)bqXO1nOtwdhZXP)T%5Ck$d0lMuE9Ry!JK?x7L6_B00c%O*SdO!+tS~wC6wDmh}
zs$;Ek6b(2(cy=(LA)3L{(-c!~+fljq?rB9X=~aOjnXt&Jf5`kD9^?41>GfX?i+@;;
zUZ9Sa4gr)r(-mc2XjoY49U#xi@i81*cp2F^2H+#D#I46A09yU4U*G{e5=GyF+T`R|
zy<gh4rpRL-HK6e9fo<ZmE$_WGEQpF+a#L`8%yJJrr}C3E)xq+HZS5$)!dG<jlia|S
zFJ+Lkruwo9K9UlvGx7L@IVqEPkr|2ve;uo-`m9lFkj`N^F88IETCz}+`h(pg6>;k7
z>ON7jjtp&GvGOE@V^1}1A+Dw&^xJfauKM`ZCfD1dcNCW{kI1Wjhog=UC=-v6FSpa!
z12I8WYUHL6aA3r|qS=f`Pw~JnTaJ4<@7-&D^(o}o(wJ<+GK_;ipQ)K0ZA|nqkq+<n
z=hyM2bwVff4i2+LWwb+QmJ?-Vs`<x58-(I|A-usx3ZLnzz@>{z<4i*N#1zD&?zrFN
z$V2jYNmU_2^U15=S5>HId7jDG40uLt)d7FwTp~Gr_7{G>92&EQ!CTpF!x~MKYE+I$
zdrhJ;_8@iW;jFFqN%-Mj+)8s)+siVKX*s>6KdKiZLoVw<xMNv05)t!d*W|_Rky1G1
zgu?5rjn0&*W=>{7Oqs1?Nx5jXV6`g&*Rr=cu@ujKDov`&2tQ+s_!W)^0v0Qr@q6OQ
zlAQR&Edu{}75B4*D$)C-AP%*7*Z3>zsJXWG?zXyXxqSBE<(c5zsM=ljpUR@90liDw
zL)T9=KDTCxKiGS}?**S#>STu2?UHz(y;`5%Py0s7G>@_E?8}U|wcKr9`~)7)bKZ5b
zW*hObtoBrTe?yXyC&*aA@!syL&6Bwkyg+};(Zi=);L`Cm%$09#1AL*JsCdLEb9#bd
zXp#}1snfo&Efc`O0&YLYWyCWJE-G=Mxg4O$|HU0}b<B$UhuApxDjrqbC9WiNh$LWj
z>J;7KusuU~9D}G#(bULMJRPiQ#X`{eQMj&e`~5HVJWZiBj)?aCG{Ud`d!uBBc(o@b
zt6KP^>q>+nU-WICSC^IvmyPd^)RI0tGp<)pkC68KTh#fTaH?dMK@DD6ru*zsY<inB
zQwqfVY`)gH+;F<ELoJQF$MCGu%tzb5=BJ&C%H2mqC95Ix8`OA&$Lgq^6;9`<!Y<Ns
zvuxJ9pg=EJcm6fEGrkT-6knMi%gYMOoOyqmKb@pWt!JQ$)cu+5;o({})G+3a{2Gk|
z7luiiHuHjMEosOu<@az=8QqReK0U=w&pk&JTI{8)J2$O%UiihKrgZPl8%p_SmCE`R
zpGf0N&FJN2mO5*><T9K~h6ZTW#=;+4Pb+cD1YWNE4}UG=UkL<{C?&$<HGiiGdPMt0
zbrEcSqD*M!7~EyiUSwOXfGaLVR%QMhQxyh2|2)1sX5f?N>TDuwS;JDtLB*bRD0maD
z&4W(fprF^YniuOVYMlj9=$`Hy(E#5B^30jWaYZH@%ru$e6*-aB@HB0smcD~tw36`p
z{L2Q-H!lp$?#Pr&-^1_pGHl@Akx&{xaAxPaV8aV=M%#G!EC?;Rk%#AWp+(7dffE$X
ziKB}*@5zyO^*9+m#=+B4y&rv$I=_J=?`?&Gx57E$SAL=kU;QyNST7_+PLR$yNAkvf
z${l?~o@(54C9}xna=tulu2nuQN69Zr6WJf8Vm!UO)XLH1#O@KghM4X6wZ{O0d%(Pt
z0+AHWiSFm&A}1Lt5w@CvGDrCSQMwOxR2S}aDvdmc;_cKkWX?I4Vm44O^o(>Q6W|%X
z{iMk*V^4KVA*9uT)_GlITa;ONpfZay;p54%N^fIRB<ZoEE-K!nbaJ;cC()*S%c)CX
zUMH4aIcduxU6N@1`~cv$I)Tgx=xC6SVoD=Wfhc5O<xB;>wyzEV2b>ooukEukZrywZ
z{`3lMcIeF-H_kXREpA!y7<m_+@vU5`VyH!`BRqnBxlTsfiYZpUn@3Bj?4Mldi;0!j
z^3Qbd_mIqo3aFa&GRQz3l;?^(jT6SzmvR86_{IdCOJZ^{;XMsR^)Ytr68x2mcm8=|
zAzX*dg@m*hUJbhKxY~+b;&+O-KT%5!EsB%HiK?z_n%w>>7Tq!CM_-;fTRg)JsZ;+>
z>E3eE0wjJ3l38XHK(j~V0dtoALw!*1nxl;kOCW_3KE;8Y+UtgTq?D9(*|k6E3kn(o
zI8j=KLPodKV}`IVioR?Ra%jF9|4mHF<TvAN-Ds(d7j_zX*w;XD!Q_Yqf3=b7`;GrQ
zN&5X>I1)+IZKNfZKqhvf@%vH%%8jf>9pLQhBqd9icsR;^9594oKXLH}#p^sKLhtHL
zX;rsZtx)!PK5H1C&<?Mim+^iLm?#A@<tE{o*t#ehT&y(!A<ni^zLxGgt+ZW3ev1hX
zx#s9#LQ-38U=%$1kPC(99~CoiXuzxAKE=W2*>~LNcbYlG`E&8Tf=4<f#b(gTbZGwV
z^^<O&1gcly;ax}m8w`T`@PxWfY81ftNkWF}xq*70)p8VtG4ZA#!vZn5$Qs?ScCxSQ
zsZHKEVg~*3+9-k_G9gy{xn(w}poX0S$lPgoDp?}@o@;{+7Iz@%jZ;#vj45~uS0~!+
z7$)jLpO4N^e!H!mcrIY5<fD#+>%S=h?n-M$YM$!`S}&#68!yF9b^_`<b1khBVop!V
zn)Jl8C5n3<KlAJcD3TwDgfB%<`>^PHxLzB30n9oVJgQ(<voJrMYsin(sMdR^vtqmC
z8#wVBFLLm+?AW1d!+glR>}tamv9;M)0TgX?q&z{9ThleVO|PV6L$f?Q8X!-PVeW{P
zzri@~6#WFn;I=L*!wt!_tg2hEES@6S_@^V{xb3i4NZC3=Z+kXJX#@djLlO_c*da*Z
z9C=})qe%*~KD2Mcy78M(h!vnX!{~eNw~)YbHLlZ4sNKm>?~hh<hquUDh&!YCt&7Is
zqytO)FC$R&I6&elt4nR$f!h-_ukY_%UK7`#b-&1qQ+D~Hc!EBD<T`uIZKb{K7)F}%
zY$M!STUBn5G8wn6aQLF{7r0cijT1Un7N=p-y{$N-ayPe9BcTDzs~!zPVj6(p(3b5V
zN+3NIZPtrq2xtiM9|v%fjV6&t#|xaW_8<G3@pmG>=*3{VYJD+F*vBSy2lc}A9tb{u
z4%=zpaPHwTxCaIsDmh~`1NL#WqS$W%e8r-x^KCCA2B&T{${UG&AB%4xOE;D<rPV28
zd6vP)xK(B6iFFqqAK~tzyQsE|1{TPlYd*@73Nj}0wdNsui4Y2f=rY(HVlxx<3u94}
zvV7`W*M5?U-d35FVWDBm)mZ`d;@kRs>WE&*9y-rk3gPrrAFgZt-pD_aux;H23)&YT
zh2^_y@BXHcc<MR9)btF&?caPl#Ef@tIsGMzc*pHiP@#iS(>XBjQyH@P8T<CQDXSwx
z8F9P_LU<B7US`QBb83vttVe&_zwk8W^)FV1kpAUE{U11#<vf$DUstyIM9eErkIs8u
zeQS<3xgB+9S8O~@jjUy!edlb)>R1mWFL&^Rw?Dm0>Oi>^qyOpRwV~(xx9$qqSxfDJ
zKYLyEOdFHfM|ls_>r~tud+K9cpF({vOY4((&j|2X7CCHV;R`ULglJW_T=BNAx-8L-
z1r0zx27;HuKgNGzio2?JWsaGb!y^v}t&*pL>AX=Y`%q8@_LG(cV|=FU*VTz_7T)Xd
z#t2zdxJ`^f7+j};kIj2tF8@{){)3s4MvC@ho4}zzLId=-CS%bxIgkV0nRlJ!WM`3|
z)UNjCT{nAqnO@Dj12<KCYIc$Nk@FhR;&5JLco(Op*vDA;qV>|4C{~*`noOet#x;@q
zO4o6+WyLHi4w&>s8^7@{CShKUJL%py6gJ;-i_#}EQLmn2lF%Ma!BH71?n-4b67?1!
z<7_Lsh#O*y1Dm!Whxk7`ShF}@6&=r=2k~3K8iP?YQ&dAZIendloy`@ekDLspR}R$8
z&G!Xo&GZSQd_El?75lxteMao|Y78mBK<<TIZ9#$FmJGcwVKye7`@6<_Ui?k=82VXi
z7^$PGI^t<XNN~Kagz3RuiZfSLjO#Msw&K4IxVqQDZLIiWeU#&#mn-ROz&fWtkDj|c
zUbl6VR_TeescnFX5kiaJ6f;{=JDEswat_j@ri@QSwHR=jJuL-qO(T+4UOFL_KE8?E
z`OC!0>Z}N>2GjRk_hGY&ngvK%Qu$jUktAkUlww_H(}1MvmW}pWrZ$Bl7!LuxeS~Rd
z?IPx<GI;b(f1`}<wTKn<HETwM?@T1>@Q(+c%Evth=o+cmdibM4D}+U_4WgwPVRRS%
zbS2H6=R<5=VO;53(2U?K&>u<lA(n?^k;G3cGh5(-$Xk@xKj6f(A7#AOs#3xGhDHN2
zUQvWn<+L$CYuJOTsMXo)()YFj?dMYUx5QUg8h=Q4zgKNg#694r6;~L@pT>=4HS3ZQ
z{i6K^S9!=7RCmp-ijw0Re7`e(aLAM;A8gH^$oYDVbob}-XNL-osBYXdvbplYPSTz$
zj}u2cI?_6X@I(jvrHN7&fO|$rSq_Hp(iFw&TS)|~5d#{aqV<OyS*{NLh~)ot_wC_O
zZvWfbcG^2q=|Dua+ifI=6hf#JHHwTgA%q%+ig6f>N=Zp}!c<PxG{!jQFb=~Yhg3rH
zIL#PJBE!r$jq~uXXQuu8?(chDzw7<~{mW(6^Q`rGKI^m2_kAz-xwtgJqTstds;Q<q
zELv3}7AdX?u{256mXZBLA?XD0g6<>Dz_HN|Ka7LbzE~LY_7Cx7`=#<u&-bk7V6pz6
zpWbyg{y%@|>1-VS@3($*dbaeR^%Phx6u@}*3GDW--~&f*3yc(S7uN2;VU!*8t^UPH
zfq!yP|1#w>Dp++0g)}ya<}?>M7Ehn}7h&SKM6Oy_EA;PQ&gQBGWV?GVZuyt}4Gk=0
ze%4yK|N72`cPveR@Be=8BdaJgSycce{D;D`=}=B{LQ<Sy<|35t*xvAoRdm^d@a$jT
zaIl1GuZt8c7KG9ux1KmU`|czQflmVV_wloNYCuK;p341{2HE716MN@p&OzoaLrYE+
zKJ^6PWBpfx_P|&#5(PZ5d)O9FsZ-pak_koAGHu0I<h>G9n-Mg3z^OPz=7mS*l)Bmh
zaIv_%ALgk#Zq}bz?Exo!uiU0J&#ln<;|dXHPP+`GVZJA&S|P|pu!k*k!MSVw0EU`X
z?FIi{iEyZZ5bC`MeigPgsPFBED>m;J%1}#~I~-8Hm>qslC)*ophDB+rm1kNGIgtl0
z&Xrku2dp$Z{fONF!1gh3I*AwNG1;VEJ{oGa&GhH0ew!#PX6<8O<o2o$8=OoO1m3|4
z>ek3K)+ydIpnUVlzo+6)NB<pW{A45vJ%{$KdNBnvFyIJ<AFQ#Yy@I;ogzym?EvNAU
z^_*PNCECgCbD?ADjvp7={1B=DX5R1vM0wYGoT{n{?;PWf+?Ih581Z}DQ<ZmTBoUkD
z(ODl>zmVds%9R!+Cc}OT1mNQAk=O{;B<ZBZ{yDU%pG5db7{cZfXQ|bNl_YNN2AO&H
zW7kB5jq$=-YX*HekrYcoRe0IqJt_7xIq!S3Z3`mCQU+?j08BDIEwV8II`6H{`|x43
zczu0}>P0a?gn(@uXKA9{SPvEw*kzQ$*GkueG+kY-?p9wN?$c$gKG};#2UY(~*q!JI
z=o>L#J3YJJK7U25kfJD<SaMsU+!MIV-^0J=LIVJ@zKsgkn~QVutMngHn@z~mrsQ@Q
z(~HFvUY#_1ppQs1Ld4mEAnE|oH5PcW&yuxli2|$u7!|%gga4~XJaQx+NdT0BhItlp
zd0bH@I8iS$Q8o`$@mo85(SGdgmVae44YKLA(CDWZhu=iX&0hF=XwP|JINC|e6=z(`
zC0=qMm-&{VtG3p32?JF(S8X2|f2l48q0BR5VHjvRm4dAU=LDVrICqfUMOy?J0C(U(
zYRy7S*4Qu#Q~>gQ?RyW5+w2w@fEg2qi9Ifpigv;*Lvz(W;(j4~j*h3j0tEQ9vdPJp
zdN%X>%>-dQR7BNS`u!|Ig{Lxi1d0B5g}D8|vMstM{}b^2pLjuuNS3mL_g1oY!vd+~
zYL&46d+!Ms_aqTFH8YJ!GiXtmaHCBPIuN(3?#zD)0kr(_DF^2#{9nZ|Plks|kgWcS
z?zW3p+l)q*_of$JfBiEPZ-LFwl@mpFt$kK9U4ysM9`9GC1cJ^#)mGZe=%#KONs=~E
z_w>-nr-Ieom5L#!^g`{qHG&{a=wQ{2D{9<5Q@ufd@7Ey-p_2*(CIi&b1LR&&gz9M*
zfXdIPl|i6oh$dnn-#$UUfbOM%h?4;IR*?t0#|nU+6J-IF{rGh!*zC;nsCp$W&F9dq
zp)%M(5-*@=OVp)fz|Z~yn@^B85Fmi{7f<*mup$a6XF`6o2KcN=V1O7=dmMv^1Jr=s
zq4{tLdlLr}98A)z4wXid5*-bJW$6OTV(w#`gBWj{4SqL1tt9yJ3xH!_bo2LSLG`O6
z@)#>=NYnyssvx5+!<`0jT#!zkL^kN$RQpdtYWL>KnhjX<1`#QtF9rmr)aB{{LYK!-
z$%Dt5!V<!TCA5yk?Cs2~x@PhABw&ev_H(*bVA5?}B>IEX9P~2A>gNlof)_S7BhvKI
z`qv5EI;W9D$1R&tLjDC;P|WROo4nR)-0+OJOi!%3?rIFm)&%fJ0l((&D#7bJ#+ug~
z=NyRLapZ(B^r{7(eVYDM#Z9I;2MC!R6k5T7t$+zQa6daFc<M%wFlj}YWGqaofit+F
zElgS?Ogbe<+WK0M1R68^S(}AP$Aw9Sf~4)jq&<S9)54HlF9_LVQiAY8zJn`PBthsh
zcQ|a;{C!NAMX?|Yz()X$<kj;|V#7U05qIH(w*(Jv5*ojiLd_+wWfbU(0KqGGAUY+8
zHb9tyduU}}3^{8DL;a|TyD;^RAoUnQ(0<kn0`Z24u*5Rzft&=kodQfCf}6*nEgl05
z+Vs=#3vkc^LYE-hs~88**)1Y#gyy}2Yexjok>S_=fNNsSrsskZ?q6{J03x&Yr>#`@
zqeKA54|MQIFe0>ouRkjEQlY?06GAH+;V`_ld``XPqucpIW@C-Z&whTo9X{2|NO`*d
z=<T}xlvoXe;w+6<pHXXSfDj7os4t!S;(IMJy%D7Yy(R;LAriZl+Nee!djn+p4CXSf
zs;=5v37u4+_SV~Iav;SOw;?L5;`!WgUrWYyP5!c+ip&SOf}CI8;)Gq>sRFf3ZzG57
zBTrZ8!L1%j0}C#jxh<hipu2G@J*2bUB~)z{FC4LuGc5Oz*~dI9R_Css);|0B6)@v(
zGy4Yg>ndTE;b5H^%eu9AMrgsE0yPtFqo;<%YQ8fgPENYji+4HxK*~O7_`;EtxgAMD
zuYApkZSS>6?#vk|lP{l~L{sc1V&5o@B=K&+tQP61>KS>VGnZNJS7*13&=yikrva_y
z5M8dbnWYQHbrtUbLs?;K*jhuCN#$QUPV)tA^*dv4gu~}k7&<(Yn2ruEJit3*seymG
zT?52i%pD`&mKGcm=~^aVRyf6b)X#5e^)K$%pe==!`eaQj)J9i>{uf{(`^FWw@ixG;
zmw~pWPnXg^AP%N~%nFMpt)v-t=0O3auE#C|x=P`-*YA496$Od@@e?KEW&-AlE=j^7
z`cU^i$j5}_a04*%HpvmSwU*vfk)<8lUM_N9RpkKRL-$)jG^LfO5Vd3Ktt1#2O~~h3
za78EK?(x${JaziHW`4BFCXn!fEEVl#B%DSMn=wPr_x^YoFXI8SRUv90UJ~rn8aKay
z(!>CvqW9)EF#C4*UcBkKS5C0N-Jp6G87yP%HJfdqaT_F&pbnFKvE%Rt#HT4G%;NK+
zM>Y2uAz-k!fA-nGwIQQjIPb?JNgH4W%2`m58Q?xkREYy-fXQp<JCUf{<_d)Ut}A2c
z&x_g(?nPDsxA~rX%=NY2Xwdf<5EoEHwKhdCy&<7L4q35gk9(e-R|VR34fM6>u4vce
zU`9%?VNKndGl^`S>lQLrv;n!I{{3y(4i(<=#K?I7zc&HANgo6=jGy;f1u)v{ukXi<
zJueC`$h-FjlsNAEXVA4DH~$1w2h07uMP&@P(H<s$geK|fX89(n4`>gAo?{k4J3Mn*
z3V8gtBrNr1Yqb<tNI<bjk}=no5<J1qEihiu1#@Y+7fAJ-F#kWPVE3&V>`)b+Z-coZ
zdqMCtA`3Q##Q-e-|HVkz0EjQQ2d=|`lZ-{Vy&JUvCIE7QE~fM+!^HU^sF@FA=s#dD
zCDR>T@6zYMzNcIo5)Z|yI)j@1ZpR{b?uJbA0WTU1rQjiF*Sk9`fDzy!J4jskkUi__
z`Cc#`+e`(+tGz0vxLR`B6p)3`frvavPWX%i>&jot$uP&R4C*jGw+esLQ)EGz_fup_
zvNxH3+*3kv3(QR_gE)(N^K^tDT^&r=zB{6V&-cbu_520M#SPcN=5ch7C<scJ5d}_2
zvhRrx`~lc~n;1kF*}+%B#YwV1nS_`wbv-3m15`qsGo4W!!EzdPITTPY4j;1HpR}@Z
zB(gam=TzHY)avuH*MRnB<IXQ6@CASnG4anTGahNtre1adVMcDOs6!dhKFxNTcKB`v
za8}1nV?+ga;S$>zsIE;dzr;ViU#x0SmJ0zuobT1<DeRhpXGJ<+mK7tXJ6z+6A{SC>
z_>^gn10$<kxRtuH-poY`B^K+!5<I`WI6<`yU+?-Na%u5_&V~=}oV$6=t*&d07Dm-^
zhGID0BP6kJ-4Lj)UlXxkgchci@l6QVE-$6Ldv}>k!aHfF;oZSDXsAw?k8T<Lciupc
zdG0|Y)i0W`!E1;LA+W*c!pZpL;iHjBX5=wmwg;*IV*xJCG0(P*QZOuYfKoP~>o7<h
za*DT`YCIZWt)|4YVnk@Au&PGfiO?-Dsi|BI`@7y6G{<((oZ4*NJy4F7n!e=t>~)k{
zfCG2orF=xH=TZC*s#B_YA$czE(0D%wuo>lSM_947nOZDc4}$4rBDE@W`??TWV9!-D
zzx^01m0NHQPpxn=>(QpO@=#jyO13N%rrFowocbqwa#~(5g7Gq$K7j2qGygO#>!?k_
zp)2#e_`c<1a-Lu!x7iXEEIYEMe=CUd>t-~LZxY-<HDJO6&#l}b;`QcIs^0u(x^Fj%
z?OODc?jXCLqW^aeWat&B%+B-9d}(cNnxo=TdWP)>_N1pRwt;5<PKlzw8oa8a1Pt{h
zcUzFdyxlh+yueZ|Xo<@XzD=$8-i;lwpL&Q=CK?_T5Cg1OJj9|#A_T}$>cg0oM@XY$
zun&g=`yn)C%I*^g0~^^d=RpGj?1E+=mdoINyq8%-IkRXp8_(#GVW*WKxlbhl7eiK=
zvW10UnbT*G7D@Q&GnDD?U~jR+RVj(<@B`GRGT27Wrr_2+1|8t5WbBK5ke3-Vu<S+I
z*knpokldJy7HEG0(#2m016Rw~(J>y9RF;0{0+N=EbFa*O%hwwb?G$7bv^vWPw%!43
zx=d~+L8F&WK1jyabQ$hu#Es2bPMitIp&!ZWl$2F$UvM(*M0JKDrW-0eDhs6tmJPEx
z+B+5Bpn2IkS$zhpK8<otwgyux-xRhMV54duW(&9pk`}Xaj8?mzZdbQvS7bziWe)nV
z`Aa$8FZ{;wih)y@7<o}Kpa!CMhZ;a84Nsrur7pe;vX5K3Jdq>kc>+|f6>22DAJ=UG
zfc2k{ZCPQ`fYL^v4VXTbLAEivyi3YuE(p&(S!RkhpvNkNzuu*7^ja#8?{~6HYq`E>
zajg4WMq~O;d2Rm?v-{N_O%7EZbi4=-Z2IM9W+uJ@-K1-=R3~!48D2=T-auy(Qv44R
z#4pGIA^rj|AxFZzL%$%ouY8eBjVN-|XcFD1nw>*3=!v_;MHU3^#{kNXJ%Mb-g!bj5
zf!>$Oq_eNY6V?keXP2?-r*i<_NB&Eu=?yE^3fy+VW@$wm4~#|tav#geOOAPRfahml
z8R){3%;tH~=$}N}`qDXm-LU$S{!)8pZg2paK;05HVacp4<a{C;=4O;{4D{0<N3x=8
zN>C9a&ogVz%yl{Yzrr$i5jko)g|!4U*QsAOJ0pa2kL$1>)9v<>iC|Z6lTkPTj`zSs
z^^r9T2Om3baYPYa-W5V}OpA2G7<3ZVsc4<v`>$-L_S4t6Yu-f75!Q9j#1vf~emrl^
zktF7iskhIp$o72ZOICaM*CjK%>Y6qdRjw)6P-<!5OCF(`x3p6rD^UpzHm~J83kSaT
zWD-5ixF#M}k_7~z?;4bLWN^mtC6z9hngl1yQ)TrGow^j}65$(zb8)<W4}uxPtuQ>>
zw=<%ice%H4Ej3a-^>%ZBPR+WMtLS&&04)Dd0D#m7Hcq@i0Z2I*{&JCc=_TDfYzZP_
za_W1tG&*vNL5wOU`oKmT7PcvD<6!4>rTjKcR+!fEpZhCvcC4;<MljC;hR3r18lxIh
zOLF{Rvmwgii(hzraKM+bI0*+<BS#SwLeWPx{!l)hNR0#y#DU0B?>v>fc!b$9Zq^gn
zZ>UpjFf%-v)WYB&U?c)OV^hG>XZH?Z>q}@6?|BVuMYnw(`edi-6L_SeN!Xa`IBIGb
zyIgRS`D3_bZMwv@t8{y^H+Clq9J!GbdUuqAGdD2c*CCj8*|7&OLUC3{7Vym-S6V>j
z>x?#2vQwv1bPD=cHv#niRV2O3_Jf^FF4Q>`TqLJ<Vj#V=w!Y_)uM?ONVLu|k`D2Hu
zi+Gu+&`Bl8G7@$A=Ge6ZyhfjU-(6LMA{Rh)^pCdYT{=b3_-jlyqHBgEG0<<I>$|X?
z52X0cGCvKLaZ4hM?-75M7V}A!(AvW~4-gMY9OF7{5_L{$aT%wCsR2e0BACxuS13JK
z<ieH_5u@e|yK<+V_z>I)v$Ug9%$LuQtjDcHNh`KOYyMvk_ieOgZI5gCLl5s5Da}~I
zc1JvdRae3uY+yT}yf=%v#tHo$O7uI!t73(mI;+w|E-2?l>dKR<1DOM(5#Q$%rng14
zjLF^MKiJ~P8tNZPL~=<-%VA)A&`eT(@K}8!RU0s`D@+Iu4q`hOfF@wthCzk+AE1C8
zVjvQQtuUEh`5Zm7ZXwr-C7IG``xdvW(jOOf6lmkn&b!tSYr1UaqJIg#VR4D<q?5Ac
z^DHhHM8u8|nK5?#3c!W^8f;Z*ckamh3#j0tDW*o3XeRava;{&2<IL)3e2);q<WC=<
z;8_#CAMQ`RqW#&F2L&@`g9ZYvBG^bbqrQ^>vGG9v`V*vO#NXvkWM};)m%lbHs$&eQ
z*?rm%j)swGjH=kEA7*h|ZCMNbP4MAAs)8Fk8wk!QKoj2Vfa3l^iS6K+=Is9x`D`}<
z!mIVu!WS05>!*}S#aB`)Dndu5oDH!0maLRknySSFK3ws8smdMxRltad{6^uuee77j
z;frNn!Di2cE3dxrs?BKI)G+{=4>(P}4q*eQ947))dA{6GGtES-GkMe{UI3Aw?DfvD
zL=BzvBGZZws^nPpU*O-gLXY^K2AZY%b_7C9a3I!y^o5(wE&C=jHd>|Z<U-X_VvZ`$
zFmi#;{G}}?{UfL_U+XX{Z~wJPodI_b*IR_aApMy?f$x>gr3TIgCfm}_v`2t$*25>#
zb5j(anhT){nl1qX{IiNJl(UJ8IvL?pXGX$|Fe@)AjdW&oi6%8*07ON!q&&EK?g}y4
z==It}`YzG!R9OzGIz?vP(5}l5lNOPVSv0vr=5Y!5_Xj9CX=nBxOv3Ag&{Iy%>@Ga>
z><QsO9(0%bLXTnqfM^4&x7sUu3w1v{GtG<rQB^x1d<{8vUB-}vY@6F@kqj1F!$FZL
zN<438r#>2#(KxUfw^V6_M9rM?6`TpGu4L@b_bdjmQQDubHDQzUFUvN|r<P#$Rq;M-
z@Q-CM_5-ovDI5*+Mw)55J@~X<C&2R$WEsY!h4nqix9rq>b>JX3xCrXJc--mUI#UM3
zOMI>H>&CtqO0bITH#RfZVizL|J+8AQ_Ok1jAjpfx2p_1^=3S3IQ&|kEOWBHKJ>rU@
z%`G*z;Ra~X!#2ZA=too^GAyjtX`glu8^4Gg|5Rj0_u$t1=+Rx0Xr?ELv&PJ>p_Y<x
zY2OoOw80EqxRhUQddRxqD1)|jg=wGO`&^;mv1aje4_)Wz&JBKM_8UE#;(2QeZCMN|
z#GBtUOuH;zNrU>kW-VuaV8g3|-S?bI!m8FkXCEM@XV3J<Dm+Uc#h%q)MVcB0>!mLC
zwS~Ke7L*$Oro;=Mi*yFP`4E^g;a3W}tTXP@*7m4V>_#R&O~qX%=;Ct1PSeG&TC?=Y
zFAO6qy1Gy9uXq9>cdb2iXXm)BMfL{&sz7UR|GmM%W8k2)IwuG3g7UvgpG}|s;zgs%
zqlc(Tc{Vv5YbCwn6|Gk}UzJ@cCbZr_bqtM!A8&~$a`n&Onch$xxa9k?C3!8?sJ3)T
zwE(i{%g6;hx()}ETL9bb8?SukakEqE=+VvBZW32sS&{4GYbLZ`ueRgJX4yqm^w0tz
zBy%#5%zUJb_9ECN^O3v!{ahYOst)Ae=}0WUn@hNMQ70<C4jKF##SYbcm#kO{6`rA;
zz}~&4#RjU@h7#s(lB)q(9$ZBL);~I;hbSu8Tq!neIaNvTK$|~3cNlc?aSg%k-w|#L
ziCDwt#C=Z@1_T5E7beVcUYCB_)3r8vvBQNpYqW#}9MoWFL9!6g3V>K83PJ89U&Y7q
zk!8$p{##wmUg>R0r1RT4OX}Mbsf1-SHZ-$)b7M$f(96+k6yqzKP3-rH_X+t?S&@Qd
z)r&f#__=XjgQ(8J8mZEH=N|coR!!19586&*HF~PKdk_XBuP`GeEAk-&C2ST;%7*|j
z`7*z*q*xd&@kuqmbND@#?G??TJ-g^8A&O@FJCB$H(Fo?jE-<$C5?yS`w^~qz#90$O
z#oT@nzv%ARVBkkayVhWDnX%1n2T6W}c0IdUOHB8<(YG#IviBo;E+3xp3SKU*zpBv2
z?13&Wnd_CwnR3>}nE>QUaAYO!hSg=A?0fsVb1K)Z7@VW!Uf!%CFCDUVS%VUHTpz&1
zyo-+>w7T8gdpA+Fv48jeX7sv?amkBYMxx>)SBR0d7273tO*i2prTBMd&MXE*)FIgB
z_U*XT3@|mUgnUMcvlf)?NEo2caS3vcM?`eE+Nk?0G%1WEQ3JAiGda0WkV|*QLQa=u
zG$ZA^###&&I6gt)A8yz+=*bRM#_T8uaI4}QUW^O2DCKX`&ZyAX%l9ag;Hm{rXh?lY
zjkhbU4X2m`DZ}*_qsF7M-Z66lHQEWk<*roJ9ye)%9B1d<_&_tZGnQJvPlp5Dr^%Yi
z0?_Oir1u9OfOx-;E3fLRzcd*^eQU-JpITBkTo?-0@?X;1&l|}o51yqju?@`{Vv1~I
zCyl{w@uqfS`sf7|rS#7ikX?S)XGFwG7xRZ+M|b{w<r^KsI%1n7)xl#tcwHiW{SG1k
zN*^FrpF3Ho?ZTqlt*k-me9B-USNj;n%!re{BOK#M{pGWsx`}$%1RkS8vDqniRxi;Y
zTm-kzuo`0s_#OfJ&%9@_<Av!dY1_jo^BIaak;ZZ_p&gQH(pD&)u6{%3Y2r#<&E&VM
zra@d}nAJVpBDsulq2Z$<QF8T?k9f`s_YpF7PX*-J+2TIBTI!7Q|K-z<{Ygz|xkjCW
z`K7nCF%Q6)z)qXw)pr(FIaohr_*G-7j{)qau9n(%@60&-Vp*Zn>fd<~eV5_;DISo-
zJxy#4u4q~StF#0&n)TGj)&*Pq(ZwV8d9;DDEpqS5I)Yr6A|#EjXX8d4N#EMZ0T24A
z*oE@)T|gmGE(59mbs;{}jPE&?$4{ir_8Q5SpEXS<&T@&6`)M_ddY2jd?VMv^8xs<3
z(W!fO_fSTNXnluRXjuIx$F`2^Iv=>H>g$Qo!^zm!uK4}D(kG*qNnhu1ON_g3%g9FT
z8ZgX|w(!{#DV@7E%!}U-RHo=lrXs~pke47wmJ*>qf8uJXKdp_P@_lJ=p~v^7-RIzd
zr7f8y<1*a&o^U{iFy2XOEuHlWfhL;BQ5}?z8N|*=GQpy1^kj|f;%SVZ;8!rGOQc%J
zmyvlmPP3gy@!z3gmGNy&0AB8=NIGXVjJ9?)@S_pSfhu0ylY=r#d2QL-ZBVOkGWg-A
z7ZM#aY|fDZb0FeAe(^|9>SDe^SA-(2HuO+<_;@zHFpvAIzePp=Ax=h41f^ER4T-v}
zi7<Hl)$+}ho);Ezfy~_e&0%<xIu*M29&A{_mP`Vs-QzI!AaS}(&848TZ?%)tFQIUK
z;l3O@MVe5Yiy`yZz1o>QJDXS;UlZ_selT-@J*HC};hG3!0kyA^rHe1*XgZNqu8$}I
z7~qP7QyrpZr{^t87R-${$#<p=b6?GNIL4M16s(=bFEpil0LV{CycIHn&^3s?=>!3N
zbmNbIp-}x?@`3KQ2g%aEx2}8m7yxaFN!aAjno|U*K5?;1ZA^X@39Y0_e=DQ~9m)<U
zk!Q<io9o*w4pJJd3bx9^-rAE-q@-S9Yt20W@xm=LpeDI%g1FKrzsgU^*m&3H%DGJd
z_LIVCJTgOST9NcweKnF2oZp#`4R)bQjlauxu9nyC33UmS>{1O@9_{{C9x~!se#T{<
zNC8xdn?Sd5`=LA<ZP(!p&ok{))|TC*ATxebPhJMknWg@#JZX3lBHTc3WCPvw7en8^
zsWgcyT&fNYzwo2)(jGy-QQ-a5{IC@nU+kM^+C27Tl)MumZ<L|ufC>&+`1Y`kXUt!x
z>`|ld1rQUlCxF}{jPAx}C%Bz<$V3)+Rws5rSo~RwZH}n96Am>NVfi}TImO83nYn&Q
z@t0OodJ1<@8l1G=hP6aLiK+8)9p4qr&W3*soL%Ed+iK6U;1ldJN)2?r<`{XcP%DzC
z>kz8H!Sb3gta}F~*^|v)c(NeT{64D|8M(~pC$6x}_D<uhSoe;@+K<P_b!1H!R3pj?
zeMU!QtDx4>2`rd1D)Zc<(bEsWoN+EAVtAL+O5F4t=|U%3jtg#c|I&0@<VeZlM%NMp
zory{dBl#dmhnRi@MkOfJp2;hX?73X1^`&lkEgJ~uKF*A6w#_T`7NDISq*7i$4!1Ir
z?jg;L$%+CymxAqcv}{><rh9Tk)l{pWGv&FNd`I*r&PMMTq<0TL?FBq(1O~4G+h4GP
zC9@`q9@GJ}ADDS8QGeyTZ|t(Zmj}~|_aK~Y4c4K(m3YtI;P(nh$O5lLFz(w`aQs|1
z?sGg5;Ip@njrLic+XP^aoC@8(N@zO!^HRxpu5c|0e*?!ZfWdqv$=lDfG?*N9qp>#^
z?%Sy+*SYdiE=W83t}-vMZR`q9jpsOf#R3c9uHbYIzQcv;+B(a*w{n0ro?g9>WvhRc
zo25P1=S*+6FHVcDj12QAcxvxb#cMUeFe9_l;#h8Ay<k1}WL=3jd88%nEKto|cO32l
zK7j>dP<_<<^sHixPFwYs;JI=qR@|3Or5P;#M9l14xA4$Hq>c}T{l%D8^2Y`nlsAF8
zltc_mqFy){e7_3-T!D<5@WWAm$z|(@ZQ2%I&&04Lx8|_B;)8;soas)StYU1$*z<jL
zg~*H_s-r^#qNjEMNT#^KL(RGUn1YeHdjgI%9Ih-wHiTL#w7_^<)agI0O6e>7iVM<N
zj$=h#WKb(oj;>R=aDvIyt#)?3m#qEc;?y1qJCt`yNNE$D!RGK>7()>+|KlQy&PyjS
zWP)?IqRTxdaw)1VsOs*K6sR)K;&-FCaUG8`$0O&#%9CVUnL+NP0hOTf?#hq1|7D9!
z{c+c7`=B|Lmj7UaXhBb_yxa==9G8%-aIMriXCHo3`<66$$%xtR-9SkI%FPbkA=O2L
znw>NBMFeYlVZ@bm7ea5mCSKG?9}O~XQ7xXXio4j~UW)XG<efSVMjl2Hew^70;IftF
z(=UdBAOhDQ0Fm*p0I}Mu3#nAAEY-^Cm2!=CmoCv&FnbTaa9EUjwzRmrTtUp`xto~*
z#l@27=i}eBlzLw7Xc8UhV1Navpnfv|^W5lI#1}5lT$$&hbv?2AHevpXBPFSBgEK3?
zk&Q%=CcHKReHhm9e`2208dk#;{k$P~WX`r(-UT-oMd(3GiGGxAFQ0hSw`9<%)tLsA
zj_E_cJ1YF^OQFz{PwgfN{EPXP1}{Gv`Q*??OL7{d>Z~eE0#82msqo0wPsVD+pVyB(
z^uU!I$@F>KwFm4!37Q!R<*wO&$ve%$T=G_0KkrAK{mOT3_kbrh`Yo2j=a!u}`peUb
zOTt@iG0K$6Gz9zTp1bGuT{-yDdQ|G0=3o-o1QxUl3wdwDYw|zD?Wn|A^XGnIk3)6p
z8n_t86Nj`Td%J#X4VSw9!Rd@R0!wagUbs!CNsbIK)N<Tv-YvDHz{4~ggP7EF5C$2`
z;dyOg9wBG=LNE7tQJ8s)@AIca4^`Y|{}|7^+`XvO`d7AH$9;s;c@()@I?1thFU09(
zu3ps7i{@L~TwlQ7|HqvL%!akfkBLE=3LLrMf=`9g7ar9E8dA{J`(4Ke-5n>NT9guw
ze`RQ!f7Y~Nk*_G$#G*~Dt$+rPRrJWFxTlsirSR4?dfB=Z9H4K46`beB{=D{l@pXF)
z_e1}0>Kg1Mtfvq6c~@4no1d-RI!YWrZOBG_W7NWV*-9G&`FSN5p5`0XiR;a&9&BQ|
zYry?<#-r{l#FfS%r?|zW_MBUpFaGIPTG*}9uO7sS=4`|~uXwc6&1Gu<=)BY^6e<Ps
zBdLT|FK>s`Bi$<V#rS;n>Hb%lP=dhk{;GtUZ`w0wI*gO33eK$rdoeTiUHg`|nZKP&
z#A5waJ&PH+Q_mK^&r9qNI6+HpTN43Jk_7Iq*4lW>D$_}x(<JE#HU#4u3MU3-R571e
zAEuTAOJ>D_bK6w2`}EnkG%FU~i2sz`AUhcwd7fomO}Y$Zku8P2^i0&dPPSvX^_&I~
zAn{N6G=`AVwgvU77CEM-rMNmivqZgLSiOU6q@mG&aW^z5`%fL>HAl{m7MO%9&JV0M
zTUe)?&g}i^-i^C4t7T!oKSW1(b#5Z|7nX4K?*dq8(g?oWI_2|)mCnhU<vL|8g{~>t
zKX(0WXT06Ba&d%2hPk_rg7!>f=28ix2Q4`d?E&`}b<eQ+e3d24uK<Yujl9v*p<Z1B
zF}plt#T%DBjfKrrt0WZ>$H~M~RX7{v{uxw-h(ov)3vT#==UQOM_=)Qi4$=a#VL|Bs
z1Ji@!LwHk{z`s8){1UXXpBH<@AWyY!y0jezp283Mfhqc=aj9Uo0N+lQ4K5;kzG*%Z
zddbGufB7Fw<20+ORP;`5207use(pf<Yf8n}4#eARwzFs!o+uNb$9`b-cw(81>?n2J
z5CtR1VLLvzytY)WkH_UEqyKexT7#2y<m6sPQeBGCqBsV18_+*~DximqA^1=7Pz>C>
z2eAuXvo%_H^sJ=SWueUTUyWm1pDWtMAFV;MpenX>-T9xkzJ(USmZtj3a@)nbkFO9B
zgsQPHp^UsG>c}wDw*J|NscbgB_Vv%_qrD?AelLENOCCw~U7g+VQwUqOcH(Ecn8r~7
zJTu5VZZ^hBL<&C3u7Lc(p)!Yj?TCq3Z9dT18)s+^q!go%sQ@HGX&nAhI!flo)@?jd
z02@+AC7SUQu>AmX^g)~u&c=$kAIRbM`Z4Smsz1#Q`w|WFTImlN_My&sy(^8PGNRMq
z);<_OAhp<^ng<oUUShlwpOZN+R(qq!eXi{IK!nZkjiLuYwS<&joK|*likjWCLKBq{
zFJ^t`(m9Oz`E)&-Qoh`|q(TV5mdAjur*^B-i^$S7GFlxZ7;&c4Sgqr25V|Nq)*h-e
zL%mXW-gqow2T;q+q2K1>&T+y5hOd~XT4xvpya^gM>6dF8c)a8;qJbSw!t$I-u=WEB
z^kzg0eZ?~<A>u?)2dB}yl@=%#6~=sDqNg?0&0Vwm-LmpX#CM9(!gjF{FUK?>g`6{<
z6;C!+Pp$A?+K=&get2&{?JqYhZPIgka2|4^v4?Jr5LiA5TygbO!|6rcnbB3N)p1vE
z{~*8gv!WXQS8#s4vv*FKKAoBKQfqo-q?Wo?L}n+kbr(@g1YC><AcCSIVx};>aE%Cv
zw*>+ua^sBvODIzNMG3Q_q$P6kssK1BqESA3!1Xy4Z9oFm{cMfMUX^_SmkKT+{C3I`
zB~n|9MDV4$%MP=)5q(#a*N6~Qh!6?Dzd(g}19f5l9U>Z3BqHJ_bHIe@acl?3_DX$|
z3({M?r<E}uVwsi@@o2lqjmrSO`dz^^J5Dzbz9>;s<l=2&dIHAe5z06<@`ER%#zUl5
z$;>EpF_hSv?zM6LwM*z^E;4p}34G3r`ooH(B;;Hx4F`O>qC2u{ax3Azh=}p%;XS=7
z-W+8KpKEUPS#=o^jX^yNdF<CahrC-~xUM?z1KyV%HO^ckV(gLNU*CFl_OM7T087gN
zAIY48(IFxt1Oezy<RZ{ld;nbb;3bT)6S-jtWAiqMXxxV(b!$XS07S#(C3yUCBDN7m
z%ISh#iQx4jGK$*(J_bh4Q7#GpZw6Rd0F0APbT9Q6xp6#aXO=dr1cYPKFCuTnD{JEP
za<==fnyeA|=IM||yQuH~?c1p!oT!LIiXqXZj9FHA&9U$dC>ISB+k3LL2Ol6~;TtV)
zOdcwLdZn-M1a8^FDq;`(C2~<hmuxF=ig8Hmg6So!rpPhF4*}cmxaFy8JdqIj8$b2A
z@51j<PU#M1eEnblf4hEEL`Hels4029<wfz|Q^3dV3p`~Xc*!l$Q_thNC-@_xsiApT
zRYOztpr-Y~!+Hm`^t5zTG&J-yG&*{V^8V`uwEv|mUZMZ{3omc4-wa+*`kxX4ub@2x
b0^QJ8|Mz!zk5pP=m!COhelq{W#asUe9`#BA

literal 0
HcmV?d00001

-- 
GitLab


From 46fd50cb8090bb1a2b89c7bc24b223ef3ba73e09 Mon Sep 17 00:00:00 2001
From: Samuel Van Stroud <sam.van.stroud@cern.ch>
Date: Tue, 12 Mar 2024 12:35:28 +0100
Subject: [PATCH 04/30] Add paper pdf

---
 paper/paper.pdf | Bin 0 -> 242563 bytes
 1 file changed, 0 insertions(+), 0 deletions(-)
 create mode 100644 paper/paper.pdf

diff --git a/paper/paper.pdf b/paper/paper.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..c69cb401119550c0282de6948e185d69b2ee3d92
GIT binary patch
literal 242563
zcmaI7W0Wmj5G7bPUfFnsSGH~2wr$(CZQHhO+qOORb@!U-A3c5lWUia*+&p<Q_Kv;d
z#3hy$6sDr1W`ZJ~T<TjKTAdvnhGN90#kbKnhvMYKr;)~Iz^4&5vvf4F|2<mjIT{HX
z8Q2&a;nPSOS(`YT;?uLx(&BS-<NwbOl!K$ak)9Ql>v~l(xFXUT?(ofIVZtmTK~^3M
zq!hlds11;OVT8CS5J)`0-@H}m2&hU@0)kx9dh&h$3JQD+A!O(X-M(uuHN{0II}|K(
zjy?zFZI6?zm*=dPskF_VxBDrd>(7&I2QUD07LoXWfpP$X`ej7qv*1xOqJm-eyu)w=
zvcT-v*4ECepT4HPWUD)9Q$zf_c#p<^c*F0~qw-N-J>*aB9NGYQ#n5d|j3o-udj?pe
zNDv<Yd=u!VkHcJ{u18$E5@)2q0m4R7(@72txce0jA3#1mBR%Mt5yl09B1dSALtisB
z0c=@=a%iT8&krb(y~U4_XOwF}JWL}Aku#rz+!WGiG-;E<qVI=VVE_=BtT0O?4Wb3Z
zw<6S|j!eAWs>$5Q-GlX~9GYF6oSW@**9uU-+35LjMMlvi<jhdhb8=?JlM0J!Th#~_
zx0jNlz=U$w;wu&XVr|%}G=56N<-PqB>E6X(vB`2mijnjE5aDzNMGjO=b9qS(^W;4L
zF0)?p_@P&jOPv&~)g$NmM%>p6V*8~AO;7M2up&Z=P*P=v4IkdiMI%#9#*58XdeV_2
z92(aHq%H#&>t7X06s=h|H;1!9y=o@!3w;;;puf^EUU^drwbf|dAC%Y|@{^UD;;~4S
zrkYqYhfIiAeY^4f%%DjtRO7>Do!clfvRg?vp3zvVf9|~bNa!r{{Bu7b?^U$~SNtFa
zc)Nn=ovWi0n-deQYbl}6L#D}9y|R1%(s>?9%=M{)N}=%nZinj(&lKmI+^*SH@p`Iv
z2JFQhdEPCq(H>_8SYH9mb^E9=k%#vcM*y(e(o7?|i6~hu2Y@H#1*P>xodqB61zH6F
z-v!$AL%Ric;A0I1gVaUX^>@SvLX3q^_FKwDPxP14r6>h_%0*!GL$yV33^>?@VTGp-
z@U{is1huCH%J!qX1(W3cBN%A<2WQ?N247(m)82P=7JC#ADkwh}gcL(OU@#U&9jq!y
zJP)cAY#o+~kB4_8mtj`O6rddzmakwIZx$TguZ$LWIbcdxBNf(T;IA!X=O5_+hh1V@
zgieI&V65E$TP#noZkQe4qgw`7Y`{>&enA)ntiY&Of;~}!_;Ar!6j4a|AVz#DQIh!p
zO@YHvheNJt-+}|#OxS50w7`fyJ3F>klva98SZkhY*p;Y`V66VAJ?;ZDSFKK_ZCJd>
zn30mbhr6Ly@=xtgxlguF*iVRWSU+A}#7Ri%Kzx3nzX7$0vcju;ro@^5VEzO{9taZ1
zMVLxG;s+zK36j;Nwr07;x`w+ZvGRM0$P-}1Vv7P63dW~41}h4&^RkPzi@Qsv<5$E>
zjaeVs(#L3uQj@U6bBw8s5*|W^V-98R^Mw@~$|@_AlTjr)Bzh#Oh>MF0i&u-si31fW
zi7$zVBvO;Yl694oD^!(sFA|kUw#T){xTm{kyoU=Hq|J4jhB1;^5mtpcCqMf>OQ%&&
z%EKuilhr6zD_JXamw)mHRqB`TSJjIeg^4GPBP3Cf&nS(R8<%&NV=tkUH<gbl+LT9E
z%q?9m^)CsU8<~SIQkQedk(6hYeN;RwMl8OR<|yG66FM5S(_~5LNZ1tS6vWU%z&Hrg
z5(dbPzh=*kpfAX-q!uAm$mPma*zpTY3#J-R>K4bUO><6S8+xbau?%>C`3whRP%%_b
zyH=DHoE43&P|jH{b>_YCK`-5}j4!TNE>%0}V3k-E;gn_7bSZpfK0Dw9M|Z%fln9>G
zPUC2)u86aUyOdqyZfS4Xad~y|c3E~^xtly?Uobp%Ju*B}JjtBLV9vrGqeH-u!%Aat
z3NgvrD26H*Y@Md&RqMs(P1?5T<?IE<waG=!VHPMCd<-KSG~1Qi71^cSrH1*5rH;`j
z7$<NP8!9xLwVyFB;4ZWn9~;{lUreq~Wv5!CgJ;mt*)aE*@HN~vj@FshwpxqT8fqA;
zU)Nr?_*Z3C&R3aNRjUpxPcH*q;4A|(+%agGgqtu=cA5z^-8GLjm^EYQ)2ZcEGF3V+
zLu(*y2yRR@b2aB&WnKn4M>)G(EnS_g*VNrK{tWbu`(4Oh*lxugqwluP*A3hqQC_Hh
zs&SZX+2?Zvu~j*o+rJ-P?o--X+u=IV*@-!qIkb%#*AlK8T|!s8s^G08RgY-xbg*@Z
zZNYVXU0WYypLzFwjafddeXd0{gm6W2vf_4d9XiaijX5|^#d%=5+jC-Z;<;mZ+;u*?
zb3HddPTz8$e)W$`i|jiro>bLj+}Q6dc!%ob>V)a!^tAg#`b7N%02KH(`X2k%@s{)!
z`IQ9R^8Mw@2(Sjb0Cfn+@E<vDpVZ=R;RwW}=PCSms8^~~O4P^z1DAo-OnkY`x!|_%
zcD0Ak$?7Tb`UUY2ni-ly02zl6*Ay3%e_4<$^fXyAqr>^r`^hrLJ$E$cVnC^HNT*dt
zUME&3RcCYXxlg`-c-MS4aX>?2N&<n%ju;kJ7v_O9g7!+Y+OA79MpnjZW}Elc?;nJQ
zW=4ZZ&0eQ!L}zjrXXt6KdGB`^lW8jrY{E&*T10IWc`>my>^1zbuXHc`hWiQieV`b#
z7`{kihHIuVFTB9DP%)na?Fq`{pMt@M@q}T+Fu?F)Bn@>CSVa&|s^%zJNj;71Vsd7J
zVhC%fVS;8d;jn4nd>kppPvqf{w;9xiBM<iccZs4Q){78pJR?CR<As`s`A5`G-LEyb
zWwzW_VDEq!P@-ESTw`2oy=l^R?c7LUqATS_8?Z`LZLVqF)o5-!PRXz0xu&>sTJ!4x
zvx5o6Lc1nPV?(V_rLo-fHKW%iJt8TBc!*`Ct)@aJMuSFmr=`zs;2AbY!j(8h{7Sq+
zoG00Cg4)aC$Lgsi0ZW19If*0XW>avLXq&Nh&Eer2swtcpWk939xlt)Ui8!gIg?ih)
z&RkjPi+P-RjoHn~)M>;y;VPrqr&6q@P3}TrL%l?!CGYwQZacbyqszH@@nR`u*=Ln_
z)ycwT_qrLZA-J3?kju>jmuZ;!*>mK(+N|bzd%i2@IqYTZMda$n+TP5ZJ=Y_*6T>>0
zDF#=Pef6hn=LLs~nW{r0t*R)b_wQO>2@lZ^tt;!H?FzzkTr!@NPbG&qkNP}V6H0Sb
zQ%TDN2O>LI^c2_f(%H7#BgwMNM>H004OgCB{wL9q;oR7mf`oKb&NTPer{sFtUiFZc
z%gk_2*EY`DmJ9XaX3zuR$;`9sOJ@77-QKHEOT-9u9Zi#tbNA-*#iGZfNl8x3Guum(
zmR;NY+|Bw98?DFgq0&r+N%fl+hY#25`w`ZIH4Mx!%yxU4qk?yY%f*=0-c^dr{k7z#
zmJ_^B&0+WXS2=h)Jf?TuXOYr1B@f*p%i@Ra_dv(M=AKE<@%Ou3;VHtE+>$}Sh?ER=
zHZ~qC`jt=n?%8O=r3unZgpa~U)pgKN1fI{!dzTBPx8-5c;n5+UQB9-vl?T^_(~9fh
zYXD0=3zCh*wprK4Pxe>$iNI#a2+|u~7+wXBn)mr9_Wj#-p{YGR%hKH5L`!cd8Gv_m
zLP9`QLPB;8;9@f#06iYZ2nmS}GC-*68_8%cAs>{Hwc-DgrN3wY30vm>UE%)U#4R%;
zJ>&mL++Jx(*buSVcfZt>^2lO$<N4y}7RIA)Z@6OJ>eZDnFLOI?pzD0T;CO;y5HU2=
zi}&{eAueE+)xW}k^|;25$zXqf<ot;Cyq@X&+)EYm42r4tCC-wNfiUo8dw;$^kFXrK
z9_%T4XMM!f;U?}+=G>AnOLE5_&#Zo@Io{rXZP?uKrzgy2yX_5}eekG&KYp#8-N<B0
z47ADLYV^kh^h`bQbpJeI<@|eo_Oh_<5r|TKrGaMJNNv)ee@~#xRy%{1;)hT#Avm0Q
zOrmA>qV;2oi1GPI(b;$WvN^S@nBkIHZ{<8#&z_#P`v)%0{x8Un50ID*x?8@N-sWix
zOcjkJHdWlA+AsEfybZ8FJmsWe`Ct4SZ1(DNRClk8=WxypJujeD?fHY+SQB}1B7I$f
ziaPyLl#GDcCyuuzWu8Hp#V4`^mjrxH6ghEi;Yj>?5nq!0U0L{3of4jh>R#;5b^w<d
zDmGg20cU~U+L9>cUzH6EG8*A}jT!&Bq!6m<mPr_4Giq9Al#yz|;_x=)b}jm%NN-16
zuN(fV#)cEf$0Hd#the3IeMKgd&E_j81qHiuR<bw@BDw&lI*=IRY$OH1mcb#(0fSj;
z;QD$_fO|SP%Lk46r=t1EQdY_tkeOQNcV%&bwPzMa;HmQ6sWTs$I@zz0G$zhK2(!(;
z6}ttpT$6LxHwtvY%>rNLk_Ka1kv!=h>8ms&c@grx8wT%ypo;*!5*z=vKc;!CTkC@F
zzDwSjh-TBOpgtoZs{g)Wk4(SVsVRA2amhbV&Ng^tSV(_72vS2o|MX1x^g4`X?qU`j
zrGi_h#Gxaa8Ud1AG~^tn8LkVwJ(enJxq=h~RL$`aR`rFV0O!WFF9&-wIn0XW!lt*n
zNqXt0AzV8uOR^CYc`r(;o;Vh3x$C1_DwE=MAWs)|NByxw!5Jmp!2QH0>@(pxncGVR
zR{4v%2&BsmyT%`IC5Ktx(6>m!T%90Mz^G0Bo)+288DJf-h`+*%3G_O69dNka*Q%Ez
zx`O*@TF@uLQv!G_Hvr^%AhiFthF@*}of6$*N{k;|y<`~#O^%4cs>>?JhX@w}Q8y(K
zL6}rVqYU0&rA*n&xb37))=ZceZFwnsd_=JbrZ~B@pot&CC%w4Q2W`>kC+XV<cxqXQ
zYkwsTKo#*B;0<mR&{jZB)tnSA(&WH~7<{e?YFi#$Rh-A4#Ri|Mp-_t4mooNy8Mwt;
zDhINXWQ{T(HCAFsUD)$qCk3#jo9_qUHFzuD?)OnISHX(3sfw~c69I{-%Em05Da^wH
z*6g>uFB>K?`GD*EG76~A0Q6~wetjt^1UJ$+6AYuE#Sy>*@FYAfPw!`hAKU(lH0uf+
zE;&PA+jg*ns-v!63*d>(ki&8KNvY}8`<2{{+)l#kE&r21^tA|^dq|;CoLnixOBXvL
zijVkkzsb7~t8GefbYHYT29EquEP03`TjBW1-4F67I}*Ug{SO2wT%{Qb<4wlI4y6PN
z)SPS-B!+OPbo@|Js<`5Zj%@v4N&6r@Vqk|pd+E&cov2OP^1bgZa6dc*5B}!`pKD)D
z*B{(m*4t%>y?f?B3hGq<CY_L53+GXiUT?DNcbEe`+vAYRs1n`pk`MR9mq|7U-gm$=
z@It)U?<edR&>m-g_Fgmx_vP^`E728Az*H-<qk6NWV%GBn>TAqK9tyc!&4+!%zuS34
zFxnF4(xpv22-kT=u@WI*KD}RnFW{R#T&*IX>yR%11GH9LeO(=}FT$8$mvwM2<H)os
zu!T1+XD?OI4J$=F#2!!>Y#r?nG^fz-l4pNC2YF~w3Do?ULmpWRNB(lbo!u!Fg8vr-
ztHF<d0GGjRSD+>;?L8D&L08fFB11c4j|m@QsL!~E7ZK5rDUKXNzBFmN!$dscka=W3
zdGCOSdUDZt4pyX*QvhEHB=N6ph%D@<u~o20ukX`pFB{5aYKAfjM*mWzba9fYF?d!#
zXw%g!`W=GD<B59Xzo9v9n|!W^9YQ_cvA%iaalU^V{F^3E>_JuHU_9Ys<9-s(e6Bge
ze=Ip(2QR+dlV`*K{O>jN=vR9U*JS;v#z9uO)=q(<#mnVW4Y_qy7$AO^W6idr>P86>
zDZ_P|ec?*}og<t(u)Juv?9Iont9=CAqe9lxUs`Ca7~t4+TZm+VT5SKLKa@HnDmi->
zlD8HUV*`1OK+#9EEO^#N8!bd$yO>;l4ha<dI7#2hGY0nBT;jM{Q^1p*GP=0tWiyY@
zwtvKGNx}2+LcQ_d{Z4hJHxzCyND6^D_{o^~-eDE^!T>Dwm;Y+TReE&t{Kd&n-|MVU
zP+*t9{U-VFD+KP8{|_|Y|Di``243-ZXsZS2P{GCrx9tjMIIu}&fXC8G#m6?<N*D1C
zT#3QWGZgF>^h>P@DSPuMRc){+o&eZ;8;hO2uaur1YCPFrI5mgSbb)2;`eW#=!PvVC
zNdU`-e_*bsdPWCwbX(MM-JDS0T*fK44^Xcj#sP}v2p?3I{}eJ1gdT^mD!HJS%eD1c
zbp@H0<(s#Lv!vsbJSy*+&s}#++G9Gl&8VCh3}mT`JKC(M?5K!u<J<08dX_!v%xa1o
z^~}Fi+&Ngy4yqtuK<i0kudvZA@FO<=Ma=)@9|;+Y&+>5ccU^NKmbSEUc$EX(XO0|D
z*%(+6XIK@=;reuSb3wN;oT0FDK*8HY{%RaJ2ECHFjLPWmO@?!F;2M>Pr!daM)TmH#
zuYw`yP6Al9#SENsp)V%hU3<$gO}`nEspNZ>cFz}&i~giXO6RtaVAaaesm`5|Ieq$g
zq5{Bdxs8Ut7<-mM8yZAodCXu>eu%=Egl<E{D|ymZGjzi1WnN~0tswEXB1D%(0vs2a
zKga;nmT-@*GD%kk-^E@q*Y@x*0N~DkNiQF(r;{f*X$5ob+wc(EF44H_vtopKzrW?4
z$M4-Ra?Y6&4#bG98}A^F%)>IO%8I`Gbl8we>36j$jA`zGLe%9+QlcVQ1<O<r^`Xh3
z!U>@)7_3Sc*KTo>a-+4s6uolqwvAe0B%R2`iivPUdS>ehYzNs2EV5DPcva5Pxj7b`
zedP7SEFr20zlUHUpJv>pb6XoD;f+Fudh9KGN{4R^zK<{Qa2?@GGpUJ%y>lYHF^t%y
zP&VFs^9`-8G0o!&jK<;m3$+G!Twf*3HUlH+N+jNzC*n4NoKx=Qnts^C>^i|mf@t?%
zpV3aB;6r>W`>5pLmRJU!KN{sScIgd1foS4BO9+4(C<Q($n><>HKUe@9df8%$?>NAC
zGx?6`578udcIGj~fj0V)oz!2H^Xk+k({4z3)$#RU9dA3WzMazVGJEP+BWE1(9u8y=
z9^a1U2GC`A2pW5?zCNGBWO1euQz<~)A#PXRT~Vk@tnDOqjsEjTEbL-B;0>19<#Iw;
z1l|2wzE7co!|iJN%rCa&eT3#-b|5|Q5ErqRy%q#o=H3PY?}+AUzQDmf`4jIa8RqMd
zVsSPiI$+G<sP5s@<h$5YQPKk=o`cxO5ucdf8Q&f<Cy(|tR27=XL`1$kEGAyy-^#Ga
z)ypfV%9M3hbwJJ|LNwsWVz5SLh$@A*B*Q3^?fBP;<|Fufb2|bM(|2%ne70vU_B*M}
zoy+qkTgf1#&(Gkvfs{>^^-!|Kevaxp35J;6dS!<Zlub6I!*WWV)uk->vp<8+INVjc
zIftU6M%79a7{^tB28*DIL3WMP`)d2h^4Cv;6ZC%ruGiZC(Go7@)H`)l*VmQ3ub>(~
zJq`dVGZ=WD&+z$-P;)t-<#b5=;HTiI{Ua3c>oONwPxSc#_k6gyeOOW0r#xj%{&GT`
z2izHE>HFsVy4tuABsKBoMhc$Ji>D&ik{Bzrv578J=4<w^=-f0Ksnnp1cFP<7UOY6g
z!p%(~D5k2zyK5?QANeo|0;Gm46S*3y>xudLBDuLq-qlvUlXSHkSw6&tni&ao<hTgr
z#*pf)ec_0Yh>S6@j`}Wj=N5hFmZKh$*928yyxF>9fhgr2f)tm97c@_YPjMNK#~`P;
zQx-egXW}y$m^&gM8mc<)#o8|3^fVb$atOnfM1<eYjsk79g)Q3|wWPjkig6v9tzPTx
zci?G`>>hWlh0~yr<|+`g9lpeDX1>)}*f>Xg34h*aAS~Z7!6XNF&sf$Mlq~|_^)etU
zq!9+=XFdU%=M<uMJgJe)5rkZT^lWP`LV;<gvhv7thLh0;R#(wZzAg*oM)m@AeyWUO
zj0=B{Ifj6C)+~5_WxhduqHX>26y}pX5L@%4v627jmQU5KFz9~kj9x4%Mwm344kH;~
z8#$oIn(lZTd_z6LD^0m~=AVYj-yflPPM}@;ubD#f*+SnCC>Sev&%O@^9;c+M#g#=U
zsT=zIw&l?nP0HUp>AemF9=^lzte4N1?rf9s9=&zRiILXYsZRz8<C?*zeLG55I;hUY
zJB%y#a5DA!kbrZtFZ=Z8Zg9#U;zQo1-Npvp1)lvrgd5S;`Xdy$H>44BPar0sIX-Q+
zbgQSSdseNcn`)cWN{FM*J1S<q{$!SBQ{0hbRbL%otHd6LGbXbgz5IH=6M5P_vrYz@
z4wGxStRNBnwHf5yTEsNP#JpKw3J=l<&LSgK=*9?jmC4i7MO{+4R?2fJz|WXbrTkP*
zD$a~DXl@ueD6d@xv_~&1V==#Rc~s7w467oHBq$~I&-N)pkF|p|C0U#WBi{f82Ai0J
zT~=pOE4X=R(lJuwrN3JA7`-)1l*Ybw5>=p5;9$V!JdO1}ssnsuYn(=63EQ;-(6$kB
zFFj-eA_YtvSdI8TH=V%nUlepqeN3?7Vj`5GG7FJ|S|I_c#2O<D+%!oxYoQGktS}u%
zUUBx~e1z?o(gZpnWgte5o2aY)MOmS?uwpq47uq~%(rA@(#%f9_Me86Ojj?u11G{uM
z18T*%%jQH%^*a2EvJ}N|ZifQD%x=UF@FeN_fRaJlwpI~Gng-m%?`)#5k1L$bo3GKY
zd4GPnmW6hRCKjR6g5HpH!<0eM1cb}{TzeU>ba|;g55kHUY0&J9&?p)6#!r4k4EcP0
zl&~yYC&w$$YK~|(vYbh0F!3$_;+=MsKn)rjkbK~*r2@;j8hw3emk@|7qSDap_~wUl
zv4&Vof8-}PyE`fagMPuAoY`VtNb}KV@-*Xg3i0HXqT&sLlF#|>vwv=s$#_P&bAe_k
zajDx;m9gs8|MJZ#vvhkYqv2BG#rZTHb5f<`Q25bs)7$&?T>85fcj2L@yt$BkUSYpg
zL0-nOHR)-dxOS!0Cc%xP&^6|&S>!8ZAX(ZOA>Qfb7HE8x2vn#dZXJjRGtui>E8<%x
zVQCZvh`FimEmry_msVxTGKm?M`d|=P>D`z({Q%QV|CMc9Lr^MBk8{Oob`>_jHex<-
zL!WSuihs2eD?`1U>Nx9?ps;>EXMwmA(yYZ1o$p9ylGk0$i|+zC`z}S`<7JGg$Dc7h
z?^X-lB1EYX`p7l=Dmz*U(P&sCkc>L=15;5f9Chrsw_G{}K*XcOuKy`pXQw*nVgg5i
z(J3p_t-v*cz&hLh;|N@uC#QMei!1$%wX8LoVQmXVN{H0vTGnwc)r~S0k?K5A9^GDc
zt{Yz3xm{!I{bxKR&G_nDuYRo*$=R)^w3DeT$<XzA<S+A`yJWKt3dKC@zb=g7dWmvH
z9fDL9Z8$1Nj~!>!=O9;JcdhL4Q|X@w>Yo&mlu9fW^|~*$stb!#pa>DnBxTC%&`um6
z^J#qqkW*m&QVqf}uKZUx8-bF(Ken3D%!%)=bVyDyawjgO(>I&^c~8N4Wf(amwF|SA
zj<R6Sehmf;Qp=0HSw*)oj<m_DK~aL)=tXo^S}_Tg-6$?01G#YOSm>6972dOVFk-Ie
z(5dy$Iyqszr5&x(x#c`hrE<Br0aT*JgyC)*($a>L(g;6{S?I31&`(q)2-^p;&x8jQ
z_?D)S5-W5djk>#{b~ENmH_-vnn~QlC5lRe|74oFBQwiVli|tAFy3`%Rn(;T;s!pnZ
zEI24~k8zz#MX3C+E?Ex-Va&J_28g1Nqj-=)%cG06are+4GdTBb3g)!1erSK562yq+
zTqLv%yIsNMC+b<!vvt#!rji1%3+8LeA*H@>Y!1fI?H}VD{klEpf@gD9$6?sfIbta2
zxIhuvrbxmCuTx7QB`yf!(u-P|wU(UWOluIzRIFy^LKinT?V~mm{pk})%~?)4sO*i1
zpw(llEF)pSWBU9nU*idzmTdI(<-Bs3w8b3G9587I^WRQ8D2~GV6tfwfuH}TU%;M3X
zsC_O<?S@veMAg`Q7A`M6gQtogK5A=~9ig3R-;vzW%#WnYzb6s??g$0WUTN4Bysh0Q
zDp%%K+PI!mJS+y|8~<nuBn<pO&n-W7b5lYFgGyrWap#7K)}&g7abg<GWJ7&^oMBm%
z4>?KIdM&RXaK!CIsK%K)?otp)E_?85XpHwsXn5f^9i7j_n%|p<HXY8CCWv<8@qoT8
z@7FJ;N&JhOBTqQ#!UEx!PUXAe^FCdnxeM@7WQdifCB}9NpLT(}yq9G*cuC&rZsibF
zO;Z4OJG&vIj?hyM6Wn_!>|(r&{s<}b(qS6$4zto=0a|aehrmfC6C&j4s{~{qBGhsp
z-%h;A{Ws(>8LYQWvD0LTKT@o`9LOR?Bl;)j<S<m@Nj>K}cNGMy*uf98fkeAS_zVT_
zGazzZft2q2h;X$U2iVFerB(+&@v`16IbP8o(rL_Cm%UfBGv0ovLaTc$r&`POtTJfC
z19f~pMOk<@bL*n6wD?Xz>T;Sr$$SqcyJ<5?o3}F?#BL@jIaGsd-`kD75MDV*|KD{0
z%sfr$EIgY|97m$wdMHZH)Bf<eJTlP~H3cz|lro0R{b};${pJw^EOE)@^3|W~fyYa+
z*weSLYB>xC=YTF1`aut-_wuLbFx&$5(kj*^_1&2D2aZ_n;Q_&3Re=^4g09FqzuXaJ
z_o)fare5_abwR&m!f1;%%`$6u6$0%>%P@`>VeJfIQxfeeqvBMfuw>8|?j5j&&<w<C
z$+p70XtNZR=s)8SY1qjN<O(twXYC$|10T4EAr)ID^~?482z>inMOWjTXC7n-#2yh+
z%L~vcu&_zb1Wu8U>vbuYOG_{Lvh{41Ni<sC7JhM-+vY%rrZ0<;o7^nUd<e%jX>ezl
zb{aFVk?qo{j#=wX1!Q=ykIi^Z(El{z^Q(g=i$s8L2Iz^T-B&>|bawPS1t1nghENwi
zIz-o+`=0^hXd@}59+0#G`Z<g*=yI=2*G#D%ojy87st2KaQYlcggCm&}B(@V_;GFa9
z4Ng1=wc<OCSEL_i^fbi)Uf5R?wu+&TL^V2}slH1gHq&>IP|JkFBE)u(#3)3Z@`MAn
zB=5=}9bJ6+mp%F@4;*efpTns`+u=uX#Ey-ID#r*jC7){*=)V52!Z3(+40FT4!MPS?
zZLd0lEKoY17Y=R|y;`*Wbb#O`15H99wEsIVFr(cG7a>4w?HY&W)^c7*O#49)JRgfQ
z?eHpi9X`X#JE)pz=kOw&khqgo?~FBYe#(EuCa%mGpE^kIJlp>cdomD1dWcEwU}Ztz
zb$R~Mt8UfQr-Kx+{I@rnA;!92=7J`ZU434AtPov`4S%VH${*4OE^RUw?|ry$d?N8H
z?8~s>Mzx1Knb|P3n|uz#-6IQ=eFo_=M;zXK7aUa22HExqGY9&rpP4$#D+n}*%JDk+
zG`SGNPlCxzMyAhkT)THcZAP#c?(Kw*{;WvYi$aixG}j}Y`U2P-I8hJy4qjwEK*?@N
zEs#$aQE$9{myKQt5O!y;hQiRzIj^K4%%m|Qp*)kblK%Qv@_xY}atP5+T7|rQ!0R4K
z*nT(fxQF>_51XoUVKE{7M@?$ir3)j69D;lB>Nyv7iV`UZ%=|b;FF2tG*VPzus+GI+
z<&JvB$hBwoIa}FEtg4Q5abcSXCL)kR)6vo*W33Tjj%tqD2JdqdGHv^h%#dsaL6_E9
zWtT7n_H8vf%K7a8rS5;`e(4b5H?!hP4cz#M@Ba?sPLyU&_fcqlV$u2;P;>}yQWf>v
zl}Wly&})|Ha?ONZ{%Th=JSGDof{$HcBI$I@YGBW(3CRZit<0k}lLAkeF)_G>&<%Ml
z-q#|_3q_*6CrouWAv}y~jF%1()wH-0Pa-={2_|2WLDPdoM1!%g<FgpmhHO=-)Moe2
z($HEsnr|_`s6QM2WT(cxO+`nWAv2x_J={94+UE7&Qr0oV9o%0MdZXZC3{uz4@5Y=&
zYO&5v6nM4JEbWtzuwgtS>91iS%LP`Q<cCAq;3BHP@ps36Ri&}3A;rcjJ0d{HzflS^
z!Mjg_4?`&+KuA%(b{m_k4PjE1?ha}IZsX<G4Oi&iZq~NbsGnyw*AI%5PlMM^>TvJN
zrj5UIl~QEU>`#g~uAhUOJVKCFK0x!e`vEVt5YGQUY_!n*Z>>a%ZnnQ|L#i_R=0*mN
z_%sSm`i}qO8!;<Aliv#}W`@7*M9fUI_%xzMW+tYN`1Fiy_%!@xjt;U$_5wCmwl>yA
z){gkB_%s4GmNxbZwt5D?!vu|-%?y4^?)<;MD5d9MfzQJFdr$fQd(#p#GZP)t|GTmC
z|1~XLW`VmRA8kMPykx#$W7(e)cUe0(biHWLd)Ndr0{IaU#eyKpL;Nko?+QRZ!=DEc
zjKy@n@rNQP#g{^sL)g5@&Jx%bMFMs7YpZEX9=pE0oP2<px$)t7`uXA!Pb8Jg7Ysur
z{{KHIO3}*VaB;S`H{a>zN<#jh1Lk)sq_f!D+S=yM9H~<%{y?ZwqDBcjI6Ggj))6ML
z(o|J4bG_q;&p3TE_i#^t{0@4bcznO88K7i9hk*+rtQR9i_Mdhg;&xuOYz_$lUzI9a
zicq0<Ii1~WTG9S^>G}EU`T6Mi`8?s#@p@W1_B!?XKJgjNO5P#hvHFI1*U7jQvj5Tf
zd0(NZ>AXMAupwA2Kzy@FkuI^=YG<-Xed9MqvJaUY^!Yr%GgRJ}>16f+{a}rp)W1i+
z?{Fo?gTLo=wO?;EQ&3V8BBm!6m(kMda0X2jE0N@X^S*oY{FeYJ|MB`NOZKb@J!na-
zoAD-bUY6POrg#XaR?Cud`zn`sqNw*RiGqamP%dTY&r_gj{kD#s_c!IoYQg9G0gs%J
zl<RiPY^fY22`+}v#>VE4TAu9ak^L+NZrTt2@2cfujO>bHAMft+5^p^akdTnf&vQQ!
zyo=p_!DsC50w9pbWQ)%Vy{i?La~aaPVYq&!NVRg3$B@R&2wyB$l{vm@O~6c#Tg9&t
zC5hifpSQ?YEa}&wAGg2J0$}o%%$X`;u2)qmQ}l}PzuC}K;DIl<O3yo-IkJ;~;40v7
zI1`0g!t<@%7}Ke_MeANhv25qwUmF!ODAnAXy1Bc@C#x;<1Nq?-6Bji%JBvauEiNi5
zDgrF%x;(@YE%@1sh=`EWT|tf4^nANJ6x=?%@}y7>AgDK5tVEB`f|pBWFpJ`S#B|`s
zO&C%z(@Ht96`V?z6=7pz>xN`>**4gL`Bzj_glv@z4-YpsGS+H5Ymg<0|4R@nD&Y#f
zJbcJRU4b89TniOzl8@{EVX7y41D!Ccr|?fnK}#A!&eE~}t8LV<_R?xR&kEcJaQEd{
zT~QGbrd+W^b>+~oxwTcM_=N>rbqscd_S*9x5pBl%CIr=m>t`0)&nR?~qU`#5V>dN5
zi~#`_8p0)->+J6S{`HKJc=GV3NPMV6lY9POTtUMefq(Z1W8aVeMi^%nV%!jo^XXHs
zXe4%p&=+3G>8E^|!^t#%-mNz~aEI>=aFj!Xxw$#6X%3(%dzN(3l}_=Bj!)L*pAY&U
zaGyQs+mxsY_{hQ1>FwY7<G#JWZ|js$#fkcA^}gs0x2<uT7#nZf<cu+}a&Rb&eN-;B
zJ6(7bN4k#vWF#;cJO09A=zu<{hiZ#)K2RJge43_~lauoroWheaW@KV~9FyDAb?c_A
zxmmejE_7)B<BB0=;%au!i99YRPG4VNmNzdoKYm|N`TZAM9@{n`ExbZHF}3rdgQeD`
zzw;q#GG8h}(%LSrAR*nXm&W?I?eyfvYosD2^=10jVrOAw1OWxzqxkP(c{|+Po1Xr-
z*q+FmeLFt=Lb_)%Dbpu(?iYsR53YCYS=UAvLboLtQGxlzU%$(wnQ~%<)3woY$1%Ss
z?N4iB=1-wDcvP=3uZI7}cf!xlPfkv5Zf>q8!g#l%ww9Ka6@6rYFG1CYEdes>cp<6G
zWf#=tp2_hs*3l8I*{%?1iv-1}_*)S0+mkLmlQDR_V+2iYMT#yx|G}ZqS*sI!^`DDT
zcm(CPRNDT*&dkK>h}OnHoDo2}mzUSr8tYgP6$}_~F17&@`c{n&kLrQQY+H9X?udw>
z;O^_{>S}E4pFbFp!xYnFhV*Wn)hr5v(4_axk57!x&T+)97#?loh?yXNakSPHwTAJ2
zCz4$V;!3SfUZqT-L~&0PSzPQ_bozN2ctONL+-$ydE>i=r)E?oXfz}L60wDNI|8&QA
z=o`uX{l!)XJAdgG-g!hB#z%A*F#0<wP9;)gJv}{q%SWxaNl+g=z08R4Z~!}OVeg>9
z_BBgXs31$`j3Ir%cTegJJhi`L*%AQjvlCKD=jEb(6xAgZB)K@7jnFvV-!ogr4ktOs
z)|oW7qSR1RGS;G3pHLbt)>2MHuKo7iRvXQV?6QvD>iq|cZ4M?O9rpEOzb{~I4TGcg
zl4aZVfX+nAbgR99Fb2QAfiW6^b9N*1rtC)Sfx>acurA-6@r&oNg6*8;5rn2ObVe`U
zf)A)bxBx-ZI@prIoNqfm9#OXSo3H=l?(WXl_0c9sK}cBG|Mf@9TAuXw2m+-|Ldk1q
z1%A^Kw^L@x1(e0R)#eo6-L_0dmM&Hb{`7=e=6sxy+c@0sxTfnFlOK%;0Wn;Xj!0{z
zT5C*2_CqMW4b4Sl$(Y9tQukyjTXE!)1noORHjH=fZEUIA%L8$onwr}BWo&6Fsj#7=
zgX0^-8sJpqk<aR)*AY?3{oI|_AFRD%0l-N4@Q>UJS$jNe@p#vLrqFQs)_$d0IQI?N
zd*{SH;w<v|;E_Gx!YMe}FVLu|ul*c{Gl0S2EX^`+02`%C4s2NuWguE9t$CvxSNI=x
zzq|!vz!tLxE3A8ktv@xW>jhQZ=&4sHVh{4B>=A_>ALF?v#Ru>M+D1kGtPKnRiv>Qq
zsh;l<>SBW4Kj2CnB$x4j1ktqUxE-O8zq{Y_XJZ-*{r5!GS{=ss?dym7NMeQ%HwjMD
zhnjf0nK3c&oBA2dRsdzj-d`mN4kqo(V(8aKr&mUoSJmPU3dQYZ?Gg<qudr+|5bD9$
z%do2;1O_AWUZQI*O)-qJqdzn(V-GErv)wz~@aVL=-^(VQ38|?No;;TkeSLklN4Lk!
zA=HL-tDUXx53vI4`%m|oLHm<w;oK69LKlN)3r@hy2Qu!n05Ss6Eo?I+xjhw}b7Fty
zhE#U)rG5V(!is^MCYTYRM%*E7Z*NaC|DC%P|3^6AkhTcnm%VdtT|VoLF>X~pwQqJr
zFOEx&L(0w*2B5ZEAdx~%B+L6pcYR@DVPYaey2PLaW85`qRq#6u=Qkn4HCcS*Fyr88
z71nJhcjLC>GWGzu>(X(wE2s7FYt@R!bttBFD8{q>w)P(Ccwi5!DLm%sOd(9f63#b@
zO(iEYPP=z~>p2GZ_xHEE+;E#OmbR#y4u&EuEiLW-@;3uny?w`9NFWfdmgRnlbpO$r
zL`$!rhIuslc=}-%+6MVX%wJc4*0(W((>`FWq%BVY3eN#@uBWx%77ZWU8XOKr5($U&
z*fAi$g-}CZGt8*q0o{tYLS06)oUw8nL8h54fsY%XpA7QGobd0r8K3p5Qq}UjH!?6b
zMS@>AA_5{6wgvQ}1@ZG$r&i9HHmXyphOq!jAx^zO+=vYlw9`!ZRRi>QPPSE#E0V}H
z;v4x_y9T)30!mEDsm;8lds|o~yD<P%P3lljr<a}m<mJC0n$>_hX3G=(wn>i(pM)$)
zC-j#*oXq^ZacgzCH~lgKd<7y2Max*;!Vel`EqY}xzf#}Fc|X(eeb#R{Q~b6e!mXN?
zv00Mt@VTCEM;LY;KOl+_ZriRHw~r!nK6P<At-)>bqG6_pB9*=JmGSpt{sotpDOuP<
z2@4}qB2O~xv8yKlN-Zw__BInjxHTf(E8AX2&dL$2Qg8B40;cqZ`Lz_+NU#1ezSAD9
zl&FxZo?*}%00bTyP@7C@*XNEqwNTEBvSP~|>G@89f2+8O#?q}wH;i#TXBFE-G#ay0
zXy+GZC3M=UTo*xwo+YU_R95*pabvL8KWT$PLUPy~hTrk~1P8cz{HKKr>WXC0?%S}9
zWOWZtTBQNhmo5HMlnbYGsU&hsrE`g_J8(z#bT^7+H>fJrj;q!|<I(wp1I=z1YtkrW
z1JjH~*zDec$cL2@7a$Dd4+Dh8S-+GJX^%AncYdG2-THKuLS2=7`0Q|lb0}!voGHir
z4@weSERsk$4mN(}m=XZl8PTsxKO;neUgL~QAxcD*JuDu3yt|Qkpg1IsnuA@1AeE>;
zF_mBrWiSD9u+6Q{6mnI1bd$+$5Y=fPsyPnLpF!N}r9w#X*?)UMwpIHPk>JK7@eyot
z78DfJ*53XV>A0dNQyDDaprBlr9k&Os$a=yLGhu=Jn~8OmWajHK$?1gojb)do>Uu9^
zzu9MfKPK|1117-Pf7;&~3D!u8cEz`~%)W-ehK!*T%6eTWdlI7Rmj3Y(A^jD|Pm<g$
zn-7%5o+n+bPby%m(97A`*_*mp$NQ>jg_MzJ>*vGSRY|d7NI^!19D$Vm$E?X_C;O0I
zx6hCsFgj!oqY@!vh!a;XWn$l!>-8VJmrPDCBD|KfiZyL6n1W^HY876i)Rs*YkwtR1
zWrp6Yv(#M3<tqCx!?f>!e>;&33~NPadJfPZ=9Zc3@D7;VbKkr{EfVbgC@~GJ?qlNe
zo%Hthh9Qxt<K^3Qzkxw`2R=ndMh3F{L@Akk*z-#?5@Ydc3z#8M(>q=gJ3R3jZQHN<
zDD{~u?Ig`-J2(Ne9CR0Qoj@v(0PcjccTTl6<8H}O-xIM&1y?XGM^GI+w8}%y(M>K>
z=DG=%0#heEw^^)IJAdaoUvH+_1JZO><PVQ<J?&?yw}$6GD9}y&2Jz?3a>}z;1)l;4
zBe6mba6NW$GNw((v(hM3w-D##u&O6A!{P(S?XefFIXoFj-EY1OVk|TX;q4K+p4{H4
z3=Hc)=G<1*>8JQ?m5rM5P<&7t(|HL^Nh=cpWeM=}<IbH%%E04sTY9yUQ&0#92;j-J
zMQwb-=DuMkv$MNC4uyuTFnz6n#eC8mK7s;Zu3)UXVpi{HWeEMo8%YdGu@pkF6lmE|
zn?$YMfr#lx%|O;Xu!N=|WOX<)nvn~5@%B&x9PfqWx=2x>^vT6VL};j_8^;Z<aNgU0
ziOTCq=~r5)DE0t_Ps<S4L!hYjh?gwjnPoCzHpNpUdW;@uB5$4Y0e8W4c>#qFrZM#C
z^PLmf4a1nlu<0_=n87^1i5U=aZOfrm!<GIpy1uH(&d?vmmg}N)GT=k_gU7rQ=%35f
zTNk)m3QLgu8cf_P&km=H=dYKYUo=OcQb9pLxPBI!#QDykngL-+gEM9KIR!^`AhKWE
z&xSe#^KmGG-W=el)quA9X+SW#<WSx;sIIwyGS72cy!(X0lKC2-|1A>KY9UxtO1dV7
zix(}5*=<m+NE$Podd4_jw9b()|9c-4<17my3%II4vr+b~`yX{pEv-jKN6VKz=3N!4
zM`=>OAZTz#b#;AxxEz6boO}ERP{l`$(u+m~HPN|;pMlZ0{cohXO%u4^^0GL!x0&ml
zo<j2t@W7Y;2wEM~x4i?n;&6Ybd+#34&Y=yEZ0(*R1#vo0eCx)bpTU5FgRd+tso|9|
zm`rn8jxwx0!n|y8o~I#a993Mj4A&f|?#vqa4b1c&2TVZx<rnrU!m33`g){%;to>H_
z@}0_$YymApLh;hg+>t-3kv*m&1`{vonUX^9i^e$4yPDgU*dq$JUwLv&8nzD<uM&O=
zT{f{-Wjwvy-PLU|Ev8&jNa>Q|=a({mq8xYr57aziqvlos{R0CBeqE`JgBs>=GN=9U
zj!Gu&qY2ord2s%=GzxgqnuMJ6wlKUEP$~@tA!H6I<Mk)@2bfA;Dp2yn5%a~V<S!t<
zL8-ec#>?h7IW$YRqgoDc7S0W_;*UA~OMkVX_Iq1zeZCU^65dX!_4xSsuvn};8MAD5
zy0bDeJbnzbrySv+p$ity4imFoIIJ54=TP)FWND;+oLWU~*+BA!G;$_9X!sowWL?N>
z_Zo8dOn}e1`?lON?EQ3U1mLD>VsTaZ;W2w5Fb2ab@9u6!AaD+=$86&VTgm0IgXC}}
z%OuI$dXllu%4JE>VZh9Cv@|s@9ZXS_MmZyhnkvc>%PF`~Ze{CP*jvn(!YyAwa>w>2
zcMT+XHy{Wc9*o(5Mus(eN2QjsFz7&<<4|XFP?(TKfihU@VB;s%J`d~s6CpbX<$l>$
zQt4y27{zU32!mmd7x<h<39T9m%LCF8&iew|>{-*sA=_t5&S_g8E@I6d5m6J&`%&*|
zT8`Q4>uF_qXCV+02?!V%PuL_?xl4OSzfN7*dZAU^U9^qfCaum>Wmdx^toVcu&F=2q
zPeb?T%Od6i+_Jx<J{!7_>-hEc1w8H-fargXxB-W_0fi!W<((C)QrOVQd<%2mUK~~o
z-8J6Z%{&~7#o9OpxqHrti<K+v09$oB;|JC*ZGRV|zw@>%{62*Lp@DSuf2H;R$_gW_
zEHloa^1bcxzQG$v5=YaLNePT|6NwTJ6t|3rwg(~*3MA0&Qu%LB2ZOLCBh0jfA+I1w
ztz`?Df^*j6iUYY;w-dkmqul?+h%Ev(z!_lk++QjE+v4j&+Wft03AKhLk-5)1IPzOO
znK9DGrI4W-&=C#-;`ZTT0F*O!`nEFw47Q#-I2xPHKj4N3k}Zwo8V(R(Y~)yt3qukW
z+iS4!W=iQ}blLEyJ;tc(_#&KT)PpMVgi`*HddSOfD^wU{>!0s*JZ^&#*9qb3ALooy
zBvpU(VrY`yjZApe-=>JJNw?p;;9`}W!Mwot=XgS;TnUQ)xewN!6+6|1er@dC$}mvt
z2{}JKBWKR%ji>7w1I3q%=KX}{NA0)PR0J$^!c+BpwUJOznr@PZ95DodN+*=6K88@1
z38H76PFY)wG_Xq`r3;eMM?N0_)=%;)wQzzE7=QSc6=6Bt(P_*WBO8EeK&<7`da!+e
zl|89smLcg5VCJ4e=AdF7N9mp9VItlgnPg+PC6)*VCGuy>4WsvpAwB3Ci1%I-vBsDe
z`ZflGF(+JBjaCOLF0OYE_(%?GTTc&9CKGMchZXL}q!HaBn*(9VH#XfF81=l*s@O&%
z8QVR#L}XG2l|b|>ESEK|CxB@%6ROFiq0sUwP*T`rXdhz_|IRGmcCx3wwzZecPAs1w
zc@T`fQlVpRS?s@U?;z=UkaQwm>Xb7l_4@o=qxuA?nV7cY6&?I6X%QzRcj2F-tCG!P
zb`;q-D&mjA2P~V;y|Xhh>BKEgzD6r62vjXYX9bg1(sn?Z=H~@b884b&183QtcLeZ<
zt9DRn4R%vc2p+|#lWIDhOx$PV*OPQGBT;AM&`awq2fjQRX(V~6`08AquJ5)?3rV*l
zaDQvOo~B~7P%VLTb#>l3*wz?|0%(G*gShX{Bx{0IMhO#g`@4KU{T6XjrorKHL9+A)
zri>Y!M-yTvOnO`PEf3-Q$^+tSuLeb_i{UqF&`z<r_cR9Kf5GNRjP(m@%Y`7S7vL<5
z=(~gwJ0xLzxV=GU_tW~UA-d~yM`2MilIHA;TU5;kd?!SXW;hOCR+8$tRGG=x1GCGh
zfUQL{+~95PaCA7CUcH9vv=l6E%)a<9$NR$bBkfmvEIy#$pem66LYR1#%Rl{O^;a-x
zSnC-I1lhGUH1C=Oo!)0fbEBV<2UxEsDCg5&6)<*1`J|^YOC+*DO_hKr1uNde2y^<&
zP#mB@yOKSO{VTLRLAMotmGG>UKdV7|o@GY)&4->rHzbZzpJE$_b+uA9`}oKNtmg5Z
zD{ah_X)v#hE6e5J@bJEuEe16!8Clt{oxr~&$??4JaICJW8Hqc*L(L!{B&>r!O&D%_
z3(;M*gY_+dpUNPXMOY|7u6e<7XFR^r`Sp{xB}K=f<+e>k7dX<ASt)ffR0v>%YMFv^
z-Fipedr7wwuL-Jn3$efi4*Ma5M!p6z0yacwgth?RgU7|^*F<4=keIs!GLOc#M2K;P
zL4OF>WLmDKvp5uz&wq6a86yv&+(0H7I}Zvw19~JXw6xeRVaL4EbbVoUCNG*A0fYoP
zuLE`DUk-Co73<sz`k>PBXbdnL2sE+B&FHcLJw^weREt(KcUj2UORMvND7?M08DkLG
z*(A$9CX=@nT-PcNiW-gFxz|X)gi=ua(=P`G{i^wE{vZHxnQvSAImC;8nVT0038`<*
zFe5)dzgQ;c+munK+mnrn359Xwj*NLv1AwtDQ>4M}pAfTvhnt%nD>ik+UvbOO^k<31
z#T-JC9YwTiW)(c5<rGTwneghSstY=}I4}<P(v&H<Uq_~A5Uk|49^DV2A{1_;N-Q_<
zN9iE%VlK!CaAZwx!1ViTx6H1vf}jn0V@E3Km?-)#-+^d<2VmX%{Z9VLo`&;JS#pGl
zN}<A&^sm6twCTJ9+ZA25L^3^w%yRRyK8M*YKoXdNpgJLm2{3AQlDL+cR)>VCD==S^
zW9rUB!B<q#wna<M>G+3qJhzc}QY?8j$mo{l;Pv_O27)@E$S$G(D{93(IuCd*4Np%a
zhVQlFC)1MC1L7mgEScww(#H&eqN$=BBtEclaJD9m<nf)1jgO<SSbin_uXz#{6m0xN
z1&De`oigU(>FNCl+yP2|^X&E=F}5)HH0O!V`~*I&BzMh8CGQ;JxShV=j`#)7{^Qx?
zsJW}5yKYzm<Mv2TPYM|tAS72Rx*V}&R$vEeu{5${1^~grC)BSvJp&xtKIYWk7oEk>
z5uE(RpA<ByGvdhmts-JwZQI$J^t)xtZ(*B$n#QwDD(`LW$bKXmr_3?;^fQoOjjx$w
zfYYJX6W%C3f*=T;0fjrYw~jdHSY=n3k@}<|KbW4dTo;n9&Z&0bH}4OMkNger{VSu@
z9MTLvHIMdBJ(eE#8=*5eZ1}iZ`etx8Y+)zaa0&|Z+wC+};x`Xg-y6yuOSZE|&lo|1
z@S~gXKH&05Kska%B-|(%8XhiJY2eYVwzamha&v!Y+xSfz62E=*1iAEz*jvl!==|&K
zsXXB{n{jK^G5Utu@Z8O@KiIKfC?w>zi~F~`%>IUn^P2ZdNBKIMhD>yEX5+4Q%N(15
zagnopzk)znu`0$V4&M!%A|LY%Ks8{_H7A7@u?&3Ys(S2}@v-=B5it&+CpyRacVEaT
zQ|A&&=0j~VEB}E3aS1&8*96V|7S8w2&&g9JRyFfd{!Iz2EQnzb$Y2tJaREoL*}?us
zEQuU4j~mP$6mU3zj%D_HcC<f)ePa!&7baz13R#S`h9W}p8cShhs<nFZZuQh<|MLY5
zh1F!7@eCx!Q&Q&&KfG5nsR28S9*9CNGSsh01t@;<^Jd1!2XK`3j{PlsgTdo|(z4@m
zI8&07pMTr*afgJ2v|J)C*V}94$=bGQYQo117=k~>9Z}}Ly@>4QVYLqRTqm}@<8!qx
ziXmR$LpUjva6l2p(Pyr@YoNH`iFm83NaG`d3}cuMcX3D@fRG{W6|c<=5T!8y{z@GN
zAguf%{Nn7?*TxM&jmLOE2dM*#P$O1Q|7NQV`6K4ve+UDvI%s7pm`TjNm7V?;;~nfB
z4e8XX=h59|&3>&gja_52DHo3$r5=F5xbEF9Y?oBqF9$M8_}Q4Uahc+C=7umQV93FK
zz>p3ocZ?@!9*$`jd+vfU-VD+LOjU;)$_PlKN%NW{r=i;?v5sqH{hP|RsN57_c`<H4
z2g(?wn|@hX)2~aF{D=SK=NnwY?eqN!e8y?FKX`h23hqt8%&c6c%o96Jo&-g`?JUHJ
zmmQCxR57VBErEHJFRJ{kWmdTw_5W~m4s4aZZ4|F*vL@TM?Wv}kJlVEwO`dGqwr$(C
zZP&a1@A(8>oxPvuUTgi<eWLR^lGgDyS0Hp43~cm4nmZ|GXx5g~@Ra&OE#eI)kd4|S
z?r&HhAxct^gqLR;+5?cp=@GRMf$5-pm?*mr(DTdvCpIMLxi4tQVWEuQanb9X&K7;%
zz+QT3QK0iyZ+}&PH9alV5O8-ff-dv9xyu%Q(LtXb9EjRQ0FTwpq}6J@onZ=K5ovLO
zu0J=iek^{&^`#=oN8Q#P(k+7X>BVt3trN78e|0ogEvSAo{B<p8_v)C7$L`5!Ns@iu
zjrc&gXit4TZPaiV->b_lxhHh=m|0u7E|_bg*aZEr-RQoWTDI|L_faej;hk$cq%vEa
zK^DQUqmYo0m%F2_sKc|Rs-l7dzG{A6Dk|u!vxis6MjjkE2q3_!?CH^G(jt{#50D4k
zQm;Ph5GH-lkF{3lH8*kG&^5RdL4}r*KPN(DzEx2^0;FN>OT@TTzq8EGklEvr)In?e
zlu@49k7mHa)MD7gLAR<$zh+x(BTBDe7!<PTEqxG$891bWnV3{@Y(2kGl(=e-UVOFC
zXfhTD>tpeJeLi*lr^}Nu&g;z=N)N7C&-d`!zkrCwfIyc2@k<Y-Q@j>n#Om5~c_dmn
zwmk2+V7f1-jP~*Df4qd?wwpZS_cyuT0a;)-i?2a-)ZPu9=TjCt-yH3ykD`qgpz~!b
z)SXUxS>4rM-Lc(UM&khCA_(u)V?K?(?HhFn%=6!q{_-&P`l4>`P(*-60weTo+^;_A
zx9_NR24@9)N1?M@5TPNN<Eylu8(JBgF*#?Wu$RYqUKJTA+n2Pm&dQEw$9b3MRJkmk
z5(vOR9V&9$O|SPWFXntj)VZTm3>q35h@5qAqPiAV8!SjjNC=?rNz-gsA=RA(1O)6g
z(TMn`M&cGi3y=HjU5_1bMQ8BZ(X6z<n_(^S+7k?;7y#ReQ{P4*lhG)b(2bYFy19sc
zZSc6s?DGR@&~`7_<$k7`xXg}`X_mSXwEb|9%{**TlG&uz;Pe_{7dOk77|1E=XlpuP
zkFLD1FMR$C9sb4sdGm%kl-+Y1UBC_FW1q5Dq!GIoW!HFzKQO`hnIs)jlDW7~7qYq5
zU=ch7;pWG}(f112-jn{p!GEzukBW$0lsML^*KxIb#q~}}`I)wsT$Mh51LLi#vDqW<
zHdyD>TKpIxwo;fNvAZsG+A$G3Ci#pS@?Pc0{xbQU;m~{^raP9q-=S}OTB(Og;($En
zC2gq15tYT+FvyiBCT_}4Q8>+7X?^ILmGl1|WaC6jes98zosynQGRZ_%Ha4pknsW>c
zjG_L1Ubu;6&<u@87i+58jg<H*24QlQ>i}dI?6NqVx(HYdO=o0Lz~yk$ctsW-9DfNB
zY^|o%r4g`0W%#7a&xvE~UHe0L&L4=x93sm){bY|S`Wl4rbNU+%yt)76c!;@BQ>b7I
z;9-(B*IxYwjOIp(7M5rv1V=ifbOH#YS)?x`fz=uVG2|bz+P-T_Eml)4a95$<T^t<V
z#=KF9)W%%Tjf!1z4Zj?n6-fPQD{oP~M^!<-6{;q5{fy>CzXnd~Fy_KOBnOmVt!1+z
zFFijYUcM(h2Pp#r#4|M2O25FsexAiSB($(i_M1z0)g&!T=Wfo$4rTbcpn}WJGY=H8
zmKkoIwgoG;BR5{$2<agG%J)B80w5{*b-(gLBO<H{Pd_duS5{V<tT$SVjNkkb`9T`4
zst_rr{GaI1!&WSTf9ps~XIdXbjJPTDPV9quLT(sj6AeQ>4ac${0Ys2H(!?m4cA+L<
zf&qgrtw$!a4>;2`S8ghv)({lEFx;Ne<lVH1N7`L`IG11xqZ2d2BYagq)(<GE+e9k+
ztv#}EIkZZ4xYbH(RnbX`!#$Ph8L>|nz6fVk2C>9tpSBHQ#c55p*Q3i`nOPJL{on->
z!~2Co>9npJY&rLBCXZO%@KeNJz{NAH_G2)z(kZbYc(B70JV^PB!D>g>3rag_OufMo
z-sPGF<S<q3kO!6rWfM@mlHohLb<T&~Cy)9OgCXM|)IkX7#?+v8w*zjqVL`|Nox$`C
z+gmPUb5VLZrqbeQFeyY7I0>l}{!WsDhAJhjSk`*Icq{iRfb~eYIQcYPAd&pywKX~l
z2nYyvGnvNfdZ>04Bt^b)<-J|4?$fWaB|RSB|F`Nvv*s~sM?zow9SE9aH1lM2k#;=q
zk|b-5?ex#A6yo7fR;6kH&7k5|n^wy6Cl<&l7k$d*qPatnG-1vr{#71O^oq$U9MbKP
zMb4vE%q3a%0hi%i;05A6kL0d;(<e?L*~xT{e@DWa;X|R87V?VghZvKUfQz2ZC&CES
z*6Wk~i#Ji&Na5+wlo76(isZ7KyUfY@;LMO#oF9BPQ~VX9;yH4mbj)Kim^Rd(K|M(H
zs5HjES<I10e>M~Fwne2n<<F^QZ=sKE9R9ZSx34hu*^+9qFzDrn(IUwu8cxPOq*F|(
ziC|jcnvLGgKLP2a)4j5jE~e(m&pi35hLV<VSl>OjZK-{HeEfSrlKI0YV`5^iI-WK^
z96R0ajFpuaKRS9-Ws%X)z`s7bt=E`S&aIoQYVFrlD*pV-{|%{aOHsJuhtc~Fw->JL
ze%kn3wm8aniog4IKXIIQVvtgx+GO=a7eiQ#iQDZIT%P8i^n+B8v<~~3v_-@&aWULt
z3%}Gqy6Jwd&FgLRFo~H)x!6*3S{Og;A|E^tba{7LdF&DX6YIBeeiL{CCK(Isw)c&%
zJwQGLedL#R<g8fNVWNYyyeEHjmze)|d4Nt#b~5^Q>Y>@1*QSFD*5!Ee9K)4<o9Ms&
zgVuJ?f#ENr3xCeY<o!Dd9v@UTy%|Cuw6Gcm7#zB>vtlzcc<a@=qZyru1<c?+4MpLd
zV#op02nmD}CXd^dirck}yYAhTmdO8dE?mHVWEc|LL?8`c$VBjQFG|3BJEXL=wbdgv
zQzMg<o&8a76KkGa#nZZzoq^%1vNKgiN>W9eaiS|LsRWTJETj^WFbw2ZI+sE`JDOqv
zB!aKn;XqWh;Up=!Jv6NlOk3$6tBBhNVAv9qC!K}#AwGdKl75{fHyE&1t9s#tdsG7=
zB#m~Vxi4XfMN6kHL`$tnm{vw<29}JhBr3e?_7D2Ji=vM1O5-cfNaJ=9qzFkb#|REK
zXlT(R3Plf<#4y&#!>ueNO^7S<-&ypN?vRfu$H<Ai$}=W*x_f3`C(2ec+-8rs&X#;k
zT&hxdGqdKyW>-`XEj&AD)!!6oCo+qU0hM6XPI`yuearbS5xEceBJkoZC3CpVM6Um#
z42fj#L!o!Z7zC+NQ4!u>R%-{goQRg+$W0I}B#sh&gV-iVMqW;evVc5EsZ4o;1xI2m
zj+B&?cbvzfL7h8e6PI^;b(QC}{vo>IDZZs_u3K|!+J3A0H(+uDdCGSskFGK*b&9iw
zALy8<==oB*zZBc36n1><+Jos~dqSJ2n*5u3$I@YIQ6AQ)_9D>Q?v12twPA+~LCvC8
zQoRYfrOR%UNCa!)xM;`JLViGPIUxshraNU$83>6zK6&e&SP4MCot9OtO6(0da9a%Z
z5i@<)ePof~FF1;yyc|W9^<rntq1UAQon?$!FZ}<6F9C`7hbdI?7isooey3wt>lU`d
z1E}*jBPW0M5QkxOTCIkPb+kX>xb(t?bOOv`vjxjdN$%7iXQ_^g9(OX$y4oukXr{CS
zsC3X|a`gIJnrrqW6s7gp<;BQzXDz;M3DKWKT#3|#gua@iEd{{?Wt#L|Lp)9SEblf>
zP7i&DFg)(}$9X5TaU*aLeSE%;Pfw5;BX&w51hz2<tP(u3Tz`Kgs{zA_^1K`=6koMS
zOq+;OGt9_9gpZ_BGlXVi17XxR+nqul7HNLa4?knivbw4nHSDWy*oVe_j}&{Pg<%hd
zATe}6)9%rp+(ZI~%2##Nz{wn2pha{LFwhp3ek7g>?}n!UU_zE}?j(O|*I|#Xy?t$*
z{aZ`sRL{i3MW#^0a!r<fT59?E|CI^Jo2$ZLO77iUY#3=GzT$4_apABQ3?#+xv_gh8
zW7;h;ar~p4iQB3VPTl2^|4*q~(|M(T6KqGTJ$mOyfS?OFLgGjZQG@|_1pF%`s{I2-
zkkVIT&V~-k2#0BYjidZqf6_xb!<bt6uvZio(=vOO1fA+0ZG^?e@ab`{?(3{5F%c%r
z<lj*AumY@!?EIfOghM<-LqmW*+1c5F#h~}Z%&=K+22RD9_51Z(xNfgIxJ^1dR_A$h
zCvkCcP0jQH{l|xgi}O*-qk?<Z``U^pd6_$g{(&~?KLH!$e@kP_)w*dYDv@@A0~ot3
z+$8b}R_+9^Bl)pqg=M!fnWQkBK?uWadZ55|!=r_E@0$WQfN;mBnC=Ct1oeJ#C3|q{
z!$MZvW4tG2As}?Kv{4#ruW-Z{{z=ZH-XXtBvGR046|<>DHb%?IzX>IT(CEBHtjFMV
zf@4c0T&VH4LPrB&ONFj-)*Sl{0M}gh;vWY^4qX;0`u4h3-Gurf%Vd7HN&T^HFu#Ya
zSjWbBT;PTH&g-^pylk7;-#U!4^PPtYPRotxKHIN)O60AUHU^IkHT<xDu?N{b&AtZl
z6^sLjdkJ|2+;BA=Fl2ngcvqBCPR-8u3NLoL=gsdEkB^6&o0mgEpb~zI63<$)gH7b*
z<NzqpGs@(v-y@ckk7}NS!yHRwOV(sJUp9deit}*J;(oi~uRJ1t{3H|<%pcP^VUj$u
z;FlSPOH*Tl;lad<)#MX4lf=(D{rJs7GMfHzoWJM2{*`Wft(o4`0LX?&SRFCej0}$p
zwA#WpB@_iYJjtPl1hSD7NBq?^;wnTT&X~)8k!A5sxX*<hkO5ntvRc1G83yD|d5HVy
zt)hC42`|;bo>iIpHllEMiECM0<Hp9tu1t}wNZfXcx2(51S(rm01J{jW6u_I(v!5Lq
zjW*2VvLNzj(`^!tspZqHP);U;zCqMMKtc+bmabb+t`(kiEgIq&gWq@GgK}S{N;Mn%
z7NX3}#}tD8ZVwIZeRa`O(~U@Q8xwK^WF){;aOP0^kWGW#@1gnhlHSG*Ruc!??gfFJ
zG0toz{?yGNuQhcXEFQKM2wO3o))*&`_yw;D0FwX)^(`C1f5-bReChuF9?+0LkO_-U
zYnsV)j=u+o9_JVt8tNxI!~pTE5f6jKKbr;?KrKmuC`3^RZb-UlErTuWI4+aY4M$LZ
z+J?N|fAOP9xiR9{Wy1@G|7X3wZvf6x2<a{BK4G$*AT`rH-U+vuUO*dpM_knm`<;nP
zzrg<OG!QyPHv$+y>L)?zYbD6~PA8}rU=J0RdNo=xAj7+fNRgnkW>?@`k(HR`b9eHe
z%?{#M`l;*2Ad4OuJKf69fkC>a-ErDQ%x2!ne}es}O%Jp+_imv0DW>zC{O{%~arO=i
z<Ij{PshKw|-H>y92xrVO7vGu5*yb<J+SS(3Zd1V(2~^SXwUV<PvvYW{65_%=%n~Z?
zD!l5LEM8@mT2Lr|ub`oRw{B)tN$Xy5Kmz$fH{#9TLF4atBf1=6c@02d%MR?1!5@Nr
zb!TsHZ&y!87Z(plZ*OOJcVCulv=)#WNcLDX#RmjDmzGkWk${`v;h@B&{`QPp4jIT$
zKt@59`X#JrD8f03kBq#1Wpi_P6t=(Vi6@P9QE&lhQ2WH9X@5yzSQ~EP2Hzmp1-w;a
zX~5C10UavsyuyLJ_~D!tj%RmBn_twb%SK~PnH^q5_<1r4e+rmWz0k*ipdDv*dSsjN
zhwMcF5SqA$^dU)Xc$0|xdH8k9@^!rX#iJYphLX1m(bu#x#8rEfu1R#Zmj&Fo2lZ{M
zQ3s_$$4Bo*h4^LRj3TWFUiTYAiZN#eP)xFalV8Ect&@O*i=qn0+zVp<Zz+_yaO{oN
zZ-WJXlYseEf4H~`)FPY9wEty0M#p|P*q^!Z--b7vShtZxl*_wC)EgongbN(Y@?hXn
z0>*6M0pxKtvBf%DKXyp@vAWyjPavQF$i4`=#Vh@*<cm>Wz-qxRm`A4`C9Tr#R}4b&
zOP37*1bFMeF9LGe>+hK60v7$Pwe}%EVr6EgIwG2d<81Br#J=64IX21}LCwDf;=KQq
z`Xuu9BO<QJxAFdrArXtj=W{;q9~z3x>h4+=uz&v1a<SFrGqLK#-15hU@#F!b*&t#v
zThT&|w+2JwbU>bKRz#~kAj7P-(y|MIzg33TM@+unpRiXnQ{;}}9vH1X=zgB>5X<(%
z|7(?1I;fgQ_13LxEz?a5JtlJ~%&}}EcA*k*<GqYT<NA(|W$LU_)L?hW4MPk_i>>or
zgS7az=;9;UEb@`mKo-K1oiL=k@9f^_+z_E(2}Z+uK%%U-R~Z!56{JvN>EJ<qr;-Xu
zP_0PtH_#=9%-wW0>*O`|ahms4zdXXIsWxt5C^Jc3F`kmog4^YGzlAz>Bfg~nI?LMv
z%}tVyEjS{<qnog~xB6!3ub8)}>yh_=nI`B2Kw5$A26#<s6kawB{_*;OzV^qVzv__E
zGpmi;Kbdt+dU<3iK!xDz6F3Q#_vO?DkthmfV%omz#28KywFal$0U0g;!~FJ`B;in-
z{APCury>et8t9J%wye7$NDG0qw4F2dm&`?_2=FU2`fDa@MJUve*&*ena9q?pQY%?_
zvPt@uXT;VvC09aoTy`Lx9%FSKc2%pnYT+U+<H{K4wXin=b56GDuErl-kcT)Wb~O_=
z_~1JD!rdZwo9WvbDa^j{<%grOgr{--%VN$<-lJ)ZMA94u&Z*<ON~?$B1%OQha-R4X
z8rzub89->(X5Q(&7_B%Mt=xGEkD4ZSw$9Q41N7SO36yBfep>TbOx^hApR#5c*28I9
z$9U~Zld-Blc&l%uwy*0#{zmt5S^*FR(ej7}I_JC8@G)$+-SqWT-=Ov&I{yx~J`Ujf
zZTL<AKqml!+!tuXm;$sQ{cbWwTKN1c{}gtZj|aU1UNNP-qjlw&y@E3sGmy-GHc<G2
zDQ%%85(NXKknV>eSJ*rN&vZDkTdo^Tuh<;dbO%k@N?mIl*$VdW<1qf0Gl-fgpm`<w
ziUD`Gjf1vF>gvQ=Qy5Q~e-r>?Lc_wf_nJ8$fcMe%B}lrD`!Nk6Oh!3CuzLQ6n>T+R
z_gNkS=_oNLm^|UQFZS6O`s|b0p73pr1L(WibFR_$gMC5<*FVB5?0eaG&D7OXi#z6U
z&fU_0*QSAT(YD?{v)rFjw_FneP($+{0{uZc`(#mQ-D`(czHUiJwCr%iG6mGC((^SY
zQ%v#fNDelS$p+=C2BoVOlw0%@nGt1wB?qql&L8%??HJm*z0T6b;$LmQS}B2E0UQl<
zG0%a(dHz;3NwfGbRd4|#+|FWxW;B-xN!$~w$-uEo@uen*r24z)mRG1%9u^?D=l2@`
z%zNNY@E7wE^=-C|(7Ct?k=QbT#r94N@%axxU5DGl1$K`Jm{jsZ`CL%T0;4MFh&!at
z_jNUSHm&7P>(a{$O|Gd+uKeS7{j-HG@7EScKI2DQta!?g4@YN=mXg`^2DR^wLHJ7d
z-Wa|t%gH!$UnMpA7~4=$=PDjQbs8t+Iu#Z$FuC!#ULaL}67razPVJynIq08lC~dff
z<{fZ_wST-i-Rz&#V}~j(1{$3TD)gPMDq8@!Y<@SIIi%$`3t#VC`38NAc9vr}l}nW;
zrkPyz77o*A4A($3@O(j&v1tG5{p_xmQxMLAVJ+ayuqs_I!f95THK6ZWGj#i;e()t%
zx>1(7H57WKm7gLDU!Dn1j(IN!k8Nk0ZD;E}9|$kzwr--jFLvKrj_ExYo$a*9e<>q#
zD^tD6<D`mP3EfM%F(7N9Nf^*T{otSqYr17Qv~QD*JFMI$mzI0fvMA@aEa%m&<S(b&
z{7(3~fE#ERq(J@?mbmg6b;O8v%L>QZ875ShcB^Dqqx8HjHrbEGiI~SdxrqPmGB+jY
zku@TZXCr&5rbPrEOb)R0;RWl6E#^NKkuTo*1e)e?g)DueLD;cC5xr%!sz(=5Y&i7M
zv_>ERKhiMK=vt`G>Y1e%I=4Z6sZRp)llEU@Bp%NySO%5KL$8O)cU_<h!kvH-uWir$
zb*59rBdL%3tk)e}qt9VVdDS2{_XXxQ*$Z4uK@lBrd+1CzZPD^NW}(nk=l~G%x|9)~
ze6Q8te^xlk*F!QXD*cA_3_K!GXYgOc|4J{m$&!ZSf05MKqhu7BUgez}=M}y@-9b|m
zB{D`5RN_@5{lfgUfY{l5iRziv>1Y9{WGv7IEs>$R!8sw4NzmQW-i1}awdak^#G{HC
zxaFM7E;X(1wH%?dMYVZBFnKfyB_!Wqjm6NWB&ahs^4z_@V#dHw?%dH9Sf#YsjVHzS
zjw-U+bvNz@&P?cDii_LntFm1$+se=@*V}amg22ZFde3qb(y#|7k7MRs?(@|gk^lt9
zaBp4|hn#MiQSURz4z@A>P#OFX?v&kHNou1u`k$yb7zWlW-TZ?F`4v+J8kb@`Q=g_6
z)RHXbSJ2eBFapuZHynQ9``pVXA>BPJs}TrWO`SzxcolD&NI%3;j}$D3DC-U_P$*Gs
zM4fGW`<Ia)3gzSo#%{N__4j1~)yuSe-CuXa=6EN&iJyW39_$hcoQOfU7{DlhI*Uku
zNC?=5wO)hG&I6b@alT$aJmBg!x?u1=h!p@?jAGgqECcm(j>w42@%p6JvCw#^mc%^H
zQ+gGZI>Xf+K8GFLc^#0F9g~cvLkr_g+XB2H$1c7DRu0-Hd{&V3GrRpNU6&pOop4le
zWJm86yTqagG=m#1pP`&s&`KJe3KbemATn~;z=dY=#X1=X3ezK(Jrt7@ro0WWhC(|+
zWnA@m!;la`d)}~yb@{p#<yNG$POF4YYYdilp$v^gX;u=Slhe-bg#Y3tR;`=QUS^&@
zIs*Zp`@P@CTi5IDSXoln%T4fAMc1c($H(2K)|#x3yZKe7R)3a@)7zq~hx1jt=gDph
z!(^tw(}0aJ9FP5RoC3ku^WKC%<YSZPmrA<%Q{-J%mUf%VQ*PQ;Rqdv>^V^Z=mKV=a
zuS(ry2Axg^yW=)vR}apIMWgu}!@|8r2~Ks7T&M7=1;12B=~ZcW(45G}<Fvu<I2M$s
zph<pp9OLXzmkveH=R;&B9J)@$AEF3F>H<~bi+#E$#`{FWsXg+Dgd;i?^zu-*A#9#C
zY>p*t?#oiM>(6E`<&EwN@v>~BBe38srTwaDR4>C4e#=UJLa7W(%z9WK;yqe6F}?bj
z9GTvbNBnn_8{9w7t~Ex8@R0E?g(5R8{=viYr~^=Ciu+aZNAybf+TFKT!g1eUs|Rpn
zVn>h|Ie!w6Zv~S)?38$k7&BxpO6uP3<FupA{2lgt-{wtZ-&8@moS0)2fd~@Z6E&H^
zv_uaZ=%k?!-aEXUS(H9DI**9JyptT4m<6#^Ge?NCKqp}&Jp2|xfcv`G88S1JN0Axn
z@mAKamXu53l_}ukwD(9?R_?N>TCv}_XrAHpxn4;U?9<|8vDRqS_4!IOGMDA^*-}z+
zvfkpb{9k_cbiN|o8TA2=!>-xkj`rqqxxP|sG!`ZB8S(*8xIi3ua=ucxuNRDf2YmMo
zBOhTH47+*%VZd)H3|gtRCR4x5oB|jAEiEXY9Se(@yJRKX-zd+<Z<vi$m`VCbXFh4Y
zhui3rXECXi-!U6f*(d!i0)5Exn*qiR2U4{<)fhJXRaz5?^)xN7HqKW2<*y_WaR!Ju
zY2n!#5xAI4q6aZ|d#Q~8J@DD)-vrXkYw45Bq_ajnu3KWoPdn6RQvCijH%73_p3q?o
z4+-EerwZUVPXE@Q;LtptOXBcEnI?0Ki{9%0tA1TfGPIu?<}#XJBK&6H%HUyf)beV8
z(_$7wIT%B8;Kb#RBH4Ek{pn|gf?P$sRu;{Gd|kbxV5!+y)f5bywKsND`<=Hj+BHk8
z9R|Gdf&=;bC4;XyvR6E{fI+~my!Z{(MH+(X!ub;>SH@#o*<e7suN(0v&Qm`9E0aMW
zn?VbkR|=a`Xv15*m8AEl-D-=20Rn-x(ca#h@)-{XL*;5Cd^R|D&mz!-(S_{Z07_~A
zqLDgr71I}7soClH8CD1RcwuGsPp>|TfX98OjN$2g_2Eud;4`c1^I*|AebM~mK2Fy8
ze0j$HB0S3ZBGuu6+S~mgW9?zhdGV6};gRhu+@_-73}H~_joK=u;q8ybD~!eM`;{=Z
zb6^zz-?v(L2p#?M^*jcvSYZ5Jbd*UuH5w*Ld^z49J+Kc%j0T|+CC#G?jH&iSm|Gf(
zySyM0Bz8~}>0GY$9S-|4hI#qNU=Oaz9yO+C_e@)oVsUyYp41kSx7xxt5&C0eef``o
zd|rIngqr6hQdC+(`h+asp-3h}Q&Y3OqXP>rD5(iCY6$K6OVV)1K#~->=dMAsRARZk
zaNes|Y?Hs!YGVKZ^`=t}RgO!=6Q8tkmtDyLMci)wk3T%+R&+M~j0m2`@38&}ihD?g
zllf$BN_iF6VlNW!j$Q{M8^iNy;~gHy`s>|+g&UE8+wpZyw#oAS_{>4a)8pVa>1ZnJ
zWp{mqMdzH;u|40@wl7~zlH=Uf<)d-nEET<KtI>9&QIz0fl%ay!3b)SCYYuYl(Q4hn
z^=KywF@yJG1Vg9t!*niRB5tut<LhFftjT(_ezQ~hvtO3OdVICcWPqV--g14__Brfi
zOT9?DR;I%;<Mu&zBrf$&(mRY`5$!X-?r@Mbtc|13Q(~nnn)N8JYgg&jZ=ihw5TpP$
z5?k)lF6E_(N>z!^I@32-YV`ZtE`PtemX9Fu?iepo6v_5oGHqgTQWHbm3E<#uZ(r}r
zMgkozw#}4W7BqkP+<VlMcc9D5&BHTi%1Ry=s;@(xKy9U8r;2!E7q!dWVoBVuU*nq1
zlQdQfeYGvipXDP^eski(&CL^LAVkUFKwpC?GUax7Dh%pWG`<iVQ9zJS_kkvYV78vd
zc!msu$FcY4WfiachPlkCWuf&snBX)mH4*3wo2)ewbbWm;>3D+*%4TvmTpiCe@VN9p
zj1w^KeT)`pjBN?1c7E2?MjVZ%(i@Mj;_`W#4fgJVr*)03+4A}9J-kbAp;^Ar+npZ9
zo8frSZgSX8xsa`O(V4ZX*FM~K7mj@_W{#w{7hjF1+)bssc`Q|q;BZV@nX9+BTm=aS
zKW1>V*?qQ`*PxWSWF*_(96@irbHDt1xHwx(;V4hgknQjA!WTg(UWw<u;<g6e6v)?$
z%JDSO-8;$N$?~582p)jd$sfY~FP>(E>lUSMFl6d)f~E>hhIA7tBz>U>@M1prl9K^y
zr@u*&6MmB=jona;xbuPmrRNU^S@|;Miu3)i-mseQL0+RW*a(!|3hp~6>y1_#u7>gN
zgv=|luvNEhu_K1h@c{#Us_V$3q#-r_ch4^`0HPijCI^(iE0n`fzCn_PN%TkezQ99(
zRpx+6kA;^9`70mNEumMaU{per9$sqWaa@$u7c5?0Atm$)Ijt#deH09pSBzb)tLx?6
z^=dSsuQxO4Uu|ag1v99(-1>jKCE;;iPhM`^@qfBLB#iZOSe<pb%YL55HDsw;$f#OG
zY6NH0K599YF0~C&KW<5Qd04D$>Wn?AL+Y)WNc)(WY%LVoN*$I@ECgFG;MVEmyw)^0
zzAbo<r5Wpdc|U*p!&Yj(bT4+c?=I<VwK-XwFmy?A+v$(yiryVDEIV&+r}9CfpK~$-
zKEUe38qQM|w%e`ec`;jJk4D^~CoFy6!`?4nGpm_X;Jycq$fA0s1y~|N<%-{FgW=e}
z<AUE_)S@v&!g!M;e{qV_=(kA4K&=Ql?*jn<#ZoMJ;ltutOeyCrzLdbX#o=%1D3BaS
z4ZD@qQ4G75jJ-a@gLPR4Bqhu{SSNa8vR@Bm_?aBkJd_>9j?T8Wf;EV-qXfjLOClO9
zSy9M1RlI591Y+(@#=Z7dy(QzuEX7F3$Uk#a6sx88(|)){^Z}{%Bm;U_US1vwGO{3<
znD&ojDMaf3a!jR5l0jNTe%;DtLEDK%_q4`!Z06mN7RS$Pt+vB-J|3^HPcs}2`qkQk
zlj9kuG!7MZc6x4~&paL%cGZfsibX4sr<J+`90cih!wHSr3eP7OQghfP??g5mn*&gb
z10stmvLT19uUyZr2Ft07daO0rB~SMKWT#<y3t;6mYJ~s?>JUK_dWC>St@+qG4hP0o
zjm+EYXmg(!e>IzJRc(h1kEiQhCf_2D%j4z7+ug?CF;A0sr}N9~35yAWx2L_Fm6qyC
zvvpnMfHY4Q@0UqGX3CYu2ZT-SJnjOzPyTt^Ec1SK5>c=*mF@sLGg?j!(+to~6(`U|
zC4%2Y@8>ea4qBdP)=S%>NU9@c+F({ly({B-^EJ-r%8v5a_@)1}&qMs3JL5CheQ+1L
z9^jHdEMQmZGN>?QLuXkE>WYdelHaOtQ&=Y{XZ=E(0Z)B-IXOAGm{NbC==%B^Xf-3F
z_<r-mRfG2yh$qM~*6xE&l`4FGZuZUArO?Bq%MQWQfYjAfK&wPAl#ayxGCQBIo7qF8
zkmiYHx6pY%HsX7G<Z(M3UjiZzmy6YlHo<kEeH2KeV$8v2)$CxBx6nFb{&5{lK&@<p
z=XLq`TvZM~RC%x64qMsc+Sd~9;;f8Yt$g0J+u*n)ds63|Z~>m`g6idnKwHUgS;4PQ
zx8+qgly22Ppz+q}Q*J&t8Q5ga&}@|Ph0@5whQm?ue1P757n{aW#L9-()dq0@Ku}j|
zQMUkO%j0t1{JfxY=VF<5GckY!8F|34y6!c&xjaHaLXoa~SQULh`z`My*lS!8_n>6#
zk@c{qfZU!<%3f)7NOsCt3hcm_Ar_VB$BVJ4DdjEfk;o&_kDvU+!ZnDW{I8(hV%K|7
zTci;r7ey0EwNq2#k8^6NwNvrhyxe=@5dK2wU5GR(@{WCmpq-Q>2Hs`MThx=}+k#a~
z0{p+#nZXg57zw)>80Xcnk18b~8^;^Xe!7jI7)pSDs1Q(p9Lx2^R1Iamc=vKN4Oy?a
zn0|FuT_RW$KO7|KTmniWa>4z1PS%8g>)T%YfH_{Lo5w~Cjo@8#Re4;Msw4;QgpM7n
z*I;3-7VtwEo4C~%Z?P3ikd|$kyk=U*+gKf2JesW^r`;7Ql1*AP8=qDKnK){7O%KT}
zjowq8WFR#Qua{Me16{~G1~Q(^U$+O~DwX3K?Ju=fa%d$w-Y-UXhZ9X{ZT~**=K#i&
zL9<z~+{x*5K7J|$!E&r(QB+_Z^XZ1d=<E@O_>B^F@fndvVp95d-F_S89OpM^Vxnv;
zrl~A(sUK<ue~3JZ!AcWb1yFAO`Nxdi2Ez(rAlN!Nd8HqfmX?lsA4@gx3y`cRmHuJE
zEWikz<P7iI;caEtAdcx)>apF3It+#`=;$dFfHjR!Gy)pn0MENaX*FljRYsF6pt~|E
zpQWAVyuf{u>vT(Iu~B2ge!1y#^msw1(YRg{kwV<ycD6XiVe_fm@M+ml2@I46O<R+1
z?{}woSyG*rcn*(6dXQcC^)GezB^I1H9p4~)ey}<kY)=<<uj2XjJwnW1q7_6Y8p^=q
zd-GOb>i%ea_3wd)^bfi~B447Z{Y56bp2@7|wYX*hyJb<j>R$fYeX^=0c1td+U1Qeq
z@F@FK&gr&490MqVSc5XV!2M3E)7Ui!b}Mz;vm`S1&Z=tlA$$SlPWK2xw*Y3J0-Xyx
z;Q@LZNgW0UP}y?kV<*G2ZUX6F0e`aT>@U301&Eh^DBQffi-Dw;A3R=PyN(}CSN^kR
zU|=Y<1uC*DmG&vAo<Xx=LPx;2=Hc$1%jz9q0f;5ai2q=<u4o&zd7Owr0ind`ZNf;^
zz0%(<+rcHUOF&3+I`&u?uGV?|W<$e%4xR)o)Xlh#aO!Wx6Zo_RYv*yh-A`dL5r6Cv
zyW;Z1%zxiB?=Wllw0&E=Zz%JS$w=!u7hJtBoX#oxC~bURz)if@EWdopX?Yc24!HU`
zqy>-XLuYtyq_hL+7m;Nl$sQEm>HE}2u8ElCNaDJ`RO(IeyyTV3uz}D1`Rn?)%#WlK
z+LV)E`Z}=SHJaH4NuZM7YdIL+p^@&*^sA?Ds8XlRvH>~q<q(krsIow^Zm~3Czn+h$
zgi`Qpo1YD`f9{va2P0NJKgirylg*|-k*ZxKLlA8D&$pQ|^EGqF&HO+{zCD`8#m0UW
zbASg;9?<s_N*pnaGTpm=c<{o7w6f8LSn~JC9}O-l1~wA(RUU_j<rtIa+<%+KGKKKO
zf`ce$Qag=}i2AXaw65eZaYTRyZ5lU%>ZuZSl0j#TqL6<z4h?Kum`P$wfh3T7(wJ+0
zy2dVRFurSy$?}F2Tq5XbHoEdDnrnADQ&vzwG0RG8wD~#=Ch)FT_o-25(CTWn+}oVi
zerS(}{&dO$pU;BFw<EGVSF)NB1JW~K7$}BS0h|p~plo=YY*S6Zc&@m=p9W+t6aG)Z
zvhVbsvo3&uEQV1I41cAzAC<Pc5hKc@4~XhQXYxz#TK(fCr%+FCcrU83@R=>2^e(1#
zW5IhRurKF%u{c}8Lr!OHzUtukQd@9UrRn|o?sn#--t2Ui%i~h$^WpR{iMQWRl|XQz
zh+#Dkzf60=l!m6ZM;U$}FIuAXyH?<Fk7LUK(iCFuOS5udXz1<b1v>C2FPtmf@Z{!l
z%u0Y@<oWi*fxS-?{3^mx$n%iEr_jR>kAL<64bX&i(2QUDFKyOA+64GC8{1!(1G@%b
zv?G3jmEj;E&2)G?ZFYEIpra=yC7Bo*`5`d*D1&?-b;yu0sSm|{n~NoR>Lw#36s%D0
zW1MZdolGaRJCp)CkOl`m+y4T?Y&Jm#jJ2BO790(4mCiyMr1R~_op$>}rv$!m(a{D|
zlF`q5RpwM!$(C#uRqz$jfc?;`dZo*F+^;y6KyRScFO7s;8!^jv1E^;cBH;L-nLZLY
zGwaVj`PwWeOZKbd$62caZu7kkYzT^lgjA<q=_+I$oD^gfr)>F$VgtsIn9R$J>VG9E
zxM-kuso_cEIUD7dS7lCv@Y{p#FdzX<DPp<K<tg}V>1w&*tbZh~NH&Y#<r25@E>2be
zufxLSV^T}f=f$X{0^WVIN#z5Ao?RJ_5q@(j)B@nQ+|1@;9L1v{m4)iV-G;rd42emm
zd$Xdp;Ou0*cR-<FVR3X7yuH1n@cEFDk!^ogD8n9ujb^iOiNM-z<PosEl^cu9NIMa@
zx1N{<gA-=*#e*qc%DG=2uLD>ldzy5l6L^N-VQg&dzkmOTgoEHZ%2HFYZ>P@B&jlb&
zQ6IXQmO**Y@$e$>o=x?0$oxU4{VEz&X(tSvZm2eicqo7b+PxMHKZw7&Ecf0lHMU}a
zhW*Q>Hb&=4(Po|B@e6|o5x-?9;&P?V<f!uEO(q?)&Qwxy0^Q*QBL~HsG4bji$DP)*
zmkl6%0D1N8lpm<>5+(g?ta+y1q!?sczy;3}9`Xi5!DHV^?Q|?FwT*D@F1&```nJ1l
z%gFz27holhAuQpr9Ej{sW&0%x*S||mEI^V_eEItO2l-f2u0@VP+}pJm8cNDt>?EW;
zt~YMBIPMK5sZ@H}-R#DoP&8hxHn`m#Ub?yUaoA*jZMckCR!lYbY}PFmhcywy{V0%j
zN*rvnV16L~DlVoZ#U6%|lw7p;PY~y5mmnue$$w_$<xRgI6F(AI!W4S!CSW!Fk@Dth
z<&6on1i~puxwx=oW$xxFqpU|7;(DqPX*x>&?v@N&o6LwTPZvt#Q2SHzi8)#W8=bfg
zNYn;%tHHS~EG&RMMv!ZY9~wEf05b)O^l2(DJ1IDOQTfN}w}gbm@8935gh1`Tqi=ts
zB>4$a-h-+sC2lLHViRK{(okX_#c5Djsry+ThOya%HC_OCUyCsY0|rH!&!=So-_p&H
z#p`x^z0(Ku&(gWP%*D;+4L`icuB(KHbSO-D4sigA7RZu&zJkJAiib7GFPPREy?33J
zK4AbnOsEN3dk{BO#}Ckj@~>;V;U|C<`JT@N7!&|q4Ae_;{Z#;7t@ga-ec!H|jG|GS
zM>j7;wP{xos855q3-%SQ#zuTT6x9!$!5giCm_~EIUaSfAO<|6@1W31HF}g0j;x)c>
zR4is$DqT)5`a^E5ba*_xKHxlFq|MH=?u{`dZFcfKggFGoF5IpcjJ6wf>LnoLnzj&q
zuYsBUJ`$)%OhX(kH;j-^*PQ_@AJJ#0U}ynrl2rD8&!N|QhRzrG0f*k?2?DW&=BlNx
zzG|<}5#T&IIT__Td+tmut%+HEt9VH3HDWYnhhV=VbBJ9aa$<M<pM4Ok;K)AL0=&5S
zT<(M!MAbM$yq*5P<PrY{1O>%%cIr&968U}8ep^E|iW``!%4n0DHE1j`ucEZ@-rv5d
zo6JfBF5B@A4-=bZMvsM3q6qlyTj1`U%;b9;z|cwOak)$j(@}{?9+^n4!fR$TykBCU
zdaD2~${KrFAR5fst33YQ1W*zHuy~%Vw;q7h-X8-{kIRNDcdpPRFI0O_&L&d_{cL}`
z0OrkA!r(jA9<US#VCC=P2ATV)SM1==leqy9tbIxrKP&=esT-~;A86z%I03K}2J5@!
zIGn}!Jx`2+^)KnRKup5w9hT+0XuBnUNSih9?c=J(Nb~Hwo+trr&+}#eGN+r#48Z4k
z-)Xx-z!l*4m@7Ofk_k|fJ}Qs`lwUuaqyEd?e9lY^k}tU&tUsJolN8@MFRh!973Z-r
z$4A>s;CSyyS!wB|kb@+*@H1)=018e#t^l~0o@}ZXe5reKM=m!KA6Lc&SJH^FsW~rR
z!BOPo{(A(as77M<6Y8X}<+tp+yF35Cf48fLvFNgG(?BA{)5IHdqajmo%$_bcB$m=|
zF~3<1OYnoDr2JW%St_SlDIV9^!C`MUSKTO?E2`3LvA=t}l33ybf{ClHw@Rai$C4@j
zCz_9G=6hEl{XOkPd=JhDNxtuBG}XEuW&oDcyalPWQ2*JECtGpA(n}aGR(H3hqOj()
zr5#Bpn@ko$#P)UZ{PqgA4(zmPv_k(cyw;LL#OjZ<`e8KQ?J}TtH7yJYbT*ktEHL$y
z!lz-Z-^I0WT|JX<zR=T>2|OBWJWZaTE#wt^dLK}<#>h4MvU>Kq!Z2$OrblFLOLR1w
z8C~8UwUp2B%bYF_#x$I-I@_)nE~*%`k2k79;y59M!0FfIagPT7i2nK!3dcG$Fu+H<
z>ZW_`pFCvAPC_zVVlcd7Kx|3OLVsSV+Xrg8&!(2+<>R{eZV89S&C18uVRZ1A$78eh
zwJT(JBCX{3WtLN1^X1k$TpIgd(db2V<nu&oQ8qVObSIqn$>jtNaL&tJt~SBJ!8r@d
z%gc+4i>s@D3<p*Gci1Z{E32zxHw(&OFNPlqd^gRlT$}1eE^UJ$1_JKD6bIzrN0-Zu
z4Jo}#Ra$9vHe0haDv$UtUCu{qP2SY_UomFOW(ET?s!JiETPKofz;>trlEZ#>*VI?n
zRQs2bI3W|6Wx>uCoQRPp<6AXiwm1fZ+kD#0fB`fnPp=c&Or8h{TKR|wGN8b@!Pj9X
zMHE5jly{f%6J9dk;2KriF%nT{j|TBkNpudCiPj)BiFQ^Y?Zk@ez7e(AqOKi|%l07m
z{=XLQM-V>AYTKDW&AbmjkLyFu9!28iYRku?6=BNv`(8eeO6^Xm&EX@H$@7g47REgF
zWVRv2mMAR#=DMc-sh^Hd|DvLN(hV(B4#j0TO?81^4{*WPl{Q-2>YVa?@+sO#;Tf2b
zKcUN4K^BLCrn=um`cV!AMcn}%GAk;I1nNQ17TG}}8oenZIzS(3fB}B?vN)1CVgxdi
zvi57-{Wbt#E$t^aDc-cYv=;xGxbo@s1_1j?{^OUb8H@XYVi8~&<mZzX>$_r{=qt|n
zPh)8Vjg0Jdva6p*PezZl?}i{uK5uJViWNfyXMbIz%+=U1D{7zqPC9b)v9dZ|TU(K3
z=yCYo`a>eVN)5SxygrO2F=X+1Jl<0kuMKfnulw=|l|)>aTzxauz)6#TwWQnme%H*r
z7qTbGj(gsGJ#{c%z{bXNwa#_>`fxZqtJ3vrms?TYBxVMId4;!f-f4v;X}9Q-Mh9&5
zS6zDjKwN`sCF+Q3<FJq}c|ynptsydbT#sH|IOn+!|8;^eu9p`db)Zz<pU;<U`5D;1
ze*A$_yV!QqlfQ6zgBy_2B=TaP2Q^dX*Zcbe^bKVg??_a|6f{}x^K-BV4f<=y1WmTf
zX-m{&<$4eW`s*{JYdo$ZBPGcY3Fh@x`GWASPPzZ6L95suO=iG|P9E<{iwzsVxW+B{
zDG~@JV%fO?Tc^4(CW;+!_U{1b2%q0`8QAf(vGMW#a<{3c5#MMq6#4x8ywYKLJ!u6C
z3v0w2-*fT~@XnoLT)0ZyuTd*+XnWWvrb;?Zqo3)Hze@51IsKu2o}OqO!+D^iqUv7h
zGHAD}Hd<nVv<fgo6v4Hhot)fcu=|^4QKqPorFMR-zeECV{lenn?hRuWubPfp=&o^<
zjaA3ON{QT@%LjAR8V0ZP@ENKoft}XsUgJBsuDk}PA3wQll7W@+C?v6l+TU*p*RH(8
zF~wC?QOdWu2^*Ao=$520;`1Ds5x@NLve&1@ri-?*2n+J^zy?f>jg6_2c9PLk<D||T
z+^kz?OR}>o>KIy4ow?{b5vOYNv4#3$YD9zW)hi2e%ZF*I)U=jt&nGh`9-LIFw82@y
z1aDNy@bGp^aKg!LKxzci&Byk_1|wjfwKO3phJbx#@wX>)MOy`(t7~h3ftaxBX8V#J
z69c;W*Ut}BlLxg|V@{>RB}^76Pe`nc`{&lo=lI;$=4ogNx^~IAtf+~WmR9r><p?1o
zYQvzvi-qtU9vYg{{jvOc#>I4w@N{^d^@7WHN8hflz=8wuAU>0Kd++`k-~5NEtetLP
zyN$=<oWj;@?aL~gAPb25(#IRwKcuzX2{LqtDjqtZDeEJK*|}u(BAIp`_Wex!uQcrl
zF(!CZZ=8ZciLXktZ8yv{svSB#K}dN~5u^qyD=Qp4y!_?Br)iK$cXVPR`Z=7K;9TxD
zl&v5qtz`hlwcWBlIV$#DxD+@!H?Il^YY+J<d0U^`h=C{5Qg{bK*rgh2k}<2MhC9Jn
z*C%n=!C-prAy@ENpZ+cof|ZXaBW2bNklr>+Tp#YBT7V@kPc0p7%)mi7aXTE2^Mj5;
z)MRGjJA$<*OxofE1P2S}<T@tGx!YtszJCS<2@;>(O$n&5ZD0LSzB(}@`6WzOMlEmj
z<-qL^nwrqlBlrZgec_e~pK=`Z^>;IL#KgB|1l#=n3eTl%f=`2je&S~O>Lv}R)%xMp
zdwoO|9CHGT^LTtN*B?_;4PaRFh8kN}S*%R(<}C1F3Gzl%dP6?0|1ueO=2)fs4X~mp
z#1sVLW<y_t2O+y-RjZ2Wg*2LybYsM`b!35tPG{OuF2l>?H=3H8_p+=)K@9m_fY|Zk
zha`~6nhH5hF;~MaB?YS)rL%=`>ZW-N@Lwf5O*%!rK*IRA%e%rxp?97Wp}UkD{rOA;
zgd{r$8=u!JYUPuzM1lOYvYlmUC8;5+=<ekX*YN#0e*ugsJvFua=ljc5X-S1fQ)hz%
zP7W4$yl_t6_JfA7yZ7UY$(7k$#XDQh!%S)Cw9vU10fJz%GF`30%2)a%@He}`V2O)=
zKmN3`zp}mnJj*Wc5A3%L=eNfTpnBwj7xefrGhb9QNVf-J#eAb<`>@Ku#Msu>7M4V>
zwXoa_%qAI4Hdn{z>)v<9dTyaMj(!L076@V9_UfKf1;^0a<EFd6Q%xEhIim-X)YL|?
zq{x5NaU>y?gZ2mb!4<;?8cdJrl7K)-?D6w}Rq1_wLi%0k*QnC7nTUJ3DzMLV9Ga)R
z;y_x}kDOFbojHzoQDiOraLCp>;VXYeV80Ww*!N?pOKo#WP<GmER|5v%#l^*k=Nz$K
z^uuQ*+OJ-{phJ6_H(&3%hPNl<C@hx2u`yW6s1I*20cvJu`{>Dw`ucic&qwxk=5eEt
zglI54&f_AEz3Kj@DE#vffzSJf%T@_?3elne!(+eiOlb=w^_$KNR&uVTK@1W2hKUe7
z3|c~FW+w2G_m-BI!@UVF&dn*v$@$hogYi#Wk}>M&Y;9(Pb~bbJAaZQOk|5gsb$OP`
zSXgnZ11g&N`T3od*6y?A<NxBqG&C^SIF8uZ=&Y?HSKA|YQ)sf37$nMnh~gm);Zkss
z`YDrf7hq0H3$cOmzvf{z!i6Y`D7iLuIg`hYfX(7YvJ+<eU0o+?(qp7Ro5?^A3Z*?5
z<*bibTvJlAJc^qrtC?uk%JS4c|8g;3c)@DJv8UHf!Kkk9VB?c*>;5vcu@O*mVrH-C
zo@Nunb+BZ$f!h%0r0?!(k^~bC9{m9`x3;zhoN(`1BXJZQ?ChU4-0nBKqR!6j^PGxJ
z%zrd(SV<Pv-x_-;dgUpv?1d4S7DBOY8I@+sHreBIHE2BAm3pKUA9V^@>JvdSHgB;&
zvDKfsVEhxgfDb)A!m44tE)1nyg=WfIu;su%G{D&Pcg7baz{7+8NlQv<*c$+CYna`r
z-&4;LW|dEL%>ln9Y5P0spJb==g}I5zQ5=OVKELaah8t5E;WK{G{%Wm%^+<8bnLmCd
zabq~OpbFU0yIZDtD2ef=oMwtL5->1f__<$18E+q^O~cvbyl8zy4Cu#*lUi9_fPzM}
zHos9?JevdiDV}V?%Bd>NSfeZxxAcbwp3X5o4Vj7tXm6`W=JDuD0(aWZ4z+u0S8LAi
zRDTXk+^#~9(IlbLl5>%U?2Vdg-~BR;wY0LF{%0>M+n&V3$jC@bOG`}rEl_u2Vj@r-
zP)X|mnf3gO69E^C+6dUREGHBi8agZtrrl=+QYk|ag>XqgS?|(C5xiL?RZ^!2HmCdF
zd=q3yM<P`=$fo$XmBWJhBOfuN2SgZn^0SA1zX1#9D1SG0baeRl3cg!!w!3wcaoIEg
z4oxPvQ|S|Nxye+flatd%o68kozGBd8g4`2u+WL}2o2G)-4$&nASFYB%Fr@auIoFrc
z*Su<<f1X+~WLl|wU)OfONVQUqBfW`tS_DtZrC%!8ICN#LHY@i9XmSk$dPpa^r=6<S
zR`;xkkm_orZCHHRe==Nz*+?O8Pd0t%ta@-AmtdwS29@m6<M%YW;cA4Dbf7xDW+TGD
z%v2liUMl%VaHjM6qryQwbG#$OD1GSgs>JmLcM8Sp@qC4)NCVho_QwnMMX#-MR^LCH
zj*806%uGmv6&PHg0`G2@*o<@$@p(;^4MASOoKI%Sx|!xVOHGUIECO@#H?t?+ijC(h
zCXDOE!ZB|mpZ0$&5*CAg9p^<U!ax6zK`6jol7p8jI~R~P0f9+fm>U9CTGfwG?k6$N
z=B4%OlGHEP)YPZ|DQL%qfTu`{@r(51`HKVgRQhQKMF<t_*t+N4>Oo^8JMIC4&i(~Y
ziW^cGpS|}@i}R1kHYQq2h0uxiEK7U(k=Qb=M1Ch~*WpJ7(p8Gs3=<>?RC52EqM~5;
zWF$&9G<IDE)^=D+@K|O@HW1dooVYDhLfcT?UYz{xW$%AnrE}yk;aWjs%yqGZo-%{S
zj^24D1EUT~B65%O*tzgfmN<S9bfGuz7%oSyu0%W`%WAf|9-Lled6z1d`);;ZrSJ1c
zvFII`em*<lu)}M+Z?h>WDSc>fL9gcnixGEgCN3@#&??GZ9C4rme>T7KKS8H9et6i7
z5q-72`M`L%6p4~$Z4Ie^p6Ps*!f58ze&J>Sc8mpxosS@xzo0M`&M7~{n*8oI@OXXE
z!^9^g2`KmP;Cg1+JV<lxCE5u;S*|F2I~I&?b$qe!-C<e5r~yF#&|qrs{%WVED^9~q
zNA=r&rmsrM!{W1Ae6>GpeH2m0$@-M{#&ySp_uD^xqefE}pi)iwF;g`o3ia}o#!cxK
z(gNyYCq;;3q{#c~Dto8(Jf0H!KpRjYbl`_X&T>=Wuos9D4^t$o|3#B$)JoZ)h!QyW
zD(G)CNb<z5zB4zVg;90j)t8vxA|_x-E@hPRhKk%goGp<msMHNrDPn-PxI?x({niFf
z23diR`^yA-KsEqQ$KRpA*eEztN=Q?Jo$%v#4>7H;eb5vo_~)*H&-WUQuZT=c#Tsgy
zlF`o#M+mUy`=!Zl!WvaLtJ<|Rcx(sLS%p7;fVcC79@X{nJjs^WU&C|OQre7@Mz_TA
zwGiO7dJ<1hPlpf*&S}e}tvzEGA`dkCiJ#cYeOzr0W3bE0$7Ce&fuqe2szGPkjMc7S
zCSR$wRt~vuIi&Ab&d=%85`Mf%LVNjbq~<?lj@#hv;FqI>ra@64Loot<2lhKOAjh`*
z{AZ<d0$kUrzY9K*?*ax0Vbi60o&CQ4$I&%L<^8wuY%RN%Z7kcijXSTFtyRm*_Ofl;
zwrwnX&+q@d==7p<c6RFiUij$BrANk|S~>4H_%gFvy3=)d0caq#ESpWiPVVcq@52>6
z9dEY;yl30!YDQW!JlRoH1b!?aMOV{}nWMeE9OtNf?{8oE{CMl^=;*<4CRoU=g2kM2
zblvuQt1LUdGr+%bPg@c+zW@fff5JX27L5M)vr_->tWR?<%n`H4g12}95PHf&kEGFD
zzpSiQhNJMDZ+3c{gTR!%cl$z`(K?EyNZl==7>YfPVT-D~A8vVcqZgI+vV2RvzM&^O
zgJ-M<7IF!uxi$>&b<dU7thd&T7q+ZZz2)l+fEQhp?e9#c+wq~5!A<5mCIKOg(_^8R
zW08lSHdd;H7d>9V;88ol8{7E2a2b=5g2Ml<958F4I-n7<`cbrP(CajoeIlH0sP3Ls
z*Ehze<cQY}44-t>*^W85-Z-%JKJR{N9L}R|uGC#iAC4lZo4&=UJ3??1gxS5w8g-c5
zJvO^J6lmvuH`wR$ezYSP<fs=Z!`%ei34J&<6G1EuMJ1`SY4cA>{B(>r@eLS{OUzEC
z`vvS*1}>j_k<4JczC~f+%8BsBFrbF|dUlRm9UUFeR3;DbZvoksz+1MqKHcpP1C)UO
zg@pjFR7L=0p`$}+LXC+6T4W6A_|M(9aEDgCCoP&3(@BHh+^)`^6Sa{lVQb{5nbCrC
z4n%RisGDEqN)xCw#^Y@%;IX{wl*{yZM|m_-CNu_mx$@^vKf=R#8+Hw@gkf+{baCCt
zpV*ZrYkYsB1{GZinIO4(k-tp*Q7diw!ZF=iQ44LU5_WN91Ut$c!&p1nX_?^0)>_5)
z`aJ1GuQX9{s%X`7`;_JNaVr6!#2Olh(Y9Jy`P_59h%S{F8y&;QhM}<}LWVWQ;J=$o
zO04#WqJfqP6$pTKtio>g_V#jeh-m*#W(xqmpsA@TSk6-YVJBFoP}Nb$s{oG)^gDCv
zdn2k_k0RbR#8nCEuBp(9u4c4s`Ng&<sOEe3N7K_8ot59N(SlbuVs0<k%+b+NCtv?A
zIgP?setMzoloZ&qO9gWog>iLuMD*cgR)hE$5<boHn%B0roz%2_P2wmXH^Hj=*NL+B
z(g#$={N!{ALRW1M0$fn@PN;=$c@@PkK2OGZ>ewNu#R>5A++SI~CrDr+a_5X`Z@BpG
zT$i8{a(Cbt_Z9moWqJmaUNT4L)dhUg8(heR5A@D<Oc|SrNU}cF$q>UcmD9G!XzRR#
z>N<+dtcG8EV98(ux>IVtLE8gn@bc2CTjYr1R|eL!HnAK^vAjXphPt}C++0$Muw9+B
zRW+T@0NoWD?8t51yfLq^*q+8GJeN?@cbw~K)G~kXo^e=Lo`zw1q0i@iRO}g9rHce5
zK>(On`73SVpE_k+AQ=ro_f@zci48p*3cBUQKDgnyrh9rF$@mKzT8;P9)z*1H;^tb>
z4^aG@{?LkPSASwz!eP%}V7qr0I+a~f$8Ko+`bL)Qgfeb7Y^k|bo9!%1sJ5qmGg5jJ
zH$F$~v;m**+u?L$8KFTF89040r>6Z+n*4_Un@Qj%0KB<<Ol)msPalhY={ya#>hSA<
zo*hfw!lc7OWy0@BJ8WKI>qI_5LIuW%pwl<)t2hz8v;Fcm%SK~b1#OSfO)=_oKcc$p
zv?Sh)t2pQs{-DDyhs(83{VuNux)Tc;Lx7jINR0o4iC^vQ?Oj`YRZ#oXhh=MFvY1h9
z5!;FQe0X!V8c0%gb?<Hj;&-Yo4s}lf)vTlhWzq*LZ*XaOdBLtODW>cq)Gc0RsV9Ab
z2{(Y2C@D=hLjX7c!GWHOi%ZUi407}&Ig$kMCZhh@&fRhAfBPq=9^LeaI6KT6+{`(g
zvI)ft${MDsu5KJ{C6lXaOq~x}LLpGlmv|aGl-Dkujz>qj$z8S5bjvxM5SD4_5xE&N
zX89@lRaQ|Tw-@F6*@mPPSlULhKY?Mv0A5Q4*9`SoaOqVV@B;H`_*0V2K8aKoOd?kq
zxvZ(lQ5vADm{GUZ616u4>hUuEnNdFCD~$o`lh?zTMcO}xr4}slCr>l#kVhSfE|Y#3
z8jX^aW^BP3JVq1Z-%}s9c-eWvJ9NrS?85B&JKO(0-IJPLm0a9W7Y!jau>MIT)vH?k
z!_SHf;(BkSg+UM8W9Q@qlBYE#`%z{t`iDe*X3QCrEWjEnD?46q35cgPL}mJ)WMoK?
zumbr)r4RBN8Wu{Z#EE8;U2~~rS3lL1Ah|Vz9H*?jPczl_e*f$FdQDwl(of1?$u?E@
zfv97Kqh>IF4^M#rKxwl*?O;@7vSdsJ79+uMphWSxqe+;q2yF)9KW2n~i7E?4d5J=(
zg1<}AR!N7#yh?Hbd(!AseNR?RhPwuCm(my6!*Ey2w}#D2U^ZEar%zZ)3b_)*sO>yG
znw%ONOUR0-y13u#JYa>sXGMhPZzovxPir;M(cy7DQ@?Rbj8oaxy#+q8AE2OzY8~}F
zM}pi8kuT$kP&l{OlO@lA44t0CBn&n^VJjU*Gf{NH{@*$&>E5PiUr5&Y8PQ`0bLpw+
zAT<NN<wkBVMA4rjz1KB2`{VFFU7cjJnDq4q!-{}K*zq^~F&Q#x-!xn9_Sl-Aopm}K
zOIl{mDZVaMvKdm<tD(&}v((&-W<GDoG`Q?IsFm9_13=97mt_Ue#jc{^IB~Ol%CYEI
zj3jsxa7r2jO#By#^E~6X06U4hUJ7)0QV&OA$bnV6#|$)7aY+WTJZa_M7-WB!>+@BS
zY2^#x>7h;9@BgT!2_h0<e?+MYtxt9zX8H4%6OHqS73TP2`Bs?O%?9eEwKh9^u#M@f
zs*EG9qU3yx_4qujxdN#$V-pilQ(j(PpH*xrqvpZx76rA|f;p&Nc>0L?446dxakLG)
zypnYG4@NVy#>y9BNBLdvw{jZPGXHcn6$sR3U))!vBYYa-!cfw}B)`XrU?3xN-XK2w
z>XH!`OF%(;y1VmsR|V7T=;+wk+_<o_U(|jC9zjEZ#wa3p0g4`4C=MS}$hBZtsOdgk
zyJ*vbyuO|-`xAb;s)f+MBATPr*`J40y&2Yqt?6DA7M7O6jPVmRJ-^t7;DqblkGaS=
z&nCi_gA4Fx@v6SW#G=f3D`2NLA%}J$W~&@o<vL&U&x(sdmYG3h*5oN5t#`xa)Mq*4
zRSjB?nyvFttDch-H}CeJTiZ`lf2Scx`K_s@@p3SVLGwEd^=dV&8f!4=T<0gjoadHd
zw(^l+_povb^bDS>en%;>32b^_A~#sZ;Ga2MUH#=R=Bq}Zw~PLJh8iEnVQ%NhK`I*K
zbdu#4MtOJ3=}3&b9_b(^iJhK5YF^ck5{z6W<6G%+w3)LMCWiwxtlewv%4QWgIZS~S
zln5LsTcU0`-tPy@)}(8S%@y13WL-xu7n%&MyBeyScktzMg>eLSKdQ7rS<SNF>oey?
zofOz>kU$>O+zl}lxadh};x1Pt90W}Toa55yecd&v@%=?bMZr<u=qJa<5UfEK)y(ga
zd+~Vq_>V8V9*A5)KmJi?kBUQ@c}M8MocL}fBBLPsqYo1%45aWKZr6|6Me_Ra0PVu%
zZ0ewKIKk%HJlT+c+pnsk;^OvjT$z+M(*4$w(DIe(T{9LHbQ=@%yN+xok!1mS&U-_L
zHtq8Aa<}cb`BjKzN8-rRy!2GPR6XH8jHfdkBcD9gneVSy{_lT>#R{tRI46zKB+KWF
zOHbZ42}ID&^yIm->qIbJf7(%)u$Fv~{{S0TcA>R8`%*bEISFnrnunGRJ`->%7)3l_
z%<b+Td+mB6-#th*?}q4Ox!vwgV&UviQc29`>^Ji`1YfQcW#q|!6P6(O(r$fqid~;!
zG1_>m^RSe5Oygi3U}^H~7qO_$7ME*IN-C+Obghua0G1N)^_$)|XE`}JiU??kKy^A_
z+vz3%5zuD}!vXetQufJ-iDw@{LQNy1ZvRZy6%-JJAH=1W2CBd7{rxDDGt|7lIV>h*
zmO(`OuIL0AN-DKFOht)2O}C(h*h^+|F%OZp&*{TZlQXkXS4DO8cdL?Z|AwGbBPi`<
zE>uhB7C*l(v#m*G66j1JeYQ*%uh8-Uxqvcpxvc?H)=c7CkXJ#PD4Fo;ZP9yf@7>al
zr{BkeKZQ?&4Z_n-<<oJFzaUiAp%`56N0{wPwf{%CjbU@dRPYLmKvxyT0EJZoe`DNL
zoWdVxor!I(Wjn5PMwn{wVPS4ke4Ar?ESMUMaPgYW(a}=_+{@#0@?;%fTVtx!M{;hv
z*z=z)_4btjZc$iRXi-SLfqp!g`Apk-C1W#w9(AWZv*b2^>TT!D1U{=Dk^SvxJI~X6
zUcgzt$blO;q>4Ew#P>T~9J8ceC>MNESc5X8(mSoTmh~G!a4udhj8^e`;txI+l!LpB
zcq_0L|M^x0iW4{=p8b@1$cR5Oi%f$`ySl&}CI?z#*({){6ioL87lWLkofNP|P0r8$
z39#)SZ;wuyWFab2s~DQmb6<gU(k{dl0Re$s_1%fQqM6k`c6AI)2)n>>@N0-HZ4xD<
z8Tn!r8tlOL8iSomYs6vO+m9|y>W{E1ZLr_6BB)zl{x63qbi$~Yx|*&o<tOh{Y9Kxv
z+&5eWZE#d4{j||UWOLrI4?SUW$RQnVZLp-*NQD)MD6uFv7Z<|JSwXXoa64y4m0Xz-
z?0CJ$mYtHR4DK1b%Xu7TsjKg4ZJn%_NP_fsitDz^8)jTQT9ag`d;3!o2DhsUc7QKq
zieZ~yjI7nYnG4KGSTl(0C+LsQ8*U~~3tm7O_#wL;RAz6xZ$Q<1g*cgr^E8aSZHQ*c
z$6BksH0F!)W<9PTBG6`>bk>lkLN7PQ^Hb4PR@zz&VDnL${^a<@xq{0H#U2uuNx@V8
zg|N1+drBM2?oh#Wwxb}TX6SiR5J3MmUZX_9^wdR`NRT*w*e8=$^|>{atAf<hwD&V1
z`4qHcVvJWsiG*7<_&UAWAWVXlyIoeGVXz^#u1>T`$BSx(KeVL_;qM!HT6ci&X_GLR
zsTm5Q3Li3=8}ZF>fPs|e_Mm2A(PhAfEFvN#{2g;Hh6beKNr6(nh{wfH{32*EL~HPy
zkJ%h+z;9PtDsB=)+;pR>x;W;sUzt!ttyGoEM9h&!SJEwNY-PG+h2NUU>PQx}4D6a&
z7S_^R^+tkmAyyvB=QRDE&u$l1A3_8Mir>cfz?ZeK7x>+)XNBw4!%T?=r>x(7-<BYg
z4wXDRuxNg)-uC2EOo_S^F~B-Zh`m`m#-Fn6;<b1<KHDz4H(muY>f1wUGU)O-Ty~kR
ze2dY-FsN4$ZNlfBBAbsMdVP5$ISKnAIX=BTU{M(84E;u0yvIER#g>4ikO{^9C;~qY
z;?|w8{<=nOh49|BX!<U7t;fj7h?NWb&)~o1`(G91@jrSs30hBEM*o1oF%1{u4Gx74
zPUz1lR>WKdu2^PBVMKD2F}1Ta&4f}+GLE&*_7-ps(sq~}nZ=A*(s?uy!1x1BLRx-#
zX?IHaxy(Dn&^?&{oS+zI>#*JKoL_PAP_9*N)HSl{i8-*Jc~mueJ}x2{M$%!Ev~;0<
z0}OP@ET-MSh&t6$7-TJ^Do}@9A_)cQn~Ma7KiCe_%9|S%;;St59^fKr(B<IZz|-B>
z>U<2GMWd4{gnj?k0bR(NYb+I+iwqnuZOfLXU=yg97i~UISK5s-vdnVb0ssK(QSyw9
zsb@$|SQfkEyX;Ig*EnxQiKHzWd%JeO2U%CwMzXXKSDk0g$7d{#>z9649!FW_GAjeD
z#4=N~Z6YRR8qB^sQhm)SA*o=@PVf3O4)i2OG<idlY)5qBpwspjwPwAd92g_ftmB7D
zn2&D9r?R5wu7~B1uJQ6|!8iHC&`u3;)aVx&y0_)i%-fXilG>~Wc}RKCMWE%X)jNXU
ziefIi74LJ(Zo0flhUokzFv*X+EtO75m*<a~S!Lo2=;@9~;o%0VA1msbq5E+-%rb!~
zo9V3s8NhAC0-3%wJm+NDiFBq$diS-q;Phn`b-bAfS9~~vb*b|VsZEim?Qk_!%*aK*
zN5MpZ($vs}YvFh$B!%c|+YMiRqTU<1(&A+HPdN?v88I<&uoVc?noz;xN(fa!Pfri8
z5GrSFGB@5L;Twh~o_0vThPFj!@2UPx57V{ZPWqU@iY?8rPTZ!?<8}Bjdb4A!CC!{y
zO6bLL5C+JeCAt!9`amxzb?D2Qww{Q|m^qs;V<vz4Rft#Rwk~Z6<ewDtqG0o)U!Bpx
zTrHJ8lnND?!`Wb{lU8#!ch5h{ys0(_jC`uA&;Gq9y!Z5m1sZL?dBfjCKL3Z_Hu5)Y
zvLt=9H{8L+t8bg)3+o8B>twIN<;y7H_hr<58OR!+17TZNB9)JFAf!#c7XN}F4&3sg
zM^qM~bA&=WIy*!7F}=AtIiSvwNpl!wG=<k3h0)<W8AA@1`=*qmnN`;2bLpjenskAv
z{piVX%#YnryYY3u#CL0&YZV7<@^%5{8*9888>mk<!h)q|>Q_Iw0>ZNxIRD!?h+~%!
znI2UXv~vyk|6nFq<C2mn<D_4wDbF`sH*=-}ge<gGzKH=JT4S=XNw(a-zfeXm0!)EU
zc0lWT<U}1Xr5K`=#TMfLsN1BFf`^Hb=mob0t(Pi*b|c{PjFvStW<MMaB21>rnq$jg
zSqnJc*$EiKFoc5&axaBb#{u5Q*si6AC0mJlj51owSS?s)4z`Rb^vo)M7sM6R>4e&j
zpvvYPKoc1u%|BOf_RlbtreA$!u2#PrQ19HJ<L!=&wp_uu+<VF6yLt4q#B$%Ga})j&
zAcpBYL07(6_*|-HlJfVNOpGp4hFq*YrpU3tQ}cQR`u8+&`@;i<QVQ0f$gUxd4~`?*
zzjpMGMI{ceq4A0wlC9uoo8@r!!K)M=D7$!JD_X$IP4q8H=&+s2y^6;Oh;wDCQ~rm%
zX{??NdRyc`J<RvO{#`I|ku>~wn;!9n=_uT2Rk8ffE$%$LEo69jPXLb%aA9I*hFvyA
z<AuD<75*homKe@P1wW=!%1`e|;K=LzjRJkPhi$`SO!9GsvT8g#(1$CM|Dfgsv|h24
zA4x=-*(~JEb$u|}czLPEY~)~PQ})gC768#4@h2-mP<iXXGqreSAhaZ9<KlEzo57px
z^^@X;AAL8PnK1ti#|u?e`R%C1bxC5Jr&80}z7Ow>W~zQikp+31If~zk!=$h%JhqDB
zWc-Wnze7G9%IYm$!Rt5iZNQqVy$G1zbVFejG*Czwvcf(NnaQn}pfap_?>CR&u`-<o
z_fpOJZhT{2+%JAqN0`M%=A-l?mPfhW4g834##~j-Q7lB0p)e*xgAM8RIrlQzfe@bA
zoLrQ*VGpvgH^yR;tJ;ThaLev$IX`q3zS{>)Ga0sC@Ci`aNl9$%hNcal)#Edpa~mB-
zPdAUY9z=P=#RiySrmWtx-idgtTFIki0H0GD1uTS>5LK>Vbq+Q*{GU@~aaN0E8sEpJ
z;m#H(Cly)`Y<jHMtx7q;@GWA%#QIu=vhZBev6n**n|~f^8F%N-yIRM71ZDL73XcD?
zwlNebw9%empjy{iUys!9w$-`5uoW}C>R?M>UDEP&r{qVwe(D#pvv@LETzWd&JMHA=
z2DAIlq#vqwc=zG~<_HP-O#^@M_ZTP0)8dOAaN!nqVt;(ViFyazD{YT0>?r#y+`2V-
zT?k2=!TgGp5;|Lu)#G+|nh0Ns?sfCykM8A^ovW*=bh^hSU1A8iGxf)?#`?Cn@?@5P
z;OY`xTA7mH(`7jF>(C31`aaX1qmb>MT7}_Bq^r+)>wjn$hY~bT)LJm}1+By-lA(NV
zo+_hYcvBG+vIs9_imJBfFGaEe*fd(y4=H^5w8g8iuINbH+S*x9Z>{bSj}xTnYC=z9
zTApKk+IU>ZJ}%4xk&>0nUnkF};5wb#W-&*&kLS|i-6WJ17@MA+9`F?tSw4+)Ze^v#
z|G4RPgO``LhFcHFeGkZ5mv#^4*Yf*vCM~sE`Z}K$?M^ul?*qygLpPg%u$AckDfbuS
zxR2?x;e#ohC-U3|)zzJ3xW){(skz<zy9Fh0V0(niw8*SZ)ZcOonMGxVZM+BxQ|E86
zwc7mBwU8;j)hw*6BD<1*TWwtMMT}C&GRzY;cT@e1FkQuQyS5#+TuW@j-uaYATKJy~
zDC9~TGv`gymddD+SsP-AWs={Ou0S-B8s`Gf*S>HJNzh*^&*<<5dZaWgT3?M%D=c_5
z#}|yW@X4}ttmD?;80$&x!ajls6eF+su=s06y8vR)1Qruqnc<kjtFr&%$na03z$vB}
zHsbZ5L{uma>F@0UD}6!WP*zu3j7J^sv>H&5T-F>SmV<hJ{T$b-+VH$Tfev*Q@NPQ3
zNDE3>yj%ZX?GaGv6-7x{nNY|DK`uTk1#C!^@P`cBKDqTwYJDcGFn`!n0sbzc&HBDP
z&wA0yr)gT3uE*Bcn3Ss|DJ3PiOEP5%G=Qg2&30zNPmD^LbR`DD+)e1WjORJxsD639
zwLbb==s<7Ciz5hA-OusbGWmS)kc8z;ECj<#q5<Z0`FEc=TfqClc5GxPt<*f0@NNZT
zFLgh?2)s?btF&j}Op9c{QeRT8NSXqD-8WgzDc060*5FFRaj2r6`9{orWwP6(`-VD9
z!X4OZ_LVk=vO2IyPI5Fr##njGh#ynJH$oldLN5mxR=MhxPcvK^Qq5!PF2n&-K1Pgz
zLgUONIpRbeGU+a3E^D!mp3QbYcz*&fhYVE|(jm!7GF{z1$w?8@cfe_uaoIv?b!ygJ
z4ASba%6Rp{tn}np$@Nbg4>0*~?G&?0`m8egY&#AUXlAPfo#&VV16V$X4UcFO;bU9w
z?+!xFMXxXQ6JnHdyzN@<CSTfZ8Rn*TR)3qt=<JC#z^;DENuz|nLXoLf+_-{ufuGtf
zObaiPjpSP!Tfw6TtZ#01R#sjrSp8?`FfsWm{hgGkg#;TLTlPz|WZIh#A|V>wiuVj&
zbSOD&=%nQ-0h%@ODn&6SMGmmTjH;ep+>(SaFtoqTM`sllo-^x%@#gf@kSOLzmU6Y{
z2<kXjP(0mBn4IV?t&pi4n3Q=BvE=_Wp{Qx9Q%|msA5TtBwtCdMZe016rXwnIp<>Ne
z6&X0y<XkO{PEUlr*_1j@2Dc8)c?Gl$!yT`?XSUzR%CnB9p<J`P`7-?v9{=vV-~QWr
zBWAzX<$bY}RZvh^)L@fduTq^MF`ecbmQZ6^qA;)baU;)8bK0Y8d*b&yadWzGLYs<-
zblCqh#rvID>!_{)7T;0Er}Z$CYPCW){s9;>2mh!gVf^`002q4GWhfIycS7)R9cKpA
z6Rx@A9prw<!w5^O;WL{0YkR%*&c)vr3v7a&9I=C%a`=`5Or(IzYrY2uWy96Q4pj|f
zT}ayw)zatkPnmKJvD9nSsS$^ST!@wVa|;VUacJ`DRZH+Ydjm8hoWDSH?1YGufsa5G
zfiD3^G|z-sFUFGS=Zh4?O!x!GcRX%he0n%K0t$L99yL#7d4n}C6+poCSYSs_w8Q^|
z?v-mwmvtJSVR?i(1)$L$U<B{VwcY4Y6Hp>HsWx0}Zm1}$*!Xuax^%wGY}EaE`*P<h
z6^pag(Oz{P;(LbTv>`L~d?=sOp@Jl3pIsgvUaM?{a2y;IM$S|)`{=Kz588_?LR$Ip
z*kVC!%d%WJO~GD%u!DYJm_((ue(Wj=Kdx%?6WDzYAyWRDoTQoot#occHs7G5Xf*Yo
z3rGC+;&`!5C+zz!YTA9T|G&3w4WiBOZ7=_X@2cva(x>{7CtU^tNiz>ikpbW)*J3Fl
z)Da@W>4Bqs%3+JsNA>Rp{n6h;CJ7~F7b~3~S$XzIi{xQ@Y~#NfEoZDN3?oy~3cyuq
z8X6N4652Bm;~1CFpwKD`qKB$~Q)FWfYQ8e9b-rLF`<LEXDKKsY>T9{59Ou}wRG{=X
zWeMdtmAQ_gHgdIcgOlX00>c-F$5!TF`W3&(5VBHf^CEreLLh)rPY@DsTO8Cth$KOE
zDi9N8`kRW1YOBjvFZ>9Qw*sw#Ea(Z|0D`ZegyEwbLSjUH4lg>mzq_QJz$o1EE4kx=
z`Lavs6Tc%5;oUVaAP;Og_?zlE>3;owd+F}~=?U`mG_k)7S1WeQs-SCmybC3^n@6<M
z-(@m!X=Nx}`gRf)=R!av!!kUvG&u+mC*VxrEm85Z|J)8aF>FLrjt8<j3|Z_`hHWP7
zU+9~$su@)?<b$~763Q;HH)=BciORIoU1TwxGXZ`K&gOK2ZlPPjMS6h?P+f5se5yaS
z%1WU=uan5yI(9wpA!@j!gM%Ya{wgCBB;<`{;ecGId&kzBvW7OSfsF8&5vR^<n6kr>
zAmPmF(!+J?F?C~?rXy|q_6YS(AHxU!4_{jXtS(xtspNDvi6_SU*cu|=GoZD-wsq)?
zpIPkO3OLU6+;SS_YdK4u|ET_P1EKd1Nl#hD`E4o-<KdwYkk0h6fVq%|GM1Ou%NbHf
zl%s`Sv&Zt8us_x184#{_|2kwhvI-sks)ml{pas^TDj2Lu^mEE8{w~0_U?&%lhb1K?
z0XGOIMn?nvMVL`rsKMI&bKI_39S&Q25rHqyK0RR&WK-f9&$3k?!uke%ehE1s>j;>d
zTAk$P`~IQU-0kOESLAikfaiJ46o^`X1rKA9<dZ+jvG<#ykQl*8iAy7TzsQ1ZXl@K9
z0vE}m;tHcy9;a(@V%usD->0ozD~91PQRm?6Jf63N3P62WW2W#}#=*Yit?z;p^qR0V
zgnyCWW=wr7VDVvIwN3__YLg=&N0NVE`^UVJkOphp^z^^_X+Z*6Ee^XC77ZhJ|7CRz
zI1;oh<ma|{W2>~mTFM?PrwJ!CojyG8%cE_8Od<M-1!p4+-!Lbcpb5YatIY54IU)@=
zTyXTEO4&O1abMJJ4Iq-_sUF}h;HMp`n0(kvLu;G6eDL|jRXn7T_j48cZZ=nuLEM7<
zOG1Om+t*tj`4tU~jp+S!*+Sdi`$JPpOZJ77Qy8W%yJ4BwLSU|^svDE{<J>h7=8#z-
zmH?G2+ZlCV8WV#uS8NPfrpxk;L``j;dlv8E2c##UwB3DP`p$(`=I9ClHKcXTRj}Vh
zWNe-jL*zx-Tq${h8;s2EdY=&dWHV(`gPUBZm6t?XNse6r*IAl?CD(MB)m6?>-{fk)
ze5b%%8*;fk5XV<I`T#srU{#n+2(`KyK4Tk*L3zaf!Gr(wPeIdGtWVmPJV>E}<_LEt
z&6sE~t3i_YF40G`(_+nfD=~UQrkoNU3s@<4jb18XO1yk^XyBaDrh05cpL|@aimR;V
z;9U1|9<zb;rC1HJ^5!P|GHF#$(=kcIr1`V{y!tk>_~IBVbWq<9iU<)U_&&F<yucs|
zNGNam-F^%)LkmLWO3V)1Ial*y@dsSyRz?ML`zqC2IKSwVqv($PJn6BlEx}|<vZ%1W
zka!pYhYhg8fz54}wgtFH#@N-CGeevi&wXb*GgPkTB3if4cZ29?Z+bc#e0;wy-#0);
zFTrO6tfJuQ->~>yn0%1-6!kd5&r*MdMkOtXdFd!9Amdi%)o@3{^|$tdVDs2zxA}i{
zX}US}=WU6DzL5Z3;~U<$!Y#8sVGvgaV@kRjh1tUa`?vk%R)$MAgGY!HB*%t{Hf_Vr
z3c_n9diry}*L!g_Jxy<S*~7vUfkMw?-X(W|`kcmSoDL#XBKCIp2FB5^GSS?YE$P`Y
z+2D{l!k^PoX~v>zYOW9&2dRB{-SXou75t68n9wttHMlekO7;-K!C}S;uEe;Ea-{{{
zhd-#RhSaQ!7l7|U1i{7_FRLsMLN8hW7PXB2lhVTYz}eF0#*bmBfxial*W=G8TM=-_
zTTTM`Y*`(WESFNyS?;=^ELd=8IPBr1fE~_A-T@REVCw{aykylU>vRDR)(o_bSwRzu
z$>hO3=|XhD93`^26An6Q5PBM~Y{s9dJF1m|9@xArZv5^Iy8S4PA)SPoO=2Aa%`M-K
zlfH*l3ypeN#VNY-7}>1(wT%UD)uq?Y;tHINUUH#fLRj96;iNOYsTt0{EZjJ9g>zdj
znlXapn2$#4g%{3%+d4P_=ip8c9_1PWoQF`+p94VO|Li4^Pj$i$>{bZ#sU2b<R}|zm
z7^DdFCulNXegVpBhEDf63$b=PfWkL5JucGw&}!H_pI@ZgzGSVu$Kd>x(o>aHj81Eg
z<8h<?WrWC|U>O&vL9yz&<XCr@Qb0@IU=77anjXr_x+6<@WXfqk*UJl&NdpW*!ZjL4
zJdQtv@#WB!ZpE5(3v&WhIRQ~<f^RR?c@RYxA*KMZ*8l`|i9VpiRKPHT9LKvJ|3IZI
z()Hs!2p0DH4=-1eKg{<bbs?5%;;2b2VgwA*Y{F^Wo<!M}qS&%d3Xqc)=xxy?P5mxI
z#PuiSV3<3cI~h1F*+)FfGgP`+$ECEmZ<ST7DIkU<X>m(YrfG4pu$xQiNoL6t8{jqn
zf=u&^T>T8G$?)`Pwb0F=s0|YEYvf?zuP-wBJe8BH+SGZv*Z3^dE5^^zP0yIl<SF=b
z7Z|S_If0VWN1a@s0aRXqO@Lb0Ms5m@`w}BzT#G*AtNcsV4C$K@VvR##jgUMDlN%}8
z^r=|r8_bB<iI4$u2{LsLyJ><s2Eb95Ug-e)1w^_*iXiNEFh)nc$9`{7C@*Fk#_FM5
zN_=c1LtEgXLGSH%)7DGW$>M?eVTQ!1iaJ0o)XOWoZQ7P+(9r}`k%+@)FRFeHX<itL
zz|t9ghIrWtPGns2lBKo$@EtI)Nbqyg-wHw8{jj~Wb+SUXNlTUppV|Z9?HlC)4j~L`
z0S|b)P2??_CR4x=68M|=bQN|3ctZ03j-l3nf&3AT&uL?2ME=g38zE#)TZu66@pa~<
z{Cc4g>XWouzW6$cLY-zP#2;)BYPxR(vd7k=H>&~vzPgG+I(q++NaS0*C3}R)lfa@)
zYTymM>zdF4%^DQ1uxBFpv3K2lWZWPBl38$U-Xk?I!rVsi8Lv6C`O|jbf7=TVZ6c&g
zVK>g_?mLg@(>4D}%us#US6KdBVhaePY;EaH=S$ZlJ#Fgm;-vu5*7|`9nA$?rMtIDq
zc~`K0L~)m>HM1A0z?0-xJMmjQ=-VKi-1AxSYT!>I?grqW#Idjpn#6=i2Xy!jn%VKd
znS?oiF!_S-IFa(M9*WOx-(}EF)OHv!zX>9HuL;Ug=QPbTc6y_>wG)E~tHvRDjRvs|
zys<h;m)H*Bgj@)|k9j#Jn2$D6gi(A$nP<*1#(+e1&0b#C%`}0kk25a?AJ3EoMJ*Nj
zm4D~Wq?qN*5v_^gsEG)O08^6b_lOF#_@>4Bfm&iu*z#p^k1M?ey@MHTEx->L=K?;Z
zBM7yhvOmed=1GojGf_cTXbm1DMTx|+<0rsA_`C(^$i9amYnkz!kBUYY+d*l=^|{S-
z9{r%MWKC~DC;`_Kwh!M|RNiDSbvSjIZhgJ2*`+wOiaEDR-sk4ss9rcNz(wgF#oj)F
z7cTOx9Q@1<jpQ`2{*~-V$da|&KGQ=t*JIkoQHws14$wbGPL#y-Bwje`fbhZnL(R?r
zRddGUvG?N*PG%CJKg_7wus+9Nzu|xtY|Z+wZ~aIx@16`rSep7WN0qRtg>|nn%Wp#%
zOdI-&r#{d#P5|7@A7z@F&6ydr>|G<uTv&6CvC(g&S+i0*jCsm%Qe5h|anj;qmpiEU
zS9Veb)1B>&gQm5GA(@lxi|Ef^D#!w&P&|crj<#JVy7z0IyVSfU+a82C7M8Wt)qxPb
z<CO4q=4_lKuF3X@LnD8zhP=7Tll0@U_P$>NKw}nH3G)m*r**L2(R3POwD6dQ{Z!ST
zcM(s>RSiwB*q*oR=0x)=w|&pb<FR`&rP#H#pOOykOtuDFWDMrBuFSJ1yg;qi@q-3N
zB!wU@JOynN!VVo4dz|lMFu$j7us5*!U001h_9KCW(ASR-1E0TxIRBJPNE9JI!C|VE
zV`TlmHC9Aaas51ff9n3{`+T!&j;@lmFJBX5`pSekOjz6s62Y)%b0uI|<If|!C*oKZ
z4-=0!tIM-zsOoq&6Y&XT(meKz{j*wz=NV<X=3ijXG0}5Dz!=lC3T0pa7vP|fvr~w-
z1hP>s`ModD(Jboz*Z@u;fUTB@YyaTtywQC!-z$rEHJDz0G#<Cf@M>#IO#gd-I<KG*
zt<+BJZK1(~<+g5azyY{gg#NW{cGv==VURt83Ncho!nnDWEUqz*h3Az5XrUPi44M~G
zc$V-7U6L$S!WaKHFnmT3%<jalW8w>|w=-U4VUW9uBpnAc5fK`H;MSM{-cKlG;Q}yP
z&E#Z=yr0HjU<QE(j>Jz=9#uaTf(jsLH6xb{Yr0#0O^xmCZ-7h&FtPz+NWjY(lCSNa
z@3pn4B_gEW#?*SzO?0<<BTm@4O?Q{+YT52O#FH-7R20s<fa$GYo2S6g%ZtCjdV|9i
z^Jg6_QA)LPaiz*e@~R){XTd5{deJBOshjdL!}j1sZAhrw-)G@^CE`i)Fe`#$%A4VN
zuNWf~7nm2MmgNMcWj1sClXJg^MJ;m4D35J_QsCt0XjnnL;6xFDUlfUYY_m<^;_p9q
zpd4Q=V5Ii3KntssOn|f>Riu^Ak?5cm0>(CkqHEJP90YO{N9wAjFR*7a#b+|2xva2t
zR=BaV2XEF0@7=|^{z{GbgDy~kToESKQxEFwKjyI&$h4)<FEMr*la0CnS`Ao(2nWa-
zN44YF*T@=m(j94x`Hs4go$0Tlw!CdV5G$TXoGup2x3by*H$aJiD_cvhNc@HsvDTUX
z^NRh8^HWmw#J*gw%uI6>%Ym-$m<6Ei&_#e0w6U>yEUN!ef@7j?Eo{?6pE9nIk2i=5
zTuGkO?m?kh`K3-L5g-}Ay0Jl6Sa9Yn{H%U0;#jC1dM5&c7G0m4>zj}taf~YPnh5EG
z>R3C_|7q()>vf@gvqIl_-}})H_!L@y97g<*(6F$u5cAy}`Us=05ZKl9z>BcGz_z<M
z(u()+fx<hnBi0(q=tw?I7{+f;y-1XIN~@%Y2d71j!0kcM$TcH=uFM{#Y1@=ee}Jna
ze5dKS3fS}!GPomM3A{)}n!^322ymQ8_NJ2TM0kRTK#La+?-%K!*0X|xxR;j4FeBC<
z$#Gu1BdG_#dXEuQdH)@D?_KH_tq^2;`SOFinF99Qm-Fgwm?c~S0s<%P*C9pTc;g+~
zS*GkO9vtE*);nuroiqJd6$UA*8<9j>F9m8;YMI3rVqHs&E_O_RDKzWUsFxUHOJi0@
zU2g#%6bG~^(n)C)c9|F%fC-J*R|K#>T-Xoe3}+eKE|LbGUru2!#eq}bY@1A~BB%dk
z+P&76>5d*!D`o#z!HRSIcz;-Rr#Zt3z1OJq`U=;t-=;W>8}%Sw{n2o#))&yh%^Ga{
zEhl5n7A+|zlk6S)P9~nO(G#_euj>qz6Hkkza3TsTqSkR^7mHwkG%ajz+#DVbFGysN
z*ZZcI)3YXQllTIGG{EPhN%dv+03%AQNo*I`!80SXSD4>N`LCcnh;Aoh>|t+Q)B!s!
zV9UI|%-`sC=jLS{{pK;4+V~VW^=w8@rSQ!(%}+8}y&o8c*(xWk_UHgXU(vjK{SdYl
zu6Oxe$7wc?^7D>bT$WlMudozkMc$9@#bevl=OFF}fl1wMQyBh+Nvg8D4r7o<k5i9*
zZW|~akPk-k!&N2X4`X$ZBFv`_@85dx19xTepiWTp_|z9&f_<Mh-Nigw@6n_w%JQ8_
z-hdR;K+XWmK_A*Ca6QQyIc4@Auu<ty-$kZ9qAa?Y`@M+=T~THQMWXj%Ugh*SzUV&0
zaS#yQjn;g*9>$Id8#4N0M}fJ5j1WUo7joJz4b<{^W+Ota2ql3C=H{typrkEJ^bQ$9
z=Y`G|=MBXP64-m&oM?f$)WveHigu4(Q8Tmszx%_0*e4@BU0A}fZK2k35<jfPz|sk8
z@1)>X2N@{7fbfL(XuHUJIykJ0;pb?|JdJn&t*j2T_64%6Fa^HX6@Y$M_Y22e<5C()
zDqq=KnR21wQScCk{k1&*D6C<(@=Pp-oQfDU|Do+tw6bV5r<;(g;8j=3<;)D2LJ!n$
zgfU%LEO$41JcrwI1c#0TT-~`jfU~7{;cc&Y1pY3m{zFXdLsSKGzNfA<+~Ych@jnri
zCzVMx(=Gw&B-kXFmCA%lPi-~>raH^k_}451l&CKLC}WID<{U-mX{fcmSqX|>ay++w
z<iQ==UfbN(Wb1&xiO3za=UCpMX+Q10ND_nLK-lOj;fsRmzGUqx;sL2*zX>OQ+Th1;
z<~i~Jbu+xoL>#7z@|&3$>O38Z9nCD>48P3+&B9jD-xf)zw%RHNJIwh&oE8+!DwaL4
z4zQzm!h}`~Ehp-#qBc-K42+4y-Pqz_SoTN2X72bsW06QOY7|hWqj^T`Nl|9fF1y9q
z>f_n{T1OWuGo8j6<`wloP3z|Fzs%%t4bvPXQizT?S1mQy4;R)gHDxS`5k?t0+#V2i
zPBp4OFP)F4He37}i%u&Hyb{EE&DK4a&FjwwYKP577bP*Y0u(_nIP5++7*PyqE_5iO
z!e1kyZ(H3xn0{A5&H)}c>4<zkr!6~W@fw1eM+G3XsepD*HU+Hf9w#ht(vr;z9|kE%
zgCdbQ5xtjAG+Eyku#1-7)KZ*cj({xC&|BPQrpC+(hszWvg*M|f*RM2xig`}7Nj*9f
zT-22pI<@1P9gVA=Rrc(T@(%<0aR7X#d8P3Xr~0agC+HcV6*?^{w*GBq+zY5!Z2Sak
z182XeA=}}(SUIK%89%ihOSH{WX87y=dYWG0?Zk^@;qM%$eT@)=7*=7woxzF;=@WBD
z7_0sZY{b5wFUMjJIQPKS9;8|reaed047RR~nG%b|gUhdC$AAC=X{MsIv{WAf%APr-
zcNX*67Ji6rh=C>Y8x7p1wG}}m_dao&FsSa1VP<e9RZOl5l5N$Lm)K8-V|IoKWd=!}
zy8X{w=doA@>H-Vm_Vn%eahSufS&Rfxp7-P-;210rA4<L{cG!om;;u=xjPqGAJLSxA
zRRZP~gq1)~rfy=jAJ(6rV{cDakD4)@oSZ=JNeGU2Y*w8t-=+G(rT+auYYCtc5z~=n
z9OOjKaXn%SJ4=&8Yk{N)4C@i^p^?_wVN+HW6N5AsfXf0eurgU7bqRG3Z2f*VFPU{f
zN;f;ek~pinOS`k0X|0YRgKlh<Gm_Df&0`7PLT|YX%W&L~PT!YAps*<*$cE$n07UG0
z{HnQf@NhaX*H9FDYa?dpe9hF`#qyc)q{ptoxZ8YA-A$5oN{eRpm#cu{73&3H2oBW>
zwIP9gr$ObBpi}SXJU*AfK;Av!4a%CBg`ihXqA*k?n$SPJm25pYvct2TKaeZPkSh9g
z%n+p&N5$Wuo>NGgg7<i(CPxe+Eg-f2p8NI%(`%@y5kSoMZvpBZ2>ZU@MsM%KNDfC|
zG1?^$gOQPWQ3<$^LfYS{vs(43)}^y9|Iowdw-rT_p(bRh?V4EOOyBFkkp@_1o&+=>
z=cea9gmV4!PUpNpik-E?nR>PWgFw@#Qfp@Yz$O5sYc5<$G=TXE<L4z*;y&a|9-oMs
z96B9?C1+3<d*sSx>jk#peFq+ioa<z{jBQ5>U|rtWw0OGytV=yP98Up8x^9HgWn1@T
zU2{VhIxh$-Hzps32p*W}y=T<`xDn5MvpVeIY<aRDD-1ofxp)Wcx>rAgzCYy<g!Ult
zbs%Q#S^-EOKp~OD1Y>K17hw6WN!n3m$sf;TCbR|;+&KAMzZA_s?O2QF$MP`x?WFoI
zGhKt`OX=CQ7=%<k>t61y8*(cO3lEQT9I{GE$4iBp%MkVMaK|gUe--`g?ROVT;n-zy
zpTocVi6FBlL*t0nQN;fEJ4-c+stQ6oSjwERAx^*{jr2k)T?*TM&*s@MmusP*ioyd*
z$I_Sx+7HJ=(@S~T;q>*Eg_N_G7r{lK#5=xUzQo0&$kP(TE`K44zme95IAuO;YG$o_
z8L?t(b4PaDofa8Xb@oEI5ion*PW?D5MN;Zh^HmsxCP+if*_;BtAek@-vos?}4G87)
zz{50&gey+Ykiy)EW5rAlLYZshLZe8>k?qZlxg+H08qVamC7#O&uc1j{UQGDlHNVJr
z`#iRoHvL22f$OJ6eP)UgFahLLY?TVxs^57(q{i>LdK>JC<@g|*O#Jj%nqP*UrydU6
zjIBXlKipC_)hZq@FD@!7D?f^V`#tb~-&BdrJZBo*OvVG5k8%Psj8-OZCWR`KXNCg>
zo$0h1Sv{6e@#8w3XBJjj6ZlX~e(-AW+)nUlbn86z5fAtd1-_@Lt31Lc$y%{laR-(a
z2tv@zsXR>5NRl%|L`YoNNp`FGusN_rIvxRLNPt}puzZtnue3~}%qiLQ-8FrS#KBsw
zzAi1J<8dv^&o^CtMfS7@gmz~1*JkA;CaaTL8wkwN>*1c&t@IHql*4ABc5y`uJCS8K
zWCD};?c#dhvWb)dy&ra&Fm7!Qt0lOZJaLN*fMakx$*%wgo#?YTWDWQS376DT>I~i>
z3ig^zk%q{Av5E{>U{f?{53S{f0}bvxsB^UIg$c83#stuu@O#}`0YeJv&#6xD%&l!0
z!gL3X^=GDR+#mg0^SYX&$4^;PJCuD2B@0vhA-i`8rwegj3|7&LtyjEzqq*55F|_#n
z-;onoNvm&$GEGg4!<#QDhRsPi`Euc)*o;%SEg*?!`^Y1?Eed9#d`wV$pw>M3$Ib2F
zrTLTn^MH;QF%b7w$v!n2Zv!>&*Dz+&oH`E?00Vv54o}d2vCtt`eQel+mRs+hwAJn1
zyuUZ7*X;0cb>jDN>-jo{!R6_`JHixVP$icGh#}qd{?4&|a6E$nShn?6D;RBlZFF`M
z6#y%~FWqBFYT>@ik;MhH()8h`_by6_(9i4kX7`YD{EW6jaS4sCfzD2{lFSasuHp)p
zUdRs@nS$iblfP%QOdZa0r1d)k+=xKX4Ih&qjZ&#Bt-bzB?r~UXksPkumwsNB3p=Ih
z_r>xL=UY{!rQ=5Ts*Zm)n`#Wrm!ZdVw#4InfF$+zv_sQWn=xSro-FE#9cw#)3^Mv4
zdPo7zS!J9XS7m0JEjU39hCMJA{CAo*5XwAv&S5idi9LD;o<1|<yp#E{j(N(eawWMn
zSE!rQPHL%>K9)6ee&ghjKZpi@)Ny8(+!6E3_^zPH{**9?rU5n2*?Z(yg%GyrO>pCr
zOAw%hzJ8llUadq9T_C|_3F$Y%V}s&F*T*@R6%!VS%Z!(*C8TAtp2-$dLyhLD`UM#w
ztU2F#G{+BY@W+aV{fKM7_Vw^Zhex%|0d>n!_Ta4n6knuiO!&TB;Ic>P$w)Am6~OQO
zU7<2*uV7mTyfJ{}6H9S>x9vdkrpXzx+U$<W#F=?7mdAcmhcD)Oz5VGbiw~8AKj2s|
zD+|asxjZ>3sV;X)jh{@bo8;ZEFrB>iT+c*#gXXrNb#)ZPY~~Q|cDliay!#6SB}Nyr
zkTK<$Mw&C*SkPl?6@^P0`6<CFnMhxTHq+S9pp3@C@f*3l7aPh})}I``0U0kec^<Fp
z#gQu0NiNV=IUY<6Fw+BCZ3tD$a1HEwz5FM22d{9(&O38og-r`2zG@(yYiX&IT9LuJ
zTXVxx=tEttFRyN*tts<}p36BVUE;D?9snmjiL3B`viu+M;3okjlmIVZ0_UVpYZu5|
z=y`aeCvWz~oYz*!?u!Fn@nD|EF>U8}f`T@yEbFK#{wAKjtY3CcxFYHzKgWp*Vc0*#
zn;)KAv>Q_8N!sNwH9+8snd<<~ffjJ`{_(W!U(;R343LqXktk(n8=swz{{No*ueBJ<
zsc<{4T>umMx3eW6B{NtP2P#gOl!cugz|-xj!ed?%fnMlCSMC3G#KgWnyvJgy#RBS_
zCSQzfl@e>sjh{h7n6=@UVwj@L8;1Bog?F*%N9gQ8Qf>_Nyqk*tF_{0ZT1~Oefk@G`
zsK?Oqn>YzH5$xU+fMG{AXzrA9v`P{h4c+VP2z(ct=zaaQxj6e~vxl-oox~wUQgVx!
zq0(YDyfik(xVX4}cJAKP@DA9Gd*xE*w5lCt^ywnX>!j`DDK+FU=m9m1st?zKs(}oP
zUud@TsTnq|Zs;lgZ(53e!2p=Pe3jErnD5}8Dq9HmUOfW41~kITlifUQA$HO;)VO?(
z)RlSO)7&Uq<{VB~HA;kPWJ>m6ycrV20T?j6h?W<KoBE&q^~>k&wHEF<Ri*sU08rwS
z+xW0>IuEKc%Gv($CD!2;Q}5wMqxUewj13#Df?T2h31F}o=clzT8Ztt!p@qVTluLfy
zt3oJ#u?s}Ed|?JR0@T{rvcx$aLT0G$eX>Csf70Xl76lUa(qYKbFA^c<QUx4n)zMa2
z)2@;thbf#C3tJ|>@=%=WDFQ>HBCr&&JT9R<pF3UtEb?g)0K9ZL)3C82!lb0U{dXxq
zyztk5OFOsQ<V2O|n*I}lZx{7X5>n-JvRwdi-aKPxFU-zSZ(57X*)j4=#c!V-85+(!
zVjg-e8sy3$jMpOklnr~y!|?`{K@;LQB&E__tV5tJkzX^x5z7FKM#9c37!B+_Uk2Ft
zql)eRv=l+2V9IbT1-K32+@`(;qk`o?(1HLoZ$x<Nrv}4QNn(6ik~>Lt$3N4Df@Rxw
zLmW53H8nN&imI!dMq7K_rT6oKtHbfd=xC{IL3kY<Q}(B#^SNWdyI>I6zL_kLLY3VB
z_{7ocl7ON%GD56{^cjr3`qN4p%!TeWYVZ4b_iDg6jrSlajKwp-s65pfL42v3N|3d;
zGW^a!=3{}J3J&Qa?_0j}uG#Q=dyiG9Ql5hOP+L{8P)AR|)cCb)HBEU-j7ivZCT(T4
zf2c|ER943FTJ06QAc>vt)A>oK2E#RylYT2eLQ5#8C@9CjV$TD3%ijtUl`s)DF-Awo
zWfADV5X-v;s~B2A1*Yg7;YV@E%9b9?0kH3p6n8X5!A(|V9?D2L{1SfU)`06qnOXeJ
zV~1^!Q6Vmrk2uz*?}@zjr=_UcArS;qCV*LN@b6>_!vn`6+{=tM09@qNkOdzNvfEw5
z_k&eTt$iD@v%0L}DRBx2d11|Gha&+F_7Y4QTZ)1QGA^><m6K;9vEiYCwc(yrq|Ri3
zXF`*m|Dx)ZEV>r8>d6Zk9?pVXrWX~df?0s@2j32DV$Dr4A+G@=si7Kg2-%l;3<q;+
zlj7>9*HU9izTlN2^cF?g<1RD=qPnu)gE_qPY;@*BiG7F{)|eQ&_uUR(5Cy7@NJ`6I
z|4!iT{D2JQ1Rr$R(B53q3^f_fc9*i}w@li)(!a-Cfz^k>I&Eu!HD-+AfeFBtUk`_k
zBu<cOLhAKy9Tn#gMzWI#C)yS_^gxV&8+`M^=rV!jI7dQ}AwzF~lLt8EdH34Ci6lF0
z*4eAK3z9c@JLZ4b%K?A3J6+3l!$Ga<5zp`fGP_&i08zzAg{`6QztOWQE&4H~WOsdj
zL$&!36D5`lX)&{{kc{)TTF%&P1;g4LlC~^@mRVCbM0_h4i3;mIv4Lat<KJ19A}<(3
z=$sx~Fv+{ZdXDfd37YgrFciJ@vp<vGXR>;%l8wOoy#>|54fD8WNo!BHT9wtvM}7dU
zmNZR7)ArUE*-?pMx#tHAv56#yHOF{(Kj)QmLTY+?(09nK$cQOl-9aJG^AB*U!RIIZ
zXD*ui^PG-k4bF$IdAx(zxHuIf-VAta1~bm86!5N0Pjun5qwGq1E|utgh#qLd^F4Y{
zu^1%ES(FtHh{GDo82=9|aG;IT4StYvaz;>Ong4`O%-J<N#q?5xj73xl8{voivJV2s
zTMuTF|5ryNCE}u3P{{^G&Z2R)vI${}A#eSX%Ubd;B5GllD&Gi3hwfRILplge9kcz%
zAxSV7#L&v}(pAsnDJMY44Sz9Mv3Z#6oNf@2GY(qRvuJ9^+(*pcOzy6c{SH|Qhr0)b
z36xGix_zqF(#D%k^BX5ZH3%j#=_MrzawdRSUG~IaAt`z&OGWs5q6?%^Nnv?GLAexK
zG$l_VT+ubmDZ<D+`m<hPR;aV1;q6Fa%{YJ8hRLk`@L(*1r3Uo((hMCx%7R0O&*<fP
z+o}QL5_#I{NG5FvoRw%3R4knK`ilG;&E?y5)DGw7^iUE_2E8o@0Itgh6y_sbf4}lf
zaHKbcvim_#+E!(A9C()%25La5gD2J(g<r$P-+Px~sps@5M1*7oc87AYs^8?^@OXPV
zmfeUzsF#SvcQHgA#fdqkK`Ji0Lw%8D4X5Vb^*S5tTPQ^j3}Ak*>=l5t2s+^hK=J=^
zbd7Ogc5OVH%eGs!Y};P8ZQE{XEo<4fmu=g&_1@3>ZJ*k&?LOyR*PjcF@|H9-H2#N+
z+89?iTMKHp=5GA}hqWI1x%GRheJ*W^D0k-o*HL_i{^4LwLXo~v7X1D%r<z24CXL<6
zvyR1@M%$6j0Mh}z$KX$;*xqnaA%gh7Yk}LO8e&SB=HQfql0jBs-=D1wYFR^eGvQj6
zQXoK&g;r<|r3;U>LwZpI^aoYgchlmMtRM^p<aL_NCz8yg(<d9<-S`J<ps^p(`>m$3
zH?*|Z?q45HUthmvj%k$cI)3ZFr9Hi1IUP5W2gDHnuu!MEEt8x_D2u_7Bx|A|ior?%
z2>_;O1JVY5{L72A<PY3faq@3U|Gu$>3XSB}nt`CCe}ry=VfR`au)?jx3WqVDsDg<I
zI!_rq(=(^HjB*}B=N22RGAP}n^49*Gr4co+9~OD^bgwWch~{>2>vg3+yj~eU<ayz!
z8OU1RUDJ4eOvJf?m7t}?U0bW>^Oz1cTq`egTID`hVTU7&A5%H?2s{I<iDokr=m{&7
zf;eEQ82nSJf;HSzXrq|Yc(m~Ge<mL0nQms9{}dS$mh?X1O}OF6CvU?tlJ^3-*Jl&&
zI~qkY;<mJ|7r@?59Zc>a=b?`3iM_zK(L?J5F<?aAua|XIZ9I&g_RuN;gb5xr*kWY8
z;c4N29Px5sWQEJ}2uQwF!>w~im_G;<-x1qV0#0*vNRS8A$!<3txOs#>fK0aFPn?yw
zU6ho*CGiA|Ix0Zl=E2o~D+BARW2h;ii3J+`-$mvJ=xwWL%2Vnf70c_6ZL&5v3@C5E
zaz^X{7wic~ya6{Hp~g?~rG0Ha6-2*)CeL30YZ<|o?){s*O9x+@REqQEYlR27rKHOB
zVvF%;-8m*wSJ2Dc>#CCpq0{{rEts?fgjl-K6(oGqKMUCyipW7sHd>=AzDMl#$nTMq
zwG&U5nvRzt%-_VY9R<2aOz<Di>lQc9zWB?fZ<iz#wAj2h2Cbj2wme_&kEj&<yxyNT
z_lAH7=@QLWr`y3;{jHYIlTdj0_?!|xC!5Kcq&tOd9Huukt6`Lr+=-~&?@@{0voX9o
zC9>`_JYdh*vUsu15L0yE`O|T^CPT4g^M0(*b89pH(`4;UHM~{W+td#;O}AvlJ%U`^
z5x`9mP8_;@WyC~!2UCvB;3kT3-fqBKJvHvyn+l)s{!8E~tJg4NK91u!c{+WgP4}di
zb02IsOQE}y4vr?jRo+^a;ivnv^Wk`~(sgJu=j&ujK;iv)uleD@JC3NbqBOy5DjDl)
zI-Jh?#WLE%G8&|ma5GqEUi05JEtMFz;7T?TD5@$Jo|6KW5TiRylLq!ibukMv^Q2}r
z+<90?=tK{?2tx~XZO%RAunkw<FP^{Ka4=EC-2uY}<(fBhG`%UmiiZQhk4bXt48xiJ
z$a<3T796lSa%U(4IpO1I(;m$ii-@kkAD)x5%`!}j%wv8`r^!+V!&)Jjhw8}ZGhaZ|
z{<%vjZ1Y0;8yX+-Edz%GQQE$T`bRGMI5YMSn+FKLh77OVu6Lqs`ZH)ew0D_IlaWP)
z?#;{D%f}eP$&O73Q~Uh7=8Tf^LSrJTx?rA-nb`g9<9~Zj|CkG*e_9VAHXjUWoURpl
zS`yv7G4>TTOaB?Wx$3rA7&R`IC**W8`N|Wy=Jq;YP8^7aR#nBS+GO{cT2X?oj^n$z
zdsca(()G|#@~^r+>oi1bdY0=#<^OU#*0E_)@F)#m2%mZbj~J0EgY;&g(}Cts#o>wk
zx4?9aJ_#>aP_%!jm8OJj3l$vLGHx`x5y@)qh)_fL&djc&xv%k1lV#W#zde0=%NICj
zA3E-b!v3VBIHBt{t7F-Ht2A^a_o>9OgU_x(?>-P$n)><2>ELAep}kAM`*QHXI-F6*
zBe!}HYOmuyBlbQ(*lkq8U=Z>o+HmUeT~b0x-`uo_k7_xAvKW@tD1|7cHw>Q~C6b})
z5>uVM^IYmzAKwN!pYHR9P3gwDg@))pZNrg-5Pv>JD?x}xws3Cfij`pnhS*&AsoXss
zR5x;7v>#*=FS0FYixa&FU%l;!a;@a@GZH9w^nlSk9YV3QvodXpnT4t&cQZHi1j@7r
zVkB2_Or-pS1JA?B+|cM~6ktgOomE|-LxU4*a<Mn}6fj_*_zMT;#xWImU*b5vOoNo~
z0UxxknGMwh&xD8vQd)becm>9zXJYRzg-j*S$WhC-F_n&LAf)QnOu?PR=~9)<n5;-`
zf|&%mh^8kC)ju`G$WF=4OscbYaWr}3^ld;>*;i*CoT62~Y|O2E%<Tu-ktMU4UDcX7
zFWv|;kdfH#bTC)xEl$wMb=OpNi2IoCDw5i5_4e1<UzYpH+UP?4N{LGNkQX8CuKX*x
ze3V4{hsERvpT(-2;i0nKDVX>;x#Q)}z6uQVY6RM>5yN_r_K3+anmR-s@rh}RKP>9S
z6Mt1K5s)22GJf)Fm|c(AWv_rV_LMI59|^!_-Dw}6>mLMj7^1~4equ6Rn19W*Ct5EZ
z__|V`MZc10Ps;H=AslCZ4E&Bv^&{$Tx*Q^^b<K3Y*z~<D&H<SKWF(q*1L9sDjZE&1
zZAS5JJ{)9b`G|DGO6j{j@h&9JYSLn&9u_XD<Q>mGUA@o2r9(xx-=uY5(PhlTC`RUf
z6BtcgH<QzXE_SYq`daY+CU1=H+Q`{viP-)bQyIFGCI^dywAE5t55q6^7=*2?$VHbI
zhOdLPiL*<=EMd8>-42>X0v5tl4RFEFWM7ywD2lV4-tN5m-0ZnGo;%|&o~*j{pnlL3
zNwURr8Kvu(?X{if&p#B6=12z>MI<<*(C%X<^B{f(zJSrmL0MTlZOh=PGeLnR;&K`d
zMb{jJXeIIm&irwAGkK_?fP3bREyj%l*F#o=4+f#us${39A0Hc&$@^8VCf!&(n8i>>
z8Sz>pd_04u)_+>tH46SSvi|3|#{I;e03j)N4yPlSHu~O465@k**iu%eA3?$0yo^7l
zZ@v90#zq5?D@zsc*xPM@!UJH3=(a46e~saX+{8IXsGeTwriQWyV*<?uom=-KO8SV-
zS}&!CQY%pR^eQV<rb!wJX@PwB6s{ca5h_&9TkwK~<i%=B#?=yo!vPf-q6C%ErN(VR
z(5ZJCe%wxPH@y42&T$b_Ya_GSG`^}mA}zKd^u0G*d2~*ByNu<#Yq01)3CvkaS<olA
ze)+vR(T?C8d9~dBw0U>5<kzat{NU`?O)=h8-{T9uyV3aKd;GaPs?RHsr*XPhsYZ=X
zn*x`getqpwv7jx0NMpqpK9x<}I6IOxJCT66*;<T-RZ)w`f+A@<6%B@VL8?Fo{Zp}D
z{CKZQ7TLC|oicnowvpeUJZMyzWYmaiGU&dy2tJA2$unrM8CsY{ls>?$B@~d8X@QB>
zbL05%+LVv2ujbyw8B#ixZA|sLRD-}|=UKQqI0VdM9M82ts1KXUX3tBd1D_cgBtn5$
zT<Fwqa*USm`CBLsbBblb{lMQ5GP@~+Zax)1XjRN*2;!J42)sb<(CS3#&3ue~=@=##
zgMq@j#3;FL+Te=YEZgD*GtX~t&=3$1P*AiOP67h@P(Rnl(o7kp_sJKc>eNfdfPj(3
zqnMZX)kf={y8nXiZia{=tR>_#@f#hllIauxXAX<Y*a*&&>s+-_wPv#m*`pp3PP3YF
zO|SjCSk8~Fa+$N*Y^_2b-m#~e*U8+l*!ydmkdoj)S6xEk<RRx|66G|V0(CEI620pn
z(d`=#Jw06e%E2+94ih|SqR?Wt<pXnubV#XRycW%jHmVyw>qGErf(T0XRf*pMd=by~
z9kTi~iJ$BV0hm}%k^L9fL5e#XUyrYhr;Z16mSXuo%#AKi_d9yACbN8B?`hZ_pXZ9h
z<+FHwFSLvXNc|?Y^t$6IUdkcUa#nBh>1nj`$z?AN;KxIki3J&nh=^48fX{G6Jg6ZQ
zCcJO^k#g9Uiv)$hwZ2$;?xP2iC+cnWhLpsuj+mC%q^{_#&9Y7^u%Fs+A*QAD7cL5v
zqA=~lnBjh_7gC7E*Z|9#E((H>mN9FzC@CvG@+A>KhQE9Im#%h_jSA4>J9Ksqbk&N)
z#Kin#89=aZ+VZ~YOpUXGc(bX-0>Q1w2c356{HqL`)a&pqU?(c<8({|qO-bhVPlT#8
zLbmMVpSwjP16sluALpJ4ucl>C+;p$tkT%OCJr>Z0`0yFj#^2csrOr+8+)C(lrHhTx
zRPZdolmfLXp-90U-yedIdkCH>nMwPA<kPIai<3V0#6s9v0gNM`kx7lq93J<fst<3s
z3)ZURF29G<dqf^5b=&GyZ_oQ$Z8h(z<RyLlx);@*(~XbA%H4r|Dd(RSWpS^uJVdM?
zZo}i=2004{?IK6-Wdxp?8QeJw?%V<wWLonceuqaD%TlseE`fbm^8G4>=+~{^S`)uN
zP>dC1g(!ve82;;VLO5IHwcI?-wd5ef;(y9<kW#5-^tmjkS=|yg+f+i>=#|n%=RG~!
zd8;yt`>eCO4IR?dpO57!2wp0=%1O25t+hBvH<myGeg!)q=65KLpxx`hX+k18I9dP$
z)2AFerm|QTmdI4<rj9D|uhrJ{cJA;nj3l+bzWx`M@Sm_{sK(%eS6fL1C<%id5=v+_
z6!0+aG36d+@VM_%R&-^HLg-iIGH!RH6p$&N&q%k|3<gw_I|ls|hq-wg%SI-j6#oK9
zQ1lH8Y6B+Gs}!jPn?v%-*+VoyOhXR&eP3NJ*Kzzex7uE9eVU?>a>fDz7<AjVxn@m9
z8@w9_f%c*2*$E(>KQ~4!fD#49i(00aNjNPYSA!q|U)xTUsEyHsF33K?&nc1D>Y3W_
z)rfj|C-^(Zy4Xz4wl;!`F~#_P5rf0rehW2=hb56=uyZ&OsDm+C98eNPS*8nfkJXii
z7{2Y)3S#6>2I|Pm$JZ+@ag2e3%8*e09bfWV+iVNhZ<W;Q+{J2o`aZLFD2|ZZarZe-
zBpm6q(fF+iG&pLH(l@#NP;tclXw-eHYT3K^^|-(6;|Z>tOQYk(X8ZW!6RkRI@)P42
zU&mR3%Z0}ZE6m+>g;4%h=Xyra%I;K{6NP=RfqFESfucm&a)(Tcq<_N~YOY{t&Y1F-
z2yGU5B!7XtID=1^TFTTH8og)FetE#*bZh*3(I-B)U#fze<!2c|(xd<wyugLT#`9b^
zW)&a*PJY$xuzf*SnSlGwZr;b+)k$h3#D~wD`bZ6}d$o?4(RXrO3KI|e#KAr}!6N5i
zV(G{jpo<3{5i!)~=Bu5!vS%CboCUf(I#;t5KJ&nG_T64b4V*YW1e_nZcWVuy?(?ih
zUO`Wme>uYx^41e42gMi<$_<Ry(FUV4CFAQv%Kggj1Y^c!ugUYAxmT$!VB_dz1W(`D
z6bzo6Mq%j~Xgvt>nDRMi*`!|CIcvTVQZvI7N+VP<MM<s#l5d~{d3bn4lwxQz^YAfY
zp3&h}(s#;p%<-(_AVN@mpxZr>O#aCR^%0Fan~jq{6P}X;gFT)qhN>N71SX=+CCi~V
zFlYG=Wm6c^DNU-Q`$osMktQH4tOj5NMEA1c?eFb@ql2(QLqmfX`TF{hl97phOGA{B
zMj|SpU!i5(JS9|2Q2KGTI}mO0a0E;veEy&B`?bdHNCcHy-np;$b2nHndy}VwS#v3i
zx^$K8ybon|_mR+2cTLveSMceM0v4x;%kh4LiCZ-ZGfCCd;WiYn$Rv;kWRcrOpc)X}
zBn)MN|8kk#uY`MW$h?9$y$hGFgQIrkHpvvIYYNk@LZ{vcHd83JLMPt%91a<~3gQu7
zUEZ(+bg=8+_Mhk6Z`Bu5ma?)O<v31<6@r^}Xg54fKih1PFE<07H@p1>w5ncr?7LN`
zo@4M;83-Dbw5}0*{Vjk+EV2Lr7L+*LAoc1DkB|_l6e3V%uOVm|Dpc8DjF-NMPl|d{
z_xp(U1-;CI>iH9{ANDSV)Tb4PTvI3U?_&XU29>Q9ek?r8f%r?2nYJ?K+R)bepM9{y
zP?h21PD({Qbj<kcX+DHaxh(iWBPRKUA}HxfDe#+WZ#knrTHcP_?jCOlFg@0i=`v|<
z{;yig$J340KaB;wJv?9_z&7D;Jn_xTmgP~-=&z_IKcq9?C|K;G!ri+H$Wi%!1&=Pn
zvMsYtqAt^7|Lsw(S$<t07Z?!#EMf#@0B)pHs)OwFD#^mRk}yp3Wl!)un_se&;7pc{
zS}h?!@b<rd|C0CjuWOh%zHNlzk&J4=Ql_(P%Y41ZH_cy&A)%vm>UTrkPv9+CFWS&;
zc>11uZiK+M37yHSUbM5_<Rj^+Fr0<iwPUl_%JbPY8%Yv0g&A9BH8@n7uP{WAvZlQC
zpoUL=4gfEHby;v3$y%D@+_^_W(EUJ{@l?p<3!7B}5^tUP%H{P-d@bc+er%qzU%#J*
zZ=ZV~@(4>4J-4e_>g7ur9tZ8&)MQ6#hTntcN|TD1_0vJ+X2#piD-Z&6oXsD|0<jeH
zyPV(uQ&{?iRCw8Ar{^HCqxxb%O5IMUFx6jJm{*5Y$HT{mmplQe-&|&p{59wjW>1_#
zjXtXXm^MIOi;;h>mLI6^;epk+0wv!`GW@KX+^D185co|QRw$YLNaiIM5ry7x%nNA+
zsZ~5adszApC>O<uKKRwONJTR{-LFZLlaY#U#AhFvavNJ)#3~$#J35$e6S6TF@<;A@
zPIyGB3*=X;A2{z4RMV}_=D_=&%W_H%#Sr<GuM_q)mxeE4h|jL1@l0*G!)@8CCAImp
zp`=z_wIZ64&kCQ)vx?OpG5Ik&<*id3AIil_0w1);Q~eyA0ft$?ct8Q`z(-1HupWms
z*b%CNbucz?avN~$NqdkW%)$Cne2Yxs*^){N_3Z4`4h84W;ru$2K=xnY^Y5}=Vh}ww
zkN?u?Hda`9Eij%Gn0n3kdruom@}G_sc(a+m3CCz>z^Ym)m>`;nu1?0ZyT@@y?Qp&x
z=P?Lm10a|%qTa)YGGCald-|;chOL59-T*cR|G)4#UsOKT)T<%sm0zde>UFxw?c6r!
zzooRFY<=G?!sVM)ipy6T-S21C4ARVXW+pzRIDEbiBF^&xeTzWL*H5*_r(%Vz7B|zm
zM?O99Bgfg)dNi}z#KQ+tYQB>7>xkj~zhCVpE(<*tKxU<ZOVSABbC?BOK7nehTp>#d
zKtOQEwv>XZ*h9#-pYWZcQ(49lJJSeU7}pF|bg=|Xo*K5#7e1hT>k%NK0K&B_Pzwe$
z=~`&ui3_sOPcCf~_)|apg>ZA?y(|iN{sN5fv+<`o*2eR%BaUL=3H~F65cV`~Pw#Bu
z<q?WjQ&S_Tk{rMy%(mtq+3$8_p_>eD!$l&fZQ0Aw(GZU_gH|)Z*e4N~0reS_pqhH|
zi0dfIbK62~9a`rR{WQD>qz&Huj(}8XBvS}LE_6u2XGg;^2a!%1!~qF;jijKWqGByf
zPBy9FNS3q|TKpv}QI}*@deaCyQ17<5M8xOx3^Z-{1FEyDTnS`kLT*-*-m;c7I+?S{
zqssNbO6k^}>zc|TDxWWvgE$oyMjeK)ON0G=xz*Etza%?|sgCMW?rpYRYJ;EJzOs5P
zo95$Bd5y>)RJ!WOYdE#nmiv9h0rp!z%ngF4xsbul`R=oPSZX{%{jDDB;E$e;kI*-Y
z=0J0Y7A|sb`|H!K+h~4f$1b}WEi7iWyI*zyCPBaycY1bazt!gI@Rcofrw+&RTQa)K
zcu>Jv(Jbj@Q#lRaDE@DE!HNc4Xy4RS^$7ai<0E?mKZ^yrx(5Y0JXNW>UW1f+3*j;S
z<VzwU7g8Pgt>h067P>s~%z8K@s^zc@W};iw<jzE3c0}hKkHg!k7<W)Bn!%AZ-jgLp
z0U0g9!{%3QGFJ88^$Z*`@L22+JDM(#Kn7BFbL*p{;q|M#&sl;Gst5z*E{;>iVlu!O
z4G+x|*Q+&ri>=EZt;2|ZW7z&%*L?lvyLJGz17gF^U?B7_$lTD`KVIWGGgVoJ1dWR^
zh6SWhiV6k<fMsAO%I&wh3`*?RTloEZg*oCD`D);_@#yk?pJt>csI@qLtP=3Joo{x&
z0Znb@fIF(i-hQDaav-Og$oaHblcN@=vi2#Xtt_h;YvawfP`>$or@#?ffpYS5(SDr7
zbcJ7Y1Zm(GcMqo7ucLQd<}<(HbLgnVtG=TTRHlP6D+}G_jv+r5LXZL$Dtqm_jb8A#
zrz(C|r<*|VX!*?L$8*gr`R11=iT-B%lGZ{z-`!%Gv(w2;9{;z&hXRRs(xvyID`XDe
zb|Jz)!Z1~bMSimJS&IXKSV_O!9!?j;i2U&h2{+f(f`gIZ;DTBTx!&l};Szj-RPsyw
z*sB)!(>mQim8zFGKKNE*+?H{vbkfmRVa{!5;>HGp?|)2(>BY~{?M8KKrecmG0TfE`
znxB;jx%->@@2ICfP7(Y8117~~GgH%0fsN`?pfETC9OuUAO}9jOa4idO41d){?*+fj
z^$aT10T?~9GBa~E#22M{j@s(yCvQKOE8;&zT1(-3h>h<;46Ml~o^ae3&ChW>JP`%{
z;@8D)`6om(XC<*psCa@PlurGb3!L-+MxkIlJ8U6aB}r~I$yu&iw!Li-pyZqx6%&&Q
zl<DcK{oC><LYY}oUsp){iY52|PUG>Mdoxz0(`tlm2}$_6`*H{5dF3?MR(V}P+RT3Y
zDEB$bqv-y;c`@FoqvAHWH;>PW*=*!sviA_|-U&R&@Eyu}%66GEU)|`@><*p$=&!#8
zF_71*)8*OG+AiroaBjZfcQ^;|(%#P}LY}6(88X4EQ|BYIrG?6F|F5yLrEohgcBi3{
zg*Eq&8<Y&5gXLOd_seymqk7LFP_->Mw{EezAXmabwI=eDajv@@zWy-Yuj_tfAhjQs
z5;D@5w`7!a!^g+xvu=u>GOHRc+jGjkk3!Nv;z!T9cRV*NC#HF|D)Ji5#1|APT@Nc)
z$+*<-DA(CZEfPkZYKF?<usDZ@@w%h1+GTuD_A6|n*x5SZ25e~fXI-x?2CO7`T+=vD
zlUHDvH=B@%5Q|gzyR%E(e8lg;i4bb;XFsi-8V#Slp3xJ6=^$`quwN;NYbnP<a1VY}
zVrLEcT=5+?_*_#unBV5hC3Y-)qA^&N$6L}qmP@-Qih<ft63nzp{UhE(0AZG+X8Hp~
zS9WyOUsi_dOKqj)gBTE&+BryP{aNO!?K)NS`4iw51Uzs5cFip(`AWpe>F5g_?q{Vn
zyA<NNfuss}S7i8~O(uhfAI)Fsc;D_18cR%O@j1UOPUybHGi<q7obP7Q7;}Wi3+%Go
zQnQ&^S)BZ=S3l?XzT`cJCwLv%8=`Bn<#~R8ArY*RZ$4VmPsQiec%Pvv`0eL=avSg8
z7$D%?<V?lR`nIHh8C$74AeAi>ok4=v&@0ccSb?<O{1tFhasgB-1LbOm47@*n{9t88
zpDhN?6K3>`EF&weCZCD(zdowl{McSL_u!(f-}}A<OfR$@))ItIIX(mav+<xw)d6`>
zOZ2$Up=~b3R>w$!O&OGHavAnV(iEt$K=c%_E};AcYM^1`d$g6V1$RvTl`A+lMvwm`
z>2k%Q237W-ZdGaKXV`AiQA^9OI4H|=eZL>LKZdP%J^``Vx;d&{NQ3r)+FbG9J?JUr
zz<4b>s#%q|z9sOE8^jE%Bug3rNjQvQfnWzAMjOOgH+6LUk*+_ck-=_nCiOwTqO=UN
zmASC~nS(^weg7JZ*Dc`XaXMf6CGuk5UA0kL>@^p-Zdz_VrT?d^({3P7#Jftr`}!Q`
z`J&Q%Qm4bA*Q0qV``zz#ex#L%@l1f&erfU*3c3?$^jxBn2T4UfJL!09-cl#zR@Kw}
zxOjCvn#}(AaJQkY2MRs1IsNY6SdP%b{2&s*c^uh!Oas|?Sb3Va0mB`lTMMwaL`1-#
zCBTBTalIh~UG*N#;vi7Ql1G%reX2s|C2WO6kLJ0imw^Y)**(Yc(2c%=IgQ%yQcT@;
zcFaSL=B=&xT9b3>gXQ*0x0f(iP^Tjcgj8IHJW+Xy)B$mt&=SFWd$FUktBlwDYi1s6
z-Fcoa-fgX1*JN2HG!QwYYL;1THdr;-^w>=mCW<(J7_VlE!HRX@|2Sb(;4o<=zNt+&
zXz$z+$<(K|wsR*%h=K)9?_-;@j%!w`<GIh;jTZqY?!VyerDVJHj<n?8A9)1P*s8tq
zWfWraN`om=`p+YQzTUtc<#aur_W_NB&+GTzb7L%{UUe?e;&D?XM@7inQZ%PKdop>l
z)y?O2p6Ca{HrpSL9FNacXMeRx#Q5d#e4Qtg%CNAs^mRX{5QZ3+$=!JHc(KR55lE%b
z#qIZA{`wkxG<C)20P=o25RK)2z6=`JJF+KHw7_sPu1C+XN;#=_kc%&g2@MP<b(D-d
zfN%`xS26JM>4-#!K}HV8F5-F{KXBsX<&A)FM~(yX41o~@-AUqyhstzJ4p^}sa=IUU
zF8y~M=*KkR=B{wyGa-Ud%KW#|;YYM`N2<GT$xcr33DexW7nuVtAIOfPS8rQURz}N`
zcLz=6b1hElpEx@~w-krfE1!9wS*yV2(#~W|L*<A`(eh2&`(@jHfUDdj^sFW{MI!sU
zj2q;jd+RR|-?JY*RBZnaGj`13;02h=G&L>S;g9d%F8~$P3|sNhl=abf2P_Ds0!m5v
zMJE=1G6*%xf_ino_LKX{4D-v4_WL2CpE>-U*JHu4?QXvQAFt_IJWB-pKJUN}m_A`R
zbGgQuc>($!cpY~kYMxIQ%CDz@=DmJ_+SL<%{|%t#YNkLUhqt$EVrhQi!vEIfqfln7
z$zCE_fbb1KS7Wt0kqJ3-V<n$paC;+iMozVj`(x*yL$kR(g_k;h1A%SasM`ua&IZZ|
ztLDq~Qlgp-d#JPua`_#B|DZ?))$vzkg~Rs{WxamnmzWnqVM?jZr4B!MlE{B6bfMtC
zf9w@K<XHh&xBPC`GueC50<eR2jGtfk;RTN?Zfi`CGr2iCdyDY6C;meNmmv^!H2BRW
zRUZnT8W;~Cg{i@2>yJ4unmo6E%wGsSZ_F$|aRJ~-xknLKX86DW=u42B#c=?iwv3au
z*mgEC+5fMbfpM-xsh~}Rn}>$w!P&aPlzjdbL5tgeVi8Z_^BIS}N#6bN^TU<LZGGva
z2Py|dd}*ng&*gak<1IrWtL=6#9;ozNYqG`TH0?)H$mRvg<G=R{>?mwUSL=0kR8-l$
zY<9X1#c%n&KWEFk|14hXuwJV6heTFb@^6+{^M@mItg3SfH|SxOiQ)(y;dnfhTLM{c
z3FI!!kc5+j4i%m;3F+Uv(V)lt%MvY*NfGlz=&W@W$ML4nvCp<!Q*;U<s*;$=cmX*x
z-ckx|v<62cd&{u(Hm!!2dR%-2mjC}ZELEZEm~snGO@lD-_+>!PQhMZH5U)4IZ>N~Z
zBb9XSsRUdpw?cSoB4_jcr!OZt1_d5PTL_t61e<~L0Z>vq+))ZhN#cjB3g*vL+my7l
zWm4o6U5c`@<6=oJ-ay@1zF2gAL4IGKu&xw6eBEyxf#4+GpL%s!90p>e5fnV~zR1{o
zciX*#eBPJGF3k^;kW+6Tv1fG6HZ&QWC(%;^?JhfD>Be(^Cc8f#Gj2Epw%XLVoop^6
z5{iLv=sZ6vkk@km&sI4Bg3$|Y4!WXh6tl|wp`~GHdn1a8Mi;E8f3ts%ODZY?1^wp&
zfd2tz>8L}HtmNeVgM)*M3)`Sc9i-^-ZzE9lNR=OQ0I_cW{<59s9%Su@lywJhf=OkG
z#A#8z6V8?bO<YI%jO6zF=}bg@Z|bUDMRc2O?F!HgKN0>Imh}Q_5Q3fFhy#FZl4ndu
zl|$Is)MFm|YGri$ou@Bs8{FI#_jga=a|Y;L&^do2YEn-6u)s;g!wz9*rJ(e41T#Tr
z8yg!jF)?sE+%w>ip;5}hz`}xFrA;^6>j0XElJg4^cND6CA!z-n2@R4H0r%b6hwy6M
zQUSm7BdYkR^%u{lmuQ*hrKy!#``5rEOXpP8>do7WP4K6yP1|YBGr+)he8>27Rav>l
z;?<uQLy<w5-NEcQls9$imfh+KpZVO{?mm?x(C)j``zv1|1X5d6lg{&lpEOieyPesm
zqtOA@!Dwb?<{F8IRp<y9#e;a*bv%+R!QO-(TZN=?k7Fuz#R`V&QbWgICfwg4eUC13
zb`JPXAq4hRYzPzgwE%)Uy(}1bzJJ9xog2Kr_!f`acDQ}$!uV-DnY~^4f5G$k$VA%n
z6YQ_%Le!Ac@^-)jgaiQ4^!i6a(eIHj)S;P0)%~_EkC(pdzR7T)X?V=@ndYWVi7IQv
zo5tA$GOg5TQv``EoMP$&6)hN$SXoP@HoAP<TUt1Q*~Jf|IyHOdMw-zd{%02(T|`D(
zKu*`6G^S`2W@!w4Wmc#2n2ELGa{;!S<rGT+-ap<FPczkRw-aw>dj7mGeH)S5Pxi}=
zSC>;7&^3UG(`KuSz}ltD|7vXmTD)RYt?8+__Q+-64EcM@olmbX!Ml&m$IihU62ps=
zsn7v})FYfF?4o`gA=jil%8~e?Xh=AyF`_x7p}45%Kj4~`18K|61$oY}_&Z2LmLNH8
zvrkPi!;?D3Hk~%{cV+=rh~+AL+zTWy>1vrpXwD9H7OOn$;<{oLxfX37y-6dCcR>hc
zZDEjrQOS{eug`W>#$LCDb@@Ujm5QA%ZK}`kYmRaP5i<?}+OVRYG<LxqYJ1uGnWU$I
zl}H;m5?8&Fdb0voy_&!|4CB|7ks_ymu`>{;rxgJQp#h7G{B(P3^=-o9x8-#rnM52A
zNA6#evsDlXR7hteBm@qUf`0DCLTKFG+%-EHuEOs@rpfTpJ<`g!!#Jy>iHM6TyqS%z
zWJD95Vy-VtylFOXXIHuGU9Kz<O}$A$*JS&AUWR=4=D6f@co-UMKRs)@<Wc^3n5^@~
zP94gbi{*ZN`-S+1wb}32avf;5!`QZ-vSta|np{$vnfwwILy`PzXCu>t5M5oz1|c)~
zh9-|CM9`==2ohMptbMouVqdGx#nQ{Gtu~k)iuJS|DG;$k#0VEV_SL2!H2WcC4WBlc
z?7PmHz)RFsOMO&tMz4?zbfrq_mD2CU-smjOIjO{BdLVxXa)-id4MrbwPxn_^%h)18
z>s$a$#EkvG>x{g-{9*7RX4U!oYn|b1yQdjwLE(#aO>1d%7i2W;cOAyz=Xv*Y&8q~e
zf1cJ6s_d!#>_sdnnx(3w3=&<xnwvRlpneQvOZDh%&&Lt))GTi1_(S1Y^I~9^xrOEA
z;_g9Nh*QVJL@3gv$eRR0u0-SVE30W$W7lC=#}F!4)4%h+-CbGP^S>Vl+I<q}t{i?i
z+wrD(<Gmqljend-iGAEGlk6Es4I*v6e}oXZpiQ0)X3fZP7SnC`wK}~e#QV+ozDL`=
z&k&IgR_+#l-!1yyhdD{TCKaDO-JdqKJ?7id2*qgcBP|JX?6^0GGlU29{A@Pc6Qx@p
z01Ge>GZ#D-9~~`H?t%k?NJRW6a6iZ;Iw~sYqelR>VCFP4Y~E~k*yI9aYBYHJfQCek
zkUyA{<_=&+Z&`jv)cq7TIiQwuQv2&~U^w&WPLsa>Z*cH1i?(^9*sId^6~0$#vzMg#
zkR`>7E^giOHh-El#ca}|z73oNC(pw^!<b7ZX}uFZ6XPTFRjR30EDJzGkT>Zkv<ZHE
zMRI<Sai^FBKyvZ`^?+t%LW*%7BnF7YgDtdpB@lS?^YiQN?$w~(NCey=I!Ebj)&?2N
z%NpRfxCKnq)SSdU0@T#g!3DL%)>#-Ie<7_wl~1GiPNRGigJM5Z#=@3z-jBPgtcD^z
z-9bhbhKRbA8y+3jD{PgV86jl3&^Qlto@4qgbIvPQy{&g4H$-jJMw3_=HmW;6tq-X(
z_Fu-(Mcl!!OoG_~hJgw)<;lV5ZXhbc;XG=aHobtw0kf*Dr3Kp+Cj7~Etx31tty1V5
z$Rx%fB(%$9zE*%Si&0^OJA=W3(*JB!P9f=Cku961a(Gn$pbf`FYXD*Zo-1CuN6%PJ
z(n-w&YN)+TE3xFJ0{7r2_o$h}Ih8h7b2=;KfUvJ(EFEAm$=XVww+jxjeIXOiQ`GuF
zCx;IsM{gOnVrb_oYKudAUw7l6f?R_&$UXK73+)34n~(z@%lP3!yW4D#<xu<0W)W(7
zC<D!E11JYggoGb)_=$61Kt?Af_&lx+k|yeaGYC-dKW%NlFN+&*vH>4I%J*ha9t1dq
zztTarFSTwRl14p>o+FsROeQwsJ}Z<FnwIlV{*~AL<aF0nA`$Z(Hp7qAYNzvg%2i7L
zG^jW`F8#}S7f$!S&pR*asgPln?|pkC(LNq|&1Jay9bRgOl8s&A1WA5WM{|e!=ada_
z;cL)Bv-UTZsM2nm`H<m&<1+A)kr@Nsl_;)&udC!{;PdsY`tE2d{6i2W@h_GRs*Z;!
znV5uxx0e@KE(x3$Dw1OfdL6lUE9a5SLQPi3d?>BWW=*ie0wKRx)hRtZmQee8KH_1z
zII0IwoZ_(e&c5i`M2;RvCd{=03|PD%)?+$$W7=2?t{Dp|HenC!w(F-e1APNxD>>Ud
ze{wz}IiKkCx<iA;3l%@>cZ-#9@J0NYDzJV82a3y!u~HBtIY5t_0W(!!C_Em5kyS1@
zG!lsO+o!Ia6G{L^(N9qi4@n0U5zPJk$vQKL7E34M5D=Tlx}QBf9JW8WN<@DgmoE==
zQ}~z4ybGt+$fh<3AMup&o6Ru7>Kkm4SI&wtdTp!RAo9>BC2@*mcFSwR)$kC1*O7_G
zH6H>9Bb4&_bV*+|5S@hhc;r)&P5dm>TtgX$>7sB=0zR(>pnIJH{jajKCCrd1a~i&J
z?Xjq4Lx)HC(slprJKJt%3NX$q374&R+`cNZnyCfl6|ebJ4B;V4u6C~51)B=u=`-HW
zC<;CR^R~!gUD~+&%xP2Y0aj=oHsL;ddAF^ygLlSGgr8()6SAT==d?~Tu>IQzm!V3a
z5BxWySmdR!(xzC4nB&F*W!Ce{oOs&ZnVI-|X|)-1K#>S2xfe~A5Odmp5}|^bk{_ji
zjZhN`+>*(;qR>hV3xH$t8uOwDAl{hTv9Ea7Z*@V{L6ljN@4K42PBFuq{mlc6zA?Uf
zRq*AlxrB8>$&Hy=y!!ke(Hh;9%mTi(<=k_L6}6^NOd2|k(y-xy{k=jd(uLjK{(D77
zgMI>RY!5)nNfF}<uwMSRsRlly46>$%0nt68U`oG!WR=!2Ri7HC??CSud46F1Frr$6
z-viKjqWc<F5r0FEHXdcyW+0SMGX=v|5GA5c79Uc~h`@%GK_@{omejto3bNYaEWa3A
zJ{Kq=wDsR9y6TMkSZBjT|La?VD<K6&@%P&?!hidF+D<=C!`E5mgi$R7n2>W3GOW&j
zwSMo)tOEX@9RSNVjW#aY$X+8$n*w$}b5B4FXb37(sRC(gkA_fupu8>aC_ME6{4Fxw
z$alzq&Xy*MgD!eWd|nTf`z>}85sACuu%pZ*tHPnO>9r-_q`bl*>*oTAV78S8-%Rzt
zbkt*NIl~)M&`&=HQO+p!a!A2)RoV!|?L>X69MF~`Xd%1?haKVhjH(fckgJ{Q!IZUM
z*W_`D8BGzu*<iY30dxY0bjxBXGvUOx)!O%f%gcU5Hww3&TM%>;0Lz`>_tv(3w1S{S
za6Z*tK78A!-vwUdQZFXa(Zhr=yIWi703W8ac}u~#TT#|yndOz4m;=h-@(rnmuQ^Cl
z2zWQK=cD<LlEDw4`gR0Ny+=KsfTxKfq|N)u&dKRkMmrPq-*9JsBTd3Kivt?S#J2PW
z#LZ<o5M=3ZH_Cf4j3#KFGgAy8r*|0+Gv4zwf;Dx=be=m{r?d8erVl`O!L}c9yfHiK
z0Zk+D`8M|T7Y{3CS};cJjdi5J;hb+i3tEIJx9V(SPb2O|U0+`-@W1?}Sn}KHgGAfJ
zDI|{y)r%=NN=ozbJ2ZEgbO;0_cpTL_cwtv--)8aPuA3W1zU9NSR2W(ZBdvXvgow5x
z{Dvk?YyOcsVZ~WVBBfoJp<cdDTjIRk{osX$IES*42R)H#-ea9=WgG0(daH|m;<oM(
zG)4TfJyDTM`WT%sSt5YVGz_5<U<%~@eJ7a*NRqz3zUkvb%9EJs7%1D958Z1-4a;Yg
zZ}`WOKheO)fSnu=YzFE7V7wr!J}nqcyb2gIC<z6xIfw^l_~(-x+HjjYLf1a%%!d*c
zfOT3h_5O1+x54YYUES0{-84_di=gefQk}5vfRY4~n=ogOGz}n*0|NtcNd9)YKqoG6
zvVoTCKPBtpzWoR;MYYe$AGFvIU*D)K>s;IOUs6OI?*l|MK}|j0*ex47ghvqIFPif1
z=$1UNnLe6YEbBvst+SNR27ia~zh8Ae5$-c<Z2Y`bvokcj>4hfh$+9_AWbbmLqbqmg
z1*HpGk0UxEjxnX0D5TDhrMecytm9$@ZvlEh(=%=U0?{AokZT!&EKIdhtYkW63s%o4
z1Q--5o;=ctnvwM@k@b<Fg^%ylUgM$_Yps4!G8ZLi*9-uzYW>eJpb?<?I=t1-fuBtR
znbPWMmIOAIsRq7b*%xum1jEx)EFw9*kJ_m9JrkLD`B@cj+5Gtos5sSjdMQ`04BAMq
znKGzCbQ$E{tL4nEn4;MsBSAV5d9Zt$J!d1L=%4%ee7njpUt`To;Y!7}ou)W{u)bDg
z2=1}_W6lIcWfs=BQmGyJDo(}s<}WU+E2oHo(@}7I+3l4l+<#5AjKHTOX|Dts%{8Cr
z>YmFpk{B6i%4$g+KEXP@BI?id%uh^gFq%yB-4G3X9JtiTW`U<X7iiL(Ax}r##A)g+
zTD!e*JMiG4V*x;h=MOC4&R>qK2XrS(alCOW`!mI$bBXV4n*|ES)jx_&#qfIR*J^^Y
zFf8q|HLp*trVQ%;Y@5tHmr|bCAXu=nwo3_$bSMF}+==KdUJqrFS}-D8Od?0Nn9&zi
z@_@qYG0*GcNPx&CK)65%KaTLotr|VFB5;oB=v(`Qbfmg0s7N6-`n)24^CHORPxY(K
z&K=hA?r)81gZvFLJf)!Ru>u_n%t<@Cd|4W99n{Ehc&>6lpSchJmWv8a4c<h;gEBC8
zO?bSb8grAe220r^O%N{&0%bI<$A2s?MWYJHXVkJrq^b(T;=zLHCp8D8(kt=nHr&^_
z7N!XRZh+r-3g8B$oVs_5#tS533&Pa;p#W}|iTlZs>KjZA(K1PJui%+@BmOkvF)z?b
zr#q&qq=br(Z$r8oNH%N1I?sVnwGfuyU>g2dL(*^+yK{K>7&dKyA6Ovy=1M#9nLtb{
zh-hUmgSeJabeCbR!xA?%JzN1Ge9eLNVIn<ZIJC7B0;$@CGb=!|m;P`?<Nc=h)ym3B
z+CZs>#=RTHHyT+&<O1*%X<MddHSpws*o&?1FTi3q^Nq=Ny#=+>Z3cTHz#~oeaH#`i
zq6Rcg7AjGAyvMAb#-xry=rn&EVxHNE62Lj=WST~yY`4j$Fpm5Rsb7%z$D-Nm(9DBd
z)e$yj<Fk7r(t4sYP7l*AsO8TO1VI`4tF{R&VA>yUZhnATu#S&11^)SwwKG;xLEmqr
zQPgO+!M&UpiFEy|EWnJD4q@8nH@Mnn2DR<eTmFQ4hL{*NFKP)AzAp#|C?q|4y>6R}
zqmz@@Kv~}92G2?8<9ieO(jd)UQF~ECngo;GKhdCf->;7+l7gL108_8V&-T6Ug|4An
zMqa&W*Nlv+2H9+LVb4hl#LEyQ2CD%A-KSA%EZVf*_TmNX@@z9%Ex#>`8)ybouxyWE
z=BJs`Q`WZ^24jewQAZ7>kNscM3EoXVsW)({y?2=+b5D2p@FqERY;PLOK1(*lY(&*w
z295F_Rfz-IbZLW`6!-{H0nLFF@c8h+25J#29t>jZ^I5Wilb;Mgtj#q`+&!Hmno9^N
zYd-urlH|g#{WNJ(Z#Hk6`CrwZA7`P8dH3C3I5nI53Y)XayK-2qD4h-VyMKm_SZ(Rs
zU=gA4eeIYFw)8L?q#5=GTQRPr>a}KpG9L!=Z>TXDiq!iz9?GQ+QU&2=Yfw|(ulFav
z6;H+(yCDWWPa*l|nH*pPo|Re04sM|=pt5I6=BxNBzems_=LpHyp=-Ora9XJM$pVj^
z<D)dlkMB<O?N_pPx4bp8z$Etj`xJf9m4ULLrNU1oB5w{5-qVCQhybXeS7qRpFYD<M
z>@T5n5xX|^^ZQ&V|EZmb4kUzFwB1skAk@NTrbv<p{X25tlt)6k@RJb?2@tp<$ewK3
zVTQ)+@N0C-P;=fv)?FK$o&A<ej2Rv31xk~iiHo}pwgps(KNki^{ud|;KwIDszkY~r
zbnG;fkdT152MU73yh&I<h4pa&UDKlp>l+Ds<Z?wJu?|W_!92e)DpXPy%fl9L<}f4c
z)Tw2fPJdQsga2(Zdv2$mU2lAO3_7lQM3BG&OOei-v49dZ{#QIB3y#|gBv?i0S*$8F
zY@_SIFQ5K)L0)TjXFRo7!k(@nsyU)&1(PEG)-wB_EW5jXzFpC-`y@g7xi6U@7}6to
zAqqLGRj$;;)D*G`aRwA<w9=|n=ct*2p)&}ilFzhA7X|u(M}W%3Dz>HZGDT|HT%1p<
z7&8!>V4-mPfD`G$zCc=NZk{n^9aS8N?p^>EKbb)b0Z#?Z1&l24ECMQsD^>Vyy1!b{
zbI-}8NS`r+eJ>9vFAqqH>mZ~<GkXmHcLaO1B_{-!i*s?vIm>pt)Db)hbmkqfQ{?b^
z8j;E|=ykwA;ec$kD1+@{N;>So3&qS|^6CCZa7XM+STWqVt5@qsuJ*0GJ9HKvWXHxL
z)=YuGJdYpU;W^-7qhn+L0nD6=icIE{_<Jd&v5i0<>GJZw0Q&3f3B<f0k$~v;3~RG#
z7}2_DXpBFF>gCCBX^nLCso{O;m++4`K0i22vsvxg$jlOnePKI9HOC5HwwR14oq?p$
zy`n@g=~BZ5hNw^W3PJcfswBD_RbYao$m(@-DxNJhX-WtVL!JO#m0?8tSPtCi4?}GC
zeRFdj^c?<0^2-hwC6>o&#gmdiDmptmcOYU;5UA$5o<x9E?fR&*f9M+c+`@J2Qmhzw
zDYWT$#vL??BqSvA5_<T-%K|uukMk&(0Jvawb~Y+13MlIZu<U@DT4SK?`W|RB#$ho7
z@m1Wgcg`B^5zfcqkt@JC>lQ0Wgy08IPC#QjnzAQu0wUqPit5z{=iLg2AL8g+qUEI6
z>N5>xxw%x)VhPdxP3s?aKpnAhDY-u)pBFEU12#(Lwzi4b<M{zY;ViZb<=SMd-3du;
z{wwEQ&<HC%2ot9T#PQ$}6o^{+LXkvT?+X<L1p%i+u24FVUP#5o#RaHbLtTRB$EK#j
zV-d@Z0~F~^WX&C(2-X^bUzNTGyawsl7{KNXmbYrj9O4|cw6}vN#DY%j-2lC!ztESH
z|NMbjL>^{74+UYpmb0_FEK@62j0Jq_sewEtc`G5)%X-FF2t)za79a3%2i2$w09P+<
z?kQ<}K%u>0+77(=WUt5<a*A%v#Sf3a9vu$BY<p4r%aJ!yF@M16jknNTuJS&IWpM)9
zZhC(<J}dg%mHzgCV}ZZnWaY*1-Qj$V?PP00iQ}i*WTRRnBVPbHq=5O#Y=J(njR2}u
zFq2!)oJp>Om6;98O_rzrU^@02$Ev=(y!`I=mbf)0sj8!cmxl-MIEI1f0h*<Vg&Yjf
zxbcSh&vlCPxSod00i{mNNa4W(WrugujiahZl?(ud88CUR;NZ|x0E{-^a$#%~Ac!Dd
zz_@d^SQ#B1&7zcHyABSw9A5OrT)_>R0AeuH-wz%)JlG?9YY>5>WPDt!;_)F+BoRyY
zj0tWC5UIMhrP~GHN3uIN3adr|Qw8YuA9PiQQB8{P&tYr!pj1HC)fcJPKBxND3DRzt
z+d2%#6g`if2jFiaN_R)b>iQ1sJ844K1miI|gJ;dZ{^Lzv6T->Ug^hzE!@?mGgMiKl
zA`-}H^es>?(5#Genxz(097TS+bxz47U|8SXH8N(!qav`_?g^~X-V--ZzU?`4%WT26
zAN0bCVX<pj)ddc7nJVp#L}TN$P1D&KY9x@OY%4OyVqkPUofnX(+?fu+8h}U7vQAEE
zRgdRko+1qy07YFu@wZf!HvQkX7?v#QqFL;|kVqjG0CqY;d6)j<sImiDQYaDIoNv*l
z;d$?Zx?CqhFGcGv7gx`UGtP)@D6P@RX^zpnCCoHg*Y^NOAc0fOJ}&59%^ReUfXFhO
zerethcY4|5gfi%uc-XT!EQC>2CTn(XB6rZ0o1VjZ&^`k8Jy@DNAtJ0-eRxbnITsBH
z3CV7UEkEk)Fh6}3s|@HKU2A5<FaMFX<_PBI<02l;NWG0Y!1uHLh?!6_LycDlXf4h@
zg0iax&FY;3OYGT7`P7X)$xKNx^-2kc((MBgSM3Dx2_R`8$Lv7c(4{8zuO7`xh|UJa
zb|oX%Ri0q;O~iCZ9*0#|y)QBLa};>wGz`sx<|!*Y_1q-R86j+B%s&~-@wn`X8E~7e
z>VI03ak+!CnC5^F99QciU)KG1v1USN1+^ZLRvTBuZl?sn6M^@l{W%w)R(D*CiU^(R
zI}$*uY9*sv{TEMgad-)OnN^9EzJ#73oGcEA8Vm}1-!igU*BsJvDRt`|jryR#p%5%)
zd7AfF=b!I3yR3o-tC#fGAyT;jMlWJPYsGMqZWDkLVZIm`QsI3-pMSF}gL4-b7D7To
zb~bcj$r%S3m<QbIqnOkO_@hrH_nb8MV||uVZz9~^19T~ui}v)rHlXEzVg6l%W)7ZQ
ziSvonry)~U%*HY;^&4#Y;(`H^Y+OOV@06#1gAOZ`8PJ1ve;El476XSI5Fm%MPpR?M
zq(<i=Z&at2Pya@v9o34H8`W(SNxVw|aDu7jD>4DEOU<q|Kr>=^mrWhTuINe2o;1n+
zjaxf{8XZndbPWC5w;(IR^!+*#3P9?P_m}q+<B{^WnysNNo}OK^z0gd(`4kcg$hOzq
zN&FMO#el&=rb0CUiIfyB{F!ugbd;N2`7?uofpK$l!&rGMmeOfq#NY=0<79iC;^gj~
z>g$-}CJ|W9w>fPl?fv!`sb$WBinbso0UHrW(03Ro>+7*Zay+wKNTr<}?fo*sZTc(o
zF~nlAxET%wEUvLcOH^Y{Dd*=80Y|03z0v53R#VXp#IzSLO6=pr5=y)je@ETVadrZx
zc*8GdRS7`^!^b^|88^|@->4fN4WEO|0os9JV_23ubVCxvLNYT|eXn5B9^e9@-qv-P
ze<vpclj+`$%`f=N%gYN_s_lt{i|9l4?V^kfJiW4JdxqvwICzhUhs$IS6Xul`Cp<}r
zqGbpGqYLEUj*y#%Yc*@sLevo<i;f-0SLrE{c2@ULzci;BH=r87#dGI(c?_=MLhj`$
z4_}C`|7nC4gRrqvHVUzaz$oFU53QsRpC7gT^SnaR`C;7YX|y0L79HU^CW$!*l_wP7
z&`?=mC2fTi4dAs(nR{K<Zb<P5VA(4SpbaylnMKmOwKD5xv9#T9y)}Ee9nLn5<zM1>
zt~6`))myT2ZfIyq64AIr#3`JY$Dt_G#rwJA(~u;$69FcVg_&8Fidb4CMcR}%A8waa
zTYJr|6?(e?7)ETHRT(ihHCDPTF4tS{&aR3#s3fnjUzfT+O&P3}(vpLNgKLAqK@JWN
z)0uoF0StlB)&<HQ#=`Dg1c#Mb7pI)Slho(a_+ogkqH7MoXGWsg`()S}<TY;zC~P3g
z?GXihVL4p$_!y(`bsz@;KeR1lOrs&>$_<&WDsWYMI5|*|nUPYMa8j8vIA|s};#<2s
z(l{lOtMrwDZe5>>Jk=kB^xCK0%&oJv#^rbQ1?EIMtM?ARH;DDP(=fH|^8kQf*WTm^
z++tan4Ily3OIDXLt>K~<?`K(p*Y$9om_}(Jr=;vko~#ivFv!h2!2u(eEPrf+{(SQP
z!t&o=tIhQ|C~UbtO=uRYTsZh<@5r7bS&9g81iQJY*!n$1T1Hwr?9{aH>EP%nLt~u;
z;68Oo(wAQCk&-9=DoL{1egy!o(?kAQRRWNwvP%9k+$OS;!=r$apYK$9yW~r4-Uo2Z
z{1|2ord0C*JEo!vE`UE^M7g45laN1@p$q}b)SoVDu;2U_FsAWY@zT^FH06oK6^5xf
zCmpp}I&CWe&er{3WqNj`4indD8O*7KEU2>04F-sqP|1rYLuzt&IUZaFqYpin92f)I
ztSQhw;yGd0#5R{#v+F6M+&Kdl8e_4qO_r?bhaiYcglK4UE!tT^)V7AfAt4aP+<ObF
zS7t&p>|?tc8XDW68qgkH4&2?JXisj4@+2rQC5Xu*CSa6d2smKx1h2xeFUp#lX=!PZ
zl)3EGr=ehAU<Lx)KVo?T3@yz5N7+|}wb4au<L>U-0>vGQySuvwch}<X#ogT<ihFT)
zihFUF0%yMSoU8xt&rK$oNuFfS%-U<OR~Cz<TrRg=Po=Hq@AEeRnr;*(47RxhODuu5
zfh^mG60@d#nm}0j4{8m+xToKMd`!*Z&LUE~tRxV+Cq*a^oe}Fb@u(x(aU-qdQtm}a
zX`)|BC5E8N%zI?1+m@_73tY84>0^C1l2-BY#D%C^i(~`JWcMdGTSW?<@q8$fi8#KH
zxqqr&E&Gyf1WbF<4t@}`KWOYE|GoCRzTYq^=cjvOV~>>MNm)>1Yu01rqK`7pMlm)v
zPJpmfQg^Y0QCCv~%P|(zW5k_uL^-tRJm>)+B8B5eTAKB0WORnEh%&Fv#n|TM@p{kb
zJKg)CmQ;ZFKW4_Nvxo0lphnwI5ZqydWT|GH_46kLJM0I9_UyBz<E9W@s0ew|*z7!p
zKUX#qOzgSv{*CHU*s{Pa{YFj2CN<M5r{~Cw@Pa==wm0OYE#OTcVA_ids=Lm+F9Mu&
zO|Gf5cfAeHe8tB&Rg^NaEO79Qfyi0KiV25kIU?80nzN$`q;-Q48DdCyGRTC|2t+V3
zt%aUIH6CF3U0;5Yg?^GpqWSuoHcL6mvR~#M&r?C7feIdQ)$TG*zOi)ZLq?l$R0qC7
z#QtCo@X!FZTf=I;ucGCJC{TzMvqDOdu$cQs^ti1vv&-tmN2157$F0Zl1&$ydCOb44
z9v}s?+xA(Cnhc}?jMD^}gmp0r^sPUeo9lE>H<CTw2vM!)tWiF-kK>1^0?%MGVr*lG
zE+m2=5dh^K*$jLtF8-p%Z&Mx_Is=%b_EF+QL*t1ohy{CkgIDYlYX!^yd(h<P%mN_$
zax4tZsYqfbrVoY8UX7nm9PRq{XTFXV<ZM^aE1Sr*FWSmoYyR^f<z?rqIcmsKy+nY<
z{c8DO_+{#Iq{##jRq*Af?&zK$Wz^J_P7i2!rBWpYPgo_I!vFYM{fvLGL(2?!JbT#X
z5;OfI(i6UBfN)<J80}NkTY?0H&tQAN`mm2Xc=(av2i(ayKrj2*E1~OmE?(|PXkb7Y
z7jTt&@Z;;W0X)-T0*}O^)-TQ+Ia3xZGsvQbL|{bFaP+w`io=AMR>68uQBW9ZXoSdP
z$?*YPx2vmbHiX(z^ST4@Y`?(U*jKb4RdDxJu0#od%;(FEyh^s=EC;Ly`v?YaM}IHB
zyuIP#;!?J=?>Hwx+`;v}Rw-l+7qfk52O$Z1aq?DZsJcn1x}g~Wz*o$w^m&N(0C*l`
zMSo$CwnnlqY#xU?Y<=mmblMV#7rbTjprg{57g$}0uhHckS}{hsnxA6<>!?o6+GyG$
z)G6TBgh?&~%5jcK!E{#2`}Z2&FyD-5w5&8Age1<@yceP98b<~_(17F<SZ+BU0?a5N
z{`dlXXZ$<<b~mSK8R;$SPx!QE$~aotR^bpK_^PmX0G?e?yIj#A#RmC-3kN-&u$_lw
zxPI~>=nk08eka1|OOvB2S`R*gI)xtt8L^l9e|}sepN-l}ow=X$M|gnS;kGzU;F1Ki
zAfo7atWkDNyl<!|K2VuiSw-RmGsBTl;6(55?WxhGSTO?26?GdnJTroYz#$LyM_eSW
zBaVN}37VXikdCQ<ia-f2=IBuuOb-S^=58edh7poW9-g?C;1@A{&CDv>b)jS|<WFyn
zOHF{fkahuHc7FC9O>1)Gh-9oFEZ!AuWZii{DsWIr_;9*WjV=mCk1ZdF&G8s2hxAvl
zzaOt*qI33Y$HJxp{=}q@?PRjxD0ZFmKGLUKM!@!g*i5Mr+0rPg7^Ms$@&F><H$VoD
z$v~$?_gkxw*hx;;k4OaCeV%A_D-w54Vi!!k|E`B!9w`O|$kpKC1MdS*hWkLzs=Uiq
zpYdz)qgM&g;u{_^{ipvlI?ea<QD(+9+h24WoC=6R4h{}r;l}e;9v&W|p*8S~fZBK!
ziL<rsVqqe3A$<1=;0i1ppgm0RstiipzWP-Y>A=>*kTL*k#3^0KBP>TG_mm{QG<+`k
z^cQ@=3rrvf4VqNEfM&r{G%W*D0sKiWpc??ZR>7H}Zd;^&5(waGP8XLcutY#E2hD~2
zn1RI%=<@*)Y9{*5uD%okz01mxiz@E*s~^;Tc<i?V%0BKv@>S_3^wB>Y-v<IG>RXQ*
zbI@q}(i~-g9Bz26Y7x}A6W#}!iF&`Lc2)|-o*ETTu>vsE)A6=?r+|Z?5RsLS`TM}+
z`|E4w+MIwHF^UG_wC=u%i&m!Q4S{~AnWaR>zFo)bn|KEQ&5!ph|KRX5X3c}Wy&xj?
zJvgBd;A&f9$qZ)g8Z-wZ*dJr(dAj^FygrqRZ8nNLYW~**x`XER?u{uG*ALMZMso6^
z4Ndf}Z>B#RqOt#%jB~FBN`-35<<8S*2q3#A;THx=<@93_G==|KJTIQHTS+1X_&mU@
z4e}aZC6}pEN_6@hLgWVy7}=iACEfEkH*NAY?H*sQLzr)33BQ^s%-e!Fk!GTD9?*gW
z$Vd?3e_E5%CZMkO$>+fKXmMKd^E$O!ya;g9d4i`IL&D*$!n5t7Il{;N6oWOEWAfPb
z7kuu;$Mv~C*#&w#e+&C%kR?SUr6?u6U$F26qk?#)3m6uUevVJnt-p0sMRlAfqVoWV
z_gmxNE=LY-%|@dURZ!=|+8TZgi8V>+swhBA&%_{u?}HM$0?LdoK7Xy`>yO1vAXC3e
z$L&Fp5wB|PFeRWXV<?qZtd<cqVsn=hHR}It3(HX|B56&+7et0*95?8f&p{P~QSo`k
z!g!bEYksNcf>q{Y`@sXGg?JW5EcJw44Egv{vB<f%jASs$K@;Y~@_;}TeDH7Oct{SI
zKVUBs<<bLn+lXPUHh88k+6L6YQ+-FJGlL5{&guaQJg~+CvTAB8)^u0fb{y(OAiEU3
zyD(p~WdK^Hah!b#qez7bf(mI4N)EY)lIZN{=zp|ho5e0`%6P7L9JkM&Cgym3o$>z&
zMoqS1r36a)-Elx=f?xrdp1=(I5E2M#)}Tmthy^g=LQ-e=;QMnK^likIH|_?&a}?&k
z=(<NR@TQ})p}Q?WNFc)cqHID*0QGt>ibHY2z%C&Rz{JFHwsF8sj^FxJ*)(uK1FNQq
zVrtL_K!Dkw9uw9O3;DsuKfWf<()MxbNY0Du3IV;hk>;;_u{msuzdX?aj0>`sU$$kx
zeA7yu1-U>ZL~y?Ryv8`+%Za9^gt5YxE1FB+9^7|z+M8V~?g6P<{sDwd49<VI1!=$^
z6cjB`yY|Cb_%()q-$uSSz;oW|*bIaQ2$(c*Q5t+Mv22B+I!CQhPXg!WqOAbes)DtF
zz*LyMK4nK2N(<_lXW|W@zW}2iSn>emYf{iL^MxbQx0<rI8nSmQfT6*zI0}&j3O%F>
zz|soDl)RX}4kXf(z9v2gvW_amt3H!wiTI<LV(4vvJ<V)FZK^+kVG(4Lid~Cc%mh<+
zK^6f`t$;-Dri$vCTGIY|!Km$VbC$82cq>uv1(2}Z!2X*mSR<Mit5Dn1_&fET!!v08
ziCrnY!Q;EjT+?!oT_~{x|Mr!Yc=Ean<*QPQk*2;nKCnVC(y{wqu;V*Mp3)pK7hrD0
z_0<*1)7r5|NDFY3DX+ujccyQ*1*R;n8Qb^g3h;V-ZV;C|=lAv?bpAPYezaL64Pcdp
z;mf6EUcXyklEjYxh_pQ@488|Qj-a+p0FauO8#AhmVQ^(W@u3z*;j^ExNvEyaa@6=-
z9)X?@$OdN)m&ZX(&eo2sS38t3xh?JvH0R}KizSi{lD~+lp=eG^Np@(WfVu_xKN&r>
z3@vDG*uR=#Q@I!VLI^|>*Wb=WA7J8RkjsEn4rN{Xr=_&Y`VD3OzaUsx9@U^KB60Y*
zJ+b<c<OEtFPFvFn_$FgmQ`73^7lof+Jo_IR8-F}@MUc2-@!@`qM=RtwO7xm*gENjy
z9^Z!2kNDmH#VnAF2Q2pN0S6C%NfYMm3A<{NM^`#jZ4bgA266k*YjA^3A5>>$<Yrum
z#<vceyFQZ*r@_D`A>H?w97rH!*`|ZPF0srL4_~h^+l2Lg)i#}p2gljCrLgl-R=8!?
zl&@{#TS~2Lbu@o)&R=6==l5RSf!`|r<$4+fW4{CaEQmpN#c&oHx{}&Ofd1bt*Jh$1
z<rzZm&wD`s>Zt7KM%?o^&Fk#_^J8Oe3|v<|qn~1)8!B}(r>dpwcl9?~o6NQ*di-)g
zn%%H3vFbc{4&A_+&CL6wd5bul8*V=vRPh*;E`TLZ8Z?wxd%I%|PmdotV$Q6M#8YDt
zZ|{lFCMHvYAw#}t){-zmCe_e(Ioauc5!I&C+NE(&lhp^<DOC=-B8q8)r7dtq*X~&_
zi#?t6n>5@fdEGtpp~M5doxa;ramuAjSU7q?aBB7)MP}%cr%Z?7#`c>#i__<(4`UAG
zZSujX(S}Z$4Akwk`I6v1Qewf8>E2`}_|rxx-w23PMIk!U@Z?<D_2QV7-00b}rjdiT
z9;f@z!tj~j-9aqg<3bJ-zb1(d&}KG(kgT!BOwT4<wy;agfbunMVh1{J7V#akCKcN9
z=w1J$?ShwUMc1(G=HrdKRM{y|<~)uIn>KuqMb16&n3Z4{N$;=J3dqzH616cy(j-i>
zdQfh7?S`H(R79ALjQ~!zL}iz_W@9ybr|x=iq)Jj-S9iZ+oi>OJR=eg2I6pbGgUD_+
zH7%BP8J$+5B~P766Jf6j9>zG{o(a1$u`V6mtOUdaC4AmPTO;m;9x~nD4-`pb!1VDc
zB4n|1E_*6hG}i~6p9fZoj}%r^L}^g==gir~C0LRW?@!=_qi6^C*O5-`r`z3d(huFb
zM~1$Qk?Q{UfA-GgtgINUuY%tA5Dum-COm{8j{gk1C!0|DYL5W#cq=O_3yb4}12Kxk
z+1mw*L@9F>A&cfocH8sIOZzfhgR=M1B|sYlWB>PpVI(#hrJcez6*5@hc?WW^zOyZ7
z@#G)4sN;4I_MiF<gB>TZOyGWiwFxHh-+t?y<@&rUByHJsNzyF@SK8_DemsvXvm2$k
zv$M0i09fu15Ys^^sYM#cr9fLQpu+FTv${Gm0@E74Epi&Q(7^z-U<!?Lx>xJLo+jAE
zOZlP{_TSq2#Zu?mu^{qD$*%Z?%LOlpQOvCCwb^Y6{8eJ_wG5*E@pJ)}?C;JF7&HVk
z?hYLqoYFS4=>*c1D-Ur2ksT_j9Q}M?)f9;?dl<pED3)n&q)bz4+6*%7^6WqI9A1x+
z=(Lr4?cW-NDlPhr?PGcp>O4Ybn=QKoOJ@*Fkmcew(qyFaUogZe=Au6~oH?^D4h};8
zH3l2>-2*81zO+SdR~QmB83m$g`u74Sn>44Jt!&;m|0zTkh#FNe$-u!tCYT6UKyU$^
zl$i)FS}9{G3n|CsxiCF@#wMhPLix+7A=EhsWCnCc$^PA0Fe1LafzT>ssPdN9R*3^>
z2}-mDOLmg@<FhmCNzk*AeG}2XH?;OC^h=y~q{qd2%h*^u(xIR&%u)GaRT%(RKAg-!
z=4uF?0lp`2wsy7X2<p9kob_LA<2#M?+9&0&jUb8=<6*#)3&14xBp$Ya9@c0L`|zQN
zKrg3u$CidZ32k7^d(nbep?05rS=$|V0bEYAvkDM7?Ihp4nEmhT|2_7<9ske9S%OF~
z+3Y{aPoN5R4Rv}As6)l7kO`=AEYlVMhzd44z}Ve`eF$Ug#!Js>*BV<}TN{wwo%%nB
zxcCL^5c*2=w26cz?t7|q@v??jcp%>?aR_kHNNk0cggWke7X@8+j#(Mcx;z0_{GN0O
zB5o}VtG@UhkFh8~vhF9yw<_iFxI`k&&d$Du;=;+qfWoJtDZDYb2f7vf7pdlnzP@D%
zSSB^~;|ZD&p}@`%0RNc7ajH(Wp1lG%m%*x*d?m#<^ix;QAqheUFl@Ahh&6l>+u1d7
zQkV-mJsmBPb4|0i*IH!@1=4-j!@5qp-WJfc1s#=~`zn9)cy3Zx?S@V~a4?zVDOSNC
zs5ezwqWZ|%4qC+k?;T11Q%ZC0lU@TjhwY)zzr>oRO<+d@vy#vvPyWAtJqezTK8GQQ
zvT3?hVRucZwtsvhm4{NOA&Jarzu@(_jG-DIxq|~nn<@&0hReVLJ4Ul;j2HUE-?hQ>
z@R4w@2tL9C7|7ToZX$@fx{*-CG^<nBUoaX^ZW7lV;pT$~9#iVe-H{Y?d`m!udgPlC
zURT&i9e&%~e%r}kM5R@;;YRYQxjyygsVuzyb#AZ`T#IvN0AdtD?C8<)S7`q}cbjjS
z!|md!HT%-%qwB6%Q(1Yo)y+#pj7)vC+0hgl8XCzS?h<Sk?vZ@UVE6^%2cjfP^*F*6
zK->K<4yV4`O>ivC1yoaYm~J{FL~bAC|4c(!4<d!=M_vObH7HQcxaoe1Kf_S|G-y^J
zCCGoZhTm8n61HZ)975qD<e4SxE?-I84Ghvnna02LUDzm+oT)crFFun+4U~W_%6eH^
zCUR)JC>?`r>7vfP(RSMb{n?pUnkHyGCq;+KMSo@(cWqZR(D@c_|JEuw__y3?TZH>1
z4F|>Xm{6jg(?GLZUr*HK7}D$bKYB|X;tcxvw%D<jscYsI#_$G}UT*(0`_l$jloflT
zRMOfX80_L$2U>1dmtlt_ze0kv!jnmP>Th}vvNOo-4tv^`aku5woKCLKcCv9F^ESRM
zP?FJ)oL(A^SP|men|9`VEW(lw*O*eq`?(5JZV}dK&dtkHo1Sl)_R?0vP2QM_dZvZ=
zdkG`^6@(79s|BZXP^Avci>kvz!3zhJrW)ne*k)NY_y+5gShcTinYf1itX=oMTSHo5
zRDj@ubStgYH|Yrp>XG3T{WUCikE+IokZ1)Bf`j|l>06WZ=dHD3VZr()6q@`M{>STh
zl|^Az!69Wk;r2mYDpNvhX%pe}nLImA*8n7R+3Wn=Jr+WVFl|N;4|i;;2go6NpiX14
z*-TAM)*5n4uV8&gCa`yekp0<aKaVgdHBEx)1jeSZ=|t8c9!9b1lo$Wv+Ag~aZN5wz
z_Rr6Lf|FxplBh?KCUV@ZS))OJf}qv|wXCi{UC)e~t)AHdjGi%>#pNjEkJE2~d3H$m
znlLgh0i49mc-xS5s)B3I1Dq><Po#&kh_KZynl?R7YRf<G{abQT>$MSe9SFCx%gbLR
zmk=4wFX4Rbo1*NEQmuHn+OJ{KX4^S7XPXyq%Z@lmm(pPPrtm-8L}ITpMi@t+IM(y)
zY?F>nsp3A|M1rWU<o225UC*uloMqyqF4gTX8te+#js+TAXfpIRe{OHJ9E>CmDnZ_X
zQz0@zKy7&@in|-ULiC7jf6|iQ8i^E1HqfLL*K+<ApIz9mOr`noj}l`m{pZ@nG<nXr
z!X>tMARJiQ+~CEURV@#RZW1by>Rmpii-+i38_DA7-=CSlNNHigyv$`w`%#&&!>5a2
zS9al@Puw{9<>UFLDEcRpmP~lM*;?V{0>c8#yaf&G?&v52!}Kiev|0~Wky+IOmrh}1
zl>E!}>+6};A0gh(&r-)eB7HUFKzrqbzL{xO5jpT@O>iFwtge13z)N(<!o3@7q>DIA
zU%UxQ_OGrtzehH`*em`ow-1#hZ-$g{jz@0$^WR7+HLZP4I7Y{3dXqwdKTGJ5{0$4c
z*NKBxKYbzorp3p{ClRPNjl|K}HKuQ24Sss){tlWe2@)S7P;GyG0K7-y8dKADgp{YC
zF<9BK!pWga5Z?3q{eNhzEiAfvL{^UkdNpBuBN{v=N^lUCk9T@G3dyMJItoRMDR192
z<(n*IHxL!SgG5?Fyt=ybjYupW%Lv)IaNi%Da9DM!Rq79u|0kr~KHLp9*M*)zRc>U_
zzo=B=HP-Xez24X!KZaEgk|ueA)|Aw!7+~DKVD4C}yy3;Ftj=$nN~lQCqZo=g{ySck
z+^pJnN|$f^Kn~MaK=lh(gL>XqnpSR${!2!)d#1bIk3oNzDGfjLN`a(TwJGy4gfjJn
zu_acy1BG^@an`VcXaYa%G1o)2W|VrtvWq=<$$}Co78p(ZoA^eNik=RI@1w?>J?lC6
zlSJ-}P)UhI3DpH>q{bAx6kn-Y?Y!pC%HGEr|4c9+Y6jV~dt`mf5H}HTTdCVNztF4A
zh#dN!kG!6NNlFxn;m3bIU<=cG0znohcxkyDL0ZT|w>%iCeq`0?czFSND@@<V;vb8?
zS|yp@r$+lK-L||vL>H<B|EQC4`xO3K=p4J|V!ZbI@(#P%<x4h3L7_3Cc8V<2arYzG
zC|DbZR*OvUzo@jRTX|&|J#}Z>RGMfwFHZ7hj;&f4Hfn6+Lh-dnBRsMD2Zdnc+J^qO
zKloAyh4Pq#L?4-P#;H|_zg*++9|_FqrPQrZBGA2j1>cIq`U{ALBehV_UB_@Pr<Kf6
z?8TwXCx1%D^@QI2j0zd10F*ghQ*%}6a3o6K@W&e&nM>294@<d=E8eGhMd{FQJx}_X
zOmz)35xFD^QP%@|wneS5!Jc5iOKpx>HlF{@r73T+T=+^Z5U8st46`272l;dbxf_Ih
zvO!VmRHqm!4WTrxDv)bfS;&1`WHJs<9#TpX(;#Gr%`*^F*Ee21G~f18E(V2O`L2x(
zQe|ms5-Nrd4p8$aE*W$63ejCUJ%w$4_bFNTig(vC-e=L-S>Lqh%00NI>1m=LdxB^m
z@yyyaPSv1@a`xMP85?g;+5Wvg&X7;DzSg6i6Z%Koe6D(d_xA7f`C<574wn=mPCV!W
zxvq@sYVyh-YjT2Xy{46hY!IZ}-*xB8wLSOE=`mT-`-;_F27xrxnIg+Ng9zB8j~C^!
zdwl=hzW45TyR!-+9_Mk+doBpGa>sp5<{Lok|L8WGsl@1^9c7(D@*)~W1sL_&AU8fn
zABD}^Go=7Sr>RICi}osMhD0tsW^1QLcawI-p>r_Mn2V>bFw^gN5>Bz_bzdYaIlfG8
zch^l*tK<31D6FVAj*UsBXac+!cGS_5+r$SQ8@gt;6{?$%5h<7P)zeYxvW6<%AKRIX
z1N=5V2Mng?6NZ!%Fj;J(+yZEjekVuG5~XL2wIjB5jMNNjb{^K}DXp9JN8VT$lWZ<~
zFtnycujabpo%kS7BRXXl2Wq8<1WS^k%~s-v7@czun+eQ{M1oNzi6BCMd6L6;s<HC&
zkBhx9p(x5|UyJ@Oxk?VkiobEpK<({fG0il*XzpDg^Uj)X&-i<it)JnC5-*cAGTX+k
zZlm8XE+^u_&aQD-QlVJq{C*NNqKqwsV$dV{(<r_sc1DMpg68uZCJJ@=HsVDDSUK2t
zqTl8y0Z~w+a(6H@XV|hEwrZ+0fwn?;3R|=kv*SfvnD5vl=>!cJDQ!E@-CADM6*M~R
zs1Yed3ZHD!&>qwUDKl-$az!O(<=Pv*K0Y>M6zQ9*p5G1RR)_c+ReVpSnx7X9r!3RB
zN8?**+a^HuH`PV5I<B50J67`0{;ys8^9NX}7^%idsnU8c_W%Ck9hD|tEL9=|x-y%N
z<r?mPJhdmlqm1F&@Rfthk8N|{aAUmoP(7!a+#5g{jeP0+@#}B6xp1~xSR^M&w(l9K
zba$Wv%=uUp%;e8?jYDHI*OB=tiRr2sqc<o+bpqJ0?knY=)K!P|vKu4E(XDYC88#Oo
z0dk!R`?PvRCYg0dWV|J{9niJ<$G4!pt$sp@T!{?>Z;-8?RHj3FT=03H<FPnh>oT?m
z!uSR<a<GZs1A_nB8@|<%E-SUwRoqCEGbttY@0}WMX*~hCHHQP8rJvLLn(NT~@8Cgv
zE{?~N#3YQXo9!96j;%X-EnV`qBcQj3QBMVyK5uEpfr)xf-L_T9<^A~IaIEJ=c;XDV
zgtgl555U|*5fbz0irCZBGXi~%B{-e8fSnBH!#B<-<IcU8u=)>6LQ~C{!|?hBTKii$
z%YHD2f(&^v9_LX0q_{>?WIC89oCTc<e}jv4D(ri{D6`J>#RgXb=7g`%zj-ft^{hIC
z`7bJ_OS0_J-sRkrqj?|Osxi%r#7>Nzv*H>XYC`K1u2F90$rP=cw^<&Y>1pbAMm*P^
zf4j_@^h5b+&8yB98oQR!J<y&}m~3TjtsZA%FweYE^zpjNFp8Zv_ncs<$ris^-b8{|
zj4)=XQWXVnMUACnlN0yMcIi2wN3#r*%v+xtH8yE~o%MsLGDDwei0s7Cc>swYSdw6w
z89HZvet^HwTaq#kVtxzPET9od_PK&iHmTVq(WRI8xQMdu>Fyq*!~;Y8Ncl{@;I@D3
zUSx*~0;@Gj;($dA2E_+icv;IaBX9=bkeyv>M)g(Gj$KWbl!!3g{`~w1@IAR`50s(B
zrM!xqS<XciK_$ar#+t73Pz9#+fv|C<ZV|cX1~Z!eu-TbHyy$bp-je4u|Mwq^d@A!z
zL~9v`@E3vj>Ji9VWS)_XRyq3A$uIx@#Vqu1X&OFHJg04k7Ew2olbTHoPMJV9iu813
z+A2!@|3BK{{=ty55BMX1?9E(Uok7NS|7|&#SR-(8lQEP1x539p#-!%u2qI(BQZ%s!
znYxlOsk)iC{_i`|cE%RK4oxdFS4%QZ4rVeYDUg+gr7IaL`*$)X5i3_0C6KeIgPo&;
zJ;>gbjGK%})WO!lS=G_l6!@4J$lb~mB<XAnFli*LY+XUlz_YEfD@Y7v>R<*OAP2Iy
z0KS-om5ZB&OaRa>{P#fkzu)P(exdIngr$kOUF0DgrGjM}5fD|Va{h;mfY-gK;b}EC
zkAXn;{Hw`eYB}(#Fybrc5CW}oy^%0|#J%&|pPueUAGhs48tpHX->+BSFD4zfv$!0#
zkb>}rX(U*plw(Y=ihxa!o-MCp9cVE~nH%TLoiMl1^06b&8V;sWs`#0S>4uzQ{xeoY
zcvF+QNae-7fj4i>;!3?!`yd~mj`M=g^B+;de)z}c>)YPgJ8w(5=G1VMONjYl6-?VD
zr}f8cjSor=4A=~bK)vZgW=D~aD;#R|fu)IA7S8D_(pQ-nUQ%Hmt`lBUN!znyU$rDG
z^AbUxCM%s!M<#_yyR$#u&xN9G+aPQ!2%71)+t-oZ?Gg8i{UOW}jZW{RO1&qGt)aQE
zG6b@hLrfStZ>Vu`@|Z<xhYLCHy<)HFO%bytV=>+EhTkCRz41FidJ$|-nKh#N6_%o<
z{jO3f-Bx4X7CrS!2OC#2#e7%>tXCuV6`&Cak<NPj<Lwi0rEGU6;jE}l>o#`*Q2Q`4
z-biD;(uE)C;vqD0aGBw>#6#NVM)dl3=BU|*g{v_WNVHN?VpUx;vqG^#_CL?9$j5xu
zEbrFf*7bbbbAu`50XPht0$fB!TP@JB^b>rTbj8+PcQc#wv!x4(bu{unTidb--{HEc
zx3L!hSCfxD$BD+Nq3xDuZE&L7S+_mEWDPGuOzA7KZL(L&8ZcQ5^QCWta<=N_hs$MC
z?Ry1p=eRWQw<jw?>u6z@6J?}ETOFi&<20BT7|mNg%v^l*g8axJ3FnVXb4A#JSAT5g
zkn4xa9oDg-4jyxMRK~r*;M_tanR#|5QFQN`F%X*3M`SGyC?>LY_j3W8tYfyWlwm@w
z+8+g^wN=AxY4gYS*`^eIi^xRNJpF{E<I5#ON~odzZJ?9zxt+%9<VzSUli~8|jQHmQ
zJ(w6Natj@#9QzHa4>Pd@12p^u#p)V0a+K&=;!N;;Of%|?5&W+H)qKC0F02nVuEh#M
zrHgFQSFZaTXM;=9;!K>~oy)zw^PL?b&7S@pMH=0C+1{2#lw~jhCQXF&6C(Vi?9AIU
z`Sp!^mv`w6L=LJgnt5HWD-Tz)@39YQA5XheS<Q>dxSG@}1;&Hp-q_>0IS-lt&PJNT
z9V>P$EZ!Ki3^6`GmMWRQ$!5P2O|mNS$Ky`Swj&8BB8lVZ<r-`V<*4xu=Vr{QzmlCl
z=kf=<^ov*&?yD~$6}RvHjBHsi#NuIBb1X<BHo+jM`SoLx*g8SR`b)jk5QQH`Hq3Ej
z#I>Pn0h?3NYu|9?l;Mf)%Dc1l1y&xuS%3t@Pi=NKf;!0|dta6;IBGclC^n%OXX>_x
znO+3X5!!j_YdGrUdU)mu$sM`-&j{ylg!fC5^6ru87l^Tr^i^VF@d$j4cK)HN6op^?
z?p4E&Mctip10+`4BJ@L)AuLS%lTE#fSLS#&5a|br;G*lraXP^ac@eWC%i7Hg+d~D`
zR|i-D*w?-G_9rN04q_&t{65r%s*vtMX@sU#*ghP&qlw=fw+)LMbJSTuj2-$%e5EnY
z2BZg_REX^A@*X5&AlnV;Wd@BsY867~hOMbDM8?T-Yl2;f3z{(-3t5^ERUxmj`VHM*
zwBtc=R~ZvRBLjv&HYXm`h=@|4;T1vS$1B!iu;?@4U!E=_3P-Au>%hoR$MDoi#>O~i
zoayQQ^BHzgbl?-;Nz<R9?XxPG-rz=s!P#H3+yY3uL#BtceU0{X%T0f_#%sYok716h
zpnt8V+>C|GiQIqb8^S7A&8A#~^~9U@*)jUO_vvSIVkQN6<4xPg*Xe!%`uRxMV?XQ{
z*m(Qd8gNO|Jqf7>UBKR+4TOo2+4x5V29_drGVuCgsSJWLUTd4;9gvfAM544|wHBqG
z<&bdi0wPDrnrByM)##k0ogP=}412M$RsHUNnk%`lTVCg?It4=QtZbT$Bj$O^?y}8s
z<-y8@j4`fOU&*tQes-JR?%3B5E<e_t?eD7&3o^G`iCrbK6)6h-o;9dHLRpNfo#6E=
zsyyX-V#3>~GhKKE|A7%8umpL=ZoLHBc{X%6lzFLqugM63%ZvQ*UA@=<Z-_0`tX<kr
zUC{BgsTQ=*Zm>TNrOvY=vb7M+sK^rlizdCm)7q9-ebzhJS@66rwIQG*JxNrU=A}yQ
z9e|`L>{wiA?Hn}Zw=q<t#r}SA7AttI@n_pVxarQ=Gvt#2xde?LP5qz!n0iagU_51z
zMd1=lGVW~+r21Feprc*u2mvH}{nm=*9$r*+vOeTjUza^J$#G3w=(zYP<&FWcezrMR
z!g0(oxPnqhpVhmSV=`4-w3SQPl*-jU^}#^m=DQzGAz~+7hlbAMYlIfS$>JO*lrGTF
zrGc_1mG6?uivk&#GND7T;5#x<Ylwu=Zk&S8vYUaTauEf7u-?3hd@!TGk7gp6wIjLQ
znDMJKAm~3S2=ZqA2@*-KbV#u+6R-fe(pY_UrCRomzm=x3!z2p&b$|ictnXCaJTDEk
zn5Q0U4R!Z*5iF#i@Ot>(R5y@q?gnR^3BQoA=>anx(SU`1%Vuf68t-*g$fsPA#1xN`
z_8rycoRt)v6H}bmOG514CS756i+M`pb`V*^D=TRa+tHr|`zZ30qy4h>uHCG;r!Rw>
z<S>VC4*$_+c#))V<f(<jZ1!0Zk1I7!Vd#)I3qC9Iq49ZC><@KSJ`Vjh<#{yxWWnIE
zdjxZ+0+r5Z^sTb%MN1!6f_z<HU@Vc*UQQMoC1#>m*B6RR4Q<}@ceE9r!O&RRL9F*n
zJ2L>}i_eo2L$wwZg;k|8g2v@U2>7i{hr+@-M-F~iWyw!(1*6L6M}W&|jR4NvS(zBw
z3mB<dN?<dzUl>_?#2pMs@RNc4aA&%ctIiuVq$>2cjMnP=Qh>5IATdY|*5mZ;uN*t?
zpeaRRs2w|{N2oSx46sMAKPHj{loRQvWH5m|C0#_q+(J_3VqEkM4a}u7#fj>dz&;md
z%f0h;ApB_EA#l7X7(G1Gpd5SMvW+RlOk~7Sb}oLcz|#U@SY(<Q(W)iYWH1VCksQ{G
zL{0Y-GImJBf`Yt#srTle3$`H+Ryr4>B57vDjxt-;hUphuHLY9gRA(zV-12D_K}Vw^
zcxJHej%0DF$1$N$YS<JCdj%^cSyGoryL-sRyI49Ox#m~-7zE{?nF%C=FqPa%?Wk{@
zXHXBGGdU9km{qxw(6rkl7<q$GjKL=*xPLjVp`JVmC-5+Jd&NTOScZ}`5?XW=wHxh<
zG94?v?)8JBBA2!cL{dQ^l55Z}ZCUc#t;)l$za!Mm@J$L21%s$4bmVA?I=~*|i?j_|
zv;;$;($_l9nQw5_c7B#h&EUiJ{xkPd2K$3U?h#TR!+g^zQox4JPv?RCw;y+`i*UR$
zQDabW>naWu6j?i?yc^xcmtRG~=&&qDh!e}M6hGRzQAy7{$5e;K?3Be9<06_T*gKax
z%p50B5{p<wm-d6|)jSP?%0Bh4QHp`TVHa-%17*J*<k;awva<vgLR4hVeV2%Pv!JZT
zdL8w^Xc2L&92Od8_x)vbZcB7dN%Y5i<7d{Hm06)tdIX*lRNJ*JDNbph4h$Bfq9Gv#
zZMWRyH)y%dLZt!bV8-Mw_6RH0XhwX9`s=-Ggp%aQLls$7ELdSaq)GdUXySouBJPpC
za6in2^-^|L^fd-w3LI$6W+Tsr?ik{nbo$r>drzv9;P1jE36cCEGef*XQ~TEx1hvm*
z>Ub_zJag<|lfB&2Wz8(gpG#B@A?dNf>9a{T+P)qrs*I+@-kg{Mc|<$vmFYA*46YPW
zV-`0Xvnf9e9IcSpbP0J+yl+uAC~aj>F_GaMZ^Tr78vsh#SH_$M>Yza9Q*d?-X}{*#
z-bH2>rIRu4@f2r*xnC){+*|?fvNeU6M$>COjDWE3H-#}K%ao!vI^jvT@MU7|<GoJh
zfQ>hLkKUG$8bP|{52fE?lj`!t70*$ZS$w&M!5XT_rG!Qw9X)Xa78HBVl6@1{ZeM#}
z=lt`k-!JG-*izQMy`l?b8OmPEiT&GxOO2#kvKlyGqOx~$`QrDXg-nG&Y<6L92TYU{
zb}rB|L#AhtvzL=*g}qubx)YbelPCM*La@ec==fjdD7HM7@}pZ{CO4T~B=O{z*9Q`0
zewKM6WJdnhA;)0pZj}JZfzEP6M|ox&72J%f_op^WmO>}RgzW{lm(FoAY(bZ;xSg|3
zGQYfOO~C}kVd2suF++Y2rRh-ME>q)o-Sp~PS%%?$WPA_M)`bWi9|{#QGb!av`~5<j
z)D~A_92aNCD*N50CZ0K?T|7dLzPg43{hBA~iSfJIs8RHT8N2bvyBsVz>rDjN%Cvk7
zZSfvR!r{YC*-KP@IcF`d0ksn$A<_bRZ0K>FL*)aism*_JSShPT)&gfYmo~_B4_la3
zSu@(a%F_JGuyJ@ws0f<h1jl$CRm9XJh%95(uuP@&h%I`@i4d~p=!R>M=e4OYKi%?H
zC_%3Hdcec1-N^of2ojP<qc?>)nz<r0A>A_Lixd?~QGP0I86J!iU^elae<2+@onJvn
zt{K<-=2xWJy@^2Z8aA!>%&e!v!#>hqtDKa?$jNai>r5-OQTmv+u+(lg;<0OjS1qps
z<~a)G1UVuJWsX$LEw(7is<eLiXrGLwMRle?X61Z3oHKr$su5;yNBX+D{-eLHS<32v
z9<g9BW$mye$>l$fzGK&nGM`Zl`@jcBmasL-r(Q8K!+qr%a76s2Aa9Jei%l)ed=mrw
zQ4|GHstZLdmas!pW@Jv++({$ui-;1mbWg1WIa`RD;`(&Ckyc&4llsE<zN#3vJ3U4y
zDtinHbi@Z~*qYXWZyx8|{K$2a>qJI0U^?@LU~+n7VpKoo9I%{;FeWBpHjsYFY1^={
zq3n<9{Il5OCTC?)-~P2)SV}0!96(JS!fBdH>e>(h9)io@H8XSxTmwPi`2TW?J{U@g
zXORh_e@7BjdkOcB|Jd_0^T|TXcpl3i!7kmkQ8|@h&(MhGLFN{I0xYtu`7HL3gc)<@
zK9T5KPKn$iwjkY4T~|J;IXH5+3Vd*TGgkG>Ij~HT8JkbpZvSZ#fWIqrYBPwW2^O=|
zs;;Kb0P|BIf_m$$QbNJ&kiTUW;(cr+MqTQZ24AVXDYg9bgjoD&xg&^x;TK}IcUGx1
zb0+@&dP{t11+AGA<`(PRYjr?m1de-=q|Xs~)z8zuq|?D<KYefFB9OnOpkan(BMEl1
zZ*h%moG2;}C)W|P<(~C|r4;c*<(MkfG957eTltOJOtW)?p5cpPztQVBd|J{BK=Xn2
z6d|Z7UVkp56mxEqa#ec-wo?weKjyk`Glza%zy29Xg&Q(0cH_ZL*SJLx+zQdgb{W2Q
z&uuy@ww#$JcR!86gE_}>kILJGwCd+IB_UzG&<zu_N#`{-)DuKa;hh&~=a)A2T36gF
zx|-m<@vsH&Yda`OMS>=PDJzn$WbtD4N)ll>senVK;@_5we**31`CF_k4G2CwIryR8
zj5EuK(+q5><q^%#l7gYJN934(E;8VlZLFM-XKp0SO{}_dVv|1>>0HjiNHfy)5z^Z;
zSaLFB=1pY{HVKRsyFO+`jZqo!hOyKv%_!b6JdsIe<U-_M(0ep}ahjk7L}g;+%!J0@
z``gA8wFv}+CGh&`$<DNG#UEnjiZ7#U!iF&&#u>5wNCzmg$7ynx7XuF_W;~ZB>3}(8
zq`E+yko>00q;t^DZ8f;*FJ9<EBu@gL7f;wikxJi?G7`{mkew)3PcStjB1GslY7}3b
zErut7i1fvfV+NZK9W{&1?qobcyOLqmoPretWyen99X4X{PCEp{Ts2f-$bw{&(3^~?
zxWAfh4vX>6OAT9+5vMBbZSHLB@_BiEq}FUIv}jiUl1f%T4`!l-66}KuKJva0mdy`y
z#hBPqAdtFfvAAlM@4LyaT2Mf6<YU3IMGQevH2gyulZTVWU`E%}sW-9kDHq|GBVukK
zOkUSvi74h1JCh=MQT&67kP?m$xypLagQw;1+$WaS#theMfhO%2Vfu44d2}HU5lVVU
zSGXFV2LWb986`b}C|LNn+50bYG;TG_O<n{RUR%Ow^aF2Z5bLya|IjdVC1F1qS~~wX
z4idMN_tSwwW8*y<&Wm#xmtw|LM5?&Jxh<$8$MioTeFd%V+QTS!CV~jTq0nO+Vauj%
zS3m#w`&yWI<T5wl$Sq7u$!p%&Hqqo>vH$yJVQHtkd|?Zz+IYx{D|e1mG&DB38?gFU
zG`e}I%zfug;XYn39lya30sIC77p}99#)-m|S#DaiPikfTwk<k+MXbxLA0Bg}_weof
zAEltVg<$#}jkHcYD9ZB>IweyKCIyASHknZFJ_NZMhsKeH&xEm2_VN#gPdO4cmP|ql
z3ZRX0)Sw>OCRowPk4#vKL8W%lhcmr`eQTFz*&%ktxO6zoT5%8OK(w==iV+SvjE<Fj
zlVaI|$u->-o*Tm+4CT#mN(u^HBcs|wuzX%QD23{V?6MQ6S5U)8osWL`9;Z06n?22S
zi`KM4ZG>eW0;CHGEL;IA*)2z$k6VM^wkF%dq+#<fDA2U4W{zg|fFgxJozz?fp|r<g
zn!6G>^dAnKkvRiXAcGIcW2aToS$TMY>TYi*n$E>_<t;fnewG^AlZrxE3!_5HPm3Nc
z2xwh#>p(C>@Zc)O&S#ojC-KYwhNYowP=EEJ#$}0?M27rImr{;K`t)Ajn(jEb>;PY6
zSxvIilaSk?NEO`RaA6}&iMgu)$}42W@6m%=ld$BmRUaj3uESnSgsNOT3-$l(HLP4P
zGjF3-5^~F9<p=N4bS{X8l!trkYf;du+htuwl`?!IT1fX+8b$v_da3CkgKIFsMPaT~
z+d`#S{Mp#XPn^_zA<bRP<=VnmL0;#Pz!#nh!?>HbgQigY>-<{}b2*>5A`kd_p=SBq
zCRM71f{9|K`3;4;Q0HeanCO%qiu;1Jg(R!V@#^_Lq#ly^>OcN313<gNm?h$K1#RrY
zPBrVnZtwdhZHb$k%B~%3O#cGgw7CPp$#;upVd^z&(G^RA+WjbVnVWUZ4IVtTmLD{>
zy=|_@(TPS6myS2xp;Ikh5{&!`M}LjyewwS1)???P9EuXv^_Jg`8crkKExF)MnD0?*
zl}A;IX13Q@w=K#izQ)D;@F_^~QV(WrCQ|#+;-I68%0Xw}$wTShy*yJqQ-1s?zk`a7
zSrLx)Z>dLUtwAun?=6P&{i@H@ufnFkQR!Xch*Y$)ua{9h?F{{JR_XSS=xB++HZL~6
zsnXw9w|~~l)%aR<!+rR=)1PIZ@Z}GSc`D7BOk%$JSz+19KS%xlX%U(bV7<Iw6$ltT
zowU~u^UM2Zc-e^;qlztO`TfoD7T;jUqDz-{s;Xz!qzDb8LQQ24_1v9&ybg}YBiHHI
zvF3l$zuvnKG9bVFRin@7>ztj7{SI@gV`z!o2@-hSFS~*ua*K<jxVl<7TO-}=ewF9c
zb)V_pZVJPHyZAVQL}%j_7DAcpj-Y#Sy}JIDA=-m@?cS>LP2KGBWWn|KUJ=v%kHe<Q
zGa)JC(lOcSH`B3@9(ij|9xa;i5V>mFXSdAu-O7&+pSR{P<F%jCj`mW>0%{@g#xx8S
z*Sb3MIaQzk{4B3pr_zVc`ModK-*I7ZxcCd`AhKoNb)fT9NVcMQeVn&*8Nha@-^&=c
zWMsImUA>ca^h%GlIjqBQ4+dgTC2x&c9c_PhNK{oRX-U?C_oxeg565bVGT}d%uVx`d
zY3W<cu^n`?S!M^Edi|YirY+^IC~e2NR;|kteXlW%QS3$>ku@|^tLav-Q4k0!hSg8m
zs%TpK!m0|N`@|v;&J^mnM>28CA&vcOTf7Y38+|#e$aXez?kK)eCwg~;Z|Rx-siOPm
z9Ap|`@<7adzV=X;CK7%<dKVa3@tAz`F+5WdUr&9TVj7aR<dF4fiY*pJ$Bt=n6Bz06
z&0$Pq?=|3@)Yrd7cJVkVYq%jwB)-s#?zwMBZoGqCv_nEi7FKA1I}V*hJ&$*-JzXA6
zQVu9D4Z>34z^I7XI47DG-?@=j4?6ii+K{1&$U6AlOMQQhM3wUuepnSkj`QjLv*Lwk
z#5r1-T|<tWB41ZpoF*{lz|nIP5Sdk8m@~#O{SSv{fsMp5`ubN`b+F+oRD(<}pBzcC
zP_hy~q=k2|Za$(L1rpsJmQiUKk-;c{g>yU9kYa{A%?z3GR$6g~X7BNp@t%*}uyDwa
z62LS>kV|i;nviUg`j!$wzo)3Ce@k(rUUSOIEjaCJ{~qm@u(iDO);^6sO>I#b%4<vN
z0?(Et!s$S*fbF#`QMahC^K~$QmY%=Ed{4jf^q!$(l5o68WhP)DeKiMR(?QOUxtJ^U
z0Y5G{GN*zf_6Xx^%43QB)pxOHrTs@CyJZu@x&dUKW&N+Qs#GeMBPY+9ya~{vFS)rl
zQZArWn$~pBnOO=r{SLqrs?e9cnb3pG1x7{0Gn+T$W*udPy}b`1tOy%j6qK=!D45rM
zz+gvH7b{r!I3)Lr36%WC&2}egjUraUlugc8mjiWFl<ttK*&Yy9iq2W4whXaG?VT!$
z7BBsD3ik5gsLK}F*{Jl8ygy1aPDbayH$E9JT27ulgpb~KCRcT*HSCDLTZRrtY_jDG
zU12y|<khGOw@lnhcoG)vAeWSK+Vs~wAiewPaP@k8ktNIt`<YLoh4#TD4(v$98dA+i
zQ@{l+ic3@M1miD`>>eg5Rj=>O*@GdmbWhe>e$y+dxL;3UB>s&_&%#x&*9`^7jqzS&
ze}Y>|W!-)ukLQ-{IJA!(C8Lw@kXU76QkYLVZAkJgc!Bz4HN71%5z(TW-%XygLOgoK
z7nahbvtjxiBBJw}n)_2!{Q6Kg#u2XId80NPTS)pfxI_|te@e{J%6Y1$b@RC@#Yr|i
z&uzwOl%`VR&`)cP<CD7H)xp>A+j$_4j~`qZD2A~eGnNtwJ--o+b<eXD_<xEFuRUXY
z`WPC93902I4Wy<Fhu^P@+P&b~xN*!|46J}$&wWxyq+cI?j>B>7p>fNtfcaPZf!L_X
zxlubu#s#dix26^QWHh3Rf9Tg)KriGQVp`pzQfmltCy%YqmGH2|^;NIIj68)hy0RU(
z(F#7x2@Hbu+A<qS4z~WG3Z{&~_o1jpQy7-<iBra+Bcr|lK9E29d?BQpSO8};(>}G6
zk*XH_#+V$x`cKP0?eTq%ln-tC>la1DAxMf=BhSa}naSVj_cjbVV3{jfF0troELWR-
zx2|B#@Oe(>u_Qiu9?SRD5!WKAR``uoP4Gh{6KAmOsC;#8V)Lu-<HgIXkXM#l>YW^&
zD6zDzD5UsF*KyXYz6#doKQB3^(0U@0j`oW8*G5e6*-buOGFP?kT}`QR*U^8q<alJ%
zR>~U!txZewU`y>=ip($gk`f-(7hLsk4-adSvZ%!uX~M5%*^j73wH?7ndmii43Grbh
zBJzlHFebG^T1BeXB$JSka(z3SI@-9Bdg8slU)4wS6qS88+Wr}t%4)k@{pICyzrKB>
zNQl2wkXRP{Z9q1oa_h50BZ*Cn3VTCt)$t)#0N#ENb%iSip}~}(z^x|N<w;<k&ajN^
z;b5Trj9xK{LJSS#hw7BVKM~$I<w(6Cp4e^guit}p;7ShuRy|X?yoGqym$|xJUH7=e
z4fXz$so|0O7(J+B*{6gcMk}(!=o}tjR<NALw4~tm_(TadcUbjfy|llbxS!Mu6=Y<b
z{P@SoaeDM3x{(@xinDhyG?i_7rq*G+qAY8QlKuWPch?-R4INd`MI%VJ|IqZNsc`X!
z-N8{Tpfl(9Zw~$WpNsCrj^f`*OxT+m-R^Fx7HITnk<XqroLgJjcaeoK5&t@qEI|Yh
z6oU;0K~#0{YGn|fLSqx=xF|j{_6jbpm!lRrQsW@w4yWax!oGD9x*21|4R=Yai5KDg
zA71(&bb6xQ(fQn#x1RCDEKj$1$=8HF%#>!s>oz#8ZaOfIKWKP1+<H3X*17z#xS64d
zu>ML#DxP+y@8)i?jhj!?vIk35^qC|P$FzYbU8eB*n@3vEeILD&4zF<7cO{4Nx87e_
ztNTqdRL4BkyBQSqLEWW06UN?^N|kpFc^mZuv(BOAGUdM`9Qtm^t}fV;`R#Bz;lzCT
z^spY>6jIe&a<+a_&ar+=tHyhX<8nkrtJW{#hpgT4#Rm~{g_A=yHTb(S?=A85@|!_1
z&md!4>S>Pc5B5$d@w;vPtRfDJ`c8@sYr4m#-&-q5^4VuHl83(t2ZnZc2x2`)#JIX5
zG~MTu<kULM+<U2qn#gyXi<)&LqyVdkiL-$HTi9JQ&0HLhi6h336za%4W{<Mt^(Lmq
z<Hkre{n*JFY-HoTc%f}%&ygXLoiX>7=EmC144{u?cgDZ_wIWLxtDb1r_o?b%Ob1b@
zY#jbg_|F3wzm_#fa`ey-I3F>q<kVI!1e&l|(OnA^m0cwM__mGT#TVcn;5nyiB63c3
zX1ZR{lTK|D#ro}<eqD>#)V455o5^E-(TlOj3?x;dP3EDBdJITXv;F|ZxeEJPtQ?8X
zi^je%oDQ7?Fs(V%WpLQMBtD%$a#CAZ#r|476#R5p+RiwEXNXQcT*YOIZZ|EI|0>tz
zD?AqzoV!D<sQL{PcMWK%qM;<C$Xc^WQ{BquMis?J7_wC4{Jb_aS5=x9&X>5<TIitU
zm=8PV*qZAmGI*xhI6Ecwm!QD71+F72-@GeR;_t)t(50?j{Tp}uaMkqi#2}x>wEAC|
zH%%q&5vkm75kzP8G*_5-dUci7zbv`i4-9;@F9lvD3u)=38AVIXVlqs>_GVs&xIj*~
z42nR)Ssc?>hK0OTEkE_YaWjoDXn_bCoQlL#B#Z+?dUShDxA_7S(zzw=2^cgWQTNok
z1EJ6|uqj|s4Fwek*bhjEaEe_%w{O3>ALm3Ff>HIzP2=De8<Hw2IlScA%WO3bG#8;S
zrk^nHok_CjWdA70D>>h0GW><rMv%w44=Co4rnw?3?<0C%ZdCzA`knumq1%&g#>ZaN
zwc)*>n2^vdR_Ax$kKXvupDz~+f1p!Z*ycRE_P8RSavCY+9<v5lakqyIer~?88%xuy
zBig!2n=h7fV+&`=Zp1|^Qg^@Te3@44{hsDYG;99*F#e~cuYXTla)J5Or^kV?j(K4F
zv&~tcyEXq}TxFDVuID~m6izqqAAR$a8;m8r5A2<&el<vv$!3YXnl?lK@u`;t-&#b4
zo!YW3sr!UHcYEfKT<_`bH}%`Q=s)A)55gPNPW($gnz<f|x46)CD9wrQ=KSe7OvV_;
zbRZMj*%bvuGc@z2UhwGZ$C2S2C<nz*RlJd_uMSO6q#CKTpHrs>^OERZZU%l2!=u@o
z@!y=<n@qp&n1qGEk($6-3QCwEX1boMm=wy{k6p})C=W1?<U(K$VjgVAr=;>iVgIWQ
zh(E56&;K9x-YP1tB?uJ7U4y&3Lx2R=;4naNOOW8s;2tD+aCaMmCrI!RAV9D{2G>Ba
zK?Wb3H_16U=e%|Ay7zs&`*+#17QK6SS5;TluIlRDB0H!oH_@VqaU#9GysFyjPc6+J
z4Br|<gf%ghjETe0M&m`vV~duk^;|t!rj{hEf&?Usr%4=5n1ub6RVVJnE$h!8+$Fw`
zrW(3yQ#+TliV3JGY8?<q<7@d6eJR)dl%jHi`h{7p{-gaIp9A~XRcSQ+gJacuJGEg?
zX>*>&)~{VR!&(G!t(k26+MK;Qtm`MEkL)p}5Gu=gN%s+B3dVZ*<O!_#e`vurMtTz?
zQjkN!8)aodAJWfl87cW_NEV6of`{bTh!zPZwmLtV81jCSvOuBJdPz6?qLB1a_iR47
zHEN;=?IS^k8Q>*ASBOYu%<sa4L+F_5Wnf>2MJ;{24}HlgF4xvnT4&-sGcF2R7vt@T
zSk`LiP#6AiV~W0SP!^%8_UU2&sd>z$jwSBJ75j^I$*0HqkGi9~LV&s?()EMg{BL{-
zd})SBs1bu<6}eOUVu`+h(~?vyqP^6tEbEvw<OvC*?)OHaLDv*FXA0{>5s%amoRJaG
zxM(2fO!^P)dlk||lZBu7g39Vs&c=zn1SWG$ALv)inTWQDkX)L7j$m9>U}z%or+yV|
z5OuJ&3^}3Wd;ZE2^QAgUf~ndY28B;(P5GQelU0}e+y_1ElFm(!_>%+4>*%wHhtoI@
z4izlz1w%Y=t3qlX3nl~P=+y*KItZ}IfGM{Dubon~m3fSA%c7rc1lK5BI#ZocT#%En
ziqH!P=&c(Zq$JIVH)@4g6>3@*&R8~m(!?fFBhjJ8TEY7~&AiauA^Z8G5z0t^M3v+X
z6v5|`%vZtW6HY_BN*7(INj3!ZI%e%=XY6>{c71Pfcp>}J`+kV7a$vtTL-2U|V?h^b
zIMHWR-~7UEfzm~kS<HMiRSN0W*-r|#4M@E6k_{97FQWGmo)Qs-BxMXa3~+HE<>pZI
zpA)Z0zW5j)6M5?ILz9#_Fo9BjdYsYJVT=y!;lPhyQ(t-v8ex|RRC37?Fw-n!T88iK
zutR@2wkaUF^@9N>3n#;4{HR#S+eI|J&V4ez`E`4uVStf`r_kEE^@QVdSMH));VIfg
z3H_YL>IA13P3^BU)ovPR1^F|`TGND%2hh}IC)3N-J5kSs4ForohQnXXyckzbjW2Ea
zRPjiZuyM5;<mw9m{G{av-)oKckCvB4t0zkFX5X`gi{DR+>;x1Yb;=mBTE1?ui!=S<
z>$6Dl$pF#m&A0J>$lA@ez}HLWkth!y^?0Ue{rdqaAR6Cigee-a7n{Vfdv-6@*#{72
zu)$`AthVd^=`R@BNka;EUPZ*%XyIjLB|P1(sQx6p{nAH09e-&2g%>r4En|ym11e6t
zN)B!<#zE%c*<kc*dVX%2-aNw)nIaQX<7{75+Dt^Xch<MlkJC-LiFE-2hL({pN1_;~
zLQM<4t=y-0O@mbgvRYBjU42Hfed9a5^`Eq+3yhP372igEqP4{I+p|o1`>ZC2NeqEj
zb-cgweer~OBdzyGCc2atSs3dvKVB#gP=3ZmMP_hmS#6m3&@4n4D2?x0A|@l{;rMEu
zM3Bf8K}brM#@fxNyBv+R04-`j5>`65tu!OZ>hc_uLLx)gyju~c;z7@Y6$!RL$_;i=
zjVe=az>LH%VG5(;v}H7m(nKzO+Q!v3?HaM%efP;*Hw^lw1CuB;=e9}kIDX&wQ~y9d
zF_{I@lHSr*bi>5Ydznoi&b*H)!t}w7$XSFIMxjk_VReG61qzP`h-9fUC*Q-U<<X=L
zn3ijVzojUn2^o=kJA`*FKh-0_{%GYd(5O@4bcvkgCi<>k!T)vLi|A${cc!c#$Mc^z
zK8*TBS}}Ua-Os3I5INgA&3$k^m0Oc;HJDJyC6;5x`55olO_^<X9Ew1g`S#K7b^ee-
zpuoep#)8tV;)7Hc-ly&{AkiZtL|ti)*(v8Hy~Yd)mJoq5ck741d3nGK#v;uUO9C=P
z`FylzF-W@i^{OzJF0IDPFG%lvksYcyWpZWRt(MiP*@XLK?Y|+&wpcx6vi{VgXkvc#
zP*^Q$X%Hu>{gTmev+MyOO@!=bTwX0GYX{93>XjjJCjE8MGE-pz{%Mnou7>4sRDM1U
zVOZhY(Gfl18XrGK)U~o8<^`?n4WS*2OG$DJV*_=FR*i4Ogjs2N)*Q)x*QIK<Od5Iy
zGiwWiaP0F9;uR*|Avl%z4jxGegFnhgcPGab2<_T~-|)Ya)<G+y?1trf$+zbb_{hBV
zkg@$<PJGSkdEv!R=xZGFYLnUgKq$sG<Ipm!Hcu-2I^r<*jk6t?-#uToQHbqzmR*U?
z(#CslR&zFm5%*lzFxz$gE0JZSlQCibN$I49aGcRhZ3Rgp!nJu~!dBLhRP>S<b(G?)
z6og%6nKT71RrNHxq80s4(>YwBehF<_?BplaS~hBdSn8b>)@&x}E@LLs2a}J}zu+;4
z>|V=Wyn7jpx(47|RwsVx*eLf9m0}Rk)(<6#MtxyEYVU^YVaqD4{(OERWZP*j%h%{C
z{J}0iUvV2l=!q0-T?Qd1X+aE+do<g@c>b)San_CoH4)WItFVt%uUIvc`4p8;fi4;>
ztlkKFcG5;qLdmIxN{1Aj%<zzSDGuN427TU%Oo|bz5;&GP8YgB7ZB~`uO@}}w@z^y4
zc`^c2$cchJWz8vy5*=^pzqVDr|4yy8n<YYU<A-s<=lf3k;(M-2p)kL8d9rJ4zN~tG
zPy`jv;@2|xG8NdT^3NGYz8F97CbjxMr_ehDnV^g*d<qlIryd*4qfN=4i#}tnifDc=
zePt<wOyv0bC8vfvdkBG{n1!f;<vKwF$p!~*ue2lEtZYXV29TBU2661JOs(n(;Y;Gd
z&&XUqzehfkKE`jL)vX!R2%Yu$82On!=WAN{<XI_~rGMNJX%j-dk1noyp4R1Qj-m4Y
zn&PnURw!bkIA<Kl_w_l<B6y}kKmRd5;geaY7-t&kD@$@B+fZx>Clh8`-Y!er(e}F6
z0rC=YjntH{A_jg`%;gzgxLyg0Ad%8rR$2Z>7NWY`tc42>uSzIrIVHGHi}pH`J7RQL
z^>s%1Vlt+LzloOnlH0I`w{o;*JWtP_CN{D{4SS#YWZ)SUQE!QObVDl6ms};{M}eWU
z_rrRs@Im*wdE+BBA3av25HEZI6XZDb%}uLSfz+LzB0vT|B6VgG$tI<4Uq8m0jjmcj
zMZbas*!!8j;$nfqK4tEacr)t8UC?NgaBtMis^Q~SB)1yf#zEKDUq8F~+J$n$zghWM
zlBhQYuws|7|725Ko*R<5`qF~N%1CpRP%k9=;vH3}E0!c5->f4Zy`DG$(`s1T<%=F6
zS^d|O<8q=zH!c{-j|Lu^y}UD`HZLr1Q1yD9t_Z&CSRA#7dz@iXX3Y_=C8Xn_GwvE=
zEpsedyU}+Xre>h3)`BH;#l)GGZ8~l+9$~$DSIVfz8h*8({vqe_$jhmrWG4?7{H&dd
zgqVU)R2pd-#Vr|8f|E!-#1?fBVoK7h)82rBpJNxd*zn_IdWi5T*4jh@j=<3c#qw^I
zq$uR3vH%HBmyHJKVc+QZAVQ|e6GLOMueP>%1&ZPe1h<K?YuAyJwK=y~X%^(~5@Hy4
zA2G9u_+J$!Dc53#nh!jTJ1n}ebk{FE`w?X81R*3{2>yD=mH-sg_Cn)n@9?|_(N#DN
z>OQGczO?nH3_)y<OwiDGs$A!qKihrzGP(XS|0L3CpjOVZVA=c|8b_AzNZqBmO`srU
z-%LNY`nM-KH4pNh^{xeyPDf`w?|+U^h!Z4|Ko1E-h+I48NwjSk$2-)_<V`dd?dlCB
zH@Y?1yNMqV@^ac=gs-k!IonTbBNH9_(q6N$a&k8vl$j-&P<~Y?V`<a(!v2h!r(7#^
zLJ3+l4N63h@Oj&1@J_woCq7oUP(p<#O+cIUIR|UY9eLcl;hEWfh5p@G)$x|c403*}
zR}$p&4#|a8Nt&!P&w0slbgbltP+g&~Wj^e%U*$Svy=h|7FB_P0xk;Iz_e*`xm$pDe
z65iB!_|u|*AFg~0Sh{3-Y-PUTu%U}8#$mL^&!X!+%+h{2bulq|tGb%8m0uLBC3c6S
zh<N!6ytuNf7o(MzwWgj$Pk%qRELqf@X2*M!m<b6~QWq6oeD?0$L0Nu`@_>-4y3wnO
zj2PE)C#Z_$40XPGUX|CC+2y7Zf!<X`_v4A}YE9OZy3E5^_AOOYWsPcYj%5>vOvV0H
z$ul`He=<4qTk}Vi5_9{gYSk%t5zchA+SDk!$`0^dc&UhV(<Poacwgf(D+H(UoC!Rx
z$0Rc?7hwUc=iYNpxF5b#E>=yebZ=o!DBRmLdMWzT0J*=jlw4NUw{wL_n3E0@iOENq
zw|Y}hkz>Bc6r)4WWLm1@oO``G8LWGllI#L|^8DiFqQ4G}e#n$V>D~e1K{EOY|D8HS
zs-V?)=DkPJdYunsr2F<RARQUNmA+xqhp~!S^o0m}Sc_pxx1KGb)1pEpUwN{LJKd=_
zK{dihjO8Kr@gb&8Uw`k3a~HFb*dX^*$JIB543Z30WkFY29I=9#&kB3;t**<lY&8p_
zU(|xpN<rW4hozS@gk8?Ff{0*bD`pl3g-w=5ZrHc4F9#04^gq5Cz;E1T<_i|<y*W3&
zMhNBe57_?1{Z>ZY)T3k3<fjFB?A=G2z`O*+URZuU+h8{$t*SmG-AuQPLb%t2)~0wd
zowjn=yaDartD;)sMS|J0?~@|X?95Ml`X?v-__4QMpZb?*FdsQ?E#<2XRe4g<MOTK|
z^t+-4#Vzw%aw2iHyx9Mas*@Nvt-@B$!~Y)YY6rRuN>A5ns2pb4>^CsW$1pGb+_ZR8
z5thGjOn%bL{4oxSbWS3%{Mwezz{DTFW=wU8_u*NcEgk>OE!R@jNT<eaWf3_6aH#W%
zObetSYB}mji(x`fGOm}0LdL8l)4l#};fuG8;_f&4otCOjO;?DADIvaI4$ijmhsjxp
z_=Gq8V)3y97Q<SYi;Iqu@a^h!KevzcKff2c&|-}Z5v*D%v8~7Jb>Y0>s+i94NrKGD
zJk=Y0=Q6lL<G_(G+Ir+jImytOSnYH~oNXzp?RynVhbhCnuESQNVt4nh>=Q_F`q>KS
z#Kyu$lZh>J)rr#0CsDQ=b=s@bY%kH(Z?|4;QIT#{Q?neqEbwraBv)d}27HWFU@b#W
z=4nJW_J%Gu8{w9Z%+2f(PmdeyF6!TpF)c)*isce`6~`|v)dMHYospfCdv@k4>e_3u
z2iu+fp8@e;+U*fm%9-Z1fdnaE(_O4|x{MU|{893=a<Vut<-``P4!;+vq(+4uZb1$>
z#+V$YYJIHVkZGCd9T~n!c80I57*Z%D&$hEJ(T-Pt9II`Oq^V8nM@8~$MA0f-;R|^{
zC<T1^h1@-Ul<$GU|F}NosnCDcr~I#0k34=XBJ$7Tk^ih7sfC}7wDFO!8(M!vN6xc~
zUo;tNyMXt;Rbz{EpF`GTn10Um1SU^1Y^kTMvv^YSvWZ7K@Oe;;XgTk@+dG)Wk(Sy0
zNo66}OR<kEXwsZ#(k%Xc+;L{l!{silFauQbHS+?B{m>?*XI#Z%c5Qo*am$<^V&4@)
zu^e)D|9t|M$Ggxrps+c$c_Zz^dU`jw>FTOVQJSWwx*KqH_|@n1ei3FNlHqmJ-Sf^u
zbOB1V8Q1a(bKWcvLoG3Z`>E}A5$Y9IYZLxa-!@BB@YS~T!?(<&hqU4lNor1>uaP5}
z<kOZoDax6C+brlrOR5abKY!k=e=XB#qaI-{eJ192?s9V7Loi}C!q)r{=~DOYTM#&@
zr#qz4+j%$eD#C+h=E1EMu&@W?`$l4V*3|~%$GEdGndQe>aX&xwN579g*YvN>?@$hK
zYg>&z9_>(Z#$0Kw^yqBvw#;1-vl;OhaBlM&#ti71_M>WlWmrvx3`1;Xdo>zxvf0Dq
zGpf{Awl{)A`ASyQx~~@3H=3cP*rY|_<3}Xn=95$xj4l)_mTK(IbdioA%>5lj3M3i>
zdP<FXIw{!w^SU6O^67QH&9L^W!3mPavfOYz+xPpg%2Sq09+{fKcoB+z5M=4k89p`t
zM74@m5{f!|Ayb@p&A$Azxty1$$)&JU9$C;JRcAge*!ZL!JEgQU+DLGvFUU}(?$p4i
zlX8%g^@Utq(2J?f(G6K~Em`DQ4^t&ilb^kWHy7{j0-a^faCgrgoE9CO1H`eFG}S^&
zgNPHW*;7;0%UL*PZ;+q=Y@yHU9>mP#*{1p)n$=+c@?{-i%Fx>dT4gKFfVpK?J$*!>
zB7=Hy-CXuAZwBqK@Y0TF{olTHc@xG-8X;YTP88=^JMx?H&tAD<Zs84RSv*}dETcnF
zrb$+{(fzUC=^}}I?lzE4t3X@bqW7}57=C;mschaxC_OAfMpsjY{pZOl1M{HR==Gk7
z8M^vStbtX!I52WgZ+Rv@<;{^#1^c&zo%YzlSlazwSjH<8>>t+cIs6Il!_<F#Depz7
z-7R8~_nu!f=EM1xv49XIJEfj8@y1yZwP_EV?cF$3rBc=K`MqVCC&nWjbd;-9PC|sV
zr4&KNDQinbc*yIznl<iFQzFL6&XM2AWEe-|7)%gC{=)9*c^IaEHY51{@E`_Ui3&fM
zh&>j$66#aWv<yWogdWr>7^6``Rmk}X+PlpoWDbN{r6iIwVPp}pFBoJPJBu>p9;>y;
zR}h$Q6!hS|!>sn)e7~S)Y4o|jN}eSHKi$xg(DYCeNTCzZe~;VjKS=8o|6%UXikl=O
z!r*1GyCEsKU$O1E$#<H^uiTgf6I%<b8kb+Qg;S868x6UUB3rKu`LnByMvy4?rM2NQ
z#9L|;>3G+!Q>n@FSC6Y!ziAGGAO>+R4>R~IcR|&OyA$7@NL?2@R3OO<lo!Q>t59LR
zniEYEE@%MxCCE$GmXf1M%iAt!EQxiOY|&G&69-&U(V@JNk(}r9j=86hy+~I-tubNE
z!rf7+-=WVO8Bo|?LyUz0m-La}`tyDYWu^vkF&x$&e2^P2Vx6?OLwkARs|1eP<2l%W
zxsQFcvaX$MWFj=iskmk6LH5YrKxo<Rvx4<A4#8s7KB~emev1*Ogl@@nUm{T=6XIes
zj9X;Fo;2~QMmMfn!Bkr`#Diugo79tLgVmRfKbwG7g|rEyoSwGGo@0HZvfGmiu?)gD
zOaP_gFI|ARL<2Y<Yn=HRRy01DoegS3(~35zAse?v7(1uf8&F*%@3E6C!-U-iC@1zJ
z)s_9=?k(x5^;wVFqQT-41USLsGM*)6ojv&}8_IW}O3}G^i0}!3yUtCW<i^M(eRM?y
zM7jwqweBR!nArd`=UO^N{7A@eHGSd(G?cfjw25zt!+Rsyof<>kJsZ>>5@Oa;UXVZ*
z;bS5=(Ctq8LzjYhRt4!;7%^;A#r0E0ao|N<@1Yapg*_2DS_I;>E4#kJ?J}6=!tS<9
znCuvf+{Nb<Y%7H+R|htnb>Xwu=D3o<w9n_lPO|(_{Vc&I-f<p}jx=Ddw15Zvy#)L5
zHT4vCJSqaodXp~_J4PwEx+19B8bjfS;nz%sDKc^+(@2EkM~w<&7+%67MP|hrcKL0U
z7gI*!DXHjhuhpLjtBFssb!6>i|FjLOBOJ$}Kdusxk<YSoDGz4jSAk;occwB-H*l~8
zL3>prV<#Hc>OzP*evU7QJ&N|(*`X#E;9grCi|?;peLC0Lc-SYb&gilH{a9n@BbR5M
zR(P}!)wd0EH@QG?obboxfGNK0QsLQ-336xdfS2@`ETj4#jbS9RF9a(;O6kjlyTzdX
z@LC+Rl5{DO|MEiA9e9wPFH>8-C?YKKI^aoZM)Oll%r9~>5z>y7zJ+8|w?9)aB%QCN
znny_p81TNkd9rX|k)G^mSve@aj0&T~h%(Uy9W{_>xi=K}=<xhBQmPp%P{(|Z^nS+R
z)zOflUKgb@sO2f6RhOjo$RlaIyYtk}?OUWRy3590ozb!y%g4E+qA%UCgj25;npmIl
zh2cBS1uW~fN9O}g{A#w4nx@7mpdTA;X9p`$_TuWT6Fc9>QARZZQCJJEzowt(P`U`d
z`yw%^%~!n5o^jYNF!U8iUX8RpTbbx-5Hfs{+zo4qWB<*`8_`5lLxYRyEeff?ju1QL
z<`xeR##BWZdwvuS78=BT7aci#3m@|vMgj7h0?ei1pmmS7CxN&n2-T{pjK$jlI9^||
zn`<K{quDJDmMx1udm1(oh7=pAmJZofL=qBv5j-zE?SW{--?DQ}8psZ2V+PTpQ(C#t
z8aeBe4@<;ZKFeN+X`S$`B@FfaMp%Qk^V9Zc(x_lt7Fq4HtSHJJR_G={*5sJJG*o?D
z=c4+9EYZOR+gp7N7$Am#cY?BOhuy$-{cRSt@SaS^{nNd<8x~5Y05p5MiYCe}AraCp
zwl3UyZ(VTG{l!X+uYy0m#(;p}X?x#>vf_u`8dIXjh;HT<L!s>iWCscHlDfL{*3Cv%
zw*3`7SdGX6BHx`r3z#H`wI<opJK5CaS^6YR7lx-`XyFBZ`Xh=dUd=`2+_G;I$>BP#
z<ViI`DPzcG5kD$gZHZJI<_#jLwp$Y?P)J&jbT8hGC!;aik$S>sCTPK8@O1b-1%3Uw
z5Or|55bZ{)Pn5fhw_MF0$k(uY<?r>)hqXT%=DK*#d+jDWbGY`|I?6nNvk*uy9N(Ef
z`t5$wI?B5;g@O--73CCBeJk=)gr~Q_IDiQo7uZZ_ga7!}lsRz<oi<xHYFA!+LmtL!
zbI!^6m09yu%|a2KJmz=hj-5io7-tTe37>Z5Qx#gNMyVH3Idcvk$J~btr$e%*5+^Ew
zZox=W1@9Qso5{u@17!@(nok)M=dooMuoFW^HEm906_gjGkSgE#``l|tDgVfA*PxB5
zeIBWu&xKjzZL$$-&>}3`(?PQJnc$%Kxy!~dbGG`C5U`zu)5`i$@Qw<Gdm?hgl2Gy~
zJww^4rQez76P7~;%PS6@_-rumF56U4g)j#DAsDNqasLNXM;|wF?zD6GF^GL#c&Lx|
zqK0(Ab#oCJ7mLvk@2h%?{JSR?%;n?jksrv8PShts8MbbDv2h76ImPR+r^zersuShL
zRzS0acAt$Wnlp3{6wVbYFy!XKO}n5b9b9V4YtA?fWkQ!ty1?X^QW@O6W&(Q2B+tjq
znwsAJpp}4`op)N=IR(!MZN-Y%J73Jr?UUJ_aeFP$@~aTr_qIn~iMSDqPL-2>FRyK_
zeaA6_yxja_-pnK*%g4De`e2kbuviFNS7R(9?9Axh9f-b5Du}MJNe0UzOW-P@z2+*3
zKLDi_QSzANjJe|@zXActo=WWb_<G=1*Ws5da+~iKk(}|oBB+KRmMZ-KFdm^)Yt!V=
zaZr?p@5XtoE(Xp|%b+!e0<Aa*;x2VXKK$HBqWKL2)m{`eM8<k>`T~fo{`?ig4O1X2
zn0Mtt{av4sc=7Ckp>jZ3<+Oz0M?C3qHt?E8v{F~@igtJF$0yHyKF6S!RuJJIG7S0l
zL^_p4K`ZwjUmfmvhCXj2P+CV%d(YPR*$>;$kCnF8hPUs!)0ZwwgmIGwGQ%RC&d2?(
zg(~q2H>o6ds4STa;|I_l@KWVNOHOGsbI8e+y0Ck1HX+w{T&6KgQ7~VXRQ*i*{p;_X
zR~q1Cpk<i>Gp?;j(RNj6WBjENS!U5$3v&8}meqHxuV050<eA<F3pMg4bD~dIMn&w@
zpP>k9xK^OQW~-&mNV60|^$>q29*g5@ayWxV<4>ukPogbpKe;_bB^8~w#C{l3JSXF3
zQc1Qxuo9)_&XKt`R82SY$!6(|P?7M0+Kd-7y5x%>(F`qno@eZty=AUiG!tJEynJ)7
zcXQO0vXiG{XeFX=_9#{xfM3WCrM%=q${QAo8@|6%5z`{p?C$|f5{x0?zIV!5J@DHB
zBZ1FYh*!nTaImv0H#Sb$l80tdl^^p>;Q;KhI1|i}3J5=R3p&8ci56VajdpeqkgkvH
zTFsH$wmgl_>=Em*28@7~g!HkG0}8_>F?y!%Ys^cAOQi1(6phJtn=d=%9gJhK0@$(2
zmfuXGMXV^*lk6l=cj*u7${qDTmovGUmy*>rv1onqGG~HcGpb#BjXGydkuonZkuH@3
zgk4`NEkWcw`2Nz}PCHdJpDNjSV%*$(!21nZEh$mWD?V(&VG_3CSQT?!v!^Xz_qal_
z8k$USXwSe>$NC)UsL}b|-n-TwgajO_VuT?VZm4^C9`lGEV7ZTsQV{MRBmTQ?j5GZM
z?BW^`>FkG{1va{&F9sP9=M@I0QyO&w_l=6@OnB9gUO^77V=+J9ARR%+HZJqEIk;^w
z4&;+v@A9=Kzp$1J8GO{c@YQ~DKA%SBJQPqkDh~|`|Iz*S_`0J}XMXg{0*$TtVki4a
zNjNM1{4skVCT_!)s&5`v(rb+pSKTxPb>j=kHa^~DqCqpnlEAK)aa``YpEc(_Vb{xN
zUs-1Hy>Q4XVeuEaYE)>x5|2MMCNU=tneUn%(d@N0CG&QRv`%jJsVZk)V9m8R9~x$_
zyp7mU)|{UbP|}cvIw$*iuYW(x&U1FTc@O-mnc3CE^D%wv8!Jx^dpK8|x>_2}gVlFZ
z+h*f{rz6O=xigxy73=CNUrk(|eLr49j4RXK_UOb5>(7t`j#u$4S;i_C_x=20+I?jj
z;ld`NF@i2C8#@pr8OTp|WJg1$abl80+%f~xQofm;#IkUrYq+x=DA1H952A2Q|7H@#
zGN^j&P=Cf<SbAzxDO`WqL=*GmLJrN8sbNLKrnY%Pa)_?RZ8SHli0W=*;n3+>V6#LR
zgR_*`^RUU|D?@<=x1_<q0MO{<Y~R82M^3iU2a8_twGBRNM_-XgEUH7djCl1mm2WSt
zj^d1fj7)Ftl*Lb)rAf!urxgaxZnk8(HhnA=c=WH-*dfJs;g%w|2dy4=bepGY0*47B
znGdofbY%4h>$lfSJ$xZNGs?|;a@ySUR7?kPGf?BEO@R_CgrYt!q%~BDl<i}K4T1i}
zAnEaec=@C#bH`pS<1|u>*;b<L`?{KZ&O`0bi!cog@g#M{DSkcA(nnzqubgB{dj0q)
zO*6-+C(qxd2J3t=hm9lkn%!I(6*=4~@kcgWn>W2hC0M#qW#T%+cuq&ED4!pZGI=7t
z;!#pf`xOZS>yUv@T&XldoK4>^`Br|;gQ3kGL9Mq^8aiULO2U;VJy(*fguGSqBP|f`
zTlH{q_s2&JD+bTHDlb$4q3e{gCPt26Ce)L_`lmf8I`tXDCR$!LUA)w$m5sr9bS!@F
z)LPM)?W`8^ptTZq8?W!Y28i%iRK-krbe-gwD%1dNyJG%indId%^SA;e6tECl->VP*
z;}Fi1r~etk`Tw-Vh`>`3fq(WG5q$#RPvn2I$A}3%pNfmMjjxUMpY29;l;r3IL<Q&t
z_#eZu>(HD1ulF7i6nV@q^w03ozk84DFUN#X&4#V{h0@DN$h24+xz#0d3~H0pCklK&
z(Dd=ABttwxV39)r%1Mel0{2SP&1gHs-NjP9xVgNY2FDLaFAg=Kn%=h15RW|>GnpUs
z$5`iBmv>TyhsyMZY1{-B-3ImM1C|Q^_wRqs0X4lq7FnUD9Q6S|5<)`g*t;&#dhVeB
z&p;j?yDfd8&MJcB{`Xv+qH64o7|=1U%BG%SHNHYAdA5V}5A_A#@0qrItr!^X%g<n2
zv)#vIu9GS^7e4ntJFWvfMIiN&c?8?$enI)K_DB5=&|A5oJPOPQT~`UN!BCByM%N&V
zH(nwq^^uu;pK9VYu^YeUViT&^YrK)ZLLB|Nv$(*{x!}o_^PPZ83xd01{zh4jO751x
za#N-2(I%O^c_&uGl)oOaZwLb;X8AxNLQPXQgQjAD%-F8IpBX`T`KrR>01AfSi2AAP
zUMlT^LsJsh-fA$zIVF@|f%!*QHe&N=F!^S+$H_WQyQ-vG{x(h2n^(LH!SQuE6FN$l
zon-{BrPclBWF<o;ID}*OE4Mse=}%!aKTcB=89wPbY(G)aP!P%_30M12c5$~;P&SUx
z{BvCCh7lp<Lg>giJfEs>f!{-z;?9rfpo_wUMLr+J;eC;2IB{vQDI)LuG6g7%5)_t0
zJT@o!U~#u6efPn~?*8to@ct*?z0M7~GLkSN{JLsk?bix9*?vRuD&#s-yGIERfGiCE
zq3Wr==?UNcc2oTZN`VQM!3E1`AnJkhqEuMQq)5;mkl+_3H{1Oq=JY29ci)v=i7@^u
z)JsgZ&PHFQARTii685VaE|^$d<yiO4^HS-!|71C?uga7$-T~9LyMNJ13F63lw9ZGM
zlYHN-jPRQ@Keae%MP)tSio_EYha$9JKP*;6FqSM&XwL8JcQtrT%zv)GAcg&U{MW<Z
z|NoDb{tNIQT>l$F{(mg&&rr;pKC(HLm1NMAWKbz>su$1>efe(FxETwK0>3z0s=9n!
zk$^*hS*mh}=A{~aH4_GzE;Er|QbU3uWSzURA%J|6{9Y6lP(oCzjanRATDEY2<v72q
z%j#XY@*#luAn4sq*x;$+6}sE)>X~h<=6kyi?J8=EvAe5@Ot_XXF$0j#LA+E-D~Jxm
z03U~-{8qJEhlkmNaq8l_J&VQwM&ZR%x~M-T-{6fhSs*&J0cI17pe@47%gdr67KL=+
zYXJu~xew-OaN3H{AofDkxE=!YddndP4|Rym&p|-(k=l002xn!p96m5xZtu#kM!<E!
zrl`A4@&JN+@UV>HkPV>PuB2LNJE{;OlSBb_>Fi!InpA<KlU8V+982(k%^rwmR_dMa
zcQwPEZP%X2Xu#D{4g0*eSJ()!r_})}ti*fybRJ3GIgic-zE;Djh4TC*sN+V@6X@G4
z5b9L#Jeuj+QGWo@D(NUZKn7Mi<dM1RSI@LJj0aCkXmvv;y&M;f(^jPOmqC;vIrnUb
zjt}46`2?$twWet6c9izN_e=}oTP*rKyqFZGb8c;c{H9<8>3qE4>Wlx?7H~pXKOpmL
zV9~&*?nbujLnSZ>tn+@)bp^}j7g@`vW<z?gE(ymGAnvU9GA?i=c!?sY)l`ktAwL;l
zV5;?@rvzA>wGrI4<-c;jMs%Z8hw~s)Bwb}0jJr{Ko-t9VYPA47qXiJbC8i@2bda)F
z3(e?j+WS%6ArFAYo6ZLHnHGCXrfphd;{eBOWh5>8Qt#ii92k+g$Sk|vnY4ClwrVUn
zo^uq}8A!%G3P9F0FvHiv1wNq1$2vxKz|g}qA6lKE7KrdyG0Z=?xFxS!$J;`;IiO8?
z=rF|12LbYWa@NA^AcPr?A`Tl^xqs|T4UYPvcc<VrNFHV2Y%7fJm@w|&`jUOl9j~*i
zJYjn8W}M#l<f()Fs&V<08C2NJb^`Z84X^pGvB5DjO#t9nxWt{VY8J3*TCOAd@cga~
z#p=-PhRQA^@*pNN7V)=w@}Ys-t2g7&uQ%GKGwT*#Z=Y0L>>MVTd?D%tP_?@znyPva
zQ+lqVI2Zv8SlfQeEPE#iQ?)=zV4-b?@e@GwT%fIg*HaqsmuAbmQegGCjfuu~9F^7K
zlzWj&2F#4;e8!zqcyBwp?tUl3G4>_h@7Dh)Uf~h{N!e$2IvY@D@3y=>LJPvJRH6?Y
zxgK3sW8h{paRXJh!?$VnTs(CfkqQM)HVY)WEYvJ+SLa@6A*UAcwp9#|xqA(^<BZ(<
zmHdzh5(DMOHB}eH9C$z2Votm}b?I1e_+=vjd=g;s!!aqog?orrWAgH1M|;~|xIgS`
zR__Um6-V~U7(qrV4=<Pnzh|SU&i`4CncBFj-?wh5Oq$tEW1gEy6@aMZlJ|jC056v3
z{iv>7*Ji<vPq#~{bhEj;Mun#z=`S23U|8MW=NhwsO2JhIPdZ4WCiF>eK21bJo>OCS
zoj<RI_lcZjXf}(-0B*60hwJF=6$)q%8TmFx()MuNZz>It)c#`ey7qoU_5(89zQ}Yv
z2jDvkLGs#Gj-!nWNqfW~YXNl=9TE?l)@sPWs(qiv=k~t*4%~=*xD)yK2T7K1S93@q
zy<-aStxQGnd`v;rhtkX4WRRS$g082>SPn(3aI=9LaeW<Da7VatZ?HM$C<Rz;G6+jD
zQQgW{L5E})uW$x@sVOb4^DW%*3q){;`~0kGffwKOg(M;*YNF*hC}>a8u{9k&y?BMr
zFSFtPgV-JN>_zKuFOrE4Ssu#`sDsI$_Hxb8vWs_^g9gP8xLH0-ub?_`0B)X$Q$irt
zpw|6LU>BK!7CO+L8GJaVA6V>?7}frBer5?Ba-Micr0Cg${k!$l0E^C_LDKtsLDRu&
zTVVlLKJ#r+j{Q3C^O38$JS{X^J_3BYDhm!g!Gq!L5G$9y1*s<EWj;OI$aWX!7M)s`
zOx12crfryUX}iq>GvMGaMUVn_%v=;-PaSH&l<Hi~TUH48R^52(Xamqk@_)3>a>?+Z
zI&8fEQ7f?5yhK0Cngps<t8CxWF9bp{52}jl^g5?&?=%+@+~bBTBHM-Cd}p0xs-^4d
zrs2Mn5=2G|$wli&A2)3d(79cxy~p|3#yT2t{gnlF*3!x)g<R|i2M-B)*!vJwWKL(O
zK0_Il*uL=K_F)gr!vR#y^yQ9c0?L|K>S8i$hA(SIDc<|d^MD2KH#<H7EMQJY2OmbA
zd<H+wt{jyG0u5G2I|BQlL$iT-1yGOos8Q{LfthL{pj>my>bt#$XEoOsE(}t_V3T`k
z5nn#9f>+b6lHC_FwGYqVOWxynl}hjf*N~eDFn4QNga|OeO)>ySXxDgvBCmahKpYoh
z^YPxE)$Hrs+%OBMd{A?b<tXg&`Iu~G{aC=$dB)YG_5;Taey|>t+VA^=800|era-Af
zmydPt8fphwCaD#sfbfKXs8U;e8WYV65rFiYsYb*o4#ru8=Mk=#J(x5QjR~Wlx!nea
zs||HkEv*Dy>pdqXtB;!k`MpL)Xo@nBngt@9rh{nD<jhXulMhDP(Y#qdM0FQ1xH&q2
zy2O*}>S1CRuYL&Q-}C{X9kyu3_W&Ps5h=$kX(F#yRg;xMQ!%2Vab*{#wv2ChChVW-
z0pCkxy2KK9b-5^5in<})d0%gb4tuv0p)yw`RFJ*<fFP)$=g&HpO;2;pmLWi{TJfSw
zj@a~%kq<a@`O6*qn=jG8Gs~hl+SRXHTjo5L#XTN@abe>>+FK7+)@tQPQP*gwAW^5~
zt;;5n*D?o^=yj`dW%?AixwxiAQazsFd23l>ohNWc(*x?`-NgtEY#;%%o{BT)y<tZ{
z$D;iu$&m*autAh-RkQdu+{54kmpgf@thM!aS5?{t8XVd;Pik!ToK_^OA3ip^EB8$`
z;n#_PYv*;xiCjJHmjtReC}(}R-;PNuw!T^sX}6W`IQZ=6xZE5PgAe|)+H6dBspiz7
z@1Hw?VJl!H!h>SvP*i~$*8b$xliG31_xH72^=W)YGSH7fT@I3~Kxv1dw?Vr(6Ylj|
zV$Qf=wy1@bgHp3)7rk1w1fCR{op`^ESJm3{oYT}#5=L4UHn)OHr}%jlUBs}vb9fFK
zbAlDq1j=SYh_f(&4@Bps&G}&;1!C*YIlG(>)E;}iT)atW2fEfcG`}3f*@tnKJKsT^
zLqKC%c2HM)C5J`nge+;N=W6Myu08q}J?755)!o8X-(`L=ue$rRH2S-$jPw4OS@(O-
zhBhCSR0GkgldbVO_m;N6&6$MPRJj2cQ_f`I4ynp2*XsksV5{ztVNxoGQ@n|l8m^+{
zZ@`5^+cVQ4?>y%|?zPr@+nLdFTeL4k#pd<CE%c6l&+hJ$gC|xn%QqG-YSyn~&!;dz
zwG%uYwz&JtMx9AfZhjdTOqW=Xkc4^Az-^O#_QFNohyII;yROG(Qs1Iy`!WsNiZ0q%
z&g&lZOis--Th;)MLw+Ea*7k0nd64+^j2NvomweLfK~VE~_AA|!sv_@@^cD@L@>h5C
z-x_9ODF{SOZ&DXJ;#)fQn2ODz;JlvxnNLQ3rypA7mgKxNnBNkB>;;Ne$R+diks+e^
zHFtx&2Znpy=R{K-qWPlzYPs<4N^S)dr-rKI@$Iz8c6*-FkK!3$r>1w)-W|ry=MriK
zT;)2$Mr_NIu1(s@(5J0AhdFfhu%?N+j3v<`h@Dog9*pY_b*47szBB6L?1%HSCiLaf
zx!4Q6oauKkJc@*%yT*bmRI#pulq<D^>gTOD+p`_z&IeCxYwzhg#BaAnp`WIO@bn<Q
z4>;Feeh(k|UK>;CcqWhnzFW%Nxd!EFwy);OhhAS-WSGq524r5;nLiegnp;|Y9Maa|
z3DlwzftoeDj<^>E6?mRc4}|sY+x2t_Tva2U#{<4Mp>_xvnKv9!f$SNIG)eArWh=f9
z7!CpWJY+OH!^PP2iwQs+*8p20x!aE9KKoxWQ6evxLsJJ?m<Iuz9Zul*a4_`a@vjI@
zi3}1CKS>gx3obS%ENLK&BYB2f`71mU`wGBjM{$T10nh6>-%=;F@qJSzv-&GggFEM1
zx#K1R$2(mkrl{}#WYXvigg5Z63?))({tga*bt8dxn&PltwIuTYWU-e^An*Kq^A`ok
zzd9(<n_e?_xLqP!0W>DRu>bNVeO|^liHY!sa9D6a&F?gm9?S>zf3*hg$sH8)e}{U%
z(~<x4G{jF|qTpZhBnRI|Zan=RYX0t4dvYHvB;ouO{)Xm&;f(8l_RnovgcwWWT{psC
z(JH)&p6}Qdc+vj8)likAkv^0Y{6);~fTX}=zKOm6AO4gvgB|6F|9iV2dZ7AW;3fny
z<bQ?Gh{9Ev=kM69{|ZVmK`I&`-NBVzl)nk11Z_V#&X4=UhtwQEeQY4<KU7EfZ<G<O
z{wsppejoFiFMox#zhTX4eq5*e7uLx|`y=FEM8g@!DqO5t=u}PgZ!6@v<of?Z$A`%5
z73@E-!o-F=gld@oR7Qh*S~bxX_)}Mh2$HRz@`p-fU{NiGHp;(v#!35)|7kur!r!?i
z-TGgmUmQ~y{HJc{rh?QzoOX~i%M1Fe=x?dXre_^basNSEgm<?182OJ>V&!=L=K1fU
zy-f`NuiG@Tw*T-P=uolvmjr(6Dav0MaewtTXugGtVgCy>KSSs*e80PpFS->se$i9w
z%@>0U&o|Zw8Q7T0*BYQW>;mc)&dpeO`%&G}v(wbr;jH9H0Ss<-Az~aexuwrjg>{Rf
zo5<#&8N`@jT+|}M75#aX@n`1`H7D!2Yn(zxY%HJI*~}FUV!Ghy6W*#}16%lS;E!ke
zVZC~mtFb4!+i0GvDYGfHNWy^cDRTM5sP==LSjWh|>9hDYwX_)ahd11vs(b1UmA^!;
zVgouABqQA(@LAI{mVQJirC9rb;d=uNO$SC@mxqSf!G*~REY4`2k2zoA?nZA&x$gDZ
zgo7z{GVeSS>Hq3+{hSa^WCk%LqHIj_x_p32nP<#iJN}GH?KtgY5SlfM*R}UrDi@p|
zW!zwjVRAd@m}?-FHOtwCXhf%8@Jkglg7qc)XVXHwuwXT-_69;Y#%Dt;kz#B&qXq(e
z{f?{mq~FFX&hRaW;76{Wxbh6-ed`D+&|G-B(er|r>X))X;H7!XT-CCKS94b{rSh9*
zx6W21G7SuQ`RrQsbv=DK5QG7#?*R3vjiwykUjjUtS8}(bfKjjPT-bVx2tX=|AR8C&
z&jBC$-*<9N2L5WYBHY-aD$_H<rKdvz3G{-NC@a95&~yZ-ED4-IXM_(NR47IppS)sI
z_%%)<!_#O9_UUH)Ro>Vd<eA(pN<;`(_3xJQG@t${Q*GS@9aH!X=NrHl+cI)i>Q|xE
z5<unLe#9I2YouZZI<-v5{FCk4Fpv4WL_6TaKPj@vp`)*CzbPYj5D{niog#yL#DF{l
z|3!?Xr<1qbFEnNR;6Ewee^UR=qxiRegyE*1*6&K}VE2Es^OR3Z{zeAj&dnzvguf6~
z*usCOIB<Zn%zk4;zj8US{*zlkrTtgte~O-S{;Bn!Okkqphc_5-4N8j?GkNiW=oe>M
zZHhHxk?MVZr}c`*|Fr!*pfci55xru~f3yFjKEo{^O1<~*dWyxb{)QI$PYU=?Dze!8
z<=>D0%>))}x;vBp&2Y9qA~6CrL;D!{fPX_BZ~R*l?*W66b8G3EfAh@o52v~R!)dOP
zPYIxyU(I_SnKP#OyP?)++hKtnPJcw3hIHlqFDbn5|B>^bMzsS5QD<Kf{$?>YFy(J3
zzt}~<)j9MVIB}%dz;7*+(SXS1lR0*N`8)d$PU5IPDR5*vyFygvlfM=3UnI8k|ES=<
zsSl`V;IdF8uc@tObR@WpBT<2GkR4Vm(?eVf)Q;1eii}j*^xwlxX;%Xvx+#DiI`*MQ
zl#NV&kGLL%BHwrfZiA4|!{zdEtYF`8S8~fcMYl_Ov@|4&?-a+o0$=58E=3AKy8}($
z1j3C7JUo2l;4G7$2aDtAfDiBYTqL-5sjxv!r0>5_$q~mAhFA?dNWbqo7n>PRJl|bh
z|7Ce7>B$Ho1Zd#vfYZW2d<R?TqASY%3iYky50TgQPrHIlet!7%-ueQf`HN;&;O0SK
z2tMJq9`$a8+Rag}6SwiCO!JqeT$iQ!94f}q-^!q`Bkj*L1(O^pcvKG`svpNnM*a1&
zzZQ!Z0;3+&gr?7kG6ijXDEo+8|EPmzKLY~}pBN3yfCJK9PoP_??li~VgovrI^c#7l
z&zV^2)s13mg)l|H5DzW!!Nd~ampCAzb*dfCMpxxv*MieUEUsbei@GN)Fgm->Hfc*<
z_#h5nP~XQWF}{!7Y7iS^#KKIROXo<g4`r;2<D-U^24RFC4mVIAIl!@=xhP|SYLaHw
z4(CNRczh&S;O7@v8uVvH6`zXntYm1&kW9+qBmOJ8sGv=Go+~BjW!0o6oBQwj@)Q8u
zP-8K$N>N2<(M){>wYF4ToQy`8Jk3|$-@OGI29VX2Ho)~0BS3cz5dBwA2BIu0=>VU#
zN)nb69xaykmp50!g3NJp4Tt7@4j1E<Gm3!QLtJoj_(F&zML<4+@E9T_Z)gy1-Q?MG
zhnW-ZNkL2q%~1dMNWZGUp_IX)@QH)x(`-3OOs1n3TqTXLh<?Lon1c5nAbvt!uR6yH
zMQ_B55>dbu@IJ)3M0+W4_UfQZ90rxxk-l9sME5TW`N}AhkLg%wUS$3X6t^(9e_;YO
zuE|4$D9cE87Q}0Ghlz!LHIANfJ(L26m~>l71xk2BcM9KQ0N6fXXOn|6$sy|;?u$VW
z_C<*p;MaS@9&a$fOaaiHot>^?rDpV`Lpo%L(xxCFTI83@i4rp>*p?9efdGNe^K2;C
zCx*Ai-%E)yMQKB6L|5jH9CSX-@bXR_2Rwoe)LaS<paGR)H2X--W6>i42>>0>mSN}H
z(!thwc{6m~t?>Z1zQsBOP$ns)hM;mbOe_b`F`K%1OaMX-^IxOaqRl%?Y9I$hfAC`m
z@I3{qxVzARn6R1yBq`u%ujc@(PZ7adt41y$PllCCv|nH%d{IG6*v%gboJjD(ew7eM
z1V@kH0$E3?RuRsp<je?cL&e?$7WalV8;3;!6Sk0m;bP6`HZw#39}gJ*W`HLn#TG5J
zG8E*C3NRglL?2l>&)v!2$<_6ByrwGH(eka(3WE8FQOq6vT)DwN-?V{B`tyGr87{sE
zgB|i=fVt#|;ji?znY*D)Q6d4j5f?7FW*iVSi>}AjEL5*s`Y2D_#^80Pt>hCAsf+F#
z?4?%PAde8jqH$8Wu47yP9~YRSC{YC57=d?=MvioD4WiSGxK762fK^0aj}yVr%|^yn
zd?W)y5R1km12(9Bb-)r?;lAq%4MJ3Cej#_aUx0V`q4fUfvTpk+EHN?h3}5=tLgpZR
zfl@^Jso>zoM9nT#{`9z1enSYK2_MM%voHou@h{#-w>=>7kb*baHrbNmv-hLxk{ca+
zvv26}!1(dUX8}>;(OOGZ5VrB>mtFTo;$H!_rwk8pNyF8iFj6enmW2W^64rh@x8S&+
z{TiYG!e6dzf>QP!qmL^uUdvcO*yZzyBcH(SDcm{=B#&F`r-a-<Q>n5EF2p%1QYKxP
zQc)fFz)2EupAI!qn+1x3A$O|K#RWRt8m>5g@W40NF5D!cLF{Dnmkrf?UA`u3)r=m0
zPEKg<Gz>kd+KjqxB!diIpdX4MZ>a!!pg6?`Op-0Fy6!Wf(r?7T_Q`+8Oj^}vstXYn
zZ<XzO1n&!v7ncHO+O6a3VZG5IbemGQFU+4Sy;2C;fhDT|==>slg8B{R4%=M^`|RZN
zan2w8?z$(Ef+?)xpr0YJ0>E5>RlnHmvSM2)id*|8;e)-~zKP&_?VmRT()T)Vhyf-5
zX)#Q&78>8J7=+H=E#0TdAjp<@P9N?b#Vi4$s^vO99L3|zIEis}A#%22OY{<C{L@@=
zftPVfq@{%+PojyElEbc7REk9SK6%)TRnp<%G&A7>Md+^kMFD(t;Q9FZ5DW~6OZg-2
zQoVzK?)MP@D)j)?k3t1L1^cSzR8DNKO<B?(t<y;x=iLijQj87B+>NUg(?Y)43N_tm
zNBIlyldQ^gc>%~IvBS#bm+hNxR%;h=i!!i)-p;<C@+bngRqP+yuMiBvw#mNkCP`h>
zAH`Yeh57H`rTehR1lc-(N;`$-V;?O(Af7*x?Xu*&oAT@GNyR(YAG^C-aGJJBnXP&r
zRb2O6%;RNxkc>UR7E6wKy>GQs{-J;2)I%2cT{GhO1Q`~@0=3&J{(V4@w8xCi1R_Lf
z67IC+mN%$CnRt+oQb7Ov^jW292_o?M%$kprF020nV40!A@*d|KPiD1_S$eRH5r8d{
zz2-9L^!lEw5_!sY67C@1`rAA$qSZP1)`sEOQsufDU?D{vD%QRYN`E2?|AvJE6yfO~
zA^Y-90vk-@`NVA5^|@I3N{dQ!=M~g{K59WYBJ%)}kpr+bX#U|PB0>WG^0Bpn@U2*n
zIX~!uW5?$uNm205Lf7I=5e{he^d?1YR12-Rn@SO6pjZt5qARIY=~|dwbb<qz#jSL_
z5PV-J3)EfM_Zh&41kKkSa$z>#GtDf#aW!8ZZimkpq9utm<gb*z27dEg<&+895jqmq
z)P})0pFRaA1-TaO3WWzxc-oF)$aV=PIbOnj%W`U^?ab@f3vPZP2wh=Hud05c1NB;}
z^+<|m@-+t%5zn(GvW~mfMdQ%Um3oM?Bcm6d$lmtf%;|3E8$d@f&ii_CwInP~5`Cze
z*yYT)-<ZN<QU73){`vE@BA1=jH<D1RFHaq}RclpCpD**C=b=MLPUY%c>Yu|0)X_+R
z!m8t*9qF-+;Wd`r?0bc!Gxth#eFpL87U!`66j?Bz1y}e7%_{J5jk-|xihfnInS<9p
zdOq?6-Tc%&alTve446hZRT2vlT4qj2$4d>SL+dOtKn0CegAP0B{MOpr(&6@?BK-P(
zEet9_ZmjD9Wv7${ofCnL;CA3D{ZT6)3P!Wk@iMJ(_Oz3;k^bSL+GxvO-TeNPf7e3D
zQnrYNt09?Rvk8ggljRAcbF*f;?`XaG?*q6^-5=_#Xlj<~KGq+31Z5Z86$Q!MvFiog
zUF-V-_B6??1|y7n#<~K-Qm`?B9>>KZtR;mXWx5=f81{~l$-wwgFvR@zLnCBQ_!;Rv
z0jt5+8|Vc@>yxkjmteH;u^j@I{*k?2+2v!ir&~CRG~7TKCogDkoW*Rc8%zRAO(@lj
z@(*GLclExW2Z-hk!lH{0pD!1Fq@x5^Tb}*gCkI&}b`Bx{6^ccmrn~WObdtxP{A^_=
ztz?2$gRmXo-k;9b7;f~R!gpbYGonrI&LtE8y}<#Y>xbLRZ{4kQS6K{%83z<~nF+#F
z?6Y4(D~^BayRJSqr5Y1?_hnnJnHmJEz8=h7u)47-5_xuVojMvfXp9F=sOWyyWqluW
zR!As;Ueh{87*zETp`#378ytI?^xBvX_t#)d0$~%!=|*&P(jm1}p#DX>Br<qwc-wYM
zl5cgk=W8l@UE6td5^d+{TCzw%;?Jj$JWgzQAHlDP)>M_13<9~7wViGQwSV`j$bf&O
zvs#O2k*&V6wleN~b^CswoAraJggbtolFTx&Zd(K$LNm+lQ|s~&WBmFPERT*oUfM*Q
z8Z@|y_ktRHEtaJR_kOmadj~r-6oAzLC)$JgN$q)0&ZX;Ew7CgdMFNMh-QOBL4zR!=
zfE-(m#|E4d^$o-#H=p}{r<TZlV>~GMY&M^w$fd0Ze(vCWsQ+R!LEVAqBiA-Q$we$=
zh*R)BZKhRmqVrJqHVQzA6ykFzbuM^!IWDeK6ujLl4|`d-bb2$tHoWXpg5GSbse}vJ
zLq|uS4ZCJ`CEr7o?-Jhh?Xb2tbpL<I`x1C4yY~M|5?PX@NNAz5RYJzTw8%F0EJK#D
zGcv|LwxUJxl#rc-v4@5%Lp_r0$sp^LHDk*%WEk^5L-jt-^FF`l{cZox`~Kg*=A&`$
zb6@v4=UUEnuJb+D-Fh+cTx`C5zkRvvgAGAjp_`Rc8@jL7TPj&l_1L(pzI%ze{+#~V
z-XGTPx9mo_t<P+GtIBH@9U!rt7NX>CxGW|kukmV*dHT{Y^Zo2#Q;yZ*0&2Ry4#(QO
z{mRhYBlvqVypkAcY|xxmnh`s(>0^{v^`3SYjIdj6d4X`rZE3#bZCLLv??E!GZt;1|
z{F9s_iOeBFvgeoJwIM~%3wbpX9I<XX!KOS>MPOmj-(xEekqel^)zz()l<%%usR<ld
zQ{Aih68UZD^V-VeRW^5KjPZV?Uk!9_)m!5z*hZxz3{)5I!6QQ}W!=WBW(KkS{Ll5g
z5Y~D(Mj#@jofu(@hXsU~>WMbt?!7g{_*_1e5@TXa`!1B<udT^~N(Di3AY7%@5C+Li
z=v)k|oF=5GVWe-f2C`u4cy$rJ>{wX7PdK*C6V+MmRnd{m>xA~S$o<$Gw6Fb58P&XU
zHT&Z%s_rI4a?-0YIxX^q+7Yn)jATUV$z*f!5j|FqLK#?4$N`oMR~4Yr;qC>@`MY{<
ze+F-+OfO`u<<6z^ZaPelE&kG4t(M2J55KBDcYq^3QUmu2gd6=%rL{5ka%rleh->#g
zBlqeP$Q)t_ZrEUzZB5saZKzjJ<AwsVSVhnMiWE5}B1t>Xv9#|ABXPVDgy?7cQ(?|{
z)nJI^d^kHtdiRjWKHx8TGGj;-Z)9<1c;Bp2(Apl`v%OjNw`!1vRF$|8$aTyLgUB)-
z31gDv#7|9;-j+SEMeo8qpfixiW`&uiz91YKkaSIfW((E8G{Y-TE{f1xs$o@LJZ>V`
zj~S1salAk19Rj(ADStcop~RylXRfBpY}^Zu9*Lqy)ysPfai66-LyspD@@o}M3ltET
zi0HXj7Y-4xsSJPG209G3;7{&ENw3rg`>kBJM<N%myT_7WIqbrCk1MR;?n8Uf-g1-Y
zHAv|i+0_w^msDNFw@JrE^X+kJe_H%O6WTw~BD@&x=em$j+7CtSe)xbD&j;Q#c^Gp_
z4%2h%9SE1p?;zex>^?j)#=BDIPq|!VQ9VnCRVYGFl6^4VA%3jTL@vdi+lZX5fpZUC
zS@Y$lH0iMw#SS)CG5vMTJ*LOXq~s6a$Gx6r`l`a7$ZeOV$9EpS#EVhv*ihYEu1g81
z9p#}TvP0)y8n2o6^4glteZUG}5D>PgaB}5FV^1&`WMPF^Y(BC|t9|EEtbbiJBb2AS
z-9@WH!ld^YqU&gxyad=Cdj_$`WFKPEp+jj8?Jzn7tv#qQZvdqlQOh^>W7MVaJoM?K
ziRGuk>*P<>RQi%iHX;(y*5V{SNu1Q}HZcrhRYra%sUSLqg@I^Ov?f+gyqWR&!`G6z
zSW=Z0hV&X?Q#I3P|IC&*!)0be=95z^HsWdzMPIdRNO+*a&860es2N(>ZF$KEDW5Lw
zR$#6KEz;UPeIAN9yfPH%q(tdmY`-Kw=}lIgLxd!Zs_<HJKK<qHcG49LiQY1<s6S(L
zLz864FibZ%ZDxV-4oE%{N-jC32xZuft<X|$6)bZWdE!>YcB7)}#TDN0N_=*&MMYP!
zG&=&MLypQjkX=Z5(`sgJ?D}O!(V$LYM4lPd;QuPzKeK#(Vy^WNacRFd{>d)l$FtpN
z`w;=vnWPM{ILrG!ZOD;{WkSvl6g!5Cj>zeH0mnW5V&#(g4a|eyN!}jc39ZDB5rt=0
zGGWsAw=Sy>7$X|Jq{U5AcjI5HxVAaOC3aCRWm`DDkkiOSi8bRYAHW^a=kagR=X1^8
zMpqg7^A9RA4i!|z%-kF(j&#0W*(KI9eT=6(#*MpumMEPzYoh@CQSz(piEU$@<L!r&
zS5=aeUkLZ&p+Z^IW43C3Cyof>y;2{9laX4EGyLtGArRenuG>(f>21%#-rcV1lRdj!
zrL!G_n1*?Yp}N&;oLp_j_g0nqj=Yk8s^C5{Tl=}OOCH&_-jp;~hJ6MtG7k6RDrR^~
z+Bl)zKf9(B72IcDnJMpB>vZIK@vNT#S6@h9pHEA|T6d)V0J(DZuy3>66>Ly}F;ye#
zqY57N>g}sk>!7wCU3)bFK?dUJl7EUrL+4Oo_r1@ml=>?e#ctNGVS=Q%T(CngZKeee
zf&2<Ze4*ZR7AQI@VWB>0t2<^$7;?QN-hYfJT(!}oYgu*v*U5BIwW8^kwX(y_OsJqy
zM|+FJuDHrE@~c2J9^0N3&Jy)aeg#k7hBp%f(2I5_uK{V?boW62qpk)qK_uV%HRNSy
zbQ@Hse#!u<bFpR*vBQXgj+7l}pAn?E&xvw=IXm@=nKo!{hY=li0cs!ZJ3|m>CtozV
zTTDQoR)KkG8?#+?Rgv$T{EVv+C)ldV^I;?h$RkFSY0q~5Vxv0le53O99^b0Kp22dq
z&JypPQ&COwP_u-DiPE*B#=>;o*<xMcHA^QkgACKlNs1TY=pks)T}gFlL$;-sowEG!
zpvq}{D$XZecfD7LOgC0UHVU#*_%>YMGq%vvSERTIySiL)Hh-hQ6dLuM8^d|q@0juX
zjY$tCk)s;-DYN>!h=VsH-rmPj9@2I8M)ZWI4dKrD^>a`d<MOrMw1PIf?jQxEbywuO
z#Q1R%I4TDV^x%ET7i7&c^>TFQuM-Mf3<FYLj)`2)Tk_kmOz%ADs<n#6)+#kDW`8lX
zdl}MwH(gt^IMxM%i7HNLp~n)c!VEAZNvrl$Gesseg`p=Gv8J2K(Gm3CzNVF<t4Vpr
zUJ{zejJly^T=~oWysrn653UVn@4`#xuh~EGWoU>iob@?egMzp4mRIN16cH-VudLaN
zO5=|+5Pv;ism*{Q--5E?%&y!fm-oF|&1)xpMyA3zoLtplM<kzi-`XJSj~p&b8nb<r
zex!sOV;BahDURi<t>HJoFx0LY06R~7<%z|HPT#`~HWoPFl<1ev>Q}+9-*?^&YMH*f
z=|_eWRG~a`2jX9)&5ARweG^7LnXZhilp8)wYUrK_5OtJ^55PQF*|-4B+k-kAtvSy8
z>>?zyO~ICtFjw;w0_D3Vo9Hrh;lkH_fito4A8KT_kus+`l_}#x&t_f&zmoG7q!29F
z@Ykt3j;}(4X3e=NV+H_ScqY8p@~Wx!f!+AZZ4_bGVAE5WL5eq(611+vSc)Bed2Zw$
z0~6K;y8}9RXyT#w<s@89HzB?ysa7hzCWj4Mq{&FCKg%%&`f*jI0BvR`Sbnan^uut_
zo%WqrI5q&I=w);V_k@X9RM4Op{US-^Bt|qZ#MA^cp=)iPH<Ze-VK=JlnOAq4oG|go
zp_cTi>g|rN8d7-K@#UxGSvSeA$mX=gh$`+3rw3Jphsbp^N;{Oi$2oF6BdX?6Y0`9E
zJmFpg`V6kb07_7lldI0SfTxZqQWq7etyJ5M#q}!csv^}tm3^u2n#H0r`X;6MCEGYP
z?tOaA=L(ypNJ5y|+jEE!wM9F=FFYCA`fRE=8_W33-C7}0+;VMGI5~Pmr9ghpxu-xT
zMVJ+3C5bhF@;OokZ+BeNG#a?35wWfN(SzjvzJs!L8`3rw>UlZTeGq)&vW0E8+_$fs
z-EVi3pZf3VFgr#gj*iGdq_J`kqbbO{lUcdhNlD2lV*iu3F$yF~{n_-gxJQ#~&mL!r
z2(3X|d^#V!`B+LXpoOmW9wngDyyiShtSpkTS#P>bGAz~Kw(t9_gSPZcMJPNsMq5^6
z%O|G2JW3Vi-BewfONyqo!~4#p)Y~exT9-ttxQgC?6vRt9o4$byv?lx3a_PlJ(|bnt
z4Q-P<jEQR6;N5|x1`1uIA9}iY%;Jj4MI$ud%fi%3JF$hD?poP~R%wRR5df6&A-1YR
z=Vr~lW*g}7O08oi4Y&FvB@aUImTbQ+nw?1ZvZONM@6Bl*XwJINt0uuzeKBhNh7Kdf
zn`CK;8{#v!toV>RD%GY!sGJ?hFPX-*X8KC`H?R?f8H}X0z9EV~GeR%#Ay&&_7KI33
z-}vtkcfr&i44v>MD$765*bmKP#Ts3uUN(P_M~@10wfW>PG2LA1cBm*lue_zv#U^Y8
z&N3@ZDX5e#;?_(QtV;7DJojs{Adq+abAfP>DO5+2ImK&F$FO=@nb6?l5K{uzSXlJq
zL|9W81gCeH9L0A4e_2E@@qp7zOzL~@eTEFg&#g6_Kzr}`b4Bxv7qODP$Da55BKHyp
zn~%-*eV+NiDm}K1h*1`u%Q{>iJ@B}0MX*^wl@ed7+VuXzInK0YT;dt%<sHOpF2{-}
zdk(||LuS0!L{60L#ldG3vf4f8%uCu%6Wq%V;C#Q1@5PUEew1!dM*GfnzpkdbDQT6e
ziq1*J@f;1(#4rdBj?<Bhfr~4BY_0Co?lt|3X%?`%QQvClKrFhTw)!FlF=!Jwu!n^R
zqwFm4cU*owK0eMe<&@{lz(BdNFk9bh<}aNb<)9;X!CkDWQp)|w21ou7^NF;q-t?3O
zqfUJ}N3HxL3`F*or>r}jR#13V*=UxOppB)e*9|qlJWtYO!rK(q8?qBs3#$64L$Y!Z
zKQGU38?SEj$wB1&92Dxh2G?|=B9HL#%A=QFt%b<WDWbhRmxhw9Fmppe4gpR6E4yl5
zZkVvq)n1!^32nIN*xKmx1RA9WIgjCz*=&VV?}`C5JG;Wlj_Nas9sVfLFp##<gpR@1
zEeLUZVckybnwPc1>?TRfWOxtxUN5@fKQ6WUT7T+uwwoovJL^=coQEPM9{res4qBy9
zPvTr6EY~ilTSIwn5>*^J2_u$QfR$1z;Kz673nuO&PA@q(3hFepvp7-iO%l+n!x15O
zXQOTl)urBMCcT-HC8c#M`Zcf{W%S%w`g(V&C0%|LHwC-2kwRJWFU*kh{}fIRnb1!e
zm{+2_$QQ&HTPxo&(5$E77EF~f(MJL1kVW^-MK-5J!6&oqOUTsD?$p+rBp1i@ZYrTP
z`?6rR4ld(U16!)^vl?e2<=f=vx`wRA*tg>hDL8MRXB1+u#AU)p_TyAuUita7A}U$e
z#1LZx@Gli2v>@+VSB?SkH7~%DtFaP?_xnd{(V^VP08pbc1JmrTwIg%PeOZlook4LM
zAG2*{!e&kAkJv6AtLxbKg<O+fwvkvh$8$WfbU<fKzyIw)^4-OklYUgX6?t+(sz<xz
z;o2!B3UsYgz7jc67z%ADkrDU+qC!q-%Qq^`3%i8rQ1%d8W7$yBlu^aN9d3N_`X~}?
zP$Y}qVTnoGm|jI9OUU{&TzqT_<YJQUOkB{n(dda(a==sjMZ}D9ngS!fZSTs%x+&O-
z!)l~*?&5Q7c*`%9L0=nY*#^Yf4mWOaR8!xu2VB>K$zW`X4KNx_U|eIuDu=|Rr($03
z`J^GiTbj%JI=0ft&FkqdQr~FB+^IC}#>7>CUvvVP;`UkQvGBed>&5abXbc;c&qprk
z`h&=1>`?f$cXe$ER}M)&kSM=ckU@PzPwJ3mQY}cuh|#tdLi`SPwN;v9s>tl7%X0~1
zUidUIZHpCodOYJpI%4LnKB1M%$(j9Zt}=XPHeYHAqoN{bNv>!jgv>GJ$i^EaI?;Me
z{-7K^+BWLxVCScMDh1DI5qv9E*x4&HU~i*oEa6tp9L1voCe}ZJ(<o(qKf#8zn<lnE
z?MDm1Cd0*0zxT%eCH_8o<j5Wr^5yzRZWY<*waE5xzc2c%Gxo6sIz|VbI5Pn&Sb4z4
z<{&B$7IP+kNTi2*%%h#OW+!hAwHFi<N@PY^C$O{y`>m!aEQFBX%hu1BFmANTDx{vd
z;8vEdaQLtkp}cnV(^qqK4CiCo1_b~Kn<MeSU%h%_>M#;#KrHYk5fC#y3H_Osly6=I
zCfL;RtCuS+yjs0i%$+XpBwh>r@Ped<kx;P>_LE5u47l)kDnsY8@=WQ&-cNOJb0+;c
z(!GMlvX{>`3Ajl><niIOxS~^<g?B;Y&_al5saL;Md1g@B3q&Nd(|FVKtEGy14lKN-
zSvvmZ=FN?jefAewh!50Tv~QUb0rR;Wx>i0jLkW{b0{)+18sp>=w##f^>dY3=A{bOo
zOy$)*n|(+e8?R);_l<Pl|5heexEx|yF-XMr!52A}y6mxf>LWf5CDk_kmNz&oknKg6
z>6}`7mo%Z-wkhc}zz2Eq(`ymT-2Gc*2RXd=(QQ~KfL(*$-h*m5B;%hxRBo5oD&tZI
zM}Mg5Jb(LB*T$T%E4t&`Cw^G1d*uw|ER1Q)l-Qdlc5l8%PbU~+_RGsr5MAdau9$Nf
z^Dz)JUO%PBZVbk4qiu4mP2Xtba`G?hGv~}w9V}MJfNvudt|g7miu3ZkF<D!pV3c{y
zcodJPY$N54cDg9xK7x&oM*K=f$W&QU4uUFvgcn0%%sEUm1!k&)qL!G}=DgPUxyt#F
zEK#2>bbXA}uo#=ziGDu0v!eErt4Zl?Sq<8?4{rNieAoY&856gSD66kR+3<{LJj#X@
zBXQFRt=PX5-hi&)3KEecg=`1UIH#7w&N;Ix4VJZCHiktH>)tX=4Qx){OSD#A_SuHv
z;vur%89G$m?SvmQVRON_Hf6A&M6%m(F95&l!YU#Ah$y6Daf;07kz18S6U@2iAy@kc
zB;W+eQx>c?$P$zE?%VZ^9Ll_LwFh=#xDF7Ht<klKiV%Fief|1XJBTp50!yW~pih(I
zSF9a-!i82c*ibHpvmTnjBRr`J<J(TEQTA-z-;H|I%Rr3uJ{2F0OLB1!l;eg*QPKmM
z@x3qBd3l2UUZ*Q8O>=t@AoP@$sd<<`KPAQc_T$^|cDrY@^bS$8Y8+0K{*#-)t8uI`
zfHN;`zfm3}=I5?9xDc4)d@6qB=`Z0Fb?ix%0xC2WB86AJz=Drp#oq9ryqx0iBKQ1F
z&4h1_?^L9JO7XIM4@GL$U%qDL+4|_`<lQQ~Z{F+&A!{|R=m(*uwNr+;9>veoA(eIk
z76|;g1=lH~c$UNozvJz?iQ|!v{Ep*o6c{k_Xnx<<FoU$fb~Y!QJL{t<Q}`uS{{R<x
z7w&*BCF_w7ZuI)owFn(}$96=veC%FRxKQC-u_R5mj=X%;ONSkI$>^ry0)34J$z5fi
zi%D;0Kk1d+Ta!-yQb;cAg2{kP#jbM6OvAY0n_)LiiAVCT0;LVZ+-U0cE}A>mlfTR{
zW8zJaW}cmL`M5rcrbv6=M_G5{&JfB|sBOuruJ6}B%IHzY@oy&xLF3%Mmu>gxwx52;
z41#1IR0j{Dcu~|L?${9sUf#K++A1I);hr%t>l^pTH@ru5%6dxge0n6@IP6-hHXG@Z
zlK1{Qd*&nPk>m*#Tdw_11lEEmg{82{xx7JiPl^zvv7JnHmf!dkKhZlY-$G8+Qy?mi
z<xCw$J-&n|Zv>xC%Ay%UE@GiKW{P)}IQ477GQ;_ts;NP}ps*)`0j0W@b5aRob`8k3
zstFk@S23Z**^@_3VIFY8Cx>5v{j7Hkr3RAX8!xXzsR$wexkv^4?G30I$H%OOHodvE
z!rRU5NzT|Ka-Mws$Y-*%IybWVj(J7)$!X!3z)6v5XrlD{gWFNrE=8nnV|0()o;S-g
z<so1>(tdhj!|mym>s(z&C?xju?uRYfbSOblwWDe5uljy%MN?JZ&%M^YNlx~c!s!h5
zXv?xG&TE_3u6*?U=zpAHUPxyS#{t8T5=UGB$PB<3)>5MJbQ6!9mt%~{gQb=IWR_B=
z?#}W)ayhyREBX9!3wlWuDbxEZa#;7Ksp5>tLi}B*eb24kvn1pF!##V(mc(}`QV+7?
zB}kt-K7am9k*#6lt5K|e&<LBMn_KJmzf666+55=J1!>vIrN-PH(KM`vfa)M`X-i)J
z3-R3CCqnVEob9h(>c!8dTq}UiHVUv2uZiTa<Z{FpL+u|-?0l)J>Q@|&SEOG5b*3?P
zAF+Ps8PuNh$vTZYhlm}<YW@KQEhXIfJWVyWdg+n%$UX;|<fh2b$|s#q+88Rw(xskl
zG|+pH>ppQU?I7(?D)@C&8mP@jXw(rsZ*$p*)}5p$sq{zbP^nH2``@vu!pzm3`A#3F
zOL=O>K78|{$top}&|)QIpJuTW^Zl{FIQhr9VEblq%Jw0UqzFu=YJo1!=zW2<kp3Ya
zqK{y_1fLc?)$Y-Unl{A^r|oUp)Ok7q?M!RX_lH|8=XGj_6%Mdf>WH*&LzO!YRzir-
zDT6}p)6Qcg`XP#C#FA-%nE0~?)$?Nt_q}R6sH}H=z!o2bdvgFktHKbn`U7qlXS1nu
zX@$OGFCs7C-^#5xnO@-B=bR&>-Qs%u&5p;~2PPKL4ETs`z_xATikewSzj`UW<WiN3
zUL}ORvppwJ9b_xP(|9HBZu`s{D~4+a(MMol1w~W%$fI~us?--W7VLNTiJVe9OhY_{
zkatpZp}X<t*)TS~4>zY9T3tbadb?Gb@&gYGLIM^#Kk%+B_>ny*`9quN0U9z|=VS}X
zmgX&Q0#oI40({n@w{SVMGkVXYLTe-T{H&b%Qq3XG;cu7U+(W+nyg8w4qO4P2B)tQZ
z%hm@VCU1M)J@E5h9bLWsC0h>yzn`7N_l91h!7n#I05KVv`D-uO-k|l0hR)7D4bbMB
z&7##ljgN<h)7l6=03Rd|n>_QIKMTH^d?y6pe@5SEQGD75v{P8_+4l#42Zi0<q`=FM
zr{PIoTmzu4&8x&$5Uy#8GkCJ&2Gm)V=!?)fzyPqe2sKV3O2C4_IG&LPHO`_3yCPKH
z+OTg&UN&j&l)b~WpBVQ>_|xg)9=h25pw%j1*GI42f9Sku&Gy25Yax4k->Fbad>COT
zyiE6lE;)WqOQHVacd<GgTrEpLq=2A}6hPbR=nB!E{W-0zjZ%ZZF<z6oY<kkHOe54Z
zdd$!#rdwE9oRtP5JCnxxxd!hzj|OGszg93Nh+qhu!{=XJCAzXNvAKdi?PtO-^6A;%
z`X&ep{SkM%uH8zv2;$6vlV)8Xw$XrUJ5&l-au$(+qGPwd0sQSDom%#9{z!26$w&Q8
z`2`(Os|y8`chT)MfVU%idUrTJXSF7_#8Dvd+p)*V=nE880jBwD&(bYw)3zV8kYAfU
z1o54#L7A-;y-avOlZ#Om@BsJD#Xw0%8>e~7T)>){=R)B6gPs@Edv|?AhM`48sR}|4
z%SO`U^F>TyN?Pekylhqt_YFd&jTkef_XpNpcpI2*-<G1-UOUb_W~|@4x}K*3&JUb~
zHW<}ii+Zds_zJ=_*SZ*-U{GyHlde-cQI|S|15SQG9NZEP(Sqs(f4jW^bWKC`-+1e@
zQ9jO3L!$#{B1ib7uZGR$@nfensRU}45DkAzlPHwQ**Ptp-J3_aL;T1pAD3>$G86YG
ziB`mo*KFTJFYZPiOiX$2p^OuIQw6}(+rfR!J5eI(U$mY$+(-*FtfnvA=a1fS0pqOy
z1X?=DNY6rYyukz5Z?qqr;q!}6v+Lww;wJ8CL2Jw`uZ!o>gbJEYD?CUf8P`|r`F1+b
z_)X6AI@Qn@4)$MmOD3j3-$QEy6HD<QAfj(fcT;n#OF|NUy_KM)bMiW-r=96ptXC-H
zKGKlJd;rq*Q&CpMZItYt(3YUcFZ#_Hw=q6!3!g7)66V32(!}&G-P!@pIzHztZ?8qg
zY7aGwJ5t$oPSmX$kHr82r)dO200DAuf2f@(?f~1QWsI>DR3Ee18#WS|942Ako{~MX
zY*}A>yv0L!{_Dn8QvoJ7<^AEbERMn>8o6J$o@Y*b9uOoT+yE&4x9?WJ`Tif3|7YrK
z-D_IEbrO5FAxr4^1k24tTlJahYujmuv!$#%IUeaMSMMo2S5@#Uksa6oy&X<c@&bp(
z-k1Rn3f$X-@!Nr3>)F}4-OPKVEo=o2lS}4o2i^H4DUKaU*ypc)7kdm0@3URB(K%<z
zIq}JGJQz~Aldk$Xz#RtzR$OUozEg@X@mnk7UgM#G#gEd)tMTMrP_|Nb#lGu$a1SWu
z-YOM(v)Xx5)|qQaUR+HfZ#P|aen<j-lT7y$(<J|?=}8ICVGCcwwWyOurMYKb<$>X2
zrw!RNgBHew3Kg*oR=R3nZuW=J9<4VYV;gFBsyux^UG*~n6s8q!0)+u+_wT+L{4baN
z?HRwN42k{3B)W-r2H@z<ElhS;dgNF=cTvD*vEgrvIo~jeqyMh>xA&=qen-0JLb<>&
z+yNp5knTUe!7<W6SO2c?$7L%_DPY?s%6d~jHou(%I6wLC(07_VY=Ynqfp6)bCS1=6
zt3w=x<BtOKAufqmcJ+HShLfS09H`$3QHQttcxZs-H&BM+N%epeul=X;Gr6qimAnk8
zRY|fsLUUyssb^}p(`FitJ=k?EofcZg5KxYl!AUc~h>x?Dnh`~vnm9`Ale%0Tnn)gS
z>P<|sWlH!!_%@VyUZ1d*erMy}`u)A-gnAQ3+=kxQqFr0n8WU6Y>)e$NykEMH+~Ixk
zO?gI##U63kHj`5@AH$@3VA2CeHblKUqraV+ALI+pPkNz@dd3w~F#JL-r=1>s;si~!
z_h9i_IvxOPERdgHBm99q(5%Dan*BB-Q~S`G9^S1QEm}G+R>lz{dDR?U50B_gywUi2
zx)`}Vm2o?8F2d4dUrshA_^?=HvgRBM;QR36!UJIVOcvI!+Wkr&Dr39TNsqzlZeq&u
zH_{#8P|5v;0ci=sBk>Q9<X3Qz?#Z;CLV!jm6+bGUR96nz1A`Z?vy!WAgW+M<PsQWi
zG=^rS1Dy`z)vp6Oj849ZDZ5|moohrA9uD4<Ken^%>P+=7)%4mP_d8cb&g_spMUTzb
z(z&Ms5X?1vgCQ)?=G&&r5dw<bL>6;EV2_gt7+abVf<T~A7CLVS!3M9vP1EiyA;2Le
zfKt9(BxC~+m(d<jjlQK#|08232}81V#_YyFJB%+*dMvQ%Ph4gsvQITK(GDo-?Ipf^
z%%cP}oeuRZZ5g0kcU(*pq)+b-EQ$T9zjf|y50RM$PzH`{8UJS(00Th2qn9(bVF`XE
z%z~ilj97(dk$`LMS)NUcWl<pYqo0rbp~L{dB_@=J;+ix->q_E#B}dR3)xCu6sN-oz
zOm)2Ofs;*IO)AL=6cFB~ft7s)+-gaM^-Tcpx{HQ9HrmUMQ8&TZOvZCP+(z1`3X?nG
zRy%A>kIC2p5X2_jdx7l;he#}*-j`-!ff=E{F-t<FMe^f|chXw?bO&~sk^sCW1rwPY
zYct@=ar}+A&O9G59Ov=%Cha=g0sc3cb9e~?z0Bd%<5!~ZvH;8@fZAdYd4rSenPHGq
zZ3T2FgY>q(sm4d{;Jyp^zLlxdfa!O&6RUUNABI87I=mrK;YwP$ss%x{c7Q)omdXHZ
z{YAyKK&W3yHBE3Vy}QP85%5UJm(7<4bBr041TfgN@eIfktqt)rNf%?yW5(--_L=Lv
zy?QP!>m-1_)V>9CO%zp}4gWCQ)N-rbL5a7y{R|cCVgO9?El8Qb$fa`HgkMxXzzNhU
z&Eg!!=I;LeJ0vgebfQ=jH*vM%d+@z2SI<4Xvd-5>9ec#HIHdC8W$YcFyha1ycl))$
zbre=NebI-Zkd&q|hPlhh+HEXT-}>GgN7>~a0*(eoGmz$kqCC^uvOa{Lqjx=?+`{-!
zQ_y%VYV_XXL5_nOt%~2)QYojKDD!jF&(eLC#~6S(K5RtL+7!uzpEp+~M2N&SnNn|2
zD!y>qtVDZ$%FZwCW25I=pXTBdRjMUJ@2G)udXRmaYY_IEezm3rUa{;%AGhnDl5XG@
zZS^a=R)cf8lQ!0(4rHdbett_ktfnUj2Cp}*&GylyAfDa$n5*i}d3tQDwN4f0g(7_T
zx6ewshVQQ=Hw4S^j>h*$3Ir~9T#h(S+69<bEmRu;C~&m77alNgcC-b-C~Pw&p+Gb(
zmJubWC}+{eT(fx4pom^7HIZ{TiceIJ%FhIDSWH=`9nRD--+jC-k&^6!nd*n@Xyq6H
z%&rwiBQxCm8k`Txjqkfw_RG@hIBx*v$$L3ACqomxb-VPWjV}|WgB@VeL;l98^CFcW
zD$W9e8k$~f1>Ox90Al5JK0jD!Vi0%yQIAO6?ONLk^B7|865Z8MQ_H%TI}KUEkOO9?
zcC){aC;l4Bf+2ylNT4SH8Y!bZ`!R9!G|c|mv`>tENTt<#bJ0Wg*;6bWQ)tIwRI>tf
z#kQ(XpMz-qI*9(BIEeZ~FngkIeOnDxrj({3CIJ8C4Tg7S=Y;sFr?rjO&y@Gj-K^|9
zd{8CPUBfIHH^zX{D9aN)k=~soW^1jl%G*31`p|UEW6$`~<=VYZX`t4LmEjlf?19Kq
zbUgf$WA~t7e9F?os-7A)MF4-QRhB2onXb*L2H}z2g@1c!X4|u&y?i3vcs`2Yr)Ziu
z*C_hY&eBo>@5r9eE@EIH1^W7&Dsyu#bygobY1lvJQPr0x>-<LJ#Ed>seul2qDR0C?
zp$67PB=O}vzCSif4znCDDbefs#z(p3(|C>A1)SXw-=Ln!8RNu0<BtIIlQnrc_eJiQ
zM?qgHpC_xsk*rMWsjJD*bg_H7+EL+<SSA(Xvv2O7(>A`2u1ElnOJTLWx`DE>lQ<Ua
zcX6O*FF!soW5Tb$LM`vZ5esIEv*S(W(+#esRibN+Gw6u*z_#4ET*|dBxv-f4%Dpx3
zLY*OR%g=C4&%`cb_s!+)@<I3P@{Vxh>mQCx-heV3C0VP0>{4kd{tX+O&wvP6Sg^4Y
z3l_dPCu*cjl`X@di4$e5rvr7mA0#o8$R{yVhAXdgQ4GXNx*HWz%&5A>Y)qGGC?t^&
z32|n|lc&9zfF}jc;>OC3uLwcK8Nl)iU_cROA0L>o4~ZEPWAwUAkXgveK@sT{o|po(
zKfkk_d0|YMxz4Q7eFkBG^-R916ZCZXnmse#aM2;N$*5|2U4V4C{DCBq6N#=2G;iNt
zr8N=-@>(q2Z>}>FP?q-!`Emh6@(w*pTf8umZe;EPqh)$WV!H~AC#b`-UsABEq1g4Z
z?bLiY?$Usd%AgF%RiWZLq_f>UD!6hu;BMu7ol@QBDyopwvSaU&okQqQ*h-Z-8$EIK
zBd^R`6<*obAM|LJB&Gl3<OMxCQi&2;Knc?@=k~$9!})0Mb9UEmShsY36rv)|%G)lu
zl9*C9>vN=x`igVyV<8<;QEbVaV^>$f84%+x=JB|H8)ETGK*FXgKUOfO#W&#rjt}5U
z%gf8@@Irod*VqmMsN{B3dvh6!Z6lhyYBXfv;A~~>uLm(xAj~?T$n=El<%+4Awee<P
zeL0gdTbTOoz%dDx<Rd|x4L$&5B@Aq)lioh!h`PKEhHbq{io!01EpKm~O!1rfG(7%b
z<S-A${-)pPMNL%Hy|+7DZH`t9`&QcvOeUls`Nm|ER_J4#Y*s7m%UAd^=-|gAHO;rD
zPT~$}Yiiu|ga7I{&IoPT=dm&f1aueimuD$LcsO-=R24mvi~6b|jo5~I2{qnjl+Z;M
zGisXE(sViAI)&x_kS69!J>K6faMdP#_FZp&fFPzcj8Rrs2b@xRW^rQkxJJm;WYg0a
zZ!!PKAV&sFT@u-i+1v((Ql623!KBL>5pF9G36RU_X(6P7kHEc$<VQ8S-en<l9uxia
zmt;m_<K@ke4g6VTgy#)MfSi$8;&j`NdJgq(S6MSxli!U(oX5xF-;U`H_NE`fKms(J
zEiuW8AcYGL6TUAP!kNh8G_zNoHgx-m+jI1-M@7%5%3Mb422uBXIZT@6M7~`ceqFfC
zxps3ZPl@*=zUxNko6L}Y2NRvb`vY1o%@w62FZcmWv(Rl*!gc)6{me?v9r!-?3sT4M
zeP<Vdg#m$=x4bShsSU=Gn9Iz$swO0KTJ37`C`Wd1pF6n;HQ@~V!1r-Gkw0DRK@~Ne
zfL{UJWO*f;mzX@F>RPnDC(|`Xj+7N{N=OUnFY$BUfa)A!(Fylkz--2^RAV9bLCriT
zz#fH}s{UY)XO8&X4pcDIpUYjgyzVQ{OrOx524&a#c`Ga@`WtRn4j++`zSOc!b34{o
zb+3&^sh#%_G+Qv$GKCo>;yvBJMDxf<a+z(88DCy*<gs+LuNQ;@6vIGYe^rS$a<Ov4
zw?|r98ZkRq`I(I*Ig>$`di}_1R=W*X2t*0fnm+aECe8y}qTo|^|87KMxSIz|Mhed&
zjnC{&>weqSy0CkWF3b<L;B`V@3AX<z*q5pGnIEQycIqj+nunWwymM%6aYYvU^eDbc
zTD&)VF9Nb);cXNK!<}Wup4B^YW>t%#FUIl-0CmR9dY+J3_Y={n0@z}hS4Nb}puY(U
zeHzpH;G(}i$h_e)yxRFO&K_$Q)>m!d9MlH25BvDbHk63?9FyoFXwdTd>S6~UMQx2u
zegj#cK<{L_A7fKrT}*FfapA;pld`KAwM0Ct52a(>Kf|noo0@nr|7koiz3eg6dvGPo
zLo`v!i`CVC*Rz!5#`UyW12D0h>AMM-E{mv?7d3j#Xd2CFvtieCI(WR96!<av9C44d
z7*Kjc1Le#Z%lnx=a(M9p#O~U)bv7lfi0vqms?%y@>i*eXySkW^T<?GkA#JbQw0gQs
zxe6|eTS&g`@1bS6vOH87Y8o^y25skcLS7l1Wxn7_o`7kM_fAzoUO0cOxlGFSSA)&p
zz{)?5xF^qA)W^1BZdQO_p5jPz>pLGO7gXUJg+c1O;N^h>AW-r!q%WSlu~0wPpjtp0
zwn@th3+{Weaim28!^%g5^*F-m`shaD;&@A09wYqHq>_h7W-eY~xEj@9zrpJ1m-Z}{
z{#rdpl3aV3U#!fU)PdszF+;O!=H<y+imt}8g~*0B(dXU5ts(w)Qs$PGGCPQc^5UNc
z&e%5N4BUn$FpK&kJt@10*Q^%ySAx`%N5lXU(usL3aCT2=(F5atV<x5KKA_yOXDP|w
zYIkKS#DJv?o&D5{1w;RL(}4zxyy$v5l9pvQVL*0h<CKyp8__N3<}b@$ImWr%!kMl~
z4+LhtGOC6OTx7qjs=DTDIG4Q;>JiS&l7HiRNY*cQi5Gep-s*oTF5el~)O8hCNazFE
zk`%!~FEp;Re|c#9Lsggq`cts04f6bqPO-&gm&!E~Z>k;pM+0b)_Q%_J|LUcyy7ZXT
zsL;Mc{!`S1S{6Ryt&^itv<R()P7qsEPQ8;6FNRG@dS$w^NqTxveW|l+dEayA9vV>j
zf~yja5pSh5)7RAOL_p^Hewk;7dXwYCq~&{brOBygH&k6=&hpP0O}bE>&4nvET?-Zh
zQa*3{WUgnAuj=2gkiK>Xnw`~~V|yB7tTqhJqK@_4OIM{d9>=s2&^bHsZ+V<fgMB)A
zeI5m9gg(zzSjyPyyA*j6NSH%K;S*;s+A9_8!gFW2M%{JmFPqk?X=-f(v%OhenQ1m|
zs$T)>-N%Y=v+p9NROwd*UU86mD-O_@&u51MUkqMLmB6bPpJpVsEFp9bP0hR}IORZt
z`Ybmny+Y!F)JA(sRG^C-cvYHzpRuZQ6%HCC0G~D6hd)+8n@{}=?;$A4*b*qup5|RE
zr_QDi%}We~`!QftLrn=8ZypyK@ec4+ne}y_D8OD_K02+9HAU?joF$<0TV=~(JfSnr
z9#VNQ3RhQdtG8CFi+4)e_JTyBj7R2+K`BK&??^bGHF|V=6{S|QqWi*p&@UXSI7q9I
zHr(Bd++};HXH8Y0oc$!e9cvk|-Vrz6IKD79m{*h9ugEXYC(Uil?1WAQ(E4@2Mg5|y
z^X)I+n8vW^u!;@!byiucq6>l=B3o}NV=l|K-T^QGtxuOy+T`0FU?4G9Dc?HhP=_r0
zDBd!SsVckhcOA<%lH8t)sEzN0u3Z!5B~DoO9S`z#!BEpEvq)^(axw4*{F>@*t!>F^
zru3Mixz#}}=Z4;$W3O09U1z;}U&>7IxNnoBrz08AV3pRynHq_@>~{kkl^?)Dd2vlV
zMUh(Va=*|OqlhjHa{qu|naC9p<Pn)sK!Btu=;5i->4ig_X*E8L)Uvo9MDI#WO~8$w
z@V@Ecceqz3B;bC}q#yUS#>fj<W~#zA$P!??_pW_i^un`x+f<?sWib1+%8X0{8;~NH
z<Jm$i4XY<uBaJw0zLJz7#TC+@7-4ZJxH7;MA0iGS*W%moCWdL<;04g0W2%&kK`il-
z5%PzeY^d+ST2C*YMQ2~eE@fvPntH&J#u75s*9lgtuG3^kLhQnIhbsXyPi4*qHiwfx
zVDmC&Z)2xio(xx1Fn|@{6{z^t$(Wa(n4k&2rM$^BFEX(rNmR{V<Sbd0EE5PklF7g;
z+6M~+Tcyrjs(*5>N0;!GISTQu6_zO1FB=65Lar=ekR3HV5f-&Cyf^d3NEm)=FG=6<
z_}Vd|1Ph$+*XauHTR#bc-OTUm!cEIpO6&=9pQBrhMOzA3bnc({9N1jt-K4<fFta*P
zLkGB~4NaX+)i^E~mqIiN>&>od?MPo@CkZv&CWra6br9s^cA)t62^7vxptljgOtlXd
z<@44Q`Og($M}}0Eo>C8&SLigo%K!Fm0>rxW{3HD;5knP88MBtzC#VbgBdU~#_G}$R
zibl(aQB;U1=py}LUD@aX|2T91s6+aL3@KIiGY>u{wH(y5gd7YQ71SpvwdT%6E6tc|
z73{~4M_XbTu}DeUWNxj5{sg&<ZZ>(3?nIaPcKX#!NyWuoGQSJgjF#;j9+&Kk^8!mC
zaAG-zLv^NiQSB^-gdK*{r!P<Y?f^Tt2cecd^|<QNwQ*9i@L0)45SV(&I=xkxM6_!|
zO(lSo2ZXjeI@u(c2W_-QFvunf_8S)#6G6a%9_5#is1GDpc`&)rF0`-OJR-SqH)i4n
zFZKpgMZjc<jH+vsY1!qTxV5U-_=#k&^r&-5)o-sdW|$lHgs*6UZeLMO{-vzP+a-a=
zj#YKAmh~DOQ)|FNG+ZX{B8G*LwHUe+#|yGs*=AoB-~wh^l`*ZI_`tX;bLPtd#QF*d
z3;y_a;<I<^p{5;G_N}a>Y;x0F`%C`=+fhoe&bivp9{ouLQsQ1UUW*>9kLF(wMi}R7
zT|`{l3D6uPNrBx9we$^bt<BxR0khJbJDd`DiJT8(ZQY#`j#Z?mv{NuB4E2-WQhjmy
zhDtgTKM)0=H+!A<D%sX`j`1BfmwV~;LG_3^+epz4{CQb?z2oxUWG3Rkc^l-K&#@TR
z9Ymc=Dika#G4kw6#>UkcL&C8bS~TShhU+8^;}^-9bxjML^IXkAq;LvH>VgwaL9+uH
zefC9j696N&@-|Y4$^3G%CcGhtK-)OlWVQ<KboVnQ-qb+R-qFQz<V`ZiVoR6MNZH=5
zqz%<?(qm7)f&Eh;lr*_yB;9+b5dx8{zk$C=9wBIjlc&Tpr)hR!^V9jlvcT)6EZGYs
z>j8B>_W3C@rc?iZ%fxQdoMrWycHy8b%J80^jMT)n&}Bt(M|P^SK@h3_{lxOxdbEzh
z+LcRNWR&C$aUQ<4`n;GMzut^z=o62*imP0`l40%!FiVcNl6Mdtt5b#Wxtrl7aEpG2
zshDw(@J)Mu4I#ONHKl&D@nD>}n;Hx8)ObxcND+Y;-%6IWDYIS`CUHzCeLns22N|AZ
z%k+T<2k|CnZ&WPrhfbWR;Rb?D`}nD+yS>yc{E{&hUlIjH;PYkXo(tC62SZ{LFB}A}
z@72wHRoXU|gdu`RM|4b|HL5NhkLiH&Ac^qB_g(Jc_);*cJ`4qo(1~oq%+@X|jq+%4
z<wTuJv+qkYv2qgIW1=Wpq2I!;%&<$S+_?Zus6AK#?5S0w3#vquG_V;sc_>}GvFgrN
zvo?F+I;B<;*L$z96nC=PRtYwhTH|(2<4ucGGCQ=VdCZO)*Vb{A4lDJ{BA$`xqCA8(
z`97?;d}t5u7Nn)j^_4Rjyaw)4el={;&2?p>l!qKzdNs4-$eEAysP;pb{euqQ*$G06
zjP_52lyQQK%v-Vo4E8C!+1Nsjc&Ypp9c)AZZRpeNQDvp#c0Yfre<-3T|Cqs2{JdSg
z{OPEJ_`W+ns9oT2)}_(!k7vP03;WWe(#j?Z{CB;9;9(KVxgg)$GAwAkrp>u_*6#(U
zB*unkM7)EW4JD;WVG;Yz3vNt<5^O&8rCUt18KpVDl@0Im+WGB-NknVH*urj1@=Pm4
z!g3j`qKXZ5@1*W04t#8<9k$>6q!e~v!K~f<6e&YLOI9+uZL(Hv8dTaLfN|z5xRjHL
zP8-B3o(hFdWeiU@rfJ`uoI`9Syaw_tUy|z`6X$=V$~B+6Upn<uLX{Lqq*`{Cr<mvu
zk~hqXY>NUp60DVvYwyI4d(zT~X@P&d$H9rx$ugNSkSaLOwuhvs(t}1D$%I0q=)IZo
z++QbY*{VSHZPYK|C|={2T!qEKYkXi4%S|^^FmHBeIf&Uo&4=h;jn4qbplRF3v@KPJ
z$|o&K)l~$rU^LORvnVv=M&V*6c6Upfv%@s)#F%(afMZkAq3)(@53T0YHd>##ntmTU
zkcCtk-<-dkXi=p&0xL#@!H2Bsut*a}e4FiCJ3`DCixhZN4tQ^iQUO|Jo3^v%Y}_6O
z?3;cR%{TytpT-}PmYZa|BbtK3JlK;xMPaGVi~U-ldl^&ZCF-o^UdO>z9X5=I+9O|I
zC?NPO_JB~Ecxd3<*63So!(4U~=(E(eL*qZHyy(tba)SG#Onj?!CtaOui~OVBzYy;)
zndr)R8HQPeneGi_SO}|pa^Pqj%jm@yr#Bp5r)|Waq%APPhqPaM>`P>*ss2F1!5&0Y
z2a#~A&eJ!qw-&f|mVCvZ)jKU%hnPo*A|}v=;e?vWmi^#lkIKT^^g^RIhy%0xQLLMa
z&5aT2T1<L2YmhhVJDq@1b)+MbF6F@rV@2YoGM%c-`w0s}<0YMhU73|Ir<Xz>&DcRy
z+oOQx@kn%<_L$^*I@u|1uHh(v`YC!gv<pBGXezCnUMPK;5b^=LGyhzt`gpFltM5`;
zBqxWe0h>yJ1Z?+0SGi!|Y6O7b^wcu@{ZPlfP`1J~ixpN(Ind(bxlimS7_Hm&PEI@V
z^R`3w??ruOOf1$HdLl+W6*lp6G*ic%(@)XJWSE++(!SQd6V-}5BKH)!k|Q>%H?NF|
zXne6d$&oUZw6Au(x1G!8^@<|njogFZ5g)|K!uJa@A?8l8gHshC_`1IdYqCI#_#+|j
zj^YwD;pc=^Y+q1oV^4mG8Z$0=T^o6?FOEE>NSQWfPu)%>@hR?APHAP>R4Xv$P=snp
zRc_~Hr}m`{F*y#)ZUazam(y`vswW=RuU7w3;+*})`mtqv^9pT$3$RZ`30DKA2lSCC
zlzc=5j}UHKekIKEAm+hh2Tx<ytuZZY(fG!;`OL>K6Up0Ob;WQ-Nu59LwN}Dml8qPY
zpMR~F7-?>s2RNPY@}2OQ<>%MU#PEO1|Gt%zeXAbGOucD9Y3bsB-1)OQ1f*5m%p0E|
zml<Ljzwwxzrl686Wv4e-R8D`O9{aGV*;+$-!`ZuYt^^6}`~LT^%6jeIoJ)$F!dnHZ
zLOJnkMj5HQ*}o^rcL)=}a#L1yE&}U7n=8)CJ=>U4yxsUUdBgd}X<8CoP-Xy(*10TJ
z%7^?^Cf)nACxg`dSI{5_KUpH5`NnxlbkK=6IR?y|Fo{NEV)fqPBI<(p$)xj{VOKzU
zLYmdlI(Bu^K;MDWgGV=racgSVf0aReFlrk5>&dtlkym>_G#`vW0&UM0e7U>%zv)?w
zesu#aK^PXmT6Q@#4jwo(WOK*iZ3%#2%$;5xk{kSLa!2CR>C!6*aZR;cgT0EojN|X_
zR;A?n95O%QCD>G+D|8gzbeAtQ?@i#dasNe6XzGE9_lDMpyK>-72<MTf=66+IotAZ~
zk=9`#O|_t#r+;<re#HLpR!L|D;x6SAi=_Cwc#pd)zVrKPt{#SR`P>9ATqwBL6S(bp
z2|4{j%!4!dzF)|q!7+G~*M@kt<C))1wdrY0_2&YD{vAsu&tfI#D7kl*l8JY(WE$Q2
zreNcAYH^4FUB>wm2er=Uv?t?&OC^?<o6Bz`+!+O#L>|m3k@C@VJVlJfxnFAMm->H6
z?nX!>H@Zqa7luAOK9DWt<1BF0S1<L$kZ}!Fh>#wgEi9^CfFbJy+^49BC*fKj%-e|-
zKN@=wcISAk)&U>q<cg(xmHkj&XPdb__S&iFDLLL!s?K_ZuMMutmMQb@mB4s|a}78R
zos{9Z)`5_lW^ne+%wRu$)7Z^U%k9PS<2TTk9~VUx@La*B9^!YI#m^r-`iP!Xxpyu!
zuVp8w{7?;JITjwP=MUR$VXvlkoQ>EyECmKT|KwN5v7<+Qp_bQY&78_jG%l&Bodbv@
z!=$R<q7uK0NyD)wpgIS!2Z4+=HO&sYHUi(OqA2`}X&<B0!x2O9w+Fbz<PI`Bej~*f
zw<0`o>x#s@mYP~gdbf~E{mddFIWu}?L8C9|jlRh7V@IEEOS1@~OuH(V##%%y?gIt<
zX%?2X?&H(Fi=G1;dEer0nYhts-o1130%r17Uc-}V!OhHjM*#?7hZF608YtNMO`Ey@
zaFtk3G^CZ~Kx<hG&&i7GRp;KhbH$YY2Ddrf%goltke&VE3y)z#_n43L0v|so-nlc1
zou`%~8BXo4Q`)aotT%P-V(z5Tz{}_RtD{#GZ^_?G&L<DC|9V?T&`M4(yYlUEgKfvp
zt!W!RjXt$)-veC>vSjo11s0{kK^Z&xM5%9H>AUz%IBs}nYnyuQyBxBEzgOTXbFN=O
z12i^ec8oEYNsE^-5nvP6>~Sad(RS!Xg7$0Exi-af+nfwZIj$?~2p0vto!kC(eq3I&
z1#~d!{{8EIw%)jduy$gSll$wJ?ns`K{`txs{@=W^dHc>CYZNM#y1cwn-_R6z^KRs$
zClyt-9i3fyg(YrCKZx?x@rf^M>+70&rf#}6dGL^q8;{3*QtDkJD;)1cnj>MFX36Q9
zuq!%w*Uai+*T-$(9hO!VrWTQU#%>yV%jhg>a9*I1v!<HnEZmOj=oqMNuojfF=HM_e
zNJus`f!R1|>X|Lu+b3W5AF#5S^~;!@`)Uzxg(|&qv(@ije_-Tr$bCYvU$v)uj;paR
z47_a2_7U7E*%9h|EmO}8@Q8qjF%gkfJ1Ux~s=9{8ys^G{ka_P1$NO&5>TjUXHGBII
zB_osL$FWzh9%N*UR<{=8IwH0Yfbo--{4xk;W@?8IyNQYtuU}uXwf$gZ6e1~!HT8{_
zlk@TmSy^3Gx}<Ya({O5fX23IE^OAl}-gA_`T|mA2?T`MU?Ov`0HW5RCW{>pct)z8B
zub$Nw^{jL{bdrmOlliiT@(H=4wrOVN6}XF*-d7yM4ewWIA|IP3wAqv{+G3Zji)MBF
z)8#cx-+#bcWPQ4xH=%d4K-tjIvS7+MrAG;sdQ{QyQgESl$%0wd=Y9Ml!rD%n5x8?^
z0WuB`xX&mvvUBOhG+oj()V*fugz)kY4ljI;&B=e>*xY9C>gkRONKDDFbM{D1%PcI$
zj*N~ON53&o>%rCjYNu@(5OQaT@LBna?!(9NLG|9Z@d3J53~Q#g+x>}vojV6!>GVUu
zwq7Wu1^2YFweqy$7uQEwd-}S=`NgkkY4Y=nYr@^^JstR^q|ZtG{rjEd6n)&SY@Feq
z{MK-LM>qMSb5*rR`5kTLj~YvAozZevg*!N2@$-P|`)M24_&M9i*dA3>V3PBJ__(^e
zf+zC(xVpF@AwKd)t!xq2a0vKLD;7Vhz{Jll=V5CH(Nnwhy%P9K{-}eer#nPk+}qn*
z%v(YX;bAX+Rz^lf{LDG=bLT`s4N;`8o2QkJs2lRwCXMe@)Zj=P4@Y-TM}!+cjjEM3
z0_7=x^eA}BpZsh!%KbObb3=-4Hc-q4;VKR;h@Tac5dX`&K?n65?HxRkKim8}8a`I;
zfW{edkIg!NP0H5hCk=O$hs$QKZEeKiE^t@4n<o;`Is4ZH+#L{}2&4nT{U@E@wdLpK
z%@+NmUi~?ZAN~19cWY_=_5H4{f87<N=S45TxbI@{yKekw0+J@s;(Bl-0_9-?zvu<V
z=-6L23-Hs5CbGYgEh|Sy*UfRJT@!ahiaXl=b-O@4)L#)n{b#8FRrs^c0s21wSm@k6
z9FeX+32gp_^t5ubweqmt?2Vh1D;x<F7LF9Rwn94Eh)Vzw{L8U%vhw;#kEV2Pj!48N
zDWr`9+|>#x4l4Yo#Kr(>|C7+4vdvT6-TtrJg7m!P_MhVYCB#2sxw^Zj&3T}rst6DG
z&t?JpV6)XXPh0Wd*4XUercwCo2GWG(qK_xs4GHYbPqMUO`#X7p|ETtlDe7;I^k0+P
z;>JG~bEpTx9q!@j``6Umzt{T5^k}N~*R*I;$v>uZ$r0}I4>Zt@R^^9L2WI_ugAMsE
zj<jiG>t}QFP_eTATgmsSz{PQM25`6XK*G10|HD*#pD=z>pgm!0@V}E+P^L{%{7^6@
zZI#Q>=7K-e`Pl**oy}H$r{V4aw{!GS0C)WEw(rIN;cY*4)=*Df;Xg19o0j2sRDXBZ
ze@OcWqklU{o6V-pk^1l(e=Ps64&l~NZVti^$%D{ZreNS<<?ar*-D>jhFa55ue@4K|
z$_2H_IRyitMSmdiw~POfj*XQk+#ccKtKjJ73HNZba`^+PpV$8(CELGw7Yg7lnvUR3
znxo-p$M5L?=eI@JplA~{zk?MLxJhs~epiI8BM@g>eoq9yqnnKj$`;P=NgEMIH+xWE
zhwyOSr0|CZ{q`|`*Csh}8qZuD|H!hf^1tKd7U`doxn&i8d+(po+tjK5iT+&Eg7Ry@
zZ5^%nwXEE%>^J?F|A8t21Fog@hn{Te*zZQ?w>tPcwfHk-`r$+XKje3We|O`5NdBLu
zPio&~OkM2{GP9{|ziaPbs}wLYf1vPB*C<0nbv1tAt^j>;v;}%ZQymX}pd|cOE-w6D
za1YvSi{uAx4!;K+Xdg}Yko*Wceyg9V_lGY1w-o7r&GxCg(R?x+`2SzLLb}6k0OxFI
z2G0su+f57kugfH(EwTC^^XOl3iuz$Je(26`)aqYKQS^Rjv#N^~68VRw{Z|d!UlRML
z+c*Q@C%9U<xdFrG=*I8u;ArFU8}r5Qh}<-7aF4%pnf_a*>|e?!0N%8?GQawF%l_SU
z`X9O1U>&|CEI-|w|GHD~r;9ca1o$1{e{rV(EOGwm9{*RhMjg1u_Hd6s=-xkFU7(<6
zv>AR&c7B97{?N+*mTLU3&GI&gEk7Pi^nYmAf0YOSic^T{-+IUI0{4Qu{NLfmzm}oW
zQ`gtxSE0qtz#?WVTK+q8fZPn!{B{xb=K%m(knVRk{&zwE|H<Iq@1FB7Zm&RW-QC5J
z<{E>DfSseuzoX(nH-+ejGrZ*m|E=HrU;#<%I4v-+<pg*-fVux?1i{MI0r&uabcO!8
zhRz*j?eab1p$$Si{~E@Q7GC{d3+X`r)<b@p3jLvr|J>oXvbXnu+iwaIEt>g<w*8al
zn*VFVPm3o1p^N|A;kQO0oG)(09RAR*e}|#}-ZR?dPK#vHI{UZv$?syD-XONQX$g=H
z|2u1x|20R<3*iF7um6V3`K?*n>LkA#%GLVsy!C&sg8XTd{vCAVx5Mv<M55r|N1xx@
z5lq@{VEO*1lK0Q$hC2wl>sh(k|6A$<=o`NWt#g~f;oqC5EuPTIfa47IV!nrsX_n2=
z^SkNViaq?nQv7rI1q93O4-NWv&>eMseeIu2+GGd_?r%;XKeo<(1i^mK#edmJ2Cn}9
z0iyum$i~wH;Re>f|1a~(5o!JJsZ%Py(W&1oS$+nT{~ysMTZH4kp~+Kow0HEh`akSF
z2|SeB+tFgF>=h|ZD*KGtF!t;e$yU}f7)zEBW6P2drKA!?p(rgvWi2~pDcMrCgpkUT
zghKS48B3$o?YjNzc0c$2G&Apemgk&jInQ>^v4N<Y>;YUf?eCt9WRj!ii^2ci1qkFD
zegV(lU4L{mRljIqF7D%>YEc4^AXvh*hlMOf!xU==QZ&HQ<Yf0{a?0<;gNYjfCkLSV
zk2>^a?1AUY)e5A&fXOF1z#Ogi;fSUt-jskN{L36(K+pUSqhcG$h9oM+L}S5`h=f_y
z=6|ioas>J7R)l|x_xPs@uBpg9w<F{mqnwkJ`gviNiI&LHE+<64!c?~FmwB4R+$=HR
z?Kg8W#W_f+I$@P%wQ;%vkjeB52uN)gg25p?;2}iS${7bi!jVW`Qlp=%O{O=I+S-b9
zbHO{9IZ92Gnf^&$21E@f+m}MXgeDvN{A{Y|bOyhe8rH!YX9kiMoq*l^)#I<r@5GsC
z5r8NoX8ld2Y??aYfRol42b}%3AU}zeXAJ!2AXk`}jO`|f2ASC8jn8OfstYqyv5DkB
zDUd8U<#i|Kph^gSennQ8#0Q@PQl$Jn85_dqC!f7+AiY`41{lwhoH#qEjuxks7S@I!
z22pdeAy|QYMWDHmz7nvG)*yM$6nHpr5LFz=<RRuP5|go%!A=H<0?1&naD!~ObOTW`
zAhy5ENt}2>1_BB28DL6)k<3cPe+&8_?Ru7;hyYA%%lJG`f2(gfyuBN#0SJ#NF9abf
z0vCX60lQ=zK~AVF*1<slA`eo{u^>EehlR*mIRf9Q0GLx(9Ef~D<Q=dUPB?-9m_sWY
zD|>s064-d+A^=gawX%UIW5Kk8!s?D#TP$QNAWN`~1vF=CMR2gPf^5Z`xDf+rj@DLI
z0+5|JD~Ku<d{M&^fUtG}=mIDbfT%bFB}d%`XYFX^=434Z*=A)8G*ifSyq%c<M62*B
zNVOwafh<)`907a^KxFL99ITuiAv>^E1ZyYIyB5~g2}FE%fM&FSJ?I3gZNXa-vsWA;
zJFymk1b%e(XHGM5;z@=j@F6>VG52I5Wja9_B)h8H13<TcjUFI<_nYKKR+mI>U-dyE
zIl!-9^!Y{7>}2+<6c9mZC>#YvAhi()F}Sc80;T`00h2XB>Kg2@n(l?zD>0t&MXzMg
zl8<yE3t)DBBC9gNPh_nUdm+gl{FhTo1{--t<dpjLu%-ou)K@h8y~1Bm_xFm<p!@F?
zB;$zh6`R2;-zzv97k#h%B!7Lc$PCW=Ucs3>`Mr{4-1@y@U-9wx3V*@j-z)kR?|-lG
z4@w0xLL`yz7vxH$;EBClAXDGoi2x(uTuJ37ACj$i#0AU=XstiSQly3MYxpOge92Q^
zHcf=;b2>7B%p5NUJQsopILwvwpP-oo)*P5?Gs{f@3$zF^IigB{lP8LV*=5QXg~;ow
z5w{Sy{Y71}LNn@4Ca?TeZSrE|^+|B!9klUaN3|-JsG1Q&9uP|)bsWGR7m^k;F}TS>
zQ<aIEWvtA^h<RyJs0qz*>gN|l$g-!TWT#7#Kb@)o=15IVY)Ua!k(Y9E0&SWh%tWv_
zb2BIcg%X9Lh0vx@ELvCuip7EdQKE1mQ8<<a=ghh@8~vQ9J<+JJh>*F6xd;}DLnAOy
zGzNo&Vnu||P*WThe1{8}BTY=E8YQbcv(f3qVkyK_+0Tz=7M+>0DrJT<#{z9^s@4~8
zr>c_jg2X<jluV}5ND(X@aKxw&G5Khg*E6e=xp%jLga_gV+?mSZ<dRFY5=l$27HO{~
zL<?_DaKSoE_wz+NQ*0?Q0mT=o3sQR~*6}GIzkEFtL}Fw_3W*jGf{Mb0g{SI}p3W=?
zm{RI1rkntP_-bb1X*UaKT1W_vo@)2=^O@y|CNjwvK%!^T+<tyERTPjY*!?I*+?D7!
z)!*bgN+xB<*5DuAphnsd2`mCTf+E<{DW$5Wi3dr<2sl&|XW?Xn1!~wAB_<&vb_D<+
ze+jfpH_!lt)B*$npIMdk_FI*S6!QJPNhM~h?mUrY541>NMMxoGa5!3gyB1{P3)r*_
z7~UD!fN&UERNNM8jl&bnVK!DK;5`h1#s~o!GBJjk?abB_X_A0ri3Ey`t;6I*fz%n|
zM8QDAQ*F&C`K_jisWwtz)0EvrLdxfvB)uT3_4V>HtwGHh%UVFg|3^qBHZUg~f4k>@
zABr6(X6aAHuq@sNc%eTN7m3Tsi0d0X{7=#QWw-xY+?s&-`IA_88_q=z<hVM4+(e+7
zfdoas#j+r$>hI!smUd<~@z?U2*o4|F-Txr2Csh_P2pR^~$T<mNatP*;NFs)*nVeYB
z=aD#}rvNGUl^^k+E~2w=&^#v+MbRJ4iEzx%K_VP8Cr+HD`(G?hCn5Nwkw{vU{=VKb
z3rEihiL-S7i;*}9!Jm&rytN9*c(F2_i3eb){)GzrQ&Bk^5WfSVQ*iwGXtgo>7oc@E
zAbtm0r{MVW(K>I2{MpLSEXLfA_zQlruqBuEzk?tDa1C}Ii4z`?KU$p5Vh7BL6X(qk
zaNZ*+yz#&Hs?Q<@fh{?2hWsb|tN#LhbxQsJ{p;hzWc<+rcG5!r7oc@Y{r}x)or2?!
zM(bpB@ux&YCfXuLiz4<19qQiz;}>hQWKhf_buwHwkJR6ja`2UML`Ix5S!@o@y?Lbm
zi;()WOcx?j=V-o6RQ)QD^BctPe<7MO&8a`Zl=&H)IunfFfYkp&bY&W;cx!cV_|{(H
zwxqw8CG-u6KC_v>uP@C4&F@0-^wj(}v?m+0S>|?5UZtPI#j`;3yD&XHHUAAv|AaWq
z&p`1^Fn$+q|8RFcnV_C2TIcoTKPKP$8%yO^3hHkY#`ByCHWdBUT|Qr^NApPiVgtz3
z)(GOnsF8`SKYmS}w`g%v(@34SXlLf%{|rTG-lF}v;&&Fy;x{g>vq1Cvbm2*0{&<-@
z>4TYNZhsr5r;SK*J^goKdK#EN8q;ccJN(beXPRh>95+84&ie)!zZkb<P|PEBGMqP$
z)PG6jhm1I9ve+El6!S>^w;^?=f;y)a_Gch<CK$g<{Qlt^hRHbfCuKkV4AlM$lH0zT
znE%EqJFN-+E9CO5(ENQ)MotapZywu0a$`&*dK#Gj2Bv3vaeteXo(aa^$L&8l4PY9#
ze|Xr3Y#dXhK7B~f-`)E-(bmjnelRfq4KU`BN;<5H^E=y6f!_O7gy0*qInSwo(Ai1f
zM(RxM^tT~(CK$hqQ|FcBFV5-tIYRhrjs15acuHaaZ>Ubwn&8Y;@;5mzW`*YOBl`cj
zlB}j<XKIDD`3L^v=@w>E#SiLE-|YSeG5HVA#hTImpT?!rGe74XnduhhaYy*Idtj$$
zejay!+;I?-xcdhgFVijj1HPN3`(G(D|Iic{a=x2s1pKHiGTp*Iz};kEp1g%$iMz?}
z|FrdP#*v=j$bZW@%g(P_BFDs!N4#eP;fL`!ef0C3yt7kX&*PAEz$=j&=5a{)4d-{v
z^C3wJ&GX?rAAZh#pXbAAb83D)oH|{3=0y;*hCZfN^_ffATpVok>me}`_&?=C5@nm(
z+FH%L#p6qY|EY&+PxmQ8P9ndiOK^kZX9@Q|?UH1QeH}xZ=Z-)A<nVcIab8>ed@aYk
z)%LqiU7Y8`c|M%yLvXnmxlZ#x)fVS<ni&_qP2`wPX>@;x_k^VM+JZZwf3?O>?DH3E
z{KPJQoyI>ovnIb`W=($m%$oednI*{`WpFDxxMzG$msJw$l1s@S36Bu#%yv2EmsaES
z#d<SiASiG_B}&`?g+QRq5lA6nBd7z;28VUTi9-kka5*ahZ)s&mfbcjYU?RK_s0~CI
z0(F20!68s32oerHT*0l25U4T)fk8l^@(>#=!43j7pZv6iaFW9Z)s}$~TnU^Is0&01
z0^JFL5+G1r2q)*%pl8DN&Aj5NlO4$0h5>M=nwcT~zSRn82_iUvi|K#)+C+qVo<`<r
zWZGT%#mOban{Vf7WL}Qo{vx5DL?gr_{UKlH;r>XD1+o4NnfU|fNfYY;gKp|{Zu08C
zddWcw3w?XZ`3&O!WYziH{2#aQ5J4wL+y9(W$UFP>Gy=4fZ>c53nfRYk#~()*zgUKR
z-QBOI4Fvi-@?!$L{|R+WH2ufX$GlN9Vcg6cH6*=N;&X)Zv*A0xwoFmS{Muqi_)HgH
z3;X}6*70?B^J~jDXJ9&e<2O1(h}<%7#C+v9BTmczP!aju{GX(Ze3h+2cD5_IsqrHc
z<{NFzY~}~|*L?$wZ}QztXXg%_gQFGRP7Lx7x7m?F@rQ(ZNO+nR5wq;OnKM%7uWtGE
ze(qV3`VaQM&e%)xGY-1_tw+QBmpFBnT~>4E)R__AABlkx>Fo0f-!r{izf1i7=?6|s
z<qOPQ3g`2qZE)YDsA)y|-*Q~yJPzlxcnP?vqiF#(%w*;t)N5x=VEiT%%+zYpu>V(k
zFG;gP+NUsm)(KH4d^uVFn|%rxa_s(^E`@(tm$MrQvy$eV6p#7*==uEUIave$&m`PD
zAI?ZFBUv8PQs|F%xJ-|NT&B;<QpKrc!|&!1&TIU(lXo)BN=9F5Bi~3yotLFJ2fJA(
zaGnqUrF=-D?BBIdVWy|-OD6mIR>p7H8}WNKD*XDX{gs;|X4m4T9nv#BsB@A%Kf^me
z6O1p(`x|S>PmiPh;f`D)BIe_0^A|>fWE_&e?;A0>nM(f;dIrBCFy=XRPN(rt&*n@&
z#BUS7Gr{;>Nd3d_&6r2(M7(Tfi1xpN)R`K}Z{yUNVEisl{j(IM`TceC`|JK{f8959
zCDM5&|E8OM@z%3I^Si{rG%$a(H++8G{x7fF(=<BM?3qJ$(*&K&Z07G;PqRWZ&%X9J
zNls0i<B!-<^NacXV*a-m^Z7u_pL8%ANgfhYt3=6#?Uz#Wr^Y;H_ma<(&7VXz^EmwT
z^HoSp{(EAqzclzbJFQMzA~U@_^UkJUcuDKTvLcJMn7UwBLeSOLUVFQRm?B8;b)0@W
z`Ec8ItOd?d3N>A7@&PG@Xre87oTI6Ol|2D$`6Jezc&I6_C`Ah@iGg-(aRlsCyX5CY
z{jKjaa+dy9&56SE_2)2YWL4(pk8CpL@O6Iv<}hhwRp#f9Y%=EXb$<TlFll5}=I4)W
zGUo7=Rzlj<9vrxbBS3I=W|EvPoYGRXwBV~F!2yT0C4R%3?4bn*w?g0$;x`Eih#<(6
z1e?CVCpfMMCyz4)w?#n&RdIF}1WO1SBP<M&l9~jCcCs1QhI?yD?KWoiNbRnYHkRyJ
zYRt-$*(CF<XOrX7XS^~Fr4VJ<@R;R#-Yuzw%x)R@ODmU9c?}koy&>%iFZO>LRvJ5b
zT6(PYtf8dFAia~uE}GbG1^!Kj)u(gEKMcLWbCy6JEzXH5ioUelZj(Y*%jE^;(rh2N
z{XX4Y;yO0obNXy`Z;TC##y!ID_<JM2oKG1aJM^~nTtsv2@A^nHjDO!h`hK?v*U<Qw
zrCqM=Kpl5^$Sp<7b%Nyztxk(xUp#d^L9H%&Fw`Mu++Jl-aPa=&HhQPq<NU#6jjg&G
z%3Xr$>y<BvM`~vAY&f}0CfKZ>bDuknBJCyUNnS1GhL6!k{mml0k^api9i<<R8(`kY
zXpN`U4)tsaPLV5<$ja3{;iGNG+H_Tw9u=HuGM-_i%lEz{<S0VR%9^iwDP_m5OCCC!
zOPX^Yxi4<~z;(FYsrSSI<r6fE>`$&JzsPvZ@9N>Fie|@{m-7xK+b^JEI*7n7ieX>O
zo~=4;wntwtFQET4rI^@G%W!FqhWd|vflzI)7#5E=8iy~$#IgvZnlRCe17KUb1t^o2
zK4m~Gr*Ma=Q(WzD#8&w1FdC|?yKqav1JUnxE^e?!PDoeyeG0zM<?c$_8#hJvylLd=
z_Pn=fzttF%C${#ee~$1~;e)!1^5r2Nl&H5U<7ZXd&F&w0!_8G4bRg_#|Kq1Ph($`t
z2Q@fGbd|1w`;J|hc0TC%X8k1+7FKV=8xx%T;uejtFXuNr;c3BAbEDc{|LUr>^(@19
z?%h#)-rZ@xac<Gjj<D!Fh_9aREq3^2yQ2$O9waPBQjW<UzFO_O?9sDjCGG0oH$}y3
z-zFS53@sKgxwL{Wk@x-ixb<)`^KB^98NSZt(TjB>Uog_WsIrtf^5Vi`%j0~OYj62`
z(J39k(`e%pD&D?8N8gWZ#rCpjRxDecdV?lk?+nYHjh$@GI!tKj-HSQ5m~*b4GmLxx
zq@<qp9(_CA6)H79j_@OgKV&QII(fOs;N+>@V`|#;dADonR`MKOc&U%BhtWtbke0h`
z^HI+G7F4S+b^|p?r`XYc=e<jh((s*y=!Y#;u)5TI#Jr<sgSN6y;tjc`a)+`T1)<pI
zvCBdp+x4T0%^no9J>ML(W~{u=XjhDNwKGqb{^|=zJLVE9kwpcn)~9!8tT-cn=PDwl
zFyTr1@|0UflxeZObY~OY3R>^Cwj0&Hb`oMv<g%j4eXf(c@UCdhJ)J8zI#$#N)077#
zuiqmUARN4!(b~MU>{z{F@KE@x6!sJ!m5|Z{nz-`*_6*HX-bb$_h0l(`PSU<Js@qXe
zQzofBLa!HjH=d8ZYKwYo_yRGSCx-|uiM_Jo*E&bl+75C)&d~I4j2hXjOmo3R*<z=e
zyfSP}ONrTwaN8{_wWD7J-VDsUvG=TJ!d=^i&rjog>D9Xuu)9JP!aTPZkDsnsT@X*#
zJ!Eq2cD(ThchjgaQ}M;U?HzA+e(+j&RwXjB)^97H^*F`BHOyzvJqo8VLmVol5@~;S
z{EmHWspg7hJc~6OMRKG?a@&seJL|ucxM%f2DZ%aBLAq^bMbY`kn``<s)arvq>ReRc
z*1}!RsPGvo3)DB;jP6u3su<VtwYzg7==Es<$6LN1!`3^PFOTbqr)H*U5FZkI;I|=_
zpZRq%Erxw(!CgSePAc7dR0J|YojZ0^8yJRJyBte?0}uBxPsy%KdG^S|4!+JJ-M=Bn
zc0*6)Yw>`U7g#bh4#>5_-W;Ojw>IN2X;8Eb8**zHTJ3w`^$-<7JpSV4OGzGPQZZ#*
zMlWPpo?Lfgvs}3L_6GQwjrb4FHe0t9*b=rl9Tni9v=>QPwi(e!xR7>Ku7NpZeV53q
zv-!t1oN_&!D#=qdw!r7%6NZrtbE%46vHo>dzJY30A`#*1(G5-n>8f_y*qAe-#~xJ+
z5BV<+e-Ke|ykH4-+4%$e1GL3T)!JU~w7bQ9`zl}DN85#aJGZ2|u3Q(QGi(@sw)Z7o
zUu(nKyjLR;JxnW#wv@4(6=zdy^|EN*Tq%BhrEZ?|I=vm}HU0yZtW5T5)PigEWS{mQ
zyc;&!O?zMGs$Nfxmv4d1@ny`Fi_;%tkNF-fUxM?{K5V~BJg95;aA{=f`bb0pZ+c*>
z0XzzqP9JY%C57Fv%~a>^7*&kKN%5XdE&7y6>v>Q5H9>W~_AM>XvNWiXL<B3Q>S@2f
zm{!TJrPHLej8n8d_EA)tjh@Cyr4)YGbdE~l8$#Ajv6pL0@5n)`DwPFtOg{)GrCmus
zI^OAK$5DF&c~9eIg<XS<22JOqdvOpG^JQj-YU4I-L0>M}w6WJ&O&XdUw}T%p(2^{A
zXGguo!!l8)g`0AvBeoQn^P5Vpu9I1eGeek&tVFw)6~5;ZN1Jn8T9UKYLtE<{UkLvv
zJ?Rv6frpVvnS6b!wz2F|9c#yp-l}$cs+Ar%l$9%Sn-hELAhJ?9P31BEF@=q~m!RVy
zHtlRJpXWyJ492}_>vK<=mo-of-}m8K{89Pl2|vzP_N%sx+}?DiSonn#@?~4)xUJpp
zcRuSmIu<AEW;VE2n&X7R5+53}8wn3zMQZEkauQIDtvUuZJ0Ei6PB=g1lzWbEVx?TS
zEI`EP-~qc#F1e;Wn#bl0=W660Q3~&piVcW4x~j`pIV@(qm~(9EmHItVs~zh0(H<4W
zgo$cMm}frK;443D`S@slfR18VE?bdNXui3XRalUCX_W`wwscK)>OCXrOlEF0Z+z-Y
z^EHpzHJ#4Bh_1Wx;zTv}WiLfgD{?qA`rXHr%DB=L9p~ybJu0F7?QfKCr+bAtJUWHk
z;%2Q8dERBEzh1<}%$A6;lf|?Wu^C5)AMuTp+FC^AdE7PBYbcX_e6_}YqyMP~Az`np
zAD&mLNKED4zoLucIiLJl?oP$*8(~j#;M=S($m&DO()yyzVi=t@(gU#7rcl$E<6f|p
zA65&Uw9+W(YhzgVh*ON+nM)0lqkY&04y|X~xcv3DHfqGlZPMnSdRIZ##B+CTS#28i
zv7%;M+vW!WoYCy}RZc&@R${trqeXh6!}jrO`qyq58nSC@hc@Xq^Iq!IqZ@8`xY3&t
zb9BM|^83`cBrw)37ay{o8lsS;>e=~J)6bu`V2|xfmzDS}rheJ^8PRb9P{gih1(9VI
zw;Jip4;L=@$Xi|gA|KtRvV?7Cio+q%!u3}?ZY5omRX^Un$3$Q}_V}^j<cilP)M*Ff
zO}lS*q|*&0t}#7%Zo^`H3Um1|n^ShG>KTFSSJvGNej0VUxJYf2v1#XCm!|vA9zD^d
zlze|Z?9Od*J7JbvYtvspJm}37A39um;$4%QyJ}~$jPFXR#3f^5``-zCQr~EJN8AmQ
zIwrwLEvsjgun$vSU_qgOUxP6v^=ww+ffu`9w3VNMy6~Cpyu@SGumX8nJC=QMad7Bn
zZ3q3OMFZCu&PQDDiCmA8e`h1JqMg6!+|hl9U$F6^x<cAo*o*Ew@V|DI!(?~t8~)`|
zZXw*!Cc0|l`E}I`_{5<a504KG<kg`LvyMG=61;miGpxGs<zUg38?0wftQ<SvPQRi4
zOb`buc)(Fkjr(!FYRRSIn2Pv?otn5+v5(GQ%ax#u%iX4=`Vtm>JqsO9bqlYkS)k>*
z)4U+ot#+ukWSNxul4unz^xCK^FZPGooQx_|+<wE@nk}xVNx~Us7`}(=t(Zw4U7##Y
zK8I3CZ|}1$v>P6(G9SyWzb7r}|H+6QB8q)Z8-V1$)*rzf)lwm{hJ|%Nz}#N*x#BXu
z%lDc>sG}QCstIRYAK|4fStx!Hmev~@9<ywhE&it00y)LLvi*_N!O005BsgwAWx$3$
z&QO&Ym$5=D?taCzveZuA?Q(jr!l*A#QUXVY`HAR{%XdK{H-yJX+b+^x;O?d5+wFgd
zQnquPswJL1MO20A12*x<T1?(~+PgP;15Ivlwx=_D9=Ue7=-JzC8$uZevz}S_?Y(=U
zzpOvRPh%tt(GYO3%4b16$H4mBH$_p{nz|c{^PA~f_R8F=SVgn@i8*hBo3mN;74_~5
zVqwht_b_a(pwuwln7P`_lHyqF;kDM0?ndt_UkAO9l@uwXI)Bb|LvrNV;9VnE6S7Z+
zcUHM`DY9O%8RMY6`T_me5^d;qiEXnxrrQU;`M5OCmOP3_dl;!+sxR6}=qniI@OiOK
z?Z_>;pd<rb<7zq!zGK?iX~G6v&O*n`*;p&8ooTFh^VkO{Ku=h#7wFiCmc3yRe}o|*
z@N7c`4}sb(f3<;}M90bQy=)CeIGb$hJqoXsOpxV=j2}PMTHN4FUp%<u!;02TJiN<_
zb#cc-Wygkuolfkl6J<Ziu99*5#ewwHw|XjPczjM8OLgrGKN(VX)?(GI<iyw|>H-6K
z%Pd@rb+as(xOT-|ms#a>G(%)}lF0V5+*tqJY426hRW5-WIU4o_>iT&x$g5Gw>}ur2
z5ZL!HtiQ(FORvr;e$h;ia3x15Ed8{=Ho4m?$NDcu1+2sh6@{`<*&J!SS9&9%>gK!k
z(3Ltp(!$TWu6;7}J+F6A_~E|S?~3?#CV7sskFuqUrsEzbwh=Uyi&t<Z?v1WbJi0>T
ztf;Bjny6&SO(JFY8!b|B0o($d%CH+8`h6c_T%IbX3p`H0u6A0g0L^vo%|+&A6wMJj
zqj%QB%09Im6ivw9ouBj}Tl*RE0JpiD^gFTls27$nI<rnkLqwu6{g&u<TJ}A(q9YuG
z-j+%@@10&@M*|K~afUoVttl%0*u&0jq`K0PF|O_fb%pMWyEnJPuO9Oc&}(HZ+U_JB
zltJg*IrMDM_&tkjoOXTaO>`^QE6A9JZJ5)@1!dj%=QmP|Vh`8o0f`pEy3ym}anDO4
z+FsZEA88C4KD)*cX3&exSX0nn@H)}B@!_iea19QR)we}ApE>kiDjLsaAF^<vdJ?<E
z<rf7*&m6E8g8KF933>O7sIL{i;~W)#%zy664sk}`h{M-fD{G(K>~RhXSd&<_U1^iY
z^&Y8KF6pj~6+x)=QAdK8q|>^+u$RAWw&`|Wuok!18~H4kH}ak0&Mh^vPAa)9mx|cv
zw$buAYgR52YTKmcV!|$%VG*{-Qo-;czj8J*L!LV`@Vv42n@u>|;k~OP>J17-k2%Ym
z4~&o6B3=6n8j6$U(O7HLrl%6k^^C3S*Jn}0dC@PnW4F0OQ1X{m7*dU)K4V{y%zZwG
z)8)DeL-@H!wp!){dEGi#OStuOZNYmRnu}v%77F?u)!w&x-;naXek{DMR7gO^fLC{i
zwh&*PWXJu;1ex4^Y+|=VV8U4ne}C7Z#sNAyPx-~Meu9p7<8T`}^vcgkLaCO7s>_a~
zcz&qQ!v>jbd!5IBzoe;Ob+7IN%4N{XrFO41Rvt;w8d@3@#aMm#pm*7m@K!|k6Z$7f
z#~e~GV>gK8rJb}pV;fb;PIr-S{~Nls+~;n+JJnuqw8uvdyJYZ$(fzVzEvL4RxfZ-`
zc{;`~fD}6BfIk08gzxR6*UR`g+SDrE9a<Ey``F#=v;|vCGgP=PaKj#45Z!TAY}cdX
zm{^&8mkszM0#@oWHkrlg6`ebK$Uc6JNnY;LTNlpS()Xy{uJYbJ5E^7`dik-QN#l#F
zp?soYrwrIyc?B;Ut}!Tuq|k`ppbOvVi1#{S&V{Y2Y13m3$-MozB#mc#Jv?FmDW&%(
zZq|G4-FL98pQaG+^Z1d71>YrG(FNi1;~~+^=q}af$U^;VMZ(<M`=2Bkg`aeB)`>_7
z$#TCQA=Yp`wM=6C)26d4>pG8I)39$XuCFRM8?yePgR-{Orj)U~CSg{eU`Ns9nAkTA
zlo#*jc6JI?vsC(cv4;nx-5YGSdFr%Z+TXY^%V>mSyNUfS3rq)1WC}J+vc=OVH8}fa
zPTKyp4O%x-pWfBa4Z0A&e2bAZ+ntCf>78!UjFs9*nn<2CPqx)8P7C~4O_^06!{MN!
z<%((@;(2BNlrNFFK0K595_N>#-p0Ber7c%6R$1Cy#>4JQg&v7!FOHR4tiH#^^n&Gk
zV@a)N<AEJ$%>c(6`PUH|*Y(~r#3LVhS+C=X*v`qCs?Qh$tzdg!C9~AzZRDPYYjFoE
zb&j3SFSD@8WsHw984<n2!b@%XT%Fh2!Ifj3RF;EVjKHQxy_!Q0h7Rt1F8`wV0p?lm
zY4(#>&+K5;gFSh$<P6{GwVyiPfBc9aerLUl&h}A9ZfZxcjTIZ#=+*m*2hq7>OVT+<
zw=$HzVuD4#{>X3GDKBgs=jbV!X92fVr)tV#@7?x5Kkt?J<stp0rtY}~iwEyURUc=l
z%e+%jGW<GcN5tC_1<zBqb>bTqLUkq3J@@3Y8~eHS<IE26GH6TtcfCzxEh&=FXJX`w
zzUuZ?_Vzj1hw<<4FRk!DyA7**&D~PjChG9eG1wC9d3d7XRlIX=PhfJX_p=_@ss^T`
z)YyoU?R7WQD9#e%yraZ?*14mN!?tbe^g=%5R;6VuNC+J)gw?{L4%u3ze)N`Hb}KBB
zeqCWj<)Oirs-l3F^Qg6SbhyD=;!xxOjZsnP**(qm5ZN;;?k8eHoSuGKkYK1UKbubt
zP3BW0UKRuqB!=}Mf^s0_hXYYR$b{QX6Cya3rAB;&0=2*+HJq6hRu=CHF#x~e5Ft?^
z7*a$Sf<Yr-7*U83u`LJC2n0MMm6x%z!xNiC6H99X7u~dPkMxA-+cN++En!G+5pdCo
zPjK}Tu`~jKnEXUeeojDvKutbJO+6=$36w`oy`TJzAo>r9?My&}K!{A1n|zL%dOq1d
zYU=&uZ^R@#$VqsR6a5-Z4}t{vl!J0&GD#zb2x>W*5D%sV9!e`aYv8nmpao^|4rVw9
z17cfnBSB?B6+t-zVuNrnZJIb!0+>^Ts0a*&MnFX1NSG)B1wmpEFf<Yk5ka6~7-0;=
zNKgg@Re>{93nHk5*TzEx<sm!{ICH3zE!YOfOLT#fO)+UYe<E~9xG0P`m%>6qFt{jy
z4q#shjf4mbi@@OM??8uyBSf$cx>n8z5x6iMB_fR6@g=Mzl+Oz5Y|{pZARvg((}omA
z!jJ&DXi+o_1s8&#MNlwdI0D?~B@BL}Ax5;*2qEEvgbo=A2|*&t1Z7N#;c7uGK^;vM
z;vbJCfne__CMalzw}RmvECdm77$8xMAVLI<fx?kUgdj=~juF&?W56Y2ykNq~C!IvI
z|70L=6b6D82FK+JBOoX+AS4C}1|$qag8|NQAQn~xOD7YUDc)8PZ)fL9`sXXSiM%vt
zxRDr`2%rQs3JnuNibBwULWI!55Hwnt^!uN~O(JlpCC*-O;+wq#&fWoUigR?dva|SV
zEJQgl*Rh~Q5HMj3fE|t|eF_PoU?M_bYzSePFyPHOj)jbvP$*HjAlBX<fdq^J7ZO2#
z2|r1`%@uxxun<XBOu>%;+=&6SgeG#YFc8xJm>K>W{(nMC2n-yCK_CI`BVi&y$N=z#
zfuNom&MyET@%0ipDb;^K7NOB1B+^HTpkX4S7|0a)pfJLKj8H#e`oAfQ5MR;JKjDKZ
zI+_~D6dj>NM1f5D8GP_J2J%lZGEGoml^|jS2{0iHREMd7{d0=?@qv&uk~wqs6lb7-
zb|54KK?wm8!$44IG*A|P0$~XOv<mLS+2PIbFcb!f5k(1oNl_$C>L0@o2iJ27i$G8)
z6zB(nLI8CFiJa>IOwAl&rr>}fn5m_p0|*N{m<fV$AP}<vPgL*b3Otw`m=FfYBcNL#
zL`0|g8i^5tAy7X>$3UQk1+jK^P7XL{sGXCk4HN-Kh$2LQ%0c|{B`Q$l!SNZo=MPA!
zDdwIU%@lJZMS!#wo-;1U7CCV9>fag-Q5BynZl*XKDU2ZD2898P0o3KG(ftGt|9hiB
z5DkdAjs}g!0QN=!V?YRo7WqtQa1me&2+f5}fG%r~wFfsb!0d2NAUzW>jOoM^K~XqD
z2x{*}z&n^)La}yGD_c+wOR#drK^+NhHaMuIm4zh~JTu2T*kbKWaZocH&K?Tx(si%{
znitf@%ESTd;0CidGyjS}{|UQI5$NP3OwlJoRFpI~bHM0tnuLEs2~%{9z@Pvn0DA@n
zR8wGcPEN`nGYKR+damq%1cMg>whs{KFkw+NurY*TaEuV}DF9K0p0hqX>#`v%49vza
z>6m2w&lUP96pEr?7$IQL0p%RnM?mX`Vc^1Z;pHz3Q#3|M2pAJY;~I!QVDFO5>#v4D
z^fSzL2q+9NqJd2Zv?n-_O28a~gP%f3F#B`1NXZroSJHoiW)4_$!dGyUuL|-+2MKV7
z5M4Tgx_bJM2^oQgU{G+FD6lr|oNR1>4~onKLiFEE4SSaA!oV;B&J{2+VG)s8sw2rI
z*f;A73lRxY^y~U0r`cb24`vY{4fAz%1d?2Ke6xEYqOSm~3tx{O5ZVt6UkC|&48ZgC
zb$#GT`H}hv3=BET<O5yl2Wq1+qA(Od>es!ah>o(qoISLVC=9rszN(HPw-~-TcW9(2
zFxF<N4ivl}=p2O+0d9|3Fb~Y5AE_<`EOQ{)z5*TrM8l8NM~c8ifr<Q8eIOlwpgK|%
zLlUH4SO1}zM*^}(0CVc=>g3k{H>VyUf`QF8bqM$mF&#n}PIB>m)jI<9L#RUlnIQr^
ztY6nB=USpy+y?KU1(H>9kO}89@Ng4A9;zK=!uKqw;7U-^B7l@s;F2UtCgAG@j&zcf
zR*~pQ2bVdM1ePM0D&T#Y{0^L`6Fz1^PzVM5yOS@7@=au_1~8Eja3U^<j&mUL)J^sf
z-NZ!yuZk@K0(?4CueJkMm=?&UH2@BHc}0kzHqMnmECd|;ax*0&s4~G*pQYqybbWqC
z#=8(fdkb@W2MZI3!SvV#<#EndrZ`O{*%{&%c*iE1Hy|3Nq!@t_L=>>10Yn6NF^zyT
z2N<dbU?~C132@dMfdz0X*B4FJ<!A4?oQgbgI*H~DT<-fhz!=cObP6!!a}Kbr7T$&#
z$C}&rWJo|srVWY+bI9;jE=j%N{kHjZf?WfS9ZD!Yx+uBIps{M`b<^Wxhf}H(Omi}Z
z>=VzM>3vcyF3Ll&r0W^RAlAb|)Oj>=1x}Vwsck&@eiI(COXw~0aJE}?=dfhO+2#<D
z;S=&|%VkcQ^~5iSZmZxIZ@ib0oRF|}Id#xU!o`NY>*;J4A6>3TlM*hU=483_VBKno
zyzTE)A2w?}$z|<QG;8WRi$%#CKM?eI^kwXhcaV3dqBH|obM9y5+iXz1Q^p+a`Kc>X
zdL3U{aZaaICtadfZEvUFh|yr?ZT~}j3O=-*Iv0<#&|Q<cS00{C5qWrPKjXIGTdYi(
zELFw#IHfdR)OeR<Ki!-6KxivtsP%G=l^X&}Xeciq;#{@;@ymB~0ju9NZmNItvPy|X
z>18h4Jq=GT^c(B<v<WZ^t<eKqnGKg6Bu|zN%ct_QYIZ-yU9v~V1v;F)7_TtC`{RYk
zCDAp|J02$T%daWXj8TbQUH6dp-2#Q<`xIEI3&kYkHw-Z?HRB#G+_7CT5vd0&>_F-y
zDo8{m^c6XDKv&*wakz)OrQI9j{>bcJQ18vYe6*KwO5W-2BR5hOEj`j+O(_6-6`=3)
z=4Rc(#R8jy=_tn82e;&LtQ8BEOdCDBMF3Xd!`M$_SrE4-F_zytRaBQsP3*&(pru@f
z9zh2c43Q>M0anV~>(ygZ8#cZ`B>1XV-Y~n>64&l_H!^@KcjKy^7aWfGB$pJ99LSYs
zxQNdaE$n-#0;Sj|({{M+4F~(S<=&<P;<2yZzAe?lH|A{@^NI=ZqG(2M%&eAy_+~E+
z%Jp8+_wZo<>s7T&`HfdxcY5Zrva%36)bCoCy;z=V?6BNT3(ly`JkOd>zh2k`ky@Cu
zaJc&*ePZ=+qyGu|WYdPx(@782yl5?#+9_|#{}>t0+kaoZuWud8N494s9jIdI9PKkX
z1F7!j(FtrR{^C#9_F68rq!izAF4q@#!}Y}$E9V&W+p4>-s!lvI_T$)UtzxkHV!#ru
z>?D`1RLmDniPg{#Equk9L6NB51oPUJwXY{0Pa#Rg6=Ko6Eb?fPjG*n+5ew#hC+iap
zyj^eZp&Gt9+7=v}X(JGPuG4tYQBRY*O_x>ER^m(YC1McfdDo*wI+c>cRblanvX3Kf
zJvLnNIAlbT%?Bz+&Jea)VvZm(FeP??Y$Aolh+I9jSrAXm>Ic7x8)6}XdNNz&G?e*g
z9zhQZ*kCOjA?S&cvJ)Xl;2;5F28sefCEy>%09TR__y*!aGLELiC?rwE7L>!<E8~C>
zO8_Dl1p;PZha&ME6aj1r!HHWaW$Y|$Z~#O>EdtJV2Lyz+1a&9tpn)%*1RU|8W}y9&
z#MI0{Cb_2Zy-f<hp*^dB^3*i4Zi6#;)F#!rQ(vS{48<HUr17t@yF!1`YvZY*suRLj
zEd|*0UX<kKhOW81y@1U|LOi*xK9njqN~B6QVy$8+G+?KVs`X;U`fJzgjgPZe?YPpk
zS|g^r{smoB0wX&CUKW^DdB5p+M&<LmE!{e`3h!OFj~G19U04XsQCfFIy8eM_N>Oh8
z9-TWCFQ1)BIy4%U;Gk4JuD;}Mz7CzwJ@y7gHczTpgL2Uo94Te09TwKQ9_3Vfqg|6y
zQaX>{cz2@Sa+xNLi()G;pZ-bwvvaEhbS}L2f7r>8%T$kXr#+0<{b;sH{}MesE8;lI
z*|fyqmyZ@$SkW@QH&e~jJH0}-n@Vuw&HIsw_?Apl_iKzr6v20H6e!!g);td#q^Li)
zGEeJ_;Hxccdxm{GjcV<hWtaw%LZTR>FKvphA}C5pw1%A8e$pMy@hNIw%9%}m{d&lU
z$)&gARbipL>f&uJ?WhkryOyj>NJZP$xv<_3jz#Y}c2PL>4Xq32!FX{0ORLgxit&&R
z;aSL*wDwMEpr6ThI+i#=%YbYY?7(gm;0*!vWCm!<ir@xSh6u`H9dRUG;9Dd;L5gNr
zaHBAwen+q{60dG2uIYe;wedQ3R-{cX2teZ#3;pNCl3c-?6*BUL^9Mlrf*lbgjs<4c
zP5{!RZUS1=a&rU>qGD%`Cn+J|=5#Bt)y56NBLnv9;CLZ|>cj{)aUyu8CIURva<aF#
z!Pyd-4o+l%&sqj>s)0wu83sc~f$<}W?rf4`0Mudw5WfW}24sIgfk2Wy20w)+e;NUW
z1AGOMXwo0}NvtnI^eTfu60s1_y@+o`fcu2l3tEKu3QRY61qKVgk}80=A|Ti#4C)v`
zfV~cy1TYE-5%o3TVuuJ3OA*^K0{?=qXz-NioInE{O??7!GyyeujwCe%vLT2?MM*zJ
zfZQP#B@GOG8UgziyhZ^iP~aO1sCA^4z$Z~)0R;+9tPOt7P?yMU!|7xLa;*P_x&-1>
zh|l8av%2J|OQXwD8<v;`&DN;qyI5xKcg8YCQ6W|_NKj)XqqdpV$_p}S=9laDVQm>?
zju&#CUoM+WU6SK_=#aNJ5B05#nCBd(CPf?ZWlv3LcfSxe&k2A3P$cP2?z7i5qMhTN
z=RS=;me@076Ik5#DuO+{@lCR!Eo=vCZ?bc!Ut;ZUQwG+T>CF$DxpldooH=uAU-#wn
zsi%x&Z$|gkT}sw9l6p{ftSaJOLgjN2>xbjT9j_`*SFQ>xI~FT_foIoi%7<ND3Kgkg
zC+Nj;^rNkFKE<S-ix5BZT;om+@><9~)tfc!9Ainbc*D%MTMI+?u@BVZjgn>d7w*fB
zWU@Mcms?w%b@j`2tSuV`X^i5z2e_npDCIs5l(-+N+ugmIZ>TavMdI%IeVX{2QHO(4
zx8rx7m%o-?TWlmiv!W|OFZS{(zLi~WTQA8pK1m3<pY>#kziYy~jlvhYJ~ppDnE1#r
z-MrSzu{9z)&cEozlOpR&SWU@_JO)Ynn>CM%r3ndKU3uP+aP2j>1dscLd~9CvQJPC!
zlCtS;9EZu_8l~0SPP7?q*$j<I;a&S$at~rqYH&TpE7Ts*!Nr4{QcvnDB*Z=0-o@j&
zM{-ao^+bcb?WJWra`z7*<D50PYV7WxLdFhhj`BF}k;cjBX+Al1{MH2<Uh(@;4lT?z
zr5Ds+S@%V&8RBF0uSUDBPkvDz*kb)Iz+CW*k(}VgpqCsY??v?;RbRN4sYH(sKlQtv
zbbO6YY``1W+;{J?Jm2}hU@xzz+){Jr(qL!i$eUibbR+mq27cFM;|`yXi$CSqKazc0
zkfYG>RGqSSQ#!SKpJLyrhfPFTyY$mVt-GTsskX(hwWO%^v4y=A+bgs#7|YJp$+UC9
zmaX~_c5KqoKHUm;k2F?wxTtUVste0sGx5`(_TMd5xo5#Sj_a+@_O_q8pG}k2vaDL$
zO`lcFs?KV3L2toh|6}ZGo-16Ac*%{vbzYLy))TA7nZ`48-^j!Hwrk@;kyU(s?=QJV
zjd*$OmyzqK43F5hS@LnY{~bzJla4|;S`H326L{ga#S5Vh&Ctd(ey8FHJMY(DA6YZ-
zia?vb;FS3XiuPqylxK`jm%R1g%h5HyFxttYjg5g{(pR!jj*~@0<0RxNr^M=4?2i1m
z4{BuAbsQJT7p3sTh*B_7Bt)V1z14p+nAlq+>*U!J9{0%h+A)IDwblLSdDC|6dezLw
z*RarXlLt48!^bS|yURi;uNJ(lPCOY`eXIJ`wR2po2T?F%A8NY{n?(=&UAyl`4|f-9
zHJ-ueRX*~d;%in~ReL&gXD9f7ZITb{%DbQyeD53edv<bdqFS4(F0a9r?%nGhNF9pz
z=Y%ffga{9B*EBqJGpMxLc&TpcV!9ZJ>7DwQf$tY)ELycxJVGquTDishOC1;6Hwzm&
zzP=%7$#X@3H*vF5!t0fm2i(OQa}S1JR8)K`=2L!cm51{Rn7MJNrLV?zT@N!8mTt<+
zVSOBCxu+8wJxe=b%QiP(El-ZLu!g-bR1w?)_4cql8oxY#nT?|~ZfOZ~mPwYJ#?rJR
zzLK?_QH9NM&9~_YYPI2y)>hKpOKgEN_rKZT+QIPVo}5%;i4@i$sVav%8zqTFb@#!W
z>{1P~j4mXlDBG(Js(#vGtHq|xrX6p?30=^1Z>_CSr}e0#QgG;{P+3IHChg)=4=UVG
zA2Jry^=lA|dEJa}8+L`&Z0>1o;JJGkWAFlYC7u6?zb(tFF5h(}w@kMcpWDWx#;Yb8
zvg5>w)9z!{k)wUmgH<_$ESDu|A}g_YtmG1jWX~;>*P0qwJQvmXvYGLJVy|Dr>TD;y
z-A^%)>W*Xa@$#;e?o&3~Wo%V14W#Ukw9bLn`IufzPAU<v9IQNWCDNiN)6DY7rx!gp
zmfd_%PtSH}>oM`wtFnuG^XN7|vFpeVksEuz@9e?^C#Sk>!*vVqx4*o2MR|kxqXc7$
zsKHA5jrRI>&vYwFBsY5oneZFs-{ur`<}h>NbG0dazB8{%%iJ0}5-(yNviXL6QR?QU
z^&P5#5^4^rM_*^4VXI#zyX+a+CY0Kg&|R7!>*EMN^iHwwf{d+mOMO1Km|&g2M{dX5
zc8$~fI0s$t(_rWZ86W4Yuyc}4ML>EP?04rN3Wqlb7aGgpH&hSX4jA?*WcaiklR3SY
z16ib?%3O1AC;y4^BdAt|{FbHVOoH(c?%Sr<CEw=xF8#pZxo_X0)*YMa%M15y7Lk)P
zR9p1+v4G$yZ|;%6v)qO^wfoqY-IDXox{4G0;JWWE>df^_gs|9CVU0eCWWEF@z7_}V
z^!=IduIM#!J;%6k2}MA$6)1&gd?NOK=P%}cSU_{R_~CQH?ZLa|i;bbq)ca`$cnqnp
z-Bh3y_i25s;qgHXzi06%cZiGUzE$?Svk-1Hg&+8O{X$}&E<U+e;>i-RwMYnmxc5r&
zV?m02g?l-6eO$VK554=rtD^^2J_*0N+<3W7|3|ya`LN5-{-oz)AsZPMxj!ygT}&6N
z@y5d2E^qnZ!__qb??j~!A@h&qpMG@N=oD33Wd0iY4ll`m|GJe()FrFSsQWeVyOj2o
zR_&Iewq$`FOgntMg{JgFFLf`M&ZmldA0F@~KfV#r^@6LFuXmTEZ~j)+doFJ`c0ccB
z65Fr0F&e4Wx2vVlBWpimmlQ5-Nk+#aUW#;0VPvX&{@D+{Z^!(5!v^FBDHZKU6|4CD
zI&VR(X}HyEAE~tN4YUj|sO7I-X_J?XzDP%tR``J>+U$4+=KYeLt~HDMMzuaBp-UKZ
z)t@Pv8AKz_QH+WgmzfPx_qlh!sk0w(Df8xf^Uy3g-w{_^zmDekyInV_yxQBOab-(g
z>j<6Yb*_zt1<@$$>T4I6mTYhz6F+Bn<}uzswJ&JYrSZhg3-3f088^#IAJ2=D=vrOu
z9J~HafVcgo(KEaQlC5+x;r*jZUY&T(TMVmv29aDL?S_n%t9GqYVo=;3x+_E{+C6s9
zjqJ|nEupU&k*2o!I?l?+)fwzo7YO-E4AoxtaF(<)NzA+C(_8c`uHjg>mn=oSA7w;L
z7iGMJfz48vkfJSPOFulwI9!ypS72cPwN|mB&2Z74#iC=318u9b7QH-dy=%!GA5A7>
zSC-2s7YNWW9C-s5+?Jc6-zTA?pxC<zt@vbQ#~mT+rAJFrX;6u~k9(~tidX*RP$*a`
zbuSFT5bg2dWh<uESbEh}i!%o24zcd1Y{IPQqV(*H%e|SHr|Nvx)5AAFWZc_me@k1@
zkun!!eY;1#S-Eim^0jhrnD^vr*FApv*sv+waT8QTXq7|jop%bEC$E&{39MStU!iGs
z_mX@3hAlM7Qm&PwhrDYxiFX}+9aTN#6t+jJZh083y9>%)dMkYKZk^31E;d{eGsisN
zrkli+qr{$e#?Fv0!*HX;<<dhPHy=eXx-UQBue<K$^;64~sRkG_=;NBR7JZ}`ebM@9
zb#iV)$*~TEE48{8t>K!}_dJg#V^;S`^-8_(8n`aK;B=c^R#;b2ve0t{LFc<$k0p$}
z4aC|G#VU!}e&Bu*%IE{Xzs2XdbET(~<Zxsq^`-+Yls6Br50G@`HYgI6U%kaB+jPw_
zWi4NArc*sTSJu<YRl4^ZidNGcc^|#;plzH*U#Sf9lcxO!_j}sLgG8T+?eJM;5u<CZ
ze;NAZ8MKc2X<jNMo{dK8QG31@CeU<6!}W)zCzT8Yd{bMLt{WY_YI@0PlNo!w+7->P
z2MwNq3dYL|?AZm;-UjKE3CwY97nSU@4kixS1!9yM^jE2)>QAsCt9u!h3Yj{TFGD?&
zhr^Qc7W&YJK76@m=(6?d)l2nNBJZD4kWtQKV~*GdFVB2;C|JZ)Zi(`c)$5PNV<CdA
z66b>U&~9GQmU5+feJw{>9<rvqD!){n)i#XP8GVjtxk#wR&E26_jKmgPSqHm1`nvLM
zV@hb9SXm%TLzjf%Xs5!neb<?bdUqYoJ%endePM{|(dmlex<;2@yJmxt^G37^twUc~
z|AWxuPq#+CzO+`ppRkgEX7Fn}XH`y%H{Bc=5xwiwCrj`4x&@av^n09Ock!0u4BD|M
zWb6VI!`W`nqw8bMzpUcA{USVTo~56Q1*HXD-&?V)vlKd8oc9P9NID{|AMsyUHr`v4
zdhBQp{BC}Jy^2XZT3vh1a$kG<$JZHjj+BFx<#huc);nB7()w@gs5_bH+~tq(L+!_A
zycFGJv?__NQ0I!h>PDYq?CYDZOZp`oT1|)hlz#<Z<)tU{3aLvauIiJcel&<#>FoN2
zoC-;W>5m#h?1LW{u`pg*vu+DQE&8R6MAO!OPJMZmtlLKSqM!C;1+Go%xqU6+j=^4o
zPU*U)Zg*9>b86Bm@`hN4dm0zt@!1|tqwJDPb8tJ+?#I1mQMxjH?!v_NLuOXhrh#qP
z3o;r9XqZkxRF>+LCVX7jl*d8Y?-OsfTxQj&o#)wMR~n9tY_;wRPQMURYX%M5;}UA&
zFLFM}aihjHtvlz-OLRKU(Fz8~AK2Y8ZZ^L0{`IZbc7!q0K4&%WF6(<)ePnxkT*`LZ
zY=xEbLn>R!Up`mAt)=SeVW;cUu6zhxX7p-Z*Z#Y~r!1{cTw8jnOs%wR`_+C2+CcT1
z1^dJ<QwN6JXHxEDpq5^}-m}nDM6xv`hb=FXun)UO>I$|j*^awPovOVYcBtFRptT21
zcpf`YYHh()bYbC^4zqpqn~J%3tE3WxR;pSv?|(x3F@NhbNr8^GC26O5J#H^yiGSt&
zYS)X!*4fLSs2MPOeP~I*uD^ZS_3&1<?qkb$E?8A4I$ox48-19-&iD3m1@|t;1JZd0
z6{}a?3BY+|?KL0$IF!`G{AOd$vvsX%qEL>zYX@$|gm)|5OBFYkuJgk*tb08mAhd9O
zU8#Ke%l15HT_M|1>O1yKo(>s<<rs~&O*h%9O<N(-oSV8Zqe<mesN>nv7~fS=1zvaC
z4WH4)3(<)StunH{?KuX8C!+H1W;tOtGmS2NqOtt?jvEGR<Vy~0FKlCc_QBLpy0n=0
zW*KB)g-e=K8skA}<!&L`T%kA({kFzlZIf;2G~A(tg|}7>(}cQ@Kc_lvQ^E8}ztg!M
zSFla?X}LGNI=`a6vDNs{+E!+IS%%|Eakt^0@;gSQPkP2Z)NvFU2zOYde#~0AYUFA;
zkJM$ezK{F4?nU@rJA+inq^}xUX}s31>mXtEE&X(g_ix#IlqwIz(r^narG#9u-Yc|l
zh>>&MD!~&+1{P>UR6o5ExS=>i$EyCy3h_#@*vHG44=30!Ufq*-s!U%f-@b|`*r*U@
z(r;J2q|}|#HbG-N`;CzDvjE|4xm6O;8N*^hbw&4%wOSNr?OX(T?^Q&%<}sD^aWkqW
z=E}8?mnUJb^MA53FKq3<L92H-y`j~$1mR{bze9e9Z0}9ZQ;)TEDwjvv@2F?Jexcs?
z-Znm}>YCo0Fc}Jm{Z#he3zt71da2)`zQEa-pY|qaI;SDs)dvsPb9kM8$_d>d%2cjP
zi*v1_qkg{9mg^{zMizE<+o=#Hd7q}MEju_@WIF8I7Q10X3jPZI=(^Urx)49ZZ5Pch
zN6JS$fe1)4KrEmv4Pr2}N-q<raG_V?!6&?d!s+6c$|I-|ziaG4tyE|8&$!Dg*+3mL
zDGDp2oAm++hR+R4YitCnF)i4P{Ee%)g*kd<6LnMBxZ<E*iwyJhZ77a0ut6I*AV-Ik
zd=8|)=&m(mJM41iRKvyc4;mNqq{NL^q(=|cF`BWx3abu7Gwi2VF!6;}irKzp7|(+d
z3I|Unp1an5o@MVH>J<)sl=f(AnZSxR39EIF7&58pDIY185Aunt^p{4MokzV$91uEb
z9I4jVm2&%`qCPMETRMLR>T>FG#l1@iR<bIZH0OLBgL={#z4CV8DI#KTOLCptab+=+
z<+!XtiP!bi=gfgIX;$NmT6>aJ-l)B0t>=9r8`QI{BQhenM)hUxwysD*g4=naPxrH;
zSR5|KowywKBzgE@=v6DLtGw+O27C8Y3<f;%nZ@}C{82ydGv6k^O;=50!e>5xu;TRj
z+uwAagGeJ34g71s4+nBm5JDh}aoTrI`bq9QC%Vh0o#(`akQvT%D9G5J&3z91Vl&+5
z6H!lc7a)1K_IvJgArMELExs3Y$_U>8XYqgYjz4SZ#(gKAZ%otTV>zsTc<Fh5%h$Hb
zbONg}dQ#}OZ!AVPC7{)sj&GLu@Zq5w-QFd;cMXlB1%um_g3o1ZYB@V8j(ddccxHTq
zC%JdaohGJ^Q}5#+qTH`Ouu+Gym)9Oig*8xVWj(3C^x$gJh<nx1@`K|%H*2wl3$uqC
z?+w|$d)!=|G;ALInyY2}y1@SKW2c{6C`qNCrB`rAoE?vk3g;YhV|2XGe8nzMQ#!fj
zgrl98?rV=-yAqt+f_f8@H3S{`2)mj`EtIU=7O#JB<a&0<Xj!7Um-D;vCn*G@F0EH;
zTs8xHR-R4E5e=Zir^Gv6Tk2_ESYaS^X*BT~%&gJm=~kw&N2P}OW%#qZ4hMCsKDn(2
zZD^GCe|K}xKK{_bV+&KFLq&yjA~YgjquiKLOi?#V?`JUu6UK|hiWA#tENMrQID1du
zdSlh&vy$u3JNqb%zR${IK6x5<W!Cd7P&m)9aWkXxfsH&%cq>^4+Ii&THk%((Jy5pY
zXAw0oC2uP;&yw|8Pf?f8BP^Fh_!#pWvnOues>Dc@u-X2!eQ;&Cyal7pCYucdyj2!{
zyEg7)?9=LFR@P8voccR+zF^Vfrz@V4{t_?u*7|1pWL6Q-3-gWgjas62e7L-r>jIqS
zunLc=tO*l>Ykez2p-jPge_ubjWmGf)taMCth&3D6m|&C%YlE4e#`iu;Nl%qbdK19g
z^}6BQMwcs1qBprORt|Xe#^1V?`Lt0IKEfSwsp<aca{X3I$FDh%6o&s9-v7U1&RN0$
zLK9K{FTwy5*#l&}Ln`)N5Xl|F<Y9ong6XSx08tuE#si2xd+_yt5f1<!_o;XQh~@)N
zJJ<(D^m~)C2Bv->Kz8ltR|ugQzkpkx>>bGJ`uqzCwq||)^*QqZq}qO#wBM6JV3#9V
z1n;xU{INTgvG%<uec{f`)ym2j{QNRAZ7564^Zop`FTc8(XFWw3-=?j!Gz?59D?J0(
z#<K9&F}|%ITq3XW_HdKMdtSc4z`Cf8`pi?G%ARJPRil3Us`%5`c;-|0in4Q}p5^Cm
zvwJVt%zB+-iKx~C|BIKskS|wtyKy81-rt9B8{7%QEZS^%XuzZ6*2=NlDSnFHebF}s
zugC56@_aH@YfdrHUYBk&9uq2**=c0IaPy}}6nC#T?7{1NxUI<S-J^E#w9b*WlGH{^
zjDznE<u#0it!N+!(>4|!f7WwkHD7jog{~9KyUp9C)_o`XmJVa%f^(}j<mA*D>>krm
zE-|!eK8&tK3+ssNaw$BhoJrl`JC--hZ?=KDJu?5&lAYn|BN8>0yFM&?ybm6@&I@I^
zTcIGad81C&`RizU2vtjo2K0sUvfE-;qCCzoyxp2DvcUCVU3?$wGu~bOY`!irQ4%(*
z?u#+8%fOl33$WBjy-_9M?V1i7>J^vu(}-?qd-Z5wts<+b<Qd<{3jO+f-44zhGK1K<
z*QcEw3&}3Yv5@ZfdYZ4)FCle$XTQ1E^4#z@E%a}>KcGIkjUPG0L}L=ycj)$W%Bt0S
z9omyl1Ta->^}xK%yt{jVK~hgzWnE%$jrtSrm-j{tvo&^KjlPgn`=-X_HGSQ48)t))
zwh^|8Jz|XsX_>Z}HuQBI>Di{EHAAVA4II|BJ#_97o<}{iJQK6ay~4&gM>~ephHQr-
zhm3|UcPG_Jr|$1)IksKtV&RL7MwQ45<yGZZ%N5E|k*CG+_8e;Z$0bm1R{b5N1}E{V
zIo!hs+V^ji4*RIxKi<BuG@APp+O?<Up|AA1j|Q|Cm#OHTSsm3-(Nrv%*F(spw~@Tj
zR#FwKbZJ$kv)D2Pemj{)TbaftIVmnB%XK3xo*@TtnU3}PDNXyF7iS)BDsQNCrcbsA
z?eQ*tcff<CqVt_Ab8|_0f8(&P2mSsC>Ehi?Ct{*8G`23+`U00$ISVR{hh=c>b$OnC
zSi0-H(rdW^f~TL9enq-XZA?<N!60<RXk;v`XNwIkEhw$aHod-Arcu(;y}&td=ynf#
ziV;4==$7Xh&vTwW*#*1s(v3OgUI#ssJ;fV(5u;0o(ogmHR5{;iTg{CLN0o^d={>*h
zlDKzQ>(dyUok!9dkES(7JPrE@Y3NnKWd?`rXZ6T$QM1h49on<<VqgshCobM#_kcpK
z{@9u^3e)Y1sN*(A>o!%k%2izCe<|Q$UP;JO(lGQ)scl_n$&%OF)K2Lw7ro)sqT;bN
zVoUwk!6Z{Y_O-kb@j_k9{?y|6de?sYuzf?4n6}u|b&(c9mlk-qwikxK`83eltRucE
zDa*Hz?nV5Q(gjrZJ=sa&byPX|U8pRnyIm~@vZ;36>ujUq5VLO{eMOZh?fsx@vwGI^
zK3Xc*H`d-R()pcZZ(N@pYuNv296B0Zzn?$O+u&LG{;r$p^f_bC41&9N-wMtd9?Qud
zje7R}Lk|B?B-iaM;o(%ytYB7G%a)zpG^suZjK?`IwL=Z-cHTc?YrZqtx_(DmP?PQb
zB0KsFeENv<)ls{w9Qh;fTeVzi1Upha5W5R2UL<@p$%$ImzRo#iLvuxQ$EO$3Zv`2~
zWd}k$Sx?`=8`dcVZeDlz?Rb>OfwUFS9o7do^WOD!#qM}gkRr8OUr%m%ifop7gl@yY
z<|zHcPNC0nErW=-Yd4NLEbj5Qlaf)a<QVkeJRee}LJ*7G5^XMve#A!LHBx-13ty`)
zQq8oF78-+kB{;Bk1RBW|ad%Tjv7emaAzzA&qR8V%yFP9#@wO=o<(5OJ`txq5y_{#Y
zec?MSV~rfM?fKypjq5QTF)PCI_3VWW7p%$;y+M<Bu&Y%#V{NaWa2;J{3$N>vaT>wA
z_`W4;{ODVn`*=N6x)e4TsY=I*zhW6#<jI&^B6Cw-V3qYf%@5nwTnc!($s{~O_qv(n
zOH2fRdDS7yD)e5dhX>vWo!{`vUMw)|K-}Vm0Sr%f?%T{GFT4Nca?cCvGK?9P`Li7I
zzj-_$vlw$`!R-MamVCoAi<7Z*0{$v}g1XBeU^ktkiEml<)S|+7Ym134J$nmR9i>RY
z4&9~i3+-GNaQJbztRG>MSb{G+dxQR&PeqH>2g>)M|3}+f2gmL8S>7=-GdpHxX2(o%
z%*@Qp6f-k3Gc$9{%*>9NnZ2Fg%mXv?%<R^_@BVR1eRZ$YQkSHvK04>~ojW$D0ruOm
zK?FK9<!wL=@w?3>&j^2=3PgmdjQNt>XJpIyt6x++fI0`EV(wcbLrt$SCgjpjkgCPL
zMo4kLQ(K=lv{_x8byyz=_I2P=TmMDb@FjQi*cf;hQ!ZHlc0!m7U+ET&(`wQlTLuQ|
z%bznfD5AZcX?TM@UOynCd+=RRp|=Sbkg@_wuV|$EM{_~5{Fz+k!e`xkVO_C``lZzK
z*uL=8V2tjtmIOWO;dhjCHu(MkgXn>|6d>(@aJoWb={cN+oVY@x3AD)t>k6_VB)|`X
zUf(#kMT-a?(j)5tF77*Rfyg4CM8gQX*Uwxgq(ip&g@rxW;`)P!=pn@AmyCXTOX@l?
zK3a4r!mr?6;_D_3!iVt3U$47bc8Ku(NV@?#;zSaJT(wBtq2%wO{*Zm*vd9qpJ<(Sv
zZ?N6HUpXXyB7TD+k0l8cj@Kp$`W4qzKyWMs9^+fUPWg3?Av_^AbsyE9iZgPYsD4DV
zE{29ET!Pn(FtQ+!hIGOZO&MIKAj5SSn<=w`gnHQAp6eM$O*AQ<SsW}?2t9_BBs4=L
zZe-<(I#m!n)?rxJo=)QHVXUG(pIt1BIJTP91)&Qvdut5r*GfZ*){qQgI<jL!yb;3F
z5r#Mkwm2GlD6A+CGq%<UOG4t|->Su$f)_+C@pAi;Olg)x2E)coNtz<x$yDNT?cuP*
zsz_V+x#GmC$e;JY<$u1~eqCe~F{H+lwI)KbRml>!CL7qlG(>I<lqPGAqugg>N?iYb
znL}BF$mgs6goxfVq^?tmK;!#q+S$nAPT3h@opXB3<<7~Q=zg?tr|yo|8Dw3gG9z>6
zUaOw*9p>1~5jayk{Fvs20!H7?kttK=aJp3;u`)+nUGi{7axwj0&+FK}&F|jex`JOz
z9AhT=IQ|ap1=lO<BfLveC#QO*^Umo7_PrQg24VWqj1BK=7y4F+&4A0U$2AC^%+Pf0
zonhL5cHihJ=L4`u;CpyygwwBE)!@#-hdP(#_A{vmwAVDR{!Ig$TSWZPepeENL!=IJ
zgah81#iJ<{R~!6+d^eiQ@I(!Q&G4Zsj;vt_+sw5VJd0M}^$|W3ovU7+Q1V;BGeqw3
z;G6j6a-Pi2$xU6GI<GU@rficdJ)U^peq^j-oqC`<e(b`nY5JOz88w!AMs8)kua<=2
zxne2t-oN5Zfx>l1ED>Rfx0prwbGm2x?@S(8-hB&e6w${tX(C%!5ZMwkf6c1VC-nx^
zN+%5j&B>~<;Rx~@26zt?)ylt6zegq39$SK^k9<?Z%(p~39U)TV)fmTCC4Ohukl^o4
zv%u4k<u}1OlD!k_61&@pzInClwh!@r1KL5AAWFSN_Wz|D+ed&}+DAJ3#2=smizHNg
zA&G&dkLe?97)HaWX&8>AUjTUH6F#FKfl2)xol`$!_%}j0&-x{_a4rS%-T-KYr_R91
zxz#$W&aP}3)e~)3ba%-8di`6rDpGgP=K#T8^lkAI&YR2!^wyZpA^jfwE4&XJbq|>5
zSDjy{c4F4sk)BXG11dLYo}4;^$v5a-X*xYiH!!uM-q8fxmt&o2KEL=6vbR6pC@-e7
zZ#|yBS9D$OS$T4Q1`?q65=DHDMu*@NY1-yqZ#~H&5fMFo1tL<sL;VJNt?_L2W?iuc
z=Z(~zrZZ4IAgPO|42)BQ-x;AZNxzePbNUSD3DX^G(|_`D`G#JhVAD6+l^lW|`^NPF
z_KENr=QGfaF8rgnCh953XRLc~>z4W{(Gz8h#yuPpX~<`r|0ersiYO`B0f9ep>Y)C~
ztOHmU-of=NL9mi79erQCFc85Mxh|<EM0fnw0DsOzLm*wQ%+(No`&Bzw(-r3pPr<+y
zpen%xY#DzXVHe*EP?HM5`}VNw${_AL$HmN&WJi=Lv{<8Ux5t;q9ylI!i&UE^SHtXp
z>$l&>0(+tP+2Yz_{Iw-Kz8#k0L*@`SC(1?LJCDZe{ya65M`YSxU;Xgev%y;&L5veH
zTpc@IF#@Ei*&6B;#v=8u2MJ|GA~(76*K@vKg7mh4sVH+D46Ip#yTT4T?Ar`sGgT9@
zPY}`xRhLLGmq=C5M)Hz^%W^rr5+bLm{7}<11^dnB&&gX@oVbSD^t5QdK|$GDg)7@(
zZ74;twl}jTZE2Ip2-#kdWDT3RCq#OX6=#oEXm)=hDlXv$_HblRBm7Mbd9*B~`pT8(
znxYaZPQ^^XtQBYg8LReYH#|}piWJ?$MdVxIuVqr*tRaQp_n0ILdv=&V<h9SQ3$Vru
ziA^r<^5}@F3)T^%G=&`Y^A1APV~uSw@IkEHmC{p^%g2yP4LT_6J8;xR1PgoREjdzx
z4@;B{r`Z%aW@zR{sU>$i3$yHlhYn^UGwSSd!;=SVo{F*hj6i=ES`W=N)l~+`h*g1j
z?GN8$J8DM?QprIJ6%=3z(rJZZ2esLz6iY#ieh(DOQ{1PP^VeFdU7w&HmZ^R2T<Bkz
z@%T!H78gdepSy|XE?C}A&09(Xdp!#igdhFwr9jw+tK*q0e0oos9ekH2#72eIRz5|F
zvVoT+K+p9Tw`CTpMjFzEd45Ar$<0AA780HCyc02xO<qRSPvRv5;bCRUCZd`|<2zxd
z*ijNL^&I49Y~EX{yG7`Yqfk*PW+JVDxv@1@r7n{kB<*w6SYzSVu;N6F<kZY-wzFvE
z_fAbK#8meD4q7IrDZ)+7e3P;W`-r0TCR?}go^-+8X_Rp_`AxFO;MIesJfB!A;jjtc
zIZxXTPP*dl-B@1y`2xt_^nIIARa{XZYhUv4Oe=Xn2Cau)q@Jl(1C{t`Q?#vxx$N(O
ziDCWe=Y4^a%T)+pGdtapXd)dPG^khI%}NIYODXI+@MzKePeh7~uxhK=h_dZW&7aTC
z%?)l=h86~Y*H@Rz44d$)q?2V|S2YnnpIw^6Bu#;;Xdpb}p4mT1N3YKe!*5u{tk_2u
zU{M13+kNzs9_X6)=*IcfjNnQ;`XXh$94g}eq$0~R<tn4dW>b{oJ1@!e<oY4<j2l8V
zT8yHUFhv4b87~J|5dc^*Rv4|^n|APRvXknL=gw9n%btQxAUQ&{c}!f2b=H;^zSrnS
z0l`o*yiJpk@vJrDZXr<63+_5Px;o+B1ZLbUnYg7Or?Ft^ZwVlDF8uFpQd5d?CWNdg
z#oMY`Er#t0A`e3Ao#ty}1gFE+QslLiY=-g1utM=U@LJ_R%LmdkjSbt@JH?8z)VhPi
zVzok#`e&pN59c@BW5*#)H=rsjMWTqLzy5An(H$IMi4_sIgvI8>QBcW13N*#7FIC8G
z3>2uEA*OF^TR5M-pQ#jg5Ef(Eu$pCVAzliu#6e>!-Iu1`5_BpWy3CQN$R<}ITh$$3
zfoJWP3z6cfzICsR8JS!UHPseL?ALYMq{1z7+jI?}Hq1Na84AzSdvQU{OUE0|{c$1R
z>lOwxd+Hb3FI|dT5BVabJ05CKLL^d}r~u{rh|HYWblazjd0ZbD)k!o`7ihqeW|rVH
z45_>q7)3Oq=x*1r6(El_Hw!LJ?{b=Nhmt(2k=d3oW*?Z#S<CvhMw=mXS*b^3(|)*w
z0EJK@Yf1~PuH>kqZfd}QF}$S^4JNXr<0j5^UH*Os-z7c$*Y+%LYrm<Pr$q2f2Hl}E
zyFtd-@b~j5y{a+H*wwJQ5lo@NsPHg&CmidH*kZ+`>r_MaO^W~ydzl5JpIo-jEq2$b
zcejk}JH~tIHxNQ?N5Uet^AbgMGw|ceK-c=Ws`lu*@RQ%~)d~?ch~=%|$LNySyirhd
zq4Xz=q6Z+Wu1uR5VO5@=_U>c_>`BO0hlO49#xSFz_00x=S(>#&xT~ks<29LbBK1MZ
z+nWr~g><2Tg@F~c)<RDx1qJS<<2k?}6C`n=5+rb;q?0+2Q+8H;k8N?kF(VN{a}6~5
zr+EE_O!dMK6(eyh`=u~#hAb;sPm<+lES2R3IUdVH%Aj(SAP~4u2~_=yQdl?ZSFAuG
zpd|uYV;nU`E;ea$zyWnDiDLPF+>gvSsc$d|n#k%U`*G7mfRU4#LE?9K%?kBFRZtx1
zD??D6!WkuM2n{LQ_D0(@=^wi+)VTUP4cq&2-eposQWL7ie)fa*C<#Z@F;Rgvv!JAX
zR456M<w!0K*_+2@h0~wLe#q~zfN_&uX&AjoD9M0&=^sQHku3&qcaw6of!<Y6l6SY#
zKk~ApnP~%H0Am%bf}ys#7GenBy$Y?Y%aAm!YJ5qlJ=-1AURWz1h7&uKEkVEqYXmYC
z?O*KgFpL6}$8=VgVRd1R$8<Kx_JGLHk@A%IU47-5aAGBBGf}rDSfa_vfeFP?a5ZQo
z0|txYC^QmkL{?nSzv`YMc>UTPXv*$}3p4%r4R;^^MKi!;A#~SbIB18n-!edsyEuR`
z0*wuca;|l_qZZR2^1vRpfs`S%C#N9b312WqrcT6Ex~ztr)MW5JYD}CUE0L3ilaf+R
zTOzeT$i&E~;4v!v6pwlhz8xLM-?Z~uN7FI_y<p}VMn|KipsMYmER|XGWOG+!A$|5x
zGp0WOIDzxxWIVUHa3ZU7Cn><2m`>|zmd9o%#Q}M}ihYp#?D^S*7S~KF^kj{j-(WIw
zik1LiIU@Ob5C~Mti!!B?CO6GKwrMil_spGcjk_4~q80QK*~{!e1v_5LXx6VkPa%Ni
zC2+ZX#UM^vTEryj`b^JEMNB0MZBa(nT?RvnFuTvMuE9;*Dowz^LdCSpDRWvHjm3ML
zFk=8A41K;bT=PV<)?j#;5fBj45bzN%T419-!A1aG?$Z7mDJd;IFGf-Tt2XS-qv&yA
zWVV?htRn1M5EkD^QJq{34gnjuz=;$aP?r_(0|yB^UDZ-pPxYuIi7Z4pGOVWcsNi5%
zmk9I(RX9=pniC{i1ZeV<rh`zyMfSVSp#X5HKLorJyo_|nF&aL0g|9z{dy4i1ZV>$V
zFfN%EMgnCf7P%IA=aB%6nYz8Zva&H*l;l)gzlKid({@@m9cS@DC|14mRJnUgql=;C
zEQo21Ts11I)$qIHDB5N{3Vz#RT)I698;f!AWnFvFXQlNn8o|VK4bMuvJn!YnU}mE~
z-t7TR35aB0)W`U3BPL{E7XuZ!hEVK?CTu_0KnYxS<SGpf_}8N|!f$4HnW*@pIkK9L
z)g{9<T{~X>9074Ly7T_a%*???h6VY{QKs{hqp@b;VWDHgA%(f2p*ScgkLw+`5cW$I
zH|)aZyIP@7mrEEAe3B0A_|Dbp4wbE<7b0rNa!IzB346j=N0uz*$e_k!9A)u#sOWj<
z+8j{L=px1m6GLG}n;o~qd^h;^P@At5uXcMj<<h&LCqS+sFCK`}owE4}AcFK_`N;;X
zeVpTkpk8e?9O6F)Pf|2L2-oamwh6yZd@#y>H?3e^#4>0W0}p<XjhsB9UCLJXmy?Ss
z(KAQVUmtWfpSKAc=eH4Z{wTDl58A@UpK;kO2q+2BAa);=&A=qUvV*BEQSG$WlG%uL
zfu}Dv$(%9A;aj!)IO#W;tyivUHj1v(Y1q9=Ey|qabPs{pQx6Y{$jIgOqr{_+N6(CG
z4WqK@wVW`i(vIEH%PktolW_czGZG}WqVE+X<$WDR>>+VNS9d?0!6~J)+sq?X&W30*
z5*yBj+vV)^+bFDB+FMo<>EHqP9mKTvgyq)#&cI^+_PTNn9u2=s0u{oG&WA+NXGM1u
z=EMu@N(Hc%R|hq{I}Y3v(g)0Zr~0$?C?#v`12waeT$u)WabmmEyW|73hD&UN{N3kj
zW8+y$|6|_e7V4wR<BcBf)+haY&!e@)L>IUP;mlh-TnqL^f}He0AbmX#HoV4f9sFzj
z#VylRhF<@JTXgBKz-B27{*D}`y|^zim@w!C{B~+J;9f_rFz`Vi4SjwdKf|-Edvjx?
zY#O&2QiK67<E}v2$Lh8rRSQ)F@mU0jD+0)*O>P;b8h%D`b|8cAF>AdaWJmtM(x(;4
zkSrNg&NZhEqB>IxJ-1sn?`?%!wjO*y?C_4h_%SSCnw6Fv&Oysj4^~@VD_sGB@$g*$
zK5<+54r50pSf0Pqc%f|2gmqFa=#lCdZnOinUM_ZOG!t!~vjW5lw<I5O>C-_3?DW!7
zu}dC{f}MpZC3Z|u6m|=(jA*Z!D?e0se-8NNaSW82>O>J`BsyhfK}Ei;GK{xH4XxGU
z&(-;_v~yfzwnaEW)HsEdk@cwa0guxA#5+XY?}ip&o3eG=*U*!LQcXA4bcNP(aYbK;
zqR!nr*51>KoMu_zit-mWLOOxz+tX@sFGZlaL^d?$J2huG`3f(H4x@#YD(lkUr2Jzs
zb%bJsGO@M7823e?;Ywx@ajl9eoZt(d_=_2SRsVL(AUD#kDu|DjFFsq<zi$@%K^(>2
z1AJDgh+eKxyS8xhR9`+(K3L##Iv+7WHES5nS*|`1En7FzQdP9)AP;ed`1X<Y{n=IL
z=ejY`hQYmh6_IB1nQFD=n*PYwy^hT9`E@Onl~H@hTjj2;iMT_?ARFp#+D16lacbs7
zDtbcfi!2V(DtD_6V+8fuk9AHBr46B2dP4_PbL9tZ*?C8b)R|{mwlxsqW{(c_)EanO
z>EwLXQe~uJ9-U#k4?L!Wj+NNuP4+LIq`&p4!43JU<?`cX{YOO;IfRTuo|g}A#6mOb
z<wZhMEo9)~#2W{~kliL!Qj8lUXoYCfXsNO#&C--)!^6hN1{O?&s^dC=4i`&3O<(dQ
z?`f2>R4ZH?pYF>AO=ExO3Vx{8P@jU`)Z{y@v6jkzXav+bX^I&Qi64Z=gB#gs-KQb_
zC{|9U3?5UDO%!*alz&{wy|u&|Ie3AY?;Yx-)J1@+CDAI&8${EUg<2w}X`O#sTL9WL
z*BM}9Z%^oQ@-983RFfV*_<4U3{v&ydKuFQ?xXdNCE{Reo)9BK<+u4X)<)@B}7WO;R
z>MUhg+B?TsV&h5sy+mf6Y@|9~I4vzmy;@?M)aW@^lSzLRmh1(Qd^7lO>N$%<$s#IJ
zlT6aBVR*OYa8w+{F4IS;ZS$no>VZ1*95heroZ7L*RXgQ+b$??g)~VTc-acusiKBb`
zA#NG(^K_+Ow950HVfy*lHcOX}9mh>~1PEZ}j#S14$EsEl{cmbHb<S9uQZ=k-x7(oW
zvCxW{24WFR7VA}Sc44F8dq_4xqtRrM@*T3hO%NU=R$7fl<(SvSgtVN^Uo6s)2Wq9|
zGQ(^kqhtp+C7x(%uJ$8%&Qg7?pfF0`d-vJFOj6z4{1WqJB#I5XFP3WZ6;V*dvJK;F
zmBQ|ZH1hXi9zaE8M=!sHLZk5v(3|AODV~s6kx)vdxlzoJEW~v(U#a=I8ijpw6xLIo
zqZSH%jY@ArrGNrp`3uWK$<LYRuw(h+Zboi%WXlmPr)n&9PMWjV^NWd0qVseD^5>Ki
zS<p-lsAa|y?xAVYC9KFxmD`nzif<CwX3OZ5;v<AiGvb;vmlUPYkJ$+t1=b|S#_R}^
zWV`Jad1ckC%gj?ZhZQQ1M^)_Sjh^cik7rU(DXEZQP!~B0?Na%_K`Y&TSH`~+DIPH}
z|4Bl|f4nB3<|$X{QZFh^)pd=PRLm_^LR}uMz!qzt_{{m*v7B00%@&WGKq-Yvv35#@
zEvYmwBY#b1kS{$0`Rrp4{nklUIa!wG#CoCRR;6u}6ZiqU&`IO-Te8F<s<QS(TH#Q#
zoX@7A<dve~5<8r0p~jyfAN|E_Iq~qjC+UJVoQsaa2>$K9Tp%vjg{Rg6!MQUBUoeV}
z%5k@X%6ltU*{Qq4Ws~-a_9==SCtkL{4eMK?MTs*EZaNe&b>YEhBXqfikL2jWhkLm;
zZ~5oXX3|MXQk%$8;JBuC|H8I(YOA_gdY{+UoXPB&f-_p>MP?6ai9(rjU{8EZr=rEk
zuy8Z_E$)2h$g+fjSsmZ|{I^e0`SjJF*z?IkomB2+l`h11vf6<_tu-E*zz&*9!H~?<
zYoFSFQJ!3D`P3_#N!=KwZcZD9w`A0}iqx$q+|ydPSIWH+;#=;=9Uh7rZdH5f@VB%-
zl$vcwvD-3cu4zR)5>hRSZn@*!%sbgZhuh}WA@~qz`e)kcXp}2AWt27KPDN~3)Z3F)
zjONWhBLuqBnOwT%&6R?d7V1~xYSl$%ulIh<SCy}Ob+|}8Sf8SLKoESaG_F?E71>l<
z>sWOq?l*2ir@zW6Bwxy4SGuHX2j*+IA1}5yPdW{KMoK$*RvxHX6b!5)Sa4;2g{f?N
zuXakCmpw9@9lH0iPn-W-`X28~UKVvIm^@(C7HF1LTfp~h-nFk@;U3L9(V*~bBU`SO
zX~>n88HO4-^1+dodxwKzi_d+1sPxWpxKNeXKa?weY?9SWMtR26kk7;U{pvC0nnUpT
zgu6S9Wuk@MKwti+(*#P%-5JZiy6ynr#aw%o`kT_^oO|)D<C2LkXLY`p*XeNZ<JLZW
zd{Xoy0jHXa*wVNbSWC51nZx&RejAtdR*>?+2XmI13rDd1)gH;7@jemg<}MU%51Vaz
zpiTJ8Y_E1Lm+sOQE~U<{lm77ysrT;cr?O2drR;Q8-TH1S#>Z%IAEP?WNM(o&<+Owb
zxXekniLmO@6?qF$6ZP}U0~zZHE!FyVyt3Po8oBdFvgF^;*0oe98!wqlZ*Yb1%1&Hx
z>`q)V?^y_*ZEsoc6FP|4mGz35bu|^_8|WKkpRE-eWV67m<`rG>OKbVE@;GkNc&m9-
zyUru!G|}uOZGB$feBAKz%(<V@4MXDUSIQcL9o=8|2WO}#Ho>M6lA4j~HVqJOr(SW^
z2q_-DO|!V|=5^uZ4XMfX8%v<*;3gM>zy}_u{VFHx&GOl#(uc)DNMxLX4*8hAyj+B?
zptO?Hkc)r7DI=UkPI`eZ3Pjy&89Xoz+u(>Vey3))UrcJQEqiMctOF$V4N-1*udPPv
z_IkHj1zaarM~{u0-}Z$wdM*5mAr38$1w(t)RY&RU?|A&HE%IY*OdXs<sNimHKN0+&
z`ltV^D~1t3Lj8wZ;vdn|KPHwxg<t<;mRns&Rz_ByS{zX8jTVp=b+Xj6|JP<T20~h<
zKVT~cfE$hdKV-Q7^!~|)#tuM7{KJO!XPf_EL;HJ%8vt<qdxo18;HCS!361Fw!RRjs
z%Rd`w{=a6po#JF{`WRq?E}zjhfuP_EX<-o|mHdF!5nw@0)10Y8^=3@v3(tLhx_=cC
z%2denXT3PRtxB94wLcnvBbYgL0a@0|rS6{zUox9iuM9j;3M%syL#0SFW%R9_@r)*I
z*fWf!P;;)}kb9GdvQ(TBao~}<k7?nW=G!iQM6+2;&wNa}A+QNPJc+}mx5a``)2_Jj
zL0X373H?&&lS5gci~`v%h`3B3MXI0KS_OTumEpVP$}2%SK7o&$sfPX_Ag;X`lEqWM
zXE*X{A*Y{I5^p?5`P5WLBk4fejCWuYAwax<wJQ6z^3*kY3&TZ^SVIBqOWt0QIpUG_
z^{MK&@Nx-F^_08M5yBE~t{Cp3Xe+wvKBZWE8FO8!6<>&MTb;_UAEEr$Ig?yj<_&%a
z4)elKF6Qf8{#jZ>T`x#89YsJhrs6EHp@U;-gTnPKMJcBEiw%V8ccG%ya$0HToXOVH
zkG@x<nA~uyaOP{QG-#XbG)t{xdFjq=oMusM)DJM~pT`3K^>Y0StnxoPr2bsO{|6E7
ze|aVUE9cg~3UC9AmH#Qg4WM`atGno*qUHbO+yXc&0X;<<F$rNAJ==eT%>UWY^)HX_
zKfAd8x_AH0#r2od>i?CC>z`8Hf9zO)0?}H8fT%MdYYnih{qNepO$Trn00C}57W_~9
z_jvc8=rkK3C(Q<Ma{V;~=raQ*vH}2e>;RG%D<F-{2?%%pnfF%%@X7$m?mq^xKj^Z*
zZ3c*O|2eEbdthYvE1=ExCxOiZ;6AVdkY50#)W0n`7yxWiK$aV@?oVR^AnUaNTm3Z^
zkmUw+IRHG@zlQ(hxdBrES?)h!ZDs)2h?$cOkmm-R?VmleGXU5>0Ok!~`KJMXjK9+3
ztbp_Q=TJAm%Jz?|;{Pht&G@e!YEG(9iV91MU0+E!&0zy=XZTp5;6uLspin=hiAM$(
zL_`Bs1Q+xrgyKMVQnN4;)3Blr3HyO-TO!m5O_fJDO0k%;JD;Si^{vvOo&K`m^ix*&
zwQcdU+psdjV~W=#`||8^%_AEJ$;4tTR5-7z%iA|sj;ndPps?#J(k(M=7I7orOM~!f
zy_uh=EqJTs(G(Pju-~ZYk8m(C4K7u>4>{|#&#j51?T^`qk0u={aN7)V8<Tf0!|1(z
zkd;KCF3+0AJ+WuR?}LlSF01R$u%Bb*oA{emUHvTD#Jv2o?vZyl^U@;>C(QD2Y2yBT
zGTB+?(yXqgL<*`y@?MMja2EA(vvCJFamcJISfLVX*anhu#wX(;;xa$^`b(g`%ZQIM
zkeD^7M-Ij$6i6^)GY>^!>Wyn3<xZ({5T!;kMu?x{8)E8Z7Q?0BW0%6g-`yibUOh6<
zF`LKTtEK%S*?XI3@ef9;;^SprOG!Y>GZ`~_19|arV<VO8CZq%a1^ZLXH79GMGwKg<
zIY<wOuQO1Vf=|afr{2RA9I&kk*+X!~;dV4zax<veBXA~L?JhEsZns74vobIaCn9&r
z?BZJ~GY}5Pp6xy~zpy95>^o{AVvGmc=4xVMOipYQwE&y}7y}c#522i~P7{plm^jWx
zV-ZvPQDH%$uz76hy6Lh5*mk*34V95rY3CwqLvdMg-y}K<v{qHHsbpgDlbgoA89^9{
z!y3Yrp6{&ho#Icqe?L*1mzt$+QE{RV*CfvVszXtoG`Gp@L2%|J%P3Z>V5c!-bZC@p
zs&c&v>MkQFE63<fWj@5W>s*WyBRl}7>qmwP@{sE#kQV}0|HT-oC|@$-xUagy2;dk&
zJDX?_TY4?6D`n;zz6ai==7urhr=KFX<gxEdG9h{cVck6`?kLX@iuxcEf<TpWK3`U|
zt+{(XAHBUo(GYmeFxN$V8X{?nE#D+#NF^C~jLv=sQr|Yjd`bxd8<@s#z{uq-&G<TU
z(X+9{xaSM<5rMr55=@nCAc(Oug>eX4s;Ax;Q3Gc<S-iajj{JKMXMi!kGkG$z;sL1p
zUG8us3@L7`-r8s#$kAWSxF`0Cux&Fxg(6#rS6&;YLTpD%<XjK<iDW1srfdXu+_x-E
zNag2wDF;L&-nU=!NQ(xtwG5?FLkg8F2!ryEU(98Bj)bRowZD`NJbwA{3YlX-B5oR}
z05UoazNbg9BJ7jgwbGBq4DWtmyLGaK!I5^pJL1JN)E#p{!*BZr9CN#du0WbdCxKTC
z?^m-YWj-Q0S0NWe&Y9ny=!yXu<z`FJX=l4l?--;0td1~vFSUfYc)P-!dJ|fx@<=o_
z&av%V0?c|8ZO-_y%9>p)u~(8rR}?<^Ll#!CS<q<bc(i^&N2yn+XU8FH&KnWh4oWRI
z{i>gseFi<MH|suF1u-59Us4{XzKH}RwBYbLXoFxby2RsQ)QoFS#PN)xeX~IMmv<&H
zj~i{DFUJp>eW>UslcZMc)M=0S0Ptx&|5E%TDnF3IpD!9IepDH{s|{xHBf@QFtOhS2
z!O)6xkJCuxdBheAqeCsxz3{;`jkn0iaSp_U$VK?6Zp7&nI_|U*PywNcnuv~EAC8^|
z)*vhb2EAqvl%|HsR+JOk9-%iQk#WQrsVQX+7RXVtZsdJk@DShV*Io{?%-fW0`#ilo
zzWt<g$*NvmQhvVKOx9B%s|d&BcGj9W$I;R5D)#oTh{pP?d6AoVD?!?;v&7H0$%TLk
z-dDZL^FFy|@K8ECODSoT+Xi!7UJ>N@a+345pVbyQ&%I#+N%Q5sslm(8U~@fMnvk>o
z%J9O7kh~@iVPs?os)Uxdt*R=xk5lId*hpfl$<vV{4OJ!32)f(&9N%!4{Fwe)vkEkB
z&;HRO-7hB24-L};JtH(cHJMH*nPlbsM=-K;JkW)t8BaOGnTkGFKlL1(Hb<5KR-fY<
zesO5o%d%B1nG{OXAtz(a<%_EjIhC;Y(h$y%dt!3Sn7R-=M_k^8tMtpluiPQ>Po+nr
zoGoJ0k?`CCzp30+!$4@O-86%tQ*Z9+5$=R|I+LGEN}+MwG@pv4DK$?uc?bK~IOHmO
zOieMuRz;$1A-no|^_#v`-Z46!S(}@iZ$AIJ!}Qj;X1NN^DZd!NkH}Z2gx4zyBx;Cf
z?QdLHaF`Uf&I08_tkIJ^t9VK&EeBE4RQ?iP+43FMd-AM}DINGUC&W&5OX8FflYqau
zB900=wY!Xq;bnuuae#_vUJyZ#rHQw>KuxRd0ISVWe}y@i3pdPNQ{vUYr?7{JH>|j6
zJBd{Wqh!n;WE2SP+{p&I4f`qGHxN>;s|&=%xdER{H;w{~hZKb2cj=LR67G$_d9U@Y
zG}Z4@_*I=U)LA~Yw-x=Sb|gW1JJ2Xg?@;x7uM$S>R6)rZ$|;gmSC$H0mNiUK=3y1?
ztM)?W;blLefoRo9z5Ui*CCgkDJBmFAiub6dZ5Jl64W*2^!uqIp4_psA1?h2bqBjJE
z5u^iKo<4llQ6omE!Q--4hU$4+zx`BuRsGI9o41x!K=s>+7ykKgt<t`GTd5B2CX-Ne
zMyBsodHng*)6jY>_`jG&Nqpj)fSV*y%eNb+KV)IQ9a*?5n<i6&1~)%sOn)~`vi{=O
zd^O?7(u+TPdgR|9fS(Oi%aZSBMfHO%6<iMPJixL}4w_WaKQo>&WZG<&-TD~Zm^|di
zS8s!9ngIuNX3;M-E6`uUQ<b5Ov3t!iL`}BPthdk^zk_l1lbWjX&G@x{{pE8~km%n7
zrs)FTda{N%ojOqfnMn$_ls3Vw)1v@9rP2lXSMvn%C{G&$)%3K3fASr#WKNzdFsc|O
zX$RF&Ug-2dgZvaevy`$*FXMfLg1XCpZEfp^vgt(IX+{ku#AsFTeKlQH8HchWgi4OJ
z+r~PI-0v<OFov%gFH%DRQB$t#Q5k0}iu+#W^vs@FUq)hDd^6qTnRah>w^prL44hrw
z&h(pB7K3$(SH)+t*>->A=(!5rbnMH6@Q~2Z!*L0|mK=E-XVj^Vi`JeUWZqAv)t-9I
zhxJ*aLTz>EYu64nn$i!dTiDq;&B<Y~wrt`*&I)kVksc}&$OfrGhDbaH%P}0-+S10i
z_~Jp`kTmakv)07zz(=cBF5E>!b(=mQMk9}`Ta7^MkgzCTw1PKp=hbqBEkxPGnDjmH
zV#_g1QduUo2fS7bt4<#E*prKnPqqAh)=3k55^*uEzoABKILr-s5IG4(3UL48Js^xZ
zi9J}0P2$bs_fUwMgcQio=>b2;xf48li+QbB<Lg-`0bTCXysS6_>9+?|T<_K?z?43T
z`JL7hTR}M%N8NY__S=x8T60p@Pe+SKAlj}JtMdzLC0OmXxD0=@Av@d7e(we$dPl`9
ziy>|v81wjoj=H=8F!ATA0E&A{tOd)Q$y5eZPxE4(VB->6YHp!~a$vd8My7qT%v`2K
zc>SdSlNkC&WEOcG?+R%*IzI22GgPk?IW(57*P?TWJjj|V$~!Z=mS0qQUv5V2VFmZ4
zoRfbO0NXn}dYX&<4tk27$BIep?noK0<*j+zuY*SKelO#;`@%)qQNX*fgP0nh^39gI
zF_|*?{dYtZjTEfvT9{R39`O;ypw%~m59~7?FTN;CzrNtOoc?2t_Y|TH&(0%;lhv26
z@G9(r3lHH&5f4rXw*tt?4Af!}hPyXyTc7JnYegRjv&25cGU)oQWf54SuQT2zLHYhK
zz={c;9uXADBE(z2$0u3SjVOZ+!i^bQyi76D<Kh!*&5>d#Ig4x9SI^%+s@w_Av=kcr
zC@9p{{5}$WnnE57Y-+cuU=QPECn-X{m_zs%gWQ8{JeZ-`ne0{_unF;MuGn3F@rNe6
zZ_CD9T?U)07y8*kbJ<ZnXihorHJkE7@Cc)U{ZnZpHcY(*wy?fgw8jXYS!7gW`aJE3
zuH3a>MqhKAWSF2K;z`TdQ)dTiL2kLdi6y*4Vt@R@Kw>p{Z~~SIEjpT@<RP8&K2L;B
z0d(4ocDWjCsxf4<O(>!p%IkR?CG=H)dD~kr%Mr1$n<tR75W9<CU0tR`v50vxvnmZ%
zPK0@~4g@$NiG`+Ol$H7LieLL1cO#-uVqGLlJV7!R>Q1&(?YZ9Z#A}(2o~-i@(QZ?0
zrMe}lfMWko4`yYHwhXtyMr0Tx+AXazPMsQ(Zz2|08R{zpBgL7iA9?78K`0f5`iRT(
z#1sY&=L4+YFm0fOJibbc;&HheM>fh2T<zk@p{$13F|}PKDlI3A=7UV@()7!5WO0Ew
z6;=Jzng`#eKjc^IYg9jPT8{kHHI|x)^3I(g`S#QIxhG69WS3UXhq!XoCpck)z~u2$
zAWHM*abx;^=XBK=P(RX)ftimvf#-GTBk?nP;j{kpQyO>b+fJ8~8qc75=_=Xyp1(k|
zxwfUI^o1g1!1no>p&I^H%ge+FQ}^5XNSwg!Nm5JwEtO-Spg_^8vn3X{^<A;S`o_}d
z)9ps(BiE)dSW~J<@@6O@sLnO1h>hQ)d+8+valPkY-88GEhVr+XVwmX9lc?2S7>X6}
zzE>Fmf%~wH&2N(jVq@^m!A*X^)L$CrBpWcDQpVD&wOVZS#o!hUuq<vAy%F&<kHuo9
zH^+hCBeq&CY_>bmXPWUiI)pqGQie@1mC3*HNLNN$zNgGr#p1pdI8qw+b~G$pGvE<D
zkSW*P3TSWIV<=?p>ks}`ayk=%Kj{iyvh^BF99fh2(M56_o<T$rxaV=xZE)>yvKJ<-
z?}vw=>Ye>O;YY7F=?iv>4yZ~rm*SDGA^fNoNr$}e38F=h*DSPD)&{)*#%feSMPp;g
zXwG0<B>@I_#SzMEJ}%f5cvDi)S;7><*#0mZha24diz-?TP4aR$NRh^^tSmTu>w;f4
zAvL}-tT5^>K-$gHy)PqmhMK=Uw~yQM_N?dYH}X?l;1a#r2V*!KY9Nyrfjn0;uk|S?
z52?rQsvfAT&PL5~`eKv^1d=D9oD4WLfAV)H(2o<S%?2mbCizmOEt@}o2bYnilj-t&
z9l@w}&|n~OxWcq%erWb;>!Qn%nwI7}f#bQT1Z3wEBOJclDV|Y_B5l)1rGReNFW@Z3
z$l8-w%whbd^Mf5;ZYzkfbTR=wdv>(h#5V<TFfya=o#~?j*NS7J2y^eJ$;0d%jM%~W
zVLn(WB?6Ih6-envknPIVKnSZESd>Q?1Og2GJ~^N`sQLK>p&W(nqRn;3#>=-38F%!g
z3&JgEk<|4h63JdL-B4xp!THTaA?vg4_^8#L?4>hSqkovEH=aOJOZ8&x)weUN-l{F-
zXO~SJMTHM{C;44`JM1=xb>6*Afe^e_X;hy9U%PO~T4?DjdwKztmf<R-TeQqg7U8AQ
zg<G!a7<#<f?O_qEJ?F#aJqv&KMqSjz%_dpvt`CqM8k@p@mB|<ZT)TfL5C1z=S4ddm
zyRzheN*h}m{%d8I7EqOs5fC#5s3nZ-0Eiu+2;U!3=5L5jK%EvqLinE;o&O<b%)|*$
zZvQ2tlaT?Siv1nZ2@p>HF7W<A)c=>1F+kG!$NBwF89z1vhwxvejJ*=%r1k_*0|72%
zV**gG5E(LNltg*cIrKJNH0ru)*3>p%HYkbFS)cCs=Zd~OYQ>lP&DO^Z5M~U1eD<A&
z5*@1)O|jh(=lj|TtCS6!^zB`J?ZYAe%9qB*K-$TLi|;&5PF=N~E#u0!N=gqZRc=bZ
zUk&u8v~<`f&NS4#nqWPC{jPsUGrnDT`EAnD<Wu)L(R8v)k+JYf6cMB_Wz_Am@o9rv
zMb%Xb&3`b^q@||fLev$NH~d~x5>~+n3*WALS0x`Y+2&%T)Nm<3a7xG%Qb;k{;Y0q(
z&)-rK)Ss@fwANp?y3p%NBPv_9e1ASt-UTg+V5PTK=k8$VSvhlW86{KS?2nHG79UD+
zHW}%RRoM(dE$aWSv{Po}^FkLq#XMC!2z|^cYO9UWWYN@W)x3}nH%J#?N;D|-$PQ(x
z^rjHaao+}_)&E?+cqsqjvGyXEii0Y`ln#qJlof{=7ZZR?;fq>82xm4d04|w}+U9GW
zqj7|u_D#X`SRz+M6j7aI6m^aCzisod-%^Q4IsFe1p~}Emf|I8E;-h#-U8h~eht=av
z{D>p-K@qSKpzvAnk?k4x^HR)GO8?fxM`Ggi{q_9+G$K+V$*=@iO3FWzh6iH8i7<rd
z!Zcx9DgIhPN{SH~j0)HhC16MLJf=Jaf4<>5=Q?IraPRAY|DaG<FlFPwc|iNlWuI+i
z5XR2l;=xdplgc7L&WAQUdtJrZ(Aez!gzc!$TfCz;K7_j}xDSvefsM_7y)FOZh5!Gd
zUjMgu?7vX30q<M>h}Hiwd;Hh^^B=Ice@*)zkL{lnY=FT24;t!!doce+uKq{p7y#h>
ze-=6hJe2<J*`PuAhZM>Jcmn)s0Fm~O)C(wb`RC&wQ0SkJfD$bK$L-%X1ymaQLo)rt
z82yV;`iDUJrvbWuC6@uDQh-MNM>hUz1R#+AjY|qpvN`_}l>u@xAh-;uN$~H=H7fuK
z`tJZzRzTrQz{fv(`hzrO1x)?3WCoB?|2kAapXpE4xjz+I0fzw)%KvmZ{@_XhLw}p{
zHwGy{2WJ8B_W|{j{u&0X{_7V3d>zYQU&#nKoWGsSKLXT%t^V<F{lAi}nE?eP{{Gbc
zN3hyS)l3y2Tl1NT*P(?;y0>h?3oNXzYLL%2shfx(QTqKP)J;JS`Ux6`c(4MbRq6Bv
z)DW3?m@pnX1Xd0d0@aWT%%XN6Ni)1sY0c|waI-ys%d6}1)ov=<;lbhIs;cur{nCAE
zhf!1%HhL)i_@c&pF6jKzHb%tiX?=xVy76f4`4lY-Q7D_%WS|p<41IkK_7Oh1-Zip*
z-=P92)uNBzME{O$g^}~ns&{!!wA9Z9GpIPlxW$JTq+5?EylMPQ`T$egF}-6bgkn+y
z?j6Br;1kU1*sqYlGR4HG)Aai0c`>{KL!%N#fx6F6NtupHq(tAPhD)`vXOuLgD@2|`
zsNzV05+?Dv%uv33n4whKtPnm<&C1!yxpuVxQbA({;&rS~k*WOowz5KzDOD*(9j{A&
z=bMw^HgF)5t=<>Be09th^BD77z0Eh7HC)YuHJcUPy=}{S0ldm4=pPi71x$@Lwk77i
zy_y3TSK^J*&#iuaf$s!!pHf>~rG2tZB2+L65akA#?lO8sOx4VDDaYfmgAqu~HViZd
zlW&o!BX5~*@owR@`o@g)sjcG><2=TE^*`0dX^a+`aj<ZVQR-7`jhYyoBY1B)_j)W1
zaIwe-Q|<ILQ}M>X5Bf{&G6Mn(M8-1~4XnXXdgiIz;}n-R9rUh|)oR_!x2vFA{-5Kf
zp)(;pVu<8V>ZWuR7*XMx!q<70GX{qK7qWI4@|+9y<~Q@o2AS90(ljy|(gkGf+8A9~
z)?5Sgt9N<F1as)Jx9QH_FOJkL&nA;_n-e%s(=s124wEUOJq5K_gE~TI*_=<)I-Eob
zbq06jd>>1<iq45YP%<;@2{W&two++;>tMvi+0}fvNCr(+OXdgni;dkJpCZih2k*~Y
zL0rC&yX<hfa<K_IVs(6yf1ufi;31)-nM#e8(XlA4)8NwKPQHN$eF<LPMcbFLj8KMZ
z9PL<+*w2P^h#d#1!xJA>W(t-rCM?=VPxUSA7;@+PfJp6K+&gAy`_l7*N)cN7)4AWJ
zW9#m1Yy5tT7BQ{rcz)+QUUodMi&6LJ%MB#1U&p)wV+?Gmna3XD^Lw={=xaQ}S)gOc
zg!ou6%`k`aJ}8qG%F2+|@t)T7UW6Urq(_$@T)?)X+qk5SPu~|ci2e7v$LiLF(bK&&
zpq)pEYt)RND2scPhE=2ov4v-L&q!jb&{B`9<Wj6nK)qGl(7sA(+#`*zRs9qZ_!q-v
z!XjFYXjaqUtnIQ~@;{Dth}zsZB&U1~qp}|mlU|9Xj}Dtj?e)0o(6pZOu2q%T($-~F
zk?=f{nU}T;L3peOr#wAv1Kz(_YoaNtWKnTtg?hh;#w5n3#>TG9mRW(|ZL=Tn65908
zEbr5hi<T^WkC}_pNH3<PqUW{)y6B~aP=)sn^_6t<RJ8wQP8BE4w<bJ3;<!9q3ECp%
z{8Ja91mVIrxvF=dlOt~k_VeEM9Oc^J4fV74A+y{(7|1lN?4JIfK5^gmC#z+gTcu#6
zv)^cMQ=0ctz{|!q<U`g))yOdf8ieKpaXM10D*x(Hoo8h-z+x$@>k#?+MN*aeXqf}?
z4n)(x$uFoasBKqjI#hT3Vzo#CwNu<*<-u+&&-Lj5!Sh0^BhG7<Jy_!*D`sDo4nji{
zSU~wiuqL5>GEPsQ{soM_%{O)Tw3_Dfq@MsT#q06CP7n7<@hw7VIT~y$;fqWOnIb;9
zFWj~|(tM!jIcR0Nhu>wtOpCl@?T5ZsExdJ4<k1)q1_+%yx(x^=C|O_`(q>R22+=j@
zvxEIfO-)PXK?D4c&*MZ3TAe)JlOYFpSGew75dPa%%nIXNW<iWilI~rt6JU)VHPY+x
z!D_8{JRCvR`J&8dp-v*Ts=JZ|(Mi$cs0HnGrR?5sgtuh}4{geUOQdW95DlnH?0HhO
zV2xMAM1#vA6L9=w{+Tt*GU&yMl0tcJJs|t5DM<ls4Ws_*cOs?R)3yeWS|$EBNBWfa
zLPbG1on!rr(N8TwIEJ+X;z=>A*+?#gNJp`VR~7P>&S<ykgY@_8&AA^VQ~dp#dHFxy
z24+4nzQFNUV-6hWQ{#t`GjuLT<wc=CVXG6Lc9wfmcYHtliC06tsB%HQ^yG(AalX8g
zE4WLpf$F@b2gQ+fXBopLzLv*rX4%1Yl9w#@-qW?wgc?dtb>%^03O{%$yOJH3fQg5L
zS1t|%Y`Ev#)!clqS#4B3xiu*NwvTvhq~gX|t^V7|$hD=^VOhUr#B723vwJO?K;ct%
z`%dS1{YK2vOLhkG4brv9-m`eJx%adH#}Z5>F}gUqco6KYmv5+b9!80aouaD8%9P*f
zT7Hy9rLV(;<Kk=^8TG0)M`BZ2;e*dH+oCmmnEXk>Iq{APou}g6db|#ZN-;32RZ#3*
z(2p<4Qqx**JKrvY_aiRuidmVzOMv6p)X+6$($B7{Y6{jSUDe)2SMv=ns+i<A<afq3
zkRQiff(BYry-Jt?X$;HoVTOrZ?uD73`h>u!uFXQAAWF^dYg0<7i$XXr@A+gz4Tz_t
z^qMQ7*9d_{(Pl|!Z4;HDyLP+>Dagvl4(*S}?|~bb>jUk!%#a~aT>~d0f^`UOSLT8e
zKE8CPXFDa*BF>0RUa-yA*L=7>YiCPhu7>LmUndfh!q4tc>ZDj`aq=vHM?ps4p*xvk
zQ(i0U8sXe`%*FG)3N=C<qCg1kp91l!P&Wgj)|>2E!rS|r%p3F&zofm}nNgZiLWRac
z0ZBF&&^net4((y}iMGTj;{<<?r&E5bApM2|>%CnZ=TTZX(?!{SRP+4Akwt9j7qowi
zZKw`T{3Jl241YtGb<-+BOat``;;NtT29cU`XWMW&d_wb0>!#_7zu=z34YB&XpV+_S
zf;=NOYsxES9KL_a?cD^4j<6Qzn%l5zn0?0Ml5QFDyNM^4Cl^a|=`DoprumWSQHy&K
z!pnBkNgBrJ988m<)vyK_L0?!;C7XuTes&K}ib`w6Skc(QS`HL+kcq-0!sE^&g*pDm
zGgLOW&BuQ02JWn+_h4nc&NA;n|C33s873^B39P{r%E5Zj*RQ2qgg~y7<KVuO_!P4|
z&EdPea}EH<zeDTulLPlnP)vuAH>xZ0-t-i**Q2R|RNQ1vv|WuZc@~`)rZ)`IH4HZ|
zJIOdSS8Jy}X&(&vgcy*`;WyBg-)cswCNXCGtd&hT*{u^Ecc@#r6(qkJ&ty#LO-rpx
z7n*vUTJUGil=qbP6l)UOc&BtND-pMPyJ*hBpy1^pO1<KFOrz2G{3`<x#CS0@(gsDN
zY+Zs~m+o2)vOPT+<2vD0pwmy&PQN!!&v&J`YODQr8Y8vQt|eh6;pXD5AnZadLsa~o
zlz@?dp=hkEkk*O5LMQml4~1KH1`vp%ih3t|GJUCJZNF3lXLFh5lJS4Fk=@KnODMlS
zGWV46^!Ai6oV3PP1iIGKGcOyDGpYezOtQ8VV}t=C)_zocJXZ5eS*M=UEk1R1j$TQ;
ztX*np!6Ml8R8v_-x@^P2)1sYLI=!j>VovYm(gcgTf@k~vR$R3*3H&FwrK!_-G4_Gv
zWFRt0)Yki#Y!qG%CzB+womeRJNtf(CAO0{+;oXBwpwWA8{O3c{uI@yhpJL#y3yN$R
zyT)}B)5u%MVfNr)bVgOJbh<EW3!BI`5(Uh^KWWJ`ahzGtW=q?1V7P9bluR#cbc5$f
zSD`w-9tD(XDrG6cL|)Yts>nX>CajY$*fPJ`$|tDZb9~3a7;^9w(p8Pe&UGmqg2J%%
z)j|Tg_^NGZ-7wE}(5mcCXy;HU)>($PnoHW<1wJu=V#%vGXzq_>hvLgcsg741rBGN(
zS@0In_Dw6+@+Ypv`vC;BMDtuYhO?d^JpzsaXZUO2@+=s0Kw}LEdt-KTwn_r?@99br
zdKY^&uXubTF>ejlRb~>~N0-9Pu^u<wE#3eVeH?64r@D;BkTbLc%}Sd~#xII!W-Z6O
z%em2ymJy#3)JFVT^=_v2FPQ9jFdoCGmL%p?^P6=PL9@KSLrsNIJ!#IUNpo5)7N-hv
zW#8Ja$32qUYBj0>0GU*|=$XXk>!z6qxoZ@>$FE&I-;5;P?o#)1xE*n$1b2Ez9TPYx
zY`+|$K2nf)mnfiu8^8F`5AXU3`3da;o6fOQKN(^oe-?#DiNggmKynvx363o?XUYbU
zg}9Hj)6_8MF_v)-<gBML<G^ssCNBt}unZ`JV|=|tK0KTFi4JuP?8`_#O8fP6Y`i)u
zE0j{U610E-Yg)tGJ|XZ5B<)?I6#5dA5_9|t(#G0~zJ1#Jxng`_HoBo60h`r3DJLTA
zS{7CcZUaLye}<p9;!7yO5zCN*Jt0Eo5_*$r3K!bDtngKr$Ip$S=Zx5A4r{cJ9Xq$I
zKxS7nLvP4tRR!f9LccTv69*{=sYXtvpVmpi%W7$H{70V26ytomUfhEOyL9#4JTC&S
zPXm_D5e{lZELt~Z?Sx_M&e?HA&-98n2DF=g?8x|tHgza1Rs<j&HSG|JtgvuU+Eu26
zRUQU!9tQ?f=SM^wfmMAEX<o%TSG-9_rK!6%c9cVPyoF&CNdaMJAmD3MvI$ufyqON@
z)0uI}mqT{-M=l5(5a<uvMG5YvX{4(Y+o1TdAACovOl56kE|^8|ijFSb_P=}>c;FTq
z0)VGwtlzf+57aCnWFzMxoMuN}RN}ko+7}G;+JeZ=JGHe}II8mK*8Pc9TV$R~Bxd*B
z!-^<dN=USk=^F*>ttg$%5?CbZ#5RE)VOC|?*pf0P`BUXm<Xl^Tv5_Qt%2$vn#HTis
zJL%aO=_PU8HNo3;RpPqks}dmOLMa&woJr-C$QF7<o@yIE#e}Tmov<y}f`cLA^2aYu
z?aH)ydhvRp%P>*a`f!9C<B-YQT$V7W2uVI3{O)0xd85!Wli3HfgT`#H74JzEy(ll4
z5@q)-m=ZTUXn0ZTkerLPxwO34K7loVhH+GCQuBW>{XzjI@+0;W(l1n8Ih$0P>l2tQ
z1u2HRzKFiQHn7I#CQ<jW)oA$z2Is@oY5+SXa^-sTLLYo2P`&T%*&VFH8)kKRTdTtq
z9$C%$K8IX}JVw+&@lGvx7n)MhLcAvXWEhACKP5iwuU|qr!oIwcwu~BO4*_;DaG`dQ
z19}fM(RE)7^pPOw7j?;3$W=%sL9wCHrqbT+N>C<KF3B?da|647GV|6sL*%*~Oe;6@
znhV6P<S6wX02<6;b0w*-%o*#_^keDt`(Y#=hx@}cqiF&8a^TV_{_!f}x8b1v8i9Dx
zaNGDR7hKYQ6(Zarrbs_ndqg;nT~L%$7|*PA=p`-TR=D!~i<iL+44zx0Ni4_eC(vO_
zOa_M6#l@ijl(%H89$9p{(FdkDLsYLdM}T>`O9*Ra=%NFVM;zj=n7%jI8&Fdr6l36%
zvjg#+|8FS^^dVcLaKW0iW$_TZ-KJ%0S<4RKYSCMuS7|O#8kAoUgTI6YK?Cq&P4W%O
zK}laF=f-kizZ+qgI>0!{9x4*fsr_n>F2`X%#jP%yjrIO%wa4dP`U*@=U;87el$PR~
zzL$#20H!Wq*~hpb=qZ{;b36Emffq&eIR2fh<bt%Uw+o#7bQ_7&|EldPz^dw+Mnymx
z1SwG@qy-L#6zT4eZmC0az(bdm(nv{3cMDR|B}hq^NH@{~0+M$h@KxXX{_pwkz0bqm
z?6uacS+i=+-ZQgiLMn4qIp?YRZK&R-q<@*zrm>qbAU1hB*B92h;j(jQQkip?Y9-F!
zNyH>Cyvl>3RO792XV(iF<z2b#1G##Sqk@H*xVVrzGXk!4LyO-$5g$Ui=Ke1A0|W85
zf5}t+Ct<vXys)^4(l22=%R?q%8ym}i6UM`%US0Y8<NyRc|93t=e;Qi@p$&h@;(?et
zzsuq;g+qUq9R9b))*OKQzMoQ|-y-^XylRHdv*3#zen;=S9>}be*y?EZA{-SzP|O_J
zY}h)0OqP9CRH{Fqk!&KiT!{k{FW`OIUT<yd?NjF&KQ`S>df9v26N=ipuYFp+Tk4_s
zHm5T%o3c*5mW<je*kp6F$v2cCz0<2698F(~QVX2*84^=&T$>4IeH6278Myx5kLMUw
zV00YTfHt;odS25b*WGTQrrId7<+$0kWvqVTNl~WinA3F%d8RZJ6YXL$aDQd;e8+IV
z?oln;guZ>5amMtCBFWUG%J_63%Hxmrn)Y0~DQ)B8OPn^v))Fhx+$zn+E#h~yt!kIo
zipYEhv@`PPdm^y5AAaVTkkL))_|9@qHb-`-S?{w+d2e5<IP^5Sa-_6&x_i#DZ%gY|
zFrUOtW(GOyB%<NPWy$7Nzos0k((+<+L2=IlyN%CeUXDT+s+k^nN6}p8!U_k%iR}w{
zIk#8Q%CGeZ$@B;31o+0Jb||+9f$6SOrxH^MqDlH7!QFl$18zTG`RFG#J@Rq5zD|eO
z!`rqaJA3=RY!jnjyq9Lk*7Yf){adR<87YE|#MP*ahKOIwcV3aB1ybS3J^&X9I=$Gs
z&f!o%f%bw5Ne5pCDN<2VxJkBI7JiWfnT4r2m`Ik$mI%P*eW5nj7~LUD!4?>(Buf>c
z8T66LL=x=Vib-!09!JrJiI+^zOU<k7oV}VnXVNg0uvO_uyT6A|5S#s0qJzxHQ+^VS
z9?K(nzb1c*`Qjif;QZ;*s`c$t<TyVq?Z5B1%UGQMQI`E*T8aPbX8b2P`#)KV14hh$
zu@t|IANn6@`!_lJzY_CTIr}BQxiSmCl!gC)G7ASxp8t@u!==bvfR+yM><K)v0a9l8
z1&G-%?Xm&6_aA}4;QxP*0QNsU!{Ol<AY6wF@BfGgcKIK$^o9on;{bGu{}c@j9$Oq9
z3hW`^`1Mi}|L<|YIDt@uaFpDD82x7xm$uyhbx<%?p!&Z>1cQhF`nPak@Hk+X7u%&;
z;!5xjkO2=s1~)4Q{4&D@^l+sB7_Q5_im3)qT!v2L0>*|D5VixM)7XLV(f}wBVvGam
zDg1f}24ZOg*Y76jf7%-V8`(Z5JgC`EF8)ilU!>`zp*&tG0H)z)xFs<!l!Gd3XTK2?
zWG$+4_deyIkSi!Hu3t#730F7fF@88@D#GKN;n>$H$r;kq=u_D-2<=U_J|^*;>^M3f
z0zTGxW@-;lrpZ9x8<Y>V*58fkS)Pr`KO7#;!}ot5Z)r`z`RT1)M1*$7=v21+vr>ZY
z*wvGhlicY|<BD+wLAj{2IK!2v8a?!dgt3t9`R^8!N)>rz3(8%60?X8%yc{uHVd)PM
z*gBsEjTVOGkgA-#(&^PsJ*ZymyjuaHLjdnThK#L-tqQ~qY37&duM2NRcdHj_m%_ex
z5q#D@y?%C9XZ+cO)ibcMSUok?Dz7r0*5&3r=-U8|X<kBWkJR#VKI!7h8qEmyzWva&
z0n{SfdUl&A*Lrnnewo&8NUu~>TYGsqd-FCiamlms(kAk-taGI>$d+-4RlG4Q+$vrg
z8rXdAei^kN&pRj=>r?|f3%laP)RX+pHNw?J>If#)=5L2ssOa`lTL-kur{~DWp3d*u
zMrOvXa|#lkq^j!^i}7E#AmK<-%9zf0J}kTDVL_NBADBCiFJi7}ms>rIGVDJrG2A{(
zJDf9YI=nJWH0-`6V}Vc+ejKDF%9I_WD3b>nhFKt21P!LCDO=`Nykf3+sik0@x}R4x
z%xS>`5!_5|NOM#&v><{=Zf1w%^1T9A#ImNz(zA6XmZmb&Kj?a0ns)HmWf*cUe-3Rx
zo`GpCmLm?OXNxHW&nev{P-E+|La%-;?O-Q*mrb5wg=PiU)VK>pSze*Kd1PTa`4J>v
zzy{q0qd2xPjO=3{E)rVTmwNlD^7tV~*(kX?$;@%*bNhTl896y}7sa<ROcr~N{6Aw?
zU{`oH<QyjN*E3Tn998r``6Mu;Z?YzP+Awu8URG-Qt-x;hT|P=4Z7xpma=>-bKGQpW
z*a>GiCe#bv#w>*&ghPS2zT7E&Qt?qdE9Z%^cBUWsI1ay|Sn@mXq>(<mFS_URX#5?z
z{P&#^X7)0aBFmN18<M`?vxjQl*Y5eRROt4jo)LN^G!gP%&pK#^g0c0n)Jz$NidJv=
z&r0erf+$=187L>mcm}Z&n`quwf6DokLz#{ZI92_QGPX8JBAnpG>^HdG6}9SreT8mJ
z1WJyOLbbi&y)_{~l)+stMAL_Yaquj=(SN#i4LrMST{c9Q&idwt#g}!WHHK~_>qL&H
zal0}%&?8}GePlc_@sE`h>xvxPtwm}DYt48*A@+{gCJ@edwEOC?vl4b&p#(lZ<X(<j
zQ1#lpc2Yi%Wk#4R@3iy2mrRl`u5QciK{;VKcWS>$IkB9C1;bN(-j1Sn+Bod<7aN7a
zn!D2j7Ueg>?~#*<lq2*B4}IA@MX%k)Cb~1DqxZb=S{<fsE}C9Qj#2uxBEjY{ZpM{I
zPO>=2Y`crC<>jlQd4vrCi|#HuFi}yRa+O=%Z*|B+Uj_$AzKpV@fVIcci05Qc({yu0
z=#aPC1X8Gfd#8%+n|@;-Ex9eARJxy$co<nD8jJU_2)Wt<+SA*tjcroGw<oELds9QS
zANnd78-(vc!%yUmY!zccRyDnlHlvu2fo$yx@6jZKWkS4&1A;F&BR}+=yCH%o4kUT0
z1Vs&XNT`J7zica+8-)ARiwGN{Hs6e`5DB|2nAkxf73}FN)nl@ABSk|hF4>nS`<e~H
z=~5Q59HH1W1b0O6{*-iPFlGfmt+NuZ!lw5~$#S?sxm~VzPAq|jkA#7lscdIjEeqgR
zQ7aPt%g4%=Q`g)V!ZVEbwNS0CcFrnqV(*eJzqCm8=43Q(&vaLhof3M4wl6RA{_Sq7
z55ATQCI+(bs*wP?c)=s?r4hOnZlbo3B2&U9W@WVH<kN%JceL2FFLL~8#IuHzAINui
z*1@=oFmI%4Vy>InhC0T&)W%tH?(C$2JE>Uo1rS%?S2hLnxT0bs)(51#-aSm|O{A}W
z9UB#&93A~S`IUPGRXzIJwfe?g0^G;dXiE%=iV2TMJPcSEj_<ho+#J$V`h+>9NDAxW
z=wloElE_r}LKk}AY9EiQ@T|7s0Jk$o3B=;7`;u^Z08QeM?Cm4j?N!2IVWA@zkk<aA
z5rF}mQ^7uicl@hOZ?wiH?1wq0d#us>gv|VEK9)w7Miy$BWzUUo@$f8`vpgWSRw}U!
z9J5Ha9sgo>Yft_wL9HV$S&HHt6tFZxQ$q4}8^np`S)xyZJBV6iKG`OfvL6KpUdA}&
z3%Jz1y~Q08^h~Ic()7munXHxY`-S`1WY;`Oa`zjBYVYh?G*+*CBi?EW-PrBg?J7)I
z+*Na~cgk`rni5mCMJzTGxE(6{SSd$vA3KaL$6>b&ioZ0BSky69r6~j1Fh$n-^nu<p
z&@kC<+=W9{%zBi`%wKzeE0ZIVCP#))A9Yy?C4s8dT38tu1wEB+ON;7EaMq=Fp`54C
z+eW95>){saH0lF{zMl3WvbYr#e2Hk>oKYRRqHKuS9Yn&XZz7c9gB1cE&}-qjIG=f!
zR-I<Ryo_iCpA{D8XP~UMt8+0A;3Z?3q#Qi$SHG__+_M|kr9Ps|fo1Ox8=+ZbkmPkl
zmyZljZlUe0cVVl%!I&j+<i+13M)jcj0`J;V-@cooeb_nM01vyQPb&5LG8P`vx3r}<
z>bk|BbMk~|_$NmuMtFvC3k5H-Au=pX$1bK`5nxTDDA@_tSk~BeiXLNcOIpH*b};Wy
zEyv}-bf-2mexs7UX8n_fAZ#(~EMeuxD<vW_`+I1UomQn>qbHx!zDp@ACEe>uX2_6|
z=u=PiyeT0dF;HC9!5#3$-wncnNWB^Fv4h#KGyY)W#+dYp)tz~pg0o(=t;)1)@@xZ}
z(yYmYAoFTZ#PSw77szIFpB2j)b(ODr5LN8K<okmCyr5i)k-G$}>mIJtp@dihL;26U
zONTx390Q^HGB%}C`8jzE!l3@|M7JCQj*NC*Fh4QpRdt{&?k~O{mBqA9dK`WG?!Xe@
z9X<Mx)?(6%n1%z|X2SucQP3dGs45VgT5K{WIWo9bjb*EO8U$Wy7u*;SL8}B@Ragt+
zgfLG`z6A~};MDKX)fSb0Kc1RU2h5zjED?x4Lg)vTPnz*JGVgItAK<E$5FKP|5_}>W
zSCK*Eh!&P|?HtJ;#5kPgPG#Y1ZO1S`v$CJbYILg=B_e#E{-DOGa>)8Pj8A-ycC}t_
zLefatkwE!cby_^sJW^C)K7DFNeA@{vri1dV8RL4dzfN!!o;qq9J3cL8wr*!#jN3T_
zJ;?}_?xyxQ6UE(1jRlFOl@G1Y1$#q-QBqOHGYXpCr<6QlnR$`?jc-L*puPPmemSLw
z-n9OQo}p^d<jE}h{${mrPWN-IJ65X$AXpkfUpidf5s%Xbin}tKX!<r1!Tm?GRm53)
z+k!?~*8EJzS$V5USmpgYu3vPg_Sgy2WV5eJF9f|e$93a7N(Xh$_1}Ly^Px>bOkDas
zUnb{cKhaKn`|vRKuT0q_42IG^0m*A;M0Kb`JSZ*`-IF3Z*q@^w?>#1Y5{Em~>F5C4
zYFsKEAlV)IZca_Kdc>cs?M0<SLL0irzkzu}|Mm8#wNZRmoSM}2(|XGuf+D%^n}Y9J
ziu!LjJ+glOP8%A>=$BQ}>2XN7u=$MP0@OQX4X&!q>O13jB{J7JIaI!5)8AED0h?|x
z5i71nwXE~<f5Aj5hr|4>S;l$uqP1}9Xf9@c2}x_MDlTlk-Gn79RZPiT#qilPS}UI(
zidr2;Qn`?`%5aJ&rPl6gn63k0Wc021y4y<)1Nx*VJ4B~5tJfwnjB*vcPZUvKcMG89
zkbk8M-Kcp*<DN#XIpDTQ2Nix<-*g`JMG1Om1{}1NR^zDSKR>|OY=cMjnrDC$nM8>j
z?aU*tyEo9FChflNW5Eo}jWR^@*`@2TbUDuc7pr-WRSzsnvxc7RVis>nj$MSkOn=db
zTGEa_e6z)6kC2Zd7$(248Rv`(ZBkHtpZ@f#_C7J+V`^iyow$e3_l~I#D?W6CzruFS
z48jdJj9Ta_imNts$^#5!V(EjCZ!o`^U@vM050$-osq^0Qz>>dT7M*J4oUe5N&ru=Y
zMMiW@HA1)PjoA3=;EI=L^McohScH(^CkQu*@-w)3=fqd<F8G};Z@idtcRvq6Y$A37
z?>t}K&nzYLA2({1;TgcEIXAS@Q}L$IYP@5Oe_xkGfs6>JtnWxFSyTMGrCGTkcMg&B
zyMZ7cGZzf4Xji7Z*^ks(+^oDbAJ~vI5E~aa-<O?^wpGbHn_ORGIZiff-0^7+-B56@
zM4x53c5=%?<y=%6HzaT@w+Yp!=979-V)nZYwRth<%{>xVY3Lz`qaKOR<0_9TN@m(@
z_SCLCN2l0l16rk&3AzVXnLYbwWBD<Qk<f+Fi3C@jCkT9%Ru(*y`}Ay#>@r->i&BXi
z-cNjj6vVeXe!5{I#+)?GG>5*LmiC~&qU3GiH<JdRLZcg+?U9@+g6SUk91-#>L@F(l
zetvtZZDTUbqe2*y5u=*1p9tbjH)zsa-F0|_q>pmZxLF@?X@W*t$<nUx<9e32yH>|2
z%D<Yjk!V-V^W73yC{-B0g*mxf45lw_Fnnzyihq}cC`#Rof^74{;S55haDCl*V%o$`
z%;<nd?FUFTO|t*X442c09Qhh8;irl?C*=u4>>-N^)vIft?)ntb=QbzHyEU^_TfAzB
zVqb|dz)r4w<9*UZY{ixsq$5L$f$iu=@WF)ik;ii=YOyVaXy&_aC7cGoqS#hB`&RU6
zk%{RhT3Zwn)(71T-KFatO9gyM6U_c8F^O*#KF4l2FtaCNKO(#D|DgYCY9zkf+-9M^
z+IXALNXF#b>cYsyIgs|i;7p*q1~-4#OPsUh`W6(SBBPYtc^n5UyMZvs$5^+N3*BG`
ztwMYJrpjP9iz3S+?aZWp1g72BHLq)efoN|gq+@hRNGT*=-1G7nmh$O@izVVd`IOvf
zgp$~p9QLBT5`hKZZEr1*it?G+VJyk+%O`d!<yno#zddvZ(vo~bVD}<fFF<d-3Ee$x
zC1^#8s9mcqR%}e1eaM;9vIal=ebo{DX~45tg0%Q?U(<ZNq(P_U!qf=C@8b`49cAlq
zV-`+$X-~N$DzZHNTVpK9@6A#=qCa|mEP!YgaO*9abgQ!W-Q%Z~ur81nuOi}hnAEGt
z@}xj!w%iciGlCud=5AU0g><v1=sIMxfw2IauK^!259yWHt~Kli8Mg_o8m~`q%aLq=
zBWsN$GMn<>;-qQazM=1R>UHRLL3KUr^)Bvsky$$(uTO*&#%s?fXC`E<WOyU#NTY#Z
z#BRw40>}G=de#QI$5<_&V+&==Cy|^#7}R|tYC!U}h;ppL!CH|94Wvs-2{7EM<5<<r
z49uB#!hAJT8xS{Co+qecxm4|$kO2t6vv=5{nGSp%@o%N`vpjE6t+X(VGs*3xFYGIp
zw<Omb!dGo<lpb77w3Kp1EMU>bAyF+P3ZQIn6Up+yD-N<PP5X`}#+LpWqUKt_uG`E7
z^j;j#yRM2n!v{$vYZ{%2Vp`tfSf4h>EFgF|Jj6xy+iI2x*0AWP<f`2iZ#QqZip(eE
z7*<nl(+T}S6K{iTmpqU7O{{U<>*?V7H{mb0=Qi!isQ8HM9cMCDv^<5LuzU04ofU%F
zY4pNmj$R@=eo-?<oGY2>Zt3G6j9h%x$=xk;P@Y@alunt73l;47zH7OEN@Y^y(HA^c
zfg@2KGzU8p1CeNBY{=<ua;T;fm@VmwK6v<|UT4tage5nkzm6ciL~@AOD)Y_gJx2pV
zeWY5}9_fv)gQ;*_KR>^a&%($3BGdfp1vl+9a7_5n5ZQz3yJrzkUuAbqIL#s8C!|ct
zIS!&nhOAk4h&F#uOF2PIXfGUR*iy<rEL&@=jL7x@d7Um6Vn}@nyiGeqK?K8D5*r9Z
zifr|YVohyi{d7w3v}liyef#MdpYeG0dh&6Wp1<DNgqf)gENpefk0;j$;gBkYQ9x^7
zS;S1RP+*+>JFWDPq|up-Zy9^*t-H<qV|&ZzL9=VV-xP5;Odpg&mlp|z2s9I~OVMm5
zEhWr*3d1OOoT>UBxu<_%V`O@Q5vh=P#ti0S1XW%WPu;`i)(Nf0dmW1zO&djVyY!`=
zd};=d3yuC$Te7#DpKpd!bFU-p2U%0y>&L}H<#(NxbT2ISCv!1dRv({CR3HBox1yi^
z;<kH|OS7jI?qV+TV87x!x(nZDGtz?B8)lFbvw+c`R1#(rl7G_OokuPtt8<j*gM^UN
zNVZA+HM-?l$7J`n&si@D{Q9w)1vW@x%wPL@l=$BdB^iyX5|cola!ESmcp>dTX(|0W
z`rVqYjeT;!`Dhy_*k+n}E?6(+-bIK@%WEh(`^5vtHDfoCeiWM#iD@hH4DoZ~Zzk?!
z@9#~e^4>4OK!puX;*@4KVU&OLQfGJ&%8Xc(Q@H@vOskGEVHJ5O;-M2WK#WjF)Hy&?
z?tq|=QJQ8V$CYRH4R`KLf$U(H#w!oKccFc<og_>OVF6=+Fx~j2&yavDP2d5xo#}eI
ziX-;W)8>Sa<%X!Fxc0hV%#^1r=7S7WXph?7802N+Zba2+BaiWPwo%=&LY&(8G8!r!
zY`|J~0y{aAI?Nl!rpQ2>=`r{Wb8o#xLaV|!#Ew`??&Nm7MU&O`Jh~2>Qhe}xo{yai
zsxFmMyHT84)zh@P{5pn$2&<qiWG?O}P7P2BE~*d+g+uw%N-VIlbrL2UQ`ctKCKRbM
zGV&1osFm^Ecy8I}<*(gD6*wvU1yePNvw=#f-n|0oN}!s1-?W8lcD03q-o&bXpH?BD
zG7^q=+$zt_hn-a&-waC3l`vjd%GY-%C+Aq*O>2ElC7BjcAvpX1)bRT84eD?*n!~!;
zs6~X{64&ixhv&Goa*>~(WAEEVyqao{^O|~w^B_?7d7J1|NmoT5t=Iw$d(*d&AVZRN
z%+MZf8>&y9<7+|O{ZD4il=P#5m%dlOV7Oy1693`_;V8bKtgx(m;YSg?W%1YM-9p6V
z^eGB=`PV5-XVHlG3{1f|$P+JLAMqsgpCv>SDulng)?m|kae#E2_|A~G-sB4YemYa;
zF<rzB#1{iDtlUYqkwmrw98<?3cJt?KU{j;sd(z%2RWWuA2y3Y{`3zq2UzW#BCaA{4
znPq08HwH0nG%D^@;d#zYlt1^WI%V0pCef&0`u**-0{is>`(YEkU>jv)#5!M;5f*8y
zobYcRpFVuoCL_At#xj=1nZ@jn?bfu1u^$?9jVbVBb=nK;3>WLn&iE{U(;{4)S^VaF
z8SEYgwd(adA4&Xk6<S~AqLUVy%HEF*Y--XvRViucsFvA7U^h|0A52%wc{b|I#7wkN
z?r`F#qLaiJO@$(65B^#`FebddDz;j<7AuwoPJ{7{I+I$pGV?YTi}cLbna@pi&RI#4
z-SYe<=wZvlB%bVALbN^C$tg}k1`8`dwTZ?aj4=ylPIZx9pr{H?bk-zYSyLcze|Hh0
zosTsY8rDFwt+9Yrg5kE~vyY=e8rknDXpjT=^tTFb>1SRnyDJ%7=qn)lsQcP&BeKXk
z0}z6TPwVq)hms*aM=vjdPknd!$vnZ!L1Tv|W|f`~>Pf=mVPQGgAWoJR+H1x)ZM(SQ
z`HT9Z$ZF!q{W&|#8=Qk}Y1|;BJNrA#5~q40U~he-&DAZBUTQ2yS02440Zf1Lb2_<4
z$!uoC3|~DFHbg%+DSxxb2^&yANNH1Wi7KziCm7hI<2ClFZux|njo6`NJ%FGGM1<f8
zmHVFbp%67R{zAxz^Wx=)QEwN87V@;Wqi7_Sg;hhxw4rC9VE$H0t^88|l{qF}n^LxO
z&&oi&EnCGFm4hA9C4-TJ5Uh*X8K3bT&kJo9E$?X&>bv&%(YBp)d^e&~@dlYt8-0a(
z4?#CNVtIJHIEJ0URgqy_>#P^9gf()J-<Bn^(Z7+Qgn#kMO-T}es2$JMlz(6TPVzBP
zOYb$UeP$wNIap=xlgL$p=FbzTmJ4@7IgaO{SvAs;7Vjr7f)`shT`?-2Thcr(ygM$V
zxPO1>*#M6t!^y2ZLu2ZT#UtkI6b{|K_T8M1YiNj>(Sr7OlD*B+a}Vn3G}vyH&X?iz
z*I(B(-05UVbttk|KTjJK6_Vc0{QN9tbd`1r|FNI?#U1PI^I<}<!&B8etQVw9*~wBi
z@i^pV2m=LgaZraNA_#6NqG${`xF?>xdaS4R-9YP(nlioG&9*UiF}}vUEoucH1(Xy_
zn$Q&L$1g#okaPR#=i!3a)O(E~iE?~)TgD7e_4>rZ+q+eBMbS0WH!|^8m)tlR&u)#4
zAw^rHH!diJ^b8Rb-zw-2uRbS`@1sgo_xz|byf>k9QEaPshL*DevQ@e8ZRmTJ$?17d
z?D4v)rzKRy^p;83)3{Wt(TAj(COMEjjy97>h{ce6z8p%$ZKv$m?0ff97HD6Rsw=N2
z3?#x%ThT_&z!it*C=#0=wf?Sh1Oxh?|6a+jsU|HhA@xhi4`%t#N`62&^dqn*a9rj8
zm6D(BcP0Oy!+Kt7iT<qQ|8Ec6aRG{_pX#CCmHg2%HlGFY#ShO3hQu<Bg-TW+0XOl8
zb8}z$3*%SF7~<rO=-;2nou3&?MocSuElxR~P;Jy(eV>^q<(^u-lv+boat(G{MnShn
zbd4UYb{d(8sEl>WMt5M6JGOeGTze+KKVMe;4Q^~FtdeneIiE?7@XG|hT~59yk=&G}
zUZPA*A8xF|mgV-uUToV6WV`oF7OK9U{<3AdoY8Z?WouY@x~^j60))T$=15=tsGYYq
zzqfl{Ty)HUxDsQJQl`t=R0HMge%5MA6_-nds)WkdL2r}@e9dIe_Qi{wiAvR}xsf#J
zq@$))mp(h*S-WEyG~YL;0sZ(;bveJznIM*orIMrI40^cy=pOEkg|{7(*&g;28MWU=
z1E1wIe*8RD$L&7szZakLz8CF_9g?k<$(!jSW%Z(9b)t~ZG|!E|TIHS#O$6glPAGEp
zI9n@c_Z?H^wZ9xXERo^A!$B7@Z|BF7LL7X~PDPzbZ!Rt^X*U>BPTu1YBs}PY|2<PU
zl1hw9ERsr2iArRUN`{r{E<W~v7k(6r60sODeMmaJxp0fZ9PQk&t@J~VL_B${oaf5#
z>HkxbjJq18hfi`#$POpct$)&Y{vP{b>JHlxeF&yF`hDMsmJa<)7FGcF-;%Glv}y_r
z-j<kYFfx)5Vg&oOwWR+oDM46c`Q=S^ZOoR%9}gt(hqPMo6vt89(d|>iQ_N@JXEz#&
z1$>``q%?y*^vx12nC=9BW&2S~lbE2)JvTY2t~EnF?t$g;I^xqFW9bRy5q=bJHw*v4
z$K)D;5Yv@tiU)gp(k)%{@07P4e0Tarj6a_4?&aTcm>PbTQuw7I|MS~5iQ1H;so{?p
zhx}(wOK*6Vo}8O5?B5#AIuYB!h;(VG{5wPVpY{Czr47Qr@```3K>&P_{r#-p<*CCz
zZRSg`|CJ5GzY_B|8-!nH{VweU{{Pq@0EgiI%5)VCOd)pUz!78{2dF(cz~f}hjqL!%
zINR0GaKmk}lmNqoE5i(3fLFoi4n7#*e{e3S;9zfM4h0Z`#LZ!L_T*OwkwLNsz%y`m
z?&=KVWlM0b_XnoSlAq&iZf^x42bA-dz(VAJw*K--(EtVkD7<PF7#>9!#1YWqUl#s`
z>UZNRl`B+Nh9)2Y)m3Le|BULY!ytKx^G{lT!wWC{3yJV^lK>$?<Ul~ye<u;}pm$k#
zh4-)YUY7nDFJS%f2i_~_f2J3(75NSC74g65y)69;-Yd7CaC(1q^&iLKM_(?<{XzRL
zwD7_|W4#K>3n2Y5%74Xqh3AUaUwFX48O>jq<PEHWC8JGFCu(EFq-<{hv!@4+dtP>r
z4se|cwQ;h77@I<@f%&(i2iD?`A`^2{2N;Ce$iUX#+y=_b4ivFm%?X{gjWNWE$pB_#
zW^NBLvIoHE$$`_W@ByQ<1y(#W7-->YVy@DxK>5{ZU!~dL?fd|MK}-PetY9t!n1;C{
zn2Uvzg^i1oRSkduOlmGs!XboIWQCL;v9ho{gwye(EqH;5fhp9+-rfKxV+EQ4&X)eH
zBj#uTwfj*AINtg*UqT+B4-B}C{4Ix71D^Gx>u|gh4iGzgpfVU}@ZoRmnHigKz}o`4
z00a^KiNVU;)XW}g4mAa`e~c0wiwzX&4CGv`4LW#^tqb7A*2oOVyjmugnGV(l*5>dY
z|H^{^`_Tw;IU!eT1`c?c{Mi#L1AB-ulame15>ERMV(fqk0p^PVJ_kR#^D_$=AHa_2
zlIEXz@TvIGy`Om;K;Dm5e&%t~le7NlhMhgoFnqZ%*_bdvArNDrr60p?=U{DZ0CNE{
zu6hET^OiNYHn;!l`lI^^_-CH?`&nP$7X_$ywU+<<g8^^s=RN^EtioM9*c({cm;!w3
zlHp!*Vt{%4WSBqs+SO)&dx?M#102BC!Cne#4`2Mi9^vIB2VL33{$2t&P9+CPs3<>C
z6crZ&UwR0(zbaRR*_fCE4E3t}KhTNTK#k4elOSabpfxu!hrr0`?35&g=>c~WP!pR!
z62OZoFg|dK<$w(ZQniEpK`%%SVsBs!^a<D|SMv%|u!TT{E~geaDSk=(@B1A-gFi<E
z&X?(QfL#P!TsnZ$V`ccw0DiNA-|XSHNAQ~t3p;SJu*ku04)EJA0CRZK8h*2Z-;Ck6
zUpW59nwO1S!rQ@XTEcICs&rXq1kW~u-!7Yj!k-|&gUbHQxrB$olb2Ax;7xyrH-RTE
z3BCmVqeV%0_GOhz=*t%WC=>gM^Rng3dY4cxzav(JCobz<W?y#d65(YR?C3#&`I-f=
z_vq+BA~1*nu*vCU4OAc+<WA=HX5?nTDun?Xm>&3HwJ~-uf&irs29m-603*2?1ZD@U
z7IH8%n1h*xoDN<~*4z>JnTE48V9{J61Rz9#eFv~{IQwB|VFLpp$5{X?JS_kMSU~??
zr~)&vwS~Yvs2Et;0h>ipMNETP+`-C9*Z>NJuT2#*b31b2@4qMM*V=<~(Vr{taxW?v
zS}5CFlY`{RfmfIxZz^zi0KlV_J<uFT9L~5zAx6NW2iOAyY6=Vx;A@-X*O%HaL%tp@
zOPf1fglBJ_G>3IwpWk^3A`??=l3Ta(a^wmq6Lh@gskr#Is5Ks(`E>pCgwKgCY%>$4
zVww0MykjoFZflO(h3sw_E5ieuXbuJ%2p@B4X=b8Bm-rH@Q{?U|TZ!*+(RM+^jb-Vr
zw}|j1>}42;?K>GnGk8ld=_eI>@ojmx5mu3q0{GcjqL;4CVG6b0B74<Se{)IpWfx+f
zSTV0L9z;yMn%t17^>#sr&^=?Xw09c>xT(!v*LwVD@n7L|(&5o{#D={ULVG2bNi5*P
z+v;W555Z&<%|T*FYa*v#DU2I<G5H*<600Fa<a1Zi=%q9BywLz??tN|=5+VdC#|K#w
zmBBgWf#@(@YO3I<LO<0$vhX!s$bfG%wev9Xvkwbk17=L9G2%_$SPFJz0;WZ3LGD>d
zdFOMq<iL>ToJ>PWJ;*Rx*$awROxE#SV=Rl7a>4C5AEY(|JVlgvssg<D+)QQ4gD6HW
z*lS*w^n65BKFcQ+`Nx`kS|fYV0nUs5gSW?D+dDJf@!xP?&^G$$jZdWHem5u{UxuEa
zFWE}m?oNLVLPldg%H(<e?Pt!G9g<VhPu=(A+4i@BGio)CK$@qVsrRsmO}V}v`XTgY
zJ&ZPdny}+;riYodUhBLQ%d0mXa~?1K63TjPlsYMMY#W=ola*QTz3VUJx#{KRsJ+~o
zR91Q%oL-FL!mGn%v(W3moZ{7pyJs{kqq+%t<Un^cxG<8lr=<!qp`}ZylvC%LLcde8
z@2*+=Ie4i~uXAgC`&5mL$wZ(c*6S8~Z=o*cCu}t%g?s3OcsD<pXI<zGm0x?#9Z6%{
zfLlvD#^UCo>g5rEnlPrmWd0=Cr+K|#Qe8z{2>As%IsrZdor+>^s-jSuk1+BJ40IxV
zW;z|ks#IH{IUhWk*s3ihmTG-iGiFxz99I3od-V~8&D~tR2Hg1Nl4H|vb*@1-WV_Q=
zou`Eodfj>&xL%MDqMwV2pfaZVHS;9uGk~TYnhKxkvXkX|h>c7~(bm7RD1O2|BJ?dK
zwr}ZVwS2u-T|h;(HWBpFzLqzygz6(c;J_#1@k5a?@a=a*%?HMO+DUUHqs9i*4j&Rj
z`w0Ce6!^qyF|^V3Z{HYE5=CAR;M5j)@ve<vq7Fmbw9t_XI%tXEw8Z$ilkniFN`q)b
z4#jcO=yjb&8mkI#knDAedn8H439RD}9cVVoN!)`TeXu+`zUxqWKyjqR`Sdj|<7P{&
z+~~CKI#IWb+dcKh>KzW6*Yu`Fj&m3<KT9@YOy8Kx>@k!1xU$F});?zVrG}*^a1**7
zTmIZv-CbAwPNk=8gZcr&93A?)lXjy#Ex6LMfj=X(#KFEnqY*s7?#x=NIg{y8rH6BE
zq=W084`}6P_KnV7`E@Q@(wo<$%<ju$9f<qajdIP$f$I}Gp=-+1)_lY`7q&fP+QodV
z!zi<s?gv9RTh^0CeKAN}#+qXF+XtiTi8UL!o0Oc(5<FON*YVSmUwjwqvL1NFNW9Nv
z7FO-fO|4TsRjECS(9C5-AS$I-)C?85u_ZY(qj|R?IQc;n^GV~H88^?CBg|5s2dN6l
zlJMp@Z5llahS^|!XiH~0PrY-_%4T~$Uva{rq~7cCaSP031Xr<M1XCJ&+}S3&ey5J8
zx3wh&scLtu$&STIjHbso2_3!FeN-t$K}c<y7{y{R4eB7-L+Yaj-6DG@t#{imR#1=k
z##iY~`5$+W<KISHS!>8}iNV@j6a`a!{Q#OJjo@(8P*~aSwreOfoti%+5f!NH$xqoU
zYpa8*eam+}%%il<56v9gRRJk}!@ID$TWK@d;UvWqOuDVf^GSXCCQS!DjhHnU*MB5g
zDJ3P0aed(nsIl+$2u^Ptl_}MIR?6H#^8N#UOTRD1HU_M^DI29&3Ad9x*YtZ8B(fJ~
ziI|4*99VWDv5k*jPl~ge21gCO(PXE+KV*=ZhC80bM)LKYp`WJ#N{r+gE|c}5ovZ!F
zIHnbeWg|o*(`@f>EXjpbViXC9$Wc!_VMA`p!>{QPL<ox~b@M^9T(pb#M~mXLs2;``
z=SVHKkdu^mI^DELScYx1SxI)w;05b*+&{9LUukpKMDliThdq15<NXOrgAOx_@}Z;~
z6zq~fqSz%|w#-b{9WKBQ*)X|Pu1c9>Q1Q0Rf&FF>)`$%wn%6wy4GS&>pC=cGp6j8a
z3@#aL$Rj$5NxEb>p5r|G^|&hf*B4^y4fxaM%B^i4&d5#}P6W6oRkp<pd^o*bndqKU
z(e1l8UZ=M1yG2y?rXV(y!L()s6gySs3mqNFPb4;qsE`{ge(8bnj=<TYk~!Y6rbeD&
z@ms|Ur<aaqg1&Y9cqq-Djcy=I;r?V#Q41^o6g|d???O!7S*~CA_z5*V#-(z`rOx&r
z_#cgXJ_|_Ml=nZ{{b2pD4M(gx&t7VYmp4|8xkzO{d`xvflrsUlb9)^H)@Xizm(?G$
zJu9TOJujeXkV5fuRp6oCS&h@q&8N@(o8Q?+!+NbWrWYu0yxUDChzOmyIer%;&m2>c
z+}8^+wIt!G^gfi=@SeU8nzcOKL~CfCo@+=s>*fz%Xh?@GRq@Ry32xb>`P{xWAWUz_
z<_7wF93yMIGha{~hq<?>O0)G1@c|@`3`vcmt<fLDkims^zD?=d1uH@YaSd(k_5_#d
zp}PCt=CtKSr=(udL?xHdySSN6SGR@ZI=k9w)IhfDh2W%3>y(=2uTWU!$g^<I1(qj!
zbybcGOPh7Jc`307=VU%q)DC}_b^)&7{`OX)p#(93zkVA-oPqazxVt#GXC#oaxhv%I
z8Ke$mg8>l@+-W{Y$;JjQ4Ega|fBDi522O5Veltjk!ao$)M7UW*h1ocS*|{FFvT%!X
zvpnP!;p7$*ViN}Hu|DMgZ$N;S{?khl2OwwoC+RimV7JVx$HeVl_debxB~`w|F;sfD
zd&107xi%r2nyCg2Ir=faZkpKuB||9cB;Zam0H2+J+HB+z<!V?79SftDs5NVa+xWH{
zCt30ykLmQv$GwJLZZ1=PyASbUrh<WNE9g3{)%Hwe9+)Fqd+)K*SOWxozo!L;y95X)
zlqQE~3DtaGb>oBNG8=<CSW+By#@~MphY0I*Ya7HP?~&hu8Sqn)<LstgBNU`yd`?N{
zCssl%t&hg|LP=8iHly#7A~t~{nyMmJMalIwec$TcjJfOwX#+a$YMv&u&<?d`i@m9*
zkQ{O0$`7Z>IBC*CSi=bl(Q6&Efa4W#LGmDL_V`JNVZMC6FXb7rY<NAIKUWKl8Eb8c
zfH6})_WH+1ZX%UfS_W)Z-bMXZ{1KQEmG7}NY&fUIGpIt3&7aL^8rKlew0*P&y9v5?
z`;E<;MjUxao#y$23;`cacerWY3EW{O#p0?ELJKG9vY<NBQPxM4gI)TlH0J!dQ`;W%
z?G*Pe<WF50&-sZi>=C!){E`-*fucP&mxDKNa+in9;8t%^epOkV!Y4qayok@;z|A8{
zUrzosLT6!Q$Yo=>&G9xYFr@>Jv|c5p?F-FQKRfk%0cAXGBi-q4vO0})$5kIfVFRAe
z-@TsSgW|r|H7rMn>;B~OHM9E+Wj@^^^zZ8(_#lVbI|H8==&bBuu7?lNsj0;j#L@o`
D4o3-w

literal 0
HcmV?d00001

-- 
GitLab


From fa1f4637c9c95bd89936140c5af297c3c787be96 Mon Sep 17 00:00:00 2001
From: Samuel Van Stroud <sam.van.stroud@cern.ch>
Date: Tue, 12 Mar 2024 13:09:47 +0100
Subject: [PATCH 05/30] Paper fix

---
 .gitlab/.ci-test.yaml | 2 +-
 paper/paper.md        | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/.gitlab/.ci-test.yaml b/.gitlab/.ci-test.yaml
index d1beb976..9bc83da6 100644
--- a/.gitlab/.ci-test.yaml
+++ b/.gitlab/.ci-test.yaml
@@ -8,7 +8,7 @@ variables:
   artifacts:
     paths: [.coverage*]
   rules:
-    - changes: ["*", "salt/**/*.py"]
+    - changes: ["pyproject.toml", "requirements.txt", "salt/**/*.py", "**/*.yaml"]
 
 # --------------------------- UNIT TESTS ---------------------------
 unit-tests:
diff --git a/paper/paper.md b/paper/paper.md
index bc1a8e5b..9b1005ed 100644
--- a/paper/paper.md
+++ b/paper/paper.md
@@ -30,7 +30,7 @@ authors:
   - name: Dmitrii Kobylianskii
     orcid: 0009-0002-0070-5900
     affiliation: 6
-  - name: Wei Lai
+  - name: Wei Sheng Lai
     orcid: 0009-0001-6726-9851
     affiliation: 1
   - name: Matthew Leigh
-- 
GitLab


From 496a00e6a8ef70ec610109c2dfd9235c92fce633 Mon Sep 17 00:00:00 2001
From: Nicholas Luongo <nicholas.andrew.luongo@cern.ch>
Date: Wed, 13 Mar 2024 10:59:30 +0100
Subject: [PATCH 06/30] Multi-node slurm training and improved slurm submission

---
 docs/training.md             | 15 ++++--
 salt/submit/slurm_handler.py | 73 +++++++++++++++++++++++++++++
 salt/submit/submit_slurm.py  | 91 ++++++++++++++++++++++++++++++++++++
 3 files changed, 175 insertions(+), 4 deletions(-)
 create mode 100644 salt/submit/slurm_handler.py
 create mode 100644 salt/submit/submit_slurm.py

diff --git a/docs/training.md b/docs/training.md
index f89610f5..ce4b3e91 100644
--- a/docs/training.md
+++ b/docs/training.md
@@ -167,14 +167,21 @@ The job parameters such as memory requirements, number of GPUs and CPUs requeste
 
 #### Slurm Batch
 
-Those at institutions with Slurm managed GPU batch queues can submit training jobs using
+Those at institutions with Slurm managed GPU batch queues can submit training jobs using a very similar script.
+
+All options described above for HTCondor and more (CPUs, GPUs, etc) are available as command-line arguments.
 
 ```bash
-sbatch submit/submit_slurm.sh
+python submit/submit_slurm.py --config configs/GN2.yaml --tag test_salt --account MY-ACCOUNT --nodes 1 --gpus_per_node 2
 ```
 
-The submit script only supports running from a conda environment for now.
-There are several options in the script which need to be tailored to make sure to make a look inside.
+The submit/submit_slurm.py script itself can be modified if a required configuration is not supported in this way.
+
+Where arguments need to agree between Slurm and PyTorch Lightning, such as ntasks-per-node for Slurm and trainer.devices for Lightning, this is handled by the script.
+
+This only supports running from a conda environment for now.
+
+There is also an older submit/submit_slurm.sh bash script that is kept around for compatibility. Users are strongly encouraged to use the python script.
 
 ??? info "Cleaning up after interruption"
 
diff --git a/salt/submit/slurm_handler.py b/salt/submit/slurm_handler.py
new file mode 100644
index 00000000..6a119ca1
--- /dev/null
+++ b/salt/submit/slurm_handler.py
@@ -0,0 +1,73 @@
+import logging
+import subprocess
+from pathlib import Path
+from typing import Any
+
+logging.basicConfig(level=logging.INFO)
+
+
+class SlurmHandler:
+    """A class to submit batch jobs to a Slurm scheduler.
+
+    Attributes
+    ----------
+    batch_path : Path
+        Path where the batch file which is created will be stored.
+    log_path : Path
+        Path where the batch log files will be stored.
+    base_dir : Path
+        Directory in which batch job will execute its command.
+
+    Methods
+    -------
+    activate_testmode():
+        Activate test mode: check config files in dry runs, no jobs submitted.
+    deactivate_testmode():
+        Deactivate test mode, enable submitting jobs.
+    send_job(command: str, tag: str = "slurm_job"):
+        Submit job by creating and executing Slurm batch file
+    """
+
+    def __init__(self, batch_path: str, log_path: str, basedir: str) -> None:
+        self.batch_path = Path(batch_path)
+        self.log_path = Path(log_path)
+        self.base_dir = Path(basedir) if basedir else Path.cwd()
+        self._tag = "salt_job"
+        # Keywords to be used in Slurm configuration
+        self._slurm_options_dict: dict[str, Any] = {}
+        self._test_mode = False
+
+    def activate_testmode(self) -> None:
+        logging.debug("Activated test mode: not submitting any jobs.")
+        self._test_mode = True
+
+    def deactivate_testmode(self) -> None:
+        logging.debug("Deactivated test mode: submitting jobs.")
+        self._test_mode = False
+
+    def send_job(self, command: str, tag: str = "salt_job") -> None:
+        self._tag = tag
+        batchfile = self._make_batch_file(command)
+        if self._test_mode:
+            logging.debug(f"Created batch file {batchfile}")
+        else:
+            subprocess.call(f"sbatch {batchfile}", shell=True)
+
+    def __setitem__(self, key: str, value: Any) -> None:  # noqa: ANN401
+        self._slurm_options_dict[key] = value
+
+    def _make_batch_file(self, command: str) -> Path:
+        batch_file = self.batch_path / f"sbatch_{self._tag}.sh"
+        with batch_file.open("w") as bf:
+            bf.write(f"""#!/bin/sh
+# {self._tag} batch run script\n""")
+            for key, value in self._slurm_options_dict.items():
+                if value is None:
+                    bf.write(f"#SBATCH --{key}\n")
+                else:
+                    bf.write(f"#SBATCH --{key}={value}\n")
+            bf.write(f"""BASEDIR={self.base_dir};pwd; ls -l\n""")
+            bf.write(f"""{command}""")
+        batch_file.chmod(0o755)
+        logging.debug(f"Made batch file {batch_file}")
+        return batch_file
diff --git a/salt/submit/submit_slurm.py b/salt/submit/submit_slurm.py
new file mode 100644
index 00000000..69425fee
--- /dev/null
+++ b/salt/submit/submit_slurm.py
@@ -0,0 +1,91 @@
+import argparse
+from pathlib import Path
+
+from slurm_handler import SlurmHandler
+
+# Set up argument parser
+parser = argparse.ArgumentParser(description="Submit batch jobs to Slurm.")
+parser.add_argument("-c", "--config", required=True, type=Path, help="Configuration file for job.")
+parser.add_argument("-t", "--tag", default="salt_job", help="Tag for job to be submitted.")
+parser.add_argument("-p", "--partition", default=None, type=str, help="Partition to submit job.")
+parser.add_argument("-a", "--account", default=None, type=str, help="Slurm account name.")
+parser.add_argument(
+    "-e",
+    "--environment",
+    default="conda",
+    choices=["conda", "local"],
+    help="Environment for job to be submitted.",
+)
+parser.add_argument("-n", "--nodes", default=1, type=int, help="Nodes to split training across")
+parser.add_argument("-g", "--gpus_per_node", default=1, type=int, help="GPUs for each node")
+parser.add_argument(
+    "-gt",
+    "--gpu_type",
+    default="",
+    type=str,
+    help="GPU type e.g. v100, leave empty for no preference",
+)
+parser.add_argument("-cpt", "--cpus_per_task", default=10, type=int, help="CPUs for each task")
+parser.add_argument("-m", "--memory", default="100G", type=str, help="Memory per node")
+parser.add_argument("-ex", "--exclusive", action="store_true")
+parser.add_argument("-ti", "--time", default=None, type=str, help="Job time limit e.g. '24:00:00'")
+parser.add_argument("-f", "--force", action="store_true")
+args = parser.parse_args()
+
+# Define directories
+batch_dir = Path.cwd() / "slurm"
+batch_path = batch_dir / "batch"
+log_path = batch_dir / "batch_logs"
+for directory in [batch_path, log_path]:
+    directory.mkdir(parents=True, exist_ok=True)
+
+# Variables that need to be harmonized between Slurm and salt
+nodes = args.nodes
+gpus_per_node = args.gpus_per_node
+cpus_per_task = args.cpus_per_task
+
+gpu_type = args.gpu_type
+gres = f"gpu:{gpu_type}:{gpus_per_node}" if gpu_type else f"gpu:{gpus_per_node}"
+
+# Set up Slurm options
+job_basedir = Path(__file__).resolve().parent.parent.parent
+handler = SlurmHandler(str(batch_path), str(log_path), str(job_basedir))
+handler["job-name"] = args.tag
+if args.partition is not None:
+    handler["partition"] = args.partition
+if args.account is not None:
+    handler["account"] = args.account
+handler["nodes"] = nodes
+handler["gres"] = gres
+handler["ntasks-per-node"] = gpus_per_node
+handler["mem"] = args.memory  # memory, 100 GiB - in MiB
+if args.exclusive:
+    handler["exclusive"] = None  # Exclusive access to nodes
+handler["cpus-per-task"] = cpus_per_task  # Don't use this if you have exclusive access to the node
+handler["export"] = "ALL"
+handler["output"] = f"{log_path}/slurm-%j.out"
+handler["error"] = f"{log_path}/slurm-%j.err"
+if args.time is not None:
+    handler["time"] = args.time  # Time limit of job, default is system specified
+
+# Construct and submit the job command
+command = "cd ${BASEDIR} && " "export OMP_NUM_THREADS=1\n"
+if args.environment == "conda":
+    command += (
+        "source conda/bin/activate && conda activate salt\n"
+        'echo "Activated environment ${CONDA_DEFAULT_ENV}"\n'
+    )
+command += (
+    'echo "CUDA_VISIBLE_DEVICES: ${CUDA_VISIBLE_DEVICES}"\n'
+    "cat /proc/cpuinfo | awk '/^processor/{print $3}' | tail -1\n"
+    "cd ${BASEDIR}/salt && pwd\n"
+    f"time srun salt fit --config {args.config.resolve()} "
+    f"--trainer.devices={gpus_per_node} "
+    f"--trainer.num_nodes={nodes} "
+    f"--data.num_workers={cpus_per_task} "
+)
+if args.force:
+    command += "--force "
+
+# handler.activate_testmode() # To inspect batch script before running
+handler.send_job(command, args.tag)
-- 
GitLab


From 7497808f44035153049d5dc4722d103a5ece371d Mon Sep 17 00:00:00 2001
From: Samuel Van Stroud <sam.van.stroud@cern.ch>
Date: Mon, 15 Apr 2024 13:11:03 +0200
Subject: [PATCH 07/30] Bump onnx version

---
 requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements.txt b/requirements.txt
index 6dceab43..768a89c6 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -14,7 +14,7 @@ torch==2.2.1
 lightning==2.2.0
 jsonargparse[all]==4.27.5
 torchmetrics==1.2.1
-onnx==1.15.0
+onnx==1.16.0
 onnxruntime==1.15.1
 atlas-ftag-tools==0.1.18
 scipy==1.12.0
-- 
GitLab


From 9283d52f8df911cfeb8c51e3e02c7718be7ce58b Mon Sep 17 00:00:00 2001
From: Wei Sheng Lai <wei.sheng.lai@cern.ch>
Date: Tue, 23 Apr 2024 18:27:33 +0200
Subject: [PATCH 08/30] JitPlugin coverage tool to improve coverage for
 torch.jit functions

---
 salt/models/task.py      | 4 ++--
 salt/to_onnx.py          | 6 +++---
 salt/utils/union_find.py | 8 +++++---
 3 files changed, 10 insertions(+), 8 deletions(-)

diff --git a/salt/models/task.py b/salt/models/task.py
index bfb1db74..4f5270a2 100644
--- a/salt/models/task.py
+++ b/salt/models/task.py
@@ -11,7 +11,7 @@ from salt.utils.array_utils import listify
 from salt.utils.class_names import CLASS_NAMES
 from salt.utils.scalers import RegressionTargetScaler
 from salt.utils.tensor_utils import masked_softmax
-from salt.utils.union_find import get_node_assignment
+from salt.utils.union_find import get_node_assignment_jit
 
 
 class TaskBase(nn.Module, ABC):
@@ -513,7 +513,7 @@ class VertexingTask(TaskBase):
         return 1 + weights
 
     def run_inference(self, preds: Tensor, pad_mask: Tensor | None = None):
-        preds = get_node_assignment(preds, pad_mask)
+        preds = get_node_assignment_jit(preds, pad_mask)
         preds = mask_fill_flattened(preds, pad_mask)
         dtype = np.dtype([("VertexIndex", "i8")])
         return u2s(preds.int().cpu().numpy(), dtype)
diff --git a/salt/to_onnx.py b/salt/to_onnx.py
index 78b0170c..cbfdf143 100644
--- a/salt/to_onnx.py
+++ b/salt/to_onnx.py
@@ -16,7 +16,7 @@ from tqdm import tqdm
 from salt.models.task import mask_fill_flattened
 from salt.modelwrapper import ModelWrapper
 from salt.utils.inputs import inputs_sep_no_pad, inputs_sep_with_pad
-from salt.utils.union_find import get_node_assignment
+from salt.utils.union_find import get_node_assignment_jit
 
 torch.manual_seed(42)
 # https://gitlab.cern.ch/atlas/athena/-/blob/master/PhysicsAnalysis/JetTagging/FlavorTagDiscriminants/Root/DataPrepUtilities.cxx
@@ -174,7 +174,7 @@ class ONNXModel(ModelWrapper):
             if "track_vertexing" in track_outs:
                 pad_mask = torch.zeros(tracks.shape[:-1], dtype=torch.bool)
                 edge_scores = track_outs["track_vertexing"]
-                vertex_indices = get_node_assignment(edge_scores, pad_mask)
+                vertex_indices = get_node_assignment_jit(edge_scores, pad_mask)
                 vertex_list = mask_fill_flattened(vertex_indices, pad_mask)
                 onnx_outputs += (vertex_list.reshape(-1).char(),)
 
@@ -231,7 +231,7 @@ def compare_output(pt_model, onnx_session, include_aux, n_track=40):
     # test vertexing
     if include_aux:
         pred_pt_scores = outputs_pt["tracks"]["track_vertexing"].detach()
-        pred_pt_indices = get_node_assignment(pred_pt_scores, pad_mask)
+        pred_pt_indices = get_node_assignment_jit(pred_pt_scores, pad_mask)
         pred_pt_vtx = mask_fill_flattened(pred_pt_indices, pad_mask)
 
         pred_onnx_vtx = outputs_onnx[-1]
diff --git a/salt/utils/union_find.py b/salt/utils/union_find.py
index 8522b612..7a116764 100644
--- a/salt/utils/union_find.py
+++ b/salt/utils/union_find.py
@@ -2,7 +2,6 @@ import torch
 from torch import Tensor
 
 
-@torch.jit.script
 def symmetrize_edge_scores(scores: Tensor, node_numbers: Tensor):
     """Function to make edge scores symmetric.
 
@@ -36,7 +35,6 @@ def symmetrize_edge_scores(scores: Tensor, node_numbers: Tensor):
     return torch.sigmoid(edge_scores.float())
 
 
-@torch.jit.script
 def update_node_indices(
     scores: Tensor, node_indices: Tensor, update_indices: Tensor, node_numbers: Tensor
 ):
@@ -92,7 +90,6 @@ def update_node_indices(
     return node_indices, update_indices
 
 
-@torch.jit.script
 def get_node_assignment(output: Tensor, mask: Tensor):
     """Run edge score symmetrization and union find.
 
@@ -117,3 +114,8 @@ def get_node_assignment(output: Tensor, mask: Tensor):
         )
 
     return node_indices.unsqueeze(-1)
+
+
+@torch.jit.script
+def get_node_assignment_jit(output: Tensor, mask: Tensor):
+    return get_node_assignment(output, mask)
-- 
GitLab


From daf5c4ab1fec160a65774f27f6ed228adceb4f61 Mon Sep 17 00:00:00 2001
From: Jackson Carl Burzynski <jackson.carl.burzynski@cern.ch>
Date: Tue, 23 Apr 2024 19:14:23 +0200
Subject: [PATCH 09/30] Add support for singularity environment to
 submit_slurm.py

---
 .gitignore                  |  2 ++
 docs/training.md            |  2 --
 salt/submit/submit_slurm.py | 30 +++++++++++++++++++++++++-----
 3 files changed, 27 insertions(+), 7 deletions(-)

diff --git a/.gitignore b/.gitignore
index 828e588c..acbbac8d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -8,6 +8,7 @@ python_install/
 conda/
 condor/
 logs/
+slurm/
 tmp/
 env/
 plots/
@@ -26,6 +27,7 @@ user/
 *.h5
 *.egg-info/
 *_cache/
+*.cache/
 *.ipynb
 .coverage*
 .cometml-runs
diff --git a/docs/training.md b/docs/training.md
index ce4b3e91..c59669d0 100644
--- a/docs/training.md
+++ b/docs/training.md
@@ -179,8 +179,6 @@ The script submit/submit_slurm.py script itself can be modified if a required co
 
 Where arguments need to agree between Slurm and Pytorch Lightning, such as ntasks-per-node for Slurm and trainer.devices for Lightning, this is handled by the script.
 
-This only supports running from a conda environment for now.
-
 There is also an older submit/submit_slurm.sh bash script that is kept around for compatibility. Users are strongly encouraged to use the python script.
 
 ??? info "Cleaning up after interruption"
diff --git a/salt/submit/submit_slurm.py b/salt/submit/submit_slurm.py
index 69425fee..8fa4f1ef 100644
--- a/salt/submit/submit_slurm.py
+++ b/salt/submit/submit_slurm.py
@@ -13,7 +13,7 @@ parser.add_argument(
     "-e",
     "--environment",
     default="conda",
-    choices=["conda", "local"],
+    choices=["conda", "singularity", "local"],
     help="Environment for job to be submitted.",
 )
 parser.add_argument("-n", "--nodes", default=1, type=int, help="Nodes to split training across")
@@ -30,8 +30,17 @@ parser.add_argument("-m", "--memory", default="100G", type=str, help="Memory per
 parser.add_argument("-ex", "--exclusive", action="store_true")
 parser.add_argument("-ti", "--time", default=None, type=str, help="Job time limit e.g. '24:00:00'")
 parser.add_argument("-f", "--force", action="store_true")
+parser.add_argument(
+    "-b",
+    "--bind",
+    nargs="+",
+    help="List of binds for singularity (e.g. /path/to/upp/output:/inputs)",
+)
 args = parser.parse_args()
 
+if args.bind and args.environment != "singularity":
+    parser.error("--bind option is only allowed with --environment singularity")
+
 # Define directories
 batch_dir = Path.cwd() / "slurm"
 batch_path = batch_dir / "batch"
@@ -75,17 +84,28 @@ if args.environment == "conda":
         "source conda/bin/activate && conda activate salt\n"
         'echo "Activated environment ${CONDA_DEFAULT_ENV}"\n'
     )
+elif args.environment == "singularity":
+    command += "srun singularity exec -e --nv \\\n"
+    command += " \\\n".join([f"--bind {b}" for b in args.bind]) + " \\\n"
+    command += (
+        "--home ${BASEDIR} \\\n"
+        "/cvmfs/unpacked.cern.ch/gitlab-registry.cern.ch/atlas-flavor-tagging-tools/algorithms/salt:latest/ \\\n"  # noqa: E501
+        'sh -c "'
+    )
 command += (
-    'echo "CUDA_VISIBLE_DEVICES: ${CUDA_VISIBLE_DEVICES}"\n'
-    "cat /proc/cpuinfo | awk '/^processor/{print $3}' | tail -1\n"
-    "cd ${BASEDIR}/salt && pwd\n"
-    f"time srun salt fit --config {args.config.resolve()} "
+    "echo 'CUDA_VISIBLE_DEVICES: ${CUDA_VISIBLE_DEVICES}' &&\n"
+    "cat /proc/cpuinfo | awk '/^processor/{print $3}' | tail -1 &&\n"
+    "cd ${BASEDIR}/salt && pwd &&\n"
+    + ("srun " if args.environment == "conda" else "")
+    + f"salt fit --config {args.config.resolve()} "
     f"--trainer.devices={gpus_per_node} "
     f"--trainer.num_nodes={nodes} "
     f"--data.num_workers={cpus_per_task} "
 )
 if args.force:
     command += "--force "
+if args.environment == "singularity":
+    command += '"'
 
 # handler.activate_testmode() # To inspect batch script before running
 handler.send_job(command, args.tag)
-- 
GitLab


From d1c0af143f6ebe13849e2bd1a874d09da44c4d16 Mon Sep 17 00:00:00 2001
From: Emil Haines <emil.harry.haines@cern.ch>
Date: Mon, 29 Apr 2024 10:46:00 +0200
Subject: [PATCH 10/30] Extend featurewise transformations to layers in encoder

---
 .gitlab/.ci-test.yaml                         |  4 +-
 docs/configuration.md                         | 10 ++++-
 salt/models/featurewise.py                    | 18 ++++++--
 salt/models/saltmodel.py                      | 45 ++++++++++++-------
 salt/models/transformer.py                    |  7 ++-
 salt/models/transformer_v2.py                 | 13 +++++-
 .../configs/param_concat.yaml}                | 13 ++----
 .../configs/param_featurewise.yaml}           | 20 +++++----
 salt/tests/test_pipeline.py                   | 12 +++--
 9 files changed, 94 insertions(+), 48 deletions(-)
 rename salt/{configs/parameterisation_concatenation.yaml => tests/configs/param_concat.yaml} (87%)
 rename salt/{configs/parameterisation_featurewise.yaml => tests/configs/param_featurewise.yaml} (88%)

diff --git a/.gitlab/.ci-test.yaml b/.gitlab/.ci-test.yaml
index 9bc83da6..4c665af8 100644
--- a/.gitlab/.ci-test.yaml
+++ b/.gitlab/.ci-test.yaml
@@ -128,9 +128,9 @@ test-maskformer:
 test-parameterisation_concatenation:
   <<: *test-template
   script:
-    - $TEST_CMD salt/tests/test_pipeline.py::test_parameterisation_concatenation
+    - $TEST_CMD salt/tests/test_pipeline.py::test_param_concat
 
 test-parameterisation_featurewise:
   <<: *test-template
   script:
-    - $TEST_CMD salt/tests/test_pipeline.py::test_parameterisation_featurewise
+    - $TEST_CMD salt/tests/test_pipeline.py::test_param_featurewise
diff --git a/docs/configuration.md b/docs/configuration.md
index 57d856a6..0954c8a6 100644
--- a/docs/configuration.md
+++ b/docs/configuration.md
@@ -283,6 +283,14 @@ by [Dumoulin et al](https://distill.pub/2018/feature-wise-transformations/). Her
           dense_config_bias:
             hidden_layers: [4]
             output_size: 17
+        - layer: encoder
+          apply_norm: True
+          dense_config_scale:
+            hidden_layers: [128]
+            output_size: 256
+          dense_config_bias:
+            hidden_layers: [128]
+            output_size: 256
         - layer: global
           dense_config_scale:
             output_size: 128
@@ -292,7 +300,7 @@ by [Dumoulin et al](https://distill.pub/2018/feature-wise-transformations/). Her
 ```
 
 Here, two instances of featurewise transformations have been added to the model. For each, you must specify the layer whose features you would
-like to transform (this can currently be either `input`, which applies the transformations to the features before they are passed into the initialisation network, or `global`, which applies them to the global track representations outputted by the encoder). For each instance, you can specify either one or both of `dense_config_scale` or `dense_config_bias`, which configure dense networks whose output scales and biases the features of the chosen layer, respectively. It is important to ensure the `output_size` of these networks matches the number of features in the layer you are transforming. In this case, the transformations are applied to a model with 17 inputs per track, and an encoder that outputs 128 features for each track representation. 
+like to transform (this can currently be either `input`, which applies the transformations to the features before they are passed into the initialisation network, `encoder`, which applies the transformations to the inputs of each layer of the encoder using separate networks, or `global`, which applies them to the global track representations outputted by the encoder). For each instance, you can specify either one or both of `dense_config_scale` or `dense_config_bias`, which configure dense networks whose output scales and biases the features of the chosen layer, respectively. It is important to ensure the `output_size` of these networks matches the number of features in the layer you are transforming. In this case, the transformations are applied to a model with 17 inputs per track, the layers of an encoder with 256 features, and the output of the encoder, which has 128 features for each track representation. You can optionally apply a layer normalisation after applying the transformations by setting `apply_norm: True` for a given network, as shown above.
 
 
 ### Training
diff --git a/salt/models/featurewise.py b/salt/models/featurewise.py
index 7695d145..6ecc8652 100644
--- a/salt/models/featurewise.py
+++ b/salt/models/featurewise.py
@@ -12,6 +12,7 @@ class FeaturewiseTransformation(nn.Module):
         variables: Vars,
         dense_config_scale: dict | None = None,
         dense_config_bias: dict | None = None,
+        apply_norm: bool = False,
     ):
         """Perform feature wise transformations on the features of a layer.
         https://distill.pub/2018/feature-wise-transformations/.
@@ -19,7 +20,7 @@ class FeaturewiseTransformation(nn.Module):
         Parameters
         ----------
         layer : str
-            layer to scale/bias (either "input", or "global")
+            layer to scale/bias (either "input", "encoder", or "global")
         variables : Vars
             Input variables used in the forward pass, set automatically by the framework
         dense_config_scale : dict
@@ -28,30 +29,39 @@ class FeaturewiseTransformation(nn.Module):
         dense_config_bias : dict
             Keyword arguments for [salt.models.Dense][salt.models.Dense],
             the dense network performing the biasing.
+        apply_norm : bool
+            Apply layer normalisation to the transformed features. By default false.
         """
         super().__init__()
 
         self.layer = layer
-        if layer not in {"input", "global"}:
+        if layer not in {"input", "encoder", "global"}:
             raise ValueError(
-                "Featurewise transformations must be applied to either 'input' or 'global' layers."
+                "Select either 'input', 'encoder' or 'global' layers for featurewise nets."
             )
 
         self.scale_net = None
         self.bias_net = None
+        self.num_features = None
+        self.norm = None
 
         if dense_config_scale:
             dense_config_scale["input_size"] = len(variables.get("PARAMETERS", []))
             self.scale_net = Dense(**dense_config_scale)
+            self.num_features = self.scale_net.output_size
         if dense_config_bias:
             dense_config_bias["input_size"] = len(variables.get("PARAMETERS", []))
             self.bias_net = Dense(**dense_config_bias)
+            self.num_features = self.bias_net.output_size
 
         if not self.bias_net and not self.scale_net:
             raise ValueError(
                 "Need to specify at least one dense_config_scale or dense_config_bias."
             )
 
+        if apply_norm:
+            self.norm = nn.LayerNorm(self.num_features)
+
     def forward(self, inputs: dict, features: Tensor):
         if "PARAMETERS" not in inputs:
             raise ValueError("Featurewise transformations require 'PARAMETERS'.")
@@ -60,4 +70,6 @@ class FeaturewiseTransformation(nn.Module):
             features = self.scale_net(x).unsqueeze(1) * features
         if self.bias_net:
             features = torch.add(features, self.bias_net(x).unsqueeze(1))
+        if self.norm:
+            features = self.norm(features)
         return features
diff --git a/salt/models/saltmodel.py b/salt/models/saltmodel.py
index 06dd583b..d0c7566a 100644
--- a/salt/models/saltmodel.py
+++ b/salt/models/saltmodel.py
@@ -63,20 +63,9 @@ class SaltModel(nn.Module):
         """
         super().__init__()
 
-        self.featurewise_nets = None
+        # init featurewise networks
         if featurewise_nets:
-            self.featurewise_nets = nn.ModuleList([
-                FeaturewiseTransformation(**featurewise_net) for featurewise_net in featurewise_nets
-            ])
-        self.featurewise_nets_map = (
-            {featurewise_net.layer: featurewise_net for featurewise_net in self.featurewise_nets}
-            if self.featurewise_nets
-            else {}
-        )
-        # if available, add featurewise net to init net config
-        if "input" in self.featurewise_nets_map:
-            for init_net in init_nets:
-                init_net["featurewise"] = self.featurewise_nets_map["input"]
+            self.init_featurewise(featurewise_nets, init_nets, encoder)
 
         self.init_nets = nn.ModuleList([InitNet(**init_net) for init_net in init_nets])
         self.tasks = tasks
@@ -172,7 +161,7 @@ class SaltModel(nn.Module):
 
         # Generate embedding from encoder, or by concatenating the init net outputs
         if self.encoder:
-            preds = {"embed_xs": self.encoder(xs, pad_mask=pad_masks, **kwargs)}
+            preds = {"embed_xs": self.encoder(xs, pad_mask=pad_masks, inputs=inputs, **kwargs)}
         else:
             preds = {"embed_xs": flatten_tensor_dict(xs)}
 
@@ -182,9 +171,9 @@ class SaltModel(nn.Module):
             else (preds, labels, {})
         )
 
-        # apply featurewise transformation to global track representations if configured
-        if "global" in self.featurewise_nets_map:
-            preds["embed_xs"] = self.featurewise_nets_map["global"](inputs, preds["embed_xs"])
+        # apply featurewise transformation to global track embeddings if configured
+        if hasattr(self, "featurewise_global") and self.featurewise_global:
+            preds["embed_xs"] = self.featurewise_global(inputs, preds["embed_xs"])
 
         # pooling
         if self.pool_net:
@@ -229,3 +218,25 @@ class SaltModel(nn.Module):
             loss[task.name] = task_loss
 
         return preds, loss
+
+    def init_featurewise(
+        self, featurewise_nets: list[dict], init_nets: list[dict], encoder: nn.Module
+    ):
+        for featurewise_net in featurewise_nets:
+            if featurewise_net.get("layer") == "input":
+                for init_net in init_nets:
+                    init_net["featurewise"] = FeaturewiseTransformation(**featurewise_net)
+            elif featurewise_net.get("layer") == "encoder":
+                if encoder:
+                    for _layer in range(encoder.num_layers):
+                        encoder.featurewise.append(FeaturewiseTransformation(**featurewise_net))
+                else:
+                    raise ValueError(
+                        "Requested featurewise transforms for encoder, no encoder configured"
+                    )
+            elif featurewise_net.get("layer") == "global":
+                self.featurewise_global = FeaturewiseTransformation(**featurewise_net)
+            else:
+                raise ValueError(
+                    "Select either 'input', 'encoder' or 'global' layers for featurewise nets."
+                )
diff --git a/salt/models/transformer.py b/salt/models/transformer.py
index 212a51c5..e92508e8 100644
--- a/salt/models/transformer.py
+++ b/salt/models/transformer.py
@@ -6,6 +6,7 @@ from torch import BoolTensor, Tensor, cat, nn
 
 from salt.models.attention import MultiheadAttention
 from salt.models.dense import Dense
+from salt.stypes import Tensors
 
 
 class TransformerEncoderLayer(nn.Module):
@@ -171,6 +172,7 @@ class TransformerEncoder(nn.Module):
         self.out_dim = out_dim
         self.update_edges = update_edges
         self.muP = muP
+        self.featurewise = nn.ModuleList()
 
         self.layers = nn.ModuleList([
             TransformerEncoderLayer(
@@ -203,6 +205,7 @@ class TransformerEncoder(nn.Module):
         x: Tensor | dict,
         edge_x: Tensor = None,
         pad_mask: Tensor | dict | None = None,
+        inputs: Tensors = None,
         **kwargs,
     ) -> Tensor:
         """Pass the input through all layers sequentially."""
@@ -212,7 +215,9 @@ class TransformerEncoder(nn.Module):
         if isinstance(pad_mask, dict):
             pad_mask = cat(list(pad_mask.values()), dim=1)
 
-        for layer in self.layers:
+        for i, layer in enumerate(self.layers):
+            if len(self.featurewise) > 0:
+                x = self.featurewise[i](inputs, x)
             if edge_x is not None:
                 x, edge_x = layer(x, edge_x, pad_mask=pad_mask, **kwargs)
             else:
diff --git a/salt/models/transformer_v2.py b/salt/models/transformer_v2.py
index e9f25b53..829188e0 100644
--- a/salt/models/transformer_v2.py
+++ b/salt/models/transformer_v2.py
@@ -15,6 +15,7 @@ import torch
 from torch import BoolTensor, Size, Tensor, nn
 
 import salt.models.layernorm as layernorms
+from salt.stypes import Tensors
 
 
 def merge_masks(
@@ -414,14 +415,22 @@ class TransformerV2(nn.Module):
         self.out_proj = None
         if out_dim is not None:
             self.out_proj = nn.Linear(self.embed_dim, out_dim)
+        self.featurewise = nn.ModuleList()
 
-    def forward(self, x: Tensor, pad_mask: BoolTensor) -> Tensor:
+    def forward(
+        self,
+        x: Tensor,
+        pad_mask: BoolTensor,
+        inputs: Tensors = None,
+    ) -> Tensor:
         if isinstance(x, dict):
             x = torch.cat(list(x.values()), dim=1)
         if isinstance(pad_mask, dict):
             pad_mask = torch.cat(list(pad_mask.values()), dim=1)
 
-        for layer in self.layers:
+        for i, layer in enumerate(self.layers):
+            if len(self.featurewise) > 0:
+                x = self.featurewise[i](inputs, x)
             x = layer(x, pad_mask)
         if self.out_proj is not None:
             x = self.out_proj(x)
diff --git a/salt/configs/parameterisation_concatenation.yaml b/salt/tests/configs/param_concat.yaml
similarity index 87%
rename from salt/configs/parameterisation_concatenation.yaml
rename to salt/tests/configs/param_concat.yaml
index 14a1f72c..f15d2861 100644
--- a/salt/configs/parameterisation_concatenation.yaml
+++ b/salt/tests/configs/param_concat.yaml
@@ -1,4 +1,5 @@
-name: parameterisation_concatenation
+# test config for parameterisation using input concatenation
+name: param_concat
 
 model:
   lrs_config:
@@ -47,10 +48,10 @@ model:
                 label: flavour_label
                 loss:
                   class_path: torch.nn.CrossEntropyLoss
-                  init_args: { weight: [1.0, 2.0, 2.0] }
+                  init_args: { weight: [1.0, 2.0, 2.0, 16.8] }
                 dense_config: &task_dense_config
                   input_size: *out_dim
-                  output_size: 3
+                  output_size: 4
                   hidden_layers: [128, 64, 32]
                   activation: *activation
 
@@ -112,11 +113,6 @@ data:
     PARAMETERS:
       - mass
 
-  train_file: /share/lustre/ehaines/umami-preprocessing/train_test/pp_output_train.h5
-  val_file: /share/lustre/ehaines/umami-preprocessing/val_test/pp_output_val.h5
-  norm_dict: /share/lustre/ehaines/umami-preprocessing/train_test/norm_dict.yaml
-  class_dict: /share/lustre/ehaines/umami-preprocessing/train_test/class_dict.yaml
-
   PARAMETERS:
     mass:
       train: [5, 40, 55]
@@ -127,6 +123,5 @@ data:
 
 trainer:
   max_epochs: 2
-  accelerator: gpu
   devices: 1
   precision: 32
diff --git a/salt/configs/parameterisation_featurewise.yaml b/salt/tests/configs/param_featurewise.yaml
similarity index 88%
rename from salt/configs/parameterisation_featurewise.yaml
rename to salt/tests/configs/param_featurewise.yaml
index 5f113a11..a653c7bc 100644
--- a/salt/configs/parameterisation_featurewise.yaml
+++ b/salt/tests/configs/param_featurewise.yaml
@@ -1,4 +1,5 @@
-name: parameterisation_featurewise
+# test config for parameterisation using featurewise transformations
+name: param_featurewise
 
 model:
   lrs_config:
@@ -25,6 +26,13 @@ model:
           dense_config_bias:
             hidden_layers: [4]
             output_size: 21
+        - layer: encoder
+          dense_config_scale:
+            hidden_layers: [128]
+            output_size: 256
+          dense_config_bias:
+            hidden_layers: [128]
+            output_size: 256
         - layer: global
           dense_config_scale:
             output_size: 128
@@ -60,10 +68,10 @@ model:
                 label: flavour_label
                 loss:
                   class_path: torch.nn.CrossEntropyLoss
-                  init_args: { weight: [1.0, 2.0, 2.0] }
+                  init_args: { weight: [1.0, 2.0, 2.0, 16.8] }
                 dense_config: &task_dense_config
                   input_size: *out_dim
-                  output_size: 3
+                  output_size: 4
                   hidden_layers: [128, 64, 32]
                   activation: *activation
 
@@ -125,11 +133,6 @@ data:
     PARAMETERS:
       - mass
 
-  train_file: /share/lustre/ehaines/umami-preprocessing/train_test/pp_output_train.h5
-  val_file: /share/lustre/ehaines/umami-preprocessing/val_test/pp_output_val.h5
-  norm_dict: /share/lustre/ehaines/umami-preprocessing/train_test/norm_dict.yaml
-  class_dict: /share/lustre/ehaines/umami-preprocessing/train_test/class_dict.yaml
-
   PARAMETERS:
     mass:
       train: [5, 40, 55]
@@ -140,6 +143,5 @@ data:
 
 trainer:
   max_epochs: 2
-  accelerator: gpu
   devices: 1
   precision: 32
diff --git a/salt/tests/test_pipeline.py b/salt/tests/test_pipeline.py
index 1b75b2a3..d32a9737 100644
--- a/salt/tests/test_pipeline.py
+++ b/salt/tests/test_pipeline.py
@@ -262,10 +262,14 @@ def test_maskformer(tmp_path) -> None:
 
 
 @pytest.mark.filterwarnings(w)
-def test_parameterisation_concatenation(tmp_path) -> None:
-    run_combined(tmp_path, "parameterisation_concatenation.yaml", do_onnx=False, inc_params=True)
+def test_param_concat(tmp_path) -> None:
+    args = [f"--config={Path(__file__).parent.parent / 'tests' / 'configs' / 'param_concat.yaml'}"]
+    run_combined(tmp_path, CONFIG, do_onnx=False, inc_params=True, train_args=args)
 
 
 @pytest.mark.filterwarnings(w)
-def test_parameterisation_featurewise(tmp_path) -> None:
-    run_combined(tmp_path, "parameterisation_featurewise.yaml", do_onnx=False, inc_params=True)
+def test_param_featurewise(tmp_path) -> None:
+    args = [
+        f"--config={Path(__file__).parent.parent / 'tests' / 'configs' / 'param_featurewise.yaml'}"
+    ]
+    run_combined(tmp_path, CONFIG, do_onnx=False, inc_params=True, train_args=args)
-- 
GitLab


From cfe3ff4e50e279903fb52f6c1272ce85ccff577f Mon Sep 17 00:00:00 2001
From: Lucio Derin <lucio.derin@cern.ch>
Date: Wed, 1 May 2024 17:52:55 +0200
Subject: [PATCH 11/30] Fix tutorial script that fetches h5 files

---
 docs/tutorial.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/tutorial.md b/docs/tutorial.md
index bd1bcf71..3ccaf4e4 100644
--- a/docs/tutorial.md
+++ b/docs/tutorial.md
@@ -29,7 +29,7 @@ Create a directory at a location with sufficient free disk space. The unpacked d
 Execute the following commands to download all files to a directory which you will need to define (replace `<path to directory>` with the path to the directory of your choice).
 
 ```bash
-export DIR_TUTORIAL_DATA=<path to directory>
+export TUTORIAL_DATA=<path to directory>
 mkdir -p $TUTORIAL_DATA
 cd $TUTORIAL_DATA
 curl -o $TUTORIAL_DATA/tutorialdata.zip "https://zenodo.org/api/records/10371998/files-archive"
-- 
GitLab


From 0d9bfc30d2c1987db4cda10fef0aeba7525f0cfa Mon Sep 17 00:00:00 2001
From: Nicholas Luongo <nicholas.andrew.luongo@cern.ch>
Date: Thu, 2 May 2024 17:05:51 +0200
Subject: [PATCH 12/30] Support Slurm requeue

---
 docs/training.md            |  6 ++++++
 salt/submit/submit_slurm.py | 26 ++++++++++++++++++++++++++
 salt/utils/cli.py           | 19 ++++++++++++++++++-
 3 files changed, 50 insertions(+), 1 deletion(-)

diff --git a/docs/training.md b/docs/training.md
index c59669d0..2311f8d3 100644
--- a/docs/training.md
+++ b/docs/training.md
@@ -179,6 +179,12 @@ The script submit/submit_slurm.py script itself can be modified if a required co
 
 Where arguments need to agree between Slurm and Pytorch Lightning, such as ntasks-per-node for Slurm and trainer.devices for Lightning, this is handled by the script.
 
+Lightning has the ability to requeue a job if it is killed by Slurm for exceeding the system walltime. The training state is saved in a checkpoint and loaded when the new job begins. submit_slurm.py creates a single log directory holding the checkpoints for the original and any requeued jobs (in the example below, GN2_my_requeue_job).
+
+```bash
+python submit/submit_slurm.py --config configs/GN2.yaml --requeue --salt_log_dir=my_requeue_job --signal=SIGUSR1@90
+```
+
 There is also an older submit/submit_slurm.sh bash script that is kept around for compatibility. Users are strongly encouraged to use the python script.
 
 ??? info "Cleaning up after interruption"
diff --git a/salt/submit/submit_slurm.py b/salt/submit/submit_slurm.py
index 8fa4f1ef..ceae7110 100644
--- a/salt/submit/submit_slurm.py
+++ b/salt/submit/submit_slurm.py
@@ -1,4 +1,5 @@
 import argparse
+from datetime import datetime
 from pathlib import Path
 
 from slurm_handler import SlurmHandler
@@ -36,6 +37,20 @@ parser.add_argument(
     nargs="+",
     help="List of binds for singularity (e.g. /path/to/upp/output:/inputs)",
 )
+parser.add_argument("-r", "--requeue", action="store_true")
+parser.add_argument(
+    "-s",
+    "--signal",
+    default="SIGUSR1@90",
+    type=str,
+    help="Signal from Slurm to trigger Lightning to prepare for requeue",
+)
+parser.add_argument(
+    "-sls",
+    "--salt_log_suffix",
+    default=None,
+    help="Appended to model name to create Salt log directory",
+)
 args = parser.parse_args()
 
 if args.bind and args.environment != "singularity":
@@ -76,6 +91,13 @@ handler["output"] = f"{log_path}/slurm-%j.out"
 handler["error"] = f"{log_path}/slurm-%j.err"
 if args.time is not None:
     handler["time"] = args.time  # Time limit of job, default is system specified
+if args.requeue:
+    handler["requeue"] = None
+    handler["signal"] = args.signal
+
+log_suffix = args.salt_log_suffix
+if args.requeue and not log_suffix:
+    log_suffix = datetime.now().strftime("%Y%m%d-T%H%M%S")
 
 # Construct and submit the job command
 command = "cd ${BASEDIR} && " "export OMP_NUM_THREADS=1\n"
@@ -102,6 +124,10 @@ command += (
     f"--trainer.num_nodes={nodes} "
     f"--data.num_workers={cpus_per_task} "
 )
+
+if args.requeue:
+    command += f"--overwrite_config --log_suffix={log_suffix} "
+
 if args.force:
     command += "--force "
 if args.environment == "singularity":
diff --git a/salt/utils/cli.py b/salt/utils/cli.py
index 6853dfb9..5bc9be7b 100644
--- a/salt/utils/cli.py
+++ b/salt/utils/cli.py
@@ -66,6 +66,16 @@ class SaltCLI(LightningCLI):
         parser.add_argument(
             "--compile", action="store_true", help="Compile the model to speed up training."
         )
+        parser.add_argument(
+            "-oc", "--overwrite_config", action="store_true", help="Overwrite config file."
+        )
+        parser.add_argument(
+            "-ls",
+            "--log_suffix",
+            default=None,
+            type=str,
+            help="Appended to model name to create the log directory.",
+        )
         self.apply_links(parser)
 
     def fit(self, model, **kwargs):
@@ -203,7 +213,11 @@ class SaltCLI(LightningCLI):
                 pass
 
             # set the timestampped dir
-            dirname = f"{name}_{timestamp}"
+            if sc["log_suffix"]:
+                log_suffix = sc["log_suffix"]
+                dirname = f"{name}_{log_suffix}"
+            else:
+                dirname = f"{name}_{timestamp}"
             if "s3:/" not in sc["trainer.default_root_dir"]:
                 log_dir_timestamp = str(Path(log_dir / dirname).resolve())
             else:
@@ -225,6 +239,9 @@ class SaltCLI(LightningCLI):
                         "automated salt tag",
                     )
 
+            if sc["overwrite_config"]:
+                self.save_config_kwargs["overwrite"] = True
+
         if self.subcommand == "test":
             print("\n" + "-" * 100)
 
-- 
GitLab


From 49198e5dfde9ca883e3d0a7e8ea37580b2f78248 Mon Sep 17 00:00:00 2001
From: Matthew Leigh <matthew.leigh@cern.ch>
Date: Thu, 9 May 2024 15:11:56 +0200
Subject: [PATCH 13/30] Tweaks to Transformerv2: layerscale + varlen_attn +
 small optim

---
 .vscode/settings.json            |   5 +-
 docs/api/transformer.md          |   1 -
 salt/callbacks/saveconfig.py     |   5 +-
 salt/configs/GN3.yaml            |  14 +-
 salt/configs/MaskFormer.yaml     |   1 +
 salt/models/maskformer.py        |  25 +-
 salt/models/saltmodel.py         |   6 +-
 salt/models/transformer_v2.py    | 634 +++++++++++++++++++++----------
 salt/tests/test_transformerv2.py | 435 +++++++++++++--------
 salt/to_onnx.py                  |   4 +
 salt/utils/benchmarking.py       | 171 +++++++++
 salt/utils/tensor_utils.py       |  20 +-
 setup/Dockerfile                 |   6 +-
 setup/install_flash.sh           |   5 +
 14 files changed, 963 insertions(+), 369 deletions(-)
 create mode 100644 salt/utils/benchmarking.py
 create mode 100644 setup/install_flash.sh

diff --git a/.vscode/settings.json b/.vscode/settings.json
index ece0a73a..d4f963dc 100644
--- a/.vscode/settings.json
+++ b/.vscode/settings.json
@@ -3,6 +3,9 @@
     "autoDocstring.docstringFormat": "numpy",
     "[python]": {
         "editor.defaultFormatter": "charliermarsh.ruff",
+        "editor.rulers": [
+            100
+        ],
         "editor.codeActionsOnSave": {
             "source.organizeImports": "explicit",
             "source.fixAll": "explicit"
@@ -19,4 +22,4 @@
     ],
     "python.testing.unittestEnabled": false,
     "python.testing.pytestEnabled": true
-}
\ No newline at end of file
+}
diff --git a/docs/api/transformer.md b/docs/api/transformer.md
index 74cff859..e555c85c 100644
--- a/docs/api/transformer.md
+++ b/docs/api/transformer.md
@@ -2,7 +2,6 @@
 ## ::: salt.models.transformer_v2.Attention
     options:
       members: [forward]
-## ::: salt.models.transformer_v2.SelfAttention
 ## ::: salt.models.transformer_v2.GLU
 ## ::: salt.models.transformer_v2.EncoderLayer
 ## ::: salt.models.transformer_v2.TransformerV2
diff --git a/salt/callbacks/saveconfig.py b/salt/callbacks/saveconfig.py
index cba39a8a..f9f47fed 100644
--- a/salt/callbacks/saveconfig.py
+++ b/salt/callbacks/saveconfig.py
@@ -13,6 +13,7 @@ import yaml
 from ftag.git_check import get_git_hash
 from lightning import Callback, LightningModule, Trainer
 from lightning.pytorch.cli import LightningArgumentParser, Namespace
+from lightning.pytorch.loggers import CometLogger
 from s3fs import S3FileSystem
 from s3path import S3Path
 
@@ -138,8 +139,8 @@ class SaveConfigCallback(Callback):
         self.write_yaml_file(self.config, config_path)
 
         # log files as assets
-        #  currently cannot save log files as assests on S3
-        if self.plm.logger is not None and not self.use_S3:
+        # currently cannot save log files as assets on S3
+        if isinstance(self.plm.logger, CometLogger) and not self.use_S3:
             self.plm.logger.experiment.log_asset(config_path)
             self.plm.logger.experiment.log_asset(nd_path)
             self.plm.logger.experiment.log_asset(cd_path)
diff --git a/salt/configs/GN3.yaml b/salt/configs/GN3.yaml
index 307a67f1..8503be5c 100644
--- a/salt/configs/GN3.yaml
+++ b/salt/configs/GN3.yaml
@@ -11,7 +11,7 @@ model:
   model:
     class_path: salt.models.SaltModel
     init_args:
-      num_register_tokens: 10
+      num_register_tokens: 0 # Registers have moved to the encoder!
 
       init_nets:
         - input_name: tracks
@@ -23,11 +23,21 @@ model:
       encoder:
         class_path: salt.models.TransformerV2
         init_args:
+          num_layers: 6
           embed_dim: *embed_dim
-          num_layers: 4
           out_dim: &out_dim 128
+          norm: LayerNorm
+          attn_type: torch-math
+          do_final_norm: true
+          ls_init: 1.0e-2
+          dense_kwargs:
+            activation: SiLU
+            dropout: 0
+            gated: True
           attn_kwargs:
             num_heads: 8
+            dropout: 0.1
+          num_registers: 8
 
       pool_net:
         class_path: salt.models.GlobalAttentionPooling
diff --git a/salt/configs/MaskFormer.yaml b/salt/configs/MaskFormer.yaml
index bd8da499..532887b5 100644
--- a/salt/configs/MaskFormer.yaml
+++ b/salt/configs/MaskFormer.yaml
@@ -28,6 +28,7 @@ model:
             num_heads: 8
           dense_kwargs:
             activation: *activation
+          drop_registers: true
 
 
       mask_decoder:
diff --git a/salt/models/maskformer.py b/salt/models/maskformer.py
index e505f491..cc21b6d9 100644
--- a/salt/models/maskformer.py
+++ b/salt/models/maskformer.py
@@ -4,7 +4,7 @@ import torch
 from torch import Tensor, nn
 
 from salt.models import MaskFormerLoss
-from salt.models.transformer_v2 import GLU, CrossAttention, SelfAttention
+from salt.models.transformer_v2 import GLU, Attention
 from salt.stypes import Tensors
 
 
@@ -151,11 +151,11 @@ class MaskDecoderLayer(nn.Module):
         self.mask_attention = mask_attention
         self.bidirectional_ca = bidirectional_ca
 
-        self.q_ca = CrossAttention(embed_dim=embed_dim, num_heads=n_heads)
-        self.q_sa = SelfAttention(embed_dim=embed_dim, num_heads=n_heads)
+        self.q_ca = Attention(embed_dim=embed_dim, num_heads=n_heads)
+        self.q_sa = Attention(embed_dim=embed_dim, num_heads=n_heads)
         self.q_dense = GLU(embed_dim)
         if bidirectional_ca:
-            self.kv_ca = CrossAttention(embed_dim=embed_dim, num_heads=n_heads)
+            self.kv_ca = Attention(embed_dim=embed_dim, num_heads=n_heads)
             self.kv_dense = GLU(embed_dim)
         self.mask_net = mask_net
 
@@ -164,15 +164,16 @@ class MaskDecoderLayer(nn.Module):
 
         # if we want to do mask attention
         if self.mask_attention:
-            # If a BoolTensor is provided, positions with ``True`` are not allowed
-            # to attend while ``False`` values will be unchanged.
-            attn_mask = (get_masks(kv, q, self.mask_net, kv_mask).sigmoid() < 0.1).detach()
+            # New attention masking convention with transformers 2
+            # Positions with True are allowed while False are masked
+            attn_mask = (get_masks(kv, q, self.mask_net, kv_mask).sigmoid() > 0.9).detach()
 
-            # if the attn mask is invalid for a given query, allow it to attend everywhere
-            attn_mask[torch.where(attn_mask.sum(-1) == attn_mask.shape[-1])] = False
+            # If the attention mask is False for all positions, we set it to True
+            # This is to prevent NaNs in the softmax
+            attn_mask[(~attn_mask).all(-1)] = True
 
         # update queries with cross attention from nodes
-        q = q + self.q_ca(q, kv, kv_mask=kv_mask, attn_mask=attn_mask)
+        q = q + self.q_ca(q, kv=kv, kv_mask=kv_mask, attn_mask=attn_mask)
 
         # update queries with self attention
         q = q + self.q_sa(q)
@@ -184,7 +185,7 @@ class MaskDecoderLayer(nn.Module):
         if self.bidirectional_ca:
             if attn_mask is not None:
                 attn_mask = attn_mask.transpose(1, 2)
-            kv = kv + self.kv_ca(kv, q, q_mask=kv_mask, attn_mask=attn_mask)
+                attn_mask[(~attn_mask).all(-1)] = True
+            kv = kv + self.kv_ca(kv, q, attn_mask=attn_mask)
             kv = kv + self.kv_dense(kv)
-
         return q, kv
diff --git a/salt/models/saltmodel.py b/salt/models/saltmodel.py
index d0c7566a..d82fd8fe 100644
--- a/salt/models/saltmodel.py
+++ b/salt/models/saltmodel.py
@@ -160,8 +160,12 @@ class SaltModel(nn.Module):
                         })
 
         # Generate embedding from encoder, or by concatenating the init net outputs
+        # We should change this such that all encoders return (x, mask)
         if self.encoder:
-            preds = {"embed_xs": self.encoder(xs, pad_mask=pad_masks, inputs=inputs, **kwargs)}
+            embed_xs = self.encoder(xs, pad_mask=pad_masks, inputs=inputs, **kwargs)
+            if isinstance(embed_xs, tuple):
+                embed_xs, pad_masks = embed_xs
+            preds = {"embed_xs": embed_xs}
         else:
             preds = {"embed_xs": flatten_tensor_dict(xs)}
 
diff --git a/salt/models/transformer_v2.py b/salt/models/transformer_v2.py
index 829188e0..8f177c46 100644
--- a/salt/models/transformer_v2.py
+++ b/salt/models/transformer_v2.py
@@ -9,57 +9,61 @@ Features:
 - RMSNorm https://arxiv.org/abs/1910.07467
 """
 
-from abc import ABC
+import warnings
+from functools import partial
 
 import torch
-from torch import BoolTensor, Size, Tensor, nn
+import torch.nn.functional as F
+from torch import BoolTensor, Tensor, nn
 
 import salt.models.layernorm as layernorms
 from salt.stypes import Tensors
+from salt.utils.tensor_utils import redo_padding, undo_padding
 
 
 def merge_masks(
-    q_mask: BoolTensor | None,
     kv_mask: BoolTensor | None,
     attn_mask: BoolTensor | None,
-    q_shape: Size,
-    k_shape: Size,
-) -> BoolTensor:
+    q_shape: Tensor,
+) -> BoolTensor | None:
     """Create a full attention mask which incorporates the padding information.
 
-    Using pytorch transformer convention:
+    Using pytorch transformer convention for padding
         False: Real node
         True:  Zero padded
 
+    Using pytorch transformer convention for attention mask
+        False:  Not allowed in attention mechanism
+        True:   Allowed in attention mechanism
+
+    Designing the attention mask such that padded tokens can't send information,
+    but they can receive it.
+    This prevents NaNs in the attention scores caused by the softmax
+
     Parameters
     ----------
-    q_mask : BoolTensor | None
-        Mask for the queries, of shape (batch, q_len).
     kv_mask : BoolTensor | None
         Mask for the keys and values, of shape (batch, kv_len).
     attn_mask : BoolTensor | None
         Full attention mask, of shape (batch, q_len, kv_len).
     q_shape : Size
         Shape of the queries tensor, (batch, q_len, dim).
-    k_shape : Size
-        Shape of the keys tensor, (batch, kv_len, dim).
     """
     # Create the full mask which combines the attention and padding masks
     mask = None
 
-    # if both masks exist, combine them
-    if q_mask is not None and kv_mask is not None:
-        mask = q_mask.unsqueeze(-1) | kv_mask.unsqueeze(-2)
-
-    # if only one mask exists, expand it to the other dimension
-    if q_mask is None and kv_mask is not None:
+    # if the kv_mask mask exists, ensure that padded tokens never send information
+    if kv_mask is not None:
         mask = kv_mask.unsqueeze(-2).expand(-1, q_shape[-2], -1)
-    if kv_mask is None and q_mask is not None:
-        mask = q_mask.unsqueeze(-1).expand(-1, -1, k_shape[-2])
+        mask = ~mask  # convert the mask such that True is a valid token
 
     # include the attention mask
     if attn_mask is not None:
-        mask = attn_mask if mask is None else attn_mask | mask
+        mask = attn_mask if mask is None else attn_mask & mask
+
+    # Unsqueeze the mask to give it a dimension for num_head broadcasting
+    if mask is not None:
+        mask = mask.unsqueeze(1)
 
     return mask
 
@@ -70,50 +74,88 @@ def repeat_kv(keys: Tensor, values: Tensor, repeats: int, dim: int):
     return keys, values
 
 
-def torch_meff_attn(q: Tensor, k: Tensor, v: Tensor, mask: BoolTensor, dropout: float) -> Tensor:
-    # masking can lead to nans, see
-    # - https://github.com/pytorch/pytorch/issues/110213
-    # - https://github.com/pytorch/pytorch/issues/103749
-    # to get round this, can transform the mask from a bool to float
-    # mask = (1.0 - mask.to(q.dtype)) * torch.finfo(q.dtype).min
-    # but don't need this if add_zero_attn is True
+def change_attn_backends(module: nn.Module, backend: str) -> None:
+    """Recursively change the attention backend of a module and all its children.
 
-    # TODO: change mask convention
-    # https://gitlab.cern.ch/atlas-flavor-tagging-tools/algorithms/salt/-/issues/47
-    if mask is not None:
-        mask = ~mask.contiguous()
+    Used primarily for switching back to torch-math for ONNX exports.
+    """
+    for child in module.children():
+        change_attn_backends(child, backend)
+        if isinstance(child, Attention):
+            child.set_backend(backend)
 
-    return nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=mask, dropout_p=dropout)
 
+def projection_packed(
+    q: Tensor,
+    kv: Tensor | None,
+    weight: Tensor,
+    bias: Tensor | None = None,
+) -> tuple:
+    """Efficient input projection for MHA when using a single linear layer.
 
-def torch_flash_attn(q: Tensor, k: Tensor, v: Tensor, mask: BoolTensor, dropout: float) -> Tensor:
-    assert mask is None, "Flash attention does not support attention masks"
+    Essentially the same as torch.nn.functional._in_projection_packed
+    But here we use chunk which is 40x faster than unflatten
+    Not sure why they don't use chunk in the original implementation...
+
+    Parameters
+    ----------
+    q : Tensor
+        The queries tensor of shape (batch, q_len, dim).
+    kv : Tensor | None
+        The keys and values tensor of shape (batch, kv_len, dim).
+    weight : Tensor
+        The packed weight tensor of the input linear projection with shape (3 * dim, dim).
+    bias : Tensor | None
+        The optional packed bias tensor of the input linear projection with shape (3 * dim).
+
+    Returns
+    -------
+    q_proj, k_proj, v_proj : tuple
+        The projected queries, keys, and values tensors.
+    """
+    # If the q tensor is the only input, then we assume we are doing self-attention.
+    # This is made (slightly) faster by using a single linear layer, then chunking rather than
+    # three separate linear layers processed one at a time.
+    if kv is None:
+        return F.linear(q, weight, bias).chunk(3, dim=-1)
+
+    # If the kv tensor is present, then we are doing cross-attention.
+    # This means we must project the q and kv tensors separately.
+    # The kv linear layer can remain packed, allowing us to project together then chunk,
+    # using the same trick as above. We must however first separate the weights (and biases if
+    # present) of the linear layers for the q and kv parts. We use torch.split which returns a
+    # view of the original tensor so this step doesn't require any extra memory or much time.
+    dim = q.size(-1)
+    w_q, w_kv = weight.split([dim, dim * 2])
+    b_q, b_kv = bias.split([dim, dim * 2]) if bias is not None else (None, None)
+
+    # Now we can do the seperate projections
+    q_proj = F.linear(q, w_q, b_q)
+    k_proj, v_proj = F.linear(kv, w_kv, b_kv).chunk(2, dim=-1)
+    return q_proj, k_proj, v_proj
+
+
+def torch_attn(
+    q: Tensor, k: Tensor, v: Tensor, mask: BoolTensor, dropout: float, backend: str
+) -> Tensor:
+    """Torch dot product attention with a switchable backend."""
     with torch.backends.cuda.sdp_kernel(
-        enable_flash=True, enable_math=False, enable_mem_efficient=False
+        enable_flash=(backend == "torch-flash"),
+        enable_math=(backend == "torch-math"),
+        enable_mem_efficient=(backend == "torch-meff"),
     ):
-        return nn.functional.scaled_dot_product_attention(
-            q, k, v, attn_mask=mask, dropout_p=dropout
-        )
+        return F.scaled_dot_product_attention(q, k, v, attn_mask=mask, dropout_p=dropout)
 
 
-ATTN_BACKENDS = {
-    "torch-meff": torch_meff_attn,
-    "torch-flash": torch_flash_attn,
-}
-
-
-class Attention(nn.Module, ABC):
+class Attention(nn.Module):
     def __init__(
         self,
         embed_dim: int,
-        num_heads: int,
-        attn_type: str = "torch-meff",
-        n_kv_heads: int | None = None,
-        window_size: int | None = None,
+        num_heads: int = 1,
+        attn_type: str = "torch-math",
         dropout: float = 0.0,
         bias: bool = True,
-        add_zero_attn: bool = True,
-    ):
+    ) -> None:
         """Multihead attention module.
 
         Parameters
@@ -123,153 +165,154 @@ class Attention(nn.Module, ABC):
         num_heads : int
             Number of attention heads.
         attn_type : str, optional
-            Type of backend kernel to use.
-        n_kv_heads : int | None, optional
-            Number of heads for the keys and values. If None, defaults to num_heads.
-        window_size : int | None, optional
-            Window size for flash attention kernel. If None, defaults to global attention.
+            Name of backend kernel to use.
         dropout : float, optional
             Dropout rate.
         bias : bool, optional
             Whether to include bias terms.
-        add_zero_attn : bool, optional
-            Whether to add a dummy token to attend to. This avoids nan when all tokens are padded.
         """
         super().__init__()
-
+        assert embed_dim % num_heads == 0, "Dim not div by the number of heads!"
+        assert attn_type in {
+            "torch-flash",
+            "torch-math",
+            "torch-meff",
+            "flash-varlen",
+        }, "Invalid attention type!"
+
+        # Attributes
         self.embed_dim = embed_dim
         self.num_heads = num_heads
         self.head_dim = embed_dim // num_heads
-
-        self.n_kv_heads = num_heads if n_kv_heads is None else n_kv_heads
-        assert self.n_kv_heads is not None
-        self.repeats = self.num_heads // self.n_kv_heads
-        self.scale = self.head_dim**-0.5
         self.dropout = dropout
         self.bias = bias
-        self.add_zero_attn = add_zero_attn
 
+        # Better parallelism for self-attention when using parameters directly
+        self.in_proj_weight = nn.Parameter(torch.empty(3 * embed_dim, embed_dim))
+        self.in_proj_bias = nn.Parameter(torch.empty(3 * embed_dim)) if bias else None
+        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+        self.reset_parameters()
+        self.set_backend(attn_type)
+
+    def set_backend(self, attn_type: str) -> str:
+        # Check the attention backend
         self.attn_type = attn_type
-        self.attn_func = ATTN_BACKENDS[self.attn_type]
-        self.backend = self._flash_backend if self.attn_type == "flash" else self._torch_backend
-        if window_size is None:
-            self.window_size = (-1, -1)
+        if self.attn_type == "flash-varlen":
+            why_not_varlen = ""
+
+            # Try importing the flash-varlen backend
+            try:
+                from flash_attn import flash_attn_varlen_qkvpacked_func
+
+                self.attn_fn = flash_attn_varlen_qkvpacked_func
+            except ImportError:
+                why_not_varlen = (
+                    "Requires the flash_attn package and CUDA 12+ which must be installed "
+                    "separately. See salt/setup/install_flash.sh for installation instructions."
+                )
+
+            # Check if a GPU is available
+            if not torch.cuda.is_available():
+                why_not_varlen = "No GPU available."
+
+            if why_not_varlen:
+                warnings.warn(
+                    f"Cannot use flash-varlen backend. {why_not_varlen} Reverting to torch-math.",
+                    stacklevel=2,
+                )
+                self.attn_type = "torch-math"
+                self.attn_fn = torch_attn
         else:
-            assert attn_type == "flash"
-            assert window_size % 2 == 0
-            self.window_size = (window_size // 2, window_size // 2)
+            self.attn_fn = torch_attn
+
+        return self.attn_type
+
+    def reset_parameters(self):
+        """Initialize the parameters."""
+        nn.init.xavier_uniform_(self.in_proj_weight)
+        if self.bias:
+            nn.init.constant_(self.in_proj_bias, 0.0)
+        self.out_proj.reset_parameters()
+
+    def _varlen_attention(self, x: Tensor, culens: Tensor, maxlen: int) -> Tensor:
+        """Attention forward pass for the flash-varlen backend."""
+        # Perform the packed input projection
+        qkv = F.linear(x, self.in_proj_weight, self.in_proj_bias)
+        qkv = qkv.view(-1, 3, self.num_heads, self.head_dim)
+
+        # Run the flash-varlen backend
+        dropout = self.dropout if self.training else 0.0
+        a_out = self.attn_fn(qkv, culens, maxlen, dropout)
+        a_out = a_out.reshape(-1, self.embed_dim)
 
-        self.wq = nn.Linear(self.embed_dim, self.num_heads * self.head_dim, bias=self.bias)
-        self.wk = nn.Linear(self.embed_dim, self.n_kv_heads * self.head_dim, bias=self.bias)
-        self.wv = nn.Linear(self.embed_dim, self.n_kv_heads * self.head_dim, bias=self.bias)
-        self.wo = nn.Linear(self.num_heads * self.head_dim, self.embed_dim, bias=self.bias)
+        # Mix with final linear layer
+        return self.out_proj(a_out)
 
     def forward(
         self,
-        q: Tensor,
-        k: Tensor,
-        v: Tensor,
-        q_mask: BoolTensor | None = None,
+        x: Tensor,
+        kv: Tensor | None = None,
+        mask: BoolTensor | None = None,
         kv_mask: BoolTensor | None = None,
         attn_mask: BoolTensor | None = None,
+        culens: Tensor | None = None,
+        maxlen: int | None = None,
     ) -> Tensor:
         """Attention forward pass.
 
         Parameters
         ----------
-        q : Tensor
-            Queries of shape (batch, q_len, dim).
-        k : Tensor
-            Keys of shape (batch, kv_len, dim).
-        v : Tensor
-            Values of shape (batch, kv_len, dim).
-        q_mask : BoolTensor, optional
-            Mask for the queries, by default None.
+        x : Tensor
+            The pointcloud of shape (batch, x_len, dim).
+        kv : Tensor
+            Optional second pointcloud for cross-attn with shape (batch, kv_len, dim).
+        mask : BoolTensor, optional
+            Mask for the pointcloud x, by default None.
         kv_mask : BoolTensor, optional
-            Mask for the keys and values, by default None.
+            Mask the kv pointcloud, by default None.
         attn_mask : BoolTensor, optional
             Full attention mask, by default None.
+        culens : Tensor, optional
+            Cumulative lengths of the sequences in x, by default None.
+            Only used for the flash-varlen backend.
+        maxlen : int, optional
+            Maximum length of a sequence in the x, by default None.
+            Only used for the flash-varlen backend.
 
         Returns
         -------
         Tensor
-            Output of shape (batch, q_len, dim).
+            Output of shape (batch, x_len, dim).
         """
-        # combine masks
-        attn_mask = merge_masks(q_mask, kv_mask, attn_mask, q.shape, k.shape)
-
-        # input projections
-        q, k, v = self.wq(q), self.wk(k), self.wv(v)
-
-        # add a dummy token to attend to - avoids nan when all tokens are padded
-        if self.add_zero_attn:
-            batch = q.shape[0]
-            zero_attn_shape = (batch, 1, self.embed_dim)
-            k = torch.cat([k, torch.zeros(zero_attn_shape, dtype=k.dtype, device=k.device)], dim=1)
-            v = torch.cat([v, torch.zeros(zero_attn_shape, dtype=v.dtype, device=v.device)], dim=1)
-            if attn_mask is not None:
-                attn_mask = nn.functional.pad(attn_mask, (0, 1), value=False)
-            if kv_mask is not None:
-                kv_mask = nn.functional.pad(kv_mask, (0, 1), value=False)
-
-        # run attention
-        output = self.backend(q, k, v, attn_mask)
+        # the varlen attention backend is called at the beginning (different args)
+        if self.attn_type == "flash-varlen":
+            assert kv is None, "flash-varlen only supports self attention!"
+            assert attn_mask is None, "flash-varlen does not support attention masks!"
+            assert culens is not None, "flash-varlen requires culens!"
+            assert maxlen is not None, "flash-varlen requires maxlen!"
+            return self._varlen_attention(x, culens, maxlen)
 
-        # return output projection
-        return self.wo(output)
+        # Otherwise perform standard attention
+        B, S, D = x.shape
 
-    def _torch_backend(self, q: Tensor, k: Tensor, v: Tensor, attn_mask: BoolTensor | None = None):
-        batch, q_len, _ = q.shape
-        _, kv_len, _ = k.shape
+        # input projections -> B, S, D
+        q, k, v = projection_packed(x, kv, self.in_proj_weight, self.in_proj_bias)
 
-        # transform tensors to (batch, num_heads, seq_len, head_dim)
-        q = q.view(batch, q_len, self.num_heads, self.head_dim).transpose(1, 2)
-        k = k.view(batch, kv_len, self.n_kv_heads, self.head_dim).transpose(1, 2)
-        v = v.view(batch, kv_len, self.n_kv_heads, self.head_dim).transpose(1, 2)
-
-        # repeat keys and values to match number of query heads
-        if self.repeats > 1:
-            k, v = repeat_kv(k, v, self.repeats, dim=-2)
-
-        # expand mask to (batch, num_heads, q_len, kv_len)
-        if attn_mask is not None:
-            attn_mask = attn_mask.view(batch, 1, q_len, kv_len).expand(-1, self.num_heads, -1, -1)
+        # transform tensors to (B, Nh, S, Hd)
+        shape = (B, -1, self.num_heads, self.head_dim)  # Don't use S for cross attn
+        q, k, v = (t.view(shape).transpose(1, 2).contiguous() for t in (q, k, v))
 
         # run attention
-        output = self.attn_func(q, k, v, mask=attn_mask, dropout=self.dropout)
-
-        # recombine heads and return
-        return output.transpose(1, 2).contiguous().view(batch, -1, self.embed_dim)
+        s_mask = mask if kv is None else kv_mask  # Who is sending, x or kv
+        mask = merge_masks(s_mask, attn_mask, q.shape)
+        dropout = self.dropout if self.training else 0.0
+        a_out = torch_attn(q, k, v, mask, dropout, self.attn_type)
 
+        # recombine heads
+        a_out = a_out.transpose(1, 2).contiguous().view(B, S, D)
 
-class SelfAttention(nn.Module):
-    def __init__(self, embed_dim: int, **kwargs):
-        """Self attention module.
-
-        Parameters
-        ----------
-        embed_dim : int
-            Dimension of the input.
-        kwargs : dict
-            Keyword arguments for
-            [salt.models.transformer_v2.Attention][salt.models.transformer_v2.Attention].
-        """
-        super().__init__()
-        self.embed_dim = embed_dim
-        self.attention = Attention(embed_dim=embed_dim, **kwargs)
-
-    def forward(self, x: Tensor, **kwargs) -> Tensor:
-        return self.attention(x, x, x, **kwargs)
-
-
-class CrossAttention(nn.Module):
-    def __init__(self, embed_dim: int, **kwargs):
-        super().__init__()
-        self.embed_dim = embed_dim
-        self.attention = Attention(embed_dim=embed_dim, **kwargs)
-
-    def forward(self, q: Tensor, kv: Tensor, **kwargs) -> Tensor:
-        return self.attention(q, kv, kv, **kwargs)
+        # mix with final linear layer
+        return self.out_proj(a_out)
 
 
 class GLU(nn.Module):
@@ -277,7 +320,8 @@ class GLU(nn.Module):
         self,
         embed_dim: int,
         hidden_dim: int | None = None,
-        activation: str = "ReLU",
+        activation: str = "SiLU",
+        dropout: float = 0.0,
         bias: bool = True,
         gated: bool = False,
     ):
@@ -293,6 +337,8 @@ class GLU(nn.Module):
             Dimension of the hidden layer. If None, defaults to embed_dim * 2.
         activation : str, optional
             Activation function.
+        dropout : float, optional
+            Dropout rate.
         bias : bool, optional
             Whether to include bias in the linear layers.
         gated : bool, optional
@@ -303,18 +349,104 @@ class GLU(nn.Module):
         if hidden_dim is None:
             hidden_dim = embed_dim * 2
 
-        self.in_proj = nn.Linear(embed_dim, hidden_dim, bias=bias)
+        self.gated = gated
+        self.embed_dim = embed_dim
+        self.in_proj = nn.Linear(embed_dim, hidden_dim + hidden_dim * gated, bias=bias)
         self.out_proj = nn.Linear(hidden_dim, embed_dim, bias=bias)
-        self.gate = None
-        if gated:
-            self.gate = nn.Linear(embed_dim, hidden_dim, bias=bias)
+        self.drop = nn.Dropout(dropout)
         self.activation = getattr(nn, activation)()
 
     def forward(self, x: Tensor) -> Tensor:
-        out = self.activation(self.in_proj(x))
-        if self.gate:
-            out = out * self.gate(x)
-        return self.out_proj(out)
+        x = self.in_proj(x)
+        if self.gated:
+            x1, x2 = x.chunk(2, dim=-1)
+            x = self.activation(x1) * x2
+        else:
+            x = self.activation(x)
+        x = self.drop(x)
+        return self.out_proj(x)
+
+
+class LayerScale(nn.Module):
+    """Applies the LayerScale operation from the Cait vision transformer.
+
+    Effective at improving stability and speed of deep transformers.
+    Now the standard for vision transformers
+    https://arxiv.org/abs/2103.17239
+    """
+
+    def __init__(self, dim: int, init_value: float = 1e-3) -> None:
+        super().__init__()
+        self.gamma = nn.Parameter(init_value * torch.ones(dim))
+
+    def forward(self, x: Tensor) -> Tensor:
+        return x * self.gamma
+
+
+class DropPath(nn.Module):
+    """Drop paths for a stochastic depth neural network.
+
+    Used for regularisation when applied to the main path of a residual block.
+    """
+
+    def __init__(self, drop_prob: float = 0.0):
+        super().__init__()
+        self.drop_prob = drop_prob
+
+    def forward(self, x: Tensor) -> Tensor:
+        if self.drop_prob == 0.0 or not self.training:
+            return x
+        keep_prob = 1 - self.drop_prob
+        shape = (x.shape[0],) + (1,) * (x.ndim - 1)
+        random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
+        random_tensor.floor_()  # binarize
+        return x.div(keep_prob) * random_tensor
+
+
+class PreNormResidual(nn.Module):
+    """Wraps a module with pre-norm with a residual connection.
+
+    Optionally also applies:
+    - LayerScale
+    - DropPath (Stochastic Depth)
+
+    Neat way of doing the most common transformer pattern:
+    - x = x + drop(scale * fn(norm(x)))
+    """
+
+    def __init__(
+        self,
+        fn: nn.Module,
+        norm: str = "LayerNorm",
+        ls_init: float | None = None,
+        drop_path: float = 0.0,
+        embed_dim: int = 0,
+    ) -> None:
+        """Parameters
+        ----------
+        fn : nn.Module
+            The module to wrap. Must be non-resizing.
+        norm : str, optional
+            The normalization method, by default "LayerNorm".
+        ls_init : float | None, optional
+            The initial value for the layerscale, by default None.
+            If None, then no layerscale is applied.
+        drop_path : float, optional
+            The drop path rate, by default 0.0.
+        embed_dim : int
+            The dimension of the input and output.
+            If zero we will try to get it from the fn's own embed_dim attribute.
+        """
+        super().__init__()
+        dim = embed_dim or fn.embed_dim
+        assert dim > 0, "Could not determine embed_dim from fn"
+        self.fn = fn
+        self.norm = getattr(layernorms, norm)(dim)
+        self.ls = LayerScale(dim, ls_init) if ls_init is not None else nn.Identity()
+        self.drop_path = DropPath(drop_path) if drop_path else nn.Identity()
+
+    def forward(self, x: Tensor, *args, **kwargs) -> Tensor:
+        return x + self.drop_path(self.ls(self.fn(self.norm(x), *args, **kwargs)))
 
 
 class EncoderLayer(nn.Module):
@@ -322,9 +454,11 @@ class EncoderLayer(nn.Module):
         self,
         embed_dim: int,
         norm: str = "LayerNorm",
+        ls_init: float | None = None,
+        drop_path: float = 0.0,
         dense_kwargs: dict | None = None,
         attn_kwargs: dict | None = None,
-    ):
+    ) -> None:
         """Encoder layer consisting of a self-attention and a feed-forward layer.
 
         Parameters
@@ -333,6 +467,10 @@ class EncoderLayer(nn.Module):
             Dimension of the embeddings at each layer.
         norm : str, optional
             Normalization style, by default "LayerNorm".
+        drop_path : float, optional
+            Drop path rate, by default 0.0.
+        ls_init : float | None, optional
+            Initial value for the layerscale, by default None.
         dense_kwargs : dict | None, optional
             Keyword arguments for [salt.models.transformer_v2.GLU][salt.models.transformer_v2.GLU].
         attn_kwargs : dict | None, optional
@@ -340,19 +478,23 @@ class EncoderLayer(nn.Module):
             [salt.models.transformer_v2.SelfAttention][salt.models.transformer_v2.SelfAttention].
         """
         super().__init__()
+
+        # Safe defaults
         if attn_kwargs is None:
             attn_kwargs = {}
         if dense_kwargs is None:
             dense_kwargs = {}
+
+        # Attributes
         self.embed_dim = embed_dim
-        self.attn = SelfAttention(embed_dim=embed_dim, **attn_kwargs)
-        self.attn_norm = getattr(layernorms, norm)(embed_dim)
-        self.dense = GLU(embed_dim, **dense_kwargs)
-        self.dense_norm = getattr(layernorms, norm)(embed_dim)
 
-    def forward(self, x: Tensor, pad_mask: BoolTensor) -> Tensor:
-        x = x + self.attn(self.attn_norm(x), kv_mask=pad_mask)
-        return x + self.dense(self.dense_norm(x))
+        # Submodules
+        residual = partial(PreNormResidual, norm=norm, ls_init=ls_init, drop_path=drop_path)
+        self.attn = residual(Attention(embed_dim, **attn_kwargs))
+        self.dense = residual(GLU(embed_dim, **dense_kwargs))
+
+    def forward(self, x: Tensor, **kwargs) -> Tensor:
+        return self.dense(self.attn(x, **kwargs))
 
 
 class DecoderLayer(nn.Module):
@@ -360,24 +502,39 @@ class DecoderLayer(nn.Module):
         self,
         embed_dim: int,
         norm: str = "LayerNorm",
+        ls_init: float | None = 1e-3,
+        drop_path: float = 0.0,
         dense_kwargs: dict | None = None,
         attn_kwargs: dict | None = None,
     ):
         super().__init__()
+
+        # Safe defaults
         if attn_kwargs is None:
             attn_kwargs = {}
         if dense_kwargs is None:
             dense_kwargs = {}
+
+        # Attributes
         self.embed_dim = embed_dim
-        self.attn = CrossAttention(embed_dim=embed_dim, **attn_kwargs)
-        self.q_norm = getattr(layernorms, norm)(embed_dim)
-        self.kv_norm = getattr(layernorms, norm)(embed_dim)
-        self.dense = GLU(embed_dim, **dense_kwargs)
-        self.dense_norm = getattr(layernorms, norm)(embed_dim)
 
-    def forward(self, x: Tensor, kv: Tensor, pad_mask: BoolTensor) -> Tensor:
-        x = x + self.attn(self.q_norm(x), self.kv_norm(kv), kv_mask=pad_mask)
-        return x + self.dense(self.dense_norm(x))
+        # Submodules
+        residual = partial(PreNormResidual, norm=norm, ls_init=ls_init, drop_path=drop_path)
+        self.self_attn = residual(Attention(embed_dim=embed_dim, **attn_kwargs))
+        self.cross_attn = residual(Attention(embed_dim=embed_dim, **attn_kwargs))
+        self.dense = residual(GLU(embed_dim, **dense_kwargs))
+
+    def forward(
+        self,
+        x: Tensor,
+        *,  # Indicates that kv is required
+        kv: Tensor,
+        mask: Tensor | None = None,
+        kv_mask: Tensor | None = None,
+    ) -> Tensor:
+        x = self.self_attn(x, kv_mask=mask)
+        x = self.cross_attn(x, kv=kv, kv_mask=kv_mask)
+        return self.dense(x)
 
 
 class TransformerV2(nn.Module):
@@ -387,9 +544,13 @@ class TransformerV2(nn.Module):
         embed_dim: int,
         out_dim: int | None = None,
         norm: str = "LayerNorm",
+        attn_type: str = "torch-math",
+        do_final_norm: bool = True,
+        num_registers: int = 1,
+        drop_registers: bool = False,
         **kwargs,
-    ):
-        """Transformer model consisting of a series of stacked Transformer encoder layers.
+    ) -> None:
+        """Transformer model consisting of a stack of Transformer encoder layers.
 
         Parameters
         ----------
@@ -401,37 +562,122 @@ class TransformerV2(nn.Module):
             Optionally project the output to a different dimension.
         norm : str, optional
             Normalization style, by default "LayerNorm".
+        attn_type : str, optional
+            The backend for the attention mechanism, by default "torch-math".
+            Provided here because the varlen backend requires pre/post processing.
+        do_final_norm : bool, optional
+            Whether to apply a final normalization layer, by default True.
+        num_registers : int, optional
+            The number of registers to add to the END of the input sequence
+        drop_registers : bool, optional
+            Whether to drop the registers from the output sequence.
         kwargs : dict
             Keyword arguments for [salt.models.transformer_v2.EncoderLayer].
         """
         super().__init__()
+
+        # Check the inputs
+        if num_registers < 1:
+            raise ValueError(
+                "Many jets have no tracks, which causes NaNs in the attention scores. ",
+                "To fix this, set num_registers to at least 1",
+            )
+
+        # Attributes
         self.num_layers = num_layers
         self.embed_dim = embed_dim
+        self.out_dim = out_dim or embed_dim
+        self.do_final_norm = do_final_norm
+        self.do_out_proj = out_dim is not None
+        self.attn_type = attn_type
+        self.num_registers = num_registers
+        self.drop_registers = drop_registers
 
+        # Submodules
         self.layers = torch.nn.ModuleList([
             EncoderLayer(embed_dim=embed_dim, norm=norm, **kwargs) for _ in range(num_layers)
         ])
-        self.out_norm = getattr(layernorms, norm)(embed_dim if out_dim is None else out_dim)
-        self.out_proj = None
-        if out_dim is not None:
+        self.attn_type = self.set_backend(attn_type)
+
+        # Optional submodules
+        if self.do_out_proj:
             self.out_proj = nn.Linear(self.embed_dim, out_dim)
+        if self.do_final_norm:
+            self.out_norm = getattr(layernorms, norm)(self.out_dim)
+        if self.num_registers:
+            self.registers = nn.Parameter(torch.randn(num_registers, embed_dim))
+            self.register_buffer("register_mask", torch.zeros(num_registers, dtype=torch.bool))
         self.featurewise = nn.ModuleList()
 
+    def set_backend(self, attn_type: str) -> str:
+        for layer in self.layers:
+            attn_type = layer.attn.fn.set_backend(attn_type)
+        return attn_type  # Might change due to library availability
+
     def forward(
         self,
         x: Tensor,
         pad_mask: BoolTensor,
-        inputs: Tensors = None,
+        inputs: Tensors | None = None,
+        **kwargs,
     ) -> Tensor:
+        # Add the registers to the sequence and the mask
+        if self.num_registers:
+            x, pad_mask = self._add_registers(x, pad_mask)
+
+        # Combine the input sequences if they are dictionaries (don't overwrite pad_mask)
         if isinstance(x, dict):
             x = torch.cat(list(x.values()), dim=1)
-        if isinstance(pad_mask, dict):
-            pad_mask = torch.cat(list(pad_mask.values()), dim=1)
+        mask = torch.cat(list(pad_mask.values()), dim=1) if isinstance(pad_mask, dict) else pad_mask
 
+        # If using the varlen backend, pack the sequence and store the cumulative lengths
+        if self.attn_type == "flash-varlen":
+            x, kwargs["culens"], kwargs["maxlen"] = undo_padding(x, mask)
+
+        # Run through the main transformer encoder layers
         for i, layer in enumerate(self.layers):
             if len(self.featurewise) > 0:
                 x = self.featurewise[i](inputs, x)
-            x = layer(x, pad_mask)
-        if self.out_proj is not None:
+            x = layer(x, mask=mask, **kwargs)
+
+        # Run through the optional layers
+        if self.do_out_proj:
             x = self.out_proj(x)
-        return self.out_norm(x)
+        if self.do_final_norm:
+            x = self.out_norm(x)
+
+        # If using the varlen backend, unpack the sequence
+        if self.attn_type == "flash-varlen":
+            x = redo_padding(x, mask)
+
+        # Optionally drop the registers from the output
+        if self.drop_registers:
+            x = x[:, : -self.num_registers]
+            if isinstance(pad_mask, dict):
+                del pad_mask["registers"]
+            elif isinstance(pad_mask, Tensor):
+                pad_mask = pad_mask[:, : -self.num_registers]
+
+        return x, pad_mask
+
+    def _add_registers(self, x: Tensor | dict, pad_mask: BoolTensor | dict | None) -> tuple:
+        """Add the learnable registers to the end of the input sequence."""
+        # Get the batch size and expand the registers to match
+        B = next(iter(x.values())).size(0) if isinstance(x, dict) else x.size(0)
+
+        # Add as a key or concatenate at the end
+        reg = self.registers.expand(B, -1, -1)
+        if isinstance(x, dict):
+            x["registers"] = reg
+        else:
+            x = torch.cat([x, reg], dim=1)
+
+        # Also include a mask for the registers
+        if pad_mask is not None:
+            reg_mask = self.register_mask.expand(B, -1)
+            if isinstance(pad_mask, dict):
+                pad_mask["registers"] = reg_mask
+            else:
+                pad_mask = torch.cat([pad_mask, reg_mask], dim=-1)
+
+        return x, pad_mask
diff --git a/salt/tests/test_transformerv2.py b/salt/tests/test_transformerv2.py
index 22df9d49..958c3be0 100644
--- a/salt/tests/test_transformerv2.py
+++ b/salt/tests/test_transformerv2.py
@@ -1,13 +1,20 @@
-import time
+import importlib.util
 
 import pytest
 import torch
 from torch import nn
+from torch.utils.benchmark import Timer
 
 from salt.models.attention import MultiheadAttention
 from salt.models.layernorm import RMSNorm
-from salt.models.transformer import TransformerEncoderLayer
-from salt.models.transformer_v2 import Attention, DecoderLayer, EncoderLayer, merge_masks
+from salt.models.transformer_v2 import (
+    Attention,
+    DecoderLayer,
+    TransformerV2,
+    merge_masks,
+    redo_padding,
+    undo_padding,
+)
 
 N_BATCH = 10
 Q_SEQ = 20
@@ -21,54 +28,35 @@ def create_bool_tensor(shape, value):
 
 def test_merge_masks_none_inputs():
     q_shape = (N_BATCH, Q_SEQ, DIM)
-    k_shape = (N_BATCH, KV_SEQ, DIM)
-    mask = merge_masks(None, None, None, q_shape, k_shape)
+    mask = merge_masks(None, None, q_shape)
     assert mask is None
 
 
-def test_merge_masks_only_q_mask():
+def test_merge_masks_only_attn_mask():
     q_shape = (N_BATCH, Q_SEQ, DIM)
-    k_shape = (N_BATCH, KV_SEQ, DIM)
-    q_mask = create_bool_tensor(q_shape[:-1], False)
-    mask = merge_masks(q_mask, None, None, q_shape, k_shape)
-    assert mask.shape == (N_BATCH, Q_SEQ, KV_SEQ)
+    attn_shape = (N_BATCH, Q_SEQ, KV_SEQ)
+    attn_mask = create_bool_tensor(attn_shape, False)
+    mask = merge_masks(None, attn_mask, q_shape)
+    assert mask.shape == (N_BATCH, 1, Q_SEQ, KV_SEQ)
 
 
 def test_merge_masks_only_kv_mask():
     q_shape = (N_BATCH, Q_SEQ, DIM)
     k_shape = (N_BATCH, KV_SEQ, DIM)
     kv_mask = create_bool_tensor(k_shape[:-1], False)
-    mask = merge_masks(None, kv_mask, None, q_shape, k_shape)
-    assert mask.shape == (N_BATCH, Q_SEQ, KV_SEQ)
-
-
-def test_merge_masks_q_and_kv_masks():
-    q_shape = (N_BATCH, Q_SEQ, DIM)
-    k_shape = (N_BATCH, KV_SEQ, DIM)
-    q_mask = create_bool_tensor(q_shape[:-1], False)
-    kv_mask = create_bool_tensor(k_shape[:-1], True)
-    mask = merge_masks(q_mask, kv_mask, None, q_shape, k_shape)
-    assert mask.shape == (N_BATCH, Q_SEQ, KV_SEQ)
-    assert torch.all(mask)
+    mask = merge_masks(kv_mask, None, q_shape)
+    assert mask.shape == (N_BATCH, 1, Q_SEQ, KV_SEQ)
 
 
-def test_merge_masks_with_attn_mask():
+def test_merge_masks_attn_and_kv_masks():
     q_shape = (N_BATCH, Q_SEQ, DIM)
     k_shape = (N_BATCH, KV_SEQ, DIM)
-    attn_mask = create_bool_tensor((3, 4, 5), False)
-    mask = merge_masks(None, None, attn_mask, q_shape, k_shape)
-    assert mask.shape == attn_mask.shape
-    assert torch.equal(mask, attn_mask)
-
-
-def test_merge_masks_different_shapes():
-    q_shape = (2, 3, 10)
-    k_shape = (2, 4, 10)
-    q_mask = create_bool_tensor(q_shape[:-1], False)
+    attn_shape = (N_BATCH, Q_SEQ, KV_SEQ)
     kv_mask = create_bool_tensor(k_shape[:-1], False)
-    attn_mask = create_bool_tensor((2, 3, 4), False)
-    mask = merge_masks(q_mask, kv_mask, attn_mask, q_shape, k_shape)
-    assert mask.shape == attn_mask.shape
+    attn_mask = create_bool_tensor(attn_shape, True)
+    mask = merge_masks(kv_mask, attn_mask, q_shape)
+    assert mask.shape == (N_BATCH, 1, Q_SEQ, KV_SEQ)
+    assert torch.all(mask)
 
 
 def test_padding_mask():
@@ -96,132 +84,222 @@ def test_padding_mask():
     # ]])
 
 
-def compare_attention_outputs(custom_attn, torch_attn, q, k, v, kv_mask=None):
-    """Helper function to compare outputs of custom and torch attention modules."""
-    custom_output = custom_attn(q, k, v, kv_mask=kv_mask)
-    torch_output, _ = torch_attn(q, k, v, key_padding_mask=kv_mask)
-    torch.testing.assert_close(custom_output, torch_output)
-    assert not torch.isnan(custom_output).any()
-
+def get_models(dim, num_heads) -> tuple:
+    salt_attn = Attention(dim, num_heads=num_heads)
+    torch_attn = nn.MultiheadAttention(dim, num_heads, batch_first=True)
+    salt_attn.in_proj_weight = torch_attn.in_proj_weight
+    salt_attn.in_proj_bias = torch_attn.in_proj_bias
+    salt_attn.out_proj.weight = torch_attn.out_proj.weight
+    salt_attn.out_proj.bias = torch_attn.out_proj.bias
+    return salt_attn, torch_attn
 
-def get_models(dim, num_heads, add_zero_attn):
-    salt_attn = Attention(dim, num_heads=num_heads, add_zero_attn=add_zero_attn)
-    torch_attn = nn.MultiheadAttention(
-        dim, num_heads, batch_first=True, add_zero_attn=add_zero_attn
-    )
 
-    # Set the weights of the custom attention module to be the same as the torch module
-    weights = torch.rand((3 * dim, dim))
-    bias = torch.rand(3 * dim)
-    torch_attn.in_proj_weight = nn.Parameter(weights)
-    torch_attn.in_proj_bias = nn.Parameter(bias)
-
-    wq, wk, wv = weights.chunk(3)
-    bq, bk, bv = bias.chunk(3)
-    salt_attn.wq.weight = nn.Parameter(wq)
-    salt_attn.wk.weight = nn.Parameter(wk)
-    salt_attn.wv.weight = nn.Parameter(wv)
-    salt_attn.wq.bias = nn.Parameter(bq)
-    salt_attn.wk.bias = nn.Parameter(bk)
-    salt_attn.wv.bias = nn.Parameter(bv)
-    salt_attn.wo.weight = torch_attn.out_proj.weight
-    salt_attn.wo.bias = torch_attn.out_proj.bias
-    return salt_attn, torch_attn
+def get_cross_attn_inputs(batch_size, q_len, kv_len, dim, frac_pad=0.0) -> tuple:
+    torch.manual_seed(0)
+    q = torch.randn(batch_size, q_len, dim)
+    kv = torch.randn(batch_size, kv_len, dim)
+    kv_mask = torch.rand(batch_size, kv_len) > frac_pad
+    kv_mask[:, 0] = False  # Make sure something can send
+    return q, kv, kv_mask
 
 
-def get_test_inputs(batch_size, seq_len, dim, frac_pad=0.0):
+def get_self_attn_inputs(batch_size, seq_len, dim, frac_pad=0.0) -> tuple:
     torch.manual_seed(0)
-    q = torch.randn(batch_size, seq_len, dim)
-    k = torch.randn(batch_size, seq_len, dim)
-    v = torch.randn(batch_size, seq_len, dim)
-    kv_mask = torch.rand(batch_size, seq_len) < frac_pad
-    q[kv_mask] = 0
-    k[kv_mask] = 0
-    v[kv_mask] = 0
-    return q, k, v, kv_mask
+    x = torch.randn(batch_size, seq_len, dim)
+    mask = torch.rand(batch_size, seq_len) > frac_pad
+    mask[:, 0] = False  # Make sure something can send
+    return x, mask
 
 
 @pytest.mark.parametrize("batch_size", [1, 10])
-@pytest.mark.parametrize("seq_len", [0, 1, 2, 10])
+@pytest.mark.parametrize("q_len", [1, 10])
+@pytest.mark.parametrize("kv_len", [1, 10])
 @pytest.mark.parametrize("dim", [32])
-@pytest.mark.parametrize("num_heads", [1, 2])
-@pytest.mark.parametrize("add_zero_attn", [False, True])
-@pytest.mark.parametrize("frac_pad", [0.0, 0.5, 1.0])
-def test_attention_output(batch_size, seq_len, dim, num_heads, add_zero_attn, frac_pad):
-    salt_attn, torch_attn = get_models(dim, num_heads, add_zero_attn=add_zero_attn)
-    q, k, v, kv_mask = get_test_inputs(batch_size, seq_len, dim, frac_pad=frac_pad)
+@pytest.mark.parametrize("frac_pad", [0.0, 0.5, 0.9])
+def test_cross_attention(
+    batch_size,
+    q_len,
+    kv_len,
+    dim,
+    frac_pad,
+) -> None:
+    salt_attn, torch_attn = get_models(dim, 2)
+    q, kv, kv_mask = get_cross_attn_inputs(batch_size, q_len, kv_len, dim, frac_pad)
+    custom_output = salt_attn(q, kv, kv_mask=kv_mask)
+    torch_output, _ = torch_attn(q, kv, kv, key_padding_mask=kv_mask)
+    torch.testing.assert_close(custom_output, torch_output)
+    assert not torch.isnan(custom_output).any()
 
-    # if not adding a dummy token to attend to, ensure at least one element is not masked
-    if not add_zero_attn and kv_mask.shape[-1] != 0:
-        kv_mask[..., 0] = False
 
-    compare_attention_outputs(salt_attn, torch_attn, q, k, v, kv_mask)
+@pytest.mark.parametrize("batch_size", [1, 10])
+@pytest.mark.parametrize("seq_len", [1, 2, 10])
+@pytest.mark.parametrize("dim", [32])
+@pytest.mark.parametrize("num_heads", [1, 2])
+@pytest.mark.parametrize("frac_pad", [0.0, 0.5, 0.9])
+def test_self_attention(
+    batch_size,
+    seq_len,
+    dim,
+    num_heads,
+    frac_pad,
+) -> None:
+    salt_attn, torch_attn = get_models(dim, num_heads)
+    x, mask = get_self_attn_inputs(batch_size, seq_len, dim, frac_pad)
+    custom_output = salt_attn(x, mask=mask)
+    torch_output, _ = torch_attn(x, x, x, key_padding_mask=mask)
+    torch.testing.assert_close(custom_output, torch_output)
+    assert not torch.isnan(custom_output).any()
 
 
+@pytest.mark.parametrize("batch_size", [1, 10])
+@pytest.mark.parametrize("seq_len", [1, 2, 10])
 @pytest.mark.parametrize("dim", [32])
 @pytest.mark.parametrize("num_heads", [1, 2])
 @pytest.mark.parametrize("frac_pad", [0.0, 0.5])
+@pytest.mark.parametrize("attn_type", ["torch-flash", "torch-meff", "flash-varlen"])
+def test_attention_backends(
+    batch_size,
+    seq_len,
+    dim,
+    num_heads,
+    frac_pad,
+    attn_type,
+) -> None:
+    if not torch.cuda.is_available():
+        pytest.skip("CUDA not available")
+    if importlib.util.find_spec("flash_attn") is None:
+        pytest.skip("flash_attn not available")
+
+    # FlashVarlenAttention requires half precision
+    with torch.autocast("cuda", enabled=True):
+        # Get the inputs and move to device
+        x, mask = get_self_attn_inputs(batch_size, seq_len, dim, frac_pad)
+        x = x.cuda()
+        mask = mask.cuda()
+
+        # Change the masking to None for the torch backends as they don't support it
+        if "torch" in attn_type:
+            mask = None
+
+        # Perform the standard attention (math)
+        attn = Attention(dim, num_heads=num_heads).to("cuda")
+        output = attn(x, mask=mask)
+
+        # ensure zero padded
+        if mask is not None:
+            output *= ~mask.unsqueeze(-1)
+
+        # Switch to the attention backend
+        attn.set_backend(attn_type)
+        if attn_type == "flash-varlen":
+            x_p, culens, maxlen = undo_padding(x, mask)
+            output_2 = attn(x_p, mask=mask, culens=culens, maxlen=maxlen)
+            output_2 = redo_padding(output_2, mask)
+        else:
+            output_2 = attn(x, mask=mask)
+
+        # Test all close with less strict due to half precision
+        torch.testing.assert_close(output, output_2, atol=1e-3, rtol=1e-3)
+        assert not torch.isnan(output_2).any()
+
+
+def sync_v1v2_attn(v1_attn, v2_attn):
+    wq, wk, wv = v2_attn.in_proj_weight.chunk(3)
+    bq, bk, bv = v2_attn.in_proj_bias.chunk(3)
+    v1_attn.linear_q.weight.data = wq
+    v1_attn.linear_k.weight.data = wk
+    v1_attn.linear_v.weight.data = wv
+    v1_attn.linear_q.bias.data = bq
+    v1_attn.linear_k.bias.data = bk
+    v1_attn.linear_v.bias.data = bv
+
+
+@pytest.mark.parametrize("dim", [32])
+@pytest.mark.parametrize("num_heads", [1, 2])
+@pytest.mark.parametrize("frac_pad", [0.0, 0.5, 0.9])
 def test_v1_v2_attention_output(dim, num_heads, frac_pad):
     v1_attn = MultiheadAttention(
         dim, num_heads, {"class_path": "salt.models.ScaledDotProductAttention"}
     )
-    v2_attn = Attention(dim, num_heads=num_heads, add_zero_attn=False)
-    v1_attn.linear_q = v2_attn.wq
-    v1_attn.linear_k = v2_attn.wk
-    v1_attn.linear_v = v2_attn.wv
-    v1_attn.linear_out = v2_attn.wo
-    q, k, v, kv_mask = get_test_inputs(10, 20, dim, frac_pad=frac_pad)
-    v1_out = v1_attn(q, k, v, kv_mask=kv_mask)
-    v2_out = v2_attn(q, k, v, kv_mask=kv_mask)
+    v2_attn = Attention(dim, num_heads=num_heads)
+    sync_v1v2_attn(v1_attn, v2_attn)
+    v1_attn.linear_out = v2_attn.out_proj
+    q, kv, kv_mask = get_cross_attn_inputs(10, 20, 20, dim, frac_pad=frac_pad)
+    v1_out = v1_attn(q, kv, kv_mask=kv_mask)
+    v2_out = v2_attn(q, kv, kv_mask=kv_mask)
     torch.testing.assert_close(v1_out, v2_out)
 
 
-@pytest.mark.parametrize("dim", [32])
-@pytest.mark.parametrize("num_heads", [1, 2])
-@pytest.mark.parametrize("frac_pad", [0])  # note that this fails for frac_pad > 0
-def test_v1_v2_encoder_output(dim, num_heads, frac_pad):
-    v1_enc = TransformerEncoderLayer(
-        dim,
-        {
-            "num_heads": num_heads,
-            "attention": {"class_path": "salt.models.ScaledDotProductAttention"},
-        },
-        {"activation": "ReLU"},
+@pytest.mark.parametrize("num_registers", [1, 4])
+@pytest.mark.parametrize("num_layers", [1, 3])
+@pytest.mark.parametrize("ls_init", [None, 0.1])
+@pytest.mark.parametrize("drop_path", [0, 0.1])
+def test_transformerv2_tensor_input(num_registers, num_layers, ls_init, drop_path):
+    x, mask = get_self_attn_inputs(5, 10, 32, 0.5)
+    trans = TransformerV2(
+        num_layers=num_layers,
+        embed_dim=32,
+        attn_type="torch-math",
+        dense_kwargs={"activation": "SiLU"},
+        attn_kwargs={"num_heads": 2},
+        num_registers=num_registers,
+        ls_init=ls_init,
+        drop_path=drop_path,
     )
-    v2_enc = EncoderLayer(
-        dim,
-        attn_kwargs={"num_heads": num_heads, "add_zero_attn": False},
-        dense_kwargs={"gated": False},
+    x, mask = trans(x, pad_mask=mask)
+    assert x.shape == (5, 10 + num_registers, 32)
+    assert not x.isnan().any()
+
+
+@pytest.mark.parametrize("ls_init", [None, 0.1])
+@pytest.mark.parametrize("drop_path", [0, 0.1])
+def test_decoder_layer(ls_init, drop_path):
+    q, kv, kv_mask = get_cross_attn_inputs(5, 10, 5, 32, 0.5)
+    decoder = DecoderLayer(
+        embed_dim=32,
+        dense_kwargs={"activation": "SiLU"},
+        attn_kwargs={"num_heads": 2},
+        ls_init=ls_init,
+        drop_path=drop_path,
     )
-
-    v1_enc.mha.linear_q = v2_enc.attn.attention.wq
-    v1_enc.mha.linear_k = v2_enc.attn.attention.wk
-    v1_enc.mha.linear_v = v2_enc.attn.attention.wv
-    v1_enc.mha.linear_out = v2_enc.attn.attention.wo
-
-    v1_enc.dense.net[0] = v2_enc.dense.in_proj
-    v1_enc.dense.net[2] = v2_enc.dense.out_proj
-    v1_enc.norm1 = v2_enc.attn_norm
-    v1_enc.norm2 = v2_enc.dense_norm
-
-    q, _, _, kv_mask = get_test_inputs(10, 20, dim, frac_pad=frac_pad)
-
-    v1_out = v1_enc(q, pad_mask=kv_mask)
-    v2_out = v2_enc(q, pad_mask=kv_mask)
-
-    torch.testing.assert_close(v1_out, v2_out)
+    x = decoder(q, kv=kv, kv_mask=kv_mask)
+    assert x.shape == q.shape
+    assert not x.isnan().any()
+
+
+@pytest.mark.parametrize("num_registers", [1, 4])
+def test_transformerv2_dict_input(num_registers):
+    x1, m1 = get_self_attn_inputs(5, 10, 32, 0.5)
+    x2, m2 = get_self_attn_inputs(5, 3, 32, 0.5)
+    x3, m3 = get_self_attn_inputs(5, 2, 32, 0.5)
+    x = {"m1": x1, "m2": x2, "m3": x3}  # Multimodal inputs
+    mask = {"m1": m1, "m2": m2, "m3": m3}
+    trans = TransformerV2(
+        num_layers=3,
+        embed_dim=32,
+        attn_type="torch-math",
+        dense_kwargs={"activation": "SiLU"},
+        attn_kwargs={"num_heads": 2},
+        num_registers=num_registers,
+    )
+    x, mask = trans(x, pad_mask=mask)
+    assert x.shape == (5, 10 + 3 + 2 + num_registers, 32)
+    assert all(k in mask for k in ["m1", "m2", "m3", "registers"])
 
 
-def test_times_torch_vs_salt():  # pragma: no cover
+def test_times_torch_vs_salt() -> None:
     # skip if cuda is not available
     if not torch.cuda.is_available():
         pytest.skip("CUDA not available")
-    batch_size, seq_len, dim, num_heads = 1000, 40, 128, 8
-    salt_attn, torch_attn = get_models(dim, num_heads, add_zero_attn=True)
-    q, k, v, kv_mask = get_test_inputs(batch_size, seq_len, dim, frac_pad=0.5)
+
+    # Define the input parameters for the timings
+    batch_size, seq_len, dim, num_heads = 1000, 64, 128, 8
+    salt_attn, torch_attn = get_models(dim, num_heads)
+    x, mask = get_self_attn_inputs(batch_size, seq_len, dim, frac_pad=0.5)
 
     # move tensors and models to cuda
-    q, k, v, kv_mask = q.cuda(), k.cuda(), v.cuda(), kv_mask.cuda()
+    x = x.cuda()
+    mask = mask.cuda()
     salt_attn.cuda()
     torch_attn.cuda()
 
@@ -229,32 +307,81 @@ def test_times_torch_vs_salt():  # pragma: no cover
     salt_attn.training = True
     torch_attn.training = True
 
-    # warm up
-    for _ in range(10):
-        salt_attn(q, k, v, kv_mask=kv_mask)
-        torch_attn(q, k, v, key_padding_mask=kv_mask)
+    # Using timers also performs warm up
+    salt_timer = Timer(
+        stmt="salt_attn(x, kv_mask=mask)",
+        globals={"salt_attn": salt_attn, "x": x, "mask": mask},
+        label="salt",
+        num_threads=1,
+    )
 
-    salt_times = []
-    for _ in range(50):
-        start = time.time()
-        salt_attn(q, k, v, kv_mask=kv_mask)
-        end = time.time()
-        salt_times.append(end - start)
+    torch_timer = Timer(
+        stmt="torch_attn(x, x, x, key_padding_mask=mask)",
+        globals={"torch_attn": torch_attn, "x": x, "mask": mask},
+        label="torch",
+        num_threads=1,
+    )
 
-    torch_times = []
-    for _ in range(50):
-        start = time.time()
-        torch_attn(q, k, v, key_padding_mask=kv_mask)
-        end = time.time()
-        torch_times.append(end - start)
+    salt_time = salt_timer.timeit(300).mean
+    torch_time = torch_timer.timeit(300).mean
+    assert salt_time < torch_time, f"mean: {salt_time} vs {torch_time}"
 
-    salt_mean = sum(salt_times) / len(salt_times)
-    torch_mean = sum(torch_times) / len(torch_times)
-    salt_median = sorted(salt_times)[len(salt_times) // 2]
-    torch_median = sorted(torch_times)[len(torch_times) // 2]
 
-    assert salt_mean < torch_mean, f"mean: {salt_mean} vs {torch_mean}"
-    assert salt_median < torch_median, f"median: {salt_median} vs {torch_median}"
+def test_times_varlen_vs_default() -> None:
+    if not torch.cuda.is_available():
+        pytest.skip("CUDA not available")
+    if importlib.util.find_spec("flash_attn") is None:
+        pytest.skip("flash_attn not available")
+
+    # FlashVarlenAttention requires half precision
+    with torch.autocast("cuda", enabled=True):
+        # Define the input parameters for the timings
+        num_layers = 4
+        num_heads = 4
+        batch_size = 256
+        seq_len = 64
+        dim = 128
+        x, mask = get_self_attn_inputs(batch_size, seq_len, dim, frac_pad=0.5)
+
+        # Create the transformers
+        standard_attn = TransformerV2(
+            num_layers=num_layers,
+            embed_dim=dim,
+            attn_type="torch-math",
+            dense_kwargs={"activation": "SiLU"},
+            attn_kwargs={"num_heads": num_heads},
+        )
+
+        varlen_attn = TransformerV2(
+            num_layers=num_layers,
+            embed_dim=dim,
+            attn_type="flash-varlen",
+            dense_kwargs={"activation": "SiLU"},
+            attn_kwargs={"num_heads": num_heads},
+        )
+
+        # move tensors and models to cuda
+        x = x.cuda()
+        mask = mask.cuda()
+        standard_attn.cuda()
+        varlen_attn.cuda()
+
+        # Time the models
+        s_timer = Timer(
+            stmt="standard_attn(x, pad_mask=mask)",
+            globals={"standard_attn": standard_attn, "x": x, "mask": mask},
+            label="salt",
+            num_threads=1,
+        )
+        v_timer = Timer(
+            stmt="varlen_attn(x, pad_mask=mask)",
+            globals={"varlen_attn": varlen_attn, "x": x, "mask": mask},
+            label="salt",
+            num_threads=1,
+        )
+        st = s_timer.timeit(20).mean
+        vt = v_timer.timeit(20).mean
+        assert vt < st, f"mean: {vt} vs {st}"
 
 
 def test_RMSNorm():
@@ -266,5 +393,5 @@ def test_RMSNorm():
 def test_DecoderLayer():
     layer = DecoderLayer(embed_dim=32, attn_kwargs={"num_heads": 2})
     x = torch.randn(5, 10, 32)
-    y = torch.randn(5, 10, 32)
-    layer(x, y, pad_mask=None)
+    kv = torch.randn(5, 10, 32)
+    layer(x, kv=kv)
diff --git a/salt/to_onnx.py b/salt/to_onnx.py
index cbfdf143..7ca1d384 100644
--- a/salt/to_onnx.py
+++ b/salt/to_onnx.py
@@ -14,6 +14,7 @@ from torch.nn.functional import softmax
 from tqdm import tqdm
 
 from salt.models.task import mask_fill_flattened
+from salt.models.transformer_v2 import change_attn_backends
 from salt.modelwrapper import ModelWrapper
 from salt.utils.inputs import inputs_sep_no_pad, inputs_sep_with_pad
 from salt.utils.union_find import get_node_assignment_jit
@@ -293,6 +294,9 @@ def main(args=None):
             map_location=torch.device("cpu"),
         )
         onnx_model.eval()
+        change_attn_backends(
+            onnx_model.model, "torch-math"
+        )  # Only applies to transformer_v2 layers
 
     print("\n" + "-" * 100)
     print("Converting model to ONNX...")
diff --git a/salt/utils/benchmarking.py b/salt/utils/benchmarking.py
new file mode 100644
index 00000000..6e0ae07d
--- /dev/null
+++ b/salt/utils/benchmarking.py
@@ -0,0 +1,171 @@
+"""Benchmarking utilities for Pytorch models."""
+
+from collections.abc import Callable
+
+import torch
+from torch import Tensor, dtype
+from torch.utils import benchmark
+
+
+def time_forward(
+    fn: Callable,
+    *args,
+    repeats: int = 10,
+    block_time: float = 0.0,
+    desc: str = "",
+    verbose: bool = True,
+    amp: bool = False,
+    amp_dtype: dtype = torch.float16,
+    **kwargs,
+) -> tuple:
+    """Use Pytorch Benchmark on the forward pass of an arbitrary function.
+
+    Parameters
+    ----------
+    fn : function
+        The function to benchmark.
+    args : list
+        The args to the function.
+    repeats : int
+        Number of times to repeat the benchmark.
+    block_time : float
+        Instead of repeats, run the benchmark for a fixed amount of time.
+    desc : str
+        Description of the benchmark.
+    verbose : bool
+        Whether to print the benchmark results.
+    amp : bool
+        Whether to use automatic mixed precision.
+    amp_dtype : torch.dtype
+        The dtype to use for automatic mixed precision.
+    kwargs : dict
+        Additional keyword arguments to pass to the function.
+    """
+    if verbose:
+        print(desc, " - Forward pass")
+
+    # Define the automatic mixed precision wrapper
+    def fn_with_amp(*args, **kwargs):
+        with torch.autocast(device_type="cuda", dtype=amp_dtype, enabled=amp):
+            fn(*args, **kwargs)
+
+    # Create the benchmark timer
+    t = benchmark.Timer(
+        stmt="fn_with_amp(*args, **kwargs)",
+        globals={"fn_with_amp": fn_with_amp, "args": args, "kwargs": kwargs},
+        num_threads=torch.get_num_threads(),
+    )
+
+    # Run the benchmark
+    m = t.blocked_autorange(min_run_time=block_time) if block_time > 0 else t.timeit(repeats)
+
+    if verbose:
+        print(m)
+    return t, m
+
+
+def time_backward(
+    fn: Callable,
+    *args,
+    repeats: int = 10,
+    block_time: float = 0.0,
+    desc: str = "",
+    verbose: bool = True,
+    amp: bool = False,
+    amp_dtype: dtype = torch.float16,
+    **kwargs,
+) -> tuple:
+    """Use Pytorch Benchmark on the backward pass of an arbitrary function.
+
+    Parameters
+    ----------
+    fn : function
+        The function to benchmark.
+    args : list
+        The args to the function.
+    repeats : int
+        Number of times to repeat the benchmark.
+    block_time : float
+        Instead of repeats, run the benchmark for a fixed amount of time.
+    desc : str
+        Description of the benchmark.
+    verbose : bool
+        Whether to print the benchmark results.
+    amp : bool
+        Whether to use automatic mixed precision.
+    amp_dtype : torch.dtype
+        The dtype to use for automatic mixed precision.
+    kwargs : dict
+        Additional keyword arguments to pass to the function.
+    """
+    if verbose:
+        print(desc, " - Backward pass")
+
+    # Run in forward to get the output so we can backpropagate
+    with torch.autocast(device_type="cuda", dtype=amp_dtype, enabled=amp):
+        y = fn(*args, **kwargs)
+        if type(y) is tuple:
+            y = y[0]
+        elif type(y) is dict:
+            y = next(iter(y.values()))
+
+    # Generate a random gradient
+    grad = torch.randn_like(y)
+
+    # Define the backward function
+    def bwd(*args, y, grad):
+        for x in args:  # Turn off gradients for all args
+            if isinstance(x, Tensor):
+                x.grad = None
+        y.backward(grad, retain_graph=True)
+
+    # Create the benchmark timer
+    t = benchmark.Timer(
+        stmt="f(*args, y=y, grad=grad)",
+        globals={"f": bwd, "args": args, "y": y, "grad": grad},
+        num_threads=torch.get_num_threads(),
+    )
+
+    # Run the benchmark
+    m = t.blocked_autorange(min_run_time=block_time) if block_time > 0 else t.timeit(repeats)
+    if verbose:
+        print(m)
+    return t, m
+
+
+def benchmark_gpu_memory(
+    fn: Callable,
+    *args,
+    amp: bool = False,
+    amp_dtype: dtype = torch.float16,
+    **kwargs,
+) -> float:
+    """Calculate the maximum GPU memory used by a function.
+
+    Parameters
+    ----------
+    fn : function
+        The function to benchmark.
+    args : list
+        The args to the function.
+    amp : bool
+        Whether to use automatic mixed precision.
+    amp_dtype : torch.dtype
+        The dtype to use for automatic mixed precision.
+    kwargs : dict
+        Additional keyword arguments to pass to the function.
+    """
+    # Clear the cache and reset memory stats
+    torch.cuda.empty_cache()
+    torch.cuda.reset_peak_memory_stats()
+    torch.cuda.synchronize()
+
+    # Run the function
+    with torch.autocast(device_type="cuda", dtype=amp_dtype, enabled=amp):
+        fn(*args, **kwargs)
+    torch.cuda.synchronize()
+
+    # Calculate the max memory used in GB
+    mem = torch.cuda.max_memory_allocated() / ((2**20) * 1000)
+    torch.cuda.empty_cache()
+    return mem
diff --git a/salt/utils/tensor_utils.py b/salt/utils/tensor_utils.py
index cb6a59ee..288ac888 100644
--- a/salt/utils/tensor_utils.py
+++ b/salt/utils/tensor_utils.py
@@ -1,6 +1,6 @@
 import torch
 from torch import BoolTensor, Tensor
-from torch.nn.functional import softmax
+from torch.nn.functional import pad, softmax
 
 from salt.stypes import Tensors
 
@@ -59,6 +59,24 @@ def masked_softmax(x: Tensor, mask: BoolTensor, dim: int = -1) -> Tensor:
     return x
 
 
+def undo_padding(seq: Tensor, mask: BoolTensor) -> tuple:
+    """Remove padding; return the flat valid tokens, cumulative lengths, and max length."""
+    mask = ~mask  # convert the mask such that True is a valid token
+    seqlens = mask.sum(dim=-1)
+    maxlen = seqlens.max().item()
+    culens = pad(torch.cumsum(seqlens, dim=0, dtype=torch.int32), (1, 0))
+    return seq[mask], culens, maxlen
+
+
+def redo_padding(unpadded_seq: Tensor, mask: BoolTensor) -> Tensor:
+    """Redo the padding and return a zero-padded tensor."""
+    mask = ~mask  # convert the mask such that True is a valid token
+    shape = (*mask.shape, unpadded_seq.shape[-1])
+    out = torch.zeros(shape, dtype=unpadded_seq.dtype, device=unpadded_seq.device)
+    out[mask] = unpadded_seq
+    return out
+
+
 def add_dims(x: Tensor, ndim: int):
     """Adds dimensions to a tensor to match the shape of another tensor."""
     if (dim_diff := ndim - x.dim()) < 0:
diff --git a/setup/Dockerfile b/setup/Dockerfile
index 9a8cc07f..963f303d 100644
--- a/setup/Dockerfile
+++ b/setup/Dockerfile
@@ -1,5 +1,5 @@
 # base image
-FROM pytorch/pytorch:2.2.1-cuda12.1-cudnn8-runtime
+FROM pytorch/pytorch:2.2.1-cuda12.1-cudnn8-devel
 
 # local and envs
 ENV LANG C.UTF-8
@@ -24,6 +24,10 @@ RUN python -m pip install -r requirements.txt
 # add some other packages to the image, instead of as a package dependency
 RUN python -m pip install puma-hep umami-preprocessing
 
+# Flash attention sometimes has issues in a requirements file
+RUN python -m pip install wheel packaging ninja
+RUN python -m pip install flash-attn==2.5.7
+
 # copy and install package
 COPY . .
 RUN python -m pip install -e .
diff --git a/setup/install_flash.sh b/setup/install_flash.sh
new file mode 100644
index 00000000..71141a08
--- /dev/null
+++ b/setup/install_flash.sh
@@ -0,0 +1,5 @@
+# To optionally install the flash-attn package for transformer-v2 models
+python -m pip install wheel packaging
+python -m pip install ninja==1.11.1.1
+python -m pip install flash-attn==2.5.7
+
-- 
GitLab


From d29faafd6164bec02b9f04957fcb050b48753d03 Mon Sep 17 00:00:00 2001
From: Samuel Van Stroud <sam.van.stroud@cern.ch>
Date: Thu, 9 May 2024 19:12:10 +0200
Subject: [PATCH 14/30] Minor follow up to transformer changes

---
 salt/configs/GN3.yaml            |  4 +---
 salt/models/saltmodel.py         | 27 ---------------------------
 salt/models/transformer_v2.py    | 26 +++++++++++++++-----------
 salt/tests/test_transformerv2.py |  2 +-
 4 files changed, 17 insertions(+), 42 deletions(-)

diff --git a/salt/configs/GN3.yaml b/salt/configs/GN3.yaml
index 8503be5c..ecc2b88a 100644
--- a/salt/configs/GN3.yaml
+++ b/salt/configs/GN3.yaml
@@ -11,7 +11,6 @@ model:
   model:
     class_path: salt.models.SaltModel
     init_args:
-      num_register_tokens: 0 # Registers have moved to the encoder!
 
       init_nets:
         - input_name: tracks
@@ -26,9 +25,8 @@ model:
           num_layers: 6
           embed_dim: *embed_dim
           out_dim: &out_dim 128
+          attn_type: flash-varlen
           norm: LayerNorm
-          attn_type: torch-math
-          do_final_norm: true
           ls_init: 1.0e-2
           dense_kwargs:
             activation: SiLU
diff --git a/salt/models/saltmodel.py b/salt/models/saltmodel.py
index d82fd8fe..0fbdda5d 100644
--- a/salt/models/saltmodel.py
+++ b/salt/models/saltmodel.py
@@ -14,7 +14,6 @@ class SaltModel(nn.Module):
         encoder: nn.Module = None,
         mask_decoder: nn.Module = None,
         pool_net: Pooling = None,
-        num_register_tokens: int = 0,
         merge_dict: dict[str, list[str]] | None = None,
         featurewise_nets: list[dict] | None = None,
     ):
@@ -48,10 +47,6 @@ class SaltModel(nn.Module):
             Pooling network which computes a global representation of the object
             by aggregating over the constituents. If not provided, assume that
             the only inputs are global features (i.e. no constituents).
-        num_register_tokens : int
-            Number of randomly initialised register tokens of the same length as
-            any other input sequences after initialiser networks (e.g. tracks).
-            See https://arxiv.org/abs/2309.16588.
         merge_dict : dict[str, list[str]] | None
             A dictionary that lets the salt concatenate all the input
             representations of the inputs in list[str] and act on them
@@ -74,22 +69,6 @@ class SaltModel(nn.Module):
 
         self.pool_net = pool_net
         self.merge_dict = merge_dict
-        self.num_register_tokens = num_register_tokens
-
-        # init register tokens
-        if self.num_register_tokens and not self.encoder:
-            raise ValueError("encoder must be set if num_register_tokens is set")
-        if self.num_register_tokens and self.encoder:
-            self.registers = torch.nn.Parameter(
-                torch.normal(
-                    torch.zeros((self.num_register_tokens, self.encoder.embed_dim)), std=1e-4
-                )
-            )
-            self.register_mask = torch.zeros(self.num_register_tokens, dtype=torch.bool)
-            self.register_buffer("register_mask_buffer", self.register_mask)
-        else:
-            self.registers = None
-            self.register_mask = None
 
         # checks for the global object only setup
         if self.pool_net is None:
@@ -135,12 +114,6 @@ class SaltModel(nn.Module):
         for init_net in self.init_nets:
             xs[init_net.input_name] = init_net(inputs)
 
-        if self.num_register_tokens:
-            batch_size = xs[next(iter(xs))].shape[0]
-            xs["REGISTERS"] = self.registers.expand(batch_size, -1, -1)
-            if pad_masks:
-                pad_masks["REGISTERS"] = self.register_mask_buffer.expand(batch_size, -1)
-
         # handle edge features if present
         edge_x = xs.pop("EDGE", None)
         kwargs = {} if edge_x is None else {"edge_x": edge_x}
diff --git a/salt/models/transformer_v2.py b/salt/models/transformer_v2.py
index 8f177c46..6c1887e0 100644
--- a/salt/models/transformer_v2.py
+++ b/salt/models/transformer_v2.py
@@ -140,9 +140,9 @@ def torch_attn(
 ) -> Tensor:
     """Torch dot product attention with a switchable backend."""
     with torch.backends.cuda.sdp_kernel(
-        enable_flash=(backend == "torch-flash"),
-        enable_math=(backend == "torch-math"),
+        enable_math=True,  # always enabled as a fallback
         enable_mem_efficient=(backend == "torch-meff"),
+        enable_flash=(backend == "torch-flash"),
     ):
         return F.scaled_dot_product_attention(q, k, v, attn_mask=mask, dropout_p=dropout)
 
@@ -152,7 +152,7 @@ class Attention(nn.Module):
         self,
         embed_dim: int,
         num_heads: int = 1,
-        attn_type: str = "torch-math",
+        attn_type: str = "torch-meff",
         dropout: float = 0.0,
         bias: bool = True,
     ) -> None:
@@ -475,7 +475,7 @@ class EncoderLayer(nn.Module):
             Keyword arguments for [salt.models.transformer_v2.GLU][salt.models.transformer_v2.GLU].
         attn_kwargs : dict | None, optional
             Keyword arguments for
-            [salt.models.transformer_v2.SelfAttention][salt.models.transformer_v2.SelfAttention].
+            [salt.models.transformer_v2.Attention][salt.models.transformer_v2.Attention].
         """
         super().__init__()
 
@@ -568,7 +568,9 @@ class TransformerV2(nn.Module):
         do_final_norm : bool, optional
             Whether to apply a final normalization layer, by default True.
         num_registers : int, optional
-            The number of registers to add to the END of the input sequence
+            The number of registers to add to the END of the input sequence.
+            Registers are randomly initialised tokens of the same dimension as
+            any other inputs after initialiser networks. See 2309.16588.
         drop_registers : bool, optional
             If to drop the registers from the outputs
         kwargs : dict
@@ -579,8 +581,8 @@ class TransformerV2(nn.Module):
         # Check the inputs
         if num_registers < 1:
             raise ValueError(
-                "Many jets have no tracks, which causes NaNs in the attention scores. ",
-                "To fix this, set num_registers to at least 1",
+                "Some jets have no tracks, which causes NaNs in the attention scores. ",
+                "To avoid this, set num_registers to at least 1",
             )
 
         # Attributes
@@ -605,7 +607,9 @@ class TransformerV2(nn.Module):
         if self.do_final_norm:
             self.out_norm = getattr(layernorms, norm)(self.out_dim)
         if self.num_registers:
-            self.registers = nn.Parameter(torch.randn(num_registers, embed_dim))
+            self.registers = nn.Parameter(
+                torch.normal(torch.zeros((self.num_registers, self.embed_dim)), std=1e-4)
+            )
             self.register_buffer("register_mask", torch.zeros(num_registers, dtype=torch.bool))
         self.featurewise = nn.ModuleList()
 
@@ -654,7 +658,7 @@ class TransformerV2(nn.Module):
         if self.drop_registers:
             x = x[:, : -self.num_registers]
             if isinstance(pad_mask, dict):
-                del pad_mask["registers"]
+                del pad_mask["REGISTERS"]
             elif isinstance(pad_mask, Tensor):
                 pad_mask = pad_mask[:, : -self.num_registers]
 
@@ -668,7 +672,7 @@ class TransformerV2(nn.Module):
         # Add as a key or concatenate at the end
         reg = self.registers.expand(B, -1, -1)
         if isinstance(x, dict):
-            x["registers"] = reg
+            x["REGISTERS"] = reg
         else:
             x = torch.cat([x, reg], dim=1)
 
@@ -676,7 +680,7 @@ class TransformerV2(nn.Module):
         if pad_mask is not None:
             reg_mask = self.register_mask.expand(B, -1)
             if isinstance(pad_mask, dict):
-                pad_mask["registers"] = reg_mask
+                pad_mask["REGISTERS"] = reg_mask
             else:
                 pad_mask = torch.cat([pad_mask, reg_mask], dim=-1)
 
diff --git a/salt/tests/test_transformerv2.py b/salt/tests/test_transformerv2.py
index 958c3be0..0cd769f2 100644
--- a/salt/tests/test_transformerv2.py
+++ b/salt/tests/test_transformerv2.py
@@ -284,7 +284,7 @@ def test_transformerv2_dict_input(num_registers):
     )
     x, mask = trans(x, pad_mask=mask)
     assert x.shape == (5, 10 + 3 + 2 + num_registers, 32)
-    assert all(k in mask for k in ["m1", "m2", "m3", "registers"])
+    assert all(k in mask for k in ["m1", "m2", "m3", "REGISTERS"])
 
 
 def test_times_torch_vs_salt() -> None:
-- 
GitLab


From ff43bdb2037e0dd414d37d54def4d55045984c60 Mon Sep 17 00:00:00 2001
From: Samuel Van Stroud <sam.van.stroud@cern.ch>
Date: Mon, 13 May 2024 15:18:14 +0200
Subject: [PATCH 15/30] Update tutorial-Xbb.md

---
 docs/tutorial-Xbb.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/tutorial-Xbb.md b/docs/tutorial-Xbb.md
index 68aba298..1cc6807f 100644
--- a/docs/tutorial-Xbb.md
+++ b/docs/tutorial-Xbb.md
@@ -236,7 +236,7 @@ Go to the GitLab project page of Salt to begin with the task: <https://gitlab.ce
 
 ??? info "Hint: Saving info to environment variables"
 
-    See the Salt [logging docs](training.md#setup-logging) for info on which environment variables to use.
+    See the Salt [logging docs](setup.md#setup-logging) for info on which environment variables to use.
 
 ??? danger "Warning: If you don't set up logging, you may need to disable it in the training config file"
 
-- 
GitLab


From 1befc350b956584583ef3c90f87bf37ea48a972f Mon Sep 17 00:00:00 2001
From: Samuel Van Stroud <sam.van.stroud@cern.ch>
Date: Mon, 13 May 2024 15:46:17 +0200
Subject: [PATCH 16/30] Update tutorial-Xbb.md

---
 docs/tutorial-Xbb.md | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/docs/tutorial-Xbb.md b/docs/tutorial-Xbb.md
index 1cc6807f..67f9add2 100644
--- a/docs/tutorial-Xbb.md
+++ b/docs/tutorial-Xbb.md
@@ -111,17 +111,19 @@ We'll use the tagged image for version `0.3` of the code.
     If you run on lxplus, it is advantageous to also mount the `/afs`, `/eos`, `/tmp` and `/cvmfs` directories:
 
     ```bash
-    singularity shell -e --nv --bind $PWD,/afs,/eos,/tmp,/cvmfs \
+    singularity shell -e --env KRB5CCNAME=$KRB5CCNAME --nv --bind $PWD,/afs,/eos,/tmp,/cvmfs,/run/user \
         /cvmfs/unpacked.cern.ch/gitlab-registry.cern.ch/atlas-flavor-tagging-tools/algorithms/salt:0-3
     ```
 
 === "other (cvmfs only)"
 
     ```
-    singularity shell -e --nv --bind $PWD,/cvmfs \
+    singularity shell -e --env KRB5CCNAME=$KRB5CCNAME --nv --bind $PWD,/cvmfs,/run/user \
         /cvmfs/unpacked.cern.ch/gitlab-registry.cern.ch/atlas-flavor-tagging-tools/algorithms/salt:0-3
     ```
 
+If you have issues accessing bound paths, ensure your Kerberos credentials are set with `export KRB5CCNAME=FILE:/run/user/${UID}/krb5cc`
+
 After running the [`singularity shell`](https://docs.sylabs.io/guides/latest/user-guide/cli/singularity_shell.html#singularity-shell) command, you can re-source your `.bashrc` to get some of the features of your normal terminal back by running 
 
 ```bash
-- 
GitLab


From 8148e244de3a0310efd06b4faaf02e2d030cd74c Mon Sep 17 00:00:00 2001
From: Samuel Van Stroud <sam.van.stroud@cern.ch>
Date: Tue, 14 May 2024 10:10:56 +0200
Subject: [PATCH 17/30] Update paper

---
 paper/paper.md | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/paper/paper.md b/paper/paper.md
index 9b1005ed..dc1689b1 100644
--- a/paper/paper.md
+++ b/paper/paper.md
@@ -36,6 +36,9 @@ authors:
   - name: Matthew Leigh
     orcid: 0000-0003-1406-1413
     affiliation: 7
+  - name: Nicholas Luongo
+    orcid: 0000-0001-6527-0253
+    affiliation: 10
   - name: Ivan Oleksiyuk
     orcid: 0000-0002-4784-6340
     affiliation: 7
@@ -74,6 +77,8 @@ affiliations:
    index: 8
  - name: University of California, Berkeley
    index: 9
+ - name: Argonne National Laboratory
+   index: 10
 
 date: 15 Janurary 2024
 bibliography: paper.bib
-- 
GitLab


From 29377c44e86db030a0fd776915b71dfa8d9cc9a3 Mon Sep 17 00:00:00 2001
From: Samuel Van Stroud <sam.van.stroud@cern.ch>
Date: Thu, 16 May 2024 15:43:43 +0200
Subject: [PATCH 18/30] Update GN3.yaml

---
 salt/configs/GN3.yaml       | 71 ++++++++++++++++++-------------------
 salt/tests/test_models.py   |  2 +-
 salt/tests/test_pipeline.py |  3 +-
 salt/utils/cli.py           |  1 +
 salt/utils/inputs.py        |  8 ++++-
 5 files changed, 45 insertions(+), 40 deletions(-)

diff --git a/salt/configs/GN3.yaml b/salt/configs/GN3.yaml
index ecc2b88a..1c3fc706 100644
--- a/salt/configs/GN3.yaml
+++ b/salt/configs/GN3.yaml
@@ -17,19 +17,19 @@ model:
           dense_config:
             output_size: &embed_dim 256
             hidden_layers: [256]
-            activation: &activation ReLU
+            activation: &activation SiLU
 
       encoder:
         class_path: salt.models.TransformerV2
         init_args:
-          num_layers: 6
+          num_layers: 8
           embed_dim: *embed_dim
           out_dim: &out_dim 128
           attn_type: flash-varlen
           norm: LayerNorm
           ls_init: 1.0e-2
           dense_kwargs:
-            activation: SiLU
+            activation: *activation
             dropout: 0
             gated: True
           attn_kwargs:
@@ -50,44 +50,41 @@ model:
                 name: jets_classification
                 input_name: jets
                 label: flavour_label
-                loss:
-                  class_path: torch.nn.CrossEntropyLoss
-                  init_args: { weight: [1.0, 2.0, 2.0] }
+                use_class_dict: True
+                loss: torch.nn.CrossEntropyLoss
                 dense_config: &task_dense_config
                   input_size: *out_dim
-                  output_size: 3
+                  output_size: 4
                   hidden_layers: [128, 64, 32]
                   activation: *activation
 
-            - class_path: salt.models.ClassificationTask
-              init_args:
-                name: track_origin
-                input_name: tracks
-                label: ftagTruthOriginLabel
-                weight: 0.5
-                loss:
-                  class_path: torch.nn.CrossEntropyLoss
-                  init_args:
-                    weight: [4.2, 73.7, 1.0, 17.5, 12.3, 12.5, 141.7, 22.3]
-                dense_config:
-                  <<: *task_dense_config
-                  output_size: 8
-                  context_size: *out_dim
-
-            - class_path: salt.models.VertexingTask
-              init_args:
-                name: track_vertexing
-                input_name: tracks
-                label: ftagTruthVertexIndex
-                weight: 1.5
-                loss:
-                  class_path: torch.nn.BCEWithLogitsLoss
-                  init_args: { reduction: none }
-                dense_config:
-                  <<: *task_dense_config
-                  input_size: 256
-                  output_size: 1
-                  context_size: *out_dim
+# Disabling aux tasks during R&D phase to speed up training!
+#            - class_path: salt.models.ClassificationTask
+#              init_args:
+#                name: track_origin
+#                input_name: tracks
+#                label: ftagTruthOriginLabel
+#                weight: 0.5
+#                loss: torch.nn.CrossEntropyLoss
+#                dense_config:
+#                  <<: *task_dense_config
+#                  output_size: 8
+#                  context_size: *out_dim
+#
+#            - class_path: salt.models.VertexingTask
+#              init_args:
+#                name: track_vertexing
+#                input_name: tracks
+#                label: ftagTruthVertexIndex
+#                weight: 1.5
+#                loss:
+#                  class_path: torch.nn.BCEWithLogitsLoss
+#                  init_args: { reduction: none }
+#                dense_config:
+#                  <<: *task_dense_config
+#                  input_size: 256
+#                  output_size: 1
+#                  context_size: *out_dim
 
 data:
   variables:
@@ -114,8 +111,8 @@ data:
       - numberOfPixelSharedHits
       - numberOfPixelSplitHits
       - numberOfSCTSharedHits
+      - leptonID
       #- numberOfTRTHits
-      #- leptonID
 
   train_file: /unix/atlastracking/samples/ftag_dumps/vertexing/output/pp_output_train.h5
   val_file: /unix/atlastracking/samples/ftag_dumps/vertexing/output/pp_output_val.h5
diff --git a/salt/tests/test_models.py b/salt/tests/test_models.py
index c391efea..4abddaa8 100644
--- a/salt/tests/test_models.py
+++ b/salt/tests/test_models.py
@@ -139,7 +139,7 @@ def test_transformer_cross_attention_encoder() -> None:
     mask["type1"] = torch.zeros(extended_x["type1"].shape[:-1]).bool()
     mask["type1"][:, -1] = True
     out_with_pad = net(extended_x, mask)["type1"][:, :-1]
-    assert torch.all(out["type1"] == out_with_pad)
+    torch.testing.assert_allclose(out["type1"], out_with_pad)
 
 
 def test_mha_allvalid_mask() -> None:
diff --git a/salt/tests/test_pipeline.py b/salt/tests/test_pipeline.py
index d32a9737..53e53c80 100644
--- a/salt/tests/test_pipeline.py
+++ b/salt/tests/test_pipeline.py
@@ -11,10 +11,11 @@ from salt.utils.inputs import write_dummy_file, write_dummy_norm_dict
 
 w = "ignore::lightning.fabric.utilities.warnings.PossibleUserWarning:"
 CONFIG = "GN2.yaml"
+TAU_CONFIGS = {"GN2.yaml", "GN3.yaml"}
 
 
 def run_train(tmp_path, config_path, train_args, do_xbb=False, do_muP=False, inc_params=False):
-    incl_taus = config_path.name == CONFIG
+    incl_taus = config_path.name in TAU_CONFIGS
     tmp_path = Path(tmp_path)
     train_h5_path = tmp_path / "dummy_train_inputs.h5"
     nd_path = tmp_path / "dummy_norm_dict.yaml"
diff --git a/salt/utils/cli.py b/salt/utils/cli.py
index 5bc9be7b..ab711db9 100644
--- a/salt/utils/cli.py
+++ b/salt/utils/cli.py
@@ -188,6 +188,7 @@ class SaltCLI(LightningCLI):
                 input_name = task["init_args"]["input_name"]
                 if task["init_args"]["label"] in class_dict[input_name]:
                     class_weights = class_dict[input_name][task["init_args"]["label"]]
+                    class_weights = torch.Tensor(class_weights)
                     task["init_args"]["loss"]["init_args"]["weight"] = class_weights
                 else:
                     raise ValueError(
diff --git a/salt/utils/inputs.py b/salt/utils/inputs.py
index 1125c67a..15073e81 100644
--- a/salt/utils/inputs.py
+++ b/salt/utils/inputs.py
@@ -76,6 +76,7 @@ TRACK_VARS = [
     "eta",
     "phi",
     "subjetIndex",
+    "leptonID",
 ]
 
 ELECTRON_VARS = [
@@ -151,8 +152,13 @@ def write_dummy_norm_dict(nd_path: Path, cd_path: Path):
     sd["flow"] = {n: {"std": 1.0, "mean": 1.0} for n in TRACK_VARS}
     with open(nd_path, "w") as file:
         yaml.dump(sd, file, sort_keys=False)
+
+    cd: dict = {}
+    cd["jets"] = {"HadronConeExclTruthLabelID": [1.0, 2.0, 2.0, 2.0]}
+    cd["jets"]["flavour_label"] = cd["jets"]["HadronConeExclTruthLabelID"]
+    cd["tracks"] = {"ftagTruthOriginLabel": [4.2, 73.7, 1.0, 17.5, 12.3, 12.5, 141.7, 22.3]}
     with open(cd_path, "w") as file:
-        yaml.dump(sd, file, sort_keys=False)
+        yaml.dump(cd, file, sort_keys=False)
 
 
 def get_dummy_inputs(n_jets=1000, n_jet_features=2, n_track_features=21, n_tracks_per_jet=40):
-- 
GitLab


From 429ce12ecfb6f50c5f78b94c418588191a6ac62f Mon Sep 17 00:00:00 2001
From: Nikita I Pond <zcappon@ucl.ac.uk>
Date: Tue, 21 May 2024 09:11:23 +0100
Subject: [PATCH 19/30] working commit for mf onnx exports

---
 salt/configs/MaskFormer.yaml |  2 +-
 salt/models/maskformer.py    | 78 ++++++++++++++++++++++++----
 salt/tests/test_pipeline.py  | 15 ++++--
 salt/to_onnx.py              | 98 ++++++++++++++++++++++++++++++++++--
 salt/utils/configs.py        |  6 +++
 salt/utils/mask_utils.py     | 11 +++-
 6 files changed, 190 insertions(+), 20 deletions(-)

diff --git a/salt/configs/MaskFormer.yaml b/salt/configs/MaskFormer.yaml
index 532887b5..595200fc 100644
--- a/salt/configs/MaskFormer.yaml
+++ b/salt/configs/MaskFormer.yaml
@@ -28,7 +28,7 @@ model:
             num_heads: 8
           dense_kwargs:
             activation: *activation
-          drop_registers: true
+          drop_registers: true 
 
 
       mask_decoder:
diff --git a/salt/models/maskformer.py b/salt/models/maskformer.py
index cc21b6d9..64f846e4 100644
--- a/salt/models/maskformer.py
+++ b/salt/models/maskformer.py
@@ -101,6 +101,8 @@ class MaskDecoder(nn.Module):
         # MF only supports one input, if we have multiple then we have no way of knowing
         # what section of the embedding relates to objects we want to generate masks for
         if isinstance(pad_mask, dict):
+            # print(pad_mask.keys())
+            # print({k: v.shape for k, v in pad_mask.items()})
             assert len(pad_mask) == 1, "Maskformer only supports one input."
             pad_mask = next(iter(pad_mask.values()))
 
@@ -108,15 +110,39 @@ class MaskDecoder(nn.Module):
         # apply norm
         q = self.norm1(self.inital_q.expand(x.shape[0], -1, -1))
         x = self.norm2(x)
-
+        xpad = torch.zeros((x.shape[0], 1, x.shape[-1]), device=x.device, dtype=x.dtype)
+        
+        print('here', x.shape, xpad.shape,q.shape)
+        if pad_mask is not None:
+            padpad_mask = torch.zeros((pad_mask.shape[0], 1), device=pad_mask.device, dtype=pad_mask.dtype)
+            pad_mask = torch.cat([pad_mask, padpad_mask], dim=1)
+            print(pad_mask.shape)
+
+        # qpad = torch.zeros((q.shape[0], 1, q.shape[-1]), device=q.device, dtype=q.dtype)
+
+        x = torch.cat([x, xpad], dim=1)
+        # q = torch.cat([q, qpad], dim=2)
+        print(x.shape, q.shape)
         intermediate_outputs: list | None = [] if self.aux_loss else None
         for layer in self.layers:
             if self.aux_loss:
                 assert intermediate_outputs is not None
                 intermediate_outputs.append({"embed": q, **self.get_preds(q, x, pad_mask)})
             q, x = layer(q, x, kv_mask=pad_mask)
-
-        preds["objects"] = {"embed": q, "x": x, **self.get_preds(q, x, pad_mask)}
+        # q = q[:, :-1, :]
+        mf_preds = self.get_preds(q, x, pad_mask)
+        print('HERE1'*50)
+        for k, v in mf_preds.items():
+            print(k, v.shape, flush=True)
+        preds["objects"] = {
+            "embed": q, 
+            "x": x[:, :-1, :], 
+            **self.get_preds(q, x, pad_mask)
+            }
+        preds["objects"]["masks"] = preds["objects"]["masks"][:,:,:-1]
+        print('HERE2'*50)
+        for k, v in preds["objects"].items():
+            print(k, v.shape, flush=True)
         if self.aux_loss:
             preds["intermediate_outputs"] = intermediate_outputs
 
@@ -131,9 +157,16 @@ def get_masks(x: Tensor, q: Tensor, mask_net: nn.Module, input_pad_mask: Tensor
     pred_masks = torch.einsum("bqe,ble->bql", mask_tokens, x)
 
     if input_pad_mask is not None:
+        print('this one lol', input_pad_mask.shape,  pred_masks.shape)
+        t = input_pad_mask.unsqueeze(1).expand_as(pred_masks)
+        print(t.shape)
         pred_masks[input_pad_mask.unsqueeze(1).expand_as(pred_masks)] = torch.finfo(
             pred_masks.dtype
         ).min
+        # expanded_mask = input_pad_mask.unsqueeze(1).expand_as(pred_masks)
+        # print("Expanded mask shape:", expanded_mask.shape)
+        # pred_masks = torch.where(expanded_mask, torch.tensor(torch.finfo(pred_masks.dtype).min, device=pred_masks.device), pred_masks)
+
     return pred_masks
 
 
@@ -161,16 +194,38 @@ class MaskDecoderLayer(nn.Module):
 
     def forward(self, q: Tensor, kv: Tensor, kv_mask: Tensor | None = None) -> Tensor:
         attn_mask = None
-
+        # return q, kv
         # if we want to do mask attention
         if self.mask_attention:
             # New attention masking convention with transformers 2
             # Positions with True are allowed while False are masked
-            attn_mask = (get_masks(kv, q, self.mask_net, kv_mask).sigmoid() > 0.9).detach()
+            # attn_mask = get_masks(kv, q, self.mask_net, kv_mask).sigmoid().detach()
+            # attn_mask = attn_mask * (attn_mask > 0.9).float()
+
+            # # attn_mask = (get_masks(kv, q, self.mask_net, kv_mask).sigmoid() > 0.9).detach()
+
+            # # If the attention mask is False for all positions, we set it to True
+            # # This is prevent NaNs in the softmax
+            # attn_mask[(~attn_mask).all(-1)] = True
+            # Compute masks and apply sigmoid
+            attn_mask = get_masks(kv, q, self.mask_net, kv_mask).sigmoid()
+
+            # Threshold and detach
+            attn_mask = (attn_mask > 0.9).detach()
+            print(attn_mask.shape, (~attn_mask).all(-1).shape, torch.all(attn_mask == 0, dim=-1).shape)
+            newmask = torch.all(attn_mask == 0, dim=-1, keepdim=True).expand(attn_mask.shape)
+            # Check if all values along the last dimension are 0 (equivalent to `False` in boolean)
+            # If so, set them to 1 (equivalent to `True` in boolean)
+            print('AT THIS BIT', attn_mask.shape, newmask.shape)
+            # attn_mask.masked_fill_(newmask.bool(), True)
+            # attn_mask = attn_mask.float()  # Convert attn_mask to float if it isn't already
+            # newmask = newmask.float()  # Convert newmask to float (True becomes 1.0, False becomes 0.0)
+
+            # Use arithmetic to set the specified positions to True (1.0)
+            # attn_mask = (attn_mask * (1 - newmask) + newmask).bool()
+            attn_mask = attn_mask | newmask.bool()
+            # attn_mask[newmask] = True
 
-            # If the attention mask is False for all positions, we set it to True
-            # This is prevent NaNs in the softmax
-            attn_mask[(~attn_mask).all(-1)] = True
 
         # update queries with cross attention from nodes
         q = q + self.q_ca(q, kv=kv, kv_mask=kv_mask, attn_mask=attn_mask)
@@ -185,7 +240,12 @@ class MaskDecoderLayer(nn.Module):
         if self.bidirectional_ca:
             if attn_mask is not None:
                 attn_mask = attn_mask.transpose(1, 2)
-                attn_mask[(~attn_mask).all(-1)] = True
+                newmask = torch.all(attn_mask == 1, dim=-1, keepdim=True).expand(attn_mask.shape)
+                # attn_mask = attn_mask.float()
+                # newmask = newmask.float()
+                # attn_mask = (attn_mask * (1 - newmask) + newmask).bool()
+                attn_mask = attn_mask | ~newmask.bool()
+                # attn_mask[(~attn_mask).all(-1)] = True
             kv = kv + self.kv_ca(kv, q, attn_mask=attn_mask)
             kv = kv + self.kv_dense(kv)
         return q, kv
diff --git a/salt/tests/test_pipeline.py b/salt/tests/test_pipeline.py
index 53e53c80..757bebf9 100644
--- a/salt/tests/test_pipeline.py
+++ b/salt/tests/test_pipeline.py
@@ -29,9 +29,9 @@ def run_train(tmp_path, config_path, train_args, do_xbb=False, do_muP=False, inc
     args += [f"--data.class_dict={cd_path}"]
     args += [f"--data.train_file={train_h5_path}"]
     args += [f"--data.val_file={train_h5_path}"]
-    args += ["--data.num_train=500"]
-    args += ["--data.num_val=200"]
-    args += ["--data.batch_size=100"]
+    args += ["--data.num_train=50"]
+    args += ["--data.num_val=20"]
+    args += ["--data.batch_size=10"]
     args += ["--data.num_workers=0"]
     args += ["--trainer.max_epochs=1"]
     args += ["--trainer.accelerator=cpu"]
@@ -62,8 +62,10 @@ def run_eval(tmp_path, train_config_path, nd_path, do_xbb=False):
     write_dummy_file(test_h5_path, nd_path, do_xbb)
 
     args = ["test"]
+
     args += [f"--config={train_config_path}"]
     args += [f"--data.test_file={test_h5_path}"]
+    args += ["--data.batch_size=100"]
     args += ["--data.num_test=1000"]
     main(args)
 
@@ -109,7 +111,12 @@ def run_onnx(train_dir, args=None):
         args = []
     args += [f"--ckpt_path={ckpt_path}"]
     args += ["--track_selection=dipsLoose202102"]
-    args += args
+
+    if 'MaskFormer' in str(train_dir):
+        args += ["-mf=vertexing"]
+    print('ONNX'*100)
+    print(train_dir)
+    # args += args
     to_onnx(args)
     get_onnx_metadata([str(train_dir / "network.onnx")])
 
diff --git a/salt/to_onnx.py b/salt/to_onnx.py
index 7ca1d384..0a4cf6ec 100644
--- a/salt/to_onnx.py
+++ b/salt/to_onnx.py
@@ -18,7 +18,11 @@ from salt.models.transformer_v2 import change_attn_backends
 from salt.modelwrapper import ModelWrapper
 from salt.utils.inputs import inputs_sep_no_pad, inputs_sep_with_pad
 from salt.utils.union_find import get_node_assignment_jit
-
+from salt.utils.cli import SaltCLI
+from salt.utils.inputs import inputs_sep_no_pad, inputs_sep_with_pad
+from salt.utils.union_find import get_node_assignment
+from salt.utils.mask_utils import indices_from_mask
+from salt.utils.configs import MaskformerConfig
 torch.manual_seed(42)
 # https://gitlab.cern.ch/atlas/athena/-/blob/master/PhysicsAnalysis/JetTagging/FlavorTagDiscriminants/Root/DataPrepUtilities.cxx
 TRACK_SELECTIONS = [
@@ -78,6 +82,10 @@ def parse_args(args):
         help="Include auxiliary task outputs (if available)",
         action="store_true",
     )
+    parser.add_argument(
+        "-mf",
+        "--object_name",
+    )
     parser.add_argument(
         "-f",
         "--force",
@@ -93,8 +101,34 @@ def get_probs(outputs: Tensor):
     return tuple(output.squeeze() for output in torch.split(outputs, 1, -1))
 
 
+def get_maskformer_outputs(objects):
+    print(objects.keys())
+    print(objects['class_probs'].shape)
+    # Convert the (N,M) -> (M,) mask indices
+    masks = objects['masks']
+    class_probs = objects['class_probs']
+    regression = objects['regression']
+    object_leading = objects['regression']
+    # Define the leading object as the one with the highest regression[0] value 
+    # in vertexing case, this is the pT
+    order = torch.argsort(object_leading[:,:, 0], descending=True)
+    indices = torch.arange(order.shape[0]).unsqueeze(1).expand_as(order)
+    
+    # Apply the re-ordering
+    # masks = masks[indices, order]
+    # class_probs = class_probs[indices, order]
+    # regression = regression[indices, order]
+    # Convert our masks (N,M), now in pT order, to be (M,) indices
+    object_indices = indices_from_mask(masks)
+    print(regression[:, :, 0])
+    # Return the leading regression level variables to be stored at global-level
+    leading_regression = regression[:, 0]
+
+    return leading_regression, masks, class_probs, regression
+
+
 class ONNXModel(ModelWrapper):
-    def __init__(self, name: str | None = None, include_aux: bool = False, **kwargs) -> None:
+    def __init__(self, name: str | None = None, include_aux: bool = False, object_name : str | None = None, mf_config : dict | None =None, **kwargs) -> None:
         super().__init__(**kwargs)
         self.name = name if name else self.name
         assert len(self.model.init_nets) == 1, "Multi input ONNX models are not yet supported."
@@ -102,10 +136,19 @@ class ONNXModel(ModelWrapper):
         assert "-" not in self.name, "Model name cannot contain dashes."
         self.include_aux = include_aux
         self.const = "tracks"
+        self.object = object_name
+        self.mf_config = MaskformerConfig(**mf_config) if mf_config else None
+        if self.object:
+            self.object_params = {
+                "class_label": self.mf_config.object.class_label,
+                "label_map": [f"p{name}" for name in self.mf_config.object.class_names],
+            }
+            print('OBJECT PARAMS', self.object_params)
         self.input_names = ["jet_features", "track_features"]
         jets, tracks = inputs_sep_no_pad(
             1, 40, self.input_dims[self.global_object], self.input_dims[self.const]
         )
+        self.has_global_task = len([t for t in self.model.tasks if t.input_name == self.global_object]) > 0
         self.example_input_array = jets, tracks.squeeze(0)  # used for the tracing during export
 
     @property
@@ -131,6 +174,17 @@ class ONNXModel(ModelWrapper):
             if "track_vertexing" in [t.name for t in self.model.tasks]:
                 out_name = f"{self.model_name}_VertexIndex"
                 outputs.append(out_name)
+        if self.object:
+            regression_task = [t for t in self.model.tasks 
+                               if t.input_name == 'objects' and t.name == 'regression']
+            assert len(regression_task) == 1, "Object outputs require a regression task"
+            # First we append the leading jet regression variables
+            outputs += [
+                f"{self.model_name}_leading_{self.object}_{v}"
+                for v in regression_task[0].targets]
+            outputs += [f"{self.model_name}_{self.object}_index"]
+            outputs += [f"{self.model_name}_{self.object}_class"]
+            outputs += [f"{self.model_name}_{self.object}_regression"]
 
         return outputs
 
@@ -148,6 +202,9 @@ class ONNXModel(ModelWrapper):
             if "track_vertexing" in [t.name for t in self.model.tasks]:
                 out_name = f"{self.model_name}_VertexIndex"
                 dynamic_axes[out_name] = {0: "n_tracks"}
+        if self.object:
+            out_name = f"{self.model_name}_{self.object}"
+            dynamic_axes[out_name] = {0: "n_tracks"}
         return dynamic_axes
 
     def forward(self, jets: Tensor, tracks: Tensor, labels=None):  # type: ignore[override]
@@ -160,9 +217,12 @@ class ONNXModel(ModelWrapper):
         outputs = super().forward({self.global_object: jets, self.const: tracks}, None)[0]
 
         # get class probabilities
+        # onnx_outputs = get_probs(
+        #     outputs[self.global_object][f"{self.global_object}_classification"]
+        # )
         onnx_outputs = get_probs(
-            outputs[self.global_object][f"{self.global_object}_classification"]
-        )
+                outputs[self.global_object][f"{self.global_object}_classification"]
+        ) if self.has_global_task else ()
 
         # add aux outputs
         if self.include_aux:
@@ -178,7 +238,22 @@ class ONNXModel(ModelWrapper):
                 vertex_indices = get_node_assignment_jit(edge_scores, pad_mask)
                 vertex_list = mask_fill_flattened(vertex_indices, pad_mask)
                 onnx_outputs += (vertex_list.reshape(-1).char(),)
-
+        if self.object:
+            print('LOL'*100)
+            assert 'objects' in outputs, 'No MF objects in outputs'
+            print(outputs['objects'].keys())
+            print(outputs['objects']['masks'])
+            print(indices_from_mask(outputs['objects']['masks']))
+            print(outputs['objects']['class_probs'])
+            # Extract the mf outputs
+            leading_reg, masks, class_probs, regression = get_maskformer_outputs(outputs['objects'])
+            
+            for r in leading_reg[0]:
+                onnx_outputs += (r,)
+            onnx_outputs += (indices_from_mask(masks).char(),)
+            onnx_outputs += (torch.argmax(class_probs, dim=-1).char(),)
+            onnx_outputs += (regression,)
+        print(onnx_outputs)
         return onnx_outputs
 
 
@@ -277,6 +352,7 @@ def main(args=None):
         config_path = args.ckpt_path.parents[1] / "config.yaml"
         assert config_path.is_file(), f"Could not find config file at {config_path}"
 
+
     # instantiate pytorch and wrapper models
     with warnings.catch_warnings():
         warnings.simplefilter("ignore")
@@ -287,11 +363,22 @@ def main(args=None):
         pt_model.eval()
         pt_model.float()
 
+        if args.object_name:
+            with open(config_path, 'r') as f:
+                config = yaml.safe_load(f)
+            mf_config = config['data'].get('mf_config')
+            if not mf_config:
+                raise ValueError('No mf_config in config')
+        else:
+            mf_config = {}
         onnx_model = ONNXModel.load_from_checkpoint(
             args.ckpt_path,
             name=args.name,
             include_aux=args.include_aux,
+            object_name=args.object_name,
+            mf_config=mf_config,
             map_location=torch.device("cpu"),
+
         )
         onnx_model.eval()
         change_attn_backends(
@@ -315,6 +402,7 @@ def main(args=None):
         input_names=onnx_model.input_names,
         output_names=onnx_model.output_names,
         dynamic_axes=onnx_model.dynamic_axes,
+        verbose=True,
     )
 
     # add metadata
diff --git a/salt/utils/configs.py b/salt/utils/configs.py
index 5580bfc8..f8f60afd 100644
--- a/salt/utils/configs.py
+++ b/salt/utils/configs.py
@@ -85,3 +85,9 @@ class MaskformerConfig:
 
     object: MaskformerObjectConfig
     constituent: MaskformerObjectConfig
+
+    def __post_init__(self):
+        if isinstance(self.object, dict):
+            self.object = MaskformerObjectConfig(**self.object)
+        if isinstance(self.constituent, dict):
+            self.constituent = MaskformerObjectConfig(**self.constituent)
\ No newline at end of file
diff --git a/salt/utils/mask_utils.py b/salt/utils/mask_utils.py
index 40df58b7..8713fccc 100644
--- a/salt/utils/mask_utils.py
+++ b/salt/utils/mask_utils.py
@@ -111,7 +111,16 @@ def indices_from_mask(mask: BoolTensor, noindex: int = -1) -> Tensor:
         raise ValueError("mask must be 2D for single sample or 3D for batch")
 
     # ensure indices start from 0
-    indices -= indices[indices >= 0].min()
+    idx = indices >= 0
+    # Ensure all negative indices are +ve so we don't include them in the min,
+    # this is due to onnx
+    indices = indices + idx*999
+    
+    mindices = indices
+    
+    # d_indices = 
+    indices -= mindices
+
     indices[indices < 0] = noindex
 
     return indices
-- 
GitLab


From 333f2dad62b080d297e9b3cc7e9f313538767ee4 Mon Sep 17 00:00:00 2001
From: Nikita I Pond <zcappon@ucl.ac.uk>
Date: Fri, 24 May 2024 14:08:45 +0100
Subject: [PATCH 20/30] cleanup debug stuff

---
 salt/models/maskformer.py | 47 ++++----------------------
 salt/models/task.py       |  4 +++
 salt/to_onnx.py           | 69 +++++++++++++++++++++++++++++----------
 3 files changed, 62 insertions(+), 58 deletions(-)

diff --git a/salt/models/maskformer.py b/salt/models/maskformer.py
index 64f846e4..c8c2364f 100644
--- a/salt/models/maskformer.py
+++ b/salt/models/maskformer.py
@@ -101,8 +101,7 @@ class MaskDecoder(nn.Module):
         # MF only supports one input, if we have multiple then we have no way of knowing
         # what section of the embedding relates to objects we want to generate masks for
         if isinstance(pad_mask, dict):
-            # print(pad_mask.keys())
-            # print({k: v.shape for k, v in pad_mask.items()})
+        
             assert len(pad_mask) == 1, "Maskformer only supports one input."
             pad_mask = next(iter(pad_mask.values()))
 
@@ -112,37 +111,26 @@ class MaskDecoder(nn.Module):
         x = self.norm2(x)
         xpad = torch.zeros((x.shape[0], 1, x.shape[-1]), device=x.device, dtype=x.dtype)
         
-        print('here', x.shape, xpad.shape,q.shape)
         if pad_mask is not None:
             padpad_mask = torch.zeros((pad_mask.shape[0], 1), device=pad_mask.device, dtype=pad_mask.dtype)
             pad_mask = torch.cat([pad_mask, padpad_mask], dim=1)
-            print(pad_mask.shape)
-
-        # qpad = torch.zeros((q.shape[0], 1, q.shape[-1]), device=q.device, dtype=q.dtype)
 
         x = torch.cat([x, xpad], dim=1)
-        # q = torch.cat([q, qpad], dim=2)
-        print(x.shape, q.shape)
+
         intermediate_outputs: list | None = [] if self.aux_loss else None
         for layer in self.layers:
             if self.aux_loss:
                 assert intermediate_outputs is not None
                 intermediate_outputs.append({"embed": q, **self.get_preds(q, x, pad_mask)})
             q, x = layer(q, x, kv_mask=pad_mask)
-        # q = q[:, :-1, :]
         mf_preds = self.get_preds(q, x, pad_mask)
-        print('HERE1'*50)
-        for k, v in mf_preds.items():
-            print(k, v.shape, flush=True)
+
         preds["objects"] = {
             "embed": q, 
             "x": x[:, :-1, :], 
             **self.get_preds(q, x, pad_mask)
             }
         preds["objects"]["masks"] = preds["objects"]["masks"][:,:,:-1]
-        print('HERE2'*50)
-        for k, v in preds["objects"].items():
-            print(k, v.shape, flush=True)
         if self.aux_loss:
             preds["intermediate_outputs"] = intermediate_outputs
 
@@ -157,15 +145,12 @@ def get_masks(x: Tensor, q: Tensor, mask_net: nn.Module, input_pad_mask: Tensor
     pred_masks = torch.einsum("bqe,ble->bql", mask_tokens, x)
 
     if input_pad_mask is not None:
-        print('this one lol', input_pad_mask.shape,  pred_masks.shape)
+
         t = input_pad_mask.unsqueeze(1).expand_as(pred_masks)
-        print(t.shape)
         pred_masks[input_pad_mask.unsqueeze(1).expand_as(pred_masks)] = torch.finfo(
             pred_masks.dtype
         ).min
-        # expanded_mask = input_pad_mask.unsqueeze(1).expand_as(pred_masks)
-        # print("Expanded mask shape:", expanded_mask.shape)
-        # pred_masks = torch.where(expanded_mask, torch.tensor(torch.finfo(pred_masks.dtype).min, device=pred_masks.device), pred_masks)
+
 
     return pred_masks
 
@@ -199,32 +184,17 @@ class MaskDecoderLayer(nn.Module):
         if self.mask_attention:
             # New attention masking convention with transformers 2
             # Positions with True are allowed while False are masked
-            # attn_mask = get_masks(kv, q, self.mask_net, kv_mask).sigmoid().detach()
-            # attn_mask = attn_mask * (attn_mask > 0.9).float()
-
-            # # attn_mask = (get_masks(kv, q, self.mask_net, kv_mask).sigmoid() > 0.9).detach()
-
-            # # If the attention mask is False for all positions, we set it to True
-            # # This is prevent NaNs in the softmax
-            # attn_mask[(~attn_mask).all(-1)] = True
             # Compute masks and apply sigmoid
             attn_mask = get_masks(kv, q, self.mask_net, kv_mask).sigmoid()
 
             # Threshold and detach
             attn_mask = (attn_mask > 0.9).detach()
-            print(attn_mask.shape, (~attn_mask).all(-1).shape, torch.all(attn_mask == 0, dim=-1).shape)
             newmask = torch.all(attn_mask == 0, dim=-1, keepdim=True).expand(attn_mask.shape)
             # Check if all values along the last dimension are 0 (equivalent to `False` in boolean)
             # If so, set them to 1 (equivalent to `True` in boolean)
-            print('AT THIS BIT', attn_mask.shape, newmask.shape)
-            # attn_mask.masked_fill_(newmask.bool(), True)
-            # attn_mask = attn_mask.float()  # Convert attn_mask to float if it isn't already
-            # newmask = newmask.float()  # Convert newmask to float (True becomes 1.0, False becomes 0.0)
 
-            # Use arithmetic to set the specified positions to True (1.0)
-            # attn_mask = (attn_mask * (1 - newmask) + newmask).bool()
             attn_mask = attn_mask | newmask.bool()
-            # attn_mask[newmask] = True
+
 
 
         # update queries with cross attention from nodes
@@ -241,11 +211,8 @@ class MaskDecoderLayer(nn.Module):
             if attn_mask is not None:
                 attn_mask = attn_mask.transpose(1, 2)
                 newmask = torch.all(attn_mask == 1, dim=-1, keepdim=True).expand(attn_mask.shape)
-                # attn_mask = attn_mask.float()
-                # newmask = newmask.float()
-                # attn_mask = (attn_mask * (1 - newmask) + newmask).bool()
                 attn_mask = attn_mask | ~newmask.bool()
-                # attn_mask[(~attn_mask).all(-1)] = True
+
             kv = kv + self.kv_ca(kv, q, attn_mask=attn_mask)
             kv = kv + self.kv_dense(kv)
         return q, kv
diff --git a/salt/models/task.py b/salt/models/task.py
index 4f5270a2..5ad13820 100644
--- a/salt/models/task.py
+++ b/salt/models/task.py
@@ -331,6 +331,9 @@ class RegressionTask(RegressionTaskBase):
         loss = None
         if targets is not None:
             loss = self.nan_loss(preds, targets) * self.weight
+        
+
+        
         return preds, loss
 
     def run_inference(self, preds: Tensor, targets_dict: Mapping, precision: str = "f4"):
@@ -343,6 +346,7 @@ class RegressionTask(RegressionTaskBase):
             for i in range(len(self.norm_params["mean"])):
                 preds[:, i] = preds[:, i] * self.norm_params["std"][i] + self.norm_params["mean"][i]
         elif self.scaler is not None:
+            print('Are we hitting this>'*1000)
             for i in range(len(self.targets)):
                 preds[:, i] = self.scaler.inverse(self.targets[i], preds[:, i])
         dtype = np.dtype([(f"{self.name}_{t}", precision) for t in self.targets])
diff --git a/salt/to_onnx.py b/salt/to_onnx.py
index 0a4cf6ec..b848efed 100644
--- a/salt/to_onnx.py
+++ b/salt/to_onnx.py
@@ -109,15 +109,26 @@ def get_maskformer_outputs(objects):
     class_probs = objects['class_probs']
     regression = objects['regression']
     object_leading = objects['regression']
+
+    # TODO not enforce == 2
+    null_preds = class_probs[:, :, -1] > 0.5
+    if not null_preds.any():
+        # If we have no predicted objects, we return arange(0,40) for vertex index, and
+        # NaN (check?) for regression values
+
+        return torch.ones((1,5))*torch.nan, torch.arange(40), class_probs, torch.ones((1,5, 5))*torch.nan
+    object_leading[null_preds] = -999
+    regression[null_preds] = np.nan
     # Define the leading object as the one with the highest regression[0] value 
     # in vertexing case, this is the pT
     order = torch.argsort(object_leading[:,:, 0], descending=True)
-    indices = torch.arange(order.shape[0]).unsqueeze(1).expand_as(order)
     
-    # Apply the re-ordering
-    # masks = masks[indices, order]
-    # class_probs = class_probs[indices, order]
-    # regression = regression[indices, order]
+    
+    # Use gather to reorder tensors along a specific dimension
+    # TODO check this is working as expected
+    masks = torch.gather(masks, 1, order.unsqueeze(-1).expand(-1, -1, masks.size(-1)))
+    class_probs = torch.gather(class_probs, 1, order.unsqueeze(-1).expand(-1, -1, class_probs.size(-1)))
+    regression = torch.gather(regression, 1, order.unsqueeze(-1).expand(-1, -1, regression.size(-1)))
     # Convert our masks (N,M), now in pT order, to be (M,) indices
     object_indices = indices_from_mask(masks)
     print(regression[:, :, 0])
@@ -161,10 +172,12 @@ class ONNXModel(ModelWrapper):
         """The output names are a list of strings, one for each output of the model."""
         # get the global task output
         global_tasks = [t for t in self.model.tasks if t.input_name == self.global_object]
-        assert len(global_tasks) == 1, "Multi global task ONNX models are not yet supported."
-        object_classes = global_tasks[0].class_names
-        outputs = [f"{self.model_name}_p{flav.rstrip('jets')}" for flav in object_classes]
-
+        assert len(global_tasks) <= 1, "Multi global task ONNX models are not yet supported."
+        if self.has_global_task:
+            object_classes = global_tasks[0].class_names
+            outputs = [f"{self.model_name}_p{flav.rstrip('jets')}" for flav in object_classes]
+        else:
+            outputs = []
         # aux task output names
         if self.include_aux:
             if "track_origin" in [t.name for t in self.model.tasks]:
@@ -183,8 +196,8 @@ class ONNXModel(ModelWrapper):
                 f"{self.model_name}_leading_{self.object}_{v}"
                 for v in regression_task[0].targets]
             outputs += [f"{self.model_name}_{self.object}_index"]
-            outputs += [f"{self.model_name}_{self.object}_class"]
-            outputs += [f"{self.model_name}_{self.object}_regression"]
+            # outputs += [f"{self.model_name}_{self.object}_class"]
+            # outputs += [f"{self.model_name}_{self.object}_regression"]
 
         return outputs
 
@@ -235,6 +248,7 @@ class ONNXModel(ModelWrapper):
             if "track_vertexing" in track_outs:
                 pad_mask = torch.zeros(tracks.shape[:-1], dtype=torch.bool)
                 edge_scores = track_outs["track_vertexing"]
+                print(edge_scores.shape, flush=True)
                 vertex_indices = get_node_assignment_jit(edge_scores, pad_mask)
                 vertex_list = mask_fill_flattened(vertex_indices, pad_mask)
                 onnx_outputs += (vertex_list.reshape(-1).char(),)
@@ -245,14 +259,22 @@ class ONNXModel(ModelWrapper):
             print(outputs['objects']['masks'])
             print(indices_from_mask(outputs['objects']['masks']))
             print(outputs['objects']['class_probs'])
+            regression_task = [t for t in self.model.tasks if t.input_name == 'objects' and t.name == 'regression']
+            assert len(regression_task) == 1, "Object outputs require a regression task"
+            regression_task = regression_task[0]
+
+            for i, t in enumerate(regression_task.targets):
+                unscaled_preds = regression_task.scaler.inverse(t, outputs['objects']["regression"][:, :, i])
+                outputs['objects']['regression'][:, :, i] = unscaled_preds
+            # outputs['objects']['regression'] = regression_task.run_inference
             # Extract the mf outputs
             leading_reg, masks, class_probs, regression = get_maskformer_outputs(outputs['objects'])
             
             for r in leading_reg[0]:
                 onnx_outputs += (r,)
-            onnx_outputs += (indices_from_mask(masks).char(),)
-            onnx_outputs += (torch.argmax(class_probs, dim=-1).char(),)
-            onnx_outputs += (regression,)
+            onnx_outputs += (indices_from_mask(masks).reshape(-1).char(),)
+            # onnx_outputs += (torch.argmax(class_probs, dim=-1).char(),)
+            # onnx_outputs += (regression,)
         print(onnx_outputs)
         return onnx_outputs
 
@@ -266,7 +288,11 @@ def compare_output(pt_model, onnx_session, include_aux, n_track=40):
 
     inputs_pt = {"jets": jets, "tracks": tracks}
     outputs_pt = pt_model(inputs_pt, {"tracks": pad_mask})[0]
-    pred_pt_jc = [p.detach().numpy() for p in get_probs(outputs_pt["jets"]["jets_classification"])]
+    pred_pt_jc = (
+        [p.detach().numpy() for p in get_probs(outputs_pt["jets"]["jets_classification"])]
+        if "jets" in outputs_pt
+        else []
+        )
 
     inputs_onnx = {
         "jet_features": jets.numpy(),
@@ -305,7 +331,8 @@ def compare_output(pt_model, onnx_session, include_aux, n_track=40):
         )
 
     # test vertexing
-    if include_aux:
+    if include_aux and "track_vertexing" in outputs_pt["tracks"]:
+        
         pred_pt_scores = outputs_pt["tracks"]["track_vertexing"].detach()
         pred_pt_indices = get_node_assignment_jit(pred_pt_scores, pad_mask)
         pred_pt_vtx = mask_fill_flattened(pred_pt_indices, pad_mask)
@@ -352,13 +379,16 @@ def main(args=None):
         config_path = args.ckpt_path.parents[1] / "config.yaml"
         assert config_path.is_file(), f"Could not find config file at {config_path}"
 
+    with open(config_path, 'r') as f:
+        config = yaml.safe_load(f)
 
     # instantiate pytorch and wrapper models
     with warnings.catch_warnings():
         warnings.simplefilter("ignore")
 
         pt_model = ModelWrapper.load_from_checkpoint(
-            args.ckpt_path, map_location=torch.device("cpu")
+            args.ckpt_path, map_location=torch.device("cpu"),
+            norm_config=config["model"]["norm_config"],
         )
         pt_model.eval()
         pt_model.float()
@@ -378,8 +408,10 @@ def main(args=None):
             object_name=args.object_name,
             mf_config=mf_config,
             map_location=torch.device("cpu"),
+            norm_config=config["model"]["norm_config"],
 
         )
+        print("OUTPUTS", onnx_model.output_names)
         onnx_model.eval()
         change_attn_backends(
             onnx_model.model, "torch-math"
@@ -402,7 +434,7 @@ def main(args=None):
         input_names=onnx_model.input_names,
         output_names=onnx_model.output_names,
         dynamic_axes=onnx_model.dynamic_axes,
-        verbose=True,
+        
     )
 
     # add metadata
@@ -468,6 +500,7 @@ def add_metadata(
 
     # write metadata as json string
     metadata = {"gnn_config": json.dumps(metadata)}
+    
     for k, v in metadata.items():
         meta = onnx_model.metadata_props.add()
         meta.key = k
-- 
GitLab


From b50c85d636310437e7e633743ab3b4bed7176b3e Mon Sep 17 00:00:00 2001
From: Nicholas Luongo <nicholas.andrew.luongo@cern.ch>
Date: Thu, 13 Jun 2024 12:14:04 +0200
Subject: [PATCH 21/30] Add additional Slurm options

---
 salt/submit/submit_slurm.py | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/salt/submit/submit_slurm.py b/salt/submit/submit_slurm.py
index ceae7110..3c4021a6 100644
--- a/salt/submit/submit_slurm.py
+++ b/salt/submit/submit_slurm.py
@@ -9,6 +9,9 @@ parser = argparse.ArgumentParser(description="Submit batch jobs to Slurm.")
 parser.add_argument("-c", "--config", required=True, type=Path, help="Configuration file for job.")
 parser.add_argument("-t", "--tag", default="salt_job", help="Tag for job to be submitted.")
 parser.add_argument("-p", "--partition", default=None, type=str, help="Partition to submit job.")
+parser.add_argument(
+    "-cn", "--constraint", default=None, type=str, help="Constraint on requested resources."
+)
 parser.add_argument("-a", "--account", default=None, type=str, help="Slurm account name.")
 parser.add_argument(
     "-e",
@@ -17,6 +20,7 @@ parser.add_argument(
     choices=["conda", "singularity", "local"],
     help="Environment for job to be submitted.",
 )
+parser.add_argument("-q", "--qos", default=None, type=str, help="Quality Of Service for job")
 parser.add_argument("-n", "--nodes", default=1, type=int, help="Nodes to split training across")
 parser.add_argument("-g", "--gpus_per_node", default=1, type=int, help="GPUs for each node")
 parser.add_argument(
@@ -77,8 +81,12 @@ handler = SlurmHandler(str(batch_path), str(log_path), str(job_basedir))
 handler["job-name"] = args.tag
 if args.partition is not None:
     handler["partition"] = args.partition
+if args.constraint is not None:
+    handler["constraint"] = args.constraint
 if args.account is not None:
     handler["account"] = args.account
+if args.qos is not None:
+    handler["qos"] = args.qos
 handler["nodes"] = nodes
 handler["gres"] = gres
 handler["ntasks-per-node"] = gpus_per_node
-- 
GitLab


From a04303b3954532132d5664499accc4d23980061e Mon Sep 17 00:00:00 2001
From: Neelam Kumari <neelam.kumari@cern.ch>
Date: Thu, 13 Jun 2024 16:43:26 +0200
Subject: [PATCH 22/30] Jet labels update

---
 salt/configs/GN2_extended.yaml | 125 +++++++++++++++++++++++++++++++++
 1 file changed, 125 insertions(+)
 create mode 100644 salt/configs/GN2_extended.yaml

diff --git a/salt/configs/GN2_extended.yaml b/salt/configs/GN2_extended.yaml
new file mode 100644
index 00000000..dfabb11b
--- /dev/null
+++ b/salt/configs/GN2_extended.yaml
@@ -0,0 +1,125 @@
+name: GN2
+
+model:
+  lrs_config:
+    initial: 1e-7
+    max: 5e-4
+    end: 1e-5
+    pct_start: 0.01
+    weight_decay: 1e-5
+
+  model:
+    class_path: salt.models.SaltModel
+    init_args:
+      init_nets:
+        - input_name: tracks
+          dense_config:
+            output_size: &embed_dim 256
+            hidden_layers: [256]
+            activation: &activation ReLU
+
+      encoder:
+        class_path: salt.models.TransformerEncoder
+        init_args:
+          embed_dim: *embed_dim
+          num_layers: 4
+          out_dim: &out_dim 128
+          mha_config:
+            num_heads: 8
+            attention: { class_path: salt.models.ScaledDotProductAttention }
+          dense_config:
+            activation: *activation
+
+      pool_net:
+        class_path: salt.models.GlobalAttentionPooling
+        init_args: { input_size: *out_dim }
+
+      tasks:
+        class_path: torch.nn.ModuleList
+        init_args:
+          modules:
+            - class_path: salt.models.ClassificationTask
+              init_args:
+                name: jets_classification
+                input_name: jets
+                label: flavour_label
+                loss:
+                  class_path: torch.nn.CrossEntropyLoss
+                  init_args: { weight: [2.0, 2.0, 2.0, 1.0, 6.25] }
+                dense_config: &task_dense_config
+                  input_size: *out_dim
+                  output_size: 5
+                  hidden_layers: [128, 64, 32]
+                  activation: *activation
+
+            - class_path: salt.models.ClassificationTask
+              init_args:
+                name: track_origin
+                input_name: tracks
+                label: ftagTruthOriginLabel
+                weight: 0.5
+                loss:
+                  class_path: torch.nn.CrossEntropyLoss
+                  init_args:
+                    weight: [3.92, 83.21, 1.0, 10.22, 7.11, 7.88, 62.91, 19.42]
+                dense_config:
+                  <<: *task_dense_config
+                  output_size: 8
+                  context_size: *out_dim
+
+            - class_path: salt.models.VertexingTask
+              init_args:
+                name: track_vertexing
+                input_name: tracks
+                label: ftagTruthVertexIndex
+                weight: 1.5
+                loss:
+                  class_path: torch.nn.BCEWithLogitsLoss
+                  init_args: { reduction: none }
+                dense_config:
+                  <<: *task_dense_config
+                  input_size: 256
+                  output_size: 1
+                  context_size: *out_dim
+
+data:
+  variables:
+    jets:
+      - pt_btagJes
+      - eta_btagJes
+    tracks:
+      - d0
+      - z0SinTheta
+      - dphi
+      - deta
+      - qOverP
+      - IP3D_signed_d0_significance
+      - IP3D_signed_z0_significance
+      - phiUncertainty
+      - thetaUncertainty
+      - qOverPUncertainty
+      - numberOfPixelHits
+      - numberOfSCTHits
+      - numberOfInnermostPixelLayerHits
+      - numberOfNextToInnermostPixelLayerHits
+      - numberOfInnermostPixelLayerSharedHits
+      - numberOfInnermostPixelLayerSplitHits
+      - numberOfPixelSharedHits
+      - numberOfPixelSplitHits
+      - numberOfSCTSharedHits
+      #- numberOfTRTHits
+      #- leptonID
+
+  train_file: /nfs/dust/atlas/user/nkumari/UPP_latest/umami-preprocessing/upp/configs/prep/output/pp_output_train.h5
+  val_file: /nfs/dust/atlas/user/nkumari/UPP_latest/umami-preprocessing/upp/configs/prep/output/pp_output_val.h5
+  norm_dict: /nfs/dust/atlas/user/nkumari/UPP_latest/umami-preprocessing/upp/configs/prep/output/norm_dict.yaml
+  class_dict: /nfs/dust/atlas/user/nkumari/UPP_latest/umami-preprocessing/upp/configs/prep/output/class_dict.yaml
+
+  batch_size: 4000
+  num_workers: 40
+
+trainer:
+  max_epochs: 40
+  accelerator: gpu
+  devices: 2
+  precision: 16-mixed
-- 
GitLab


From 7784ad79ea03d0d613b32fba631a68af0f2bb038 Mon Sep 17 00:00:00 2001
From: Nikita Pond <nikita.ivvan.pond@cern.ch>
Date: Wed, 3 Jul 2024 12:43:16 +0100
Subject: [PATCH 23/30] A hopefully working onnx model for MF

---
 salt/to_onnx.py          | 41 +++++++++---------------
 salt/utils/mask_utils.py | 69 ++++++++++++++++++++++------------------
 2 files changed, 53 insertions(+), 57 deletions(-)

diff --git a/salt/to_onnx.py b/salt/to_onnx.py
index b848efed..15922529 100644
--- a/salt/to_onnx.py
+++ b/salt/to_onnx.py
@@ -21,7 +21,7 @@ from salt.utils.union_find import get_node_assignment_jit
 from salt.utils.cli import SaltCLI
 from salt.utils.inputs import inputs_sep_no_pad, inputs_sep_with_pad
 from salt.utils.union_find import get_node_assignment
-from salt.utils.mask_utils import indices_from_mask
+from salt.utils.mask_utils import masks_to_index
 from salt.utils.configs import MaskformerConfig
 torch.manual_seed(42)
 # https://gitlab.cern.ch/atlas/athena/-/blob/master/PhysicsAnalysis/JetTagging/FlavorTagDiscriminants/Root/DataPrepUtilities.cxx
@@ -102,23 +102,23 @@ def get_probs(outputs: Tensor):
 
 
 def get_maskformer_outputs(objects):
-    print(objects.keys())
-    print(objects['class_probs'].shape)
+
     # Convert the (N,M) -> (M,) mask indices
     masks = objects['masks']
     class_probs = objects['class_probs']
     regression = objects['regression']
     object_leading = objects['regression']
 
-    # TODO not enforce == 2
+    # If the null prob is the highest prob, then assume this is a null object
     null_preds = class_probs[:, :, -1] > 0.5
     if not null_preds.any():
-        # If we have no predicted objects, we return arange(0,40) for vertex index, and
-        # NaN (check?) for regression values
-
+        # TODO don't enforce 5 objects or 40 tracks
         return torch.ones((1,5))*torch.nan, torch.arange(40), class_probs, torch.ones((1,5, 5))*torch.nan
+    
+    # Set default values for null predictions
     object_leading[null_preds] = -999
     regression[null_preds] = np.nan
+
     # Define the leading object as the one with the highest regression[0] value 
     # in vertexing case, this is the pT
     order = torch.argsort(object_leading[:,:, 0], descending=True)
@@ -130,12 +130,11 @@ def get_maskformer_outputs(objects):
     class_probs = torch.gather(class_probs, 1, order.unsqueeze(-1).expand(-1, -1, class_probs.size(-1)))
     regression = torch.gather(regression, 1, order.unsqueeze(-1).expand(-1, -1, regression.size(-1)))
     # Convert our masks (N,M), now in pT order, to be (M,) indices
-    object_indices = indices_from_mask(masks)
-    print(regression[:, :, 0])
+    object_indices = masks_to_index(masks)
     # Return the leading regression level variables to be stored at global-level
     leading_regression = regression[:, 0]
 
-    return leading_regression, masks, class_probs, regression
+    return leading_regression, object_indices, class_probs, regression
 
 
 class ONNXModel(ModelWrapper):
@@ -195,9 +194,7 @@ class ONNXModel(ModelWrapper):
             outputs += [
                 f"{self.model_name}_leading_{self.object}_{v}"
                 for v in regression_task[0].targets]
-            outputs += [f"{self.model_name}_{self.object}_index"]
-            # outputs += [f"{self.model_name}_{self.object}_class"]
-            # outputs += [f"{self.model_name}_{self.object}_regression"]
+            outputs += [f"{self.model_name}_{self.object}Index"]
 
         return outputs
 
@@ -229,10 +226,6 @@ class ONNXModel(ModelWrapper):
         # forward pass
         outputs = super().forward({self.global_object: jets, self.const: tracks}, None)[0]
 
-        # get class probabilities
-        # onnx_outputs = get_probs(
-        #     outputs[self.global_object][f"{self.global_object}_classification"]
-        # )
         onnx_outputs = get_probs(
                 outputs[self.global_object][f"{self.global_object}_classification"]
         ) if self.has_global_task else ()
@@ -248,31 +241,27 @@ class ONNXModel(ModelWrapper):
             if "track_vertexing" in track_outs:
                 pad_mask = torch.zeros(tracks.shape[:-1], dtype=torch.bool)
                 edge_scores = track_outs["track_vertexing"]
-                print(edge_scores.shape, flush=True)
                 vertex_indices = get_node_assignment_jit(edge_scores, pad_mask)
                 vertex_list = mask_fill_flattened(vertex_indices, pad_mask)
                 onnx_outputs += (vertex_list.reshape(-1).char(),)
+
         if self.object:
-            print('LOL'*100)
             assert 'objects' in outputs, 'No MF objects in outputs'
-            print(outputs['objects'].keys())
-            print(outputs['objects']['masks'])
-            print(indices_from_mask(outputs['objects']['masks']))
-            print(outputs['objects']['class_probs'])
             regression_task = [t for t in self.model.tasks if t.input_name == 'objects' and t.name == 'regression']
             assert len(regression_task) == 1, "Object outputs require a regression task"
             regression_task = regression_task[0]
 
+            # Get the (hopefully) correctly (un)scaled regression predictions
             for i, t in enumerate(regression_task.targets):
                 unscaled_preds = regression_task.scaler.inverse(t, outputs['objects']["regression"][:, :, i])
                 outputs['objects']['regression'][:, :, i] = unscaled_preds
-            # outputs['objects']['regression'] = regression_task.run_inference
+
             # Extract the mf outputs
-            leading_reg, masks, class_probs, regression = get_maskformer_outputs(outputs['objects'])
+            leading_reg, indices, class_probs, regression = get_maskformer_outputs(outputs['objects'])
             
             for r in leading_reg[0]:
                 onnx_outputs += (r,)
-            onnx_outputs += (indices_from_mask(masks).reshape(-1).char(),)
+            onnx_outputs += (indices.reshape(-1).char(),)
             # onnx_outputs += (torch.argmax(class_probs, dim=-1).char(),)
             # onnx_outputs += (regression,)
         print(onnx_outputs)
diff --git a/salt/utils/mask_utils.py b/salt/utils/mask_utils.py
index 8713fccc..11488f2f 100644
--- a/salt/utils/mask_utils.py
+++ b/salt/utils/mask_utils.py
@@ -76,26 +76,24 @@ def mask_from_indices(indices: Tensor, num_masks: int | None = None) -> BoolTens
     return mask
 
 
-def indices_from_mask(mask: BoolTensor, noindex: int = -1) -> Tensor:
-    """Convert a sparse bool mask to a dense index tensor.
-
-    Indices are arbitrary and start from 0.
+def masks_to_index(mask: BoolTensor, noindex: int = -1, first_invalid=None):
+    """
+    Converts a sparse bool mask to a dense index tensor, where any
+    index NOT part of a mask is given an increasing index value.
 
     Examples
     --------
-    [[True, False, False], [False, True, True]] -> [0, 1, 1]
+    [
+        [True, True, False, False, False, False],
+        [False, False, True, False, False, True]
+    ] -> [0, 0, 1, 2, 3, 1]
 
     Parameters
     ----------
     mask : BoolTensor
         The sparse mask
     noindex : int
-        The value to use for no index
 
-    Returns
-    -------
-    Tensor
-        The dense indices
     """
     mask = torch.as_tensor(mask)
     kwargs = {"dtype": torch.long, "device": mask.device}
@@ -103,27 +101,36 @@ def indices_from_mask(mask: BoolTensor, noindex: int = -1) -> Tensor:
         indices = torch.ones(mask.shape[-1], **kwargs) * noindex
         nonzero_idx = torch.where(mask)
         indices[nonzero_idx[1]] = nonzero_idx[0]
-    elif mask.ndim == 3:
-        indices = torch.ones((mask.shape[0], mask.shape[-1]), **kwargs) * noindex
-        nonzero_idx = torch.where(mask)
-        indices[nonzero_idx[0], nonzero_idx[2]] = nonzero_idx[1]
-    else:
-        raise ValueError("mask must be 2D for single sample or 3D for batch")
-
-    # ensure indices start from 0
-    idx = indices >= 0
-    # Ensure all negative indices are +ve so we don't include them in the min,
-    # this is due to onnx
-    indices = indices + idx*999
-    
-    mindices = indices
-    
-    # d_indices = 
-    indices -= mindices
-
-    indices[indices < 0] = noindex
-
-    return indices
+        # The idx of all indices that are part of a mask
+        if mask.shape[-1] == 0:
+            return torch.arange(mask.shape[-1], **kwargs)
+       
+        idx_exist = indices >= 0
+        if idx_exist.any():
+            min_val = torch.min(indices[idx_exist]).item()
+            indices[idx_exist] = indices[idx_exist] - min_val
+            if first_invalid:
+                max_val = first_invalid
+            else:
+                max_val = torch.max(indices[idx_exist]).item()
+        else:
+            min_val = 0  # Default value if the tensor is empty
+            max_val = 0
+
+        neg_ind = torch.where(indices < 0)[0]
+        if len(neg_ind) == 0:
+            return indices
+        replacement_vals = torch.arange(max_val + 1, max_val + 1 + neg_ind.shape[0])
+        indices[neg_ind] = replacement_vals
+        return indices
+    if mask.ndim == 3:
+        # Not a fan, but CBA to do this properly for now as it's only used
+        # by the onnx model, so speed isn't an issue
+        indices = torch.full((mask.shape[0], mask.shape[-1]), noindex, **kwargs)
+        for i in range(mask.shape[0]):
+            indices[i] = masks_to_index(mask[i])
+        return indices
+    raise ValueError("mask must be 2D for single sample or 3D for batch")
 
 
 def sanitise_mask(
-- 
GitLab


From 3c700bd5b6bfa6db25a5c35510f77705872c7429 Mon Sep 17 00:00:00 2001
From: Nikita Pond <nikita.ivvan.pond@cern.ch>
Date: Wed, 3 Jul 2024 13:00:30 +0100
Subject: [PATCH 24/30] fix

---
 salt/to_onnx.py | 82 ++++++++++++++++++++++++++++++++++---------------
 1 file changed, 57 insertions(+), 25 deletions(-)

diff --git a/salt/to_onnx.py b/salt/to_onnx.py
index 15922529..c2b7d6b5 100644
--- a/salt/to_onnx.py
+++ b/salt/to_onnx.py
@@ -102,39 +102,72 @@ def get_probs(outputs: Tensor):
 
 
 def get_maskformer_outputs(objects):
-
+    print(objects.keys())
+    print(objects["class_probs"].shape)
     # Convert the (N,M) -> (M,) mask indices
-    masks = objects['masks']
-    class_probs = objects['class_probs']
-    regression = objects['regression']
-    object_leading = objects['regression']
-
-    # If the null prob is the highest prob, then assume this is a null object
-    null_preds = class_probs[:, :, -1] > 0.5
-    if not null_preds.any():
-        # TODO don't enforce 5 objects or 40 tracks
-        return torch.ones((1,5))*torch.nan, torch.arange(40), class_probs, torch.ones((1,5, 5))*torch.nan
-    
-    # Set default values for null predictions
+    masks = objects["masks"]
+    class_probs = objects["class_probs"]
+    regression = objects["regression"]
+    object_leading = objects["regression"]
+    n_tracks = masks.shape[-1]
+    n_obj = masks.shape[1]
+    n_reg = regression.shape[-1]
+
+    # TODO not enforce == 2
+    if n_tracks == 0:
+        print("lol?")
+        return (
+            torch.ones((1, n_obj)) * torch.nan,
+            None,
+            class_probs,
+            torch.ones((1, n_obj, n_reg)) * torch.nan,
+        )
+    print(class_probs)
+    null_preds = class_probs[:, :, -1] > 0.9
+    print("CLASS PROBS")
+    print(class_probs)
+    # if not null_preds.any():
+    if False:
+        # If we have no predicted objects, we return arange(0,40) for vertex index, and
+        # NaN (check?) for regression values
+
+        return (
+            torch.ones((1, n_obj)) * torch.nan,
+            torch.zeros((1, n_obj, n_tracks), dtype=torch.bool),
+            class_probs,
+            torch.ones((1, n_obj, n_reg)) * torch.nan,
+        )
+    print("lol?", masks.shape)
+    print(null_preds.shape)
+    masks = masks.sigmoid() > 0.5
     object_leading[null_preds] = -999
     regression[null_preds] = np.nan
-
-    # Define the leading object as the one with the highest regression[0] value 
+    expanded_null = null_preds.unsqueeze(-1).expand(-1, -1, masks.size(-1))
+    print("these shapes!", null_preds.shape, masks.shape, expanded_null.shape)
+    # masks[expanded_null] = False
+    # Define the leading object as the one with the highest regression[0] value
     # in vertexing case, this is the pT
-    order = torch.argsort(object_leading[:,:, 0], descending=True)
-    
-    
+    order = torch.argsort(object_leading[:, :, 0], descending=True)
+    order_expanded = order.unsqueeze(-1).expand(-1, -1, masks.size(-1))
+
+    print("pre-re-order", masks.shape)
+
     # Use gather to reorder tensors along a specific dimension
     # TODO check this is working as expected
-    masks = torch.gather(masks, 1, order.unsqueeze(-1).expand(-1, -1, masks.size(-1)))
-    class_probs = torch.gather(class_probs, 1, order.unsqueeze(-1).expand(-1, -1, class_probs.size(-1)))
-    regression = torch.gather(regression, 1, order.unsqueeze(-1).expand(-1, -1, regression.size(-1)))
+    masks = torch.gather(masks, 1, order_expanded)
+    class_probs = torch.gather(
+        class_probs, 1, order.unsqueeze(-1).expand(-1, -1, class_probs.size(-1))
+    )
+    regression = torch.gather(
+        regression, 1, order.unsqueeze(-1).expand(-1, -1, regression.size(-1))
+    )
     # Convert our masks (N,M), now in pT order, to be (M,) indices
-    object_indices = masks_to_index(masks)
+    print("post-re-order", masks.shape)
     # Return the leading regression level variables to be stored at global-level
     leading_regression = regression[:, 0]
 
-    return leading_regression, object_indices, class_probs, regression
+    obj_indices = masks_to_index(masks)
+    return leading_regression, obj_indices, class_probs, regression
 
 
 class ONNXModel(ModelWrapper):
@@ -262,8 +295,7 @@ class ONNXModel(ModelWrapper):
             for r in leading_reg[0]:
                 onnx_outputs += (r,)
             onnx_outputs += (indices.reshape(-1).char(),)
-            # onnx_outputs += (torch.argmax(class_probs, dim=-1).char(),)
-            # onnx_outputs += (regression,)
+ 
         print(onnx_outputs)
         return onnx_outputs
 
-- 
GitLab


From f8b67375a45d1991cf4044b4657192f350e9afba Mon Sep 17 00:00:00 2001
From: Nikita Pond <nikita.ivvan.pond@cern.ch>
Date: Wed, 3 Jul 2024 13:04:19 +0100
Subject: [PATCH 25/30] cleanup

---
 salt/to_onnx.py | 35 ++++++++++++-----------------------
 1 file changed, 12 insertions(+), 23 deletions(-)

diff --git a/salt/to_onnx.py b/salt/to_onnx.py
index c2b7d6b5..5b2a92fc 100644
--- a/salt/to_onnx.py
+++ b/salt/to_onnx.py
@@ -102,8 +102,7 @@ def get_probs(outputs: Tensor):
 
 
 def get_maskformer_outputs(objects):
-    print(objects.keys())
-    print(objects["class_probs"].shape)
+
     # Convert the (N,M) -> (M,) mask indices
     masks = objects["masks"]
     class_probs = objects["class_probs"]
@@ -113,45 +112,35 @@ def get_maskformer_outputs(objects):
     n_obj = masks.shape[1]
     n_reg = regression.shape[-1]
 
-    # TODO not enforce == 2
+    # If we have a jet with no tracks, return dummy (NaN) values
     if n_tracks == 0:
-        print("lol?")
         return (
             torch.ones((1, n_obj)) * torch.nan,
             None,
             class_probs,
             torch.ones((1, n_obj, n_reg)) * torch.nan,
         )
-    print(class_probs)
+    # For testing purposes - this will likely blow up our fake rate
     null_preds = class_probs[:, :, -1] > 0.9
-    print("CLASS PROBS")
-    print(class_probs)
-    # if not null_preds.any():
-    if False:
-        # If we have no predicted objects, we return arange(0,40) for vertex index, and
-        # NaN (check?) for regression values
-
+    if not null_preds.any():
+        # If we have no predicted objects, we return dummy values
         return (
             torch.ones((1, n_obj)) * torch.nan,
             torch.zeros((1, n_obj, n_tracks), dtype=torch.bool),
             class_probs,
             torch.ones((1, n_obj, n_reg)) * torch.nan,
         )
-    print("lol?", masks.shape)
-    print(null_preds.shape)
+
     masks = masks.sigmoid() > 0.5
     object_leading[null_preds] = -999
     regression[null_preds] = np.nan
     expanded_null = null_preds.unsqueeze(-1).expand(-1, -1, masks.size(-1))
-    print("these shapes!", null_preds.shape, masks.shape, expanded_null.shape)
-    # masks[expanded_null] = False
+
     # Define the leading object as the one with the highest regression[0] value
     # in vertexing case, this is the pT
     order = torch.argsort(object_leading[:, :, 0], descending=True)
     order_expanded = order.unsqueeze(-1).expand(-1, -1, masks.size(-1))
 
-    print("pre-re-order", masks.shape)
-
     # Use gather to reorder tensors along a specific dimension
     # TODO check this is working as expected
     masks = torch.gather(masks, 1, order_expanded)
@@ -161,12 +150,12 @@ def get_maskformer_outputs(objects):
     regression = torch.gather(
         regression, 1, order.unsqueeze(-1).expand(-1, -1, regression.size(-1))
     )
-    # Convert our masks (N,M), now in pT order, to be (M,) indices
-    print("post-re-order", masks.shape)
-    # Return the leading regression level variables to be stored at global-level
+    # Define the leading object as that with the highest [0] (pt for vertexing)
     leading_regression = regression[:, 0]
-
+    
+    # Convert our masks (N,M), now in pT order, to be (M,) indices
     obj_indices = masks_to_index(masks)
+
     return leading_regression, obj_indices, class_probs, regression
 
 
@@ -296,7 +285,7 @@ class ONNXModel(ModelWrapper):
                 onnx_outputs += (r,)
             onnx_outputs += (indices.reshape(-1).char(),)
  
-        print(onnx_outputs)
+
         return onnx_outputs
 
 
-- 
GitLab


From c4e910e6656b730c5681825ec3143bdc7452afb2 Mon Sep 17 00:00:00 2001
From: Nikita Pond <nikita.ivvan.pond@cern.ch>
Date: Wed, 3 Jul 2024 13:40:26 +0100
Subject: [PATCH 26/30] change back indices_from_mask name

---
 salt/to_onnx.py          | 84 +++++++++++++++++++++++-----------------
 salt/utils/mask_utils.py |  7 ++--
 2 files changed, 51 insertions(+), 40 deletions(-)

diff --git a/salt/to_onnx.py b/salt/to_onnx.py
index 5b2a92fc..4d03f6ce 100644
--- a/salt/to_onnx.py
+++ b/salt/to_onnx.py
@@ -16,13 +16,11 @@ from tqdm import tqdm
 from salt.models.task import mask_fill_flattened
 from salt.models.transformer_v2 import change_attn_backends
 from salt.modelwrapper import ModelWrapper
+from salt.utils.configs import MaskformerConfig
 from salt.utils.inputs import inputs_sep_no_pad, inputs_sep_with_pad
+from salt.utils.mask_utils import indices_from_mask
 from salt.utils.union_find import get_node_assignment_jit
-from salt.utils.cli import SaltCLI
-from salt.utils.inputs import inputs_sep_no_pad, inputs_sep_with_pad
-from salt.utils.union_find import get_node_assignment
-from salt.utils.mask_utils import masks_to_index
-from salt.utils.configs import MaskformerConfig
+
 torch.manual_seed(42)
 # https://gitlab.cern.ch/atlas/athena/-/blob/master/PhysicsAnalysis/JetTagging/FlavorTagDiscriminants/Root/DataPrepUtilities.cxx
 TRACK_SELECTIONS = [
@@ -102,7 +100,6 @@ def get_probs(outputs: Tensor):
 
 
 def get_maskformer_outputs(objects):
-
     # Convert the (N,M) -> (M,) mask indices
     masks = objects["masks"]
     class_probs = objects["class_probs"]
@@ -112,7 +109,7 @@ def get_maskformer_outputs(objects):
     n_obj = masks.shape[1]
     n_reg = regression.shape[-1]
 
-    # If we have a jet with no tracks, 
+    # If we have a jet with no tracks,
     if n_tracks == 0:
         return (
             torch.ones((1, n_obj)) * torch.nan,
@@ -152,15 +149,22 @@ def get_maskformer_outputs(objects):
     )
     # Define the leading object as that with the highest [0] (pt for vertexing)
     leading_regression = regression[:, 0]
-    
+
     # Convert our masks (N,M), now in pT order, to be (M,) indices
-    obj_indices = masks_to_index(masks)
+    obj_indices = indices_from_mask(masks)
 
     return leading_regression, obj_indices, class_probs, regression
 
 
 class ONNXModel(ModelWrapper):
-    def __init__(self, name: str | None = None, include_aux: bool = False, object_name : str | None = None, mf_config : dict | None =None, **kwargs) -> None:
+    def __init__(
+        self,
+        name: str | None = None,
+        include_aux: bool = False,
+        object_name: str | None = None,
+        mf_config: dict | None = None,
+        **kwargs,
+    ) -> None:
         super().__init__(**kwargs)
         self.name = name if name else self.name
         assert len(self.model.init_nets) == 1, "Multi input ONNX models are not yet supported."
@@ -175,12 +179,14 @@ class ONNXModel(ModelWrapper):
                 "class_label": self.mf_config.object.class_label,
                 "label_map": [f"p{name}" for name in self.mf_config.object.class_names],
             }
-            print('OBJECT PARAMS', self.object_params)
+            print("OBJECT PARAMS", self.object_params)
         self.input_names = ["jet_features", "track_features"]
         jets, tracks = inputs_sep_no_pad(
             1, 40, self.input_dims[self.global_object], self.input_dims[self.const]
         )
-        self.has_global_task = len([t for t in self.model.tasks if t.input_name == self.global_object]) > 0
+        self.has_global_task = (
+            len([t for t in self.model.tasks if t.input_name == self.global_object]) > 0
+        )
         self.example_input_array = jets, tracks.squeeze(0)  # used for the tracing during export
 
     @property
@@ -209,13 +215,14 @@ class ONNXModel(ModelWrapper):
                 out_name = f"{self.model_name}_VertexIndex"
                 outputs.append(out_name)
         if self.object:
-            regression_task = [t for t in self.model.tasks 
-                               if t.input_name == 'objects' and t.name == 'regression']
+            regression_task = [
+                t for t in self.model.tasks if t.input_name == "objects" and t.name == "regression"
+            ]
             assert len(regression_task) == 1, "Object outputs require a regression task"
             # First we append the leading jet regression variables
             outputs += [
-                f"{self.model_name}_leading_{self.object}_{v}"
-                for v in regression_task[0].targets]
+                f"{self.model_name}_leading_{self.object}_{v}" for v in regression_task[0].targets
+            ]
             outputs += [f"{self.model_name}_{self.object}Index"]
 
         return outputs
@@ -248,9 +255,11 @@ class ONNXModel(ModelWrapper):
         # forward pass
         outputs = super().forward({self.global_object: jets, self.const: tracks}, None)[0]
 
-        onnx_outputs = get_probs(
-                outputs[self.global_object][f"{self.global_object}_classification"]
-        ) if self.has_global_task else ()
+        onnx_outputs = (
+            get_probs(outputs[self.global_object][f"{self.global_object}_classification"])
+            if self.has_global_task
+            else ()
+        )
 
         # add aux outputs
         if self.include_aux:
@@ -268,23 +277,28 @@ class ONNXModel(ModelWrapper):
                 onnx_outputs += (vertex_list.reshape(-1).char(),)
 
         if self.object:
-            assert 'objects' in outputs, 'No MF objects in outputs'
-            regression_task = [t for t in self.model.tasks if t.input_name == 'objects' and t.name == 'regression']
+            assert "objects" in outputs, "No MF objects in outputs"
+            regression_task = [
+                t for t in self.model.tasks if t.input_name == "objects" and t.name == "regression"
+            ]
             assert len(regression_task) == 1, "Object outputs require a regression task"
             regression_task = regression_task[0]
 
             # Get the (hopefully) correctly (un)scaled regression predictions
             for i, t in enumerate(regression_task.targets):
-                unscaled_preds = regression_task.scaler.inverse(t, outputs['objects']["regression"][:, :, i])
-                outputs['objects']['regression'][:, :, i] = unscaled_preds
+                unscaled_preds = regression_task.scaler.inverse(
+                    t, outputs["objects"]["regression"][:, :, i]
+                )
+                outputs["objects"]["regression"][:, :, i] = unscaled_preds
 
             # Extract the mf outputs
-            leading_reg, indices, class_probs, regression = get_maskformer_outputs(outputs['objects'])
-            
+            leading_reg, indices, class_probs, regression = get_maskformer_outputs(
+                outputs["objects"]
+            )
+
             for r in leading_reg[0]:
                 onnx_outputs += (r,)
             onnx_outputs += (indices.reshape(-1).char(),)
- 
 
         return onnx_outputs
 
@@ -302,7 +316,7 @@ def compare_output(pt_model, onnx_session, include_aux, n_track=40):
         [p.detach().numpy() for p in get_probs(outputs_pt["jets"]["jets_classification"])]
         if "jets" in outputs_pt
         else []
-        )
+    )
 
     inputs_onnx = {
         "jet_features": jets.numpy(),
@@ -342,7 +356,6 @@ def compare_output(pt_model, onnx_session, include_aux, n_track=40):
 
     # test vertexing
     if include_aux and "track_vertexing" in outputs_pt["tracks"]:
-        
         pred_pt_scores = outputs_pt["tracks"]["track_vertexing"].detach()
         pred_pt_indices = get_node_assignment_jit(pred_pt_scores, pad_mask)
         pred_pt_vtx = mask_fill_flattened(pred_pt_indices, pad_mask)
@@ -389,7 +402,7 @@ def main(args=None):
         config_path = args.ckpt_path.parents[1] / "config.yaml"
         assert config_path.is_file(), f"Could not find config file at {config_path}"
 
-    with open(config_path, 'r') as f:
+    with open(config_path) as f:
         config = yaml.safe_load(f)
 
     # instantiate pytorch and wrapper models
@@ -397,18 +410,19 @@ def main(args=None):
         warnings.simplefilter("ignore")
 
         pt_model = ModelWrapper.load_from_checkpoint(
-            args.ckpt_path, map_location=torch.device("cpu"),
+            args.ckpt_path,
+            map_location=torch.device("cpu"),
             norm_config=config["model"]["norm_config"],
         )
         pt_model.eval()
         pt_model.float()
 
         if args.object_name:
-            with open(config_path, 'r') as f:
+            with open(config_path) as f:
                 config = yaml.safe_load(f)
-            mf_config = config['data'].get('mf_config')
+            mf_config = config["data"].get("mf_config")
             if not mf_config:
-                raise ValueError('No mf_config in config')
+                raise ValueError("No mf_config in config")
         else:
             mf_config = {}
         onnx_model = ONNXModel.load_from_checkpoint(
@@ -419,7 +433,6 @@ def main(args=None):
             mf_config=mf_config,
             map_location=torch.device("cpu"),
             norm_config=config["model"]["norm_config"],
-
         )
         print("OUTPUTS", onnx_model.output_names)
         onnx_model.eval()
@@ -444,7 +457,6 @@ def main(args=None):
         input_names=onnx_model.input_names,
         output_names=onnx_model.output_names,
         dynamic_axes=onnx_model.dynamic_axes,
-        
     )
 
     # add metadata
@@ -510,7 +522,7 @@ def add_metadata(
 
     # write metadata as json string
     metadata = {"gnn_config": json.dumps(metadata)}
-    
+
     for k, v in metadata.items():
         meta = onnx_model.metadata_props.add()
         meta.key = k
diff --git a/salt/utils/mask_utils.py b/salt/utils/mask_utils.py
index 11488f2f..50527172 100644
--- a/salt/utils/mask_utils.py
+++ b/salt/utils/mask_utils.py
@@ -76,9 +76,8 @@ def mask_from_indices(indices: Tensor, num_masks: int | None = None) -> BoolTens
     return mask
 
 
-def masks_to_index(mask: BoolTensor, noindex: int = -1, first_invalid=None):
-    """
-    Converts a spares bool mask to a dense index tensor, where any
+def indices_from_mask(mask: BoolTensor, noindex: int = -1, first_invalid=None):
+    """Converts a spares bool mask to a dense index tensor, where any
     index NOT part of a mask is given an increasing index value.
 
     Examples
@@ -104,7 +103,7 @@ def masks_to_index(mask: BoolTensor, noindex: int = -1, first_invalid=None):
         # The idx of all indices that are part of a mask
         if mask.shape[-1] == 0:
             return torch.arange(mask.shape[-1], **kwargs)
-       
+
         idx_exist = indices >= 0
         if idx_exist.any():
             min_val = torch.min(indices[idx_exist]).item()
-- 
GitLab


From 210df8a9a2503ddb5abaa9fec9ec2b4ff78c1733 Mon Sep 17 00:00:00 2001
From: Nikita Pond <nikita.ivvan.pond@cern.ch>
Date: Wed, 3 Jul 2024 13:59:34 +0100
Subject: [PATCH 27/30] linting

---
 salt/models/maskformer.py   | 19 ++++++-------------
 salt/models/task.py         |  4 +---
 salt/tests/test_masks.py    |  8 ++++----
 salt/tests/test_pipeline.py |  4 ++--
 salt/utils/configs.py       |  2 +-
 salt/utils/mask_utils.py    |  7 ++++---
 6 files changed, 18 insertions(+), 26 deletions(-)

diff --git a/salt/models/maskformer.py b/salt/models/maskformer.py
index c8c2364f..91b8efd6 100644
--- a/salt/models/maskformer.py
+++ b/salt/models/maskformer.py
@@ -101,7 +101,6 @@ class MaskDecoder(nn.Module):
         # MF only supports one input, if we have multiple then we have no way of knowing
         # what section of the embedding relates to objects we want to generate masks for
         if isinstance(pad_mask, dict):
-        
             assert len(pad_mask) == 1, "Maskformer only supports one input."
             pad_mask = next(iter(pad_mask.values()))
 
@@ -110,9 +109,11 @@ class MaskDecoder(nn.Module):
         q = self.norm1(self.inital_q.expand(x.shape[0], -1, -1))
         x = self.norm2(x)
         xpad = torch.zeros((x.shape[0], 1, x.shape[-1]), device=x.device, dtype=x.dtype)
-        
+
         if pad_mask is not None:
-            padpad_mask = torch.zeros((pad_mask.shape[0], 1), device=pad_mask.device, dtype=pad_mask.dtype)
+            padpad_mask = torch.zeros(
+                (pad_mask.shape[0], 1), device=pad_mask.device, dtype=pad_mask.dtype
+            )
             pad_mask = torch.cat([pad_mask, padpad_mask], dim=1)
 
         x = torch.cat([x, xpad], dim=1)
@@ -125,12 +126,8 @@ class MaskDecoder(nn.Module):
             q, x = layer(q, x, kv_mask=pad_mask)
         mf_preds = self.get_preds(q, x, pad_mask)
 
-        preds["objects"] = {
-            "embed": q, 
-            "x": x[:, :-1, :], 
-            **self.get_preds(q, x, pad_mask)
-            }
-        preds["objects"]["masks"] = preds["objects"]["masks"][:,:,:-1]
+        preds["objects"] = {"embed": q, "x": x[:, :-1, :], **self.get_preds(q, x, pad_mask)}
+        preds["objects"]["masks"] = preds["objects"]["masks"][:, :, :-1]
         if self.aux_loss:
             preds["intermediate_outputs"] = intermediate_outputs
 
@@ -145,13 +142,11 @@ def get_masks(x: Tensor, q: Tensor, mask_net: nn.Module, input_pad_mask: Tensor
     pred_masks = torch.einsum("bqe,ble->bql", mask_tokens, x)
 
     if input_pad_mask is not None:
-
         t = input_pad_mask.unsqueeze(1).expand_as(pred_masks)
         pred_masks[input_pad_mask.unsqueeze(1).expand_as(pred_masks)] = torch.finfo(
             pred_masks.dtype
         ).min
 
-
     return pred_masks
 
 
@@ -195,8 +190,6 @@ class MaskDecoderLayer(nn.Module):
 
             attn_mask = attn_mask | newmask.bool()
 
-
-
         # update queries with cross attention from nodes
         q = q + self.q_ca(q, kv=kv, kv_mask=kv_mask, attn_mask=attn_mask)
 
diff --git a/salt/models/task.py b/salt/models/task.py
index 5ad13820..8e157868 100644
--- a/salt/models/task.py
+++ b/salt/models/task.py
@@ -331,9 +331,7 @@ class RegressionTask(RegressionTaskBase):
         loss = None
         if targets is not None:
             loss = self.nan_loss(preds, targets) * self.weight
-        
 
-        
         return preds, loss
 
     def run_inference(self, preds: Tensor, targets_dict: Mapping, precision: str = "f4"):
@@ -346,7 +344,7 @@ class RegressionTask(RegressionTaskBase):
             for i in range(len(self.norm_params["mean"])):
                 preds[:, i] = preds[:, i] * self.norm_params["std"][i] + self.norm_params["mean"][i]
         elif self.scaler is not None:
-            print('Are we hitting this>'*1000)
+            print("Are we hitting this>" * 1000)
             for i in range(len(self.targets)):
                 preds[:, i] = self.scaler.inverse(self.targets[i], preds[:, i])
         dtype = np.dtype([(f"{self.name}_{t}", precision) for t in self.targets])
diff --git a/salt/tests/test_masks.py b/salt/tests/test_masks.py
index 0ec7603a..ff92de44 100644
--- a/salt/tests/test_masks.py
+++ b/salt/tests/test_masks.py
@@ -67,13 +67,13 @@ def test_indices_from_mask_3d(mask_3d, indices_2d):
 
 
 def test_indices_from_mask_empty():
-    mask = torch.tensor([[False, False], [False, False], [False, True]])
+    mask = torch.tensor([[False, False,], [False, False], [False, False]])
     indices = indices_from_mask(mask)
-    assert torch.all(indices == torch.tensor([-1, 0]))
+    assert torch.all(indices == torch.tensor([1, 2]))
 
     mask = torch.tensor([
-        [[False, False], [False, False], [False, True]],
+        [[False, False], [False, False], [False, False]],
         [[False, False], [False, False], [False, False]],
     ])
     indices = indices_from_mask(mask)
-    assert torch.all(indices == torch.tensor([[-1, 0], [-1, -1]]))
+    assert torch.all(indices == torch.tensor([[1, 2], [1, 2]]))
diff --git a/salt/tests/test_pipeline.py b/salt/tests/test_pipeline.py
index 757bebf9..a2c873ff 100644
--- a/salt/tests/test_pipeline.py
+++ b/salt/tests/test_pipeline.py
@@ -112,9 +112,9 @@ def run_onnx(train_dir, args=None):
     args += [f"--ckpt_path={ckpt_path}"]
     args += ["--track_selection=dipsLoose202102"]
 
-    if 'MaskFormer' in str(train_dir):
+    if "MaskFormer" in str(train_dir):
         args += ["-mf=vertexing"]
-    print('ONNX'*100)
+    print("ONNX" * 100)
     print(train_dir)
     # args += args
     to_onnx(args)
diff --git a/salt/utils/configs.py b/salt/utils/configs.py
index f8f60afd..43e07300 100644
--- a/salt/utils/configs.py
+++ b/salt/utils/configs.py
@@ -90,4 +90,4 @@ class MaskformerConfig:
         if isinstance(self.object, dict):
             self.object = MaskformerObjectConfig(**self.object)
         if isinstance(self.constituent, dict):
-            self.constituent = MaskformerObjectConfig(**self.constituent)
\ No newline at end of file
+            self.constituent = MaskformerObjectConfig(**self.constituent)
diff --git a/salt/utils/mask_utils.py b/salt/utils/mask_utils.py
index 50527172..22649e53 100644
--- a/salt/utils/mask_utils.py
+++ b/salt/utils/mask_utils.py
@@ -77,8 +77,9 @@ def mask_from_indices(indices: Tensor, num_masks: int | None = None) -> BoolTens
 
 
 def indices_from_mask(mask: BoolTensor, noindex: int = -1, first_invalid=None):
-    """Converts a spares bool mask to a dense index tensor, where any
-    index NOT part of a mask is given an increasing index value.
+    """
+    Converts a spares bool mask to a dense index tensor, where any
+    index NOT part of a mask is given an increasing index value. 
 
     Examples
     --------
@@ -127,7 +128,7 @@ def indices_from_mask(mask: BoolTensor, noindex: int = -1, first_invalid=None):
         # by the onnx model, so speed isn't an issue
         indices = torch.full((mask.shape[0], mask.shape[-1]), noindex, **kwargs)
         for i in range(mask.shape[0]):
-            indices[i] = masks_to_index(mask[i])
+            indices[i] = indices_from_mask(mask[i])
         return indices
     raise ValueError("mask must be 2D for single sample or 3D for batch")
 
-- 
GitLab


From 3ce478688005c9148b379e28c810fea40ba5284f Mon Sep 17 00:00:00 2001
From: Nikita Pond <nikita.ivvan.pond@cern.ch>
Date: Wed, 3 Jul 2024 14:00:54 +0100
Subject: [PATCH 28/30] more linting - giving up now

---
 salt/models/maskformer.py | 2 +-
 salt/tests/test_masks.py  | 9 ++++++++-
 salt/utils/mask_utils.py  | 5 ++---
 3 files changed, 11 insertions(+), 5 deletions(-)

diff --git a/salt/models/maskformer.py b/salt/models/maskformer.py
index 91b8efd6..ad6acec0 100644
--- a/salt/models/maskformer.py
+++ b/salt/models/maskformer.py
@@ -126,7 +126,7 @@ class MaskDecoder(nn.Module):
             q, x = layer(q, x, kv_mask=pad_mask)
         mf_preds = self.get_preds(q, x, pad_mask)
 
-        preds["objects"] = {"embed": q, "x": x[:, :-1, :], **self.get_preds(q, x, pad_mask)}
+        preds["objects"] = {"embed": q, "x": x[:, :-1, :], **mf_preds}
         preds["objects"]["masks"] = preds["objects"]["masks"][:, :, :-1]
         if self.aux_loss:
             preds["intermediate_outputs"] = intermediate_outputs
diff --git a/salt/tests/test_masks.py b/salt/tests/test_masks.py
index ff92de44..f2bfcce0 100644
--- a/salt/tests/test_masks.py
+++ b/salt/tests/test_masks.py
@@ -67,7 +67,14 @@ def test_indices_from_mask_3d(mask_3d, indices_2d):
 
 
 def test_indices_from_mask_empty():
-    mask = torch.tensor([[False, False,], [False, False], [False, False]])
+    mask = torch.tensor([
+        [
+            False,
+            False,
+        ],
+        [False, False],
+        [False, False],
+    ])
     indices = indices_from_mask(mask)
     assert torch.all(indices == torch.tensor([1, 2]))
 
diff --git a/salt/utils/mask_utils.py b/salt/utils/mask_utils.py
index 22649e53..04743fe8 100644
--- a/salt/utils/mask_utils.py
+++ b/salt/utils/mask_utils.py
@@ -77,9 +77,8 @@ def mask_from_indices(indices: Tensor, num_masks: int | None = None) -> BoolTens
 
 
 def indices_from_mask(mask: BoolTensor, noindex: int = -1, first_invalid=None):
-    """
-    Converts a spares bool mask to a dense index tensor, where any
-    index NOT part of a mask is given an increasing index value. 
+    """Converts a spares bool mask to a dense index tensor, where any
+    index NOT part of a mask is given an increasing index value.
 
     Examples
     --------
-- 
GitLab


From e34150e0a465b0d26e1ee86bf6daab643ed7fd6e Mon Sep 17 00:00:00 2001
From: Nikita Pond <nikita.ivvan.pond@cern.ch>
Date: Wed, 3 Jul 2024 16:01:15 +0100
Subject: [PATCH 29/30] correct mask index in evaluation

---
 salt/callbacks/predictionwriter.py | 11 +++++++++++
 salt/models/task.py                |  1 -
 2 files changed, 11 insertions(+), 1 deletion(-)

diff --git a/salt/callbacks/predictionwriter.py b/salt/callbacks/predictionwriter.py
index 4ecf2dec..865a161f 100644
--- a/salt/callbacks/predictionwriter.py
+++ b/salt/callbacks/predictionwriter.py
@@ -15,6 +15,7 @@ from salt.models.task import (
 )
 from salt.stypes import Vars
 from salt.utils.array_utils import join_structured_arrays, maybe_pad
+from salt.utils.mask_utils import indices_from_mask
 
 
 class PredictionWriter(Callback):
@@ -180,6 +181,8 @@ class PredictionWriter(Callback):
             for out in ["object_class_probs", "object_class_targets", "mask_logits", "tgt_masks"]:
                 if out not in self.outputs["objects"]:
                     self.outputs["objects"][out] = []
+            if "mask_index" not in self.outputs["tracks"]:
+                self.outputs["tracks"]["mask_index"] = []
 
             probs_dtype = np.dtype([(n, self.precision) for n in self.object_params["label_map"]])
             self.outputs["objects"]["object_class_probs"].append(
@@ -189,6 +192,14 @@ class PredictionWriter(Callback):
                 labels["objects"][self.object_params["class_label"]].cpu().numpy()
             )
             self.outputs["objects"]["mask_logits"].append(objects["masks"].cpu().float().numpy())
+            mask_indices = indices_from_mask(objects["masks"].cpu().sigmoid() > 0.5)
+            dtype = np.dtype([("MaskIndex", "i8")])
+            mask_indices = mask_indices.int().cpu().numpy()
+            mask_indices = np.where(~this_pad_masks, mask_indices, -1)
+            # Get the mask index with a default mask cut value of 0.5
+            self.outputs["tracks"]["mask_index"].append(
+                u2s(np.expand_dims(mask_indices, -1), dtype)
+            )
             self.outputs["objects"]["tgt_masks"].append(labels["objects"]["masks"].cpu().numpy())
 
     def on_test_end(self, trainer, module):  # noqa: ARG002
diff --git a/salt/models/task.py b/salt/models/task.py
index 8e157868..0aae9324 100644
--- a/salt/models/task.py
+++ b/salt/models/task.py
@@ -344,7 +344,6 @@ class RegressionTask(RegressionTaskBase):
             for i in range(len(self.norm_params["mean"])):
                 preds[:, i] = preds[:, i] * self.norm_params["std"][i] + self.norm_params["mean"][i]
         elif self.scaler is not None:
-            print("Are we hitting this>" * 1000)
             for i in range(len(self.targets)):
                 preds[:, i] = self.scaler.inverse(self.targets[i], preds[:, i])
         dtype = np.dtype([(f"{self.name}_{t}", precision) for t in self.targets])
-- 
GitLab


From 9a8b795f998d1e0861ba7afd26f6d8f632b4dd96 Mon Sep 17 00:00:00 2001
From: Nikita Pond <nikita.ivvan.pond@cern.ch>
Date: Fri, 5 Jul 2024 13:17:01 +0100
Subject: [PATCH 30/30] pre-commit

---
 salt/configs/MaskFormer.yaml |  2 +-
 salt/models/maskformer.py    |  1 -
 salt/to_onnx.py              | 18 ++++++++++--------
 salt/utils/mask_utils.py     |  8 +++-----
 4 files changed, 14 insertions(+), 15 deletions(-)

diff --git a/salt/configs/MaskFormer.yaml b/salt/configs/MaskFormer.yaml
index 595200fc..532887b5 100644
--- a/salt/configs/MaskFormer.yaml
+++ b/salt/configs/MaskFormer.yaml
@@ -28,7 +28,7 @@ model:
             num_heads: 8
           dense_kwargs:
             activation: *activation
-          drop_registers: true 
+          drop_registers: true
 
 
       mask_decoder:
diff --git a/salt/models/maskformer.py b/salt/models/maskformer.py
index ad6acec0..7aaa6fb8 100644
--- a/salt/models/maskformer.py
+++ b/salt/models/maskformer.py
@@ -142,7 +142,6 @@ def get_masks(x: Tensor, q: Tensor, mask_net: nn.Module, input_pad_mask: Tensor
     pred_masks = torch.einsum("bqe,ble->bql", mask_tokens, x)
 
     if input_pad_mask is not None:
-        t = input_pad_mask.unsqueeze(1).expand_as(pred_masks)
         pred_masks[input_pad_mask.unsqueeze(1).expand_as(pred_masks)] = torch.finfo(
             pred_masks.dtype
         ).min
diff --git a/salt/to_onnx.py b/salt/to_onnx.py
index 4d03f6ce..a6552283 100644
--- a/salt/to_onnx.py
+++ b/salt/to_onnx.py
@@ -131,7 +131,6 @@ def get_maskformer_outputs(objects):
     masks = masks.sigmoid() > 0.5
     object_leading[null_preds] = -999
     regression[null_preds] = np.nan
-    expanded_null = null_preds.unsqueeze(-1).expand(-1, -1, masks.size(-1))
 
     # Define the leading object as the one with the highest regression[0] value
     # in vertexing case, this is the pT
@@ -139,7 +138,6 @@ def get_maskformer_outputs(objects):
     order_expanded = order.unsqueeze(-1).expand(-1, -1, masks.size(-1))
 
     # Use gather to reorder tensors along a specific dimension
-    # TODO check this is working as expected
     masks = torch.gather(masks, 1, order_expanded)
     class_probs = torch.gather(
         class_probs, 1, order.unsqueeze(-1).expand(-1, -1, class_probs.size(-1))
@@ -172,9 +170,11 @@ class ONNXModel(ModelWrapper):
         assert "-" not in self.name, "Model name cannot contain dashes."
         self.include_aux = include_aux
         self.const = "tracks"
+        if sum([bool(object_name), bool(mf_config)]) not in {0, 2}:
+            raise ValueError("If one of object name or mf config is defined, so must the other.")
         self.object = object_name
         self.mf_config = MaskformerConfig(**mf_config) if mf_config else None
-        if self.object:
+        if self.object and self.mf_config:
             self.object_params = {
                 "class_label": self.mf_config.object.class_label,
                 "label_map": [f"p{name}" for name in self.mf_config.object.class_names],
@@ -278,11 +278,11 @@ class ONNXModel(ModelWrapper):
 
         if self.object:
             assert "objects" in outputs, "No MF objects in outputs"
-            regression_task = [
+            regression_tasks = [
                 t for t in self.model.tasks if t.input_name == "objects" and t.name == "regression"
             ]
-            assert len(regression_task) == 1, "Object outputs require a regression task"
-            regression_task = regression_task[0]
+            assert len(regression_tasks) == 1, "Object outputs require a regression task"
+            regression_task = regression_tasks[0]
 
             # Get the (hopefully) correctly (un)scaled regression predictions
             for i, t in enumerate(regression_task.targets):
@@ -291,8 +291,10 @@ class ONNXModel(ModelWrapper):
                 )
                 outputs["objects"]["regression"][:, :, i] = unscaled_preds
 
-            # Extract the mf outputs
-            leading_reg, indices, class_probs, regression = get_maskformer_outputs(
+            # Extract the mf outputs.
+            # TODO: write all regression values, this will require work on the athena end as well
+            # https://gitlab.cern.ch/atlas-flavor-tagging-tools/algorithms/salt/-/issues/53
+            leading_reg, indices, class_probs, regression = get_maskformer_outputs(  # noqa: F841
                 outputs["objects"]
             )
 
diff --git a/salt/utils/mask_utils.py b/salt/utils/mask_utils.py
index 04743fe8..82e2b125 100644
--- a/salt/utils/mask_utils.py
+++ b/salt/utils/mask_utils.py
@@ -76,7 +76,7 @@ def mask_from_indices(indices: Tensor, num_masks: int | None = None) -> BoolTens
     return mask
 
 
-def indices_from_mask(mask: BoolTensor, noindex: int = -1, first_invalid=None):
+def indices_from_mask(mask: BoolTensor, noindex: int = -1):
     """Converts a spares bool mask to a dense index tensor, where any
     index NOT part of a mask is given an increasing index value.
 
@@ -92,6 +92,7 @@ def indices_from_mask(mask: BoolTensor, noindex: int = -1, first_invalid=None):
     mask : BoolTensor
         The sparse mask
     noindex : int
+        The value to insert for padding in the mask
 
     """
     mask = torch.as_tensor(mask)
@@ -108,10 +109,7 @@ def indices_from_mask(mask: BoolTensor, noindex: int = -1, first_invalid=None):
         if idx_exist.any():
             min_val = torch.min(indices[idx_exist]).item()
             indices[idx_exist] = indices[idx_exist] - min_val
-            if first_invalid:
-                max_val = first_invalid
-            else:
-                max_val = torch.max(indices[idx_exist]).item()
+            max_val = torch.max(indices[idx_exist]).item()
         else:
             min_val = 0  # Default value if the tensor is empty
             max_val = 0
-- 
GitLab