{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "false-lafayette",
   "metadata": {
    "toc": true
   },
   "source": [
    "<h1>Table of Contents<span class=\"tocSkip\"></span></h1>\n",
    "<div class=\"toc\"><ul class=\"toc-item\"><li><span><a href=\"#Demo-Notebook-for-TimeSeries-Anomaly-Detection\" data-toc-modified-id=\"Demo-Notebook-for-TimeSeries-Anomaly-Detection-1\"><span class=\"toc-item-num\">1&nbsp;&nbsp;</span>Demo Notebook for TimeSeries Anomaly Detection</a></span></li><li><span><a href=\"#Installation-of-libraries-and-imports\" data-toc-modified-id=\"Installation-of-libraries-and-imports-2\"><span class=\"toc-item-num\">2&nbsp;&nbsp;</span>Installation of libraries and imports</a></span><ul class=\"toc-item\"><li><span><a href=\"#Installation-of-adcern-and-others-libraries\" data-toc-modified-id=\"Installation-of-adcern-and-others-libraries-2.1\"><span class=\"toc-item-num\">2.1&nbsp;&nbsp;</span>Installation of adcern and others libraries</a></span></li><li><span><a href=\"#Imports\" data-toc-modified-id=\"Imports-2.2\"><span class=\"toc-item-num\">2.2&nbsp;&nbsp;</span>Imports</a></span></li></ul></li><li><span><a href=\"#Init-of-configuration-files---ETL\" data-toc-modified-id=\"Init-of-configuration-files---ETL-3\"><span class=\"toc-item-num\">3&nbsp;&nbsp;</span>Init of configuration files - ETL</a></span><ul class=\"toc-item\"><li><span><a href=\"#Creation\" data-toc-modified-id=\"Creation-3.1\"><span class=\"toc-item-num\">3.1&nbsp;&nbsp;</span>Creation</a></span></li><li><span><a href=\"#Reading-the-json\" data-toc-modified-id=\"Reading-the-json-3.2\"><span class=\"toc-item-num\">3.2&nbsp;&nbsp;</span>Reading the json</a></span></li></ul></li><li><span><a href=\"#ETL-steps-(Extract,-Transform,-Load)\" data-toc-modified-id=\"ETL-steps-(Extract,-Transform,-Load)-4\"><span class=\"toc-item-num\">4&nbsp;&nbsp;</span>ETL steps (Extract, Transform, Load)</a></span><ul class=\"toc-item\"><li><span><a href=\"#Compute-Normalization\" data-toc-modified-id=\"Compute-Normalization-4.1\"><span class=\"toc-item-num\">4.1&nbsp;&nbsp;</span>Compute Normalization</a></span></li><li><span><a href=\"#Transform-Data\" data-toc-modified-id=\"Transform-Data-4.2\"><span class=\"toc-item-num\">4.2&nbsp;&nbsp;</span>Transform Data</a></span></li><li><span><a href=\"#Copy-Locally\" data-toc-modified-id=\"Copy-Locally-4.3\"><span class=\"toc-item-num\">4.3&nbsp;&nbsp;</span>Copy Locally</a></span></li></ul></li><li><span><a href=\"#Visualization-of-downloaded-time-series\" data-toc-modified-id=\"Visualization-of-downloaded-time-series-5\"><span class=\"toc-item-num\">5&nbsp;&nbsp;</span>Visualization of downloaded time series</a></span><ul class=\"toc-item\"><li><span><a href=\"#Reading-time-series-with-pandas-and-host-definition\" data-toc-modified-id=\"Reading-time-series-with-pandas-and-host-definition-5.1\"><span class=\"toc-item-num\">5.1&nbsp;&nbsp;</span>Reading time series with pandas and host definition</a></span></li><li><span><a href=\"#Reconstruction-function\" data-toc-modified-id=\"Reconstruction-function-5.2\"><span class=\"toc-item-num\">5.2&nbsp;&nbsp;</span>Reconstruction function</a></span></li><li><span><a href=\"#Normalized-data-visualization\" data-toc-modified-id=\"Normalized-data-visualization-5.3\"><span class=\"toc-item-num\">5.3&nbsp;&nbsp;</span>Normalized data visualization</a></span></li><li><span><a href=\"#How-to-retrieve-the-original-data-without-normalization\" data-toc-modified-id=\"How-to-retrieve-the-original-data-without-normalization-5.4\"><span class=\"toc-item-num\">5.4&nbsp;&nbsp;</span>How to retrieve the original data without normalization</a></span></li><li><span><a href=\"#Original-data-visualization\" 
data-toc-modified-id=\"Original-data-visualization-5.5\"><span class=\"toc-item-num\">5.5&nbsp;&nbsp;</span>Original data visualization</a></span></li></ul></li><li><span><a href=\"#Init-of-configuration-file---ANALYSIS\" data-toc-modified-id=\"Init-of-configuration-file---ANALYSIS-6\"><span class=\"toc-item-num\">6&nbsp;&nbsp;</span>Init of configuration file - ANALYSIS</a></span><ul class=\"toc-item\"><li><span><a href=\"#Creation\" data-toc-modified-id=\"Creation-6.1\"><span class=\"toc-item-num\">6.1&nbsp;&nbsp;</span>Creation</a></span></li><li><span><a href=\"#Reading-the-json\" data-toc-modified-id=\"Reading-the-json-6.2\"><span class=\"toc-item-num\">6.2&nbsp;&nbsp;</span>Reading the json</a></span></li></ul></li><li><span><a href=\"#ANALYSIS-to-produce-anomaly-scores\" data-toc-modified-id=\"ANALYSIS-to-produce-anomaly-scores-7\"><span class=\"toc-item-num\">7&nbsp;&nbsp;</span>ANALYSIS to produce anomaly scores</a></span><ul class=\"toc-item\"><li><span><a href=\"#Analysis\" data-toc-modified-id=\"Analysis-7.1\"><span class=\"toc-item-num\">7.1&nbsp;&nbsp;</span>Analysis</a></span></li></ul></li><li><span><a href=\"#Visualization-of-the-results\" data-toc-modified-id=\"Visualization-of-the-results-8\"><span class=\"toc-item-num\">8&nbsp;&nbsp;</span>Visualization of the results</a></span><ul class=\"toc-item\"><li><span><a href=\"#Reading-the-scores\" data-toc-modified-id=\"Reading-the-scores-8.1\"><span class=\"toc-item-num\">8.1&nbsp;&nbsp;</span>Reading the scores</a></span></li><li><span><a href=\"#Visualization-of-both-downloaded-data-and-scores\" data-toc-modified-id=\"Visualization-of-both-downloaded-data-and-scores-8.2\"><span class=\"toc-item-num\">8.2&nbsp;&nbsp;</span>Visualization of both downloaded data and scores</a></span></li></ul></li><li><span><a href=\"#ETL-Steps-not-used\" data-toc-modified-id=\"ETL-Steps-not-used-9\"><span class=\"toc-item-num\">9&nbsp;&nbsp;</span>ETL Steps not used</a></span><ul class=\"toc-item\"><li><span><a href=\"#Data-Presence\" data-toc-modified-id=\"Data-Presence-9.1\"><span class=\"toc-item-num\">9.1&nbsp;&nbsp;</span>Data Presence</a></span></li><li><span><a href=\"#Check-Normalization\" data-toc-modified-id=\"Check-Normalization-9.2\"><span class=\"toc-item-num\">9.2&nbsp;&nbsp;</span>Check Normalization</a></span></li></ul></li><li><span><a href=\"#Adding-a-model\" data-toc-modified-id=\"Adding-a-model-10\"><span class=\"toc-item-num\">10&nbsp;&nbsp;</span>Adding a model</a></span><ul class=\"toc-item\"><li><span><a href=\"#Changing-config-file---Analysis\" data-toc-modified-id=\"Changing-config-file---Analysis-10.1\"><span class=\"toc-item-num\">10.1&nbsp;&nbsp;</span>Changing config file - Analysis</a></span></li><li><span><a href=\"#Reading-the-json\" data-toc-modified-id=\"Reading-the-json-10.2\"><span class=\"toc-item-num\">10.2&nbsp;&nbsp;</span>Reading the json</a></span></li><li><span><a href=\"#Analysis-of-multiple-models\" data-toc-modified-id=\"Analysis-of-multiple-models-10.3\"><span class=\"toc-item-num\">10.3&nbsp;&nbsp;</span>Analysis of multiple models</a></span></li><li><span><a href=\"#Checking-new-results\" data-toc-modified-id=\"Checking-new-results-10.4\"><span class=\"toc-item-num\">10.4&nbsp;&nbsp;</span>Checking new results</a></span></li></ul></li></ul></div>"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "pressing-plasma",
   "metadata": {
    "heading_collapsed": true
   },
   "source": [
    "# Demo Notebook for TimeSeries Anomaly Detection\n",
    "This notebook shows the Anomaly Detection system for TimeSeries in action, using the tools provided in this repository.\n",
    "\n",
    "**Note that we recommend running this notebook on swan004.cern.ch with the following configuration:**\n",
    "- Software stack: Other releases, **97a**\n",
    "- Spark cluster: **General Purpose (Analytix)**\n",
    "\n",
    "---\n",
    "**REMEMBER TO ACTIVATE THE ANALYTIX CLUSTER IN THE SWAN CONFIGURATION!!**\n",
    "Activating it means enabling Spark: click the Spark icon in the upper part of the notebook to enable it before starting to run the notebook cells."
   ]
  },
  {
   "cell_type": "markdown",
   "id": "naval-specific",
   "metadata": {},
   "source": [
    "# Installation of libraries and imports"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "stopped-praise",
   "metadata": {},
   "source": [
    "## Installation of adcern and other libraries"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "vocal-participation",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2021-05-03T18:33:36.844986Z",
     "start_time": "2021-05-03T18:20:48.908597Z"
    },
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Collecting git+https://:****@gitlab.cern.ch:8443/cloud-infrastructure/data-analytics.git@qa-v0.4\n",
      "  Cloning https://:****@gitlab.cern.ch:8443/cloud-infrastructure/data-analytics.git (to revision qa-v0.4) to /tmp/pip-req-build-6x76eylt\n",
      "  Running command git clone -q https://:@gitlab.cern.ch:8443/cloud-infrastructure/data-analytics.git /tmp/pip-req-build-6x76eylt\n",
      "  Running command git checkout -b qa-v0.4 --track origin/qa-v0.4\n",
      "  Switched to a new branch 'qa-v0.4'\n",
      "  Branch qa-v0.4 set up to track remote branch qa-v0.4 from origin.\n",
      "Collecting setuptools==41.0.0 (from data-analytics==0.4.0.0rc5.dev83)\n",
      "\u001b[?25l  Downloading https://files.pythonhosted.org/packages/c8/b0/cc6b7ba28d5fb790cf0d5946df849233e32b8872b6baca10c9e002ff5b41/setuptools-41.0.0-py2.py3-none-any.whl (575kB)\n",
      "\u001b[K     |████████████████████████████████| 583kB 8.1MB/s eta 0:00:01\n",
      "\u001b[?25hRequirement already satisfied: pbr!=2.1.0,>=2.0.0 in /cvmfs/sft.cern.ch/lcg/views/LCG_97apython3/x86_64-centos7-gcc8-opt/lib/python3.7/site-packages (from data-analytics==0.4.0.0rc5.dev83) (5.2.1)\n",
      "Collecting matplotlib==3.2.1 (from data-analytics==0.4.0.0rc5.dev83)\n",
      "\u001b[?25l  Downloading https://files.pythonhosted.org/packages/b2/c2/71fcf957710f3ba1f09088b35776a799ba7dd95f7c2b195ec800933b276b/matplotlib-3.2.1-cp37-cp37m-manylinux1_x86_64.whl (12.4MB)\n",
      "\u001b[K     |████████████████████████████████| 12.4MB 10.4MB/s eta 0:00:01\n",
      "\u001b[?25hCollecting numpy==1.18.5 (from data-analytics==0.4.0.0rc5.dev83)\n",
      "\u001b[?25l  Downloading https://files.pythonhosted.org/packages/d6/c6/58e517e8b1fb192725cfa23c01c2e60e4e6699314ee9684a1c5f5c9b27e1/numpy-1.18.5-cp37-cp37m-manylinux1_x86_64.whl (20.1MB)\n",
      "\u001b[K     |████████████████████████████████| 20.1MB 26.3MB/s eta 0:00:01\n",
      "\u001b[?25hCollecting pandas==1.0.4 (from data-analytics==0.4.0.0rc5.dev83)\n",
      "\u001b[?25l  Downloading https://files.pythonhosted.org/packages/a4/5f/1b6e0efab4bfb738478919d40b0e3e1a06e3d9996da45eb62a77e9a090d9/pandas-1.0.4-cp37-cp37m-manylinux1_x86_64.whl (10.1MB)\n",
      "\u001b[K     |████████████████████████████████| 10.1MB 24.1MB/s eta 0:00:01\n",
      "\u001b[?25hCollecting requests==2.21.0 (from data-analytics==0.4.0.0rc5.dev83)\n",
      "\u001b[?25l  Downloading https://files.pythonhosted.org/packages/7d/e3/20f3d364d6c8e5d2353c72a67778eb189176f08e873c9900e10c0287b84b/requests-2.21.0-py2.py3-none-any.whl (57kB)\n",
      "\u001b[K     |████████████████████████████████| 61kB 9.9MB/s  eta 0:00:01\n",
      "\u001b[?25hCollecting pyspark==2.4.6 (from data-analytics==0.4.0.0rc5.dev83)\n",
      "\u001b[?25l  Downloading https://files.pythonhosted.org/packages/e9/e4/5c15ab8d354c4e3528510821865e6748209a9b0ff6a1788f4cd36cc2a5dc/pyspark-2.4.6.tar.gz (218.4MB)\n",
      "\u001b[K     |████████████████████████████████| 218.4MB 174kB/s  eta 0:00:01   |                                | 296kB 21.8MB/s eta 0:00:11     |███████████████████▊            | 134.4MB 38.2MB/s eta 0:00:03     |██████████████████████          | 150.2MB 38.2MB/s eta 0:00:02     |████████████████████████        | 164.0MB 34.9MB/s eta 0:00:02\n",
      "\u001b[?25hCollecting scipy==1.4.1 (from data-analytics==0.4.0.0rc5.dev83)\n",
      "\u001b[?25l  Downloading https://files.pythonhosted.org/packages/dd/82/c1fe128f3526b128cfd185580ba40d01371c5d299fcf7f77968e22dfcc2e/scipy-1.4.1-cp37-cp37m-manylinux1_x86_64.whl (26.1MB)\n",
      "\u001b[K     |████████████████████████████████| 26.1MB 16.1MB/s eta 0:00:01\n",
      "\u001b[?25hRequirement already satisfied: six==1.12.0 in /cvmfs/sft.cern.ch/lcg/views/LCG_97apython3/x86_64-centos7-gcc8-opt/lib/python3.7/site-packages (from data-analytics==0.4.0.0rc5.dev83) (1.12.0)\n",
      "Collecting seaborn==0.10.1 (from data-analytics==0.4.0.0rc5.dev83)\n",
      "\u001b[?25l  Downloading https://files.pythonhosted.org/packages/c7/e6/54aaaafd0b87f51dfba92ba73da94151aa3bc179e5fe88fc5dfb3038e860/seaborn-0.10.1-py3-none-any.whl (215kB)\n",
      "\u001b[K     |████████████████████████████████| 225kB 22.7MB/s eta 0:00:01\n",
      "\u001b[?25hCollecting statsmodels==0.11.1 (from data-analytics==0.4.0.0rc5.dev83)\n",
      "\u001b[?25l  Downloading https://files.pythonhosted.org/packages/7b/6a/0bf4184c3fb6f9f43df997b88de5784b4cb2f6bd19a5dc213463971076cf/statsmodels-0.11.1-cp37-cp37m-manylinux1_x86_64.whl (8.7MB)\n",
      "\u001b[K     |████████████████████████████████| 8.7MB 19.2MB/s eta 0:00:01\n",
      "\u001b[?25hCollecting scikit-learn==0.23.1 (from data-analytics==0.4.0.0rc5.dev83)\n",
      "\u001b[?25l  Downloading https://files.pythonhosted.org/packages/b8/7e/74e707b66490d4eb05f702966ad0990881127acecf9d5cdcef3c95ec6c16/scikit_learn-0.23.1-cp37-cp37m-manylinux1_x86_64.whl (6.8MB)\n",
      "\u001b[K     |████████████████████████████████| 6.8MB 49.3MB/s eta 0:00:01                    | 266kB 49.3MB/s eta 0:00:01\n",
      "\u001b[?25hRequirement already satisfied: tables==3.6.1 in /cvmfs/sft.cern.ch/lcg/views/LCG_97apython3/x86_64-centos7-gcc8-opt/lib/python3.7/site-packages (from data-analytics==0.4.0.0rc5.dev83) (3.6.1)\n",
      "Collecting dictdiffer==0.8.1 (from data-analytics==0.4.0.0rc5.dev83)\n",
      "  Downloading https://files.pythonhosted.org/packages/97/92/350b6b6ec39c5f87d98d04c91a50c498518716a05368e6dea88b5c69b590/dictdiffer-0.8.1-py2.py3-none-any.whl\n",
      "Collecting elasticsearch==7.7.1 (from data-analytics==0.4.0.0rc5.dev83)\n",
      "\u001b[?25l  Downloading https://files.pythonhosted.org/packages/4a/33/d0ed32e077f7dc860153fa866fc52ac312886c9890962ff29379aa753dd1/elasticsearch-7.7.1-py2.py3-none-any.whl (99kB)\n",
      "\u001b[K     |████████████████████████████████| 102kB 3.2MB/s ta 0:00:011     |███████████████████▉            | 61kB 3.8MB/s eta 0:00:01\n",
      "\u001b[?25hCollecting pyyaml==5.3.1 (from data-analytics==0.4.0.0rc5.dev83)\n",
      "\u001b[?25l  Downloading https://files.pythonhosted.org/packages/64/c2/b80047c7ac2478f9501676c988a5411ed5572f35d1beff9cae07d321512c/PyYAML-5.3.1.tar.gz (269kB)\n",
      "\u001b[K     |████████████████████████████████| 276kB 19.4MB/s eta 0:00:01\n",
      "\u001b[?25hCollecting pyod==0.8.0 (from data-analytics==0.4.0.0rc5.dev83)\n",
      "\u001b[?25l  Downloading https://files.pythonhosted.org/packages/cf/68/99df05e5666248e9c10359457e2da1b89943f5ac96749ceb1c131001eb88/pyod-0.8.0.tar.gz (93kB)\n",
      "\u001b[K     |████████████████████████████████| 102kB 21.2MB/s ta 0:00:01\n",
      "\u001b[?25hCollecting pyarrow==0.17.1 (from data-analytics==0.4.0.0rc5.dev83)\n",
      "\u001b[?25l  Downloading https://files.pythonhosted.org/packages/c4/dd/4d2ce1c64a94e5f35ae622cdfdda6eeab927cf6570d826dcfe09e9ba08f8/pyarrow-0.17.1-cp37-cp37m-manylinux2010_x86_64.whl (64.2MB)\n",
      "\u001b[K     |████████████████████████████████| 64.2MB 32.1MB/s eta 0:00:01    |███████████▋                    | 23.2MB 5.6MB/s eta 0:00:08     |████████████████████████████▌   | 57.2MB 32.1MB/s eta 0:00:01\n",
      "\u001b[?25hCollecting numba==0.50.1 (from data-analytics==0.4.0.0rc5.dev83)\n",
      "\u001b[?25l  Downloading https://files.pythonhosted.org/packages/9d/2f/2b3ad106afafd684d31cd24dd624f15d2d725aba394752932d08d5283e10/numba-0.50.1-cp37-cp37m-manylinux1_x86_64.whl (2.6MB)\n",
      "\u001b[K     |████████████████████████████████| 2.6MB 39.3MB/s eta 0:00:01\n",
      "\u001b[?25hCollecting llvmlite==0.33.0 (from data-analytics==0.4.0.0rc5.dev83)\n",
      "\u001b[?25l  Downloading https://files.pythonhosted.org/packages/0a/28/0a35b3c2685bf2ea327cef5577bdf91f387f0f4594417a2a05a1d42fb7c2/llvmlite-0.33.0-cp37-cp37m-manylinux1_x86_64.whl (18.3MB)\n",
      "\u001b[K     |████████████████████████████████| 18.3MB 21.3MB/s eta 0:00:01\n",
      "\u001b[?25hCollecting click==7.1.2 (from data-analytics==0.4.0.0rc5.dev83)\n",
      "\u001b[?25l  Downloading https://files.pythonhosted.org/packages/d2/3d/fa76db83bf75c4f8d338c2fd15c8d33fdd7ad23a9b5e57eb6c5de26b430e/click-7.1.2-py2.py3-none-any.whl (82kB)\n",
      "\u001b[K     |████████████████████████████████| 92kB 24.6MB/s eta 0:00:01\n",
      "\u001b[?25hCollecting cliff==3.4.0 (from data-analytics==0.4.0.0rc5.dev83)\n",
      "\u001b[?25l  Downloading https://files.pythonhosted.org/packages/71/06/03b1f92d46546a18eabf33ff7f37ef422c18c93d5a926bf590fee32ebe75/cliff-3.4.0-py3-none-any.whl (76kB)\n",
      "\u001b[K     |████████████████████████████████| 81kB 12.9MB/s eta 0:00:01\n",
      "\u001b[?25hCollecting pandasticsearch (from data-analytics==0.4.0.0rc5.dev83)\n",
      "\u001b[?25l  Downloading https://files.pythonhosted.org/packages/09/d0/c46cf54ba2a58e3b5057cb95098546cc393de6a390018c22f1a6afd5dfed/pandasticsearch-0.5.3.tar.gz (251kB)\n",
      "\u001b[K     |████████████████████████████████| 256kB 25.0MB/s eta 0:00:01\n",
      "\u001b[?25hRequirement already satisfied: python-dateutil>=2.1 in /cvmfs/sft.cern.ch/lcg/views/LCG_97apython3/x86_64-centos7-gcc8-opt/lib/python3.7/site-packages (from matplotlib==3.2.1->data-analytics==0.4.0.0rc5.dev83) (2.8.0)\n",
      "Requirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /cvmfs/sft.cern.ch/lcg/views/LCG_97apython3/x86_64-centos7-gcc8-opt/lib/python3.7/site-packages (from matplotlib==3.2.1->data-analytics==0.4.0.0rc5.dev83) (2.4.0)\n",
      "Requirement already satisfied: cycler>=0.10 in /cvmfs/sft.cern.ch/lcg/views/LCG_97apython3/x86_64-centos7-gcc8-opt/lib/python3.7/site-packages (from matplotlib==3.2.1->data-analytics==0.4.0.0rc5.dev83) (0.10.0)\n",
      "Requirement already satisfied: kiwisolver>=1.0.1 in /cvmfs/sft.cern.ch/lcg/views/LCG_97apython3/x86_64-centos7-gcc8-opt/lib/python3.7/site-packages (from matplotlib==3.2.1->data-analytics==0.4.0.0rc5.dev83) (1.0.1)\n",
      "Requirement already satisfied: pytz>=2017.2 in /cvmfs/sft.cern.ch/lcg/views/LCG_97apython3/x86_64-centos7-gcc8-opt/lib/python3.7/site-packages (from pandas==1.0.4->data-analytics==0.4.0.0rc5.dev83) (2019.1)\n",
      "Requirement already satisfied: certifi>=2017.4.17 in /cvmfs/sft.cern.ch/lcg/views/LCG_97apython3/x86_64-centos7-gcc8-opt/lib/python3.7/site-packages (from requests==2.21.0->data-analytics==0.4.0.0rc5.dev83) (2019.3.9)\n",
      "Collecting urllib3<1.25,>=1.21.1 (from requests==2.21.0->data-analytics==0.4.0.0rc5.dev83)\n",
      "\u001b[?25l  Downloading https://files.pythonhosted.org/packages/01/11/525b02e4acc0c747de8b6ccdab376331597c569c42ea66ab0a1dbd36eca2/urllib3-1.24.3-py2.py3-none-any.whl (118kB)\n",
      "\u001b[K     |████████████████████████████████| 122kB 27.1MB/s eta 0:00:01\n",
      "\u001b[?25hRequirement already satisfied: idna<2.9,>=2.5 in /cvmfs/sft.cern.ch/lcg/views/LCG_97apython3/x86_64-centos7-gcc8-opt/lib/python3.7/site-packages (from requests==2.21.0->data-analytics==0.4.0.0rc5.dev83) (2.8)\n",
      "Requirement already satisfied: chardet<3.1.0,>=3.0.2 in /cvmfs/sft.cern.ch/lcg/views/LCG_97apython3/x86_64-centos7-gcc8-opt/lib/python3.7/site-packages (from requests==2.21.0->data-analytics==0.4.0.0rc5.dev83) (3.0.4)\n",
      "Requirement already satisfied: py4j==0.10.7 in /cvmfs/sft.cern.ch/lcg/views/LCG_97apython3/x86_64-centos7-gcc8-opt/lib/python3.7/site-packages (from pyspark==2.4.6->data-analytics==0.4.0.0rc5.dev83) (0.10.7)\n",
      "Requirement already satisfied: patsy>=0.5 in /cvmfs/sft.cern.ch/lcg/views/LCG_97apython3/x86_64-centos7-gcc8-opt/lib/python3.7/site-packages (from statsmodels==0.11.1->data-analytics==0.4.0.0rc5.dev83) (0.5.1)\n",
      "Requirement already satisfied: joblib>=0.11 in /cvmfs/sft.cern.ch/lcg/views/LCG_97apython3/x86_64-centos7-gcc8-opt/lib/python3.7/site-packages (from scikit-learn==0.23.1->data-analytics==0.4.0.0rc5.dev83) (0.14.0)\n",
      "Collecting threadpoolctl>=2.0.0 (from scikit-learn==0.23.1->data-analytics==0.4.0.0rc5.dev83)\n",
      "  Downloading https://files.pythonhosted.org/packages/f7/12/ec3f2e203afa394a149911729357aa48affc59c20e2c1c8297a60f33f133/threadpoolctl-2.1.0-py3-none-any.whl\n",
      "Requirement already satisfied: numexpr>=2.6.2 in /cvmfs/sft.cern.ch/lcg/views/LCG_97apython3/x86_64-centos7-gcc8-opt/lib/python3.7/site-packages (from tables==3.6.1->data-analytics==0.4.0.0rc5.dev83) (2.6.9)\n",
      "Collecting combo (from pyod==0.8.0->data-analytics==0.4.0.0rc5.dev83)\n",
      "  Downloading https://files.pythonhosted.org/packages/12/ae/66029dcaa88ccca77f454dbb29c1178c751ec24fc771ed475a992b49a02d/combo-0.1.2.tar.gz\n",
      "Collecting suod (from pyod==0.8.0->data-analytics==0.4.0.0rc5.dev83)\n",
      "\u001b[?25l  Downloading https://files.pythonhosted.org/packages/4e/8a/255ed2c959abab7c712b10fe710e454d5a8e3461c6ae60e426349a8eb6a5/suod-0.0.6.tar.gz (2.1MB)\n",
      "\u001b[K     |████████████████████████████████| 2.1MB 37.0MB/s eta 0:00:01\n",
      "\u001b[?25hCollecting stevedore>=2.0.1 (from cliff==3.4.0->data-analytics==0.4.0.0rc5.dev83)\n",
      "\u001b[?25l  Downloading https://files.pythonhosted.org/packages/d4/49/b602307aeac3df3384ff1fcd05da9c0376c622a6c48bb5325f28ab165b57/stevedore-3.3.0-py3-none-any.whl (49kB)\n",
      "\u001b[K     |████████████████████████████████| 51kB 14.6MB/s eta 0:00:01\n",
      "\u001b[?25hRequirement already satisfied: PrettyTable<0.8,>=0.7.2 in /cvmfs/sft.cern.ch/lcg/views/LCG_97apython3/x86_64-centos7-gcc8-opt/lib/python3.7/site-packages (from cliff==3.4.0->data-analytics==0.4.0.0rc5.dev83) (0.7.2)\n",
      "Collecting cmd2!=0.8.3,>=0.8.0 (from cliff==3.4.0->data-analytics==0.4.0.0rc5.dev83)\n",
      "\u001b[?25l  Downloading https://files.pythonhosted.org/packages/15/8b/15061b32332bb35ea2a2f6263d0f616779d576e82739ec8e7fcf3c94abf5/cmd2-1.5.0-py3-none-any.whl (133kB)\n",
      "\u001b[K     |████████████████████████████████| 143kB 27.4MB/s eta 0:00:01\n",
      "\u001b[?25hRequirement already satisfied: psutil in /cvmfs/sft.cern.ch/lcg/views/LCG_97apython3/x86_64-centos7-gcc8-opt/lib/python3.7/site-packages (from suod->pyod==0.8.0->data-analytics==0.4.0.0rc5.dev83) (5.6.2)\n",
      "Collecting importlib-metadata>=1.7.0; python_version < \"3.8\" (from stevedore>=2.0.1->cliff==3.4.0->data-analytics==0.4.0.0rc5.dev83)\n",
      "  Downloading https://files.pythonhosted.org/packages/8e/e2/49966924c93909d47612bb47d911448140a2f6c1390aec2f4c1afbe3748f/importlib_metadata-4.0.1-py3-none-any.whl\n",
      "Collecting colorama>=0.3.7 (from cmd2!=0.8.3,>=0.8.0->cliff==3.4.0->data-analytics==0.4.0.0rc5.dev83)\n",
      "  Downloading https://files.pythonhosted.org/packages/44/98/5b86278fbbf250d239ae0ecb724f8572af1c91f4a11edf4d36a206189440/colorama-0.4.4-py2.py3-none-any.whl\n",
      "Requirement already satisfied: attrs>=16.3.0 in /cvmfs/sft.cern.ch/lcg/views/LCG_97apython3/x86_64-centos7-gcc8-opt/lib/python3.7/site-packages (from cmd2!=0.8.3,>=0.8.0->cliff==3.4.0->data-analytics==0.4.0.0rc5.dev83) (19.3.0)\n",
      "Collecting pyperclip>=1.6 (from cmd2!=0.8.3,>=0.8.0->cliff==3.4.0->data-analytics==0.4.0.0rc5.dev83)\n",
      "  Downloading https://files.pythonhosted.org/packages/a7/2c/4c64579f847bd5d539803c8b909e54ba087a79d01bb3aba433a95879a6c5/pyperclip-1.8.2.tar.gz\n",
      "Requirement already satisfied: wcwidth>=0.1.7 in /cvmfs/sft.cern.ch/lcg/views/LCG_97apython3/x86_64-centos7-gcc8-opt/lib/python3.7/site-packages (from cmd2!=0.8.3,>=0.8.0->cliff==3.4.0->data-analytics==0.4.0.0rc5.dev83) (0.1.7)\n",
      "Requirement already satisfied: zipp>=0.5 in /cvmfs/sft.cern.ch/lcg/views/LCG_97apython3/x86_64-centos7-gcc8-opt/lib/python3.7/site-packages (from importlib-metadata>=1.7.0; python_version < \"3.8\"->stevedore>=2.0.1->cliff==3.4.0->data-analytics==0.4.0.0rc5.dev83) (0.5.1)\n",
      "Collecting typing-extensions>=3.6.4; python_version < \"3.8\" (from importlib-metadata>=1.7.0; python_version < \"3.8\"->stevedore>=2.0.1->cliff==3.4.0->data-analytics==0.4.0.0rc5.dev83)\n",
      "  Downloading https://files.pythonhosted.org/packages/2e/35/6c4fff5ab443b57116cb1aad46421fb719bed2825664e8fe77d66d99bcbc/typing_extensions-3.10.0.0-py3-none-any.whl\n",
      "Building wheels for collected packages: data-analytics, pyspark, pyyaml, pyod, pandasticsearch, combo, suod, pyperclip\n",
      "  Building wheel for data-analytics (setup.py) ... \u001b[?25ldone\n",
      "\u001b[?25h  Stored in directory: /tmp/pip-ephem-wheel-cache-j16jwrx9/wheels/15/c2/a5/b8cdd4a27351e39866586a51b8ea2c523b42e64a859737ea5e\n",
      "  Building wheel for pyspark (setup.py) ... \u001b[?25ldone\n",
      "\u001b[?25h  Stored in directory: /tmp/smetaj/.cache/pip/wheels/1e/5e/6a/17e906c94ec7246f260330a66e44a06a0809033ba2738a74a8\n",
      "  Building wheel for pyyaml (setup.py) ... \u001b[?25ldone\n",
      "\u001b[?25h  Stored in directory: /tmp/smetaj/.cache/pip/wheels/a7/c1/ea/cf5bd31012e735dc1dfea3131a2d5eae7978b251083d6247bd\n",
      "  Building wheel for pyod (setup.py) ... \u001b[?25ldone\n",
      "\u001b[?25h  Stored in directory: /tmp/smetaj/.cache/pip/wheels/ba/a6/81/2dd042e240090f3603a686b897d03402219a86e3f61bc71184\n",
      "  Building wheel for pandasticsearch (setup.py) ... \u001b[?25ldone\n",
      "\u001b[?25h  Stored in directory: /tmp/smetaj/.cache/pip/wheels/33/d3/38/984341c95870a77b5b09b5fa604f4dbd32389af4492cef36c4\n",
      "  Building wheel for combo (setup.py) ... \u001b[?25ldone\n",
      "\u001b[?25h  Stored in directory: /tmp/smetaj/.cache/pip/wheels/01/d9/bf/d1a371a5f0844cd8a53c04c14daa89974c93f429dda9dceb86\n",
      "  Building wheel for suod (setup.py) ... \u001b[?25ldone\n",
      "\u001b[?25h  Stored in directory: /tmp/smetaj/.cache/pip/wheels/c5/d7/c1/6c778aee7fccfe3c054ea9bab92c5994ae3a0f6bba7078541e\n",
      "  Building wheel for pyperclip (setup.py) ... \u001b[?25ldone\n",
      "\u001b[?25h  Stored in directory: /tmp/smetaj/.cache/pip/wheels/25/af/b8/3407109267803f4015e1ee2ff23be0c8c19ce4008665931ee1\n",
      "Successfully built data-analytics pyspark pyyaml pyod pandasticsearch combo suod pyperclip\n",
      "\u001b[31mERROR: tensorflow 1.14.0 requires google-pasta>=0.1.6, which is not installed.\u001b[0m\n",
      "\u001b[31mERROR: caniusepython3 7.1.0 requires argparse, which is not installed.\u001b[0m\n",
      "\u001b[31mERROR: itkwidgets 0.25.3 has requirement ipywidgets>=7.5.1, but you'll have ipywidgets 7.4.2 which is incompatible.\u001b[0m\n",
      "\u001b[31mERROR: ipympl 0.4.1 has requirement ipywidgets>=7.5.0, but you'll have ipywidgets 7.4.2 which is incompatible.\u001b[0m\n",
      "\u001b[31mERROR: hepdata-converter 0.1.35 has requirement matplotlib<3.0.0, but you'll have matplotlib 3.2.1 which is incompatible.\u001b[0m\n",
      "\u001b[31mERROR: distributed 1.28.1 has requirement dask>=0.18.0, but you'll have dask 0+unknown which is incompatible.\u001b[0m\n",
      "\u001b[31mERROR: suod 0.0.6 has requirement joblib>=0.14.1, but you'll have joblib 0.14.0 which is incompatible.\u001b[0m\n",
      "Installing collected packages: setuptools, numpy, matplotlib, pandas, urllib3, requests, pyspark, scipy, seaborn, statsmodels, threadpoolctl, scikit-learn, dictdiffer, elasticsearch, pyyaml, llvmlite, numba, combo, suod, pyod, pyarrow, click, typing-extensions, importlib-metadata, stevedore, colorama, pyperclip, cmd2, cliff, pandasticsearch, data-analytics\n",
      "\u001b[33m  WARNING: The scripts f2py, f2py3 and f2py3.7 are installed in '/eos/user/s/smetaj/.local/bin' which is not on PATH.\n",
      "  Consider adding this directory to PATH or, if you prefer to suppress this warning, use --no-warn-script-location.\u001b[0m\n",
      "\u001b[33m  WARNING: The script plasma_store is installed in '/eos/user/s/smetaj/.local/bin' which is not on PATH.\n",
      "  Consider adding this directory to PATH or, if you prefer to suppress this warning, use --no-warn-script-location.\u001b[0m\n",
      "\u001b[33m  WARNING: The scripts data_mining and elaborate_scores are installed in '/eos/user/s/smetaj/.local/bin' which is not on PATH.\n",
      "  Consider adding this directory to PATH or, if you prefer to suppress this warning, use --no-warn-script-location.\u001b[0m\n",
      "Successfully installed click cliff cmd2 colorama combo data-analytics dictdiffer elasticsearch importlib-metadata llvmlite matplotlib numba numpy pandas pandasticsearch pyarrow pyod pyperclip pyspark pyyaml requests scikit-learn scipy seaborn setuptools statsmodels stevedore suod threadpoolctl typing-extensions urllib3\n"
     ]
    }
   ],
   "source": [
    "# Set the variable to True the first time to install the libraries.\n",
    "# Note that with @branch you can install a specific branch.\n",
    "\n",
    "first_time = False\n",
    "if first_time:\n",
    "    # Installing the libraries takes a few minutes\n",
    "    # (around 12 minutes for a fresh installation).\n",
    "    !pip install --user git+https://:@gitlab.cern.ch:8443/cloud-infrastructure/data-analytics.git@qa-v0.4"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "abstract-puzzle",
   "metadata": {},
   "source": [
    "## Imports"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "agricultural-discrimination",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2021-05-03T18:35:30.857508Z",
     "start_time": "2021-05-03T18:35:19.489449Z"
    }
   },
   "outputs": [],
   "source": [
    "# AD System Libraries ----------------------------------------------------\n",
    "import adcern.cmd.data_mining as DM\n",
    "import etl.spark_etl.etl_pipeline as PL\n",
    "\n",
    "# To pass command line parameters and to use other functions -------------\n",
    "import sys, os, re, json\n",
    "\n",
    "# To run the visualization function --------------------------------------\n",
    "import pandas as pd\n",
    "import numpy as np\n",
    "from dateutil import tz\n",
    "\n",
    "import matplotlib.pyplot as plt\n",
    "import plotly.graph_objects as go\n",
    "from plotly.subplots import make_subplots\n",
    "\n",
    "# To read more parquet files with pandas ---------------------------------\n",
    "import glob"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "timely-parish",
   "metadata": {},
   "source": [
    "# Init of configuration files - ETL"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "informed-bikini",
   "metadata": {},
   "source": [
    "Our ETL and analysis methods take JSON configuration files as input. In these files we save all the paths, the dates and the hyperparameters that the methods then use.\n",
    "\n",
    "Usually these files are already saved for static tests, or are created in our production pipeline; **for the purpose of this notebook, instead, we create the JSON files starting from a Python dict.**\n",
    "\n",
    "Note that we need 2 config files: \n",
    "- A config file about the training part.\n",
280
    "- A config file about the data used by the trained model to infer the scores. "
   ]
  },
  {
   "cell_type": "markdown",
   "id": "durable-parliament",
   "metadata": {},
   "source": [
    "## Creation\n",
    "Note that the 2 config files share most of the parameters so we will have:\n",
    "- json_data: containing all the main information\n",
    "- json_data_train, json_data_inference: containing specific paths for the 2 different purposes\n",
    "\n",
    "**Note also that you must have write permissions for all the paths listed here; in particular, *HDFS_folder_with_write_rights* should point to your personal HDFS folder (see the sketch below).**\n",
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "laughing-aging",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2021-05-03T18:37:04.581683Z",
     "start_time": "2021-05-03T18:37:04.577078Z"
    }
   },
   "outputs": [],
   "source": [
    "# if you want to run the notebook on more data you can change \n",
    "# this boolean variable to change the dates in the \n",
    "# following configuration json.\n",
    "\n",
    "longer_run_with_more_data = False"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "id": "contrary-thong",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2021-05-03T19:28:54.536642Z",
     "start_time": "2021-05-03T19:28:54.266918Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Folder to save data and results: /user/smetaj/\n",
      "Json files saved.\n"
     ]
    }
   ],
   "source": [
    "demo_name = 'demo_AD_System'\n",
    "json_file_train = 'demo_config_train.json'\n",
    "json_file_inference = 'demo_config_inference.json'\n",
    "username = os.environ['USER']\n",
    "HDFS_folder_with_write_rights = '/user/' + username + '/'\n",
    "\n",
    "print(\"Folder to save data and results: \" + HDFS_folder_with_write_rights)\n",
    "\n",
    "collectd_path = \"/project/monitoring/collectd/\"\n",
    "                                  \n",
    "json_data = {}\n",
    "\n",
    "# Absolute path identifier of the cell/hostgroup that you want to mine.\n",
    "# Note that it is in a list format, but only one hostgroup is supported so far.\n",
    "json_data['hostgroups'] = []\n",
    "json_data['hostgroups'].append('cloud_compute/level2/main/gva_shared_017')\n",
    "\n",
    "# The pattern of the names of your data folders and \".metadata\" files.\n",
    "json_data['code_project_name'] = demo_name\n",
    "\n",
    "# Local area of your VM where to save your data and metadata data are saved in\n",
    "# folders with one parquet only. Metadata are saved in file with the same name\n",
    "# of the resepctive foler plus the \".metadata\" extension.\n",
    "json_data['local_cache_folder'] = '/eos/user/' + username[:1] + \\\n",
    "    '/' + username + '/' + demo_name + '/local_cache_'\n",
    "\n",
    "# HDFS Area where Spark saves the aggregated data of your cell.\n",
    "# Note that the saving can create multiple file depending on the number of\n",
    "# partitions that the workers were using.\n",
    "json_data['hdfs_out_folder'] = HDFS_folder_with_write_rights + \\\n",
    "    demo_name + '/raw_parquet_'\n",
    "\n",
    "# HDFS Area where Spark saves the aggregated data of your cell.\n",
    "# Note that here we force it to be one partiotion only.\n",
    "json_data['hdfs_cache_folder'] = HDFS_folder_with_write_rights + \\\n",
    "    demo_name + '/compressed_'\n",
    "\n",
    "# HDFS Area where Spark saves the normalization coefficients computed on the\n",
    "# normalziation chunk of data between the normalization dates.\n",
    "json_data['normalization_out_folder'] = HDFS_folder_with_write_rights + \\\n",
    "    demo_name + '/normalization/'\n",
    "\n",
    "# ----------------------------------------------------------------------------\n",
    "# ----------------------------------------------------------------------------\n",
    "\n",
    "# Wether you want to overwrite (true) or not (false) the raw data in HDFS.\n",
    "# If not sure leave true.\n",
    "json_data['overwrite_on_hdfs'] = True\n",
    "\n",
    "# Wether you want to overwrite (true) or not (false) the noramlization\n",
    "# coefficeints in HDFS. If not sure leave true.\n",
    "json_data['overwrite_normalization'] = True\n",
    "\n",
    "# The level of aggregation of your raw time series data.\n",
    "# The aggregator is typically the mean operator.\n",
    "# e.g. if 5 it means that we summarize the data every 5 min, and the values\n",
    "# with timestamp 7.45 will represent the mean of the previous 5 minutes from\n",
    "# 7.40 to 7.45 but that value will have 7.45 as timestamp\n",
    "json_data['aggregate_every_n_minutes'] = 10\n",
    "\n",
    "# The length of your windows of data.\n",
    "# e.g. if aggregate_every_n_minutes = 10 and history_steps = 6 it means that\n",
    "# every windows is summarizing 6 * 10 = 60 minutes\n",
    "json_data['history_steps'] = 48\n",
    "\n",
    "# The number of step you want to move your window.\n",
    "# e.g. if aggregate_every_n_minutes = 10 and history_steps = 2 it means that\n",
    "# you will get a window of data that is translated of 10 * 2 = 20 min with\n",
    "# respect to the previous.\n",
    "# Note that if slide_steps has the same value of history_steps you have non-\n",
    "# overlapping windows.\n",
    "json_data['slide_steps'] = 1\n",
    "\n",
    "# Used to create windows with future steps. If not sure keep this to 0.\n",
    "json_data['future_steps'] = 0\n",
    "\n",
    "# ----------------------------------------------------------------------------\n",
    "# ----------------------------------------------------------------------------\n",
    "\n",
    "# Dates representing the start/end of the data and noramlization chunks.\n",
    "# - start_date -> the starting date of data chunk of ETL\n",
    "# - end_date -> the ending date of data chunk of ETL\n",
    "# - start_date_normalization -> the starting date of the chunk of data used\n",
    "#   to learn noramlization coefficeints (typically this chunk preceeds the\n",
    "#   chunk of data)\n",
    "# - end_date_normalization -> the ending date of the chunk of data used\n",
    "#   to learn noramlization coefficeints\n",
    "# Note that the upper extremum is excluded (i.e. data will stop at the 23:59\n",
    "# of the day preeceeding the date_end_excluded)\n",
    "if longer_run_with_more_data:\n",
    "    json_data['date_start'] = \"2021-03-01\"\n",
    "    json_data['date_end_excluded'] = \"2021-03-20\"\n",
    "else:\n",
    "    json_data['date_start'] = \"2021-04-01\"\n",
    "    json_data['date_end_excluded'] = \"2021-04-08\"    \n",
    "json_data['date_start_normalization'] = json_data['date_start']\n",
    "json_data['date_end_normalization_excluded'] = json_data['date_end_excluded']\n",
    "\n",
    "# ----------------------------------------------------------------------------\n",
    "# ----------------------------------------------------------------------------\n",
    "\n",
    "# List of plugins to mine.\n",
    "# Note that it is a dictionary where every key represents the name your plugin\n",
    "# have and the value is a dictionary with:\n",
    "# 'plugin_instance', 'type' 'type_instance', 'plugin_name'\n",
    "# the value asigned to these key is defining an and-filter.\n",
    "# you will get only the data that have all those attributes\n",
    "# ('plugin_instance', 'type' 'type_instance', 'plugin_name') in and with the\n",
    "# specified value.\n",
    "# Note that if you do not want to filter on one attribute do not express it.\n",
    "json_data['selected_plugins'] = {\n",
    "    'cpu__percent_idle': {\n",
    "        'plugin_data_path': collectd_path + 'cpu',\n",
    "        'plugin_filter': \"type == 'percent' and type_instance == 'idle' and plugin == 'cpu'\"\n",
    "    }, \n",
    "    'load_longterm': {\n",
    "        'plugin_data_path': collectd_path + 'load',\n",
    "        'plugin_filter': \"value_instance == 'longterm' and plugin == 'load'\"\n",
    "    },\n",
    "    'disk_io_time': {\n",
    "        'plugin_data_path': collectd_path + 'cloud',\n",
    "        'plugin_filter': \"value_instance == 'io_time' and plugin == 'disk'\"\n",
    "    }\n",
    "}\n",
    "\n",
    "json_data_train = json_data.copy()\n",
    "json_data_train['local_cache_folder'] += 'train/'\n",
    "json_data_train['hdfs_out_folder'] += 'train/'\n",
    "json_data_train['hdfs_cache_folder'] += 'train/'\n",
    "\n",
    "json_data_inference = json_data.copy()\n",
    "json_data_inference['local_cache_folder'] += 'inference/'\n",
    "json_data_inference['hdfs_out_folder'] += 'inference/'\n",
    "json_data_inference['hdfs_cache_folder'] += 'inference/'\n",
    "# The imporant change is that we want to have NON OVERLAPPING windows\n",
    "# in the inference!\n",
    "json_data_inference['slide_steps'] = 48\n",
    "\n",
    "if longer_run_with_more_data:\n",
    "    json_data_inference['date_start'] = \"2021-03-20\"\n",
    "    json_data_inference['date_end_excluded'] = \"2021-04-20\"\n",
    "else:\n",
    "    json_data_inference['date_start'] = \"2021-04-08\"\n",
    "    json_data_inference['date_end_excluded'] = \"2021-04-14\"   \n",
    "\n",
    "with open(json_file_train, 'w') as outfile:\n",
    "    json.dump(json_data_train, outfile, indent=4)\n",
    "\n",
    "with open(json_file_inference, 'w') as outfile:\n",
    "    json.dump(json_data_inference, outfile, indent=4)\n",
    "    \n",
    "print(\"Json files saved.\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "daily-nebraska",
   "metadata": {},
   "source": [
    "## Reading the json"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "distinguished-works",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2021-05-03T18:40:53.541935Z",
     "start_time": "2021-05-03T18:40:53.523494Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "pppppppppppppppppppppppppppppppppppppppppppppppppp\n",
      "\n",
      "RESOURCE DETAILS: demo_config_train.json\n",
      "\n",
      "pppppppppppppppppppppppppppppppppppppppppppppppppp\n",
      "\n",
      "{\n",
      "    \"hostgroups\": [\n",
      "        \"cloud_compute/level2/main/gva_shared_017\"\n",
      "    ],\n",
      "    \"code_project_name\": \"demo_AD_System\",\n",
      "    \"local_cache_folder\": \"/eos/user/s/smetaj/demo_AD_System/local_cache_train/\",\n",
      "    \"hdfs_out_folder\": \"/user/smetaj/demo_AD_System/raw_parquet_train/\",\n",
      "    \"hdfs_cache_folder\": \"/user/smetaj/demo_AD_System/compressed_train/\",\n",
      "    \"normalization_out_folder\": \"/user/smetaj/demo_AD_System/normalization/\",\n",
      "    \"overwrite_on_hdfs\": true,\n",
      "    \"overwrite_normalization\": true,\n",
      "    \"aggregate_every_n_minutes\": 10,\n",
      "    \"history_steps\": 48,\n",
      "    \"slide_steps\": 1,\n",
      "    \"future_steps\": 0,\n",
      "    \"date_start\": \"2021-04-01\",\n",
      "    \"date_end_excluded\": \"2021-04-08\",\n",
      "    \"date_start_normalization\": \"2021-04-01\",\n",
      "    \"date_end_normalization_excluded\": \"2021-04-08\",\n",
      "    \"selected_plugins\": {\n",
      "        \"cpu__percent_idle\": {\n",
      "            \"plugin_data_path\": \"/project/monitoring/collectd/cpu\",\n",
      "            \"plugin_filter\": \"type == 'percent' and type_instance == 'idle' and plugin == 'cpu'\"\n",
      "        },\n",
      "        \"load_longterm\": {\n",
      "            \"plugin_data_path\": \"/project/monitoring/collectd/load\",\n",
      "            \"plugin_filter\": \"value_instance == 'longterm' and plugin == 'load'\"\n",
      "        },\n",
      "        \"disk_io_time\": {\n",
      "            \"plugin_data_path\": \"/project/monitoring/collectd/cloud\",\n",
      "            \"plugin_filter\": \"value_instance == 'io_time' and plugin == 'disk'\"\n",
      "        }\n",
      "    }\n",
      "}\n",
      "\n",
      "pppppppppppppppppppppppppppppppppppppppppppppppppp\n",
      "\n",
      "RESOURCE DETAILS: demo_config_inference.json\n",
      "\n",
      "pppppppppppppppppppppppppppppppppppppppppppppppppp\n",
      "\n",
      "{\n",
      "    \"hostgroups\": [\n",
      "        \"cloud_compute/level2/main/gva_shared_017\"\n",
      "    ],\n",
      "    \"code_project_name\": \"demo_AD_System\",\n",
      "    \"local_cache_folder\": \"/eos/user/s/smetaj/demo_AD_System/local_cache_inference/\",\n",
      "    \"hdfs_out_folder\": \"/user/smetaj/demo_AD_System/raw_parquet_inference/\",\n",
      "    \"hdfs_cache_folder\": \"/user/smetaj/demo_AD_System/compressed_inference/\",\n",
      "    \"normalization_out_folder\": \"/user/smetaj/demo_AD_System/normalization/\",\n",
      "    \"overwrite_on_hdfs\": true,\n",
      "    \"overwrite_normalization\": true,\n",
      "    \"aggregate_every_n_minutes\": 10,\n",
      "    \"history_steps\": 48,\n",
      "    \"slide_steps\": 48,\n",
      "    \"future_steps\": 0,\n",
      "    \"date_start\": \"2021-04-08\",\n",
      "    \"date_end_excluded\": \"2021-04-19\",\n",
      "    \"date_start_normalization\": \"2021-04-01\",\n",
      "    \"date_end_normalization_excluded\": \"2021-04-08\",\n",
      "    \"selected_plugins\": {\n",
      "        \"cpu__percent_idle\": {\n",
      "            \"plugin_data_path\": \"/project/monitoring/collectd/cpu\",\n",
      "            \"plugin_filter\": \"type == 'percent' and type_instance == 'idle' and plugin == 'cpu'\"\n",
      "        },\n",
      "        \"load_longterm\": {\n",
      "            \"plugin_data_path\": \"/project/monitoring/collectd/load\",\n",
      "            \"plugin_filter\": \"value_instance == 'longterm' and plugin == 'load'\"\n",
      "        },\n",
      "        \"disk_io_time\": {\n",
      "            \"plugin_data_path\": \"/project/monitoring/collectd/cloud\",\n",
      "            \"plugin_filter\": \"value_instance == 'io_time' and plugin == 'disk'\"\n",
      "        }\n",
      "    }\n",
      "}\n"
     ]
    }
   ],
   "source": [
    "# Let's use the read_resource function of the data_mining library to make\n",
    "# sure that the format is correct and to print the files.\n",
    "\n",
    "_ = DM.read_resource(resource_file=json_file_train)\n",
    "_ = DM.read_resource(resource_file=json_file_inference)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "monthly-drive",
   "metadata": {},
   "source": [
    "# ETL steps (Extract, Transform, Load)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "curious-instruction",
   "metadata": {},
   "source": [
    "What we want to achieve here is to reproduce the steps to download and normalize the data (following the order in the graph below).\n",
    "\n",
    "![](https://mattermost.web.cern.ch/files/z18wtsurhtfrzr8eumtao5ybmc/public?h=UB1juU5YHeFJopbeSHrYD2QAlM3TxLRHvsihgtFVB-k)\n",
    "\n",
    "For every step, we will call the corresponding function of the data_mining library (in most cases for both JSON files).\n",
    "\n",
    "**Note that we use the click Python library: it permits calling Python functions from the command line, but by default it calls exit(0) when they finish. To avoid the exit(0) problem, just use the standalone_mode=False option, as sketched below.**\n",
    "\n",
    "Moreover, we will skip the first 2 steps of the pipeline shown in the image above (data_presence and check_normalization) because they are used in production pipelines to check for, and avoid, re-processing of already processed time intervals. For the purpose of this example, we will force the reprocessing even if data are already available."
   ]
  },
  {
   "cell_type": "markdown",
   "id": "palestinian-investing",
   "metadata": {},
   "source": [
    "## Compute Normalization"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "preliminary-shade",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2021-05-03T18:47:02.085990Z",
     "start_time": "2021-05-03T18:45:46.333302Z"
    },
    "scrolled": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "pppppppppppppppppppppppppppppppppppppppppppppppppp\n",
      "\n",
      "PREPARING SPARK:\n",
      "\n",
      "pppppppppppppppppppppppppppppppppppppppppppppppppp\n",
      "\n",
      "\n",
      "pppppppppppppppppppppppppppppppppppppppppppppppppp\n",
      "\n",
      "SPARK CONTEXT: <SparkContext master=yarn appName=pyspark_shell_swan>\n",
      "\n",
      "pppppppppppppppppppppppppppppppppppppppppppppppppp\n",
      "\n",
      "\n",
      "pppppppppppppppppppppppppppppppppppppppppppppppppp\n",
      "\n",
      "SPARK OBJECT: <pyspark.sql.session.SparkSession object at 0x7f05fb323510>\n",
      "\n",
      "pppppppppppppppppppppppppppppppppppppppppppppppppp\n",
      "\n",
      "\n",
      "pppppppppppppppppppppppppppppppppppppppppppppppppp\n",
      "\n",
      "RESOURCE DETAILS: demo_config_train.json\n",
      "\n",
      "pppppppppppppppppppppppppppppppppppppppppppppppppp\n",
      "\n",
      "{\n",
      "    \"hostgroups\": [\n",
      "        \"cloud_compute/level2/main/gva_shared_017\"\n",
      "    ],\n",
      "    \"code_project_name\": \"demo_AD_System\",\n",
      "    \"local_cache_folder\": \"/eos/user/s/smetaj/demo_AD_System/local_cache_train/\",\n",
      "    \"hdfs_out_folder\": \"/user/smetaj/demo_AD_System/raw_parquet_train/\",\n",
      "    \"hdfs_cache_folder\": \"/user/smetaj/demo_AD_System/compressed_train/\",\n",
      "    \"normalization_out_folder\": \"/user/smetaj/demo_AD_System/normalization/\",\n",
      "    \"overwrite_on_hdfs\": true,\n",
      "    \"overwrite_normalization\": true,\n",
      "    \"aggregate_every_n_minutes\": 10,\n",
      "    \"history_steps\": 48,\n",
      "    \"slide_steps\": 1,\n",
      "    \"future_steps\": 0,\n",
      "    \"date_start\": \"2021-04-01\",\n",
      "    \"date_end_excluded\": \"2021-04-08\",\n",
      "    \"date_start_normalization\": \"2021-04-01\",\n",
      "    \"date_end_normalization_excluded\": \"2021-04-08\",\n",
      "    \"selected_plugins\": {\n",
      "        \"cpu__percent_idle\": {\n",
      "            \"plugin_data_path\": \"/project/monitoring/collectd/cpu\",\n",
      "            \"plugin_filter\": \"type == 'percent' and type_instance == 'idle' and plugin == 'cpu'\"\n",
      "        },\n",
      "        \"load_longterm\": {\n",
      "            \"plugin_data_path\": \"/project/monitoring/collectd/load\",\n",
      "            \"plugin_filter\": \"value_instance == 'longterm' and plugin == 'load'\"\n",
      "        },\n",
      "        \"disk_io_time\": {\n",
      "            \"plugin_data_path\": \"/project/monitoring/collectd/cloud\",\n",
      "            \"plugin_filter\": \"value_instance == 'io_time' and plugin == 'disk'\"\n",
      "        }\n",
      "    }\n",
      "}\n",
      "\n",
      "pppppppppppppppppppppppppppppppppppppppppppppppppp\n",
      "\n",
      "COMPUTE NORMALIZATION COEFFICIENTS:\n",
      "\n",
      "pppppppppppppppppppppppppppppppppppppppppppppppppp\n",
      "\n",
      "Start: (2021, 4, 1) - End (2021, 4, 7)\n",
      "{'plugin_data_path': '/project/monitoring/collectd/cpu', 'plugin_filter': \"type == 'percent' and type_instance == 'idle' and plugin == 'cpu'\"}\n",
      "{'plugin_data_path': '/project/monitoring/collectd/load', 'plugin_filter': \"value_instance == 'longterm' and plugin == 'load'\"}\n",
      "{'plugin_data_path': '/project/monitoring/collectd/cloud', 'plugin_filter': \"value_instance == 'io_time' and plugin == 'disk'\"}\n",
      "/project/monitoring/collectd/cloud/2021/04/01/\n",
      "/project/monitoring/collectd/cloud/2021/04/02/\n",
      "/project/monitoring/collectd/cloud/2021/04/03/\n",
      "/project/monitoring/collectd/cloud/2021/04/04/\n",
      "/project/monitoring/collectd/cloud/2021/04/05/\n",
      "/project/monitoring/collectd/cloud/2021/04/06/\n",
      "/project/monitoring/collectd/cloud/2021/04/07/\n",
      "/project/monitoring/collectd/cpu/2021/04/01/\n",
      "/project/monitoring/collectd/cpu/2021/04/02/\n",
      "/project/monitoring/collectd/cpu/2021/04/03/\n",
      "/project/monitoring/collectd/cpu/2021/04/04/\n",
      "/project/monitoring/collectd/cpu/2021/04/05/\n",
      "/project/monitoring/collectd/cpu/2021/04/06/\n",
      "/project/monitoring/collectd/cpu/2021/04/07/\n",
      "/project/monitoring/collectd/load/2021/04/01/\n",
      "/project/monitoring/collectd/load/2021/04/02/\n",
      "/project/monitoring/collectd/load/2021/04/03/\n",
      "/project/monitoring/collectd/load/2021/04/04/\n",
      "/project/monitoring/collectd/load/2021/04/05/\n",
      "/project/monitoring/collectd/load/2021/04/06/\n",
      "/project/monitoring/collectd/load/2021/04/07/\n",
      "filter_str:  ( type == 'percent' and type_instance == 'idle' and plugin == 'cpu' ) or ( value_instance == 'longterm' and plugin == 'load' ) or ( value_instance == 'io_time' and plugin == 'disk' )\n",
      "NEW id_norm:  0a8f66\n",
      "Normalization Saved successfully\n",
      "Coefficient preparation shared:  0\n",
      "\n",
      "pppppppppppppppppppppppppppppppppppppppppppppppppp\n",
      "\n",
      "SUCCESS (inspect the first 40 rows):\n",
      "\n",
      "pppppppppppppppppppppppppppppppppppppppppppppppppp\n",
      "\n",
      "                                  hostgroup             plugin    mean  stddev\n",
      "0  cloud_compute/level2/main/gva_shared_017  cpu__percent_idle  76.387  16.884\n",
      "1  cloud_compute/level2/main/gva_shared_017      load_longterm   0.269   0.253\n",
      "2  cloud_compute/level2/main/gva_shared_017       disk_io_time  56.666  71.620\n"
     ]
    }
   ],
   "source": [
    "# We must first download the normalization coefficients that we will use\n",
    "# to normalize the real data that we will download later.\n",
    "\n",
    "# Note that the function will use the same path for both train and\n",
    "# inference (because the hostgroup, the dates and the plugins are the same).\n",
    "\n",
    "sys.argv = ['', '--resource_file', json_file_train]\n",
    "DM.compute_normalization(standalone_mode=False)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "proud-conspiracy",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2021-04-15T08:08:23.318591Z",
     "start_time": "2021-04-15T08:08:23.267079Z"
    }
   },
   "source": [
    "## Transform Data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "excessive-family",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2021-05-03T18:48:27.814673Z",
     "start_time": "2021-05-03T18:47:02.090004Z"
    },
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "pppppppppppppppppppppppppppppppppppppppppppppppppp\n",
      "\n",
      "PREPARING SPARK:\n",
      "\n",
      "pppppppppppppppppppppppppppppppppppppppppppppppppp\n",
      "\n",
      "\n",
      "pppppppppppppppppppppppppppppppppppppppppppppppppp\n",
      "\n",
      "SPARK CONTEXT: <SparkContext master=yarn appName=pyspark_shell_swan>\n",
      "\n",
      "pppppppppppppppppppppppppppppppppppppppppppppppppp\n",
      "\n",
      "\n",
      "pppppppppppppppppppppppppppppppppppppppppppppppppp\n",
      "\n",
      "SPARK OBJECT: <pyspark.sql.session.SparkSession object at 0x7f05fb414f10>\n",
      "\n",
      "pppppppppppppppppppppppppppppppppppppppppppppppppp\n",
      "\n",
      "\n",
      "pppppppppppppppppppppppppppppppppppppppppppppppppp\n",
      "\n",
      "RESOURCE DETAILS: demo_config_train.json\n",
      "\n",
      "pppppppppppppppppppppppppppppppppppppppppppppppppp\n",
      "\n",
      "{\n",
      "    \"hostgroups\": [\n",
      "        \"cloud_compute/level2/main/gva_shared_017\"\n",
      "    ],\n",
      "    \"code_project_name\": \"demo_AD_System\",\n",
      "    \"local_cache_folder\": \"/eos/user/s/smetaj/demo_AD_System/local_cache_train/\",\n",
      "    \"hdfs_out_folder\": \"/user/smetaj/demo_AD_System/raw_parquet_train/\",\n",
      "    \"hdfs_cache_folder\": \"/user/smetaj/demo_AD_System/compressed_train/\",\n",
      "    \"normalization_out_folder\": \"/user/smetaj/demo_AD_System/normalization/\",\n",
      "    \"overwrite_on_hdfs\": true,\n",
      "    \"overwrite_normalization\": true,\n",
      "    \"aggregate_every_n_minutes\": 10,\n",
      "    \"history_steps\": 48,\n",
      "    \"slide_steps\": 1,\n",
      "    \"future_steps\": 0,\n",
      "    \"date_start\": \"2021-04-01\",\n",
      "    \"date_end_excluded\": \"2021-04-08\",\n",
      "    \"date_start_normalization\": \"2021-04-01\",\n",
      "    \"date_end_normalization_excluded\": \"2021-04-08\",\n",
      "    \"selected_plugins\": {\n",
      "        \"cpu__percent_idle\": {\n",
      "            \"plugin_data_path\": \"/project/monitoring/collectd/cpu\",\n",
      "            \"plugin_filter\": \"type == 'percent' and type_instance == 'idle' and plugin == 'cpu'\"\n",
      "        },\n",
      "        \"load_longterm\": {\n",
      "            \"plugin_data_path\": \"/project/monitoring/collectd/load\",\n",
      "            \"plugin_filter\": \"value_instance == 'longterm' and plugin == 'load'\"\n",
      "        },\n",
      "        \"disk_io_time\": {\n",
      "            \"plugin_data_path\": \"/project/monitoring/collectd/cloud\",\n",
      "            \"plugin_filter\": \"value_instance == 'io_time' and plugin == 'disk'\"\n",
      "        }\n",
      "    }\n",
      "}\n",
      "\n",
      "pppppppppppppppppppppppppppppppppppppppppppppppppp\n",
      "\n",
      "DOWNLOAD DATA - LONG MINING PROCESS...\n",
      "\n",
      "pppppppppppppppppppppppppppppppppppppppppppppppppp\n",
      "\n",
      "Start: (2021, 4, 1) - End (2021, 4, 7)\n",
      "Reading normalization:  0a8f66\n",
      "{'plugin_data_path': '/project/monitoring/collectd/cpu', 'plugin_filter': \"type == 'percent' and type_instance == 'idle' and plugin == 'cpu'\"}\n",
      "{'plugin_data_path': '/project/monitoring/collectd/load', 'plugin_filter': \"value_instance == 'longterm' and plugin == 'load'\"}\n",
      "{'plugin_data_path': '/project/monitoring/collectd/cloud', 'plugin_filter': \"value_instance == 'io_time' and plugin == 'disk'\"}\n",
      "/project/monitoring/collectd/cloud/2021/04/01/\n",
      "/project/monitoring/collectd/cloud/2021/04/02/\n",
      "/project/monitoring/collectd/cloud/2021/04/03/\n",
      "/project/monitoring/collectd/cloud/2021/04/04/\n",
      "/project/monitoring/collectd/cloud/2021/04/05/\n",
      "/project/monitoring/collectd/cloud/2021/04/06/\n",
      "/project/monitoring/collectd/cloud/2021/04/07/\n",
      "/project/monitoring/collectd/cpu/2021/04/01/\n",
      "/project/monitoring/collectd/cpu/2021/04/02/\n",
      "/project/monitoring/collectd/cpu/2021/04/03/\n",
      "/project/monitoring/collectd/cpu/2021/04/04/\n",
      "/project/monitoring/collectd/cpu/2021/04/05/\n",
      "/project/monitoring/collectd/cpu/2021/04/06/\n",
      "/project/monitoring/collectd/cpu/2021/04/07/\n",
      "/project/monitoring/collectd/load/2021/04/01/\n",
      "/project/monitoring/collectd/load/2021/04/02/\n",
      "/project/monitoring/collectd/load/2021/04/03/\n",
      "/project/monitoring/collectd/load/2021/04/04/\n",
      "/project/monitoring/collectd/load/2021/04/05/\n",
      "/project/monitoring/collectd/load/2021/04/06/\n",
      "/project/monitoring/collectd/load/2021/04/07/\n",
      "filter_str:  ( type == 'percent' and type_instance == 'idle' and plugin == 'cpu' ) or ( value_instance == 'longterm' and plugin == 'load' ) or ( value_instance == 'io_time' and plugin == 'disk' )\n",
      "['cpu__percent_idle', 'load_longterm', 'disk_io_time']\n",
      "Deleting any previous/old remainders in /user/smetaj/demo_AD_System/raw_parquet_train//demo_AD_System ...\n",
      "Error while deleting directory:  [Errno 2] No such file or directory: '/user/smetaj/demo_AD_System/raw_parquet_train//demo_AD_System'\n",
      "Saving the big guy (window dataframe) in: /user/smetaj/demo_AD_System/raw_parquet_train//demo_AD_System ..\n",
      "Saved successfully: /user/smetaj/demo_AD_System/raw_parquet_train//demo_AD_System\n",
      "\n",
      "pppppppppppppppppppppppppppppppppppppppppppppppppp\n",
      "\n",
      "SUCCESS - DATA AGGREGATED IN HDFS.\n",
      "\n",
      "pppppppppppppppppppppppppppppppppppppppppppppppppp\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# Here we download the data that we will normalize using the coefficients \n",
919
    "# produced by the previous step.\n",
920
921
922
923
924
    "\n",
    "# Note that we have to download both the train and the inference datasets,\n",
    "# so we will have 2 different download_data calls.\n",
    "    \n",
    "sys.argv = ['', '--resource_file', json_file_train]\n",
    "DM.transform_data(standalone_mode=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "id": "composite-electron",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2021-05-03T19:31:32.804102Z",
     "start_time": "2021-05-03T19:29:24.608877Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "pppppppppppppppppppppppppppppppppppppppppppppppppp\n",
      "\n",
      "PREPARING SPARK:\n",
      "\n",
      "pppppppppppppppppppppppppppppppppppppppppppppppppp\n",
      "\n",
      "\n",
      "pppppppppppppppppppppppppppppppppppppppppppppppppp\n",
      "\n",
      "SPARK CONTEXT: <SparkContext master=yarn appName=pyspark_shell_swan>\n",
      "\n",
      "pppppppppppppppppppppppppppppppppppppppppppppppppp\n",
      "\n",
      "\n",
      "pppppppppppppppppppppppppppppppppppppppppppppppppp\n",
      "\n",
      "SPARK OBJECT: <pyspark.sql.session.SparkSession object at 0x7f064a5f4e50>\n",
      "\n",
      "pppppppppppppppppppppppppppppppppppppppppppppppppp\n",
      "\n",
      "\n",
      "pppppppppppppppppppppppppppppppppppppppppppppppppp\n",
      "\n",
      "RESOURCE DETAILS: demo_config_inference.json\n",
      "\n",
      "pppppppppppppppppppppppppppppppppppppppppppppppppp\n",
      "\n",
      "{\n",
      "    \"hostgroups\": [\n",
      "        \"cloud_compute/level2/main/gva_shared_017\"\n",
      "    ],\n",
      "    \"code_project_name\": \"demo_AD_System\",\n",
      "    \"local_cache_folder\": \"/eos/user/s/smetaj/demo_AD_System/local_cache_inference/\",\n",
      "    \"hdfs_out_folder\": \"/user/smetaj/demo_AD_System/raw_parquet_inference/\",\n",
      "    \"hdfs_cache_folder\": \"/user/smetaj/demo_AD_System/compressed_inference/\",\n",
      "    \"normalization_out_folder\": \"/user/smetaj/demo_AD_System/normalization/\",\n",
      "    \"overwrite_on_hdfs\": true,\n",
      "    \"overwrite_normalization\": true,\n",
      "    \"aggregate_every_n_minutes\": 10,\n",
      "    \"history_steps\": 48,\n",
      "    \"slide_steps\": 48,\n",
      "    \"future_steps\": 0,\n",
      "    \"date_start\": \"2021-04-08\",\n",
      "    \"date_end_excluded\": \"2021-04-14\",\n",
      "    \"date_start_normalization\": \"2021-04-01\",\n",
      "    \"date_end_normalization_excluded\": \"2021-04-08\",\n",
      "    \"selected_plugins\": {\n",
      "        \"cpu__percent_idle\": {\n",
      "            \"plugin_data_path\": \"/project/monitoring/collectd/cpu\",\n",
      "            \"plugin_filter\": \"type == 'percent' and type_instance == 'idle' and plugin == 'cpu'\"\n",
      "        },\n",
      "        \"load_longterm\": {\n",
      "            \"plugin_data_path\": \"/project/monitoring/collectd/load\",\n",
      "            \"plugin_filter\": \"value_instance == 'longterm' and plugin == 'load'\"\n",
      "        },\n",
      "        \"disk_io_time\": {\n",
      "            \"plugin_data_path\": \"/project/monitoring/collectd/cloud\",\n",
      "            \"plugin_filter\": \"value_instance == 'io_time' and plugin == 'disk'\"\n",
      "        }\n",
      "    }\n",
      "}\n",
      "\n",
      "pppppppppppppppppppppppppppppppppppppppppppppppppp\n",
      "\n",
      "DOWNLOAD DATA - LONG MINING PROCESS...\n",
      "\n",
      "pppppppppppppppppppppppppppppppppppppppppppppppppp\n",
      "\n",
      "Start: (2021, 4, 8) - End (2021, 4, 13)\n",
      "Reading normalization:  0a8f66\n",
      "{'plugin_data_path': '/project/monitoring/collectd/cpu', 'plugin_filter': \"type == 'percent' and type_instance == 'idle' and plugin == 'cpu'\"}\n",
      "{'plugin_data_path': '/project/monitoring/collectd/load', 'plugin_filter': \"value_instance == 'longterm' and plugin == 'load'\"}\n",
      "{'plugin_data_path': '/project/monitoring/collectd/cloud', 'plugin_filter': \"value_instance == 'io_time' and plugin == 'disk'\"}\n",
      "/project/monitoring/collectd/cloud/2021/04/08/\n",
      "/project/monitoring/collectd/cloud/2021/04/09/\n",
      "/project/monitoring/collectd/cloud/2021/04/10/\n",
      "/project/monitoring/collectd/cloud/2021/04/11/\n",
      "/project/monitoring/collectd/cloud/2021/04/12/\n",
      "/project/monitoring/collectd/cloud/2021/04/13/\n",
      "/project/monitoring/collectd/cpu/2021/04/08/\n",
      "/project/monitoring/collectd/cpu/2021/04/09/\n",
      "/project/monitoring/collectd/cpu/2021/04/10/\n",
      "/project/monitoring/collectd/cpu/2021/04/11/\n",
      "/project/monitoring/collectd/cpu/2021/04/12/\n",
      "/project/monitoring/collectd/cpu/2021/04/13/\n",
      "/project/monitoring/collectd/load/2021/04/08/\n",
      "/project/monitoring/collectd/load/2021/04/09/\n",
      "/project/monitoring/collectd/load/2021/04/10/\n",
      "/project/monitoring/collectd/load/2021/04/11/\n",
      "/project/monitoring/collectd/load/2021/04/12/\n",
      "/project/monitoring/collectd/load/2021/04/13/\n",
      "filter_str:  ( type == 'percent' and type_instance == 'idle' and plugin == 'cpu' ) or ( value_instance == 'longterm' and plugin == 'load' ) or ( value_instance == 'io_time' and plugin == 'disk' )\n",
      "['cpu__percent_idle', 'load_longterm', 'disk_io_time']\n",
      "Deleting any previous/old remainders in /user/smetaj/demo_AD_System/raw_parquet_inference//demo_AD_System ...\n",
      "Error while deleting directory:  [Errno 2] No such file or directory: '/user/smetaj/demo_AD_System/raw_parquet_inference//demo_AD_System'\n",
      "Saving the big guy (window dataframe) in: /user/smetaj/demo_AD_System/raw_parquet_inference//demo_AD_System ..\n",
      "Saved successfully: /user/smetaj/demo_AD_System/raw_parquet_inference//demo_AD_System\n",
      "\n",
      "pppppppppppppppppppppppppppppppppppppppppppppppppp\n",
      "\n",
      "SUCCESS - DATA AGGREGATED IN HDFS.\n",
      "\n",
      "pppppppppppppppppppppppppppppppppppppppppppppppppp\n",
      "\n"
     ]
    }
   ],
   "source": [
    "sys.argv = ['', '--resource_file', json_file_inference]\n",
    "DM.transform_data(standalone_mode=False)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "liberal-scotland",
   "metadata": {},
   "source": [
    "## Copy Locally"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "executive-native",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2021-05-03T18:50:13.991759Z",
     "start_time": "2021-05-03T18:49:47.942324Z"
    },
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "pppppppppppppppppppppppppppppppppppppppppppppppppp\n",
      "\n",
      "PREPARING SPARK:\n",
      "\n",
      "pppppppppppppppppppppppppppppppppppppppppppppppppp\n",
      "\n",
      "\n",
      "pppppppppppppppppppppppppppppppppppppppppppppppppp\n",
      "\n",
      "SPARK CONTEXT: <SparkContext master=yarn appName=pyspark_shell_swan>\n",
      "\n",
      "pppppppppppppppppppppppppppppppppppppppppppppppppp\n",
      "\n",
      "\n",
      "pppppppppppppppppppppppppppppppppppppppppppppppppp\n",
      "\n",
      "SPARK OBJECT: <pyspark.sql.session.SparkSession object at 0x7f05fb2f5b10>\n",
      "\n",
      "pppppppppppppppppppppppppppppppppppppppppppppppppp\n",
      "\n",
      "\n",
      "pppppppppppppppppppppppppppppppppppppppppppppppppp\n",
      "\n",
      "RESOURCE DETAILS: demo_config_train.json\n",
      "\n",
      "pppppppppppppppppppppppppppppppppppppppppppppppppp\n",
      "\n",
      "{\n",
      "    \"hostgroups\": [\n",
      "        \"cloud_compute/level2/main/gva_shared_017\"\n",
      "    ],\n",
      "    \"code_project_name\": \"demo_AD_System\",\n",
      "    \"local_cache_folder\": \"/eos/user/s/smetaj/demo_AD_System/local_cache_train/\",\n",
      "    \"hdfs_out_folder\": \"/user/smetaj/demo_AD_System/raw_parquet_train/\",\n",
      "    \"hdfs_cache_folder\": \"/user/smetaj/demo_AD_System/compressed_train/\",\n",
      "    \"normalization_out_folder\": \"/user/smetaj/demo_AD_System/normalization/\",\n",
      "    \"overwrite_on_hdfs\": true,\n",
      "    \"overwrite_normalization\": true,\n",
      "    \"aggregate_every_n_minutes\": 10,\n",
      "    \"history_steps\": 48,\n",
      "    \"slide_steps\": 1,\n",
      "    \"future_steps\": 0,\n",
      "    \"date_start\": \"2021-04-01\",\n",
      "    \"date_end_excluded\": \"2021-04-08\",\n",
      "    \"date_start_normalization\": \"2021-04-01\",\n",
      "    \"date_end_normalization_excluded\": \"2021-04-08\",\n",
      "    \"selected_plugins\": {\n",
      "        \"cpu__percent_idle\": {\n",
      "            \"plugin_data_path\": \"/project/monitoring/collectd/cpu\",\n",
      "            \"plugin_filter\": \"type == 'percent' and type_instance == 'idle' and plugin == 'cpu'\"\n",
      "        },\n",
      "        \"load_longterm\": {\n",
      "            \"plugin_data_path\": \"/project/monitoring/collectd/load\",\n",
      "            \"plugin_filter\": \"value_instance == 'longterm' and plugin == 'load'\"\n",
      "        },\n",
      "        \"disk_io_time\": {\n",
      "            \"plugin_data_path\": \"/project/monitoring/collectd/cloud\",\n",
      "            \"plugin_filter\": \"value_instance == 'io_time' and plugin == 'disk'\"\n",
      "        }\n",
      "    }\n",
      "}\n",
      "\n",
      "pppppppppppppppppppppppppppppppppppppppppppppppppp\n",
      "\n",
      "CACHE NEW DATA:\n",
      "\n",
      "pppppppppppppppppppppppppppppppppppppppppppppppppp\n",
      "\n",
      "ASSUMPTION: no data was in cache before and if anything is there, it will be deleted.\n",
      "Deleting any old remainders...\n",
      "\n",
      "pppppppppppppppppppppppppppppppppppppppppppppppppp\n",
      "\n",
      "CACHE CREATION\n",
      "\n",
      "pppppppppppppppppppppppppppppppppppppppppppppppppp\n",
      "\n",
      "1617228000\n",
      "SUCCESS\n",
      "Deleting the raw data saved in /user/smetaj/demo_AD_System/raw_parquet_train/demo_AD_System ...\n",
      "Save the config file locally: /eos/user/s/smetaj/demo_AD_System/local_cache_train/demo_AD_System.metadata\n",
      "\n",
      "pppppppppppppppppppppppppppppppppppppppppppppppppp\n",
      "\n",
      "SUCCESS - CACHED DATA AVAILABLE LOCALLY\n",
      "\n",
      "pppppppppppppppppppppppppppppppppppppppppppppppppp\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# The final step of the ETL procedure is to save the HDFS downloaded \n",
    "# files into EOS, in this way the data are easier to be accessed and used.\n",
    "\n",
    "# As always we must do the same for both the datasets.\n",
    "\n",
    "sys.argv = ['', '--resource_file', json_file_train]\n",
    "DM.copy_locally(standalone_mode=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "id": "amber-product",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2021-05-03T19:31:43.239907Z",
     "start_time": "2021-05-03T19:31:32.810420Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "pppppppppppppppppppppppppppppppppppppppppppppppppp\n",
      "\n",
      "PREPARING SPARK:\n",
      "\n",
      "pppppppppppppppppppppppppppppppppppppppppppppppppp\n",
      "\n",
      "\n",
      "pppppppppppppppppppppppppppppppppppppppppppppppppp\n",
      "\n",
      "SPARK CONTEXT: <SparkContext master=yarn appName=pyspark_shell_swan>\n",
      "\n",
      "pppppppppppppppppppppppppppppppppppppppppppppppppp\n",
      "\n",
      "\n",
      "pppppppppppppppppppppppppppppppppppppppppppppppppp\n",
      "\n",
      "SPARK OBJECT: <pyspark.sql.session.SparkSession object at 0x7f05745ae950>\n",
      "\n",
      "pppppppppppppppppppppppppppppppppppppppppppppppppp\n",
      "\n",
      "\n",
      "pppppppppppppppppppppppppppppppppppppppppppppppppp\n",
      "\n",
      "RESOURCE DETAILS: demo_config_inference.json\n",
      "\n",
      "pppppppppppppppppppppppppppppppppppppppppppppppppp\n",
      "\n",
      "{\n",
      "    \"hostgroups\": [\n",
      "        \"cloud_compute/level2/main/gva_shared_017\"\n",
      "    ],\n",
      "    \"code_project_name\": \"demo_AD_System\",\n",
      "    \"local_cache_folder\": \"/eos/user/s/smetaj/demo_AD_System/local_cache_inference/\",\n",
      "    \"hdfs_out_folder\": \"/user/smetaj/demo_AD_System/raw_parquet_inference/\",\n",
      "    \"hdfs_cache_folder\": \"/user/smetaj/demo_AD_System/compressed_inference/\",\n",
      "    \"normalization_out_folder\": \"/user/smetaj/demo_AD_System/normalization/\",\n",
      "    \"overwrite_on_hdfs\": true,\n",
      "    \"overwrite_normalization\": true,\n",
      "    \"aggregate_every_n_minutes\": 10,\n",
      "    \"history_steps\": 48,\n",
      "    \"slide_steps\": 48,\n",
      "    \"future_steps\": 0,\n",
      "    \"date_start\": \"2021-04-08\",\n",
      "    \"date_end_excluded\": \"2021-04-14\",\n",
      "    \"date_start_normalization\": \"2021-04-01\",\n",
      "    \"date_end_normalization_excluded\": \"2021-04-08\",\n",
      "    \"selected_plugins\": {\n",
      "        \"cpu__percent_idle\": {\n",
      "            \"plugin_data_path\": \"/project/monitoring/collectd/cpu\",\n",
      "            \"plugin_filter\": \"type == 'percent' and type_instance == 'idle' and plugin == 'cpu'\"\n",
      "        },\n",
      "        \"load_longterm\": {\n",
      "            \"plugin_data_path\": \"/project/monitoring/collectd/load\",\n",
      "            \"plugin_filter\": \"value_instance == 'longterm' and plugin == 'load'\"\n",
      "        },\n",
      "        \"disk_io_time\": {\n",
      "            \"plugin_data_path\": \"/project/monitoring/collectd/cloud\",\n",
      "            \"plugin_filter\": \"value_instance == 'io_time' and plugin == 'disk'\"\n",
      "        }\n",
      "    }\n",
      "}\n",
      "\n",
      "pppppppppppppppppppppppppppppppppppppppppppppppppp\n",
      "\n",
      "CACHE NEW DATA:\n",
      "\n",
      "pppppppppppppppppppppppppppppppppppppppppppppppppp\n",
      "\n",
      "ASSUMPTION: no data was in cache before and if anything is there, it will be deleted.\n",
      "Deleting any old remainders...\n",
      "\n",
      "pppppppppppppppppppppppppppppppppppppppppppppppppp\n",
      "\n",
      "CACHE CREATION\n",
      "\n",
      "pppppppppppppppppppppppppppppppppppppppppppppppppp\n",
      "\n",
      "1617832800\n",
      "SUCCESS\n",
      "Deleting the raw data saved in /user/smetaj/demo_AD_System/raw_parquet_inference/demo_AD_System ...\n",
      "Save the config file locally: /eos/user/s/smetaj/demo_AD_System/local_cache_inference/demo_AD_System.metadata\n",
      "\n",
      "pppppppppppppppppppppppppppppppppppppppppppppppppp\n",
      "\n",
      "SUCCESS - CACHED DATA AVAILABLE LOCALLY\n",
      "\n",
      "pppppppppppppppppppppppppppppppppppppppppppppppppp\n",
      "\n"
     ]
    }
   ],
   "source": [
    "sys.argv = ['', '--resource_file', json_file_inference]\n",
    "DM.copy_locally(standalone_mode=False)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "clinical-puppy",
   "metadata": {},
   "source": [
    "# Visualization of downloaded time series"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "loved-victoria",
   "metadata": {},
   "source": [
    "## Reading time series with pandas and host definition"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "id": "positive-people",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2021-05-03T19:31:43.591850Z",
     "start_time": "2021-05-03T19:31:43.246446Z"
    }
   },
   "outputs": [],
   "source": [
    "# Now that we have downloaded the data, we also want some tools to\n",
    "# visualize the time series.\n",
    "\n",
    "# local_path_train and local_path_inference point to our data saved in EOS\n",
    "local_path_train = json_data[\"local_cache_folder\"] + 'train/' + json_data[\"code_project_name\"]\n",
    "local_path_inference = json_data[\"local_cache_folder\"] + 'inference/' + json_data[\"code_project_name\"]\n",
    "\n",
    "# nr_timeseries will be equal to the number of plugins that we have downloaded\n",
    "nr_timeseries = len(json_data[\"selected_plugins\"])\n",
    "\n",
    "# finally df will be the pandas dataframe containing the data\n",
1333
1334
1335
    "# (it is eaisier to print them with pandas)\n",
    "df_train = pd.read_parquet(local_path_train)\n",
    "df_inference = pd.read_parquet(local_path_inference)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "id": "relevant-barbados",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2021-05-03T19:31:43.631050Z",
     "start_time": "2021-05-03T19:31:43.597536Z"
    }
   },
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>timestamp</th>\n",
       "      <th>hostname</th>\n",
       "      <th>hostgroup</th>\n",
       "      <th>cpu__percent_idle_h0</th>\n",
       "      <th>cpu__percent_idle_h1</th>\n",
       "      <th>cpu__percent_idle_h2</th>\n",
       "      <th>cpu__percent_idle_h3</th>\n",
       "      <th>cpu__percent_idle_h4</th>\n",
       "      <th>cpu__percent_idle_h5</th>\n",
       "      <th>cpu__percent_idle_h6</th>\n",
       "      <th>...</th>\n",
       "      <th>load_longterm_h39</th>\n",
       "      <th>load_longterm_h40</th>\n",
       "      <th>load_longterm_h41</th>\n",
       "      <th>load_longterm_h42</th>\n",
       "      <th>load_longterm_h43</th>\n",
       "      <th>load_longterm_h44</th>\n",
       "      <th>load_longterm_h45</th>\n",
       "      <th>load_longterm_h46</th>\n",
       "      <th>load_longterm_h47</th>\n",
       "      <th>ts</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>1618063200</td>\n",
       "      <td>p06492044x27120.cern.ch</td>\n",
       "      <td>cloud_compute/level2/main/gva_shared_017</td>\n",
       "      <td>-0.215828</td>\n",
       "      <td>-0.051549</td>\n",
       "      <td>-0.346833</td>\n",
       "      <td>-0.495070</td>\n",
       "      <td>0.012011</td>\n",
       "      <td>-0.048024</td>\n",
       "      <td>-0.049717</td>\n",
       "      <td>...</td>\n",
       "      <td>0.889130</td>\n",
       "      <td>1.045257</td>\n",
       "      <td>1.454985</td>\n",
       "      <td>1.672815</td>\n",
       "      <td>1.432148</td>\n",
       "      <td>1.741700</td>\n",
       "      <td>2.517194</td>\n",
       "      <td>2.421937</td>\n",
       "      <td>1.307202</td>\n",
       "      <td>2021-04-10 16:00:00</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>1618034400</td>\n",
       "      <td>p06492044s46921.cern.ch</td>\n",
       "      <td>cloud_compute/level2/main/gva_shared_017</td>\n",
       "      <td>0.345407</td>\n",
       "      <td>0.192619</td>\n",
       "      <td>0.194089</td>\n",
       "      <td>0.131184</td>\n",
       "      <td>0.289174</td>\n",
       "      <td>0.194735</td>\n",
       "      <td>0.251950</td>\n",
       "      <td>...</td>\n",
       "      <td>-0.290733</td>\n",
       "      <td>-0.306522</td>\n",
       "      <td>-0.267896</td>\n",
       "      <td>-0.259140</td>\n",
       "      <td>-0.253557</td>\n",
       "      <td>-0.181621</td>\n",
       "      <td>-0.111413</td>\n",
       "      <td>-0.103557</td>\n",
       "      <td>-0.121400</td>\n",
       "      <td>2021-04-10 08:00:00</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>1618092000</td>\n",
       "      <td>p06428644v93101.cern.ch</td>\n",
       "      <td>cloud_compute/level2/main/gva_shared_017</td>\n",
       "      <td>0.790763</td>\n",
       "      <td>0.820482</td>\n",
       "      <td>0.706678</td>\n",
       "      <td>0.763613</td>\n",
       "      <td>0.742577</td>\n",
       "      <td>0.671884</td>\n",
       "      <td>0.776097</td>\n",
       "      <td>...</td>\n",
       "      <td>-0.621739</td>\n",
       "      <td>-0.657115</td>\n",
       "      <td>-0.627470</td>\n",
       "      <td>-0.652833</td>\n",
       "      <td>-0.633289</td>\n",
       "      <td>-0.578854</td>\n",
       "      <td>-0.541722</td>\n",
       "      <td>-0.539526</td>\n",
       "      <td>-0.663812</td>\n",
       "      <td>2021-04-11 00:00:00</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "<p>3 rows × 148 columns</p>\n",
       "</div>"
      ],
      "text/plain": [
       "    timestamp                 hostname  \\\n",
       "0  1618063200  p06492044x27120.cern.ch   \n",
       "1  1618034400  p06492044s46921.cern.ch   \n",
       "2  1618092000  p06428644v93101.cern.ch   \n",
       "\n",
       "                                  hostgroup  cpu__percent_idle_h0  \\\n",
       "0  cloud_compute/level2/main/gva_shared_017             -0.215828   \n",
       "1  cloud_compute/level2/main/gva_shared_017              0.345407   \n",
       "2  cloud_compute/level2/main/gva_shared_017              0.790763   \n",
       "\n",
       "   cpu__percent_idle_h1  cpu__percent_idle_h2  cpu__percent_idle_h3  \\\n",
       "0             -0.051549             -0.346833             -0.495070   \n",
       "1              0.192619              0.194089              0.131184   \n",
       "2              0.820482              0.706678              0.763613   \n",
       "\n",
       "   cpu__percent_idle_h4  cpu__percent_idle_h5  cpu__percent_idle_h6  ...  \\\n",
       "0              0.012011             -0.048024             -0.049717  ...   \n",
       "1              0.289174              0.194735              0.251950  ...   \n",
       "2              0.742577              0.671884              0.776097  ...   \n",
       "\n",
       "   load_longterm_h39  load_longterm_h40  load_longterm_h41  load_longterm_h42  \\\n",
       "0           0.889130           1.045257           1.454985           1.672815   \n",
       "1          -0.290733          -0.306522          -0.267896          -0.259140   \n",
       "2          -0.621739          -0.657115          -0.627470          -0.652833   \n",
       "\n",
       "   load_longterm_h43  load_longterm_h44  load_longterm_h45  load_longterm_h46  \\\n",
       "0           1.432148           1.741700           2.517194           2.421937   \n",
       "1          -0.253557          -0.181621          -0.111413          -0.103557   \n",
       "2          -0.633289          -0.578854          -0.541722          -0.539526   \n",
       "\n",
       "   load_longterm_h47                   ts  \n",
       "0           1.307202  2021-04-10 16:00:00  \n",
       "1          -0.121400  2021-04-10 08:00:00  \n",
       "2          -0.663812  2021-04-11 00:00:00  \n",
       "\n",
       "[3 rows x 148 columns]"
      ]
     },
     "execution_count": 34,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Note that each row of df will be a window of data following the granularity definition in the configuration file. \n",
    "# In this example it consists of 48 points representing 8 hours of time interval with an aggregation of 10 minutes.\n",
smetaj's avatar
smetaj committed
1518
1519
1520
    "# So for every day, for every hostname, we will have 3 rows (3 x 8hours = 24),\n",
    "# each row will be composed by timestamp, hostname, hostgroup, ts and\n",
    "# for each plugin we will have 48 columns, 1 for each 10 minutes.\n",
    "df_inference.head(3)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "id": "innocent-percentage",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2021-05-03T19:31:43.642493Z",
     "start_time": "2021-05-03T19:31:43.637265Z"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "['p06428644w45688.cern.ch',\n",
       " 'p06428644x11005.cern.ch',\n",
       " 'p06428644v24034.cern.ch',\n",
       " 'p06428644r01940.cern.ch',\n",
       " 'p06492044e17935.cern.ch']"
      ]
     },