{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "<h1><center>Analysis of a QH discharge in an RB Circuit</center></h1>"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 0. Initialise Working Environment"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# External libraries\n",
    "print('Loading (1/13)'); import sys; import pandas as pd; import warnings\n",
    "print('Loading (2/13)'); from multiprocessing import Pool\n",
    "print('Loading (3/13)'); from IPython.display import display, HTML, Javascript, clear_output\n",
    "\n",
    "# Internal libraries\n",
    "print('Loading (4/13)'); import lhcsmapi\n",
    "print('Loading (5/13)'); from lhcsmapi.Time import Time\n",
    "print('Loading (6/13)'); from lhcsmapi.Timer import Timer\n",
    "print('Loading (7/13)'); from lhcsmapi.analysis.RbCircuitQuery import RbCircuitQuery\n",
    "print('Loading (8/13)'); from lhcsmapi.analysis.qh.QuenchHeaterAnalysis import analyze_single_qh, plot_qh_discharge_hwc, calculate_rb_qh_feature_row\n",
    "print('Loading (9/13)'); from lhcsmapi.analysis.expert_input import check_show_next, get_expert_decision\n",
    "print('Loading (10/13)'); from lhcsmapi.analysis.report_template import apply_report_template\n",
    "print('Loading (11/13)'); from lhcsmapi.gui.DateTimeBaseModule import DateTimeBaseModule\n",
    "print('Loading (12/13)'); from lhcsmapi.gui.qh.QhPmSearchModuleMediator import QhPmSearchModuleMediator\n",
    "print('Loading (13/13)'); from lhcsmapi.gui.hwc.HwcSearchModuleMediator import HwcSearchModuleMediator\n",
    "\n",
    "clear_output()\n",
    "lhcsmapi.get_lhcsmapi_version()\n",
    "lhcsmapi.get_lhcsmhwc_version('../__init__.py')\n",
    "print('Analysis performed by %s' % HwcSearchModuleMediator.get_user())"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 1. Find QH Post Mortem Entries"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# Select the time window and circuit type for which QH PM events are searched\n",
    "circuit_type = 'RB'\n",
    "qh_pm_search = QhPmSearchModuleMediator(DateTimeBaseModule(start_date_time='2021-02-07 00:00:00+01:00', end_date_time=Time.to_string(Time.now())), circuit_type=circuit_type)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 2. Query All Signals Prior to Analysis"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": false,
    "tags": [
     "skip_output"
    ]
   },
   "outputs": [],
   "source": [
    "from lhcsmapi.pyedsl.QueryBuilder import QueryBuilder\n",
    "from lhcsmapi.reference.Reference import Reference\n",
    "\n",
    "def query_qh_parallel(input_param):\n",
    "    # Query U_HDS/I_HDS PM signals for one (source, timestamp) pair.\n",
    "    # When is_ref is True, the matching reference discharge timestamp is\n",
    "    # looked up first and queried instead of the requested event.\n",
    "    source, timestamp, is_ref = input_param\n",
    "    if is_ref:\n",
    "        timestamp = Reference.get_quench_heater_reference_discharge('RB', source, timestamp)\n",
    "\n",
    "    u_hds_dfs = QueryBuilder().with_pm() \\\n",
    "        .with_timestamp(timestamp) \\\n",
    "        .with_circuit_type('RB') \\\n",
    "        .with_metadata(circuit_name=qh_pm_search.get_circuit_name(), system='QH', signal=['U_HDS', 'I_HDS'], source=source,\n",
    "                       wildcard={'CELL': source}) \\\n",
    "        .signal_query() \\\n",
    "        .synchronize_time(timestamp) \\\n",
    "        .convert_index_to_sec() \\\n",
    "        .drop_first_npoints(5) \\\n",
    "        .drop_last_npoints(5).dfs\n",
    "\n",
    "    print('Done: %s: %d' % (source, timestamp))\n",
    "\n",
    "    return source, timestamp, u_hds_dfs\n",
    "\n",
    "with Timer():\n",
    "    circuit_name = qh_pm_search.get_circuit_name()\n",
    "    input_params = list(zip(qh_pm_search.source_timestamp_df['source'].values, qh_pm_search.source_timestamp_df['timestamp'].values, [False]*len(qh_pm_search.source_timestamp_df)))\n",
    "    print('Querying requested QH events...')\n",
    "    with Pool(processes=8) as pool:\n",
    "        qh_source_timestamp_dfs = pool.map(query_qh_parallel, input_params)\n",
    "        \n",
    "    input_params = list(zip(qh_pm_search.source_timestamp_df['source'].values, qh_pm_search.source_timestamp_df['timestamp'].values, [True]*len(qh_pm_search.source_timestamp_df)))   \n",
    "    print('Querying reference QH events...')\n",
    "    with Pool(processes=8) as pool:\n",
    "        qh_source_timestamp_ref_dfs = pool.map(query_qh_parallel, input_params)\n",
    "        \n",
    "    # Index the query results by (source, timestamp) for fast lookup during analysis\n",
    "    qh_source_timestamp_dfs_dct = {(source, timestamp): dfs for (source, timestamp, dfs) in qh_source_timestamp_dfs}\n",
    "    qh_source_timestamp_ref_dfs_dct = {(source, timestamp_ref): dfs for (source, timestamp_ref, dfs) in qh_source_timestamp_ref_dfs}"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 3. Quench Protection System\n",
    "## 3.1. Analysis of Quench Heater Discharges\n",
    "\n",
    "*CRITERIA*:\n",
    "- all characteristic times of an exponential decay calculated with the 'charge' approach for voltage and current are +/- 3 ms from the reference ones\n",
    "- all initial resistances are +/- 0.5 Ohm from the reference ones\n",
    "- all initial voltages are between 780 and 980 V\n",
    "- all final voltages are between 15 and 70 V\n",
    "\n",
    "*PLOT*:  \n",
    "\n",
    "t = 0 s corresponds to the start of the pseudo-exponential decay.  \n",
    "Line for actual signal is continuous and dashed for the reference.\n",
    "\n",
    "Left plot (Voltage view)\n",
    "- the queried and filtered quench heater voltage on the left axis, U_HDS\n",
    "\n",
    "Middle plot (Current view)\n",
    "- the queried and filtered quench heater current on the left axis, I_HDS\n",
    "\n",
    "Bottom plot (Resistance view)\n",
    "- the calculated quench heater resistance on the left axis, R_HDS\n",
    "\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 3.2. Display QH Discharge Table"
   ]
  },
158
159
  {
   "cell_type": "code",
thbuffet's avatar
thbuffet committed
160
   "execution_count": null,
161
162
163
   "metadata": {
    "scrolled": false
   },
thbuffet's avatar
thbuffet committed
164
   "outputs": [],
165
   "source": [
166
    "index_max = len(qh_pm_search.source_timestamp_df) - 1\n",
167
    "\n",
168
    "feature_row_dfs = []\n",
169
170
171
172
    "qh_pm_search.source_timestamp_df['acceptance'] = float('nan')\n",
    "qh_pm_search.source_timestamp_df['datetime'] = float('nan')\n",
    "qh_pm_search.source_timestamp_df['link_to_analysis'] = float('nan')\n",
    "for index, row in qh_pm_search.source_timestamp_df.iterrows():\n",
173
174
175
176
    "    source = row['source']\n",
    "    timestamp = row['timestamp']\n",
    "    \n",
    "    print('{}/{}: Analysing quench heater signals of {} on {}, {}'.format(index, index_max, source, Time.to_string_short(timestamp), timestamp))\n",
177
    "    display(HTML('<a id=' + source + str(timestamp) + '>' + source +  '</a>, <a href=#Table>Back to Table</a>'))\n",
178
179
    "    \n",
    "    # Get matching voltage and current\n",
180
181
    "    u_hds_dfs = list(filter(lambda col: 'U_HDS' in col.columns.values[0], qh_source_timestamp_dfs_dct[(source, timestamp)]))\n",
    "    i_hds_dfs = list(filter(lambda col: 'I_HDS' in col.columns.values[0], qh_source_timestamp_dfs_dct[(source, timestamp)]))\n",
182
183
    "    \n",
    "    # Get matching reference voltage and current\n",
thbuffet's avatar
thbuffet committed
184
    "    timestamp_ref = Reference.get_quench_heater_reference_discharge('RB', source, timestamp)\n",
185
186
    "    u_hds_ref_dfs = list(filter(lambda col: 'U_HDS' in col.columns.values[0], qh_source_timestamp_ref_dfs_dct[(source, timestamp_ref)]))\n",
    "    i_hds_ref_dfs = list(filter(lambda col: 'I_HDS' in col.columns.values[0], qh_source_timestamp_ref_dfs_dct[(source, timestamp_ref)]))\n",
187
    "    \n",
188
    "    try:\n",
Michal Maciejewski's avatar
Michal Maciejewski committed
189
190
191
    "        nominal_voltage = qh_pm_search.get_discharge_level()\n",
    "        mean_start_value = 15 if nominal_voltage < 450 else 50\n",
    "        is_qh_correct = analyze_single_qh(timestamp, source, u_hds_dfs, i_hds_dfs, u_hds_ref_dfs, i_hds_ref_dfs, plot_qh_discharge=plot_qh_discharge_hwc, mean_start_value=mean_start_value, current_offset=0.085, nominal_voltage=nominal_voltage)\n",
192
    "\n",
193
194
195
196
    "        acceptance = 'Pass' if is_qh_correct else 'Fail'\n",
    "    except Exception:\n",
    "        acceptance = 'Fail'\n",
    "        warnings.warn('Analysis failed for source %s, timestamp %d, %s' % (source, timestamp, Time.to_string_short(timestamp)))\n",
197
    "    \n",
Michal Maciejewski's avatar
Michal Maciejewski committed
198
    "    feature_row_df = calculate_rb_qh_feature_row(u_hds_dfs, i_hds_dfs, timestamp, current_offset=0.025, mean_start_value=mean_start_value)\n",
199
200
201
202
    "    feature_row_df['source'] = source\n",
    "    feature_row_df['timestamp'] = timestamp\n",
    "    feature_row_dfs.append(feature_row_df)\n",
    "    \n",
203
204
    "    print('QH assessment is:', acceptance)\n",
    "    qh_pm_search.source_timestamp_df.loc[index, 'acceptance'] = acceptance\n",
205
206
    "    qh_pm_search.source_timestamp_df.loc[index, 'datetime'] = Time.to_string_short(timestamp)\n",
    "    qh_pm_search.source_timestamp_df.loc[index, 'link_to_analysis'] = '<a href=\"#'+ source + str(timestamp) + '\">' + source + '</a>'\n",
207
208
209
    "    \n",
    "    # Show next\n",
    "    check_show_next(index, index_max, is_automatic=qh_pm_search.is_automatic_mode())"
210
211
   ]
  },
212
213
  {
   "cell_type": "code",
214
   "execution_count": null,
215
   "metadata": {},
216
   "outputs": [],
217
   "source": [
218
219
    "feature_rows_df = pd.concat(feature_row_dfs)\n",
    "source_timestamp_feature_df = qh_pm_search.source_timestamp_df.merge(feature_rows_df, on=['source', 'timestamp'], how='left')"
220
221
222
223
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": false
   },
   "outputs": [],
   "source": [
    "# Render the summary table; rows whose acceptance is 'Fail' are highlighted in red\n",
    "display(HTML('<a id=Table>Table</a>'))\n",
    "source_timestamp_feature_df.style.apply(lambda x: [\"background: red\" if v == 'Fail' else \"\" for v in x], axis = 1)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 4. Signature Decision"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Ask the expert to sign off the analysis; the decision is embedded in the report file name\n",
    "signature = get_expert_decision('Expert Signature Decision: ', ['PASSED', 'FAILED'])"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 5. Final Report"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "if not qh_pm_search.source_timestamp_df.empty:\n",
    "    analysis_start_time = Time.get_analysis_start_time()\n",
    "    date_time_qh = Time.to_datetime(qh_pm_search.source_timestamp_df.loc[0, 'timestamp']).strftime(\"%Y-%m-%d-%Hh%M\")\n",
    "\n",
    "    file_name = \"{}_QHDA-{}-{}_{}\".format(circuit_name, date_time_qh, analysis_start_time, signature)\n",
    "    file_name_html = \"{}.html\".format(file_name)\n",
    "\n",
    "    apply_report_template()\n",
    "    !mkdir -p /eos/project/m/mp3/RB/$circuit_name/QHDA\n",
    "    \n",
    "    csv_full_path = '/eos/project/m/mp3/RB/{}/QHDA/{}.csv'.format(circuit_name, file_name)\n",
    "    # Wrap the 'Cell' column in a DataFrame: a bare Series cannot take new\n",
    "    # columns via item assignment (matches the reference-export cell in section 6).\n",
    "    cell_datetime_timestamp = pd.DataFrame(source_timestamp_feature_df.rename(columns={'source': 'Cell'})['Cell'])\n",
    "    cell_datetime_timestamp['Timestamp String'] = source_timestamp_feature_df['timestamp'].apply(lambda col: Time.to_string_short(col, n_dec_digits=9))\n",
    "    cell_datetime_timestamp['Timestamp'] = source_timestamp_feature_df['timestamp']\n",
    "    cell_datetime_timestamp.to_csv(csv_full_path, index=False)\n",
    "    print('Analysis results table saved to (Windows): ' + '\\\\\\\\cernbox-smb' + csv_full_path.replace('/', '\\\\'))\n",
    "\n",
    "    full_path = '/eos/project/m/mp3/RB/{}/QHDA/{}'.format(circuit_name, file_name_html)\n",
    "    print('Compact notebook report saved to (Windows): ' + '\\\\\\\\cernbox-smb' + full_path.replace('/', '\\\\'))\n",
    "    display(Javascript('IPython.notebook.save_notebook();'))\n",
    "    Time.sleep(5)\n",
    "    !{sys.executable} -m jupyter nbconvert --to html $'HWC_RB_QHDA.ipynb' --output /eos/project/m/mp3/RB/$circuit_name/QHDA/$file_name_html --TemplateExporter.exclude_input=True --TagRemovePreprocessor.remove_all_outputs_tags='[\"skip_output\"]' --TagRemovePreprocessor.remove_cell_tags='[\"skip_cell\"]'"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 6. Save Timestamps in Reference Format (if update needed)"
   ]
  },
294
295
296
297
298
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
299
300
301
   "source": [
    "file_name = \"{}_QHDA-{}-{}_Reference\".format(circuit_name, date_time_qh, analysis_start_time)\n",
    "csv_full_path = '/eos/project/m/mp3/RB/{}/QHDA/{}.csv'.format(circuit_name, file_name)\n",
302
    "cell_datetime_timestamp = pd.DataFrame(qh_pm_search.source_timestamp_df.rename(columns={'source': 'Cell'})['Cell'])\n",
303
304
305
306
307
    "cell_datetime_timestamp['Timestamp String'] = source_timestamp_feature_df['timestamp'].apply(lambda col: Time.to_string_short(col, n_dec_digits=9))\n",
    "cell_datetime_timestamp['Timestamp'] = source_timestamp_feature_df['timestamp']\n",
    "cell_datetime_timestamp.to_csv(csv_full_path, index=False)\n",
    "print('Analysis results table saved to (Windows): ' + '\\\\\\\\cernbox-smb' + csv_full_path.replace('/', '\\\\'))"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.5"
  },
  "sparkconnect": {
   "bundled_options": [],
   "list_of_options": []
  },
  "toc": {
   "base_numbering": 1,
   "nav_menu": {},
   "number_sections": false,
   "sideBar": false,
   "skip_h1_title": false,
   "title_cell": "Table of Contents",
   "title_sidebar": "Contents",
   "toc_cell": false,
   "toc_position": {},
   "toc_section_display": true,
   "toc_window_display": false
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}