From de85f56a53c6f09e43cfcad20eb2abe2d7f35119 Mon Sep 17 00:00:00 2001
From: Mykhailo Dalchenko <mykhailo.dalchenko@cern.ch>
Date: Fri, 29 May 2020 13:37:21 +0200
Subject: [PATCH 1/5] Bring the code into compliance with PEP8

- Line length is kept under 99 characters. This is more than PEP8
recommends, but it is still reasonable: in this shape, all files can be
viewed side by side in vim's vertical split mode on my 13" laptop.
- Unused loop variables are changed to '_'
- The following warnings have been disabled where appropriate (see the
sketch after this list):
  - absence of module docstring
  - too many attributes
  - attribute defined outside of __init__
  - multiple statements on a single line
- Warnings related to TODO, FIXME and XXX are kept
- The code duplication warning is still present but irreducible: since it
is not possible to disable it for just one method, it is left as is (a
possible follow-up is sketched after the linter report below).
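
The typical shape of these changes is sketched below on an invented
example class (schematic Python only; the class and field names are not
taken from any gdh module):

    # pylint: disable=missing-module-docstring
    # Schematic example of the patterns applied in this patch.

    class ExampleBlock:
        """Example block carrying many format-definition attributes."""

        # pylint: disable=too-many-instance-attributes

        def __init__(self):
            self.header_fields = {'EvN': 'u24', 'BX N': 'u12'}
            self.trailer_fields = {'CRC': 'u16'}
            self.slot = 0
            self.link = 0
            self.pos = 0

        def skip_words(self, stream, n_words):
            """Advance the stream position; the loop index itself is unused."""
            for _ in range(n_words):  # '_' replaces an otherwise unused 'i'
                stream.pos += 1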

Linter report:

************* Module gdh.unpacker
gdh/unpacker.py:41:2: W0511: TODO read first word and determine the source type instead of asking to pass (fixme)
************* Module gdh.formats.vfat
gdh/formats/vfat.py:49:2: W0511: FIXME change to BX for uniformity? (fixme)
************* Module gdh.formats.geb
gdh/formats/geb.py:30:2: W0511: TODO implement zero suppression payload types (fixme)
gdh/formats/geb.py:1:0: R0801: Similar lines in 2 files
==gdh.formats.cdf:52
==gdh.formats.geb:62
    def compile(self):
        """
        Compile self header and trailer fields (one 64-bit word each)
        """
        self.header_compiled = bs.compile(
            ''.join(list(self.header_fields.values())),
            list(self.header_fields.keys()))
        self.trailer_compiled = bs.compile(
            ''.join(list(self.trailer_fields.values())),
            list(self.trailer_fields.keys()))

    def unpack(self, stream):
        """
        Unpack CDF block. Unpacking sequence:

        * CDF header
        * AMC13 block
        * CDF trailer

        :param IndexedNDArray stream: An array of 64-bits data words

        :return dict: Dictionary in form ``{Column name tuple: Value}``
        """ (duplicate-code)

------------------------------------------------------------------
Your code has been rated at 9.82/10
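
If the duplication ever needs to be removed, one possible follow-up
(explicitly not done in this patch) would be a small shared helper;
compile_fields below is an invented name, and the call simply mirrors
the existing bs.compile usage in cdf.py and geb.py:

    # Sketch only, not part of this patch.
    import cbitstruct as bs

    def compile_fields(fields):
        """Compile a {name: format} field dict into a cbitstruct compiled format."""
        return bs.compile(''.join(fields.values()), list(fields.keys()))

Placed on GenericBlock (e.g. as a staticmethod), descendants could then
write self.header_compiled = self.compile_fields(self.header_fields),
and the duplicated compile() bodies would collapse to two such calls each.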
---
 gdh/formats/amc.py           | 97 +++++++++++++++++++++++-------------
 gdh/formats/amc13.py         | 69 ++++++++++++++++++-------
 gdh/formats/cdf.py           | 31 ++++++++----
 gdh/formats/geb.py           | 46 +++++++++++------
 gdh/formats/generic_block.py | 59 ++++++++++++++--------
 gdh/formats/vfat.py          | 24 ++++++---
 gdh/unpacker.py              | 37 ++++++++------
 gdh/utils.py                 | 13 ++++-
 8 files changed, 250 insertions(+), 126 deletions(-)

diff --git a/gdh/formats/amc.py b/gdh/formats/amc.py
index 42eeaba..9d5d89d 100644
--- a/gdh/formats/amc.py
+++ b/gdh/formats/amc.py
@@ -1,18 +1,26 @@
+# pylint: disable=missing-module-docstring
+# This module contains only one class which is properly documented
+
 import cbitstruct as bs
 from gdh.formats.generic_block import GenericBlock
 from gdh.formats.geb import GEB
 
 class AMC(GenericBlock):
     """
-    GEM AMC block. 
-    See detailed data format `description. <https://docs.google.com/spreadsheets/d/1iKMl68vMsSWgr8ekVdsJYTk7-Pgy8paxq98xj6GtoVo/edit?usp=sharing>`_
+    GEM AMC block.
+    See detailed data format `description. <https://docs.google.com/
+    spreadsheets/d/1iKMl68vMsSWgr8ekVdsJYTk7-Pgy8paxq98xj6GtoVo/edit?usp=sharing>`_
 
-        .. todo :: 
+        .. todo ::
             provide detailed description here
     """
+
+    # pylint: disable=too-many-instance-attributes
+
     def __init__(self):
         """
-        Default constructor. Creates child ``GEB()`` and set self block id to 'AMC'
+        Default constructor. Creates child ``GEB()``
+        and set self block id to 'AMC'
         """
         super().__init__()
         self.slot = 0
@@ -21,10 +29,11 @@ class AMC(GenericBlock):
 
     def setup(self):
         """
-        Define self header and trailer fields. 
+        Define self header and trailer fields.
 
             .. note ::
-                See data format description and method implementation for details since the format is data-driven.
+                See data format description and method implementation for
+                details since the format is data-driven.
                 Variable format options are implemented as tuple.
         """
         # first 72 bits of 192-long header
@@ -36,7 +45,7 @@ class AMC(GenericBlock):
 
         self.header_2_start_fields = {'FV'      :'u4',
                                       'RUN TYPE':'u4'}
-    
+
         # 24 bits optional run parameters
         self.run_parameters = ({'RP:PAD'          :'u24'},#Transition
                                {'RP:PAD'          :'u11', #Physics
@@ -58,7 +67,7 @@ class AMC(GenericBlock):
                                 'RP:CAL DAC'      :'u8',
                                 'RP:PULSE STRETCH':'u3',
                                 'RP:THR ARM DAC'  :'u10'})
-    
+
         # last 96 bits of header
         self.header_2_end_fields = {'OR N'    :'u16',
                                     'BOARD ID':'u16'}
@@ -69,7 +78,7 @@ class AMC(GenericBlock):
                                 'PAYLOAD VAR' :'u3',
                                 'PAYLOAD TYPE':'u4',
                                 'TTS'         :'u4'}
-    
+
         # 128 bit trailer
         self.trailer_1_fields = {'LINK TO'  :'u24',
                                  'OOS'      :'u1',
@@ -86,31 +95,44 @@ class AMC(GenericBlock):
                                  'PAD3'     :'u4',
                                  'DATA LGTH':'u20'}
 
-    def assemble_header_2(self, rp):
+    def assemble_header_2(self, run_parameter):
         """
         Assemble second header of the AMC block based on the run type
         """
         tmp = {}
         tmp.clear()
         tmp.update(self.header_2_start_fields)
-        tmp.update(rp)
+        tmp.update(run_parameter)
         tmp.update(self.header_2_end_fields)
-        #self.logger.debug("Assembled header 2 is: %s"%tmp)
+        self.logger.debug("Assembled header 2 is: %s", tmp)
         return tmp
 
     def compile(self):
         """
-        Compile self header and trailer fields (three 64-bit word header and two 64-bit word trailer)
+        Compile self header and trailer fields
+        (three 64-bit word header and two 64-bit word trailer)
         """
-        self.header_1_compiled = bs.compile(''.join(list(self.header_1_fields.values())), list(self.header_1_fields.keys()))
-        self.header_2_start_compiled = bs.compile(''.join(list(self.header_2_start_fields.values())), list(self.header_2_start_fields.keys()))
+        self.header_1_compiled = bs.compile(
+            ''.join(list(self.header_1_fields.values())),
+            list(self.header_1_fields.keys()))
+        self.header_2_start_compiled = bs.compile(
+            ''.join(list(self.header_2_start_fields.values())),
+            list(self.header_2_start_fields.keys()))
         # assemble 5 options of header 2 and compile
         header_2_array = [self.assemble_header_2(rp) for rp in self.run_parameters]
-        #self.logger.debug("Header 2 array: %s"%header_2_array)
-        self.header_2_array_compiled = tuple(bs.compile(''.join(list(header2.values())), list(header2.keys())) for header2 in header_2_array) 
-        self.header_3_compiled = bs.compile(''.join(list(self.header_3_fields.values())), list(self.header_3_fields.keys()))
-        self.trailer_1_compiled = bs.compile(''.join(list(self.trailer_1_fields.values())), list(self.trailer_1_fields.keys()))
-        self.trailer_2_compiled = bs.compile(''.join(list(self.trailer_2_fields.values())), list(self.trailer_2_fields.keys()))
+        self.logger.debug("Header 2 array: %s", header_2_array)
+        self.header_2_array_compiled = tuple(bs.compile(
+            ''.join(list(header2.values())),
+            list(header2.keys())) for header2 in header_2_array)
+        self.header_3_compiled = bs.compile(
+            ''.join(list(self.header_3_fields.values())),
+            list(self.header_3_fields.keys()))
+        self.trailer_1_compiled = bs.compile(
+            ''.join(list(self.trailer_1_fields.values())),
+            list(self.trailer_1_fields.keys()))
+        self.trailer_2_compiled = bs.compile(
+            ''.join(list(self.trailer_2_fields.values())),
+            list(self.trailer_2_fields.keys()))
 
     def unpack(self, stream):
         """
@@ -119,32 +141,37 @@ class AMC(GenericBlock):
         * AMC headers (building second header on the fly)
         * GEB blocks. Use "DAV_LIST" to determine the active optical links
         * AMC trailers
-        
+
         :param IndexedNDArray stream: An array of 64-bits data words
 
         :return dict: Dictionary in form ``{Column name tuple: Value}``
         """
-        event = {self.update_key(k, 'HEADER'): v 
-            for k, v in self.unpack_word(stream, self.header_1_compiled).items()}
+        event = {self.update_key(k, 'HEADER'): v
+                 for k, v in
+                 self.unpack_word(stream, self.header_1_compiled).items()}
         temp_dict = self.unpack_word(stream, self.header_2_start_compiled)
         stream.pos = stream.pos - 1
         run_type = temp_dict['RUN TYPE']
-        #self.logger.debug("Run type: %d" %run_type)
-        event.update({self.update_key(k, 'HEADER'): v 
-            for k, v in self.unpack_word(stream, self.header_2_array_compiled[run_type]).items()})
-        event.update({self.update_key(k, 'HEADER'): v 
-            for k, v in self.unpack_word(stream, self.header_3_compiled).items()})
+        self.logger.debug("Run type: %d", run_type)
+        event.update({self.update_key(k, 'HEADER'): v
+                      for k, v in
+                      self.unpack_word(stream, self.header_2_array_compiled[run_type]).items()})
+        event.update({self.update_key(k, 'HEADER'): v
+                      for k, v in
+                      self.unpack_word(stream, self.header_3_compiled).items()})
         # Retrieve active optical links and retreieve payloads
         dav_list = event[('DAV LIST', self.block_id, 'HEADER', self.slot, self.link, self.pos)]
         for link in range(24):
-            if ((dav_list >> link) & 0x1):
+            if (dav_list >> link) & 0x1:
                 self.geb.slot = self.slot
                 self.geb.link = link
                 event.update(self.geb.unpack(stream))
-        #self.logger.debug("all GEBs payload reading done. Stream position %d" %(stream.pos))
-        event.update({self.update_key(k, 'TRAILER'): v 
-            for k, v in self.unpack_word(stream, self.trailer_1_compiled).items()})
-        event.update({self.update_key(k, 'TRAILER'): v 
-            for k, v in self.unpack_word(stream, self.trailer_2_compiled).items()})
-        #self.logger.debug("AMC:%d payload reading done. Stream position %d" %(self.slot, stream.pos))
+        self.logger.debug("all GEBs payload reading done. Stream position %d", stream.pos)
+        event.update({self.update_key(k, 'TRAILER'): v
+                      for k, v in
+                      self.unpack_word(stream, self.trailer_1_compiled).items()})
+        event.update({self.update_key(k, 'TRAILER'): v
+                      for k, v in
+                      self.unpack_word(stream, self.trailer_2_compiled).items()})
+        self.logger.debug("AMC:%d payload reading done. Stream position %d", self.slot, stream.pos)
         return event
diff --git a/gdh/formats/amc13.py b/gdh/formats/amc13.py
index 804c637..125d1e9 100644
--- a/gdh/formats/amc13.py
+++ b/gdh/formats/amc13.py
@@ -1,3 +1,6 @@
+# pylint: disable=missing-module-docstring
+# This module contains only one class which is properly documented
+
 import cbitstruct as bs
 from gdh.formats.generic_block import GenericBlock
 from gdh.formats.amc import AMC
@@ -5,14 +8,19 @@ from gdh.formats.amc import AMC
 class AMC13(GenericBlock):
     """
     CMS AMC13 data format block.
-    See detailed data format `description. <https://docs.google.com/spreadsheets/d/1iKMl68vMsSWgr8ekVdsJYTk7-Pgy8paxq98xj6GtoVo/edit?usp=sharing>`_
+    See detailed data format `description. <https://docs.google.com/
+    spreadsheets/d/1iKMl68vMsSWgr8ekVdsJYTk7-Pgy8paxq98xj6GtoVo/edit?usp=sharing>`_
 
-        .. todo :: 
+        .. todo ::
             provide detailed description here
     """
+
+    # pylint: disable=too-many-instance-attributes
+
     def __init__(self):
         """
-        Default constructor. Creates child ``AMC()`` and set self block id to 'AMC13'
+        Default constructor.
+        Creates child ``AMC()`` and set self block id to 'AMC13'
         """
         super().__init__()
         self.amc = AMC()
@@ -20,7 +28,8 @@ class AMC13(GenericBlock):
 
     def setup(self):
         """
-        Define self header and trailer fields (two 64-bit word header and one 64-bit word trailer)
+        Define self header and trailer fields
+        (two 64-bit word header and one 64-bit word trailer)
         """
         self.header_fields = {'uFOV'    :'u4',
                               'CAL TYPE':'u4',
@@ -52,11 +61,18 @@ class AMC13(GenericBlock):
 
     def compile(self):
         """
-        Compile self header and trailer fields (two 64-bit word header and one 64-bit word trailer)
+        Compile self header and trailer fields
+        (two 64-bit word header and one 64-bit word trailer)
         """
-        self.header_compiled = bs.compile(''.join(list(self.header_fields.values())), list(self.header_fields.keys()))
-        self.amc_header_compiled = bs.compile(''.join(list(self.amc_header_fields.values())), list(self.amc_header_fields.keys()))
-        self.trailer_compiled = bs.compile(''.join(list(self.trailer_fields.values())), list(self.trailer_fields.keys()))
+        self.header_compiled = bs.compile(
+            ''.join(list(self.header_fields.values())),
+            list(self.header_fields.keys()))
+        self.amc_header_compiled = bs.compile(
+            ''.join(list(self.amc_header_fields.values())),
+            list(self.amc_header_fields.keys()))
+        self.trailer_compiled = bs.compile(
+            ''.join(list(self.trailer_fields.values())),
+            list(self.trailer_fields.keys()))
 
     def unpack(self, stream):
         """
@@ -66,24 +82,41 @@ class AMC13(GenericBlock):
         * AMC13::AMC header fields (read their number from AMC13 header)
         * AMC blocks
         * AMC13trailer
-        
+
         :param IndexedNDArray stream: An array of 64-bits data words
 
         :return dict: Dictionary in form ``{Column name tuple: Value}``
         """
-        event = {self.update_key(k, 'HEADER'): v for k, v in self.unpack_word(stream, self.header_compiled).items()}
+        event = {self.update_key(k, 'HEADER'): v
+                 for k, v in
+                 self.unpack_word(stream, self.header_compiled).items()}
         # Loop over AMC13:H:N AMC headers
-        for i in range(event[('N AMC', self.block_id, 'HEADER', self.slot, self.link, self.pos)]):
+        for _ in range(event[('N AMC',
+                              self.block_id,
+                              'HEADER',
+                              self.slot,
+                              self.link,
+                              self.pos)]):
             # NOTE AMC number ranges from 0 to (N AMC-1). Shall add "+1"?
             temp_amc_dict = self.unpack_word(stream, self.amc_header_compiled)
-            temp_amc_dict_2 = {f'{k[:4]}{temp_amc_dict["AMC:$:AMC N"]}{k[5:]}': v for k, v in temp_amc_dict.items()}
-            event.update({self.update_key(k): v for k, v in temp_amc_dict_2.items()})
+            temp_amc_dict_2 = {f'{k[:4]}{temp_amc_dict["AMC:$:AMC N"]}{k[5:]}': v
+                               for k, v in temp_amc_dict.items()}
+            event.update({self.update_key(k): v
+                          for k, v in temp_amc_dict_2.items()})
 
-        for i in range(event[('N AMC', self.block_id, 'HEADER', self.slot, self.link, self.pos)]):
+        for _ in range(event[('N AMC',
+                              self.block_id,
+                              'HEADER',
+                              self.slot,
+                              self.link,
+                              self.pos)]):
             self.amc.slot = temp_amc_dict['AMC:$:AMC N']
             event.update(self.amc.unpack(stream))
-        #self.logger.debug("all AMCs payload reading done. Stream position %d" %(stream.pos))
-        event.update({self.update_key(k,'TRAILER'): v for k, v in self.unpack_word(stream, self.trailer_compiled).items()})
-        #self.logger.debug("AMC13 payload reading done. Stream position %d" %(stream.pos))
+        self.logger.debug("all AMCs payload reading done. Stream position %d",
+                          stream.pos)
+        event.update({self.update_key(k, 'TRAILER'): v
+                      for k, v in
+                      self.unpack_word(stream, self.trailer_compiled).items()})
+        self.logger.debug("AMC13 payload reading done. Stream position %d",
+                          stream.pos)
         return event
-
diff --git a/gdh/formats/cdf.py b/gdh/formats/cdf.py
index 4155dca..dc19a3a 100644
--- a/gdh/formats/cdf.py
+++ b/gdh/formats/cdf.py
@@ -1,15 +1,18 @@
+# pylint: disable=missing-module-docstring
+# This module contains only one class which is properly documented
+
 import cbitstruct as bs
 from gdh.formats.generic_block import GenericBlock
 from gdh.formats.amc13 import AMC13
-import pandas as pd
 
 
 class CDF(GenericBlock):
     """
-    CMS Common Data Format block. 
-    See detailed data format `description. <https://docs.google.com/spreadsheets/d/1iKMl68vMsSWgr8ekVdsJYTk7-Pgy8paxq98xj6GtoVo/edit?usp=sharing>`_
+    CMS Common Data Format block.
+    See detailed data format `description. <https://docs.google.com/
+    spreadsheets/d/1iKMl68vMsSWgr8ekVdsJYTk7-Pgy8paxq98xj6GtoVo/edit?usp=sharing>`_
 
-        .. todo :: 
+        .. todo ::
             provide detailed description here
     """
     def __init__(self):
@@ -51,8 +54,12 @@ class CDF(GenericBlock):
         """
         Compile self header and trailer fields (one 64-bit word each)
         """
-        self.header_compiled = bs.compile(''.join(list(self.header_fields.values())), list(self.header_fields.keys()))
-        self.trailer_compiled = bs.compile(''.join(list(self.trailer_fields.values())), list(self.trailer_fields.keys()))
+        self.header_compiled = bs.compile(
+            ''.join(list(self.header_fields.values())),
+            list(self.header_fields.keys()))
+        self.trailer_compiled = bs.compile(
+            ''.join(list(self.trailer_fields.values())),
+            list(self.trailer_fields.keys()))
 
     def unpack(self, stream):
         """
@@ -61,13 +68,17 @@ class CDF(GenericBlock):
         * CDF header
         * AMC13 block
         * CDF trailer
-        
+
         :param IndexedNDArray stream: An array of 64-bits data words
 
         :return dict: Dictionary in form ``{Column name tuple: Value}``
         """
-        event = {self.update_key(k,'HEADER'): v for k, v in self.unpack_word(stream, self.header_compiled).items()}
+        event = {self.update_key(k, 'HEADER'): v
+                 for k, v in
+                 self.unpack_word(stream, self.header_compiled).items()}
         event.update(self.amc13.unpack(stream))
-        event.update({self.update_key(k,'TRAILER'): v for k, v in self.unpack_word(stream, self.trailer_compiled).items()})
-        #self.logger.debug("event reading done. Stream position %d" %(stream.pos))
+        event.update({self.update_key(k, 'TRAILER'): v
+                      for k, v in
+                      self.unpack_word(stream, self.trailer_compiled).items()})
+        self.logger.debug("event reading done. Stream position %d", stream.pos)
         return event
diff --git a/gdh/formats/geb.py b/gdh/formats/geb.py
index 1787f37..09d3b83 100644
--- a/gdh/formats/geb.py
+++ b/gdh/formats/geb.py
@@ -1,15 +1,22 @@
+# pylint: disable=missing-module-docstring
+# This module contains only one class which is properly documented
+
 import cbitstruct as bs
 from gdh.formats.generic_block import GenericBlock
 from gdh.formats.vfat import VFAT
 
 class GEB(GenericBlock):
     """
-    GEM GEB block. 
-    See detailed data format `description. <https://docs.google.com/spreadsheets/d/1iKMl68vMsSWgr8ekVdsJYTk7-Pgy8paxq98xj6GtoVo/edit?usp=sharing>`_
+    GEM GEB block.
+    See detailed data format `description. <https://docs.google.com/
+    spreadsheets/d/1iKMl68vMsSWgr8ekVdsJYTk7-Pgy8paxq98xj6GtoVo/edit?usp=sharing>`_
 
-        .. todo :: 
+        .. todo ::
             provide detailed description here
     """
+
+    # pylint: disable=too-many-instance-attributes
+
     def __init__(self):
         """
         Default constructor. Creates child ``VFAT()`` and set self block id to 'GEB'
@@ -22,7 +29,7 @@ class GEB(GenericBlock):
         self.block_id = 'GEB'
         self.payload_type = 0 # TODO implement zero suppression payload types
         self.vfat = VFAT()
-        
+
     def setup(self):
         """
         Define self header and trailer fields (one 64-bit word each)
@@ -57,8 +64,12 @@ class GEB(GenericBlock):
         """
         Compile self header and trailer fields (one 64-bit word each)
         """
-        self.header_compiled = bs.compile(''.join(list(self.header_fields.values())), list(self.header_fields.keys()))
-        self.trailer_compiled = bs.compile(''.join(list(self.trailer_fields.values())), list(self.trailer_fields.keys()))
+        self.header_compiled = bs.compile(
+            ''.join(list(self.header_fields.values())),
+            list(self.header_fields.keys()))
+        self.trailer_compiled = bs.compile(
+            ''.join(list(self.trailer_fields.values())),
+            list(self.trailer_fields.keys()))
 
     def unpack(self, stream):
         """
@@ -69,22 +80,27 @@ class GEB(GenericBlock):
         * GEB trailer
 
             .. note ::
-                Currently only lossless data is supported. In this case, the size of VFAT payload is always 192 bits each. Number of VFAT blocks is then determined as number of VFAT words // 3. 
-        
+                Currently only lossless data is supported.
+                In this case, the size of VFAT payload is always 192 bits each.
+                Number of VFAT blocks is then determined as number of VFAT words // 3.
+
         :param IndexedNDArray stream: An array of 64-bits data words
 
         :return dict: Dictionary in form ``{Column name tuple: Value}``
         """
         tmp = self.unpack_word(stream, self.header_compiled)
-        event = {self.update_key(k, 'HEADER'): v 
-            for k, v in tmp.items()}
-        for vfat_pos in range(tmp['N VFAT WORDS']//3):
+        event = {self.update_key(k, 'HEADER'): v
+                 for k, v in tmp.items()}
+        for _ in range(tmp['N VFAT WORDS']//3):
             self.vfat.slot = self.slot
             self.vfat.link = self.link
             self.vfat.payload_type = self.payload_type
             event.update(self.vfat.unpack(stream))
-        #self.logger.debug("all VFATs payload reading done. Stream position %d" %(stream.pos))
-        event.update({self.update_key(k, 'TRAILER'): v 
-            for k, v in self.unpack_word(stream, self.trailer_compiled).items()})
-        #self.logger.debug("GEB:%d:%d payload reading done. Stream position %d" %(self.slot, self.link, stream.pos))
+        self.logger.debug("all VFATs payload reading done. Stream position %d",
+                          stream.pos)
+        event.update({self.update_key(k, 'TRAILER'): v
+                      for k, v in
+                      self.unpack_word(stream, self.trailer_compiled).items()})
+        self.logger.debug("GEB:%d:%d payload reading done. Stream position %d",
+                          self.slot, self.link, stream.pos)
         return event
diff --git a/gdh/formats/generic_block.py b/gdh/formats/generic_block.py
index b7d5938..dc222b8 100644
--- a/gdh/formats/generic_block.py
+++ b/gdh/formats/generic_block.py
@@ -1,11 +1,14 @@
-import cbitstruct as bs
-import logging 
+# pylint: disable=missing-module-docstring
+# This module contains only one class which is properly documented
+
 from abc import ABC
+import logging
 
 class GenericBlock(ABC):
     """
     Abstract class defining genericl binary stream block
-    GEM detailed data format `description. <https://docs.google.com/spreadsheets/d/1iKMl68vMsSWgr8ekVdsJYTk7-Pgy8paxq98xj6GtoVo/edit?usp=sharing>`_
+    See detailed data format `description. <https://docs.google.com/
+    spreadsheets/d/1iKMl68vMsSWgr8ekVdsJYTk7-Pgy8paxq98xj6GtoVo/edit?usp=sharing>`_
 
        .. note ::
           This is a base class and it must be extended
@@ -23,14 +26,15 @@ class GenericBlock(ABC):
         self.setup()
         self.compile()
 
-    def update_key(self, key, sub_block = 'PAYLOAD'):
+    def update_key(self, key, sub_block='PAYLOAD'):
         """
         Convert filed name to a tuple with hardware tagging information
 
         :param str key: Field name
         :param str sub_block: Sub block id
 
-        :return tuple: ``('FIELD NAME', 'BLOCK ID', 'SUB BLOCK', self.slot, self.link, self.pos)``
+        :return tuple: ``('FIELD NAME', 'BLOCK ID', 'SUB BLOCK',
+            self.slot, self.link, self.pos)``
         """
         return (key, self.block_id, sub_block, self.slot, self.link, self.pos)
 
@@ -39,48 +43,59 @@ class GenericBlock(ABC):
         Read and unpack a word from stream
 
         :param IndexedNDArray stream: An array of 64-bits data words
-        :param dict compiled_dict: Compiled ``bitstruct`` dictionary with word fields in form ``{'Field Name':'Field Type:length'}``
+        :param dict compiled_dict: Compiled ``bitstruct`` dictionary
+            with word fields in form ``{'Field Name':'Field Type:length'}``
         :param int n_words: Number of 64-bit words in the array
 
         :return dict: Dictionary in form ``{'Field Name': Value}``
         """
-        #self.logger.debug("unpack_word called. Stream position %d, word: %s" %(stream.pos, stream[stream.pos]))
-        unpacked_word = compiled_dict.unpack(stream[stream.pos:stream.pos + n_words].tobytes())
+        self.logger.debug("unpack_word called. Stream position %d, word: %s",
+                          stream.pos, stream[stream.pos])
+        unpacked_word = compiled_dict.unpack(
+            stream[stream.pos:stream.pos + n_words].tobytes())
         stream.pos += n_words
-        #self.logger.debug("word reading done. Stream position %d" %(stream.pos))
-        #self.logger.debug('unpacked word %s', unpacked_word)
+        self.logger.debug("word reading done. Stream position %d", stream.pos)
+        self.logger.debug('unpacked word %s', unpacked_word)
         return unpacked_word
 
     def compile(self):
         """
-        Compile bitstruct dictionary. Virtual method, implemented in descendant classes
+        Compile bitstruct dictionary.
+        Virtual method, implemented in descendant classes
         """
-        pass
 
     def setup(self):
         """
-        Define block data fields. Virtual method, implemented in descendant classes
+        Define block data fields.
+        Virtual method, implemented in descendant classes
         """
-        pass
 
     def unpack(self, stream):
         """
-        Unpack block. Virtual method, implemented in descendant classes. 
-        
+        Unpack block.
+        Virtual method, implemented in descendant classes.
+
         :param IndexedNDArray stream: An array of 64-bits data words
 
         :return dict: Dictionary in form ``{Column name tuple: Value}``
         """
-        pass
 
     def pack(self):
-        pass
+        """
+        Placeholder. TBU
+        """
 
-    def print():
-        pass
+    def print(self):
+        """
+        Placeholder. TBU
+        """
 
     def set_logger_console_handler(self):
-        pass
+        """
+        Placeholder. TBU
+        """
 
     def set_logger_file_handler(self):
-        pass
+        """
+        Placeholder. TBU
+        """
diff --git a/gdh/formats/vfat.py b/gdh/formats/vfat.py
index 3e961bc..3a5cb9e 100644
--- a/gdh/formats/vfat.py
+++ b/gdh/formats/vfat.py
@@ -1,12 +1,16 @@
+# pylint: disable=missing-module-docstring
+# This module contains only one class which is properly documented
+
 import cbitstruct as bs
 from gdh.formats.generic_block import GenericBlock
 
 class VFAT(GenericBlock):
     """
-    GEM AMC block. 
-    See detailed data format `description. <https://docs.google.com/spreadsheets/d/1iKMl68vMsSWgr8ekVdsJYTk7-Pgy8paxq98xj6GtoVo/edit?usp=sharing>`_
+    GEM AMC block.
+    See detailed data format `description. <https://docs.google.com/
+    spreadsheets/d/1iKMl68vMsSWgr8ekVdsJYTk7-Pgy8paxq98xj6GtoVo/edit?usp=sharing>`_
 
-        .. todo :: 
+        .. todo ::
             provide detailed description here
     """
     def __init__(self):
@@ -19,20 +23,22 @@ class VFAT(GenericBlock):
 
     def setup(self):
         """
-        Define self header and trailer fields. 
+        Define self header and trailer fields.
 
             .. note ::
                 Lossless VFAT data format:
                 192 bits = 3 64 bit words
 
             .. warning ::
-                ``cbitstruct`` package has a limitation on max field size of 64 bits.
-                Thus, VFAT channel data is presented as a collection of two 64-bit fields:
+                ``cbitstruct`` package has a limitation
+                on max field size of 64 bits.
+                Thus, VFAT channel data is presented as
+                a collection of two 64-bit fields:
 
                 * 'CHANNEL DATA M' - Most significant 64 bits
                 * 'CHANNEL DATA L' - List significant 64 bits
 
-            .. todo :: 
+            .. todo ::
                 Implement zero suppression and calibration data formats
         """
         self.vfat_block = {'POS'           :'u8',
@@ -49,7 +55,9 @@ class VFAT(GenericBlock):
         """
         Compile complete VFAT block
         """
-        self.vfat_block_compiled = bs.compile(''.join(list(self.vfat_block.values())), list(self.vfat_block.keys()))
+        self.vfat_block_compiled = bs.compile(
+            ''.join(list(self.vfat_block.values())),
+            list(self.vfat_block.keys()))
 
     def unpack(self, stream):
         """
diff --git a/gdh/unpacker.py b/gdh/unpacker.py
index 4841fa4..37e4208 100644
--- a/gdh/unpacker.py
+++ b/gdh/unpacker.py
@@ -1,6 +1,9 @@
+"""
+Executable script for unpacking GEM binary data
+"""
+
 import argparse
 import logging
-import os
 import pandas as pd
 import numpy as np
 from gdh.utils import IndexedNDArray
@@ -11,14 +14,15 @@ np.set_printoptions(formatter={'int':hex})
 
 def run(filename, source, nrows=None, verbosity=1):
     """
-    Unpack gem binary file 
+    Unpack gem binary file
 
     :param str filename: Input data binary file
     :param str source: Type of binary stream source
     :param int nrows: Number of events to unpack
     :param int verbosity: Logging level
 
-    :return pandas.DataFrame: A table of events with column names as tuples. Each row corresponds to one event
+    :return pandas.DataFrame: A table of events with column names as tuples.
+        Each row corresponds to one event
     """
 
     if verbosity == 0:
@@ -31,28 +35,28 @@ def run(filename, source, nrows=None, verbosity=1):
         # set level to DEBUG
         logging.basicConfig(level=logging.DEBUG)
 
-    logger.info('Processing file: %s. \n Data source: %s'%(filename, source))
-    cbs = IndexedNDArray(np.fromfile(filename, dtype='>Q').byteswap(inplace=True), pos = 0)
-    #logger.debug('Input data size in 64-bit words: %d'%(cbs.size))
+    logger.info('Processing file: %s. \n Data source: %s', filename, source)
+    cbs = IndexedNDArray(np.fromfile(filename, dtype='>Q').byteswap(inplace=True), pos=0)
+    logger.debug('Input data size in 64-bit words: %d', cbs.size)
     # TODO read first word and determine the source type instead of asking to pass
-    if source == 'minidaq' or source == 'ferol':
-        logger.warn('%s format is not supported at the moment'%(source))
-        return
+    if source in ('minidaq', 'ferol'):
+        logger.warning('%s format is not supported at the moment', source)
+        return None
 
-    # Create the CDF instance (which creates every format block downstream). 
-    # This has to be done outside of the loop as the formats are precompiled 
-    # during creation of the objects and there's no need to create them on 
+    # Create the CDF instance (which creates every format block downstream).
+    # This has to be done outside of the loop as the formats are precompiled
+    # during creation of the objects and there's no need to create them on
     # per-event basis
     cdf = CDF()
     # Get one event from the stream
     def get():
         while cbs.pos < cbs.size:
             yield cdf.unpack(cbs)
-            #logger.debug('Data size in 64-bit words: %d. Position: %d '%(cbs.size, cbs.pos))
+            logger.debug('Data size in 64-bit words: %d. Position: %d', cbs.size, cbs.pos)
 
     events = pd.DataFrame.from_records(get(), nrows=nrows)
     logger.info(events.head())
-    logger.info('Finished unpacking. Processed %d events.'%(len(events.index)))
+    logger.info('Finished unpacking. Processed %d events.', len(events.index))
     return events
 
 if __name__ == "__main__":
@@ -63,9 +67,10 @@ if __name__ == "__main__":
                         help='Source type. Only "sdram" is currently supported')
     parser.add_argument('-n', '--nevents', dest='nevents', type=int, default=None,
                         help='Maximum number of events to process')
-    parser.add_argument('-v', '--verbosity', dest='verbosity', type=int, choices=[0, 1, 2], default=1,
+    parser.add_argument('-v', '--verbosity', dest='verbosity', type=int,
+                        choices=[0, 1, 2], default=1,
                         help='Logger verbosity levels')
-    
+
     args = parser.parse_args()
 
     run(args.ifilename, args.isource, args.nevents, args.verbosity)
diff --git a/gdh/utils.py b/gdh/utils.py
index e364f88..c1c446b 100644
--- a/gdh/utils.py
+++ b/gdh/utils.py
@@ -1,11 +1,20 @@
+# pylint: disable=missing-module-docstring
+# This module contains only one class which is properly documented
+
 import numpy as np
 
 class IndexedNDArray(np.ndarray):
     """
     Helper class providing ndarray with extra ``pos`` attribute.
-    This attribute will be used to indicate current position when moving through array values.
-    See `numpy example. <https://numpy.org/doc/stable/user/basics.subclassing.html>`_
+    This attribute will be used to indicate current position
+    when moving through array values.
+    See `numpy example. <https://numpy.org/
+    doc/stable/user/basics.subclassing.html>`_
     """
+
+    # pylint: disable=multiple-statements
+    # pylint: disable=attribute-defined-outside-init
+
     def __new__(cls, input_array, pos=0):
         obj = np.asarray(input_array).view(cls)
         obj.pos = pos
-- 
GitLab


From 47d947489ffe79cb82c0dd74880b19635bfcf762 Mon Sep 17 00:00:00 2001
From: Mykhailo Dalchenko <mykhailo.dalchenko@cern.ch>
Date: Fri, 29 May 2020 13:56:06 +0200
Subject: [PATCH 2/5] Add a note about linter usage

---
 DEVELOPERS.md | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/DEVELOPERS.md b/DEVELOPERS.md
index d935114..097f174 100644
--- a/DEVELOPERS.md
+++ b/DEVELOPERS.md
@@ -34,6 +34,12 @@ deactivate # return from virtual environment to the regular one
 
 ## <a name="tests"> Running Tests
 
+### Checking whether your code is up to project standard
+Use [*pylint*][pylint] to check if your code quality is good. Standard linter configuration is used.
+Compare the linter ratings before and after your modifications and do not commit the code which reduces the code quality.
+
+### Testing the code with Jupyter notebooks
+
 Tests alongside with some supporting documentation are provided in `jupyter` [notebook][./doc/notebooks/test_unpacker.ipynb].
 `Jupyter` will be installed as a dependency with the `gdh` package. You can add your own test notebooks when required.
 In order to run the notebook locally simply run 
@@ -88,3 +94,4 @@ This section will be completed later with precise documentation style guide, for
 [doxygen]:http://www.doxygen.nl/manual/starting.html
 [sphinx]:https://www.sphinx-doc.org/en/master/usage/quickstart.html
 [pandoc]:https://pandoc.org/installing.html
+[pylint]:https://www.pylint.org/
-- 
GitLab


From d6eb647340408a68c73fd29f58f5944d86862224 Mon Sep 17 00:00:00 2001
From: Mykhailo Dalchenko <mykhailo.dalchenko@cern.ch>
Date: Fri, 29 May 2020 14:42:24 +0200
Subject: [PATCH 3/5] Set up basic CI

- Add CI configuration
- Set up a custom image
- Update gitignore to remove test and build artifacts from tracked
content
---
 .gitlab-ci.yml | 6 ++++--
 DEVELOPERS.md  | 2 +-
 2 files changed, 5 insertions(+), 3 deletions(-)

diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 89f4d03..eda282a 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -14,6 +14,8 @@ stages:
     - lint
     - test
     - build
+    - build
+    - test
     - deploy
 
 variables:
@@ -51,6 +53,7 @@ after_script:
 .deploy: &deploy_template
     type: deploy
     dependencies:
+        - run_build
         - run_test
         - run_doc
 
@@ -73,8 +76,7 @@ run_pylint:
     artifacts:
         paths:
             - ./pylint/
-# 
-# Job: install packages and run tests
+
 #
 run_test:
     type: test
diff --git a/DEVELOPERS.md b/DEVELOPERS.md
index 097f174..bf0e286 100644
--- a/DEVELOPERS.md
+++ b/DEVELOPERS.md
@@ -81,7 +81,7 @@ Additional dependencies to install:
 The documentation can be generated using the following command:
 
 ```bash
-sphinx-build -b html doc  build/doc
+sphinx-build -b html doc  doc/_build
 ```
 
 Generated files are placed in `build/doc`.
-- 
GitLab


From e9b0f91cc0b92801c785ee85b16ffce9c46b2ef1 Mon Sep 17 00:00:00 2001
From: Mykhailo Dalchenko <mykhailo.dalchenko@cern.ch>
Date: Sat, 30 May 2020 10:45:29 +0200
Subject: [PATCH 4/5] CI cleanup

---
 .gitlab-ci.yml | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index eda282a..01b3f76 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -14,8 +14,6 @@ stages:
     - lint
     - test
     - build
-    - build
-    - test
     - deploy
 
 variables:
@@ -53,9 +51,9 @@ after_script:
 .deploy: &deploy_template
     type: deploy
     dependencies:
-        - run_build
         - run_test
         - run_doc
+        - run_build
 
 #
 # Job: run PyLint
@@ -76,7 +74,8 @@ run_pylint:
     artifacts:
         paths:
             - ./pylint/
-
+#
+# Job: install packages and run tests
 #
 run_test:
     type: test
-- 
GitLab


From ef2ae78b104f6514011884f8dfe0e79bbef117aa Mon Sep 17 00:00:00 2001
From: Mykhailo Dalchenko <mykhailo.dalchenko@cern.ch>
Date: Sun, 31 May 2020 17:59:25 +0000
Subject: [PATCH 5/5] Apply suggestion to DEVELOPERS.md

---
 DEVELOPERS.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/DEVELOPERS.md b/DEVELOPERS.md
index bf0e286..86e9146 100644
--- a/DEVELOPERS.md
+++ b/DEVELOPERS.md
@@ -36,7 +36,7 @@ deactivate # return from virtual environment to the regular one
 
 ### Checking whether your code is up to project standard
 Use [*pylint*][pylint] to check if your code quality is good. Standard linter configuration is used.
-Compare the linter ratings before and after your modifications and do not commit the code which reduces the code quality.
+Compare the linter ratings before and after your modifications and do not commit code that reduces the code quality.
 
 ### Testing the code with Jupyter notebooks
 
-- 
GitLab