paycheck: fixes to checker module (readability review)

BUG=None
TEST=Passes unit tests
TEST=Passes integration tests

Change-Id: Ifd502af9d2755b2c23805cd03857ebbf0e633732
Reviewed-on: https://chromium-review.googlesource.com/59752
Reviewed-by: Gilad Arnold <garnold@chromium.org>
Tested-by: Gilad Arnold <garnold@chromium.org>
Reviewed-by: Chris Sosa <sosa@chromium.org>
Commit-Queue: Gilad Arnold <garnold@chromium.org>
diff --git a/scripts/update_payload/checker.py b/scripts/update_payload/checker.py
index 2a1dd31..549ffcb 100644
--- a/scripts/update_payload/checker.py
+++ b/scripts/update_payload/checker.py
@@ -10,17 +10,17 @@
 
   checker = PayloadChecker(payload)
   checker.Run(...)
-
 """
 
 import array
 import base64
 import hashlib
+import itertools
 import os
 import subprocess
 
 import common
-from error import PayloadError
+import error
 import format_utils
 import histogram
 import update_metadata_pb2
@@ -29,6 +29,7 @@
 #
 # Constants.
 #
+
 _CHECK_DST_PSEUDO_EXTENTS = 'dst-pseudo-extents'
 _CHECK_MOVE_SAME_SRC_DST_BLOCK = 'move-same-src-dst-block'
 _CHECK_PAYLOAD_SIG = 'payload-sig'
@@ -51,6 +52,7 @@
 #
 # Helper functions.
 #
+
 def _IsPowerOfTwo(val):
   """Returns True iff val is a power of two."""
   return val > 0 and (val & (val - 1)) == 0
@@ -60,11 +62,10 @@
   """Adds a custom formatted representation to ordinary string representation.
 
   Args:
-    format_func: a value formatter
-    value: value to be formatted and returned
+    format_func: A value formatter.
+    value: Value to be formatted and returned.
   Returns:
     A string 'x (y)' where x = str(value) and y = format_func(value).
-
   """
   return '%s (%s)' % (value, format_func(value))
 
@@ -77,16 +78,16 @@
 #
 # Payload report generator.
 #
+
 class _PayloadReport(object):
   """A payload report generator.
 
   A report is essentially a sequence of nodes, which represent data points. It
   is initialized to have a "global", untitled section. A node may be a
   sub-report itself.
-
   """
 
-  # Report nodes: field, sub-report, section.
+  # Report nodes: Field, sub-report, section.
   class Node(object):
     """A report node interface."""
 
@@ -95,11 +96,10 @@
       """Indents a line by a given indentation amount.
 
       Args:
-        indent: the indentation amount
-        line: the line content (string)
+        indent: The indentation amount.
+        line: The line content (string).
       Returns:
         The properly indented line (string).
-
       """
       return '%*s%s' % (indent, '', line)
 
@@ -107,15 +107,14 @@
       """Generates the report lines for this node.
 
       Args:
-        base_indent: base indentation for each line
-        sub_indent: additional indentation for sub-nodes
-        curr_section: the current report section object
+        base_indent: Base indentation for each line.
+        sub_indent: Additional indentation for sub-nodes.
+        curr_section: The current report section object.
       Returns:
         A pair consisting of a list of properly indented report lines and a new
         current section object.
-
       """
-      raise NotImplementedError()
+      raise NotImplementedError
 
   class FieldNode(Node):
     """A field report node, representing a (name, value) pair."""
@@ -190,11 +189,10 @@
     """Generates the lines in the report, properly indented.
 
     Args:
-      base_indent: the indentation used for root-level report lines
-      sub_indent: the indentation offset used for sub-reports
+      base_indent: The indentation used for root-level report lines.
+      sub_indent: The indentation offset used for sub-reports.
     Returns:
       A list of indented report lines.
-
     """
     report_lines = []
     curr_section = self.global_section
@@ -209,12 +207,10 @@
     """Dumps the report to a file.
 
     Args:
-      out_file: file object to output the content to
-      base_indent: base indentation for report lines
-      sub_indent: added indentation for sub-reports
-
+      out_file: File object to output the content to.
+      base_indent: Base indentation for report lines.
+      sub_indent: Added indentation for sub-reports.
     """
-
     report_lines = self.GenerateLines(base_indent, sub_indent)
     if report_lines and not self.is_finalized:
       report_lines.append('(incomplete report)\n')
@@ -226,11 +222,10 @@
     """Adds a field/value pair to the payload report.
 
     Args:
-      name: the field's name
-      value: the field's value
-      linebreak: whether the value should be printed on a new line
-      indent: amount of extra indent for each line of the value
-
+      name: The field's name.
+      value: The field's value.
+      linebreak: Whether the value should be printed on a new line.
+      indent: Amount of extra indent for each line of the value.
     """
     assert not self.is_finalized
     if name and self.last_section.max_field_name_len < len(name):
@@ -258,12 +253,12 @@
 #
 # Payload verification.
 #
+
 class PayloadChecker(object):
   """Checking the integrity of an update payload.
 
   This is a short-lived object whose purpose is to isolate the logic used for
   verifying the integrity of an update payload.
-
   """
 
   def __init__(self, payload, assert_type=None, block_size=0,
@@ -271,23 +266,24 @@
     """Initialize the checker.
 
     Args:
-      payload: the payload object to check
-      assert_type: assert that payload is either 'full' or 'delta' (optional)
-      block_size: expected filesystem / payload block size (optional)
-      allow_unhashed: allow operations with unhashed data blobs
-      disabled_tests: list of tests to disable
-
+      payload: The payload object to check.
+      assert_type: Assert that payload is either 'full' or 'delta' (optional).
+      block_size: Expected filesystem / payload block size (optional).
+      allow_unhashed: Allow operations with unhashed data blobs.
+      disabled_tests: Sequence of tests to disable.
     """
-    assert payload.is_init, 'uninitialized update payload'
+    if not payload.is_init:
+      raise ValueError('Uninitialized update payload.')
 
     # Set checker configuration.
     self.payload = payload
     self.block_size = block_size if block_size else _DEFAULT_BLOCK_SIZE
     if not _IsPowerOfTwo(self.block_size):
-      raise PayloadError('expected block (%d) size is not a power of two' %
-                         self.block_size)
+      raise error.PayloadError(
+          'Expected block (%d) size is not a power of two.' % self.block_size)
     if assert_type not in (None, _TYPE_FULL, _TYPE_DELTA):
-      raise PayloadError("invalid assert_type value (`%s')" % assert_type)
+      raise error.PayloadError('Invalid assert_type value (%r).' %
+                               assert_type)
     self.payload_type = assert_type
     self.allow_unhashed = allow_unhashed
 
@@ -316,39 +312,38 @@
     causes an exception to be raised.
 
     Args:
-      msg: the message containing the element
-      name: the name of the element
-      report: a report object to add the element name/value to
-      is_mandatory: whether or not this element must be present
-      is_submsg: whether this element is itself a message
-      convert: a function for converting the element value for reporting
-      msg_name: the name of the message object (for error reporting)
-      linebreak: whether the value report should induce a line break
-      indent: amount of indent used for reporting the value
+      msg: The message containing the element.
+      name: The name of the element.
+      report: A report object to add the element name/value to.
+      is_mandatory: Whether or not this element must be present.
+      is_submsg: Whether this element is itself a message.
+      convert: A function for converting the element value for reporting.
+      msg_name: The name of the message object (for error reporting).
+      linebreak: Whether the value report should induce a line break.
+      indent: Amount of indent used for reporting the value.
     Returns:
       A pair consisting of the element value and the generated sub-report for
       it (if the element is a sub-message, None otherwise). If the element is
       missing, returns (None, None).
     Raises:
-      PayloadError if a mandatory element is missing.
-
+      error.PayloadError if a mandatory element is missing.
     """
     if not msg.HasField(name):
       if is_mandatory:
-        raise PayloadError("%smissing mandatory %s '%s'" %
-                           (msg_name + ' ' if msg_name else '',
-                            'sub-message' if is_submsg else 'field',
-                            name))
-      return (None, None)
+        raise error.PayloadError('%smissing mandatory %s %r.' %
+                                 (msg_name + ' ' if msg_name else '',
+                                  'sub-message' if is_submsg else 'field',
+                                  name))
+      return None, None
 
     value = getattr(msg, name)
     if is_submsg:
-      return (value, report and report.AddSubReport(name))
+      return value, report and report.AddSubReport(name)
     else:
       if report:
         report.AddField(name, convert(value), linebreak=linebreak,
                         indent=indent)
-      return (value, None)
+      return value, None
 
   @staticmethod
   def _CheckMandatoryField(msg, field_name, report, msg_name, convert=str,
@@ -382,68 +377,77 @@
     """Checks that val1 is None iff val2 is None.
 
     Args:
-      val1: first value to be compared
-      val2: second value to be compared
-      name1: name of object holding the first value
-      name2: name of object holding the second value
-      obj_name: name of the object containing these values
+      val1: First value to be compared.
+      val2: Second value to be compared.
+      name1: Name of object holding the first value.
+      name2: Name of object holding the second value.
+      obj_name: Name of the object containing these values.
     Raises:
-      PayloadError if assertion does not hold.
-
+      error.PayloadError if assertion does not hold.
     """
     if None in (val1, val2) and val1 is not val2:
       present, missing = (name1, name2) if val2 is None else (name2, name1)
-      raise PayloadError("'%s' present without '%s'%s" %
-                         (present, missing,
-                          ' in ' + obj_name if obj_name else ''))
+      raise error.PayloadError('%r present without %r%s.' %
+                               (present, missing,
+                                ' in ' + obj_name if obj_name else ''))
 
   @staticmethod
   def _Run(cmd, send_data=None):
     """Runs a subprocess, returns its output.
 
     Args:
-      cmd: list of command-line argument for invoking the subprocess
-      send_data: data to feed to the process via its stdin
+      cmd: Sequence of command-line arguments for invoking the subprocess.
+      send_data: Data to feed to the process via its stdin.
     Returns:
       A tuple containing the stdout and stderr output of the process.
-
     """
     run_process = subprocess.Popen(cmd, stdin=subprocess.PIPE,
                                    stdout=subprocess.PIPE)
-    return run_process.communicate(input=send_data)
+    try:
+      result = run_process.communicate(input=send_data)
+    finally:
+      exit_code = run_process.wait()
+
+    if exit_code:
+      raise RuntimeError('Subprocess %r failed with code %r.' %
+                         (cmd, exit_code))
+
+    return result
 
   @staticmethod
   def _CheckSha256Signature(sig_data, pubkey_file_name, actual_hash, sig_name):
     """Verifies an actual hash against a signed one.
 
     Args:
-      sig_data: the raw signature data
-      pubkey_file_name: public key used for verifying signature
-      actual_hash: the actual hash digest
-      sig_name: signature name for error reporting
+      sig_data: The raw signature data.
+      pubkey_file_name: Public key used for verifying signature.
+      actual_hash: The actual hash digest.
+      sig_name: Signature name for error reporting.
     Raises:
-      PayloadError if signature could not be verified.
-
+      error.PayloadError if signature could not be verified.
     """
     if len(sig_data) != 256:
-      raise PayloadError('%s: signature size (%d) not as expected (256)' %
-                         (sig_name, len(sig_data)))
+      raise error.PayloadError(
+          '%s: signature size (%d) not as expected (256).' %
+          (sig_name, len(sig_data)))
     signed_data, _ = PayloadChecker._Run(
         ['openssl', 'rsautl', '-verify', '-pubin', '-inkey', pubkey_file_name],
         send_data=sig_data)
 
     if len(signed_data) != len(common.SIG_ASN1_HEADER) + 32:
-      raise PayloadError('%s: unexpected signed data length (%d)' %
-                         (sig_name, len(signed_data)))
+      raise error.PayloadError('%s: unexpected signed data length (%d).' %
+                               (sig_name, len(signed_data)))
 
     if not signed_data.startswith(common.SIG_ASN1_HEADER):
-      raise PayloadError('%s: not containing standard ASN.1 prefix' % sig_name)
+      raise error.PayloadError('%s: not containing standard ASN.1 prefix.' %
+                               sig_name)
 
     signed_hash = signed_data[len(common.SIG_ASN1_HEADER):]
     if signed_hash != actual_hash:
-      raise PayloadError('%s: signed hash (%s) different from actual (%s)' %
-                         (sig_name, common.FormatSha256(signed_hash),
-                          common.FormatSha256(actual_hash)))
+      raise error.PayloadError(
+          '%s: signed hash (%s) different from actual (%s).' %
+          (sig_name, common.FormatSha256(signed_hash),
+           common.FormatSha256(actual_hash)))
 
   @staticmethod
   def _CheckBlocksFitLength(length, num_blocks, block_size, length_name,
@@ -454,40 +458,38 @@
     length of the data residing in these blocks.
 
     Args:
-      length: the actual length of the data
-      num_blocks: the number of blocks allocated for it
-      block_size: the size of each block in bytes
-      length_name: name of length (used for error reporting)
-      block_name: name of block (used for error reporting)
+      length: The actual length of the data.
+      num_blocks: The number of blocks allocated for it.
+      block_size: The size of each block in bytes.
+      length_name: Name of length (used for error reporting).
+      block_name: Name of block (used for error reporting).
     Raises:
-      PayloadError if the aforementioned invariant is not satisfied.
-
+      error.PayloadError if the aforementioned invariant is not satisfied.
     """
     # Check: length <= num_blocks * block_size.
     if length > num_blocks * block_size:
-      raise PayloadError(
-          '%s (%d) > num %sblocks (%d) * block_size (%d)' %
+      raise error.PayloadError(
+          '%s (%d) > num %sblocks (%d) * block_size (%d).' %
           (length_name, length, block_name or '', num_blocks, block_size))
 
     # Check: length > (num_blocks - 1) * block_size.
     if length <= (num_blocks - 1) * block_size:
-      raise PayloadError(
-          '%s (%d) <= (num %sblocks - 1 (%d)) * block_size (%d)' %
+      raise error.PayloadError(
+          '%s (%d) <= (num %sblocks - 1 (%d)) * block_size (%d).' %
           (length_name, length, block_name or '', num_blocks - 1, block_size))
 
   def _CheckManifest(self, report, rootfs_part_size=0, kernel_part_size=0):
     """Checks the payload manifest.
 
     Args:
-      report: a report object to add to
-      rootfs_part_size: size of the rootfs partition in bytes
-      kernel_part_size: size of the kernel partition in bytes
+      report: A report object to add to.
+      rootfs_part_size: Size of the rootfs partition in bytes.
+      kernel_part_size: Size of the kernel partition in bytes.
     Returns:
       A tuple consisting of the partition block size used during the update
       (integer), the signatures block offset and size.
     Raises:
-      PayloadError if any of the checks fail.
-
+      error.PayloadError if any of the checks fail.
     """
     manifest = self.payload.manifest
     report.AddSection('manifest')
@@ -496,8 +498,8 @@
     actual_block_size = self._CheckMandatoryField(manifest, 'block_size',
                                                   report, 'manifest')
     if actual_block_size != self.block_size:
-      raise PayloadError('block_size (%d) not as expected (%d)' %
-                         (actual_block_size, self.block_size))
+      raise error.PayloadError('Block_size (%d) not as expected (%d).' %
+                               (actual_block_size, self.block_size))
 
     # Check: signatures_offset <==> signatures_size.
     self.sigs_offset = self._CheckOptionalField(manifest, 'signatures_offset',
@@ -517,8 +519,8 @@
     if oki_msg:  # equivalently, ori_msg
       # Assert/mark delta payload.
       if self.payload_type == _TYPE_FULL:
-        raise PayloadError(
-            'apparent full payload contains old_{kernel,rootfs}_info')
+        raise error.PayloadError(
+            'Apparent full payload contains old_{kernel,rootfs}_info.')
       self.payload_type = _TYPE_DELTA
 
       # Check: {size, hash} present in old_{kernel,rootfs}_info.
@@ -533,18 +535,18 @@
 
       # Check: old_{kernel,rootfs} size must fit in respective partition.
       if kernel_part_size and self.old_kernel_fs_size > kernel_part_size:
-        raise PayloadError(
-            'old kernel content (%d) exceed partition size (%d)' %
+        raise error.PayloadError(
+            'Old kernel content (%d) exceed partition size (%d).' %
             (self.old_kernel_fs_size, kernel_part_size))
       if rootfs_part_size and self.old_rootfs_fs_size > rootfs_part_size:
-        raise PayloadError(
-            'old rootfs content (%d) exceed partition size (%d)' %
+        raise error.PayloadError(
+            'Old rootfs content (%d) exceed partition size (%d).' %
             (self.old_rootfs_fs_size, rootfs_part_size))
     else:
       # Assert/mark full payload.
       if self.payload_type == _TYPE_DELTA:
-        raise PayloadError(
-            'apparent delta payload missing old_{kernel,rootfs}_info')
+        raise error.PayloadError(
+            'Apparent delta payload missing old_{kernel,rootfs}_info.')
       self.payload_type = _TYPE_FULL
 
     # Check: new_kernel_info present; contains {size, hash}.
@@ -565,34 +567,33 @@
 
     # Check: new_{kernel,rootfs} size must fit in respective partition.
     if kernel_part_size and self.new_kernel_fs_size > kernel_part_size:
-      raise PayloadError(
-          'new kernel content (%d) exceed partition size (%d)' %
+      raise error.PayloadError(
+          'New kernel content (%d) exceed partition size (%d).' %
           (self.new_kernel_fs_size, kernel_part_size))
     if rootfs_part_size and self.new_rootfs_fs_size > rootfs_part_size:
-      raise PayloadError(
-          'new rootfs content (%d) exceed partition size (%d)' %
+      raise error.PayloadError(
+          'New rootfs content (%d) exceed partition size (%d).' %
           (self.new_rootfs_fs_size, rootfs_part_size))
 
-    # Check: payload must contain at least one operation.
+    # Check: Payload must contain at least one operation.
     if not(len(manifest.install_operations) or
            len(manifest.kernel_install_operations)):
-      raise PayloadError('payload contains no operations')
+      raise error.PayloadError('Payload contains no operations.')
 
   def _CheckLength(self, length, total_blocks, op_name, length_name):
     """Checks whether a length matches the space designated in extents.
 
     Args:
-      length: the total length of the data
-      total_blocks: the total number of blocks in extents
-      op_name: operation name (for error reporting)
-      length_name: length name (for error reporting)
+      length: The total length of the data.
+      total_blocks: The total number of blocks in extents.
+      op_name: Operation name (for error reporting).
+      length_name: Length name (for error reporting).
     Raises:
-      PayloadError is there a problem with the length.
-
+      error.PayloadError if there is a problem with the length.
     """
     # Check: length is non-zero.
     if length == 0:
-      raise PayloadError('%s: %s is zero' % (op_name, length_name))
+      raise error.PayloadError('%s: %s is zero.' % (op_name, length_name))
 
     # Check that length matches number of blocks.
     self._CheckBlocksFitLength(length, total_blocks, self.block_size,
@@ -603,21 +604,20 @@
     """Checks a sequence of extents.
 
     Args:
-      extents: the sequence of extents to check
-      usable_size: the usable size of the partition to which the extents apply
-      block_counters: an array of counters corresponding to the number of blocks
-      name: the name of the extent block
-      allow_pseudo: whether or not pseudo block numbers are allowed
-      allow_signature: whether or not the extents are used for a signature
+      extents: The sequence of extents to check.
+      usable_size: The usable size of the partition to which the extents apply.
+      block_counters: Array of counters corresponding to the number of blocks.
+      name: The name of the extent block.
+      allow_pseudo: Whether or not pseudo block numbers are allowed.
+      allow_signature: Whether or not the extents are used for a signature.
     Returns:
       The total number of blocks in the extents.
     Raises:
-      PayloadError if any of the entailed checks fails.
-
+      error.PayloadError if any of the entailed checks fails.
     """
     total_num_blocks = 0
     for ex, ex_name in common.ExtentIter(extents, name):
-      # Check: mandatory fields.
+      # Check: Mandatory fields.
       start_block = PayloadChecker._CheckMandatoryField(ex, 'start_block',
                                                         None, ex_name)
       num_blocks = PayloadChecker._CheckMandatoryField(ex, 'num_blocks', None,
@@ -626,22 +626,22 @@
 
       # Check: num_blocks > 0.
       if num_blocks == 0:
-        raise PayloadError('%s: extent length is zero' % ex_name)
+        raise error.PayloadError('%s: extent length is zero.' % ex_name)
 
       if start_block != common.PSEUDO_EXTENT_MARKER:
-        # Check: make sure we're within the partition limit.
+        # Check: Make sure we're within the partition limit.
         if usable_size and end_block * self.block_size > usable_size:
-          raise PayloadError(
-              '%s: extent (%s) exceeds usable partition size (%d)' %
+          raise error.PayloadError(
+              '%s: extent (%s) exceeds usable partition size (%d).' %
               (ex_name, common.FormatExtent(ex, self.block_size), usable_size))
 
         # Record block usage.
-        for i in range(start_block, end_block):
+        for i in xrange(start_block, end_block):
           block_counters[i] += 1
       elif not (allow_pseudo or (allow_signature and len(extents) == 1)):
         # Pseudo-extents must be allowed explicitly, or otherwise be part of a
         # signature operation (in which case there has to be exactly one).
-        raise PayloadError('%s: unexpected pseudo-extent' % ex_name)
+        raise error.PayloadError('%s: unexpected pseudo-extent.' % ex_name)
 
       total_num_blocks += num_blocks
 
@@ -651,21 +651,20 @@
     """Specific checks for REPLACE/REPLACE_BZ operations.
 
     Args:
-      op: the operation object from the manifest
-      data_length: the length of the data blob associated with the operation
-      total_dst_blocks: total number of blocks in dst_extents
-      op_name: operation name for error reporting
+      op: The operation object from the manifest.
+      data_length: The length of the data blob associated with the operation.
+      total_dst_blocks: Total number of blocks in dst_extents.
+      op_name: Operation name for error reporting.
     Raises:
-      PayloadError if any check fails.
-
+      error.PayloadError if any check fails.
     """
-    # Check: does not contain src extents.
+    # Check: Does not contain src extents.
     if op.src_extents:
-      raise PayloadError('%s: contains src_extents' % op_name)
+      raise error.PayloadError('%s: contains src_extents.' % op_name)
 
-    # Check: contains data.
+    # Check: Contains data.
     if data_length is None:
-      raise PayloadError('%s: missing data_{offset,length}' % op_name)
+      raise error.PayloadError('%s: missing data_{offset,length}.' % op_name)
 
     if op.type == common.OpType.REPLACE:
       PayloadChecker._CheckBlocksFitLength(data_length, total_dst_blocks,
@@ -674,9 +673,9 @@
     else:
       # Check: data_length must be smaller than the alotted dst blocks.
       if data_length >= total_dst_blocks * self.block_size:
-        raise PayloadError(
+        raise error.PayloadError(
             '%s: data_length (%d) must be less than allotted dst block '
-            'space (%d * %d)' %
+            'space (%d * %d).' %
             (op_name, data_length, total_dst_blocks, self.block_size))
 
   def _CheckMoveOperation(self, op, data_offset, total_src_blocks,
@@ -684,26 +683,25 @@
     """Specific checks for MOVE operations.
 
     Args:
-      op: the operation object from the manifest
-      data_offset: the offset of a data blob for the operation
-      total_src_blocks: total number of blocks in src_extents
-      total_dst_blocks: total number of blocks in dst_extents
-      op_name: operation name for error reporting
+      op: The operation object from the manifest.
+      data_offset: The offset of a data blob for the operation.
+      total_src_blocks: Total number of blocks in src_extents.
+      total_dst_blocks: Total number of blocks in dst_extents.
+      op_name: Operation name for error reporting.
     Raises:
-      PayloadError if any check fails.
-
+      error.PayloadError if any check fails.
     """
-    # Check: no data_{offset,length}.
+    # Check: No data_{offset,length}.
     if data_offset is not None:
-      raise PayloadError('%s: contains data_{offset,length}' % op_name)
+      raise error.PayloadError('%s: contains data_{offset,length}.' % op_name)
 
-    # Check: total src blocks == total dst blocks.
+    # Check: total_src_blocks == total_dst_blocks.
     if total_src_blocks != total_dst_blocks:
-      raise PayloadError(
-          '%s: total src blocks (%d) != total dst blocks (%d)' %
+      raise error.PayloadError(
+          '%s: total src blocks (%d) != total dst blocks (%d).' %
           (op_name, total_src_blocks, total_dst_blocks))
 
-    # Check: for all i, i-th src block index != i-th dst block index.
+    # Check: For all i, i-th src block index != i-th dst block index.
     i = 0
     src_extent_iter = iter(op.src_extents)
     dst_extent_iter = iter(op.dst_extents)
@@ -715,8 +713,8 @@
         try:
           src_extent = src_extent_iter.next()
         except StopIteration:
-          raise PayloadError('%s: ran out of src extents (%d/%d)' %
-                             (op_name, i, total_src_blocks))
+          raise error.PayloadError('%s: ran out of src extents (%d/%d).' %
+                                   (op_name, i, total_src_blocks))
         src_idx = src_extent.start_block
         src_num = src_extent.num_blocks
 
@@ -725,14 +723,15 @@
         try:
           dst_extent = dst_extent_iter.next()
         except StopIteration:
-          raise PayloadError('%s: ran out of dst extents (%d/%d)' %
-                             (op_name, i, total_dst_blocks))
+          raise error.PayloadError('%s: ran out of dst extents (%d/%d).' %
+                                   (op_name, i, total_dst_blocks))
         dst_idx = dst_extent.start_block
         dst_num = dst_extent.num_blocks
 
       if self.check_move_same_src_dst_block and src_idx == dst_idx:
-        raise PayloadError('%s: src/dst block number %d is the same (%d)' %
-                           (op_name, i, src_idx))
+        raise error.PayloadError(
+            '%s: src/dst block number %d is the same (%d).' %
+            (op_name, i, src_idx))
 
       advance = min(src_num, dst_num)
       i += advance
@@ -749,30 +748,29 @@
 
     # Make sure we've exhausted all src/dst extents.
     if src_extent:
-      raise PayloadError('%s: excess src blocks' % op_name)
+      raise error.PayloadError('%s: excess src blocks.' % op_name)
     if dst_extent:
-      raise PayloadError('%s: excess dst blocks' % op_name)
+      raise error.PayloadError('%s: excess dst blocks.' % op_name)
 
   def _CheckBsdiffOperation(self, data_length, total_dst_blocks, op_name):
     """Specific checks for BSDIFF operations.
 
     Args:
-      data_length: the length of the data blob associated with the operation
-      total_dst_blocks: total number of blocks in dst_extents
-      op_name: operation name for error reporting
+      data_length: The length of the data blob associated with the operation.
+      total_dst_blocks: Total number of blocks in dst_extents.
+      op_name: Operation name for error reporting.
     Raises:
-      PayloadError if any check fails.
-
+      error.PayloadError if any check fails.
     """
     # Check: data_{offset,length} present.
     if data_length is None:
-      raise PayloadError('%s: missing data_{offset,length}' % op_name)
+      raise error.PayloadError('%s: missing data_{offset,length}.' % op_name)
 
     # Check: data_length is strictly smaller than the alotted dst blocks.
     if data_length >= total_dst_blocks * self.block_size:
-      raise PayloadError(
+      raise error.PayloadError(
           '%s: data_length (%d) must be smaller than allotted dst space '
-          '(%d * %d = %d)' %
+          '(%d * %d = %d).' %
           (op_name, data_length, total_dst_blocks, self.block_size,
            total_dst_blocks * self.block_size))
 
@@ -782,21 +780,20 @@
     """Checks a single update operation.
 
     Args:
-      op: the operation object
-      op_name: operation name string for error reporting
-      is_last: whether this is the last operation in the sequence
-      old_block_counters: arrays of block read counters
-      new_block_counters: arrays of block write counters
-      old_usable_size: the overall usable size for src data in bytes
-      new_usable_size: the overall usable size for dst data in bytes
-      prev_data_offset: offset of last used data bytes
-      allow_signature: whether this may be a signature operation
-      blob_hash_counts: counters for hashed/unhashed blobs
+      op: The operation object.
+      op_name: Operation name string for error reporting.
+      is_last: Whether this is the last operation in the sequence.
+      old_block_counters: Arrays of block read counters.
+      new_block_counters: Arrays of block write counters.
+      old_usable_size: The overall usable size for src data in bytes.
+      new_usable_size: The overall usable size for dst data in bytes.
+      prev_data_offset: Offset of last used data bytes.
+      allow_signature: Whether this may be a signature operation.
+      blob_hash_counts: Counters for hashed/unhashed blobs.
     Returns:
       The amount of data blob associated with the operation.
     Raises:
-      PayloadError if any check has failed.
-
+      error.PayloadError if any check has failed.
     """
     # Check extents.
     total_src_blocks = self._CheckExtents(
@@ -816,9 +813,9 @@
     self._CheckPresentIff(data_offset, data_length, 'data_offset',
                           'data_length', op_name)
 
-    # Check: at least one dst_extent.
+    # Check: At least one dst_extent.
     if not op.dst_extents:
-      raise PayloadError('%s: dst_extents is empty' % op_name)
+      raise error.PayloadError('%s: dst_extents is empty.' % op_name)
 
     # Check {src,dst}_length, if present.
     if op.HasField('src_length'):
@@ -829,19 +826,20 @@
     if op.HasField('data_sha256_hash'):
       blob_hash_counts['hashed'] += 1
 
-      # Check: operation carries data.
+      # Check: Operation carries data.
       if data_offset is None:
-        raise PayloadError(
-            '%s: data_sha256_hash present but no data_{offset,length}' %
+        raise error.PayloadError(
+            '%s: data_sha256_hash present but no data_{offset,length}.' %
             op_name)
 
-      # Check: hash verifies correctly.
+      # Check: Hash verifies correctly.
+      # pylint cannot find the method in hashlib, for some reason.
       # pylint: disable=E1101
       actual_hash = hashlib.sha256(self.payload.ReadDataBlob(data_offset,
                                                              data_length))
       if op.data_sha256_hash != actual_hash.digest():
-        raise PayloadError(
-            '%s: data_sha256_hash (%s) does not match actual hash (%s)' %
+        raise error.PayloadError(
+            '%s: data_sha256_hash (%s) does not match actual hash (%s).' %
             (op_name, common.FormatSha256(op.data_sha256_hash),
              common.FormatSha256(actual_hash.digest())))
     elif data_offset is not None:
@@ -850,21 +848,22 @@
       elif self.allow_unhashed:
         blob_hash_counts['unhashed'] += 1
       else:
-        raise PayloadError('%s: unhashed operation not allowed' % op_name)
+        raise error.PayloadError('%s: unhashed operation not allowed.' %
+                                 op_name)
 
     if data_offset is not None:
-      # Check: contiguous use of data section.
+      # Check: Contiguous use of data section.
       if data_offset != prev_data_offset:
-        raise PayloadError(
-            '%s: data offset (%d) not matching amount used so far (%d)' %
+        raise error.PayloadError(
+            '%s: data offset (%d) not matching amount used so far (%d).' %
             (op_name, data_offset, prev_data_offset))
 
     # Type-specific checks.
     if op.type in (common.OpType.REPLACE, common.OpType.REPLACE_BZ):
       self._CheckReplaceOperation(op, data_length, total_dst_blocks, op_name)
     elif self.payload_type == _TYPE_FULL:
-      raise PayloadError('%s: non-REPLACE operation in a full payload' %
-                         op_name)
+      raise error.PayloadError('%s: non-REPLACE operation in a full payload.' %
+                               op_name)
     elif op.type == common.OpType.MOVE:
       self._CheckMoveOperation(op, data_offset, total_src_blocks,
                                total_dst_blocks, op_name)
@@ -882,14 +881,17 @@
   def _AllocBlockCounters(self, total_size):
     """Returns a freshly initialized array of block counters.
 
+    Note that the generated array is not portable as is due to byte-ordering
+    issues, hence it should not be serialized.
+
     Args:
-      total_size: the total block size in bytes
+      total_size: The total block size in bytes.
     Returns:
       An array of unsigned short elements initialized to zero, one for each of
       the blocks necessary for containing the partition.
-
     """
-    return array.array('H', [0] * self._SizeToNumBlocks(total_size))
+    return array.array('H',
+                       itertools.repeat(0, self._SizeToNumBlocks(total_size)))
 
   def _CheckOperations(self, operations, report, base_name, old_fs_size,
                        new_fs_size, new_usable_size, prev_data_offset,
@@ -897,19 +899,18 @@
     """Checks a sequence of update operations.
 
     Args:
-      operations: the sequence of operations to check
-      report: the report object to add to
-      base_name: the name of the operation block
-      old_fs_size: the old filesystem size in bytes
-      new_fs_size: the new filesystem size in bytes
-      new_usable_size: the overall usable size of the new partition in bytes
-      prev_data_offset: offset of last used data bytes
-      allow_signature: whether this sequence may contain signature operations
+      operations: The sequence of operations to check.
+      report: The report object to add to.
+      base_name: The name of the operation block.
+      old_fs_size: The old filesystem size in bytes.
+      new_fs_size: The new filesystem size in bytes.
+      new_usable_size: The overall usable size of the new partition in bytes.
+      prev_data_offset: Offset of last used data bytes.
+      allow_signature: Whether this sequence may contain signature operations.
     Returns:
       The total data blob size used.
     Raises:
-      PayloadError if any of the checks fails.
-
+      error.PayloadError if any of the checks fails.
     """
     # The total size of data blobs used by operations scanned thus far.
     total_data_used = 0
@@ -924,7 +925,7 @@
     op_blob_totals = {
         common.OpType.REPLACE: 0,
         common.OpType.REPLACE_BZ: 0,
-        # MOVE operations don't have blobs
+        # MOVE operations don't have blobs.
         common.OpType.BSDIFF: 0,
     }
     # Counts of hashed vs unhashed operations.
@@ -945,9 +946,9 @@
     for op, op_name in common.OperationIter(operations, base_name):
       op_num += 1
 
-      # Check: type is valid.
+      # Check: Type is valid.
       if op.type not in op_counts.keys():
-        raise PayloadError('%s: invalid type (%d)' % (op_name, op.type))
+        raise error.PayloadError('%s: invalid type (%d).' % (op_name, op.type))
       op_counts[op.type] += 1
 
       is_last = op_num == len(operations)
@@ -990,10 +991,10 @@
     report.AddField('block write hist', new_write_hist, linebreak=True,
                     indent=1)
 
-    # Check: full update must write each dst block once.
+    # Check: Full update must write each dst block once.
     if self.payload_type == _TYPE_FULL and new_write_hist.GetKeys() != [1]:
-      raise PayloadError(
-          '%s: not all blocks written exactly once during full update' %
+      raise error.PayloadError(
+          '%s: not all blocks written exactly once during full update.' %
           base_name)
 
     return total_data_used
@@ -1005,10 +1006,11 @@
     sigs.ParseFromString(sigs_raw)
     report.AddSection('signatures')
 
-    # Check: at least one signature present.
+    # Check: At least one signature present.
+    # pylint cannot see through the protobuf object, it seems.
     # pylint: disable=E1101
     if not sigs.signatures:
-      raise PayloadError('signature block is empty')
+      raise error.PayloadError('Signature block is empty.')
 
     last_ops_section = (self.payload.manifest.kernel_install_operations or
                         self.payload.manifest.install_operations)
@@ -1017,9 +1019,9 @@
     if not (fake_sig_op.type == common.OpType.REPLACE and
             self.sigs_offset == fake_sig_op.data_offset and
             self.sigs_size == fake_sig_op.data_length):
-      raise PayloadError(
-          'signatures_{offset,size} (%d+%d) does not match last operation '
-          '(%d+%d)' %
+      raise error.PayloadError(
+          'signatures_{offset,size} (%d+%d) does not match last operation '
+          '(%d+%d).' %
           (self.sigs_offset, self.sigs_size, fake_sig_op.data_offset,
            fake_sig_op.data_length))
 
@@ -1035,33 +1037,33 @@
     for sig, sig_name in common.SignatureIter(sigs.signatures, 'signatures'):
       sig_report = report.AddSubReport(sig_name)
 
-      # Check: signature contains mandatory fields.
+      # Check: Signature contains mandatory fields.
       self._CheckMandatoryField(sig, 'version', sig_report, sig_name)
       self._CheckMandatoryField(sig, 'data', None, sig_name)
       sig_report.AddField('data len', len(sig.data))
 
-      # Check: signatures pertains to actual payload hash.
+      # Check: Signature pertains to actual payload hash.
       if sig.version == 1:
         self._CheckSha256Signature(sig.data, pubkey_file_name,
                                    payload_hasher.digest(), sig_name)
       else:
-        raise PayloadError('unknown signature version (%d)' % sig.version)
+        raise error.PayloadError('Unknown signature version (%d).' %
+                                 sig.version)
 
   def Run(self, pubkey_file_name=None, metadata_sig_file=None,
           rootfs_part_size=0, kernel_part_size=0, report_out_file=None):
     """Checker entry point, invoking all checks.
 
     Args:
-      pubkey_file_name: public key used for signature verification
-      metadata_sig_file: metadata signature, if verification is desired
-      rootfs_part_size: the size of rootfs partitions in bytes (default: use
-                        reported filesystem size)
-      kernel_part_size: the size of kernel partitions in bytes (default: use
-                        reported filesystem size)
-      report_out_file: file object to dump the report to
+      pubkey_file_name: Public key used for signature verification.
+      metadata_sig_file: Metadata signature, if verification is desired.
+      rootfs_part_size: The size of rootfs partitions in bytes (default: use
+                        reported filesystem size).
+      kernel_part_size: The size of kernel partitions in bytes (default: use
+                        reported filesystem size).
+      report_out_file: File object to dump the report to.
     Raises:
-      PayloadError if payload verification failed.
-
+      error.PayloadError if payload verification failed.
     """
     if not pubkey_file_name:
       pubkey_file_name = _DEFAULT_PUBKEY_FILE_NAME
@@ -1081,20 +1083,20 @@
                                    self.payload.manifest_hasher.digest(),
                                    'metadata signature')
 
-      # Part 1: check the file header.
+      # Part 1: Check the file header.
       report.AddSection('header')
-      # Check: payload version is valid.
+      # Check: Payload version is valid.
       if self.payload.header.version != 1:
-        raise PayloadError('unknown payload version (%d)' %
-                           self.payload.header.version)
+        raise error.PayloadError('Unknown payload version (%d).' %
+                                 self.payload.header.version)
       report.AddField('version', self.payload.header.version)
       report.AddField('manifest len', self.payload.header.manifest_len)
 
-      # Part 2: check the manifest.
+      # Part 2: Check the manifest.
       self._CheckManifest(report, rootfs_part_size, kernel_part_size)
       assert self.payload_type, 'payload type should be known by now'
 
-      # Part 3: examine rootfs operations.
+      # Part 3: Examine rootfs operations.
       # TODO(garnold)(chromium:243559) only default to the filesystem size if
       # no explicit size provided *and* the partition size is not embedded in
       # the payload; see issue for more details.
@@ -1106,7 +1108,7 @@
           rootfs_part_size if rootfs_part_size else self.new_rootfs_fs_size,
           0, False)
 
-      # Part 4: examine kernel operations.
+      # Part 4: Examine kernel operations.
       # TODO(garnold)(chromium:243559) as above.
       report.AddSection('kernel operations')
       total_blob_size += self._CheckOperations(
@@ -1116,18 +1118,18 @@
           kernel_part_size if kernel_part_size else self.new_kernel_fs_size,
           total_blob_size, True)
 
-      # Check: operations data reach the end of the payload file.
+      # Check: Operations data reach the end of the payload file.
       used_payload_size = self.payload.data_offset + total_blob_size
       if used_payload_size != payload_file_size:
-        raise PayloadError(
-            'used payload size (%d) different from actual file size (%d)' %
+        raise error.PayloadError(
+            'Used payload size (%d) different from actual file size (%d).' %
             (used_payload_size, payload_file_size))
 
-      # Part 5: handle payload signatures message.
+      # Part 5: Handle payload signatures message.
       if self.check_payload_sig and self.sigs_size:
         self._CheckSignatures(report, pubkey_file_name)
 
-      # Part 6: summary.
+      # Part 6: Summary.
       report.AddSection('summary')
       report.AddField('update type', self.payload_type)