Merge dev-util's update_payload library and the paycheck script.

The update_payload library is a Python library for parsing and
handling update payloads. It is used by the paycheck.py payload
checker script, also included here.

The code is merged from this repo:
  https://chromium.googlesource.com/chromiumos/platform/dev-util/

Bug: 28797993
TEST='import update_payload'
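
A minimal usage sketch of the library (file name illustrative; Check()'s
arguments are assumed optional, as paycheck.py passes them all by keyword):

  import update_payload

  with open('payload.bin') as payload_file:
    payload = update_payload.Payload(payload_file)
    payload.Init()   # parse the payload header and manifest
    payload.Check()  # raises update_payload.PayloadError on failure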
diff --git a/scripts/paycheck.py b/scripts/paycheck.py
new file mode 100755
index 0000000..0195f53
--- /dev/null
+++ b/scripts/paycheck.py
@@ -0,0 +1,232 @@
+#!/usr/bin/python2
+#
+# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Command-line tool for checking and applying Chrome OS update payloads."""
+
+from __future__ import print_function
+
+import optparse
+import os
+import sys
+
+# pylint: disable=F0401
+lib_dir = os.path.join(os.path.dirname(__file__), 'lib')
+if os.path.exists(lib_dir) and os.path.isdir(lib_dir):
+  sys.path.insert(1, lib_dir)
+import update_payload
+
+
+_TYPE_FULL = 'full'
+_TYPE_DELTA = 'delta'
+
+
+def ParseArguments(argv):
+  """Parse and validate command-line arguments.
+
+  Args:
+    argv: command-line arguments to parse (excluding the program name)
+
+  Returns:
+    A tuple (opts, payload, extra_args), where `opts' are the options
+    returned by the parser, `payload' is the name of the payload file
+    (mandatory argument) and `extra_args' are any additional command-line
+    arguments.
+  """
+  parser = optparse.OptionParser(
+      usage=('Usage: %prog [OPTION...] PAYLOAD [DST_KERN DST_ROOT '
+             '[SRC_KERN SRC_ROOT]]'),
+      description=('Applies a Chrome OS update PAYLOAD to SRC_KERN and '
+                   'SRC_ROOT emitting DST_KERN and DST_ROOT, respectively. '
+                   'SRC_KERN and SRC_ROOT are only needed for delta payloads. '
+                   'When no partitions are provided, verifies the payload '
+                   'integrity.'),
+      epilog=('Note: a payload may verify correctly but fail to apply, and '
+              'vice versa; this is by design and can be thought of as static '
+              'vs dynamic correctness. A payload that both verifies and '
+              'applies correctly should be safe for use by the Chrome OS '
+              'Update Engine. Use --check to verify a payload prior to '
+              'applying it.'))
+
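+  # Example invocations (illustrative file names):
+  #   paycheck.py -c -r - payload.bin               # check; report to stdout
+  #   paycheck.py payload.bin kern.part root.part   # apply a full payload
+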
+  check_opts = optparse.OptionGroup(parser, 'Checking payload integrity')
+  check_opts.add_option('-c', '--check', action='store_true', default=False,
+                        help=('force payload integrity check (e.g. before '
+                              'applying)'))
+  check_opts.add_option('-D', '--describe', action='store_true', default=False,
+                        help='Print a friendly description of the payload.')
+  check_opts.add_option('-r', '--report', metavar='FILE',
+                        help="dump payload report (`-' for stdout)")
+  check_opts.add_option('-t', '--type', metavar='TYPE', dest='assert_type',
+                        help=("assert that payload is either `%s' or `%s'" %
+                              (_TYPE_FULL, _TYPE_DELTA)))
+  check_opts.add_option('-z', '--block-size', metavar='NUM', default=0,
+                        type='int',
+                        help='assert a non-default (4096) payload block size')
+  check_opts.add_option('-u', '--allow-unhashed', action='store_true',
+                        default=False, help='allow unhashed operations')
+  check_opts.add_option('-d', '--disabled_tests', metavar='TESTLIST',
+                        default=(),
+                        help=('comma-separated list of tests to disable; '
+                              'available values: ' +
+                              ', '.join(update_payload.CHECKS_TO_DISABLE)))
+  check_opts.add_option('-k', '--key', metavar='FILE',
+                        help=('Override standard key used for signature '
+                              'validation'))
+  check_opts.add_option('-m', '--meta-sig', metavar='FILE',
+                        help='verify metadata against its signature')
+  check_opts.add_option('-p', '--root-part-size', metavar='NUM',
+                        default=0, type='int',
+                        help=('override rootfs partition size auto-inference'))
+  check_opts.add_option('-P', '--kern-part-size', metavar='NUM',
+                        default=0, type='int',
+                        help=('override kernel partition size auto-inference'))
+  parser.add_option_group(check_opts)
+
+  trace_opts = optparse.OptionGroup(parser, 'Applying payload')
+  trace_opts.add_option('-x', '--extract-bsdiff', action='store_true',
+                        default=False,
+                        help=('use temp input/output files with BSDIFF '
+                              'operations (not in-place)'))
+  trace_opts.add_option('--bspatch-path', metavar='FILE',
+                        help=('use the specified bspatch binary'))
+  parser.add_option_group(trace_opts)
+
+  trace_opts = optparse.OptionGroup(parser, 'Block tracing')
+  trace_opts.add_option('-b', '--root-block', metavar='BLOCK', type='int',
+                        help='trace the origin for a rootfs block')
+  trace_opts.add_option('-B', '--kern-block', metavar='BLOCK', type='int',
+                        help='trace the origin for a kernel block')
+  trace_opts.add_option('-s', '--skip', metavar='NUM', default='0', type='int',
+                        help='skip first NUM occurrences of traced block')
+  parser.add_option_group(trace_opts)
+
+  # Parse command-line arguments.
+  opts, args = parser.parse_args(argv)
+
+  # Validate a value given to --type, if any.
+  if opts.assert_type not in (None, _TYPE_FULL, _TYPE_DELTA):
+    parser.error('invalid argument to --type: %s' % opts.assert_type)
+
+  # Convert and validate --disabled_tests value list, if provided.
+  if opts.disabled_tests:
+    opts.disabled_tests = opts.disabled_tests.split(',')
+    for test in opts.disabled_tests:
+      if test not in update_payload.CHECKS_TO_DISABLE:
+        parser.error('invalid argument to --disabled_tests: %s' % test)
+
+  # Ensure consistent use of block tracing options.
+  do_block_trace = not (opts.root_block is None and opts.kern_block is None)
+  if opts.skip and not do_block_trace:
+    parser.error('--skip must be used with either --root-block or --kern-block')
+
+  # There are several options that imply --check.
+  opts.check = (opts.check or opts.report or opts.assert_type or
+                opts.block_size or opts.allow_unhashed or
+                opts.disabled_tests or opts.meta_sig or opts.key or
+                opts.root_part_size or opts.kern_part_size)
+
+  # Check number of arguments, enforce payload type accordingly.
+  if len(args) == 3:
+    if opts.assert_type == _TYPE_DELTA:
+      parser.error('%s payload requires source partition arguments' %
+                   _TYPE_DELTA)
+    opts.assert_type = _TYPE_FULL
+  elif len(args) == 5:
+    if opts.assert_type == _TYPE_FULL:
+      parser.error('%s payload does not accept source partition arguments' %
+                   _TYPE_FULL)
+    opts.assert_type = _TYPE_DELTA
+  elif len(args) == 1:
+    # Not applying payload; if block tracing not requested either, do an
+    # integrity check.
+    if not do_block_trace:
+      opts.check = True
+    if opts.extract_bsdiff:
+      parser.error('--extract-bsdiff can only be used when applying payloads')
+    if opts.bspatch_path:
+      parser.error('--bspatch-path can only be used when applying payloads')
+  else:
+    parser.error('unexpected number of arguments')
+
+  # By default, look for a metadata-signature file whose name is derived from
+  # that of the payload being checked. We only do this if a check was triggered.
+  if opts.check and not opts.meta_sig:
+    default_meta_sig = args[0] + '.metadata-signature'
+    if os.path.isfile(default_meta_sig):
+      opts.meta_sig = default_meta_sig
+      print('Using default metadata signature', opts.meta_sig, file=sys.stderr)
+
+  return opts, args[0], args[1:]
+
+
+def main(argv):
+  # Parse and validate arguments.
+  options, payload_file_name, extra_args = ParseArguments(argv[1:])
+
+  with open(payload_file_name) as payload_file:
+    payload = update_payload.Payload(payload_file)
+    try:
+      # Initialize payload.
+      payload.Init()
+
+      if options.describe:
+        payload.Describe()
+
+      # Perform payload integrity checks.
+      if options.check:
+        report_file = None
+        do_close_report_file = False
+        metadata_sig_file = None
+        try:
+          if options.report:
+            if options.report == '-':
+              report_file = sys.stdout
+            else:
+              report_file = open(options.report, 'w')
+              do_close_report_file = True
+
+          metadata_sig_file = options.meta_sig and open(options.meta_sig)
+          payload.Check(
+              pubkey_file_name=options.key,
+              metadata_sig_file=metadata_sig_file,
+              report_out_file=report_file,
+              assert_type=options.assert_type,
+              block_size=int(options.block_size),
+              rootfs_part_size=options.root_part_size,
+              kernel_part_size=options.kern_part_size,
+              allow_unhashed=options.allow_unhashed,
+              disabled_tests=options.disabled_tests)
+        finally:
+          if metadata_sig_file:
+            metadata_sig_file.close()
+          if do_close_report_file:
+            report_file.close()
+
+      # Trace blocks.
+      if options.root_block is not None:
+        payload.TraceBlock(options.root_block, options.skip, sys.stdout, False)
+      if options.kern_block is not None:
+        payload.TraceBlock(options.kern_block, options.skip, sys.stdout, True)
+
+      # Apply payload.
+      if extra_args:
+        dargs = {'bsdiff_in_place': not options.extract_bsdiff}
+        if options.bspatch_path:
+          dargs['bspatch_path'] = options.bspatch_path
+        if options.assert_type == _TYPE_DELTA:
+          dargs['old_kernel_part'] = extra_args[2]
+          dargs['old_rootfs_part'] = extra_args[3]
+
+        payload.Apply(extra_args[0], extra_args[1], **dargs)
+
+    except update_payload.PayloadError, e:
+      sys.stderr.write('Error: %s\n' % e)
+      return 1
+
+  return 0
+
+
+if __name__ == '__main__':
+  sys.exit(main(sys.argv))
diff --git a/scripts/test_paycheck.sh b/scripts/test_paycheck.sh
new file mode 100755
index 0000000..c395db4
--- /dev/null
+++ b/scripts/test_paycheck.sh
@@ -0,0 +1,175 @@
+#!/bin/bash
+#
+# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# A test script for paycheck.py and the update_payload.py library.
+#
+# This script requires three payload files, along with a metadata signature for
+# each, and a public key for verifying signatures. The payloads are:
+#
+# - A full payload for release X (old_full_payload)
+#
+# - A full payload for release Y (new_full_payload), where Y > X
+#
+# - A delta payload from X to Y (delta_payload)
+#
+# The test performs the following:
+#
+# - It verifies each payload against its metadata signature, also asserting the
+#   payload type. Another artifact is a human-readable payload report, which
+#   is output to stdout to be inspected by the user.
+#
+# - It performs a random block trace on the delta payload (both kernel and
+#   rootfs blocks), dumping the traces to stdout for the user to inspect.
+#
+# - It applies old_full_payload to yield old kernel (old_kern.part) and rootfs
+#   (old_root.part) partitions.
+#
+# - It applies delta_payload to old_{kern,root}.part to yield new kernel
+#   (new_delta_kern.part) and rootfs (new_delta_root.part) partitions.
+#
+# - It applies new_full_payload to yield reference new kernel
+#   (new_full_kern.part) and rootfs (new_full_root.part) partitions.
+#
+# - It compares new_{delta,full}_kern.part and new_{delta,full}_root.part to
+#   ensure that they are binary identical.
+#
+# If all steps have completed successfully we know with high certainty that
+# paycheck.py (and hence update_payload.py) correctly parses both full and
+# delta payloads, and applies them to yield the expected result. We also know
+# that tracing works, to the extent it does not crash. Manual inspection of
+# payload reports and block traces will further improve our confidence and is
+# strongly encouraged. Finally, each paycheck.py execution is timed.
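+#
+# Example invocation (illustrative file names):
+#
+#   test_paycheck.sh old_full.bin delta.bin new_full.bin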
+
+
+# Stop on errors, unset variables.
+set -e
+set -u
+
+# Temporary image files.
+OLD_KERN_PART=old_kern.part
+OLD_ROOT_PART=old_root.part
+NEW_DELTA_KERN_PART=new_delta_kern.part
+NEW_DELTA_ROOT_PART=new_delta_root.part
+NEW_FULL_KERN_PART=new_full_kern.part
+NEW_FULL_ROOT_PART=new_full_root.part
+
+
+log() {
+  echo "$@" >&2
+}
+
+die() {
+  log "$@"
+  exit 1
+}
+
+usage_and_exit() {
+  cat >&2 <<EOF
+Usage: ${0##*/} old_full_payload delta_payload new_full_payload
+EOF
+  exit
+}
+
+check_payload() {
+  payload_file=$1
+  payload_type=$2
+
+  time "${paycheck}" -t "${payload_type}" "${payload_file}"
+}
+
+trace_kern_block() {
+  payload_file=$1
+  block=$2
+  time "${paycheck}" -B "${block}" "${payload_file}"
+}
+
+trace_root_block() {
+  payload_file=$1
+  block=$2
+  time "${paycheck}" -b "${block}" "${payload_file}"
+}
+
+apply_full_payload() {
+  payload_file=$1
+  dst_kern_part="$2/$3"
+  dst_root_part="$2/$4"
+
+  time "${paycheck}" "${payload_file}" "${dst_kern_part}" "${dst_root_part}"
+}
+
+apply_delta_payload() {
+  payload_file=$1
+  dst_kern_part="$2/$3"
+  dst_root_part="$2/$4"
+  src_kern_part="$2/$5"
+  src_root_part="$2/$6"
+
+  time "${paycheck}" "${payload_file}" "${dst_kern_part}" "${dst_root_part}" \
+    "${src_kern_part}" "${src_root_part}"
+}
+
+main() {
+  # Read command-line arguments.
+  if [ $# == 1 ] && [ "$1" == "-h" ]; then
+    usage_and_exit
+  elif [ $# != 3 ]; then
+    die "Error: unexpected number of arguments"
+  fi
+  old_full_payload="$1"
+  delta_payload="$2"
+  new_full_payload="$3"
+
+  # Find paycheck.py
+  paycheck=${0%/*}/paycheck.py
+  if [ -z "${paycheck}" ] || [ ! -x "${paycheck}" ]; then
+    die "cannot find ${paycheck} or file is not executable"
+  fi
+
+  # Check the payloads statically.
+  log "Checking payloads..."
+  check_payload "${old_full_payload}" full
+  check_payload "${new_full_payload}" full
+  check_payload "${delta_payload}" delta
+  log "Done"
+
+  # Trace a random block between 0-1024 on all payloads.
+  block=$((RANDOM * 1024 / 32767))
+  log "Tracing a random block (${block}) in full/delta payloads..."
+  trace_kern_block "${new_full_payload}" ${block}
+  trace_root_block "${new_full_payload}" ${block}
+  trace_kern_block "${delta_payload}" ${block}
+  trace_root_block "${delta_payload}" ${block}
+  log "Done"
+
+  # Apply full/delta payloads and verify results are identical.
+  tmpdir="$(mktemp -d --tmpdir test_paycheck.XXXXXXXX)"
+  log "Initiating application of payloads at $tmpdir"
+
+  log "Applying old full payload..."
+  apply_full_payload "${old_full_payload}" "${tmpdir}" "${OLD_KERN_PART}" \
+    "${OLD_ROOT_PART}"
+  log "Done"
+
+  log "Applying delta payload to old partitions..."
+  apply_delta_payload "${delta_payload}" "${tmpdir}" "${NEW_DELTA_KERN_PART}" \
+    "${NEW_DELTA_ROOT_PART}" "${OLD_KERN_PART}" "${OLD_ROOT_PART}"
+  log "Done"
+
+  log "Applying new full payload..."
+  apply_full_payload "${new_full_payload}" "${tmpdir}" "${NEW_FULL_KERN_PART}" \
+    "${NEW_FULL_ROOT_PART}"
+  log "Done"
+
+  log "Comparing results of delta and new full updates..."
+  diff "${tmpdir}/${NEW_FULL_KERN_PART}" "${tmpdir}/${NEW_DELTA_KERN_PART}"
+  diff "${tmpdir}/${NEW_FULL_ROOT_PART}" "${tmpdir}/${NEW_DELTA_ROOT_PART}"
+  log "Done"
+
+  log "Cleaning up"
+  rm -fr "${tmpdir}"
+}
+
+main "$@"
diff --git a/scripts/update_payload/__init__.py b/scripts/update_payload/__init__.py
new file mode 100644
index 0000000..1906a16
--- /dev/null
+++ b/scripts/update_payload/__init__.py
@@ -0,0 +1,11 @@
+# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Library for processing, verifying and applying Chrome OS update payloads."""
+
+# Just raise the interface classes to the root namespace.
+# pylint: disable=W0401
+from checker import CHECKS_TO_DISABLE
+from error import PayloadError
+from payload import Payload
diff --git a/scripts/update_payload/applier.py b/scripts/update_payload/applier.py
new file mode 100644
index 0000000..04791c1
--- /dev/null
+++ b/scripts/update_payload/applier.py
@@ -0,0 +1,576 @@
+# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Applying a Chrome OS update payload.
+
+This module is used internally by the main Payload class for applying an update
+payload. The interface for invoking the applier is as follows:
+
+  applier = PayloadApplier(payload)
+  applier.Run(...)
+
+"""
+
+from __future__ import print_function
+
+import array
+import bz2
+import hashlib
+import itertools
+import os
+import shutil
+import subprocess
+import sys
+import tempfile
+
+import common
+from error import PayloadError
+
+
+#
+# Helper functions.
+#
+def _VerifySha256(file_obj, expected_hash, name, length=-1):
+  """Verifies the SHA256 hash of a file.
+
+  Args:
+    file_obj: file object to read
+    expected_hash: the hash digest we expect to be getting
+    name: name string of this hash, for error reporting
+    length: precise length of data to verify (optional)
+
+  Raises:
+    PayloadError if computed hash doesn't match expected one, or if fails to
+    read the specified length of data.
+  """
+  # pylint: disable=E1101
+  hasher = hashlib.sha256()
+  block_length = 1024 * 1024
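+  # A negative length means "read until EOF"; sys.maxint (Python 2) serves as
+  # an effectively unbounded limit in that case.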
+  max_length = length if length >= 0 else sys.maxint
+
+  while max_length > 0:
+    read_length = min(max_length, block_length)
+    data = file_obj.read(read_length)
+    if not data:
+      break
+    max_length -= len(data)
+    hasher.update(data)
+
+  if length >= 0 and max_length > 0:
+    raise PayloadError(
+        'insufficient data (%d instead of %d) when verifying %s' %
+        (length - max_length, length, name))
+
+  actual_hash = hasher.digest()
+  if actual_hash != expected_hash:
+    raise PayloadError('%s hash (%s) not as expected (%s)' %
+                       (name, common.FormatSha256(actual_hash),
+                        common.FormatSha256(expected_hash)))
+
+
+def _ReadExtents(file_obj, extents, block_size, max_length=-1):
+  """Reads data from file as defined by extent sequence.
+
+  This tries to be efficient by not copying data as it is read in chunks.
+
+  Args:
+    file_obj: file object
+    extents: sequence of block extents (offset and length)
+    block_size: size of each block
+    max_length: maximum length to read (optional)
+
+  Returns:
+    A character array containing the concatenated read data.
+  """
+  data = array.array('c')
+  if max_length < 0:
+    max_length = sys.maxint
+  for ex in extents:
+    if max_length == 0:
+      break
+    read_length = min(max_length, ex.num_blocks * block_size)
+
+    # Fill with zeros or read from file, depending on the type of extent.
+    if ex.start_block == common.PSEUDO_EXTENT_MARKER:
+      data.extend(itertools.repeat('\0', read_length))
+    else:
+      file_obj.seek(ex.start_block * block_size)
+      data.fromfile(file_obj, read_length)
+
+    max_length -= read_length
+
+  return data
+
+
+def _WriteExtents(file_obj, data, extents, block_size, base_name):
+  """Writes data to file as defined by extent sequence.
+
+  This tries to be efficient by not copying data as it is written in chunks.
+
+  Args:
+    file_obj: file object
+    data: data to write
+    extents: sequence of block extents (offset and length)
+    block_size: size of each block
+    base_name: name string of extent sequence for error reporting
+
+  Raises:
+    PayloadError when things don't add up.
+  """
+  data_offset = 0
+  data_length = len(data)
+  for ex, ex_name in common.ExtentIter(extents, base_name):
+    if not data_length:
+      raise PayloadError('%s: more write extents than data' % ex_name)
+    write_length = min(data_length, ex.num_blocks * block_size)
+
+    # Only do actual writing if this is not a pseudo-extent.
+    if ex.start_block != common.PSEUDO_EXTENT_MARKER:
+      file_obj.seek(ex.start_block * block_size)
+      data_view = buffer(data, data_offset, write_length)
+      file_obj.write(data_view)
+
+    data_offset += write_length
+    data_length -= write_length
+
+  if data_length:
+    raise PayloadError('%s: more data than write extents' % base_name)
+
+
+def _ExtentsToBspatchArg(extents, block_size, base_name, data_length=-1):
+  """Translates an extent sequence into a bspatch-compatible string argument.
+
+  Args:
+    extents: sequence of block extents (offset and length)
+    block_size: size of each block
+    base_name: name string of extent sequence for error reporting
+    data_length: the actual total length of the data in bytes (optional)
+
+  Returns:
+    A tuple consisting of (i) a string of the form
+    "off_1:len_1,...,off_n:len_n", (ii) an offset where zero padding is needed
+    for filling the last extent, (iii) the length of the padding (zero means no
+    padding is needed and the extents cover the full length of data).
+
+  Raises:
+    PayloadError if data_length is too short or too long.
+  """
+  arg = ''
+  pad_off = pad_len = 0
+  if data_length < 0:
+    data_length = sys.maxint
+  for ex, ex_name in common.ExtentIter(extents, base_name):
+    if not data_length:
+      raise PayloadError('%s: more extents than total data length' % ex_name)
+
+    is_pseudo = ex.start_block == common.PSEUDO_EXTENT_MARKER
+    start_byte = -1 if is_pseudo else ex.start_block * block_size
+    num_bytes = ex.num_blocks * block_size
+    if data_length < num_bytes:
+      # We're only padding a real extent.
+      if not is_pseudo:
+        pad_off = start_byte + data_length
+        pad_len = num_bytes - data_length
+
+      num_bytes = data_length
+
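+    # (arg and ',') evaluates to '' for the first extent and ',' afterwards,
+    # so extents are comma-separated without a leading comma.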
+    arg += '%s%d:%d' % (arg and ',', start_byte, num_bytes)
+    data_length -= num_bytes
+
+  if data_length:
+    raise PayloadError('%s: extents not covering full data length' % base_name)
+
+  return arg, pad_off, pad_len
+
+
+#
+# Payload application.
+#
+class PayloadApplier(object):
+  """Applying an update payload.
+
+  This is a short-lived object whose purpose is to isolate the logic used for
+  applying an update payload.
+  """
+
+  def __init__(self, payload, bsdiff_in_place=True, bspatch_path=None,
+               truncate_to_expected_size=True):
+    """Initialize the applier.
+
+    Args:
+      payload: the payload object to check
+      bsdiff_in_place: whether to perform BSDIFF operation in-place (optional)
+      bspatch_path: path to the bspatch binary (optional)
+      truncate_to_expected_size: whether to truncate the resulting partitions
+                                 to their expected sizes, as specified in the
+                                 payload (optional)
+    """
+    assert payload.is_init, 'uninitialized update payload'
+    self.payload = payload
+    self.block_size = payload.manifest.block_size
+    self.minor_version = payload.manifest.minor_version
+    self.bsdiff_in_place = bsdiff_in_place
+    self.bspatch_path = bspatch_path or 'bspatch'
+    self.truncate_to_expected_size = truncate_to_expected_size
+
+  def _ApplyReplaceOperation(self, op, op_name, out_data, part_file, part_size):
+    """Applies a REPLACE{,_BZ} operation.
+
+    Args:
+      op: the operation object
+      op_name: name string for error reporting
+      out_data: the data to be written
+      part_file: the partition file object
+      part_size: the size of the partition
+
+    Raises:
+      PayloadError if something goes wrong.
+    """
+    block_size = self.block_size
+    data_length = len(out_data)
+
+    # Decompress data if needed.
+    if op.type == common.OpType.REPLACE_BZ:
+      out_data = bz2.decompress(out_data)
+      data_length = len(out_data)
+
+    # Write data to blocks specified in dst extents.
+    data_start = 0
+    for ex, ex_name in common.ExtentIter(op.dst_extents,
+                                         '%s.dst_extents' % op_name):
+      start_block = ex.start_block
+      num_blocks = ex.num_blocks
+      count = num_blocks * block_size
+
+      # Make sure it's not a fake (signature) operation.
+      if start_block != common.PSEUDO_EXTENT_MARKER:
+        data_end = data_start + count
+
+        # Make sure we're not running past partition boundary.
+        if (start_block + num_blocks) * block_size > part_size:
+          raise PayloadError(
+              '%s: extent (%s) exceeds partition size (%d)' %
+              (ex_name, common.FormatExtent(ex, block_size),
+               part_size))
+
+        # Make sure that we have enough data to write.
+        if data_end >= data_length + block_size:
+          raise PayloadError(
+              '%s: more dst blocks than data (even with padding)' % op_name)
+
+        # Pad with zeros if necessary.
+        if data_end > data_length:
+          padding = data_end - data_length
+          out_data += '\0' * padding
+
+        part_file.seek(start_block * block_size)
+        part_file.write(out_data[data_start:data_end])
+
+      data_start += count
+
+    # Make sure we wrote all data.
+    if data_start < data_length:
+      raise PayloadError('%s: wrote fewer bytes (%d) than expected (%d)' %
+                         (op_name, data_start, data_length))
+
+  def _ApplyMoveOperation(self, op, op_name, part_file):
+    """Applies a MOVE operation.
+
+    Note that this operation must read the whole block data from the input and
+    only then dump it, due to our in-place update semantics; otherwise, it
+    might clobber data midway through.
+
+    Args:
+      op: the operation object
+      op_name: name string for error reporting
+      part_file: the partition file object
+
+    Raises:
+      PayloadError if something goes wrong.
+    """
+    block_size = self.block_size
+
+    # Gather input raw data from src extents.
+    in_data = _ReadExtents(part_file, op.src_extents, block_size)
+
+    # Dump extracted data to dst extents.
+    _WriteExtents(part_file, in_data, op.dst_extents, block_size,
+                  '%s.dst_extents' % op_name)
+
+  def _ApplyBsdiffOperation(self, op, op_name, patch_data, new_part_file):
+    """Applies a BSDIFF operation.
+
+    Args:
+      op: the operation object
+      op_name: name string for error reporting
+      patch_data: the binary patch content
+      new_part_file: the target partition file object
+
+    Raises:
+      PayloadError if something goes wrong.
+    """
+    # Implemented using a SOURCE_BSDIFF operation with the source and target
+    # partition set to the new partition.
+    self._ApplySourceBsdiffOperation(op, op_name, patch_data, new_part_file,
+                                     new_part_file)
+
+  def _ApplySourceCopyOperation(self, op, op_name, old_part_file,
+                                new_part_file):
+    """Applies a SOURCE_COPY operation.
+
+    Args:
+      op: the operation object
+      op_name: name string for error reporting
+      old_part_file: the old partition file object
+      new_part_file: the new partition file object
+
+    Raises:
+      PayloadError if something goes wrong.
+    """
+    if not old_part_file:
+      raise PayloadError(
+          '%s: no source partition file provided for operation type (%d)' %
+          (op_name, op.type))
+
+    block_size = self.block_size
+
+    # Gather input raw data from src extents.
+    in_data = _ReadExtents(old_part_file, op.src_extents, block_size)
+
+    # Dump extracted data to dst extents.
+    _WriteExtents(new_part_file, in_data, op.dst_extents, block_size,
+                  '%s.dst_extents' % op_name)
+
+  def _ApplySourceBsdiffOperation(self, op, op_name, patch_data, old_part_file,
+                                  new_part_file):
+    """Applies a SOURCE_BSDIFF operation.
+
+    Args:
+      op: the operation object
+      op_name: name string for error reporting
+      patch_data: the binary patch content
+      old_part_file: the source partition file object
+      new_part_file: the target partition file object
+
+    Raises:
+      PayloadError if something goes wrong.
+    """
+    if not old_part_file:
+      raise PayloadError(
+          '%s: no source partition file provided for operation type (%d)' %
+          (op_name, op.type))
+
+    block_size = self.block_size
+
+    # Dump patch data to file.
+    with tempfile.NamedTemporaryFile(delete=False) as patch_file:
+      patch_file_name = patch_file.name
+      patch_file.write(patch_data)
+
+    if (hasattr(new_part_file, 'fileno') and
+        ((not old_part_file) or hasattr(old_part_file, 'fileno'))):
+      # Construct input and output extents argument for bspatch.
+      in_extents_arg, _, _ = _ExtentsToBspatchArg(
+          op.src_extents, block_size, '%s.src_extents' % op_name,
+          data_length=op.src_length)
+      out_extents_arg, pad_off, pad_len = _ExtentsToBspatchArg(
+          op.dst_extents, block_size, '%s.dst_extents' % op_name,
+          data_length=op.dst_length)
+
+      new_file_name = '/dev/fd/%d' % new_part_file.fileno()
+      # Diff from source partition.
+      old_file_name = '/dev/fd/%d' % old_part_file.fileno()
+
+      # Invoke bspatch on partition file with extents args.
+      bspatch_cmd = [self.bspatch_path, old_file_name, new_file_name,
+                     patch_file_name, in_extents_arg, out_extents_arg]
+      subprocess.check_call(bspatch_cmd)
+
+      # Pad with zeros past the total output length.
+      if pad_len:
+        new_part_file.seek(pad_off)
+        new_part_file.write('\0' * pad_len)
+    else:
+      # Gather input raw data and write to a temp file.
+      input_part_file = old_part_file if old_part_file else new_part_file
+      in_data = _ReadExtents(input_part_file, op.src_extents, block_size,
+                             max_length=op.src_length)
+      with tempfile.NamedTemporaryFile(delete=False) as in_file:
+        in_file_name = in_file.name
+        in_file.write(in_data)
+
+      # Allocate temporary output file.
+      with tempfile.NamedTemporaryFile(delete=False) as out_file:
+        out_file_name = out_file.name
+
+      # Invoke bspatch.
+      bspatch_cmd = [self.bspatch_path, in_file_name, out_file_name,
+                     patch_file_name]
+      subprocess.check_call(bspatch_cmd)
+
+      # Read output.
+      with open(out_file_name, 'rb') as out_file:
+        out_data = out_file.read()
+        if len(out_data) != op.dst_length:
+          raise PayloadError(
+              '%s: actual patched data length (%d) not as expected (%d)' %
+              (op_name, len(out_data), op.dst_length))
+
+      # Write output back to partition, with padding.
+      unaligned_out_len = len(out_data) % block_size
+      if unaligned_out_len:
+        out_data += '\0' * (block_size - unaligned_out_len)
+      _WriteExtents(new_part_file, out_data, op.dst_extents, block_size,
+                    '%s.dst_extents' % op_name)
+
+      # Delete input/output files.
+      os.remove(in_file_name)
+      os.remove(out_file_name)
+
+    # Delete patch file.
+    os.remove(patch_file_name)
+
+  def _ApplyOperations(self, operations, base_name, old_part_file,
+                       new_part_file, part_size):
+    """Applies a sequence of update operations to a partition.
+
+    This assumes in-place update semantics for MOVE and BSDIFF, namely all
+    reads are performed first, then the data is processed and written back to
+    the same file.
+
+    Args:
+      operations: the sequence of operations
+      base_name: the name of the operation sequence
+      old_part_file: the old partition file object, open for reading/writing
+      new_part_file: the new partition file object, open for reading/writing
+      part_size: the partition size
+
+    Raises:
+      PayloadError if anything goes wrong while processing the payload.
+    """
+    for op, op_name in common.OperationIter(operations, base_name):
+      # Read data blob.
+      data = self.payload.ReadDataBlob(op.data_offset, op.data_length)
+
+      if op.type in (common.OpType.REPLACE, common.OpType.REPLACE_BZ):
+        self._ApplyReplaceOperation(op, op_name, data, new_part_file, part_size)
+      elif op.type == common.OpType.MOVE:
+        self._ApplyMoveOperation(op, op_name, new_part_file)
+      elif op.type == common.OpType.BSDIFF:
+        self._ApplyBsdiffOperation(op, op_name, data, new_part_file)
+      elif op.type == common.OpType.SOURCE_COPY:
+        self._ApplySourceCopyOperation(op, op_name, old_part_file,
+                                       new_part_file)
+      elif op.type == common.OpType.SOURCE_BSDIFF:
+        self._ApplySourceBsdiffOperation(op, op_name, data, old_part_file,
+                                         new_part_file)
+      else:
+        raise PayloadError('%s: unknown operation type (%d)' %
+                           (op_name, op.type))
+
+  def _ApplyToPartition(self, operations, part_name, base_name,
+                        new_part_file_name, new_part_info,
+                        old_part_file_name=None, old_part_info=None):
+    """Applies an update to a partition.
+
+    Args:
+      operations: the sequence of update operations to apply
+      part_name: the name of the partition, for error reporting
+      base_name: the name of the operation sequence
+      new_part_file_name: file name to write partition data to
+      new_part_info: size and expected hash of dest partition
+      old_part_file_name: file name of source partition (optional)
+      old_part_info: size and expected hash of source partition (optional)
+
+    Raises:
+      PayloadError if anything goes wrong with the update.
+    """
+    # Do we have a source partition?
+    if old_part_file_name:
+      # Verify the source partition.
+      with open(old_part_file_name, 'rb') as old_part_file:
+        _VerifySha256(old_part_file, old_part_info.hash,
+                      'old ' + part_name, length=old_part_info.size)
+      new_part_file_mode = 'r+b'
+      if self.minor_version == common.INPLACE_MINOR_PAYLOAD_VERSION:
+        # Copy the src partition to the dst one; make sure we don't truncate it.
+        shutil.copyfile(old_part_file_name, new_part_file_name)
+      elif (self.minor_version == common.SOURCE_MINOR_PAYLOAD_VERSION or
+            self.minor_version == common.OPSRCHASH_MINOR_PAYLOAD_VERSION):
+        # In minor version >= 2, we don't want to copy the partitions, so
+        # instead just make the new partition file.
+        open(new_part_file_name, 'w').close()
+      else:
+        raise PayloadError("Unknown minor version: %d" % self.minor_version)
+    else:
+      # We need to create/truncate the dst partition file.
+      new_part_file_mode = 'w+b'
+
+    # Apply operations.
+    with open(new_part_file_name, new_part_file_mode) as new_part_file:
+      old_part_file = (open(old_part_file_name, 'r+b')
+                       if old_part_file_name else None)
+      try:
+        self._ApplyOperations(operations, base_name, old_part_file,
+                              new_part_file, new_part_info.size)
+      finally:
+        if old_part_file:
+          old_part_file.close()
+
+      # Truncate the result, if so instructed.
+      if self.truncate_to_expected_size:
+        new_part_file.seek(0, 2)
+        if new_part_file.tell() > new_part_info.size:
+          new_part_file.seek(new_part_info.size)
+          new_part_file.truncate()
+
+    # Verify the resulting partition.
+    with open(new_part_file_name, 'rb') as new_part_file:
+      _VerifySha256(new_part_file, new_part_info.hash,
+                    'new ' + part_name, length=new_part_info.size)
+
+  def Run(self, new_kernel_part, new_rootfs_part, old_kernel_part=None,
+          old_rootfs_part=None):
+    """Applier entry point, invoking all update operations.
+
+    Args:
+      new_kernel_part: name of dest kernel partition file
+      new_rootfs_part: name of dest rootfs partition file
+      old_kernel_part: name of source kernel partition file (optional)
+      old_rootfs_part: name of source rootfs partition file (optional)
+
+    Raises:
+      PayloadError if payload application failed.
+    """
+    self.payload.ResetFile()
+
+    # Make sure the arguments are sane and match the payload.
+    if not (new_kernel_part and new_rootfs_part):
+      raise PayloadError('missing dst {kernel,rootfs} partitions')
+
+    if not (old_kernel_part or old_rootfs_part):
+      if not self.payload.IsFull():
+        raise PayloadError('trying to apply a non-full update without src '
+                           '{kernel,rootfs} partitions')
+    elif old_kernel_part and old_rootfs_part:
+      if not self.payload.IsDelta():
+        raise PayloadError('trying to apply a non-delta update onto src '
+                           '{kernel,rootfs} partitions')
+    else:
+      raise PayloadError('not all src partitions provided')
+
+    # Apply update to rootfs.
+    self._ApplyToPartition(
+        self.payload.manifest.install_operations, 'rootfs',
+        'install_operations', new_rootfs_part,
+        self.payload.manifest.new_rootfs_info, old_rootfs_part,
+        self.payload.manifest.old_rootfs_info)
+
+    # Apply update to the kernel.
+    self._ApplyToPartition(
+        self.payload.manifest.kernel_install_operations, 'kernel',
+        'kernel_install_operations', new_kernel_part,
+        self.payload.manifest.new_kernel_info, old_kernel_part,
+        self.payload.manifest.old_kernel_info)
diff --git a/scripts/update_payload/block_tracer.py b/scripts/update_payload/block_tracer.py
new file mode 100644
index 0000000..f222b21
--- /dev/null
+++ b/scripts/update_payload/block_tracer.py
@@ -0,0 +1,113 @@
+# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Tracing block data source through a Chrome OS update payload.
+
+This module is used internally by the main Payload class for tracing block
+content through an update payload. This is a useful feature in debugging
+payload applying functionality in this package. The interface for invoking the
+tracer is as follows:
+
+  tracer = PayloadBlockTracer(payload)
+  tracer.Run(...)
+
+"""
+
+from __future__ import print_function
+
+import common
+
+
+#
+# Payload block tracing.
+#
+class PayloadBlockTracer(object):
+  """Tracing the origin of block data through update instructions.
+
+  This is a short-lived object whose purpose is to isolate the logic used for
+  tracing the origin of destination partition blocks.
+
+  """
+
+  def __init__(self, payload):
+    assert payload.is_init, 'uninitialized update payload'
+    self.payload = payload
+
+  @staticmethod
+  def _TraceBlock(block, skip, trace_out_file, operations, base_name):
+    """Trace the origin of a given block through a sequence of operations.
+
+    This method tries to map the given dest block to the corresponding source
+    block from which its content originates in the course of an update. It
+    further tries to trace transitive origins through MOVE operations. It is
+    rather efficient, doing the actual tracing by means of a single reverse
+    sweep through the operation sequence. It dumps a log of operations and
+    source blocks responsible for the data in the given dest block to the
+    provided output file.
+
+    Args:
+      block: the block number to trace
+      skip: number of initial transitive origins to ignore
+      trace_out_file: a file object to dump the trace to
+      operations: the sequence of operations
+      base_name: name of the operation sequence
+    """
+    # Traverse operations backwards.
+    for op, op_name in common.OperationIter(operations, base_name,
+                                            reverse=True):
+      total_block_offset = 0
+      found = False
+
+      # Is the traced block mentioned in the dest extents?
+      for dst_ex, dst_ex_name in common.ExtentIter(op.dst_extents,
+                                                   op_name + '.dst_extents'):
+        if (block >= dst_ex.start_block
+            and block < dst_ex.start_block + dst_ex.num_blocks):
+          if skip:
+            skip -= 1
+          else:
+            total_block_offset += block - dst_ex.start_block
+            trace_out_file.write(
+                '%d: %s: found %s (total block offset: %d)\n' %
+                (block, dst_ex_name, common.FormatExtent(dst_ex),
+                 total_block_offset))
+            found = True
+            break
+
+        total_block_offset += dst_ex.num_blocks
+
+      if found:
+        # Don't trace further, unless it's a MOVE.
+        if op.type != common.OpType.MOVE:
+          break
+
+        # For MOVE, find corresponding source block and keep tracing.
+        for src_ex, src_ex_name in common.ExtentIter(op.src_extents,
+                                                     op_name + '.src_extents'):
+          if total_block_offset < src_ex.num_blocks:
+            block = src_ex.start_block + total_block_offset
+            trace_out_file.write(
+                '%s:  mapped to %s (%d)\n' %
+                (src_ex_name, common.FormatExtent(src_ex), block))
+            break
+
+          total_block_offset -= src_ex.num_blocks
+
+  def Run(self, block, skip, trace_out_file, is_kernel):
+    """Block tracer entry point, invoking the actual search.
+
+    Args:
+      block: the block number whose origin to trace
+      skip: the number of first origin mappings to skip
+      trace_out_file: file object to dump the trace to
+      is_kernel: trace through kernel (True) or rootfs (False) operations
+    """
+    if is_kernel:
+      operations = self.payload.manifest.kernel_install_operations
+      base_name = 'kernel_install_operations'
+    else:
+      operations = self.payload.manifest.install_operations
+      base_name = 'install_operations'
+
+    self._TraceBlock(block, skip, trace_out_file, operations, base_name)
diff --git a/scripts/update_payload/checker.py b/scripts/update_payload/checker.py
new file mode 100644
index 0000000..7abf178
--- /dev/null
+++ b/scripts/update_payload/checker.py
@@ -0,0 +1,1270 @@
+# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Verifying the integrity of a Chrome OS update payload.
+
+This module is used internally by the main Payload class for verifying the
+integrity of an update payload. The interface for invoking the checks is as
+follows:
+
+  checker = PayloadChecker(payload)
+  checker.Run(...)
+"""
+
+from __future__ import print_function
+
+import array
+import base64
+import hashlib
+import itertools
+import os
+import subprocess
+
+import common
+import error
+import format_utils
+import histogram
+import update_metadata_pb2
+
+
+#
+# Constants.
+#
+
+_CHECK_DST_PSEUDO_EXTENTS = 'dst-pseudo-extents'
+_CHECK_MOVE_SAME_SRC_DST_BLOCK = 'move-same-src-dst-block'
+_CHECK_PAYLOAD_SIG = 'payload-sig'
+CHECKS_TO_DISABLE = (
+    _CHECK_DST_PSEUDO_EXTENTS,
+    _CHECK_MOVE_SAME_SRC_DST_BLOCK,
+    _CHECK_PAYLOAD_SIG,
+)
+
+_TYPE_FULL = 'full'
+_TYPE_DELTA = 'delta'
+
+_DEFAULT_BLOCK_SIZE = 4096
+
+_DEFAULT_PUBKEY_BASE_NAME = 'update-payload-key.pub.pem'
+_DEFAULT_PUBKEY_FILE_NAME = os.path.join(os.path.dirname(__file__),
+                                         _DEFAULT_PUBKEY_BASE_NAME)
+
+# Map of supported minor versions to the payload types allowed to use them.
+_SUPPORTED_MINOR_VERSIONS = {
+    0: (_TYPE_FULL,),
+    1: (_TYPE_DELTA,),
+    2: (_TYPE_DELTA,),
+    3: (_TYPE_DELTA,),
+}
+
+_OLD_DELTA_USABLE_PART_SIZE = 2 * 1024 * 1024 * 1024
+
+#
+# Helper functions.
+#
+
+def _IsPowerOfTwo(val):
+  """Returns True iff val is a power of two."""
+  return val > 0 and (val & (val - 1)) == 0
+
+
+def _AddFormat(format_func, value):
+  """Adds a custom formatted representation to ordinary string representation.
+
+  Args:
+    format_func: A value formatter.
+    value: Value to be formatted and returned.
+
+  Returns:
+    A string 'x (y)' where x = str(value) and y = format_func(value).
+  """
+  ret = str(value)
+  formatted_str = format_func(value)
+  if formatted_str:
+    ret += ' (%s)' % formatted_str
+  return ret
+
+
+def _AddHumanReadableSize(size):
+  """Adds a human readable representation to a byte size value."""
+  return _AddFormat(format_utils.BytesToHumanReadable, size)
+
+
+#
+# Payload report generator.
+#
+
+class _PayloadReport(object):
+  """A payload report generator.
+
+  A report is essentially a sequence of nodes, which represent data points. It
+  is initialized to have a "global", untitled section. A node may be a
+  sub-report itself.
+  """
+
+  # Report nodes: Field, sub-report, section.
+  class Node(object):
+    """A report node interface."""
+
+    @staticmethod
+    def _Indent(indent, line):
+      """Indents a line by a given indentation amount.
+
+      Args:
+        indent: The indentation amount.
+        line: The line content (string).
+
+      Returns:
+        The properly indented line (string).
+      """
+      return '%*s%s' % (indent, '', line)
+
+    def GenerateLines(self, base_indent, sub_indent, curr_section):
+      """Generates the report lines for this node.
+
+      Args:
+        base_indent: Base indentation for each line.
+        sub_indent: Additional indentation for sub-nodes.
+        curr_section: The current report section object.
+
+      Returns:
+        A pair consisting of a list of properly indented report lines and a new
+        current section object.
+      """
+      raise NotImplementedError
+
+  class FieldNode(Node):
+    """A field report node, representing a (name, value) pair."""
+
+    def __init__(self, name, value, linebreak, indent):
+      super(_PayloadReport.FieldNode, self).__init__()
+      self.name = name
+      self.value = value
+      self.linebreak = linebreak
+      self.indent = indent
+
+    def GenerateLines(self, base_indent, sub_indent, curr_section):
+      """Generates a properly formatted 'name : value' entry."""
+      report_output = ''
+      if self.name:
+        report_output += self.name.ljust(curr_section.max_field_name_len) + ' :'
+      value_lines = str(self.value).splitlines()
+      if self.linebreak and self.name:
+        report_output += '\n' + '\n'.join(
+            ['%*s%s' % (self.indent, '', line) for line in value_lines])
+      else:
+        if self.name:
+          report_output += ' '
+        report_output += '%*s' % (self.indent, '')
+        cont_line_indent = len(report_output)
+        indented_value_lines = [value_lines[0]]
+        indented_value_lines.extend(['%*s%s' % (cont_line_indent, '', line)
+                                     for line in value_lines[1:]])
+        report_output += '\n'.join(indented_value_lines)
+
+      report_lines = [self._Indent(base_indent, line + '\n')
+                      for line in report_output.split('\n')]
+      return report_lines, curr_section
+
+  class SubReportNode(Node):
+    """A sub-report node, representing a nested report."""
+
+    def __init__(self, title, report):
+      super(_PayloadReport.SubReportNode, self).__init__()
+      self.title = title
+      self.report = report
+
+    def GenerateLines(self, base_indent, sub_indent, curr_section):
+      """Recurse with indentation."""
+      report_lines = [self._Indent(base_indent, self.title + ' =>\n')]
+      report_lines.extend(self.report.GenerateLines(base_indent + sub_indent,
+                                                    sub_indent))
+      return report_lines, curr_section
+
+  class SectionNode(Node):
+    """A section header node."""
+
+    def __init__(self, title=None):
+      super(_PayloadReport.SectionNode, self).__init__()
+      self.title = title
+      self.max_field_name_len = 0
+
+    def GenerateLines(self, base_indent, sub_indent, curr_section):
+      """Dump a title line, return self as the (new) current section."""
+      report_lines = []
+      if self.title:
+        report_lines.append(self._Indent(base_indent,
+                                         '=== %s ===\n' % self.title))
+      return report_lines, self
+
+  def __init__(self):
+    self.report = []
+    self.last_section = self.global_section = self.SectionNode()
+    self.is_finalized = False
+
+  def GenerateLines(self, base_indent, sub_indent):
+    """Generates the lines in the report, properly indented.
+
+    Args:
+      base_indent: The indentation used for root-level report lines.
+      sub_indent: The indentation offset used for sub-reports.
+
+    Returns:
+      A list of indented report lines.
+    """
+    report_lines = []
+    curr_section = self.global_section
+    for node in self.report:
+      node_report_lines, curr_section = node.GenerateLines(
+          base_indent, sub_indent, curr_section)
+      report_lines.extend(node_report_lines)
+
+    return report_lines
+
+  def Dump(self, out_file, base_indent=0, sub_indent=2):
+    """Dumps the report to a file.
+
+    Args:
+      out_file: File object to output the content to.
+      base_indent: Base indentation for report lines.
+      sub_indent: Added indentation for sub-reports.
+    """
+    report_lines = self.GenerateLines(base_indent, sub_indent)
+    if report_lines and not self.is_finalized:
+      report_lines.append('(incomplete report)\n')
+
+    for line in report_lines:
+      out_file.write(line)
+
+  def AddField(self, name, value, linebreak=False, indent=0):
+    """Adds a field/value pair to the payload report.
+
+    Args:
+      name: The field's name.
+      value: The field's value.
+      linebreak: Whether the value should be printed on a new line.
+      indent: Amount of extra indent for each line of the value.
+    """
+    assert not self.is_finalized
+    if name and self.last_section.max_field_name_len < len(name):
+      self.last_section.max_field_name_len = len(name)
+    self.report.append(self.FieldNode(name, value, linebreak, indent))
+
+  def AddSubReport(self, title):
+    """Adds and returns a sub-report with a title."""
+    assert not self.is_finalized
+    sub_report = self.SubReportNode(title, type(self)())
+    self.report.append(sub_report)
+    return sub_report.report
+
+  def AddSection(self, title):
+    """Adds a new section title."""
+    assert not self.is_finalized
+    self.last_section = self.SectionNode(title)
+    self.report.append(self.last_section)
+
+  def Finalize(self):
+    """Seals the report, marking it as complete."""
+    self.is_finalized = True
+
+
+#
+# Payload verification.
+#
+
+class PayloadChecker(object):
+  """Checking the integrity of an update payload.
+
+  This is a short-lived object whose purpose is to isolate the logic used for
+  verifying the integrity of an update payload.
+  """
+
+  def __init__(self, payload, assert_type=None, block_size=0,
+               allow_unhashed=False, disabled_tests=()):
+    """Initialize the checker.
+
+    Args:
+      payload: The payload object to check.
+      assert_type: Assert that payload is either 'full' or 'delta' (optional).
+      block_size: Expected filesystem / payload block size (optional).
+      allow_unhashed: Allow operations with unhashed data blobs.
+      disabled_tests: Sequence of tests to disable.
+    """
+    if not payload.is_init:
+      raise ValueError('Uninitialized update payload.')
+
+    # Set checker configuration.
+    self.payload = payload
+    self.block_size = block_size if block_size else _DEFAULT_BLOCK_SIZE
+    if not _IsPowerOfTwo(self.block_size):
+      raise error.PayloadError(
+          'Expected block (%d) size is not a power of two.' % self.block_size)
+    if assert_type not in (None, _TYPE_FULL, _TYPE_DELTA):
+      raise error.PayloadError('Invalid assert_type value (%r).' %
+                               assert_type)
+    self.payload_type = assert_type
+    self.allow_unhashed = allow_unhashed
+
+    # Disable specific tests.
+    self.check_dst_pseudo_extents = (
+        _CHECK_DST_PSEUDO_EXTENTS not in disabled_tests)
+    self.check_move_same_src_dst_block = (
+        _CHECK_MOVE_SAME_SRC_DST_BLOCK not in disabled_tests)
+    self.check_payload_sig = _CHECK_PAYLOAD_SIG not in disabled_tests
+
+    # Reset state; these will be assigned when the manifest is checked.
+    self.sigs_offset = 0
+    self.sigs_size = 0
+    self.old_rootfs_fs_size = 0
+    self.old_kernel_fs_size = 0
+    self.new_rootfs_fs_size = 0
+    self.new_kernel_fs_size = 0
+    self.minor_version = None
+
+  @staticmethod
+  def _CheckElem(msg, name, report, is_mandatory, is_submsg, convert=str,
+                 msg_name=None, linebreak=False, indent=0):
+    """Adds an element from a protobuf message to the payload report.
+
+    Checks to see whether a message contains a given element, and if so adds
+    the element value to the provided report. A missing mandatory element
+    causes an exception to be raised.
+
+    Args:
+      msg: The message containing the element.
+      name: The name of the element.
+      report: A report object to add the element name/value to.
+      is_mandatory: Whether or not this element must be present.
+      is_submsg: Whether this element is itself a message.
+      convert: A function for converting the element value for reporting.
+      msg_name: The name of the message object (for error reporting).
+      linebreak: Whether the value report should induce a line break.
+      indent: Amount of indent used for reporting the value.
+
+    Returns:
+      A pair consisting of the element value and the generated sub-report for
+      it (if the element is a sub-message, None otherwise). If the element is
+      missing, returns (None, None).
+
+    Raises:
+      error.PayloadError if a mandatory element is missing.
+    """
+    if not msg.HasField(name):
+      if is_mandatory:
+        raise error.PayloadError('%smissing mandatory %s %r.' %
+                                 (msg_name + ' ' if msg_name else '',
+                                  'sub-message' if is_submsg else 'field',
+                                  name))
+      return None, None
+
+    value = getattr(msg, name)
+    if is_submsg:
+      return value, report and report.AddSubReport(name)
+    else:
+      if report:
+        report.AddField(name, convert(value), linebreak=linebreak,
+                        indent=indent)
+      return value, None
+
+  @staticmethod
+  def _CheckMandatoryField(msg, field_name, report, msg_name, convert=str,
+                           linebreak=False, indent=0):
+    """Adds a mandatory field; returning first component from _CheckElem."""
+    return PayloadChecker._CheckElem(msg, field_name, report, True, False,
+                                     convert=convert, msg_name=msg_name,
+                                     linebreak=linebreak, indent=indent)[0]
+
+  @staticmethod
+  def _CheckOptionalField(msg, field_name, report, convert=str,
+                          linebreak=False, indent=0):
+    """Adds an optional field; returning first component from _CheckElem."""
+    return PayloadChecker._CheckElem(msg, field_name, report, False, False,
+                                     convert=convert, linebreak=linebreak,
+                                     indent=indent)[0]
+
+  @staticmethod
+  def _CheckMandatorySubMsg(msg, submsg_name, report, msg_name):
+    """Adds a mandatory sub-message; wrapper for _CheckElem."""
+    return PayloadChecker._CheckElem(msg, submsg_name, report, True, True,
+                                     msg_name=msg_name)
+
+  @staticmethod
+  def _CheckOptionalSubMsg(msg, submsg_name, report):
+    """Adds an optional sub-message; wrapper for _CheckElem."""
+    return PayloadChecker._CheckElem(msg, submsg_name, report, False, True)
+
+  @staticmethod
+  def _CheckPresentIff(val1, val2, name1, name2, obj_name):
+    """Checks that val1 is None iff val2 is None.
+
+    Args:
+      val1: first value to be compared.
+      val2: second value to be compared.
+      name1: name of object holding the first value.
+      name2: name of object holding the second value.
+      obj_name: Name of the object containing these values.
+
+    Raises:
+      error.PayloadError if assertion does not hold.
+    """
+    if None in (val1, val2) and val1 is not val2:
+      present, missing = (name1, name2) if val2 is None else (name2, name1)
+      raise error.PayloadError('%r present without %r%s.' %
+                               (present, missing,
+                                ' in ' + obj_name if obj_name else ''))
+
+  @staticmethod
+  def _Run(cmd, send_data=None):
+    """Runs a subprocess, returns its output.
+
+    Args:
+      cmd: Sequence of command-line arguments for invoking the subprocess.
+      send_data: Data to feed to the process via its stdin.
+
+    Returns:
+      A tuple (stdout, stderr) with the process output; stderr is None here,
+      since it is not redirected.
+    """
+    run_process = subprocess.Popen(cmd, stdin=subprocess.PIPE,
+                                   stdout=subprocess.PIPE)
+    try:
+      result = run_process.communicate(input=send_data)
+    finally:
+      exit_code = run_process.wait()
+
+    if exit_code:
+      raise RuntimeError('Subprocess %r failed with code %r.' %
+                         (cmd, exit_code))
+
+    return result
+
+  @staticmethod
+  def _CheckSha256Signature(sig_data, pubkey_file_name, actual_hash, sig_name):
+    """Verifies an actual hash against a signed one.
+
+    Args:
+      sig_data: The raw signature data.
+      pubkey_file_name: Public key used for verifying signature.
+      actual_hash: The actual hash digest.
+      sig_name: Signature name for error reporting.
+
+    Raises:
+      error.PayloadError if signature could not be verified.
+    """
+    if len(sig_data) != 256:
+      raise error.PayloadError(
+          '%s: signature size (%d) not as expected (256).' %
+          (sig_name, len(sig_data)))
+    signed_data, _ = PayloadChecker._Run(
+        ['openssl', 'rsautl', '-verify', '-pubin', '-inkey', pubkey_file_name],
+        send_data=sig_data)
+
+    if len(signed_data) != len(common.SIG_ASN1_HEADER) + 32:
+      raise error.PayloadError('%s: unexpected signed data length (%d).' %
+                               (sig_name, len(signed_data)))
+
+    if not signed_data.startswith(common.SIG_ASN1_HEADER):
+      raise error.PayloadError('%s: does not contain standard ASN.1 prefix.' %
+                               sig_name)
+
+    signed_hash = signed_data[len(common.SIG_ASN1_HEADER):]
+    if signed_hash != actual_hash:
+      raise error.PayloadError(
+          '%s: signed hash (%s) different from actual (%s).' %
+          (sig_name, common.FormatSha256(signed_hash),
+           common.FormatSha256(actual_hash)))
+
+  @staticmethod
+  def _CheckBlocksFitLength(length, num_blocks, block_size, length_name,
+                            block_name=None):
+    """Checks that a given length fits given block space.
+
+    This ensures that the number of blocks allocated is appropriate for the
+    length of the data residing in these blocks.
+
+    Args:
+      length: The actual length of the data.
+      num_blocks: The number of blocks allocated for it.
+      block_size: The size of each block in bytes.
+      length_name: Name of length (used for error reporting).
+      block_name: Name of block (used for error reporting).
+
+    Raises:
+      error.PayloadError if the aforementioned invariant is not satisfied.
+    """
+    # Check: length <= num_blocks * block_size.
+    if length > num_blocks * block_size:
+      raise error.PayloadError(
+          '%s (%d) > num %sblocks (%d) * block_size (%d).' %
+          (length_name, length, block_name or '', num_blocks, block_size))
+
+    # Check: length > (num_blocks - 1) * block_size.
+    if length <= (num_blocks - 1) * block_size:
+      raise error.PayloadError(
+          '%s (%d) <= (num %sblocks - 1 (%d)) * block_size (%d).' %
+          (length_name, length, block_name or '', num_blocks - 1, block_size))
+
+  def _CheckManifestMinorVersion(self, report):
+    """Checks the payload manifest minor_version field.
+
+    Args:
+      report: The report object to add to.
+
+    Raises:
+      error.PayloadError if any of the checks fail.
+    """
+    self.minor_version = self._CheckOptionalField(self.payload.manifest,
+                                                  'minor_version', report)
+    if self.minor_version in _SUPPORTED_MINOR_VERSIONS:
+      if self.payload_type not in _SUPPORTED_MINOR_VERSIONS[self.minor_version]:
+        raise error.PayloadError(
+            'Minor version %d not compatible with payload type %s.' %
+            (self.minor_version, self.payload_type))
+    elif self.minor_version is None:
+      raise error.PayloadError('Minor version is not set.')
+    else:
+      raise error.PayloadError('Unsupported minor version: %d' %
+                               self.minor_version)
+
+  def _CheckManifest(self, report, rootfs_part_size=0, kernel_part_size=0):
+    """Checks the payload manifest.
+
+    Args:
+      report: A report object to add to.
+      rootfs_part_size: Size of the rootfs partition in bytes.
+      kernel_part_size: Size of the kernel partition in bytes.
+
+    Raises:
+      error.PayloadError if any of the checks fail.
+    """
+    manifest = self.payload.manifest
+    report.AddSection('manifest')
+
+    # Check: block_size must exist and match the expected value.
+    actual_block_size = self._CheckMandatoryField(manifest, 'block_size',
+                                                  report, 'manifest')
+    if actual_block_size != self.block_size:
+      raise error.PayloadError('Block_size (%d) not as expected (%d).' %
+                               (actual_block_size, self.block_size))
+
+    # Check: signatures_offset <==> signatures_size.
+    self.sigs_offset = self._CheckOptionalField(manifest, 'signatures_offset',
+                                                report)
+    self.sigs_size = self._CheckOptionalField(manifest, 'signatures_size',
+                                              report)
+    self._CheckPresentIff(self.sigs_offset, self.sigs_size,
+                          'signatures_offset', 'signatures_size', 'manifest')
+
+    # Check: old_kernel_info <==> old_rootfs_info.
+    oki_msg, oki_report = self._CheckOptionalSubMsg(manifest,
+                                                    'old_kernel_info', report)
+    ori_msg, ori_report = self._CheckOptionalSubMsg(manifest,
+                                                    'old_rootfs_info', report)
+    self._CheckPresentIff(oki_msg, ori_msg, 'old_kernel_info',
+                          'old_rootfs_info', 'manifest')
+    if oki_msg:  # equivalently, ori_msg
+      # Assert/mark delta payload.
+      if self.payload_type == _TYPE_FULL:
+        raise error.PayloadError(
+            'Apparent full payload contains old_{kernel,rootfs}_info.')
+      self.payload_type = _TYPE_DELTA
+
+      # Check: {size, hash} present in old_{kernel,rootfs}_info.
+      self.old_kernel_fs_size = self._CheckMandatoryField(
+          oki_msg, 'size', oki_report, 'old_kernel_info')
+      self._CheckMandatoryField(oki_msg, 'hash', oki_report, 'old_kernel_info',
+                                convert=common.FormatSha256)
+      self.old_rootfs_fs_size = self._CheckMandatoryField(
+          ori_msg, 'size', ori_report, 'old_rootfs_info')
+      self._CheckMandatoryField(ori_msg, 'hash', ori_report, 'old_rootfs_info',
+                                convert=common.FormatSha256)
+
+      # Check: old_{kernel,rootfs} size must fit in respective partition.
+      if kernel_part_size and self.old_kernel_fs_size > kernel_part_size:
+        raise error.PayloadError(
+            'Old kernel content (%d) exceeds partition size (%d).' %
+            (self.old_kernel_fs_size, kernel_part_size))
+      if rootfs_part_size and self.old_rootfs_fs_size > rootfs_part_size:
+        raise error.PayloadError(
+            'Old rootfs content (%d) exceeds partition size (%d).' %
+            (self.old_rootfs_fs_size, rootfs_part_size))
+    else:
+      # Assert/mark full payload.
+      if self.payload_type == _TYPE_DELTA:
+        raise error.PayloadError(
+            'Apparent delta payload missing old_{kernel,rootfs}_info.')
+      self.payload_type = _TYPE_FULL
+
+    # Check: new_kernel_info present; contains {size, hash}.
+    nki_msg, nki_report = self._CheckMandatorySubMsg(
+        manifest, 'new_kernel_info', report, 'manifest')
+    self.new_kernel_fs_size = self._CheckMandatoryField(
+        nki_msg, 'size', nki_report, 'new_kernel_info')
+    self._CheckMandatoryField(nki_msg, 'hash', nki_report, 'new_kernel_info',
+                              convert=common.FormatSha256)
+
+    # Check: new_rootfs_info present; contains {size, hash}.
+    nri_msg, nri_report = self._CheckMandatorySubMsg(
+        manifest, 'new_rootfs_info', report, 'manifest')
+    self.new_rootfs_fs_size = self._CheckMandatoryField(
+        nri_msg, 'size', nri_report, 'new_rootfs_info')
+    self._CheckMandatoryField(nri_msg, 'hash', nri_report, 'new_rootfs_info',
+                              convert=common.FormatSha256)
+
+    # Check: new_{kernel,rootfs} size must fit in respective partition.
+    if kernel_part_size and self.new_kernel_fs_size > kernel_part_size:
+      raise error.PayloadError(
+          'New kernel content (%d) exceeds partition size (%d).' %
+          (self.new_kernel_fs_size, kernel_part_size))
+    if rootfs_part_size and self.new_rootfs_fs_size > rootfs_part_size:
+      raise error.PayloadError(
+          'New rootfs content (%d) exceeds partition size (%d).' %
+          (self.new_rootfs_fs_size, rootfs_part_size))
+
+    # Check: minor_version makes sense for the payload type. This check should
+    # run after the payload type has been set.
+    self._CheckManifestMinorVersion(report)
+
+  def _CheckLength(self, length, total_blocks, op_name, length_name):
+    """Checks whether a length matches the space designated in extents.
+
+    Args:
+      length: The total length of the data.
+      total_blocks: The total number of blocks in extents.
+      op_name: Operation name (for error reporting).
+      length_name: Length name (for error reporting).
+
+    Raises:
+      error.PayloadError if there is a problem with the length.
+    """
+    # Check: length is non-zero.
+    if length == 0:
+      raise error.PayloadError('%s: %s is zero.' % (op_name, length_name))
+
+    # Check that length matches number of blocks.
+    self._CheckBlocksFitLength(length, total_blocks, self.block_size,
+                               '%s: %s' % (op_name, length_name))
+
+  def _CheckExtents(self, extents, usable_size, block_counters, name,
+                    allow_pseudo=False, allow_signature=False):
+    """Checks a sequence of extents.
+
+    Args:
+      extents: The sequence of extents to check.
+      usable_size: The usable size of the partition to which the extents apply.
+      block_counters: Array of per-block usage counters to be updated.
+      name: The name of the extent block.
+      allow_pseudo: Whether or not pseudo block numbers are allowed.
+      allow_signature: Whether or not the extents are used for a signature.
+
+    Returns:
+      The total number of blocks in the extents.
+
+    Raises:
+      error.PayloadError if any of the entailed checks fails.
+    """
+    total_num_blocks = 0
+    for ex, ex_name in common.ExtentIter(extents, name):
+      # Check: Mandatory fields.
+      start_block = PayloadChecker._CheckMandatoryField(ex, 'start_block',
+                                                        None, ex_name)
+      num_blocks = PayloadChecker._CheckMandatoryField(ex, 'num_blocks', None,
+                                                       ex_name)
+      end_block = start_block + num_blocks
+
+      # Check: num_blocks > 0.
+      if num_blocks == 0:
+        raise error.PayloadError('%s: extent length is zero.' % ex_name)
+
+      if start_block != common.PSEUDO_EXTENT_MARKER:
+        # Check: Make sure we're within the partition limit.
+        if usable_size and end_block * self.block_size > usable_size:
+          raise error.PayloadError(
+              '%s: extent (%s) exceeds usable partition size (%d).' %
+              (ex_name, common.FormatExtent(ex, self.block_size), usable_size))
+
+        # Record block usage.
+        for i in xrange(start_block, end_block):
+          block_counters[i] += 1
+      elif not (allow_pseudo or (allow_signature and len(extents) == 1)):
+        # Pseudo-extents must be allowed explicitly, or otherwise be part of a
+        # signature operation (in which case there has to be exactly one).
+        raise error.PayloadError('%s: unexpected pseudo-extent.' % ex_name)
+
+      total_num_blocks += num_blocks
+
+    return total_num_blocks
+
+  def _CheckReplaceOperation(self, op, data_length, total_dst_blocks, op_name):
+    """Specific checks for REPLACE/REPLACE_BZ operations.
+
+    Args:
+      op: The operation object from the manifest.
+      data_length: The length of the data blob associated with the operation.
+      total_dst_blocks: Total number of blocks in dst_extents.
+      op_name: Operation name for error reporting.
+
+    Raises:
+      error.PayloadError if any check fails.
+    """
+    # Check: Does not contain src extents.
+    if op.src_extents:
+      raise error.PayloadError('%s: contains src_extents.' % op_name)
+
+    # Check: Contains data.
+    if data_length is None:
+      raise error.PayloadError('%s: missing data_{offset,length}.' % op_name)
+
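+    # REPLACE data is raw and must fill its dst blocks (up to one partial
+    # block), whereas REPLACE_BZ data is compressed and need only be strictly
+    # smaller than the allotted dst block space.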
+    if op.type == common.OpType.REPLACE:
+      PayloadChecker._CheckBlocksFitLength(data_length, total_dst_blocks,
+                                           self.block_size,
+                                           op_name + '.data_length', 'dst')
+    else:
+      # Check: data_length must be smaller than the allotted dst blocks.
+      if data_length >= total_dst_blocks * self.block_size:
+        raise error.PayloadError(
+            '%s: data_length (%d) must be less than allotted dst block '
+            'space (%d * %d).' %
+            (op_name, data_length, total_dst_blocks, self.block_size))
+
+  def _CheckMoveOperation(self, op, data_offset, total_src_blocks,
+                          total_dst_blocks, op_name):
+    """Specific checks for MOVE operations.
+
+    Args:
+      op: The operation object from the manifest.
+      data_offset: The offset of a data blob for the operation.
+      total_src_blocks: Total number of blocks in src_extents.
+      total_dst_blocks: Total number of blocks in dst_extents.
+      op_name: Operation name for error reporting.
+
+    Raises:
+      error.PayloadError if any check fails.
+    """
+    # Check: No data_{offset,length}.
+    if data_offset is not None:
+      raise error.PayloadError('%s: contains data_{offset,length}.' % op_name)
+
+    # Check: total_src_blocks == total_dst_blocks.
+    if total_src_blocks != total_dst_blocks:
+      raise error.PayloadError(
+          '%s: total src blocks (%d) != total dst blocks (%d).' %
+          (op_name, total_src_blocks, total_dst_blocks))
+
+    # Check: For all i, i-th src block index != i-th dst block index.
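+    # Illustrative (hypothetical extents): src=[(1, 2)], dst=[(3, 1), (2, 1)]
+    # pairs blocks 1->3 and 2->2; the second pair trips the same-block check
+    # below when it is enabled.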
+    i = 0
+    src_extent_iter = iter(op.src_extents)
+    dst_extent_iter = iter(op.dst_extents)
+    src_extent = dst_extent = None
+    src_idx = src_num = dst_idx = dst_num = 0
+    while i < total_src_blocks:
+      # Get the next source extent, if needed.
+      if not src_extent:
+        try:
+          src_extent = src_extent_iter.next()
+        except StopIteration:
+          raise error.PayloadError('%s: ran out of src extents (%d/%d).' %
+                                   (op_name, i, total_src_blocks))
+        src_idx = src_extent.start_block
+        src_num = src_extent.num_blocks
+
+      # Get the next dest extent, if needed.
+      if not dst_extent:
+        try:
+          dst_extent = dst_extent_iter.next()
+        except StopIteration:
+          raise error.PayloadError('%s: ran out of dst extents (%d/%d).' %
+                                   (op_name, i, total_dst_blocks))
+        dst_idx = dst_extent.start_block
+        dst_num = dst_extent.num_blocks
+
+      # Check: start block is not 0. See crbug/480751; there are still versions
+      # of update_engine which fail when seeking to 0 in PReadAll and PWriteAll,
+      # so we need to fail payloads that try to MOVE to/from block 0.
+      if src_idx == 0 or dst_idx == 0:
+        raise error.PayloadError(
+            '%s: MOVE operation cannot have extent with start block 0.' %
+            op_name)
+
+      if self.check_move_same_src_dst_block and src_idx == dst_idx:
+        raise error.PayloadError(
+            '%s: src/dst block number %d is the same (%d).' %
+            (op_name, i, src_idx))
+
+      advance = min(src_num, dst_num)
+      i += advance
+
+      src_idx += advance
+      src_num -= advance
+      if src_num == 0:
+        src_extent = None
+
+      dst_idx += advance
+      dst_num -= advance
+      if dst_num == 0:
+        dst_extent = None
+
+    # Make sure we've exhausted all src/dst extents.
+    if src_extent:
+      raise error.PayloadError('%s: excess src blocks.' % op_name)
+    if dst_extent:
+      raise error.PayloadError('%s: excess dst blocks.' % op_name)
+
+  def _CheckBsdiffOperation(self, data_length, total_dst_blocks, op_name):
+    """Specific checks for BSDIFF and SOURCE_BSDIFF operations.
+
+    Args:
+      data_length: The length of the data blob associated with the operation.
+      total_dst_blocks: Total number of blocks in dst_extents.
+      op_name: Operation name for error reporting.
+
+    Raises:
+      error.PayloadError if any check fails.
+    """
+    # Check: data_{offset,length} present.
+    if data_length is None:
+      raise error.PayloadError('%s: missing data_{offset,length}.' % op_name)
+
+    # Check: data_length is strictly smaller than the allotted dst blocks.
+    if data_length >= total_dst_blocks * self.block_size:
+      raise error.PayloadError(
+          '%s: data_length (%d) must be smaller than allotted dst space '
+          '(%d * %d = %d).' %
+          (op_name, data_length, total_dst_blocks, self.block_size,
+           total_dst_blocks * self.block_size))
+
+  def _CheckSourceCopyOperation(self, data_offset, total_src_blocks,
+                                total_dst_blocks, op_name):
+    """Specific checks for SOURCE_COPY.
+
+    Args:
+      data_offset: The offset of a data blob for the operation.
+      total_src_blocks: Total number of blocks in src_extents.
+      total_dst_blocks: Total number of blocks in dst_extents.
+      op_name: Operation name for error reporting.
+
+    Raises:
+      error.PayloadError if any check fails.
+    """
+    # Check: No data_{offset,length}.
+    if data_offset is not None:
+      raise error.PayloadError('%s: contains data_{offset,length}.' % op_name)
+
+    # Check: total_src_blocks == total_dst_blocks.
+    if total_src_blocks != total_dst_blocks:
+      raise error.PayloadError(
+          '%s: total src blocks (%d) != total dst blocks (%d).' %
+          (op_name, total_src_blocks, total_dst_blocks))
+
+  def _CheckAnySourceOperation(self, op, total_src_blocks, op_name):
+    """Specific checks for SOURCE_* operations.
+
+    Args:
+      op: The operation object from the manifest.
+      total_src_blocks: Total number of blocks in src_extents.
+      op_name: Operation name for error reporting.
+
+    Raises:
+      error.PayloadError if any check fails.
+    """
+    # Check: total_src_blocks != 0.
+    if total_src_blocks == 0:
+      raise error.PayloadError('%s: no src blocks in a source op.' % op_name)
+
+    # Check: src_sha256_hash present in minor version 3.
+    if self.minor_version == 3 and not op.src_sha256_hash:
+      raise error.PayloadError('%s: source hash missing.' % op_name)
+
+  def _CheckOperation(self, op, op_name, is_last, old_block_counters,
+                      new_block_counters, old_usable_size, new_usable_size,
+                      prev_data_offset, allow_signature, blob_hash_counts):
+    """Checks a single update operation.
+
+    Args:
+      op: The operation object.
+      op_name: Operation name string for error reporting.
+      is_last: Whether this is the last operation in the sequence.
+      old_block_counters: Array of block read counters.
+      new_block_counters: Array of block write counters.
+      old_usable_size: The overall usable size for src data in bytes.
+      new_usable_size: The overall usable size for dst data in bytes.
+      prev_data_offset: Offset of last used data bytes.
+      allow_signature: Whether this may be a signature operation.
+      blob_hash_counts: Counters for hashed/unhashed blobs.
+
+    Returns:
+      The size of the data blob associated with the operation, or 0 if none.
+
+    Raises:
+      error.PayloadError if any check has failed.
+    """
+    # Check extents.
+    total_src_blocks = self._CheckExtents(
+        op.src_extents, old_usable_size, old_block_counters,
+        op_name + '.src_extents', allow_pseudo=True)
+    allow_signature_in_extents = (allow_signature and is_last and
+                                  op.type == common.OpType.REPLACE)
+    total_dst_blocks = self._CheckExtents(
+        op.dst_extents, new_usable_size, new_block_counters,
+        op_name + '.dst_extents',
+        allow_pseudo=(not self.check_dst_pseudo_extents),
+        allow_signature=allow_signature_in_extents)
+
+    # Check: data_offset present <==> data_length present.
+    data_offset = self._CheckOptionalField(op, 'data_offset', None)
+    data_length = self._CheckOptionalField(op, 'data_length', None)
+    self._CheckPresentIff(data_offset, data_length, 'data_offset',
+                          'data_length', op_name)
+
+    # Check: At least one dst_extent.
+    if not op.dst_extents:
+      raise error.PayloadError('%s: dst_extents is empty.' % op_name)
+
+    # Check {src,dst}_length, if present.
+    if op.HasField('src_length'):
+      self._CheckLength(op.src_length, total_src_blocks, op_name, 'src_length')
+    if op.HasField('dst_length'):
+      self._CheckLength(op.dst_length, total_dst_blocks, op_name, 'dst_length')
+
+    if op.HasField('data_sha256_hash'):
+      blob_hash_counts['hashed'] += 1
+
+      # Check: Operation carries data.
+      if data_offset is None:
+        raise error.PayloadError(
+            '%s: data_sha256_hash present but no data_{offset,length}.' %
+            op_name)
+
+      # Check: Hash verifies correctly.
+      # pylint cannot find the method in hashlib, for some reason.
+      # pylint: disable=E1101
+      actual_hash = hashlib.sha256(self.payload.ReadDataBlob(data_offset,
+                                                             data_length))
+      if op.data_sha256_hash != actual_hash.digest():
+        raise error.PayloadError(
+            '%s: data_sha256_hash (%s) does not match actual hash (%s).' %
+            (op_name, common.FormatSha256(op.data_sha256_hash),
+             common.FormatSha256(actual_hash.digest())))
+    elif data_offset is not None:
+      if allow_signature_in_extents:
+        blob_hash_counts['signature'] += 1
+      elif self.allow_unhashed:
+        blob_hash_counts['unhashed'] += 1
+      else:
+        raise error.PayloadError('%s: unhashed operation not allowed.' %
+                                 op_name)
+
+    if data_offset is not None:
+      # Check: Contiguous use of data section.
+      if data_offset != prev_data_offset:
+        raise error.PayloadError(
+            '%s: data offset (%d) not matching amount used so far (%d).' %
+            (op_name, data_offset, prev_data_offset))
+
+    # Type-specific checks.
+    if op.type in (common.OpType.REPLACE, common.OpType.REPLACE_BZ):
+      self._CheckReplaceOperation(op, data_length, total_dst_blocks, op_name)
+    elif op.type == common.OpType.MOVE and self.minor_version == 1:
+      self._CheckMoveOperation(op, data_offset, total_src_blocks,
+                               total_dst_blocks, op_name)
+    elif op.type == common.OpType.BSDIFF and self.minor_version == 1:
+      self._CheckBsdiffOperation(data_length, total_dst_blocks, op_name)
+    elif op.type == common.OpType.SOURCE_COPY and self.minor_version in (2, 3):
+      self._CheckSourceCopyOperation(data_offset, total_src_blocks,
+                                     total_dst_blocks, op_name)
+      self._CheckAnySourceOperation(op, total_src_blocks, op_name)
+    elif (op.type == common.OpType.SOURCE_BSDIFF and
+          self.minor_version in (2, 3)):
+      self._CheckBsdiffOperation(data_length, total_dst_blocks, op_name)
+      self._CheckAnySourceOperation(op, total_src_blocks, op_name)
+    else:
+      raise error.PayloadError(
+          'Operation %s (type %d) not allowed in minor version %d.' %
+          (op_name, op.type, self.minor_version))
+    return data_length if data_length is not None else 0
+
+  def _SizeToNumBlocks(self, size):
+    """Returns the number of blocks needed to contain a given byte size."""
+    return (size + self.block_size - 1) / self.block_size
+
+  def _AllocBlockCounters(self, total_size):
+    """Returns a freshly initialized array of block counters.
+
+    Note that the generated array is not portable as is due to byte-ordering
+    issues, hence it should not be serialized.
+
+    Args:
+      total_size: The total block size in bytes.
+
+    Returns:
+      An array of unsigned short elements initialized to zero, one for each of
+      the blocks necessary for containing the partition.
+    """
+    return array.array('H',
+                       itertools.repeat(0, self._SizeToNumBlocks(total_size)))
+
+  def _CheckOperations(self, operations, report, base_name, old_fs_size,
+                       new_fs_size, new_usable_size, prev_data_offset,
+                       allow_signature):
+    """Checks a sequence of update operations.
+
+    Args:
+      operations: The sequence of operations to check.
+      report: The report object to add to.
+      base_name: The name of the operation block.
+      old_fs_size: The old filesystem size in bytes.
+      new_fs_size: The new filesystem size in bytes.
+      new_usable_size: The overall usable size of the new partition in bytes.
+      prev_data_offset: Offset of last used data bytes.
+      allow_signature: Whether this sequence may contain signature operations.
+
+    Returns:
+      The total data blob size used.
+
+    Raises:
+      error.PayloadError if any of the checks fails.
+    """
+    # The total size of data blobs used by operations scanned thus far.
+    total_data_used = 0
+    # Counts of specific operation types.
+    op_counts = {
+        common.OpType.REPLACE: 0,
+        common.OpType.REPLACE_BZ: 0,
+        common.OpType.MOVE: 0,
+        common.OpType.BSDIFF: 0,
+        common.OpType.SOURCE_COPY: 0,
+        common.OpType.SOURCE_BSDIFF: 0,
+    }
+    # Total blob sizes for each operation type.
+    op_blob_totals = {
+        common.OpType.REPLACE: 0,
+        common.OpType.REPLACE_BZ: 0,
+        # MOVE operations don't have blobs.
+        common.OpType.BSDIFF: 0,
+        # SOURCE_COPY operations don't have blobs.
+        common.OpType.SOURCE_BSDIFF: 0,
+    }
+    # Counts of hashed vs unhashed operations.
+    blob_hash_counts = {
+        'hashed': 0,
+        'unhashed': 0,
+    }
+    if allow_signature:
+      blob_hash_counts['signature'] = 0
+
+    # Allocate old and new block counters.
+    old_block_counters = (self._AllocBlockCounters(new_usable_size)
+                          if old_fs_size else None)
+    new_block_counters = self._AllocBlockCounters(new_usable_size)
+
+    # Process and verify each operation.
+    op_num = 0
+    for op, op_name in common.OperationIter(operations, base_name):
+      op_num += 1
+
+      # Check: Type is valid.
+      if op.type not in op_counts:
+        raise error.PayloadError('%s: invalid type (%d).' % (op_name, op.type))
+      op_counts[op.type] += 1
+
+      is_last = op_num == len(operations)
+      curr_data_used = self._CheckOperation(
+          op, op_name, is_last, old_block_counters, new_block_counters,
+          new_usable_size if old_fs_size else 0, new_usable_size,
+          prev_data_offset + total_data_used, allow_signature,
+          blob_hash_counts)
+      if curr_data_used:
+        op_blob_totals[op.type] += curr_data_used
+        total_data_used += curr_data_used
+
+    # Report totals and breakdown statistics.
+    report.AddField('total operations', op_num)
+    report.AddField(
+        None,
+        histogram.Histogram.FromCountDict(op_counts,
+                                          key_names=common.OpType.NAMES),
+        indent=1)
+    report.AddField('total blobs', sum(blob_hash_counts.values()))
+    report.AddField(None,
+                    histogram.Histogram.FromCountDict(blob_hash_counts),
+                    indent=1)
+    report.AddField('total blob size', _AddHumanReadableSize(total_data_used))
+    report.AddField(
+        None,
+        histogram.Histogram.FromCountDict(op_blob_totals,
+                                          formatter=_AddHumanReadableSize,
+                                          key_names=common.OpType.NAMES),
+        indent=1)
+
+    # Report read/write histograms.
+    if old_block_counters:
+      report.AddField('block read hist',
+                      histogram.Histogram.FromKeyList(old_block_counters),
+                      linebreak=True, indent=1)
+
+    new_write_hist = histogram.Histogram.FromKeyList(
+        new_block_counters[:self._SizeToNumBlocks(new_fs_size)])
+    report.AddField('block write hist', new_write_hist, linebreak=True,
+                    indent=1)
+
+    # Check: Full update must write each dst block once.
+    if self.payload_type == _TYPE_FULL and new_write_hist.GetKeys() != [1]:
+      raise error.PayloadError(
+          '%s: not all blocks written exactly once during full update.' %
+          base_name)
+
+    return total_data_used
+
+  def _CheckSignatures(self, report, pubkey_file_name):
+    """Checks a payload's signature block."""
+    sigs_raw = self.payload.ReadDataBlob(self.sigs_offset, self.sigs_size)
+    sigs = update_metadata_pb2.Signatures()
+    sigs.ParseFromString(sigs_raw)
+    report.AddSection('signatures')
+
+    # Check: At least one signature present.
+    # pylint cannot see through the protobuf object, it seems.
+    # pylint: disable=E1101
+    if not sigs.signatures:
+      raise error.PayloadError('Signature block is empty.')
+
+    last_ops_section = (self.payload.manifest.kernel_install_operations or
+                        self.payload.manifest.install_operations)
+    fake_sig_op = last_ops_section[-1]
+    # Check: signatures_{offset,size} must match the last (fake) operation.
+    if not (fake_sig_op.type == common.OpType.REPLACE and
+            self.sigs_offset == fake_sig_op.data_offset and
+            self.sigs_size == fake_sig_op.data_length):
+      raise error.PayloadError(
+          'Signatures_{offset,size} (%d+%d) does not match last operation '
+          '(%d+%d).' %
+          (self.sigs_offset, self.sigs_size, fake_sig_op.data_offset,
+           fake_sig_op.data_length))
+
+    # Compute the checksum of all data up to the signature blob.
+    # TODO(garnold) we're re-reading the whole data section into a string
+    # just to compute the checksum; instead, we could do it incrementally as
+    # we read the blobs one-by-one, under the assumption that we're reading
+    # them in order (which currently holds). This should be reconsidered.
+    payload_hasher = self.payload.manifest_hasher.copy()
+    common.Read(self.payload.payload_file, self.sigs_offset,
+                offset=self.payload.data_offset, hasher=payload_hasher)
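+    # The resulting digest thus covers the manifest plus all blob data
+    # preceding the signature blob.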
+
+    for sig, sig_name in common.SignatureIter(sigs.signatures, 'signatures'):
+      sig_report = report.AddSubReport(sig_name)
+
+      # Check: Signature contains mandatory fields.
+      self._CheckMandatoryField(sig, 'version', sig_report, sig_name)
+      self._CheckMandatoryField(sig, 'data', None, sig_name)
+      sig_report.AddField('data len', len(sig.data))
+
+      # Check: Signature pertains to the actual payload hash.
+      if sig.version == 1:
+        self._CheckSha256Signature(sig.data, pubkey_file_name,
+                                   payload_hasher.digest(), sig_name)
+      else:
+        raise error.PayloadError('Unknown signature version (%d).' %
+                                 sig.version)
+
+  def Run(self, pubkey_file_name=None, metadata_sig_file=None,
+          rootfs_part_size=0, kernel_part_size=0, report_out_file=None):
+    """Checker entry point, invoking all checks.
+
+    Args:
+      pubkey_file_name: Public key used for signature verification.
+      metadata_sig_file: Metadata signature, if verification is desired.
+      rootfs_part_size: The size of rootfs partitions in bytes (default: infer
+                        based on payload type and version).
+      kernel_part_size: The size of kernel partitions in bytes (default: use
+                        reported filesystem size).
+      report_out_file: File object to dump the report to.
+
+    Raises:
+      error.PayloadError if payload verification failed.
+    """
+    if not pubkey_file_name:
+      pubkey_file_name = _DEFAULT_PUBKEY_FILE_NAME
+
+    report = _PayloadReport()
+
+    # Get payload file size.
+    self.payload.payload_file.seek(0, 2)
+    payload_file_size = self.payload.payload_file.tell()
+    self.payload.ResetFile()
+
+    try:
+      # Check metadata signature (if provided).
+      if metadata_sig_file:
+        metadata_sig = base64.b64decode(metadata_sig_file.read())
+        self._CheckSha256Signature(metadata_sig, pubkey_file_name,
+                                   self.payload.manifest_hasher.digest(),
+                                   'metadata signature')
+
+      # Part 1: Check the file header.
+      report.AddSection('header')
+      # Check: Payload version is valid.
+      if self.payload.header.version != 1:
+        raise error.PayloadError('Unknown payload version (%d).' %
+                                 self.payload.header.version)
+      report.AddField('version', self.payload.header.version)
+      report.AddField('manifest len', self.payload.header.manifest_len)
+
+      # Part 2: Check the manifest.
+      self._CheckManifest(report, rootfs_part_size, kernel_part_size)
+      assert self.payload_type, 'payload type should be known by now'
+
+      # Infer the usable partition size when validating rootfs operations:
+      # - If rootfs partition size was provided, use that.
+      # - Otherwise, if this is an older delta (minor version < 2), stick with
+      #   a known constant size. This is necessary because older deltas may
+      #   exceed the filesystem size when moving data blocks around.
+      # - Otherwise, use the encoded filesystem size.
+      new_rootfs_usable_size = self.new_rootfs_fs_size
+      if rootfs_part_size:
+        new_rootfs_usable_size = rootfs_part_size
+      elif self.payload_type == _TYPE_DELTA and self.minor_version in (None, 1):
+        new_rootfs_usable_size = _OLD_DELTA_USABLE_PART_SIZE
+
+      # Part 3: Examine rootfs operations.
+      # TODO(garnold)(chromium:243559) only default to the filesystem size if
+      # no explicit size provided *and* the partition size is not embedded in
+      # the payload; see issue for more details.
+      report.AddSection('rootfs operations')
+      total_blob_size = self._CheckOperations(
+          self.payload.manifest.install_operations, report,
+          'install_operations', self.old_rootfs_fs_size,
+          self.new_rootfs_fs_size, new_rootfs_usable_size, 0, False)
+
+      # Part 4: Examine kernel operations.
+      # TODO(garnold)(chromium:243559) as above.
+      report.AddSection('kernel operations')
+      total_blob_size += self._CheckOperations(
+          self.payload.manifest.kernel_install_operations, report,
+          'kernel_install_operations', self.old_kernel_fs_size,
+          self.new_kernel_fs_size,
+          kernel_part_size if kernel_part_size else self.new_kernel_fs_size,
+          total_blob_size, True)
+
+      # Check: Operation data reaches the end of the payload file.
+      used_payload_size = self.payload.data_offset + total_blob_size
+      if used_payload_size != payload_file_size:
+        raise error.PayloadError(
+            'Used payload size (%d) different from actual file size (%d).' %
+            (used_payload_size, payload_file_size))
+
+      # Part 5: Handle payload signatures message.
+      if self.check_payload_sig and self.sigs_size:
+        self._CheckSignatures(report, pubkey_file_name)
+
+      # Part 6: Summary.
+      report.AddSection('summary')
+      report.AddField('update type', self.payload_type)
+
+      report.Finalize()
+    finally:
+      if report_out_file:
+        report.Dump(report_out_file)
diff --git a/scripts/update_payload/checker_unittest.py b/scripts/update_payload/checker_unittest.py
new file mode 100755
index 0000000..b05c728
--- /dev/null
+++ b/scripts/update_payload/checker_unittest.py
@@ -0,0 +1,1325 @@
+#!/usr/bin/python2
+#
+# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Unit testing checker.py."""
+
+from __future__ import print_function
+
+import array
+import collections
+import cStringIO
+import hashlib
+import itertools
+import os
+import unittest
+
+# pylint cannot find mox.
+# pylint: disable=F0401
+import mox
+
+import checker
+import common
+import payload as update_payload  # Avoid name conflicts later.
+import test_utils
+import update_metadata_pb2
+
+
+def _OpTypeByName(op_name):
+  op_name_to_type = {
+      'REPLACE': common.OpType.REPLACE,
+      'REPLACE_BZ': common.OpType.REPLACE_BZ,
+      'MOVE': common.OpType.MOVE,
+      'BSDIFF': common.OpType.BSDIFF,
+      'SOURCE_COPY': common.OpType.SOURCE_COPY,
+      'SOURCE_BSDIFF': common.OpType.SOURCE_BSDIFF,
+      'ZERO': common.OpType.ZERO,
+      'DISCARD': common.OpType.DISCARD,
+      'REPLACE_XZ': common.OpType.REPLACE_XZ,
+      'IMGDIFF': common.OpType.IMGDIFF,
+  }
+  return op_name_to_type[op_name]
+
+
+def _GetPayloadChecker(payload_gen_write_to_file_func, payload_gen_dargs=None,
+                       checker_init_dargs=None):
+  """Returns a payload checker from a given payload generator."""
+  if payload_gen_dargs is None:
+    payload_gen_dargs = {}
+  if checker_init_dargs is None:
+    checker_init_dargs = {}
+
+  payload_file = cStringIO.StringIO()
+  payload_gen_write_to_file_func(payload_file, **payload_gen_dargs)
+  payload_file.seek(0)
+  payload = update_payload.Payload(payload_file)
+  payload.Init()
+  return checker.PayloadChecker(payload, **checker_init_dargs)
+
+
+def _GetPayloadCheckerWithData(payload_gen):
+  """Returns a payload checker from a given payload generator."""
+  payload_file = cStringIO.StringIO()
+  payload_gen.WriteToFile(payload_file)
+  payload_file.seek(0)
+  payload = update_payload.Payload(payload_file)
+  payload.Init()
+  return checker.PayloadChecker(payload)
+
+
+# This class doesn't need an __init__().
+# pylint: disable=W0232
+# Unit testing is all about running protected methods.
+# pylint: disable=W0212
+# Don't bark about missing members of classes you cannot import.
+# pylint: disable=E1101
+class PayloadCheckerTest(mox.MoxTestBase):
+  """Tests the PayloadChecker class.
+
+  In addition to ordinary testFoo() methods, which are automatically invoked by
+  the unittest framework, in this class we make use of DoBarTest() calls that
+  implement parametric tests of certain features. In order to invoke each test,
+  which embodies a unique combination of parameter values, as a complete unit
+  test, we perform explicit enumeration of the parameter space and create
+  individual invocation contexts for each, which are then bound as
+  testBar__param1=val1__param2=val2(). The enumeration of parameter spaces for
+  all such tests is done in AddAllParametricTests().
+  """
+
+  def MockPayload(self):
+    """Create a mock payload object, complete with a mock manifest."""
+    payload = self.mox.CreateMock(update_payload.Payload)
+    payload.is_init = True
+    payload.manifest = self.mox.CreateMock(
+        update_metadata_pb2.DeltaArchiveManifest)
+    return payload
+
+  @staticmethod
+  def NewExtent(start_block, num_blocks):
+    """Returns an Extent message.
+
+    Each of the provided fields is set iff it is >= 0; otherwise, it's left at
+    its default state.
+
+    Args:
+      start_block: The starting block of the extent.
+      num_blocks: The number of blocks in the extent.
+
+    Returns:
+      An Extent message.
+    """
+    ex = update_metadata_pb2.Extent()
+    if start_block >= 0:
+      ex.start_block = start_block
+    if num_blocks >= 0:
+      ex.num_blocks = num_blocks
+    return ex
+
+  @staticmethod
+  def NewExtentList(*args):
+    """Returns an list of extents.
+
+    Args:
+      *args: (start_block, num_blocks) pairs defining the extents.
+
+    Returns:
+      A list of Extent objects.
+    """
+    ex_list = []
+    for start_block, num_blocks in args:
+      ex_list.append(PayloadCheckerTest.NewExtent(start_block, num_blocks))
+    return ex_list
+
+  @staticmethod
+  def AddToMessage(repeated_field, field_vals):
+    for field_val in field_vals:
+      new_field = repeated_field.add()
+      new_field.CopyFrom(field_val)
+
+  def SetupAddElemTest(self, is_present, is_submsg, convert=str,
+                       linebreak=False, indent=0):
+    """Setup for testing of _CheckElem() and its derivatives.
+
+    Args:
+      is_present: Whether or not the element is found in the message.
+      is_submsg: Whether the element is a sub-message itself.
+      convert: A representation conversion function.
+      linebreak: Whether or not a linebreak is to be used in the report.
+      indent: Indentation used for the report.
+
+    Returns:
+      msg: A mock message object.
+      report: A mock report object.
+      subreport: A mock sub-report object.
+      name: An element name to check.
+      val: Expected element value.
+    """
+    name = 'foo'
+    val = 'fake submsg' if is_submsg else 'fake field'
+    subreport = 'fake subreport'
+
+    # Create a mock message.
+    msg = self.mox.CreateMock(update_metadata_pb2._message.Message)
+    msg.HasField(name).AndReturn(is_present)
+    setattr(msg, name, val)
+
+    # Create a mock report.
+    report = self.mox.CreateMock(checker._PayloadReport)
+    if is_present:
+      if is_submsg:
+        report.AddSubReport(name).AndReturn(subreport)
+      else:
+        report.AddField(name, convert(val), linebreak=linebreak, indent=indent)
+
+    self.mox.ReplayAll()
+    return (msg, report, subreport, name, val)
+
+  def DoAddElemTest(self, is_present, is_mandatory, is_submsg, convert,
+                    linebreak, indent):
+    """Parametric testing of _CheckElem().
+
+    Args:
+      is_present: Whether or not the element is found in the message.
+      is_mandatory: Whether or not it's a mandatory element.
+      is_submsg: Whether the element is a sub-message itself.
+      convert: A representation conversion function.
+      linebreak: Whether or not a linebreak is to be used in the report.
+      indent: Indentation used for the report.
+    """
+    msg, report, subreport, name, val = self.SetupAddElemTest(
+        is_present, is_submsg, convert, linebreak, indent)
+
+    args = (msg, name, report, is_mandatory, is_submsg)
+    kwargs = {'convert': convert, 'linebreak': linebreak, 'indent': indent}
+    if is_mandatory and not is_present:
+      self.assertRaises(update_payload.PayloadError,
+                        checker.PayloadChecker._CheckElem, *args, **kwargs)
+    else:
+      ret_val, ret_subreport = checker.PayloadChecker._CheckElem(*args,
+                                                                 **kwargs)
+      self.assertEquals(val if is_present else None, ret_val)
+      self.assertEquals(subreport if is_present and is_submsg else None,
+                        ret_subreport)
+
+  def DoAddFieldTest(self, is_mandatory, is_present, convert, linebreak,
+                     indent):
+    """Parametric testing of _Check{Mandatory,Optional}Field().
+
+    Args:
+      is_mandatory: Whether we're testing a mandatory call.
+      is_present: Whether or not the element is found in the message.
+      convert: A representation conversion function.
+      linebreak: Whether or not a linebreak is to be used in the report.
+      indent: Indentation used for the report.
+    """
+    msg, report, _, name, val = self.SetupAddElemTest(
+        is_present, False, convert, linebreak, indent)
+
+    # Prepare for invocation of the tested method.
+    args = [msg, name, report]
+    kwargs = {'convert': convert, 'linebreak': linebreak, 'indent': indent}
+    if is_mandatory:
+      args.append('bar')
+      tested_func = checker.PayloadChecker._CheckMandatoryField
+    else:
+      tested_func = checker.PayloadChecker._CheckOptionalField
+
+    # Test the method call.
+    if is_mandatory and not is_present:
+      self.assertRaises(update_payload.PayloadError, tested_func, *args,
+                        **kwargs)
+    else:
+      ret_val = tested_func(*args, **kwargs)
+      self.assertEquals(val if is_present else None, ret_val)
+
+  def DoAddSubMsgTest(self, is_mandatory, is_present):
+    """Parametrized testing of _Check{Mandatory,Optional}SubMsg().
+
+    Args:
+      is_mandatory: Whether we're testing a mandatory call.
+      is_present: Whether or not the element is found in the message.
+    """
+    msg, report, subreport, name, val = self.SetupAddElemTest(is_present, True)
+
+    # Prepare for invocation of the tested method.
+    args = [msg, name, report]
+    if is_mandatory:
+      args.append('bar')
+      tested_func = checker.PayloadChecker._CheckMandatorySubMsg
+    else:
+      tested_func = checker.PayloadChecker._CheckOptionalSubMsg
+
+    # Test the method call.
+    if is_mandatory and not is_present:
+      self.assertRaises(update_payload.PayloadError, tested_func, *args)
+    else:
+      ret_val, ret_subreport = tested_func(*args)
+      self.assertEquals(val if is_present else None, ret_val)
+      self.assertEquals(subreport if is_present else None, ret_subreport)
+
+  def testCheckPresentIff(self):
+    """Tests _CheckPresentIff()."""
+    self.assertIsNone(checker.PayloadChecker._CheckPresentIff(
+        None, None, 'foo', 'bar', 'baz'))
+    self.assertIsNone(checker.PayloadChecker._CheckPresentIff(
+        'a', 'b', 'foo', 'bar', 'baz'))
+    self.assertRaises(update_payload.PayloadError,
+                      checker.PayloadChecker._CheckPresentIff,
+                      'a', None, 'foo', 'bar', 'baz')
+    self.assertRaises(update_payload.PayloadError,
+                      checker.PayloadChecker._CheckPresentIff,
+                      None, 'b', 'foo', 'bar', 'baz')
+
+  def DoCheckSha256SignatureTest(self, expect_pass, expect_subprocess_call,
+                                 sig_data, sig_asn1_header,
+                                 returned_signed_hash, expected_signed_hash):
+    """Parametric testing of _CheckSha256SignatureTest().
+
+    Args:
+      expect_pass: Whether or not it should pass.
+      expect_subprocess_call: Whether to expect the openssl call to happen.
+      sig_data: The signature raw data.
+      sig_asn1_header: The ASN.1 header.
+      returned_signed_hash: The signed hash data returned by openssl.
+      expected_signed_hash: The signed hash data to compare against.
+    """
+    try:
+      # Stub out the subprocess invocation.
+      self.mox.StubOutWithMock(checker.PayloadChecker, '_Run')
+      if expect_subprocess_call:
+        checker.PayloadChecker._Run(
+            mox.IsA(list), send_data=sig_data).AndReturn(
+                (sig_asn1_header + returned_signed_hash, None))
+
+      self.mox.ReplayAll()
+      if expect_pass:
+        self.assertIsNone(checker.PayloadChecker._CheckSha256Signature(
+            sig_data, 'foo', expected_signed_hash, 'bar'))
+      else:
+        self.assertRaises(update_payload.PayloadError,
+                          checker.PayloadChecker._CheckSha256Signature,
+                          sig_data, 'foo', expected_signed_hash, 'bar')
+    finally:
+      self.mox.UnsetStubs()
+
+  def testCheckSha256Signature_Pass(self):
+    """Tests _CheckSha256Signature(); pass case."""
+    sig_data = 'fake-signature'.ljust(256)
+    signed_hash = hashlib.sha256('fake-data').digest()
+    self.DoCheckSha256SignatureTest(True, True, sig_data,
+                                    common.SIG_ASN1_HEADER, signed_hash,
+                                    signed_hash)
+
+  def testCheckSha256Signature_FailBadSignature(self):
+    """Tests _CheckSha256Signature(); fails due to malformed signature."""
+    sig_data = 'fake-signature'  # Malformed (not 256 bytes in length).
+    signed_hash = hashlib.sha256('fake-data').digest()
+    self.DoCheckSha256SignatureTest(False, False, sig_data,
+                                    common.SIG_ASN1_HEADER, signed_hash,
+                                    signed_hash)
+
+  def testCheckSha256Signature_FailBadOutputLength(self):
+    """Tests _CheckSha256Signature(); fails due to unexpected output length."""
+    sig_data = 'fake-signature'.ljust(256)
+    signed_hash = 'fake-hash'  # Malformed (not 32 bytes in length).
+    self.DoCheckSha256SignatureTest(False, True, sig_data,
+                                    common.SIG_ASN1_HEADER, signed_hash,
+                                    signed_hash)
+
+  def testCheckSha256Signature_FailBadAsnHeader(self):
+    """Tests _CheckSha256Signature(); fails due to bad ASN1 header."""
+    sig_data = 'fake-signature'.ljust(256)
+    signed_hash = hashlib.sha256('fake-data').digest()
+    bad_asn1_header = 'bad-asn-header'.ljust(len(common.SIG_ASN1_HEADER))
+    self.DoCheckSha256SignatureTest(False, True, sig_data, bad_asn1_header,
+                                    signed_hash, signed_hash)
+
+  def testCheckSha256Signature_FailBadHash(self):
+    """Tests _CheckSha256Signature(); fails due to bad hash returned."""
+    sig_data = 'fake-signature'.ljust(256)
+    expected_signed_hash = hashlib.sha256('fake-data').digest()
+    returned_signed_hash = hashlib.sha256('bad-fake-data').digest()
+    self.DoCheckSha256SignatureTest(False, True, sig_data,
+                                    common.SIG_ASN1_HEADER,
+                                    expected_signed_hash, returned_signed_hash)
+
+  def testCheckBlocksFitLength_Pass(self):
+    """Tests _CheckBlocksFitLength(); pass case."""
+    self.assertIsNone(checker.PayloadChecker._CheckBlocksFitLength(
+        64, 4, 16, 'foo'))
+    self.assertIsNone(checker.PayloadChecker._CheckBlocksFitLength(
+        60, 4, 16, 'foo'))
+    self.assertIsNone(checker.PayloadChecker._CheckBlocksFitLength(
+        49, 4, 16, 'foo'))
+    self.assertIsNone(checker.PayloadChecker._CheckBlocksFitLength(
+        48, 3, 16, 'foo'))
+
+  def testCheckBlocksFitLength_TooManyBlocks(self):
+    """Tests _CheckBlocksFitLength(); fails due to excess blocks."""
+    self.assertRaises(update_payload.PayloadError,
+                      checker.PayloadChecker._CheckBlocksFitLength,
+                      64, 5, 16, 'foo')
+    self.assertRaises(update_payload.PayloadError,
+                      checker.PayloadChecker._CheckBlocksFitLength,
+                      60, 5, 16, 'foo')
+    self.assertRaises(update_payload.PayloadError,
+                      checker.PayloadChecker._CheckBlocksFitLength,
+                      49, 5, 16, 'foo')
+    self.assertRaises(update_payload.PayloadError,
+                      checker.PayloadChecker._CheckBlocksFitLength,
+                      48, 4, 16, 'foo')
+
+  def testCheckBlocksFitLength_TooFewBlocks(self):
+    """Tests _CheckBlocksFitLength(); fails due to insufficient blocks."""
+    self.assertRaises(update_payload.PayloadError,
+                      checker.PayloadChecker._CheckBlocksFitLength,
+                      64, 3, 16, 'foo')
+    self.assertRaises(update_payload.PayloadError,
+                      checker.PayloadChecker._CheckBlocksFitLength,
+                      60, 3, 16, 'foo')
+    self.assertRaises(update_payload.PayloadError,
+                      checker.PayloadChecker._CheckBlocksFitLength,
+                      49, 3, 16, 'foo')
+    self.assertRaises(update_payload.PayloadError,
+                      checker.PayloadChecker._CheckBlocksFitLength,
+                      48, 2, 16, 'foo')
+
+  def DoCheckManifestTest(self, fail_mismatched_block_size, fail_bad_sigs,
+                          fail_mismatched_oki_ori, fail_bad_oki, fail_bad_ori,
+                          fail_bad_nki, fail_bad_nri, fail_old_kernel_fs_size,
+                          fail_old_rootfs_fs_size, fail_new_kernel_fs_size,
+                          fail_new_rootfs_fs_size):
+    """Parametric testing of _CheckManifest().
+
+    Args:
+      fail_mismatched_block_size: Simulate a missing block_size field.
+      fail_bad_sigs: Make signatures descriptor inconsistent.
+      fail_mismatched_oki_ori: Make old rootfs/kernel info partially present.
+      fail_bad_oki: Tamper with old kernel info.
+      fail_bad_ori: Tamper with old rootfs info.
+      fail_bad_nki: Tamper with new kernel info.
+      fail_bad_nri: Tamper with new rootfs info.
+      fail_old_kernel_fs_size: Make old kernel fs size too big.
+      fail_old_rootfs_fs_size: Make old rootfs fs size too big.
+      fail_new_kernel_fs_size: Make new kernel fs size too big.
+      fail_new_rootfs_fs_size: Make new rootfs fs size too big.
+    """
+    # Generate a test payload. For this test, we only care about the manifest
+    # and don't need any data blobs, hence we can use a plain payload generator
+    # (which also gives us more control over things that can be screwed up).
+    payload_gen = test_utils.PayloadGenerator()
+
+    # Tamper with block size, if required.
+    if fail_mismatched_block_size:
+      payload_gen.SetBlockSize(test_utils.KiB(1))
+    else:
+      payload_gen.SetBlockSize(test_utils.KiB(4))
+
+    # Add some operations.
+    payload_gen.AddOperation(False, common.OpType.MOVE,
+                             src_extents=[(0, 16), (16, 497)],
+                             dst_extents=[(16, 496), (0, 16)])
+    payload_gen.AddOperation(True, common.OpType.MOVE,
+                             src_extents=[(0, 8), (8, 8)],
+                             dst_extents=[(8, 8), (0, 8)])
+
+    # Set an invalid signatures block (offset but no size), if required.
+    if fail_bad_sigs:
+      payload_gen.SetSignatures(32, None)
+
+    # Set partition / filesystem sizes.
+    rootfs_part_size = test_utils.MiB(8)
+    kernel_part_size = test_utils.KiB(512)
+    old_rootfs_fs_size = new_rootfs_fs_size = rootfs_part_size
+    old_kernel_fs_size = new_kernel_fs_size = kernel_part_size
+    if fail_old_kernel_fs_size:
+      old_kernel_fs_size += 100
+    if fail_old_rootfs_fs_size:
+      old_rootfs_fs_size += 100
+    if fail_new_kernel_fs_size:
+      new_kernel_fs_size += 100
+    if fail_new_rootfs_fs_size:
+      new_rootfs_fs_size += 100
+
+    # Add old kernel/rootfs partition info, as required.
+    if fail_mismatched_oki_ori or fail_old_kernel_fs_size or fail_bad_oki:
+      oki_hash = (None if fail_bad_oki
+                  else hashlib.sha256('fake-oki-content').digest())
+      payload_gen.SetPartInfo(True, False, old_kernel_fs_size, oki_hash)
+    if not fail_mismatched_oki_ori and (fail_old_rootfs_fs_size or
+                                        fail_bad_ori):
+      ori_hash = (None if fail_bad_ori
+                  else hashlib.sha256('fake-ori-content').digest())
+      payload_gen.SetPartInfo(False, False, old_rootfs_fs_size, ori_hash)
+
+    # Add new kernel/rootfs partition info.
+    payload_gen.SetPartInfo(
+        True, True, new_kernel_fs_size,
+        None if fail_bad_nki else hashlib.sha256('fake-nki-content').digest())
+    payload_gen.SetPartInfo(
+        False, True, new_rootfs_fs_size,
+        None if fail_bad_nri else hashlib.sha256('fake-nri-content').digest())
+
+    # Set the minor version.
+    payload_gen.SetMinorVersion(0)
+
+    # Create the test object.
+    payload_checker = _GetPayloadChecker(payload_gen.WriteToFile)
+    report = checker._PayloadReport()
+
+    should_fail = (fail_mismatched_block_size or fail_bad_sigs or
+                   fail_mismatched_oki_ori or fail_bad_oki or fail_bad_ori or
+                   fail_bad_nki or fail_bad_nri or fail_old_kernel_fs_size or
+                   fail_old_rootfs_fs_size or fail_new_kernel_fs_size or
+                   fail_new_rootfs_fs_size)
+    if should_fail:
+      self.assertRaises(update_payload.PayloadError,
+                        payload_checker._CheckManifest, report,
+                        rootfs_part_size, kernel_part_size)
+    else:
+      self.assertIsNone(payload_checker._CheckManifest(report,
+                                                       rootfs_part_size,
+                                                       kernel_part_size))
+
+  def testCheckLength(self):
+    """Tests _CheckLength()."""
+    payload_checker = checker.PayloadChecker(self.MockPayload())
+    block_size = payload_checker.block_size
+
+    # Passes.
+    self.assertIsNone(payload_checker._CheckLength(
+        int(3.5 * block_size), 4, 'foo', 'bar'))
+    # Fails, too few blocks.
+    self.assertRaises(update_payload.PayloadError,
+                      payload_checker._CheckLength,
+                      int(3.5 * block_size), 3, 'foo', 'bar')
+    # Fails, too many blocks.
+    self.assertRaises(update_payload.PayloadError,
+                      payload_checker._CheckLength,
+                      int(3.5 * block_size), 5, 'foo', 'bar')
+
+  def testCheckExtents(self):
+    """Tests _CheckExtents()."""
+    payload_checker = checker.PayloadChecker(self.MockPayload())
+    block_size = payload_checker.block_size
+
+    # Passes w/ all real extents.
+    extents = self.NewExtentList((0, 4), (8, 3), (1024, 16))
+    self.assertEquals(
+        23,
+        payload_checker._CheckExtents(extents, (1024 + 16) * block_size,
+                                      collections.defaultdict(int), 'foo'))
+
+    # Passes w/ pseudo-extents (aka sparse holes).
+    extents = self.NewExtentList((0, 4), (common.PSEUDO_EXTENT_MARKER, 5),
+                                 (8, 3))
+    self.assertEquals(
+        12,
+        payload_checker._CheckExtents(extents, (1024 + 16) * block_size,
+                                      collections.defaultdict(int), 'foo',
+                                      allow_pseudo=True))
+
+    # Passes w/ pseudo-extent due to a signature.
+    extents = self.NewExtentList((common.PSEUDO_EXTENT_MARKER, 2))
+    self.assertEquals(
+        2,
+        payload_checker._CheckExtents(extents, (1024 + 16) * block_size,
+                                      collections.defaultdict(int), 'foo',
+                                      allow_signature=True))
+
+    # Fails, extent missing a start block.
+    extents = self.NewExtentList((-1, 4), (8, 3), (1024, 16))
+    self.assertRaises(
+        update_payload.PayloadError, payload_checker._CheckExtents,
+        extents, (1024 + 16) * block_size, collections.defaultdict(int),
+        'foo')
+
+    # Fails, extent missing block count.
+    extents = self.NewExtentList((0, -1), (8, 3), (1024, 16))
+    self.assertRaises(
+        update_payload.PayloadError, payload_checker._CheckExtents,
+        extents, (1024 + 16) * block_size, collections.defaultdict(int),
+        'foo')
+
+    # Fails, extent has zero blocks.
+    extents = self.NewExtentList((0, 4), (8, 3), (1024, 0))
+    self.assertRaises(
+        update_payload.PayloadError, payload_checker._CheckExtents,
+        extents, (1024 + 16) * block_size, collections.defaultdict(int),
+        'foo')
+
+    # Fails, extent exceeds partition boundaries.
+    extents = self.NewExtentList((0, 4), (8, 3), (1024, 16))
+    self.assertRaises(
+        update_payload.PayloadError, payload_checker._CheckExtents,
+        extents, (1024 + 15) * block_size, collections.defaultdict(int),
+        'foo')
+
+  def testCheckReplaceOperation(self):
+    """Tests _CheckReplaceOperation() where op.type == REPLACE."""
+    payload_checker = checker.PayloadChecker(self.MockPayload())
+    block_size = payload_checker.block_size
+    data_length = 10000
+
+    op = self.mox.CreateMock(
+        update_metadata_pb2.InstallOperation)
+    op.type = common.OpType.REPLACE
+
+    # Pass.
+    op.src_extents = []
+    self.assertIsNone(
+        payload_checker._CheckReplaceOperation(
+            op, data_length, (data_length + block_size - 1) / block_size,
+            'foo'))
+
+    # Fail, src extents present.
+    op.src_extents = ['bar']
+    self.assertRaises(
+        update_payload.PayloadError,
+        payload_checker._CheckReplaceOperation,
+        op, data_length, (data_length + block_size - 1) / block_size, 'foo')
+
+    # Fail, missing data.
+    op.src_extents = []
+    self.assertRaises(
+        update_payload.PayloadError,
+        payload_checker._CheckReplaceOperation,
+        op, None, (data_length + block_size - 1) / block_size, 'foo')
+
+    # Fail, length / block number mismatch.
+    op.src_extents = []
+    self.assertRaises(
+        update_payload.PayloadError,
+        payload_checker._CheckReplaceOperation,
+        op, data_length, (data_length + block_size - 1) / block_size + 1, 'foo')
+
+  def testCheckReplaceBzOperation(self):
+    """Tests _CheckReplaceOperation() where op.type == REPLACE_BZ."""
+    payload_checker = checker.PayloadChecker(self.MockPayload())
+    block_size = payload_checker.block_size
+    data_length = block_size * 3
+
+    op = self.mox.CreateMock(
+        update_metadata_pb2.InstallOperation)
+    op.type = common.OpType.REPLACE_BZ
+
+    # Pass.
+    op.src_extents = []
+    self.assertIsNone(
+        payload_checker._CheckReplaceOperation(
+            op, data_length, (data_length + block_size - 1) / block_size + 5,
+            'foo'))
+
+    # Fail, src extents present.
+    op.src_extents = ['bar']
+    self.assertRaises(
+        update_payload.PayloadError,
+        payload_checker._CheckReplaceOperation,
+        op, data_length, (data_length + block_size - 1) / block_size + 5, 'foo')
+
+    # Fail, missing data.
+    op.src_extents = []
+    self.assertRaises(
+        update_payload.PayloadError,
+        payload_checker._CheckReplaceOperation,
+        op, None, (data_length + block_size - 1) / block_size, 'foo')
+
+    # Fail, too few blocks to justify BZ.
+    op.src_extents = []
+    self.assertRaises(
+        update_payload.PayloadError,
+        payload_checker._CheckReplaceOperation,
+        op, data_length, (data_length + block_size - 1) / block_size, 'foo')
+
+  def testCheckMoveOperation_Pass(self):
+    """Tests _CheckMoveOperation(); pass case."""
+    payload_checker = checker.PayloadChecker(self.MockPayload())
+    op = update_metadata_pb2.InstallOperation()
+    op.type = common.OpType.MOVE
+
+    self.AddToMessage(op.src_extents,
+                      self.NewExtentList((1, 4), (12, 2), (1024, 128)))
+    self.AddToMessage(op.dst_extents,
+                      self.NewExtentList((16, 128), (512, 6)))
+    self.assertIsNone(
+        payload_checker._CheckMoveOperation(op, None, 134, 134, 'foo'))
+
+  def testCheckMoveOperation_FailContainsData(self):
+    """Tests _CheckMoveOperation(); fails, message contains data."""
+    payload_checker = checker.PayloadChecker(self.MockPayload())
+    op = update_metadata_pb2.InstallOperation()
+    op.type = common.OpType.MOVE
+
+    self.AddToMessage(op.src_extents,
+                      self.NewExtentList((1, 4), (12, 2), (1024, 128)))
+    self.AddToMessage(op.dst_extents,
+                      self.NewExtentList((16, 128), (512, 6)))
+    self.assertRaises(
+        update_payload.PayloadError,
+        payload_checker._CheckMoveOperation,
+        op, 1024, 134, 134, 'foo')
+
+  def testCheckMoveOperation_FailInsufficientSrcBlocks(self):
+    """Tests _CheckMoveOperation(); fails, not enough actual src blocks."""
+    payload_checker = checker.PayloadChecker(self.MockPayload())
+    op = update_metadata_pb2.InstallOperation()
+    op.type = common.OpType.MOVE
+
+    self.AddToMessage(op.src_extents,
+                      self.NewExtentList((1, 4), (12, 2), (1024, 127)))
+    self.AddToMessage(op.dst_extents,
+                      self.NewExtentList((16, 128), (512, 6)))
+    self.assertRaises(
+        update_payload.PayloadError,
+        payload_checker._CheckMoveOperation,
+        op, None, 134, 134, 'foo')
+
+  def testCheckMoveOperation_FailInsufficientDstBlocks(self):
+    """Tests _CheckMoveOperation(); fails, not enough actual dst blocks."""
+    payload_checker = checker.PayloadChecker(self.MockPayload())
+    op = update_metadata_pb2.InstallOperation()
+    op.type = common.OpType.MOVE
+
+    self.AddToMessage(op.src_extents,
+                      self.NewExtentList((1, 4), (12, 2), (1024, 128)))
+    self.AddToMessage(op.dst_extents,
+                      self.NewExtentList((16, 128), (512, 5)))
+    self.assertRaises(
+        update_payload.PayloadError,
+        payload_checker._CheckMoveOperation,
+        op, None, 134, 134, 'foo')
+
+  def testCheckMoveOperation_FailExcessSrcBlocks(self):
+    """Tests _CheckMoveOperation(); fails, too many actual src blocks."""
+    payload_checker = checker.PayloadChecker(self.MockPayload())
+    op = update_metadata_pb2.InstallOperation()
+    op.type = common.OpType.MOVE
+
+    self.AddToMessage(op.src_extents,
+                      self.NewExtentList((1, 4), (12, 2), (1024, 128)))
+    self.AddToMessage(op.dst_extents,
+                      self.NewExtentList((16, 128), (512, 5)))
+    self.assertRaises(
+        update_payload.PayloadError,
+        payload_checker._CheckMoveOperation,
+        op, None, 134, 134, 'foo')
+    self.AddToMessage(op.src_extents,
+                      self.NewExtentList((1, 4), (12, 2), (1024, 129)))
+    self.AddToMessage(op.dst_extents,
+                      self.NewExtentList((16, 128), (512, 6)))
+    self.assertRaises(
+        update_payload.PayloadError,
+        payload_checker._CheckMoveOperation,
+        op, None, 134, 134, 'foo')
+
+  def testCheckMoveOperation_FailExcessDstBlocks(self):
+    """Tests _CheckMoveOperation(); fails, too many actual dst blocks."""
+    payload_checker = checker.PayloadChecker(self.MockPayload())
+    op = update_metadata_pb2.InstallOperation()
+    op.type = common.OpType.MOVE
+
+    self.AddToMessage(op.src_extents,
+                      self.NewExtentList((1, 4), (12, 2), (1024, 128)))
+    self.AddToMessage(op.dst_extents,
+                      self.NewExtentList((16, 128), (512, 7)))
+    self.assertRaises(
+        update_payload.PayloadError,
+        payload_checker._CheckMoveOperation,
+        op, None, 134, 134, 'foo')
+
+  def testCheckMoveOperation_FailStagnantBlocks(self):
+    """Tests _CheckMoveOperation(); fails, there are blocks that do not move."""
+    payload_checker = checker.PayloadChecker(self.MockPayload())
+    op = update_metadata_pb2.InstallOperation()
+    op.type = common.OpType.MOVE
+
+    self.AddToMessage(op.src_extents,
+                      self.NewExtentList((1, 4), (12, 2), (1024, 128)))
+    self.AddToMessage(op.dst_extents,
+                      self.NewExtentList((8, 128), (512, 6)))
+    self.assertRaises(
+        update_payload.PayloadError,
+        payload_checker._CheckMoveOperation,
+        op, None, 134, 134, 'foo')
+
+  def testCheckMoveOperation_FailZeroStartBlock(self):
+    """Tests _CheckMoveOperation(); fails, has extent with start block 0."""
+    payload_checker = checker.PayloadChecker(self.MockPayload())
+    op = update_metadata_pb2.InstallOperation()
+    op.type = common.OpType.MOVE
+
+    self.AddToMessage(op.src_extents,
+                      self.NewExtentList((0, 4), (12, 2), (1024, 128)))
+    self.AddToMessage(op.dst_extents,
+                      self.NewExtentList((8, 128), (512, 6)))
+    self.assertRaises(
+        update_payload.PayloadError,
+        payload_checker._CheckMoveOperation,
+        op, None, 134, 134, 'foo')
+
+    self.AddToMessage(op.src_extents,
+                      self.NewExtentList((1, 4), (12, 2), (1024, 128)))
+    self.AddToMessage(op.dst_extents,
+                      self.NewExtentList((0, 128), (512, 6)))
+    self.assertRaises(
+        update_payload.PayloadError,
+        payload_checker._CheckMoveOperation,
+        op, None, 134, 134, 'foo')
+
+  def testCheckBsdiff(self):
+    """Tests _CheckMoveOperation()."""
+    payload_checker = checker.PayloadChecker(self.MockPayload())
+
+    # Pass.
+    self.assertIsNone(
+        payload_checker._CheckBsdiffOperation(10000, 3, 'foo'))
+
+    # Fail, missing data blob.
+    self.assertRaises(
+        update_payload.PayloadError,
+        payload_checker._CheckBsdiffOperation,
+        None, 3, 'foo')
+
+    # Fail, too big of a diff blob (unjustified).
+    self.assertRaises(
+        update_payload.PayloadError,
+        payload_checker._CheckBsdiffOperation,
+        10000, 2, 'foo')
+
+  def testCheckSourceCopyOperation_Pass(self):
+    """Tests _CheckSourceCopyOperation(); pass case."""
+    payload_checker = checker.PayloadChecker(self.MockPayload())
+    self.assertIsNone(
+        payload_checker._CheckSourceCopyOperation(None, 134, 134, 'foo'))
+
+  def testCheckSourceCopyOperation_FailContainsData(self):
+    """Tests _CheckSourceCopyOperation(); message contains data."""
+    payload_checker = checker.PayloadChecker(self.MockPayload())
+    self.assertRaises(update_payload.PayloadError,
+                      payload_checker._CheckSourceCopyOperation,
+                      134, 0, 0, 'foo')
+
+  def testCheckSourceCopyOperation_FailBlockCountsMismatch(self):
+    """Tests _CheckSourceCopyOperation(); src and dst block totals not equal."""
+    payload_checker = checker.PayloadChecker(self.MockPayload())
+    self.assertRaises(update_payload.PayloadError,
+                      payload_checker._CheckSourceCopyOperation,
+                      None, 0, 1, 'foo')
+
+  def DoCheckOperationTest(self, op_type_name, is_last, allow_signature,
+                           allow_unhashed, fail_src_extents, fail_dst_extents,
+                           fail_mismatched_data_offset_length,
+                           fail_missing_dst_extents, fail_src_length,
+                           fail_dst_length, fail_data_hash,
+                           fail_prev_data_offset, fail_bad_minor_version):
+    """Parametric testing of _CheckOperation().
+
+    Args:
+      op_type_name: 'REPLACE', 'REPLACE_BZ', 'MOVE', 'BSDIFF', 'SOURCE_COPY',
+        or 'SOURCE_BSDIFF'.
+      is_last: Whether we're testing the last operation in a sequence.
+      allow_signature: Whether we're testing a signature-capable operation.
+      allow_unhashed: Whether to allow operations with unhashed data blobs.
+      fail_src_extents: Tamper with src extents.
+      fail_dst_extents: Tamper with dst extents.
+      fail_mismatched_data_offset_length: Make data_{offset,length}
+        inconsistent.
+      fail_missing_dst_extents: Do not include dst extents.
+      fail_src_length: Make src length inconsistent.
+      fail_dst_length: Make dst length inconsistent.
+      fail_data_hash: Tamper with the data blob hash.
+      fail_prev_data_offset: Make data blob offsets non-contiguous.
+      fail_bad_minor_version: Make minor version incompatible with op.
+    """
+    op_type = _OpTypeByName(op_type_name)
+
+    # Create the test object.
+    payload = self.MockPayload()
+    payload_checker = checker.PayloadChecker(payload,
+                                             allow_unhashed=allow_unhashed)
+    block_size = payload_checker.block_size
+
+    # Create auxiliary arguments.
+    old_part_size = test_utils.MiB(4)
+    new_part_size = test_utils.MiB(8)
+    old_block_counters = array.array(
+        'B', [0] * ((old_part_size + block_size - 1) / block_size))
+    new_block_counters = array.array(
+        'B', [0] * ((new_part_size + block_size - 1) / block_size))
+    prev_data_offset = 1876
+    blob_hash_counts = collections.defaultdict(int)
+
+    # Create the operation object for the test.
+    op = update_metadata_pb2.InstallOperation()
+    op.type = op_type
+
+    total_src_blocks = 0
+    if op_type in (common.OpType.MOVE, common.OpType.BSDIFF,
+                   common.OpType.SOURCE_COPY, common.OpType.SOURCE_BSDIFF):
+      if fail_src_extents:
+        self.AddToMessage(op.src_extents,
+                          self.NewExtentList((1, 0)))
+      else:
+        self.AddToMessage(op.src_extents,
+                          self.NewExtentList((1, 16)))
+        total_src_blocks = 16
+
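+    # Choose a minor version compatible with the operation type, or an
+    # incompatible one when fail_bad_minor_version is set: REPLACE/REPLACE_BZ
+    # are valid under any minor version, MOVE/BSDIFF require the in-place
+    # version (1), and SOURCE_COPY/SOURCE_BSDIFF the source version (2).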
+    if op_type in (common.OpType.REPLACE, common.OpType.REPLACE_BZ):
+      payload_checker.minor_version = 0
+    elif op_type in (common.OpType.MOVE, common.OpType.BSDIFF):
+      payload_checker.minor_version = 2 if fail_bad_minor_version else 1
+    elif op_type in (common.OpType.SOURCE_COPY, common.OpType.SOURCE_BSDIFF):
+      payload_checker.minor_version = 1 if fail_bad_minor_version else 2
+
+    if op_type not in (common.OpType.MOVE, common.OpType.SOURCE_COPY):
+      if not fail_mismatched_data_offset_length:
+        op.data_length = 16 * block_size - 8
+      if fail_prev_data_offset:
+        op.data_offset = prev_data_offset + 16
+      else:
+        op.data_offset = prev_data_offset
+
+      fake_data = 'fake-data'.ljust(op.data_length)
+      if not (allow_unhashed or (is_last and allow_signature and
+                                 op_type == common.OpType.REPLACE)):
+        if not fail_data_hash:
+          # Create a valid data blob hash.
+          op.data_sha256_hash = hashlib.sha256(fake_data).digest()
+          payload.ReadDataBlob(op.data_offset, op.data_length).AndReturn(
+              fake_data)
+      elif fail_data_hash:
+        # Create an invalid data blob hash.
+        op.data_sha256_hash = hashlib.sha256(
+            fake_data.replace(' ', '-')).digest()
+        payload.ReadDataBlob(op.data_offset, op.data_length).AndReturn(
+            fake_data)
+
+    total_dst_blocks = 0
+    if not fail_missing_dst_extents:
+      total_dst_blocks = 16
+      if fail_dst_extents:
+        self.AddToMessage(op.dst_extents,
+                          self.NewExtentList((4, 16), (32, 0)))
+      else:
+        self.AddToMessage(op.dst_extents,
+                          self.NewExtentList((4, 8), (64, 8)))
+
+    if total_src_blocks:
+      if fail_src_length:
+        op.src_length = total_src_blocks * block_size + 8
+      else:
+        op.src_length = total_src_blocks * block_size
+    elif fail_src_length:
+      # Add an orphaned src_length.
+      op.src_length = 16
+
+    if total_dst_blocks:
+      if fail_dst_length:
+        op.dst_length = total_dst_blocks * block_size + 8
+      else:
+        op.dst_length = total_dst_blocks * block_size
+
+    self.mox.ReplayAll()
+    should_fail = (fail_src_extents or fail_dst_extents or
+                   fail_mismatched_data_offset_length or
+                   fail_missing_dst_extents or fail_src_length or
+                   fail_dst_length or fail_data_hash or fail_prev_data_offset or
+                   fail_bad_minor_version)
+    args = (op, 'foo', is_last, old_block_counters, new_block_counters,
+            old_part_size, new_part_size, prev_data_offset, allow_signature,
+            blob_hash_counts)
+    if should_fail:
+      self.assertRaises(update_payload.PayloadError,
+                        payload_checker._CheckOperation, *args)
+    else:
+      self.assertEqual(op.data_length if op.HasField('data_length') else 0,
+                       payload_checker._CheckOperation(*args))
+
+  def testAllocBlockCounters(self):
+    """Tests _CheckMoveOperation()."""
+    payload_checker = checker.PayloadChecker(self.MockPayload())
+    block_size = payload_checker.block_size
+
+    # Check allocation for a block-aligned partition size; ensure the
+    # counters are integers.
+    result = payload_checker._AllocBlockCounters(16 * block_size)
+    self.assertEqual(16, len(result))
+    self.assertEqual(int, type(result[0]))
+
+    # Check allocation of unaligned partition sizes.
+    result = payload_checker._AllocBlockCounters(16 * block_size - 1)
+    self.assertEqual(16, len(result))
+    result = payload_checker._AllocBlockCounters(16 * block_size + 1)
+    self.assertEqual(17, len(result))
+
+  def DoCheckOperationsTest(self, fail_nonexhaustive_full_update):
+    # Generate a test payload. For this test, we only care about one
+    # (arbitrary) set of operations, so we generate a single rootfs operation
+    # and test with it.
+    payload_gen = test_utils.PayloadGenerator()
+
+    block_size = test_utils.KiB(4)
+    payload_gen.SetBlockSize(block_size)
+
+    rootfs_part_size = test_utils.MiB(8)
+
+    # Fake rootfs operations in a full update, tampered with as required.
+    rootfs_op_type = common.OpType.REPLACE
+    rootfs_data_length = rootfs_part_size
+    if fail_nonexhaustive_full_update:
+      rootfs_data_length -= block_size
+
+    payload_gen.AddOperation(False, rootfs_op_type,
+                             dst_extents=[(0, rootfs_data_length / block_size)],
+                             data_offset=0,
+                             data_length=rootfs_data_length)
+
+    # Create the test object.
+    payload_checker = _GetPayloadChecker(payload_gen.WriteToFile,
+                                         checker_init_dargs={
+                                             'allow_unhashed': True})
+    payload_checker.payload_type = checker._TYPE_FULL
+    report = checker._PayloadReport()
+
+    args = (payload_checker.payload.manifest.install_operations, report,
+            'foo', 0, rootfs_part_size, rootfs_part_size, 0, False)
+    if fail_nonexhaustive_full_update:
+      self.assertRaises(update_payload.PayloadError,
+                        payload_checker._CheckOperations, *args)
+    else:
+      self.assertEqual(rootfs_data_length,
+                       payload_checker._CheckOperations(*args))
+
+  def DoCheckSignaturesTest(self, fail_empty_sigs_blob, fail_missing_pseudo_op,
+                            fail_mismatched_pseudo_op, fail_sig_missing_fields,
+                            fail_unknown_sig_version, fail_incorrect_sig):
+    # Generate a test payload. For this test, we only care about the signature
+    # block and how it relates to the payload hash. Therefore, we're generating
+    # a random (otherwise useless) payload for this purpose.
+    payload_gen = test_utils.EnhancedPayloadGenerator()
+    block_size = test_utils.KiB(4)
+    payload_gen.SetBlockSize(block_size)
+    rootfs_part_size = test_utils.MiB(2)
+    kernel_part_size = test_utils.KiB(16)
+    payload_gen.SetPartInfo(False, True, rootfs_part_size,
+                            hashlib.sha256('fake-new-rootfs-content').digest())
+    payload_gen.SetPartInfo(True, True, kernel_part_size,
+                            hashlib.sha256('fake-new-kernel-content').digest())
+    payload_gen.SetMinorVersion(0)
+    payload_gen.AddOperationWithData(
+        False, common.OpType.REPLACE,
+        dst_extents=[(0, rootfs_part_size / block_size)],
+        data_blob=os.urandom(rootfs_part_size))
+
+    do_forge_pseudo_op = (fail_missing_pseudo_op or fail_mismatched_pseudo_op)
+    do_forge_sigs_data = (do_forge_pseudo_op or fail_empty_sigs_blob or
+                          fail_sig_missing_fields or
+                          fail_unknown_sig_version or fail_incorrect_sig)
+
+    sigs_data = None
+    if do_forge_sigs_data:
+      sigs_gen = test_utils.SignaturesGenerator()
+      if not fail_empty_sigs_blob:
+        if fail_sig_missing_fields:
+          sig_data = None
+        else:
+          sig_data = test_utils.SignSha256('fake-payload-content',
+                                           test_utils._PRIVKEY_FILE_NAME)
+        sigs_gen.AddSig(5 if fail_unknown_sig_version else 1, sig_data)
+
+      sigs_data = sigs_gen.ToBinary()
+      payload_gen.SetSignatures(payload_gen.curr_offset, len(sigs_data))
+
+    if do_forge_pseudo_op:
+      assert sigs_data is not None, 'should have forged signatures blob by now'
+      sigs_len = len(sigs_data)
+      payload_gen.AddOperation(
+          False, common.OpType.REPLACE,
+          data_offset=payload_gen.curr_offset / 2,
+          data_length=sigs_len / 2,
+          dst_extents=[(0, (sigs_len / 2 + block_size - 1) / block_size)])
+
+    # Generate payload (complete w/ signature) and create the test object.
+    payload_checker = _GetPayloadChecker(
+        payload_gen.WriteToFileWithData,
+        payload_gen_dargs={
+            'sigs_data': sigs_data,
+            'privkey_file_name': test_utils._PRIVKEY_FILE_NAME,
+            'do_add_pseudo_operation': not do_forge_pseudo_op})
+    payload_checker.payload_type = checker._TYPE_FULL
+    report = checker._PayloadReport()
+
+    # We have to check the manifest first in order to set signature attributes.
+    payload_checker._CheckManifest(report, rootfs_part_size, kernel_part_size)
+
+    should_fail = (fail_empty_sigs_blob or fail_missing_pseudo_op or
+                   fail_mismatched_pseudo_op or fail_sig_missing_fields or
+                   fail_unknown_sig_version or fail_incorrect_sig)
+    args = (report, test_utils._PUBKEY_FILE_NAME)
+    if should_fail:
+      self.assertRaises(update_payload.PayloadError,
+                        payload_checker._CheckSignatures, *args)
+    else:
+      self.assertIsNone(payload_checker._CheckSignatures(*args))
+
+  def DoCheckManifestMinorVersionTest(self, minor_version, payload_type):
+    """Parametric testing for CheckManifestMinorVersion().
+
+    Args:
+      minor_version: The payload minor version to test with.
+      payload_type: The type of the payload we're testing, delta or full.
+    """
+    # Create the test object.
+    payload = self.MockPayload()
+    payload.manifest.minor_version = minor_version
+    payload_checker = checker.PayloadChecker(payload)
+    payload_checker.payload_type = payload_type
+    report = checker._PayloadReport()
+
+    should_succeed = (
+        (minor_version == 0 and payload_type == checker._TYPE_FULL) or
+        (minor_version == 1 and payload_type == checker._TYPE_DELTA) or
+        (minor_version == 2 and payload_type == checker._TYPE_DELTA) or
+        (minor_version == 3 and payload_type == checker._TYPE_DELTA))
+    args = (report,)
+
+    if should_succeed:
+      self.assertIsNone(payload_checker._CheckManifestMinorVersion(*args))
+    else:
+      self.assertRaises(update_payload.PayloadError,
+                        payload_checker._CheckManifestMinorVersion, *args)
+
+  def DoRunTest(self, rootfs_part_size_provided, kernel_part_size_provided,
+                fail_wrong_payload_type, fail_invalid_block_size,
+                fail_mismatched_block_size, fail_excess_data,
+                fail_rootfs_part_size_exceeded,
+                fail_kernel_part_size_exceeded):
+    # Generate a test payload. For this test, we generate a full update that
+    # has sample kernel and rootfs operations. Since most testing is done with
+    # internal PayloadChecker methods that are tested elsewhere, here we only
+    # tamper with what's actually being manipulated and/or tested in the Run()
+    # method itself. Note that the checker doesn't verify partition hashes, so
+    # they're safe to fake.
+    payload_gen = test_utils.EnhancedPayloadGenerator()
+    block_size = test_utils.KiB(4)
+    payload_gen.SetBlockSize(block_size)
+    kernel_filesystem_size = test_utils.KiB(16)
+    rootfs_filesystem_size = test_utils.MiB(2)
+    payload_gen.SetPartInfo(False, True, rootfs_filesystem_size,
+                            hashlib.sha256('fake-new-rootfs-content').digest())
+    payload_gen.SetPartInfo(True, True, kernel_filesystem_size,
+                            hashlib.sha256('fake-new-kernel-content').digest())
+    payload_gen.SetMinorVersion(0)
+
+    rootfs_part_size = 0
+    if rootfs_part_size_provided:
+      rootfs_part_size = rootfs_filesystem_size + block_size
+    rootfs_op_size = rootfs_part_size or rootfs_filesystem_size
+    if fail_rootfs_part_size_exceeded:
+      rootfs_op_size += block_size
+    payload_gen.AddOperationWithData(
+        False, common.OpType.REPLACE,
+        dst_extents=[(0, rootfs_op_size / block_size)],
+        data_blob=os.urandom(rootfs_op_size))
+
+    kernel_part_size = 0
+    if kernel_part_size_provided:
+      kernel_part_size = kernel_filesystem_size + block_size
+    kernel_op_size = kernel_part_size or kernel_filesystem_size
+    if fail_kernel_part_size_exceeded:
+      kernel_op_size += block_size
+    payload_gen.AddOperationWithData(
+        True, common.OpType.REPLACE,
+        dst_extents=[(0, kernel_op_size / block_size)],
+        data_blob=os.urandom(kernel_op_size))
+
+    # Generate payload (complete w/ signature) and create the test object.
+    if fail_invalid_block_size:
+      use_block_size = block_size + 5  # Not a power of two.
+    elif fail_mismatched_block_size:
+      use_block_size = block_size * 2  # Different from what the payload states.
+    else:
+      use_block_size = block_size
+
+    kwargs = {
+        'payload_gen_dargs': {
+            'privkey_file_name': test_utils._PRIVKEY_FILE_NAME,
+            'do_add_pseudo_operation': True,
+            'is_pseudo_in_kernel': True,
+            'padding': os.urandom(1024) if fail_excess_data else None},
+        'checker_init_dargs': {
+            'assert_type': 'delta' if fail_wrong_payload_type else 'full',
+            'block_size': use_block_size}}
+    if fail_invalid_block_size:
+      self.assertRaises(update_payload.PayloadError, _GetPayloadChecker,
+                        payload_gen.WriteToFileWithData, **kwargs)
+    else:
+      payload_checker = _GetPayloadChecker(payload_gen.WriteToFileWithData,
+                                           **kwargs)
+
+      kwargs = {'pubkey_file_name': test_utils._PUBKEY_FILE_NAME,
+                'rootfs_part_size': rootfs_part_size,
+                'kernel_part_size': kernel_part_size}
+      should_fail = (fail_wrong_payload_type or fail_mismatched_block_size or
+                     fail_excess_data or
+                     fail_rootfs_part_size_exceeded or
+                     fail_kernel_part_size_exceeded)
+      if should_fail:
+        self.assertRaises(update_payload.PayloadError, payload_checker.Run,
+                          **kwargs)
+      else:
+        self.assertIsNone(payload_checker.Run(**kwargs))
+
+# This implements a generic API, hence the occasional unused args.
+# pylint: disable=W0613
+def ValidateCheckOperationTest(op_type_name, is_last, allow_signature,
+                               allow_unhashed, fail_src_extents,
+                               fail_dst_extents,
+                               fail_mismatched_data_offset_length,
+                               fail_missing_dst_extents, fail_src_length,
+                               fail_dst_length, fail_data_hash,
+                               fail_prev_data_offset, fail_bad_minor_version):
+  """Returns True iff the combination of arguments represents a valid test."""
+  op_type = _OpTypeByName(op_type_name)
+
+  # REPLACE/REPLACE_BZ operations don't read data from src partition. They are
+  # compatible with all valid minor versions, so we don't need to check that.
+  if (op_type in (common.OpType.REPLACE, common.OpType.REPLACE_BZ) and (
+      fail_src_extents or fail_src_length or fail_bad_minor_version)):
+    return False
+
+  # MOVE and SOURCE_COPY operations don't carry data.
+  if (op_type in (common.OpType.MOVE, common.OpType.SOURCE_COPY) and (
+      fail_mismatched_data_offset_length or fail_data_hash or
+      fail_prev_data_offset)):
+    return False
+
+  return True
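+
+# For example, a combination with op_type_name='REPLACE' and
+# fail_src_extents=True is rejected (returns False): REPLACE operations carry
+# no src extents, so there is nothing to tamper with.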
+
+
+def TestMethodBody(run_method_name, run_dargs):
+  """Returns a function that invokes a named method with named arguments."""
+  return lambda self: getattr(self, run_method_name)(**run_dargs)
+
+
+def AddParametricTests(tested_method_name, arg_space, validate_func=None):
+  """Enumerates and adds specific parametric tests to PayloadCheckerTest.
+
+  This function enumerates a space of test parameters (defined by arg_space),
+  then binds a new, unique method name in PayloadCheckerTest to a test function
+  that gets handed those parameters. This is preferable to enumerating and
+  invoking within the tests themselves, as it makes each parameter combination
+  a separate test case in the unittest framework, complete with the usual
+  setUp/tearDown mechanics.
+
+  Args:
+    tested_method_name: Name of the tested PayloadChecker method.
+    arg_space: A dictionary containing variables (keys) and lists of values
+               (values) associated with them.
+    validate_func: A function used for validating test argument combinations.
+  """
+  for value_tuple in itertools.product(*arg_space.itervalues()):
+    run_dargs = dict(zip(arg_space.iterkeys(), value_tuple))
+    if validate_func and not validate_func(**run_dargs):
+      continue
+    run_method_name = 'Do%sTest' % tested_method_name
+    test_method_name = 'test%s' % tested_method_name
+    for arg_key, arg_val in run_dargs.iteritems():
+      if arg_val or type(arg_val) is int:
+        test_method_name += '__%s=%s' % (arg_key, arg_val)
+    setattr(PayloadCheckerTest, test_method_name,
+            TestMethodBody(run_method_name, run_dargs))
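+
+# For example, AddParametricTests('CheckManifest',
+#                                 {'fail_bad_sigs': (True, False)}) binds two
+# methods, 'testCheckManifest__fail_bad_sigs=True' and 'testCheckManifest',
+# each invoking DoCheckManifestTest() with the corresponding argument value.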
+
+
+def AddAllParametricTests():
+  """Enumerates and adds all parametric tests to PayloadCheckerTest."""
+  # Add all _CheckElem() test cases.
+  AddParametricTests('AddElem',
+                     {'linebreak': (True, False),
+                      'indent': (0, 1, 2),
+                      'convert': (str, lambda s: s[::-1]),
+                      'is_present': (True, False),
+                      'is_mandatory': (True, False),
+                      'is_submsg': (True, False)})
+
+  # Add all _Add{Mandatory,Optional}Field tests.
+  AddParametricTests('AddField',
+                     {'is_mandatory': (True, False),
+                      'linebreak': (True, False),
+                      'indent': (0, 1, 2),
+                      'convert': (str, lambda s: s[::-1]),
+                      'is_present': (True, False)})
+
+  # Add all _Add{Mandatory,Optional}SubMsg tests.
+  AddParametricTests('AddSubMsg',
+                     {'is_mandatory': (True, False),
+                      'is_present': (True, False)})
+
+  # Add all _CheckManifest() test cases.
+  AddParametricTests('CheckManifest',
+                     {'fail_mismatched_block_size': (True, False),
+                      'fail_bad_sigs': (True, False),
+                      'fail_mismatched_oki_ori': (True, False),
+                      'fail_bad_oki': (True, False),
+                      'fail_bad_ori': (True, False),
+                      'fail_bad_nki': (True, False),
+                      'fail_bad_nri': (True, False),
+                      'fail_old_kernel_fs_size': (True, False),
+                      'fail_old_rootfs_fs_size': (True, False),
+                      'fail_new_kernel_fs_size': (True, False),
+                      'fail_new_rootfs_fs_size': (True, False)})
+
+  # Add all _CheckOperation() test cases.
+  AddParametricTests('CheckOperation',
+                     {'op_type_name': ('REPLACE', 'REPLACE_BZ', 'MOVE',
+                                       'BSDIFF', 'SOURCE_COPY',
+                                       'SOURCE_BSDIFF'),
+                      'is_last': (True, False),
+                      'allow_signature': (True, False),
+                      'allow_unhashed': (True, False),
+                      'fail_src_extents': (True, False),
+                      'fail_dst_extents': (True, False),
+                      'fail_mismatched_data_offset_length': (True, False),
+                      'fail_missing_dst_extents': (True, False),
+                      'fail_src_length': (True, False),
+                      'fail_dst_length': (True, False),
+                      'fail_data_hash': (True, False),
+                      'fail_prev_data_offset': (True, False),
+                      'fail_bad_minor_version': (True, False)},
+                     validate_func=ValidateCheckOperationTest)
+
+  # Add all _CheckOperations() test cases.
+  AddParametricTests('CheckOperations',
+                     {'fail_nonexhaustive_full_update': (True, False)})
+
+  # Add all _CheckSignatures() test cases.
+  AddParametricTests('CheckSignatures',
+                     {'fail_empty_sigs_blob': (True, False),
+                      'fail_missing_pseudo_op': (True, False),
+                      'fail_mismatched_pseudo_op': (True, False),
+                      'fail_sig_missing_fields': (True, False),
+                      'fail_unknown_sig_version': (True, False),
+                      'fail_incorrect_sig': (True, False)})
+
+  # Add all _CheckManifestMinorVersion() test cases.
+  AddParametricTests('CheckManifestMinorVersion',
+                     {'minor_version': (None, 0, 1, 2, 3, 555),
+                      'payload_type': (checker._TYPE_FULL,
+                                       checker._TYPE_DELTA)})
+
+  # Add all Run() test cases.
+  AddParametricTests('Run',
+                     {'rootfs_part_size_provided': (True, False),
+                      'kernel_part_size_provided': (True, False),
+                      'fail_wrong_payload_type': (True, False),
+                      'fail_invalid_block_size': (True, False),
+                      'fail_mismatched_block_size': (True, False),
+                      'fail_excess_data': (True, False),
+                      'fail_rootfs_part_size_exceeded': (True, False),
+                      'fail_kernel_part_size_exceeded': (True, False)})
+
+
+if __name__ == '__main__':
+  AddAllParametricTests()
+  unittest.main()
diff --git a/scripts/update_payload/common.py b/scripts/update_payload/common.py
new file mode 100644
index 0000000..88cd6ed
--- /dev/null
+++ b/scripts/update_payload/common.py
@@ -0,0 +1,203 @@
+# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Utilities for update payload processing."""
+
+from __future__ import print_function
+
+from error import PayloadError
+import update_metadata_pb2
+
+
+#
+# Constants.
+#
+PSEUDO_EXTENT_MARKER = (1L << 64) - 1  # UINT64_MAX
+
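+# DER-encoded ASN.1 DigestInfo header for a SHA-256 digest, to which the raw
+# 32-byte hash is appended when constructing a PKCS#1 v1.5 signature.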
+SIG_ASN1_HEADER = (
+    '\x30\x31\x30\x0d\x06\x09\x60\x86'
+    '\x48\x01\x65\x03\x04\x02\x01\x05'
+    '\x00\x04\x20'
+)
+
+CHROMEOS_MAJOR_PAYLOAD_VERSION = 1
+BRILLO_MAJOR_PAYLOAD_VERSION = 2
+
+INPLACE_MINOR_PAYLOAD_VERSION = 1
+SOURCE_MINOR_PAYLOAD_VERSION = 2
+OPSRCHASH_MINOR_PAYLOAD_VERSION = 3
+
+#
+# Payload operation types.
+#
+class OpType(object):
+  """Container for operation type constants."""
+  _CLASS = update_metadata_pb2.InstallOperation
+  # pylint: disable=E1101
+  REPLACE = _CLASS.REPLACE
+  REPLACE_BZ = _CLASS.REPLACE_BZ
+  MOVE = _CLASS.MOVE
+  BSDIFF = _CLASS.BSDIFF
+  SOURCE_COPY = _CLASS.SOURCE_COPY
+  SOURCE_BSDIFF = _CLASS.SOURCE_BSDIFF
+  ZERO = _CLASS.ZERO
+  DISCARD = _CLASS.DISCARD
+  REPLACE_XZ = _CLASS.REPLACE_XZ
+  IMGDIFF = _CLASS.IMGDIFF
+  ALL = (REPLACE, REPLACE_BZ, MOVE, BSDIFF, SOURCE_COPY, SOURCE_BSDIFF, ZERO,
+         DISCARD, REPLACE_XZ, IMGDIFF)
+  NAMES = {
+      REPLACE: 'REPLACE',
+      REPLACE_BZ: 'REPLACE_BZ',
+      MOVE: 'MOVE',
+      BSDIFF: 'BSDIFF',
+      SOURCE_COPY: 'SOURCE_COPY',
+      SOURCE_BSDIFF: 'SOURCE_BSDIFF',
+      ZERO: 'ZERO',
+      DISCARD: 'DISCARD',
+      REPLACE_XZ: 'REPLACE_XZ',
+      IMGDIFF: 'IMGDIFF',
+  }
+
+  def __init__(self):
+    pass
+
+
+#
+# Checked and hashed reading of data.
+#
+def IntPackingFmtStr(size, is_unsigned):
+  """Returns an integer format string for use by the struct module.
+
+  Args:
+    size: the integer size in bytes (2, 4 or 8)
+    is_unsigned: whether the integer is unsigned
+
+  Returns:
+    A format string for packing/unpacking integer values; assumes network byte
+    order (big-endian).
+
+  Raises:
+    PayloadError if something is wrong with the arguments.
+  """
+  # Determine the base conversion format.
+  if size == 2:
+    fmt = 'h'
+  elif size == 4:
+    fmt = 'i'
+  elif size == 8:
+    fmt = 'q'
+  else:
+    raise PayloadError('unsupported numeric field size (%s)' % size)
+
+  # Signed or unsigned?
+  if is_unsigned:
+    fmt = fmt.upper()
+
+  # Make it network byte order (big-endian).
+  fmt = '!' + fmt
+
+  return fmt
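+
+# Illustrative example: IntPackingFmtStr(4, True) returns '!I', so
+# struct.pack(IntPackingFmtStr(4, True), 258) yields '\x00\x00\x01\x02'.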
+
+
+def Read(file_obj, length, offset=None, hasher=None):
+  """Reads binary data from a file.
+
+  Args:
+    file_obj: an open file object
+    length: the length of the data to read
+    offset: an offset to seek to prior to reading; this is an absolute offset
+            from either the beginning (non-negative) or end (negative) of the
+            file.  (optional)
+    hasher: a hashing object to pass the read data through (optional)
+
+  Returns:
+    A string containing the read data.
+
+  Raises:
+    PayloadError if a read error occurred or not enough data was read.
+  """
+  if offset is not None:
+    if offset >= 0:
+      file_obj.seek(offset)
+    else:
+      file_obj.seek(offset, 2)
+
+  try:
+    data = file_obj.read(length)
+  except IOError, e:
+    raise PayloadError('error reading from file (%s): %s' % (file_obj.name, e))
+
+  if len(data) != length:
+    raise PayloadError(
+        'reading from file (%s) too short (%d instead of %d bytes)' %
+        (file_obj.name, len(data), length))
+
+  if hasher:
+    hasher.update(data)
+
+  return data
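+
+# Illustrative example: reading a big-endian uint64 from the start of a file
+# while feeding the bytes through a hash (essentially what the payload
+# module's _ReadInt() helper does):
+#   hasher = hashlib.sha256()
+#   num = struct.unpack(IntPackingFmtStr(8, True),
+#                       Read(file_obj, 8, offset=0, hasher=hasher))[0]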
+
+
+#
+# Formatting functions.
+#
+def FormatExtent(ex, block_size=0):
+  end_block = ex.start_block + ex.num_blocks
+  if block_size:
+    return '%d->%d * %d' % (ex.start_block, end_block, block_size)
+  else:
+    return '%d->%d' % (ex.start_block, end_block)
+
+
+def FormatSha256(digest):
+  """Returns a canonical string representation of a SHA256 digest."""
+  return digest.encode('base64').strip()
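+
+# A 32-byte SHA256 digest encodes to a 44-character base64 string, e.g.
+# FormatSha256(hashlib.sha256('abc').digest()).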
+
+
+#
+# Useful iterators.
+#
+def _ObjNameIter(items, base_name, reverse=False, name_format_func=None):
+  """A generic (item, name) tuple iterators.
+
+  Args:
+    items: the sequence of objects to iterate on
+    base_name: the base name for all objects
+    reverse: whether iteration should be in reverse order
+    name_format_func: a function to apply to the name string
+
+  Yields:
+    (item, name) tuples, where name == base_name + '[i]' (i being the 1-based
+    item index, with a formatting function optionally applied to the name).
+  """
+  idx, inc = (len(items), -1) if reverse else (1, 1)
+  if reverse:
+    items = reversed(items)
+  for item in items:
+    item_name = '%s[%d]' % (base_name, idx)
+    if name_format_func:
+      item_name = name_format_func(item, item_name)
+    yield (item, item_name)
+    idx += inc
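+
+# For example, _ObjNameIter(['a', 'b'], 'op') yields ('a', 'op[1]') then
+# ('b', 'op[2]'); with reverse=True it yields ('b', 'op[2]') then
+# ('a', 'op[1]').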
+
+
+def _OperationNameFormatter(op, op_name):
+  return '%s(%s)' % (op_name, OpType.NAMES.get(op.type, '?'))
+
+
+def OperationIter(operations, base_name, reverse=False):
+  """An (item, name) iterator for update operations."""
+  return _ObjNameIter(operations, base_name, reverse=reverse,
+                      name_format_func=_OperationNameFormatter)
+
+
+def ExtentIter(extents, base_name, reverse=False):
+  """An (item, name) iterator for operation extents."""
+  return _ObjNameIter(extents, base_name, reverse=reverse)
+
+
+def SignatureIter(sigs, base_name, reverse=False):
+  """An (item, name) iterator for signatures."""
+  return _ObjNameIter(sigs, base_name, reverse=reverse)
diff --git a/scripts/update_payload/error.py b/scripts/update_payload/error.py
new file mode 100644
index 0000000..8b9cadd
--- /dev/null
+++ b/scripts/update_payload/error.py
@@ -0,0 +1,9 @@
+# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Payload handling errors."""
+
+
+class PayloadError(Exception):
+  """An update payload general processing error."""
diff --git a/scripts/update_payload/format_utils.py b/scripts/update_payload/format_utils.py
new file mode 100644
index 0000000..2c3775c
--- /dev/null
+++ b/scripts/update_payload/format_utils.py
@@ -0,0 +1,97 @@
+# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Various formatting functions."""
+
+
+def NumToPercent(num, total, min_precision=1, max_precision=5):
+  """Returns the percentage (string) of |num| out of |total|.
+
+  If the percentage includes a fraction, it will be computed down to the least
+  precision that yields a non-zero value, bounded between |min_precision| and
+  |max_precision|. Values are always rounded down. All arithmetic operations
+  are integer built-ins. Examples (using default precision):
+
+    (1, 1) => 100%
+    (3, 10) => 30%
+    (3, 9) => 33.3%
+    (3, 900) => 0.3%
+    (3, 9000000) => 0.00003%
+    (3, 900000000) => 0%
+    (5, 2) => 250%
+
+  Args:
+    num: the value of the part
+    total: the value of the whole
+    min_precision: minimum precision for fractional percentage
+    max_precision: maximum precision for fractional percentage
+  Returns:
+    Percentage string, or None if percent cannot be computed (i.e. total is
+    zero).
+
+  """
+  if total == 0:
+    return None
+
+  percent = 0
+  precision = min(min_precision, max_precision)
+  factor = 10 ** precision
+  while precision <= max_precision:
+    percent = num * 100 * factor / total
+    if percent:
+      break
+    factor *= 10
+    precision += 1
+
+  whole, frac = divmod(percent, factor)
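+  # Strip trailing zeros from the fraction, reducing precision accordingly
+  # (e.g. a raw result of 30.10% is emitted as 30.1%).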
+  while frac and not frac % 10:
+    frac /= 10
+    precision -= 1
+
+  return '%d%s%%' % (whole, '.%0*d' % (precision, frac) if frac else '')
+
+
+def BytesToHumanReadable(size, precision=1, decimal=False):
+  """Returns a human readable representation of a given |size|.
+
+  The returned string includes unit notations in either binary (KiB, MiB, etc)
+  or decimal (kB, MB, etc), based on the value of |decimal|. The chosen unit is
+  the largest that yields a whole (or mixed) number. It may contain up to
+  |precision| fractional digits. Values are always rounded down. Largest unit
+  is an exabyte. All arithmetic operations are integer built-ins. Examples
+  (using default precision and binary units):
+
+    4096 => 4 KiB
+    5000 => 4.8 KiB
+    500000 => 488.2 KiB
+    5000000 => 4.7 MiB
+
+  Args:
+    size: the size in bytes
+    precision: the number of digits past the decimal point
+    decimal: whether to compute/present decimal or binary units
+  Returns:
+    Readable size string, or None if no conversion is applicable (i.e. size is
+    less than the smallest unit).
+
+  """
+  constants = (
+      (('KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB'), 1024),
+      (('kB', 'MB', 'GB', 'TB', 'PB', 'EB'), 1000)
+  )
+  suffixes, base = constants[decimal]
+  exp, magnitude = 0, 1
+  while exp < len(suffixes):
+    next_magnitude = magnitude * base
+    if size < next_magnitude:
+      break
+    exp += 1
+    magnitude = next_magnitude
+
+  if exp != 0:
+    whole = size / magnitude
+    frac = (size % magnitude) * (10 ** precision) / magnitude
+    while frac and not frac % 10:
+      frac /= 10
+    return '%d%s %s' % (whole, '.%d' % frac if frac else '', suffixes[exp - 1])
diff --git a/scripts/update_payload/format_utils_unittest.py b/scripts/update_payload/format_utils_unittest.py
new file mode 100755
index 0000000..8c5ba8e
--- /dev/null
+++ b/scripts/update_payload/format_utils_unittest.py
@@ -0,0 +1,76 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Unit tests for format_utils.py."""
+
+import unittest
+
+import format_utils
+
+
+class NumToPercentTest(unittest.TestCase):
+  def testHundredPercent(self):
+    self.assertEqual(format_utils.NumToPercent(1, 1), '100%')
+
+  def testOverHundredPercent(self):
+    self.assertEqual(format_utils.NumToPercent(5, 2), '250%')
+
+  def testWholePercent(self):
+    self.assertEqual(format_utils.NumToPercent(3, 10), '30%')
+
+  def testDefaultMinPrecision(self):
+    self.assertEqual(format_utils.NumToPercent(3, 9), '33.3%')
+    self.assertEqual(format_utils.NumToPercent(3, 900), '0.3%')
+
+  def testDefaultMaxPrecision(self):
+    self.assertEqual(format_utils.NumToPercent(3, 9000000), '0.00003%')
+    self.assertEqual(format_utils.NumToPercent(3, 90000000), '0%')
+
+  def testCustomMinPrecision(self):
+    self.assertEqual(format_utils.NumToPercent(3, 9, min_precision=3),
+                     '33.333%')
+    self.assertEqual(format_utils.NumToPercent(3, 9, min_precision=0),
+                     '33%')
+
+  def testCustomMaxPrecision(self):
+    self.assertEqual(format_utils.NumToPercent(3, 900, max_precision=1),
+                     '0.3%')
+    self.assertEqual(format_utils.NumToPercent(3, 9000, max_precision=1),
+                     '0%')
+
+
+class BytesToHumanReadableTest(unittest.TestCase):
+  def testBaseTwo(self):
+    self.assertEqual(format_utils.BytesToHumanReadable(0x1000), '4 KiB')
+    self.assertEqual(format_utils.BytesToHumanReadable(0x400000), '4 MiB')
+    self.assertEqual(format_utils.BytesToHumanReadable(0x100000000), '4 GiB')
+    self.assertEqual(format_utils.BytesToHumanReadable(0x40000000000), '4 TiB')
+
+  def testDecimal(self):
+    self.assertEqual(format_utils.BytesToHumanReadable(5000, decimal=True),
+                     '5 kB')
+    self.assertEqual(format_utils.BytesToHumanReadable(5000000, decimal=True),
+                     '5 MB')
+    self.assertEqual(format_utils.BytesToHumanReadable(5000000000,
+                                                       decimal=True),
+                     '5 GB')
+
+  def testDefaultPrecision(self):
+    self.assertEqual(format_utils.BytesToHumanReadable(5000), '4.8 KiB')
+    self.assertEqual(format_utils.BytesToHumanReadable(500000), '488.2 KiB')
+    self.assertEqual(format_utils.BytesToHumanReadable(5000000), '4.7 MiB')
+
+  def testCustomPrecision(self):
+    self.assertEqual(format_utils.BytesToHumanReadable(5000, precision=3),
+                     '4.882 KiB')
+    self.assertEqual(format_utils.BytesToHumanReadable(500000, precision=0),
+                     '488 KiB')
+    self.assertEqual(format_utils.BytesToHumanReadable(5000000, precision=5),
+                     '4.76837 MiB')
+
+
+if __name__ == '__main__':
+  unittest.main()
diff --git a/scripts/update_payload/histogram.py b/scripts/update_payload/histogram.py
new file mode 100644
index 0000000..9916329
--- /dev/null
+++ b/scripts/update_payload/histogram.py
@@ -0,0 +1,117 @@
+# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Histogram generation tools."""
+
+from collections import defaultdict
+
+import format_utils
+
+
+class Histogram(object):
+  """A histogram generating object.
+
+  This object serves the sole purpose of formatting (key, val) pairs as an
+  ASCII histogram, including bars and percentage markers, and taking care of
+  label alignment, scaling, etc. In addition to the standard __init__
+  interface, two static methods are provided for conveniently converting data
+  in different formats into a histogram. Histogram generation is exported via
+  its __str__ method, and looks as follows:
+
+    Yes |################    | 5 (83.3%)
+    No  |###                 | 1 (16.6%)
+
+  TODO(garnold) we may want to add actual methods for adding data or tweaking
+  the output layout and formatting. For now, though, this is fine.
+
+  """
+
+  def __init__(self, data, scale=20, formatter=None):
+    """Initialize a histogram object.
+
+    Args:
+      data: list of (key, count) pairs constituting the histogram
+      scale: number of characters used to indicate 100%
+      formatter: function used for formatting raw histogram values
+
+    """
+    self.data = data
+    self.scale = scale
+    self.formatter = formatter or str
+    self.max_key_len = max([len(str(key)) for key, count in self.data])
+    self.total = sum([count for key, count in self.data])
+
+  @staticmethod
+  def FromCountDict(count_dict, scale=20, formatter=None, key_names=None):
+    """Takes a dictionary of counts and returns a histogram object.
+
+    This simply converts a mapping from names to counts into a list of (key,
+    count) pairs, optionally translating keys into name strings, then
+    generating and returning a histogram for them. This is a useful convenience
+    call for clients that update a dictionary of counters as they (say) scan a
+    data stream.
+
+    Args:
+      count_dict: dictionary mapping keys to occurrence counts
+      scale: number of characters used to indicate 100%
+      formatter: function used for formatting raw histogram values
+      key_names: dictionary mapping keys to name strings
+    Returns:
+      A histogram object based on the given data.
+
+    """
+    if key_names:
+      namer = lambda key: key_names[key]
+    else:
+      namer = lambda key: key
+
+    hist = [(namer(key), count) for key, count in count_dict.items()]
+    return Histogram(hist, scale, formatter)
+
+  @staticmethod
+  def FromKeyList(key_list, scale=20, formatter=None, key_names=None):
+    """Takes a list of (possibly recurring) keys and returns a histogram object.
+
+    This converts the list into a dictionary of counters, then uses
+    FromCountDict() to generate the actual histogram. For example:
+
+      ['a', 'a', 'b', 'a', 'b'] --> {'a': 3, 'b': 2} --> ...
+
+    Args:
+      key_list: list of (possibly recurring) keys
+      scale: number of characters used to indicate 100%
+      formatter: function used for formatting raw histogram values
+      key_names: dictionary mapping keys to name strings
+    Returns:
+      A histogram object based on the given data.
+
+    """
+    count_dict = defaultdict(int)  # Unset items default to zero
+    for key in key_list:
+      count_dict[key] += 1
+    return Histogram.FromCountDict(count_dict, scale, formatter, key_names)
+
+  def __str__(self):
+    hist_lines = []
+    hist_bar = '|'
+    for key, count in self.data:
+      if self.total:
+        bar_len = count * self.scale / self.total
+        hist_bar = '|%s|' % ('#' * bar_len).ljust(self.scale)
+
+      line = '%s %s %s' % (
+          str(key).ljust(self.max_key_len),
+          hist_bar,
+          self.formatter(count))
+      percent_str = format_utils.NumToPercent(count, self.total)
+      if percent_str:
+        line += ' (%s)' % percent_str
+      hist_lines.append(line)
+
+    return '\n'.join(hist_lines)
+
+  def GetKeys(self):
+    """Returns the keys of the histogram."""
+    return [key for key, _ in self.data]
diff --git a/scripts/update_payload/histogram_unittest.py b/scripts/update_payload/histogram_unittest.py
new file mode 100755
index 0000000..421ff20
--- /dev/null
+++ b/scripts/update_payload/histogram_unittest.py
@@ -0,0 +1,60 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Unit tests for histogram.py."""
+
+import unittest
+
+import format_utils
+import histogram
+
+
+class HistogramTest(unittest.TestCase):
+
+  @staticmethod
+  def AddHumanReadableSize(size):
+    fmt = format_utils.BytesToHumanReadable(size)
+    return '%s (%s)' % (size, fmt) if fmt else str(size)
+
+  def CompareToExpectedDefault(self, actual_str):
+    expected_str = (
+        'Yes |################    | 5 (83.3%)\n'
+        'No  |###                 | 1 (16.6%)'
+    )
+    self.assertEqual(actual_str, expected_str)
+
+  def testExampleHistogram(self):
+    self.CompareToExpectedDefault(str(histogram.Histogram(
+        [('Yes', 5), ('No', 1)])))
+
+  def testFromCountDict(self):
+    self.CompareToExpectedDefault(str(histogram.Histogram.FromCountDict(
+        {'Yes': 5, 'No': 1})))
+
+  def testFromKeyList(self):
+    self.CompareToExpectedDefault(str(histogram.Histogram.FromKeyList(
+        ['Yes', 'Yes', 'No', 'Yes', 'Yes', 'Yes'])))
+
+  def testCustomScale(self):
+    expected_str = (
+        'Yes |#### | 5 (83.3%)\n'
+        'No  |     | 1 (16.6%)'
+    )
+    actual_str = str(histogram.Histogram([('Yes', 5), ('No', 1)], scale=5))
+    self.assertEqual(actual_str, expected_str)
+
+  def testCustomFormatter(self):
+    expected_str = (
+        'Yes |################    | 5000 (4.8 KiB) (83.3%)\n'
+        'No  |###                 | 1000 (16.6%)'
+    )
+    actual_str = str(histogram.Histogram(
+        [('Yes', 5000), ('No', 1000)], formatter=self.AddHumanReadableSize))
+    self.assertEqual(actual_str, expected_str)
+
+
+if __name__ == '__main__':
+  unittest.main()
diff --git a/scripts/update_payload/payload-test-key.pem b/scripts/update_payload/payload-test-key.pem
new file mode 100644
index 0000000..342e923
--- /dev/null
+++ b/scripts/update_payload/payload-test-key.pem
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpQIBAAKCAQEAvtGHtqO21Uhy2wGz9fluIpIUR8G7dZoCZhZukGkm4mlfgL71
+xPSArjx02/w/FhYxOusV6/XQeKgL3i8cni3HCkCOurZLpi2L5Ver6qrxKFh6WBVZ
+0Dj7N6P/Mf5jZdhfvVyweLlsNK8Ypeb+RazfrsXhd4cy3dBMxouGwH7R7QQXTFCo
+Cc8kgJBTxILl3jfvY8OrNKgYiCETa7tQdFkP0bfPwH9cAXuMjHXiZatim0tF+ivp
+kM2v/6LTxtD6Rq1wks/N6CHi8efrRaviFp7c0mNmBNFaV54cHEUW2SlNIiRun7L0
+1nAz/D8kuoHfx4E3Mtj0DbvngZJMX/X+rJQ5cQIDAQABAoIBADmE2X7hbJxwAUcp
+BUExFdTP6dMTf9lcOjrhqiRXvgPjtYkOhvD+rsdWq/cf2zhiKibTdEEzUMr+BM3N
+r7eyntvlR+DaUIVgF1pjigvryVPbD837aZ5NftRv194PC5FInttq1Dsf0ZEz8p8X
+uS/xg1+ggG1SUK/yOSJkLpNZ5xelbclQJ9bnJST8PR8XbEieA83xt5M2DcooPzq0
+/99m/daA5hmSWs6n8sFrIZDQxDhLyyW4J72jjoNTE87eCpwK855yXMelpEPDZNQi
+nB3x5Y/bGbl81PInqL2q14lekrVYdYZ7bOBVlsmyvz6f1e4OOE1aaAM+w6ArA4az
+6elZQE0CgYEA4GOU6BBu9jLqFdqV9jIkWsgz5ZWINz8PLJPtZzk5I9KO1m+GAUy2
+h/1IGGR6qRQR49hMtq4C0lUifxquq0xivzJ87U9oxKC9yEeTxkmDe5csVHsnAtqT
+xRgVM7Ysrut5NLU1zm0q3jBmkDu7d99LvscM/3n7eJ6RiYpnA54O6I8CgYEA2bNA
+34PTvxBS2deRoxKQNlVU14FtirE+q0+k0wcE85wr7wIMpR13al8T1TpE8J1yvvZM
+92HMGFGfYNDB46b8VfJ5AxEUFwdruec6sTVVfkMZMOqM/A08yiaLzQ1exDxNwaja
+fLuG5FAVRD/2g7fLBcsmosyNgcgNr1XA8Q/nvf8CgYEAwaSOg7py19rWcqehlMZu
+4z00tCNYWzz7LmA2l0clzYlPJTU3MvXt6+ujhRFpXXJpgfRPN7Nx0ewQihoPtNqF
+uTSr5OwLoOyK+0Tx/UPByS2L3xgscWUJ8yQ2X9sOMqIZhmf/mDZTsU2ZpU03GlrE
+dk43JF4zq0NEm6qp/dAwU3cCgYEAvECl+KKmmLIk8vvWlI2Y52Mi2rixYR2kc7+L
+aHDJd1+1HhlHlgDFItbU765Trz5322phZArN0rnCeJYNFC9yRWBIBL7gAIoKPdgW
+iOb15xlez04EXHGV/7kVa1wEdu0u0CiTxwjivMwDl+E36u8kQP5LirwYIgI800H0
+doCqhUECgYEAjvA38OS7hy56Q4LQtmHFBuRIn4E5SrIGMwNIH6TGbEKQix3ajTCQ
+0fSoLDGTkU6dH+T4v0WheveN2a2Kofqm0UQx5V2rfnY/Ut1fAAWgL/lsHLDnzPUZ
+bvTOANl8TbT49xAfNXTaGWe7F7nYz+bK0UDif1tJNDLQw7USD5I8lbQ=
+-----END RSA PRIVATE KEY-----
diff --git a/scripts/update_payload/payload-test-key.pub b/scripts/update_payload/payload-test-key.pub
new file mode 100644
index 0000000..fdae963
--- /dev/null
+++ b/scripts/update_payload/payload-test-key.pub
@@ -0,0 +1,9 @@
+-----BEGIN PUBLIC KEY-----
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvtGHtqO21Uhy2wGz9flu
+IpIUR8G7dZoCZhZukGkm4mlfgL71xPSArjx02/w/FhYxOusV6/XQeKgL3i8cni3H
+CkCOurZLpi2L5Ver6qrxKFh6WBVZ0Dj7N6P/Mf5jZdhfvVyweLlsNK8Ypeb+Razf
+rsXhd4cy3dBMxouGwH7R7QQXTFCoCc8kgJBTxILl3jfvY8OrNKgYiCETa7tQdFkP
+0bfPwH9cAXuMjHXiZatim0tF+ivpkM2v/6LTxtD6Rq1wks/N6CHi8efrRaviFp7c
+0mNmBNFaV54cHEUW2SlNIiRun7L01nAz/D8kuoHfx4E3Mtj0DbvngZJMX/X+rJQ5
+cQIDAQAB
+-----END PUBLIC KEY-----
diff --git a/scripts/update_payload/payload.py b/scripts/update_payload/payload.py
new file mode 100644
index 0000000..f76c0de
--- /dev/null
+++ b/scripts/update_payload/payload.py
@@ -0,0 +1,341 @@
+# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Tools for reading, verifying and applying Chrome OS update payloads."""
+
+from __future__ import print_function
+
+import hashlib
+import struct
+
+import applier
+import block_tracer
+import checker
+import common
+from error import PayloadError
+import update_metadata_pb2
+
+
+#
+# Helper functions.
+#
+def _ReadInt(file_obj, size, is_unsigned, hasher=None):
+  """Reads a binary-encoded integer from a file.
+
+  It will do the correct conversion based on the reported size and whether or
+  not a signed number is expected. Assumes a network (big-endian) byte
+  ordering.
+
+  Args:
+    file_obj: a file object
+    size: the integer size in bytes (2, 4 or 8)
+    is_unsigned: whether the integer is unsigned
+    hasher: an optional hasher to pass the value through
+
+  Returns:
+    An "unpacked" (Python) integer value.
+
+  Raises:
+    PayloadError if a read error occurred.
+  """
+  return struct.unpack(common.IntPackingFmtStr(size, is_unsigned),
+                       common.Read(file_obj, size, hasher=hasher))[0]
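+  # For example, size=8 with is_unsigned=True corresponds to the big-endian
+  # struct format '>Q', as produced by common.IntPackingFmtStr (shown here
+  # for illustration only).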
+
+
+#
+# Update payload.
+#
+class Payload(object):
+  """Chrome OS update payload processor."""
+
+  class _PayloadHeader(object):
+    """Update payload header struct."""
+
+    # Header constants; sizes are in bytes.
+    _MAGIC = 'CrAU'
+    _VERSION_SIZE = 8
+    _MANIFEST_LEN_SIZE = 8
+    _METADATA_SIGNATURE_LEN_SIZE = 4
+
+    def __init__(self):
+      self.version = None
+      self.manifest_len = None
+      self.metadata_signature_len = None
+      self.size = None
+
+    def ReadFromPayload(self, payload_file, hasher=None):
+      """Reads the payload header from a file.
+
+      Reads the payload header from the |payload_file| and updates the |hasher|
+      if one is passed. The parsed header is stored in the _PayloadHeader
+      instance attributes.
+
+      Args:
+        payload_file: a file object
+        hasher: an optional hasher to pass the value through
+
+      Returns:
+        None.
+
+      Raises:
+        PayloadError if a read error occurred or the header is invalid.
+      """
+      # Verify magic
+      magic = common.Read(payload_file, len(self._MAGIC), hasher=hasher)
+      if magic != self._MAGIC:
+        raise PayloadError('invalid payload magic: %s' % magic)
+
+      self.version = _ReadInt(payload_file, self._VERSION_SIZE, True,
+                              hasher=hasher)
+      self.manifest_len = _ReadInt(payload_file, self._MANIFEST_LEN_SIZE, True,
+                                   hasher=hasher)
+      self.size = (len(self._MAGIC) + self._VERSION_SIZE +
+                   self._MANIFEST_LEN_SIZE)
+      self.metadata_signature_len = 0
+
+      if self.version == common.BRILLO_MAJOR_PAYLOAD_VERSION:
+        self.size += self._METADATA_SIGNATURE_LEN_SIZE
+        self.metadata_signature_len = _ReadInt(
+            payload_file, self._METADATA_SIGNATURE_LEN_SIZE, True,
+            hasher=hasher)
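+
+      # Resulting layout (sizes in bytes): magic (4) + version (8) +
+      # manifest_len (8), plus metadata_signature_len (4) for Brillo
+      # payloads; that is, self.size is 20 or 24, respectively.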
+
+
+  def __init__(self, payload_file):
+    """Initialize the payload object.
+
+    Args:
+      payload_file: update payload file object open for reading
+    """
+    self.payload_file = payload_file
+    self.manifest_hasher = None
+    self.is_init = False
+    self.header = None
+    self.manifest = None
+    self.data_offset = None
+    self.metadata_signature = None
+    self.metadata_size = None
+
+  def _ReadHeader(self):
+    """Reads and returns the payload header.
+
+    Returns:
+      A payload header object.
+
+    Raises:
+      PayloadError if a read error occurred.
+    """
+    header = self._PayloadHeader()
+    header.ReadFromPayload(self.payload_file, self.manifest_hasher)
+    return header
+
+  def _ReadManifest(self):
+    """Reads and returns the payload manifest.
+
+    Returns:
+      A string containing the payload manifest in binary form.
+
+    Raises:
+      PayloadError if a read error occurred.
+    """
+    if not self.header:
+      raise PayloadError('payload header not present')
+
+    return common.Read(self.payload_file, self.header.manifest_len,
+                       hasher=self.manifest_hasher)
+
+  def _ReadMetadataSignature(self):
+    """Reads and returns the metadata signatures.
+
+    Returns:
+      A string containing the metadata signatures protobuf in binary form or
+      an empty string if no metadata signature is found in the payload.
+
+    Raises:
+      PayloadError if a read error occurred.
+    """
+    if not self.header:
+      raise PayloadError('payload header not present')
+
+    return common.Read(
+        self.payload_file, self.header.metadata_signature_len,
+        offset=self.header.size + self.header.manifest_len)
+
+  def ReadDataBlob(self, offset, length):
+    """Reads and returns a single data blob from the update payload.
+
+    Args:
+      offset: offset of the blob relative to the beginning of the data section
+      length: the blob's length
+
+    Returns:
+      A string containing the raw blob data.
+
+    Raises:
+      PayloadError if a read error occurred.
+    """
+    return common.Read(self.payload_file, length,
+                       offset=self.data_offset + offset)
+
+  def Init(self):
+    """Initializes the payload object.
+
+    This is a prerequisite for any other public API call.
+
+    Raises:
+      PayloadError if object already initialized or fails to initialize
+      correctly.
+    """
+    if self.is_init:
+      raise PayloadError('payload object already initialized')
+
+    # Initialize hash context.
+    # pylint: disable=E1101
+    self.manifest_hasher = hashlib.sha256()
+
+    # Read the file header.
+    self.header = self._ReadHeader()
+
+    # Read the manifest.
+    manifest_raw = self._ReadManifest()
+    self.manifest = update_metadata_pb2.DeltaArchiveManifest()
+    self.manifest.ParseFromString(manifest_raw)
+
+    # Read the metadata signature (if any).
+    metadata_signature_raw = self._ReadMetadataSignature()
+    if metadata_signature_raw:
+      self.metadata_signature = update_metadata_pb2.Signatures()
+      self.metadata_signature.ParseFromString(metadata_signature_raw)
+
+    self.metadata_size = self.header.size + self.header.manifest_len
+    self.data_offset = self.metadata_size + self.header.metadata_signature_len
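+    # Payload layout, as computed above:
+    #   [header | manifest | metadata signature (optional) | data blobs...]
+    # Blob offsets passed to ReadDataBlob() are relative to data_offset.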
+
+    self.is_init = True
+
+  def Describe(self):
+    """Emits the payload embedded description data to standard output."""
+    def _DescribeImageInfo(description, image_info):
+      def _DisplayIndentedValue(name, value):
+        print('  {:<14} {}'.format(name + ':', value))
+
+      print('%s:' % description)
+      _DisplayIndentedValue('Channel', image_info.channel)
+      _DisplayIndentedValue('Board', image_info.board)
+      _DisplayIndentedValue('Version', image_info.version)
+      _DisplayIndentedValue('Key', image_info.key)
+
+      if image_info.build_channel != image_info.channel:
+        _DisplayIndentedValue('Build channel', image_info.build_channel)
+
+      if image_info.build_version != image_info.version:
+        _DisplayIndentedValue('Build version', image_info.build_version)
+
+    if self.manifest.HasField('old_image_info'):
+      # pylint: disable=E1101
+      _DescribeImageInfo('Old Image', self.manifest.old_image_info)
+
+    if self.manifest.HasField('new_image_info'):
+      # pylint: disable=E1101
+      _DescribeImageInfo('New Image', self.manifest.new_image_info)
+
+  def _AssertInit(self):
+    """Raises an exception if the object was not initialized."""
+    if not self.is_init:
+      raise PayloadError('payload object not initialized')
+
+  def ResetFile(self):
+    """Resets the offset of the payload file to right past the manifest."""
+    self.payload_file.seek(self.data_offset)
+
+  def IsDelta(self):
+    """Returns True iff the payload appears to be a delta."""
+    self._AssertInit()
+    return (self.manifest.HasField('old_kernel_info') or
+            self.manifest.HasField('old_rootfs_info') or
+            any(partition.HasField('old_partition_info')
+                for partition in self.manifest.partitions))
+
+  def IsFull(self):
+    """Returns True iff the payload appears to be a full."""
+    return not self.IsDelta()
+
+  def Check(self, pubkey_file_name=None, metadata_sig_file=None,
+            report_out_file=None, assert_type=None, block_size=0,
+            rootfs_part_size=0, kernel_part_size=0, allow_unhashed=False,
+            disabled_tests=()):
+    """Checks the payload integrity.
+
+    Args:
+      pubkey_file_name: public key used for signature verification
+      metadata_sig_file: metadata signature, if verification is desired
+      report_out_file: file object to dump the report to
+      assert_type: assert that payload is either 'full' or 'delta'
+      block_size: expected filesystem / payload block size
+      rootfs_part_size: the size of (physical) rootfs partitions in bytes
+      kernel_part_size: the size of (physical) kernel partitions in bytes
+      allow_unhashed: allow unhashed operation blobs
+      disabled_tests: list of tests to disable
+
+    Raises:
+      PayloadError if payload verification failed.
+    """
+    self._AssertInit()
+
+    # Create a short-lived payload checker object and run it.
+    helper = checker.PayloadChecker(
+        self, assert_type=assert_type, block_size=block_size,
+        allow_unhashed=allow_unhashed, disabled_tests=disabled_tests)
+    helper.Run(pubkey_file_name=pubkey_file_name,
+               metadata_sig_file=metadata_sig_file,
+               rootfs_part_size=rootfs_part_size,
+               kernel_part_size=kernel_part_size,
+               report_out_file=report_out_file)
+
+  def Apply(self, new_kernel_part, new_rootfs_part, old_kernel_part=None,
+            old_rootfs_part=None, bsdiff_in_place=True, bspatch_path=None,
+            truncate_to_expected_size=True):
+    """Applies the update payload.
+
+    Args:
+      new_kernel_part: name of dest kernel partition file
+      new_rootfs_part: name of dest rootfs partition file
+      old_kernel_part: name of source kernel partition file (optional)
+      old_rootfs_part: name of source rootfs partition file (optional)
+      bsdiff_in_place: whether to perform BSDIFF operations in-place (optional)
+      bspatch_path: path to the bspatch binary (optional)
+      truncate_to_expected_size: whether to truncate the resulting partitions
+                                 to their expected sizes, as specified in the
+                                 payload (optional)
+
+    Raises:
+      PayloadError if payload application failed.
+    """
+    self._AssertInit()
+
+    # Create a short-lived payload applier object and run it.
+    helper = applier.PayloadApplier(
+        self, bsdiff_in_place=bsdiff_in_place, bspatch_path=bspatch_path,
+        truncate_to_expected_size=truncate_to_expected_size)
+    helper.Run(new_kernel_part, new_rootfs_part,
+               old_kernel_part=old_kernel_part,
+               old_rootfs_part=old_rootfs_part)
+
+  def TraceBlock(self, block, skip, trace_out_file, is_kernel):
+    """Traces the origin(s) of a given dest partition block.
+
+    The tracing tries to find origins transitively, when possible (it currently
+    only works for move operations, where the mapping of src/dst is
+    one-to-one). It will dump a list of operations and source blocks
+    responsible for the data in the given dest block.
+
+    Args:
+      block: the block number whose origin to trace
+      skip: the number of first origin mappings to skip
+      trace_out_file: file object to dump the trace to
+      is_kernel: trace through kernel (True) or rootfs (False) operations
+    """
+    self._AssertInit()
+
+    # Create a short-lived payload block tracer object and run it.
+    helper = block_tracer.PayloadBlockTracer(self)
+    helper.Run(block, skip, trace_out_file, is_kernel)
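+
+
+# Illustrative usage (a sketch; 'payload.bin' is a hypothetical file name):
+#
+#   with open('payload.bin') as payload_file:
+#     payload = Payload(payload_file)
+#     payload.Init()
+#     payload.Describe()
+#     payload.Check(report_out_file=sys.stdout)  # requires `import sys`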
diff --git a/scripts/update_payload/test_utils.py b/scripts/update_payload/test_utils.py
new file mode 100644
index 0000000..61a91f5
--- /dev/null
+++ b/scripts/update_payload/test_utils.py
@@ -0,0 +1,364 @@
+# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Utilities for unit testing."""
+
+from __future__ import print_function
+
+import cStringIO
+import hashlib
+import os
+import struct
+import subprocess
+
+import common
+import payload
+import update_metadata_pb2
+
+
+class TestError(Exception):
+  """An error during testing of update payload code."""
+
+
+# Private/public RSA keys used for testing.
+_PRIVKEY_FILE_NAME = os.path.join(os.path.dirname(__file__),
+                                  'payload-test-key.pem')
+_PUBKEY_FILE_NAME = os.path.join(os.path.dirname(__file__),
+                                 'payload-test-key.pub')
+
+
+def KiB(count):
+  return count << 10
+
+
+def MiB(count):
+  return count << 20
+
+
+def GiB(count):
+  return count << 30
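+
+# For example, KiB(4) == 4096, MiB(1) == 1048576 and GiB(1) == 1073741824.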
+
+
+def _WriteInt(file_obj, size, is_unsigned, val):
+  """Writes a binary-encoded integer to a file.
+
+  It will do the correct conversion based on the reported size and whether or
+  not a signed number is expected. Assumes a network (big-endian) byte
+  ordering.
+
+  Args:
+    file_obj: a file object
+    size: the integer size in bytes (2, 4 or 8)
+    is_unsigned: whether the integer is unsigned
+    val: integer value to encode
+
+  Raises:
+    PayloadError if a write error occurred.
+  """
+  try:
+    file_obj.write(struct.pack(common.IntPackingFmtStr(size, is_unsigned), val))
+  except IOError as e:
+    raise payload.PayloadError('error writing to file (%s): %s' %
+                               (file_obj.name, e))
+
+
+def _SetMsgField(msg, field_name, val):
+  """Sets or clears a field in a protobuf message."""
+  if val is None:
+    msg.ClearField(field_name)
+  else:
+    setattr(msg, field_name, val)
+
+
+def SignSha256(data, privkey_file_name):
+  """Signs the data's SHA256 hash with an RSA private key.
+
+  Args:
+    data: the data whose SHA256 hash we want to sign
+    privkey_file_name: private key used for signing data
+
+  Returns:
+    A signature string, computed over the data's SHA256 hash prefixed with
+    an ASN1 header.
+
+  Raises:
+    TestError if something goes wrong.
+  """
+  # pylint: disable=E1101
+  data_sha256_hash = common.SIG_ASN1_HEADER + hashlib.sha256(data).digest()
+  sign_cmd = ['openssl', 'rsautl', '-sign', '-inkey', privkey_file_name]
+  try:
+    sign_process = subprocess.Popen(sign_cmd, stdin=subprocess.PIPE,
+                                    stdout=subprocess.PIPE)
+    sig, _ = sign_process.communicate(input=data_sha256_hash)
+  except Exception as e:
+    raise TestError('signing subprocess failed: %s' % e)
+
+  return sig
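+  # For illustration only: the matching verification step would be
+  # `openssl rsautl -verify -pubin -inkey payload-test-key.pub`, which
+  # recovers the ASN1-header-prefixed hash from the signature.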
+
+
+class SignaturesGenerator(object):
+  """Generates a payload signatures data block."""
+
+  def __init__(self):
+    self.sigs = update_metadata_pb2.Signatures()
+
+  def AddSig(self, version, data):
+    """Adds a signature to the signature sequence.
+
+    Args:
+      version: signature version (None means do not assign)
+      data: signature binary data (None means do not assign)
+    """
+    # Pylint fails to identify a member of the Signatures message.
+    # pylint: disable=E1101
+    sig = self.sigs.signatures.add()
+    if version is not None:
+      sig.version = version
+    if data is not None:
+      sig.data = data
+
+  def ToBinary(self):
+    """Returns the binary representation of the signature block."""
+    return self.sigs.SerializeToString()
+
+
+class PayloadGenerator(object):
+  """Generates an update payload allowing low-level control.
+
+  Attributes:
+    manifest: the protobuf containing the payload manifest
+    version: the payload version identifier
+    block_size: the block size pertaining to update operations
+  """
+
+  def __init__(self, version=1):
+    self.manifest = update_metadata_pb2.DeltaArchiveManifest()
+    self.version = version
+    self.block_size = 0
+
+  @staticmethod
+  def _WriteExtent(ex, val):
+    """Returns an Extent message."""
+    start_block, num_blocks = val
+    _SetMsgField(ex, 'start_block', start_block)
+    _SetMsgField(ex, 'num_blocks', num_blocks)
+
+  @staticmethod
+  def _AddValuesToRepeatedField(repeated_field, values, write_func):
+    """Adds values to a repeated message field."""
+    if values:
+      for val in values:
+        new_item = repeated_field.add()
+        write_func(new_item, val)
+
+  @staticmethod
+  def _AddExtents(extents_field, values):
+    """Adds extents to an extents field."""
+    PayloadGenerator._AddValuesToRepeatedField(
+        extents_field, values, PayloadGenerator._WriteExtent)
+
+  def SetBlockSize(self, block_size):
+    """Sets the payload's block size."""
+    self.block_size = block_size
+    _SetMsgField(self.manifest, 'block_size', block_size)
+
+  def SetPartInfo(self, is_kernel, is_new, part_size, part_hash):
+    """Set the partition info entry.
+
+    Args:
+      is_kernel: whether this is kernel partition info
+      is_new: whether to set old (False) or new (True) info
+      part_size: the partition size (in fact, filesystem size)
+      part_hash: the partition hash
+    """
+    if is_kernel:
+      # pylint: disable=E1101
+      part_info = (self.manifest.new_kernel_info if is_new
+                   else self.manifest.old_kernel_info)
+    else:
+      # pylint: disable=E1101
+      part_info = (self.manifest.new_rootfs_info if is_new
+                   else self.manifest.old_rootfs_info)
+    _SetMsgField(part_info, 'size', part_size)
+    _SetMsgField(part_info, 'hash', part_hash)
+
+  def AddOperation(self, is_kernel, op_type, data_offset=None,
+                   data_length=None, src_extents=None, src_length=None,
+                   dst_extents=None, dst_length=None, data_sha256_hash=None):
+    """Adds an InstallOperation entry."""
+    # pylint: disable=E1101
+    operations = (self.manifest.kernel_install_operations if is_kernel
+                  else self.manifest.install_operations)
+
+    op = operations.add()
+    op.type = op_type
+
+    _SetMsgField(op, 'data_offset', data_offset)
+    _SetMsgField(op, 'data_length', data_length)
+
+    self._AddExtents(op.src_extents, src_extents)
+    _SetMsgField(op, 'src_length', src_length)
+
+    self._AddExtents(op.dst_extents, dst_extents)
+    _SetMsgField(op, 'dst_length', dst_length)
+
+    _SetMsgField(op, 'data_sha256_hash', data_sha256_hash)
+
+  def SetSignatures(self, sigs_offset, sigs_size):
+    """Set the payload's signature block descriptors."""
+    _SetMsgField(self.manifest, 'signatures_offset', sigs_offset)
+    _SetMsgField(self.manifest, 'signatures_size', sigs_size)
+
+  def SetMinorVersion(self, minor_version):
+    """Set the payload's minor version field."""
+    _SetMsgField(self.manifest, 'minor_version', minor_version)
+
+  def _WriteHeaderToFile(self, file_obj, manifest_len):
+    """Writes a payload heaer to a file."""
+    # We need to access protected members in Payload for writing the header.
+    # pylint: disable=W0212
+    file_obj.write(payload.Payload._PayloadHeader._MAGIC)
+    _WriteInt(file_obj, payload.Payload._PayloadHeader._VERSION_SIZE, True,
+              self.version)
+    _WriteInt(file_obj, payload.Payload._PayloadHeader._MANIFEST_LEN_SIZE, True,
+              manifest_len)
+
+  def WriteToFile(self, file_obj, manifest_len=-1, data_blobs=None,
+                  sigs_data=None, padding=None):
+    """Writes the payload content to a file.
+
+    Args:
+      file_obj: a file object open for writing
+      manifest_len: manifest length to dump (otherwise computed automatically)
+      data_blobs: a list of data blobs to be concatenated to the payload
+      sigs_data: a binary Signatures message to be concatenated to the payload
+      padding: extra bytes to append past the normal data blobs (optional)
+    """
+    manifest = self.manifest.SerializeToString()
+    if manifest_len < 0:
+      manifest_len = len(manifest)
+    self._WriteHeaderToFile(file_obj, manifest_len)
+    file_obj.write(manifest)
+    if data_blobs:
+      for data_blob in data_blobs:
+        file_obj.write(data_blob)
+    if sigs_data:
+      file_obj.write(sigs_data)
+    if padding:
+      file_obj.write(padding)
+
+
+class EnhancedPayloadGenerator(PayloadGenerator):
+  """Payload generator with automatic handling of data blobs.
+
+  Attributes:
+    data_blobs: a list of blobs, in the order they were added
+    curr_offset: the offset at which the next added blob will be placed
+  """
+
+  def __init__(self):
+    super(EnhancedPayloadGenerator, self).__init__()
+    self.data_blobs = []
+    self.curr_offset = 0
+
+  def AddData(self, data_blob):
+    """Adds a (possibly orphan) data blob."""
+    data_length = len(data_blob)
+    data_offset = self.curr_offset
+    self.curr_offset += data_length
+    self.data_blobs.append(data_blob)
+    return data_length, data_offset
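+    # Offset accounting example: on a fresh generator, AddData('abcd')
+    # returns (4, 0); a subsequent AddData('xyz') returns (3, 4).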
+
+  def AddOperationWithData(self, is_kernel, op_type, src_extents=None,
+                           src_length=None, dst_extents=None, dst_length=None,
+                           data_blob=None, do_hash_data_blob=True):
+    """Adds an install operation and associated data blob.
+
+    This takes care of obtaining a hash of the data blob (if so instructed)
+    and of appending the blob to the internally maintained list, including
+    the necessary offset/length accounting.
+
+    Args:
+      is_kernel: whether this is a kernel (True) or rootfs (False) operation
+      op_type: one of REPLACE, REPLACE_BZ, MOVE or BSDIFF
+      src_extents: list of (start, length) pairs indicating src block ranges
+      src_length: size of the src data in bytes (needed for BSDIFF)
+      dst_extents: list of (start, length) pairs indicating dst block ranges
+      dst_length: size of the dst data in bytes (needed for BSDIFF)
+      data_blob: a data blob associated with this operation
+      do_hash_data_blob: whether or not to compute and add a data blob hash
+    """
+    data_offset = data_length = data_sha256_hash = None
+    if data_blob is not None:
+      if do_hash_data_blob:
+        # pylint: disable=E1101
+        data_sha256_hash = hashlib.sha256(data_blob).digest()
+      data_length, data_offset = self.AddData(data_blob)
+
+    self.AddOperation(is_kernel, op_type, data_offset=data_offset,
+                      data_length=data_length, src_extents=src_extents,
+                      src_length=src_length, dst_extents=dst_extents,
+                      dst_length=dst_length, data_sha256_hash=data_sha256_hash)
+
+  def WriteToFileWithData(self, file_obj, sigs_data=None,
+                          privkey_file_name=None,
+                          do_add_pseudo_operation=False,
+                          is_pseudo_in_kernel=False, padding=None):
+    """Writes the payload content to a file, optionally signing the content.
+
+    Args:
+      file_obj: a file object open for writing
+      sigs_data: signatures blob to be appended to the payload (optional;
+                 payload signature fields assumed to be preset by the caller)
+      privkey_file_name: key used for signing the payload (optional; used only
+                         if explicit signatures blob not provided)
+      do_add_pseudo_operation: whether a pseudo-operation should be added to
+                               account for the signature blob
+      is_pseudo_in_kernel: whether the pseudo-operation should be added to
+                           kernel (True) or rootfs (False) operations
+      padding: extra bytes to append past the normal data blobs (optional)
+
+    Raises:
+      TestError: if arguments are inconsistent or something goes wrong.
+    """
+    sigs_len = len(sigs_data) if sigs_data else 0
+
+    # Do we need to generate a genuine signatures blob?
+    do_generate_sigs_data = sigs_data is None and privkey_file_name
+
+    if do_generate_sigs_data:
+      # First, sign some arbitrary data to obtain the size of a signature blob.
+      fake_sig = SignSha256('fake-payload-data', privkey_file_name)
+      fake_sigs_gen = SignaturesGenerator()
+      fake_sigs_gen.AddSig(1, fake_sig)
+      sigs_len = len(fake_sigs_gen.ToBinary())
+
+      # Update the payload with proper signature attributes.
+      self.SetSignatures(self.curr_offset, sigs_len)
+
+    # Add a pseudo-operation to account for the signature blob, if requested.
+    if do_add_pseudo_operation:
+      if not self.block_size:
+        raise TestError('cannot add pseudo-operation without knowing the '
+                        'payload block size')
+      self.AddOperation(
+          is_pseudo_in_kernel, common.OpType.REPLACE,
+          data_offset=self.curr_offset, data_length=sigs_len,
+          dst_extents=[(common.PSEUDO_EXTENT_MARKER,
+                        (sigs_len + self.block_size - 1) / self.block_size)])
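+      # Note: the extent length above relies on Python 2 integer division
+      # as ceiling division, covering sigs_len rounded up to whole blocks.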
+
+    if do_generate_sigs_data:
+      # Once all payload fields are updated, dump and sign it.
+      temp_payload_file = cStringIO.StringIO()
+      self.WriteToFile(temp_payload_file, data_blobs=self.data_blobs)
+      sig = SignSha256(temp_payload_file.getvalue(), privkey_file_name)
+      sigs_gen = SignaturesGenerator()
+      sigs_gen.AddSig(1, sig)
+      sigs_data = sigs_gen.ToBinary()
+      assert len(sigs_data) == sigs_len, 'signature blob lengths mismatch'
+
+    # Dump the whole thing, complete with data and signature blob, to a file.
+    self.WriteToFile(file_obj, data_blobs=self.data_blobs, sigs_data=sigs_data,
+                     padding=padding)
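+
+
+# Illustrative use (a sketch with hypothetical values): generating a small,
+# signed test payload.
+#
+#   gen = EnhancedPayloadGenerator()
+#   gen.SetBlockSize(KiB(4))
+#   gen.AddOperationWithData(False, common.OpType.REPLACE,
+#                            dst_extents=[(0, 1)], data_blob='x' * KiB(4))
+#   with open('test_payload.bin', 'w') as payload_file:
+#     gen.WriteToFileWithData(payload_file,
+#                             privkey_file_name=_PRIVKEY_FILE_NAME,
+#                             do_add_pseudo_operation=True)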
diff --git a/scripts/update_payload/update-payload-key.pub.pem b/scripts/update_payload/update-payload-key.pub.pem
new file mode 100644
index 0000000..7ac369f
--- /dev/null
+++ b/scripts/update_payload/update-payload-key.pub.pem
@@ -0,0 +1,9 @@
+-----BEGIN PUBLIC KEY-----
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA1Bg9BnjWhX3jJyECeXqF
+O28nkYTF1NHWLlFHgzAGg+ysva22BL3S5LlsNejnYVg/xzx3izvAQyOF3I1TJVOy
+2fH1DoZOWyKuckMyUrFQbO6OV1VIvPUPKckHadWcXSsHj2lBdDPH9xRDEBsXeztf
+nAGBD8GlAyTU7iH+Bf+xzyK9k4BmITf4Nx4xWhRZ6gm2Fc2SEP3x5N5fohkLv5ZP
+kFr0fj5wUK+0XF95rkGFBLIq2XACS3dmxMFToFl1HMM1HonUg9TAH+3dVH93zue1
+y81mkTuGnNX+zYya5ov2kD8zW1V10iTOSJfOlho5T8FpKbG37o3yYcUiyMHKO1Iv
+PQIDAQAB
+-----END PUBLIC KEY-----
diff --git a/scripts/update_payload/update_metadata_pb2.py b/scripts/update_payload/update_metadata_pb2.py
new file mode 100644
index 0000000..46c475e
--- /dev/null
+++ b/scripts/update_payload/update_metadata_pb2.py
@@ -0,0 +1,620 @@
+# Generated by the protocol buffer compiler.  DO NOT EDIT!
+# source: update_metadata.proto
+
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import message as _message
+from google.protobuf import reflection as _reflection
+from google.protobuf import descriptor_pb2
+# @@protoc_insertion_point(imports)
+
+
+
+
+DESCRIPTOR = _descriptor.FileDescriptor(
+  name='update_metadata.proto',
+  package='chromeos_update_engine',
+  serialized_pb='\n\x15update_metadata.proto\x12\x16\x63hromeos_update_engine\"1\n\x06\x45xtent\x12\x13\n\x0bstart_block\x18\x01 \x01(\x04\x12\x12\n\nnum_blocks\x18\x02 \x01(\x04\"z\n\nSignatures\x12@\n\nsignatures\x18\x01 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x1a*\n\tSignature\x12\x0f\n\x07version\x18\x01 \x01(\r\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\"+\n\rPartitionInfo\x12\x0c\n\x04size\x18\x01 \x01(\x04\x12\x0c\n\x04hash\x18\x02 \x01(\x0c\"w\n\tImageInfo\x12\r\n\x05\x62oard\x18\x01 \x01(\t\x12\x0b\n\x03key\x18\x02 \x01(\t\x12\x0f\n\x07\x63hannel\x18\x03 \x01(\t\x12\x0f\n\x07version\x18\x04 \x01(\t\x12\x15\n\rbuild_channel\x18\x05 \x01(\t\x12\x15\n\rbuild_version\x18\x06 \x01(\t\"\xd2\x03\n\x10InstallOperation\x12;\n\x04type\x18\x01 \x02(\x0e\x32-.chromeos_update_engine.InstallOperation.Type\x12\x13\n\x0b\x64\x61ta_offset\x18\x02 \x01(\r\x12\x13\n\x0b\x64\x61ta_length\x18\x03 \x01(\r\x12\x33\n\x0bsrc_extents\x18\x04 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\nsrc_length\x18\x05 \x01(\x04\x12\x33\n\x0b\x64st_extents\x18\x06 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\ndst_length\x18\x07 \x01(\x04\x12\x18\n\x10\x64\x61ta_sha256_hash\x18\x08 \x01(\x0c\x12\x17\n\x0fsrc_sha256_hash\x18\t \x01(\x0c\"\x91\x01\n\x04Type\x12\x0b\n\x07REPLACE\x10\x00\x12\x0e\n\nREPLACE_BZ\x10\x01\x12\x08\n\x04MOVE\x10\x02\x12\n\n\x06\x42SDIFF\x10\x03\x12\x0f\n\x0bSOURCE_COPY\x10\x04\x12\x11\n\rSOURCE_BSDIFF\x10\x05\x12\x08\n\x04ZERO\x10\x06\x12\x0b\n\x07\x44ISCARD\x10\x07\x12\x0e\n\nREPLACE_XZ\x10\x08\x12\x0b\n\x07IMGDIFF\x10\t\"\x88\x03\n\x0fPartitionUpdate\x12\x16\n\x0epartition_name\x18\x01 \x02(\t\x12\x17\n\x0frun_postinstall\x18\x02 \x01(\x08\x12\x18\n\x10postinstall_path\x18\x03 \x01(\t\x12\x17\n\x0f\x66ilesystem_type\x18\x04 \x01(\t\x12M\n\x17new_partition_signature\x18\x05 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x12\x41\n\x12old_partition_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12\x41\n\x12new_partition_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12<\n\noperations\x18\x08 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\"\xc4\x05\n\x14\x44\x65ltaArchiveManifest\x12\x44\n\x12install_operations\x18\x01 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12K\n\x19kernel_install_operations\x18\x02 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12\x18\n\nblock_size\x18\x03 \x01(\r:\x04\x34\x30\x39\x36\x12\x19\n\x11signatures_offset\x18\x04 \x01(\x04\x12\x17\n\x0fsignatures_size\x18\x05 \x01(\x04\x12>\n\x0fold_kernel_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12>\n\x0fnew_kernel_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12>\n\x0fold_rootfs_info\x18\x08 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12>\n\x0fnew_rootfs_info\x18\t \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12\x39\n\x0eold_image_info\x18\n \x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x39\n\x0enew_image_info\x18\x0b \x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x18\n\rminor_version\x18\x0c \x01(\r:\x01\x30\x12;\n\npartitions\x18\r \x03(\x0b\x32\'.chromeos_update_engine.PartitionUpdateB\x02H\x03')
+
+
+
+_INSTALLOPERATION_TYPE = _descriptor.EnumDescriptor(
+  name='Type',
+  full_name='chromeos_update_engine.InstallOperation.Type',
+  filename=None,
+  file=DESCRIPTOR,
+  values=[
+    _descriptor.EnumValueDescriptor(
+      name='REPLACE', index=0, number=0,
+      options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='REPLACE_BZ', index=1, number=1,
+      options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='MOVE', index=2, number=2,
+      options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='BSDIFF', index=3, number=3,
+      options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='SOURCE_COPY', index=4, number=4,
+      options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='SOURCE_BSDIFF', index=5, number=5,
+      options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='ZERO', index=6, number=6,
+      options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='DISCARD', index=7, number=7,
+      options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='REPLACE_XZ', index=8, number=8,
+      options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='IMGDIFF', index=9, number=9,
+      options=None,
+      type=None),
+  ],
+  containing_type=None,
+  options=None,
+  serialized_start=712,
+  serialized_end=857,
+)
+
+
+_EXTENT = _descriptor.Descriptor(
+  name='Extent',
+  full_name='chromeos_update_engine.Extent',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='start_block', full_name='chromeos_update_engine.Extent.start_block', index=0,
+      number=1, type=4, cpp_type=4, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='num_blocks', full_name='chromeos_update_engine.Extent.num_blocks', index=1,
+      number=2, type=4, cpp_type=4, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  extension_ranges=[],
+  serialized_start=49,
+  serialized_end=98,
+)
+
+
+_SIGNATURES_SIGNATURE = _descriptor.Descriptor(
+  name='Signature',
+  full_name='chromeos_update_engine.Signatures.Signature',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='version', full_name='chromeos_update_engine.Signatures.Signature.version', index=0,
+      number=1, type=13, cpp_type=3, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='data', full_name='chromeos_update_engine.Signatures.Signature.data', index=1,
+      number=2, type=12, cpp_type=9, label=1,
+      has_default_value=False, default_value="",
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  extension_ranges=[],
+  serialized_start=180,
+  serialized_end=222,
+)
+
+_SIGNATURES = _descriptor.Descriptor(
+  name='Signatures',
+  full_name='chromeos_update_engine.Signatures',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='signatures', full_name='chromeos_update_engine.Signatures.signatures', index=0,
+      number=1, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[_SIGNATURES_SIGNATURE, ],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  extension_ranges=[],
+  serialized_start=100,
+  serialized_end=222,
+)
+
+
+_PARTITIONINFO = _descriptor.Descriptor(
+  name='PartitionInfo',
+  full_name='chromeos_update_engine.PartitionInfo',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='size', full_name='chromeos_update_engine.PartitionInfo.size', index=0,
+      number=1, type=4, cpp_type=4, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='hash', full_name='chromeos_update_engine.PartitionInfo.hash', index=1,
+      number=2, type=12, cpp_type=9, label=1,
+      has_default_value=False, default_value="",
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  extension_ranges=[],
+  serialized_start=224,
+  serialized_end=267,
+)
+
+
+_IMAGEINFO = _descriptor.Descriptor(
+  name='ImageInfo',
+  full_name='chromeos_update_engine.ImageInfo',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='board', full_name='chromeos_update_engine.ImageInfo.board', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=unicode("", "utf-8"),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='key', full_name='chromeos_update_engine.ImageInfo.key', index=1,
+      number=2, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=unicode("", "utf-8"),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='channel', full_name='chromeos_update_engine.ImageInfo.channel', index=2,
+      number=3, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=unicode("", "utf-8"),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='version', full_name='chromeos_update_engine.ImageInfo.version', index=3,
+      number=4, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=unicode("", "utf-8"),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='build_channel', full_name='chromeos_update_engine.ImageInfo.build_channel', index=4,
+      number=5, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=unicode("", "utf-8"),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='build_version', full_name='chromeos_update_engine.ImageInfo.build_version', index=5,
+      number=6, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=unicode("", "utf-8"),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  extension_ranges=[],
+  serialized_start=269,
+  serialized_end=388,
+)
+
+
+_INSTALLOPERATION = _descriptor.Descriptor(
+  name='InstallOperation',
+  full_name='chromeos_update_engine.InstallOperation',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='type', full_name='chromeos_update_engine.InstallOperation.type', index=0,
+      number=1, type=14, cpp_type=8, label=2,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='data_offset', full_name='chromeos_update_engine.InstallOperation.data_offset', index=1,
+      number=2, type=13, cpp_type=3, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='data_length', full_name='chromeos_update_engine.InstallOperation.data_length', index=2,
+      number=3, type=13, cpp_type=3, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='src_extents', full_name='chromeos_update_engine.InstallOperation.src_extents', index=3,
+      number=4, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='src_length', full_name='chromeos_update_engine.InstallOperation.src_length', index=4,
+      number=5, type=4, cpp_type=4, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='dst_extents', full_name='chromeos_update_engine.InstallOperation.dst_extents', index=5,
+      number=6, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='dst_length', full_name='chromeos_update_engine.InstallOperation.dst_length', index=6,
+      number=7, type=4, cpp_type=4, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='data_sha256_hash', full_name='chromeos_update_engine.InstallOperation.data_sha256_hash', index=7,
+      number=8, type=12, cpp_type=9, label=1,
+      has_default_value=False, default_value="",
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='src_sha256_hash', full_name='chromeos_update_engine.InstallOperation.src_sha256_hash', index=8,
+      number=9, type=12, cpp_type=9, label=1,
+      has_default_value=False, default_value="",
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+    _INSTALLOPERATION_TYPE,
+  ],
+  options=None,
+  is_extendable=False,
+  extension_ranges=[],
+  serialized_start=391,
+  serialized_end=857,
+)
+
+
+_PARTITIONUPDATE = _descriptor.Descriptor(
+  name='PartitionUpdate',
+  full_name='chromeos_update_engine.PartitionUpdate',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='partition_name', full_name='chromeos_update_engine.PartitionUpdate.partition_name', index=0,
+      number=1, type=9, cpp_type=9, label=2,
+      has_default_value=False, default_value=unicode("", "utf-8"),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='run_postinstall', full_name='chromeos_update_engine.PartitionUpdate.run_postinstall', index=1,
+      number=2, type=8, cpp_type=7, label=1,
+      has_default_value=False, default_value=False,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='postinstall_path', full_name='chromeos_update_engine.PartitionUpdate.postinstall_path', index=2,
+      number=3, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=unicode("", "utf-8"),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='filesystem_type', full_name='chromeos_update_engine.PartitionUpdate.filesystem_type', index=3,
+      number=4, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=unicode("", "utf-8"),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='new_partition_signature', full_name='chromeos_update_engine.PartitionUpdate.new_partition_signature', index=4,
+      number=5, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='old_partition_info', full_name='chromeos_update_engine.PartitionUpdate.old_partition_info', index=5,
+      number=6, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='new_partition_info', full_name='chromeos_update_engine.PartitionUpdate.new_partition_info', index=6,
+      number=7, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='operations', full_name='chromeos_update_engine.PartitionUpdate.operations', index=7,
+      number=8, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  extension_ranges=[],
+  serialized_start=860,
+  serialized_end=1252,
+)
+
+
+_DELTAARCHIVEMANIFEST = _descriptor.Descriptor(
+  name='DeltaArchiveManifest',
+  full_name='chromeos_update_engine.DeltaArchiveManifest',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='install_operations', full_name='chromeos_update_engine.DeltaArchiveManifest.install_operations', index=0,
+      number=1, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='kernel_install_operations', full_name='chromeos_update_engine.DeltaArchiveManifest.kernel_install_operations', index=1,
+      number=2, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='block_size', full_name='chromeos_update_engine.DeltaArchiveManifest.block_size', index=2,
+      number=3, type=13, cpp_type=3, label=1,
+      has_default_value=True, default_value=4096,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='signatures_offset', full_name='chromeos_update_engine.DeltaArchiveManifest.signatures_offset', index=3,
+      number=4, type=4, cpp_type=4, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='signatures_size', full_name='chromeos_update_engine.DeltaArchiveManifest.signatures_size', index=4,
+      number=5, type=4, cpp_type=4, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='old_kernel_info', full_name='chromeos_update_engine.DeltaArchiveManifest.old_kernel_info', index=5,
+      number=6, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='new_kernel_info', full_name='chromeos_update_engine.DeltaArchiveManifest.new_kernel_info', index=6,
+      number=7, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='old_rootfs_info', full_name='chromeos_update_engine.DeltaArchiveManifest.old_rootfs_info', index=7,
+      number=8, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='new_rootfs_info', full_name='chromeos_update_engine.DeltaArchiveManifest.new_rootfs_info', index=8,
+      number=9, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='old_image_info', full_name='chromeos_update_engine.DeltaArchiveManifest.old_image_info', index=9,
+      number=10, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='new_image_info', full_name='chromeos_update_engine.DeltaArchiveManifest.new_image_info', index=10,
+      number=11, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='minor_version', full_name='chromeos_update_engine.DeltaArchiveManifest.minor_version', index=11,
+      number=12, type=13, cpp_type=3, label=1,
+      has_default_value=True, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='partitions', full_name='chromeos_update_engine.DeltaArchiveManifest.partitions', index=12,
+      number=13, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  extension_ranges=[],
+  serialized_start=1255,
+  serialized_end=1963,
+)
+
+_SIGNATURES_SIGNATURE.containing_type = _SIGNATURES;
+_SIGNATURES.fields_by_name['signatures'].message_type = _SIGNATURES_SIGNATURE
+_INSTALLOPERATION.fields_by_name['type'].enum_type = _INSTALLOPERATION_TYPE
+_INSTALLOPERATION.fields_by_name['src_extents'].message_type = _EXTENT
+_INSTALLOPERATION.fields_by_name['dst_extents'].message_type = _EXTENT
+_INSTALLOPERATION_TYPE.containing_type = _INSTALLOPERATION;
+_PARTITIONUPDATE.fields_by_name['new_partition_signature'].message_type = _SIGNATURES_SIGNATURE
+_PARTITIONUPDATE.fields_by_name['old_partition_info'].message_type = _PARTITIONINFO
+_PARTITIONUPDATE.fields_by_name['new_partition_info'].message_type = _PARTITIONINFO
+_PARTITIONUPDATE.fields_by_name['operations'].message_type = _INSTALLOPERATION
+_DELTAARCHIVEMANIFEST.fields_by_name['install_operations'].message_type = _INSTALLOPERATION
+_DELTAARCHIVEMANIFEST.fields_by_name['kernel_install_operations'].message_type = _INSTALLOPERATION
+_DELTAARCHIVEMANIFEST.fields_by_name['old_kernel_info'].message_type = _PARTITIONINFO
+_DELTAARCHIVEMANIFEST.fields_by_name['new_kernel_info'].message_type = _PARTITIONINFO
+_DELTAARCHIVEMANIFEST.fields_by_name['old_rootfs_info'].message_type = _PARTITIONINFO
+_DELTAARCHIVEMANIFEST.fields_by_name['new_rootfs_info'].message_type = _PARTITIONINFO
+_DELTAARCHIVEMANIFEST.fields_by_name['old_image_info'].message_type = _IMAGEINFO
+_DELTAARCHIVEMANIFEST.fields_by_name['new_image_info'].message_type = _IMAGEINFO
+_DELTAARCHIVEMANIFEST.fields_by_name['partitions'].message_type = _PARTITIONUPDATE
+DESCRIPTOR.message_types_by_name['Extent'] = _EXTENT
+DESCRIPTOR.message_types_by_name['Signatures'] = _SIGNATURES
+DESCRIPTOR.message_types_by_name['PartitionInfo'] = _PARTITIONINFO
+DESCRIPTOR.message_types_by_name['ImageInfo'] = _IMAGEINFO
+DESCRIPTOR.message_types_by_name['InstallOperation'] = _INSTALLOPERATION
+DESCRIPTOR.message_types_by_name['PartitionUpdate'] = _PARTITIONUPDATE
+DESCRIPTOR.message_types_by_name['DeltaArchiveManifest'] = _DELTAARCHIVEMANIFEST
+
+class Extent(_message.Message):
+  __metaclass__ = _reflection.GeneratedProtocolMessageType
+  DESCRIPTOR = _EXTENT
+
+  # @@protoc_insertion_point(class_scope:chromeos_update_engine.Extent)
+
+class Signatures(_message.Message):
+  __metaclass__ = _reflection.GeneratedProtocolMessageType
+
+  class Signature(_message.Message):
+    __metaclass__ = _reflection.GeneratedProtocolMessageType
+    DESCRIPTOR = _SIGNATURES_SIGNATURE
+
+    # @@protoc_insertion_point(class_scope:chromeos_update_engine.Signatures.Signature)
+  DESCRIPTOR = _SIGNATURES
+
+  # @@protoc_insertion_point(class_scope:chromeos_update_engine.Signatures)
+
+class PartitionInfo(_message.Message):
+  __metaclass__ = _reflection.GeneratedProtocolMessageType
+  DESCRIPTOR = _PARTITIONINFO
+
+  # @@protoc_insertion_point(class_scope:chromeos_update_engine.PartitionInfo)
+
+class ImageInfo(_message.Message):
+  __metaclass__ = _reflection.GeneratedProtocolMessageType
+  DESCRIPTOR = _IMAGEINFO
+
+  # @@protoc_insertion_point(class_scope:chromeos_update_engine.ImageInfo)
+
+class InstallOperation(_message.Message):
+  __metaclass__ = _reflection.GeneratedProtocolMessageType
+  DESCRIPTOR = _INSTALLOPERATION
+
+  # @@protoc_insertion_point(class_scope:chromeos_update_engine.InstallOperation)
+
+class PartitionUpdate(_message.Message):
+  __metaclass__ = _reflection.GeneratedProtocolMessageType
+  DESCRIPTOR = _PARTITIONUPDATE
+
+  # @@protoc_insertion_point(class_scope:chromeos_update_engine.PartitionUpdate)
+
+class DeltaArchiveManifest(_message.Message):
+  __metaclass__ = _reflection.GeneratedProtocolMessageType
+  DESCRIPTOR = _DELTAARCHIVEMANIFEST
+
+  # @@protoc_insertion_point(class_scope:chromeos_update_engine.DeltaArchiveManifest)
+
+
+DESCRIPTOR.has_options = True
+DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), 'H\003')
+# @@protoc_insertion_point(module_scope)