Revert "Move more non-AB code to separate files"

This reverts commit 513b86e5c210b2d9f73c1a2bd2272ea4c3f94a58.

Change-Id: I6aae60642772a052404eb1773966b2e637864bbc
diff --git a/tools/releasetools/non_ab_ota.py b/tools/releasetools/non_ab_ota.py
index 5839d17..667891c 100644
--- a/tools/releasetools/non_ab_ota.py
+++ b/tools/releasetools/non_ab_ota.py
@@ -13,25 +13,17 @@
 # limitations under the License.
 
 import collections
-import copy
-import imp
 import logging
 import os
-import time
-import threading
-import tempfile
 import zipfile
-import subprocess
-import shlex
 
 import common
 import edify_generator
-from edify_generator import ErrorCode
+import verity_utils
 from check_target_files_vintf import CheckVintfIfTrebleEnabled, HasPartition
-from common import OPTIONS, Run, MakeTempDir, RunAndCheckOutput, ZipWrite, MakeTempFile
+from common import OPTIONS
 from ota_utils import UNZIP_PATTERN, FinalizeMetadata, GetPackageMetadata, PropertyFiles
-from blockimgdiff import BlockImageDiff
-from hashlib import sha1
+import subprocess
 
 logger = logging.getLogger(__name__)
 
@@ -59,7 +51,7 @@
     check_first_block = partition_source_info.fs_type == "ext4"
     # Disable imgdiff because it relies on zlib to produce stable output
     # across different versions, which is often not the case.
-    return BlockDifference(name, partition_tgt, partition_src,
+    return common.BlockDifference(name, partition_tgt, partition_src,
                                   check_first_block,
                                   version=blockimgdiff_version,
                                   disable_imgdiff=True)
@@ -84,7 +76,7 @@
       tgt = common.GetUserImage(partition, OPTIONS.input_tmp, target_zip,
                                 info_dict=target_info,
                                 reset_file_map=True)
-      block_diff_dict[partition] = BlockDifference(partition, tgt,
+      block_diff_dict[partition] = common.BlockDifference(partition, tgt,
                                                           src=None)
     # Incremental OTA update.
     else:
@@ -103,7 +95,7 @@
     function_name = "FullOTA_GetBlockDifferences"
 
   if device_specific_diffs:
-    assert all(isinstance(diff, BlockDifference)
+    assert all(isinstance(diff, common.BlockDifference)
                for diff in device_specific_diffs), \
         "{} is not returning a list of BlockDifference objects".format(
             function_name)
@@ -139,7 +131,7 @@
   output_zip = zipfile.ZipFile(
       staging_file, "w", compression=zipfile.ZIP_DEFLATED)
 
-  device_specific = DeviceSpecificParams(
+  device_specific = common.DeviceSpecificParams(
       input_zip=input_zip,
       input_version=target_api_version,
       output_zip=output_zip,
@@ -225,7 +217,7 @@
   if target_info.get('use_dynamic_partitions') == "true":
     # Use empty source_info_dict to indicate that all partitions / groups must
     # be re-added.
-    dynamic_partitions_diff = DynamicPartitionsDifference(
+    dynamic_partitions_diff = common.DynamicPartitionsDifference(
         info_dict=OPTIONS.info_dict,
         block_diffs=block_diff_dict.values(),
         progress_dict=progress_dict)
@@ -317,7 +309,7 @@
   output_zip = zipfile.ZipFile(
       staging_file, "w", compression=zipfile.ZIP_DEFLATED)
 
-  device_specific = DeviceSpecificParams(
+  device_specific = common.DeviceSpecificParams(
       source_zip=source_zip,
       source_version=source_api_version,
       source_tmp=OPTIONS.source_tmp,
@@ -412,9 +404,9 @@
   required_cache_sizes = [diff.required_cache for diff in
                           block_diff_dict.values()]
   if updating_boot:
-    boot_type, boot_device_expr = GetTypeAndDeviceExpr("/boot",
+    boot_type, boot_device_expr = common.GetTypeAndDeviceExpr("/boot",
                                                               source_info)
-    d = Difference(target_boot, source_boot, "bsdiff")
+    d = common.Difference(target_boot, source_boot, "bsdiff")
     _, _, d = d.ComputePatch()
     if d is None:
       include_full_boot = True
@@ -469,7 +461,7 @@
     if OPTIONS.target_info_dict.get("use_dynamic_partitions") != "true":
       raise RuntimeError(
           "can't generate incremental that disables dynamic partitions")
-    dynamic_partitions_diff = DynamicPartitionsDifference(
+    dynamic_partitions_diff = common.DynamicPartitionsDifference(
         info_dict=OPTIONS.target_info_dict,
         source_info_dict=OPTIONS.source_info_dict,
         block_diffs=block_diff_dict.values(),
@@ -695,891 +687,3 @@
 
   namelist = target_files_zip.namelist()
   return patch in namelist or img in namelist
-
-
-class DeviceSpecificParams(object):
-  module = None
-
-  def __init__(self, **kwargs):
-    """Keyword arguments to the constructor become attributes of this
-    object, which is passed to all functions in the device-specific
-    module."""
-    for k, v in kwargs.items():
-      setattr(self, k, v)
-    self.extras = OPTIONS.extras
-
-    if self.module is None:
-      path = OPTIONS.device_specific
-      if not path:
-        return
-      try:
-        if os.path.isdir(path):
-          info = imp.find_module("releasetools", [path])
-        else:
-          d, f = os.path.split(path)
-          b, x = os.path.splitext(f)
-          if x == ".py":
-            f = b
-          info = imp.find_module(f, [d])
-        logger.info("loaded device-specific extensions from %s", path)
-        self.module = imp.load_module("device_specific", *info)
-      except ImportError:
-        logger.info("unable to load device-specific module; assuming none")
-
-  def _DoCall(self, function_name, *args, **kwargs):
-    """Call the named function in the device-specific module, passing
-    the given args and kwargs.  The first argument to the call will be
-    this DeviceSpecificParams object.  If there is no module, or the
-    module does not define the function, return the value of the
-    'default' kwarg (which itself defaults to None)."""
-    if self.module is None or not hasattr(self.module, function_name):
-      return kwargs.get("default")
-    return getattr(self.module, function_name)(*((self,) + args), **kwargs)
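-
-  # Illustrative sketch (hypothetical, not part of this module): a
-  # device-specific releasetools module defines hooks that receive this
-  # DeviceSpecificParams instance as their first argument, e.g.:
-  #
-  #   def FullOTA_InstallEnd(info):
-  #     # "info.script" is the edify generator, assuming a script=...
-  #     # keyword was passed to the constructor by the OTA writer.
-  #     info.script.Print("Flashing device-specific images...")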
-
-  def FullOTA_Assertions(self):
-    """Called after emitting the block of assertions at the top of a
-    full OTA package.  Implementations can add whatever additional
-    assertions they like."""
-    return self._DoCall("FullOTA_Assertions")
-
-  def FullOTA_InstallBegin(self):
-    """Called at the start of full OTA installation."""
-    return self._DoCall("FullOTA_InstallBegin")
-
-  def FullOTA_GetBlockDifferences(self):
-    """Called during full OTA installation and verification.
-    Implementations should return a list of BlockDifference objects
-    describing the update for each additional partition.
-    """
-    return self._DoCall("FullOTA_GetBlockDifferences")
-
-  def FullOTA_InstallEnd(self):
-    """Called at the end of full OTA installation; typically this is
-    used to install the image for the device's baseband processor."""
-    return self._DoCall("FullOTA_InstallEnd")
-
-  def IncrementalOTA_Assertions(self):
-    """Called after emitting the block of assertions at the top of an
-    incremental OTA package.  Implementations can add whatever
-    additional assertions they like."""
-    return self._DoCall("IncrementalOTA_Assertions")
-
-  def IncrementalOTA_VerifyBegin(self):
-    """Called at the start of the verification phase of incremental
-    OTA installation; additional checks can be placed here to abort
-    the script before any changes are made."""
-    return self._DoCall("IncrementalOTA_VerifyBegin")
-
-  def IncrementalOTA_VerifyEnd(self):
-    """Called at the end of the verification phase of incremental OTA
-    installation; additional checks can be placed here to abort the
-    script before any changes are made."""
-    return self._DoCall("IncrementalOTA_VerifyEnd")
-
-  def IncrementalOTA_InstallBegin(self):
-    """Called at the start of incremental OTA installation (after
-    verification is complete)."""
-    return self._DoCall("IncrementalOTA_InstallBegin")
-
-  def IncrementalOTA_GetBlockDifferences(self):
-    """Called during incremental OTA installation and verification.
-    Implementations should return a list of BlockDifference objects
-    describing the update for each additional partition.
-    """
-    return self._DoCall("IncrementalOTA_GetBlockDifferences")
-
-  def IncrementalOTA_InstallEnd(self):
-    """Called at the end of incremental OTA installation; typically
-    this is used to install the image for the device's baseband
-    processor."""
-    return self._DoCall("IncrementalOTA_InstallEnd")
-
-  def VerifyOTA_Assertions(self):
-    return self._DoCall("VerifyOTA_Assertions")
-
-
-DIFF_PROGRAM_BY_EXT = {
-    ".gz": "imgdiff",
-    ".zip": ["imgdiff", "-z"],
-    ".jar": ["imgdiff", "-z"],
-    ".apk": ["imgdiff", "-z"],
-    ".img": "imgdiff",
-}
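-
-# Note (best-effort summary, not a spec): the "-z" flag puts imgdiff into
-# zip mode so it can diff the deflate streams inside zip-like containers
-# (.zip/.jar/.apk); plain images use bare imgdiff, and unknown extensions
-# fall back to bsdiff (see ComputePatch below).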
-
-
-class Difference(object):
-  def __init__(self, tf, sf, diff_program=None):
-    self.tf = tf
-    self.sf = sf
-    self.patch = None
-    self.diff_program = diff_program
-
-  def ComputePatch(self):
-    """Compute the patch (as a string of data) needed to turn sf into
-    tf.  Returns the same tuple as GetPatch()."""
-
-    tf = self.tf
-    sf = self.sf
-
-    if self.diff_program:
-      diff_program = self.diff_program
-    else:
-      ext = os.path.splitext(tf.name)[1]
-      diff_program = DIFF_PROGRAM_BY_EXT.get(ext, "bsdiff")
-
-    ttemp = tf.WriteToTemp()
-    stemp = sf.WriteToTemp()
-
-    ext = os.path.splitext(tf.name)[1]
-
-    try:
-      ptemp = tempfile.NamedTemporaryFile()
-      if isinstance(diff_program, list):
-        cmd = copy.copy(diff_program)
-      else:
-        cmd = [diff_program]
-      cmd.append(stemp.name)
-      cmd.append(ttemp.name)
-      cmd.append(ptemp.name)
-      p = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-      err = []
-
-      def run():
-        _, e = p.communicate()
-        if e:
-          err.append(e)
-      th = threading.Thread(target=run)
-      th.start()
-      th.join(timeout=300)   # 5 mins
-      if th.is_alive():
-        logger.warning("diff command timed out")
-        p.terminate()
-        th.join(5)
-        if th.is_alive():
-          p.kill()
-          th.join()
-
-      if p.returncode != 0:
-        logger.warning("Failure running %s:\n%s\n", cmd, "".join(err))
-        self.patch = None
-        return None, None, None
-      diff = ptemp.read()
-    finally:
-      ptemp.close()
-      stemp.close()
-      ttemp.close()
-
-    self.patch = diff
-    return self.tf, self.sf, self.patch
-
-  def GetPatch(self):
-    """Returns a tuple of (target_file, source_file, patch_data).
-
-    patch_data may be None if ComputePatch hasn't been called, or if
-    computing the patch failed.
-    """
-    return self.tf, self.sf, self.patch
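-
-  # Usage sketch (illustrative; "target_file"/"source_file" stand for file
-  # objects exposing .name, .size and .WriteToTemp(), as used above):
-  #
-  #   d = Difference(target_file, source_file)
-  #   _, _, patch = d.ComputePatch()  # None if diffing failed or timed out
-  #   if patch is not None:
-  #     output_sink("example.p", patch)  # hypothetical consumer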
-
-
-def ComputeDifferences(diffs):
-  """Call ComputePatch on all the Difference objects in 'diffs'."""
-  logger.info("%d diffs to compute", len(diffs))
-
-  # Do the largest files first, to try to reduce the long-pole effect.
-  # Sort with a key function so that equal sizes never fall through to
-  # comparing Difference objects (which are not orderable in Python 3).
-  by_size = sorted(diffs, key=lambda d: d.tf.size, reverse=True)
-
-  lock = threading.Lock()
-  diff_iter = iter(by_size)   # accessed under lock
-
-  def worker():
-    try:
-      lock.acquire()
-      for d in diff_iter:
-        lock.release()
-        start = time.time()
-        d.ComputePatch()
-        dur = time.time() - start
-        lock.acquire()
-
-        tf, sf, patch = d.GetPatch()
-        if sf.name == tf.name:
-          name = tf.name
-        else:
-          name = "%s (%s)" % (tf.name, sf.name)
-        if patch is None:
-          logger.error("patching failed! %40s", name)
-        else:
-          logger.info(
-              "%8.2f sec %8d / %8d bytes (%6.2f%%) %s", dur, len(patch),
-              tf.size, 100.0 * len(patch) / tf.size, name)
-      lock.release()
-    except Exception:
-      logger.exception("Failed to compute diff from worker")
-      raise
-
-  # start worker threads; wait for them all to finish.
-  threads = [threading.Thread(target=worker)
-             for i in range(OPTIONS.worker_threads)]
-  for th in threads:
-    th.start()
-  while threads:
-    threads.pop().join()
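-
-  # Equivalent pooled execution (illustrative alternative, not the code
-  # above), using the standard library instead of hand-rolled workers:
-  #
-  #   from concurrent.futures import ThreadPoolExecutor
-  #   with ThreadPoolExecutor(max_workers=OPTIONS.worker_threads) as pool:
-  #     list(pool.map(lambda d: d.ComputePatch(), by_size))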
-
-
-class BlockDifference(object):
-  def __init__(self, partition, tgt, src=None, check_first_block=False,
-               version=None, disable_imgdiff=False):
-    self.tgt = tgt
-    self.src = src
-    self.partition = partition
-    self.check_first_block = check_first_block
-    self.disable_imgdiff = disable_imgdiff
-
-    if version is None:
-      version = max(
-          int(i) for i in
-          OPTIONS.info_dict.get("blockimgdiff_versions", "1").split(","))
-    assert version >= 3
-    self.version = version
-
-    b = BlockImageDiff(tgt, src, threads=OPTIONS.worker_threads,
-                       version=self.version,
-                       disable_imgdiff=self.disable_imgdiff)
-    self.path = os.path.join(MakeTempDir(), partition)
-    b.Compute(self.path)
-    self._required_cache = b.max_stashed_size
-    self.touched_src_ranges = b.touched_src_ranges
-    self.touched_src_sha1 = b.touched_src_sha1
-
-    # On devices with dynamic partitions, for new partitions,
-    # src is None but OPTIONS.source_info_dict is not.
-    if OPTIONS.source_info_dict is None:
-      is_dynamic_build = OPTIONS.info_dict.get(
-          "use_dynamic_partitions") == "true"
-      is_dynamic_source = False
-    else:
-      is_dynamic_build = OPTIONS.source_info_dict.get(
-          "use_dynamic_partitions") == "true"
-      is_dynamic_source = partition in shlex.split(
-          OPTIONS.source_info_dict.get("dynamic_partition_list", "").strip())
-
-    is_dynamic_target = partition in shlex.split(
-        OPTIONS.info_dict.get("dynamic_partition_list", "").strip())
-
-    # For dynamic partitions builds, check partition list in both source
-    # and target build because new partitions may be added, and existing
-    # partitions may be removed.
-    is_dynamic = is_dynamic_build and (is_dynamic_source or is_dynamic_target)
-
-    if is_dynamic:
-      self.device = 'map_partition("%s")' % partition
-    else:
-      if OPTIONS.source_info_dict is None:
-        _, device_expr = GetTypeAndDeviceExpr("/" + partition,
-                                              OPTIONS.info_dict)
-      else:
-        _, device_expr = GetTypeAndDeviceExpr("/" + partition,
-                                              OPTIONS.source_info_dict)
-      self.device = device_expr
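-    # Net effect: self.device is either 'map_partition("<name>")' for dynamic
-    # partitions, or a quoted (possibly add_slot_suffix()-wrapped) device path
-    # from the fstab, e.g. '"/dev/block/by-name/system"' (illustrative path).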
-
-  @property
-  def required_cache(self):
-    return self._required_cache
-
-  def WriteScript(self, script, output_zip, progress=None,
-                  write_verify_script=False):
-    if not self.src:
-      # write the output unconditionally
-      script.Print("Patching %s image unconditionally..." % (self.partition,))
-    else:
-      script.Print("Patching %s image after verification." % (self.partition,))
-
-    if progress:
-      script.ShowProgress(progress, 0)
-    self._WriteUpdate(script, output_zip)
-
-    if write_verify_script:
-      self.WritePostInstallVerifyScript(script)
-
-  def WriteStrictVerifyScript(self, script):
-    """Verify all the blocks in the care_map, including clobbered blocks.
-
-    This differs from the WriteVerifyScript() function: a) it prints different
-    error messages; b) it doesn't allow halfway-updated images to pass the
-    verification."""
-
-    partition = self.partition
-    script.Print("Verifying %s..." % (partition,))
-    ranges = self.tgt.care_map
-    ranges_str = ranges.to_string_raw()
-    script.AppendExtra(
-        'range_sha1(%s, "%s") == "%s" && ui_print("    Verified.") || '
-        'ui_print("%s has unexpected contents.");' % (
-            self.device, ranges_str,
-            self.tgt.TotalSha1(include_clobbered_blocks=True),
-            self.partition))
-    script.AppendExtra("")
-
-  def WriteVerifyScript(self, script, touched_blocks_only=False):
-    partition = self.partition
-
-    # full OTA
-    if not self.src:
-      script.Print("Image %s will be patched unconditionally." % (partition,))
-
-    # incremental OTA
-    else:
-      if touched_blocks_only:
-        ranges = self.touched_src_ranges
-        expected_sha1 = self.touched_src_sha1
-      else:
-        ranges = self.src.care_map.subtract(self.src.clobbered_blocks)
-        expected_sha1 = self.src.TotalSha1()
-
-      # No blocks to be checked, skipping.
-      if not ranges:
-        return
-
-      ranges_str = ranges.to_string_raw()
-      script.AppendExtra(
-          'if (range_sha1(%s, "%s") == "%s" || block_image_verify(%s, '
-          'package_extract_file("%s.transfer.list"), "%s.new.dat", '
-          '"%s.patch.dat")) then' % (
-              self.device, ranges_str, expected_sha1,
-              self.device, partition, partition, partition))
-      script.Print('Verified %s image...' % (partition,))
-      script.AppendExtra('else')
-
-      if self.version >= 4:
-
-        # Bug: 21124327
-        # When generating incrementals for the system and vendor partitions in
-        # version 4 or newer, explicitly check the first block (which contains
-        # the superblock) of the partition to see if it's what we expect. If
-        # this check fails, give an explicit log message about the partition
-        # having been remounted R/W (the most likely explanation).
-        if self.check_first_block:
-          script.AppendExtra('check_first_block(%s);' % (self.device,))
-
-        # If version >= 4, try block recovery before abort update
-        if partition == "system":
-          code = ErrorCode.SYSTEM_RECOVER_FAILURE
-        else:
-          code = ErrorCode.VENDOR_RECOVER_FAILURE
-        script.AppendExtra((
-            'ifelse (block_image_recover({device}, "{ranges}") && '
-            'block_image_verify({device}, '
-            'package_extract_file("{partition}.transfer.list"), '
-            '"{partition}.new.dat", "{partition}.patch.dat"), '
-            'ui_print("{partition} recovered successfully."), '
-            'abort("E{code}: {partition} partition fails to recover"));\n'
-            'endif;').format(device=self.device, ranges=ranges_str,
-                             partition=partition, code=code))
-
-      # Abort the OTA update. Note that the incremental OTA cannot be applied
-      # even if the partition already matches the target checksum:
-      # a) If version < 3, operations like move and erase will make changes
-      #    unconditionally and damage the partition.
-      # b) If version >= 3, it won't even reach here.
-      else:
-        if partition == "system":
-          code = ErrorCode.SYSTEM_VERIFICATION_FAILURE
-        else:
-          code = ErrorCode.VENDOR_VERIFICATION_FAILURE
-        script.AppendExtra((
-            'abort("E%d: %s partition has unexpected contents");\n'
-            'endif;') % (code, partition))
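-
-    # Shape of the generated edify for the incremental, version >= 4 case
-    # (illustrative; placeholders in <angle brackets>):
-    #
-    #   if (range_sha1(<device>, "<ranges>") == "<sha1>" ||
-    #       block_image_verify(<device>, ...)) then
-    #     ui_print("Verified system image...");
-    #   else
-    #     check_first_block(<device>);
-    #     ifelse(block_image_recover(<device>, "<ranges>") && ...,
-    #            ui_print("system recovered successfully."),
-    #            abort("E<code>: system partition fails to recover"));
-    #   endif;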
-
-  def WritePostInstallVerifyScript(self, script):
-    partition = self.partition
-    script.Print('Verifying the updated %s image...' % (partition,))
-    # Unlike pre-install verification, clobbered_blocks should not be ignored.
-    ranges = self.tgt.care_map
-    ranges_str = ranges.to_string_raw()
-    script.AppendExtra(
-        'if range_sha1(%s, "%s") == "%s" then' % (
-            self.device, ranges_str,
-            self.tgt.TotalSha1(include_clobbered_blocks=True)))
-
-    # Bug: 20881595
-    # Verify that extended blocks are really zeroed out.
-    if self.tgt.extended:
-      ranges_str = self.tgt.extended.to_string_raw()
-      script.AppendExtra(
-          'if range_sha1(%s, "%s") == "%s" then' % (
-              self.device, ranges_str,
-              self._HashZeroBlocks(self.tgt.extended.size())))
-      script.Print('Verified the updated %s image.' % (partition,))
-      if partition == "system":
-        code = ErrorCode.SYSTEM_NONZERO_CONTENTS
-      else:
-        code = ErrorCode.VENDOR_NONZERO_CONTENTS
-      script.AppendExtra(
-          'else\n'
-          '  abort("E%d: %s partition has unexpected non-zero contents after '
-          'OTA update");\n'
-          'endif;' % (code, partition))
-    else:
-      script.Print('Verified the updated %s image.' % (partition,))
-
-    if partition == "system":
-      code = ErrorCode.SYSTEM_UNEXPECTED_CONTENTS
-    else:
-      code = ErrorCode.VENDOR_UNEXPECTED_CONTENTS
-
-    script.AppendExtra(
-        'else\n'
-        '  abort("E%d: %s partition has unexpected contents after OTA '
-        'update");\n'
-        'endif;' % (code, partition))
-
-  def _WriteUpdate(self, script, output_zip):
-    ZipWrite(output_zip,
-             '{}.transfer.list'.format(self.path),
-             '{}.transfer.list'.format(self.partition))
-
-    # For full OTA, compress the new.dat with brotli at quality 6 to reduce
-    # its size. Quality 9 almost triples the compression time but doesn't
-    # reduce the size much further. For a typical 1.8G system.new.dat:
-    #                       zip  | brotli(quality 6)  | brotli(quality 9)
-    #   compressed_size:    942M | 869M (~8% reduced) | 854M
-    #   compression_time:   75s  | 265s               | 719s
-    #   decompression_time: 15s  | 25s                | 25s
-
-    if not self.src:
-      brotli_cmd = ['brotli', '--quality=6',
-                    '--output={}.new.dat.br'.format(self.path),
-                    '{}.new.dat'.format(self.path)]
-      print("Compressing {}.new.dat with brotli".format(self.partition))
-      RunAndCheckOutput(brotli_cmd)
-
-      new_data_name = '{}.new.dat.br'.format(self.partition)
-      ZipWrite(output_zip,
-               '{}.new.dat.br'.format(self.path),
-               new_data_name,
-               compress_type=zipfile.ZIP_STORED)
-    else:
-      new_data_name = '{}.new.dat'.format(self.partition)
-      ZipWrite(output_zip, '{}.new.dat'.format(self.path), new_data_name)
-
-    ZipWrite(output_zip,
-             '{}.patch.dat'.format(self.path),
-             '{}.patch.dat'.format(self.partition),
-             compress_type=zipfile.ZIP_STORED)
-
-    if self.partition == "system":
-      code = ErrorCode.SYSTEM_UPDATE_FAILURE
-    else:
-      code = ErrorCode.VENDOR_UPDATE_FAILURE
-
-    call = ('block_image_update({device}, '
-            'package_extract_file("{partition}.transfer.list"), '
-            '"{new_data_name}", "{partition}.patch.dat") ||\n'
-            '  abort("E{code}: Failed to update {partition} image.");'.format(
-                device=self.device, partition=self.partition,
-                new_data_name=new_data_name, code=code))
-    script.AppendExtra(script.WordWrap(call))
-
-  def _HashBlocks(self, source, ranges):  # pylint: disable=no-self-use
-    data = source.ReadRangeSet(ranges)
-    ctx = sha1()
-
-    for p in data:
-      ctx.update(p)
-
-    return ctx.hexdigest()
-
-  def _HashZeroBlocks(self, num_blocks):  # pylint: disable=no-self-use
-    """Return the hash value for all zero blocks."""
-    zero_block = '\x00' * 4096
-    ctx = sha1()
-    for _ in range(num_blocks):
-      ctx.update(zero_block)
-
-    return ctx.hexdigest()
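-
-  # Sanity check (illustrative equivalence):
-  #   _HashZeroBlocks(1) == hashlib.sha1(b'\x00' * 4096).hexdigest()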
-
-
-def MakeRecoveryPatch(input_dir, output_sink, recovery_img, boot_img,
-                      info_dict=None):
-  """Generates the recovery-from-boot patch and writes the script to output.
-
-  Most of the space in the boot and recovery images is just the kernel, which is
-  identical for the two, so the resulting patch should be efficient. Add it to
-  the output zip, along with a shell script that is run from init.rc on first
-  boot to actually do the patching and install the new recovery image.
-
-  Args:
-    input_dir: The top-level input directory of the target-files.zip.
-    output_sink: The callback function that writes the result.
-    recovery_img: File object for the recovery image.
-    boot_img: File object for the boot image.
-    info_dict: A dict returned by common.LoadInfoDict() on the input
-        target_files. Will use OPTIONS.info_dict if None has been given.
-  """
-  if info_dict is None:
-    info_dict = OPTIONS.info_dict
-
-  full_recovery_image = info_dict.get("full_recovery_image") == "true"
-  board_uses_vendorimage = info_dict.get("board_uses_vendorimage") == "true"
-
-  if board_uses_vendorimage:
-    # In this case, the output sink is rooted at VENDOR
-    recovery_img_path = "etc/recovery.img"
-    recovery_resource_dat_path = "VENDOR/etc/recovery-resource.dat"
-    sh_dir = "bin"
-  else:
-    # In this case the output sink is rooted at SYSTEM
-    recovery_img_path = "vendor/etc/recovery.img"
-    recovery_resource_dat_path = "SYSTEM/vendor/etc/recovery-resource.dat"
-    sh_dir = "vendor/bin"
-
-  if full_recovery_image:
-    output_sink(recovery_img_path, recovery_img.data)
-
-  else:
-    system_root_image = info_dict.get("system_root_image") == "true"
-    include_recovery_dtbo = info_dict.get("include_recovery_dtbo") == "true"
-    include_recovery_acpio = info_dict.get("include_recovery_acpio") == "true"
-    path = os.path.join(input_dir, recovery_resource_dat_path)
-    # With system-root-image, boot and recovery images will have mismatching
-    # entries (only recovery has the ramdisk entry) (Bug: 72731506). Use bsdiff
-    # to handle such a case.
-    if system_root_image or include_recovery_dtbo or include_recovery_acpio:
-      diff_program = ["bsdiff"]
-      bonus_args = ""
-      assert not os.path.exists(path)
-    else:
-      diff_program = ["imgdiff"]
-      if os.path.exists(path):
-        diff_program.append("-b")
-        diff_program.append(path)
-        bonus_args = "--bonus /vendor/etc/recovery-resource.dat"
-      else:
-        bonus_args = ""
-
-    d = Difference(recovery_img, boot_img, diff_program=diff_program)
-    _, _, patch = d.ComputePatch()
-    output_sink("recovery-from-boot.p", patch)
-
-  try:
-    # The following GetTypeAndDevice() calls must use the device paths from
-    # the target info_dict rather than from source_info_dict.
-    boot_type, boot_device = GetTypeAndDevice("/boot", info_dict,
-                                              check_no_slot=False)
-    recovery_type, recovery_device = GetTypeAndDevice("/recovery", info_dict,
-                                                      check_no_slot=False)
-  except KeyError:
-    return
-
-  if full_recovery_image:
-
-    # Note that we use /vendor to refer to the recovery resources. This will
-    # work for a separate vendor partition mounted at /vendor or a
-    # /system/vendor subdirectory on the system partition, for which init will
-    # create a symlink from /vendor to /system/vendor.
-
-    sh = """#!/vendor/bin/sh
-if ! applypatch --check %(type)s:%(device)s:%(size)d:%(sha1)s; then
-  applypatch \\
-          --flash /vendor/etc/recovery.img \\
-          --target %(type)s:%(device)s:%(size)d:%(sha1)s && \\
-      log -t recovery "Installing new recovery image: succeeded" || \\
-      log -t recovery "Installing new recovery image: failed"
-else
-  log -t recovery "Recovery image already installed"
-fi
-""" % {'type': recovery_type,
-       'device': recovery_device,
-       'sha1': recovery_img.sha1,
-       'size': recovery_img.size}
-  else:
-    sh = """#!/vendor/bin/sh
-if ! applypatch --check %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s; then
-  applypatch %(bonus_args)s \\
-          --patch /vendor/recovery-from-boot.p \\
-          --source %(boot_type)s:%(boot_device)s:%(boot_size)d:%(boot_sha1)s \\
-          --target %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s && \\
-      log -t recovery "Installing new recovery image: succeeded" || \\
-      log -t recovery "Installing new recovery image: failed"
-else
-  log -t recovery "Recovery image already installed"
-fi
-""" % {'boot_size': boot_img.size,
-       'boot_sha1': boot_img.sha1,
-       'recovery_size': recovery_img.size,
-       'recovery_sha1': recovery_img.sha1,
-       'boot_type': boot_type,
-       'boot_device': boot_device + '$(getprop ro.boot.slot_suffix)',
-       'recovery_type': recovery_type,
-       'recovery_device': recovery_device + '$(getprop ro.boot.slot_suffix)',
-       'bonus_args': bonus_args}
-
-  # The install script location moved from /system/etc to /system/bin in the L
-  # release. In the R release it is in VENDOR/bin or SYSTEM/vendor/bin.
-  sh_location = os.path.join(sh_dir, "install-recovery.sh")
-
-  logger.info("putting script in %s", sh_location)
-
-  output_sink(sh_location, sh.encode())
-
-
-class DynamicPartitionUpdate(object):
-  def __init__(self, src_group=None, tgt_group=None, progress=None,
-               block_difference=None):
-    self.src_group = src_group
-    self.tgt_group = tgt_group
-    self.progress = progress
-    self.block_difference = block_difference
-
-  @property
-  def src_size(self):
-    if not self.block_difference:
-      return 0
-    return DynamicPartitionUpdate._GetSparseImageSize(self.block_difference.src)
-
-  @property
-  def tgt_size(self):
-    if not self.block_difference:
-      return 0
-    return DynamicPartitionUpdate._GetSparseImageSize(self.block_difference.tgt)
-
-  @staticmethod
-  def _GetSparseImageSize(img):
-    if not img:
-      return 0
-    return img.blocksize * img.total_blocks
-
-
-class DynamicGroupUpdate(object):
-  def __init__(self, src_size=None, tgt_size=None):
-    # None: group does not exist. 0: no size limits.
-    self.src_size = src_size
-    self.tgt_size = tgt_size
-
-
-class DynamicPartitionsDifference(object):
-  def __init__(self, info_dict, block_diffs, progress_dict=None,
-               source_info_dict=None):
-    if progress_dict is None:
-      progress_dict = {}
-
-    self._remove_all_before_apply = False
-    if source_info_dict is None:
-      self._remove_all_before_apply = True
-      source_info_dict = {}
-
-    block_diff_dict = collections.OrderedDict(
-        [(e.partition, e) for e in block_diffs])
-
-    assert len(block_diff_dict) == len(block_diffs), \
-        "Duplicated BlockDifference object for {}".format(
-            [partition for partition, count in
-             collections.Counter(e.partition for e in block_diffs).items()
-             if count > 1])
-
-    self._partition_updates = collections.OrderedDict()
-
-    for p, block_diff in block_diff_dict.items():
-      self._partition_updates[p] = DynamicPartitionUpdate()
-      self._partition_updates[p].block_difference = block_diff
-
-    for p, progress in progress_dict.items():
-      if p in self._partition_updates:
-        self._partition_updates[p].progress = progress
-
-    tgt_groups = shlex.split(info_dict.get(
-        "super_partition_groups", "").strip())
-    src_groups = shlex.split(source_info_dict.get(
-        "super_partition_groups", "").strip())
-
-    for g in tgt_groups:
-      for p in shlex.split(info_dict.get(
-              "super_%s_partition_list" % g, "").strip()):
-        assert p in self._partition_updates, \
-            "{} is in target super_{}_partition_list but no BlockDifference " \
-            "object is provided.".format(p, g)
-        self._partition_updates[p].tgt_group = g
-
-    for g in src_groups:
-      for p in shlex.split(source_info_dict.get(
-              "super_%s_partition_list" % g, "").strip()):
-        assert p in self._partition_updates, \
-            "{} is in source super_{}_partition_list but no BlockDifference " \
-            "object is provided.".format(p, g)
-        self._partition_updates[p].src_group = g
-
-    target_dynamic_partitions = set(shlex.split(info_dict.get(
-        "dynamic_partition_list", "").strip()))
-    block_diffs_with_target = set(p for p, u in self._partition_updates.items()
-                                  if u.tgt_size)
-    assert block_diffs_with_target == target_dynamic_partitions, \
-        "Target Dynamic partitions: {}, BlockDifference with target: {}".format(
-            list(target_dynamic_partitions), list(block_diffs_with_target))
-
-    source_dynamic_partitions = set(shlex.split(source_info_dict.get(
-        "dynamic_partition_list", "").strip()))
-    block_diffs_with_source = set(p for p, u in self._partition_updates.items()
-                                  if u.src_size)
-    assert block_diffs_with_source == source_dynamic_partitions, \
-        "Source Dynamic partitions: {}, BlockDifference with source: {}".format(
-            list(source_dynamic_partitions), list(block_diffs_with_source))
-
-    if self._partition_updates:
-      logger.info("Updating dynamic partitions %s",
-                  self._partition_updates.keys())
-
-    self._group_updates = collections.OrderedDict()
-
-    for g in tgt_groups:
-      self._group_updates[g] = DynamicGroupUpdate()
-      self._group_updates[g].tgt_size = int(info_dict.get(
-          "super_%s_group_size" % g, "0").strip())
-
-    for g in src_groups:
-      if g not in self._group_updates:
-        self._group_updates[g] = DynamicGroupUpdate()
-      self._group_updates[g].src_size = int(source_info_dict.get(
-          "super_%s_group_size" % g, "0").strip())
-
-    self._Compute()
-
-  def WriteScript(self, script, output_zip, write_verify_script=False):
-    script.Comment('--- Start patching dynamic partitions ---')
-    for p, u in self._partition_updates.items():
-      if u.src_size and u.tgt_size and u.src_size > u.tgt_size:
-        script.Comment('Patch partition %s' % p)
-        u.block_difference.WriteScript(script, output_zip, progress=u.progress,
-                                       write_verify_script=False)
-
-    op_list_path = MakeTempFile()
-    with open(op_list_path, 'w') as f:
-      for line in self._op_list:
-        f.write('{}\n'.format(line))
-
-    ZipWrite(output_zip, op_list_path, "dynamic_partitions_op_list")
-
-    script.Comment('Update dynamic partition metadata')
-    script.AppendExtra('assert(update_dynamic_partitions('
-                       'package_extract_file("dynamic_partitions_op_list")));')
-
-    if write_verify_script:
-      for p, u in self._partition_updates.items():
-        if u.src_size and u.tgt_size and u.src_size > u.tgt_size:
-          u.block_difference.WritePostInstallVerifyScript(script)
-          script.AppendExtra('unmap_partition("%s");' % p)  # ignore errors
-
-    for p, u in self._partition_updates.items():
-      if u.tgt_size and u.src_size <= u.tgt_size:
-        script.Comment('Patch partition %s' % p)
-        u.block_difference.WriteScript(script, output_zip, progress=u.progress,
-                                       write_verify_script=write_verify_script)
-        if write_verify_script:
-          script.AppendExtra('unmap_partition("%s");' % p)  # ignore errors
-
-    script.Comment('--- End patching dynamic partitions ---')
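-
-    # Ordering note (implied by the branches above): partitions that shrink
-    # are patched before update_dynamic_partitions() resizes them down, while
-    # partitions that grow or stay the same size are patched afterwards, once
-    # the metadata update has made the extra space available.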
-
-  def _Compute(self):
-    self._op_list = list()
-
-    def append(line):
-      self._op_list.append(line)
-
-    def comment(line):
-      self._op_list.append("# %s" % line)
-
-    if self._remove_all_before_apply:
-      comment('Remove all existing dynamic partitions and groups before '
-              'applying full OTA')
-      append('remove_all_groups')
-
-    for p, u in self._partition_updates.items():
-      if u.src_group and not u.tgt_group:
-        append('remove %s' % p)
-
-    for p, u in self._partition_updates.items():
-      if u.src_group and u.tgt_group and u.src_group != u.tgt_group:
-        comment('Move partition %s from %s to default' % (p, u.src_group))
-        append('move %s default' % p)
-
-    for p, u in self._partition_updates.items():
-      if u.src_size and u.tgt_size and u.src_size > u.tgt_size:
-        comment('Shrink partition %s from %d to %d' %
-                (p, u.src_size, u.tgt_size))
-        append('resize %s %s' % (p, u.tgt_size))
-
-    for g, u in self._group_updates.items():
-      if u.src_size is not None and u.tgt_size is None:
-        append('remove_group %s' % g)
-      if (u.src_size is not None and u.tgt_size is not None and
-              u.src_size > u.tgt_size):
-        comment('Shrink group %s from %d to %d' % (g, u.src_size, u.tgt_size))
-        append('resize_group %s %d' % (g, u.tgt_size))
-
-    for g, u in self._group_updates.items():
-      if u.src_size is None and u.tgt_size is not None:
-        comment('Add group %s with maximum size %d' % (g, u.tgt_size))
-        append('add_group %s %d' % (g, u.tgt_size))
-      if (u.src_size is not None and u.tgt_size is not None and
-              u.src_size < u.tgt_size):
-        comment('Grow group %s from %d to %d' % (g, u.src_size, u.tgt_size))
-        append('resize_group %s %d' % (g, u.tgt_size))
-
-    for p, u in self._partition_updates.items():
-      if u.tgt_group and not u.src_group:
-        comment('Add partition %s to group %s' % (p, u.tgt_group))
-        append('add %s %s' % (p, u.tgt_group))
-
-    for p, u in self._partition_updates.items():
-      if u.tgt_size and u.src_size < u.tgt_size:
-        comment('Grow partition %s from %d to %d' %
-                (p, u.src_size, u.tgt_size))
-        append('resize %s %d' % (p, u.tgt_size))
-
-    for p, u in self._partition_updates.items():
-      if u.src_group and u.tgt_group and u.src_group != u.tgt_group:
-        comment('Move partition %s from default to %s' %
-                (p, u.tgt_group))
-        append('move %s %s' % (p, u.tgt_group))
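-
-  # Example op_list for a simple full OTA (illustrative names and sizes):
-  #
-  #   # Remove all existing dynamic partitions and groups before applying
-  #   # full OTA
-  #   remove_all_groups
-  #   add_group group_a 4294967296
-  #   add system group_a
-  #   resize system 3221225472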
-
-
-# map recovery.fstab's fs_types to mount/format "partition types"
-PARTITION_TYPES = {
-    "ext4": "EMMC",
-    "emmc": "EMMC",
-    "f2fs": "EMMC",
-    "squashfs": "EMMC",
-    "erofs": "EMMC"
-}
-
-
-def GetTypeAndDevice(mount_point, info, check_no_slot=True):
-  """
-  Use GetTypeAndDeviceExpr whenever possible. This function is kept for
-  backwards compatibility. It aborts if the fstab entry has the slotselect
-  option (unless check_no_slot is explicitly set to False).
-  """
-  fstab = info["fstab"]
-  if fstab:
-    if check_no_slot:
-      assert not fstab[mount_point].slotselect, \
-          "Use GetTypeAndDeviceExpr instead"
-    return (PARTITION_TYPES[fstab[mount_point].fs_type],
-            fstab[mount_point].device)
-  raise KeyError
-
-
-def GetTypeAndDeviceExpr(mount_point, info):
-  """
-  Returns the partition type of the mount point (as mapped from its fs_type
-  by PARTITION_TYPES) and an edify expression that evaluates to the device
-  path at runtime.
-  """
-  fstab = info["fstab"]
-  if fstab:
-    p = fstab[mount_point]
-    device_expr = '"%s"' % fstab[mount_point].device
-    if p.slotselect:
-      device_expr = 'add_slot_suffix(%s)' % device_expr
-    return (PARTITION_TYPES[fstab[mount_point].fs_type], device_expr)
-  raise KeyError
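-
-
-# Example (illustrative): for a slot-selected /boot entry in the fstab,
-# GetTypeAndDeviceExpr("/boot", info) returns something like
-#   ("EMMC", 'add_slot_suffix("/dev/block/by-name/boot")')
-# with the device path taken from the fstab entry.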