Update OTA scripts in tm-mainline-prod to the latest version.

The last change in tm-mainline-prod was last year (aosp/2040556);
this change copies the current version over from udc-dev. Among other
things, it picks up the --speed-limit and VABC flags in
update_device.py, the security_patch_level flag in
brillo_update_payload, the regenerated update_metadata_pb2.py, and the
removal of update_payload/applier.py.
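
Example of a local run exercising the new update_device.py flags
(hypothetical package path):

  scripts/update_device.py --speed-limit 10M --vabc-none ota.zip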

Test: local ota
Bug: 279622634
Change-Id: I6563122f21d7213bfa7200e28cdfa69bd95aa3e2
Merged-In: Iaa317a3a4b8addbca8ea987aee9953c78fa1a679
diff --git a/Android.bp b/Android.bp
index cace5b6..cdb492f 100644
--- a/Android.bp
+++ b/Android.bp
@@ -1170,3 +1170,11 @@
         "update_metadata-protos",
     ],
 }
+
+python_library_host {
+    name: "update_metadata-protos-python",
+    srcs: ["update_metadata.proto"],
+    proto: {
+        canonical_path_from_root: false,
+    },
+}
diff --git a/scripts/Android.bp b/scripts/Android.bp
new file mode 100644
index 0000000..1e854a7
--- /dev/null
+++ b/scripts/Android.bp
@@ -0,0 +1,60 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//
+// Module-specific defaults.
+//
+// For module X, if we need to build it both as a library and an executable:
+//  - A default rule `releasetools_X_defaults` is created, which lists `srcs`, `libs` and
+//    `required` properties.
+//  - `python_library_host` and `python_binary_host` are created by listing
+//    `releasetools_X_defaults` in their defaults.
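+//
+// A hypothetical sketch of the pattern (module name `foo` is illustrative):
+//
+//   python_defaults {
+//       name: "releasetools_foo_defaults",
+//       srcs: ["foo.py"],
+//   }
+//
+//   python_binary_host {
+//       name: "foo",
+//       defaults: ["releasetools_foo_defaults"],
+//   }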
+//
+
+package {
+    // See: http://go/android-license-faq
+    // A large-scale-change added 'default_applicable_licenses' to import
+    // all of the 'license_kinds' from "system_update_engine_license"
+    // to get the below license kinds:
+    //   SPDX-license-identifier-Apache-2.0
+    default_applicable_licenses: ["system_update_engine_license"],
+}
+
+filegroup {
+    name: "update_device_script",
+    srcs: [
+        "**/*.py",
+    ],
+    path: ".",
+}
+
+python_library_host {
+    name: "update_payload",
+
+    srcs: [
+        "update_payload/__init__.py",
+        "update_payload/payload.py",
+        "update_payload/checker.py",
+        "update_payload/common.py",
+        "update_payload/error.py",
+        "update_payload/histogram.py",
+        "update_payload/format_utils.py",
+    ],
+    proto: {
+        canonical_path_from_root: false,
+    },
+    libs: [
+        "update_metadata-protos-python",
+    ],
+}
diff --git a/scripts/brillo_update_payload b/scripts/brillo_update_payload
index b2d6080..6652b38 100755
--- a/scripts/brillo_update_payload
+++ b/scripts/brillo_update_payload
@@ -220,6 +220,8 @@
     "Required if --enabled_lz4diff true is passed. Path to liblz4.so. delta_generator will use this copy of liblz4.so for compression. It is important that this copy of liblz4.so is the same as the one on source build."
   DEFINE_string erofs_compression_param "" \
     "Compression parameter passed to mkfs.erofs's -z option."
+  DEFINE_string security_patch_level "" \
+    "Optional: security patch level of this OTA"
 fi
 if [[ "${COMMAND}" == "hash" || "${COMMAND}" == "sign" ]]; then
   DEFINE_string unsigned_payload "" "Path to the input unsigned payload."
@@ -776,6 +778,10 @@
     GENERATOR_ARGS+=( --max_timestamp="${FLAGS_max_timestamp}" )
   fi
 
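+  # Pass the security patch level through to delta_generator (cf. the new
+  # security_patch_level manifest field in update_metadata.proto).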
+  if [[ -n "${FLAGS_security_patch_level}" ]]; then
+    GENERATOR_ARGS+=( --security_patch_level="${FLAGS_security_patch_level}" )
+  fi
+
   if [[ -n "${FLAGS_partition_timestamps}" ]]; then
     GENERATOR_ARGS+=( --partition_timestamps="${FLAGS_partition_timestamps}" )
   fi
diff --git a/scripts/simulate_ota.py b/scripts/simulate_ota.py
old mode 100644
new mode 100755
index bf1fc98..0e5a21b
--- a/scripts/simulate_ota.py
+++ b/scripts/simulate_ota.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
 #
 # Copyright (C) 2020 The Android Open Source Project
 #
@@ -17,8 +18,6 @@
 """Tools for running host side simulation of an OTA update."""
 
 
-from __future__ import print_function
-
 import argparse
 import filecmp
 import os
@@ -49,7 +48,8 @@
     return fp.read(4) == b'\x3A\xFF\x26\xED'
 
 
-def extract_img(zip_archive: zipfile.ZipFile, img_name, output_path):
+def extract_img(zip_archive: zipfile.ZipFile, img_name, output_path, is_source):
+  """Extract and unsparse a partition image from a zip archive."""
   entry_name = "IMAGES/" + img_name + ".img"
   try:
     extract_file(zip_archive, entry_name, output_path)
@@ -61,6 +61,22 @@
     subprocess.check_output(["simg2img", output_path, raw_img_path])
     os.rename(raw_img_path, output_path)
 
+  # delta_generator only supports images whose size is a multiple of 4 KiB.
+  # For target images we pad the data with zeros if needed, but for source
+  # images we truncate the data, since the last block of the old image could
+  # be padded on disk with unknown data.
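+  # For example, with file_size == 4097: 4097 & -4096 == 4096 (round down)
+  # and (4097 + 4095) & -4096 == 8192 (round up); -4096 in two's complement
+  # is a mask that clears the low 12 bits.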
+  file_size = os.path.getsize(output_path)
+  if file_size % 4096 != 0:
+    if is_source:
+      print("Rounding DOWN partition {} to a multiple of 4 KiB."
+            .format(output_path))
+      file_size = file_size & -4096
+    else:
+      print("Rounding UP partition {} to a multiple of 4 KiB."
+            .format(output_path))
+      file_size = (file_size + 4095) & -4096
+    with open(output_path, 'a') as f:
+      f.truncate(file_size)
 
 def run_ota(source, target, payload_path, tempdir, output_dir):
   """Run an OTA on host side"""
@@ -87,10 +103,10 @@
           "source target file must point to a valid zipfile or directory " + \
           source
       print("Extracting source image for", name)
-      extract_img(source, name, old_image)
+      extract_img(source, name, old_image, True)
     if target_exist:
       print("Extracting target image for", name)
-      extract_img(target, name, new_image)
+      extract_img(target, name, new_image, False)
 
     old_partitions.append(old_image)
     scratch_image_name = new_image + ".actual"
diff --git a/scripts/trim_ota_package.py b/scripts/trim_ota_package.py
index df7f170..8bf2182 100644
--- a/scripts/trim_ota_package.py
+++ b/scripts/trim_ota_package.py
@@ -27,7 +27,7 @@
   outfile = argv[2]
   with zipfile.ZipFile(infile, "r") as inzfp, zipfile.ZipFile(outfile, "w") as outzfp:
     for entry in inzfp.infolist():
-      if entry.filename.startswith("META") or entry.filename.endswith(".map"):
+      if entry.filename.startswith("META") or entry.filename.endswith(".map") or entry.filename.endswith(".prop"):
         outzfp.writestr(entry, inzfp.read(entry))
       elif entry.filename == "payload.bin":
         outzfp.writestr(entry, readPayloadMetadata(inzfp, entry))
diff --git a/scripts/update_device.py b/scripts/update_device.py
index 72cee49..f94774b 100755
--- a/scripts/update_device.py
+++ b/scripts/update_device.py
@@ -25,6 +25,7 @@
 import hashlib
 import logging
 import os
+import re
 import socket
 import subprocess
 import sys
@@ -50,7 +51,7 @@
 DEVICE_PORT = 1234
 
 
-def CopyFileObjLength(fsrc, fdst, buffer_size=128 * 1024, copy_length=None):
+def CopyFileObjLength(fsrc, fdst, buffer_size=128 * 1024, copy_length=None, speed_limit=None):
   """Copy from a file object to another.
 
   This function is similar to shutil.copyfileobj except that it allows to copy
@@ -61,10 +62,18 @@
     fdst: destination file object where to write to.
     buffer_size: size of the copy buffer in memory.
     copy_length: maximum number of bytes to copy, or None to copy everything.
+    speed_limit: upper limit for copying speed, in bytes per second.
 
   Returns:
     the number of bytes copied.
   """
+  # If the buffer size is significantly bigger than the speed limit,
+  # traffic will look extremely spiky to the client.
+  if speed_limit:
+    print(f"Applying speed limit: {speed_limit}")
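+    # speed_limit // 32 keeps each chunk small, so at the limit the payload
+    # is sent in roughly 32 chunks per second rather than one large burst.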
+    buffer_size = min(speed_limit//32, buffer_size)
+
+  start_time = time.time()
   copied = 0
   while True:
     chunk_size = buffer_size
@@ -75,6 +84,11 @@
     buf = fsrc.read(chunk_size)
     if not buf:
       break
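+    # If we are ahead of schedule (less time has elapsed than
+    # copied/speed_limit seconds), sleep off the difference so the
+    # average rate stays at the cap.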
+    if speed_limit:
+      expected_duration = copied/speed_limit
+      actual_duration = time.time() - start_time
+      if actual_duration < expected_duration:
+        time.sleep(expected_duration-actual_duration)
     fdst.write(buf)
     copied += len(buf)
   return copied
@@ -211,7 +225,8 @@
     self.end_headers()
 
     f.seek(serving_start + start_range)
-    CopyFileObjLength(f, self.wfile, copy_length=end_range - start_range)
+    CopyFileObjLength(f, self.wfile, copy_length=end_range - start_range,
+                      speed_limit=self.speed_limit)
 
   def do_POST(self):  # pylint: disable=invalid-name
     """Reply with the omaha response xml."""
@@ -291,12 +306,13 @@
 class ServerThread(threading.Thread):
   """A thread for serving HTTP requests."""
 
-  def __init__(self, ota_filename, serving_range):
+  def __init__(self, ota_filename, serving_range, speed_limit):
     threading.Thread.__init__(self)
     # serving_payload and serving_range are class attributes and the
     # UpdateHandler class is instantiated with every request.
     UpdateHandler.serving_payload = ota_filename
     UpdateHandler.serving_range = serving_range
+    UpdateHandler.speed_limit = speed_limit
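+    # speed_limit is likewise stored on the class, since UpdateHandler is
+    # instantiated anew for every request.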
     self._httpd = BaseHTTPServer.HTTPServer(('127.0.0.1', 0), UpdateHandler)
     self.port = self._httpd.server_port
 
@@ -312,8 +328,8 @@
     self._httpd.socket.close()
 
 
-def StartServer(ota_filename, serving_range):
-  t = ServerThread(ota_filename, serving_range)
+def StartServer(ota_filename, serving_range, speed_limit):
+  t = ServerThread(ota_filename, serving_range, speed_limit)
   t.start()
   return t
 
@@ -408,6 +424,27 @@
       ]) == 0
 
 
+def ParseSpeedLimit(arg: str) -> int:
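+  """Parse a speed-limit string such as '10K' or '5m' into bytes per second."""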
+  arg = arg.strip().upper()
+  if not re.match(r"\d+[KMGT]?", arg):
+    raise argparse.ArgumentTypeError(
+        "Wrong speed limit format; expected a number followed by an optional unit, such as 10K, 5m, 3G (case insensitive)")
+  unit = 1
+  if arg[-1].isalpha():
+    if arg[-1] == "K":
+      unit = 1024
+    elif arg[-1] == "M":
+      unit = 1024 * 1024
+    elif arg[-1] == "G":
+      unit = 1024 * 1024 * 1024
+    elif arg[-1] == "T":
+      unit = 1024 * 1024 * 1024 * 1024
+    else:
+      raise argparse.ArgumentTypeError(
+          f"Unsupported unit for download speed: {arg[-1]}; supported units are K, M, G, T (case insensitive)")
+  return int(arg[:-1] if arg[-1].isalpha() else arg) * unit
+
+
 def main():
   parser = argparse.ArgumentParser(description='Android A/B OTA helper.')
   parser.add_argument('otafile', metavar='PAYLOAD', type=str,
@@ -444,7 +481,22 @@
                       help='Perform reset slot switch for this OTA package')
   parser.add_argument('--wipe-user-data', action='store_true',
                       help='Wipe userdata after installing OTA')
+  parser.add_argument('--vabc-none', action='store_true',
+                      help='Set Virtual AB Compression algorithm to none, but still use Android COW format')
+  parser.add_argument('--disable-vabc', action='store_true',
+                      help='Disable Virtual AB Compression (VABC) and fall back to regular A/B')
+  parser.add_argument('--enable-threading', action='store_true',
+                      help='Enable multi-threaded compression for VABC')
+  parser.add_argument('--batched-writes', action='store_true',
+                      help='Enable batched writes for VABC')
+  parser.add_argument('--speed-limit', type=str,
+                      help='Speed limit for serving payloads over HTTP. For '
+                      'example: 10K, 5m, 1G, input is case insensitive')
+
   args = parser.parse_args()
+  if args.speed_limit:
+    args.speed_limit = ParseSpeedLimit(args.speed_limit)
+
   logging.basicConfig(
       level=logging.WARNING if args.no_verbose else logging.INFO)
 
@@ -497,6 +549,14 @@
     args.extra_headers += "\nRUN_POST_INSTALL=0"
   if args.wipe_user_data:
     args.extra_headers += "\nPOWERWASH=1"
+  if args.vabc_none:
+    args.extra_headers += "\nVABC_NONE=1"
+  if args.disable_vabc:
+    args.extra_headers += "\nDISABLE_VABC=1"
+  if args.enable_threading:
+    args.extra_headers += "\nENABLE_THREADING=1"
+  if args.batched_writes:
+    args.extra_headers += "\nBATCHED_WRITES=1"
 
   with zipfile.ZipFile(args.otafile) as zfp:
     CARE_MAP_ENTRY_NAME = "care_map.pb"
@@ -531,7 +591,7 @@
       serving_range = (ota.offset, ota.size)
     else:
       serving_range = (0, os.stat(args.otafile).st_size)
-    server_thread = StartServer(args.otafile, serving_range)
+    server_thread = StartServer(args.otafile, serving_range, args.speed_limit)
     cmds.append(
         ['reverse', 'tcp:%d' % DEVICE_PORT, 'tcp:%d' % server_thread.port])
     finalize_cmds.append(['reverse', '--remove', 'tcp:%d' % DEVICE_PORT])
diff --git a/scripts/update_payload/update_metadata_pb2.py b/scripts/update_metadata_pb2.py
similarity index 73%
rename from scripts/update_payload/update_metadata_pb2.py
rename to scripts/update_metadata_pb2.py
index b62a67a..3cf2a0b 100644
--- a/scripts/update_payload/update_metadata_pb2.py
+++ b/scripts/update_metadata_pb2.py
@@ -20,7 +20,7 @@
   package='chromeos_update_engine',
   syntax='proto2',
   serialized_options=_b('H\003'),
-  serialized_pb=_b('\n\x15update_metadata.proto\x12\x16\x63hromeos_update_engine\"1\n\x06\x45xtent\x12\x13\n\x0bstart_block\x18\x01 \x01(\x04\x12\x12\n\nnum_blocks\x18\x02 \x01(\x04\"\x9f\x01\n\nSignatures\x12@\n\nsignatures\x18\x01 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x1aO\n\tSignature\x12\x13\n\x07version\x18\x01 \x01(\rB\x02\x18\x01\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\x12\x1f\n\x17unpadded_signature_size\x18\x03 \x01(\x07\"+\n\rPartitionInfo\x12\x0c\n\x04size\x18\x01 \x01(\x04\x12\x0c\n\x04hash\x18\x02 \x01(\x0c\"\x8f\x01\n\tImageInfo\x12\x11\n\x05\x62oard\x18\x01 \x01(\tB\x02\x18\x01\x12\x0f\n\x03key\x18\x02 \x01(\tB\x02\x18\x01\x12\x13\n\x07\x63hannel\x18\x03 \x01(\tB\x02\x18\x01\x12\x13\n\x07version\x18\x04 \x01(\tB\x02\x18\x01\x12\x19\n\rbuild_channel\x18\x05 \x01(\tB\x02\x18\x01\x12\x19\n\rbuild_version\x18\x06 \x01(\tB\x02\x18\x01\"\xfc\x03\n\x10InstallOperation\x12;\n\x04type\x18\x01 \x02(\x0e\x32-.chromeos_update_engine.InstallOperation.Type\x12\x13\n\x0b\x64\x61ta_offset\x18\x02 \x01(\x04\x12\x13\n\x0b\x64\x61ta_length\x18\x03 \x01(\x04\x12\x33\n\x0bsrc_extents\x18\x04 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\nsrc_length\x18\x05 \x01(\x04\x12\x33\n\x0b\x64st_extents\x18\x06 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\ndst_length\x18\x07 \x01(\x04\x12\x18\n\x10\x64\x61ta_sha256_hash\x18\x08 \x01(\x0c\x12\x17\n\x0fsrc_sha256_hash\x18\t \x01(\x0c\"\xbb\x01\n\x04Type\x12\x0b\n\x07REPLACE\x10\x00\x12\x0e\n\nREPLACE_BZ\x10\x01\x12\x0c\n\x04MOVE\x10\x02\x1a\x02\x08\x01\x12\x0e\n\x06\x42SDIFF\x10\x03\x1a\x02\x08\x01\x12\x0f\n\x0bSOURCE_COPY\x10\x04\x12\x11\n\rSOURCE_BSDIFF\x10\x05\x12\x0e\n\nREPLACE_XZ\x10\x08\x12\x08\n\x04ZERO\x10\x06\x12\x0b\n\x07\x44ISCARD\x10\x07\x12\x11\n\rBROTLI_BSDIFF\x10\n\x12\x0c\n\x08PUFFDIFF\x10\t\x12\x0c\n\x08ZUCCHINI\x10\x0b\"\x81\x02\n\x11\x43owMergeOperation\x12<\n\x04type\x18\x01 \x01(\x0e\x32..chromeos_update_engine.CowMergeOperation.Type\x12\x32\n\nsrc_extent\x18\x02 \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x32\n\ndst_extent\x18\x03 \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\nsrc_offset\x18\x04 \x01(\r\"2\n\x04Type\x12\x0c\n\x08\x43OW_COPY\x10\x00\x12\x0b\n\x07\x43OW_XOR\x10\x01\x12\x0f\n\x0b\x43OW_REPLACE\x10\x02\"\xc8\x06\n\x0fPartitionUpdate\x12\x16\n\x0epartition_name\x18\x01 \x02(\t\x12\x17\n\x0frun_postinstall\x18\x02 \x01(\x08\x12\x18\n\x10postinstall_path\x18\x03 \x01(\t\x12\x17\n\x0f\x66ilesystem_type\x18\x04 \x01(\t\x12M\n\x17new_partition_signature\x18\x05 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x12\x41\n\x12old_partition_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12\x41\n\x12new_partition_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12<\n\noperations\x18\x08 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12\x1c\n\x14postinstall_optional\x18\t \x01(\x08\x12=\n\x15hash_tree_data_extent\x18\n \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x38\n\x10hash_tree_extent\x18\x0b \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x1b\n\x13hash_tree_algorithm\x18\x0c \x01(\t\x12\x16\n\x0ehash_tree_salt\x18\r \x01(\x0c\x12\x37\n\x0f\x66\x65\x63_data_extent\x18\x0e \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x32\n\nfec_extent\x18\x0f \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x14\n\tfec_roots\x18\x10 \x01(\r:\x01\x32\x12\x0f\n\x07version\x18\x11 \x01(\t\x12\x43\n\x10merge_operations\x18\x12 \x03(\x0b\x32).chromeos_update_engine.CowMergeOperation\x12\x19\n\x11\x65stimate_cow_size\x18\x13 \x01(\x04\"L\n\x15\x44ynamicPartitionGroup\x12\x0c\n\x04name\x18\x01 \x02(\t\x12\x0c\n\x04size\x18\x02 \x01(\x04\x12\x17\n\x0fpartition_names\x18\x03 \x03(\t\"\xbe\x01\n\x18\x44ynamicPartitionMetadata\x12=\n\x06groups\x18\x01 \x03(\x0b\x32-.chromeos_update_engine.DynamicPartitionGroup\x12\x18\n\x10snapshot_enabled\x18\x02 \x01(\x08\x12\x14\n\x0cvabc_enabled\x18\x03 \x01(\x08\x12\x1e\n\x16vabc_compression_param\x18\x04 \x01(\t\x12\x13\n\x0b\x63ow_version\x18\x05 \x01(\r\"c\n\x08\x41pexInfo\x12\x14\n\x0cpackage_name\x18\x01 \x01(\t\x12\x0f\n\x07version\x18\x02 \x01(\x03\x12\x15\n\ris_compressed\x18\x03 \x01(\x08\x12\x19\n\x11\x64\x65\x63ompressed_size\x18\x04 \x01(\x03\"C\n\x0c\x41pexMetadata\x12\x33\n\tapex_info\x18\x01 \x03(\x0b\x32 .chromeos_update_engine.ApexInfo\"\x9e\x07\n\x14\x44\x65ltaArchiveManifest\x12H\n\x12install_operations\x18\x01 \x03(\x0b\x32(.chromeos_update_engine.InstallOperationB\x02\x18\x01\x12O\n\x19kernel_install_operations\x18\x02 \x03(\x0b\x32(.chromeos_update_engine.InstallOperationB\x02\x18\x01\x12\x18\n\nblock_size\x18\x03 \x01(\r:\x04\x34\x30\x39\x36\x12\x19\n\x11signatures_offset\x18\x04 \x01(\x04\x12\x17\n\x0fsignatures_size\x18\x05 \x01(\x04\x12\x42\n\x0fold_kernel_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfoB\x02\x18\x01\x12\x42\n\x0fnew_kernel_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfoB\x02\x18\x01\x12\x42\n\x0fold_rootfs_info\x18\x08 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfoB\x02\x18\x01\x12\x42\n\x0fnew_rootfs_info\x18\t \x01(\x0b\x32%.chromeos_update_engine.PartitionInfoB\x02\x18\x01\x12=\n\x0eold_image_info\x18\n \x01(\x0b\x32!.chromeos_update_engine.ImageInfoB\x02\x18\x01\x12=\n\x0enew_image_info\x18\x0b \x01(\x0b\x32!.chromeos_update_engine.ImageInfoB\x02\x18\x01\x12\x18\n\rminor_version\x18\x0c \x01(\r:\x01\x30\x12;\n\npartitions\x18\r \x03(\x0b\x32\'.chromeos_update_engine.PartitionUpdate\x12\x15\n\rmax_timestamp\x18\x0e \x01(\x03\x12T\n\x1a\x64ynamic_partition_metadata\x18\x0f \x01(\x0b\x32\x30.chromeos_update_engine.DynamicPartitionMetadata\x12\x16\n\x0epartial_update\x18\x10 \x01(\x08\x12\x33\n\tapex_info\x18\x11 \x03(\x0b\x32 .chromeos_update_engine.ApexInfoB\x02H\x03')
+  serialized_pb=_b('\n\x15update_metadata.proto\x12\x16\x63hromeos_update_engine\"1\n\x06\x45xtent\x12\x13\n\x0bstart_block\x18\x01 \x01(\x04\x12\x12\n\nnum_blocks\x18\x02 \x01(\x04\"\x9f\x01\n\nSignatures\x12@\n\nsignatures\x18\x01 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x1aO\n\tSignature\x12\x13\n\x07version\x18\x01 \x01(\rB\x02\x18\x01\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\x12\x1f\n\x17unpadded_signature_size\x18\x03 \x01(\x07\"+\n\rPartitionInfo\x12\x0c\n\x04size\x18\x01 \x01(\x04\x12\x0c\n\x04hash\x18\x02 \x01(\x0c\"\xa6\x04\n\x10InstallOperation\x12;\n\x04type\x18\x01 \x02(\x0e\x32-.chromeos_update_engine.InstallOperation.Type\x12\x13\n\x0b\x64\x61ta_offset\x18\x02 \x01(\x04\x12\x13\n\x0b\x64\x61ta_length\x18\x03 \x01(\x04\x12\x33\n\x0bsrc_extents\x18\x04 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\nsrc_length\x18\x05 \x01(\x04\x12\x33\n\x0b\x64st_extents\x18\x06 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\ndst_length\x18\x07 \x01(\x04\x12\x18\n\x10\x64\x61ta_sha256_hash\x18\x08 \x01(\x0c\x12\x17\n\x0fsrc_sha256_hash\x18\t \x01(\x0c\"\xe5\x01\n\x04Type\x12\x0b\n\x07REPLACE\x10\x00\x12\x0e\n\nREPLACE_BZ\x10\x01\x12\x0c\n\x04MOVE\x10\x02\x1a\x02\x08\x01\x12\x0e\n\x06\x42SDIFF\x10\x03\x1a\x02\x08\x01\x12\x0f\n\x0bSOURCE_COPY\x10\x04\x12\x11\n\rSOURCE_BSDIFF\x10\x05\x12\x0e\n\nREPLACE_XZ\x10\x08\x12\x08\n\x04ZERO\x10\x06\x12\x0b\n\x07\x44ISCARD\x10\x07\x12\x11\n\rBROTLI_BSDIFF\x10\n\x12\x0c\n\x08PUFFDIFF\x10\t\x12\x0c\n\x08ZUCCHINI\x10\x0b\x12\x12\n\x0eLZ4DIFF_BSDIFF\x10\x0c\x12\x14\n\x10LZ4DIFF_PUFFDIFF\x10\r\"\x81\x02\n\x11\x43owMergeOperation\x12<\n\x04type\x18\x01 \x01(\x0e\x32..chromeos_update_engine.CowMergeOperation.Type\x12\x32\n\nsrc_extent\x18\x02 \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x32\n\ndst_extent\x18\x03 \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\nsrc_offset\x18\x04 \x01(\r\"2\n\x04Type\x12\x0c\n\x08\x43OW_COPY\x10\x00\x12\x0b\n\x07\x43OW_XOR\x10\x01\x12\x0f\n\x0b\x43OW_REPLACE\x10\x02\"\xc8\x06\n\x0fPartitionUpdate\x12\x16\n\x0epartition_name\x18\x01 \x02(\t\x12\x17\n\x0frun_postinstall\x18\x02 \x01(\x08\x12\x18\n\x10postinstall_path\x18\x03 \x01(\t\x12\x17\n\x0f\x66ilesystem_type\x18\x04 \x01(\t\x12M\n\x17new_partition_signature\x18\x05 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x12\x41\n\x12old_partition_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12\x41\n\x12new_partition_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12<\n\noperations\x18\x08 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12\x1c\n\x14postinstall_optional\x18\t \x01(\x08\x12=\n\x15hash_tree_data_extent\x18\n \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x38\n\x10hash_tree_extent\x18\x0b \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x1b\n\x13hash_tree_algorithm\x18\x0c \x01(\t\x12\x16\n\x0ehash_tree_salt\x18\r \x01(\x0c\x12\x37\n\x0f\x66\x65\x63_data_extent\x18\x0e \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x32\n\nfec_extent\x18\x0f \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x14\n\tfec_roots\x18\x10 \x01(\r:\x01\x32\x12\x0f\n\x07version\x18\x11 \x01(\t\x12\x43\n\x10merge_operations\x18\x12 \x03(\x0b\x32).chromeos_update_engine.CowMergeOperation\x12\x19\n\x11\x65stimate_cow_size\x18\x13 \x01(\x04\"L\n\x15\x44ynamicPartitionGroup\x12\x0c\n\x04name\x18\x01 \x02(\t\x12\x0c\n\x04size\x18\x02 \x01(\x04\x12\x17\n\x0fpartition_names\x18\x03 \x03(\t\"\xbe\x01\n\x18\x44ynamicPartitionMetadata\x12=\n\x06groups\x18\x01 \x03(\x0b\x32-.chromeos_update_engine.DynamicPartitionGroup\x12\x18\n\x10snapshot_enabled\x18\x02 \x01(\x08\x12\x14\n\x0cvabc_enabled\x18\x03 \x01(\x08\x12\x1e\n\x16vabc_compression_param\x18\x04 \x01(\t\x12\x13\n\x0b\x63ow_version\x18\x05 \x01(\r\"c\n\x08\x41pexInfo\x12\x14\n\x0cpackage_name\x18\x01 \x01(\t\x12\x0f\n\x07version\x18\x02 \x01(\x03\x12\x15\n\ris_compressed\x18\x03 \x01(\x08\x12\x19\n\x11\x64\x65\x63ompressed_size\x18\x04 \x01(\x03\"C\n\x0c\x41pexMetadata\x12\x33\n\tapex_info\x18\x01 \x03(\x0b\x32 .chromeos_update_engine.ApexInfo\"\xc3\x03\n\x14\x44\x65ltaArchiveManifest\x12\x18\n\nblock_size\x18\x03 \x01(\r:\x04\x34\x30\x39\x36\x12\x19\n\x11signatures_offset\x18\x04 \x01(\x04\x12\x17\n\x0fsignatures_size\x18\x05 \x01(\x04\x12\x18\n\rminor_version\x18\x0c \x01(\r:\x01\x30\x12;\n\npartitions\x18\r \x03(\x0b\x32\'.chromeos_update_engine.PartitionUpdate\x12\x15\n\rmax_timestamp\x18\x0e \x01(\x03\x12T\n\x1a\x64ynamic_partition_metadata\x18\x0f \x01(\x0b\x32\x30.chromeos_update_engine.DynamicPartitionMetadata\x12\x16\n\x0epartial_update\x18\x10 \x01(\x08\x12\x33\n\tapex_info\x18\x11 \x03(\x0b\x32 .chromeos_update_engine.ApexInfo\x12\x1c\n\x14security_patch_level\x18\x12 \x01(\tJ\x04\x08\x01\x10\x02J\x04\x08\x02\x10\x03J\x04\x08\x06\x10\x07J\x04\x08\x07\x10\x08J\x04\x08\x08\x10\tJ\x04\x08\t\x10\nJ\x04\x08\n\x10\x0bJ\x04\x08\x0b\x10\x0c\x42\x02H\x03')
 )
 
 
@@ -79,11 +79,19 @@
       name='ZUCCHINI', index=11, number=11,
       serialized_options=None,
       type=None),
+    _descriptor.EnumValueDescriptor(
+      name='LZ4DIFF_BSDIFF', index=12, number=12,
+      serialized_options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='LZ4DIFF_PUFFDIFF', index=13, number=13,
+      serialized_options=None,
+      type=None),
   ],
   containing_type=None,
   serialized_options=None,
-  serialized_start=775,
-  serialized_end=962,
+  serialized_start=629,
+  serialized_end=858,
 )
 _sym_db.RegisterEnumDescriptor(_INSTALLOPERATION_TYPE)
 
@@ -108,8 +116,8 @@
   ],
   containing_type=None,
   serialized_options=None,
-  serialized_start=1172,
-  serialized_end=1222,
+  serialized_start=1068,
+  serialized_end=1118,
 )
 _sym_db.RegisterEnumDescriptor(_COWMERGEOPERATION_TYPE)
 
@@ -265,72 +273,6 @@
 )
 
 
-_IMAGEINFO = _descriptor.Descriptor(
-  name='ImageInfo',
-  full_name='chromeos_update_engine.ImageInfo',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='board', full_name='chromeos_update_engine.ImageInfo.board', index=0,
-      number=1, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      serialized_options=_b('\030\001'), file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='key', full_name='chromeos_update_engine.ImageInfo.key', index=1,
-      number=2, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      serialized_options=_b('\030\001'), file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='channel', full_name='chromeos_update_engine.ImageInfo.channel', index=2,
-      number=3, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      serialized_options=_b('\030\001'), file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='version', full_name='chromeos_update_engine.ImageInfo.version', index=3,
-      number=4, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      serialized_options=_b('\030\001'), file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='build_channel', full_name='chromeos_update_engine.ImageInfo.build_channel', index=4,
-      number=5, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      serialized_options=_b('\030\001'), file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='build_version', full_name='chromeos_update_engine.ImageInfo.build_version', index=5,
-      number=6, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      serialized_options=_b('\030\001'), file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  serialized_options=None,
-  is_extendable=False,
-  syntax='proto2',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=308,
-  serialized_end=451,
-)
-
-
 _INSTALLOPERATION = _descriptor.Descriptor(
   name='InstallOperation',
   full_name='chromeos_update_engine.InstallOperation',
@@ -414,8 +356,8 @@
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=454,
-  serialized_end=962,
+  serialized_start=308,
+  serialized_end=858,
 )
 
 
@@ -467,8 +409,8 @@
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=965,
-  serialized_end=1222,
+  serialized_start=861,
+  serialized_end=1118,
 )
 
 
@@ -624,8 +566,8 @@
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=1225,
-  serialized_end=2065,
+  serialized_start=1121,
+  serialized_end=1961,
 )
 
 
@@ -669,8 +611,8 @@
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=2067,
-  serialized_end=2143,
+  serialized_start=1963,
+  serialized_end=2039,
 )
 
 
@@ -728,8 +670,8 @@
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=2146,
-  serialized_end=2336,
+  serialized_start=2042,
+  serialized_end=2232,
 )
 
 
@@ -780,8 +722,8 @@
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=2338,
-  serialized_end=2437,
+  serialized_start=2234,
+  serialized_end=2333,
 )
 
 
@@ -811,8 +753,8 @@
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=2439,
-  serialized_end=2506,
+  serialized_start=2335,
+  serialized_end=2402,
 )
 
 
@@ -824,124 +766,75 @@
   containing_type=None,
   fields=[
     _descriptor.FieldDescriptor(
-      name='install_operations', full_name='chromeos_update_engine.DeltaArchiveManifest.install_operations', index=0,
-      number=1, type=11, cpp_type=10, label=3,
-      has_default_value=False, default_value=[],
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      serialized_options=_b('\030\001'), file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='kernel_install_operations', full_name='chromeos_update_engine.DeltaArchiveManifest.kernel_install_operations', index=1,
-      number=2, type=11, cpp_type=10, label=3,
-      has_default_value=False, default_value=[],
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      serialized_options=_b('\030\001'), file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='block_size', full_name='chromeos_update_engine.DeltaArchiveManifest.block_size', index=2,
+      name='block_size', full_name='chromeos_update_engine.DeltaArchiveManifest.block_size', index=0,
       number=3, type=13, cpp_type=3, label=1,
       has_default_value=True, default_value=4096,
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
       serialized_options=None, file=DESCRIPTOR),
     _descriptor.FieldDescriptor(
-      name='signatures_offset', full_name='chromeos_update_engine.DeltaArchiveManifest.signatures_offset', index=3,
+      name='signatures_offset', full_name='chromeos_update_engine.DeltaArchiveManifest.signatures_offset', index=1,
       number=4, type=4, cpp_type=4, label=1,
       has_default_value=False, default_value=0,
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
       serialized_options=None, file=DESCRIPTOR),
     _descriptor.FieldDescriptor(
-      name='signatures_size', full_name='chromeos_update_engine.DeltaArchiveManifest.signatures_size', index=4,
+      name='signatures_size', full_name='chromeos_update_engine.DeltaArchiveManifest.signatures_size', index=2,
       number=5, type=4, cpp_type=4, label=1,
       has_default_value=False, default_value=0,
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
       serialized_options=None, file=DESCRIPTOR),
     _descriptor.FieldDescriptor(
-      name='old_kernel_info', full_name='chromeos_update_engine.DeltaArchiveManifest.old_kernel_info', index=5,
-      number=6, type=11, cpp_type=10, label=1,
-      has_default_value=False, default_value=None,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      serialized_options=_b('\030\001'), file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='new_kernel_info', full_name='chromeos_update_engine.DeltaArchiveManifest.new_kernel_info', index=6,
-      number=7, type=11, cpp_type=10, label=1,
-      has_default_value=False, default_value=None,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      serialized_options=_b('\030\001'), file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='old_rootfs_info', full_name='chromeos_update_engine.DeltaArchiveManifest.old_rootfs_info', index=7,
-      number=8, type=11, cpp_type=10, label=1,
-      has_default_value=False, default_value=None,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      serialized_options=_b('\030\001'), file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='new_rootfs_info', full_name='chromeos_update_engine.DeltaArchiveManifest.new_rootfs_info', index=8,
-      number=9, type=11, cpp_type=10, label=1,
-      has_default_value=False, default_value=None,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      serialized_options=_b('\030\001'), file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='old_image_info', full_name='chromeos_update_engine.DeltaArchiveManifest.old_image_info', index=9,
-      number=10, type=11, cpp_type=10, label=1,
-      has_default_value=False, default_value=None,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      serialized_options=_b('\030\001'), file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='new_image_info', full_name='chromeos_update_engine.DeltaArchiveManifest.new_image_info', index=10,
-      number=11, type=11, cpp_type=10, label=1,
-      has_default_value=False, default_value=None,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      serialized_options=_b('\030\001'), file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='minor_version', full_name='chromeos_update_engine.DeltaArchiveManifest.minor_version', index=11,
+      name='minor_version', full_name='chromeos_update_engine.DeltaArchiveManifest.minor_version', index=3,
       number=12, type=13, cpp_type=3, label=1,
       has_default_value=True, default_value=0,
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
       serialized_options=None, file=DESCRIPTOR),
     _descriptor.FieldDescriptor(
-      name='partitions', full_name='chromeos_update_engine.DeltaArchiveManifest.partitions', index=12,
+      name='partitions', full_name='chromeos_update_engine.DeltaArchiveManifest.partitions', index=4,
       number=13, type=11, cpp_type=10, label=3,
       has_default_value=False, default_value=[],
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
       serialized_options=None, file=DESCRIPTOR),
     _descriptor.FieldDescriptor(
-      name='max_timestamp', full_name='chromeos_update_engine.DeltaArchiveManifest.max_timestamp', index=13,
+      name='max_timestamp', full_name='chromeos_update_engine.DeltaArchiveManifest.max_timestamp', index=5,
       number=14, type=3, cpp_type=2, label=1,
       has_default_value=False, default_value=0,
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
       serialized_options=None, file=DESCRIPTOR),
     _descriptor.FieldDescriptor(
-      name='dynamic_partition_metadata', full_name='chromeos_update_engine.DeltaArchiveManifest.dynamic_partition_metadata', index=14,
+      name='dynamic_partition_metadata', full_name='chromeos_update_engine.DeltaArchiveManifest.dynamic_partition_metadata', index=6,
       number=15, type=11, cpp_type=10, label=1,
       has_default_value=False, default_value=None,
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
       serialized_options=None, file=DESCRIPTOR),
     _descriptor.FieldDescriptor(
-      name='partial_update', full_name='chromeos_update_engine.DeltaArchiveManifest.partial_update', index=15,
+      name='partial_update', full_name='chromeos_update_engine.DeltaArchiveManifest.partial_update', index=7,
       number=16, type=8, cpp_type=7, label=1,
       has_default_value=False, default_value=False,
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
       serialized_options=None, file=DESCRIPTOR),
     _descriptor.FieldDescriptor(
-      name='apex_info', full_name='chromeos_update_engine.DeltaArchiveManifest.apex_info', index=16,
+      name='apex_info', full_name='chromeos_update_engine.DeltaArchiveManifest.apex_info', index=8,
       number=17, type=11, cpp_type=10, label=3,
       has_default_value=False, default_value=[],
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
       serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='security_patch_level', full_name='chromeos_update_engine.DeltaArchiveManifest.security_patch_level', index=9,
+      number=18, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
   ],
   extensions=[
   ],
@@ -954,8 +847,8 @@
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=2509,
-  serialized_end=3435,
+  serialized_start=2405,
+  serialized_end=2856,
 )
 
 _SIGNATURES_SIGNATURE.containing_type = _SIGNATURES
@@ -979,21 +872,12 @@
 _PARTITIONUPDATE.fields_by_name['merge_operations'].message_type = _COWMERGEOPERATION
 _DYNAMICPARTITIONMETADATA.fields_by_name['groups'].message_type = _DYNAMICPARTITIONGROUP
 _APEXMETADATA.fields_by_name['apex_info'].message_type = _APEXINFO
-_DELTAARCHIVEMANIFEST.fields_by_name['install_operations'].message_type = _INSTALLOPERATION
-_DELTAARCHIVEMANIFEST.fields_by_name['kernel_install_operations'].message_type = _INSTALLOPERATION
-_DELTAARCHIVEMANIFEST.fields_by_name['old_kernel_info'].message_type = _PARTITIONINFO
-_DELTAARCHIVEMANIFEST.fields_by_name['new_kernel_info'].message_type = _PARTITIONINFO
-_DELTAARCHIVEMANIFEST.fields_by_name['old_rootfs_info'].message_type = _PARTITIONINFO
-_DELTAARCHIVEMANIFEST.fields_by_name['new_rootfs_info'].message_type = _PARTITIONINFO
-_DELTAARCHIVEMANIFEST.fields_by_name['old_image_info'].message_type = _IMAGEINFO
-_DELTAARCHIVEMANIFEST.fields_by_name['new_image_info'].message_type = _IMAGEINFO
 _DELTAARCHIVEMANIFEST.fields_by_name['partitions'].message_type = _PARTITIONUPDATE
 _DELTAARCHIVEMANIFEST.fields_by_name['dynamic_partition_metadata'].message_type = _DYNAMICPARTITIONMETADATA
 _DELTAARCHIVEMANIFEST.fields_by_name['apex_info'].message_type = _APEXINFO
 DESCRIPTOR.message_types_by_name['Extent'] = _EXTENT
 DESCRIPTOR.message_types_by_name['Signatures'] = _SIGNATURES
 DESCRIPTOR.message_types_by_name['PartitionInfo'] = _PARTITIONINFO
-DESCRIPTOR.message_types_by_name['ImageInfo'] = _IMAGEINFO
 DESCRIPTOR.message_types_by_name['InstallOperation'] = _INSTALLOPERATION
 DESCRIPTOR.message_types_by_name['CowMergeOperation'] = _COWMERGEOPERATION
 DESCRIPTOR.message_types_by_name['PartitionUpdate'] = _PARTITIONUPDATE
@@ -1033,13 +917,6 @@
   })
 _sym_db.RegisterMessage(PartitionInfo)
 
-ImageInfo = _reflection.GeneratedProtocolMessageType('ImageInfo', (_message.Message,), {
-  'DESCRIPTOR' : _IMAGEINFO,
-  '__module__' : 'update_metadata_pb2'
-  # @@protoc_insertion_point(class_scope:chromeos_update_engine.ImageInfo)
-  })
-_sym_db.RegisterMessage(ImageInfo)
-
 InstallOperation = _reflection.GeneratedProtocolMessageType('InstallOperation', (_message.Message,), {
   'DESCRIPTOR' : _INSTALLOPERATION,
   '__module__' : 'update_metadata_pb2'
@@ -1099,20 +976,6 @@
 
 DESCRIPTOR._options = None
 _SIGNATURES_SIGNATURE.fields_by_name['version']._options = None
-_IMAGEINFO.fields_by_name['board']._options = None
-_IMAGEINFO.fields_by_name['key']._options = None
-_IMAGEINFO.fields_by_name['channel']._options = None
-_IMAGEINFO.fields_by_name['version']._options = None
-_IMAGEINFO.fields_by_name['build_channel']._options = None
-_IMAGEINFO.fields_by_name['build_version']._options = None
 _INSTALLOPERATION_TYPE.values_by_name["MOVE"]._options = None
 _INSTALLOPERATION_TYPE.values_by_name["BSDIFF"]._options = None
-_DELTAARCHIVEMANIFEST.fields_by_name['install_operations']._options = None
-_DELTAARCHIVEMANIFEST.fields_by_name['kernel_install_operations']._options = None
-_DELTAARCHIVEMANIFEST.fields_by_name['old_kernel_info']._options = None
-_DELTAARCHIVEMANIFEST.fields_by_name['new_kernel_info']._options = None
-_DELTAARCHIVEMANIFEST.fields_by_name['old_rootfs_info']._options = None
-_DELTAARCHIVEMANIFEST.fields_by_name['new_rootfs_info']._options = None
-_DELTAARCHIVEMANIFEST.fields_by_name['old_image_info']._options = None
-_DELTAARCHIVEMANIFEST.fields_by_name['new_image_info']._options = None
 # @@protoc_insertion_point(module_scope)
diff --git a/scripts/update_payload/applier.py b/scripts/update_payload/applier.py
deleted file mode 100644
index 29ccb8e..0000000
--- a/scripts/update_payload/applier.py
+++ /dev/null
@@ -1,621 +0,0 @@
-#
-# Copyright (C) 2013 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-"""Applying a Chrome OS update payload.
-
-This module is used internally by the main Payload class for applying an update
-payload. The interface for invoking the applier is as follows:
-
-  applier = PayloadApplier(payload)
-  applier.Run(...)
-
-"""
-
-from __future__ import absolute_import
-from __future__ import print_function
-
-import array
-import bz2
-import hashlib
-# Not everywhere we can have the lzma library so we ignore it if we didn't have
-# it because it is not going to be used. For example, 'cros flash' uses
-# devserver code which eventually loads this file, but the lzma library is not
-# included in the client test devices, and it is not necessary to do so. But
-# lzma is not used in 'cros flash' so it should be fine. Python 3.x include
-# lzma, but for backward compatibility with Python 2.7, backports-lzma is
-# needed.
-try:
-  import lzma
-except ImportError:
-  try:
-    from backports import lzma
-  except ImportError:
-    pass
-import os
-import subprocess
-import sys
-import tempfile
-
-from update_payload import common
-from update_payload.error import PayloadError
-
-#
-# Helper functions.
-#
-def _VerifySha256(file_obj, expected_hash, name, length=-1):
-  """Verifies the SHA256 hash of a file.
-
-  Args:
-    file_obj: file object to read
-    expected_hash: the hash digest we expect to be getting
-    name: name string of this hash, for error reporting
-    length: precise length of data to verify (optional)
-
-  Raises:
-    PayloadError if computed hash doesn't match expected one, or if fails to
-    read the specified length of data.
-  """
-  hasher = hashlib.sha256()
-  block_length = 1024 * 1024
-  max_length = length if length >= 0 else sys.maxsize
-
-  while max_length > 0:
-    read_length = min(max_length, block_length)
-    data = file_obj.read(read_length)
-    if not data:
-      break
-    max_length -= len(data)
-    hasher.update(data)
-
-  if length >= 0 and max_length > 0:
-    raise PayloadError(
-        'insufficient data (%d instead of %d) when verifying %s' %
-        (length - max_length, length, name))
-
-  actual_hash = hasher.digest()
-  if actual_hash != expected_hash:
-    raise PayloadError('%s hash (%s) not as expected (%s)' %
-                       (name, common.FormatSha256(actual_hash),
-                        common.FormatSha256(expected_hash)))
-
-
-def _ReadExtents(file_obj, extents, block_size, max_length=-1):
-  """Reads data from file as defined by extent sequence.
-
-  This tries to be efficient by not copying data as it is read in chunks.
-
-  Args:
-    file_obj: file object
-    extents: sequence of block extents (offset and length)
-    block_size: size of each block
-    max_length: maximum length to read (optional)
-
-  Returns:
-    A character array containing the concatenated read data.
-  """
-  data = array.array('B')
-  if max_length < 0:
-    max_length = sys.maxsize
-  for ex in extents:
-    if max_length == 0:
-      break
-    read_length = min(max_length, ex.num_blocks * block_size)
-
-    file_obj.seek(ex.start_block * block_size)
-    data.fromfile(file_obj, read_length)
-
-    max_length -= read_length
-
-  return data
-
-
-def _WriteExtents(file_obj, data, extents, block_size, base_name):
-  """Writes data to file as defined by extent sequence.
-
-  This tries to be efficient by not copy data as it is written in chunks.
-
-  Args:
-    file_obj: file object
-    data: data to write
-    extents: sequence of block extents (offset and length)
-    block_size: size of each block
-    base_name: name string of extent sequence for error reporting
-
-  Raises:
-    PayloadError when things don't add up.
-  """
-  data_offset = 0
-  data_length = len(data)
-  for ex, ex_name in common.ExtentIter(extents, base_name):
-    if not data_length:
-      raise PayloadError('%s: more write extents than data' % ex_name)
-    write_length = min(data_length, ex.num_blocks * block_size)
-    file_obj.seek(ex.start_block * block_size)
-    file_obj.write(data[data_offset:(data_offset + write_length)])
-
-    data_offset += write_length
-    data_length -= write_length
-
-  if data_length:
-    raise PayloadError('%s: more data than write extents' % base_name)
-
-
-def _ExtentsToBspatchArg(extents, block_size, base_name, data_length=-1):
-  """Translates an extent sequence into a bspatch-compatible string argument.
-
-  Args:
-    extents: sequence of block extents (offset and length)
-    block_size: size of each block
-    base_name: name string of extent sequence for error reporting
-    data_length: the actual total length of the data in bytes (optional)
-
-  Returns:
-    A tuple consisting of (i) a string of the form
-    "off_1:len_1,...,off_n:len_n", (ii) an offset where zero padding is needed
-    for filling the last extent, (iii) the length of the padding (zero means no
-    padding is needed and the extents cover the full length of data).
-
-  Raises:
-    PayloadError if data_length is too short or too long.
-  """
-  arg = ''
-  pad_off = pad_len = 0
-  if data_length < 0:
-    data_length = sys.maxsize
-  for ex, ex_name in common.ExtentIter(extents, base_name):
-    if not data_length:
-      raise PayloadError('%s: more extents than total data length' % ex_name)
-
-    start_byte = ex.start_block * block_size
-    num_bytes = ex.num_blocks * block_size
-    if data_length < num_bytes:
-      # We're only padding a real extent.
-      pad_off = start_byte + data_length
-      pad_len = num_bytes - data_length
-      num_bytes = data_length
-
-    arg += '%s%d:%d' % (arg and ',', start_byte, num_bytes)
-    data_length -= num_bytes
-
-  if data_length:
-    raise PayloadError('%s: extents not covering full data length' % base_name)
-
-  return arg, pad_off, pad_len
-
-
-#
-# Payload application.
-#
-class PayloadApplier(object):
-  """Applying an update payload.
-
-  This is a short-lived object whose purpose is to isolate the logic used for
-  applying an update payload.
-  """
-
-  def __init__(self, payload, bsdiff_in_place=True, bspatch_path=None,
-               puffpatch_path=None, truncate_to_expected_size=True):
-    """Initialize the applier.
-
-    Args:
-      payload: the payload object to check
-      bsdiff_in_place: whether to perform BSDIFF operation in-place (optional)
-      bspatch_path: path to the bspatch binary (optional)
-      puffpatch_path: path to the puffpatch binary (optional)
-      truncate_to_expected_size: whether to truncate the resulting partitions
-                                 to their expected sizes, as specified in the
-                                 payload (optional)
-    """
-    assert payload.is_init, 'uninitialized update payload'
-    self.payload = payload
-    self.block_size = payload.manifest.block_size
-    self.minor_version = payload.manifest.minor_version
-    self.bsdiff_in_place = bsdiff_in_place
-    self.bspatch_path = bspatch_path or 'bspatch'
-    self.puffpatch_path = puffpatch_path or 'puffin'
-    self.truncate_to_expected_size = truncate_to_expected_size
-
-  def _ApplyReplaceOperation(self, op, op_name, out_data, part_file, part_size):
-    """Applies a REPLACE{,_BZ,_XZ} operation.
-
-    Args:
-      op: the operation object
-      op_name: name string for error reporting
-      out_data: the data to be written
-      part_file: the partition file object
-      part_size: the size of the partition
-
-    Raises:
-      PayloadError if something goes wrong.
-    """
-    block_size = self.block_size
-    data_length = len(out_data)
-
-    # Decompress data if needed.
-    if op.type == common.OpType.REPLACE_BZ:
-      out_data = bz2.decompress(out_data)
-      data_length = len(out_data)
-    elif op.type == common.OpType.REPLACE_XZ:
-      # pylint: disable=no-member
-      out_data = lzma.decompress(out_data)
-      data_length = len(out_data)
-
-    # Write data to blocks specified in dst extents.
-    data_start = 0
-    for ex, ex_name in common.ExtentIter(op.dst_extents,
-                                         '%s.dst_extents' % op_name):
-      start_block = ex.start_block
-      num_blocks = ex.num_blocks
-      count = num_blocks * block_size
-
-      data_end = data_start + count
-
-      # Make sure we're not running past partition boundary.
-      if (start_block + num_blocks) * block_size > part_size:
-        raise PayloadError(
-            '%s: extent (%s) exceeds partition size (%d)' %
-            (ex_name, common.FormatExtent(ex, block_size),
-             part_size))
-
-      # Make sure that we have enough data to write.
-      if data_end >= data_length + block_size:
-        raise PayloadError(
-            '%s: more dst blocks than data (even with padding)')
-
-      # Pad with zeros if necessary.
-      if data_end > data_length:
-        padding = data_end - data_length
-        out_data += b'\0' * padding
-
-      self.payload.payload_file.seek(start_block * block_size)
-      part_file.seek(start_block * block_size)
-      part_file.write(out_data[data_start:data_end])
-
-      data_start += count
-
-    # Make sure we wrote all data.
-    if data_start < data_length:
-      raise PayloadError('%s: wrote fewer bytes (%d) than expected (%d)' %
-                         (op_name, data_start, data_length))
-
-  def _ApplyZeroOperation(self, op, op_name, part_file):
-    """Applies a ZERO operation.
-
-    Args:
-      op: the operation object
-      op_name: name string for error reporting
-      part_file: the partition file object
-
-    Raises:
-      PayloadError if something goes wrong.
-    """
-    block_size = self.block_size
-    base_name = '%s.dst_extents' % op_name
-
-    # Iterate over the extents and write zero.
-    # pylint: disable=unused-variable
-    for ex, ex_name in common.ExtentIter(op.dst_extents, base_name):
-      part_file.seek(ex.start_block * block_size)
-      part_file.write(b'\0' * (ex.num_blocks * block_size))
-
-  def _ApplySourceCopyOperation(self, op, op_name, old_part_file,
-                                new_part_file):
-    """Applies a SOURCE_COPY operation.
-
-    Args:
-      op: the operation object
-      op_name: name string for error reporting
-      old_part_file: the old partition file object
-      new_part_file: the new partition file object
-
-    Raises:
-      PayloadError if something goes wrong.
-    """
-    if not old_part_file:
-      raise PayloadError(
-          '%s: no source partition file provided for operation type (%d)' %
-          (op_name, op.type))
-
-    block_size = self.block_size
-
-    # Gather input raw data from src extents.
-    in_data = _ReadExtents(old_part_file, op.src_extents, block_size)
-
-    # Dump extracted data to dst extents.
-    _WriteExtents(new_part_file, in_data, op.dst_extents, block_size,
-                  '%s.dst_extents' % op_name)
-
-  def _BytesInExtents(self, extents, base_name):
-    """Counts the length of extents in bytes.
-
-    Args:
-      extents: The list of Extents.
-      base_name: For error reporting.
-
-    Returns:
-      The number of bytes in extents.
-    """
-
-    length = 0
-    # pylint: disable=unused-variable
-    for ex, ex_name in common.ExtentIter(extents, base_name):
-      length += ex.num_blocks * self.block_size
-    return length
-
-  def _ApplyDiffOperation(self, op, op_name, patch_data, old_part_file,
-                          new_part_file):
-    """Applies a SOURCE_BSDIFF, BROTLI_BSDIFF or PUFFDIFF operation.
-
-    Args:
-      op: the operation object
-      op_name: name string for error reporting
-      patch_data: the binary patch content
-      old_part_file: the source partition file object
-      new_part_file: the target partition file object
-
-    Raises:
-      PayloadError if something goes wrong.
-    """
-    if not old_part_file:
-      raise PayloadError(
-          '%s: no source partition file provided for operation type (%d)' %
-          (op_name, op.type))
-
-    block_size = self.block_size
-
-    # Dump patch data to file.
-    with tempfile.NamedTemporaryFile(delete=False) as patch_file:
-      patch_file_name = patch_file.name
-      patch_file.write(patch_data)
-
-    if (hasattr(new_part_file, 'fileno') and
-        ((not old_part_file) or hasattr(old_part_file, 'fileno'))):
-      # Construct input and output extents argument for bspatch.
-
-      in_extents_arg, _, _ = _ExtentsToBspatchArg(
-          op.src_extents, block_size, '%s.src_extents' % op_name,
-          data_length=op.src_length if op.src_length else
-          self._BytesInExtents(op.src_extents, "%s.src_extents"))
-      out_extents_arg, pad_off, pad_len = _ExtentsToBspatchArg(
-          op.dst_extents, block_size, '%s.dst_extents' % op_name,
-          data_length=op.dst_length if op.dst_length else
-          self._BytesInExtents(op.dst_extents, "%s.dst_extents"))
-
-      new_file_name = '/dev/fd/%d' % new_part_file.fileno()
-      # Diff from source partition.
-      old_file_name = '/dev/fd/%d' % old_part_file.fileno()
-
-      # In python3, file descriptors(fd) are not passed to child processes by
-      # default. To pass the fds to the child processes, we need to set the flag
-      # 'inheritable' in the fds and make the subprocess calls with the argument
-      # close_fds set to False.
-      if sys.version_info.major >= 3:
-        os.set_inheritable(new_part_file.fileno(), True)
-        os.set_inheritable(old_part_file.fileno(), True)
-
-      if op.type in (common.OpType.SOURCE_BSDIFF, common.OpType.BROTLI_BSDIFF):
-        # Invoke bspatch on partition file with extents args.
-        bspatch_cmd = [self.bspatch_path, old_file_name, new_file_name,
-                       patch_file_name, in_extents_arg, out_extents_arg]
-        subprocess.check_call(bspatch_cmd, close_fds=False)
-      elif op.type == common.OpType.PUFFDIFF:
-        # Invoke puffpatch on partition file with extents args.
-        puffpatch_cmd = [self.puffpatch_path,
-                         "--operation=puffpatch",
-                         "--src_file=%s" % old_file_name,
-                         "--dst_file=%s" % new_file_name,
-                         "--patch_file=%s" % patch_file_name,
-                         "--src_extents=%s" % in_extents_arg,
-                         "--dst_extents=%s" % out_extents_arg]
-        subprocess.check_call(puffpatch_cmd, close_fds=False)
-      else:
-        raise PayloadError("Unknown operation %s" % op.type)
-
-      # Pad with zeros past the total output length.
-      if pad_len:
-        new_part_file.seek(pad_off)
-        new_part_file.write(b'\0' * pad_len)
-    else:
-      # Gather input raw data and write to a temp file.
-      input_part_file = old_part_file if old_part_file else new_part_file
-      in_data = _ReadExtents(input_part_file, op.src_extents, block_size,
-                             max_length=op.src_length if op.src_length else
-                             self._BytesInExtents(op.src_extents,
-                                                  "%s.src_extents"))
-      with tempfile.NamedTemporaryFile(delete=False) as in_file:
-        in_file_name = in_file.name
-        in_file.write(in_data)
-
-      # Allocate temporary output file.
-      with tempfile.NamedTemporaryFile(delete=False) as out_file:
-        out_file_name = out_file.name
-
-      if op.type in (common.OpType.SOURCE_BSDIFF, common.OpType.BROTLI_BSDIFF):
-        # Invoke bspatch.
-        bspatch_cmd = [self.bspatch_path, in_file_name, out_file_name,
-                       patch_file_name]
-        subprocess.check_call(bspatch_cmd)
-      elif op.type == common.OpType.PUFFDIFF:
-        # Invoke puffpatch.
-        puffpatch_cmd = [self.puffpatch_path,
-                         "--operation=puffpatch",
-                         "--src_file=%s" % in_file_name,
-                         "--dst_file=%s" % out_file_name,
-                         "--patch_file=%s" % patch_file_name]
-        subprocess.check_call(puffpatch_cmd)
-      else:
-        raise PayloadError("Unknown operation %s" % op.type)
-
-      # Read output.
-      with open(out_file_name, 'rb') as out_file:
-        out_data = out_file.read()
-        if len(out_data) != op.dst_length:
-          raise PayloadError(
-              '%s: actual patched data length (%d) not as expected (%d)' %
-              (op_name, len(out_data), op.dst_length))
-
-      # Write output back to partition, with padding.
-      unaligned_out_len = len(out_data) % block_size
-      if unaligned_out_len:
-        out_data += b'\0' * (block_size - unaligned_out_len)
-      _WriteExtents(new_part_file, out_data, op.dst_extents, block_size,
-                    '%s.dst_extents' % op_name)
-
-      # Delete input/output files.
-      os.remove(in_file_name)
-      os.remove(out_file_name)
-
-    # Delete patch file.
-    os.remove(patch_file_name)
-
-  def _ApplyOperations(self, operations, base_name, old_part_file,
-                       new_part_file, part_size):
-    """Applies a sequence of update operations to a partition.
-
-    Args:
-      operations: the sequence of operations
-      base_name: the name of the operation sequence
-      old_part_file: the old partition file object, open for reading/writing
-      new_part_file: the new partition file object, open for reading/writing
-      part_size: the partition size
-
-    Raises:
-      PayloadError if anything goes wrong while processing the payload.
-    """
-    for op, op_name in common.OperationIter(operations, base_name):
-      # Read data blob.
-      data = self.payload.ReadDataBlob(op.data_offset, op.data_length)
-
-      if op.type in (common.OpType.REPLACE, common.OpType.REPLACE_BZ,
-                     common.OpType.REPLACE_XZ):
-        self._ApplyReplaceOperation(op, op_name, data, new_part_file, part_size)
-      elif op.type == common.OpType.ZERO:
-        self._ApplyZeroOperation(op, op_name, new_part_file)
-      elif op.type == common.OpType.SOURCE_COPY:
-        self._ApplySourceCopyOperation(op, op_name, old_part_file,
-                                       new_part_file)
-      elif op.type in (common.OpType.SOURCE_BSDIFF, common.OpType.PUFFDIFF,
-                       common.OpType.BROTLI_BSDIFF):
-        self._ApplyDiffOperation(op, op_name, data, old_part_file,
-                                 new_part_file)
-      else:
-        raise PayloadError('%s: unknown operation type (%d)' %
-                           (op_name, op.type))
-
-  def _ApplyToPartition(self, operations, part_name, base_name,
-                        new_part_file_name, new_part_info,
-                        old_part_file_name=None, old_part_info=None):
-    """Applies an update to a partition.
-
-    Args:
-      operations: the sequence of update operations to apply
-      part_name: the name of the partition, for error reporting
-      base_name: the name of the operation sequence
-      new_part_file_name: file name to write partition data to
-      new_part_info: size and expected hash of dest partition
-      old_part_file_name: file name of source partition (optional)
-      old_part_info: size and expected hash of source partition (optional)
-
-    Raises:
-      PayloadError if anything goes wrong with the update.
-    """
-    # Do we have a source partition?
-    if old_part_file_name:
-      # Verify the source partition.
-      with open(old_part_file_name, 'rb') as old_part_file:
-        _VerifySha256(old_part_file, old_part_info.hash,
-                      'old ' + part_name, length=old_part_info.size)
-      new_part_file_mode = 'r+b'
-      open(new_part_file_name, 'w').close()
-
-    else:
-      # We need to create/truncate the dst partition file.
-      new_part_file_mode = 'w+b'
-
-    # Apply operations.
-    with open(new_part_file_name, new_part_file_mode) as new_part_file:
-      old_part_file = (open(old_part_file_name, 'r+b')
-                       if old_part_file_name else None)
-      try:
-        self._ApplyOperations(operations, base_name, old_part_file,
-                              new_part_file, new_part_info.size)
-      finally:
-        if old_part_file:
-          old_part_file.close()
-
-      # Truncate the result, if so instructed.
-      if self.truncate_to_expected_size:
-        new_part_file.seek(0, 2)
-        if new_part_file.tell() > new_part_info.size:
-          new_part_file.seek(new_part_info.size)
-          new_part_file.truncate()
-
-    # Verify the resulting partition.
-    with open(new_part_file_name, 'rb') as new_part_file:
-      _VerifySha256(new_part_file, new_part_info.hash,
-                    'new ' + part_name, length=new_part_info.size)
-
-  def Run(self, new_parts, old_parts=None):
-    """Applier entry point, invoking all update operations.
-
-    Args:
-      new_parts: map of partition name to dest partition file
-      old_parts: map of partition name to source partition file (optional)
-
-    Raises:
-      PayloadError if payload application failed.
-    """
-    if old_parts is None:
-      old_parts = {}
-
-    self.payload.ResetFile()
-
-    new_part_info = {}
-    old_part_info = {}
-    install_operations = []
-
-    manifest = self.payload.manifest
-    for part in manifest.partitions:
-      name = part.partition_name
-      new_part_info[name] = part.new_partition_info
-      old_part_info[name] = part.old_partition_info
-      install_operations.append((name, part.operations))
-
-    part_names = set(new_part_info.keys())  # Equivalently, old_part_info.keys()
-
-    # Make sure the arguments are sane and match the payload.
-    new_part_names = set(new_parts.keys())
-    if new_part_names != part_names:
-      raise PayloadError('missing dst partition(s) %s' %
-                         ', '.join(part_names - new_part_names))
-
-    old_part_names = set(old_parts.keys())
-    if part_names - old_part_names:
-      if self.payload.IsDelta():
-        raise PayloadError('trying to apply a delta update without src '
-                           'partition(s) %s' %
-                           ', '.join(part_names - old_part_names))
-    elif old_part_names == part_names:
-      if self.payload.IsFull():
-        raise PayloadError('trying to apply a full update onto src partitions')
-    else:
-      raise PayloadError('not all src partitions provided')
-
-    for name, operations in install_operations:
-      # Apply update to partition.
-      self._ApplyToPartition(
-          operations, name, '%s_install_operations' % name, new_parts[name],
-          new_part_info[name], old_parts.get(name, None), old_part_info[name])
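The removed applier depended on a Python 3 subtlety its comments describe: file descriptors are created non-inheritable by default (PEP 446), so they must be marked inheritable and the child spawned with close_fds=False before /dev/fd paths can work. A minimal sketch of that pattern, with illustrative file names and patch arguments rather than the removed code:

    import os
    import subprocess

    with open("old_part.img", "r+b") as old_part, \
         open("new_part.img", "r+b") as new_part:
      # Python 3 opens fds non-inheritable by default; opt back in
      # explicitly before handing /dev/fd paths to the child process.
      os.set_inheritable(old_part.fileno(), True)
      os.set_inheritable(new_part.fileno(), True)
      subprocess.check_call(
          ["bspatch",
           "/dev/fd/%d" % old_part.fileno(),
           "/dev/fd/%d" % new_part.fileno(),
           "patch.bin"],      # hypothetical patch file
          close_fds=False)    # keep the inherited fds open in the child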
diff --git a/scripts/update_payload/checker.py b/scripts/update_payload/checker.py
index 56a9370..44b6811 100644
--- a/scripts/update_payload/checker.py
+++ b/scripts/update_payload/checker.py
@@ -42,7 +42,7 @@
 from update_payload import error
 from update_payload import format_utils
 from update_payload import histogram
-from update_payload import update_metadata_pb2
+import update_metadata_pb2
 
 #
 # Constants.
diff --git a/scripts/update_payload/checker_unittest.py b/scripts/update_payload/checker_unittest.py
index 993b785..cf813fd 100755
--- a/scripts/update_payload/checker_unittest.py
+++ b/scripts/update_payload/checker_unittest.py
@@ -37,7 +37,7 @@
 from update_payload import checker
 from update_payload import common
 from update_payload import test_utils
-from update_payload import update_metadata_pb2
+import update_metadata_pb2
 from update_payload.error import PayloadError
 from update_payload.payload import Payload  # Avoid name conflicts later.
 
diff --git a/scripts/update_payload/common.py b/scripts/update_payload/common.py
index 7c6ec8f..7139f6f 100644
--- a/scripts/update_payload/common.py
+++ b/scripts/update_payload/common.py
@@ -21,7 +21,7 @@
 
 import base64
 
-from update_payload import update_metadata_pb2
+import update_metadata_pb2
 from update_payload.error import PayloadError
 
 
diff --git a/scripts/update_payload/payload.py b/scripts/update_payload/payload.py
index 86caef7..4abd63e 100644
--- a/scripts/update_payload/payload.py
+++ b/scripts/update_payload/payload.py
@@ -18,6 +18,7 @@
 
 from __future__ import absolute_import
 from __future__ import print_function
+import binascii
 
 import hashlib
 import io
@@ -25,10 +26,10 @@
 import struct
 import zipfile
 
-from update_payload import applier
+import update_metadata_pb2
+
 from update_payload import checker
 from update_payload import common
-from update_payload import update_metadata_pb2
 from update_payload.error import PayloadError
 
 
@@ -123,15 +124,22 @@
       payload_file_offset: the offset of the actual payload
     """
     if zipfile.is_zipfile(payload_file):
+      self.name = payload_file
       with zipfile.ZipFile(payload_file) as zfp:
+        if "payload.bin" not in zfp.namelist():
+          raise ValueError(f"payload.bin missing in archive {payload_file}")
         self.payload_file = zfp.open("payload.bin", "r")
     elif isinstance(payload_file, str):
+      self.name = payload_file
       payload_fp = open(payload_file, "rb")
       payload_bytes = mmap.mmap(
           payload_fp.fileno(), 0, access=mmap.ACCESS_READ)
       self.payload_file = io.BytesIO(payload_bytes)
     else:
+      self.name = payload_file.name
       self.payload_file = payload_file
+    self.payload_file_size = self.payload_file.seek(0, io.SEEK_END)
+    self.payload_file.seek(0, io.SEEK_SET)
     self.payload_file_offset = payload_file_offset
     self.manifest_hasher = None
     self.is_init = False
@@ -141,6 +149,7 @@
     self.metadata_signature = None
     self.payload_signature = None
     self.metadata_size = None
+    self.Init()
 
   @property
   def is_incremental(self):
@@ -150,6 +159,20 @@
   def is_partial(self):
     return self.manifest.partial_update
 
+  @property
+  def total_data_length(self):
+    """Return the total data length of this payload, excluding payload
+    signature at the very end.
+    """
+    # Operations are sorted in ascending data_offset order, so iterating
+    # backwards and find the first one with non zero data_offset will tell
+    # us total data length
+    for partition in reversed(self.manifest.partitions):
+      for op in reversed(partition.operations):
+        if op.data_length > 0:
+          return op.data_offset + op.data_length
+    return 0
+
   def _ReadHeader(self):
     """Reads and returns the payload header.
 
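To see why the backwards scan in total_data_length is sufficient, consider a toy manifest (a sketch assuming the generated update_metadata_pb2 module is importable; values are illustrative):

    import update_metadata_pb2 as um

    manifest = um.DeltaArchiveManifest()
    part = manifest.partitions.add(partition_name="system")
    part.operations.add(type=um.InstallOperation.REPLACE,
                        data_offset=0, data_length=4096)
    part.operations.add(type=um.InstallOperation.REPLACE,
                        data_offset=4096, data_length=8192)
    part.operations.add(type=um.InstallOperation.ZERO)  # no data blob

    # Scanning partitions and operations in reverse, the first op with a
    # non-zero data_length ends at 4096 + 8192 = 12288 bytes. That is the
    # end of payload data, regardless of trailing data-less operations.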
@@ -223,7 +246,7 @@
       correctly.
     """
     if self.is_init:
-      raise PayloadError('payload object already initialized')
+      return
 
     self.manifest_hasher = hashlib.sha256()
 
@@ -245,7 +268,9 @@
     self.metadata_size = self.header.size + self.header.manifest_len
     self.data_offset = self.metadata_size + self.header.metadata_signature_len
 
-    if self.manifest.signatures_offset and self.manifest.signatures_size:
+    if (self.manifest.signatures_offset and self.manifest.signatures_size and
+        self.manifest.signatures_offset + self.manifest.signatures_size <=
+        self.payload_file_size):
       payload_signature_blob = self.ReadDataBlob(
           self.manifest.signatures_offset, self.manifest.signatures_size)
       payload_signature = update_metadata_pb2.Signatures()
@@ -305,29 +328,25 @@
                part_sizes=part_sizes,
                report_out_file=report_out_file)
 
-  def Apply(self, new_parts, old_parts=None, bsdiff_in_place=True,
-            bspatch_path=None, puffpatch_path=None,
-            truncate_to_expected_size=True):
-    """Applies the update payload.
-
-    Args:
-      new_parts: map of partition name to dest partition file
-      old_parts: map of partition name to partition file (optional)
-      bsdiff_in_place: whether to perform BSDIFF operations in-place (optional)
-      bspatch_path: path to the bspatch binary (optional)
-      puffpatch_path: path to the puffpatch binary (optional)
-      truncate_to_expected_size: whether to truncate the resulting partitions
-                                 to their expected sizes, as specified in the
-                                 payload (optional)
-
-    Raises:
-      PayloadError if payload application failed.
-    """
-    self._AssertInit()
-
-    # Create a short-lived payload applier object and run it.
-    helper = applier.PayloadApplier(
-        self, bsdiff_in_place=bsdiff_in_place, bspatch_path=bspatch_path,
-        puffpatch_path=puffpatch_path,
-        truncate_to_expected_size=truncate_to_expected_size)
-    helper.Run(new_parts, old_parts=old_parts)
+  def CheckDataHash(self):
+    """Verifies the data_sha256_hash of every operation's data blob.
+
+    Raises:
+      PayloadError if a hash is missing or does not match the blob.
+    """
+    for part in self.manifest.partitions:
+      for op in part.operations:
+        if op.data_length == 0:
+          continue
+        if not op.data_sha256_hash:
+          raise PayloadError(
+              f"Operation {op} in partition {part.partition_name} "
+              "missing data_sha256_hash")
+        blob = self.ReadDataBlob(op.data_offset, op.data_length)
+        blob_hash = hashlib.sha256(blob)
+        if blob_hash.digest() != op.data_sha256_hash:
+          raise PayloadError(
+              f"Operation {op} in partition {part.partition_name} "
+              "has unexpected hash, expected: "
+              f"{binascii.hexlify(op.data_sha256_hash)}, "
+              f"actual: {blob_hash.hexdigest()}")
diff --git a/scripts/update_payload/test_utils.py b/scripts/update_payload/test_utils.py
index e153669..7005827 100644
--- a/scripts/update_payload/test_utils.py
+++ b/scripts/update_payload/test_utils.py
@@ -27,7 +27,7 @@
 
 from update_payload import common
 from update_payload import payload
-from update_payload import update_metadata_pb2
+import update_metadata_pb2
 
 
 class TestError(Exception):
diff --git a/update_metadata.proto b/update_metadata.proto
index 3f454ad..96e04f2 100644
--- a/update_metadata.proto
+++ b/update_metadata.proto
@@ -331,6 +331,11 @@
   repeated string partition_names = 3;
 }
 
+message VABCFeatureSet {
+  optional bool threaded = 1;
+  optional bool batch_writes = 2;
+}
+
 // Metadata related to all dynamic partitions.
 message DynamicPartitionMetadata {
   // All updatable groups present in |partitions| of this DeltaArchiveManifest.
@@ -361,6 +366,9 @@
   // COW version used by VABC. This represents the major version in the COW
   // header
   optional uint32 cow_version = 5;
+
+  // A collection of knobs to tune Virtual AB Compression
+  optional VABCFeatureSet vabc_feature_set = 6;
 }
 
 // Definition has been duplicated from
@@ -423,4 +431,8 @@
   // Information on compressed APEX to figure out how much space is required for
   // their decompression
   repeated ApexInfo apex_info = 17;
+
+  // Security patch level of the device, usually in the format of
+  // yyyy-mm-dd
+  optional string security_patch_level = 18;
 }
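From Python, the two new manifest fields can be populated as below (a sketch assuming the generated update_metadata_pb2 module and the field names shown in this diff; values are illustrative):

    import update_metadata_pb2 as um

    manifest = um.DeltaArchiveManifest()
    manifest.security_patch_level = "2023-05-01"  # yyyy-mm-dd
    features = manifest.dynamic_partition_metadata.vabc_feature_set
    features.threaded = True        # the new VABCFeatureSet knobs
    features.batch_writes = True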