update_engine: Merge remote-tracking branch 'cros/upstream' into cros/master

Done with:
git merge cros/upstream --commit -s recursive

- Added EC key support and its unittests.
- Resolved a conflict on error codes. Since Android versions are not
  uploading any UMA metrics, I gave the priority to the Android version,
  since they can't be changed.
- Changed the openssl functions to the get1 version (from get0)
  because of a current issue with gale. Once the issue is resolved we
  need to change them back.
- Some remaining styling issues fixed by clang-format

BUG=b:163153182
TEST=CQ passes
TEST=unittests

Change-Id: Ib95034422b92433ce26e28336bc4806b34910d38
diff --git a/.clang-format b/.clang-format
index aed0ce8..3044f59 100644
--- a/.clang-format
+++ b/.clang-format
@@ -34,6 +34,7 @@
 BinPackParameters: false
 CommentPragmas: NOLINT:.*
 DerivePointerAlignment: false
+IncludeBlocks: Preserve
 PointerAlignment: Left
 TabWidth: 2
 
diff --git a/Android.bp b/Android.bp
index 47f0318..a8b5fc2 100644
--- a/Android.bp
+++ b/Android.bp
@@ -120,6 +120,7 @@
         "libbz",
         "libbspatch",
         "libbrotli",
+        "libc++fs",
         "libfec_rs",
         "libpuffpatch",
         "libverity_tree",
@@ -128,6 +129,7 @@
         "libbase",
         "libcrypto",
         "libfec",
+        "libziparchive",
     ],
 }
 
@@ -146,6 +148,7 @@
         "common/clock.cc",
         "common/constants.cc",
         "common/cpu_limiter.cc",
+        "common/dynamic_partition_control_stub.cc",
         "common/error_code_utils.cc",
         "common/file_fetcher.cc",
         "common/hash_calculator.cc",
@@ -161,6 +164,7 @@
         "common/utils.cc",
         "payload_consumer/bzip_extent_writer.cc",
         "payload_consumer/cached_file_descriptor.cc",
+        "payload_consumer/certificate_parser_android.cc",
         "payload_consumer/delta_performer.cc",
         "payload_consumer/download_action.cc",
         "payload_consumer/extent_reader.cc",
@@ -178,6 +182,7 @@
         "payload_consumer/verity_writer_android.cc",
         "payload_consumer/xz_extent_writer.cc",
         "payload_consumer/fec_file_descriptor.cc",
+        "payload_consumer/partition_update_generator_android.cc",
     ],
 }
 
@@ -188,16 +193,41 @@
     name: "libupdate_engine_boot_control_exports",
     defaults: ["update_metadata-protos_exports"],
 
-    static_libs: ["update_metadata-protos"],
+    static_libs: [
+        "libcutils",
+        "libfs_mgr_binder",
+        "libgsi",
+        "libpayload_consumer",
+        "libsnapshot",
+        "update_metadata-protos",
+    ],
     shared_libs: [
         "libbootloader_message",
-        "libfs_mgr",
-        "libhwbinder",
         "libhidlbase",
         "liblp",
+        "libstatslog",
         "libutils",
         "android.hardware.boot@1.0",
+        "android.hardware.boot@1.1",
     ],
+    header_libs: [
+        "avb_headers",
+    ],
+    target: {
+        recovery: {
+            static_libs: [
+                "libfs_mgr",
+                "libsnapshot_nobinder",
+            ],
+            exclude_static_libs: [
+                "libfs_mgr_binder",
+                "libsnapshot",
+            ],
+            exclude_shared_libs: [
+                "libstatslog",
+            ],
+        },
+    },
 }
 
 cc_library_static {
@@ -205,12 +235,15 @@
     defaults: [
         "ue_defaults",
         "libupdate_engine_boot_control_exports",
+        "libpayload_consumer_exports",
     ],
     recovery_available: true,
 
     srcs: [
         "boot_control_android.cc",
+        "cleanup_previous_update_action.cc",
         "dynamic_partition_control_android.cc",
+        "dynamic_partition_utils.cc",
     ],
 }
 
@@ -227,6 +260,7 @@
     ],
 
     static_libs: [
+        "libkver",
         "libpayload_consumer",
         "libupdate_engine_boot_control",
     ],
@@ -239,9 +273,10 @@
         "libbrillo-binder",
         "libcurl",
         "libcutils",
+        "libupdate_engine_stable-cpp",
         "liblog",
-        "libmetricslogger",
         "libssl",
+        "libstatslog",
         "libutils",
     ],
 }
@@ -265,12 +300,13 @@
     srcs: [
         ":libupdate_engine_aidl",
         "binder_service_android.cc",
+        "binder_service_stable_android.cc",
         "certificate_checker.cc",
         "daemon_android.cc",
         "daemon_state_android.cc",
         "hardware_android.cc",
         "libcurl_http_fetcher.cc",
-        "metrics_reporter_android.cc",
+        "logging_android.cc",
         "metrics_utils.cc",
         "network_selector_android.cc",
         "update_attempter_android.cc",
@@ -290,9 +326,12 @@
     ],
 
     static_libs: ["libupdate_engine_android"],
-    required: ["cacerts_google"],
+    required: [
+        "cacerts_google",
+        "otacerts",
+    ],
 
-    srcs: ["main.cc"],
+    srcs: ["main.cc", "metrics_reporter_android.cc"],
     init_rc: ["update_engine.rc"],
 }
 
@@ -318,6 +357,7 @@
 
     srcs: [
         "hardware_android.cc",
+        "logging_android.cc",
         "metrics_reporter_stub.cc",
         "metrics_utils.cc",
         "network_selector_stub.cc",
@@ -350,12 +390,12 @@
         "libbrillo-stream",
         "libbrillo",
         "libchrome",
+        "libkver",
     ],
     target: {
         recovery: {
             exclude_shared_libs: [
                 "libprotobuf-cpp-lite",
-                "libhwbinder",
                 "libbrillo-stream",
                 "libbrillo",
                 "libchrome",
@@ -363,7 +403,9 @@
         },
     },
 
-    required: ["android.hardware.boot@1.0-impl-wrapper.recovery"],
+    required: [
+        "otacerts.recovery",
+    ],
 }
 
 // update_engine_client (type: executable)
@@ -407,6 +449,9 @@
         "update_metadata-protos_exports",
     ],
 
+    header_libs: [
+        "bootimg_headers",
+    ],
     static_libs: [
         "libavb",
         "libbrotli",
@@ -426,6 +471,20 @@
 }
 
 cc_library_static {
+    name: "libpayload_extent_ranges",
+    defaults: [
+        "ue_defaults",
+    ],
+    host_supported: true,
+    srcs: [
+        "payload_generator/extent_ranges.cc",
+    ],
+    static_libs: [
+        "update_metadata-protos",
+    ],
+}
+
+cc_library_static {
     name: "libpayload_generator",
     defaults: [
         "ue_defaults",
@@ -448,6 +507,7 @@
         "payload_generator/extent_utils.cc",
         "payload_generator/full_update_generator.cc",
         "payload_generator/mapfile_filesystem.cc",
+        "payload_generator/merge_sequence_generator.cc",
         "payload_generator/payload_file.cc",
         "payload_generator/payload_generation_config_android.cc",
         "payload_generator/payload_generation_config.cc",
@@ -496,8 +556,6 @@
 
     gtest: false,
     stem: "delta_generator",
-    relative_install_path: "update_engine_unittests",
-    no_named_install_directory: true,
 }
 
 // test_http_server (type: executable)
@@ -512,8 +570,6 @@
     ],
 
     gtest: false,
-    relative_install_path: "update_engine_unittests",
-    no_named_install_directory: true,
 }
 
 // test_subprocess (type: executable)
@@ -525,8 +581,6 @@
     srcs: ["test_subprocess.cc"],
 
     gtest: false,
-    relative_install_path: "update_engine_unittests",
-    no_named_install_directory: true,
 }
 
 // Public keys for unittests.
@@ -534,14 +588,20 @@
 genrule {
     name: "ue_unittest_keys",
     cmd: "openssl rsa -in $(location unittest_key.pem) -pubout -out $(location unittest_key.pub.pem) &&" +
-        "openssl rsa -in $(location unittest_key2.pem) -pubout -out $(location unittest_key2.pub.pem)",
+        "openssl rsa -in $(location unittest_key2.pem) -pubout -out $(location unittest_key2.pub.pem) &&" +
+        "openssl rsa -in $(location unittest_key_RSA4096.pem) -pubout -out $(location unittest_key_RSA4096.pub.pem) &&" +
+        "openssl pkey -in $(location unittest_key_EC.pem) -pubout -out $(location unittest_key_EC.pub.pem)",
     srcs: [
         "unittest_key.pem",
         "unittest_key2.pem",
+        "unittest_key_RSA4096.pem",
+        "unittest_key_EC.pem",
     ],
     out: [
         "unittest_key.pub.pem",
         "unittest_key2.pub.pem",
+        "unittest_key_RSA4096.pub.pem",
+        "unittest_key_EC.pub.pem",
     ],
 }
 
@@ -571,11 +631,6 @@
         "libpayload_generator_exports",
         "libupdate_engine_android_exports",
     ],
-    required: [
-        "test_http_server",
-        "test_subprocess",
-        "ue_unittest_delta_generator",
-    ],
 
     static_libs: [
         "libpayload_generator",
@@ -583,21 +638,33 @@
         "libgmock",
         "libchrome_test_helpers",
         "libupdate_engine_android",
+        "libdm",
     ],
-    shared_libs: [
-        "libhidltransport",
+
+    header_libs: [
+        "libstorage_literals_headers",
     ],
 
     data: [
+        ":test_http_server",
+        ":test_subprocess",
+        ":ue_unittest_delta_generator",
         ":ue_unittest_disk_imgs",
         ":ue_unittest_keys",
+        "otacerts.zip",
         "unittest_key.pem",
         "unittest_key2.pem",
+        "unittest_key_RSA4096.pem",
+        "unittest_key_EC.pem",
         "update_engine.conf",
     ],
 
+    // We cannot use the default generated AndroidTest.xml because of the use of helper modules
+    // (i.e. test_http_server, test_subprocess, ue_unittest_delta_generator).
+    test_config: "test_config.xml",
+    test_suites: ["device-tests"],
+
     srcs: [
-        "boot_control_android_unittest.cc",
         "certificate_checker_unittest.cc",
         "common/action_pipe_unittest.cc",
         "common/action_processor_unittest.cc",
@@ -615,17 +682,22 @@
         "common/terminator_unittest.cc",
         "common/test_utils.cc",
         "common/utils_unittest.cc",
+        "dynamic_partition_control_android_unittest.cc",
         "libcurl_http_fetcher_unittest.cc",
+        "hardware_android_unittest.cc",
         "payload_consumer/bzip_extent_writer_unittest.cc",
         "payload_consumer/cached_file_descriptor_unittest.cc",
+        "payload_consumer/certificate_parser_android_unittest.cc",
         "payload_consumer/delta_performer_integration_test.cc",
         "payload_consumer/delta_performer_unittest.cc",
+        "payload_consumer/download_action_android_unittest.cc",
         "payload_consumer/extent_reader_unittest.cc",
         "payload_consumer/extent_writer_unittest.cc",
         "payload_consumer/fake_file_descriptor.cc",
         "payload_consumer/file_descriptor_utils_unittest.cc",
         "payload_consumer/file_writer_unittest.cc",
         "payload_consumer/filesystem_verifier_action_unittest.cc",
+        "payload_consumer/partition_update_generator_android_unittest.cc",
         "payload_consumer/postinstall_runner_action_unittest.cc",
         "payload_consumer/verity_writer_android_unittest.cc",
         "payload_consumer/xz_extent_writer_unittest.cc",
@@ -641,6 +713,7 @@
         "payload_generator/fake_filesystem.cc",
         "payload_generator/full_update_generator_unittest.cc",
         "payload_generator/mapfile_filesystem_unittest.cc",
+        "payload_generator/merge_sequence_generator_unittest.cc",
         "payload_generator/payload_file_unittest.cc",
         "payload_generator/payload_generation_config_android_unittest.cc",
         "payload_generator/payload_generation_config_unittest.cc",
@@ -651,17 +724,18 @@
         "testrunner.cc",
         "update_attempter_android_unittest.cc",
         "update_status_utils_unittest.cc",
+        "metrics_reporter_stub.cc",
     ],
 }
 
 // Brillo update payload generation script
 // ========================================================
-cc_prebuilt_binary {
+sh_binary {
     name: "brillo_update_payload",
     device_supported: false,
     host_supported: true,
 
-    srcs: ["scripts/brillo_update_payload"],
+    src: "scripts/brillo_update_payload",
     required: [
         "delta_generator",
         "shflags",
@@ -674,3 +748,13 @@
         },
     },
 }
+
+// update_engine header library
+cc_library_headers {
+    name: "libupdate_engine_headers",
+    export_include_dirs: ["."],
+    apex_available: [
+        "com.android.gki.*",
+    ],
+    host_supported: true,
+}
diff --git a/BUILD.gn b/BUILD.gn
index 43bc787..59aa004 100644
--- a/BUILD.gn
+++ b/BUILD.gn
@@ -44,6 +44,7 @@
       ":test_subprocess",
       ":update_engine-test_images",
       ":update_engine-testkeys",
+      ":update_engine-testkeys-ec",
       ":update_engine_test_libs",
       ":update_engine_unittests",
     ]
@@ -135,6 +136,7 @@
     "common/clock.cc",
     "common/constants.cc",
     "common/cpu_limiter.cc",
+    "common/dynamic_partition_control_stub.cc",
     "common/error_code_utils.cc",
     "common/hash_calculator.cc",
     "common/http_common.cc",
@@ -149,6 +151,7 @@
     "common/utils.cc",
     "payload_consumer/bzip_extent_writer.cc",
     "payload_consumer/cached_file_descriptor.cc",
+    "payload_consumer/certificate_parser_stub.cc",
     "payload_consumer/delta_performer.cc",
     "payload_consumer/download_action.cc",
     "payload_consumer/extent_reader.cc",
@@ -159,6 +162,7 @@
     "payload_consumer/filesystem_verifier_action.cc",
     "payload_consumer/install_plan.cc",
     "payload_consumer/mount_history.cc",
+    "payload_consumer/partition_update_generator_stub.cc",
     "payload_consumer/payload_constants.cc",
     "payload_consumer/payload_metadata.cc",
     "payload_consumer/payload_verifier.cc",
@@ -202,6 +206,7 @@
     "hardware_chromeos.cc",
     "image_properties_chromeos.cc",
     "libcurl_http_fetcher.cc",
+    "logging.cc",
     "metrics_reporter_omaha.cc",
     "metrics_utils.cc",
     "omaha_request_action.cc",
@@ -335,7 +340,7 @@
     "payload_generator/annotated_operation.cc",
     "payload_generator/blob_file_writer.cc",
     "payload_generator/block_mapping.cc",
-    "payload_generator/boot_img_filesystem.cc",
+    "payload_generator/boot_img_filesystem_stub.cc",
     "payload_generator/bzip.cc",
     "payload_generator/deflate_utils.cc",
     "payload_generator/delta_diff_generator.cc",
@@ -345,6 +350,7 @@
     "payload_generator/extent_utils.cc",
     "payload_generator/full_update_generator.cc",
     "payload_generator/mapfile_filesystem.cc",
+    "payload_generator/merge_sequence_generator.cc",
     "payload_generator/payload_file.cc",
     "payload_generator/payload_generation_config.cc",
     "payload_generator/payload_generation_config_chromeos.cc",
@@ -414,10 +420,20 @@
     openssl_pem_out_dir = "include/update_engine"
     sources = [
       "unittest_key.pem",
+      "unittest_key_RSA4096.pem",
       "unittest_key2.pem",
     ]
   }
 
+  genopenssl_key("update_engine-testkeys-ec") {
+    openssl_pem_in_dir = "."
+    openssl_pem_out_dir = "include/update_engine"
+    openssl_pem_algorithm = "ec"
+    sources = [
+      "unittest_key_EC.pem",
+    ]
+  }
+
   # Unpacks sample images used for testing.
   tar_bunzip2("update_engine-test_images") {
     image_out_dir = "."
@@ -496,7 +512,6 @@
       "payload_generator/ab_generator_unittest.cc",
       "payload_generator/blob_file_writer_unittest.cc",
       "payload_generator/block_mapping_unittest.cc",
-      "payload_generator/boot_img_filesystem_unittest.cc",
       "payload_generator/deflate_utils_unittest.cc",
       "payload_generator/delta_diff_utils_unittest.cc",
       "payload_generator/ext2_filesystem_unittest.cc",
@@ -504,6 +519,7 @@
       "payload_generator/extent_utils_unittest.cc",
       "payload_generator/full_update_generator_unittest.cc",
       "payload_generator/mapfile_filesystem_unittest.cc",
+      "payload_generator/merge_sequence_generator_unittest.cc",
       "payload_generator/payload_file_unittest.cc",
       "payload_generator/payload_generation_config_unittest.cc",
       "payload_generator/payload_properties_unittest.cc",
diff --git a/Doxyfile b/Doxyfile
new file mode 100644
index 0000000..db31f86
--- /dev/null
+++ b/Doxyfile
@@ -0,0 +1,9 @@
+CLANG_DATABASE_PATH=../../
+HAVE_DOT=YES
+CALL_GRAPH=YES
+CALLER_GRAPH=YES
+GENERATE_HTML=YES
+GENERATE_LATEX=NO
+INPUT=.
+RECURSIVE=YES
+
diff --git a/METADATA b/METADATA
new file mode 100644
index 0000000..d97975c
--- /dev/null
+++ b/METADATA
@@ -0,0 +1,3 @@
+third_party {
+  license_type: NOTICE
+}
diff --git a/OWNERS b/OWNERS
index 75fd9f1..938752f 100644
--- a/OWNERS
+++ b/OWNERS
@@ -2,7 +2,9 @@
 
 # Android et. al. maintainers:
 deymo@google.com
+elsk@google.com
 senj@google.com
+xunchang@google.com
 
 # Chromium OS maintainers:
 ahassani@google.com
diff --git a/binder_bindings/android/os/IUpdateEngine.aidl b/binder_bindings/android/os/IUpdateEngine.aidl
index c0e29f5..c9580da 100644
--- a/binder_bindings/android/os/IUpdateEngine.aidl
+++ b/binder_bindings/android/os/IUpdateEngine.aidl
@@ -17,6 +17,7 @@
 package android.os;
 
 import android.os.IUpdateEngineCallback;
+import android.os.ParcelFileDescriptor;
 
 /** @hide */
 interface IUpdateEngine {
@@ -26,6 +27,11 @@
                     in long payload_size,
                     in String[] headerKeyValuePairs);
   /** @hide */
+  void applyPayloadFd(in ParcelFileDescriptor pfd,
+                      in long payload_offset,
+                      in long payload_size,
+                      in String[] headerKeyValuePairs);
+  /** @hide */
   boolean bind(IUpdateEngineCallback callback);
   /** @hide */
   boolean unbind(IUpdateEngineCallback callback);
@@ -39,4 +45,30 @@
   void resetStatus();
   /** @hide */
   boolean verifyPayloadApplicable(in String metadataFilename);
+  /**
+   * Allocate space on userdata partition.
+   *
+   * @return 0 indicates allocation is successful.
+   *   Non-zero indicates space is insufficient. The returned value is the
+   *   total required space (in bytes) on userdata partition.
+   *
+   * @throws ServiceSpecificException for other errors.
+   *
+   * @hide
+   */
+  long allocateSpaceForPayload(in String metadataFilename,
+                               in String[] headerKeyValuePairs);
+  /** @hide
+   *
+   * Wait for merge to finish, and clean up necessary files.
+   *
+   * @param callback Report status updates in callback (not the one previously
+   * bound with {@link #bind()}).
+   * {@link IUpdateEngineCallback#onStatusUpdate} is called with
+   * CLEANUP_PREVIOUS_UPDATE and a progress value during the cleanup.
+   * {@link IUpdateEngineCallback#onPayloadApplicationComplete} is called at
+   * the end with SUCCESS if successful. ERROR if transient errors (e.g. merged
+   * but needs reboot). DEVICE_CORRUPTED for permanent errors.
+   */
+  void cleanupSuccessfulUpdate(IUpdateEngineCallback callback);
 }
diff --git a/binder_bindings/android/os/IUpdateEngineCallback.aidl b/binder_bindings/android/os/IUpdateEngineCallback.aidl
index ee15c8b..4bacf9a 100644
--- a/binder_bindings/android/os/IUpdateEngineCallback.aidl
+++ b/binder_bindings/android/os/IUpdateEngineCallback.aidl
@@ -19,6 +19,7 @@
 /** @hide */
 oneway interface IUpdateEngineCallback {
   /** @hide */
+  @UnsupportedAppUsage
   void onStatusUpdate(int status_code, float percentage);
   /** @hide */
   void onPayloadApplicationComplete(int error_code);
diff --git a/binder_service_android.cc b/binder_service_android.cc
index 137694a..0c8bc2f 100644
--- a/binder_service_android.cc
+++ b/binder_service_android.cc
@@ -16,23 +16,23 @@
 
 #include "update_engine/binder_service_android.h"
 
+#include <memory>
+
 #include <base/bind.h>
 #include <base/logging.h>
 #include <binderwrapper/binder_wrapper.h>
 #include <brillo/errors/error.h>
 #include <utils/String8.h>
 
+#include "update_engine/binder_service_android_common.h"
+
 using android::binder::Status;
 using android::os::IUpdateEngineCallback;
+using android::os::ParcelFileDescriptor;
+using std::string;
+using std::vector;
 using update_engine::UpdateEngineStatus;
 
-namespace {
-Status ErrorPtrToStatus(const brillo::ErrorPtr& error) {
-  return Status::fromServiceSpecificError(
-      1, android::String8{error->GetMessage().c_str()});
-}
-}  // namespace
-
 namespace chromeos_update_engine {
 
 BinderUpdateEngineAndroidService::BinderUpdateEngineAndroidService(
@@ -57,6 +57,20 @@
 
 Status BinderUpdateEngineAndroidService::bind(
     const android::sp<IUpdateEngineCallback>& callback, bool* return_value) {
+  // Send an status update on connection (except when no update sent so far).
+  // Even though the status update is oneway, it still returns an erroneous
+  // status in case of a selinux denial. We should at least check this status
+  // and fails the binding.
+  if (last_status_ != -1) {
+    auto status = callback->onStatusUpdate(last_status_, last_progress_);
+    if (!status.isOk()) {
+      LOG(ERROR) << "Failed to call onStatusUpdate() from callback: "
+                 << status.toString8();
+      *return_value = false;
+      return Status::ok();
+    }
+  }
+
   callbacks_.emplace_back(callback);
 
   const android::sp<IBinder>& callback_binder =
@@ -69,12 +83,6 @@
           base::Unretained(this),
           base::Unretained(callback_binder.get())));
 
-  // Send an status update on connection (except when no update sent so far),
-  // since the status update is oneway and we don't need to wait for the
-  // response.
-  if (last_status_ != -1)
-    callback->onStatusUpdate(last_status_, last_progress_);
-
   *return_value = true;
   return Status::ok();
 }
@@ -94,13 +102,9 @@
     const android::String16& url,
     int64_t payload_offset,
     int64_t payload_size,
-    const std::vector<android::String16>& header_kv_pairs) {
-  const std::string payload_url{android::String8{url}.string()};
-  std::vector<std::string> str_headers;
-  str_headers.reserve(header_kv_pairs.size());
-  for (const auto& header : header_kv_pairs) {
-    str_headers.emplace_back(android::String8{header}.string());
-  }
+    const vector<android::String16>& header_kv_pairs) {
+  const string payload_url{android::String8{url}.string()};
+  vector<string> str_headers = ToVecString(header_kv_pairs);
 
   brillo::ErrorPtr error;
   if (!service_delegate_->ApplyPayload(
@@ -110,6 +114,21 @@
   return Status::ok();
 }
 
+Status BinderUpdateEngineAndroidService::applyPayloadFd(
+    const ParcelFileDescriptor& pfd,
+    int64_t payload_offset,
+    int64_t payload_size,
+    const vector<android::String16>& header_kv_pairs) {
+  vector<string> str_headers = ToVecString(header_kv_pairs);
+
+  brillo::ErrorPtr error;
+  if (!service_delegate_->ApplyPayload(
+          pfd.get(), payload_offset, payload_size, str_headers, &error)) {
+    return ErrorPtrToStatus(error);
+  }
+  return Status::ok();
+}
+
 Status BinderUpdateEngineAndroidService::suspend() {
   brillo::ErrorPtr error;
   if (!service_delegate_->SuspendUpdate(&error))
@@ -167,4 +186,58 @@
   return true;
 }
 
+Status BinderUpdateEngineAndroidService::allocateSpaceForPayload(
+    const android::String16& metadata_filename,
+    const vector<android::String16>& header_kv_pairs,
+    int64_t* return_value) {
+  const std::string payload_metadata{
+      android::String8{metadata_filename}.string()};
+  vector<string> str_headers = ToVecString(header_kv_pairs);
+  LOG(INFO) << "Received a request of allocating space for " << payload_metadata
+            << ".";
+  brillo::ErrorPtr error;
+  *return_value =
+      static_cast<int64_t>(service_delegate_->AllocateSpaceForPayload(
+          payload_metadata, str_headers, &error));
+  if (error != nullptr)
+    return ErrorPtrToStatus(error);
+  return Status::ok();
+}
+
+class CleanupSuccessfulUpdateCallback
+    : public CleanupSuccessfulUpdateCallbackInterface {
+ public:
+  CleanupSuccessfulUpdateCallback(
+      const android::sp<IUpdateEngineCallback>& callback)
+      : callback_(callback) {}
+  void OnCleanupComplete(int32_t error_code) {
+    ignore_result(callback_->onPayloadApplicationComplete(error_code));
+  }
+  void OnCleanupProgressUpdate(double progress) {
+    ignore_result(callback_->onStatusUpdate(
+        static_cast<int32_t>(
+            update_engine::UpdateStatus::CLEANUP_PREVIOUS_UPDATE),
+        progress));
+  }
+  void RegisterForDeathNotifications(base::Closure unbind) {
+    const android::sp<android::IBinder>& callback_binder =
+        IUpdateEngineCallback::asBinder(callback_);
+    auto binder_wrapper = android::BinderWrapper::Get();
+    binder_wrapper->RegisterForDeathNotifications(callback_binder, unbind);
+  }
+
+ private:
+  android::sp<IUpdateEngineCallback> callback_;
+};
+
+Status BinderUpdateEngineAndroidService::cleanupSuccessfulUpdate(
+    const android::sp<IUpdateEngineCallback>& callback) {
+  brillo::ErrorPtr error;
+  service_delegate_->CleanupSuccessfulUpdate(
+      std::make_unique<CleanupSuccessfulUpdateCallback>(callback), &error);
+  if (error != nullptr)
+    return ErrorPtrToStatus(error);
+  return Status::ok();
+}
+
 }  // namespace chromeos_update_engine
diff --git a/binder_service_android.h b/binder_service_android.h
index d8c4e9c..5f28225 100644
--- a/binder_service_android.h
+++ b/binder_service_android.h
@@ -53,6 +53,11 @@
       int64_t payload_offset,
       int64_t payload_size,
       const std::vector<android::String16>& header_kv_pairs) override;
+  android::binder::Status applyPayloadFd(
+      const ::android::os::ParcelFileDescriptor& pfd,
+      int64_t payload_offset,
+      int64_t payload_size,
+      const std::vector<android::String16>& header_kv_pairs) override;
   android::binder::Status bind(
       const android::sp<android::os::IUpdateEngineCallback>& callback,
       bool* return_value) override;
@@ -65,6 +70,12 @@
   android::binder::Status resetStatus() override;
   android::binder::Status verifyPayloadApplicable(
       const android::String16& metadata_filename, bool* return_value) override;
+  android::binder::Status allocateSpaceForPayload(
+      const android::String16& metadata_filename,
+      const std::vector<android::String16>& header_kv_pairs,
+      int64_t* return_value) override;
+  android::binder::Status cleanupSuccessfulUpdate(
+      const android::sp<android::os::IUpdateEngineCallback>& callback) override;
 
  private:
   // Remove the passed |callback| from the list of registered callbacks. Called
diff --git a/binder_service_android_common.h b/binder_service_android_common.h
new file mode 100644
index 0000000..fc621d9
--- /dev/null
+++ b/binder_service_android_common.h
@@ -0,0 +1,45 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_BINDER_SERVICE_ANDROID_COMMON_H_
+#define UPDATE_ENGINE_BINDER_SERVICE_ANDROID_COMMON_H_
+
+#include <string>
+#include <vector>
+
+#include <binder/Status.h>
+
+namespace chromeos_update_engine {
+
+static inline android::binder::Status ErrorPtrToStatus(
+    const brillo::ErrorPtr& error) {
+  return android::binder::Status::fromServiceSpecificError(
+      1, android::String8{error->GetMessage().c_str()});
+}
+
+static inline std::vector<std::string> ToVecString(
+    const std::vector<android::String16>& inp) {
+  std::vector<std::string> out;
+  out.reserve(inp.size());
+  for (const auto& e : inp) {
+    out.emplace_back(android::String8{e}.string());
+  }
+  return out;
+}
+
+}  // namespace chromeos_update_engine
+
+#endif  // UPDATE_ENGINE_BINDER_SERVICE_ANDROID_COMMON_H_
diff --git a/binder_service_stable_android.cc b/binder_service_stable_android.cc
new file mode 100644
index 0000000..a12b349
--- /dev/null
+++ b/binder_service_stable_android.cc
@@ -0,0 +1,132 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/binder_service_stable_android.h"
+
+#include <memory>
+
+#include <base/bind.h>
+#include <base/logging.h>
+#include <binderwrapper/binder_wrapper.h>
+#include <brillo/errors/error.h>
+#include <utils/String8.h>
+
+#include "update_engine/binder_service_android_common.h"
+
+using android::binder::Status;
+using android::os::IUpdateEngineStableCallback;
+using android::os::ParcelFileDescriptor;
+using std::string;
+using std::vector;
+using update_engine::UpdateEngineStatus;
+
+namespace chromeos_update_engine {
+
+BinderUpdateEngineAndroidStableService::BinderUpdateEngineAndroidStableService(
+    ServiceDelegateAndroidInterface* service_delegate)
+    : service_delegate_(service_delegate) {}
+
+void BinderUpdateEngineAndroidStableService::SendStatusUpdate(
+    const UpdateEngineStatus& update_engine_status) {
+  last_status_ = static_cast<int>(update_engine_status.status);
+  last_progress_ = update_engine_status.progress;
+  if (callback_) {
+    callback_->onStatusUpdate(last_status_, last_progress_);
+  }
+}
+
+void BinderUpdateEngineAndroidStableService::SendPayloadApplicationComplete(
+    ErrorCode error_code) {
+  if (callback_) {
+    callback_->onPayloadApplicationComplete(static_cast<int>(error_code));
+  }
+}
+
+Status BinderUpdateEngineAndroidStableService::bind(
+    const android::sp<IUpdateEngineStableCallback>& callback,
+    bool* return_value) {
+  // Reject binding if another callback is already bound.
+  if (callback_ != nullptr) {
+    LOG(ERROR) << "Another callback is already bound. Can't bind new callback.";
+    *return_value = false;
+    return Status::ok();
+  }
+
+  // See BinderUpdateEngineAndroidService::bind.
+  if (last_status_ != -1) {
+    auto status = callback->onStatusUpdate(last_status_, last_progress_);
+    if (!status.isOk()) {
+      LOG(ERROR) << "Failed to call onStatusUpdate() from callback: "
+                 << status.toString8();
+      *return_value = false;
+      return Status::ok();
+    }
+  }
+
+  callback_ = callback;
+
+  const android::sp<IBinder>& callback_binder =
+      IUpdateEngineStableCallback::asBinder(callback);
+  auto binder_wrapper = android::BinderWrapper::Get();
+  binder_wrapper->RegisterForDeathNotifications(
+      callback_binder,
+      base::Bind(base::IgnoreResult(
+                     &BinderUpdateEngineAndroidStableService::UnbindCallback),
+                 base::Unretained(this),
+                 base::Unretained(callback_binder.get())));
+
+  *return_value = true;
+  return Status::ok();
+}
+
+Status BinderUpdateEngineAndroidStableService::unbind(
+    const android::sp<IUpdateEngineStableCallback>& callback,
+    bool* return_value) {
+  const android::sp<IBinder>& callback_binder =
+      IUpdateEngineStableCallback::asBinder(callback);
+  auto binder_wrapper = android::BinderWrapper::Get();
+  binder_wrapper->UnregisterForDeathNotifications(callback_binder);
+
+  *return_value = UnbindCallback(callback_binder.get());
+  return Status::ok();
+}
+
+Status BinderUpdateEngineAndroidStableService::applyPayloadFd(
+    const ParcelFileDescriptor& pfd,
+    int64_t payload_offset,
+    int64_t payload_size,
+    const vector<android::String16>& header_kv_pairs) {
+  vector<string> str_headers = ToVecString(header_kv_pairs);
+
+  brillo::ErrorPtr error;
+  if (!service_delegate_->ApplyPayload(
+          pfd.get(), payload_offset, payload_size, str_headers, &error)) {
+    return ErrorPtrToStatus(error);
+  }
+  return Status::ok();
+}
+
+bool BinderUpdateEngineAndroidStableService::UnbindCallback(
+    const IBinder* callback) {
+  if (IUpdateEngineStableCallback::asBinder(callback_).get() != callback) {
+    LOG(ERROR) << "Unable to unbind unknown callback.";
+    return false;
+  }
+  callback_ = nullptr;
+  return true;
+}
+
+}  // namespace chromeos_update_engine
diff --git a/binder_service_stable_android.h b/binder_service_stable_android.h
new file mode 100644
index 0000000..1667798
--- /dev/null
+++ b/binder_service_stable_android.h
@@ -0,0 +1,85 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_BINDER_SERVICE_STABLE_ANDROID_H_
+#define UPDATE_ENGINE_BINDER_SERVICE_STABLE_ANDROID_H_
+
+#include <stdint.h>
+
+#include <string>
+#include <vector>
+
+#include <utils/Errors.h>
+#include <utils/String16.h>
+#include <utils/StrongPointer.h>
+
+#include "android/os/BnUpdateEngineStable.h"
+#include "android/os/IUpdateEngineStableCallback.h"
+#include "update_engine/service_delegate_android_interface.h"
+#include "update_engine/service_observer_interface.h"
+
+namespace chromeos_update_engine {
+
+class BinderUpdateEngineAndroidStableService
+    : public android::os::BnUpdateEngineStable,
+      public ServiceObserverInterface {
+ public:
+  explicit BinderUpdateEngineAndroidStableService(
+      ServiceDelegateAndroidInterface* service_delegate);
+  ~BinderUpdateEngineAndroidStableService() override = default;
+
+  const char* ServiceName() const {
+    return "android.os.UpdateEngineStableService";
+  }
+
+  // ServiceObserverInterface overrides.
+  void SendStatusUpdate(
+      const update_engine::UpdateEngineStatus& update_engine_status) override;
+  void SendPayloadApplicationComplete(ErrorCode error_code) override;
+
+  // android::os::BnUpdateEngineStable overrides.
+  android::binder::Status applyPayloadFd(
+      const ::android::os::ParcelFileDescriptor& pfd,
+      int64_t payload_offset,
+      int64_t payload_size,
+      const std::vector<android::String16>& header_kv_pairs) override;
+  android::binder::Status bind(
+      const android::sp<android::os::IUpdateEngineStableCallback>& callback,
+      bool* return_value) override;
+  android::binder::Status unbind(
+      const android::sp<android::os::IUpdateEngineStableCallback>& callback,
+      bool* return_value) override;
+
+ private:
+  // Remove the passed |callback| from the list of registered callbacks. Called
+  // on unbind() or whenever the callback object is destroyed.
+  // Returns true on success.
+  bool UnbindCallback(const IBinder* callback);
+
+  // Bound callback. The stable interface only supports one callback at a time.
+  android::sp<android::os::IUpdateEngineStableCallback> callback_;
+
+  // Cached copy of the last status update sent. Used to send an initial
+  // notification when bind() is called from the client.
+  int last_status_{-1};
+  double last_progress_{0.0};
+
+  ServiceDelegateAndroidInterface* service_delegate_;
+};
+
+}  // namespace chromeos_update_engine
+
+#endif  // UPDATE_ENGINE_BINDER_SERVICE_STABLE_ANDROID_H_
diff --git a/boot_control_android.cc b/boot_control_android.cc
index 8909cd9..dee5fa8 100644
--- a/boot_control_android.cc
+++ b/boot_control_android.cc
@@ -22,11 +22,8 @@
 
 #include <base/bind.h>
 #include <base/logging.h>
-#include <base/strings/string_util.h>
 #include <bootloader_message/bootloader_message.h>
 #include <brillo/message_loops/message_loop.h>
-#include <fs_mgr.h>
-#include <fs_mgr_overlayfs.h>
 
 #include "update_engine/common/utils.h"
 #include "update_engine/dynamic_partition_control_android.h"
@@ -34,15 +31,12 @@
 using std::string;
 
 using android::dm::DmDeviceState;
-using android::fs_mgr::Partition;
 using android::hardware::hidl_string;
 using android::hardware::Return;
 using android::hardware::boot::V1_0::BoolResult;
 using android::hardware::boot::V1_0::CommandResult;
 using android::hardware::boot::V1_0::IBootControl;
 using Slot = chromeos_update_engine::BootControlInterface::Slot;
-using PartitionMetadata =
-    chromeos_update_engine::BootControlInterface::PartitionMetadata;
 
 namespace {
 
@@ -80,10 +74,6 @@
   return true;
 }
 
-void BootControlAndroid::Cleanup() {
-  dynamic_control_->Cleanup();
-}
-
 unsigned int BootControlAndroid::GetNumSlots() const {
   return module_->getNumberSlots();
 }
@@ -92,140 +82,24 @@
   return module_->getCurrentSlot();
 }
 
-bool BootControlAndroid::GetSuffix(Slot slot, string* suffix) const {
-  auto store_suffix_cb = [&suffix](hidl_string cb_suffix) {
-    *suffix = cb_suffix.c_str();
-  };
-  Return<void> ret = module_->getSuffix(slot, store_suffix_cb);
-
-  if (!ret.isOk()) {
-    LOG(ERROR) << "boot_control impl returned no suffix for slot "
-               << SlotName(slot);
-    return false;
-  }
-  return true;
-}
-
-bool BootControlAndroid::IsSuperBlockDevice(
-    const base::FilePath& device_dir,
-    Slot slot,
-    const string& partition_name_suffix) const {
-  string source_device =
-      device_dir.Append(fs_mgr_get_super_partition_name(slot)).value();
-  auto source_metadata = dynamic_control_->LoadMetadataBuilder(
-      source_device, slot, BootControlInterface::kInvalidSlot);
-  return source_metadata->HasBlockDevice(partition_name_suffix);
-}
-
-BootControlAndroid::DynamicPartitionDeviceStatus
-BootControlAndroid::GetDynamicPartitionDevice(
-    const base::FilePath& device_dir,
-    const string& partition_name_suffix,
-    Slot slot,
-    string* device) const {
-  string super_device =
-      device_dir.Append(fs_mgr_get_super_partition_name(slot)).value();
-
-  auto builder = dynamic_control_->LoadMetadataBuilder(
-      super_device, slot, BootControlInterface::kInvalidSlot);
-
-  if (builder == nullptr) {
-    LOG(ERROR) << "No metadata in slot "
-               << BootControlInterface::SlotName(slot);
-    return DynamicPartitionDeviceStatus::ERROR;
-  }
-
-  if (builder->FindPartition(partition_name_suffix) == nullptr) {
-    LOG(INFO) << partition_name_suffix
-              << " is not in super partition metadata.";
-
-    Slot current_slot = GetCurrentSlot();
-    if (IsSuperBlockDevice(device_dir, current_slot, partition_name_suffix)) {
-      LOG(ERROR) << "The static partition " << partition_name_suffix
-                 << " is a block device for current metadata ("
-                 << fs_mgr_get_super_partition_name(current_slot) << ", slot "
-                 << BootControlInterface::SlotName(current_slot)
-                 << "). It cannot be used as a logical partition.";
-      return DynamicPartitionDeviceStatus::ERROR;
-    }
-
-    return DynamicPartitionDeviceStatus::TRY_STATIC;
-  }
-
-  DmDeviceState state = dynamic_control_->GetState(partition_name_suffix);
-
-  // Device is mapped in the previous GetPartitionDevice() call. Just return
-  // the path.
-  if (state == DmDeviceState::ACTIVE) {
-    if (dynamic_control_->GetDmDevicePathByName(partition_name_suffix,
-                                                device)) {
-      LOG(INFO) << partition_name_suffix
-                << " is mapped on device mapper: " << *device;
-      return DynamicPartitionDeviceStatus::SUCCESS;
-    }
-    LOG(ERROR) << partition_name_suffix << " is mapped but path is unknown.";
-    return DynamicPartitionDeviceStatus::ERROR;
-  }
-
-  if (state == DmDeviceState::INVALID) {
-    bool force_writable = slot != GetCurrentSlot();
-    if (dynamic_control_->MapPartitionOnDeviceMapper(super_device,
-                                                     partition_name_suffix,
-                                                     slot,
-                                                     force_writable,
-                                                     device)) {
-      return DynamicPartitionDeviceStatus::SUCCESS;
-    }
-    return DynamicPartitionDeviceStatus::ERROR;
-  }
-
-  LOG(ERROR) << partition_name_suffix
-             << " is mapped on device mapper but state is unknown: "
-             << static_cast<std::underlying_type_t<DmDeviceState>>(state);
-  return DynamicPartitionDeviceStatus::ERROR;
+bool BootControlAndroid::GetPartitionDevice(const std::string& partition_name,
+                                            BootControlInterface::Slot slot,
+                                            bool not_in_payload,
+                                            std::string* device,
+                                            bool* is_dynamic) const {
+  return dynamic_control_->GetPartitionDevice(partition_name,
+                                              slot,
+                                              GetCurrentSlot(),
+                                              not_in_payload,
+                                              device,
+                                              is_dynamic);
 }
 
 bool BootControlAndroid::GetPartitionDevice(const string& partition_name,
-                                            Slot slot,
+                                            BootControlInterface::Slot slot,
                                             string* device) const {
-  string suffix;
-  if (!GetSuffix(slot, &suffix)) {
-    return false;
-  }
-  const string partition_name_suffix = partition_name + suffix;
-
-  string device_dir_str;
-  if (!dynamic_control_->GetDeviceDir(&device_dir_str)) {
-    return false;
-  }
-  base::FilePath device_dir(device_dir_str);
-
-  // When looking up target partition devices, treat them as static if the
-  // current payload doesn't encode them as dynamic partitions. This may happen
-  // when applying a retrofit update on top of a dynamic-partitions-enabled
-  // build.
-  if (dynamic_control_->IsDynamicPartitionsEnabled() &&
-      (slot == GetCurrentSlot() || is_target_dynamic_)) {
-    switch (GetDynamicPartitionDevice(
-        device_dir, partition_name_suffix, slot, device)) {
-      case DynamicPartitionDeviceStatus::SUCCESS:
-        return true;
-      case DynamicPartitionDeviceStatus::TRY_STATIC:
-        break;
-      case DynamicPartitionDeviceStatus::ERROR:  // fallthrough
-      default:
-        return false;
-    }
-  }
-
-  base::FilePath path = device_dir.Append(partition_name_suffix);
-  if (!dynamic_control_->DeviceExists(path.value())) {
-    LOG(ERROR) << "Device file " << path.value() << " does not exist.";
-    return false;
-  }
-
-  *device = path.value();
-  return true;
+  return GetPartitionDevice(
+      partition_name, slot, false /* not_in_payload */, device, nullptr);
 }
 
 bool BootControlAndroid::IsSlotBootable(Slot slot) const {
@@ -288,160 +162,25 @@
          brillo::MessageLoop::kTaskIdNull;
 }
 
-namespace {
-
-bool UpdatePartitionMetadata(DynamicPartitionControlInterface* dynamic_control,
-                             Slot source_slot,
-                             Slot target_slot,
-                             const string& target_suffix,
-                             const PartitionMetadata& partition_metadata) {
-  string device_dir_str;
-  if (!dynamic_control->GetDeviceDir(&device_dir_str)) {
+bool BootControlAndroid::IsSlotMarkedSuccessful(
+    BootControlInterface::Slot slot) const {
+  Return<BoolResult> ret = module_->isSlotMarkedSuccessful(slot);
+  CommandResult result;
+  if (!ret.isOk()) {
+    LOG(ERROR) << "Unable to determine if slot " << SlotName(slot)
+               << " is marked successful: " << ret.description();
     return false;
   }
-  base::FilePath device_dir(device_dir_str);
-  auto source_device =
-      device_dir.Append(fs_mgr_get_super_partition_name(source_slot)).value();
-
-  auto builder = dynamic_control->LoadMetadataBuilder(
-      source_device, source_slot, target_slot);
-  if (builder == nullptr) {
-    // TODO(elsk): allow reconstructing metadata from partition_metadata
-    // in recovery sideload.
-    LOG(ERROR) << "No metadata at "
-               << BootControlInterface::SlotName(source_slot);
+  if (ret == BoolResult::INVALID_SLOT) {
+    LOG(ERROR) << "Invalid slot: " << SlotName(slot);
     return false;
   }
-
-  std::vector<string> groups = builder->ListGroups();
-  for (const auto& group_name : groups) {
-    if (base::EndsWith(
-            group_name, target_suffix, base::CompareCase::SENSITIVE)) {
-      LOG(INFO) << "Removing group " << group_name;
-      builder->RemoveGroupAndPartitions(group_name);
-    }
-  }
-
-  uint64_t total_size = 0;
-  for (const auto& group : partition_metadata.groups) {
-    total_size += group.size;
-  }
-
-  string expr;
-  uint64_t allocatable_space = builder->AllocatableSpace();
-  if (!dynamic_control->IsDynamicPartitionsRetrofit()) {
-    allocatable_space /= 2;
-    expr = "half of ";
-  }
-  if (total_size > allocatable_space) {
-    LOG(ERROR) << "The maximum size of all groups with suffix " << target_suffix
-               << " (" << total_size << ") has exceeded " << expr
-               << " allocatable space for dynamic partitions "
-               << allocatable_space << ".";
-    return false;
-  }
-
-  for (const auto& group : partition_metadata.groups) {
-    auto group_name_suffix = group.name + target_suffix;
-    if (!builder->AddGroup(group_name_suffix, group.size)) {
-      LOG(ERROR) << "Cannot add group " << group_name_suffix << " with size "
-                 << group.size;
-      return false;
-    }
-    LOG(INFO) << "Added group " << group_name_suffix << " with size "
-              << group.size;
-
-    for (const auto& partition : group.partitions) {
-      auto partition_name_suffix = partition.name + target_suffix;
-      Partition* p = builder->AddPartition(
-          partition_name_suffix, group_name_suffix, LP_PARTITION_ATTR_READONLY);
-      if (!p) {
-        LOG(ERROR) << "Cannot add partition " << partition_name_suffix
-                   << " to group " << group_name_suffix;
-        return false;
-      }
-      if (!builder->ResizePartition(p, partition.size)) {
-        LOG(ERROR) << "Cannot resize partition " << partition_name_suffix
-                   << " to size " << partition.size << ". Not enough space?";
-        return false;
-      }
-      LOG(INFO) << "Added partition " << partition_name_suffix << " to group "
-                << group_name_suffix << " with size " << partition.size;
-    }
-  }
-
-  auto target_device =
-      device_dir.Append(fs_mgr_get_super_partition_name(target_slot)).value();
-  return dynamic_control->StoreMetadata(
-      target_device, builder.get(), target_slot);
+  return ret == BoolResult::TRUE;
 }
 
-bool UnmapTargetPartitions(DynamicPartitionControlInterface* dynamic_control,
-                           const string& target_suffix,
-                           const PartitionMetadata& partition_metadata) {
-  for (const auto& group : partition_metadata.groups) {
-    for (const auto& partition : group.partitions) {
-      if (!dynamic_control->UnmapPartitionOnDeviceMapper(
-              partition.name + target_suffix, true /* wait */)) {
-        return false;
-      }
-    }
-  }
-  return true;
-}
-
-}  // namespace
-
-bool BootControlAndroid::InitPartitionMetadata(
-    Slot target_slot,
-    const PartitionMetadata& partition_metadata,
-    bool update_metadata) {
-  if (fs_mgr_overlayfs_is_setup()) {
-    // Non DAP devices can use overlayfs as well.
-    LOG(WARNING)
-        << "overlayfs overrides are active and can interfere with our "
-           "resources.\n"
-        << "run adb enable-verity to deactivate if required and try again.";
-  }
-  if (!dynamic_control_->IsDynamicPartitionsEnabled()) {
-    return true;
-  }
-
-  auto source_slot = GetCurrentSlot();
-  if (target_slot == source_slot) {
-    LOG(ERROR) << "Cannot call InitPartitionMetadata on current slot.";
-    return false;
-  }
-
-  // Although the current build supports dynamic partitions, the given payload
-  // doesn't use it for target partitions. This could happen when applying a
-  // retrofit update. Skip updating the partition metadata for the target slot.
-  is_target_dynamic_ = !partition_metadata.groups.empty();
-  if (!is_target_dynamic_) {
-    return true;
-  }
-
-  if (!update_metadata) {
-    return true;
-  }
-
-  string target_suffix;
-  if (!GetSuffix(target_slot, &target_suffix)) {
-    return false;
-  }
-
-  // Unmap all the target dynamic partitions because they would become
-  // inconsistent with the new metadata.
-  if (!UnmapTargetPartitions(
-          dynamic_control_.get(), target_suffix, partition_metadata)) {
-    return false;
-  }
-
-  return UpdatePartitionMetadata(dynamic_control_.get(),
-                                 source_slot,
-                                 target_slot,
-                                 target_suffix,
-                                 partition_metadata);
+DynamicPartitionControlInterface*
+BootControlAndroid::GetDynamicPartitionControl() {
+  return dynamic_control_.get();
 }
 
 }  // namespace chromeos_update_engine
diff --git a/boot_control_android.h b/boot_control_android.h
index a6f33be..5009dbd 100644
--- a/boot_control_android.h
+++ b/boot_control_android.h
@@ -22,11 +22,11 @@
 #include <string>
 
 #include <android/hardware/boot/1.0/IBootControl.h>
-#include <base/files/file_util.h>
 #include <liblp/builder.h>
 
 #include "update_engine/common/boot_control.h"
-#include "update_engine/dynamic_partition_control_interface.h"
+#include "update_engine/common/dynamic_partition_control_interface.h"
+#include "update_engine/dynamic_partition_control_android.h"
 
 namespace chromeos_update_engine {
 
@@ -46,47 +46,25 @@
   BootControlInterface::Slot GetCurrentSlot() const override;
   bool GetPartitionDevice(const std::string& partition_name,
                           BootControlInterface::Slot slot,
+                          bool not_in_payload,
+                          std::string* device,
+                          bool* is_dynamic) const override;
+  bool GetPartitionDevice(const std::string& partition_name,
+                          BootControlInterface::Slot slot,
                           std::string* device) const override;
   bool IsSlotBootable(BootControlInterface::Slot slot) const override;
   bool MarkSlotUnbootable(BootControlInterface::Slot slot) override;
   bool SetActiveBootSlot(BootControlInterface::Slot slot) override;
   bool MarkBootSuccessfulAsync(base::Callback<void(bool)> callback) override;
-  bool InitPartitionMetadata(Slot slot,
-                             const PartitionMetadata& partition_metadata,
-                             bool update_metadata) override;
-  void Cleanup() override;
+  bool IsSlotMarkedSuccessful(BootControlInterface::Slot slot) const override;
+  DynamicPartitionControlInterface* GetDynamicPartitionControl() override;
 
  private:
   ::android::sp<::android::hardware::boot::V1_0::IBootControl> module_;
-  std::unique_ptr<DynamicPartitionControlInterface> dynamic_control_;
+  std::unique_ptr<DynamicPartitionControlAndroid> dynamic_control_;
 
   friend class BootControlAndroidTest;
 
-  // Wrapper method of IBootControl::getSuffix().
-  bool GetSuffix(Slot slot, std::string* out) const;
-
-  enum class DynamicPartitionDeviceStatus {
-    SUCCESS,
-    ERROR,
-    TRY_STATIC,
-  };
-
-  DynamicPartitionDeviceStatus GetDynamicPartitionDevice(
-      const base::FilePath& device_dir,
-      const std::string& partition_name_suffix,
-      Slot slot,
-      std::string* device) const;
-
-  // Return true if |partition_name_suffix| is a block device of
-  // super partition metadata slot |slot|.
-  bool IsSuperBlockDevice(const base::FilePath& device_dir,
-                          Slot slot,
-                          const std::string& partition_name_suffix) const;
-
-  // Whether the target partitions should be loaded as dynamic partitions. Set
-  // by InitPartitionMetadata() per each update.
-  bool is_target_dynamic_{false};
-
   DISALLOW_COPY_AND_ASSIGN(BootControlAndroid);
 };
 
diff --git a/boot_control_android_unittest.cc b/boot_control_android_unittest.cc
deleted file mode 100644
index bb9903e..0000000
--- a/boot_control_android_unittest.cc
+++ /dev/null
@@ -1,853 +0,0 @@
-//
-// Copyright (C) 2018 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#include "update_engine/boot_control_android.h"
-
-#include <set>
-#include <vector>
-
-#include <base/logging.h>
-#include <base/strings/string_util.h>
-#include <fs_mgr.h>
-#include <gmock/gmock.h>
-#include <gtest/gtest.h>
-#include <libdm/dm.h>
-
-#include "update_engine/mock_boot_control_hal.h"
-#include "update_engine/mock_dynamic_partition_control.h"
-
-using android::dm::DmDeviceState;
-using android::fs_mgr::MetadataBuilder;
-using android::hardware::Void;
-using std::string;
-using testing::_;
-using testing::AnyNumber;
-using testing::Contains;
-using testing::Eq;
-using testing::Invoke;
-using testing::Key;
-using testing::MakeMatcher;
-using testing::Matcher;
-using testing::MatcherInterface;
-using testing::MatchResultListener;
-using testing::NiceMock;
-using testing::Not;
-using testing::Return;
-
-namespace chromeos_update_engine {
-
-constexpr const uint32_t kMaxNumSlots = 2;
-constexpr const char* kSlotSuffixes[kMaxNumSlots] = {"_a", "_b"};
-constexpr const char* kFakeDevicePath = "/fake/dev/path/";
-constexpr const char* kFakeDmDevicePath = "/fake/dm/dev/path/";
-constexpr const uint32_t kFakeMetadataSize = 65536;
-constexpr const char* kDefaultGroup = "foo";
-
-// A map describing the size of each partition.
-// "{name, size}"
-using PartitionSizes = std::map<string, uint64_t>;
-
-// "{name_a, size}"
-using PartitionSuffixSizes = std::map<string, uint64_t>;
-
-using PartitionMetadata = BootControlInterface::PartitionMetadata;
-
-// C++ standards do not allow uint64_t (aka unsigned long) to be the parameter
-// of user-defined literal operators.
-constexpr unsigned long long operator"" _MiB(unsigned long long x) {  // NOLINT
-  return x << 20;
-}
-constexpr unsigned long long operator"" _GiB(unsigned long long x) {  // NOLINT
-  return x << 30;
-}
-
-constexpr uint64_t kDefaultGroupSize = 5_GiB;
-// Super device size. 1 MiB for metadata.
-constexpr uint64_t kDefaultSuperSize = kDefaultGroupSize * 2 + 1_MiB;
-
-template <typename U, typename V>
-std::ostream& operator<<(std::ostream& os, const std::map<U, V>& param) {
-  os << "{";
-  bool first = true;
-  for (const auto& pair : param) {
-    if (!first)
-      os << ", ";
-    os << pair.first << ":" << pair.second;
-    first = false;
-  }
-  return os << "}";
-}
-
-template <typename T>
-std::ostream& operator<<(std::ostream& os, const std::vector<T>& param) {
-  os << "[";
-  bool first = true;
-  for (const auto& e : param) {
-    if (!first)
-      os << ", ";
-    os << e;
-    first = false;
-  }
-  return os << "]";
-}
-
-std::ostream& operator<<(std::ostream& os,
-                         const PartitionMetadata::Partition& p) {
-  return os << "{" << p.name << ", " << p.size << "}";
-}
-
-std::ostream& operator<<(std::ostream& os, const PartitionMetadata::Group& g) {
-  return os << "{" << g.name << ", " << g.size << ", " << g.partitions << "}";
-}
-
-std::ostream& operator<<(std::ostream& os, const PartitionMetadata& m) {
-  return os << m.groups;
-}
-
-inline string GetDevice(const string& name) {
-  return kFakeDevicePath + name;
-}
-
-inline string GetDmDevice(const string& name) {
-  return kFakeDmDevicePath + name;
-}
-
-// TODO(elsk): fs_mgr_get_super_partition_name should be mocked.
-inline string GetSuperDevice(uint32_t slot) {
-  return GetDevice(fs_mgr_get_super_partition_name(slot));
-}
-
-struct TestParam {
-  uint32_t source;
-  uint32_t target;
-};
-std::ostream& operator<<(std::ostream& os, const TestParam& param) {
-  return os << "{source: " << param.source << ", target:" << param.target
-            << "}";
-}
-
-// To support legacy tests, auto-convert {name_a: size} map to
-// PartitionMetadata.
-PartitionMetadata partitionSuffixSizesToMetadata(
-    const PartitionSuffixSizes& partition_sizes) {
-  PartitionMetadata metadata;
-  for (const char* suffix : kSlotSuffixes) {
-    metadata.groups.push_back(
-        {string(kDefaultGroup) + suffix, kDefaultGroupSize, {}});
-  }
-  for (const auto& pair : partition_sizes) {
-    for (size_t suffix_idx = 0; suffix_idx < kMaxNumSlots; ++suffix_idx) {
-      if (base::EndsWith(pair.first,
-                         kSlotSuffixes[suffix_idx],
-                         base::CompareCase::SENSITIVE)) {
-        metadata.groups[suffix_idx].partitions.push_back(
-            {pair.first, pair.second});
-      }
-    }
-  }
-  return metadata;
-}
-
-// To support legacy tests, auto-convert {name: size} map to PartitionMetadata.
-PartitionMetadata partitionSizesToMetadata(
-    const PartitionSizes& partition_sizes) {
-  PartitionMetadata metadata;
-  metadata.groups.push_back({string{kDefaultGroup}, kDefaultGroupSize, {}});
-  for (const auto& pair : partition_sizes) {
-    metadata.groups[0].partitions.push_back({pair.first, pair.second});
-  }
-  return metadata;
-}
-
-std::unique_ptr<MetadataBuilder> NewFakeMetadata(
-    const PartitionMetadata& metadata) {
-  auto builder =
-      MetadataBuilder::New(kDefaultSuperSize, kFakeMetadataSize, kMaxNumSlots);
-  EXPECT_GE(builder->AllocatableSpace(), kDefaultGroupSize * 2);
-  EXPECT_NE(nullptr, builder);
-  if (builder == nullptr)
-    return nullptr;
-  for (const auto& group : metadata.groups) {
-    EXPECT_TRUE(builder->AddGroup(group.name, group.size));
-    for (const auto& partition : group.partitions) {
-      auto p = builder->AddPartition(partition.name, group.name, 0 /* attr */);
-      EXPECT_TRUE(p && builder->ResizePartition(p, partition.size));
-    }
-  }
-  return builder;
-}
-
-class MetadataMatcher : public MatcherInterface<MetadataBuilder*> {
- public:
-  explicit MetadataMatcher(const PartitionSuffixSizes& partition_sizes)
-      : partition_metadata_(partitionSuffixSizesToMetadata(partition_sizes)) {}
-  explicit MetadataMatcher(const PartitionMetadata& partition_metadata)
-      : partition_metadata_(partition_metadata) {}
-
-  bool MatchAndExplain(MetadataBuilder* metadata,
-                       MatchResultListener* listener) const override {
-    bool success = true;
-    for (const auto& group : partition_metadata_.groups) {
-      for (const auto& partition : group.partitions) {
-        auto p = metadata->FindPartition(partition.name);
-        if (p == nullptr) {
-          if (!success)
-            *listener << "; ";
-          *listener << "No partition " << partition.name;
-          success = false;
-          continue;
-        }
-        if (p->size() != partition.size) {
-          if (!success)
-            *listener << "; ";
-          *listener << "Partition " << partition.name << " has size "
-                    << p->size() << ", expected " << partition.size;
-          success = false;
-        }
-        if (p->group_name() != group.name) {
-          if (!success)
-            *listener << "; ";
-          *listener << "Partition " << partition.name << " has group "
-                    << p->group_name() << ", expected " << group.name;
-          success = false;
-        }
-      }
-    }
-    return success;
-  }
-
-  void DescribeTo(std::ostream* os) const override {
-    *os << "expect: " << partition_metadata_;
-  }
-
-  void DescribeNegationTo(std::ostream* os) const override {
-    *os << "expect not: " << partition_metadata_;
-  }
-
- private:
-  PartitionMetadata partition_metadata_;
-};
-
-inline Matcher<MetadataBuilder*> MetadataMatches(
-    const PartitionSuffixSizes& partition_sizes) {
-  return MakeMatcher(new MetadataMatcher(partition_sizes));
-}
-
-inline Matcher<MetadataBuilder*> MetadataMatches(
-    const PartitionMetadata& partition_metadata) {
-  return MakeMatcher(new MetadataMatcher(partition_metadata));
-}
-
-MATCHER_P(HasGroup, group, " has group " + group) {
-  auto groups = arg->ListGroups();
-  return std::find(groups.begin(), groups.end(), group) != groups.end();
-}
-
-class BootControlAndroidTest : public ::testing::Test {
- protected:
-  void SetUp() override {
-    // Fake init bootctl_
-    bootctl_.module_ = new NiceMock<MockBootControlHal>();
-    bootctl_.dynamic_control_ =
-        std::make_unique<NiceMock<MockDynamicPartitionControl>>();
-
-    ON_CALL(module(), getNumberSlots()).WillByDefault(Invoke([] {
-      return kMaxNumSlots;
-    }));
-    ON_CALL(module(), getSuffix(_, _))
-        .WillByDefault(Invoke([](auto slot, auto cb) {
-          EXPECT_LE(slot, kMaxNumSlots);
-          cb(slot < kMaxNumSlots ? kSlotSuffixes[slot] : "");
-          return Void();
-        }));
-
-    ON_CALL(dynamicControl(), IsDynamicPartitionsEnabled())
-        .WillByDefault(Return(true));
-    ON_CALL(dynamicControl(), IsDynamicPartitionsRetrofit())
-        .WillByDefault(Return(false));
-    ON_CALL(dynamicControl(), DeviceExists(_)).WillByDefault(Return(true));
-    ON_CALL(dynamicControl(), GetDeviceDir(_))
-        .WillByDefault(Invoke([](auto path) {
-          *path = kFakeDevicePath;
-          return true;
-        }));
-    ON_CALL(dynamicControl(), GetDmDevicePathByName(_, _))
-        .WillByDefault(Invoke([](auto partition_name_suffix, auto device) {
-          *device = GetDmDevice(partition_name_suffix);
-          return true;
-        }));
-  }
-
-  // Return the mocked HAL module.
-  NiceMock<MockBootControlHal>& module() {
-    return static_cast<NiceMock<MockBootControlHal>&>(*bootctl_.module_);
-  }
-
-  // Return the mocked DynamicPartitionControlInterface.
-  NiceMock<MockDynamicPartitionControl>& dynamicControl() {
-    return static_cast<NiceMock<MockDynamicPartitionControl>&>(
-        *bootctl_.dynamic_control_);
-  }
-
-  // Set the fake metadata to return when LoadMetadataBuilder is called on
-  // |slot|.
-  void SetMetadata(uint32_t slot, const PartitionSuffixSizes& sizes) {
-    SetMetadata(slot, partitionSuffixSizesToMetadata(sizes));
-  }
-
-  void SetMetadata(uint32_t slot, const PartitionMetadata& metadata) {
-    EXPECT_CALL(dynamicControl(),
-                LoadMetadataBuilder(GetSuperDevice(slot), slot, _))
-        .Times(AnyNumber())
-        .WillRepeatedly(Invoke([metadata](auto, auto, auto) {
-          return NewFakeMetadata(metadata);
-        }));
-  }
-
-  // Expect that UnmapPartitionOnDeviceMapper is called on target() metadata
-  // slot with each partition in |partitions|.
-  void ExpectUnmap(const std::set<string>& partitions) {
-    // Error when UnmapPartitionOnDeviceMapper is called on unknown arguments.
-    ON_CALL(dynamicControl(), UnmapPartitionOnDeviceMapper(_, _))
-        .WillByDefault(Return(false));
-
-    for (const auto& partition : partitions) {
-      EXPECT_CALL(dynamicControl(), UnmapPartitionOnDeviceMapper(partition, _))
-          .WillOnce(Invoke([this](auto partition, auto) {
-            mapped_devices_.erase(partition);
-            return true;
-          }));
-    }
-  }
-
-  void ExpectDevicesAreMapped(const std::set<string>& partitions) {
-    ASSERT_EQ(partitions.size(), mapped_devices_.size());
-    for (const auto& partition : partitions) {
-      EXPECT_THAT(mapped_devices_, Contains(Key(Eq(partition))))
-          << "Expect that " << partition << " is mapped, but it is not.";
-    }
-  }
-
-  void ExpectStoreMetadata(const PartitionSuffixSizes& partition_sizes) {
-    ExpectStoreMetadataMatch(MetadataMatches(partition_sizes));
-  }
-
-  virtual void ExpectStoreMetadataMatch(
-      const Matcher<MetadataBuilder*>& matcher) {
-    EXPECT_CALL(dynamicControl(),
-                StoreMetadata(GetSuperDevice(target()), matcher, target()))
-        .WillOnce(Return(true));
-  }
-
-  uint32_t source() { return slots_.source; }
-
-  uint32_t target() { return slots_.target; }
-
-  // Return partition names with suffix of source().
-  string S(const string& name) { return name + kSlotSuffixes[source()]; }
-
-  // Return partition names with suffix of target().
-  string T(const string& name) { return name + kSlotSuffixes[target()]; }
-
-  // Set source and target slots to use before testing.
-  void SetSlots(const TestParam& slots) {
-    slots_ = slots;
-
-    ON_CALL(module(), getCurrentSlot()).WillByDefault(Invoke([this] {
-      return source();
-    }));
-    // Should not store metadata to source slot.
-    EXPECT_CALL(dynamicControl(),
-                StoreMetadata(GetSuperDevice(source()), _, source()))
-        .Times(0);
-    // Should not load metadata from target slot.
-    EXPECT_CALL(dynamicControl(),
-                LoadMetadataBuilder(GetSuperDevice(target()), target(), _))
-        .Times(0);
-  }
-
-  bool InitPartitionMetadata(uint32_t slot,
-                             PartitionSizes partition_sizes,
-                             bool update_metadata = true) {
-    auto m = partitionSizesToMetadata(partition_sizes);
-    LOG(INFO) << m;
-    return bootctl_.InitPartitionMetadata(slot, m, update_metadata);
-  }
-
-  BootControlAndroid bootctl_;  // BootControlAndroid under test.
-  TestParam slots_;
-  // mapped devices through MapPartitionOnDeviceMapper.
-  std::map<string, string> mapped_devices_;
-};
-
-class BootControlAndroidTestP
-    : public BootControlAndroidTest,
-      public ::testing::WithParamInterface<TestParam> {
- public:
-  void SetUp() override {
-    BootControlAndroidTest::SetUp();
-    SetSlots(GetParam());
-  }
-};
-
-// Test resize case. Grow if target metadata contains a partition with a size
-// less than expected.
-TEST_P(BootControlAndroidTestP, NeedGrowIfSizeNotMatchWhenResizing) {
-  SetMetadata(source(),
-              {{S("system"), 2_GiB},
-               {S("vendor"), 1_GiB},
-               {T("system"), 2_GiB},
-               {T("vendor"), 1_GiB}});
-  ExpectStoreMetadata({{S("system"), 2_GiB},
-                       {S("vendor"), 1_GiB},
-                       {T("system"), 3_GiB},
-                       {T("vendor"), 1_GiB}});
-  ExpectUnmap({T("system"), T("vendor")});
-
-  EXPECT_TRUE(
-      InitPartitionMetadata(target(), {{"system", 3_GiB}, {"vendor", 1_GiB}}));
-}
-
-// Test resize case. Shrink if target metadata contains a partition with a size
-// greater than expected.
-TEST_P(BootControlAndroidTestP, NeedShrinkIfSizeNotMatchWhenResizing) {
-  SetMetadata(source(),
-              {{S("system"), 2_GiB},
-               {S("vendor"), 1_GiB},
-               {T("system"), 2_GiB},
-               {T("vendor"), 1_GiB}});
-  ExpectStoreMetadata({{S("system"), 2_GiB},
-                       {S("vendor"), 1_GiB},
-                       {T("system"), 2_GiB},
-                       {T("vendor"), 150_MiB}});
-  ExpectUnmap({T("system"), T("vendor")});
-
-  EXPECT_TRUE(InitPartitionMetadata(target(),
-                                    {{"system", 2_GiB}, {"vendor", 150_MiB}}));
-}
-
-// Test adding partitions on the first run.
-TEST_P(BootControlAndroidTestP, AddPartitionToEmptyMetadata) {
-  SetMetadata(source(), PartitionSuffixSizes{});
-  ExpectStoreMetadata({{T("system"), 2_GiB}, {T("vendor"), 1_GiB}});
-  ExpectUnmap({T("system"), T("vendor")});
-
-  EXPECT_TRUE(
-      InitPartitionMetadata(target(), {{"system", 2_GiB}, {"vendor", 1_GiB}}));
-}
-
-// Test subsequent add case.
-TEST_P(BootControlAndroidTestP, AddAdditionalPartition) {
-  SetMetadata(source(), {{S("system"), 2_GiB}, {T("system"), 2_GiB}});
-  ExpectStoreMetadata(
-      {{S("system"), 2_GiB}, {T("system"), 2_GiB}, {T("vendor"), 1_GiB}});
-  ExpectUnmap({T("system"), T("vendor")});
-
-  EXPECT_TRUE(
-      InitPartitionMetadata(target(), {{"system", 2_GiB}, {"vendor", 1_GiB}}));
-}
-
-// Test delete one partition.
-TEST_P(BootControlAndroidTestP, DeletePartition) {
-  SetMetadata(source(),
-              {{S("system"), 2_GiB},
-               {S("vendor"), 1_GiB},
-               {T("system"), 2_GiB},
-               {T("vendor"), 1_GiB}});
-  // No T("vendor")
-  ExpectStoreMetadata(
-      {{S("system"), 2_GiB}, {S("vendor"), 1_GiB}, {T("system"), 2_GiB}});
-  ExpectUnmap({T("system")});
-
-  EXPECT_TRUE(InitPartitionMetadata(target(), {{"system", 2_GiB}}));
-}
-
-// Test delete all partitions.
-TEST_P(BootControlAndroidTestP, DeleteAll) {
-  SetMetadata(source(),
-              {{S("system"), 2_GiB},
-               {S("vendor"), 1_GiB},
-               {T("system"), 2_GiB},
-               {T("vendor"), 1_GiB}});
-  ExpectStoreMetadata({{S("system"), 2_GiB}, {S("vendor"), 1_GiB}});
-
-  EXPECT_TRUE(InitPartitionMetadata(target(), {}));
-}
-
-// Test corrupt source metadata case.
-TEST_P(BootControlAndroidTestP, CorruptedSourceMetadata) {
-  EXPECT_CALL(dynamicControl(),
-              LoadMetadataBuilder(GetSuperDevice(source()), source(), _))
-      .WillOnce(Invoke([](auto, auto, auto) { return nullptr; }));
-  ExpectUnmap({T("system")});
-
-  EXPECT_FALSE(InitPartitionMetadata(target(), {{"system", 1_GiB}}))
-      << "Should not be able to continue with corrupt source metadata";
-}
-
-// Test that InitPartitionMetadata fail if there is not enough space on the
-// device.
-TEST_P(BootControlAndroidTestP, NotEnoughSpace) {
-  SetMetadata(source(),
-              {{S("system"), 3_GiB},
-               {S("vendor"), 2_GiB},
-               {T("system"), 0},
-               {T("vendor"), 0}});
-  EXPECT_FALSE(
-      InitPartitionMetadata(target(), {{"system", 3_GiB}, {"vendor", 3_GiB}}))
-      << "Should not be able to fit 11GiB data into 10GiB space";
-}
-
-TEST_P(BootControlAndroidTestP, NotEnoughSpaceForSlot) {
-  SetMetadata(source(),
-              {{S("system"), 1_GiB},
-               {S("vendor"), 1_GiB},
-               {T("system"), 0},
-               {T("vendor"), 0}});
-  EXPECT_FALSE(
-      InitPartitionMetadata(target(), {{"system", 3_GiB}, {"vendor", 3_GiB}}))
-      << "Should not be able to grow over size of super / 2";
-}
-
-// Test applying retrofit update on a build with dynamic partitions enabled.
-TEST_P(BootControlAndroidTestP,
-       ApplyRetrofitUpdateOnDynamicPartitionsEnabledBuild) {
-  SetMetadata(source(),
-              {{S("system"), 2_GiB},
-               {S("vendor"), 1_GiB},
-               {T("system"), 2_GiB},
-               {T("vendor"), 1_GiB}});
-  // Should not try to unmap any target partition.
-  EXPECT_CALL(dynamicControl(), UnmapPartitionOnDeviceMapper(_, _)).Times(0);
-  // Should not store metadata to target slot.
-  EXPECT_CALL(dynamicControl(),
-              StoreMetadata(GetSuperDevice(target()), _, target()))
-      .Times(0);
-
-  // Not calling through BootControlAndroidTest::InitPartitionMetadata(), since
-  // we don't want any default group in the PartitionMetadata.
-  EXPECT_TRUE(bootctl_.InitPartitionMetadata(target(), {}, true));
-
-  // Should use dynamic source partitions.
-  EXPECT_CALL(dynamicControl(), GetState(S("system")))
-      .Times(1)
-      .WillOnce(Return(DmDeviceState::ACTIVE));
-  string system_device;
-  EXPECT_TRUE(bootctl_.GetPartitionDevice("system", source(), &system_device));
-  EXPECT_EQ(GetDmDevice(S("system")), system_device);
-
-  // Should use static target partitions without querying dynamic control.
-  EXPECT_CALL(dynamicControl(), GetState(T("system"))).Times(0);
-  EXPECT_TRUE(bootctl_.GetPartitionDevice("system", target(), &system_device));
-  EXPECT_EQ(GetDevice(T("system")), system_device);
-
-  // Static partition "bar".
-  EXPECT_CALL(dynamicControl(), GetState(S("bar"))).Times(0);
-  std::string bar_device;
-  EXPECT_TRUE(bootctl_.GetPartitionDevice("bar", source(), &bar_device));
-  EXPECT_EQ(GetDevice(S("bar")), bar_device);
-
-  EXPECT_CALL(dynamicControl(), GetState(T("bar"))).Times(0);
-  EXPECT_TRUE(bootctl_.GetPartitionDevice("bar", target(), &bar_device));
-  EXPECT_EQ(GetDevice(T("bar")), bar_device);
-}
-
-TEST_P(BootControlAndroidTestP, GetPartitionDeviceWhenResumingUpdate) {
-  // Both of the two slots contain valid partition metadata, since this is
-  // resuming an update.
-  SetMetadata(source(),
-              {{S("system"), 2_GiB},
-               {S("vendor"), 1_GiB},
-               {T("system"), 2_GiB},
-               {T("vendor"), 1_GiB}});
-  SetMetadata(target(),
-              {{S("system"), 2_GiB},
-               {S("vendor"), 1_GiB},
-               {T("system"), 2_GiB},
-               {T("vendor"), 1_GiB}});
-  EXPECT_CALL(dynamicControl(),
-              StoreMetadata(GetSuperDevice(target()), _, target()))
-      .Times(0);
-  EXPECT_TRUE(InitPartitionMetadata(
-      target(), {{"system", 2_GiB}, {"vendor", 1_GiB}}, false));
-
-  // Dynamic partition "system".
-  EXPECT_CALL(dynamicControl(), GetState(S("system")))
-      .Times(1)
-      .WillOnce(Return(DmDeviceState::ACTIVE));
-  string system_device;
-  EXPECT_TRUE(bootctl_.GetPartitionDevice("system", source(), &system_device));
-  EXPECT_EQ(GetDmDevice(S("system")), system_device);
-
-  EXPECT_CALL(dynamicControl(), GetState(T("system")))
-      .Times(1)
-      .WillOnce(Return(DmDeviceState::ACTIVE));
-  EXPECT_TRUE(bootctl_.GetPartitionDevice("system", target(), &system_device));
-  EXPECT_EQ(GetDmDevice(T("system")), system_device);
-
-  // Static partition "bar".
-  EXPECT_CALL(dynamicControl(), GetState(S("bar"))).Times(0);
-  std::string bar_device;
-  EXPECT_TRUE(bootctl_.GetPartitionDevice("bar", source(), &bar_device));
-  EXPECT_EQ(GetDevice(S("bar")), bar_device);
-
-  EXPECT_CALL(dynamicControl(), GetState(T("bar"))).Times(0);
-  EXPECT_TRUE(bootctl_.GetPartitionDevice("bar", target(), &bar_device));
-  EXPECT_EQ(GetDevice(T("bar")), bar_device);
-}
-
-INSTANTIATE_TEST_CASE_P(BootControlAndroidTest,
-                        BootControlAndroidTestP,
-                        testing::Values(TestParam{0, 1}, TestParam{1, 0}));
-
-const PartitionSuffixSizes update_sizes_0() {
-  // Initial state is 0 for "other" slot.
-  return {
-      {"grown_a", 2_GiB},
-      {"shrunk_a", 1_GiB},
-      {"same_a", 100_MiB},
-      {"deleted_a", 150_MiB},
-      // no added_a
-      {"grown_b", 200_MiB},
-      // simulate system_other
-      {"shrunk_b", 0},
-      {"same_b", 0},
-      {"deleted_b", 0},
-      // no added_b
-  };
-}
-
-const PartitionSuffixSizes update_sizes_1() {
-  return {
-      {"grown_a", 2_GiB},
-      {"shrunk_a", 1_GiB},
-      {"same_a", 100_MiB},
-      {"deleted_a", 150_MiB},
-      // no added_a
-      {"grown_b", 3_GiB},
-      {"shrunk_b", 150_MiB},
-      {"same_b", 100_MiB},
-      {"added_b", 150_MiB},
-      // no deleted_b
-  };
-}
-
-const PartitionSuffixSizes update_sizes_2() {
-  return {
-      {"grown_a", 4_GiB},
-      {"shrunk_a", 100_MiB},
-      {"same_a", 100_MiB},
-      {"deleted_a", 64_MiB},
-      // no added_a
-      {"grown_b", 3_GiB},
-      {"shrunk_b", 150_MiB},
-      {"same_b", 100_MiB},
-      {"added_b", 150_MiB},
-      // no deleted_b
-  };
-}
-
-// Test case for first update after the device is manufactured, in which
-// case the "other" slot is likely of size "0" (except system, which is
-// non-zero because of system_other partition)
-TEST_F(BootControlAndroidTest, SimulatedFirstUpdate) {
-  SetSlots({0, 1});
-
-  SetMetadata(source(), update_sizes_0());
-  SetMetadata(target(), update_sizes_0());
-  ExpectStoreMetadata(update_sizes_1());
-  ExpectUnmap({"grown_b", "shrunk_b", "same_b", "added_b"});
-
-  EXPECT_TRUE(InitPartitionMetadata(target(),
-                                    {{"grown", 3_GiB},
-                                     {"shrunk", 150_MiB},
-                                     {"same", 100_MiB},
-                                     {"added", 150_MiB}}));
-}
-
-// After first update, test for the second update. In the second update, the
-// "added" partition is deleted and "deleted" partition is re-added.
-TEST_F(BootControlAndroidTest, SimulatedSecondUpdate) {
-  SetSlots({1, 0});
-
-  SetMetadata(source(), update_sizes_1());
-  SetMetadata(target(), update_sizes_0());
-
-  ExpectStoreMetadata(update_sizes_2());
-  ExpectUnmap({"grown_a", "shrunk_a", "same_a", "deleted_a"});
-
-  EXPECT_TRUE(InitPartitionMetadata(target(),
-                                    {{"grown", 4_GiB},
-                                     {"shrunk", 100_MiB},
-                                     {"same", 100_MiB},
-                                     {"deleted", 64_MiB}}));
-}
-
-TEST_F(BootControlAndroidTest, ApplyingToCurrentSlot) {
-  SetSlots({1, 1});
-  EXPECT_FALSE(InitPartitionMetadata(target(), {}))
-      << "Should not be able to apply to current slot.";
-}
-
-class BootControlAndroidGroupTestP : public BootControlAndroidTestP {
- public:
-  void SetUp() override {
-    BootControlAndroidTestP::SetUp();
-    SetMetadata(
-        source(),
-        {.groups = {SimpleGroup(S("android"), 3_GiB, S("system"), 2_GiB),
-                    SimpleGroup(S("oem"), 2_GiB, S("vendor"), 1_GiB),
-                    SimpleGroup(T("android"), 3_GiB, T("system"), 0),
-                    SimpleGroup(T("oem"), 2_GiB, T("vendor"), 0)}});
-  }
-
-  // Return a simple group with only one partition.
-  PartitionMetadata::Group SimpleGroup(const string& group,
-                                       uint64_t group_size,
-                                       const string& partition,
-                                       uint64_t partition_size) {
-    return {.name = group,
-            .size = group_size,
-            .partitions = {{.name = partition, .size = partition_size}}};
-  }
-
-  void ExpectStoreMetadata(const PartitionMetadata& partition_metadata) {
-    ExpectStoreMetadataMatch(MetadataMatches(partition_metadata));
-  }
-
-  // Expect that target slot is stored with target groups.
-  void ExpectStoreMetadataMatch(
-      const Matcher<MetadataBuilder*>& matcher) override {
-    BootControlAndroidTestP::ExpectStoreMetadataMatch(AllOf(
-        MetadataMatches(PartitionMetadata{
-            .groups = {SimpleGroup(S("android"), 3_GiB, S("system"), 2_GiB),
-                       SimpleGroup(S("oem"), 2_GiB, S("vendor"), 1_GiB)}}),
-        matcher));
-  }
-};
-
-// Allow to resize within group.
-TEST_P(BootControlAndroidGroupTestP, ResizeWithinGroup) {
-  ExpectStoreMetadata(PartitionMetadata{
-      .groups = {SimpleGroup(T("android"), 3_GiB, T("system"), 3_GiB),
-                 SimpleGroup(T("oem"), 2_GiB, T("vendor"), 2_GiB)}});
-  ExpectUnmap({T("system"), T("vendor")});
-
-  EXPECT_TRUE(bootctl_.InitPartitionMetadata(
-      target(),
-      PartitionMetadata{
-          .groups = {SimpleGroup("android", 3_GiB, "system", 3_GiB),
-                     SimpleGroup("oem", 2_GiB, "vendor", 2_GiB)}},
-      true));
-}
-
-TEST_P(BootControlAndroidGroupTestP, NotEnoughSpaceForGroup) {
-  EXPECT_FALSE(bootctl_.InitPartitionMetadata(
-      target(),
-      PartitionMetadata{
-          .groups = {SimpleGroup("android", 3_GiB, "system", 1_GiB),
-                     SimpleGroup("oem", 2_GiB, "vendor", 3_GiB)}},
-      true))
-      << "Should not be able to grow over maximum size of group";
-}
-
-TEST_P(BootControlAndroidGroupTestP, GroupTooBig) {
-  EXPECT_FALSE(bootctl_.InitPartitionMetadata(
-      target(),
-      PartitionMetadata{.groups = {{.name = "android", .size = 3_GiB},
-                                   {.name = "oem", .size = 3_GiB}}},
-      true))
-      << "Should not be able to grow over size of super / 2";
-}
-
-TEST_P(BootControlAndroidGroupTestP, AddPartitionToGroup) {
-  ExpectStoreMetadata(PartitionMetadata{
-      .groups = {
-          {.name = T("android"),
-           .size = 3_GiB,
-           .partitions = {{.name = T("system"), .size = 2_GiB},
-                          {.name = T("product_services"), .size = 1_GiB}}}}});
-  ExpectUnmap({T("system"), T("vendor"), T("product_services")});
-
-  EXPECT_TRUE(bootctl_.InitPartitionMetadata(
-      target(),
-      PartitionMetadata{
-          .groups = {{.name = "android",
-                      .size = 3_GiB,
-                      .partitions = {{.name = "system", .size = 2_GiB},
-                                     {.name = "product_services",
-                                      .size = 1_GiB}}},
-                     SimpleGroup("oem", 2_GiB, "vendor", 2_GiB)}},
-      true));
-}
-
-TEST_P(BootControlAndroidGroupTestP, RemovePartitionFromGroup) {
-  ExpectStoreMetadata(PartitionMetadata{
-      .groups = {{.name = T("android"), .size = 3_GiB, .partitions = {}}}});
-  ExpectUnmap({T("vendor")});
-
-  EXPECT_TRUE(bootctl_.InitPartitionMetadata(
-      target(),
-      PartitionMetadata{
-          .groups = {{.name = "android", .size = 3_GiB, .partitions = {}},
-                     SimpleGroup("oem", 2_GiB, "vendor", 2_GiB)}},
-      true));
-}
-
-TEST_P(BootControlAndroidGroupTestP, AddGroup) {
-  ExpectStoreMetadata(PartitionMetadata{
-      .groups = {
-          SimpleGroup(T("new_group"), 2_GiB, T("new_partition"), 2_GiB)}});
-  ExpectUnmap({T("system"), T("vendor"), T("new_partition")});
-
-  EXPECT_TRUE(bootctl_.InitPartitionMetadata(
-      target(),
-      PartitionMetadata{
-          .groups = {SimpleGroup("android", 2_GiB, "system", 2_GiB),
-                     SimpleGroup("oem", 1_GiB, "vendor", 1_GiB),
-                     SimpleGroup("new_group", 2_GiB, "new_partition", 2_GiB)}},
-      true));
-}
-
-TEST_P(BootControlAndroidGroupTestP, RemoveGroup) {
-  ExpectStoreMetadataMatch(Not(HasGroup(T("oem"))));
-  ExpectUnmap({T("system")});
-  EXPECT_TRUE(bootctl_.InitPartitionMetadata(
-      target(),
-      PartitionMetadata{
-          .groups = {SimpleGroup("android", 2_GiB, "system", 2_GiB)}},
-      true));
-}
-
-TEST_P(BootControlAndroidGroupTestP, ResizeGroup) {
-  ExpectStoreMetadata(PartitionMetadata{
-      .groups = {SimpleGroup(T("android"), 2_GiB, T("system"), 2_GiB),
-                 SimpleGroup(T("oem"), 3_GiB, T("vendor"), 3_GiB)}});
-  ExpectUnmap({T("system"), T("vendor")});
-
-  EXPECT_TRUE(bootctl_.InitPartitionMetadata(
-      target(),
-      PartitionMetadata{
-          .groups = {SimpleGroup("android", 2_GiB, "system", 2_GiB),
-                     SimpleGroup("oem", 3_GiB, "vendor", 3_GiB)}},
-      true));
-}
-
-INSTANTIATE_TEST_CASE_P(BootControlAndroidTest,
-                        BootControlAndroidGroupTestP,
-                        testing::Values(TestParam{0, 1}, TestParam{1, 0}));
-
-}  // namespace chromeos_update_engine
diff --git a/boot_control_chromeos.cc b/boot_control_chromeos.cc
index 3f1eac4..95456f0 100644
--- a/boot_control_chromeos.cc
+++ b/boot_control_chromeos.cc
@@ -34,6 +34,7 @@
 }
 
 #include "update_engine/common/boot_control.h"
+#include "update_engine/common/dynamic_partition_control_stub.h"
 #include "update_engine/common/subprocess.h"
 #include "update_engine/common/utils.h"
 
@@ -134,6 +135,8 @@
     return false;
   }
 
+  dynamic_partition_control_.reset(new DynamicPartitionControlStub());
+
   LOG(INFO) << "Booted from slot " << current_slot_ << " (slot "
             << SlotName(current_slot_) << ") of " << num_slots_
             << " slots present on disk " << boot_disk_name_;
@@ -173,9 +176,11 @@
   return true;
 }
 
-bool BootControlChromeOS::GetPartitionDevice(const string& partition_name,
-                                             unsigned int slot,
-                                             string* device) const {
+bool BootControlChromeOS::GetPartitionDevice(const std::string& partition_name,
+                                             BootControlInterface::Slot slot,
+                                             bool not_in_payload,
+                                             std::string* device,
+                                             bool* is_dynamic) const {
   // Partition name prefixed with |kPartitionNamePrefixDlc| is a DLC module.
   if (base::StartsWith(partition_name,
                        kPartitionNamePrefixDlc,
@@ -201,9 +206,18 @@
     return false;
 
   *device = part_device;
+  if (is_dynamic) {
+    *is_dynamic = false;
+  }
   return true;
 }
 
+bool BootControlChromeOS::GetPartitionDevice(const string& partition_name,
+                                             BootControlInterface::Slot slot,
+                                             string* device) const {
+  return GetPartitionDevice(partition_name, slot, false, device, nullptr);
+}
+
 bool BootControlChromeOS::IsSlotBootable(Slot slot) const {
   int partition_num = GetPartitionNumber(kChromeOSPartitionNameKernel, slot);
   if (partition_num < 0)
@@ -350,13 +364,14 @@
   return -1;
 }
 
-bool BootControlChromeOS::InitPartitionMetadata(
-    Slot slot,
-    const PartitionMetadata& partition_metadata,
-    bool update_metadata) {
-  return true;
+bool BootControlChromeOS::IsSlotMarkedSuccessful(Slot slot) const {
+  LOG(ERROR) << __func__ << " not supported.";
+  return false;
 }
 
-void BootControlChromeOS::Cleanup() {}
+DynamicPartitionControlInterface*
+BootControlChromeOS::GetDynamicPartitionControl() {
+  return dynamic_partition_control_.get();
+}
 
 }  // namespace chromeos_update_engine
diff --git a/boot_control_chromeos.h b/boot_control_chromeos.h
index 109197f..4271672 100644
--- a/boot_control_chromeos.h
+++ b/boot_control_chromeos.h
@@ -17,12 +17,14 @@
 #ifndef UPDATE_ENGINE_BOOT_CONTROL_CHROMEOS_H_
 #define UPDATE_ENGINE_BOOT_CONTROL_CHROMEOS_H_
 
+#include <memory>
 #include <string>
 
 #include <base/callback.h>
 #include <gtest/gtest_prod.h>  // for FRIEND_TEST
 
 #include "update_engine/common/boot_control_interface.h"
+#include "update_engine/common/dynamic_partition_control_interface.h"
 
 namespace chromeos_update_engine {
 
@@ -45,15 +47,18 @@
   BootControlInterface::Slot GetCurrentSlot() const override;
   bool GetPartitionDevice(const std::string& partition_name,
                           BootControlInterface::Slot slot,
+                          bool not_in_payload,
+                          std::string* device,
+                          bool* is_dynamic) const override;
+  bool GetPartitionDevice(const std::string& partition_name,
+                          BootControlInterface::Slot slot,
                           std::string* device) const override;
   bool IsSlotBootable(BootControlInterface::Slot slot) const override;
   bool MarkSlotUnbootable(BootControlInterface::Slot slot) override;
   bool SetActiveBootSlot(BootControlInterface::Slot slot) override;
   bool MarkBootSuccessfulAsync(base::Callback<void(bool)> callback) override;
-  bool InitPartitionMetadata(Slot slot,
-                             const PartitionMetadata& partition_metadata,
-                             bool update_metadata) override;
-  void Cleanup() override;
+  bool IsSlotMarkedSuccessful(BootControlInterface::Slot slot) const override;
+  DynamicPartitionControlInterface* GetDynamicPartitionControl() override;
 
  private:
   friend class BootControlChromeOSTest;
@@ -77,7 +82,7 @@
 
   // Extracts DLC module ID and package ID from partition name. The structure of
   // the partition name is dlc/<dlc-id>/<dlc-package>. For example:
-  // dlc/dummy-dlc/dummy-package
+  // dlc/fake-dlc/fake-package
   bool ParseDlcPartitionName(const std::string partition_name,
                              std::string* dlc_id,
                              std::string* dlc_package) const;
@@ -89,6 +94,8 @@
   // The block device of the disk we booted from, without the partition number.
   std::string boot_disk_name_;
 
+  std::unique_ptr<DynamicPartitionControlInterface> dynamic_partition_control_;
+
   DISALLOW_COPY_AND_ASSIGN(BootControlChromeOS);
 };
 
diff --git a/cleanup_previous_update_action.cc b/cleanup_previous_update_action.cc
new file mode 100644
index 0000000..1a2476f
--- /dev/null
+++ b/cleanup_previous_update_action.cc
@@ -0,0 +1,425 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "update_engine/cleanup_previous_update_action.h"
+
+#include <chrono>  // NOLINT(build/c++11) -- for merge times
+#include <functional>
+#include <string>
+#include <type_traits>
+
+#include <android-base/properties.h>
+#include <base/bind.h>
+
+#ifndef __ANDROID_RECOVERY__
+#include <statslog.h>
+#endif
+
+#include "update_engine/common/utils.h"
+#include "update_engine/payload_consumer/delta_performer.h"
+
+using android::base::GetBoolProperty;
+using android::snapshot::ISnapshotManager;
+using android::snapshot::SnapshotMergeStats;
+using android::snapshot::UpdateState;
+using brillo::MessageLoop;
+
+constexpr char kBootCompletedProp[] = "sys.boot_completed";
+// Interval to check sys.boot_completed.
+constexpr auto kCheckBootCompletedInterval = base::TimeDelta::FromSeconds(2);
+// Interval to check IBootControl::isSlotMarkedSuccessful
+constexpr auto kCheckSlotMarkedSuccessfulInterval =
+    base::TimeDelta::FromSeconds(2);
+// Interval to call SnapshotManager::ProcessUpdateState
+constexpr auto kWaitForMergeInterval = base::TimeDelta::FromSeconds(2);
+
+#ifdef __ANDROID_RECOVERY__
+static constexpr bool kIsRecovery = true;
+#else
+static constexpr bool kIsRecovery = false;
+#endif
+
+namespace chromeos_update_engine {
+
+CleanupPreviousUpdateAction::CleanupPreviousUpdateAction(
+    PrefsInterface* prefs,
+    BootControlInterface* boot_control,
+    android::snapshot::ISnapshotManager* snapshot,
+    CleanupPreviousUpdateActionDelegateInterface* delegate)
+    : prefs_(prefs),
+      boot_control_(boot_control),
+      snapshot_(snapshot),
+      delegate_(delegate),
+      running_(false),
+      cancel_failed_(false),
+      last_percentage_(0),
+      merge_stats_(nullptr) {}
+
+void CleanupPreviousUpdateAction::PerformAction() {
+  ResumeAction();
+}
+
+void CleanupPreviousUpdateAction::TerminateProcessing() {
+  SuspendAction();
+}
+
+void CleanupPreviousUpdateAction::ResumeAction() {
+  CHECK(prefs_);
+  CHECK(boot_control_);
+
+  LOG(INFO) << "Starting/resuming CleanupPreviousUpdateAction";
+  running_ = true;
+  StartActionInternal();
+}
+
+void CleanupPreviousUpdateAction::SuspendAction() {
+  LOG(INFO) << "Stopping/suspending CleanupPreviousUpdateAction";
+  running_ = false;
+}
+
+void CleanupPreviousUpdateAction::ActionCompleted(ErrorCode error_code) {
+  running_ = false;
+  ReportMergeStats();
+  metadata_device_ = nullptr;
+}
+
+std::string CleanupPreviousUpdateAction::Type() const {
+  return StaticType();
+}
+
+std::string CleanupPreviousUpdateAction::StaticType() {
+  return "CleanupPreviousUpdateAction";
+}
+
+void CleanupPreviousUpdateAction::StartActionInternal() {
+  // Do nothing on non-VAB device.
+  if (!boot_control_->GetDynamicPartitionControl()
+           ->GetVirtualAbFeatureFlag()
+           .IsEnabled()) {
+    processor_->ActionComplete(this, ErrorCode::kSuccess);
+    return;
+  }
+  // SnapshotManager must be available on VAB devices.
+  CHECK(snapshot_ != nullptr);
+  merge_stats_ = snapshot_->GetSnapshotMergeStatsInstance();
+  CHECK(merge_stats_ != nullptr);
+  WaitBootCompletedOrSchedule();
+}
+
+void CleanupPreviousUpdateAction::ScheduleWaitBootCompleted() {
+  TEST_AND_RETURN(running_);
+  MessageLoop::current()->PostDelayedTask(
+      FROM_HERE,
+      base::Bind(&CleanupPreviousUpdateAction::WaitBootCompletedOrSchedule,
+                 base::Unretained(this)),
+      kCheckBootCompletedInterval);
+}
+
+void CleanupPreviousUpdateAction::WaitBootCompletedOrSchedule() {
+  TEST_AND_RETURN(running_);
+  if (!kIsRecovery &&
+      !android::base::GetBoolProperty(kBootCompletedProp, false)) {
+    // repeat
+    ScheduleWaitBootCompleted();
+    return;
+  }
+
+  LOG(INFO) << "Boot completed, waiting on markBootSuccessful()";
+  CheckSlotMarkedSuccessfulOrSchedule();
+}
+
+void CleanupPreviousUpdateAction::ScheduleWaitMarkBootSuccessful() {
+  TEST_AND_RETURN(running_);
+  MessageLoop::current()->PostDelayedTask(
+      FROM_HERE,
+      base::Bind(
+          &CleanupPreviousUpdateAction::CheckSlotMarkedSuccessfulOrSchedule,
+          base::Unretained(this)),
+      kCheckSlotMarkedSuccessfulInterval);
+}
+
+void CleanupPreviousUpdateAction::CheckSlotMarkedSuccessfulOrSchedule() {
+  TEST_AND_RETURN(running_);
+  if (!kIsRecovery &&
+      !boot_control_->IsSlotMarkedSuccessful(boot_control_->GetCurrentSlot())) {
+    ScheduleWaitMarkBootSuccessful();
+  }
+
+  if (metadata_device_ == nullptr) {
+    metadata_device_ = snapshot_->EnsureMetadataMounted();
+  }
+
+  if (metadata_device_ == nullptr) {
+    LOG(ERROR) << "Failed to mount /metadata.";
+    // If metadata is erased but not formatted, it is possible to not mount
+    // it in recovery. It is safe to skip CleanupPreviousUpdateAction.
+    processor_->ActionComplete(
+        this, kIsRecovery ? ErrorCode::kSuccess : ErrorCode::kError);
+    return;
+  }
+
+  if (kIsRecovery) {
+    auto snapshots_created =
+        snapshot_->RecoveryCreateSnapshotDevices(metadata_device_);
+    switch (snapshots_created) {
+      case android::snapshot::CreateResult::CREATED: {
+        // If previous update has not finished merging, snapshots exists and are
+        // created here so that ProcessUpdateState can proceed.
+        LOG(INFO) << "Snapshot devices are created";
+        break;
+      }
+      case android::snapshot::CreateResult::NOT_CREATED: {
+        // If there is no previous update, no snapshot devices are created and
+        // ProcessUpdateState will return immediately. Hence, NOT_CREATED is not
+        // considered an error.
+        LOG(INFO) << "Snapshot devices are not created";
+        break;
+      }
+      case android::snapshot::CreateResult::ERROR:
+      default: {
+        LOG(ERROR)
+            << "Failed to create snapshot devices (CreateResult = "
+            << static_cast<
+                   std::underlying_type_t<android::snapshot::CreateResult>>(
+                   snapshots_created);
+        processor_->ActionComplete(this, ErrorCode::kError);
+        return;
+      }
+    }
+  }
+
+  if (!merge_stats_->Start()) {
+    // Not an error because CleanupPreviousUpdateAction may be paused and
+    // resumed while kernel continues merging snapshots in the background.
+    LOG(WARNING) << "SnapshotMergeStats::Start failed.";
+  }
+  LOG(INFO) << "Waiting for any previous merge request to complete. "
+            << "This can take up to several minutes.";
+  WaitForMergeOrSchedule();
+}
+
+void CleanupPreviousUpdateAction::ScheduleWaitForMerge() {
+  TEST_AND_RETURN(running_);
+  MessageLoop::current()->PostDelayedTask(
+      FROM_HERE,
+      base::Bind(&CleanupPreviousUpdateAction::WaitForMergeOrSchedule,
+                 base::Unretained(this)),
+      kWaitForMergeInterval);
+}
+
+void CleanupPreviousUpdateAction::WaitForMergeOrSchedule() {
+  TEST_AND_RETURN(running_);
+  auto state = snapshot_->ProcessUpdateState(
+      std::bind(&CleanupPreviousUpdateAction::OnMergePercentageUpdate, this),
+      std::bind(&CleanupPreviousUpdateAction::BeforeCancel, this));
+  merge_stats_->set_state(state);
+
+  switch (state) {
+    case UpdateState::None: {
+      LOG(INFO) << "Can't find any snapshot to merge.";
+      ErrorCode error_code = ErrorCode::kSuccess;
+      if (!snapshot_->CancelUpdate()) {
+        error_code = ErrorCode::kError;
+        LOG(INFO) << "Failed to call SnapshotManager::CancelUpdate().";
+      }
+      processor_->ActionComplete(this, error_code);
+      return;
+    }
+
+    case UpdateState::Initiated: {
+      LOG(ERROR) << "Previous update has not been completed, not cleaning up";
+      processor_->ActionComplete(this, ErrorCode::kSuccess);
+      return;
+    }
+
+    case UpdateState::Unverified: {
+      InitiateMergeAndWait();
+      return;
+    }
+
+    case UpdateState::Merging: {
+      ScheduleWaitForMerge();
+      return;
+    }
+
+    case UpdateState::MergeNeedsReboot: {
+      LOG(ERROR) << "Need reboot to finish merging.";
+      processor_->ActionComplete(this, ErrorCode::kError);
+      return;
+    }
+
+    case UpdateState::MergeCompleted: {
+      LOG(INFO) << "Merge finished with state MergeCompleted.";
+      processor_->ActionComplete(this, ErrorCode::kSuccess);
+      return;
+    }
+
+    case UpdateState::MergeFailed: {
+      LOG(ERROR) << "Merge failed. Device may be corrupted.";
+      processor_->ActionComplete(this, ErrorCode::kDeviceCorrupted);
+      return;
+    }
+
+    case UpdateState::Cancelled: {
+      // DeltaPerformer::ResetUpdateProgress failed, hence snapshots are
+      // not deleted to avoid inconsistency.
+      // Nothing can be done here; just try next time.
+      ErrorCode error_code =
+          cancel_failed_ ? ErrorCode::kError : ErrorCode::kSuccess;
+      processor_->ActionComplete(this, error_code);
+      return;
+    }
+
+    default: {
+      // Protobuf has some reserved enum values, so a default case is needed.
+      LOG(FATAL) << "SnapshotManager::ProcessUpdateState returns "
+                 << static_cast<int32_t>(state);
+    }
+  }
+}
+
+bool CleanupPreviousUpdateAction::OnMergePercentageUpdate() {
+  double percentage = 0.0;
+  snapshot_->GetUpdateState(&percentage);
+  if (delegate_) {
+    // libsnapshot uses [0, 100] percentage but update_engine uses [0, 1].
+    delegate_->OnCleanupProgressUpdate(percentage / 100);
+  }
+
+  // Log if percentage increments by at least 1.
+  if (last_percentage_ < static_cast<unsigned int>(percentage)) {
+    last_percentage_ = percentage;
+    LOG(INFO) << "Waiting for merge to complete: " << last_percentage_ << "%.";
+  }
+
+  // Do not continue to wait for merge. Instead, let ProcessUpdateState
+  // return Merging directly so that we can ScheduleWaitForMerge() in
+  // MessageLoop.
+  return false;
+}
+
+bool CleanupPreviousUpdateAction::BeforeCancel() {
+  if (DeltaPerformer::ResetUpdateProgress(
+          prefs_,
+          false /* quick */,
+          false /* skip dynamic partitions metadata*/)) {
+    return true;
+  }
+
+  // ResetUpdateProgress might not work on stub prefs. Do additional checks.
+  LOG(WARNING) << "ProcessUpdateState returns Cancelled but cleanup failed.";
+
+  std::string val;
+  ignore_result(prefs_->GetString(kPrefsDynamicPartitionMetadataUpdated, &val));
+  if (val.empty()) {
+    LOG(INFO) << kPrefsDynamicPartitionMetadataUpdated
+              << " is empty, assuming successful cleanup";
+    return true;
+  }
+  LOG(WARNING)
+      << kPrefsDynamicPartitionMetadataUpdated << " is " << val
+      << ", not deleting snapshots even though UpdateState is Cancelled.";
+  cancel_failed_ = true;
+  return false;
+}
+
+void CleanupPreviousUpdateAction::InitiateMergeAndWait() {
+  TEST_AND_RETURN(running_);
+  LOG(INFO) << "Attempting to initiate merge.";
+  // Suspend the VAB merge when running a DSU.
+  if (GetBoolProperty("ro.gsid.image_running", false)) {
+    LOG(WARNING) << "Suspend the VAB merge when running a DSU.";
+    processor_->ActionComplete(this, ErrorCode::kError);
+    return;
+  }
+
+  uint64_t cow_file_size;
+  if (snapshot_->InitiateMerge(&cow_file_size)) {
+    merge_stats_->set_cow_file_size(cow_file_size);
+    WaitForMergeOrSchedule();
+    return;
+  }
+
+  LOG(WARNING) << "InitiateMerge failed.";
+  auto state = snapshot_->GetUpdateState();
+  merge_stats_->set_state(state);
+  if (state == UpdateState::Unverified) {
+    // We are stuck at unverified state. This can happen if the update has
+    // been applied, but it has not even been attempted yet (in libsnapshot,
+    // rollback indicator does not exist); for example, if update_engine
+    // restarts before the device reboots, then this state may be reached.
+    // Nothing should be done here.
+    LOG(WARNING) << "InitiateMerge leaves the device at "
+                 << "UpdateState::Unverified. (Did update_engine "
+                 << "restarted?)";
+    processor_->ActionComplete(this, ErrorCode::kSuccess);
+    return;
+  }
+
+  // The state seems to have advanced.
+  // It is possibly racy. For example, on a userdebug build, the user may
+  // manually initiate a merge with snapshotctl after the last time
+  // update_engine checked UpdateState. Hence, just call
+  // WaitForMergeOrSchedule one more time.
+  LOG(WARNING) << "IniitateMerge failed but GetUpdateState returned "
+               << android::snapshot::UpdateState_Name(state)
+               << ", try to wait for merge again.";
+  WaitForMergeOrSchedule();
+  return;
+}
+
+void CleanupPreviousUpdateAction::ReportMergeStats() {
+  auto result = merge_stats_->Finish();
+  if (result == nullptr) {
+    LOG(WARNING) << "Not reporting merge stats because "
+                    "SnapshotMergeStats::Finish failed.";
+    return;
+  }
+
+#ifdef __ANDROID_RECOVERY__
+  LOG(INFO) << "Skip reporting merge stats in recovery.";
+#else
+  const auto& report = result->report();
+
+  if (report.state() == UpdateState::None ||
+      report.state() == UpdateState::Initiated ||
+      report.state() == UpdateState::Unverified) {
+    LOG(INFO) << "Not reporting merge stats because state is "
+              << android::snapshot::UpdateState_Name(report.state());
+    return;
+  }
+
+  auto passed_ms = std::chrono::duration_cast<std::chrono::milliseconds>(
+      result->merge_time());
+
+  bool vab_retrofit = boot_control_->GetDynamicPartitionControl()
+                          ->GetVirtualAbFeatureFlag()
+                          .IsRetrofit();
+
+  LOG(INFO) << "Reporting merge stats: "
+            << android::snapshot::UpdateState_Name(report.state()) << " in "
+            << passed_ms.count() << "ms (resumed " << report.resume_count()
+            << " times), using " << report.cow_file_size()
+            << " bytes of COW image.";
+  android::util::stats_write(android::util::SNAPSHOT_MERGE_REPORTED,
+                             static_cast<int32_t>(report.state()),
+                             static_cast<int64_t>(passed_ms.count()),
+                             static_cast<int32_t>(report.resume_count()),
+                             vab_retrofit,
+                             static_cast<int64_t>(report.cow_file_size()));
+#endif
+}
+
+}  // namespace chromeos_update_engine
diff --git a/cleanup_previous_update_action.h b/cleanup_previous_update_action.h
new file mode 100644
index 0000000..6f6ce07
--- /dev/null
+++ b/cleanup_previous_update_action.h
@@ -0,0 +1,95 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_CLEANUP_PREVIOUS_UPDATE_ACTION_H_
+#define UPDATE_ENGINE_CLEANUP_PREVIOUS_UPDATE_ACTION_H_
+
+#include <chrono>  // NOLINT(build/c++11) -- for merge times
+#include <memory>
+#include <string>
+
+#include <brillo/message_loops/message_loop.h>
+#include <libsnapshot/snapshot.h>
+#include <libsnapshot/snapshot_stats.h>
+
+#include "update_engine/common/action.h"
+#include "update_engine/common/boot_control_interface.h"
+#include "update_engine/common/cleanup_previous_update_action_delegate.h"
+#include "update_engine/common/error_code.h"
+#include "update_engine/common/prefs_interface.h"
+
+namespace chromeos_update_engine {
+
+class CleanupPreviousUpdateAction;
+
+template <>
+class ActionTraits<CleanupPreviousUpdateAction> {
+ public:
+  typedef NoneType InputObjectType;
+  typedef NoneType OutputObjectType;
+};
+
+// On Android Virtual A/B devices, clean up snapshots from previous update
+// attempt. See DynamicPartitionControlAndroid::CleanupSuccessfulUpdate.
+class CleanupPreviousUpdateAction : public Action<CleanupPreviousUpdateAction> {
+ public:
+  CleanupPreviousUpdateAction(
+      PrefsInterface* prefs,
+      BootControlInterface* boot_control,
+      android::snapshot::ISnapshotManager* snapshot,
+      CleanupPreviousUpdateActionDelegateInterface* delegate);
+
+  void PerformAction() override;
+  void SuspendAction() override;
+  void ResumeAction() override;
+  void TerminateProcessing() override;
+  void ActionCompleted(ErrorCode error_code) override;
+  std::string Type() const override;
+  static std::string StaticType();
+  typedef ActionTraits<CleanupPreviousUpdateAction>::InputObjectType
+      InputObjectType;
+  typedef ActionTraits<CleanupPreviousUpdateAction>::OutputObjectType
+      OutputObjectType;
+
+ private:
+  PrefsInterface* prefs_;
+  BootControlInterface* boot_control_;
+  android::snapshot::ISnapshotManager* snapshot_;
+  CleanupPreviousUpdateActionDelegateInterface* delegate_;
+  std::unique_ptr<android::snapshot::AutoDevice> metadata_device_;
+  bool running_{false};
+  bool cancel_failed_{false};
+  unsigned int last_percentage_{0};
+  android::snapshot::ISnapshotMergeStats* merge_stats_;
+
+  void StartActionInternal();
+  void ScheduleWaitBootCompleted();
+  void WaitBootCompletedOrSchedule();
+  void ScheduleWaitMarkBootSuccessful();
+  void CheckSlotMarkedSuccessfulOrSchedule();
+  void ScheduleWaitForMerge();
+  void WaitForMergeOrSchedule();
+  void InitiateMergeAndWait();
+  void ReportMergeStats();
+
+  // Callbacks to ProcessUpdateState.
+  bool OnMergePercentageUpdate();
+  bool BeforeCancel();
+};
+
+}  // namespace chromeos_update_engine
+
+#endif  // UPDATE_ENGINE_CLEANUP_PREVIOUS_UPDATE_ACTION_H_
diff --git a/client_library/include/update_engine/update_status.h b/client_library/include/update_engine/update_status.h
index b1cf1f8..043a36e 100644
--- a/client_library/include/update_engine/update_status.h
+++ b/client_library/include/update_engine/update_status.h
@@ -48,6 +48,7 @@
   // Broadcast this state when an update aborts because user preferences do not
   // allow updates, e.g. over cellular network.
   NEED_PERMISSION_TO_UPDATE = 10,
+  CLEANUP_PREVIOUS_UPDATE = 11,
 
   // This value is exclusively used in Chrome. DO NOT define nor use it.
   // TODO(crbug.com/977320): Remove this value from chrome by refactoring the
diff --git a/common/action.h b/common/action.h
index 9e2f5ff..fd82c2d 100644
--- a/common/action.h
+++ b/common/action.h
@@ -222,6 +222,17 @@
       out_pipe_;
 };
 
+// An action that does nothing and completes with kSuccess immediately.
+class NoOpAction : public AbstractAction {
+ public:
+  ~NoOpAction() override {}
+  void PerformAction() override {
+    processor_->ActionComplete(this, ErrorCode::kSuccess);
+  }
+  static std::string StaticType() { return "NoOpAction"; }
+  std::string Type() const override { return StaticType(); }
+};
+
 };  // namespace chromeos_update_engine
 
 #endif  // UPDATE_ENGINE_COMMON_ACTION_H_
diff --git a/common/action_pipe.h b/common/action_pipe.h
index 0c98ee1..4c56812 100644
--- a/common/action_pipe.h
+++ b/common/action_pipe.h
@@ -79,6 +79,8 @@
 
  private:
   ObjectType contents_;
+  // Give unit test access
+  friend class DownloadActionTest;
 
   // The ctor is private. This is because this class should construct itself
   // via the static Bond() method.
diff --git a/common/action_processor.h b/common/action_processor.h
index 735a106..ad98cc9 100644
--- a/common/action_processor.h
+++ b/common/action_processor.h
@@ -89,7 +89,7 @@
   // But this call deletes the action if there no other object has a reference
   // to it, so in that case, the caller should not try to access any of its
   // member variables after this call.
-  void ActionComplete(AbstractAction* actionptr, ErrorCode code);
+  virtual void ActionComplete(AbstractAction* actionptr, ErrorCode code);
 
  private:
   FRIEND_TEST(ActionProcessorTest, ChainActionsTest);
diff --git a/common/boot_control_interface.h b/common/boot_control_interface.h
index 392d785..c93de5c 100644
--- a/common/boot_control_interface.h
+++ b/common/boot_control_interface.h
@@ -25,6 +25,9 @@
 #include <base/callback.h>
 #include <base/macros.h>
 
+#include "update_engine/common/dynamic_partition_control_interface.h"
+#include "update_engine/update_metadata.pb.h"
+
 namespace chromeos_update_engine {
 
 // The abstract boot control interface defines the interaction with the
@@ -35,19 +38,6 @@
  public:
   using Slot = unsigned int;
 
-  struct PartitionMetadata {
-    struct Partition {
-      std::string name;
-      uint64_t size;
-    };
-    struct Group {
-      std::string name;
-      uint64_t size;
-      std::vector<Partition> partitions;
-    };
-    std::vector<Group> groups;
-  };
-
   static const Slot kInvalidSlot = UINT_MAX;
 
   virtual ~BootControlInterface() = default;
@@ -67,9 +57,20 @@
   // The |slot| number must be between 0 and GetNumSlots() - 1 and the
   // |partition_name| is a platform-specific name that identifies a partition on
   // every slot. In order to access the dynamic partitions in the target slot,
-  // InitPartitionMetadata() must be called (once per payload) prior to calling
-  // this function. On success, returns true and stores the block device in
-  // |device|.
+  // GetDynamicPartitionControl()->PreparePartitionsForUpdate() must be called
+  // (with |update| == true for the first time for a payload, and |false|
+  // for the rest of the times) prior to calling this function.
+  // The handling may be different based on whether the partition is included
+  // in the update payload. On success, returns true, stores the block device
+  // in |device|, and stores whether the partition is dynamic in |is_dynamic|.
+  virtual bool GetPartitionDevice(const std::string& partition_name,
+                                  Slot slot,
+                                  bool not_in_payload,
+                                  std::string* device,
+                                  bool* is_dynamic) const = 0;
+
+  // Overload of the above function. We assume the partition is always included
+  // in the payload.
   virtual bool GetPartitionDevice(const std::string& partition_name,
                                   Slot slot,
                                   std::string* device) const = 0;
@@ -94,17 +95,11 @@
   // of the operation.
   virtual bool MarkBootSuccessfulAsync(base::Callback<void(bool)> callback) = 0;
 
-  // Initializes the metadata of the underlying partitions for a given |slot|
-  // and sets up the states for accessing dynamic partitions.
-  // |partition_metadata| will be written to the specified |slot| if
-  // |update_metadata| is set.
-  virtual bool InitPartitionMetadata(
-      Slot slot,
-      const PartitionMetadata& partition_metadata,
-      bool update_metadata) = 0;
+  // Check if |slot| has been marked as booted successfully.
+  virtual bool IsSlotMarkedSuccessful(Slot slot) const = 0;
 
-  // Do necessary clean-up operations after the whole update.
-  virtual void Cleanup() = 0;
+  // Return the dynamic partition control interface.
+  virtual DynamicPartitionControlInterface* GetDynamicPartitionControl() = 0;
 
   // Return a human-readable slot name used for logging.
   static std::string SlotName(Slot slot) {
diff --git a/common/boot_control_stub.cc b/common/boot_control_stub.cc
index 0fe8a98..907f670 100644
--- a/common/boot_control_stub.cc
+++ b/common/boot_control_stub.cc
@@ -15,6 +15,7 @@
 //
 
 #include "update_engine/common/boot_control_stub.h"
+#include "update_engine/common/dynamic_partition_control_stub.h"
 
 #include <base/logging.h>
 
@@ -22,6 +23,9 @@
 
 namespace chromeos_update_engine {
 
+BootControlStub::BootControlStub()
+    : dynamic_partition_control_(new DynamicPartitionControlStub()) {}
+
 unsigned int BootControlStub::GetNumSlots() const {
   return 0;
 }
@@ -31,6 +35,15 @@
   return 0;
 }
 
+bool BootControlStub::GetPartitionDevice(const std::string& partition_name,
+                                         BootControlInterface::Slot slot,
+                                         bool not_in_payload,
+                                         std::string* device,
+                                         bool* is_dynamic) const {
+  LOG(ERROR) << __FUNCTION__ << " should never be called.";
+  return false;
+}
+
 bool BootControlStub::GetPartitionDevice(const string& partition_name,
                                          Slot slot,
                                          string* device) const {
@@ -59,16 +72,14 @@
   return false;
 }
 
-bool BootControlStub::InitPartitionMetadata(
-    Slot slot,
-    const PartitionMetadata& partition_metadata,
-    bool update_metadata) {
+bool BootControlStub::IsSlotMarkedSuccessful(Slot slot) const {
   LOG(ERROR) << __FUNCTION__ << " should never be called.";
   return false;
 }
 
-void BootControlStub::Cleanup() {
-  LOG(ERROR) << __FUNCTION__ << " should never be called.";
+DynamicPartitionControlInterface*
+BootControlStub::GetDynamicPartitionControl() {
+  return dynamic_partition_control_.get();
 }
 
 }  // namespace chromeos_update_engine
diff --git a/common/boot_control_stub.h b/common/boot_control_stub.h
index 8dfaffc..a1bdb96 100644
--- a/common/boot_control_stub.h
+++ b/common/boot_control_stub.h
@@ -17,9 +17,11 @@
 #ifndef UPDATE_ENGINE_COMMON_BOOT_CONTROL_STUB_H_
 #define UPDATE_ENGINE_COMMON_BOOT_CONTROL_STUB_H_
 
+#include <memory>
 #include <string>
 
 #include "update_engine/common/boot_control_interface.h"
+#include "update_engine/common/dynamic_partition_control_interface.h"
 
 namespace chromeos_update_engine {
 
@@ -32,25 +34,30 @@
 // implementation is in use.
 class BootControlStub : public BootControlInterface {
  public:
-  BootControlStub() = default;
+  BootControlStub();
   ~BootControlStub() = default;
 
   // BootControlInterface overrides.
   unsigned int GetNumSlots() const override;
   BootControlInterface::Slot GetCurrentSlot() const override;
   bool GetPartitionDevice(const std::string& partition_name,
+                          Slot slot,
+                          bool not_in_payload,
+                          std::string* device,
+                          bool* is_dynamic) const override;
+  bool GetPartitionDevice(const std::string& partition_name,
                           BootControlInterface::Slot slot,
                           std::string* device) const override;
   bool IsSlotBootable(BootControlInterface::Slot slot) const override;
   bool MarkSlotUnbootable(BootControlInterface::Slot slot) override;
   bool SetActiveBootSlot(BootControlInterface::Slot slot) override;
   bool MarkBootSuccessfulAsync(base::Callback<void(bool)> callback) override;
-  bool InitPartitionMetadata(Slot slot,
-                             const PartitionMetadata& partition_metadata,
-                             bool update_metadata) override;
-  void Cleanup() override;
+  bool IsSlotMarkedSuccessful(BootControlInterface::Slot slot) const override;
+  DynamicPartitionControlInterface* GetDynamicPartitionControl() override;
 
  private:
+  std::unique_ptr<DynamicPartitionControlInterface> dynamic_partition_control_;
+
   DISALLOW_COPY_AND_ASSIGN(BootControlStub);
 };
 
diff --git a/common/cleanup_previous_update_action_delegate.h b/common/cleanup_previous_update_action_delegate.h
new file mode 100644
index 0000000..7dad9c5
--- /dev/null
+++ b/common/cleanup_previous_update_action_delegate.h
@@ -0,0 +1,32 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_CLEANUP_PREVIOUS_UPDATE_ACTION_DELEGETE_H_
+#define UPDATE_ENGINE_CLEANUP_PREVIOUS_UPDATE_ACTION_DELEGETE_H_
+
+namespace chromeos_update_engine {
+
+// Delegate interface for CleanupPreviousUpdateAction.
+class CleanupPreviousUpdateActionDelegateInterface {
+ public:
+  virtual ~CleanupPreviousUpdateActionDelegateInterface() {}
+  // |progress| is within [0, 1]
+  virtual void OnCleanupProgressUpdate(double progress) = 0;
+};
+
+}  // namespace chromeos_update_engine
+
+#endif  // UPDATE_ENGINE_CLEANUP_PREVIOUS_UPDATE_ACTION_DELEGETE_H_
diff --git a/common/constants.cc b/common/constants.cc
index ad511d5..8883668 100644
--- a/common/constants.cc
+++ b/common/constants.cc
@@ -110,6 +110,7 @@
 const char kPrefsWallClockScatteringWaitPeriod[] = "wall-clock-wait-period";
 const char kPrefsWallClockStagingWaitPeriod[] =
     "wall-clock-staging-wait-period";
+const char kPrefsManifestBytes[] = "manifest-bytes";
 
 // These four fields are generated by scripts/brillo_update_payload.
 const char kPayloadPropertyFileSize[] = "FILE_SIZE";
@@ -130,8 +131,7 @@
 // Set "SWITCH_SLOT_ON_REBOOT=0" to skip marking the updated partitions active.
 // The default is 1 (always switch slot if update succeeded).
 const char kPayloadPropertySwitchSlotOnReboot[] = "SWITCH_SLOT_ON_REBOOT";
-// Set "RUN_POST_INSTALL=0" to skip running post install, this will only be
-// honored if we're resuming an update and post install has already succeeded.
+// Set "RUN_POST_INSTALL=0" to skip running optional post install.
 // The default is 1 (always run post install).
 const char kPayloadPropertyRunPostInstall[] = "RUN_POST_INSTALL";
 
diff --git a/common/constants.h b/common/constants.h
index 446b147..3685102 100644
--- a/common/constants.h
+++ b/common/constants.h
@@ -105,6 +105,7 @@
 extern const char kPrefsVerityWritten[];
 extern const char kPrefsWallClockScatteringWaitPeriod[];
 extern const char kPrefsWallClockStagingWaitPeriod[];
+extern const char kPrefsManifestBytes[];
 
 // Keys used when storing and loading payload properties.
 extern const char kPayloadPropertyFileSize[];
@@ -215,6 +216,9 @@
 const int kDownloadConnectTimeoutSeconds = 30;
 const int kDownloadP2PConnectTimeoutSeconds = 5;
 
+// Size in bytes of SHA256 hash.
+const int kSHA256Size = 32;
+
 }  // namespace chromeos_update_engine
 
 #endif  // UPDATE_ENGINE_COMMON_CONSTANTS_H_
diff --git a/common/dynamic_partition_control_interface.h b/common/dynamic_partition_control_interface.h
new file mode 100644
index 0000000..7c2d0b0
--- /dev/null
+++ b/common/dynamic_partition_control_interface.h
@@ -0,0 +1,144 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_COMMON_DYNAMIC_PARTITION_CONTROL_INTERFACE_H_
+#define UPDATE_ENGINE_COMMON_DYNAMIC_PARTITION_CONTROL_INTERFACE_H_
+
+#include <stdint.h>
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "update_engine/common/action.h"
+#include "update_engine/common/cleanup_previous_update_action_delegate.h"
+#include "update_engine/common/error_code.h"
+#include "update_engine/update_metadata.pb.h"
+
+namespace chromeos_update_engine {
+
+struct FeatureFlag {
+  enum class Value { NONE = 0, RETROFIT, LAUNCH };
+  constexpr explicit FeatureFlag(Value value) : value_(value) {}
+  constexpr bool IsEnabled() const { return value_ != Value::NONE; }
+  constexpr bool IsRetrofit() const { return value_ == Value::RETROFIT; }
+  constexpr bool IsLaunch() const { return value_ == Value::LAUNCH; }
+
+ private:
+  Value value_;
+};
+
+class BootControlInterface;
+class PrefsInterface;
+
+class DynamicPartitionControlInterface {
+ public:
+  virtual ~DynamicPartitionControlInterface() = default;
+
+  // Return the feature flags of dynamic partitions on this device.
+  // Return RETROFIT iff dynamic partitions is retrofitted on this device,
+  //        LAUNCH iff this device is launched with dynamic partitions,
+  //        NONE iff dynamic partitions is disabled on this device.
+  virtual FeatureFlag GetDynamicPartitionsFeatureFlag() = 0;
+
+  // Return the feature flags of Virtual A/B on this device.
+  virtual FeatureFlag GetVirtualAbFeatureFlag() = 0;
+
+  // Attempt to optimize |operation|.
+  // If successful, |optimized| contains an operation with extents that
+  // needs to be written.
+  // If failed, no optimization is available, and caller should perform
+  // |operation| directly.
+  // |partition_name| should not have the slot suffix; implementation of
+  // DynamicPartitionControlInterface checks partition at the target slot
+  // previously set with PreparePartitionsForUpdate().
+  virtual bool OptimizeOperation(const std::string& partition_name,
+                                 const InstallOperation& operation,
+                                 InstallOperation* optimized) = 0;
+
+  // Do necessary cleanups before destroying the object.
+  virtual void Cleanup() = 0;
+
+  // Prepare all partitions for an update specified in |manifest|.
+  // This is needed before calling MapPartitionOnDeviceMapper(), otherwise the
+  // device would be mapped in an inconsistent way.
+  // If |update| is set, create snapshots and writes super partition metadata.
+  // If |required_size| is not null and call fails due to insufficient space,
+  // |required_size| will be set to total free space required on userdata
+  // partition to apply the update. Otherwise (call succeeds, or fails
+  // due to other errors), |required_size| is set to zero.
+  virtual bool PreparePartitionsForUpdate(uint32_t source_slot,
+                                          uint32_t target_slot,
+                                          const DeltaArchiveManifest& manifest,
+                                          bool update,
+                                          uint64_t* required_size) = 0;
+
+  // After writing to new partitions, before rebooting into the new slot, call
+  // this function to indicate writes to new partitions are done.
+  virtual bool FinishUpdate(bool powerwash_required) = 0;
+
+  // Get an action to clean up previous update.
+  // Return NoOpAction on non-Virtual A/B devices.
+  // Before applying the next update, run this action to clean up previous
+  // update files. This function blocks until delta files are merged into
+  // current OS partitions and finished cleaning up.
+  // - If successful, action completes with kSuccess.
+  // - If any error, but caller should retry after reboot, action completes with
+  //   kError.
+  // - If any irrecoverable failures, action completes with kDeviceCorrupted.
+  //
+  // See ResetUpdate for differences between CleanupPreviousUpdateAction and
+  // ResetUpdate.
+  virtual std::unique_ptr<AbstractAction> GetCleanupPreviousUpdateAction(
+      BootControlInterface* boot_control,
+      PrefsInterface* prefs,
+      CleanupPreviousUpdateActionDelegateInterface* delegate) = 0;
+
+  // Called after an unwanted payload has been successfully applied and the
+  // device has not yet been rebooted.
+  //
+  // For snapshot updates (Virtual A/B), it calls
+  // DeltaPerformer::ResetUpdateProgress(false /* quick */) and
+  // frees previously allocated space; the next update will need to be
+  // started over.
+  //
+  // Note: CleanupPreviousUpdateAction does not do anything if an update is in
+  // progress, while ResetUpdate() forcefully frees previously
+  // allocated space for snapshot updates.
+  virtual bool ResetUpdate(PrefsInterface* prefs) = 0;
+
+  // Reads the dynamic partitions metadata from the current slot, and puts the
+  // name of the dynamic partitions with the current suffix to |partitions|.
+  // Returns true on success.
+  virtual bool ListDynamicPartitionsForSlot(
+      uint32_t current_slot, std::vector<std::string>* partitions) = 0;
+
+  // Finds a possible location that list all block devices by name; and puts
+  // the result in |path|. Returns true on success.
+  // Sample result: /dev/block/by-name/
+  virtual bool GetDeviceDir(std::string* path) = 0;
+
+  // Verifies that the untouched dynamic partitions in the target metadata have
+  // the same extents as the source metadata.
+  virtual bool VerifyExtentsForUntouchedPartitions(
+      uint32_t source_slot,
+      uint32_t target_slot,
+      const std::vector<std::string>& partitions) = 0;
+};
+
+}  // namespace chromeos_update_engine
+
+#endif  // UPDATE_ENGINE_COMMON_DYNAMIC_PARTITION_CONTROL_INTERFACE_H_
diff --git a/common/dynamic_partition_control_stub.cc b/common/dynamic_partition_control_stub.cc
new file mode 100644
index 0000000..5a8ca43
--- /dev/null
+++ b/common/dynamic_partition_control_stub.cc
@@ -0,0 +1,86 @@
+//
+// Copyright (C) 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include <stdint.h>
+
+#include <memory>
+#include <string>
+
+#include <base/logging.h>
+
+#include "update_engine/common/dynamic_partition_control_stub.h"
+
+namespace chromeos_update_engine {
+
+FeatureFlag DynamicPartitionControlStub::GetDynamicPartitionsFeatureFlag() {
+  return FeatureFlag(FeatureFlag::Value::NONE);
+}
+
+FeatureFlag DynamicPartitionControlStub::GetVirtualAbFeatureFlag() {
+  return FeatureFlag(FeatureFlag::Value::NONE);
+}
+
+bool DynamicPartitionControlStub::OptimizeOperation(
+    const std::string& partition_name,
+    const InstallOperation& operation,
+    InstallOperation* optimized) {
+  return false;
+}
+
+void DynamicPartitionControlStub::Cleanup() {}
+
+bool DynamicPartitionControlStub::PreparePartitionsForUpdate(
+    uint32_t source_slot,
+    uint32_t target_slot,
+    const DeltaArchiveManifest& manifest,
+    bool update,
+    uint64_t* required_size) {
+  return true;
+}
+
+bool DynamicPartitionControlStub::FinishUpdate(bool powerwash_required) {
+  return true;
+}
+
+std::unique_ptr<AbstractAction>
+DynamicPartitionControlStub::GetCleanupPreviousUpdateAction(
+    BootControlInterface* boot_control,
+    PrefsInterface* prefs,
+    CleanupPreviousUpdateActionDelegateInterface* delegate) {
+  return std::make_unique<NoOpAction>();
+}
+
+bool DynamicPartitionControlStub::ResetUpdate(PrefsInterface* prefs) {
+  return false;
+}
+
+bool DynamicPartitionControlStub::ListDynamicPartitionsForSlot(
+    uint32_t current_slot, std::vector<std::string>* partitions) {
+  return true;
+}
+
+bool DynamicPartitionControlStub::GetDeviceDir(std::string* path) {
+  return true;
+}
+
+bool DynamicPartitionControlStub::VerifyExtentsForUntouchedPartitions(
+    uint32_t source_slot,
+    uint32_t target_slot,
+    const std::vector<std::string>& partitions) {
+  return true;
+}
+
+}  // namespace chromeos_update_engine
diff --git a/common/dynamic_partition_control_stub.h b/common/dynamic_partition_control_stub.h
new file mode 100644
index 0000000..94dba1b
--- /dev/null
+++ b/common/dynamic_partition_control_stub.h
@@ -0,0 +1,63 @@
+//
+// Copyright (C) 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_COMMON_DYNAMIC_PARTITION_CONTROL_STUB_H_
+#define UPDATE_ENGINE_COMMON_DYNAMIC_PARTITION_CONTROL_STUB_H_
+
+#include <stdint.h>
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "update_engine/common/dynamic_partition_control_interface.h"
+
+namespace chromeos_update_engine {
+
+// Trivial DynamicPartitionControlInterface implementation for configurations
+// and tests that do not support dynamic partitions: feature flags are
+// reported as NONE and every operation is a no-op that reports success
+// (except where noted below).
+class DynamicPartitionControlStub : public DynamicPartitionControlInterface {
+ public:
+  // Always returns FeatureFlag::Value::NONE.
+  FeatureFlag GetDynamicPartitionsFeatureFlag() override;
+  // Always returns FeatureFlag::Value::NONE.
+  FeatureFlag GetVirtualAbFeatureFlag() override;
+  // Never optimizes; returns false and leaves |optimized| unwritten.
+  bool OptimizeOperation(const std::string& partition_name,
+                         const InstallOperation& operation,
+                         InstallOperation* optimized) override;
+  // No-op.
+  void Cleanup() override;
+  // Trivially succeeds; |required_size| is not modified.
+  bool PreparePartitionsForUpdate(uint32_t source_slot,
+                                  uint32_t target_slot,
+                                  const DeltaArchiveManifest& manifest,
+                                  bool update,
+                                  uint64_t* required_size) override;
+
+  // Trivially succeeds.
+  bool FinishUpdate(bool powerwash_required) override;
+  // Returns a NoOpAction, as there is no previous update to clean up.
+  std::unique_ptr<AbstractAction> GetCleanupPreviousUpdateAction(
+      BootControlInterface* boot_control,
+      PrefsInterface* prefs,
+      CleanupPreviousUpdateActionDelegateInterface* delegate) override;
+  // Always returns false; there is no update state to reset.
+  bool ResetUpdate(PrefsInterface* prefs) override;
+
+  // Trivially succeeds; |partitions| is not modified.
+  bool ListDynamicPartitionsForSlot(
+      uint32_t current_slot, std::vector<std::string>* partitions) override;
+  // Trivially succeeds; |path| is not modified.
+  bool GetDeviceDir(std::string* path) override;
+
+  // Performs no verification; always reports success.
+  bool VerifyExtentsForUntouchedPartitions(
+      uint32_t source_slot,
+      uint32_t target_slot,
+      const std::vector<std::string>& partitions) override;
+};
+
+}  // namespace chromeos_update_engine
+
+#endif  // UPDATE_ENGINE_COMMON_DYNAMIC_PARTITION_CONTROL_STUB_H_
diff --git a/common/error_code.h b/common/error_code.h
index 7acb3b6..7d9cfff 100644
--- a/common/error_code.h
+++ b/common/error_code.h
@@ -83,7 +83,9 @@
   kInternalLibCurlError = 57,
   kUnresolvedHostError = 58,
   kUnresolvedHostRecovered = 59,
-  kPackageExcludedFromUpdate = 60,
+  kNotEnoughSpace = 60,
+  kDeviceCorrupted = 61,
+  kPackageExcludedFromUpdate = 62,
 
   // VERY IMPORTANT! When adding new error codes:
   //
diff --git a/common/error_code_utils.cc b/common/error_code_utils.cc
index 55d876f..cda4c7e 100644
--- a/common/error_code_utils.cc
+++ b/common/error_code_utils.cc
@@ -167,6 +167,10 @@
       return "ErrorCode::kUnresolvedHostError";
     case ErrorCode::kUnresolvedHostRecovered:
       return "ErrorCode::kUnresolvedHostRecovered";
+    case ErrorCode::kNotEnoughSpace:
+      return "ErrorCode::kNotEnoughSpace";
+    case ErrorCode::kDeviceCorrupted:
+      return "ErrorCode::kDeviceCorrupted";
     case ErrorCode::kPackageExcludedFromUpdate:
       return "ErrorCode::kPackageExcludedFromUpdate";
       // Don't add a default case to let the compiler warn about newly added
diff --git a/common/fake_boot_control.h b/common/fake_boot_control.h
index 3d65075..98b93e6 100644
--- a/common/fake_boot_control.h
+++ b/common/fake_boot_control.h
@@ -18,12 +18,14 @@
 #define UPDATE_ENGINE_COMMON_FAKE_BOOT_CONTROL_H_
 
 #include <map>
+#include <memory>
 #include <string>
 #include <vector>
 
 #include <base/time/time.h>
 
 #include "update_engine/common/boot_control_interface.h"
+#include "update_engine/common/dynamic_partition_control_stub.h"
 
 namespace chromeos_update_engine {
 
@@ -34,6 +36,8 @@
     SetNumSlots(num_slots_);
     // The current slot should be bootable.
     is_bootable_[current_slot_] = true;
+
+    dynamic_partition_control_.reset(new DynamicPartitionControlStub());
   }
 
   // BootControlInterface overrides.
@@ -44,16 +48,27 @@
 
   bool GetPartitionDevice(const std::string& partition_name,
                           BootControlInterface::Slot slot,
-                          std::string* device) const override {
+                          bool not_in_payload,
+                          std::string* device,
+                          bool* is_dynamic) const override {
     if (slot >= num_slots_)
       return false;
     auto part_it = devices_[slot].find(partition_name);
     if (part_it == devices_[slot].end())
       return false;
     *device = part_it->second;
+    if (is_dynamic != nullptr) {
+      *is_dynamic = false;
+    }
     return true;
   }
 
+  bool GetPartitionDevice(const std::string& partition_name,
+                          BootControlInterface::Slot slot,
+                          std::string* device) const override {
+    return GetPartitionDevice(partition_name, slot, false, device, nullptr);
+  }
+
   bool IsSlotBootable(BootControlInterface::Slot slot) const override {
     return slot < num_slots_ && is_bootable_[slot];
   }
@@ -70,22 +85,20 @@
   bool MarkBootSuccessfulAsync(base::Callback<void(bool)> callback) override {
     // We run the callback directly from here to avoid having to setup a message
     // loop in the test environment.
+    is_marked_successful_[GetCurrentSlot()] = true;
     callback.Run(true);
     return true;
   }
 
-  bool InitPartitionMetadata(Slot slot,
-                             const PartitionMetadata& partition_metadata,
-                             bool update_metadata) override {
-    return true;
+  bool IsSlotMarkedSuccessful(Slot slot) const override {
+    return slot < num_slots_ && is_marked_successful_[slot];
   }
 
-  void Cleanup() override {}
-
   // Setters
   void SetNumSlots(unsigned int num_slots) {
     num_slots_ = num_slots;
     is_bootable_.resize(num_slots_, false);
+    is_marked_successful_.resize(num_slots_, false);
     devices_.resize(num_slots_);
   }
 
@@ -103,13 +116,20 @@
     is_bootable_[slot] = bootable;
   }
 
+  DynamicPartitionControlInterface* GetDynamicPartitionControl() override {
+    return dynamic_partition_control_.get();
+  }
+
  private:
   BootControlInterface::Slot num_slots_{2};
   BootControlInterface::Slot current_slot_{0};
 
   std::vector<bool> is_bootable_;
+  std::vector<bool> is_marked_successful_;
   std::vector<std::map<std::string, std::string>> devices_;
 
+  std::unique_ptr<DynamicPartitionControlInterface> dynamic_partition_control_;
+
   DISALLOW_COPY_AND_ASSIGN(FakeBootControl);
 };
 
diff --git a/common/fake_hardware.h b/common/fake_hardware.h
index 6604534..82382ff 100644
--- a/common/fake_hardware.h
+++ b/common/fake_hardware.h
@@ -19,10 +19,13 @@
 
 #include <map>
 #include <string>
+#include <utility>
 
 #include <base/time/time.h>
 
+#include "update_engine/common/error_code.h"
 #include "update_engine/common/hardware_interface.h"
+#include "update_engine/common/utils.h"
 
 namespace chromeos_update_engine {
 
@@ -132,6 +135,8 @@
 
   int64_t GetBuildTimestamp() const override { return build_timestamp_; }
 
+  bool AllowDowngrade() const override { return false; }
+
   bool GetFirstActiveOmahaPingSent() const override {
     return first_active_omaha_ping_sent_;
   }
@@ -197,12 +202,27 @@
     build_timestamp_ = build_timestamp;
   }
 
+  void SetWarmReset(bool warm_reset) override { warm_reset_ = warm_reset; }
+
   // Getters to verify state.
   int GetMaxKernelKeyRollforward() const { return kernel_max_rollforward_; }
 
   bool GetIsRollbackPowerwashScheduled() const {
     return powerwash_scheduled_ && save_rollback_data_;
   }
+  std::string GetVersionForLogging(
+      const std::string& partition_name) const override {
+    return partition_timestamps_[partition_name];
+  }
+  void SetVersion(const std::string& partition_name, std::string timestamp) {
+    partition_timestamps_[partition_name] = std::move(timestamp);
+  }
+  ErrorCode IsPartitionUpdateValid(
+      const std::string& partition_name,
+      const std::string& new_version) const override {
+    const auto old_version = GetVersionForLogging(partition_name);
+    return utils::IsTimestampNewer(old_version, new_version);
+  }
 
  private:
   bool is_official_build_{true};
@@ -225,6 +245,8 @@
   bool save_rollback_data_{false};
   int64_t build_timestamp_{0};
   bool first_active_omaha_ping_sent_{false};
+  bool warm_reset_{false};
+  mutable std::map<std::string, std::string> partition_timestamps_;
 
   DISALLOW_COPY_AND_ASSIGN(FakeHardware);
 };
diff --git a/common/file_fetcher.cc b/common/file_fetcher.cc
index 3836e54..7134fd6 100644
--- a/common/file_fetcher.cc
+++ b/common/file_fetcher.cc
@@ -43,8 +43,9 @@
 // static
 bool FileFetcher::SupportedUrl(const string& url) {
   // Note that we require the file path to start with a "/".
-  return base::StartsWith(
-      url, "file:///", base::CompareCase::INSENSITIVE_ASCII);
+  return (
+      base::StartsWith(url, "file:///", base::CompareCase::INSENSITIVE_ASCII) ||
+      base::StartsWith(url, "fd://", base::CompareCase::INSENSITIVE_ASCII));
 }
 
 FileFetcher::~FileFetcher() {
@@ -67,12 +68,20 @@
     return;
   }
 
-  string file_path = url.substr(strlen("file://"));
-  stream_ =
-      brillo::FileStream::Open(base::FilePath(file_path),
-                               brillo::Stream::AccessMode::READ,
-                               brillo::FileStream::Disposition::OPEN_EXISTING,
-                               nullptr);
+  string file_path;
+
+  if (base::StartsWith(url, "fd://", base::CompareCase::INSENSITIVE_ASCII)) {
+    int fd = std::stoi(url.substr(strlen("fd://")));
+    file_path = url;
+    stream_ = brillo::FileStream::FromFileDescriptor(fd, false, nullptr);
+  } else {
+    file_path = url.substr(strlen("file://"));
+    stream_ =
+        brillo::FileStream::Open(base::FilePath(file_path),
+                                 brillo::Stream::AccessMode::READ,
+                                 brillo::FileStream::Disposition::OPEN_EXISTING,
+                                 nullptr);
+  }
 
   if (!stream_) {
     LOG(ERROR) << "Couldn't open " << file_path;
@@ -183,5 +192,4 @@
   transfer_in_progress_ = false;
   transfer_paused_ = false;
 }
-
 }  // namespace chromeos_update_engine
diff --git a/common/hardware_interface.h b/common/hardware_interface.h
index da9f10e..b37b007 100644
--- a/common/hardware_interface.h
+++ b/common/hardware_interface.h
@@ -25,6 +25,8 @@
 #include <base/files/file_path.h>
 #include <base/time/time.h>
 
+#include "update_engine/common/error_code.h"
+
 namespace chromeos_update_engine {
 
 // The hardware interface allows access to the crossystem exposed properties,
@@ -126,6 +128,10 @@
   // Returns the timestamp of the current OS build.
   virtual int64_t GetBuildTimestamp() const = 0;
 
+  // Returns true if the current OS build allows installing the payload with an
+  // older timestamp.
+  virtual bool AllowDowngrade() const = 0;
+
   // Returns whether the first active ping was sent to Omaha at some point, and
   // that the value is persisted across recovery (and powerwash) once set with
   // |SetFirstActiveOmahaPingSent()|.
@@ -134,6 +140,30 @@
   // Persist the fact that first active ping was sent to omaha and returns false
   // if failed to persist it.
   virtual bool SetFirstActiveOmahaPingSent() = 0;
+
+  // If |warm_reset| is true, sets the warm reset to indicate a warm reset is
+  // needed on the next reboot. Otherwise, clears the flag.
+  virtual void SetWarmReset(bool warm_reset) = 0;
+
+  // Return the version/timestamp for partition `partition_name`.
+  // Don't make any assumption about the formatting of returned string.
+  // Only used for logging/debugging purposes.
+  virtual std::string GetVersionForLogging(
+      const std::string& partition_name) const = 0;
+
+  // Check whether |new_version| is "newer" than the version number of
+  // partition |partition_name|. The notion of "newer" is defined by this
+  // function; callers should not make any assumption about the underlying
+  // logic.
+  // Return:
+  // - kSuccess if update is valid.
+  // - kPayloadTimestampError if downgrade is detected
+  // - kDownloadManifestParseError if |new_version| has an incorrect format
+  // - Other error values if the source of error is known, or kError for
+  //   a generic error on the device.
+  virtual ErrorCode IsPartitionUpdateValid(
+      const std::string& partition_name,
+      const std::string& new_version) const = 0;
 };
 
 }  // namespace chromeos_update_engine
diff --git a/common/http_fetcher_unittest.cc b/common/http_fetcher_unittest.cc
index 3ecb996..9338087 100644
--- a/common/http_fetcher_unittest.cc
+++ b/common/http_fetcher_unittest.cc
@@ -37,7 +37,11 @@
 #include <brillo/message_loops/base_message_loop.h>
 #include <brillo/message_loops/message_loop.h>
 #include <brillo/message_loops/message_loop_utils.h>
+#ifdef __CHROMEOS__
 #include <brillo/process/process.h>
+#else
+#include <brillo/process.h>
+#endif  // __CHROMEOS__
 #include <brillo/streams/file_stream.h>
 #include <brillo/streams/stream.h>
 #include <gtest/gtest.h>
diff --git a/common/mock_action_processor.h b/common/mock_action_processor.h
index 4c62109..9785776 100644
--- a/common/mock_action_processor.h
+++ b/common/mock_action_processor.h
@@ -32,6 +32,8 @@
   MOCK_METHOD0(StartProcessing, void());
   MOCK_METHOD1(EnqueueAction, void(AbstractAction* action));
 
+  MOCK_METHOD2(ActionComplete, void(AbstractAction*, ErrorCode));
+
   // This is a legacy workaround described in:
   // https://github.com/google/googletest/blob/master/googlemock/docs/CookBook.md#legacy-workarounds-for-move-only-types-legacymoveonly
   void EnqueueAction(std::unique_ptr<AbstractAction> action) override {
diff --git a/common/mock_http_fetcher.cc b/common/mock_http_fetcher.cc
index 10e3b9e..1b3cd7d 100644
--- a/common/mock_http_fetcher.cc
+++ b/common/mock_http_fetcher.cc
@@ -22,6 +22,7 @@
 #include <base/logging.h>
 #include <base/strings/string_util.h>
 #include <base/time/time.h>
+#include <brillo/message_loops/message_loop.h>
 #include <gtest/gtest.h>
 
 // This is a mock implementation of HttpFetcher which is useful for testing.
@@ -43,12 +44,12 @@
     SignalTransferComplete();
     return;
   }
-  if (sent_size_ < data_.size())
+  if (sent_offset_ < data_.size())
     SendData(true);
 }
 
 void MockHttpFetcher::SendData(bool skip_delivery) {
-  if (fail_transfer_ || sent_size_ == data_.size()) {
+  if (fail_transfer_ || sent_offset_ == data_.size()) {
     SignalTransferComplete();
     return;
   }
@@ -60,19 +61,22 @@
 
   // Setup timeout callback even if the transfer is about to be completed in
   // order to get a call to |TransferComplete|.
-  if (timeout_id_ == MessageLoop::kTaskIdNull) {
+  if (timeout_id_ == MessageLoop::kTaskIdNull && delay_) {
+    CHECK(MessageLoop::current());
     timeout_id_ = MessageLoop::current()->PostDelayedTask(
         FROM_HERE,
         base::Bind(&MockHttpFetcher::TimeoutCallback, base::Unretained(this)),
         base::TimeDelta::FromMilliseconds(10));
   }
 
-  if (!skip_delivery) {
+  if (!skip_delivery || !delay_) {
     const size_t chunk_size =
-        min(kMockHttpFetcherChunkSize, data_.size() - sent_size_);
-    sent_size_ += chunk_size;
+        min(kMockHttpFetcherChunkSize, data_.size() - sent_offset_);
+    sent_offset_ += chunk_size;
+    bytes_sent_ += chunk_size;
     CHECK(delegate_);
-    delegate_->ReceivedBytes(this, &data_[sent_size_ - chunk_size], chunk_size);
+    delegate_->ReceivedBytes(
+        this, &data_[sent_offset_ - chunk_size], chunk_size);
   }
   // We may get terminated and deleted right after |ReceivedBytes| call, so we
   // should not access any class member variable after this call.
@@ -81,7 +85,7 @@
 void MockHttpFetcher::TimeoutCallback() {
   CHECK(!paused_);
   timeout_id_ = MessageLoop::kTaskIdNull;
-  CHECK_LE(sent_size_, data_.size());
+  CHECK_LE(sent_offset_, data_.size());
   // Same here, we should not access any member variable after this call.
   SendData(false);
 }
@@ -90,10 +94,15 @@
 // The transfer cannot be resumed.
 void MockHttpFetcher::TerminateTransfer() {
   LOG(INFO) << "Terminating transfer.";
-  // Kill any timeout, it is ok to call with kTaskIdNull.
-  MessageLoop::current()->CancelTask(timeout_id_);
-  timeout_id_ = MessageLoop::kTaskIdNull;
-  delegate_->TransferTerminated(this);
+  // During testing, MessageLoop may or may not be available.
+  // So don't call CancelTask() unless necessary.
+  if (timeout_id_ != MessageLoop::kTaskIdNull) {
+    MessageLoop::current()->CancelTask(timeout_id_);
+    timeout_id_ = MessageLoop::kTaskIdNull;
+  }
+  if (delegate_) {
+    delegate_->TransferTerminated(this);
+  }
 }
 
 void MockHttpFetcher::SetHeader(const std::string& header_name,
diff --git a/common/mock_http_fetcher.h b/common/mock_http_fetcher.h
index 0f04319..ea5b83d 100644
--- a/common/mock_http_fetcher.h
+++ b/common/mock_http_fetcher.h
@@ -46,7 +46,7 @@
                   size_t size,
                   ProxyResolver* proxy_resolver)
       : HttpFetcher(proxy_resolver),
-        sent_size_(0),
+        sent_offset_(0),
         timeout_id_(brillo::MessageLoop::kTaskIdNull),
         paused_(false),
         fail_transfer_(false),
@@ -64,7 +64,7 @@
 
   // Ignores this.
   void SetOffset(off_t offset) override {
-    sent_size_ = offset;
+    sent_offset_ = offset;
     if (delegate_)
       delegate_->SeekToOffset(offset);
   }
@@ -76,8 +76,8 @@
   void set_connect_timeout(int connect_timeout_seconds) override {}
   void set_max_retry_count(int max_retry_count) override {}
 
-  // Dummy: no bytes were downloaded.
-  size_t GetBytesDownloaded() override { return sent_size_; }
+  // Reports the total number of bytes this mock has delivered so far.
+  size_t GetBytesDownloaded() override { return bytes_sent_; }
 
   // Begins the transfer if it hasn't already begun.
   void BeginTransfer(const std::string& url) override;
@@ -113,6 +113,8 @@
 
   const brillo::Blob& post_data() const { return post_data_; }
 
+  void set_delay(bool delay) { delay_ = delay; }
+
  private:
   // Sends data to the delegate and sets up a timeout callback if needed. There
   // must be a delegate. If |skip_delivery| is true, no bytes will be delivered,
@@ -129,8 +131,11 @@
   // A full copy of the data we'll return to the delegate
   brillo::Blob data_;
 
-  // The number of bytes we've sent so far
-  size_t sent_size_;
+  // The current offset, marks the first byte that will be sent next
+  size_t sent_offset_{0};
+
+  // Total number of bytes transferred
+  size_t bytes_sent_{0};
 
   // The extra headers set.
   std::map<std::string, std::string> extra_headers_;
@@ -140,13 +145,16 @@
   brillo::MessageLoop::TaskId timeout_id_;
 
   // True iff the fetcher is paused.
-  bool paused_;
+  bool paused_{false};
 
   // Set to true if the transfer should fail.
-  bool fail_transfer_;
+  bool fail_transfer_{false};
 
   // Set to true if BeginTransfer should EXPECT fail.
-  bool never_use_;
+  bool never_use_{false};
+
+  // Whether it should wait for 10ms before sending data to delegates
+  bool delay_{true};
 
   DISALLOW_COPY_AND_ASSIGN(MockHttpFetcher);
 };
diff --git a/common/platform_constants.h b/common/platform_constants.h
index 6eaa940..243af69 100644
--- a/common/platform_constants.h
+++ b/common/platform_constants.h
@@ -38,6 +38,10 @@
 // whole payload.
 extern const char kUpdatePayloadPublicKeyPath[];
 
+// Path to the location of the zip archive file that contains PEM encoded X509
+// certificates. e.g. 'system/etc/security/otacerts.zip'.
+extern const char kUpdateCertificatesPath[];
+
 // Path to the directory containing all the SSL certificates accepted by
 // update_engine when sending requests to Omaha and the download server (if
 // HTTPS is used for that as well).
diff --git a/common/platform_constants_android.cc b/common/platform_constants_android.cc
index 9d8d30e..f468c3b 100644
--- a/common/platform_constants_android.cc
+++ b/common/platform_constants_android.cc
@@ -25,8 +25,8 @@
     "https://clients2.google.com/service/update2/brillo";
 const char kOmahaUpdaterID[] = "Brillo";
 const char kOmahaPlatformName[] = "Brillo";
-const char kUpdatePayloadPublicKeyPath[] =
-    "/etc/update_engine/update-payload-key.pub.pem";
+const char kUpdatePayloadPublicKeyPath[] = "";
+const char kUpdateCertificatesPath[] = "/system/etc/security/otacerts.zip";
 const char kCACertificatesPath[] = "/system/etc/security/cacerts_google";
 // No deadline file API support on Android.
 const char kOmahaResponseDeadlineFile[] = "";
diff --git a/common/platform_constants_chromeos.cc b/common/platform_constants_chromeos.cc
index f1ac490..fe94a45 100644
--- a/common/platform_constants_chromeos.cc
+++ b/common/platform_constants_chromeos.cc
@@ -27,6 +27,7 @@
 const char kOmahaPlatformName[] = "Chrome OS";
 const char kUpdatePayloadPublicKeyPath[] =
     "/usr/share/update_engine/update-payload-key.pub.pem";
+const char kUpdateCertificatesPath[] = "";
 const char kCACertificatesPath[] = "/usr/share/chromeos-ca-certificates";
 const char kOmahaResponseDeadlineFile[] = "/tmp/update-check-response-deadline";
 // This directory is wiped during powerwash.
diff --git a/common/subprocess.cc b/common/subprocess.cc
index ff37472..023017b 100644
--- a/common/subprocess.cc
+++ b/common/subprocess.cc
@@ -32,11 +32,11 @@
 #include <base/stl_util.h>
 #include <base/strings/string_util.h>
 #include <base/strings/stringprintf.h>
-#include <brillo/process/process.h>
 #include <brillo/secure_blob.h>
 
 #include "update_engine/common/utils.h"
 
+using brillo::MessageLoop;
 using std::string;
 using std::unique_ptr;
 using std::vector;
diff --git a/common/subprocess.h b/common/subprocess.h
index e1a7ce3..179a5c5 100644
--- a/common/subprocess.h
+++ b/common/subprocess.h
@@ -30,8 +30,13 @@
 #include <base/macros.h>
 #include <brillo/asynchronous_signal_handler_interface.h>
 #include <brillo/message_loops/message_loop.h>
+#ifdef __CHROMEOS__
 #include <brillo/process/process.h>
 #include <brillo/process/process_reaper.h>
+#else
+#include <brillo/process.h>
+#include <brillo/process_reaper.h>
+#endif  // __CHROMEOS__
 #include <gtest/gtest_prod.h>  // for FRIEND_TEST
 
 // The Subprocess class is a singleton. It's used to spawn off a subprocess
@@ -124,6 +129,7 @@
     // These are used to monitor the stdout of the running process, including
     // the stderr if it was redirected.
     std::unique_ptr<base::FileDescriptorWatcher::Controller> stdout_controller;
+
     int stdout_fd{-1};
     std::string stdout;
   };
diff --git a/common/subprocess_unittest.cc b/common/subprocess_unittest.cc
index 19b24f4..b4d068f 100644
--- a/common/subprocess_unittest.cc
+++ b/common/subprocess_unittest.cc
@@ -279,7 +279,7 @@
   // This test would leak a callback that runs when the child process exits
   // unless we wait for it to run.
   brillo::MessageLoopRunUntil(
-      &loop_, TimeDelta::FromSeconds(120), base::Bind([] {
+      &loop_, TimeDelta::FromSeconds(20), base::Bind([] {
         return Subprocess::Get().subprocess_records_.empty();
       }));
   EXPECT_TRUE(Subprocess::Get().subprocess_records_.empty());
diff --git a/common/test_utils.cc b/common/test_utils.cc
index 50b0962..bd69d03 100644
--- a/common/test_utils.cc
+++ b/common/test_utils.cc
@@ -37,6 +37,10 @@
 #include <base/files/file_util.h>
 #include <base/logging.h>
 
+#ifdef __ANDROID__
+#include <libdm/loop_control.h>
+#endif
+
 #include "update_engine/common/error_code_utils.h"
 #include "update_engine/common/utils.h"
 #include "update_engine/payload_consumer/file_writer.h"
@@ -44,16 +48,7 @@
 using std::set;
 using std::string;
 using std::vector;
-
-namespace {
-
-#ifdef __ANDROID__
-#define kLoopDevicePrefix "/dev/block/loop"
-#else
-#define kLoopDevicePrefix "/dev/loop"
-#endif  // __ANDROID__
-
-}  // namespace
+using namespace std::chrono_literals;
 
 namespace chromeos_update_engine {
 
@@ -112,17 +107,43 @@
   return utils::WriteFile(path.c_str(), data.data(), data.size());
 }
 
-bool BindToUnusedLoopDevice(const string& filename,
-                            bool writable,
-                            string* out_lo_dev_name) {
-  CHECK(out_lo_dev_name);
+// Configures an already-attached loop device via LOOP_SET_STATUS64: records
+// the backing file's basename, covers the whole file (offset 0, no size
+// limit), and sets the read-only flag when |writable| is false. When
+// |writable| is true, additionally clears the block-device read-only bit
+// (BLKROSET); failure of that step is only logged, not fatal.
+// Returns false (with errno logged) if LOOP_SET_STATUS64 fails.
+bool SetLoopDeviceStatus(int loop_device_fd,
+                         const std::string& filename,
+                         int loop_number,
+                         bool writable) {
+  struct loop_info64 device_info {};
+  device_info.lo_offset = 0;
+  device_info.lo_sizelimit = 0;  // 0 means whole file.
+  device_info.lo_flags = (writable ? 0 : LO_FLAGS_READ_ONLY);
+  device_info.lo_number = loop_number;
+  // lo_file_name is informational only; truncate to the fixed-size field and
+  // guarantee NUL termination.
+  strncpy(reinterpret_cast<char*>(device_info.lo_file_name),
+          base::FilePath(filename).BaseName().value().c_str(),
+          LO_NAME_SIZE - 1);
+  device_info.lo_file_name[LO_NAME_SIZE - 1] = '\0';
+  TEST_AND_RETURN_FALSE_ERRNO(
+      ioctl(loop_device_fd, LOOP_SET_STATUS64, &device_info) == 0);
+  if (writable) {
+    // Make sure loop device isn't read only.
+    int ro = 0;
+    if (ioctl(loop_device_fd, BLKROSET, &ro) != 0) {
+      PLOG(WARNING) << "Failed to mark loop device writable.";
+    }
+  }
+
+  return true;
+}
+
+bool BindToUnusedLoopDeviceLegacy(int data_fd,
+                                  const string& filename,
+                                  bool writable,
+                                  string* out_lo_dev_name) {
   // Get the next available loop-device.
   int control_fd =
       HANDLE_EINTR(open("/dev/loop-control", O_RDWR | O_LARGEFILE));
   TEST_AND_RETURN_FALSE_ERRNO(control_fd >= 0);
   int loop_number = ioctl(control_fd, LOOP_CTL_GET_FREE);
   IGNORE_EINTR(close(control_fd));
-  *out_lo_dev_name = kLoopDevicePrefix + std::to_string(loop_number);
+  *out_lo_dev_name = "/dev/loop" + std::to_string(loop_number);
 
   // Double check that the loop exists and is free.
   int loop_device_fd =
@@ -146,32 +167,35 @@
     return false;
   }
 
-  // Open our data file and assign it to the loop device.
+  // Assign the data fd to the loop device.
+  TEST_AND_RETURN_FALSE_ERRNO(ioctl(loop_device_fd, LOOP_SET_FD, data_fd) == 0);
+  return SetLoopDeviceStatus(loop_device_fd, filename, loop_number, writable);
+}
+
+bool BindToUnusedLoopDevice(const string& filename,
+                            bool writable,
+                            string* out_lo_dev_name) {
+  CHECK(out_lo_dev_name);
   int data_fd = open(filename.c_str(),
                      (writable ? O_RDWR : O_RDONLY) | O_LARGEFILE | O_CLOEXEC);
   TEST_AND_RETURN_FALSE_ERRNO(data_fd >= 0);
   ScopedFdCloser data_fd_closer(&data_fd);
-  TEST_AND_RETURN_FALSE_ERRNO(ioctl(loop_device_fd, LOOP_SET_FD, data_fd) == 0);
 
-  memset(&device_info, 0, sizeof(device_info));
-  device_info.lo_offset = 0;
-  device_info.lo_sizelimit = 0;  // 0 means whole file.
-  device_info.lo_flags = (writable ? 0 : LO_FLAGS_READ_ONLY);
-  device_info.lo_number = loop_number;
-  strncpy(reinterpret_cast<char*>(device_info.lo_file_name),
-          base::FilePath(filename).BaseName().value().c_str(),
-          LO_NAME_SIZE - 1);
-  device_info.lo_file_name[LO_NAME_SIZE - 1] = '\0';
-  TEST_AND_RETURN_FALSE_ERRNO(
-      ioctl(loop_device_fd, LOOP_SET_STATUS64, &device_info) == 0);
-  if (writable) {
-    // Make sure loop device isn't read only.
-    int ro = 0;
-    if (ioctl(loop_device_fd, BLKROSET, &ro) != 0) {
-      PLOG(WARNING) << "Failed to mark loop device writable.";
-    }
-  }
-  return true;
+#ifdef __ANDROID__
+  // Use libdm to bind a free loop device. The library internally handles the
+  // race condition.
+  android::dm::LoopControl loop_control;
+  TEST_AND_RETURN_FALSE(loop_control.Attach(data_fd, 5s, out_lo_dev_name));
+  int loop_device_fd = open(out_lo_dev_name->c_str(), O_RDWR | O_CLOEXEC);
+  ScopedFdCloser loop_fd_closer(&loop_device_fd);
+  int loop_number;
+  TEST_AND_RETURN_FALSE(
+      sscanf(out_lo_dev_name->c_str(), "/dev/block/loop%d", &loop_number) == 1);
+  return SetLoopDeviceStatus(loop_device_fd, filename, loop_number, writable);
+#else
+  return BindToUnusedLoopDeviceLegacy(
+      data_fd, filename, writable, out_lo_dev_name);
+#endif
 }
 
 bool UnbindLoopDevice(const string& lo_dev_name) {
diff --git a/common/utils.cc b/common/utils.cc
index 50b45fa..9e1e6c5 100644
--- a/common/utils.cc
+++ b/common/utils.cc
@@ -30,6 +30,7 @@
 #include <sys/resource.h>
 #include <sys/stat.h>
 #include <sys/types.h>
+#include <time.h>
 #include <unistd.h>
 
 #include <algorithm>
@@ -819,7 +820,7 @@
   return base_code;
 }
 
-string StringVectorToString(const vector<string> &vec_str) {
+string StringVectorToString(const vector<string>& vec_str) {
   string str = "[";
   for (vector<string>::const_iterator i = vec_str.begin(); i != vec_str.end();
        ++i) {
@@ -848,7 +849,7 @@
                             encoded_hash.c_str());
 }
 
-bool ConvertToOmahaInstallDate(Time time, int *out_num_days) {
+bool ConvertToOmahaInstallDate(Time time, int* out_num_days) {
   time_t unix_time = time.ToTimeT();
   // Output of: date +"%s" --date="Jan 1, 2007 0:00 PST".
   const time_t kOmahaEpoch = 1167638400;
@@ -978,10 +979,58 @@
   }
 }
 
+// Resolves |fd| to the path of the file it refers to by reading the
+// /proc/self/fd/<fd> symlink (Linux-specific). Returns the literal string
+// "not found" when the link cannot be read, e.g. when |fd| is not an open
+// descriptor.
+string GetFilePath(int fd) {
+  base::FilePath proc("/proc/self/fd/" + std::to_string(fd));
+  base::FilePath file_name;
+
+  if (!base::ReadSymbolicLink(proc, &file_name)) {
+    return "not found";
+  }
+  return file_name.value();
+}
+
+// Formats |utime| in the local time zone as "YYYYMMDD-HHMMSS" (exactly 15
+// characters). CHECK-fails if the time conversion or formatting fails, so
+// this is intended for trusted inputs such as log-file naming.
+string GetTimeAsString(time_t utime) {
+  struct tm tm;
+  CHECK_EQ(localtime_r(&utime, &tm), &tm);
+  // 15 formatted characters plus the terminating NUL fit in 16 bytes.
+  char str[16];
+  CHECK_EQ(strftime(str, sizeof(str), "%Y%m%d-%H%M%S", &tm), 15u);
+  return str;
+}
+
 string GetExclusionName(const string& str_to_convert) {
   return base::NumberToString(base::StringPieceHash()(str_to_convert));
 }
 
+// Parses |str| as a base-10 int64 timestamp into |*out|. Returns false (and
+// logs a warning) when |str| is not a valid integer.
+static bool ParseTimestamp(const std::string& str, int64_t* out) {
+  if (!base::StringToInt64(str, out)) {
+    LOG(WARNING) << "Invalid timestamp: " << str;
+    return false;
+  }
+  return true;
+}
+
+// Compares two integer timestamps and reports whether updating from
+// |old_version| to |new_version| is allowed:
+// - kSuccess if either side is empty (permissive: update allowed), or if
+//   new >= old;
+// - kError if |old_version| is not a valid integer;
+// - kDownloadManifestParseError if |new_version| is not a valid integer;
+// - kPayloadTimestampError if both parse but |new_version| < |old_version|
+//   (a downgrade).
+ErrorCode IsTimestampNewer(const std::string& old_version,
+                           const std::string& new_version) {
+  if (old_version.empty() || new_version.empty()) {
+    LOG(WARNING)
+        << "One of old/new timestamp is empty, permit update anyway. Old: "
+        << old_version << " New: " << new_version;
+    return ErrorCode::kSuccess;
+  }
+  int64_t old_ver = 0;
+  if (!ParseTimestamp(old_version, &old_ver)) {
+    return ErrorCode::kError;
+  }
+  int64_t new_ver = 0;
+  if (!ParseTimestamp(new_version, &new_ver)) {
+    return ErrorCode::kDownloadManifestParseError;
+  }
+  if (old_ver > new_ver) {
+    return ErrorCode::kPayloadTimestampError;
+  }
+  return ErrorCode::kSuccess;
+}
+
 }  // namespace utils
 
 }  // namespace chromeos_update_engine
diff --git a/common/utils.h b/common/utils.h
index b6880ed..bcaed31 100644
--- a/common/utils.h
+++ b/common/utils.h
@@ -18,6 +18,7 @@
 #define UPDATE_ENGINE_COMMON_UTILS_H_
 
 #include <errno.h>
+#include <time.h>
 #include <unistd.h>
 
 #include <algorithm>
@@ -295,6 +296,9 @@
 // shell command. Returns true on success.
 bool GetVpdValue(std::string key, std::string* result);
 
+// Returns the file path of the file pointed to by the file descriptor |fd|.
+std::string GetFilePath(int fd);
+
 // Divide |x| by |y| and round up to the nearest integer.
 constexpr uint64_t DivRoundUp(uint64_t x, uint64_t y) {
   return (x + y - 1) / y;
@@ -317,10 +321,23 @@
                              uint16_t* high_version,
                              uint16_t* low_version);
 
+// Return a string representation of |utime| for log file names.
+std::string GetTimeAsString(time_t utime);
 // Returns the string format of the hashed |str_to_convert| that can be used
 // with |Excluder| as the exclusion name.
 std::string GetExclusionName(const std::string& str_to_convert);
 
+// Parse |old_version| and |new_version| as integer timestamps and
+// return kSuccess if |new_version| is newer than or equal to |old_version|.
+// Return kSuccess if either one is empty.
+// Return kError if |old_version| is not empty and not an integer.
+// Return kDownloadManifestParseError if |new_version| is not empty and not an
+// integer.
+// Return kPayloadTimestampError if both are integers but |new_version| <
+// |old_version|.
+ErrorCode IsTimestampNewer(const std::string& old_version,
+                           const std::string& new_version);
+
 }  // namespace utils
 
 // Utility class to close a file descriptor
diff --git a/common/utils_unittest.cc b/common/utils_unittest.cc
index f9eb596..d73b3da 100644
--- a/common/utils_unittest.cc
+++ b/common/utils_unittest.cc
@@ -472,4 +472,23 @@
   ExpectInvalidParseRollbackKeyVersion("1.99999");
 }
 
+TEST(UtilsTest, GetFilePathTest) {
+  test_utils::ScopedTempFile file;
+  int fd = HANDLE_EINTR(open(file.path().c_str(), O_RDONLY));
+  EXPECT_GE(fd, 0);
+  EXPECT_EQ(file.path(), utils::GetFilePath(fd));
+  EXPECT_EQ("not found", utils::GetFilePath(-1));
+  IGNORE_EINTR(close(fd));
+}
+
+TEST(UtilsTest, ValidatePerPartitionTimestamp) {
+  ASSERT_EQ(ErrorCode::kPayloadTimestampError,
+            utils::IsTimestampNewer("10", "5"));
+  ASSERT_EQ(ErrorCode::kSuccess, utils::IsTimestampNewer("10", "11"));
+  ASSERT_EQ(ErrorCode::kDownloadManifestParseError,
+            utils::IsTimestampNewer("10", "lol"));
+  ASSERT_EQ(ErrorCode::kError, utils::IsTimestampNewer("lol", "ZZZ"));
+  ASSERT_EQ(ErrorCode::kSuccess, utils::IsTimestampNewer("10", ""));
+}
+
 }  // namespace chromeos_update_engine
diff --git a/daemon_android.cc b/daemon_android.cc
index 1aa921f..313d7dd 100644
--- a/daemon_android.cc
+++ b/daemon_android.cc
@@ -47,16 +47,26 @@
   LOG_IF(ERROR, !daemon_state_android->Initialize())
       << "Failed to initialize system state.";
 
+  auto binder_wrapper = android::BinderWrapper::Get();
+
   // Create the Binder Service.
   binder_service_ = new BinderUpdateEngineAndroidService{
       daemon_state_android->service_delegate()};
-  auto binder_wrapper = android::BinderWrapper::Get();
   if (!binder_wrapper->RegisterService(binder_service_->ServiceName(),
                                        binder_service_)) {
     LOG(ERROR) << "Failed to register binder service.";
   }
-
   daemon_state_->AddObserver(binder_service_.get());
+
+  // Create the stable binder service.
+  stable_binder_service_ = new BinderUpdateEngineAndroidStableService{
+      daemon_state_android->service_delegate()};
+  if (!binder_wrapper->RegisterService(stable_binder_service_->ServiceName(),
+                                       stable_binder_service_)) {
+    LOG(ERROR) << "Failed to register stable binder service.";
+  }
+  daemon_state_->AddObserver(stable_binder_service_.get());
+
   daemon_state_->StartUpdater();
   return EX_OK;
 }
diff --git a/daemon_android.h b/daemon_android.h
index baead37..f0c028e 100644
--- a/daemon_android.h
+++ b/daemon_android.h
@@ -22,6 +22,7 @@
 #include <brillo/binder_watcher.h>
 
 #include "update_engine/binder_service_android.h"
+#include "update_engine/binder_service_stable_android.h"
 #include "update_engine/common/subprocess.h"
 #include "update_engine/daemon_base.h"
 #include "update_engine/daemon_state_interface.h"
@@ -43,6 +44,7 @@
 
   brillo::BinderWatcher binder_watcher_;
   android::sp<BinderUpdateEngineAndroidService> binder_service_;
+  android::sp<BinderUpdateEngineAndroidStableService> stable_binder_service_;
 
   // The daemon state with all the required daemon classes for the configured
   // platform.
diff --git a/daemon_state_android.cc b/daemon_state_android.cc
index c9c09b8..3376e64 100644
--- a/daemon_state_android.cc
+++ b/daemon_state_android.cc
@@ -45,17 +45,17 @@
 
   // Initialize prefs.
   base::FilePath non_volatile_path;
-  // TODO(deymo): Fall back to in-memory prefs if there's no physical directory
-  // available.
   if (!hardware_->GetNonVolatileDirectory(&non_volatile_path)) {
-    LOG(ERROR) << "Failed to get a non-volatile directory.";
-    return false;
-  }
-  Prefs* prefs = new Prefs();
-  prefs_.reset(prefs);
-  if (!prefs->Init(non_volatile_path.Append(kPrefsSubDirectory))) {
-    LOG(ERROR) << "Failed to initialize preferences.";
-    return false;
+    prefs_.reset(new MemoryPrefs());
+    LOG(WARNING)
+        << "Could not get a non-volatile directory, fall back to memory prefs";
+  } else {
+    Prefs* prefs = new Prefs();
+    prefs_.reset(prefs);
+    if (!prefs->Init(non_volatile_path.Append(kPrefsSubDirectory))) {
+      LOG(ERROR) << "Failed to initialize preferences.";
+      return false;
+    }
   }
 
   // The CertificateChecker singleton is used by the update attempter.
diff --git a/dynamic_partition_control_android.cc b/dynamic_partition_control_android.cc
index bd34ea9..ccb99ba 100644
--- a/dynamic_partition_control_android.cc
+++ b/dynamic_partition_control_android.cc
@@ -16,60 +16,162 @@
 
 #include "update_engine/dynamic_partition_control_android.h"
 
+#include <chrono>  // NOLINT(build/c++11) - using libsnapshot / liblp API
+#include <map>
 #include <memory>
 #include <set>
 #include <string>
+#include <string_view>
+#include <utility>
+#include <vector>
 
 #include <android-base/properties.h>
 #include <android-base/strings.h>
 #include <base/files/file_util.h>
 #include <base/logging.h>
+#include <base/strings/string_util.h>
 #include <bootloader_message/bootloader_message.h>
+#include <fs_mgr.h>
 #include <fs_mgr_dm_linear.h>
+#include <fs_mgr_overlayfs.h>
+#include <libavb/libavb.h>
+#include <libdm/dm.h>
+#include <libsnapshot/snapshot.h>
+#include <libsnapshot/snapshot_stub.h>
 
+#include "update_engine/cleanup_previous_update_action.h"
 #include "update_engine/common/boot_control_interface.h"
 #include "update_engine/common/utils.h"
+#include "update_engine/dynamic_partition_utils.h"
+#include "update_engine/payload_consumer/delta_performer.h"
 
 using android::base::GetBoolProperty;
+using android::base::GetProperty;
 using android::base::Join;
 using android::dm::DeviceMapper;
 using android::dm::DmDeviceState;
 using android::fs_mgr::CreateLogicalPartition;
+using android::fs_mgr::CreateLogicalPartitionParams;
 using android::fs_mgr::DestroyLogicalPartition;
+using android::fs_mgr::Fstab;
 using android::fs_mgr::MetadataBuilder;
+using android::fs_mgr::Partition;
 using android::fs_mgr::PartitionOpener;
+using android::fs_mgr::SlotSuffixForSlotNumber;
+using android::snapshot::OptimizeSourceCopyOperation;
+using android::snapshot::Return;
+using android::snapshot::SnapshotManager;
+using android::snapshot::SnapshotManagerStub;
+using android::snapshot::UpdateState;
 
 namespace chromeos_update_engine {
 
 constexpr char kUseDynamicPartitions[] = "ro.boot.dynamic_partitions";
 constexpr char kRetrfoitDynamicPartitions[] =
     "ro.boot.dynamic_partitions_retrofit";
-constexpr uint64_t kMapTimeoutMillis = 1000;
+constexpr char kVirtualAbEnabled[] = "ro.virtual_ab.enabled";
+constexpr char kVirtualAbRetrofit[] = "ro.virtual_ab.retrofit";
+constexpr char kPostinstallFstabPrefix[] = "ro.postinstall.fstab.prefix";
+// Map timeout for dynamic partitions.
+constexpr std::chrono::milliseconds kMapTimeout{1000};
+// Map timeout for dynamic partitions with snapshots. Since several devices
+// need to be mapped, this timeout is longer than |kMapTimeout|.
+constexpr std::chrono::milliseconds kMapSnapshotTimeout{5000};
+
+#ifdef __ANDROID_RECOVERY__
+constexpr bool kIsRecovery = true;
+#else
+constexpr bool kIsRecovery = false;
+#endif
 
 DynamicPartitionControlAndroid::~DynamicPartitionControlAndroid() {
-  CleanupInternal(false /* wait */);
+  Cleanup();
 }
 
-bool DynamicPartitionControlAndroid::IsDynamicPartitionsEnabled() {
-  return GetBoolProperty(kUseDynamicPartitions, false);
+static FeatureFlag GetFeatureFlag(const char* enable_prop,
+                                  const char* retrofit_prop) {
+  bool retrofit = GetBoolProperty(retrofit_prop, false);
+  bool enabled = GetBoolProperty(enable_prop, false);
+  if (retrofit && !enabled) {
+    LOG(ERROR) << retrofit_prop << " is true but " << enable_prop
+               << " is not. These sysprops are inconsistent. Assume that "
+               << enable_prop << " is true from now on.";
+  }
+  if (retrofit) {
+    return FeatureFlag(FeatureFlag::Value::RETROFIT);
+  }
+  if (enabled) {
+    return FeatureFlag(FeatureFlag::Value::LAUNCH);
+  }
+  return FeatureFlag(FeatureFlag::Value::NONE);
 }
 
-bool DynamicPartitionControlAndroid::IsDynamicPartitionsRetrofit() {
-  return GetBoolProperty(kRetrfoitDynamicPartitions, false);
+DynamicPartitionControlAndroid::DynamicPartitionControlAndroid()
+    : dynamic_partitions_(
+          GetFeatureFlag(kUseDynamicPartitions, kRetrfoitDynamicPartitions)),
+      virtual_ab_(GetFeatureFlag(kVirtualAbEnabled, kVirtualAbRetrofit)) {
+  if (GetVirtualAbFeatureFlag().IsEnabled()) {
+    snapshot_ = SnapshotManager::New();
+  } else {
+    snapshot_ = SnapshotManagerStub::New();
+  }
+  CHECK(snapshot_ != nullptr) << "Cannot initialize SnapshotManager.";
 }
 
-bool DynamicPartitionControlAndroid::MapPartitionOnDeviceMapper(
+FeatureFlag DynamicPartitionControlAndroid::GetDynamicPartitionsFeatureFlag() {
+  return dynamic_partitions_;
+}
+
+FeatureFlag DynamicPartitionControlAndroid::GetVirtualAbFeatureFlag() {
+  return virtual_ab_;
+}
+
+bool DynamicPartitionControlAndroid::OptimizeOperation(
+    const std::string& partition_name,
+    const InstallOperation& operation,
+    InstallOperation* optimized) {
+  switch (operation.type()) {
+    case InstallOperation::SOURCE_COPY:
+      return target_supports_snapshot_ &&
+             GetVirtualAbFeatureFlag().IsEnabled() &&
+             mapped_devices_.count(partition_name +
+                                   SlotSuffixForSlotNumber(target_slot_)) > 0 &&
+             OptimizeSourceCopyOperation(operation, optimized);
+      break;
+    default:
+      break;
+  }
+  return false;
+}
+
+bool DynamicPartitionControlAndroid::MapPartitionInternal(
     const std::string& super_device,
     const std::string& target_partition_name,
     uint32_t slot,
     bool force_writable,
     std::string* path) {
-  if (!CreateLogicalPartition(super_device.c_str(),
-                              slot,
-                              target_partition_name,
-                              force_writable,
-                              std::chrono::milliseconds(kMapTimeoutMillis),
-                              path)) {
+  CreateLogicalPartitionParams params = {
+      .block_device = super_device,
+      .metadata_slot = slot,
+      .partition_name = target_partition_name,
+      .force_writable = force_writable,
+  };
+  bool success = false;
+  if (GetVirtualAbFeatureFlag().IsEnabled() && target_supports_snapshot_ &&
+      force_writable && ExpectMetadataMounted()) {
+    // Only target partitions are mapped with force_writable. On Virtual
+    // A/B devices, target partitions may overlap with source partitions, so
+    // they must be mapped with snapshot.
+    // One exception is when /metadata is not mounted. Fallback to
+    // CreateLogicalPartition as snapshots are not created in the first place.
+    params.timeout_ms = kMapSnapshotTimeout;
+    success = snapshot_->MapUpdateSnapshot(params, path);
+  } else {
+    params.timeout_ms = kMapTimeout;
+    success = CreateLogicalPartition(params, path);
+  }
+
+  if (!success) {
     LOG(ERROR) << "Cannot map " << target_partition_name << " in "
                << super_device << " on device mapper.";
     return false;
@@ -81,13 +183,71 @@
   return true;
 }
 
+bool DynamicPartitionControlAndroid::MapPartitionOnDeviceMapper(
+    const std::string& super_device,
+    const std::string& target_partition_name,
+    uint32_t slot,
+    bool force_writable,
+    std::string* path) {
+  DmDeviceState state = GetState(target_partition_name);
+  if (state == DmDeviceState::ACTIVE) {
+    if (mapped_devices_.find(target_partition_name) != mapped_devices_.end()) {
+      if (GetDmDevicePathByName(target_partition_name, path)) {
+        LOG(INFO) << target_partition_name
+                  << " is mapped on device mapper: " << *path;
+        return true;
+      }
+      LOG(ERROR) << target_partition_name << " is mapped but path is unknown.";
+      return false;
+    }
+    // If target_partition_name is not in mapped_devices_ but state is ACTIVE,
+    // the device might be mapped incorrectly before. Attempt to unmap it.
+    // Note that for source partitions, if GetState() == ACTIVE, callers (e.g.
+    // BootControlAndroid) should not call MapPartitionOnDeviceMapper, but
+    // should directly call GetDmDevicePathByName.
+    if (!UnmapPartitionOnDeviceMapper(target_partition_name)) {
+      LOG(ERROR) << target_partition_name
+                 << " is mapped before the update, and it cannot be unmapped.";
+      return false;
+    }
+    state = GetState(target_partition_name);
+    if (state != DmDeviceState::INVALID) {
+      LOG(ERROR) << target_partition_name << " is unmapped but state is "
+                 << static_cast<std::underlying_type_t<DmDeviceState>>(state);
+      return false;
+    }
+  }
+  if (state == DmDeviceState::INVALID) {
+    return MapPartitionInternal(
+        super_device, target_partition_name, slot, force_writable, path);
+  }
+
+  LOG(ERROR) << target_partition_name
+             << " is mapped on device mapper but state is unknown: "
+             << static_cast<std::underlying_type_t<DmDeviceState>>(state);
+  return false;
+}
+
 bool DynamicPartitionControlAndroid::UnmapPartitionOnDeviceMapper(
-    const std::string& target_partition_name, bool wait) {
+    const std::string& target_partition_name) {
   if (DeviceMapper::Instance().GetState(target_partition_name) !=
       DmDeviceState::INVALID) {
-    if (!DestroyLogicalPartition(
-            target_partition_name,
-            std::chrono::milliseconds(wait ? kMapTimeoutMillis : 0))) {
+    // Partitions at target slot on non-Virtual A/B devices are mapped as
+    // dm-linear. Also, on Virtual A/B devices, system_other may be mapped for
+    // preopt apps as dm-linear.
+    // Call DestroyLogicalPartition to handle these cases.
+    bool success = DestroyLogicalPartition(target_partition_name);
+
+    // On a Virtual A/B device, |target_partition_name| may be a leftover from
+    // a paused update. Clean up any underlying devices.
+    if (ExpectMetadataMounted()) {
+      success &= snapshot_->UnmapUpdateSnapshot(target_partition_name);
+    } else {
+      LOG(INFO) << "Skip UnmapUpdateSnapshot(" << target_partition_name
+                << ") because metadata is not mounted";
+    }
+
+    if (!success) {
       LOG(ERROR) << "Cannot unmap " << target_partition_name
                  << " from device mapper.";
       return false;
@@ -99,18 +259,22 @@
   return true;
 }
 
-void DynamicPartitionControlAndroid::CleanupInternal(bool wait) {
+void DynamicPartitionControlAndroid::UnmapAllPartitions() {
+  if (mapped_devices_.empty()) {
+    return;
+  }
   // UnmapPartitionOnDeviceMapper removes objects from mapped_devices_, hence
   // a copy is needed for the loop.
   std::set<std::string> mapped = mapped_devices_;
   LOG(INFO) << "Destroying [" << Join(mapped, ", ") << "] from device mapper";
   for (const auto& partition_name : mapped) {
-    ignore_result(UnmapPartitionOnDeviceMapper(partition_name, wait));
+    ignore_result(UnmapPartitionOnDeviceMapper(partition_name));
   }
 }
 
 void DynamicPartitionControlAndroid::Cleanup() {
-  CleanupInternal(true /* wait */);
+  UnmapAllPartitions();
+  metadata_device_.reset();
 }
 
 bool DynamicPartitionControlAndroid::DeviceExists(const std::string& path) {
@@ -129,27 +293,36 @@
 
 std::unique_ptr<MetadataBuilder>
 DynamicPartitionControlAndroid::LoadMetadataBuilder(
+    const std::string& super_device, uint32_t slot) {
+  auto builder = MetadataBuilder::New(PartitionOpener(), super_device, slot);
+  if (builder == nullptr) {
+    LOG(WARNING) << "No metadata slot " << BootControlInterface::SlotName(slot)
+                 << " in " << super_device;
+    return nullptr;
+  }
+  LOG(INFO) << "Loaded metadata from slot "
+            << BootControlInterface::SlotName(slot) << " in " << super_device;
+  return builder;
+}
+
+std::unique_ptr<MetadataBuilder>
+DynamicPartitionControlAndroid::LoadMetadataBuilder(
     const std::string& super_device,
     uint32_t source_slot,
     uint32_t target_slot) {
-  std::unique_ptr<MetadataBuilder> builder;
-
-  if (target_slot != BootControlInterface::kInvalidSlot &&
-      IsDynamicPartitionsRetrofit()) {
-    builder = MetadataBuilder::NewForUpdate(
-        PartitionOpener(), super_device, source_slot, target_slot);
-  } else {
-    builder =
-        MetadataBuilder::New(PartitionOpener(), super_device, source_slot);
-  }
-
+  bool always_keep_source_slot = !target_supports_snapshot_;
+  auto builder = MetadataBuilder::NewForUpdate(PartitionOpener(),
+                                               super_device,
+                                               source_slot,
+                                               target_slot,
+                                               always_keep_source_slot);
   if (builder == nullptr) {
     LOG(WARNING) << "No metadata slot "
                  << BootControlInterface::SlotName(source_slot) << " in "
                  << super_device;
     return nullptr;
   }
-  LOG(INFO) << "Loaded metadata from slot "
+  LOG(INFO) << "Created metadata for new update from slot "
             << BootControlInterface::SlotName(source_slot) << " in "
             << super_device;
   return builder;
@@ -167,7 +340,7 @@
     return false;
   }
 
-  if (IsDynamicPartitionsRetrofit()) {
+  if (GetDynamicPartitionsFeatureFlag().IsRetrofit()) {
     if (!FlashPartitionTable(super_device, *metadata)) {
       LOG(ERROR) << "Cannot write metadata to " << super_device;
       return false;
@@ -222,4 +395,828 @@
   *out = base::FilePath(misc_device).DirName().value();
   return true;
 }
+
+bool DynamicPartitionControlAndroid::PreparePartitionsForUpdate(
+    uint32_t source_slot,
+    uint32_t target_slot,
+    const DeltaArchiveManifest& manifest,
+    bool update,
+    uint64_t* required_size) {
+  source_slot_ = source_slot;
+  target_slot_ = target_slot;
+  if (required_size != nullptr) {
+    *required_size = 0;
+  }
+
+  if (fs_mgr_overlayfs_is_setup()) {
+    // Non-DAP devices can use overlayfs as well.
+    LOG(WARNING)
+        << "overlayfs overrides are active and can interfere with our "
+           "resources.\n"
+        << "run adb enable-verity to deactivate if required and try again.";
+  }
+
+  // If metadata is erased but not formatted, it is possible to not mount
+  // it in recovery. It is acceptable to skip mounting and choose fallback path
+  // (PrepareDynamicPartitionsForUpdate) when sideloading full OTAs.
+  TEST_AND_RETURN_FALSE(EnsureMetadataMounted() || IsRecovery());
+
+  if (update) {
+    TEST_AND_RETURN_FALSE(EraseSystemOtherAvbFooter(source_slot, target_slot));
+  }
+
+  if (!GetDynamicPartitionsFeatureFlag().IsEnabled()) {
+    return true;
+  }
+
+  if (target_slot == source_slot) {
+    LOG(ERROR) << "Cannot call PreparePartitionsForUpdate on current slot.";
+    return false;
+  }
+
+  if (!SetTargetBuildVars(manifest)) {
+    return false;
+  }
+
+  // Although the current build supports dynamic partitions, the given payload
+  // doesn't use it for target partitions. This could happen when applying a
+  // retrofit update. Skip updating the partition metadata for the target slot.
+  if (!is_target_dynamic_) {
+    return true;
+  }
+
+  if (!update)
+    return true;
+
+  bool delete_source = false;
+
+  if (GetVirtualAbFeatureFlag().IsEnabled()) {
+    // On Virtual A/B device, either CancelUpdate() or BeginUpdate() must be
+    // called before calling UnmapUpdateSnapshot.
+    // - If target_supports_snapshot_, PrepareSnapshotPartitionsForUpdate()
+    //   calls BeginUpdate() which resets update state
+    // - If !target_supports_snapshot_ or PrepareSnapshotPartitionsForUpdate
+    //   failed in recovery, explicitly CancelUpdate().
+    if (target_supports_snapshot_) {
+      if (PrepareSnapshotPartitionsForUpdate(
+              source_slot, target_slot, manifest, required_size)) {
+        return true;
+      }
+
+      // Virtual A/B device doing Virtual A/B update in Android mode must use
+      // snapshots.
+      if (!IsRecovery()) {
+        LOG(ERROR) << "PrepareSnapshotPartitionsForUpdate failed in Android "
+                   << "mode";
+        return false;
+      }
+
+      delete_source = true;
+      LOG(INFO) << "PrepareSnapshotPartitionsForUpdate failed in recovery. "
+                << "Attempt to overwrite existing partitions if possible";
+    } else {
+      // Downgrading to a non-Virtual A/B build, or this is a secondary OTA.
+      LOG(INFO) << "Using regular A/B on Virtual A/B because package disabled "
+                << "snapshots.";
+    }
+
+    // In recovery, if /metadata is not mounted, it is likely that metadata
+    // partition is erased and not formatted yet. After sideloading, when
+    // rebooting into the new version, init will erase metadata partition,
+    // hence the failure of CancelUpdate() can be ignored here.
+    // However, if metadata is mounted and CancelUpdate fails, sideloading
+    // should not proceed because during next boot, snapshots will overlay on
+    // the devices incorrectly.
+    if (ExpectMetadataMounted()) {
+      TEST_AND_RETURN_FALSE(snapshot_->CancelUpdate());
+    } else {
+      LOG(INFO) << "Skip canceling previous update because metadata is not "
+                << "mounted";
+    }
+  }
+
+  // TODO(xunchang) support partial update on non VAB enabled devices.
+  TEST_AND_RETURN_FALSE(PrepareDynamicPartitionsForUpdate(
+      source_slot, target_slot, manifest, delete_source));
+
+  if (required_size != nullptr) {
+    *required_size = 0;
+  }
+  return true;
+}
+
+bool DynamicPartitionControlAndroid::SetTargetBuildVars(
+    const DeltaArchiveManifest& manifest) {
+  // Precondition: current build supports dynamic partition.
+  CHECK(GetDynamicPartitionsFeatureFlag().IsEnabled());
+
+  bool is_target_dynamic =
+      !manifest.dynamic_partition_metadata().groups().empty();
+  bool target_supports_snapshot =
+      manifest.dynamic_partition_metadata().snapshot_enabled();
+
+  if (manifest.partial_update()) {
+    // Partial updates require DAP. For partial updates that do not involve
+    // dynamic partitions, groups() can be empty, so also assume
+    // is_target_dynamic in this case. This assumption should be safe because we
+    // also check target_supports_snapshot below, which presumably also implies
+    // target build supports dynamic partition.
+    if (!is_target_dynamic) {
+      LOG(INFO) << "Assuming target build supports dynamic partitions for "
+                   "partial updates.";
+      is_target_dynamic = true;
+    }
+
+    // Partial updates require Virtual A/B. Double check that both the current
+    // build and the target build support Virtual A/B.
+    if (!GetVirtualAbFeatureFlag().IsEnabled()) {
+      LOG(ERROR) << "Partial update cannot be applied on a device that does "
+                    "not support snapshots.";
+      return false;
+    }
+    if (!target_supports_snapshot) {
+      LOG(ERROR) << "Cannot apply partial update to a build that does not "
+                    "support snapshots.";
+      return false;
+    }
+  }
+
+  // Store the flags.
+  is_target_dynamic_ = is_target_dynamic;
+  // If !is_target_dynamic_, leave target_supports_snapshot_ unset because
+  // snapshots would not work without dynamic partition.
+  if (is_target_dynamic_) {
+    target_supports_snapshot_ = target_supports_snapshot;
+  }
+  return true;
+}
+
+namespace {
+// Try our best to erase AVB footer.
+class AvbFooterEraser {
+ public:
+  explicit AvbFooterEraser(const std::string& path) : path_(path) {}
+  bool Erase() {
+    // Try to mark the block device read-only. Ignore any
+    // failure since this won't work when passing regular files.
+    ignore_result(utils::SetBlockDeviceReadOnly(path_, false /* readonly */));
+
+    fd_.reset(new EintrSafeFileDescriptor());
+    int flags = O_WRONLY | O_TRUNC | O_CLOEXEC | O_SYNC;
+    TEST_AND_RETURN_FALSE(fd_->Open(path_.c_str(), flags));
+
+    // Zero out the last AVB_FOOTER_SIZE bytes of the file.
+    static_assert(AVB_FOOTER_SIZE > 0);
+    off64_t offset = fd_->Seek(-AVB_FOOTER_SIZE, SEEK_END);
+    TEST_AND_RETURN_FALSE_ERRNO(offset >= 0);
+    uint64_t write_size = AVB_FOOTER_SIZE;
+    LOG(INFO) << "Zeroing " << path_ << " @ [" << offset << ", "
+              << (offset + write_size) << "] (" << write_size << " bytes)";
+    brillo::Blob zeros(write_size);
+    TEST_AND_RETURN_FALSE(utils::WriteAll(fd_, zeros.data(), zeros.size()));
+    return true;
+  }
+  ~AvbFooterEraser() {
+    TEST_AND_RETURN(fd_ != nullptr && fd_->IsOpen());
+    if (!fd_->Close()) {
+      LOG(WARNING) << "Failed to close fd for " << path_;
+    }
+  }
+
+ private:
+  std::string path_;
+  FileDescriptorPtr fd_;
+};
+
+}  // namespace
+
+std::optional<bool>
+DynamicPartitionControlAndroid::IsAvbEnabledOnSystemOther() {
+  auto prefix = GetProperty(kPostinstallFstabPrefix, "");
+  if (prefix.empty()) {
+    LOG(WARNING) << "Cannot get " << kPostinstallFstabPrefix;
+    return std::nullopt;
+  }
+  auto path = base::FilePath(prefix).Append("etc/fstab.postinstall").value();
+  return IsAvbEnabledInFstab(path);
+}
+
+std::optional<bool> DynamicPartitionControlAndroid::IsAvbEnabledInFstab(
+    const std::string& path) {
+  Fstab fstab;
+  if (!ReadFstabFromFile(path, &fstab)) {
+    PLOG(WARNING) << "Cannot read fstab from " << path;
+    if (errno == ENOENT) {
+      return false;
+    }
+    return std::nullopt;
+  }
+  for (const auto& entry : fstab) {
+    if (!entry.avb_keys.empty()) {
+      return true;
+    }
+  }
+  return false;
+}
+
+bool DynamicPartitionControlAndroid::GetSystemOtherPath(
+    uint32_t source_slot,
+    uint32_t target_slot,
+    const std::string& partition_name_suffix,
+    std::string* path,
+    bool* should_unmap) {
+  path->clear();
+  *should_unmap = false;
+
+  // Check that AVB is enabled on system_other before erasing.
+  auto has_avb = IsAvbEnabledOnSystemOther();
+  TEST_AND_RETURN_FALSE(has_avb.has_value());
+  if (!has_avb.value()) {
+    LOG(INFO) << "AVB is not enabled on system_other. Skip erasing.";
+    return true;
+  }
+
+  if (!IsRecovery()) {
+    // Found unexpected avb_keys for system_other on devices retrofitting
+    // dynamic partitions. Previous crash in update_engine may leave logical
+    // partitions mapped on physical system_other partition. It is difficult to
+    // handle these cases. Just fail.
+    if (GetDynamicPartitionsFeatureFlag().IsRetrofit()) {
+      LOG(ERROR) << "Cannot erase AVB footer on system_other on devices with "
+                 << "retrofit dynamic partitions. They should not have AVB "
+                 << "enabled on system_other.";
+      return false;
+    }
+  }
+
+  std::string device_dir_str;
+  TEST_AND_RETURN_FALSE(GetDeviceDir(&device_dir_str));
+  base::FilePath device_dir(device_dir_str);
+
+  // On devices without dynamic partition, search for static partitions.
+  if (!GetDynamicPartitionsFeatureFlag().IsEnabled()) {
+    *path = device_dir.Append(partition_name_suffix).value();
+    TEST_AND_RETURN_FALSE(DeviceExists(*path));
+    return true;
+  }
+
+  auto source_super_device =
+      device_dir.Append(GetSuperPartitionName(source_slot)).value();
+
+  auto builder = LoadMetadataBuilder(source_super_device, source_slot);
+  if (builder == nullptr) {
+    if (IsRecovery()) {
+      // It might be corrupted for some reason. It should still be able to
+      // sideload.
+      LOG(WARNING) << "Super partition metadata cannot be read from the source "
+                   << "slot, skip erasing.";
+      return true;
+    } else {
+      // Device has booted into Android mode, indicating that the super
+      // partition metadata should be there.
+      LOG(ERROR) << "Super partition metadata cannot be read from the source "
+                 << "slot. This is unexpected on devices with dynamic "
+                 << "partitions enabled.";
+      return false;
+    }
+  }
+  auto p = builder->FindPartition(partition_name_suffix);
+  if (p == nullptr) {
+    // If the source slot is flashed without system_other, it does not exist
+    // in super partition metadata at source slot. It is safe to skip it.
+    LOG(INFO) << "Can't find " << partition_name_suffix
+              << " in metadata source slot, skip erasing.";
+    return true;
+  }
+  // System_other created by flashing tools should be erased.
+  // If partition is created by update_engine (via NewForUpdate), it is a
+  // left-over partition from the previous update and does not contain
+  // system_other, hence there is no need to erase.
+  // Note the reverse is not necessarily true. If the flag is not set, we don't
+  // know if the partition is created by update_engine or by flashing tools
+  // because older versions of super partition metadata does not contain this
+  // flag. It is okay to erase the AVB footer anyways.
+  if (p->attributes() & LP_PARTITION_ATTR_UPDATED) {
+    LOG(INFO) << partition_name_suffix
+              << " does not contain system_other, skip erasing.";
+    return true;
+  }
+
+  if (p->size() < AVB_FOOTER_SIZE) {
+    LOG(INFO) << partition_name_suffix << " has length " << p->size()
+              << "( < AVB_FOOTER_SIZE " << AVB_FOOTER_SIZE
+              << "), skip erasing.";
+    return true;
+  }
+
+  // Delete any pre-existing device with name |partition_name_suffix| and
+  // also remove it from |mapped_devices_|.
+  // In recovery, metadata might not be mounted, and
+  // UnmapPartitionOnDeviceMapper might fail. However,
+  // it is unusual that system_other has already been mapped. Hence, just skip.
+  TEST_AND_RETURN_FALSE(UnmapPartitionOnDeviceMapper(partition_name_suffix));
+  // Use CreateLogicalPartition directly to avoid mapping with existing
+  // snapshots.
+  CreateLogicalPartitionParams params = {
+      .block_device = source_super_device,
+      .metadata_slot = source_slot,
+      .partition_name = partition_name_suffix,
+      .force_writable = true,
+      .timeout_ms = kMapTimeout,
+  };
+  TEST_AND_RETURN_FALSE(CreateLogicalPartition(params, path));
+  *should_unmap = true;
+  return true;
+}
+
+// Erases the AVB footer of the target-slot "system" partition (system_other)
+// before the update starts, so that a stale-but-valid footer cannot cause
+// verity errors after a rollback + factory reset (see b/152444348 in the
+// header comment). Returns true on success or when erasing is unnecessary.
+bool DynamicPartitionControlAndroid::EraseSystemOtherAvbFooter(
+    uint32_t source_slot, uint32_t target_slot) {
+  LOG(INFO) << "Erasing AVB footer of system_other partition before update.";
+
+  const std::string target_suffix = SlotSuffixForSlotNumber(target_slot);
+  const std::string partition_name_suffix = "system" + target_suffix;
+
+  std::string path;
+  bool should_unmap = false;
+
+  // GetSystemOtherPath leaves |path| empty when there is nothing to erase
+  // (e.g. AVB disabled, or partition too small to hold a footer).
+  TEST_AND_RETURN_FALSE(GetSystemOtherPath(
+      source_slot, target_slot, partition_name_suffix, &path, &should_unmap));
+
+  if (path.empty()) {
+    return true;
+  }
+
+  bool ret = AvbFooterEraser(path).Erase();
+
+  // Delete |partition_name_suffix| from device mapper and from
+  // |mapped_devices_| again so that it does not interfere with update process.
+  // In recovery, metadata might not be mounted, and
+  // UnmapPartitionOnDeviceMapper might fail. However, DestroyLogicalPartition
+  // should be called. If DestroyLogicalPartition does fail, it is still okay
+  // to skip the error here and let Prepare*() fail later.
+  if (should_unmap) {
+    TEST_AND_RETURN_FALSE(UnmapPartitionOnDeviceMapper(partition_name_suffix));
+  }
+
+  return ret;
+}
+
+// Writes super partition metadata for |target_slot| based on the metadata
+// read from |source_slot| and the group/partition sizes in |manifest|.
+// Used when updating dynamic partitions without Virtual A/B snapshots.
+// If |delete_source| is set, source-slot groups and partitions are removed
+// from the metadata first (the recovery/sideload full-OTA path).
+bool DynamicPartitionControlAndroid::PrepareDynamicPartitionsForUpdate(
+    uint32_t source_slot,
+    uint32_t target_slot,
+    const DeltaArchiveManifest& manifest,
+    bool delete_source) {
+  const std::string target_suffix = SlotSuffixForSlotNumber(target_slot);
+
+  // Unmap all the target dynamic partitions because they would become
+  // inconsistent with the new metadata.
+  for (const auto& group : manifest.dynamic_partition_metadata().groups()) {
+    for (const auto& partition_name : group.partition_names()) {
+      if (!UnmapPartitionOnDeviceMapper(partition_name + target_suffix)) {
+        return false;
+      }
+    }
+  }
+
+  std::string device_dir_str;
+  if (!GetDeviceDir(&device_dir_str)) {
+    return false;
+  }
+  base::FilePath device_dir(device_dir_str);
+  auto source_device =
+      device_dir.Append(GetSuperPartitionName(source_slot)).value();
+
+  auto builder = LoadMetadataBuilder(source_device, source_slot, target_slot);
+  if (builder == nullptr) {
+    LOG(ERROR) << "No metadata at "
+               << BootControlInterface::SlotName(source_slot);
+    return false;
+  }
+
+  if (delete_source) {
+    TEST_AND_RETURN_FALSE(
+        DeleteSourcePartitions(builder.get(), source_slot, manifest));
+  }
+
+  if (!UpdatePartitionMetadata(builder.get(), target_slot, manifest)) {
+    return false;
+  }
+
+  // Persist the rewritten metadata into the target slot's super partition.
+  auto target_device =
+      device_dir.Append(GetSuperPartitionName(target_slot)).value();
+  return StoreMetadata(target_device, builder.get(), target_slot);
+}
+
+// Begins a Virtual A/B update and creates CoW snapshots for the partitions
+// described in |manifest|. On a NO_SPACE failure, |required_size| (if
+// non-null) is set to the space that would be needed for the snapshots.
+// Requires /metadata to be mounted.
+bool DynamicPartitionControlAndroid::PrepareSnapshotPartitionsForUpdate(
+    uint32_t source_slot,
+    uint32_t target_slot,
+    const DeltaArchiveManifest& manifest,
+    uint64_t* required_size) {
+  TEST_AND_RETURN_FALSE(ExpectMetadataMounted());
+  if (!snapshot_->BeginUpdate()) {
+    LOG(ERROR) << "Cannot begin new update.";
+    return false;
+  }
+  auto ret = snapshot_->CreateUpdateSnapshots(manifest);
+  if (!ret) {
+    LOG(ERROR) << "Cannot create update snapshots: " << ret.string();
+    if (required_size != nullptr &&
+        ret.error_code() == Return::ErrorCode::NO_SPACE) {
+      *required_size = ret.required_size();
+    }
+    return false;
+  }
+  return true;
+}
+
+// Returns the name of the "super" partition (which stores dynamic partition
+// metadata) for |slot|; delegates to fs_mgr. Virtual so tests can override.
+std::string DynamicPartitionControlAndroid::GetSuperPartitionName(
+    uint32_t slot) {
+  return fs_mgr_get_super_partition_name(slot);
+}
+
+// Rewrites |builder| so it contains the target-slot groups and partitions
+// described by |manifest|: deletes existing groups with the target suffix,
+// validates that the total group size fits in the allocatable space of the
+// super partition, then re-adds each group and resizes each partition.
+// Returns false on any validation or builder failure.
+bool DynamicPartitionControlAndroid::UpdatePartitionMetadata(
+    MetadataBuilder* builder,
+    uint32_t target_slot,
+    const DeltaArchiveManifest& manifest) {
+  // Check preconditions.
+  CHECK(!GetVirtualAbFeatureFlag().IsEnabled() || IsRecovery())
+      << "UpdatePartitionMetadata is called on a Virtual A/B device "
+         "but source partitions is not deleted. This is not allowed.";
+
+  // If applying downgrade from Virtual A/B to non-Virtual A/B, the left-over
+  // COW group needs to be deleted to ensure there are enough space to create
+  // target partitions.
+  builder->RemoveGroupAndPartitions(android::snapshot::kCowGroupName);
+
+  const std::string target_suffix = SlotSuffixForSlotNumber(target_slot);
+  DeleteGroupsWithSuffix(builder, target_suffix);
+
+  uint64_t total_size = 0;
+  for (const auto& group : manifest.dynamic_partition_metadata().groups()) {
+    total_size += group.size();
+  }
+
+  std::string expr;
+  uint64_t allocatable_space = builder->AllocatableSpace();
+  // On device retrofitting dynamic partitions, allocatable_space = super.
+  // On device launching dynamic partitions w/o VAB,
+  //   allocatable_space = super / 2.
+  // On device launching dynamic partitions with VAB, allocatable_space = super.
+  if (!GetDynamicPartitionsFeatureFlag().IsRetrofit() &&
+      !GetVirtualAbFeatureFlag().IsEnabled()) {
+    allocatable_space /= 2;
+    expr = "half of ";
+  }
+  if (total_size > allocatable_space) {
+    LOG(ERROR) << "The maximum size of all groups with suffix " << target_suffix
+               << " (" << total_size << ") has exceeded " << expr
+               << "allocatable space for dynamic partitions "
+               << allocatable_space << ".";
+    return false;
+  }
+
+  // name of partition(e.g. "system") -> size in bytes
+  std::map<std::string, uint64_t> partition_sizes;
+  for (const auto& partition : manifest.partitions()) {
+    partition_sizes.emplace(partition.partition_name(),
+                            partition.new_partition_info().size());
+  }
+
+  for (const auto& group : manifest.dynamic_partition_metadata().groups()) {
+    auto group_name_suffix = group.name() + target_suffix;
+    if (!builder->AddGroup(group_name_suffix, group.size())) {
+      LOG(ERROR) << "Cannot add group " << group_name_suffix << " with size "
+                 << group.size();
+      return false;
+    }
+    LOG(INFO) << "Added group " << group_name_suffix << " with size "
+              << group.size();
+
+    for (const auto& partition_name : group.partition_names()) {
+      auto partition_sizes_it = partition_sizes.find(partition_name);
+      if (partition_sizes_it == partition_sizes.end()) {
+        // TODO(tbao): Support auto-filling partition info for framework-only
+        // OTA.
+        LOG(ERROR) << "dynamic_partition_metadata contains partition "
+                   << partition_name << " but it is not part of the manifest. "
+                   << "This is not supported.";
+        return false;
+      }
+      uint64_t partition_size = partition_sizes_it->second;
+
+      auto partition_name_suffix = partition_name + target_suffix;
+      Partition* p = builder->AddPartition(
+          partition_name_suffix, group_name_suffix, LP_PARTITION_ATTR_READONLY);
+      if (!p) {
+        LOG(ERROR) << "Cannot add partition " << partition_name_suffix
+                   << " to group " << group_name_suffix;
+        return false;
+      }
+      if (!builder->ResizePartition(p, partition_size)) {
+        LOG(ERROR) << "Cannot resize partition " << partition_name_suffix
+                   << " to size " << partition_size << ". Not enough space?";
+        return false;
+      }
+      LOG(INFO) << "Added partition " << partition_name_suffix << " to group "
+                << group_name_suffix << " with size " << partition_size;
+    }
+  }
+
+  return true;
+}
+
+// Marks snapshot writes as finished for a Virtual A/B update. Returns true
+// (no-op) when /metadata is not mounted or when no snapshot update is in the
+// Initiated state.
+bool DynamicPartitionControlAndroid::FinishUpdate(bool powerwash_required) {
+  if (ExpectMetadataMounted()) {
+    if (snapshot_->GetUpdateState() == UpdateState::Initiated) {
+      LOG(INFO) << "Snapshot writes are done.";
+      return snapshot_->FinishedSnapshotWrites(powerwash_required);
+    }
+  } else {
+    LOG(INFO) << "Skip FinishedSnapshotWrites() because /metadata is not "
+              << "mounted";
+  }
+  return true;
+}
+
+// Resolves the block-device path for |partition_name| at |slot| into
+// |device|. Tries the dynamic-partition path first (when the feature is
+// enabled and the slot is the current one, or the target is known to be
+// dynamic for this payload), then falls back to a static device node under
+// the boot-control device directory. |is_dynamic|, if non-null, reports
+// which path succeeded.
+bool DynamicPartitionControlAndroid::GetPartitionDevice(
+    const std::string& partition_name,
+    uint32_t slot,
+    uint32_t current_slot,
+    bool not_in_payload,
+    std::string* device,
+    bool* is_dynamic) {
+  const auto& partition_name_suffix =
+      partition_name + SlotSuffixForSlotNumber(slot);
+  std::string device_dir_str;
+  TEST_AND_RETURN_FALSE(GetDeviceDir(&device_dir_str));
+  base::FilePath device_dir(device_dir_str);
+
+  if (is_dynamic) {
+    *is_dynamic = false;
+  }
+
+  // When looking up target partition devices, treat them as static if the
+  // current payload doesn't encode them as dynamic partitions. This may happen
+  // when applying a retrofit update on top of a dynamic-partitions-enabled
+  // build.
+  if (GetDynamicPartitionsFeatureFlag().IsEnabled() &&
+      (slot == current_slot || is_target_dynamic_)) {
+    switch (GetDynamicPartitionDevice(device_dir,
+                                      partition_name_suffix,
+                                      slot,
+                                      current_slot,
+                                      not_in_payload,
+                                      device)) {
+      case DynamicPartitionDeviceStatus::SUCCESS:
+        if (is_dynamic) {
+          *is_dynamic = true;
+        }
+        return true;
+      case DynamicPartitionDeviceStatus::TRY_STATIC:
+        break;
+      case DynamicPartitionDeviceStatus::ERROR:  // fallthrough
+      default:
+        return false;
+    }
+  }
+  // Static fallback: the partition must exist as a plain device node.
+  base::FilePath path = device_dir.Append(partition_name_suffix);
+  if (!DeviceExists(path.value())) {
+    LOG(ERROR) << "Device file " << path.value() << " does not exist.";
+    return false;
+  }
+
+  *device = path.value();
+  return true;
+}
+
+// Convenience overload: resolves a payload partition device without
+// reporting whether it is dynamic (|not_in_payload| = false).
+bool DynamicPartitionControlAndroid::GetPartitionDevice(
+    const std::string& partition_name,
+    uint32_t slot,
+    uint32_t current_slot,
+    std::string* device) {
+  return GetPartitionDevice(
+      partition_name, slot, current_slot, false, device, nullptr);
+}
+
+// Returns true if |partition_name_suffix| is listed as a block device in the
+// super partition metadata at |current_slot| (i.e. it is a physical "super"
+// device, not usable as a logical partition).
+bool DynamicPartitionControlAndroid::IsSuperBlockDevice(
+    const base::FilePath& device_dir,
+    uint32_t current_slot,
+    const std::string& partition_name_suffix) {
+  std::string source_device =
+      device_dir.Append(GetSuperPartitionName(current_slot)).value();
+  auto source_metadata = LoadMetadataBuilder(source_device, current_slot);
+  // LoadMetadataBuilder returns nullptr when metadata cannot be read (see
+  // GetDynamicPartitionDevice); guard against dereferencing a null builder.
+  return source_metadata != nullptr &&
+         source_metadata->HasBlockDevice(partition_name_suffix);
+}
+
+// Resolves the device path of dynamic partition |partition_name_suffix| in
+// the super partition metadata at |slot|.
+// Returns SUCCESS and sets |device| when resolved; TRY_STATIC when the
+// partition is absent from super metadata and the caller should fall back to
+// a static partition; ERROR otherwise.
+DynamicPartitionControlAndroid::DynamicPartitionDeviceStatus
+DynamicPartitionControlAndroid::GetDynamicPartitionDevice(
+    const base::FilePath& device_dir,
+    const std::string& partition_name_suffix,
+    uint32_t slot,
+    uint32_t current_slot,
+    bool not_in_payload,
+    std::string* device) {
+  std::string super_device =
+      device_dir.Append(GetSuperPartitionName(slot)).value();
+
+  auto builder = LoadMetadataBuilder(super_device, slot);
+  if (builder == nullptr) {
+    LOG(ERROR) << "No metadata in slot "
+               << BootControlInterface::SlotName(slot);
+    return DynamicPartitionDeviceStatus::ERROR;
+  }
+  if (builder->FindPartition(partition_name_suffix) == nullptr) {
+    LOG(INFO) << partition_name_suffix
+              << " is not in super partition metadata.";
+
+    if (IsSuperBlockDevice(device_dir, current_slot, partition_name_suffix)) {
+      // Fixed log message: the two concatenated literals previously joined
+      // without a separating space ("metadata.It").
+      LOG(ERROR) << "The static partition " << partition_name_suffix
+                 << " is a block device for current metadata. "
+                 << "It cannot be used as a logical partition.";
+      return DynamicPartitionDeviceStatus::ERROR;
+    }
+
+    return DynamicPartitionDeviceStatus::TRY_STATIC;
+  }
+
+  if (slot == current_slot) {
+    if (GetState(partition_name_suffix) != DmDeviceState::ACTIVE) {
+      LOG(WARNING) << partition_name_suffix << " is at current slot but it is "
+                   << "not mapped. Now try to map it.";
+    } else {
+      if (GetDmDevicePathByName(partition_name_suffix, device)) {
+        LOG(INFO) << partition_name_suffix
+                  << " is mapped on device mapper: " << *device;
+        return DynamicPartitionDeviceStatus::SUCCESS;
+      }
+      // Fixed log message: missing leading space after the partition name.
+      LOG(ERROR) << partition_name_suffix << " is mapped but path is unknown.";
+      return DynamicPartitionDeviceStatus::ERROR;
+    }
+  }
+
+  // Only map writable when mapping the "other" slot as part of the payload.
+  bool force_writable = (slot != current_slot) && !not_in_payload;
+  if (MapPartitionOnDeviceMapper(
+          super_device, partition_name_suffix, slot, force_writable, device)) {
+    return DynamicPartitionDeviceStatus::SUCCESS;
+  }
+  return DynamicPartitionDeviceStatus::ERROR;
+}
+
+// Test-only hook: overrides the set of device-mapper devices this object
+// believes it has mapped (|mapped_devices_|).
+void DynamicPartitionControlAndroid::set_fake_mapped_devices(
+    const std::set<std::string>& fake) {
+  mapped_devices_ = fake;
+}
+
+// Returns whether update_engine runs in recovery mode (compile-time
+// constant); virtual so mocks can simulate recovery in tests.
+bool DynamicPartitionControlAndroid::IsRecovery() {
+  return kIsRecovery;
+}
+
+// Returns true if any partition in |manifest| carries old_partition_info,
+// i.e. the payload is an incremental (delta) update.
+static bool IsIncrementalUpdate(const DeltaArchiveManifest& manifest) {
+  const auto& partitions = manifest.partitions();
+  return std::any_of(partitions.begin(), partitions.end(), [](const auto& p) {
+    return p.has_old_partition_info();
+  });
+}
+
+// When sideloading a full OTA in recovery, removes groups and partitions
+// with the source-slot suffix from |builder| so that target partitions can
+// be resized into the freed space. Incremental OTAs are rejected because
+// they require source partition data (and snapshots cannot be created).
+bool DynamicPartitionControlAndroid::DeleteSourcePartitions(
+    MetadataBuilder* builder,
+    uint32_t source_slot,
+    const DeltaArchiveManifest& manifest) {
+  TEST_AND_RETURN_FALSE(IsRecovery());
+
+  if (IsIncrementalUpdate(manifest)) {
+    LOG(ERROR) << "Cannot sideload incremental OTA because snapshots cannot "
+               << "be created.";
+    if (GetVirtualAbFeatureFlag().IsLaunch()) {
+      // Fixed log message: was "devices launches " << " Virtual A/B" (bad
+      // grammar and a doubled space at the literal boundary).
+      LOG(ERROR) << "Sideloading incremental updates on devices launching "
+                 << "Virtual A/B is not supported.";
+    }
+    return false;
+  }
+
+  // Fixed log message: missing space before "may" after the slot name.
+  LOG(INFO) << "Will overwrite existing partitions. Slot "
+            << BootControlInterface::SlotName(source_slot)
+            << " may be unbootable until update finishes!";
+  const std::string source_suffix = SlotSuffixForSlotNumber(source_slot);
+  DeleteGroupsWithSuffix(builder, source_suffix);
+
+  return true;
+}
+
+// Returns the action used to clean up the previous update: a NoOpAction on
+// non-Virtual A/B devices, otherwise a CleanupPreviousUpdateAction backed by
+// the snapshot manager.
+std::unique_ptr<AbstractAction>
+DynamicPartitionControlAndroid::GetCleanupPreviousUpdateAction(
+    BootControlInterface* boot_control,
+    PrefsInterface* prefs,
+    CleanupPreviousUpdateActionDelegateInterface* delegate) {
+  if (!GetVirtualAbFeatureFlag().IsEnabled()) {
+    return std::make_unique<NoOpAction>();
+  }
+  return std::make_unique<CleanupPreviousUpdateAction>(
+      prefs, boot_control, snapshot_.get(), delegate);
+}
+
+// Resets Virtual A/B update state: clears update progress in |prefs| and
+// cancels the in-flight snapshot update when /metadata is mounted. No-op
+// (returns true) on non-Virtual A/B devices.
+bool DynamicPartitionControlAndroid::ResetUpdate(PrefsInterface* prefs) {
+  if (!GetVirtualAbFeatureFlag().IsEnabled()) {
+    return true;
+  }
+
+  LOG(INFO) << __func__ << " resetting update state and deleting snapshots.";
+  TEST_AND_RETURN_FALSE(prefs != nullptr);
+
+  // If the device has already booted into the target slot,
+  // ResetUpdateProgress may pass but CancelUpdate fails.
+  // This is expected. A scheduled CleanupPreviousUpdateAction should free
+  // space when it is done.
+  TEST_AND_RETURN_FALSE(DeltaPerformer::ResetUpdateProgress(
+      prefs, false /* quick */, false /* skip dynamic partitions metadata */));
+
+  if (ExpectMetadataMounted()) {
+    TEST_AND_RETURN_FALSE(snapshot_->CancelUpdate());
+  } else {
+    LOG(INFO) << "Skip cancelling update in ResetUpdate because /metadata is "
+              << "not mounted";
+  }
+
+  return true;
+}
+
+// Lists the names (without slot suffix) of all dynamic partitions recorded
+// in the super partition metadata for |current_slot|. Partitions whose names
+// do not end with the slot suffix are skipped. Fails if the dynamic
+// partitions feature is disabled or metadata cannot be loaded.
+bool DynamicPartitionControlAndroid::ListDynamicPartitionsForSlot(
+    uint32_t current_slot, std::vector<std::string>* partitions) {
+  if (!GetDynamicPartitionsFeatureFlag().IsEnabled()) {
+    LOG(ERROR) << "Dynamic partition is not enabled";
+    return false;
+  }
+
+  std::string device_dir_str;
+  TEST_AND_RETURN_FALSE(GetDeviceDir(&device_dir_str));
+  base::FilePath device_dir(device_dir_str);
+  auto super_device =
+      device_dir.Append(GetSuperPartitionName(current_slot)).value();
+  auto builder = LoadMetadataBuilder(super_device, current_slot);
+  TEST_AND_RETURN_FALSE(builder != nullptr);
+
+  std::vector<std::string> result;
+  auto suffix = SlotSuffixForSlotNumber(current_slot);
+  for (const auto& group : builder->ListGroups()) {
+    for (const auto& partition : builder->ListPartitionsInGroup(group)) {
+      std::string_view partition_name = partition->name();
+      // ConsumeSuffix strips the slot suffix in place; skip entries that do
+      // not carry this slot's suffix.
+      if (!android::base::ConsumeSuffix(&partition_name, suffix)) {
+        continue;
+      }
+      result.emplace_back(partition_name);
+    }
+  }
+  *partitions = std::move(result);
+  return true;
+}
+
+// Verifies, via MetadataBuilder, that the extents of |partitions| in the
+// target slot's metadata match those in the source slot's metadata (used for
+// partitions the payload does not touch).
+bool DynamicPartitionControlAndroid::VerifyExtentsForUntouchedPartitions(
+    uint32_t source_slot,
+    uint32_t target_slot,
+    const std::vector<std::string>& partitions) {
+  std::string device_dir_str;
+  TEST_AND_RETURN_FALSE(GetDeviceDir(&device_dir_str));
+  base::FilePath device_dir(device_dir_str);
+
+  auto source_super_device =
+      device_dir.Append(GetSuperPartitionName(source_slot)).value();
+  auto source_builder = LoadMetadataBuilder(source_super_device, source_slot);
+  TEST_AND_RETURN_FALSE(source_builder != nullptr);
+
+  auto target_super_device =
+      device_dir.Append(GetSuperPartitionName(target_slot)).value();
+  auto target_builder = LoadMetadataBuilder(target_super_device, target_slot);
+  TEST_AND_RETURN_FALSE(target_builder != nullptr);
+
+  return MetadataBuilder::VerifyExtentsAgainstSourceMetadata(
+      *source_builder, source_slot, *target_builder, target_slot, partitions);
+}
+
+// Returns whether /metadata is expected to be mounted for snapshot
+// operations: false on non-Virtual A/B devices, true in Android mode
+// (assumed mounted), and an explicit |metadata_device_| check in recovery.
+// Does not mount anything; see EnsureMetadataMounted.
+bool DynamicPartitionControlAndroid::ExpectMetadataMounted() {
+  // No need to mount metadata for non-Virtual A/B devices.
+  if (!GetVirtualAbFeatureFlag().IsEnabled()) {
+    return false;
+  }
+  // Intentionally not checking |metadata_device_| in Android mode.
+  // /metadata should always be mounted in Android mode. If it isn't, let caller
+  // fails when calling into SnapshotManager.
+  if (!IsRecovery()) {
+    return true;
+  }
+  // In recovery mode, explicitly check |metadata_device_|.
+  return metadata_device_ != nullptr;
+}
+
+// Ensures /metadata is mounted via the snapshot manager, caching the mount
+// handle in |metadata_device_|. Returns true immediately on non-Virtual A/B
+// devices without doing anything.
+bool DynamicPartitionControlAndroid::EnsureMetadataMounted() {
+  // No need to mount metadata for non-Virtual A/B devices.
+  if (!GetVirtualAbFeatureFlag().IsEnabled()) {
+    return true;
+  }
+
+  if (metadata_device_ == nullptr) {
+    metadata_device_ = snapshot_->EnsureMetadataMounted();
+  }
+  return metadata_device_ != nullptr;
+}
+
 }  // namespace chromeos_update_engine
diff --git a/dynamic_partition_control_android.h b/dynamic_partition_control_android.h
index 0ccab4e..49967f6 100644
--- a/dynamic_partition_control_android.h
+++ b/dynamic_partition_control_android.h
@@ -17,45 +17,274 @@
 #ifndef UPDATE_ENGINE_DYNAMIC_PARTITION_CONTROL_ANDROID_H_
 #define UPDATE_ENGINE_DYNAMIC_PARTITION_CONTROL_ANDROID_H_
 
-#include "update_engine/dynamic_partition_control_interface.h"
-
 #include <memory>
 #include <set>
 #include <string>
+#include <vector>
+
+#include <base/files/file_util.h>
+#include <libsnapshot/auto_device.h>
+#include <libsnapshot/snapshot.h>
+
+#include "update_engine/common/dynamic_partition_control_interface.h"
 
 namespace chromeos_update_engine {
 
 class DynamicPartitionControlAndroid : public DynamicPartitionControlInterface {
  public:
-  DynamicPartitionControlAndroid() = default;
+  DynamicPartitionControlAndroid();
   ~DynamicPartitionControlAndroid();
-  bool IsDynamicPartitionsEnabled() override;
-  bool IsDynamicPartitionsRetrofit() override;
-  bool MapPartitionOnDeviceMapper(const std::string& super_device,
-                                  const std::string& target_partition_name,
-                                  uint32_t slot,
-                                  bool force_writable,
-                                  std::string* path) override;
-  bool UnmapPartitionOnDeviceMapper(const std::string& target_partition_name,
-                                    bool wait) override;
+  FeatureFlag GetDynamicPartitionsFeatureFlag() override;
+  FeatureFlag GetVirtualAbFeatureFlag() override;
+  bool OptimizeOperation(const std::string& partition_name,
+                         const InstallOperation& operation,
+                         InstallOperation* optimized) override;
   void Cleanup() override;
-  bool DeviceExists(const std::string& path) override;
-  android::dm::DmDeviceState GetState(const std::string& name) override;
-  bool GetDmDevicePathByName(const std::string& name,
-                             std::string* path) override;
-  std::unique_ptr<android::fs_mgr::MetadataBuilder> LoadMetadataBuilder(
-      const std::string& super_device,
+
+  bool PreparePartitionsForUpdate(uint32_t source_slot,
+                                  uint32_t target_slot,
+                                  const DeltaArchiveManifest& manifest,
+                                  bool update,
+                                  uint64_t* required_size) override;
+  bool FinishUpdate(bool powerwash_required) override;
+  std::unique_ptr<AbstractAction> GetCleanupPreviousUpdateAction(
+      BootControlInterface* boot_control,
+      PrefsInterface* prefs,
+      CleanupPreviousUpdateActionDelegateInterface* delegate) override;
+
+  bool ResetUpdate(PrefsInterface* prefs) override;
+
+  bool ListDynamicPartitionsForSlot(
+      uint32_t current_slot, std::vector<std::string>* partitions) override;
+
+  bool VerifyExtentsForUntouchedPartitions(
       uint32_t source_slot,
-      uint32_t target_slot) override;
-  bool StoreMetadata(const std::string& super_device,
-                     android::fs_mgr::MetadataBuilder* builder,
-                     uint32_t target_slot) override;
+      uint32_t target_slot,
+      const std::vector<std::string>& partitions) override;
+
   bool GetDeviceDir(std::string* path) override;
 
- private:
-  std::set<std::string> mapped_devices_;
+  // Return the device for partition |partition_name| at slot |slot|.
+  // |current_slot| should be set to the current active slot.
+  // Note: this function is only used by BootControl*::GetPartitionDevice.
+  // Other callers should prefer BootControl*::GetPartitionDevice over
+  // BootControl*::GetDynamicPartitionControl()->GetPartitionDevice().
+  bool GetPartitionDevice(const std::string& partition_name,
+                          uint32_t slot,
+                          uint32_t current_slot,
+                          bool not_in_payload,
+                          std::string* device,
+                          bool* is_dynamic);
 
-  void CleanupInternal(bool wait);
+  bool GetPartitionDevice(const std::string& partition_name,
+                          uint32_t slot,
+                          uint32_t current_slot,
+                          std::string* device);
+
+ protected:
+  // These functions are exposed for testing.
+
+  // Unmap logical partition on device mapper. This is the reverse operation
+  // of MapPartitionOnDeviceMapper.
+  // Returns true if unmapped successfully.
+  virtual bool UnmapPartitionOnDeviceMapper(
+      const std::string& target_partition_name);
+
+  // Retrieves metadata from |super_device| at slot |slot|.
+  virtual std::unique_ptr<android::fs_mgr::MetadataBuilder> LoadMetadataBuilder(
+      const std::string& super_device, uint32_t slot);
+
+  // Retrieves metadata from |super_device| at slot |source_slot|. And modifies
+  // the metadata so that during updates, the metadata can be written to
+  // |target_slot|. In particular, on retrofit devices, the returned metadata
+  // automatically includes block devices at |target_slot|.
+  virtual std::unique_ptr<android::fs_mgr::MetadataBuilder> LoadMetadataBuilder(
+      const std::string& super_device,
+      uint32_t source_slot,
+      uint32_t target_slot);
+
+  // Write metadata |builder| to |super_device| at slot |target_slot|.
+  virtual bool StoreMetadata(const std::string& super_device,
+                             android::fs_mgr::MetadataBuilder* builder,
+                             uint32_t target_slot);
+
+  // Map logical partition on device-mapper.
+  // |super_device| is the device path of the physical partition ("super").
+  // |target_partition_name| is the identifier used in metadata; for example,
+  // "vendor_a"
+  // |slot| is the selected slot to mount; for example, 0 for "_a".
+  // Returns true if mapped successfully; if so, |path| is set to the device
+  // path of the mapped logical partition.
+  virtual bool MapPartitionOnDeviceMapper(
+      const std::string& super_device,
+      const std::string& target_partition_name,
+      uint32_t slot,
+      bool force_writable,
+      std::string* path);
+
+  // Return true if a static partition exists at device path |path|.
+  virtual bool DeviceExists(const std::string& path);
+
+  // Returns the current state of the underlying device mapper device
+  // with given name.
+  // One of INVALID, SUSPENDED or ACTIVE.
+  virtual android::dm::DmDeviceState GetState(const std::string& name);
+
+  // Returns the path to the device mapper device node in '/dev' corresponding
+  // to 'name'. If the device does not exist, false is returned, and the path
+  // parameter is not set.
+  virtual bool GetDmDevicePathByName(const std::string& name,
+                                     std::string* path);
+
+  // Return the name of the super partition (which stores super partition
+  // metadata) for a given slot.
+  virtual std::string GetSuperPartitionName(uint32_t slot);
+
+  virtual void set_fake_mapped_devices(const std::set<std::string>& fake);
+
+  // Allow mock objects to override this to test recovery mode.
+  virtual bool IsRecovery();
+
+  // Determine path for system_other partition.
+  // |source_slot| should be current slot.
+  // |target_slot| should be "other" slot.
+  // |partition_name_suffix| should be "system" + suffix(|target_slot|).
+  // Return true and set |path| if successful.
+  // Set |path| to empty if no need to erase system_other.
+  // Set |should_unmap| to true if path needs to be unmapped later.
+  //
+  // Note: system_other cannot use GetPartitionDevice or
+  // GetDynamicPartitionDevice because:
+  // - super partition metadata may be loaded from the source slot
+  // - UPDATED flag needs to be checked to skip erasing if partition is not
+  //   created by flashing tools
+  // - Snapshots from previous update attempts should not be used.
+  virtual bool GetSystemOtherPath(uint32_t source_slot,
+                                  uint32_t target_slot,
+                                  const std::string& partition_name_suffix,
+                                  std::string* path,
+                                  bool* should_unmap);
+
+  // Returns true if any entry in the fstab file in |path| has AVB enabled,
+  // false if not enabled, and nullopt for any error.
+  virtual std::optional<bool> IsAvbEnabledInFstab(const std::string& path);
+
+  // Returns true if system_other has AVB enabled, false if not enabled, and
+  // nullopt for any error.
+  virtual std::optional<bool> IsAvbEnabledOnSystemOther();
+
+  // Erase system_other partition that may contain system_other.img.
+  // After the update, the content of system_other may be corrupted but with
+  // valid AVB footer. If the update is rolled back and factory data reset is
+  // triggered, system_b fails to be mapped with verity errors (see
+  // b/152444348). Erase the system_other so that mapping system_other is
+  // skipped.
+  virtual bool EraseSystemOtherAvbFooter(uint32_t source_slot,
+                                         uint32_t target_slot);
+
+  // Helper for PreparePartitionsForUpdate. Used for devices with dynamic
+  // partitions updating without snapshots.
+  // If |delete_source| is set, source partitions are deleted before resizing
+  // target partitions (using DeleteSourcePartitions).
+  virtual bool PrepareDynamicPartitionsForUpdate(
+      uint32_t source_slot,
+      uint32_t target_slot,
+      const DeltaArchiveManifest& manifest,
+      bool delete_source);
+
+ private:
+  friend class DynamicPartitionControlAndroidTest;
+  friend class SnapshotPartitionTestP;
+
+  void UnmapAllPartitions();
+  bool MapPartitionInternal(const std::string& super_device,
+                            const std::string& target_partition_name,
+                            uint32_t slot,
+                            bool force_writable,
+                            std::string* path);
+
+  // Update |builder| according to |partition_metadata|.
+  // - In Android mode, this is only called when the device
+  //   does not have Virtual A/B.
+  // - When sideloading, this maybe called as a fallback path if CoW cannot
+  //   be created.
+  bool UpdatePartitionMetadata(android::fs_mgr::MetadataBuilder* builder,
+                               uint32_t target_slot,
+                               const DeltaArchiveManifest& manifest);
+
+  // Helper for PreparePartitionsForUpdate. Used for snapshotted partitions for
+  // Virtual A/B update.
+  bool PrepareSnapshotPartitionsForUpdate(uint32_t source_slot,
+                                          uint32_t target_slot,
+                                          const DeltaArchiveManifest& manifest,
+                                          uint64_t* required_size);
+
+  enum class DynamicPartitionDeviceStatus {
+    SUCCESS,
+    ERROR,
+    TRY_STATIC,
+  };
+
+  // Return SUCCESS and path in |device| if partition is dynamic.
+  // Return ERROR if any error.
+  // Return TRY_STATIC if caller should resolve the partition as a static
+  // partition instead.
+  DynamicPartitionDeviceStatus GetDynamicPartitionDevice(
+      const base::FilePath& device_dir,
+      const std::string& partition_name_suffix,
+      uint32_t slot,
+      uint32_t current_slot,
+      bool not_in_payload,
+      std::string* device);
+
+  // Return true if |partition_name_suffix| is a block device of
+  // super partition metadata slot |slot|.
+  bool IsSuperBlockDevice(const base::FilePath& device_dir,
+                          uint32_t current_slot,
+                          const std::string& partition_name_suffix);
+
+  // If sideloading a full OTA, delete source partitions from |builder|.
+  bool DeleteSourcePartitions(android::fs_mgr::MetadataBuilder* builder,
+                              uint32_t source_slot,
+                              const DeltaArchiveManifest& manifest);
+
+  // Returns true if metadata is expected to be mounted, false otherwise.
+  // Note that it returns false on non-Virtual A/B devices.
+  //
+  // Almost all functions of SnapshotManager depend on metadata being mounted.
+  // - In Android mode for Virtual A/B devices, assume it is mounted. If not,
+  //   let caller fails when calling into SnapshotManager.
+  // - In recovery for Virtual A/B devices, it is possible that metadata is not
+  //   formatted, hence it cannot be mounted. Caller should not call into
+  //   SnapshotManager.
+  // - On non-Virtual A/B devices, updates do not depend on metadata partition.
+  //   Caller should not call into SnapshotManager.
+  //
+  // This function does NOT mount metadata partition. Use EnsureMetadataMounted
+  // to mount metadata partition.
+  bool ExpectMetadataMounted();
+
+  // Ensure /metadata is mounted. Returns true if successful, false otherwise.
+  //
+  // Note that this function returns true on non-Virtual A/B devices without
+  // doing anything.
+  bool EnsureMetadataMounted();
+
+  // Set boolean flags related to target build. This includes flags like
+  // target_supports_snapshot_ and is_target_dynamic_.
+  bool SetTargetBuildVars(const DeltaArchiveManifest& manifest);
+
+  std::set<std::string> mapped_devices_;
+  const FeatureFlag dynamic_partitions_;
+  const FeatureFlag virtual_ab_;
+  std::unique_ptr<android::snapshot::ISnapshotManager> snapshot_;
+  std::unique_ptr<android::snapshot::AutoDevice> metadata_device_;
+  bool target_supports_snapshot_ = false;
+  // Whether the target partitions should be loaded as dynamic partitions. Set
+  // by PreparePartitionsForUpdate() per each update.
+  bool is_target_dynamic_ = false;
+  uint32_t source_slot_ = UINT32_MAX;
+  uint32_t target_slot_ = UINT32_MAX;
 
   DISALLOW_COPY_AND_ASSIGN(DynamicPartitionControlAndroid);
 };
diff --git a/dynamic_partition_control_android_unittest.cc b/dynamic_partition_control_android_unittest.cc
new file mode 100644
index 0000000..223e177
--- /dev/null
+++ b/dynamic_partition_control_android_unittest.cc
@@ -0,0 +1,1030 @@
+//
+// Copyright (C) 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/dynamic_partition_control_android.h"
+
+#include <set>
+#include <vector>
+
+#include <base/logging.h>
+#include <base/strings/string_util.h>
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+#include <libavb/libavb.h>
+#include <libsnapshot/mock_snapshot.h>
+
+#include "update_engine/common/mock_prefs.h"
+#include "update_engine/common/test_utils.h"
+#include "update_engine/dynamic_partition_test_utils.h"
+#include "update_engine/mock_dynamic_partition_control.h"
+
+using android::dm::DmDeviceState;
+using android::snapshot::MockSnapshotManager;
+using chromeos_update_engine::test_utils::ScopedLoopbackDeviceBinder;
+using chromeos_update_engine::test_utils::ScopedTempFile;
+using std::string;
+using testing::_;
+using testing::AnyNumber;
+using testing::AnyOf;
+using testing::Invoke;
+using testing::NiceMock;
+using testing::Not;
+using testing::Optional;
+using testing::Return;
+
+namespace chromeos_update_engine {
+
+// Test fixture for DynamicPartitionControlAndroid. The module under test is
+// held behind a NiceMock so that calls without an explicit expectation fall
+// through to the ON_CALL defaults installed in SetUp().
+class DynamicPartitionControlAndroidTest : public ::testing::Test {
+ public:
+  void SetUp() override {
+    module_ = std::make_unique<NiceMock<MockDynamicPartitionControlAndroid>>();
+
+    // Default device profile: dynamic partitions launched, no Virtual A/B.
+    ON_CALL(dynamicControl(), GetDynamicPartitionsFeatureFlag())
+        .WillByDefault(Return(FeatureFlag(FeatureFlag::Value::LAUNCH)));
+    ON_CALL(dynamicControl(), GetVirtualAbFeatureFlag())
+        .WillByDefault(Return(FeatureFlag(FeatureFlag::Value::NONE)));
+
+    // All block devices are reported under the fake device directory.
+    ON_CALL(dynamicControl(), GetDeviceDir(_))
+        .WillByDefault(Invoke([](auto path) {
+          *path = kFakeDevicePath;
+          return true;
+        }));
+
+    ON_CALL(dynamicControl(), GetSuperPartitionName(_))
+        .WillByDefault(Return(kFakeSuper));
+
+    // Device-mapper paths resolve to fake /dev/mapper-style paths.
+    ON_CALL(dynamicControl(), GetDmDevicePathByName(_, _))
+        .WillByDefault(Invoke([](auto partition_name_suffix, auto device) {
+          *device = GetDmDevice(partition_name_suffix);
+          return true;
+        }));
+
+    // Erasing the system_other AVB footer succeeds by default; tests that
+    // care about it exercise the Real* implementation explicitly.
+    ON_CALL(dynamicControl(), EraseSystemOtherAvbFooter(_, _))
+        .WillByDefault(Return(true));
+
+    ON_CALL(dynamicControl(), IsRecovery()).WillByDefault(Return(false));
+
+    // Delegate to the real implementation so the production logic is tested.
+    ON_CALL(dynamicControl(), PrepareDynamicPartitionsForUpdate(_, _, _, _))
+        .WillByDefault(Invoke([&](uint32_t source_slot,
+                                  uint32_t target_slot,
+                                  const DeltaArchiveManifest& manifest,
+                                  bool delete_source) {
+          return dynamicControl().RealPrepareDynamicPartitionsForUpdate(
+              source_slot, target_slot, manifest, delete_source);
+        }));
+  }
+
+  // Return the mocked DynamicPartitionControlInterface.
+  NiceMock<MockDynamicPartitionControlAndroid>& dynamicControl() {
+    return static_cast<NiceMock<MockDynamicPartitionControlAndroid>&>(*module_);
+  }
+
+  // Return the fake block device path of the super partition for |slot|.
+  std::string GetSuperDevice(uint32_t slot) {
+    return GetDevice(dynamicControl().GetSuperPartitionName(slot));
+  }
+
+  // Slot indices for the current test scenario (see SetSlots()).
+  uint32_t source() { return slots_.source; }
+  uint32_t target() { return slots_.target; }
+
+  // Return partition names with suffix of source().
+  std::string S(const std::string& name) {
+    return name + kSlotSuffixes[source()];
+  }
+
+  // Return partition names with suffix of target().
+  std::string T(const std::string& name) {
+    return name + kSlotSuffixes[target()];
+  }
+
+  // Set the fake metadata to return when LoadMetadataBuilder is called on
+  // |slot|. Both the 2-arg and 3-arg overloads are stubbed so either code
+  // path sees the same fake metadata.
+  void SetMetadata(uint32_t slot,
+                   const PartitionSuffixSizes& sizes,
+                   uint32_t partition_attr = 0,
+                   uint64_t super_size = kDefaultSuperSize) {
+    EXPECT_CALL(dynamicControl(),
+                LoadMetadataBuilder(GetSuperDevice(slot), slot))
+        .Times(AnyNumber())
+        .WillRepeatedly(Invoke([=](auto, auto) {
+          return NewFakeMetadata(PartitionSuffixSizesToManifest(sizes),
+                                 partition_attr,
+                                 super_size);
+        }));
+
+    EXPECT_CALL(dynamicControl(),
+                LoadMetadataBuilder(GetSuperDevice(slot), slot, _))
+        .Times(AnyNumber())
+        .WillRepeatedly(Invoke([=](auto, auto, auto) {
+          return NewFakeMetadata(PartitionSuffixSizesToManifest(sizes),
+                                 partition_attr,
+                                 super_size);
+        }));
+  }
+
+  // Expect metadata matching |partition_sizes| to be stored exactly once to
+  // the target slot of the super device.
+  void ExpectStoreMetadata(const PartitionSuffixSizes& partition_sizes) {
+    EXPECT_CALL(dynamicControl(),
+                StoreMetadata(GetSuperDevice(target()),
+                              MetadataMatches(partition_sizes),
+                              target()))
+        .WillOnce(Return(true));
+  }
+
+  // Expect that UnmapPartitionOnDeviceMapper is called on target() metadata
+  // slot with each partition in |partitions|.
+  void ExpectUnmap(const std::set<std::string>& partitions) {
+    // Error when UnmapPartitionOnDeviceMapper is called on unknown arguments.
+    ON_CALL(dynamicControl(), UnmapPartitionOnDeviceMapper(_))
+        .WillByDefault(Return(false));
+
+    for (const auto& partition : partitions) {
+      EXPECT_CALL(dynamicControl(), UnmapPartitionOnDeviceMapper(partition))
+          .WillOnce(Return(true));
+    }
+  }
+  // Convenience wrapper: prepare partitions for a fresh update (update=true)
+  // from source() to target() with the given sizes.
+  bool PreparePartitionsForUpdate(const PartitionSizes& partition_sizes) {
+    return dynamicControl().PreparePartitionsForUpdate(
+        source(),
+        target(),
+        PartitionSizesToManifest(partition_sizes),
+        true,
+        nullptr);
+  }
+  void SetSlots(const TestParam& slots) { slots_ = slots; }
+
+  // Toggle target_supports_snapshot_ on the module under test.
+  void SetSnapshotEnabled(bool enabled) {
+    dynamicControl().target_supports_snapshot_ = enabled;
+  }
+
+  // Minimal MatchResultListener that records match explanations to a stream.
+  struct Listener : public ::testing::MatchResultListener {
+    explicit Listener(std::ostream* os) : MatchResultListener(os) {}
+  };
+
+  // Overload taking size maps; converts them to manifests and delegates.
+  testing::AssertionResult UpdatePartitionMetadata(
+      const PartitionSuffixSizes& source_metadata,
+      const PartitionSizes& update_metadata,
+      const PartitionSuffixSizes& expected) {
+    return UpdatePartitionMetadata(
+        PartitionSuffixSizesToManifest(source_metadata),
+        PartitionSizesToManifest(update_metadata),
+        PartitionSuffixSizesToManifest(expected));
+  }
+  // Overload taking an expected manifest; delegates with a matcher.
+  testing::AssertionResult UpdatePartitionMetadata(
+      const DeltaArchiveManifest& source_manifest,
+      const DeltaArchiveManifest& update_manifest,
+      const DeltaArchiveManifest& expected) {
+    return UpdatePartitionMetadata(
+        source_manifest, update_manifest, MetadataMatches(expected));
+  }
+  // Run UpdatePartitionMetadata on fake metadata built from
+  // |source_manifest| and check the result against |matcher|. The matcher's
+  // explanation is forwarded into the assertion message either way.
+  testing::AssertionResult UpdatePartitionMetadata(
+      const DeltaArchiveManifest& source_manifest,
+      const DeltaArchiveManifest& update_manifest,
+      const Matcher<MetadataBuilder*>& matcher) {
+    auto super_metadata = NewFakeMetadata(source_manifest);
+    if (!module_->UpdatePartitionMetadata(
+            super_metadata.get(), target(), update_manifest)) {
+      return testing::AssertionFailure()
+             << "UpdatePartitionMetadataInternal failed";
+    }
+    std::stringstream ss;
+    Listener listener(&ss);
+    if (matcher.MatchAndExplain(super_metadata.get(), &listener)) {
+      return testing::AssertionSuccess() << ss.str();
+    } else {
+      return testing::AssertionFailure() << ss.str();
+    }
+  }
+
+  // Module under test (always a NiceMock; see dynamicControl()).
+  std::unique_ptr<DynamicPartitionControlAndroid> module_;
+  // Current (source, target) slot pair.
+  TestParam slots_;
+};
+
+// Parameterized variant of the fixture: the TestParam supplies the
+// (source, target) slot pair so every test runs for both slot orders.
+class DynamicPartitionControlAndroidTestP
+    : public DynamicPartitionControlAndroidTest,
+      public ::testing::WithParamInterface<TestParam> {
+ public:
+  void SetUp() override {
+    DynamicPartitionControlAndroidTest::SetUp();
+    SetSlots(GetParam());
+  }
+};
+
+// Test resize case. Grow if target metadata contains a partition with a size
+// less than expected. Here "system" grows from 2 GiB to 3 GiB in the target
+// slot; the source slot is untouched.
+TEST_P(DynamicPartitionControlAndroidTestP,
+       NeedGrowIfSizeNotMatchWhenResizing) {
+  PartitionSuffixSizes source_metadata{{S("system"), 2_GiB},
+                                       {S("vendor"), 1_GiB},
+                                       {T("system"), 2_GiB},
+                                       {T("vendor"), 1_GiB}};
+  PartitionSuffixSizes expected{{S("system"), 2_GiB},
+                                {S("vendor"), 1_GiB},
+                                {T("system"), 3_GiB},
+                                {T("vendor"), 1_GiB}};
+  PartitionSizes update_metadata{{"system", 3_GiB}, {"vendor", 1_GiB}};
+  EXPECT_TRUE(
+      UpdatePartitionMetadata(source_metadata, update_metadata, expected));
+}
+
+// Test resize case. Shrink if target metadata contains a partition with a size
+// greater than expected. Here "vendor" shrinks from 1 GiB to 150 MiB in the
+// target slot.
+TEST_P(DynamicPartitionControlAndroidTestP,
+       NeedShrinkIfSizeNotMatchWhenResizing) {
+  PartitionSuffixSizes source_metadata{{S("system"), 2_GiB},
+                                       {S("vendor"), 1_GiB},
+                                       {T("system"), 2_GiB},
+                                       {T("vendor"), 1_GiB}};
+  PartitionSuffixSizes expected{{S("system"), 2_GiB},
+                                {S("vendor"), 1_GiB},
+                                {T("system"), 2_GiB},
+                                {T("vendor"), 150_MiB}};
+  PartitionSizes update_metadata{{"system", 2_GiB}, {"vendor", 150_MiB}};
+  EXPECT_TRUE(
+      UpdatePartitionMetadata(source_metadata, update_metadata, expected));
+}
+
+// Test adding partitions on the first run: starting from empty source
+// metadata, both target partitions must be created.
+TEST_P(DynamicPartitionControlAndroidTestP, AddPartitionToEmptyMetadata) {
+  PartitionSuffixSizes source_metadata{};
+  PartitionSuffixSizes expected{{T("system"), 2_GiB}, {T("vendor"), 1_GiB}};
+  PartitionSizes update_metadata{{"system", 2_GiB}, {"vendor", 1_GiB}};
+  EXPECT_TRUE(
+      UpdatePartitionMetadata(source_metadata, update_metadata, expected));
+}
+
+// Test subsequent add case: "vendor" is added to a target slot that already
+// has "system"; existing partitions are preserved.
+TEST_P(DynamicPartitionControlAndroidTestP, AddAdditionalPartition) {
+  PartitionSuffixSizes source_metadata{{S("system"), 2_GiB},
+                                       {T("system"), 2_GiB}};
+  PartitionSuffixSizes expected{
+      {S("system"), 2_GiB}, {T("system"), 2_GiB}, {T("vendor"), 1_GiB}};
+  PartitionSizes update_metadata{{"system", 2_GiB}, {"vendor", 1_GiB}};
+  EXPECT_TRUE(
+      UpdatePartitionMetadata(source_metadata, update_metadata, expected));
+}
+
+// Test delete one partition: "vendor" is absent from the update manifest, so
+// it must be removed from the target slot (source slot untouched).
+TEST_P(DynamicPartitionControlAndroidTestP, DeletePartition) {
+  PartitionSuffixSizes source_metadata{{S("system"), 2_GiB},
+                                       {S("vendor"), 1_GiB},
+                                       {T("system"), 2_GiB},
+                                       {T("vendor"), 1_GiB}};
+  // No T("vendor")
+  PartitionSuffixSizes expected{
+      {S("system"), 2_GiB}, {S("vendor"), 1_GiB}, {T("system"), 2_GiB}};
+  PartitionSizes update_metadata{{"system", 2_GiB}};
+  EXPECT_TRUE(
+      UpdatePartitionMetadata(source_metadata, update_metadata, expected));
+}
+
+// Test delete all partitions: an empty update manifest removes every target
+// partition while leaving the source slot intact.
+TEST_P(DynamicPartitionControlAndroidTestP, DeleteAll) {
+  PartitionSuffixSizes source_metadata{{S("system"), 2_GiB},
+                                       {S("vendor"), 1_GiB},
+                                       {T("system"), 2_GiB},
+                                       {T("vendor"), 1_GiB}};
+  PartitionSuffixSizes expected{{S("system"), 2_GiB}, {S("vendor"), 1_GiB}};
+  PartitionSizes update_metadata{};
+  EXPECT_TRUE(
+      UpdatePartitionMetadata(source_metadata, update_metadata, expected));
+}
+
+// Test corrupt source metadata case: LoadMetadataBuilder returning nullptr
+// for the source slot must abort PreparePartitionsForUpdate.
+TEST_P(DynamicPartitionControlAndroidTestP, CorruptedSourceMetadata) {
+  EXPECT_CALL(dynamicControl(),
+              LoadMetadataBuilder(GetSuperDevice(source()), source(), _))
+      .WillOnce(Invoke([](auto, auto, auto) { return nullptr; }));
+  // Target partitions are still expected to be unmapped before the failure.
+  ExpectUnmap({T("system")});
+
+  EXPECT_FALSE(PreparePartitionsForUpdate({{"system", 1_GiB}}))
+      << "Should not be able to continue with corrupt source metadata";
+}
+
+// Test that UpdatePartitionMetadata fails if there is not enough space on the
+// device: 5 GiB source + 6 GiB requested target exceeds the fake super size.
+TEST_P(DynamicPartitionControlAndroidTestP, NotEnoughSpace) {
+  PartitionSuffixSizes source_metadata{{S("system"), 3_GiB},
+                                       {S("vendor"), 2_GiB},
+                                       {T("system"), 0},
+                                       {T("vendor"), 0}};
+  PartitionSizes update_metadata{{"system", 3_GiB}, {"vendor", 3_GiB}};
+
+  EXPECT_FALSE(UpdatePartitionMetadata(source_metadata, update_metadata, {}))
+      << "Should not be able to fit 11GiB data into 10GiB space";
+}
+
+// A single slot may not use more than half of super: requesting 6 GiB for
+// the target slot must fail even though super has room overall.
+TEST_P(DynamicPartitionControlAndroidTestP, NotEnoughSpaceForSlot) {
+  PartitionSuffixSizes source_metadata{{S("system"), 1_GiB},
+                                       {S("vendor"), 1_GiB},
+                                       {T("system"), 0},
+                                       {T("vendor"), 0}};
+  PartitionSizes update_metadata{{"system", 3_GiB}, {"vendor", 3_GiB}};
+  EXPECT_FALSE(UpdatePartitionMetadata(source_metadata, update_metadata, {}))
+      << "Should not be able to grow over size of super / 2";
+}
+
+// On a retrofit device, source partitions may be dynamic while target
+// partitions are still static block devices. GetPartitionDevice must resolve
+// each case correctly, without consulting device-mapper for static ones.
+TEST_P(DynamicPartitionControlAndroidTestP,
+       ApplyRetrofitUpdateOnDynamicPartitionsEnabledBuild) {
+  ON_CALL(dynamicControl(), GetDynamicPartitionsFeatureFlag())
+      .WillByDefault(Return(FeatureFlag(FeatureFlag::Value::RETROFIT)));
+  // Static partition {system,bar}_{a,b} exists.
+  EXPECT_CALL(dynamicControl(),
+              DeviceExists(AnyOf(GetDevice(S("bar")),
+                                 GetDevice(T("bar")),
+                                 GetDevice(S("system")),
+                                 GetDevice(T("system")))))
+      .WillRepeatedly(Return(true));
+
+  SetMetadata(source(),
+              {{S("system"), 2_GiB},
+               {S("vendor"), 1_GiB},
+               {T("system"), 2_GiB},
+               {T("vendor"), 1_GiB}});
+
+  // Not calling through
+  // DynamicPartitionControlAndroidTest::PreparePartitionsForUpdate(), since we
+  // don't want any default group in the PartitionMetadata.
+  EXPECT_TRUE(dynamicControl().PreparePartitionsForUpdate(
+      source(), target(), {}, true, nullptr));
+
+  // Should use dynamic source partitions.
+  EXPECT_CALL(dynamicControl(), GetState(S("system")))
+      .Times(1)
+      .WillOnce(Return(DmDeviceState::ACTIVE));
+  string system_device;
+  EXPECT_TRUE(dynamicControl().GetPartitionDevice(
+      "system", source(), source(), &system_device));
+  EXPECT_EQ(GetDmDevice(S("system")), system_device);
+
+  // Should use static target partitions without querying dynamic control.
+  EXPECT_CALL(dynamicControl(), GetState(T("system"))).Times(0);
+  EXPECT_TRUE(dynamicControl().GetPartitionDevice(
+      "system", target(), source(), &system_device));
+  EXPECT_EQ(GetDevice(T("system")), system_device);
+
+  // Static partition "bar": neither slot should query device-mapper state.
+  EXPECT_CALL(dynamicControl(), GetState(S("bar"))).Times(0);
+  std::string bar_device;
+  EXPECT_TRUE(dynamicControl().GetPartitionDevice(
+      "bar", source(), source(), &bar_device));
+  EXPECT_EQ(GetDevice(S("bar")), bar_device);
+
+  EXPECT_CALL(dynamicControl(), GetState(T("bar"))).Times(0);
+  EXPECT_TRUE(dynamicControl().GetPartitionDevice(
+      "bar", target(), source(), &bar_device));
+  EXPECT_EQ(GetDevice(T("bar")), bar_device);
+}
+
+// When resuming an update (update=false), target dynamic partitions are
+// remapped through device-mapper; static partitions still resolve to their
+// plain block device paths.
+TEST_P(DynamicPartitionControlAndroidTestP,
+       GetPartitionDeviceWhenResumingUpdate) {
+  // Static partition bar_{a,b} exists.
+  EXPECT_CALL(dynamicControl(),
+              DeviceExists(AnyOf(GetDevice(S("bar")), GetDevice(T("bar")))))
+      .WillRepeatedly(Return(true));
+
+  // Both of the two slots contain valid partition metadata, since this is
+  // resuming an update.
+  SetMetadata(source(),
+              {{S("system"), 2_GiB},
+               {S("vendor"), 1_GiB},
+               {T("system"), 2_GiB},
+               {T("vendor"), 1_GiB}});
+  SetMetadata(target(),
+              {{S("system"), 2_GiB},
+               {S("vendor"), 1_GiB},
+               {T("system"), 2_GiB},
+               {T("vendor"), 1_GiB}});
+
+  // update=false indicates a resumed (not fresh) update.
+  EXPECT_TRUE(dynamicControl().PreparePartitionsForUpdate(
+      source(),
+      target(),
+      PartitionSizesToManifest({{"system", 2_GiB}, {"vendor", 1_GiB}}),
+      false,
+      nullptr));
+
+  // Dynamic partition "system".
+  EXPECT_CALL(dynamicControl(), GetState(S("system")))
+      .Times(1)
+      .WillOnce(Return(DmDeviceState::ACTIVE));
+  string system_device;
+  EXPECT_TRUE(dynamicControl().GetPartitionDevice(
+      "system", source(), source(), &system_device));
+  EXPECT_EQ(GetDmDevice(S("system")), system_device);
+
+  // The target "system" partition is remapped via MapPartitionOnDeviceMapper.
+  EXPECT_CALL(dynamicControl(), GetState(T("system")))
+      .Times(AnyNumber())
+      .WillOnce(Return(DmDeviceState::ACTIVE));
+  EXPECT_CALL(dynamicControl(),
+              MapPartitionOnDeviceMapper(
+                  GetSuperDevice(target()), T("system"), target(), _, _))
+      .Times(AnyNumber())
+      .WillRepeatedly(
+          Invoke([](const auto&, const auto& name, auto, auto, auto* device) {
+            *device = "/fake/remapped/" + name;
+            return true;
+          }));
+  EXPECT_TRUE(dynamicControl().GetPartitionDevice(
+      "system", target(), source(), &system_device));
+  EXPECT_EQ("/fake/remapped/" + T("system"), system_device);
+
+  // Static partition "bar": no device-mapper queries for either slot.
+  EXPECT_CALL(dynamicControl(), GetState(S("bar"))).Times(0);
+  std::string bar_device;
+  EXPECT_TRUE(dynamicControl().GetPartitionDevice(
+      "bar", source(), source(), &bar_device));
+  EXPECT_EQ(GetDevice(S("bar")), bar_device);
+
+  EXPECT_CALL(dynamicControl(), GetState(T("bar"))).Times(0);
+  EXPECT_TRUE(dynamicControl().GetPartitionDevice(
+      "bar", target(), source(), &bar_device));
+  EXPECT_EQ(GetDevice(T("bar")), bar_device);
+}
+
+// Run every TestP case for both slot orders: (source=0, target=1) and
+// (source=1, target=0).
+// INSTANTIATE_TEST_CASE_P is deprecated in googletest; use the _SUITE_ form.
+INSTANTIATE_TEST_SUITE_P(DynamicPartitionControlAndroidTest,
+                         DynamicPartitionControlAndroidTestP,
+                         testing::Values(TestParam{0, 1}, TestParam{1, 0}));
+
+// Fixture for update-group tests. The source manifest pre-populates both
+// slots with an "android" group (3 GiB, containing "system") and an "oem"
+// group (2 GiB, containing "vendor"); target partitions start at size 0.
+class DynamicPartitionControlAndroidGroupTestP
+    : public DynamicPartitionControlAndroidTestP {
+ public:
+  DeltaArchiveManifest source_manifest;
+  void SetUp() override {
+    DynamicPartitionControlAndroidTestP::SetUp();
+    AddGroupAndPartition(
+        &source_manifest, S("android"), 3_GiB, S("system"), 2_GiB);
+    AddGroupAndPartition(&source_manifest, S("oem"), 2_GiB, S("vendor"), 1_GiB);
+    AddGroupAndPartition(&source_manifest, T("android"), 3_GiB, T("system"), 0);
+    AddGroupAndPartition(&source_manifest, T("oem"), 2_GiB, T("vendor"), 0);
+  }
+
+  // Add |group| of |group_size| to |manifest|, then add |partition| of
+  // |partition_size| inside it.
+  void AddGroupAndPartition(DeltaArchiveManifest* manifest,
+                            const string& group,
+                            uint64_t group_size,
+                            const string& partition,
+                            uint64_t partition_size) {
+    auto* g = AddGroup(manifest, group, group_size);
+    AddPartition(manifest, g, partition, partition_size);
+  }
+};
+
+// Allow to resize within group: each partition grows up to, but not past,
+// its group's maximum size.
+TEST_P(DynamicPartitionControlAndroidGroupTestP, ResizeWithinGroup) {
+  DeltaArchiveManifest expected;
+  AddGroupAndPartition(&expected, T("android"), 3_GiB, T("system"), 3_GiB);
+  AddGroupAndPartition(&expected, T("oem"), 2_GiB, T("vendor"), 2_GiB);
+
+  DeltaArchiveManifest update_manifest;
+  AddGroupAndPartition(&update_manifest, "android", 3_GiB, "system", 3_GiB);
+  AddGroupAndPartition(&update_manifest, "oem", 2_GiB, "vendor", 2_GiB);
+
+  EXPECT_TRUE(
+      UpdatePartitionMetadata(source_manifest, update_manifest, expected));
+}
+
+// "vendor" (3 GiB) exceeds the 2 GiB maximum of its group "oem", so the
+// update must be rejected.
+TEST_P(DynamicPartitionControlAndroidGroupTestP, NotEnoughSpaceForGroup) {
+  DeltaArchiveManifest update_manifest;
+  // Two separate statements; the original accidentally joined them with the
+  // comma operator.
+  AddGroupAndPartition(&update_manifest, "android", 3_GiB, "system", 1_GiB);
+  AddGroupAndPartition(&update_manifest, "oem", 2_GiB, "vendor", 3_GiB);
+  EXPECT_FALSE(UpdatePartitionMetadata(source_manifest, update_manifest, {}))
+      << "Should not be able to grow over maximum size of group";
+}
+
+// Groups themselves may not sum to more than half of super: two 3 GiB groups
+// must be rejected.
+TEST_P(DynamicPartitionControlAndroidGroupTestP, GroupTooBig) {
+  DeltaArchiveManifest update_manifest;
+  AddGroup(&update_manifest, "android", 3_GiB);
+  AddGroup(&update_manifest, "oem", 3_GiB);
+  EXPECT_FALSE(UpdatePartitionMetadata(source_manifest, update_manifest, {}))
+      << "Should not be able to grow over size of super / 2";
+}
+
+// A new partition ("system_ext") can be added to an existing group as long
+// as the group's total stays within its maximum size.
+TEST_P(DynamicPartitionControlAndroidGroupTestP, AddPartitionToGroup) {
+  DeltaArchiveManifest expected;
+  auto* g = AddGroup(&expected, T("android"), 3_GiB);
+  AddPartition(&expected, g, T("system"), 2_GiB);
+  AddPartition(&expected, g, T("system_ext"), 1_GiB);
+
+  DeltaArchiveManifest update_manifest;
+  g = AddGroup(&update_manifest, "android", 3_GiB);
+  AddPartition(&update_manifest, g, "system", 2_GiB);
+  AddPartition(&update_manifest, g, "system_ext", 1_GiB);
+  AddGroupAndPartition(&update_manifest, "oem", 2_GiB, "vendor", 2_GiB);
+
+  EXPECT_TRUE(
+      UpdatePartitionMetadata(source_manifest, update_manifest, expected));
+}
+
+// A group listed without its partition drops that partition from the target
+// metadata; the (now empty) group itself remains.
+TEST_P(DynamicPartitionControlAndroidGroupTestP, RemovePartitionFromGroup) {
+  DeltaArchiveManifest expected;
+  AddGroup(&expected, T("android"), 3_GiB);
+
+  DeltaArchiveManifest update_manifest;
+  AddGroup(&update_manifest, "android", 3_GiB);
+  AddGroupAndPartition(&update_manifest, "oem", 2_GiB, "vendor", 2_GiB);
+
+  EXPECT_TRUE(
+      UpdatePartitionMetadata(source_manifest, update_manifest, expected));
+}
+
+// A brand-new group ("new_group") with its partition can be added alongside
+// the existing groups.
+TEST_P(DynamicPartitionControlAndroidGroupTestP, AddGroup) {
+  DeltaArchiveManifest expected;
+  AddGroupAndPartition(
+      &expected, T("new_group"), 2_GiB, T("new_partition"), 2_GiB);
+
+  DeltaArchiveManifest update_manifest;
+  AddGroupAndPartition(&update_manifest, "android", 2_GiB, "system", 2_GiB);
+  AddGroupAndPartition(&update_manifest, "oem", 1_GiB, "vendor", 1_GiB);
+  AddGroupAndPartition(
+      &update_manifest, "new_group", 2_GiB, "new_partition", 2_GiB);
+  EXPECT_TRUE(
+      UpdatePartitionMetadata(source_manifest, update_manifest, expected));
+}
+
+// A group absent from the update manifest ("oem") must be removed from the
+// target metadata entirely.
+TEST_P(DynamicPartitionControlAndroidGroupTestP, RemoveGroup) {
+  DeltaArchiveManifest update_manifest;
+  AddGroupAndPartition(&update_manifest, "android", 2_GiB, "system", 2_GiB);
+
+  EXPECT_TRUE(UpdatePartitionMetadata(
+      source_manifest, update_manifest, Not(HasGroup(T("oem")))));
+}
+
+// A group's maximum size can itself be resized ("oem" grows 2 GiB -> 3 GiB),
+// allowing its partition to grow with it.
+TEST_P(DynamicPartitionControlAndroidGroupTestP, ResizeGroup) {
+  DeltaArchiveManifest expected;
+  AddGroupAndPartition(&expected, T("android"), 2_GiB, T("system"), 2_GiB);
+  AddGroupAndPartition(&expected, T("oem"), 3_GiB, T("vendor"), 3_GiB);
+  DeltaArchiveManifest update_manifest;
+  // Two separate statements; the original accidentally joined them with the
+  // comma operator.
+  AddGroupAndPartition(&update_manifest, "android", 2_GiB, "system", 2_GiB);
+  AddGroupAndPartition(&update_manifest, "oem", 3_GiB, "vendor", 3_GiB);
+  EXPECT_TRUE(
+      UpdatePartitionMetadata(source_manifest, update_manifest, expected));
+}
+
+// Run every group test for both slot orders.
+// INSTANTIATE_TEST_CASE_P is deprecated in googletest; use the _SUITE_ form.
+INSTANTIATE_TEST_SUITE_P(DynamicPartitionControlAndroidTest,
+                         DynamicPartitionControlAndroidGroupTestP,
+                         testing::Values(TestParam{0, 1}, TestParam{1, 0}));
+
+// Partition layout before the first simulated update. The "other" (b) slot
+// is mostly 0-sized, as on a freshly manufactured device.
+// Note: returning a const-qualified value inhibits move semantics and has no
+// effect on callers, so the const qualifier is dropped.
+PartitionSuffixSizes update_sizes_0() {
+  // Initial state is 0 for "other" slot.
+  return {
+      {"grown_a", 2_GiB},
+      {"shrunk_a", 1_GiB},
+      {"same_a", 100_MiB},
+      {"deleted_a", 150_MiB},
+      // no added_a
+      {"grown_b", 200_MiB},
+      // simulate system_other
+      {"shrunk_b", 0},
+      {"same_b", 0},
+      {"deleted_b", 0},
+      // no added_b
+  };
+}
+
+// Expected layout after the first simulated update (written into slot b):
+// "grown" grows, "shrunk" shrinks, "added" appears, "deleted" is gone.
+// Const qualifier on the by-value return dropped (it inhibits moves).
+PartitionSuffixSizes update_sizes_1() {
+  return {
+      {"grown_a", 2_GiB},
+      {"shrunk_a", 1_GiB},
+      {"same_a", 100_MiB},
+      {"deleted_a", 150_MiB},
+      // no added_a
+      {"grown_b", 3_GiB},
+      {"shrunk_b", 150_MiB},
+      {"same_b", 100_MiB},
+      {"added_b", 150_MiB},
+      // no deleted_b
+  };
+}
+
+// Expected layout after the second simulated update (written into slot a):
+// "added" is deleted again and "deleted" is re-added.
+// Const qualifier on the by-value return dropped (it inhibits moves).
+PartitionSuffixSizes update_sizes_2() {
+  return {
+      {"grown_a", 4_GiB},
+      {"shrunk_a", 100_MiB},
+      {"same_a", 100_MiB},
+      {"deleted_a", 64_MiB},
+      // no added_a
+      {"grown_b", 3_GiB},
+      {"shrunk_b", 150_MiB},
+      {"same_b", 100_MiB},
+      {"added_b", 150_MiB},
+      // no deleted_b
+  };
+}
+
+// Test case for first update after the device is manufactured, in which
+// case the "other" slot is likely of size "0" (except system, which is
+// non-zero because of system_other partition). Expects the target metadata
+// to become update_sizes_1() and all target partitions to be unmapped first.
+TEST_F(DynamicPartitionControlAndroidTest, SimulatedFirstUpdate) {
+  SetSlots({0, 1});
+
+  SetMetadata(source(), update_sizes_0());
+  SetMetadata(target(), update_sizes_0());
+  ExpectStoreMetadata(update_sizes_1());
+  ExpectUnmap({"grown_b", "shrunk_b", "same_b", "added_b"});
+
+  EXPECT_TRUE(PreparePartitionsForUpdate({{"grown", 3_GiB},
+                                          {"shrunk", 150_MiB},
+                                          {"same", 100_MiB},
+                                          {"added", 150_MiB}}));
+}
+
+// After first update, test for the second update. In the second update, the
+// "added" partition is deleted and "deleted" partition is re-added. Slots are
+// swapped relative to SimulatedFirstUpdate (now updating a from b).
+TEST_F(DynamicPartitionControlAndroidTest, SimulatedSecondUpdate) {
+  SetSlots({1, 0});
+
+  SetMetadata(source(), update_sizes_1());
+  SetMetadata(target(), update_sizes_0());
+
+  ExpectStoreMetadata(update_sizes_2());
+  ExpectUnmap({"grown_a", "shrunk_a", "same_a", "deleted_a"});
+
+  EXPECT_TRUE(PreparePartitionsForUpdate({{"grown", 4_GiB},
+                                          {"shrunk", 100_MiB},
+                                          {"same", 100_MiB},
+                                          {"deleted", 64_MiB}}));
+}
+
+// source == target is invalid: an update must never be applied to the slot
+// that is currently running.
+TEST_F(DynamicPartitionControlAndroidTest, ApplyingToCurrentSlot) {
+  SetSlots({1, 1});
+  EXPECT_FALSE(PreparePartitionsForUpdate({}))
+      << "Should not be able to apply to current slot.";
+}
+
+// OptimizeOperation should strip SOURCE_COPY extents whose source and
+// destination blocks coincide (a no-op on snapshotted Virtual A/B devices),
+// and must refuse to optimize anything else.
+TEST_P(DynamicPartitionControlAndroidTestP, OptimizeOperationTest) {
+  ASSERT_TRUE(dynamicControl().PreparePartitionsForUpdate(
+      source(),
+      target(),
+      PartitionSizesToManifest({{"foo", 4_MiB}}),
+      false,
+      nullptr));
+  // Mark "foo" as a mapped dynamic partition; "bar" (used at the end) is not.
+  dynamicControl().set_fake_mapped_devices({T("foo")});
+
+  InstallOperation iop;
+  InstallOperation optimized;
+  Extent *se, *de;
+
+  // Not a SOURCE_COPY operation, cannot skip.
+  iop.set_type(InstallOperation::REPLACE);
+  EXPECT_FALSE(dynamicControl().OptimizeOperation("foo", iop, &optimized));
+
+  iop.set_type(InstallOperation::SOURCE_COPY);
+
+  // By default GetVirtualAbFeatureFlag is disabled. Cannot skip operation.
+  EXPECT_FALSE(dynamicControl().OptimizeOperation("foo", iop, &optimized));
+
+  // Enable GetVirtualAbFeatureFlag in the mock interface.
+  ON_CALL(dynamicControl(), GetVirtualAbFeatureFlag())
+      .WillByDefault(Return(FeatureFlag(FeatureFlag::Value::LAUNCH)));
+
+  // By default target_supports_snapshot_ is set to false. Cannot skip
+  // operation.
+  EXPECT_FALSE(dynamicControl().OptimizeOperation("foo", iop, &optimized));
+
+  SetSnapshotEnabled(true);
+
+  // Empty source and destination. Skip.
+  EXPECT_TRUE(dynamicControl().OptimizeOperation("foo", iop, &optimized));
+  EXPECT_TRUE(optimized.src_extents().empty());
+  EXPECT_TRUE(optimized.dst_extents().empty());
+
+  se = iop.add_src_extents();
+  se->set_start_block(0);
+  se->set_num_blocks(1);
+
+  // There is something in sources, but destinations are empty. Cannot skip.
+  EXPECT_FALSE(dynamicControl().OptimizeOperation("foo", iop, &optimized));
+
+  InstallOperation iop2;
+
+  de = iop2.add_dst_extents();
+  de->set_start_block(0);
+  de->set_num_blocks(1);
+
+  // There is something in destinations, but sources are empty. Cannot skip.
+  EXPECT_FALSE(dynamicControl().OptimizeOperation("foo", iop2, &optimized));
+
+  de = iop.add_dst_extents();
+  de->set_start_block(0);
+  de->set_num_blocks(1);
+
+  // Sources and destinations are identical. Skip.
+  EXPECT_TRUE(dynamicControl().OptimizeOperation("foo", iop, &optimized));
+  EXPECT_TRUE(optimized.src_extents().empty());
+  EXPECT_TRUE(optimized.dst_extents().empty());
+
+  se = iop.add_src_extents();
+  se->set_start_block(1);
+  se->set_num_blocks(5);
+
+  // There is something in source, but not in destination. Cannot skip.
+  EXPECT_FALSE(dynamicControl().OptimizeOperation("foo", iop, &optimized));
+
+  de = iop.add_dst_extents();
+  de->set_start_block(1);
+  de->set_num_blocks(5);
+
+  // Source and destination are equal again. Skip.
+  EXPECT_TRUE(dynamicControl().OptimizeOperation("foo", iop, &optimized));
+  EXPECT_TRUE(optimized.src_extents().empty());
+  EXPECT_TRUE(optimized.dst_extents().empty());
+
+  de = iop.add_dst_extents();
+  de->set_start_block(6);
+  de->set_num_blocks(5);
+
+  // There is something extra in dest. Cannot skip.
+  EXPECT_FALSE(dynamicControl().OptimizeOperation("foo", iop, &optimized));
+
+  se = iop.add_src_extents();
+  se->set_start_block(6);
+  se->set_num_blocks(5);
+
+  // Source and dest are identical again. Skip.
+  EXPECT_TRUE(dynamicControl().OptimizeOperation("foo", iop, &optimized));
+  EXPECT_TRUE(optimized.src_extents().empty());
+  EXPECT_TRUE(optimized.dst_extents().empty());
+
+  // Partial-overlap case: only identical (block-aligned) runs are stripped.
+  iop.Clear();
+  iop.set_type(InstallOperation::SOURCE_COPY);
+  se = iop.add_src_extents();
+  se->set_start_block(1);
+  se->set_num_blocks(1);
+  se = iop.add_src_extents();
+  se->set_start_block(3);
+  se->set_num_blocks(2);
+  se = iop.add_src_extents();
+  se->set_start_block(7);
+  se->set_num_blocks(2);
+  de = iop.add_dst_extents();
+  de->set_start_block(2);
+  de->set_num_blocks(5);
+
+  // [1, 3, 4, 7, 8] -> [2, 3, 4, 5, 6] should return [1, 7, 8] -> [2, 5, 6]
+  EXPECT_TRUE(dynamicControl().OptimizeOperation("foo", iop, &optimized));
+  ASSERT_EQ(2, optimized.src_extents_size());
+  ASSERT_EQ(2, optimized.dst_extents_size());
+  EXPECT_EQ(1u, optimized.src_extents(0).start_block());
+  EXPECT_EQ(1u, optimized.src_extents(0).num_blocks());
+  EXPECT_EQ(2u, optimized.dst_extents(0).start_block());
+  EXPECT_EQ(1u, optimized.dst_extents(0).num_blocks());
+  EXPECT_EQ(7u, optimized.src_extents(1).start_block());
+  EXPECT_EQ(2u, optimized.src_extents(1).num_blocks());
+  EXPECT_EQ(5u, optimized.dst_extents(1).start_block());
+  EXPECT_EQ(2u, optimized.dst_extents(1).num_blocks());
+
+  // Don't skip for static partitions.
+  EXPECT_FALSE(dynamicControl().OptimizeOperation("bar", iop, &optimized));
+}
+
+// ResetUpdate should succeed with the default (non-Virtual A/B) mock setup.
+TEST_F(DynamicPartitionControlAndroidTest, ResetUpdate) {
+  MockPrefs prefs;
+  ASSERT_TRUE(dynamicControl().ResetUpdate(&prefs));
+}
+
+// An fstab whose /postinstall entries carry no avb_keys option must be
+// detected as AVB-disabled (Optional(false), not nullopt).
+TEST_F(DynamicPartitionControlAndroidTest, IsAvbNotEnabledInFstab) {
+  // clang-format off
+  std::string fstab_content =
+      "system /postinstall ext4 ro,nosuid,nodev,noexec slotselect_other,logical\n"  // NOLINT(whitespace/line_length)
+      "/dev/block/by-name/system /postinstall ext4 ro,nosuid,nodev,noexec slotselect_other\n";  // NOLINT(whitespace/line_length)
+  // clang-format on
+  ScopedTempFile fstab;
+  ASSERT_TRUE(test_utils::WriteFileString(fstab.path(), fstab_content));
+  ASSERT_THAT(dynamicControl().RealIsAvbEnabledInFstab(fstab.path()),
+              Optional(false));
+}
+
+// An fstab /postinstall entry carrying an avb_keys= option must be detected
+// as AVB-enabled.
+TEST_F(DynamicPartitionControlAndroidTest, IsAvbEnabledInFstab) {
+  // clang-format off
+  std::string fstab_content =
+      "system /postinstall ext4 ro,nosuid,nodev,noexec slotselect_other,logical,avb_keys=/foo\n";  // NOLINT(whitespace/line_length)
+  // clang-format on
+  ScopedTempFile fstab;
+  ASSERT_TRUE(test_utils::WriteFileString(fstab.path(), fstab_content));
+  ASSERT_THAT(dynamicControl().RealIsAvbEnabledInFstab(fstab.path()),
+              Optional(true));
+}
+
+TEST_P(DynamicPartitionControlAndroidTestP, AvbNotEnabledOnSystemOther) {
+  ON_CALL(dynamicControl(), GetSystemOtherPath(_, _, _, _, _))
+      .WillByDefault(Invoke([&](auto source_slot,
+                                auto target_slot,
+                                const auto& name,
+                                auto path,
+                                auto should_unmap) {
+        return dynamicControl().RealGetSystemOtherPath(
+            source_slot, target_slot, name, path, should_unmap);
+      }));
+  ON_CALL(dynamicControl(), IsAvbEnabledOnSystemOther())
+      .WillByDefault(Return(false));
+  EXPECT_TRUE(
+      dynamicControl().RealEraseSystemOtherAvbFooter(source(), target()));
+}
+
+TEST_P(DynamicPartitionControlAndroidTestP, NoSystemOtherToErase) {
+  SetMetadata(source(), {{S("system"), 100_MiB}});
+  ON_CALL(dynamicControl(), IsAvbEnabledOnSystemOther())
+      .WillByDefault(Return(true));
+  std::string path;
+  bool should_unmap;
+  ASSERT_TRUE(dynamicControl().RealGetSystemOtherPath(
+      source(), target(), T("system"), &path, &should_unmap));
+  ASSERT_TRUE(path.empty()) << path;
+  ASSERT_FALSE(should_unmap);
+  ON_CALL(dynamicControl(), GetSystemOtherPath(_, _, _, _, _))
+      .WillByDefault(Invoke([&](auto source_slot,
+                                auto target_slot,
+                                const auto& name,
+                                auto path,
+                                auto should_unmap) {
+        return dynamicControl().RealGetSystemOtherPath(
+            source_slot, target_slot, name, path, should_unmap);
+      }));
+  EXPECT_TRUE(
+      dynamicControl().RealEraseSystemOtherAvbFooter(source(), target()));
+}
+
+TEST_P(DynamicPartitionControlAndroidTestP, SkipEraseUpdatedSystemOther) {
+  PartitionSuffixSizes sizes{{S("system"), 100_MiB}, {T("system"), 100_MiB}};
+  SetMetadata(source(), sizes, LP_PARTITION_ATTR_UPDATED);
+  ON_CALL(dynamicControl(), IsAvbEnabledOnSystemOther())
+      .WillByDefault(Return(true));
+  std::string path;
+  bool should_unmap;
+  ASSERT_TRUE(dynamicControl().RealGetSystemOtherPath(
+      source(), target(), T("system"), &path, &should_unmap));
+  ASSERT_TRUE(path.empty()) << path;
+  ASSERT_FALSE(should_unmap);
+  ON_CALL(dynamicControl(), GetSystemOtherPath(_, _, _, _, _))
+      .WillByDefault(Invoke([&](auto source_slot,
+                                auto target_slot,
+                                const auto& name,
+                                auto path,
+                                auto should_unmap) {
+        return dynamicControl().RealGetSystemOtherPath(
+            source_slot, target_slot, name, path, should_unmap);
+      }));
+  EXPECT_TRUE(
+      dynamicControl().RealEraseSystemOtherAvbFooter(source(), target()));
+}
+
+TEST_P(DynamicPartitionControlAndroidTestP, EraseSystemOtherAvbFooter) {
+  constexpr uint64_t file_size = 1_MiB;
+  static_assert(file_size > AVB_FOOTER_SIZE);
+  ScopedTempFile system_other;
+  brillo::Blob original(file_size, 'X');
+  ASSERT_TRUE(test_utils::WriteFileVector(system_other.path(), original));
+  std::string mnt_path;
+  ScopedLoopbackDeviceBinder dev(system_other.path(), true, &mnt_path);
+  ASSERT_TRUE(dev.is_bound());
+
+  brillo::Blob device_content;
+  ASSERT_TRUE(utils::ReadFile(mnt_path, &device_content));
+  ASSERT_EQ(original, device_content);
+
+  PartitionSuffixSizes sizes{{S("system"), 100_MiB}, {T("system"), file_size}};
+  SetMetadata(source(), sizes);
+  ON_CALL(dynamicControl(), IsAvbEnabledOnSystemOther())
+      .WillByDefault(Return(true));
+  EXPECT_CALL(dynamicControl(),
+              GetSystemOtherPath(source(), target(), T("system"), _, _))
+      .WillRepeatedly(
+          Invoke([&](auto, auto, const auto&, auto path, auto should_unmap) {
+            *path = mnt_path;
+            *should_unmap = false;
+            return true;
+          }));
+  ASSERT_TRUE(
+      dynamicControl().RealEraseSystemOtherAvbFooter(source(), target()));
+
+  device_content.clear();
+  ASSERT_TRUE(utils::ReadFile(mnt_path, &device_content));
+  brillo::Blob new_expected(original);
+  // Clear the last AVB_FOOTER_SIZE bytes.
+  new_expected.resize(file_size - AVB_FOOTER_SIZE);
+  new_expected.resize(file_size, '\0');
+  ASSERT_EQ(new_expected, device_content);
+}
+
+class FakeAutoDevice : public android::snapshot::AutoDevice {
+ public:
+  FakeAutoDevice() : AutoDevice("") {}
+};
+
+class SnapshotPartitionTestP : public DynamicPartitionControlAndroidTestP {
+ public:
+  void SetUp() override {
+    DynamicPartitionControlAndroidTestP::SetUp();
+    ON_CALL(dynamicControl(), GetVirtualAbFeatureFlag())
+        .WillByDefault(Return(FeatureFlag(FeatureFlag::Value::LAUNCH)));
+
+    snapshot_ = new NiceMock<MockSnapshotManager>();
+    dynamicControl().snapshot_.reset(snapshot_);  // takes ownership
+    EXPECT_CALL(*snapshot_, BeginUpdate()).WillOnce(Return(true));
+    EXPECT_CALL(*snapshot_, EnsureMetadataMounted())
+        .WillRepeatedly(
+            Invoke([]() { return std::make_unique<FakeAutoDevice>(); }));
+
+    manifest_ =
+        PartitionSizesToManifest({{"system", 3_GiB}, {"vendor", 1_GiB}});
+  }
+  void ExpectCreateUpdateSnapshots(android::snapshot::Return val) {
+    manifest_.mutable_dynamic_partition_metadata()->set_snapshot_enabled(true);
+    EXPECT_CALL(*snapshot_, CreateUpdateSnapshots(_))
+        .WillRepeatedly(Invoke([&, val](const auto& manifest) {
+          // Deep comparison requires full protobuf library. Comparing the
+          // pointers is sufficient.
+          EXPECT_EQ(&manifest_, &manifest);
+          LOG(WARNING) << "CreateUpdateSnapshots returning " << val.string();
+          return val;
+        }));
+  }
+  bool PreparePartitionsForUpdate(uint64_t* required_size) {
+    return dynamicControl().PreparePartitionsForUpdate(
+        source(), target(), manifest_, true /* update */, required_size);
+  }
+  MockSnapshotManager* snapshot_ = nullptr;
+  DeltaArchiveManifest manifest_;
+};
+
+// Test happy path of PreparePartitionsForUpdate on a Virtual A/B device.
+TEST_P(SnapshotPartitionTestP, PreparePartitions) {
+  ExpectCreateUpdateSnapshots(android::snapshot::Return::Ok());
+  uint64_t required_size = 0;
+  EXPECT_TRUE(PreparePartitionsForUpdate(&required_size));
+  EXPECT_EQ(0u, required_size);
+}
+
+// Test that if not enough space, required size returned by SnapshotManager is
+// passed up.
+TEST_P(SnapshotPartitionTestP, PreparePartitionsNoSpace) {
+  ExpectCreateUpdateSnapshots(android::snapshot::Return::NoSpace(1_GiB));
+  uint64_t required_size = 0;
+  EXPECT_FALSE(PreparePartitionsForUpdate(&required_size));
+  EXPECT_EQ(1_GiB, required_size);
+}
+
+// Test that in recovery, use empty space in super partition for a snapshot
+// update first.
+TEST_P(SnapshotPartitionTestP, RecoveryUseSuperEmpty) {
+  ExpectCreateUpdateSnapshots(android::snapshot::Return::Ok());
+  EXPECT_CALL(dynamicControl(), IsRecovery()).WillRepeatedly(Return(true));
+  // Must not call PrepareDynamicPartitionsForUpdate if
+  // PrepareSnapshotPartitionsForUpdate succeeds.
+  EXPECT_CALL(dynamicControl(), PrepareDynamicPartitionsForUpdate(_, _, _, _))
+      .Times(0);
+  uint64_t required_size = 0;
+  EXPECT_TRUE(PreparePartitionsForUpdate(&required_size));
+  EXPECT_EQ(0u, required_size);
+}
+
+// Test that in recovery, if CreateUpdateSnapshots throws an error, try
+// the flashing path for full updates.
+TEST_P(SnapshotPartitionTestP, RecoveryErrorShouldDeleteSource) {
+  // Expectation on PreparePartitionsForUpdate
+  ExpectCreateUpdateSnapshots(android::snapshot::Return::NoSpace(1_GiB));
+  EXPECT_CALL(dynamicControl(), IsRecovery()).WillRepeatedly(Return(true));
+  EXPECT_CALL(*snapshot_, CancelUpdate()).WillOnce(Return(true));
+  EXPECT_CALL(dynamicControl(), PrepareDynamicPartitionsForUpdate(_, _, _, _))
+      .WillRepeatedly(Invoke([&](auto source_slot,
+                                 auto target_slot,
+                                 const auto& manifest,
+                                 auto delete_source) {
+        EXPECT_EQ(source(), source_slot);
+        EXPECT_EQ(target(), target_slot);
+        // Deep comparison requires full protobuf library. Comparing the
+        // pointers is sufficient.
+        EXPECT_EQ(&manifest_, &manifest);
+        EXPECT_TRUE(delete_source);
+        return dynamicControl().RealPrepareDynamicPartitionsForUpdate(
+            source_slot, target_slot, manifest, delete_source);
+      }));
+  // Only one slot of space in super
+  uint64_t super_size = kDefaultGroupSize + 1_MiB;
+  // Expectation on PrepareDynamicPartitionsForUpdate
+  SetMetadata(
+      source(), {{S("system"), 2_GiB}, {S("vendor"), 1_GiB}}, 0, super_size);
+  ExpectUnmap({T("system"), T("vendor")});
+  // Expect that the source partitions aren't present in target super metadata.
+  ExpectStoreMetadata({{T("system"), 3_GiB}, {T("vendor"), 1_GiB}});
+
+  uint64_t required_size = 0;
+  EXPECT_TRUE(PreparePartitionsForUpdate(&required_size));
+  EXPECT_EQ(0u, required_size);
+}
+
+INSTANTIATE_TEST_CASE_P(DynamicPartitionControlAndroidTest,
+                        SnapshotPartitionTestP,
+                        testing::Values(TestParam{0, 1}, TestParam{1, 0}));
+
+}  // namespace chromeos_update_engine
diff --git a/dynamic_partition_control_interface.h b/dynamic_partition_control_interface.h
deleted file mode 100644
index 86a0730..0000000
--- a/dynamic_partition_control_interface.h
+++ /dev/null
@@ -1,98 +0,0 @@
-//
-// Copyright (C) 2018 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#ifndef UPDATE_ENGINE_DYNAMIC_PARTITION_CONTROL_INTERFACE_H_
-#define UPDATE_ENGINE_DYNAMIC_PARTITION_CONTROL_INTERFACE_H_
-
-#include <stdint.h>
-
-#include <memory>
-#include <string>
-
-#include <base/files/file_util.h>
-#include <libdm/dm.h>
-#include <liblp/builder.h>
-
-namespace chromeos_update_engine {
-
-class DynamicPartitionControlInterface {
- public:
-  virtual ~DynamicPartitionControlInterface() = default;
-
-  // Return true iff dynamic partitions is enabled on this device.
-  virtual bool IsDynamicPartitionsEnabled() = 0;
-
-  // Return true iff dynamic partitions is retrofitted on this device.
-  virtual bool IsDynamicPartitionsRetrofit() = 0;
-
-  // Map logical partition on device-mapper.
-  // |super_device| is the device path of the physical partition ("super").
-  // |target_partition_name| is the identifier used in metadata; for example,
-  // "vendor_a"
-  // |slot| is the selected slot to mount; for example, 0 for "_a".
-  // Returns true if mapped successfully; if so, |path| is set to the device
-  // path of the mapped logical partition.
-  virtual bool MapPartitionOnDeviceMapper(
-      const std::string& super_device,
-      const std::string& target_partition_name,
-      uint32_t slot,
-      bool force_writable,
-      std::string* path) = 0;
-
-  // Unmap logical partition on device mapper. This is the reverse operation
-  // of MapPartitionOnDeviceMapper.
-  // If |wait| is set, wait until the device is unmapped.
-  // Returns true if unmapped successfully.
-  virtual bool UnmapPartitionOnDeviceMapper(
-      const std::string& target_partition_name, bool wait) = 0;
-
-  // Do necessary cleanups before destroying the object.
-  virtual void Cleanup() = 0;
-
-  // Return true if a static partition exists at device path |path|.
-  virtual bool DeviceExists(const std::string& path) = 0;
-
-  // Returns the current state of the underlying device mapper device
-  // with given name.
-  // One of INVALID, SUSPENDED or ACTIVE.
-  virtual android::dm::DmDeviceState GetState(const std::string& name) = 0;
-
-  // Returns the path to the device mapper device node in '/dev' corresponding
-  // to 'name'. If the device does not exist, false is returned, and the path
-  // parameter is not set.
-  virtual bool GetDmDevicePathByName(const std::string& name,
-                                     std::string* path) = 0;
-
-  // Retrieve metadata from |super_device| at slot |source_slot|.
-  // On retrofit devices, if |target_slot| != kInvalidSlot, the returned
-  // metadata automatically includes block devices at |target_slot|.
-  virtual std::unique_ptr<android::fs_mgr::MetadataBuilder> LoadMetadataBuilder(
-      const std::string& super_device,
-      uint32_t source_slot,
-      uint32_t target_slot) = 0;
-
-  // Write metadata |builder| to |super_device| at slot |target_slot|.
-  virtual bool StoreMetadata(const std::string& super_device,
-                             android::fs_mgr::MetadataBuilder* builder,
-                             uint32_t target_slot) = 0;
-
-  // Return a possible location for devices listed by name.
-  virtual bool GetDeviceDir(std::string* path) = 0;
-};
-
-}  // namespace chromeos_update_engine
-
-#endif  // UPDATE_ENGINE_DYNAMIC_PARTITION_CONTROL_INTERFACE_H_
diff --git a/dynamic_partition_test_utils.h b/dynamic_partition_test_utils.h
new file mode 100644
index 0000000..d701dce
--- /dev/null
+++ b/dynamic_partition_test_utils.h
@@ -0,0 +1,288 @@
+//
+// Copyright (C) 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_DYNAMIC_PARTITION_TEST_UTILS_H_
+#define UPDATE_ENGINE_DYNAMIC_PARTITION_TEST_UTILS_H_
+
+#include <stdint.h>
+
+#include <iostream>
+#include <map>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include <base/strings/string_util.h>
+#include <fs_mgr.h>
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+#include <liblp/builder.h>
+#include <storage_literals/storage_literals.h>
+
+#include "update_engine/common/boot_control_interface.h"
+#include "update_engine/update_metadata.pb.h"
+
+namespace chromeos_update_engine {
+
+using android::fs_mgr::MetadataBuilder;
+using testing::_;
+using testing::MakeMatcher;
+using testing::Matcher;
+using testing::MatcherInterface;
+using testing::MatchResultListener;
+using namespace android::storage_literals;  // NOLINT(build/namespaces)
+
+constexpr const uint32_t kMaxNumSlots = 2;
+constexpr const char* kSlotSuffixes[kMaxNumSlots] = {"_a", "_b"};
+constexpr const char* kFakeDevicePath = "/fake/dev/path/";
+constexpr const char* kFakeDmDevicePath = "/fake/dm/dev/path/";
+constexpr const uint32_t kFakeMetadataSize = 65536;
+constexpr const char* kDefaultGroup = "foo";
+constexpr const char* kFakeSuper = "fake_super";
+
+// A map describing the size of each partition.
+// "{name, size}"
+using PartitionSizes = std::map<std::string, uint64_t>;
+
+// "{name_a, size}"
+using PartitionSuffixSizes = std::map<std::string, uint64_t>;
+
+constexpr uint64_t kDefaultGroupSize = 5_GiB;
+// Super device size. 1 MiB for metadata.
+constexpr uint64_t kDefaultSuperSize = kDefaultGroupSize * 2 + 1_MiB;
+
+template <typename U, typename V>
+inline std::ostream& operator<<(std::ostream& os, const std::map<U, V>& param) {
+  os << "{";
+  bool first = true;
+  for (const auto& pair : param) {
+    if (!first)
+      os << ", ";
+    os << pair.first << ":" << pair.second;
+    first = false;
+  }
+  return os << "}";
+}
+
+template <typename V>
+inline void VectorToStream(std::ostream& os, const V& param) {
+  os << "[";
+  bool first = true;
+  for (const auto& e : param) {
+    if (!first)
+      os << ", ";
+    os << e;
+    first = false;
+  }
+  os << "]";
+}
+
+inline std::ostream& operator<<(std::ostream& os, const PartitionUpdate& p) {
+  return os << "{" << p.partition_name() << ", "
+            << p.new_partition_info().size() << "}";
+}
+
+inline std::ostream& operator<<(std::ostream& os,
+                                const DynamicPartitionGroup& g) {
+  os << "{" << g.name() << ", " << g.size() << ", ";
+  VectorToStream(os, g.partition_names());
+  return os << "}";
+}
+
+inline std::ostream& operator<<(std::ostream& os,
+                                const DeltaArchiveManifest& m) {
+  os << "{.groups = ";
+  VectorToStream(os, m.dynamic_partition_metadata().groups());
+  os << ", .partitions = ";
+  VectorToStream(os, m.partitions());
+  return os;
+}
+
+inline std::string GetDevice(const std::string& name) {
+  return kFakeDevicePath + name;
+}
+
+inline std::string GetDmDevice(const std::string& name) {
+  return kFakeDmDevicePath + name;
+}
+
+inline DynamicPartitionGroup* AddGroup(DeltaArchiveManifest* manifest,
+                                       const std::string& group,
+                                       uint64_t group_size) {
+  auto* g = manifest->mutable_dynamic_partition_metadata()->add_groups();
+  g->set_name(group);
+  g->set_size(group_size);
+  return g;
+}
+
+inline void AddPartition(DeltaArchiveManifest* manifest,
+                         DynamicPartitionGroup* group,
+                         const std::string& partition,
+                         uint64_t partition_size) {
+  group->add_partition_names(partition);
+  auto* p = manifest->add_partitions();
+  p->set_partition_name(partition);
+  p->mutable_new_partition_info()->set_size(partition_size);
+}
+
+// To support legacy tests, auto-convert {name_a: size} map to
+// DeltaArchiveManifest.
+inline DeltaArchiveManifest PartitionSuffixSizesToManifest(
+    const PartitionSuffixSizes& partition_sizes) {
+  DeltaArchiveManifest manifest;
+  for (const char* suffix : kSlotSuffixes) {
+    AddGroup(&manifest, std::string(kDefaultGroup) + suffix, kDefaultGroupSize);
+  }
+  for (const auto& pair : partition_sizes) {
+    for (size_t suffix_idx = 0; suffix_idx < kMaxNumSlots; ++suffix_idx) {
+      if (base::EndsWith(pair.first,
+                         kSlotSuffixes[suffix_idx],
+                         base::CompareCase::SENSITIVE)) {
+        AddPartition(
+            &manifest,
+            manifest.mutable_dynamic_partition_metadata()->mutable_groups(
+                suffix_idx),
+            pair.first,
+            pair.second);
+      }
+    }
+  }
+  return manifest;
+}
+
+// To support legacy tests, auto-convert {name: size} map to PartitionMetadata.
+inline DeltaArchiveManifest PartitionSizesToManifest(
+    const PartitionSizes& partition_sizes) {
+  DeltaArchiveManifest manifest;
+  auto* g = AddGroup(&manifest, std::string(kDefaultGroup), kDefaultGroupSize);
+  for (const auto& pair : partition_sizes) {
+    AddPartition(&manifest, g, pair.first, pair.second);
+  }
+  return manifest;
+}
+
+inline std::unique_ptr<MetadataBuilder> NewFakeMetadata(
+    const DeltaArchiveManifest& manifest,
+    uint32_t partition_attr = 0,
+    uint64_t super_size = kDefaultSuperSize) {
+  auto builder =
+      MetadataBuilder::New(super_size, kFakeMetadataSize, kMaxNumSlots);
+  for (const auto& group : manifest.dynamic_partition_metadata().groups()) {
+    EXPECT_TRUE(builder->AddGroup(group.name(), group.size()));
+    for (const auto& partition_name : group.partition_names()) {
+      EXPECT_NE(
+          nullptr,
+          builder->AddPartition(partition_name, group.name(), partition_attr));
+    }
+  }
+  for (const auto& partition : manifest.partitions()) {
+    auto p = builder->FindPartition(partition.partition_name());
+    EXPECT_TRUE(p && builder->ResizePartition(
+                         p, partition.new_partition_info().size()));
+  }
+  return builder;
+}
+
+class MetadataMatcher : public MatcherInterface<MetadataBuilder*> {
+ public:
+  explicit MetadataMatcher(const PartitionSuffixSizes& partition_sizes)
+      : manifest_(PartitionSuffixSizesToManifest(partition_sizes)) {}
+  explicit MetadataMatcher(const DeltaArchiveManifest& manifest)
+      : manifest_(manifest) {}
+
+  bool MatchAndExplain(MetadataBuilder* metadata,
+                       MatchResultListener* listener) const override {
+    bool success = true;
+    for (const auto& group : manifest_.dynamic_partition_metadata().groups()) {
+      for (const auto& partition_name : group.partition_names()) {
+        auto p = metadata->FindPartition(partition_name);
+        if (p == nullptr) {
+          if (!success)
+            *listener << "; ";
+          *listener << "No partition " << partition_name;
+          success = false;
+          continue;
+        }
+        const auto& partition_updates = manifest_.partitions();
+        auto it = std::find_if(partition_updates.begin(),
+                               partition_updates.end(),
+                               [&](const auto& p) {
+                                 return p.partition_name() == partition_name;
+                               });
+        if (it == partition_updates.end()) {
+          *listener << "Can't find partition update " << partition_name;
+          success = false;
+          continue;
+        }
+        auto partition_size = it->new_partition_info().size();
+        if (p->size() != partition_size) {
+          if (!success)
+            *listener << "; ";
+          *listener << "Partition " << partition_name << " has size "
+                    << p->size() << ", expected " << partition_size;
+          success = false;
+        }
+        if (p->group_name() != group.name()) {
+          if (!success)
+            *listener << "; ";
+          *listener << "Partition " << partition_name << " has group "
+                    << p->group_name() << ", expected " << group.name();
+          success = false;
+        }
+      }
+    }
+    return success;
+  }
+
+  void DescribeTo(std::ostream* os) const override {
+    *os << "expect: " << manifest_;
+  }
+
+  void DescribeNegationTo(std::ostream* os) const override {
+    *os << "expect not: " << manifest_;
+  }
+
+ private:
+  DeltaArchiveManifest manifest_;
+};
+
+inline Matcher<MetadataBuilder*> MetadataMatches(
+    const PartitionSuffixSizes& partition_sizes) {
+  return MakeMatcher(new MetadataMatcher(partition_sizes));
+}
+
+inline Matcher<MetadataBuilder*> MetadataMatches(
+    const DeltaArchiveManifest& manifest) {
+  return MakeMatcher(new MetadataMatcher(manifest));
+}
+
+MATCHER_P(HasGroup, group, " has group " + group) {
+  auto groups = arg->ListGroups();
+  return std::find(groups.begin(), groups.end(), group) != groups.end();
+}
+
+struct TestParam {
+  uint32_t source;
+  uint32_t target;
+};
+inline std::ostream& operator<<(std::ostream& os, const TestParam& param) {
+  return os << "{source: " << param.source << ", target:" << param.target
+            << "}";
+}
+
+}  // namespace chromeos_update_engine
+
+#endif  // UPDATE_ENGINE_DYNAMIC_PARTITION_TEST_UTILS_H_
diff --git a/dynamic_partition_utils.cc b/dynamic_partition_utils.cc
new file mode 100644
index 0000000..f9bd886
--- /dev/null
+++ b/dynamic_partition_utils.cc
@@ -0,0 +1,39 @@
+//
+// Copyright (C) 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/dynamic_partition_utils.h"
+
+#include <vector>
+
+#include <base/logging.h>
+#include <base/strings/string_util.h>
+
+using android::fs_mgr::MetadataBuilder;
+
+namespace chromeos_update_engine {
+
+void DeleteGroupsWithSuffix(MetadataBuilder* builder,
+                            const std::string& suffix) {
+  std::vector<std::string> groups = builder->ListGroups();
+  for (const auto& group_name : groups) {
+    if (base::EndsWith(group_name, suffix, base::CompareCase::SENSITIVE)) {
+      LOG(INFO) << "Removing group " << group_name;
+      builder->RemoveGroupAndPartitions(group_name);
+    }
+  }
+}
+
+}  // namespace chromeos_update_engine
diff --git a/dynamic_partition_utils.h b/dynamic_partition_utils.h
new file mode 100644
index 0000000..09fce00
--- /dev/null
+++ b/dynamic_partition_utils.h
@@ -0,0 +1,33 @@
+//
+// Copyright (C) 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_DYNAMIC_PARTITION_UTILS_H_
+#define UPDATE_ENGINE_DYNAMIC_PARTITION_UTILS_H_
+
+#include <string>
+
+#include <liblp/builder.h>
+
+namespace chromeos_update_engine {
+
+// Delete all groups (and their partitions) in |builder| that have names
+// ending with |suffix|.
+void DeleteGroupsWithSuffix(android::fs_mgr::MetadataBuilder* builder,
+                            const std::string& suffix);
+
+}  // namespace chromeos_update_engine
+
+#endif  // UPDATE_ENGINE_DYNAMIC_PARTITION_UTILS_H_
diff --git a/excluder_chromeos_unittest.cc b/excluder_chromeos_unittest.cc
index a8c14b3..dba77e4 100644
--- a/excluder_chromeos_unittest.cc
+++ b/excluder_chromeos_unittest.cc
@@ -29,7 +29,7 @@
 
 namespace chromeos_update_engine {
 
-constexpr char kDummyHash[] =
+constexpr char kFakeHash[] =
     "71ff43d76e2488e394e46872f5b066cc25e394c2c3e3790dd319517883b33db1";
 
 class ExcluderChromeOSTest : public ::testing::Test {
@@ -47,20 +47,20 @@
 };
 
 TEST_F(ExcluderChromeOSTest, ExclusionCheck) {
-  EXPECT_FALSE(excluder_->IsExcluded(kDummyHash));
-  EXPECT_TRUE(excluder_->Exclude(kDummyHash));
-  EXPECT_TRUE(excluder_->IsExcluded(kDummyHash));
+  EXPECT_FALSE(excluder_->IsExcluded(kFakeHash));
+  EXPECT_TRUE(excluder_->Exclude(kFakeHash));
+  EXPECT_TRUE(excluder_->IsExcluded(kFakeHash));
 }
 
 TEST_F(ExcluderChromeOSTest, ResetFlow) {
   EXPECT_TRUE(excluder_->Exclude("abc"));
-  EXPECT_TRUE(excluder_->Exclude(kDummyHash));
+  EXPECT_TRUE(excluder_->Exclude(kFakeHash));
   EXPECT_TRUE(excluder_->IsExcluded("abc"));
-  EXPECT_TRUE(excluder_->IsExcluded(kDummyHash));
+  EXPECT_TRUE(excluder_->IsExcluded(kFakeHash));
 
   EXPECT_TRUE(excluder_->Reset());
   EXPECT_FALSE(excluder_->IsExcluded("abc"));
-  EXPECT_FALSE(excluder_->IsExcluded(kDummyHash));
+  EXPECT_FALSE(excluder_->IsExcluded(kFakeHash));
 }
 
 }  // namespace chromeos_update_engine
diff --git a/hardware_android.cc b/hardware_android.cc
index 82f1b9a..4894522 100644
--- a/hardware_android.cc
+++ b/hardware_android.cc
@@ -17,19 +17,29 @@
 #include "update_engine/hardware_android.h"
 
 #include <sys/types.h>
+#include <sys/utsname.h>
 
 #include <memory>
+#include <string>
+#include <string_view>
 
+#include <android-base/parseint.h>
 #include <android-base/properties.h>
 #include <base/files/file_util.h>
 #include <bootloader_message/bootloader_message.h>
+#include <kver/kernel_release.h>
+#include <kver/utils.h>
 
+#include "update_engine/common/error_code_utils.h"
 #include "update_engine/common/hardware.h"
 #include "update_engine/common/platform_constants.h"
+#include "update_engine/common/utils.h"
 
 using android::base::GetBoolProperty;
 using android::base::GetIntProperty;
 using android::base::GetProperty;
+using android::kver::IsKernelUpdateValid;
+using android::kver::KernelRelease;
 using std::string;
 
 namespace chromeos_update_engine {
@@ -46,6 +56,11 @@
 const char kPropBootRevision[] = "ro.boot.revision";
 const char kPropBuildDateUTC[] = "ro.build.date.utc";
 
+string GetPartitionBuildDate(const string& partition_name) {
+  return android::base::GetProperty("ro." + partition_name + ".build.date.utc",
+                                    "");
+}
+
 }  // namespace
 
 namespace hardware {
@@ -181,7 +196,7 @@
 
 bool HardwareAndroid::GetNonVolatileDirectory(base::FilePath* path) const {
   base::FilePath local_path(constants::kNonVolatileDirectory);
-  if (!base::PathExists(local_path)) {
+  if (!base::DirectoryExists(local_path)) {
     LOG(ERROR) << "Non-volatile directory not found: " << local_path.value();
     return false;
   }
@@ -198,6 +213,13 @@
   return GetIntProperty<int64_t>(kPropBuildDateUTC, 0);
 }
 
+// Returns true if the device runs a userdebug build and explicitly allows OTA
+// downgrade.
+bool HardwareAndroid::AllowDowngrade() const {
+  return GetBoolProperty("ro.ota.allow_downgrade", false) &&
+         GetBoolProperty("ro.debuggable", false);
+}
+
 bool HardwareAndroid::GetFirstActiveOmahaPingSent() const {
   LOG(WARNING) << "STUB: Assuming first active omaha was never set.";
   return false;
@@ -209,4 +231,75 @@
   return true;
 }
 
+void HardwareAndroid::SetWarmReset(bool warm_reset) {
+  constexpr char warm_reset_prop[] = "ota.warm_reset";
+  if (!android::base::SetProperty(warm_reset_prop, warm_reset ? "1" : "0")) {
+    LOG(WARNING) << "Failed to set prop " << warm_reset_prop;
+  }
+}
+
+string HardwareAndroid::GetVersionForLogging(
+    const string& partition_name) const {
+  if (partition_name == "boot") {
+    struct utsname buf;
+    if (uname(&buf) != 0) {
+      PLOG(ERROR) << "Unable to call uname()";
+      return "";
+    }
+    auto kernel_release =
+        KernelRelease::Parse(buf.release, true /* allow_suffix */);
+    return kernel_release.has_value() ? kernel_release->string() : "";
+  }
+  return GetPartitionBuildDate(partition_name);
+}
+
+ErrorCode HardwareAndroid::IsPartitionUpdateValid(
+    const string& partition_name, const string& new_version) const {
+  if (partition_name == "boot") {
+    struct utsname buf;
+    if (uname(&buf) != 0) {
+      PLOG(ERROR) << "Unable to call uname()";
+      return ErrorCode::kError;
+    }
+    return IsKernelUpdateValid(buf.release, new_version);
+  }
+
+  const auto old_version = GetPartitionBuildDate(partition_name);
+  // TODO(zhangkelvin): for some partitions, missing a current timestamp should
+  // be an error, e.g. system, vendor, product etc.
+  auto error_code = utils::IsTimestampNewer(old_version, new_version);
+  if (error_code != ErrorCode::kSuccess) {
+    LOG(ERROR) << "Timestamp check failed with "
+               << utils::ErrorCodeToString(error_code)
+               << " Partition timestamp: " << old_version
+               << " Update timestamp: " << new_version;
+  }
+  return error_code;
+}
+
+ErrorCode HardwareAndroid::IsKernelUpdateValid(const string& old_release,
+                                               const string& new_release) {
+  // Check that the package either contains an empty version (indicating that the
+  // new build does not use GKI), or a valid GKI kernel release.
+  std::optional<KernelRelease> new_kernel_release;
+  if (new_release.empty()) {
+    LOG(INFO) << "New build does not contain GKI.";
+  } else {
+    new_kernel_release =
+        KernelRelease::Parse(new_release, true /* allow_suffix */);
+    if (!new_kernel_release.has_value()) {
+      LOG(ERROR) << "New kernel release is not valid GKI kernel release: "
+                 << new_release;
+      return ErrorCode::kDownloadManifestParseError;
+    }
+  }
+
+  auto old_kernel_release =
+      KernelRelease::Parse(old_release, true /* allow_suffix */);
+  return android::kver::IsKernelUpdateValid(old_kernel_release,
+                                            new_kernel_release)
+             ? ErrorCode::kSuccess
+             : ErrorCode::kPayloadTimestampError;
+}
+
 }  // namespace chromeos_update_engine
diff --git a/hardware_android.h b/hardware_android.h
index 6edf468..b670447 100644
--- a/hardware_android.h
+++ b/hardware_android.h
@@ -18,17 +18,20 @@
 #define UPDATE_ENGINE_HARDWARE_ANDROID_H_
 
 #include <string>
+#include <string_view>
 
 #include <base/macros.h>
 #include <base/time/time.h>
+#include <gtest/gtest_prod.h>
 
+#include "update_engine/common/error_code.h"
 #include "update_engine/common/hardware.h"
 #include "update_engine/common/hardware_interface.h"
 
 namespace chromeos_update_engine {
 
 // Implements the real interface with the hardware in the Android platform.
-class HardwareAndroid final : public HardwareInterface {
+class HardwareAndroid : public HardwareInterface {
  public:
   HardwareAndroid() = default;
   ~HardwareAndroid() override = default;
@@ -54,10 +57,23 @@
   bool GetNonVolatileDirectory(base::FilePath* path) const override;
   bool GetPowerwashSafeDirectory(base::FilePath* path) const override;
   int64_t GetBuildTimestamp() const override;
+  bool AllowDowngrade() const override;
   bool GetFirstActiveOmahaPingSent() const override;
   bool SetFirstActiveOmahaPingSent() override;
+  void SetWarmReset(bool warm_reset) override;
+  [[nodiscard]] std::string GetVersionForLogging(
+      const std::string& partition_name) const override;
+  [[nodiscard]] ErrorCode IsPartitionUpdateValid(
+      const std::string& partition_name,
+      const std::string& new_version) const override;
 
  private:
+  FRIEND_TEST(HardwareAndroidTest, IsKernelUpdateValid);
+
+  // Helper for IsPartitionUpdateValid.
+  static ErrorCode IsKernelUpdateValid(const std::string& old_release,
+                                       const std::string& new_release);
+
   DISALLOW_COPY_AND_ASSIGN(HardwareAndroid);
 };
 
diff --git a/hardware_android_unittest.cc b/hardware_android_unittest.cc
new file mode 100644
index 0000000..9a491f3
--- /dev/null
+++ b/hardware_android_unittest.cc
@@ -0,0 +1,67 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include <gtest/gtest.h>
+
+#include "update_engine/common/error_code.h"
+#include "update_engine/common/test_utils.h"
+#include "update_engine/hardware_android.h"
+
+namespace chromeos_update_engine {
+
+TEST(HardwareAndroidTest, IsKernelUpdateValid) {
+  EXPECT_EQ(ErrorCode::kSuccess,
+            HardwareAndroid::IsKernelUpdateValid("5.4.42-not-gki", ""))
+      << "Legacy update should be fine";
+
+  EXPECT_EQ(ErrorCode::kSuccess,
+            HardwareAndroid::IsKernelUpdateValid("5.4.42-not-gki",
+                                                 "5.4.42-android12-0"))
+      << "Update to GKI should be fine";
+
+  EXPECT_EQ(
+      ErrorCode::kDownloadManifestParseError,
+      HardwareAndroid::IsKernelUpdateValid("5.4.42-not-gki", "5.4.42-not-gki"))
+      << "Should report parse error for invalid version field";
+
+  EXPECT_EQ(ErrorCode::kSuccess,
+            HardwareAndroid::IsKernelUpdateValid(
+                "5.4.42-android12-0-something", "5.4.42-android12-0-something"))
+      << "Self update should be fine";
+
+  EXPECT_EQ(ErrorCode::kSuccess,
+            HardwareAndroid::IsKernelUpdateValid(
+                "5.4.42-android12-0-something", "5.4.43-android12-0-something"))
+      << "Sub-level update should be fine";
+
+  EXPECT_EQ(
+      ErrorCode::kSuccess,
+      HardwareAndroid::IsKernelUpdateValid("5.4.42-android12-0-something",
+                                           "5.10.10-android12-0-something"))
+      << "KMI version update should be fine";
+
+  EXPECT_EQ(ErrorCode::kPayloadTimestampError,
+            HardwareAndroid::IsKernelUpdateValid("5.4.42-android12-0-something",
+                                                 "5.4.5-android12-0-something"))
+      << "Should detect sub-level downgrade";
+
+  EXPECT_EQ(ErrorCode::kPayloadTimestampError,
+            HardwareAndroid::IsKernelUpdateValid("5.4.42-android12-0-something",
+                                                 "5.1.5-android12-0-something"))
+      << "Should detect KMI version downgrade";
+}
+
+}  // namespace chromeos_update_engine
diff --git a/hardware_chromeos.cc b/hardware_chromeos.cc
index 2aae9f0..cce5e84 100644
--- a/hardware_chromeos.cc
+++ b/hardware_chromeos.cc
@@ -366,4 +366,18 @@
   return true;
 }
 
+void HardwareChromeOS::SetWarmReset(bool warm_reset) {}
+
+std::string HardwareChromeOS::GetVersionForLogging(
+    const std::string& partition_name) const {
+  // TODO(zhangkelvin) Implement per-partition timestamp for Chrome OS.
+  return "";
+}
+
+ErrorCode HardwareChromeOS::IsPartitionUpdateValid(
+    const std::string& partition_name, const std::string& new_version) const {
+  // TODO(zhangkelvin) Implement per-partition timestamp for Chrome OS.
+  return ErrorCode::kSuccess;
+}
+
 }  // namespace chromeos_update_engine
diff --git a/hardware_chromeos.h b/hardware_chromeos.h
index 230e864..bbfe273 100644
--- a/hardware_chromeos.h
+++ b/hardware_chromeos.h
@@ -25,6 +25,7 @@
 #include <base/time/time.h>
 #include <debugd/dbus-proxies.h>
 
+#include "update_engine/common/error_code.h"
 #include "update_engine/common/hardware_interface.h"
 
 namespace chromeos_update_engine {
@@ -59,8 +60,15 @@
   bool GetNonVolatileDirectory(base::FilePath* path) const override;
   bool GetPowerwashSafeDirectory(base::FilePath* path) const override;
   int64_t GetBuildTimestamp() const override;
+  bool AllowDowngrade() const override { return false; }
   bool GetFirstActiveOmahaPingSent() const override;
   bool SetFirstActiveOmahaPingSent() override;
+  void SetWarmReset(bool warm_reset) override;
+  std::string GetVersionForLogging(
+      const std::string& partition_name) const override;
+  ErrorCode IsPartitionUpdateValid(
+      const std::string& partition_name,
+      const std::string& new_version) const override;
 
  private:
   friend class HardwareChromeOSTest;
diff --git a/libcurl_http_fetcher.cc b/libcurl_http_fetcher.cc
index d317d48..bce0920 100644
--- a/libcurl_http_fetcher.cc
+++ b/libcurl_http_fetcher.cc
@@ -31,6 +31,7 @@
 #include <base/strings/string_split.h>
 #include <base/strings/string_util.h>
 #include <base/strings/stringprintf.h>
+#include <base/threading/thread_task_runner_handle.h>
 
 #ifdef __ANDROID__
 #include <cutils/qtaguid.h>
@@ -78,6 +79,7 @@
 #ifdef __ANDROID__
   qtaguid_untagSocket(item);
 #endif  // __ANDROID__
+
   LibcurlHttpFetcher* fetcher = static_cast<LibcurlHttpFetcher*>(clientp);
   // Stop watching the socket before closing it.
   for (size_t t = 0; t < base::size(fetcher->fd_controller_maps_); ++t) {
@@ -456,6 +458,19 @@
     // There's either more work to do or we are paused, so we just keep the
     // file descriptors to watch up to date and exit, until we are done with the
     // work and we are not paused.
+#ifdef __ANDROID__
+    // When there's no base::SingleThreadTaskRunner on the current thread, it's
+    // not possible to watch file descriptors. Just poll again later. This
+    // usually happens if brillo::FakeMessageLoop is used.
+    if (!base::ThreadTaskRunnerHandle::IsSet()) {
+      MessageLoop::current()->PostDelayedTask(
+          FROM_HERE,
+          base::Bind(&LibcurlHttpFetcher::CurlPerformOnce,
+                     base::Unretained(this)),
+          TimeDelta::FromSeconds(1));
+      return;
+    }
+#endif
     SetupMessageLoopSources();
     return;
   }
diff --git a/libcurl_http_fetcher.h b/libcurl_http_fetcher.h
index 97a9a87..4e91b69 100644
--- a/libcurl_http_fetcher.h
+++ b/libcurl_http_fetcher.h
@@ -218,7 +218,7 @@
   }
 
   // Cleans up the following if they are non-null:
-  // curl(m) handles, fd_controller_maps_, timeout_id_.
+  // curl(m) handles, fd_controller_maps_(fd_task_maps_), timeout_id_.
   void CleanUp();
 
   // Force terminate the transfer. This will invoke the delegate's (if any)
diff --git a/libcurl_http_fetcher_unittest.cc b/libcurl_http_fetcher_unittest.cc
index 20f05b9..874ef2e 100644
--- a/libcurl_http_fetcher_unittest.cc
+++ b/libcurl_http_fetcher_unittest.cc
@@ -100,8 +100,18 @@
 
   libcurl_fetcher_.BeginTransfer("https://An-uNres0lvable-uRl.invalid");
 
+#ifdef __ANDROID__
+  // It's slower on Android, so the libcurl handle may not finish within one
+  // cycle. We need to wait for more cycles until it finishes. The original
+  // test didn't correctly handle the case of re-watching libcurl fds.
+  while (loop_.PendingTasks() &&
+         libcurl_fetcher_.GetAuxiliaryErrorCode() == ErrorCode::kSuccess) {
+    loop_.RunOnce(true);
+  }
+#else
   // The first time it can't resolve.
   loop_.RunOnce(true);
+#endif
   EXPECT_EQ(libcurl_fetcher_.GetAuxiliaryErrorCode(),
             ErrorCode::kUnresolvedHostError);
 
@@ -131,8 +141,18 @@
   // easier to mock the part that depends on internet connectivity.
   libcurl_fetcher_.BeginTransfer("https://An-uNres0lvable-uRl.invalid");
 
+#ifdef __ANDROID__
+  // It's slower on Android, so the libcurl handle may not finish within one
+  // cycle. We need to wait for more cycles until it finishes. The original
+  // test didn't correctly handle the case of re-watching libcurl fds.
+  while (loop_.PendingTasks() &&
+         libcurl_fetcher_.GetAuxiliaryErrorCode() == ErrorCode::kSuccess) {
+    loop_.RunOnce(true);
+  }
+#else
   // The first time it can't resolve.
   loop_.RunOnce(true);
+#endif
   EXPECT_EQ(libcurl_fetcher_.GetAuxiliaryErrorCode(),
             ErrorCode::kUnresolvedHostError);
 
@@ -145,9 +165,19 @@
           [this]() { libcurl_fetcher_.http_response_code_ = 0; }));
   libcurl_fetcher_.transfer_size_ = 10;
 
+#ifdef __ANDROID__
+  // It's slower on Android, so the libcurl handle may not finish within one
+  // cycle. We need to wait for more cycles until it finishes. The original
+  // test didn't correctly handle the case of re-watching libcurl fds.
+  while (loop_.PendingTasks() && libcurl_fetcher_.GetAuxiliaryErrorCode() ==
+                                     ErrorCode::kUnresolvedHostError) {
+    loop_.RunOnce(true);
+  }
+#else
   // This time the host is resolved. But after that again we can't resolve
   // anymore (See above).
   loop_.RunOnce(true);
+#endif
   EXPECT_EQ(libcurl_fetcher_.GetAuxiliaryErrorCode(),
             ErrorCode::kUnresolvedHostRecovered);
 
diff --git a/logging.cc b/logging.cc
new file mode 100644
index 0000000..6320e36
--- /dev/null
+++ b/logging.cc
@@ -0,0 +1,87 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <string>
+
+#include <base/files/file_util.h>
+#include <base/logging.h>
+#include <base/strings/string_util.h>
+#include <base/strings/stringprintf.h>
+
+#include "update_engine/common/utils.h"
+#include "update_engine/logging.h"
+
+using std::string;
+
+namespace chromeos_update_engine {
+
+namespace {
+
+constexpr char kSystemLogsRoot[] = "/var/log";
+
+void SetupLogSymlink(const string& symlink_path, const string& log_path) {
+  // TODO(petkov): To ensure a smooth transition between non-timestamped and
+  // timestamped logs, move an existing log to start the first timestamped
+  // one. This code can go away once all clients are switched to this version or
+  // we stop caring about the old-style logs.
+  if (utils::FileExists(symlink_path.c_str()) &&
+      !utils::IsSymlink(symlink_path.c_str())) {
+    base::ReplaceFile(
+        base::FilePath(symlink_path), base::FilePath(log_path), nullptr);
+  }
+  base::DeleteFile(base::FilePath(symlink_path), true);
+  if (symlink(log_path.c_str(), symlink_path.c_str()) == -1) {
+    PLOG(ERROR) << "Unable to create symlink " << symlink_path
+                << " pointing at " << log_path;
+  }
+}
+
+string SetupLogFile(const string& kLogsRoot) {
+  const string kLogSymlink = kLogsRoot + "/update_engine.log";
+  const string kLogsDir = kLogsRoot + "/update_engine";
+  const string kLogPath =
+      base::StringPrintf("%s/update_engine.%s",
+                         kLogsDir.c_str(),
+                         utils::GetTimeAsString(::time(nullptr)).c_str());
+  mkdir(kLogsDir.c_str(), 0755);
+  SetupLogSymlink(kLogSymlink, kLogPath);
+  return kLogSymlink;
+}
+
+}  // namespace
+
+void SetupLogging(bool log_to_system, bool log_to_file) {
+  logging::LoggingSettings log_settings;
+  log_settings.lock_log = logging::DONT_LOCK_LOG_FILE;
+  log_settings.logging_dest = static_cast<logging::LoggingDestination>(
+      (log_to_system ? logging::LOG_TO_SYSTEM_DEBUG_LOG : 0) |
+      (log_to_file ? logging::LOG_TO_FILE : 0));
+  log_settings.log_file = nullptr;
+
+  string log_file;
+  if (log_to_file) {
+    log_file = SetupLogFile(kSystemLogsRoot);
+    log_settings.delete_old = logging::APPEND_TO_OLD_LOG_FILE;
+    log_settings.log_file = log_file.c_str();
+  }
+  logging::InitLogging(log_settings);
+}
+
+}  // namespace chromeos_update_engine
diff --git a/logging.h b/logging.h
new file mode 100644
index 0000000..c9e7483
--- /dev/null
+++ b/logging.h
@@ -0,0 +1,23 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+namespace chromeos_update_engine {
+
+// Set up logging. |log_to_system| and |log_to_file| specifies
+// the destination of logs.
+void SetupLogging(bool log_to_system, bool log_to_file);
+
+}  // namespace chromeos_update_engine
diff --git a/logging_android.cc b/logging_android.cc
new file mode 100644
index 0000000..0219075
--- /dev/null
+++ b/logging_android.cc
@@ -0,0 +1,276 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include <inttypes.h>
+#include <stdio.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+#include <algorithm>
+#include <functional>
+#include <iomanip>
+#include <string>
+#include <string_view>
+#include <vector>
+
+#include <android-base/file.h>
+#include <android-base/strings.h>
+#include <android-base/unique_fd.h>
+#include <base/files/dir_reader_posix.h>
+#include <base/logging.h>
+#include <base/strings/string_util.h>
+#include <base/strings/stringprintf.h>
+#include <log/log.h>
+
+#include "update_engine/common/utils.h"
+
+using std::string;
+
+#ifdef _UE_SIDELOAD
+constexpr bool kSideload = true;
+#else
+constexpr bool kSideload = false;
+#endif
+
+namespace chromeos_update_engine {
+namespace {
+
+constexpr char kSystemLogsRoot[] = "/data/misc/update_engine_log";
+constexpr size_t kLogCount = 5;
+
+// Keep the most recent |kLogCount| logs but remove the old ones in
+// "/data/misc/update_engine_log/".
+void DeleteOldLogs(const string& kLogsRoot) {
+  base::DirReaderPosix reader(kLogsRoot.c_str());
+  if (!reader.IsValid()) {
+    LOG(ERROR) << "Failed to read " << kLogsRoot;
+    return;
+  }
+
+  std::vector<string> old_logs;
+  while (reader.Next()) {
+    if (reader.name()[0] == '.')
+      continue;
+
+    // Log files are in format "update_engine.%Y%m%d-%H%M%S",
+    // e.g. update_engine.20090103-231425
+    uint64_t date;
+    uint64_t local_time;
+    if (sscanf(reader.name(),
+               "update_engine.%" PRIu64 "-%" PRIu64 "",
+               &date,
+               &local_time) == 2) {
+      old_logs.push_back(reader.name());
+    } else {
+      LOG(WARNING) << "Unrecognized log file " << reader.name();
+    }
+  }
+
+  std::sort(old_logs.begin(), old_logs.end(), std::greater<string>());
+  for (size_t i = kLogCount; i < old_logs.size(); i++) {
+    string log_path = kLogsRoot + "/" + old_logs[i];
+    if (unlink(log_path.c_str()) == -1) {
+      PLOG(WARNING) << "Failed to unlink " << log_path;
+    }
+  }
+}
+
+string SetupLogFile(const string& kLogsRoot) {
+  DeleteOldLogs(kLogsRoot);
+
+  return base::StringPrintf("%s/update_engine.%s",
+                            kLogsRoot.c_str(),
+                            utils::GetTimeAsString(::time(nullptr)).c_str());
+}
+
+const char* LogPriorityToCString(int priority) {
+  switch (priority) {
+    case ANDROID_LOG_VERBOSE:
+      return "VERBOSE";
+    case ANDROID_LOG_DEBUG:
+      return "DEBUG";
+    case ANDROID_LOG_INFO:
+      return "INFO";
+    case ANDROID_LOG_WARN:
+      return "WARN";
+    case ANDROID_LOG_ERROR:
+      return "ERROR";
+    case ANDROID_LOG_FATAL:
+      return "FATAL";
+    default:
+      return "UNKNOWN";
+  }
+}
+
+using LoggerFunction = std::function<void(const struct __android_log_message*)>;
+
+class FileLogger {
+ public:
+  explicit FileLogger(const string& path) {
+    fd_.reset(TEMP_FAILURE_RETRY(
+        open(path.c_str(),
+             O_WRONLY | O_CREAT | O_TRUNC | O_CLOEXEC | O_NOFOLLOW | O_SYNC,
+             0644)));
+    if (fd_ == -1) {
+      // Use ALOGE that logs to logd before __android_log_set_logger.
+      ALOGE("Cannot open persistent log %s: %s", path.c_str(), strerror(errno));
+      return;
+    }
+    // The log file will have AID_LOG as group ID; this GID is inherited from
+    // the parent directory "/data/misc/update_engine_log" which sets the SGID
+    // bit.
+    if (fchmod(fd_.get(), 0640) == -1) {
+      // Use ALOGE that logs to logd before __android_log_set_logger.
+      ALOGE("Cannot chmod 0640 persistent log %s: %s",
+            path.c_str(),
+            strerror(errno));
+      return;
+    }
+  }
+  // Copy-constructor needed to be converted to std::function.
+  FileLogger(const FileLogger& other) { fd_.reset(dup(other.fd_)); }
+  void operator()(const struct __android_log_message* log_message) {
+    if (fd_ == -1) {
+      return;
+    }
+
+    std::string_view message_str =
+        log_message->message != nullptr ? log_message->message : "";
+
+    WriteToFd(GetPrefix(log_message));
+    WriteToFd(message_str);
+    WriteToFd("\n");
+  }
+
+ private:
+  android::base::unique_fd fd_;
+  void WriteToFd(std::string_view message) {
+    ignore_result(
+        android::base::WriteFully(fd_, message.data(), message.size()));
+  }
+
+  string GetPrefix(const struct __android_log_message* log_message) {
+    std::stringstream ss;
+    timeval tv;
+    gettimeofday(&tv, nullptr);
+    time_t t = tv.tv_sec;
+    struct tm local_time;
+    localtime_r(&t, &local_time);
+    struct tm* tm_time = &local_time;
+    ss << "[" << std::setfill('0') << std::setw(2) << 1 + tm_time->tm_mon
+       << std::setw(2) << tm_time->tm_mday << '/' << std::setw(2)
+       << tm_time->tm_hour << std::setw(2) << tm_time->tm_min << std::setw(2)
+       << tm_time->tm_sec << '.' << std::setw(6) << tv.tv_usec << "] ";
+    // libchrome logging prepends |message| with severity, file and line, but
+    // leaves logger_data->file as nullptr.
+    // libbase / liblog logging doesn't. Hence, add them to match the style.
+    // For liblog messages that don't set logger_data->file, not printing the
+    // priority is acceptable.
+    if (log_message->file) {
+      ss << "[" << LogPriorityToCString(log_message->priority) << ':'
+         << log_message->file << '(' << log_message->line << ")] ";
+    }
+    return ss.str();
+  }
+};
+
+class CombinedLogger {
+ public:
+  CombinedLogger(bool log_to_system, bool log_to_file) {
+    if (log_to_system) {
+      if (kSideload) {
+        // No logd in sideload. Use stdout.
+        // recovery has already redirected stdio properly.
+        loggers_.push_back(__android_log_stderr_logger);
+      } else {
+        loggers_.push_back(__android_log_logd_logger);
+      }
+    }
+    if (log_to_file) {
+      loggers_.push_back(std::move(FileLogger(SetupLogFile(kSystemLogsRoot))));
+    }
+  }
+  void operator()(const struct __android_log_message* log_message) {
+    for (auto&& logger : loggers_) {
+      logger(log_message);
+    }
+  }
+
+ private:
+  std::vector<LoggerFunction> loggers_;
+};
+
+// Redirect all libchrome logs to liblog using our custom handler that does
+// not call __android_log_write and explicitly write to stderr at the same
+// time. The preset CombinedLogger already writes to stderr properly.
+bool RedirectToLiblog(int severity,
+                      const char* file,
+                      int line,
+                      size_t message_start,
+                      const std::string& str_newline) {
+  android_LogPriority priority =
+      (severity < 0) ? ANDROID_LOG_VERBOSE : ANDROID_LOG_UNKNOWN;
+  switch (severity) {
+    case logging::LOG_INFO:
+      priority = ANDROID_LOG_INFO;
+      break;
+    case logging::LOG_WARNING:
+      priority = ANDROID_LOG_WARN;
+      break;
+    case logging::LOG_ERROR:
+      priority = ANDROID_LOG_ERROR;
+      break;
+    case logging::LOG_FATAL:
+      priority = ANDROID_LOG_FATAL;
+      break;
+  }
+  std::string_view sv = str_newline;
+  ignore_result(android::base::ConsumeSuffix(&sv, "\n"));
+  std::string str(sv.data(), sv.size());
+  // This will eventually be redirected to CombinedLogger.
+  // Use nullptr as tag so that liblog infers log tag from getprogname().
+  __android_log_write(priority, nullptr /* tag */, str.c_str());
+  return true;
+}
+
+}  // namespace
+
+void SetupLogging(bool log_to_system, bool log_to_file) {
+  // Note that libchrome logging uses liblog.
+  // By calling liblog's __android_log_set_logger function, all of libchrome
+  // (used by update_engine) / libbase / liblog (used by depended modules)
+  // logging eventually redirects to CombinedLogger.
+  static auto g_logger =
+      std::make_unique<CombinedLogger>(log_to_system, log_to_file);
+  __android_log_set_logger([](const struct __android_log_message* log_message) {
+    (*g_logger)(log_message);
+  });
+
+  // libchrome logging should not log to file.
+  logging::LoggingSettings log_settings;
+  log_settings.lock_log = logging::DONT_LOCK_LOG_FILE;
+  log_settings.logging_dest =
+      static_cast<logging::LoggingDestination>(logging::LOG_NONE);
+  log_settings.log_file = nullptr;
+  logging::InitLogging(log_settings);
+  logging::SetLogItems(false /* enable_process_id */,
+                       false /* enable_thread_id */,
+                       false /* enable_timestamp */,
+                       false /* enable_tickcount */);
+  logging::SetLogMessageHandler(&RedirectToLiblog);
+}
+
+}  // namespace chromeos_update_engine
diff --git a/main.cc b/main.cc
index 1d161e3..ceb5b56 100644
--- a/main.cc
+++ b/main.cc
@@ -14,154 +14,23 @@
 // limitations under the License.
 //
 
-#include <inttypes.h>
 #include <sys/stat.h>
 #include <sys/types.h>
-#include <unistd.h>
 #include <xz.h>
 
-#include <algorithm>
-#include <string>
-#include <vector>
-
 #include <base/at_exit.h>
 #include <base/command_line.h>
-#include <base/files/dir_reader_posix.h>
-#include <base/files/file_util.h>
 #include <base/logging.h>
-#include <base/strings/string_util.h>
-#include <base/strings/stringprintf.h>
 #include <brillo/flag_helper.h>
 
 #include "update_engine/common/subprocess.h"
 #include "update_engine/common/terminator.h"
 #include "update_engine/common/utils.h"
 #include "update_engine/daemon_base.h"
+#include "update_engine/logging.h"
 
 using std::string;
 
-namespace chromeos_update_engine {
-namespace {
-
-string GetTimeAsString(time_t utime) {
-  struct tm tm;
-  CHECK_EQ(localtime_r(&utime, &tm), &tm);
-  char str[16];
-  CHECK_EQ(strftime(str, sizeof(str), "%Y%m%d-%H%M%S", &tm), 15u);
-  return str;
-}
-
-#ifdef __ANDROID__
-constexpr char kSystemLogsRoot[] = "/data/misc/update_engine_log";
-constexpr size_t kLogCount = 5;
-
-// Keep the most recent |kLogCount| logs but remove the old ones in
-// "/data/misc/update_engine_log/".
-void DeleteOldLogs(const string& kLogsRoot) {
-  base::DirReaderPosix reader(kLogsRoot.c_str());
-  if (!reader.IsValid()) {
-    LOG(ERROR) << "Failed to read " << kLogsRoot;
-    return;
-  }
-
-  std::vector<string> old_logs;
-  while (reader.Next()) {
-    if (reader.name()[0] == '.')
-      continue;
-
-    // Log files are in format "update_engine.%Y%m%d-%H%M%S",
-    // e.g. update_engine.20090103-231425
-    uint64_t date;
-    uint64_t local_time;
-    if (sscanf(reader.name(),
-               "update_engine.%" PRIu64 "-%" PRIu64 "",
-               &date,
-               &local_time) == 2) {
-      old_logs.push_back(reader.name());
-    } else {
-      LOG(WARNING) << "Unrecognized log file " << reader.name();
-    }
-  }
-
-  std::sort(old_logs.begin(), old_logs.end(), std::greater<string>());
-  for (size_t i = kLogCount; i < old_logs.size(); i++) {
-    string log_path = kLogsRoot + "/" + old_logs[i];
-    if (unlink(log_path.c_str()) == -1) {
-      PLOG(WARNING) << "Failed to unlink " << log_path;
-    }
-  }
-}
-
-string SetupLogFile(const string& kLogsRoot) {
-  DeleteOldLogs(kLogsRoot);
-
-  return base::StringPrintf("%s/update_engine.%s",
-                            kLogsRoot.c_str(),
-                            GetTimeAsString(::time(nullptr)).c_str());
-}
-#else
-constexpr char kSystemLogsRoot[] = "/var/log";
-
-void SetupLogSymlink(const string& symlink_path, const string& log_path) {
-  // TODO(petkov): To ensure a smooth transition between non-timestamped and
-  // timestamped logs, move an existing log to start the first timestamped
-  // one. This code can go away once all clients are switched to this version or
-  // we stop caring about the old-style logs.
-  if (utils::FileExists(symlink_path.c_str()) &&
-      !utils::IsSymlink(symlink_path.c_str())) {
-    base::ReplaceFile(
-        base::FilePath(symlink_path), base::FilePath(log_path), nullptr);
-  }
-  base::DeleteFile(base::FilePath(symlink_path), true);
-  if (symlink(log_path.c_str(), symlink_path.c_str()) == -1) {
-    PLOG(ERROR) << "Unable to create symlink " << symlink_path
-                << " pointing at " << log_path;
-  }
-}
-
-string SetupLogFile(const string& kLogsRoot) {
-  const string kLogSymlink = kLogsRoot + "/update_engine.log";
-  const string kLogsDir = kLogsRoot + "/update_engine";
-  const string kLogPath =
-      base::StringPrintf("%s/update_engine.%s",
-                         kLogsDir.c_str(),
-                         GetTimeAsString(::time(nullptr)).c_str());
-  mkdir(kLogsDir.c_str(), 0755);
-  SetupLogSymlink(kLogSymlink, kLogPath);
-  return kLogSymlink;
-}
-#endif  // __ANDROID__
-
-void SetupLogging(bool log_to_system, bool log_to_file) {
-  logging::LoggingSettings log_settings;
-  log_settings.lock_log = logging::DONT_LOCK_LOG_FILE;
-  log_settings.logging_dest = static_cast<logging::LoggingDestination>(
-      (log_to_system ? logging::LOG_TO_SYSTEM_DEBUG_LOG : 0) |
-      (log_to_file ? logging::LOG_TO_FILE : 0));
-  log_settings.log_file = nullptr;
-
-  string log_file;
-  if (log_to_file) {
-    log_file = SetupLogFile(kSystemLogsRoot);
-    log_settings.delete_old = logging::APPEND_TO_OLD_LOG_FILE;
-#if BASE_VER < 780000
-    log_settings.log_file = log_file.c_str();
-#else
-    log_settings.log_file_path = log_file.c_str();
-#endif
-  }
-  logging::InitLogging(log_settings);
-
-#ifdef __ANDROID__
-  // The log file will have AID_LOG as group ID; this GID is inherited from the
-  // parent directory "/data/misc/update_engine_log" which sets the SGID bit.
-  chmod(log_file.c_str(), 0640);
-#endif
-}
-
-}  // namespace
-}  // namespace chromeos_update_engine
-
 int main(int argc, char** argv) {
   DEFINE_bool(logtofile, false, "Write logs to a file in log_dir.");
   DEFINE_bool(logtostderr,
diff --git a/metrics_reporter_android.cc b/metrics_reporter_android.cc
index 9165f0d..d8fa6e5 100644
--- a/metrics_reporter_android.cc
+++ b/metrics_reporter_android.cc
@@ -16,17 +16,37 @@
 
 #include "update_engine/metrics_reporter_android.h"
 
+#include <stdint.h>
+
 #include <memory>
 #include <string>
 
-#include <metricslogger/metrics_logger.h>
+#include <android-base/properties.h>
+#include <base/strings/string_util.h>
+#include <fs_mgr.h>
+#include <libdm/dm.h>
+#include <liblp/builder.h>
+#include <liblp/liblp.h>
+#include <statslog.h>
 
 #include "update_engine/common/constants.h"
 
+using android::fs_mgr::GetPartitionGroupName;
+using android::fs_mgr::LpMetadata;
+using android::fs_mgr::MetadataBuilder;
+using android::fs_mgr::ReadMetadata;
+using android::fs_mgr::SlotNumberForSlotSuffix;
+using base::EndsWith;
+
 namespace {
-void LogHistogram(const std::string& metrics, int value) {
-  android::metricslogger::LogHistogram(metrics, value);
-  LOG(INFO) << "uploading " << value << " to histogram for metric " << metrics;
+// A number offset adds on top of the enum value. e.g. ErrorCode::SUCCESS will
+// be reported as 10000, and AttemptResult::UPDATE_CANCELED will be reported as
+// 10011. This keeps the ordering of update engine's enum definition when statsd
+// atoms reserve the value 0 for unknown state.
+constexpr auto kMetricsReporterEnumOffset = 10000;
+
+int32_t GetStatsdEnumValue(int32_t value) {
+  return kMetricsReporterEnumOffset + value;
 }
 }  // namespace
 
@@ -34,41 +54,6 @@
 
 namespace metrics {
 
-// The histograms are defined in:
-// depot/google3/analysis/uma/configs/clearcut/TRON/histograms.xml
-constexpr char kMetricsUpdateEngineAttemptNumber[] =
-    "ota_update_engine_attempt_number";
-constexpr char kMetricsUpdateEngineAttemptResult[] =
-    "ota_update_engine_attempt_result";
-constexpr char kMetricsUpdateEngineAttemptDurationInMinutes[] =
-    "ota_update_engine_attempt_fixed_duration_boottime_in_minutes";
-constexpr char kMetricsUpdateEngineAttemptDurationUptimeInMinutes[] =
-    "ota_update_engine_attempt_duration_monotonic_in_minutes";
-constexpr char kMetricsUpdateEngineAttemptErrorCode[] =
-    "ota_update_engine_attempt_error_code";
-constexpr char kMetricsUpdateEngineAttemptPayloadSizeMiB[] =
-    "ota_update_engine_attempt_payload_size_mib";
-constexpr char kMetricsUpdateEngineAttemptPayloadType[] =
-    "ota_update_engine_attempt_payload_type";
-constexpr char kMetricsUpdateEngineAttemptCurrentBytesDownloadedMiB[] =
-    "ota_update_engine_attempt_fixed_current_bytes_downloaded_mib";
-
-constexpr char kMetricsUpdateEngineSuccessfulUpdateAttemptCount[] =
-    "ota_update_engine_successful_update_attempt_count";
-constexpr char kMetricsUpdateEngineSuccessfulUpdateTotalDurationInMinutes[] =
-    "ota_update_engine_successful_update_fixed_total_duration_in_minutes";
-constexpr char kMetricsUpdateEngineSuccessfulUpdatePayloadSizeMiB[] =
-    "ota_update_engine_successful_update_payload_size_mib";
-constexpr char kMetricsUpdateEngineSuccessfulUpdatePayloadType[] =
-    "ota_update_engine_successful_update_payload_type";
-constexpr char kMetricsUpdateEngineSuccessfulUpdateRebootCount[] =
-    "ota_update_engine_successful_update_reboot_count";
-constexpr char kMetricsUpdateEngineSuccessfulUpdateTotalBytesDownloadedMiB[] =
-    "ota_update_engine_successful_update_total_bytes_downloaded_mib";
-constexpr char
-    kMetricsUpdateEngineSuccessfulUpdateDownloadOverheadPercentage[] =
-        "ota_update_engine_successful_update_download_overhead_percentage";
-
 std::unique_ptr<MetricsReporterInterface> CreateMetricsReporter() {
   return std::make_unique<MetricsReporterAndroid>();
 }
@@ -84,22 +69,56 @@
     int64_t payload_size,
     metrics::AttemptResult attempt_result,
     ErrorCode error_code) {
-  LogHistogram(metrics::kMetricsUpdateEngineAttemptNumber, attempt_number);
-  LogHistogram(metrics::kMetricsUpdateEngineAttemptPayloadType,
-               static_cast<int>(payload_type));
-  LogHistogram(metrics::kMetricsUpdateEngineAttemptDurationInMinutes,
-               duration.InMinutes());
-  LogHistogram(metrics::kMetricsUpdateEngineAttemptDurationUptimeInMinutes,
-               duration_uptime.InMinutes());
-
   int64_t payload_size_mib = payload_size / kNumBytesInOneMiB;
-  LogHistogram(metrics::kMetricsUpdateEngineAttemptPayloadSizeMiB,
-               payload_size_mib);
 
-  LogHistogram(metrics::kMetricsUpdateEngineAttemptResult,
-               static_cast<int>(attempt_result));
-  LogHistogram(metrics::kMetricsUpdateEngineAttemptErrorCode,
-               static_cast<int>(error_code));
+  int64_t super_partition_size_bytes = 0;
+  int64_t super_free_space = 0;
+  int64_t slot_size_bytes = 0;
+
+  if (android::base::GetBoolProperty("ro.boot.dynamic_partitions", false)) {
+    uint32_t slot = SlotNumberForSlotSuffix(fs_mgr_get_slot_suffix());
+    auto super_device = fs_mgr_get_super_partition_name();
+    std::unique_ptr<LpMetadata> metadata = ReadMetadata(super_device, slot);
+    if (metadata) {
+      super_partition_size_bytes = GetTotalSuperPartitionSize(*metadata);
+
+      for (const auto& group : metadata->groups) {
+        if (EndsWith(GetPartitionGroupName(group),
+                     fs_mgr_get_slot_suffix(),
+                     base::CompareCase::SENSITIVE)) {
+          slot_size_bytes += group.maximum_size;
+        }
+      }
+
+      auto metadata_builder = MetadataBuilder::New(*metadata);
+      if (metadata_builder) {
+        auto free_regions = metadata_builder->GetFreeRegions();
+        for (const auto& interval : free_regions) {
+          super_free_space += interval.length();
+        }
+        super_free_space *= android::dm::kSectorSize;
+      } else {
+        LOG(ERROR) << "Cannot create metadata builder.";
+      }
+    } else {
+      LOG(ERROR) << "Could not read dynamic partition metadata for device: "
+                 << super_device;
+    }
+  }
+
+  android::util::stats_write(
+      android::util::UPDATE_ENGINE_UPDATE_ATTEMPT_REPORTED,
+      attempt_number,
+      GetStatsdEnumValue(static_cast<int32_t>(payload_type)),
+      duration.InMinutes(),
+      duration_uptime.InMinutes(),
+      payload_size_mib,
+      GetStatsdEnumValue(static_cast<int32_t>(attempt_result)),
+      GetStatsdEnumValue(static_cast<int32_t>(error_code)),
+      android::base::GetProperty("ro.build.fingerprint", "").c_str(),
+      super_partition_size_bytes,
+      slot_size_bytes,
+      super_free_space);
 }
 
 void MetricsReporterAndroid::ReportUpdateAttemptDownloadMetrics(
@@ -108,8 +127,9 @@
     DownloadSource /* download_source */,
     metrics::DownloadErrorCode /* payload_download_error_code */,
     metrics::ConnectionType /* connection_type */) {
-  LogHistogram(metrics::kMetricsUpdateEngineAttemptCurrentBytesDownloadedMiB,
-               payload_bytes_downloaded / kNumBytesInOneMiB);
+  // TODO(xunchang) add statsd reporting
+  LOG(INFO) << "Current update attempt downloads "
+            << payload_bytes_downloaded / kNumBytesInOneMiB << " bytes data";
 }
 
 void MetricsReporterAndroid::ReportSuccessfulUpdateMetrics(
@@ -123,37 +143,28 @@
     base::TimeDelta /* total_duration_uptime */,
     int reboot_count,
     int /* url_switch_count */) {
-  LogHistogram(metrics::kMetricsUpdateEngineSuccessfulUpdateAttemptCount,
-               attempt_count);
-  LogHistogram(metrics::kMetricsUpdateEngineSuccessfulUpdatePayloadType,
-               static_cast<int>(payload_type));
-
   int64_t payload_size_mib = payload_size / kNumBytesInOneMiB;
-  LogHistogram(metrics::kMetricsUpdateEngineSuccessfulUpdatePayloadSizeMiB,
-               payload_size_mib);
-
   int64_t total_bytes_downloaded = 0;
   for (size_t i = 0; i < kNumDownloadSources; i++) {
     total_bytes_downloaded += num_bytes_downloaded[i] / kNumBytesInOneMiB;
   }
-  LogHistogram(
-      metrics::kMetricsUpdateEngineSuccessfulUpdateTotalBytesDownloadedMiB,
-      total_bytes_downloaded);
-  LogHistogram(
-      metrics::kMetricsUpdateEngineSuccessfulUpdateDownloadOverheadPercentage,
-      download_overhead_percentage);
 
-  LogHistogram(
-      metrics::kMetricsUpdateEngineSuccessfulUpdateTotalDurationInMinutes,
-      total_duration.InMinutes());
-  LogHistogram(metrics::kMetricsUpdateEngineSuccessfulUpdateRebootCount,
-               reboot_count);
+  android::util::stats_write(
+      android::util::UPDATE_ENGINE_SUCCESSFUL_UPDATE_REPORTED,
+      static_cast<int32_t>(attempt_count),
+      GetStatsdEnumValue(static_cast<int32_t>(payload_type)),
+      static_cast<int32_t>(payload_size_mib),
+      static_cast<int32_t>(total_bytes_downloaded),
+      static_cast<int32_t>(download_overhead_percentage),
+      static_cast<int32_t>(total_duration.InMinutes()),
+      static_cast<int32_t>(reboot_count));
 }
 
 void MetricsReporterAndroid::ReportAbnormallyTerminatedUpdateAttemptMetrics() {
   int attempt_result =
       static_cast<int>(metrics::AttemptResult::kAbnormalTermination);
-  LogHistogram(metrics::kMetricsUpdateEngineAttemptResult, attempt_result);
+  // TODO(xunchang) add statsd reporting
+  LOG(INFO) << "Abnormally terminated update attempt result " << attempt_result;
 }
 
 };  // namespace chromeos_update_engine
diff --git a/metrics_utils.cc b/metrics_utils.cc
index 9f0caa5..2211a67 100644
--- a/metrics_utils.cc
+++ b/metrics_utils.cc
@@ -72,6 +72,8 @@
     case ErrorCode::kFilesystemCopierError:
     case ErrorCode::kFilesystemVerifierError:
     case ErrorCode::kVerityCalculationError:
+    case ErrorCode::kNotEnoughSpace:
+    case ErrorCode::kDeviceCorrupted:
       return metrics::AttemptResult::kOperationExecutionError;
 
     case ErrorCode::kDownloadMetadataSignatureMismatch:
@@ -237,6 +239,8 @@
     case ErrorCode::kRollbackNotPossible:
     case ErrorCode::kFirstActiveOmahaPingSentPersistenceError:
     case ErrorCode::kVerityCalculationError:
+    case ErrorCode::kNotEnoughSpace:
+    case ErrorCode::kDeviceCorrupted:
     case ErrorCode::kPackageExcludedFromUpdate:
       break;
 
diff --git a/mock_dynamic_partition_control.h b/mock_dynamic_partition_control.h
index 24aca06..e85df32 100644
--- a/mock_dynamic_partition_control.h
+++ b/mock_dynamic_partition_control.h
@@ -17,37 +17,102 @@
 #include <stdint.h>
 
 #include <memory>
+#include <set>
 #include <string>
 
 #include <gmock/gmock.h>
 
-#include "update_engine/dynamic_partition_control_interface.h"
+#include "update_engine/common/boot_control_interface.h"
+#include "update_engine/common/dynamic_partition_control_interface.h"
+#include "update_engine/dynamic_partition_control_android.h"
 
 namespace chromeos_update_engine {
 
-class MockDynamicPartitionControl : public DynamicPartitionControlInterface {
+class MockDynamicPartitionControlAndroid
+    : public DynamicPartitionControlAndroid {
  public:
-  MOCK_METHOD5(MapPartitionOnDeviceMapper,
-               bool(const std::string&,
-                    const std::string&,
-                    uint32_t,
-                    bool,
-                    std::string*));
-  MOCK_METHOD2(UnmapPartitionOnDeviceMapper, bool(const std::string&, bool));
-  MOCK_METHOD0(Cleanup, void());
-  MOCK_METHOD1(DeviceExists, bool(const std::string&));
-  MOCK_METHOD1(GetState, ::android::dm::DmDeviceState(const std::string&));
-  MOCK_METHOD2(GetDmDevicePathByName, bool(const std::string&, std::string*));
-  MOCK_METHOD3(LoadMetadataBuilder,
-               std::unique_ptr<::android::fs_mgr::MetadataBuilder>(
-                   const std::string&, uint32_t, uint32_t));
-  MOCK_METHOD3(StoreMetadata,
-               bool(const std::string&,
-                    android::fs_mgr::MetadataBuilder*,
-                    uint32_t));
-  MOCK_METHOD1(GetDeviceDir, bool(std::string*));
-  MOCK_METHOD0(IsDynamicPartitionsEnabled, bool());
-  MOCK_METHOD0(IsDynamicPartitionsRetrofit, bool());
+  MOCK_METHOD(
+      bool,
+      MapPartitionOnDeviceMapper,
+      (const std::string&, const std::string&, uint32_t, bool, std::string*),
+      (override));
+  MOCK_METHOD(bool,
+              UnmapPartitionOnDeviceMapper,
+              (const std::string&),
+              (override));
+  MOCK_METHOD(void, Cleanup, (), (override));
+  MOCK_METHOD(bool, DeviceExists, (const std::string&), (override));
+  MOCK_METHOD(::android::dm::DmDeviceState,
+              GetState,
+              (const std::string&),
+              (override));
+  MOCK_METHOD(bool,
+              GetDmDevicePathByName,
+              (const std::string&, std::string*),
+              (override));
+  MOCK_METHOD(std::unique_ptr<::android::fs_mgr::MetadataBuilder>,
+              LoadMetadataBuilder,
+              (const std::string&, uint32_t),
+              (override));
+  MOCK_METHOD(std::unique_ptr<::android::fs_mgr::MetadataBuilder>,
+              LoadMetadataBuilder,
+              (const std::string&, uint32_t, uint32_t),
+              (override));
+  MOCK_METHOD(bool,
+              StoreMetadata,
+              (const std::string&, android::fs_mgr::MetadataBuilder*, uint32_t),
+              (override));
+  MOCK_METHOD(bool, GetDeviceDir, (std::string*), (override));
+  MOCK_METHOD(FeatureFlag, GetDynamicPartitionsFeatureFlag, (), (override));
+  MOCK_METHOD(std::string, GetSuperPartitionName, (uint32_t), (override));
+  MOCK_METHOD(FeatureFlag, GetVirtualAbFeatureFlag, (), (override));
+  MOCK_METHOD(bool, FinishUpdate, (bool), (override));
+  MOCK_METHOD(bool,
+              GetSystemOtherPath,
+              (uint32_t, uint32_t, const std::string&, std::string*, bool*),
+              (override));
+  MOCK_METHOD(bool,
+              EraseSystemOtherAvbFooter,
+              (uint32_t, uint32_t),
+              (override));
+  MOCK_METHOD(std::optional<bool>, IsAvbEnabledOnSystemOther, (), (override));
+  MOCK_METHOD(bool, IsRecovery, (), (override));
+  MOCK_METHOD(bool,
+              PrepareDynamicPartitionsForUpdate,
+              (uint32_t, uint32_t, const DeltaArchiveManifest&, bool),
+              (override));
+
+  void set_fake_mapped_devices(const std::set<std::string>& fake) override {
+    DynamicPartitionControlAndroid::set_fake_mapped_devices(fake);
+  }
+
+  bool RealGetSystemOtherPath(uint32_t source_slot,
+                              uint32_t target_slot,
+                              const std::string& partition_name_suffix,
+                              std::string* path,
+                              bool* should_unmap) {
+    return DynamicPartitionControlAndroid::GetSystemOtherPath(
+        source_slot, target_slot, partition_name_suffix, path, should_unmap);
+  }
+
+  bool RealEraseSystemOtherAvbFooter(uint32_t source_slot,
+                                     uint32_t target_slot) {
+    return DynamicPartitionControlAndroid::EraseSystemOtherAvbFooter(
+        source_slot, target_slot);
+  }
+
+  std::optional<bool> RealIsAvbEnabledInFstab(const std::string& path) {
+    return DynamicPartitionControlAndroid::IsAvbEnabledInFstab(path);
+  }
+
+  bool RealPrepareDynamicPartitionsForUpdate(
+      uint32_t source_slot,
+      uint32_t target_slot,
+      const DeltaArchiveManifest& manifest,
+      bool delete_source) {
+    return DynamicPartitionControlAndroid::PrepareDynamicPartitionsForUpdate(
+        source_slot, target_slot, manifest, delete_source);
+  }
 };
 
 }  // namespace chromeos_update_engine
diff --git a/omaha_request_action.cc b/omaha_request_action.cc
index 83ee5b2..161cf43 100644
--- a/omaha_request_action.cc
+++ b/omaha_request_action.cc
@@ -351,7 +351,7 @@
   // If we have the value stored on disk, just return it.
   int64_t stored_value;
   if (prefs->GetInt64(kPrefsInstallDateDays, &stored_value)) {
-    // Convert and sanity-check.
+    // Convert and validity-check.
     int install_date_days = static_cast<int>(stored_value);
     if (install_date_days >= 0)
       return install_date_days;
@@ -957,10 +957,10 @@
     int code = GetHTTPResponseCode();
     LOG(ERROR) << "Omaha request network transfer failed with HTTPResponseCode="
                << code;
-    // Makes sure we send sane error values.
+    // Makes sure we send proper error values.
     if (code < 0 || code >= 1000) {
       code = 999;
-      LOG(WARNING) << "Converting to sane HTTPResponseCode=" << code;
+      LOG(WARNING) << "Converting to proper HTTPResponseCode=" << code;
     }
     completer.set_code(static_cast<ErrorCode>(
         static_cast<int>(ErrorCode::kOmahaRequestHTTPResponseBase) + code));
diff --git a/omaha_request_builder_xml.cc b/omaha_request_builder_xml.cc
index 8add89f..690a4ef 100644
--- a/omaha_request_builder_xml.cc
+++ b/omaha_request_builder_xml.cc
@@ -230,7 +230,7 @@
     if (!prefs_->GetString(prefs_key, &cohort_value) || cohort_value.empty())
       return "";
   }
-  // This is a sanity check to avoid sending a huge XML file back to Ohama due
+  // This is a validity check to avoid sending a huge XML file back to Omaha due
   // to a compromised stateful partition making the update check fail in low
   // network environments envent after a reboot.
   if (cohort_value.size() > 1024) {
diff --git a/omaha_request_builder_xml_unittest.cc b/omaha_request_builder_xml_unittest.cc
index 291189d..a804420 100644
--- a/omaha_request_builder_xml_unittest.cc
+++ b/omaha_request_builder_xml_unittest.cc
@@ -368,7 +368,7 @@
       1,
       CountSubstringInString(
           kRequestXml,
-          "<event eventtype=\"3\" eventresult=\"0\" errorcode=\"60\"></event>"))
+          "<event eventtype=\"3\" eventresult=\"0\" errorcode=\"62\"></event>"))
       << kRequestXml;
 }
 
@@ -399,7 +399,7 @@
       2,
       CountSubstringInString(
           kRequestXml,
-          "<event eventtype=\"3\" eventresult=\"0\" errorcode=\"60\"></event>"))
+          "<event eventtype=\"3\" eventresult=\"0\" errorcode=\"62\"></event>"))
       << kRequestXml;
 }
 }  // namespace chromeos_update_engine
diff --git a/omaha_request_params.cc b/omaha_request_params.cc
index d4b8d64..8a2e3dc 100644
--- a/omaha_request_params.cc
+++ b/omaha_request_params.cc
@@ -66,7 +66,7 @@
   image_props_ = LoadImageProperties(system_state_);
   mutable_image_props_ = LoadMutableImageProperties(system_state_);
 
-  // Sanity check the channel names.
+  // Validate the channel names.
   if (!IsValidChannel(image_props_.current_channel))
     image_props_.current_channel = "stable-channel";
   if (!IsValidChannel(mutable_image_props_.target_channel))
diff --git a/otacerts.zip b/otacerts.zip
new file mode 100644
index 0000000..00a5a51
--- /dev/null
+++ b/otacerts.zip
Binary files differ
diff --git a/payload_consumer/certificate_parser_android.cc b/payload_consumer/certificate_parser_android.cc
new file mode 100644
index 0000000..4a20547
--- /dev/null
+++ b/payload_consumer/certificate_parser_android.cc
@@ -0,0 +1,121 @@
+//
+// Copyright (C) 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_consumer/certificate_parser_android.h"
+
+#include <memory>
+#include <utility>
+
+#include <base/logging.h>
+#include <openssl/bio.h>
+#include <openssl/pem.h>
+#include <ziparchive/zip_archive.h>
+
+#include "update_engine/payload_consumer/certificate_parser_interface.h"
+
+namespace {
+bool IterateZipEntriesAndSearchForKeys(
+    const ZipArchiveHandle& handle, std::vector<std::vector<uint8_t>>* result) {
+  void* cookie;
+  int32_t iter_status = StartIteration(handle, &cookie, "", "x509.pem");
+  if (iter_status != 0) {
+    LOG(ERROR) << "Failed to iterate over entries in the certificate zipfile: "
+               << ErrorCodeString(iter_status);
+    return false;
+  }
+  std::unique_ptr<void, decltype(&EndIteration)> guard(cookie, EndIteration);
+
+  std::vector<std::vector<uint8_t>> pem_keys;
+  std::string_view name;
+  ZipEntry entry;
+  while ((iter_status = Next(cookie, &entry, &name)) == 0) {
+    std::vector<uint8_t> pem_content(entry.uncompressed_length);
+    if (int32_t extract_status = ExtractToMemory(
+            handle, &entry, pem_content.data(), pem_content.size());
+        extract_status != 0) {
+      LOG(ERROR) << "Failed to extract " << name << ": "
+                 << ErrorCodeString(extract_status);
+      return false;
+    }
+    pem_keys.push_back(pem_content);
+  }
+
+  if (iter_status != -1) {
+    LOG(ERROR) << "Error while iterating over zip entries: "
+               << ErrorCodeString(iter_status);
+    return false;
+  }
+
+  *result = std::move(pem_keys);
+  return true;
+}
+
+}  // namespace
+
+namespace chromeos_update_engine {
+bool CertificateParserAndroid::ReadPublicKeysFromCertificates(
+    const std::string& path,
+    std::vector<std::unique_ptr<EVP_PKEY, decltype(&EVP_PKEY_free)>>*
+        out_public_keys) {
+  out_public_keys->clear();
+
+  ZipArchiveHandle handle;
+  if (int32_t open_status = OpenArchive(path.c_str(), &handle);
+      open_status != 0) {
+    LOG(ERROR) << "Failed to open " << path << ": "
+               << ErrorCodeString(open_status);
+    return false;
+  }
+
+  std::vector<std::vector<uint8_t>> pem_certs;
+  if (!IterateZipEntriesAndSearchForKeys(handle, &pem_certs)) {
+    CloseArchive(handle);
+    return false;
+  }
+  CloseArchive(handle);
+
+  // Convert the certificates into public keys. Stop and return false if we
+  // encounter an error.
+  std::vector<std::unique_ptr<EVP_PKEY, decltype(&EVP_PKEY_free)>> result;
+  for (const auto& cert : pem_certs) {
+    std::unique_ptr<BIO, decltype(&BIO_free)> input(
+        BIO_new_mem_buf(cert.data(), cert.size()), BIO_free);
+
+    std::unique_ptr<X509, decltype(&X509_free)> x509(
+        PEM_read_bio_X509(input.get(), nullptr, nullptr, nullptr), X509_free);
+    if (!x509) {
+      LOG(ERROR) << "Failed to read x509 certificate";
+      return false;
+    }
+
+    std::unique_ptr<EVP_PKEY, decltype(&EVP_PKEY_free)> public_key(
+        X509_get_pubkey(x509.get()), EVP_PKEY_free);
+    if (!public_key) {
+      LOG(ERROR) << "Failed to extract the public key from x509 certificate";
+      return false;
+    }
+    result.push_back(std::move(public_key));
+  }
+
+  *out_public_keys = std::move(result);
+  return true;
+}
+
+std::unique_ptr<CertificateParserInterface> CreateCertificateParser() {
+  return std::make_unique<CertificateParserAndroid>();
+}
+
+}  // namespace chromeos_update_engine
diff --git a/payload_consumer/certificate_parser_android.h b/payload_consumer/certificate_parser_android.h
new file mode 100644
index 0000000..ccb9293
--- /dev/null
+++ b/payload_consumer/certificate_parser_android.h
@@ -0,0 +1,46 @@
+//
+// Copyright (C) 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_CERTIFICATE_PARSER_ANDROID_H_
+#define UPDATE_ENGINE_CERTIFICATE_PARSER_ANDROID_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include <base/macros.h>
+
+#include "payload_consumer/certificate_parser_interface.h"
+
+namespace chromeos_update_engine {
+// This class parses the certificates from a zip file, because the Android
+// build system stores the certs in otacerts.zip.
+class CertificateParserAndroid : public CertificateParserInterface {
+ public:
+  CertificateParserAndroid() = default;
+
+  bool ReadPublicKeysFromCertificates(
+      const std::string& path,
+      std::vector<std::unique_ptr<EVP_PKEY, decltype(&EVP_PKEY_free)>>*
+          out_public_keys) override;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(CertificateParserAndroid);
+};
+
+}  // namespace chromeos_update_engine
+
+#endif
diff --git a/payload_consumer/certificate_parser_android_unittest.cc b/payload_consumer/certificate_parser_android_unittest.cc
new file mode 100644
index 0000000..e300414
--- /dev/null
+++ b/payload_consumer/certificate_parser_android_unittest.cc
@@ -0,0 +1,61 @@
+//
+// Copyright (C) 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_consumer/certificate_parser_interface.h"
+
+#include <string>
+
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+
+#include "update_engine/common/hash_calculator.h"
+#include "update_engine/common/test_utils.h"
+#include "update_engine/common/utils.h"
+#include "update_engine/payload_consumer/payload_verifier.h"
+#include "update_engine/payload_generator/payload_signer.h"
+
+namespace chromeos_update_engine {
+
+extern const char* kUnittestPrivateKeyPath;
+const char* kUnittestOtacertsPath = "otacerts.zip";
+
+TEST(CertificateParserAndroidTest, ParseZipArchive) {
+  std::string ota_cert =
+      test_utils::GetBuildArtifactsPath(kUnittestOtacertsPath);
+  ASSERT_TRUE(utils::FileExists(ota_cert.c_str()));
+
+  std::vector<std::unique_ptr<EVP_PKEY, decltype(&EVP_PKEY_free)>> keys;
+  auto parser = CreateCertificateParser();
+  ASSERT_TRUE(parser->ReadPublicKeysFromCertificates(ota_cert, &keys));
+  ASSERT_EQ(1u, keys.size());
+}
+
+TEST(CertificateParserAndroidTest, VerifySignature) {
+  brillo::Blob hash_blob;
+  ASSERT_TRUE(HashCalculator::RawHashOfData({'x'}, &hash_blob));
+  brillo::Blob sig_blob;
+  ASSERT_TRUE(PayloadSigner::SignHash(
+      hash_blob,
+      test_utils::GetBuildArtifactsPath(kUnittestPrivateKeyPath),
+      &sig_blob));
+
+  auto verifier = PayloadVerifier::CreateInstanceFromZipPath(
+      test_utils::GetBuildArtifactsPath(kUnittestOtacertsPath));
+  ASSERT_TRUE(verifier != nullptr);
+  ASSERT_TRUE(verifier->VerifyRawSignature(sig_blob, hash_blob, nullptr));
+}
+
+}  // namespace chromeos_update_engine
diff --git a/payload_consumer/certificate_parser_interface.h b/payload_consumer/certificate_parser_interface.h
new file mode 100644
index 0000000..dad23d2
--- /dev/null
+++ b/payload_consumer/certificate_parser_interface.h
@@ -0,0 +1,44 @@
+//
+// Copyright (C) 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_CERTIFICATE_PARSER_INTERFACE_H_
+#define UPDATE_ENGINE_CERTIFICATE_PARSER_INTERFACE_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include <openssl/pem.h>
+
+namespace chromeos_update_engine {
+
+// This class parses the PEM-encoded X509 certificates from |path| and
+// passes the parsed public keys to the caller.
+class CertificateParserInterface {
+ public:
+  virtual ~CertificateParserInterface() = default;
+
+  virtual bool ReadPublicKeysFromCertificates(
+      const std::string& path,
+      std::vector<std::unique_ptr<EVP_PKEY, decltype(&EVP_PKEY_free)>>*
+          out_public_keys) = 0;
+};
+
+std::unique_ptr<CertificateParserInterface> CreateCertificateParser();
+
+}  // namespace chromeos_update_engine
+
+#endif
diff --git a/payload_consumer/certificate_parser_stub.cc b/payload_consumer/certificate_parser_stub.cc
new file mode 100644
index 0000000..a365ab8
--- /dev/null
+++ b/payload_consumer/certificate_parser_stub.cc
@@ -0,0 +1,31 @@
+//
+// Copyright (C) 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_consumer/certificate_parser_stub.h"
+
+namespace chromeos_update_engine {
+bool CertificateParserStub::ReadPublicKeysFromCertificates(
+    const std::string& path,
+    std::vector<std::unique_ptr<EVP_PKEY, decltype(&EVP_PKEY_free)>>*
+        out_public_keys) {
+  return true;
+}
+
+std::unique_ptr<CertificateParserInterface> CreateCertificateParser() {
+  return std::make_unique<CertificateParserStub>();
+}
+
+}  // namespace chromeos_update_engine
diff --git a/payload_consumer/certificate_parser_stub.h b/payload_consumer/certificate_parser_stub.h
new file mode 100644
index 0000000..a51c2c6
--- /dev/null
+++ b/payload_consumer/certificate_parser_stub.h
@@ -0,0 +1,44 @@
+//
+// Copyright (C) 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_CERTIFICATE_PARSER_STUB_H_
+#define UPDATE_ENGINE_CERTIFICATE_PARSER_STUB_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include <base/macros.h>
+
+#include "update_engine/payload_consumer/certificate_parser_interface.h"
+
+namespace chromeos_update_engine {
+class CertificateParserStub : public CertificateParserInterface {
+ public:
+  CertificateParserStub() = default;
+
+  bool ReadPublicKeysFromCertificates(
+      const std::string& path,
+      std::vector<std::unique_ptr<EVP_PKEY, decltype(&EVP_PKEY_free)>>*
+          out_public_keys) override;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(CertificateParserStub);
+};
+
+}  // namespace chromeos_update_engine
+
+#endif  // UPDATE_ENGINE_CERTIFICATE_PARSER_STUB_H_
diff --git a/payload_consumer/delta_performer.cc b/payload_consumer/delta_performer.cc
index 95dfbcc..7375d37 100644
--- a/payload_consumer/delta_performer.cc
+++ b/payload_consumer/delta_performer.cc
@@ -23,6 +23,7 @@
 #include <cstring>
 #include <map>
 #include <memory>
+#include <set>
 #include <string>
 #include <utility>
 #include <vector>
@@ -40,15 +41,19 @@
 #include <puffin/puffpatch.h>
 
 #include "update_engine/common/constants.h"
+#include "update_engine/common/error_code.h"
+#include "update_engine/common/error_code_utils.h"
 #include "update_engine/common/hardware_interface.h"
 #include "update_engine/common/prefs_interface.h"
 #include "update_engine/common/subprocess.h"
 #include "update_engine/common/terminator.h"
 #include "update_engine/payload_consumer/bzip_extent_writer.h"
 #include "update_engine/payload_consumer/cached_file_descriptor.h"
+#include "update_engine/payload_consumer/certificate_parser_interface.h"
 #include "update_engine/payload_consumer/download_action.h"
 #include "update_engine/payload_consumer/extent_reader.h"
 #include "update_engine/payload_consumer/extent_writer.h"
+#include "update_engine/payload_consumer/partition_update_generator_interface.h"
 #if USE_FEC
 #include "update_engine/payload_consumer/fec_file_descriptor.h"
 #endif  // USE_FEC
@@ -315,8 +320,14 @@
       install_plan_->partitions.size() - partitions_.size();
   const InstallPlan::Partition& install_part =
       install_plan_->partitions[num_previous_partitions + current_partition_];
-  // Open source fds if we have a delta payload.
-  if (payload_->type == InstallPayloadType::kDelta) {
+  // Open source fds if we have a delta payload, or for partitions in the
+  // partial update.
+  bool source_may_exist = manifest_.partial_update() ||
+                          payload_->type == InstallPayloadType::kDelta;
+  // We shouldn't open the source partition in certain cases, e.g. some dynamic
+  // partitions in delta payload, partitions included in the full payload for
+  // partial updates. Use the source size as the indicator.
+  if (source_may_exist && install_part.source_size > 0) {
     source_path_ = install_part.source_path;
     int err;
     source_fd_ = OpenFile(source_path_.c_str(), O_RDONLY, false, &err);
@@ -495,17 +506,19 @@
                  << "Trusting metadata size in payload = " << metadata_size_;
   }
 
-  string public_key;
-  if (!GetPublicKey(&public_key)) {
-    LOG(ERROR) << "Failed to get public key.";
+  auto [payload_verifier, perform_verification] = CreatePayloadVerifier();
+  if (!payload_verifier) {
+    LOG(ERROR) << "Failed to create payload verifier.";
     *error = ErrorCode::kDownloadMetadataSignatureVerificationError;
-    return MetadataParseResult::kError;
+    if (perform_verification) {
+      return MetadataParseResult::kError;
+    }
+  } else {
+    // We have the full metadata in |payload|. Verify its integrity
+    // and authenticity based on the information we have in Omaha response.
+    *error = payload_metadata_.ValidateMetadataSignature(
+        payload, payload_->metadata_signature, *payload_verifier);
   }
-
-  // We have the full metadata in |payload|. Verify its integrity
-  // and authenticity based on the information we have in Omaha response.
-  *error = payload_metadata_.ValidateMetadataSignature(
-      payload, payload_->metadata_signature, public_key);
   if (*error != ErrorCode::kSuccess) {
     if (install_plan_->hash_checks_mandatory) {
       // The autoupdate_CatchBadSignatures test checks for this string
@@ -574,6 +587,9 @@
     if ((*error = ValidateManifest()) != ErrorCode::kSuccess)
       return false;
     manifest_valid_ = true;
+    if (!install_plan_->is_resume) {
+      prefs_->SetString(kPrefsManifestBytes, {buffer_.begin(), buffer_.end()});
+    }
 
     // Clear the download buffer.
     DiscardBuffer(false, metadata_size_);
@@ -726,7 +742,7 @@
     CheckpointUpdateProgress(false);
   }
 
-  // In major version 2, we don't add dummy operation to the payload.
+  // In major version 2, we don't add an unused operation to the payload.
   // If we already extracted the signature we should skip this step.
   if (manifest_.has_signatures_offset() && manifest_.has_signatures_size() &&
       signatures_message_data_.empty()) {
@@ -767,7 +783,61 @@
   for (const PartitionUpdate& partition : manifest_.partitions()) {
     partitions_.push_back(partition);
   }
+
+  // For VAB and partial updates, the partition preparation will copy the
+  // dynamic partitions metadata to the target metadata slot, and rename the
+  // slot suffix of the partitions in the metadata.
+  if (install_plan_->target_slot != BootControlInterface::kInvalidSlot) {
+    uint64_t required_size = 0;
+    if (!PreparePartitionsForUpdate(&required_size)) {
+      if (required_size > 0) {
+        *error = ErrorCode::kNotEnoughSpace;
+      } else {
+        *error = ErrorCode::kInstallDeviceOpenError;
+      }
+      return false;
+    }
+  }
+
+  // Partitions in manifest are no longer needed after preparing partitions.
   manifest_.clear_partitions();
+  // TODO(xunchang) TBD: allow partial update only on devices with dynamic
+  // partition.
+  if (manifest_.partial_update()) {
+    std::set<std::string> touched_partitions;
+    for (const auto& partition_update : partitions_) {
+      touched_partitions.insert(partition_update.partition_name());
+    }
+
+    auto generator = partition_update_generator::Create(boot_control_,
+                                                        manifest_.block_size());
+    std::vector<PartitionUpdate> untouched_static_partitions;
+    TEST_AND_RETURN_FALSE(
+        generator->GenerateOperationsForPartitionsNotInPayload(
+            install_plan_->source_slot,
+            install_plan_->target_slot,
+            touched_partitions,
+            &untouched_static_partitions));
+    partitions_.insert(partitions_.end(),
+                       untouched_static_partitions.begin(),
+                       untouched_static_partitions.end());
+
+    // Save the untouched dynamic partitions in install plan.
+    std::vector<std::string> dynamic_partitions;
+    if (!boot_control_->GetDynamicPartitionControl()
+             ->ListDynamicPartitionsForSlot(install_plan_->source_slot,
+                                            &dynamic_partitions)) {
+      LOG(ERROR) << "Failed to load dynamic partitions from slot "
+                 << install_plan_->source_slot;
+      return false;
+    }
+    install_plan_->untouched_dynamic_partitions.clear();
+    for (const auto& name : dynamic_partitions) {
+      if (touched_partitions.find(name) == touched_partitions.end()) {
+        install_plan_->untouched_dynamic_partitions.push_back(name);
+      }
+    }
+  }
 
   // Fill in the InstallPlan::partitions based on the partitions from the
   // payload.
@@ -842,13 +912,9 @@
     install_plan_->partitions.push_back(install_part);
   }
 
-  if (install_plan_->target_slot != BootControlInterface::kInvalidSlot) {
-    if (!InitPartitionMetadata()) {
-      *error = ErrorCode::kInstallDeviceOpenError;
-      return false;
-    }
-  }
-
+  // TODO(xunchang) only need to load the partitions for those in payload.
+  // Because we have already loaded the others once when generating SOURCE_COPY
+  // operations.
   if (!install_plan_->LoadPartitionsFromSlots(boot_control_)) {
     LOG(ERROR) << "Unable to determine all the partition devices.";
     *error = ErrorCode::kInstallDeviceOpenError;
@@ -858,45 +924,57 @@
   return true;
 }
 
-bool DeltaPerformer::InitPartitionMetadata() {
-  BootControlInterface::PartitionMetadata partition_metadata;
-  if (manifest_.has_dynamic_partition_metadata()) {
-    std::map<string, uint64_t> partition_sizes;
-    for (const auto& partition : install_plan_->partitions) {
-      partition_sizes.emplace(partition.name, partition.target_size);
-    }
-    for (const auto& group : manifest_.dynamic_partition_metadata().groups()) {
-      BootControlInterface::PartitionMetadata::Group e;
-      e.name = group.name();
-      e.size = group.size();
-      for (const auto& partition_name : group.partition_names()) {
-        auto it = partition_sizes.find(partition_name);
-        if (it == partition_sizes.end()) {
-          // TODO(tbao): Support auto-filling partition info for framework-only
-          // OTA.
-          LOG(ERROR) << "dynamic_partition_metadata contains partition "
-                     << partition_name
-                     << " but it is not part of the manifest. "
-                     << "This is not supported.";
-          return false;
-        }
-        e.partitions.push_back({partition_name, it->second});
-      }
-      partition_metadata.groups.push_back(std::move(e));
-    }
+bool DeltaPerformer::PreparePartitionsForUpdate(uint64_t* required_size) {
+  // Call static PreparePartitionsForUpdate with hash from
+  // kPrefsUpdateCheckResponseHash to ensure hash of payload that space is
+  // preallocated for is the same as the hash of payload being applied.
+  string update_check_response_hash;
+  ignore_result(prefs_->GetString(kPrefsUpdateCheckResponseHash,
+                                  &update_check_response_hash));
+  return PreparePartitionsForUpdate(prefs_,
+                                    boot_control_,
+                                    install_plan_->target_slot,
+                                    manifest_,
+                                    update_check_response_hash,
+                                    required_size);
+}
+
+bool DeltaPerformer::PreparePartitionsForUpdate(
+    PrefsInterface* prefs,
+    BootControlInterface* boot_control,
+    BootControlInterface::Slot target_slot,
+    const DeltaArchiveManifest& manifest,
+    const std::string& update_check_response_hash,
+    uint64_t* required_size) {
+  string last_hash;
+  ignore_result(
+      prefs->GetString(kPrefsDynamicPartitionMetadataUpdated, &last_hash));
+
+  bool is_resume = !update_check_response_hash.empty() &&
+                   last_hash == update_check_response_hash;
+
+  if (is_resume) {
+    LOG(INFO) << "Using previously prepared partitions for update. hash = "
+              << last_hash;
+  } else {
+    LOG(INFO) << "Preparing partitions for new update. last hash = "
+              << last_hash << ", new hash = " << update_check_response_hash;
   }
 
-  bool metadata_updated = false;
-  prefs_->GetBoolean(kPrefsDynamicPartitionMetadataUpdated, &metadata_updated);
-  if (!boot_control_->InitPartitionMetadata(
-          install_plan_->target_slot, partition_metadata, !metadata_updated)) {
+  if (!boot_control->GetDynamicPartitionControl()->PreparePartitionsForUpdate(
+          boot_control->GetCurrentSlot(),
+          target_slot,
+          manifest,
+          !is_resume /* should update */,
+          required_size)) {
     LOG(ERROR) << "Unable to initialize partition metadata for slot "
-               << BootControlInterface::SlotName(install_plan_->target_slot);
+               << BootControlInterface::SlotName(target_slot);
     return false;
   }
-  TEST_AND_RETURN_FALSE(
-      prefs_->SetBoolean(kPrefsDynamicPartitionMetadataUpdated, true));
-  LOG(INFO) << "InitPartitionMetadata done.";
+
+  TEST_AND_RETURN_FALSE(prefs->SetString(kPrefsDynamicPartitionMetadataUpdated,
+                                         update_check_response_hash));
+  LOG(INFO) << "PreparePartitionsForUpdate done.";
 
   return true;
 }
@@ -1031,7 +1109,21 @@
   if (operation.has_dst_length())
     TEST_AND_RETURN_FALSE(operation.dst_length() % block_size_ == 0);
 
+  TEST_AND_RETURN_FALSE(source_fd_ != nullptr);
+
+  // The device may optimize the SOURCE_COPY operation.
+  // Being this a device-specific optimization let DynamicPartitionController
+  // decide if the operation should be skipped.
+  const PartitionUpdate& partition = partitions_[current_partition_];
+  const auto& partition_control = boot_control_->GetDynamicPartitionControl();
+
+  InstallOperation buf;
+  bool should_optimize = partition_control->OptimizeOperation(
+      partition.partition_name(), operation, &buf);
+  const InstallOperation& optimized = should_optimize ? buf : operation;
+
   if (operation.has_src_sha256_hash()) {
+    bool read_ok;
     brillo::Blob source_hash;
     brillo::Blob expected_source_hash(operation.src_sha256_hash().begin(),
                                       operation.src_sha256_hash().end());
@@ -1040,15 +1132,30 @@
     // device doesn't match or there was an error reading the source partition.
     // Note that this code will also fall back if writing the target partition
     // fails.
-    bool read_ok = fd_utils::CopyAndHashExtents(source_fd_,
-                                                operation.src_extents(),
-                                                target_fd_,
-                                                operation.dst_extents(),
-                                                block_size_,
-                                                &source_hash);
+    if (should_optimize) {
+      // Hash operation.src_extents(), then copy optimized.src_extents to
+      // optimized.dst_extents.
+      read_ok =
+          fd_utils::ReadAndHashExtents(
+              source_fd_, operation.src_extents(), block_size_, &source_hash) &&
+          fd_utils::CopyAndHashExtents(source_fd_,
+                                       optimized.src_extents(),
+                                       target_fd_,
+                                       optimized.dst_extents(),
+                                       block_size_,
+                                       nullptr /* skip hashing */);
+    } else {
+      read_ok = fd_utils::CopyAndHashExtents(source_fd_,
+                                             operation.src_extents(),
+                                             target_fd_,
+                                             operation.dst_extents(),
+                                             block_size_,
+                                             &source_hash);
+    }
     if (read_ok && expected_source_hash == source_hash)
       return true;
-
+    LOG(WARNING) << "Source hash from RAW device mismatched, attempting to "
+                    "correct using ECC";
     if (!OpenCurrentECCPartition()) {
       // The following function call will return false since the source hash
       // mismatches, but we still want to call it so it prints the appropriate
@@ -1061,13 +1168,25 @@
                  << ", expected "
                  << base::HexEncode(expected_source_hash.data(),
                                     expected_source_hash.size());
-
-    TEST_AND_RETURN_FALSE(fd_utils::CopyAndHashExtents(source_ecc_fd_,
-                                                       operation.src_extents(),
-                                                       target_fd_,
-                                                       operation.dst_extents(),
-                                                       block_size_,
-                                                       &source_hash));
+    if (should_optimize) {
+      TEST_AND_RETURN_FALSE(fd_utils::ReadAndHashExtents(
+          source_ecc_fd_, operation.src_extents(), block_size_, &source_hash));
+      TEST_AND_RETURN_FALSE(
+          fd_utils::CopyAndHashExtents(source_ecc_fd_,
+                                       optimized.src_extents(),
+                                       target_fd_,
+                                       optimized.dst_extents(),
+                                       block_size_,
+                                       nullptr /* skip hashing */));
+    } else {
+      TEST_AND_RETURN_FALSE(
+          fd_utils::CopyAndHashExtents(source_ecc_fd_,
+                                       operation.src_extents(),
+                                       target_fd_,
+                                       operation.dst_extents(),
+                                       block_size_,
+                                       &source_hash));
+    }
     TEST_AND_RETURN_FALSE(
         ValidateSourceHash(source_hash, operation, source_ecc_fd_, error));
     // At this point reading from the the error corrected device worked, but
@@ -1079,19 +1198,20 @@
     // corrected device first since we can't verify the block in the raw device
     // at this point, but we fall back to the raw device since the error
     // corrected device can be shorter or not available.
+
     if (OpenCurrentECCPartition() &&
         fd_utils::CopyAndHashExtents(source_ecc_fd_,
-                                     operation.src_extents(),
+                                     optimized.src_extents(),
                                      target_fd_,
-                                     operation.dst_extents(),
+                                     optimized.dst_extents(),
                                      block_size_,
                                      nullptr)) {
       return true;
     }
     TEST_AND_RETURN_FALSE(fd_utils::CopyAndHashExtents(source_fd_,
-                                                       operation.src_extents(),
+                                                       optimized.src_extents(),
                                                        target_fd_,
-                                                       operation.dst_extents(),
+                                                       optimized.dst_extents(),
                                                        block_size_,
                                                        nullptr));
   }
@@ -1100,6 +1220,11 @@
 
 FileDescriptorPtr DeltaPerformer::ChooseSourceFD(
     const InstallOperation& operation, ErrorCode* error) {
+  if (source_fd_ == nullptr) {
+    LOG(ERROR) << "ChooseSourceFD fail: source_fd_ == nullptr";
+    return nullptr;
+  }
+
   if (!operation.has_src_sha256_hash()) {
     // When the operation doesn't include a source hash, we attempt the error
     // corrected device first since we can't verify the block in the raw device
@@ -1396,8 +1521,7 @@
   // blob and the signed sha-256 context.
   LOG_IF(WARNING,
          !prefs_->SetString(kPrefsUpdateStateSignatureBlob,
-                            string(signatures_message_data_.begin(),
-                                   signatures_message_data_.end())))
+                            signatures_message_data_))
       << "Unable to store the signature blob.";
 
   LOG(INFO) << "Extracted signature data of size "
@@ -1421,14 +1545,35 @@
     return brillo::data_encoding::Base64Decode(install_plan_->public_key_rsa,
                                                out_public_key);
   }
-
+  LOG(INFO) << "No public keys found for verification.";
   return true;
 }
 
-ErrorCode DeltaPerformer::ValidateManifest() {
-  // Perform assorted checks to sanity check the manifest, make sure it
-  // matches data from other sources, and that it is a supported version.
+std::pair<std::unique_ptr<PayloadVerifier>, bool>
+DeltaPerformer::CreatePayloadVerifier() {
+  if (utils::FileExists(update_certificates_path_.c_str())) {
+    LOG(INFO) << "Verifying using certificates: " << update_certificates_path_;
+    return {
+        PayloadVerifier::CreateInstanceFromZipPath(update_certificates_path_),
+        true};
+  }
 
+  string public_key;
+  if (!GetPublicKey(&public_key)) {
+    LOG(ERROR) << "Failed to read public key";
+    return {nullptr, true};
+  }
+
+  // Skips the verification if the public key is empty.
+  if (public_key.empty()) {
+    return {nullptr, false};
+  }
+  return {PayloadVerifier::CreateInstance(public_key), true};
+}
+
+ErrorCode DeltaPerformer::ValidateManifest() {
+  // Perform assorted checks to validate the manifest, make sure it
+  // matches data from other sources, and that it is a supported version.
   bool has_old_fields = std::any_of(manifest_.partitions().begin(),
                                     manifest_.partitions().end(),
                                     [](const PartitionUpdate& partition) {
@@ -1436,9 +1581,12 @@
                                     });
 
   // The presence of an old partition hash is the sole indicator for a delta
-  // update.
+  // update. Also, always treat the partial update as delta so that we can
+  // perform the minor version check correctly.
   InstallPayloadType actual_payload_type =
-      has_old_fields ? InstallPayloadType::kDelta : InstallPayloadType::kFull;
+      (has_old_fields || manifest_.partial_update())
+          ? InstallPayloadType::kDelta
+          : InstallPayloadType::kFull;
 
   if (payload_->type == InstallPayloadType::kUnknown) {
     LOG(INFO) << "Detected a '"
@@ -1453,8 +1601,8 @@
                << "' payload.";
     return ErrorCode::kPayloadMismatchedType;
   }
-
   // Check that the minor version is compatible.
+  // TODO(xunchang) increment minor version & add check for partial update
   if (actual_payload_type == InstallPayloadType::kFull) {
     if (manifest_.minor_version() != kFullPayloadMinorVersion) {
       LOG(ERROR) << "Manifest contains minor version "
@@ -1475,6 +1623,88 @@
     }
   }
 
+  ErrorCode error_code = CheckTimestampError();
+  if (error_code != ErrorCode::kSuccess) {
+    if (error_code == ErrorCode::kPayloadTimestampError) {
+      if (!hardware_->AllowDowngrade()) {
+        return ErrorCode::kPayloadTimestampError;
+      }
+      LOG(INFO) << "The current OS build allows downgrade, continuing to apply"
+                   " the payload with an older timestamp.";
+    } else {
+      LOG(ERROR) << "Timestamp check returned "
+                 << utils::ErrorCodeToString(error_code);
+      return error_code;
+    }
+  }
+
+  // TODO(crbug.com/37661) we should be adding more and more manifest checks,
+  // such as partition boundaries, etc.
+
+  return ErrorCode::kSuccess;
+}
+
+ErrorCode DeltaPerformer::CheckTimestampError() const {
+  bool is_partial_update =
+      manifest_.has_partial_update() && manifest_.partial_update();
+  const auto& partitions = manifest_.partitions();
+
+  // Check version field for a given PartitionUpdate object. If an error
+  // is encountered, set |error_code| accordingly. If downgrade is detected,
+  // |downgrade_detected| is set. Return true if the program should continue to
+  // check the next partition or not, or false if it should exit early due to
+  // errors.
+  auto&& timestamp_valid = [this](const PartitionUpdate& partition,
+                                  bool allow_empty_version,
+                                  bool* downgrade_detected) -> ErrorCode {
+    if (!partition.has_version()) {
+      if (allow_empty_version) {
+        return ErrorCode::kSuccess;
+      }
+      LOG(ERROR)
+          << "PartitionUpdate " << partition.partition_name()
+          << " does not have a version field. Not allowed in partial updates.";
+      return ErrorCode::kDownloadManifestParseError;
+    }
+
+    auto error_code = hardware_->IsPartitionUpdateValid(
+        partition.partition_name(), partition.version());
+    switch (error_code) {
+      case ErrorCode::kSuccess:
+        break;
+      case ErrorCode::kPayloadTimestampError:
+        *downgrade_detected = true;
+        LOG(WARNING) << "PartitionUpdate " << partition.partition_name()
+                     << " has an older version than partition on device.";
+        break;
+      default:
+        LOG(ERROR) << "IsPartitionUpdateValid(" << partition.partition_name()
+                   << ") returned " << utils::ErrorCodeToString(error_code);
+        break;
+    }
+    return error_code;
+  };
+
+  bool downgrade_detected = false;
+
+  if (is_partial_update) {
+    // For partial updates, all partitions MUST have valid timestamps,
+    // But max_timestamp can be empty
+    for (const auto& partition : partitions) {
+      auto error_code = timestamp_valid(
+          partition, false /* allow_empty_version */, &downgrade_detected);
+      if (error_code != ErrorCode::kSuccess &&
+          error_code != ErrorCode::kPayloadTimestampError) {
+        return error_code;
+      }
+    }
+    if (downgrade_detected) {
+      return ErrorCode::kPayloadTimestampError;
+    }
+    return ErrorCode::kSuccess;
+  }
+
+  // For non-partial updates, check max_timestamp first.
   if (manifest_.max_timestamp() < hardware_->GetBuildTimestamp()) {
     LOG(ERROR) << "The current OS build timestamp ("
                << hardware_->GetBuildTimestamp()
@@ -1482,10 +1712,18 @@
                << manifest_.max_timestamp() << ")";
     return ErrorCode::kPayloadTimestampError;
   }
-
-  // TODO(crbug.com/37661) we should be adding more and more manifest checks,
-  // such as partition boundaries, etc.
-
+  // Otherwise... partitions can have empty timestamps.
+  for (const auto& partition : partitions) {
+    auto error_code = timestamp_valid(
+        partition, true /* allow_empty_version */, &downgrade_detected);
+    if (error_code != ErrorCode::kSuccess &&
+        error_code != ErrorCode::kPayloadTimestampError) {
+      return error_code;
+    }
+  }
+  if (downgrade_detected) {
+    return ErrorCode::kPayloadTimestampError;
+  }
   return ErrorCode::kSuccess;
 }
 
@@ -1505,7 +1743,7 @@
     // corresponding update should have been produced with the operation
     // hashes. So if it happens it means either we've turned operation hash
     // generation off in DeltaDiffGenerator or it's a regression of some sort.
-    // One caveat though: The last operation is a dummy signature operation
+    // One caveat though: The last operation is an unused signature operation
     // that doesn't have a hash at the time the manifest is created. So we
     // should not complaint about that operation. This operation can be
     // recognized by the fact that it's offset is mentioned in the manifest.
@@ -1563,12 +1801,6 @@
 ErrorCode DeltaPerformer::VerifyPayload(
     const brillo::Blob& update_check_response_hash,
     const uint64_t update_check_response_size) {
-  string public_key;
-  if (!GetPublicKey(&public_key)) {
-    LOG(ERROR) << "Failed to get public key.";
-    return ErrorCode::kDownloadPayloadPubKeyVerificationError;
-  }
-
   // Verifies the download size.
   if (update_check_response_size !=
       metadata_size_ + metadata_signature_size_ + buffer_offset_) {
@@ -1579,6 +1811,16 @@
     return ErrorCode::kPayloadSizeMismatchError;
   }
 
+  auto [payload_verifier, perform_verification] = CreatePayloadVerifier();
+  if (!perform_verification) {
+    LOG(WARNING) << "Not verifying signed delta payload -- missing public key.";
+    return ErrorCode::kSuccess;
+  }
+  if (!payload_verifier) {
+    LOG(ERROR) << "Failed to create the payload verifier.";
+    return ErrorCode::kDownloadPayloadPubKeyVerificationError;
+  }
+
   // Verifies the payload hash.
   TEST_AND_RETURN_VAL(ErrorCode::kDownloadPayloadVerificationError,
                       !payload_hash_calculator_.raw_hash().empty());
@@ -1586,21 +1828,13 @@
       ErrorCode::kPayloadHashMismatchError,
       payload_hash_calculator_.raw_hash() == update_check_response_hash);
 
-  // Verifies the signed payload hash.
-  if (public_key.empty()) {
-    LOG(WARNING) << "Not verifying signed delta payload -- missing public key.";
-    return ErrorCode::kSuccess;
-  }
   TEST_AND_RETURN_VAL(ErrorCode::kSignedDeltaPayloadExpectedError,
                       !signatures_message_data_.empty());
   brillo::Blob hash_data = signed_hash_calculator_.raw_hash();
   TEST_AND_RETURN_VAL(ErrorCode::kDownloadPayloadPubKeyVerificationError,
-                      PayloadVerifier::PadRSA2048SHA256Hash(&hash_data));
-  TEST_AND_RETURN_VAL(ErrorCode::kDownloadPayloadPubKeyVerificationError,
-                      !hash_data.empty());
+                      hash_data.size() == kSHA256Size);
 
-  if (!PayloadVerifier::VerifySignature(
-          signatures_message_data_, public_key, hash_data)) {
+  if (!payload_verifier->VerifySignature(signatures_message_data_, hash_data)) {
     // The autoupdate_CatchBadSignatures test checks for this string
     // in log-files. Keep in sync.
     LOG(ERROR) << "Public key verification failed, thus update failed.";
@@ -1645,7 +1879,7 @@
       resumed_update_failures > kMaxResumedUpdateFailures)
     return false;
 
-  // Sanity check the rest.
+  // Validate the rest.
   int64_t next_data_offset = -1;
   if (!(prefs->GetInt64(kPrefsUpdateStateNextDataOffset, &next_data_offset) &&
         next_data_offset >= 0))
@@ -1670,7 +1904,10 @@
   return true;
 }
 
-bool DeltaPerformer::ResetUpdateProgress(PrefsInterface* prefs, bool quick) {
+bool DeltaPerformer::ResetUpdateProgress(
+    PrefsInterface* prefs,
+    bool quick,
+    bool skip_dynamic_partititon_metadata_updated) {
   TEST_AND_RETURN_FALSE(prefs->SetInt64(kPrefsUpdateStateNextOperation,
                                         kUpdateStateOperationInvalid));
   if (!quick) {
@@ -1684,7 +1921,11 @@
     prefs->SetInt64(kPrefsResumedUpdateFailures, 0);
     prefs->Delete(kPrefsPostInstallSucceeded);
     prefs->Delete(kPrefsVerityWritten);
-    prefs->Delete(kPrefsDynamicPartitionMetadataUpdated);
+
+    if (!skip_dynamic_partititon_metadata_updated) {
+      LOG(INFO) << "Resetting recorded hash for prepared partitions.";
+      prefs->Delete(kPrefsDynamicPartitionMetadataUpdated);
+    }
   }
   return true;
 }
@@ -1758,11 +1999,7 @@
         signed_hash_calculator_.SetContext(signed_hash_context));
   }
 
-  string signature_blob;
-  if (prefs_->GetString(kPrefsUpdateStateSignatureBlob, &signature_blob)) {
-    signatures_message_data_.assign(signature_blob.begin(),
-                                    signature_blob.end());
-  }
+  prefs_->GetString(kPrefsUpdateStateSignatureBlob, &signatures_message_data_);
 
   string hash_context;
   TEST_AND_RETURN_FALSE(
diff --git a/payload_consumer/delta_performer.h b/payload_consumer/delta_performer.h
index 7860747..88076af 100644
--- a/payload_consumer/delta_performer.h
+++ b/payload_consumer/delta_performer.h
@@ -20,7 +20,9 @@
 #include <inttypes.h>
 
 #include <limits>
+#include <memory>
 #include <string>
+#include <utility>
 #include <vector>
 
 #include <base/time/time.h>
@@ -34,6 +36,7 @@
 #include "update_engine/payload_consumer/file_writer.h"
 #include "update_engine/payload_consumer/install_plan.h"
 #include "update_engine/payload_consumer/payload_metadata.h"
+#include "update_engine/payload_consumer/payload_verifier.h"
 #include "update_engine/update_metadata.pb.h"
 
 namespace chromeos_update_engine {
@@ -45,7 +48,6 @@
 
 // This class performs the actions in a delta update synchronously. The delta
 // update itself should be passed in in chunks as it is received.
-
 class DeltaPerformer : public FileWriter {
  public:
   // Defines the granularity of progress logging in terms of how many "completed
@@ -75,7 +77,9 @@
         download_delegate_(download_delegate),
         install_plan_(install_plan),
         payload_(payload),
-        interactive_(interactive) {}
+        interactive_(interactive) {
+    CHECK(install_plan_);
+  }
 
   // FileWriter's Write implementation where caller doesn't care about
   // error codes.
@@ -140,9 +144,14 @@
 
   // Resets the persistent update progress state to indicate that an update
   // can't be resumed. Performs a quick update-in-progress reset if |quick| is
-  // true, otherwise resets all progress-related update state. Returns true on
-  // success, false otherwise.
-  static bool ResetUpdateProgress(PrefsInterface* prefs, bool quick);
+  // true, otherwise resets all progress-related update state.
+  // If |skip_dynamic_partititon_metadata_updated| is true, do not reset
+  // dynamic-partition-metadata-updated.
+  // Returns true on success, false otherwise.
+  static bool ResetUpdateProgress(
+      PrefsInterface* prefs,
+      bool quick,
+      bool skip_dynamic_partititon_metadata_updated = false);
 
   // Attempts to parse the update metadata starting from the beginning of
   // |payload|. On success, returns kMetadataParseSuccess. Returns
@@ -156,6 +165,11 @@
     public_key_path_ = public_key_path;
   }
 
+  void set_update_certificates_path(
+      const std::string& update_certificates_path) {
+    update_certificates_path_ = update_certificates_path;
+  }
+
   // Return true if header parsing is finished and no errors occurred.
   bool IsHeaderParsed() const;
 
@@ -171,6 +185,24 @@
                                  const FileDescriptorPtr source_fd,
                                  ErrorCode* error);
 
+  // Initialize partitions and allocate required space for an update with the
+  // given |manifest|. |update_check_response_hash| is used to check if the
+  // previous call to this function corresponds to the same payload.
+  // - Same payload: makes no persistent modifications (no writes to disk)
+  // - Different payload: make persistent modifications (write to disk)
+  // In both cases, in-memory flags are updated. This function must be called
+  // on the payload at least once (to update in-memory flags) before writing
+  // (applying) the payload.
+  // If error due to insufficient space, |required_size| is set to the required
+  // size on the device to apply the payload.
+  static bool PreparePartitionsForUpdate(
+      PrefsInterface* prefs,
+      BootControlInterface* boot_control,
+      BootControlInterface::Slot target_slot,
+      const DeltaArchiveManifest& manifest,
+      const std::string& update_check_response_hash,
+      uint64_t* required_size);
+
  private:
   friend class DeltaPerformerTest;
   friend class DeltaPerformerIntegrationTest;
@@ -266,9 +298,25 @@
   // |out_public_key|. Returns false on failures.
   bool GetPublicKey(std::string* out_public_key);
 
+  // Creates a PayloadVerifier from the zip file containing certificates. If the
+  // path to the zip file doesn't exist, falls back to use the public key.
+  // Returns a tuple with the created PayloadVerifier and if we should perform
+  // the verification.
+  std::pair<std::unique_ptr<PayloadVerifier>, bool> CreatePayloadVerifier();
+
   // After install_plan_ is filled with partition names and sizes, initialize
   // metadata of partitions and map necessary devices before opening devices.
-  bool InitPartitionMetadata();
+  // Also see comment for the static PreparePartitionsForUpdate().
+  bool PreparePartitionsForUpdate(uint64_t* required_size);
+
+  // Check if current manifest contains timestamp errors.
+  // Return:
+  // - kSuccess if update is valid.
+  // - kPayloadTimestampError if downgrade is detected
+  // - kDownloadManifestParseError if |new_version| has an incorrect format
+  // - Other error values if the source of error is known, or kError for
+  //   a generic error on the device.
+  ErrorCode CheckTimestampError() const;
 
   // Update Engine preference store.
   PrefsInterface* prefs_;
@@ -370,12 +418,15 @@
   HashCalculator signed_hash_calculator_;
 
   // Signatures message blob extracted directly from the payload.
-  brillo::Blob signatures_message_data_;
+  std::string signatures_message_data_;
 
   // The public key to be used. Provided as a member so that tests can
   // override with test keys.
   std::string public_key_path_{constants::kUpdatePayloadPublicKeyPath};
 
+  // The path to the zip file with X509 certificates.
+  std::string update_certificates_path_{constants::kUpdateCertificatesPath};
+
   // The number of bytes received so far, used for progress tracking.
   size_t total_bytes_received_{0};
 
diff --git a/payload_consumer/delta_performer_integration_test.cc b/payload_consumer/delta_performer_integration_test.cc
index af6682a..f2aeb03 100644
--- a/payload_consumer/delta_performer_integration_test.cc
+++ b/payload_consumer/delta_performer_integration_test.cc
@@ -28,6 +28,7 @@
 #include <base/stl_util.h>
 #include <base/strings/string_util.h>
 #include <base/strings/stringprintf.h>
+#include <gmock/gmock-matchers.h>
 #include <google/protobuf/repeated_field.h>
 #include <gtest/gtest.h>
 #include <openssl/pem.h>
@@ -35,9 +36,12 @@
 #include "update_engine/common/constants.h"
 #include "update_engine/common/fake_boot_control.h"
 #include "update_engine/common/fake_hardware.h"
+#include "update_engine/common/fake_prefs.h"
 #include "update_engine/common/mock_prefs.h"
 #include "update_engine/common/test_utils.h"
 #include "update_engine/common/utils.h"
+#include "update_engine/hardware_android.h"
+#include "update_engine/payload_consumer/install_plan.h"
 #include "update_engine/payload_consumer/mock_download_action.h"
 #include "update_engine/payload_consumer/payload_constants.h"
 #include "update_engine/payload_consumer/payload_metadata.h"
@@ -61,6 +65,8 @@
 extern const char* kUnittestPublicKeyPath;
 extern const char* kUnittestPrivateKey2Path;
 extern const char* kUnittestPublicKey2Path;
+extern const char* kUnittestPrivateKeyECPath;
+extern const char* kUnittestPublicKeyECPath;
 
 static const uint32_t kDefaultKernelSize = 4096;  // Something small for a test
 // clang-format off
@@ -109,6 +115,7 @@
   kSignatureGeneratedPlaceholder,  // Insert placeholder signatures, then real.
   kSignatureGeneratedPlaceholderMismatch,  // Insert a wrong sized placeholder.
   kSignatureGeneratedShell,  // Sign the generated payload through shell cmds.
+  kSignatureGeneratedShellECKey,      // Sign with a EC key through shell cmds.
   kSignatureGeneratedShellBadKey,     // Sign with a bad key through shell cmds.
   kSignatureGeneratedShellRotateCl1,  // Rotate key, test client v1
   kSignatureGeneratedShellRotateCl2,  // Rotate key, test client v2
@@ -121,7 +128,41 @@
 
 }  // namespace
 
-class DeltaPerformerIntegrationTest : public ::testing::Test {};
+class DeltaPerformerIntegrationTest : public ::testing::Test {
+ public:
+  void RunManifestValidation(const DeltaArchiveManifest& manifest,
+                             uint64_t major_version,
+                             ErrorCode expected) {
+    FakePrefs prefs;
+    InstallPlan::Payload payload;
+    InstallPlan install_plan;
+    DeltaPerformer performer{&prefs,
+                             nullptr,
+                             &fake_hardware_,
+                             nullptr,
+                             &install_plan,
+                             &payload,
+                             false /* interactive*/};
+    // Delta performer will treat manifest as kDelta payload
+    // if it's a partial update.
+    payload.type = manifest.partial_update() ? InstallPayloadType::kDelta
+                                             : InstallPayloadType::kFull;
+
+    // The Manifest we are validating.
+    performer.manifest_.CopyFrom(manifest);
+    performer.major_payload_version_ = major_version;
+
+    EXPECT_EQ(expected, performer.ValidateManifest());
+  }
+  void AddPartition(DeltaArchiveManifest* manifest,
+                    std::string name,
+                    int timestamp) {
+    auto& partition = *manifest->add_partitions();
+    partition.set_version(std::to_string(timestamp));
+    partition.set_partition_name(name);
+  }
+  FakeHardware fake_hardware_;
+};
 
 static void CompareFilesByBlock(const string& a_file,
                                 const string& b_file,
@@ -166,29 +207,26 @@
   return true;
 }
 
-static size_t GetSignatureSize(const string& private_key_path) {
-  const brillo::Blob data(1, 'x');
-  brillo::Blob hash;
-  EXPECT_TRUE(HashCalculator::RawHashOfData(data, &hash));
-  brillo::Blob signature;
-  EXPECT_TRUE(PayloadSigner::SignHash(hash, private_key_path, &signature));
-  return signature.size();
-}
-
-static bool InsertSignaturePlaceholder(int signature_size,
+static bool InsertSignaturePlaceholder(size_t signature_size,
                                        const string& payload_path,
                                        uint64_t* out_metadata_size) {
   vector<brillo::Blob> signatures;
   signatures.push_back(brillo::Blob(signature_size, 0));
 
-  return PayloadSigner::AddSignatureToPayload(
-      payload_path, signatures, {}, payload_path, out_metadata_size);
+  return PayloadSigner::AddSignatureToPayload(payload_path,
+                                              {signature_size},
+                                              signatures,
+                                              {},
+                                              payload_path,
+                                              out_metadata_size);
 }
 
 static void SignGeneratedPayload(const string& payload_path,
                                  uint64_t* out_metadata_size) {
   string private_key_path = GetBuildArtifactsPath(kUnittestPrivateKeyPath);
-  int signature_size = GetSignatureSize(private_key_path);
+  size_t signature_size;
+  ASSERT_TRUE(PayloadSigner::GetMaximumSignatureSize(private_key_path,
+                                                     &signature_size));
   brillo::Blob metadata_hash, payload_hash;
   ASSERT_TRUE(PayloadSigner::HashPayloadForSigning(
       payload_path, {signature_size}, &payload_hash, &metadata_hash));
@@ -198,6 +236,7 @@
   ASSERT_TRUE(PayloadSigner::SignHash(
       metadata_hash, private_key_path, &metadata_signature));
   ASSERT_TRUE(PayloadSigner::AddSignatureToPayload(payload_path,
+                                                   {signature_size},
                                                    {payload_signature},
                                                    {metadata_signature},
                                                    payload_path,
@@ -206,28 +245,112 @@
       payload_path, GetBuildArtifactsPath(kUnittestPublicKeyPath)));
 }
 
-static void SignHashToFile(const string& hash_file,
-                           const string& signature_file,
-                           const string& private_key_file) {
-  brillo::Blob hash, signature;
-  ASSERT_TRUE(utils::ReadFile(hash_file, &hash));
-  ASSERT_TRUE(PayloadSigner::SignHash(hash, private_key_file, &signature));
-  ASSERT_TRUE(test_utils::WriteFileVector(signature_file, signature));
+static void SignGeneratedShellPayloadWithKeys(
+    const string& payload_path,
+    const vector<string>& private_key_paths,
+    const string& public_key_path,
+    bool verification_success) {
+  vector<string> signature_size_strings;
+  for (const auto& key_path : private_key_paths) {
+    size_t signature_size;
+    ASSERT_TRUE(
+        PayloadSigner::GetMaximumSignatureSize(key_path, &signature_size));
+    signature_size_strings.push_back(base::StringPrintf("%zu", signature_size));
+  }
+  string signature_size_string = base::JoinString(signature_size_strings, ":");
+
+  test_utils::ScopedTempFile hash_file("hash.XXXXXX"),
+      metadata_hash_file("hash.XXXXXX");
+  string delta_generator_path = GetBuildArtifactsPath("delta_generator");
+  ASSERT_EQ(0,
+            System(base::StringPrintf(
+                "%s -in_file=%s -signature_size=%s -out_hash_file=%s "
+                "-out_metadata_hash_file=%s",
+                delta_generator_path.c_str(),
+                payload_path.c_str(),
+                signature_size_string.c_str(),
+                hash_file.path().c_str(),
+                metadata_hash_file.path().c_str())));
+
+  // Sign the hash with all private keys.
+  vector<test_utils::ScopedTempFile> sig_files, metadata_sig_files;
+  vector<string> sig_file_paths, metadata_sig_file_paths;
+  for (const auto& key_path : private_key_paths) {
+    brillo::Blob hash, signature;
+    ASSERT_TRUE(utils::ReadFile(hash_file.path(), &hash));
+    ASSERT_TRUE(PayloadSigner::SignHash(hash, key_path, &signature));
+
+    test_utils::ScopedTempFile sig_file("signature.XXXXXX");
+    ASSERT_TRUE(test_utils::WriteFileVector(sig_file.path(), signature));
+    sig_file_paths.push_back(sig_file.path());
+    sig_files.push_back(std::move(sig_file));
+
+    brillo::Blob metadata_hash, metadata_signature;
+    ASSERT_TRUE(utils::ReadFile(metadata_hash_file.path(), &metadata_hash));
+    ASSERT_TRUE(
+        PayloadSigner::SignHash(metadata_hash, key_path, &metadata_signature));
+
+    test_utils::ScopedTempFile metadata_sig_file("signature.XXXXXX");
+    ASSERT_TRUE(test_utils::WriteFileVector(metadata_sig_file.path(),
+                                            metadata_signature));
+
+    metadata_sig_file_paths.push_back(metadata_sig_file.path());
+    metadata_sig_files.push_back(std::move(metadata_sig_file));
+  }
+  string sig_files_string = base::JoinString(sig_file_paths, ":");
+  string metadata_sig_files_string =
+      base::JoinString(metadata_sig_file_paths, ":");
+
+  // Add the signature to the payload.
+  ASSERT_EQ(0,
+            System(base::StringPrintf("%s --signature_size=%s -in_file=%s "
+                                      "-payload_signature_file=%s "
+                                      "-metadata_signature_file=%s "
+                                      "-out_file=%s",
+                                      delta_generator_path.c_str(),
+                                      signature_size_string.c_str(),
+                                      payload_path.c_str(),
+                                      sig_files_string.c_str(),
+                                      metadata_sig_files_string.c_str(),
+                                      payload_path.c_str())));
+
+  int verify_result = System(base::StringPrintf("%s -in_file=%s -public_key=%s",
+                                                delta_generator_path.c_str(),
+                                                payload_path.c_str(),
+                                                public_key_path.c_str()));
+
+  if (verification_success) {
+    ASSERT_EQ(0, verify_result);
+  } else {
+    ASSERT_NE(0, verify_result);
+  }
 }
 
 static void SignGeneratedShellPayload(SignatureTest signature_test,
                                       const string& payload_path) {
-  string private_key_path = GetBuildArtifactsPath(kUnittestPrivateKeyPath);
+  vector<SignatureTest> supported_test = {
+      kSignatureGeneratedShell,
+      kSignatureGeneratedShellBadKey,
+      kSignatureGeneratedShellECKey,
+      kSignatureGeneratedShellRotateCl1,
+      kSignatureGeneratedShellRotateCl2,
+  };
+  ASSERT_TRUE(std::find(supported_test.begin(),
+                        supported_test.end(),
+                        signature_test) != supported_test.end());
+
+  string private_key_path;
   if (signature_test == kSignatureGeneratedShellBadKey) {
     ASSERT_TRUE(utils::MakeTempFile("key.XXXXXX", &private_key_path, nullptr));
+  } else if (signature_test == kSignatureGeneratedShellECKey) {
+    private_key_path = GetBuildArtifactsPath(kUnittestPrivateKeyECPath);
   } else {
-    ASSERT_TRUE(signature_test == kSignatureGeneratedShell ||
-                signature_test == kSignatureGeneratedShellRotateCl1 ||
-                signature_test == kSignatureGeneratedShellRotateCl2);
+    private_key_path = GetBuildArtifactsPath(kUnittestPrivateKeyPath);
   }
   ScopedPathUnlinker key_unlinker(private_key_path);
   key_unlinker.set_should_remove(signature_test ==
                                  kSignatureGeneratedShellBadKey);
+
   // Generates a new private key that will not match the public key.
   if (signature_test == kSignatureGeneratedShellBadKey) {
     LOG(INFO) << "Generating a mismatched private key.";
@@ -246,78 +369,26 @@
     fclose(fprikey);
     RSA_free(rsa);
   }
-  int signature_size = GetSignatureSize(private_key_path);
-  test_utils::ScopedTempFile payload_hash_file("hash.XXXXXX"),
-      metadata_hash_file("hash.XXXXXX");
-  string signature_size_string;
-  if (signature_test == kSignatureGeneratedShellRotateCl1 ||
-      signature_test == kSignatureGeneratedShellRotateCl2)
-    signature_size_string =
-        base::StringPrintf("%d:%d", signature_size, signature_size);
-  else
-    signature_size_string = base::StringPrintf("%d", signature_size);
-  string delta_generator_path = GetBuildArtifactsPath("delta_generator");
-  ASSERT_EQ(0,
-            System(base::StringPrintf(
-                "%s -in_file=%s -signature_size=%s -out_hash_file=%s "
-                "-out_metadata_hash_file=%s",
-                delta_generator_path.c_str(),
-                payload_path.c_str(),
-                signature_size_string.c_str(),
-                payload_hash_file.path().c_str(),
-                metadata_hash_file.path().c_str())));
 
-  // Sign the payload hash.
-  test_utils::ScopedTempFile payload_signature_file("signature.XXXXXX");
-  SignHashToFile(payload_hash_file.path(),
-                 payload_signature_file.path(),
-                 private_key_path);
-  string payload_sig_files = payload_signature_file.path();
-  // Sign the metadata hash.
-  test_utils::ScopedTempFile metadata_signature_file("signature.XXXXXX");
-  SignHashToFile(metadata_hash_file.path(),
-                 metadata_signature_file.path(),
-                 private_key_path);
-  string metadata_sig_files = metadata_signature_file.path();
-
-  test_utils::ScopedTempFile payload_signature_file2("signature.XXXXXX");
-  test_utils::ScopedTempFile metadata_signature_file2("signature.XXXXXX");
+  vector<string> private_key_paths = {private_key_path};
   if (signature_test == kSignatureGeneratedShellRotateCl1 ||
       signature_test == kSignatureGeneratedShellRotateCl2) {
-    SignHashToFile(payload_hash_file.path(),
-                   payload_signature_file2.path(),
-                   GetBuildArtifactsPath(kUnittestPrivateKey2Path));
-    SignHashToFile(metadata_hash_file.path(),
-                   metadata_signature_file2.path(),
-                   GetBuildArtifactsPath(kUnittestPrivateKey2Path));
-    // Append second sig file to first path
-    payload_sig_files += ":" + payload_signature_file2.path();
-    metadata_sig_files += ":" + metadata_signature_file2.path();
+    private_key_paths.push_back(
+        GetBuildArtifactsPath(kUnittestPrivateKey2Path));
   }
 
-  ASSERT_EQ(
-      0,
-      System(base::StringPrintf("%s -in_file=%s -payload_signature_file=%s "
-                                "-metadata_signature_file=%s -out_file=%s",
-                                delta_generator_path.c_str(),
-                                payload_path.c_str(),
-                                payload_sig_files.c_str(),
-                                metadata_sig_files.c_str(),
-                                payload_path.c_str())));
-  int verify_result = System(base::StringPrintf(
-      "%s -in_file=%s -public_key=%s -public_key_version=%d",
-      delta_generator_path.c_str(),
-      payload_path.c_str(),
-      (signature_test == kSignatureGeneratedShellRotateCl2
-           ? GetBuildArtifactsPath(kUnittestPublicKey2Path)
-           : GetBuildArtifactsPath(kUnittestPublicKeyPath))
-          .c_str(),
-      signature_test == kSignatureGeneratedShellRotateCl2 ? 2 : 1));
-  if (signature_test == kSignatureGeneratedShellBadKey) {
-    ASSERT_NE(0, verify_result);
+  std::string public_key;
+  if (signature_test == kSignatureGeneratedShellRotateCl2) {
+    public_key = GetBuildArtifactsPath(kUnittestPublicKey2Path);
+  } else if (signature_test == kSignatureGeneratedShellECKey) {
+    public_key = GetBuildArtifactsPath(kUnittestPublicKeyECPath);
   } else {
-    ASSERT_EQ(0, verify_result);
+    public_key = GetBuildArtifactsPath(kUnittestPublicKeyPath);
   }
+
+  bool verification_success = signature_test != kSignatureGeneratedShellBadKey;
+  SignGeneratedShellPayloadWithKeys(
+      payload_path, private_key_paths, public_key, verification_success);
 }
 
 static void GenerateDeltaFile(bool full_kernel,
@@ -549,8 +620,9 @@
 
   if (signature_test == kSignatureGeneratedPlaceholder ||
       signature_test == kSignatureGeneratedPlaceholderMismatch) {
-    int signature_size =
-        GetSignatureSize(GetBuildArtifactsPath(kUnittestPrivateKeyPath));
+    size_t signature_size;
+    ASSERT_TRUE(PayloadSigner::GetMaximumSignatureSize(
+        GetBuildArtifactsPath(kUnittestPrivateKeyPath), &signature_size));
     LOG(INFO) << "Inserting placeholder signature.";
     ASSERT_TRUE(InsertSignaturePlaceholder(
         signature_size, state->delta_path, &state->metadata_size));
@@ -573,6 +645,7 @@
     LOG(INFO) << "Signing payload.";
     SignGeneratedPayload(state->delta_path, &state->metadata_size);
   } else if (signature_test == kSignatureGeneratedShell ||
+             signature_test == kSignatureGeneratedShellECKey ||
              signature_test == kSignatureGeneratedShellBadKey ||
              signature_test == kSignatureGeneratedShellRotateCl1 ||
              signature_test == kSignatureGeneratedShellRotateCl2) {
@@ -617,15 +690,16 @@
         EXPECT_EQ(2, sigs_message.signatures_size());
       else
         EXPECT_EQ(1, sigs_message.signatures_size());
-      const Signatures_Signature& signature = sigs_message.signatures(0);
-      EXPECT_EQ(1U, signature.version());
+      const Signatures::Signature& signature = sigs_message.signatures(0);
 
-      uint64_t expected_sig_data_length = 0;
       vector<string> key_paths{GetBuildArtifactsPath(kUnittestPrivateKeyPath)};
-      if (signature_test == kSignatureGeneratedShellRotateCl1 ||
-          signature_test == kSignatureGeneratedShellRotateCl2) {
+      if (signature_test == kSignatureGeneratedShellECKey) {
+        key_paths = {GetBuildArtifactsPath(kUnittestPrivateKeyECPath)};
+      } else if (signature_test == kSignatureGeneratedShellRotateCl1 ||
+                 signature_test == kSignatureGeneratedShellRotateCl2) {
         key_paths.push_back(GetBuildArtifactsPath(kUnittestPrivateKey2Path));
       }
+      uint64_t expected_sig_data_length = 0;
       EXPECT_TRUE(PayloadSigner::SignatureBlobLength(
           key_paths, &expected_sig_data_length));
       EXPECT_EQ(expected_sig_data_length, manifest.signatures_size());
@@ -701,7 +775,12 @@
       .WillRepeatedly(Return(true));
   EXPECT_CALL(prefs, SetString(kPrefsUpdateStateSignedSHA256Context, _))
       .WillRepeatedly(Return(true));
-  EXPECT_CALL(prefs, SetBoolean(kPrefsDynamicPartitionMetadataUpdated, _))
+  EXPECT_CALL(prefs, SetString(kPrefsDynamicPartitionMetadataUpdated, _))
+      .WillRepeatedly(Return(true));
+  EXPECT_CALL(prefs,
+              SetString(kPrefsManifestBytes,
+                        testing::SizeIs(state->metadata_signature_size +
+                                        state->metadata_size)))
       .WillRepeatedly(Return(true));
   if (op_hash_test == kValidOperationData && signature_test != kSignatureNone) {
     EXPECT_CALL(prefs, SetString(kPrefsUpdateStateSignatureBlob, _))
@@ -733,7 +812,9 @@
   ASSERT_TRUE(PayloadSigner::GetMetadataSignature(
       state->delta.data(),
       state->metadata_size,
-      GetBuildArtifactsPath(kUnittestPrivateKeyPath),
+      (signature_test == kSignatureGeneratedShellECKey)
+          ? GetBuildArtifactsPath(kUnittestPrivateKeyECPath)
+          : GetBuildArtifactsPath(kUnittestPrivateKeyPath),
       &install_plan->payloads[0].metadata_signature));
   EXPECT_FALSE(install_plan->payloads[0].metadata_signature.empty());
 
@@ -744,9 +825,12 @@
                                   install_plan,
                                   &install_plan->payloads[0],
                                   false /* interactive */);
-  string public_key_path = GetBuildArtifactsPath(kUnittestPublicKeyPath);
+  string public_key_path = signature_test == kSignatureGeneratedShellECKey
+                               ? GetBuildArtifactsPath(kUnittestPublicKeyECPath)
+                               : GetBuildArtifactsPath(kUnittestPublicKeyPath);
   EXPECT_TRUE(utils::FileExists(public_key_path.c_str()));
   (*performer)->set_public_key_path(public_key_path);
+  (*performer)->set_update_certificates_path("");
 
   EXPECT_EQ(static_cast<off_t>(state->image_size),
             HashCalculator::RawHashOfFile(
@@ -948,13 +1032,13 @@
   delete performer;
 }
 
-TEST(DeltaPerformerIntegrationTest, RunAsRootSmallImageTest) {
+TEST_F(DeltaPerformerIntegrationTest, RunAsRootSmallImageTest) {
   DoSmallImageTest(
       false, false, -1, kSignatureGenerator, false, kSourceMinorPayloadVersion);
 }
 
-TEST(DeltaPerformerIntegrationTest,
-     RunAsRootSmallImageSignaturePlaceholderTest) {
+TEST_F(DeltaPerformerIntegrationTest,
+       RunAsRootSmallImageSignaturePlaceholderTest) {
   DoSmallImageTest(false,
                    false,
                    -1,
@@ -963,8 +1047,8 @@
                    kSourceMinorPayloadVersion);
 }
 
-TEST(DeltaPerformerIntegrationTest,
-     RunAsRootSmallImageSignaturePlaceholderMismatchTest) {
+TEST_F(DeltaPerformerIntegrationTest,
+       RunAsRootSmallImageSignaturePlaceholderMismatchTest) {
   DeltaState state;
   GenerateDeltaFile(false,
                     false,
@@ -974,7 +1058,7 @@
                     kSourceMinorPayloadVersion);
 }
 
-TEST(DeltaPerformerIntegrationTest, RunAsRootSmallImageChunksTest) {
+TEST_F(DeltaPerformerIntegrationTest, RunAsRootSmallImageChunksTest) {
   DoSmallImageTest(false,
                    false,
                    kBlockSize,
@@ -983,31 +1067,28 @@
                    kSourceMinorPayloadVersion);
 }
 
-TEST(DeltaPerformerIntegrationTest, RunAsRootFullKernelSmallImageTest) {
+TEST_F(DeltaPerformerIntegrationTest, RunAsRootFullKernelSmallImageTest) {
   DoSmallImageTest(
       true, false, -1, kSignatureGenerator, false, kSourceMinorPayloadVersion);
 }
 
-TEST(DeltaPerformerIntegrationTest, RunAsRootFullSmallImageTest) {
-  DoSmallImageTest(true,
-                   true,
-                   -1,
-                   kSignatureGenerator,
-                   true,
-                   kFullPayloadMinorVersion);
+TEST_F(DeltaPerformerIntegrationTest, RunAsRootFullSmallImageTest) {
+  DoSmallImageTest(
+      true, true, -1, kSignatureGenerator, true, kFullPayloadMinorVersion);
 }
 
-TEST(DeltaPerformerIntegrationTest, RunAsRootSmallImageSignNoneTest) {
+TEST_F(DeltaPerformerIntegrationTest, RunAsRootSmallImageSignNoneTest) {
   DoSmallImageTest(
       false, false, -1, kSignatureNone, false, kSourceMinorPayloadVersion);
 }
 
-TEST(DeltaPerformerIntegrationTest, RunAsRootSmallImageSignGeneratedTest) {
+TEST_F(DeltaPerformerIntegrationTest, RunAsRootSmallImageSignGeneratedTest) {
   DoSmallImageTest(
       false, false, -1, kSignatureGenerated, true, kSourceMinorPayloadVersion);
 }
 
-TEST(DeltaPerformerIntegrationTest, RunAsRootSmallImageSignGeneratedShellTest) {
+TEST_F(DeltaPerformerIntegrationTest,
+       RunAsRootSmallImageSignGeneratedShellTest) {
   DoSmallImageTest(false,
                    false,
                    -1,
@@ -1016,8 +1097,18 @@
                    kSourceMinorPayloadVersion);
 }
 
-TEST(DeltaPerformerIntegrationTest,
-     RunAsRootSmallImageSignGeneratedShellBadKeyTest) {
+TEST_F(DeltaPerformerIntegrationTest,
+       RunAsRootSmallImageSignGeneratedShellECKeyTest) {
+  DoSmallImageTest(false,
+                   false,
+                   -1,
+                   kSignatureGeneratedShellECKey,
+                   false,
+                   kSourceMinorPayloadVersion);
+}
+
+TEST_F(DeltaPerformerIntegrationTest,
+       RunAsRootSmallImageSignGeneratedShellBadKeyTest) {
   DoSmallImageTest(false,
                    false,
                    -1,
@@ -1026,8 +1117,8 @@
                    kSourceMinorPayloadVersion);
 }
 
-TEST(DeltaPerformerIntegrationTest,
-     RunAsRootSmallImageSignGeneratedShellRotateCl1Test) {
+TEST_F(DeltaPerformerIntegrationTest,
+       RunAsRootSmallImageSignGeneratedShellRotateCl1Test) {
   DoSmallImageTest(false,
                    false,
                    -1,
@@ -1036,8 +1127,8 @@
                    kSourceMinorPayloadVersion);
 }
 
-TEST(DeltaPerformerIntegrationTest,
-     RunAsRootSmallImageSignGeneratedShellRotateCl2Test) {
+TEST_F(DeltaPerformerIntegrationTest,
+       RunAsRootSmallImageSignGeneratedShellRotateCl2Test) {
   DoSmallImageTest(false,
                    false,
                    -1,
@@ -1046,18 +1137,137 @@
                    kSourceMinorPayloadVersion);
 }
 
-TEST(DeltaPerformerIntegrationTest, RunAsRootSmallImageSourceOpsTest) {
-  DoSmallImageTest(false,
-                   false,
-                   -1,
-                   kSignatureGenerator,
-                   false,
-                   kSourceMinorPayloadVersion);
+TEST_F(DeltaPerformerIntegrationTest, RunAsRootSmallImageSourceOpsTest) {
+  DoSmallImageTest(
+      false, false, -1, kSignatureGenerator, false, kSourceMinorPayloadVersion);
 }
 
-TEST(DeltaPerformerIntegrationTest,
-     RunAsRootMandatoryOperationHashMismatchTest) {
+TEST_F(DeltaPerformerIntegrationTest,
+       RunAsRootMandatoryOperationHashMismatchTest) {
   DoOperationHashMismatchTest(kInvalidOperationData, true);
 }
 
+TEST_F(DeltaPerformerIntegrationTest, ValidatePerPartitionTimestampSuccess) {
+  // The Manifest we are validating.
+  DeltaArchiveManifest manifest;
+
+  fake_hardware_.SetVersion("system", "5");
+  fake_hardware_.SetVersion("product", "99");
+  fake_hardware_.SetBuildTimestamp(1);
+
+  manifest.set_minor_version(kFullPayloadMinorVersion);
+  manifest.set_max_timestamp(2);
+  AddPartition(&manifest, "system", 10);
+  AddPartition(&manifest, "product", 100);
+
+  RunManifestValidation(
+      manifest, kMaxSupportedMajorPayloadVersion, ErrorCode::kSuccess);
+}
+
+TEST_F(DeltaPerformerIntegrationTest, ValidatePerPartitionTimestampFailure) {
+  // The Manifest we are validating.
+  DeltaArchiveManifest manifest;
+
+  fake_hardware_.SetVersion("system", "5");
+  fake_hardware_.SetVersion("product", "99");
+  fake_hardware_.SetBuildTimestamp(1);
+
+  manifest.set_minor_version(kFullPayloadMinorVersion);
+  manifest.set_max_timestamp(2);
+  AddPartition(&manifest, "system", 10);
+  AddPartition(&manifest, "product", 98);
+
+  RunManifestValidation(manifest,
+                        kMaxSupportedMajorPayloadVersion,
+                        ErrorCode::kPayloadTimestampError);
+}
+
+TEST_F(DeltaPerformerIntegrationTest,
+       ValidatePerPartitionTimestampMissingTimestamp) {
+  // The Manifest we are validating.
+  DeltaArchiveManifest manifest;
+
+  fake_hardware_.SetVersion("system", "5");
+  fake_hardware_.SetVersion("product", "99");
+  fake_hardware_.SetBuildTimestamp(1);
+
+  manifest.set_minor_version(kFullPayloadMinorVersion);
+  manifest.set_max_timestamp(2);
+  AddPartition(&manifest, "system", 10);
+  {
+    auto& partition = *manifest.add_partitions();
+    // For complete updates, missing timestamp should not trigger
+    // timestamp error.
+    partition.set_partition_name("product");
+  }
+
+  RunManifestValidation(
+      manifest, kMaxSupportedMajorPayloadVersion, ErrorCode::kSuccess);
+}
+
+TEST_F(DeltaPerformerIntegrationTest,
+       ValidatePerPartitionTimestampPartialUpdatePass) {
+  fake_hardware_.SetVersion("system", "5");
+  fake_hardware_.SetVersion("product", "99");
+
+  DeltaArchiveManifest manifest;
+  manifest.set_minor_version(kPartialUpdateMinorPayloadVersion);
+  manifest.set_partial_update(true);
+  AddPartition(&manifest, "product", 100);
+  RunManifestValidation(
+      manifest, kMaxSupportedMajorPayloadVersion, ErrorCode::kSuccess);
+}
+
+TEST_F(DeltaPerformerIntegrationTest,
+       ValidatePerPartitionTimestampPartialUpdateDowngrade) {
+  fake_hardware_.SetVersion("system", "5");
+  fake_hardware_.SetVersion("product", "99");
+
+  DeltaArchiveManifest manifest;
+  manifest.set_minor_version(kPartialUpdateMinorPayloadVersion);
+  manifest.set_partial_update(true);
+  AddPartition(&manifest, "product", 98);
+  RunManifestValidation(manifest,
+                        kMaxSupportedMajorPayloadVersion,
+                        ErrorCode::kPayloadTimestampError);
+}
+
+TEST_F(DeltaPerformerIntegrationTest,
+       ValidatePerPartitionTimestampPartialUpdateMissingVersion) {
+  fake_hardware_.SetVersion("system", "5");
+  fake_hardware_.SetVersion("product", "99");
+
+  DeltaArchiveManifest manifest;
+  manifest.set_minor_version(kPartialUpdateMinorPayloadVersion);
+  manifest.set_partial_update(true);
+  {
+    auto& partition = *manifest.add_partitions();
+    // For partial updates, missing timestamp should trigger an error
+    partition.set_partition_name("product");
+    // has_version() == false.
+  }
+  RunManifestValidation(manifest,
+                        kMaxSupportedMajorPayloadVersion,
+                        ErrorCode::kDownloadManifestParseError);
+}
+
+TEST_F(DeltaPerformerIntegrationTest,
+       ValidatePerPartitionTimestampPartialUpdateEmptyVersion) {
+  fake_hardware_.SetVersion("system", "5");
+  fake_hardware_.SetVersion("product", "99");
+
+  DeltaArchiveManifest manifest;
+  manifest.set_minor_version(kPartialUpdateMinorPayloadVersion);
+  manifest.set_partial_update(true);
+  {
+    auto& partition = *manifest.add_partitions();
+    // For partial updates, invalid timestamp should trigger an error
+    partition.set_partition_name("product");
+    partition.set_version("something");
+  }
+  RunManifestValidation(manifest,
+                        kMaxSupportedMajorPayloadVersion,
+                        ErrorCode::kDownloadManifestParseError);
+}
+
 }  // namespace chromeos_update_engine
diff --git a/payload_consumer/delta_performer_unittest.cc b/payload_consumer/delta_performer_unittest.cc
index 3901195..449201c 100644
--- a/payload_consumer/delta_performer_unittest.cc
+++ b/payload_consumer/delta_performer_unittest.cc
@@ -36,9 +36,11 @@
 #include <gtest/gtest.h>
 
 #include "update_engine/common/constants.h"
+#include "update_engine/common/error_code.h"
 #include "update_engine/common/fake_boot_control.h"
 #include "update_engine/common/fake_hardware.h"
 #include "update_engine/common/fake_prefs.h"
+#include "update_engine/common/hardware_interface.h"
 #include "update_engine/common/test_utils.h"
 #include "update_engine/common/utils.h"
 #include "update_engine/payload_consumer/fake_file_descriptor.h"
@@ -161,6 +163,11 @@
     install_plan_.target_slot = 1;
     EXPECT_CALL(mock_delegate_, ShouldCancel(_))
         .WillRepeatedly(testing::Return(false));
+    performer_.set_update_certificates_path("");
+    // Set the public key corresponding to the unittest private key.
+    string public_key_path = GetBuildArtifactsPath(kUnittestPublicKeyPath);
+    EXPECT_TRUE(utils::FileExists(public_key_path.c_str()));
+    performer_.set_public_key_path(public_key_path);
   }
 
   // Test helper placed where it can easily be friended from DeltaPerformer.
@@ -179,19 +186,22 @@
 
   brillo::Blob GeneratePayload(const brillo::Blob& blob_data,
                                const vector<AnnotatedOperation>& aops,
-                               bool sign_payload) {
+                               bool sign_payload,
+                               PartitionConfig* old_part = nullptr) {
     return GeneratePayload(blob_data,
                            aops,
                            sign_payload,
                            kMaxSupportedMajorPayloadVersion,
-                           kMaxSupportedMinorPayloadVersion);
+                           kMaxSupportedMinorPayloadVersion,
+                           old_part);
   }
 
   brillo::Blob GeneratePayload(const brillo::Blob& blob_data,
                                const vector<AnnotatedOperation>& aops,
                                bool sign_payload,
                                uint64_t major_version,
-                               uint32_t minor_version) {
+                               uint32_t minor_version,
+                               PartitionConfig* old_part = nullptr) {
     test_utils::ScopedTempFile blob_file("Blob-XXXXXX");
     EXPECT_TRUE(test_utils::WriteFileVector(blob_file.path(), blob_data));
 
@@ -202,24 +212,29 @@
     PayloadFile payload;
     EXPECT_TRUE(payload.Init(config));
 
-    PartitionConfig old_part(kPartitionNameRoot);
+    std::unique_ptr<PartitionConfig> old_part_uptr;
+    if (!old_part) {
+      old_part_uptr = std::make_unique<PartitionConfig>(kPartitionNameRoot);
+      old_part = old_part_uptr.get();
+    }
     if (minor_version != kFullPayloadMinorVersion) {
       // When generating a delta payload we need to include the old partition
       // information to mark it as a delta payload.
-      old_part.path = "/dev/null";
-      old_part.size = 0;
+      if (old_part->path.empty()) {
+        old_part->path = "/dev/null";
+      }
     }
     PartitionConfig new_part(kPartitionNameRoot);
     new_part.path = "/dev/zero";
     new_part.size = 1234;
 
-    payload.AddPartition(old_part, new_part, aops);
+    payload.AddPartition(*old_part, new_part, aops, {});
 
     // We include a kernel partition without operations.
-    old_part.name = kPartitionNameKernel;
+    old_part->name = kPartitionNameKernel;
     new_part.name = kPartitionNameKernel;
     new_part.size = 0;
-    payload.AddPartition(old_part, new_part, {});
+    payload.AddPartition(*old_part, new_part, {}, {});
 
     test_utils::ScopedTempFile payload_file("Payload-XXXXXX");
     string private_key =
@@ -235,7 +250,8 @@
   }
 
   brillo::Blob GenerateSourceCopyPayload(const brillo::Blob& copied_data,
-                                         bool add_hash) {
+                                         bool add_hash,
+                                         PartitionConfig* old_part = nullptr) {
     PayloadGenerationConfig config;
     const uint64_t kDefaultBlockSize = config.block_size;
     EXPECT_EQ(0U, copied_data.size() % kDefaultBlockSize);
@@ -249,7 +265,7 @@
     if (add_hash)
       aop.op.set_src_sha256_hash(src_hash.data(), src_hash.size());
 
-    return GeneratePayload(brillo::Blob(), {aop}, false);
+    return GeneratePayload(brillo::Blob(), {aop}, false, old_part);
   }
 
   // Apply |payload_data| on partition specified in |source_path|.
@@ -316,7 +332,7 @@
     // When filling in size in manifest, exclude the size of the 24-byte header.
     uint64_t size_in_manifest = htobe64(actual_metadata_size - 24);
     performer_.Write(&size_in_manifest, 8, &error_code);
-    uint32_t signature_size = htobe64(10);
+    auto signature_size = htobe64(10);
     bool result = performer_.Write(&signature_size, 4, &error_code);
     if (expected_metadata_size == actual_metadata_size ||
         !hash_checks_mandatory) {
@@ -389,12 +405,6 @@
       expected_error = ErrorCode::kSuccess;
     }
 
-    // Use the public key corresponding to the private key used above to
-    // sign the metadata.
-    string public_key_path = GetBuildArtifactsPath(kUnittestPublicKeyPath);
-    EXPECT_TRUE(utils::FileExists(public_key_path.c_str()));
-    performer_.set_public_key_path(public_key_path);
-
     // Init actual_error with an invalid value so that we make sure
     // ParsePayloadMetadata properly populates it in all cases.
     actual_error = ErrorCode::kUmaReportedMax;
@@ -581,11 +591,16 @@
   EXPECT_TRUE(HashCalculator::RawHashOfData(expected_data, &src_hash));
   aop.op.set_src_sha256_hash(src_hash.data(), src_hash.size());
 
-  brillo::Blob payload_data = GeneratePayload(brillo::Blob(), {aop}, false);
-
   test_utils::ScopedTempFile source("Source-XXXXXX");
   EXPECT_TRUE(test_utils::WriteFileVector(source.path(), expected_data));
 
+  PartitionConfig old_part(kPartitionNameRoot);
+  old_part.path = source.path();
+  old_part.size = expected_data.size();
+
+  brillo::Blob payload_data =
+      GeneratePayload(brillo::Blob(), {aop}, false, &old_part);
+
   EXPECT_EQ(expected_data, ApplyPayload(payload_data, source.path(), true));
 }
 
@@ -604,11 +619,16 @@
   EXPECT_TRUE(HashCalculator::RawHashOfData(src, &src_hash));
   aop.op.set_src_sha256_hash(src_hash.data(), src_hash.size());
 
-  brillo::Blob payload_data = GeneratePayload(puffdiff_payload, {aop}, false);
-
   test_utils::ScopedTempFile source("Source-XXXXXX");
   EXPECT_TRUE(test_utils::WriteFileVector(source.path(), src));
 
+  PartitionConfig old_part(kPartitionNameRoot);
+  old_part.path = source.path();
+  old_part.size = src.size();
+
+  brillo::Blob payload_data =
+      GeneratePayload(puffdiff_payload, {aop}, false, &old_part);
+
   brillo::Blob dst(std::begin(dst_deflates), std::end(dst_deflates));
   EXPECT_EQ(dst, ApplyPayload(payload_data, source.path(), true));
 }
@@ -627,11 +647,16 @@
   EXPECT_TRUE(HashCalculator::RawHashOfData(expected_data, &src_hash));
   aop.op.set_src_sha256_hash(src_hash.data(), src_hash.size());
 
-  brillo::Blob payload_data = GeneratePayload(brillo::Blob(), {aop}, false);
-
   test_utils::ScopedTempFile source("Source-XXXXXX");
   EXPECT_TRUE(test_utils::WriteFileVector(source.path(), actual_data));
 
+  PartitionConfig old_part(kPartitionNameRoot);
+  old_part.path = source.path();
+  old_part.size = actual_data.size();
+
+  brillo::Blob payload_data =
+      GeneratePayload(brillo::Blob(), {aop}, false, &old_part);
+
   EXPECT_EQ(actual_data, ApplyPayload(payload_data, source.path(), false));
 }
 
@@ -650,7 +675,12 @@
   FakeFileDescriptor* fake_fec = SetFakeECCFile(kCopyOperationSize);
   brillo::Blob expected_data = FakeFileDescriptorData(kCopyOperationSize);
 
-  brillo::Blob payload_data = GenerateSourceCopyPayload(expected_data, true);
+  PartitionConfig old_part(kPartitionNameRoot);
+  old_part.path = source.path();
+  old_part.size = invalid_data.size();
+
+  brillo::Blob payload_data =
+      GenerateSourceCopyPayload(expected_data, true, &old_part);
   EXPECT_EQ(expected_data, ApplyPayload(payload_data, source.path(), true));
   // Verify that the fake_fec was actually used.
   EXPECT_EQ(1U, fake_fec->GetReadOps().size());
@@ -671,8 +701,13 @@
   // the expected.
   FakeFileDescriptor* fake_fec = SetFakeECCFile(kCopyOperationSize / 2);
 
+  PartitionConfig old_part(kPartitionNameRoot);
+  old_part.path = source.path();
+  old_part.size = expected_data.size();
+
   // The payload operation doesn't include an operation hash.
-  brillo::Blob payload_data = GenerateSourceCopyPayload(expected_data, false);
+  brillo::Blob payload_data =
+      GenerateSourceCopyPayload(expected_data, false, &old_part);
   EXPECT_EQ(expected_data, ApplyPayload(payload_data, source.path(), true));
   // Verify that the fake_fec was attempted to be used. Since the file
   // descriptor is shorter it can actually do more than one read to realize it
@@ -866,6 +901,24 @@
                         ErrorCode::kPayloadTimestampError);
 }
 
+TEST_F(DeltaPerformerTest, ValidatePerPartitionTimestampSuccess) {
+  // The Manifest we are validating.
+  DeltaArchiveManifest manifest;
+
+  manifest.set_minor_version(kFullPayloadMinorVersion);
+  manifest.set_max_timestamp(2);
+  fake_hardware_.SetBuildTimestamp(1);
+  auto& partition = *manifest.add_partitions();
+  partition.set_version("10");
+  partition.set_partition_name("system");
+  fake_hardware_.SetVersion("system", "5");
+
+  RunManifestValidation(manifest,
+                        kMaxSupportedMajorPayloadVersion,
+                        InstallPayloadType::kFull,
+                        ErrorCode::kSuccess);
+}
+
 TEST_F(DeltaPerformerTest, BrilloMetadataSignatureSizeTest) {
   unsigned int seed = time(nullptr);
   EXPECT_TRUE(performer_.Write(kDeltaMagic, sizeof(kDeltaMagic)));
@@ -968,7 +1021,6 @@
   brillo::Blob payload_data = GeneratePayload(
       {}, {}, true, kBrilloMajorPayloadVersion, kSourceMinorPayloadVersion);
   install_plan_.hash_checks_mandatory = true;
-  performer_.set_public_key_path(GetBuildArtifactsPath(kUnittestPublicKeyPath));
   payload_.size = payload_data.size();
   ErrorCode error;
   EXPECT_EQ(MetadataParseResult::kSuccess,
diff --git a/payload_consumer/download_action.cc b/payload_consumer/download_action.cc
index 45df5a9..ea99892 100644
--- a/payload_consumer/download_action.cc
+++ b/payload_consumer/download_action.cc
@@ -55,8 +55,7 @@
       code_(ErrorCode::kSuccess),
       delegate_(nullptr),
       p2p_sharing_fd_(-1),
-      p2p_visible_(true) {
-}
+      p2p_visible_(true) {}
 
 DownloadAction::~DownloadAction() {}
 
@@ -203,18 +202,76 @@
   StartDownloading();
 }
 
+bool DownloadAction::LoadCachedManifest(int64_t manifest_size) {
+  std::string cached_manifest_bytes;
+  if (!prefs_->GetString(kPrefsManifestBytes, &cached_manifest_bytes) ||
+      cached_manifest_bytes.size() <= 0) {
+    LOG(INFO) << "Cached Manifest data not found";
+    return false;
+  }
+  if (static_cast<int64_t>(cached_manifest_bytes.size()) != manifest_size) {
+    LOG(WARNING) << "Cached metadata has unexpected size: "
+                 << cached_manifest_bytes.size() << " vs. " << manifest_size;
+    return false;
+  }
+
+  ErrorCode error;
+  const bool success =
+      delta_performer_->Write(
+          cached_manifest_bytes.data(), cached_manifest_bytes.size(), &error) &&
+      delta_performer_->IsManifestValid();
+  if (success) {
+    LOG(INFO) << "Successfully parsed cached manifest";
+  } else {
+    // If parsing the cached data failed, fall back to fetching it over HTTP.
+    LOG(WARNING) << "Cached manifest data fails to load, error code:"
+                 << static_cast<int>(error) << "," << error;
+  }
+  return success;
+}
+
 void DownloadAction::StartDownloading() {
   download_active_ = true;
   http_fetcher_->ClearRanges();
+
+  if (writer_ && writer_ != delta_performer_.get()) {
+    LOG(INFO) << "Using writer for test.";
+  } else {
+    delta_performer_.reset(new DeltaPerformer(prefs_,
+                                              boot_control_,
+                                              hardware_,
+                                              delegate_,
+                                              &install_plan_,
+                                              payload_,
+                                              interactive_));
+    writer_ = delta_performer_.get();
+  }
+
   if (install_plan_.is_resume &&
       payload_ == &install_plan_.payloads[resume_payload_index_]) {
-    // Resuming an update so fetch the update manifest metadata first.
+    // Resuming an update, so try to parse the cached manifest first.
     int64_t manifest_metadata_size = 0;
     int64_t manifest_signature_size = 0;
     prefs_->GetInt64(kPrefsManifestMetadataSize, &manifest_metadata_size);
     prefs_->GetInt64(kPrefsManifestSignatureSize, &manifest_signature_size);
-    http_fetcher_->AddRange(base_offset_,
-                            manifest_metadata_size + manifest_signature_size);
+
+    // TODO(zhangkelvin) Add unittest for success and fallback route
+    if (!LoadCachedManifest(manifest_metadata_size + manifest_signature_size)) {
+      if (delta_performer_) {
+        // Create a new DeltaPerformer to reset all its state
+        delta_performer_ = std::make_unique<DeltaPerformer>(prefs_,
+                                                            boot_control_,
+                                                            hardware_,
+                                                            delegate_,
+                                                            &install_plan_,
+                                                            payload_,
+                                                            interactive_);
+        writer_ = delta_performer_.get();
+      }
+      http_fetcher_->AddRange(base_offset_,
+                              manifest_metadata_size + manifest_signature_size);
+    }
+
     // If there're remaining unprocessed data blobs, fetch them. Be careful not
     // to request data beyond the end of the payload to avoid 416 HTTP response
     // error codes.
@@ -238,18 +295,6 @@
     }
   }
 
-  if (writer_ && writer_ != delta_performer_.get()) {
-    LOG(INFO) << "Using writer for test.";
-  } else {
-    delta_performer_.reset(new DeltaPerformer(prefs_,
-                                              boot_control_,
-                                              hardware_,
-                                              delegate_,
-                                              &install_plan_,
-                                              payload_,
-                                              interactive_));
-    writer_ = delta_performer_.get();
-  }
   if (system_state_ != nullptr) {
     const PayloadStateInterface* payload_state = system_state_->payload_state();
     string file_id = utils::CalculateP2PFileId(payload_->hash, payload_->size);
diff --git a/payload_consumer/download_action.h b/payload_consumer/download_action.h
index 1777e22..6928443 100644
--- a/payload_consumer/download_action.h
+++ b/payload_consumer/download_action.h
@@ -124,19 +124,20 @@
   bool SetupP2PSharingFd();
 
   // Writes |length| bytes of payload from |data| into |file_offset|
-  // of the p2p file. Also does sanity checks; for example ensures we
+  // of the p2p file. Also does validation checks; for example ensures we
   // don't end up with a file with holes in it.
   //
   // This method does nothing if SetupP2PSharingFd() hasn't been
   // called or if CloseP2PSharingFd() has been called.
   void WriteToP2PFile(const void* data, size_t length, off_t file_offset);
 
+  // Attempts to load cached manifest data from prefs.
+  // Returns true on success, false otherwise.
+  bool LoadCachedManifest(int64_t manifest_size);
+
   // Start downloading the current payload using delta_performer.
   void StartDownloading();
 
-  // The InstallPlan passed in
-  InstallPlan install_plan_;
-
   // Pointer to the current payload in install_plan_.payloads.
   InstallPlan::Payload* payload_{nullptr};
 
diff --git a/payload_consumer/download_action_android_unittest.cc b/payload_consumer/download_action_android_unittest.cc
new file mode 100644
index 0000000..f78845f
--- /dev/null
+++ b/payload_consumer/download_action_android_unittest.cc
@@ -0,0 +1,90 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "common/mock_action_processor.h"
+#include <gmock/gmock-actions.h>
+#include <gmock/gmock-function-mocker.h>
+#include <gmock/gmock-spec-builders.h>
+
+#include "payload_consumer/install_plan.h"
+#include "update_engine/common/action_pipe.h"
+#include "update_engine/common/boot_control_stub.h"
+#include "update_engine/common/constants.h"
+#include "update_engine/common/mock_http_fetcher.h"
+#include "update_engine/common/mock_prefs.h"
+#include "update_engine/common/test_utils.h"
+#include "update_engine/payload_consumer/download_action.h"
+
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+#include <memory>
+
+namespace chromeos_update_engine {
+using testing::_;
+using testing::DoAll;
+using testing::Return;
+using testing::SetArgPointee;
+
+class DownloadActionTest : public ::testing::Test {
+ public:
+  static constexpr int64_t METADATA_SIZE = 1024;
+  static constexpr int64_t SIGNATURE_SIZE = 256;
+  std::shared_ptr<ActionPipe<InstallPlan>> action_pipe{
+      new ActionPipe<InstallPlan>()};
+};
+
+TEST_F(DownloadActionTest, CacheManifestInvalid) {
+  std::string data(METADATA_SIZE + SIGNATURE_SIZE, '-');
+  MockPrefs prefs;
+  EXPECT_CALL(prefs, GetInt64(kPrefsUpdateStatePayloadIndex, _))
+      .WillRepeatedly(DoAll(SetArgPointee<1>(0L), Return(true)));
+  EXPECT_CALL(prefs, GetInt64(kPrefsManifestMetadataSize, _))
+      .WillRepeatedly(DoAll(SetArgPointee<1>(METADATA_SIZE), Return(true)));
+  EXPECT_CALL(prefs, GetInt64(kPrefsManifestSignatureSize, _))
+      .WillRepeatedly(DoAll(SetArgPointee<1>(SIGNATURE_SIZE), Return(true)));
+  EXPECT_CALL(prefs, GetInt64(kPrefsUpdateStateNextDataOffset, _))
+      .WillRepeatedly(DoAll(SetArgPointee<1>(0L), Return(true)));
+  EXPECT_CALL(prefs, GetString(kPrefsManifestBytes, _))
+      .WillRepeatedly(DoAll(SetArgPointee<1>(data), Return(true)));
+
+  BootControlStub boot_control;
+  MockHttpFetcher* http_fetcher =
+      new MockHttpFetcher(data.data(), data.size(), nullptr);
+  http_fetcher->set_delay(false);
+  InstallPlan install_plan;
+  auto& payload = install_plan.payloads.emplace_back();
+  install_plan.download_url = "http://fake_url.invalid";
+  payload.size = data.size();
+  payload.payload_urls.emplace_back("http://fake_url.invalid");
+  install_plan.is_resume = true;
+  action_pipe->set_contents(install_plan);
+
+  // Takes ownership of the passed-in HttpFetcher.
+  auto download_action =
+      std::make_unique<DownloadAction>(&prefs,
+                                       &boot_control,
+                                       nullptr,
+                                       nullptr,
+                                       http_fetcher,
+                                       false /* interactive */);
+  download_action->set_in_pipe(action_pipe);
+  MockActionProcessor mock_processor;
+  download_action->SetProcessor(&mock_processor);
+  download_action->PerformAction();
+  ASSERT_EQ(download_action->http_fetcher()->GetBytesDownloaded(), data.size());
+}
+
+}  // namespace chromeos_update_engine
diff --git a/payload_consumer/filesystem_verifier_action.cc b/payload_consumer/filesystem_verifier_action.cc
index f9e7f81..61917ea 100644
--- a/payload_consumer/filesystem_verifier_action.cc
+++ b/payload_consumer/filesystem_verifier_action.cc
@@ -28,6 +28,7 @@
 #include <base/bind.h>
 #include <brillo/data_encoding.h>
 #include <brillo/streams/file_stream.h>
+#include <base/strings/string_util.h>
 
 #include "update_engine/common/utils.h"
 
@@ -77,11 +78,32 @@
     return;
   if (code == ErrorCode::kSuccess && HasOutputPipe())
     SetOutputObject(install_plan_);
+  UpdateProgress(1.0);
   processor_->ActionComplete(this, code);
 }
 
+void FilesystemVerifierAction::UpdateProgress(double progress) {
+  if (delegate_ != nullptr) {
+    delegate_->OnVerifyProgressUpdate(progress);
+  }
+}
+
 void FilesystemVerifierAction::StartPartitionHashing() {
   if (partition_index_ == install_plan_.partitions.size()) {
+    if (!install_plan_.untouched_dynamic_partitions.empty()) {
+      LOG(INFO) << "Verifying extents of untouched dynamic partitions ["
+                << base::JoinString(install_plan_.untouched_dynamic_partitions,
+                                    ", ")
+                << "]";
+      if (!dynamic_control_->VerifyExtentsForUntouchedPartitions(
+              install_plan_.source_slot,
+              install_plan_.target_slot,
+              install_plan_.untouched_dynamic_partitions)) {
+        Cleanup(ErrorCode::kFilesystemVerifierError);
+        return;
+      }
+    }
+
     Cleanup(ErrorCode::kSuccess);
     return;
   }
@@ -188,7 +210,6 @@
     Cleanup(ErrorCode::kError);
     return;
   }
-
   if (bytes_read == 0) {
     LOG(ERROR) << "Failed to read the remaining " << partition_size_ - offset_
                << " bytes from partition "
@@ -203,6 +224,13 @@
     return;
   }
 
+  // We don't consider the size of each partition. Every partition
+  // has the same length on the progress bar.
+  // TODO(zhangkelvin) Take sizes of each partition into account
+
+  UpdateProgress(
+      (static_cast<double>(offset_) / partition_size_ + partition_index_) /
+      install_plan_.partitions.size());
   if (verifier_step_ == VerifierStep::kVerifyTargetHash &&
       install_plan_.write_verity) {
     if (!verity_writer_->Update(offset_, buffer_.data(), bytes_read)) {
diff --git a/payload_consumer/filesystem_verifier_action.h b/payload_consumer/filesystem_verifier_action.h
index 83d6668..6a8823a 100644
--- a/payload_consumer/filesystem_verifier_action.h
+++ b/payload_consumer/filesystem_verifier_action.h
@@ -49,15 +49,34 @@
   kVerifySourceHash,
 };
 
+class FilesystemVerifyDelegate {
+ public:
+  virtual ~FilesystemVerifyDelegate() = default;
+  virtual void OnVerifyProgressUpdate(double progress) = 0;
+};
+
 class FilesystemVerifierAction : public InstallPlanAction {
  public:
-  FilesystemVerifierAction()
-      : verity_writer_(verity_writer::CreateVerityWriter()) {}
+  explicit FilesystemVerifierAction(
+      DynamicPartitionControlInterface* dynamic_control)
+      : verity_writer_(verity_writer::CreateVerityWriter()),
+        dynamic_control_(dynamic_control) {
+    CHECK(dynamic_control_);
+  }
+
   ~FilesystemVerifierAction() override = default;
 
   void PerformAction() override;
   void TerminateProcessing() override;
 
+  // Used for listening to progress updates
+  void set_delegate(FilesystemVerifyDelegate* delegate) {
+    this->delegate_ = delegate;
+  }
+  [[nodiscard]] FilesystemVerifyDelegate* get_delegate() const {
+    return this->delegate_;
+  }
+
   // Debugging/logging
   static std::string StaticType() { return "FilesystemVerifierAction"; }
   std::string Type() const override { return StaticType(); }
@@ -85,6 +104,9 @@
   // true if TerminateProcessing() was called.
   void Cleanup(ErrorCode code);
 
+  // Invoke delegate callback to report progress, if delegate is not null
+  void UpdateProgress(double progress);
+
   // The type of the partition that we are verifying.
   VerifierStep verifier_step_ = VerifierStep::kVerifyTargetHash;
 
@@ -100,15 +122,15 @@
 
   bool cancelled_{false};  // true if the action has been cancelled.
 
-  // The install plan we're passed in via the input pipe.
-  InstallPlan install_plan_;
-
   // Calculates the hash of the data.
   std::unique_ptr<HashCalculator> hasher_;
 
   // Write verity data of the current partition.
   std::unique_ptr<VerityWriterInterface> verity_writer_;
 
+  // Verifies the untouched dynamic partitions for partial updates.
+  DynamicPartitionControlInterface* dynamic_control_{nullptr};
+
   // Reads and hashes this many bytes from the head of the input stream. When
   // the partition starts to be hashed, this field is initialized from the
   // corresponding InstallPlan::Partition size which is the total size
@@ -119,6 +141,9 @@
   // The byte offset that we are reading in the current partition.
   uint64_t offset_{0};
 
+  // An observer that observes progress updates of this action.
+  FilesystemVerifyDelegate* delegate_{};
+
   DISALLOW_COPY_AND_ASSIGN(FilesystemVerifierAction);
 };
 
diff --git a/payload_consumer/filesystem_verifier_action_unittest.cc b/payload_consumer/filesystem_verifier_action_unittest.cc
index cb33404..2971849 100644
--- a/payload_consumer/filesystem_verifier_action_unittest.cc
+++ b/payload_consumer/filesystem_verifier_action_unittest.cc
@@ -27,6 +27,7 @@
 #include <brillo/secure_blob.h>
 #include <gtest/gtest.h>
 
+#include "update_engine/common/dynamic_partition_control_stub.h"
 #include "update_engine/common/hash_calculator.h"
 #include "update_engine/common/test_utils.h"
 #include "update_engine/common/utils.h"
@@ -51,6 +52,7 @@
 
   brillo::FakeMessageLoop loop_{nullptr};
   ActionProcessor processor_;
+  DynamicPartitionControlStub dynamic_control_stub_;
 };
 
 class FilesystemVerifierActionTestDelegate : public ActionProcessorDelegate {
@@ -188,7 +190,8 @@
 void FilesystemVerifierActionTest::BuildActions(
     const InstallPlan& install_plan) {
   auto feeder_action = std::make_unique<ObjectFeederAction<InstallPlan>>();
-  auto verifier_action = std::make_unique<FilesystemVerifierAction>();
+  auto verifier_action =
+      std::make_unique<FilesystemVerifierAction>(&dynamic_control_stub_);
   auto collector_action =
       std::make_unique<ObjectCollectorAction<InstallPlan>>();
 
@@ -217,7 +220,8 @@
 };
 
 TEST_F(FilesystemVerifierActionTest, MissingInputObjectTest) {
-  auto copier_action = std::make_unique<FilesystemVerifierAction>();
+  auto copier_action =
+      std::make_unique<FilesystemVerifierAction>(&dynamic_control_stub_);
   auto collector_action =
       std::make_unique<ObjectCollectorAction<InstallPlan>>();
 
diff --git a/payload_consumer/install_plan.cc b/payload_consumer/install_plan.cc
index 4638fbe..c7ef7b2 100644
--- a/payload_consumer/install_plan.cc
+++ b/payload_consumer/install_plan.cc
@@ -87,12 +87,19 @@
         base::StringPrintf(", system_version: %s", system_version.c_str());
   }
 
+  string url_str = download_url;
+  if (base::StartsWith(
+          url_str, "fd://", base::CompareCase::INSENSITIVE_ASCII)) {
+    int fd = std::stoi(url_str.substr(strlen("fd://")));
+    url_str = utils::GetFilePath(fd);
+  }
+
   LOG(INFO) << "InstallPlan: " << (is_resume ? "resume" : "new_update")
             << version_str
             << ", source_slot: " << BootControlInterface::SlotName(source_slot)
             << ", target_slot: " << BootControlInterface::SlotName(target_slot)
-            << ", initial url: " << download_url << payloads_str
-            << partitions_str << ", hash_checks_mandatory: "
+            << ", initial url: " << url_str << payloads_str << partitions_str
+            << ", hash_checks_mandatory: "
             << utils::ToString(hash_checks_mandatory)
             << ", powerwash_required: " << utils::ToString(powerwash_required)
             << ", switch_slot_on_reboot: "
@@ -105,7 +112,8 @@
 bool InstallPlan::LoadPartitionsFromSlots(BootControlInterface* boot_control) {
   bool result = true;
   for (Partition& partition : partitions) {
-    if (source_slot != BootControlInterface::kInvalidSlot) {
+    if (source_slot != BootControlInterface::kInvalidSlot &&
+        partition.source_size > 0) {
       result = boot_control->GetPartitionDevice(
                    partition.name, source_slot, &partition.source_path) &&
                result;
diff --git a/payload_consumer/install_plan.h b/payload_consumer/install_plan.h
index 7a95ab4..f04c650 100644
--- a/payload_consumer/install_plan.h
+++ b/payload_consumer/install_plan.h
@@ -158,6 +158,10 @@
   // If not blank, a base-64 encoded representation of the PEM-encoded
   // public key in the response.
   std::string public_key_rsa;
+
+  // The name of dynamic partitions not included in the payload. Only used
+  // for partial updates.
+  std::vector<std::string> untouched_dynamic_partitions;
 };
 
 class InstallPlanAction;
@@ -195,9 +199,10 @@
   typedef ActionTraits<InstallPlanAction>::InputObjectType InputObjectType;
   typedef ActionTraits<InstallPlanAction>::OutputObjectType OutputObjectType;
 
- private:
+ protected:
   InstallPlan install_plan_;
 
+ private:
   DISALLOW_COPY_AND_ASSIGN(InstallPlanAction);
 };
 
diff --git a/payload_consumer/partition_update_generator_android.cc b/payload_consumer/partition_update_generator_android.cc
new file mode 100644
index 0000000..25771e1
--- /dev/null
+++ b/payload_consumer/partition_update_generator_android.cc
@@ -0,0 +1,178 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_consumer/partition_update_generator_android.h"
+
+#include <filesystem>
+#include <memory>
+#include <utility>
+
+#include <android-base/properties.h>
+#include <android-base/strings.h>
+#include <base/logging.h>
+#include <base/strings/string_split.h>
+
+#include "update_engine/common/boot_control_interface.h"
+#include "update_engine/common/hash_calculator.h"
+#include "update_engine/common/utils.h"
+
+namespace chromeos_update_engine {
+
+PartitionUpdateGeneratorAndroid::PartitionUpdateGeneratorAndroid(
+    BootControlInterface* boot_control, size_t block_size)
+    : boot_control_(boot_control), block_size_(block_size) {}
+
+bool PartitionUpdateGeneratorAndroid::
+    GenerateOperationsForPartitionsNotInPayload(
+        BootControlInterface::Slot source_slot,
+        BootControlInterface::Slot target_slot,
+        const std::set<std::string>& partitions_in_payload,
+        std::vector<PartitionUpdate>* update_list) {
+  auto ab_partitions = GetAbPartitionsOnDevice();
+  if (ab_partitions.empty()) {
+    LOG(ERROR) << "Failed to load static a/b partitions";
+    return false;
+  }
+
+  std::vector<PartitionUpdate> partition_updates;
+  for (const auto& partition_name : ab_partitions) {
+    if (partitions_in_payload.find(partition_name) !=
+        partitions_in_payload.end()) {
+      LOG(INFO) << partition_name << " has included in payload";
+      continue;
+    }
+    bool is_source_dynamic = false;
+    std::string source_device;
+
+    TEST_AND_RETURN_FALSE(
+        boot_control_->GetPartitionDevice(partition_name,
+                                          source_slot,
+                                          true, /* not_in_payload */
+                                          &source_device,
+                                          &is_source_dynamic));
+    bool is_target_dynamic = false;
+    std::string target_device;
+    TEST_AND_RETURN_FALSE(boot_control_->GetPartitionDevice(
+        partition_name, target_slot, true, &target_device, &is_target_dynamic));
+
+    if (is_source_dynamic || is_target_dynamic) {
+      if (is_source_dynamic != is_target_dynamic) {
+        LOG(ERROR) << "Partition " << partition_name << " is expected to be a"
+                   << " static partition. source slot is "
+                   << (is_source_dynamic ? "" : "not")
+                   << " dynamic, and target slot " << target_slot << " is "
+                   << (is_target_dynamic ? "" : "not") << " dynamic.";
+        return false;
+      } else {
+        continue;
+      }
+    }
+
+    auto source_size = utils::FileSize(source_device);
+    auto target_size = utils::FileSize(target_device);
+    if (source_size == -1 || target_size == -1 || source_size != target_size ||
+        source_size % block_size_ != 0) {
+      LOG(ERROR) << "Invalid partition size. source size " << source_size
+                 << ", target size " << target_size;
+      return false;
+    }
+
+    auto partition_update = CreatePartitionUpdate(
+        partition_name, source_device, target_device, source_size);
+    if (!partition_update.has_value()) {
+      LOG(ERROR) << "Failed to create partition update for " << partition_name;
+      return false;
+    }
+    partition_updates.push_back(partition_update.value());
+  }
+  *update_list = std::move(partition_updates);
+  return true;
+}
+
+std::vector<std::string>
+PartitionUpdateGeneratorAndroid::GetAbPartitionsOnDevice() const {
+  auto partition_list_str =
+      android::base::GetProperty("ro.product.ab_ota_partitions", "");
+  return base::SplitString(partition_list_str,
+                           ",",
+                           base::TRIM_WHITESPACE,
+                           base::SPLIT_WANT_NONEMPTY);
+}
+
+std::optional<PartitionUpdate>
+PartitionUpdateGeneratorAndroid::CreatePartitionUpdate(
+    const std::string& partition_name,
+    const std::string& source_device,
+    const std::string& target_device,
+    int64_t partition_size) {
+  PartitionUpdate partition_update;
+  partition_update.set_partition_name(partition_name);
+  auto old_partition_info = partition_update.mutable_old_partition_info();
+  old_partition_info->set_size(partition_size);
+
+  auto raw_hash = CalculateHashForPartition(source_device, partition_size);
+  if (!raw_hash.has_value()) {
+    LOG(ERROR) << "Failed to calculate hash for partition " << source_device
+               << " size: " << partition_size;
+    return {};
+  }
+  old_partition_info->set_hash(raw_hash->data(), raw_hash->size());
+  auto new_partition_info = partition_update.mutable_new_partition_info();
+  new_partition_info->set_size(partition_size);
+  new_partition_info->set_hash(raw_hash->data(), raw_hash->size());
+
+  auto copy_operation = partition_update.add_operations();
+  copy_operation->set_type(InstallOperation::SOURCE_COPY);
+  Extent copy_extent;
+  copy_extent.set_start_block(0);
+  copy_extent.set_num_blocks(partition_size / block_size_);
+
+  *copy_operation->add_src_extents() = copy_extent;
+  *copy_operation->add_dst_extents() = copy_extent;
+
+  return partition_update;
+}
+
+std::optional<brillo::Blob>
+PartitionUpdateGeneratorAndroid::CalculateHashForPartition(
+    const std::string& block_device, int64_t partition_size) {
+  // TODO(xunchang) compute the hash with ecc partitions first, the hashing
+  // behavior should match the one in SOURCE_COPY. Also, we don't have the
+  // correct hash for source partition.
+  // An alternative way is to verify the written bytes match the read bytes
+  // during filesystem verification. This could probably save us a read of
+  // partitions here.
+  brillo::Blob raw_hash;
+  if (HashCalculator::RawHashOfFile(block_device, partition_size, &raw_hash) !=
+      partition_size) {
+    LOG(ERROR) << "Failed to calculate hash for " << block_device;
+    return std::nullopt;
+  }
+
+  return raw_hash;
+}
+
+namespace partition_update_generator {
+std::unique_ptr<PartitionUpdateGeneratorInterface> Create(
+    BootControlInterface* boot_control, size_t block_size) {
+  CHECK(boot_control);
+
+  return std::unique_ptr<PartitionUpdateGeneratorInterface>(
+      new PartitionUpdateGeneratorAndroid(boot_control, block_size));
+}
+}  // namespace partition_update_generator
+
+}  // namespace chromeos_update_engine
diff --git a/payload_consumer/partition_update_generator_android.h b/payload_consumer/partition_update_generator_android.h
new file mode 100644
index 0000000..0330c99
--- /dev/null
+++ b/payload_consumer/partition_update_generator_android.h
@@ -0,0 +1,68 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_PAYLOAD_CONSUMER_PARTITION_UPDATE_GENERATOR_ANDROID_H_
+#define UPDATE_ENGINE_PAYLOAD_CONSUMER_PARTITION_UPDATE_GENERATOR_ANDROID_H_
+
+#include <optional>
+#include <set>
+#include <string>
+#include <vector>
+
+#include <brillo/secure_blob.h>
+#include <gtest/gtest_prod.h>  // for FRIEND_TEST
+
+#include "update_engine/common/boot_control_interface.h"
+#include "update_engine/payload_consumer/partition_update_generator_interface.h"
+
+namespace chromeos_update_engine {
+
+class PartitionUpdateGeneratorAndroid
+    : public PartitionUpdateGeneratorInterface {
+ public:
+  PartitionUpdateGeneratorAndroid(BootControlInterface* boot_control,
+                                  size_t block_size);
+
+  bool GenerateOperationsForPartitionsNotInPayload(
+      BootControlInterface::Slot source_slot,
+      BootControlInterface::Slot target_slot,
+      const std::set<std::string>& partitions_in_payload,
+      std::vector<PartitionUpdate>* update_list) override;
+  virtual std::vector<std::string> GetAbPartitionsOnDevice() const;
+
+ private:
+  friend class PartitionUpdateGeneratorAndroidTest;
+  FRIEND_TEST(PartitionUpdateGeneratorAndroidTest, GetStaticPartitions);
+  FRIEND_TEST(PartitionUpdateGeneratorAndroidTest, CreatePartitionUpdate);
+
+  // Creates a PartitionUpdate object for a given partition to update from
+  // source to target. Returns std::nullopt on failure.
+  std::optional<PartitionUpdate> CreatePartitionUpdate(
+      const std::string& partition_name,
+      const std::string& source_device,
+      const std::string& target_device,
+      int64_t partition_size);
+
+  std::optional<brillo::Blob> CalculateHashForPartition(
+      const std::string& block_device, int64_t partition_size);
+
+  BootControlInterface* boot_control_;
+  size_t block_size_;
+};
+
+}  // namespace chromeos_update_engine
+
+#endif
diff --git a/payload_consumer/partition_update_generator_android_unittest.cc b/payload_consumer/partition_update_generator_android_unittest.cc
new file mode 100644
index 0000000..86d025e
--- /dev/null
+++ b/payload_consumer/partition_update_generator_android_unittest.cc
@@ -0,0 +1,159 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_consumer/partition_update_generator_android.h"
+
+#include <map>
+#include <memory>
+#include <set>
+#include <utility>
+#include <vector>
+
+#include <android-base/strings.h>
+#include <brillo/secure_blob.h>
+#include <gtest/gtest.h>
+
+#include "update_engine/common/boot_control_interface.h"
+#include "update_engine/common/fake_boot_control.h"
+#include "update_engine/common/hash_calculator.h"
+#include "update_engine/common/test_utils.h"
+#include "update_engine/common/utils.h"
+
+namespace chromeos_update_engine {
+
+class FakePartitionUpdateGenerator : public PartitionUpdateGeneratorAndroid {
+ public:
+  std::vector<std::string> GetAbPartitionsOnDevice() const {
+    return ab_partitions_;
+  }
+  using PartitionUpdateGeneratorAndroid::PartitionUpdateGeneratorAndroid;
+  std::vector<std::string> ab_partitions_;
+};
+
+class PartitionUpdateGeneratorAndroidTest : public ::testing::Test {
+ protected:
+  void SetUp() override {
+    ASSERT_TRUE(device_dir_.CreateUniqueTempDir());
+    boot_control_ = std::make_unique<FakeBootControl>();
+    ASSERT_TRUE(boot_control_);
+    boot_control_->SetNumSlots(2);
+    generator_ = std::make_unique<FakePartitionUpdateGenerator>(
+        boot_control_.get(), 4096);
+    ASSERT_TRUE(generator_);
+  }
+
+  std::unique_ptr<FakePartitionUpdateGenerator> generator_;
+  std::unique_ptr<FakeBootControl> boot_control_;
+
+  base::ScopedTempDir device_dir_;
+  std::map<std::string, std::string> device_map_;
+
+  void SetUpBlockDevice(const std::map<std::string, std::string>& contents) {
+    std::set<std::string> partition_base_names;
+    for (const auto& [name, content] : contents) {
+      auto path = device_dir_.GetPath().value() + "/" + name;
+      ASSERT_TRUE(
+          utils::WriteFile(path.c_str(), content.data(), content.size()));
+
+      if (android::base::EndsWith(name, "_a")) {
+        auto prefix = name.substr(0, name.size() - 2);
+        boot_control_->SetPartitionDevice(prefix, 0, path);
+        partition_base_names.emplace(prefix);
+      } else if (android::base::EndsWith(name, "_b")) {
+        auto prefix = name.substr(0, name.size() - 2);
+        boot_control_->SetPartitionDevice(prefix, 1, path);
+        partition_base_names.emplace(prefix);
+      }
+      device_map_[name] = std::move(path);
+    }
+    generator_->ab_partitions_ = {partition_base_names.begin(),
+                                  partition_base_names.end()};
+  }
+
+  void CheckPartitionUpdate(const std::string& name,
+                            const std::string& content,
+                            const PartitionUpdate& partition_update) {
+    ASSERT_EQ(name, partition_update.partition_name());
+
+    brillo::Blob out_hash;
+    ASSERT_TRUE(HashCalculator::RawHashOfBytes(
+        content.data(), content.size(), &out_hash));
+    ASSERT_EQ(std::string(out_hash.begin(), out_hash.end()),
+              partition_update.old_partition_info().hash());
+    ASSERT_EQ(std::string(out_hash.begin(), out_hash.end()),
+              partition_update.new_partition_info().hash());
+
+    ASSERT_EQ(1, partition_update.operations_size());
+    const auto& operation = partition_update.operations(0);
+    ASSERT_EQ(InstallOperation::SOURCE_COPY, operation.type());
+
+    ASSERT_EQ(1, operation.src_extents_size());
+    ASSERT_EQ(0u, operation.src_extents(0).start_block());
+    ASSERT_EQ(content.size() / 4096, operation.src_extents(0).num_blocks());
+
+    ASSERT_EQ(1, operation.dst_extents_size());
+    ASSERT_EQ(0u, operation.dst_extents(0).start_block());
+    ASSERT_EQ(content.size() / 4096, operation.dst_extents(0).num_blocks());
+  }
+};
+
+TEST_F(PartitionUpdateGeneratorAndroidTest, CreatePartitionUpdate) {
+  auto system_contents = std::string(4096 * 2, '1');
+  auto boot_contents = std::string(4096 * 5, 'b');
+  std::map<std::string, std::string> contents = {
+      {"system_a", system_contents},
+      {"system_b", std::string(4096 * 2, 0)},
+      {"boot_a", boot_contents},
+      {"boot_b", std::string(4096 * 5, 0)},
+  };
+  SetUpBlockDevice(contents);
+
+  auto system_partition_update = generator_->CreatePartitionUpdate(
+      "system", device_map_["system_a"], device_map_["system_b"], 4096 * 2);
+  ASSERT_TRUE(system_partition_update.has_value());
+  CheckPartitionUpdate(
+      "system", system_contents, system_partition_update.value());
+
+  auto boot_partition_update = generator_->CreatePartitionUpdate(
+      "boot", device_map_["boot_a"], device_map_["boot_b"], 4096 * 5);
+  ASSERT_TRUE(boot_partition_update.has_value());
+  CheckPartitionUpdate("boot", boot_contents, boot_partition_update.value());
+}
+
+TEST_F(PartitionUpdateGeneratorAndroidTest, GenerateOperations) {
+  auto system_contents = std::string(4096 * 10, '2');
+  auto boot_contents = std::string(4096 * 5, 'b');
+  std::map<std::string, std::string> contents = {
+      {"system_a", system_contents},
+      {"system_b", std::string(4096 * 10, 0)},
+      {"boot_a", boot_contents},
+      {"boot_b", std::string(4096 * 5, 0)},
+      {"vendor_a", ""},
+      {"vendor_b", ""},
+      {"persist", ""},
+  };
+  SetUpBlockDevice(contents);
+
+  std::vector<PartitionUpdate> update_list;
+  ASSERT_TRUE(generator_->GenerateOperationsForPartitionsNotInPayload(
+      0, 1, std::set<std::string>{"vendor"}, &update_list));
+
+  ASSERT_EQ(2u, update_list.size());
+  CheckPartitionUpdate("boot", boot_contents, update_list[0]);
+  CheckPartitionUpdate("system", system_contents, update_list[1]);
+}
+
+}  // namespace chromeos_update_engine
diff --git a/payload_consumer/partition_update_generator_interface.h b/payload_consumer/partition_update_generator_interface.h
new file mode 100644
index 0000000..3fa3dfb
--- /dev/null
+++ b/payload_consumer/partition_update_generator_interface.h
@@ -0,0 +1,55 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_PAYLOAD_CONSUMER_PARTITION_UPDATE_GENERATOR_INTERFACE_H_
+#define UPDATE_ENGINE_PAYLOAD_CONSUMER_PARTITION_UPDATE_GENERATOR_INTERFACE_H_
+
+#include <memory>
+#include <set>
+#include <string>
+#include <vector>
+
+#include "update_engine/common/boot_control_interface.h"
+
+namespace chromeos_update_engine {
+class PartitionUpdate;
+
+// This class parses the partitions that are not included in the payload of a
+// partial A/B update, and generates additional operations for these
+// partitions to make the update complete.
+class PartitionUpdateGeneratorInterface {
+ public:
+  virtual ~PartitionUpdateGeneratorInterface() = default;
+
+  // Adds PartitionUpdate for partitions not included in the payload. For static
+  // partitions, it generates SOURCE_COPY operations to copy the bytes from the
+  // source slot to target slot. For dynamic partitions, it only calculates the
+  // partition hash for the filesystem verification later.
+  virtual bool GenerateOperationsForPartitionsNotInPayload(
+      BootControlInterface::Slot source_slot,
+      BootControlInterface::Slot target_slot,
+      const std::set<std::string>& partitions_in_payload,
+      std::vector<PartitionUpdate>* update_list) = 0;
+};
+
+namespace partition_update_generator {
+std::unique_ptr<PartitionUpdateGeneratorInterface> Create(
+    BootControlInterface* boot_control, size_t block_size);
+}
+
+}  // namespace chromeos_update_engine
+
+#endif
diff --git a/payload_consumer/partition_update_generator_stub.cc b/payload_consumer/partition_update_generator_stub.cc
new file mode 100644
index 0000000..cfbd5e1
--- /dev/null
+++ b/payload_consumer/partition_update_generator_stub.cc
@@ -0,0 +1,38 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_consumer/partition_update_generator_stub.h"
+
+#include <memory>
+
+namespace chromeos_update_engine {
+
+bool PartitionUpdateGeneratorStub::GenerateOperationsForPartitionsNotInPayload(
+    chromeos_update_engine::BootControlInterface::Slot source_slot,
+    chromeos_update_engine::BootControlInterface::Slot target_slot,
+    const std::set<std::string>& partitions_in_payload,
+    std::vector<PartitionUpdate>* update_list) {
+  return true;
+}
+
+namespace partition_update_generator {
+std::unique_ptr<PartitionUpdateGeneratorInterface> Create(
+    BootControlInterface* boot_control, size_t block_size) {
+  return std::make_unique<PartitionUpdateGeneratorStub>();
+}
+}  // namespace partition_update_generator
+
+}  // namespace chromeos_update_engine
diff --git a/payload_consumer/partition_update_generator_stub.h b/payload_consumer/partition_update_generator_stub.h
new file mode 100644
index 0000000..282875e
--- /dev/null
+++ b/payload_consumer/partition_update_generator_stub.h
@@ -0,0 +1,40 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_PAYLOAD_CONSUMER_PARTITION_UPDATE_GENERATOR_STUB_H_
+#define UPDATE_ENGINE_PAYLOAD_CONSUMER_PARTITION_UPDATE_GENERATOR_STUB_H_
+
+#include <set>
+#include <string>
+#include <vector>
+
+#include "update_engine/common/boot_control_interface.h"
+#include "update_engine/payload_consumer/partition_update_generator_interface.h"
+
+namespace chromeos_update_engine {
+class PartitionUpdateGeneratorStub : public PartitionUpdateGeneratorInterface {
+ public:
+  PartitionUpdateGeneratorStub() = default;
+  bool GenerateOperationsForPartitionsNotInPayload(
+      BootControlInterface::Slot source_slot,
+      BootControlInterface::Slot target_slot,
+      const std::set<std::string>& partitions_in_payload,
+      std::vector<PartitionUpdate>* update_list) override;
+};
+
+}  // namespace chromeos_update_engine
+
+#endif
diff --git a/payload_consumer/payload_constants.cc b/payload_consumer/payload_constants.cc
index 299bcfc..663ab81 100644
--- a/payload_consumer/payload_constants.cc
+++ b/payload_consumer/payload_constants.cc
@@ -33,9 +33,11 @@
 const uint32_t kBrotliBsdiffMinorPayloadVersion = 4;
 const uint32_t kPuffdiffMinorPayloadVersion = 5;
 const uint32_t kVerityMinorPayloadVersion = 6;
+const uint32_t kPartialUpdateMinorPayloadVersion = 7;
 
 const uint32_t kMinSupportedMinorPayloadVersion = kSourceMinorPayloadVersion;
-const uint32_t kMaxSupportedMinorPayloadVersion = kVerityMinorPayloadVersion;
+const uint32_t kMaxSupportedMinorPayloadVersion =
+    kPartialUpdateMinorPayloadVersion;
 
 const uint64_t kMaxPayloadHeaderSize = 24;
 
@@ -44,7 +46,7 @@
 
 const char kDeltaMagic[4] = {'C', 'r', 'A', 'U'};
 
-const char* InstallOperationTypeName(InstallOperation_Type op_type) {
+const char* InstallOperationTypeName(InstallOperation::Type op_type) {
   switch (op_type) {
     case InstallOperation::REPLACE:
       return "REPLACE";
diff --git a/payload_consumer/payload_constants.h b/payload_consumer/payload_constants.h
index 888fa2a..03647ee 100644
--- a/payload_consumer/payload_constants.h
+++ b/payload_consumer/payload_constants.h
@@ -56,6 +56,9 @@
 // The minor version that allows Verity hash tree and FEC generation.
 extern const uint32_t kVerityMinorPayloadVersion;
 
+// The minor version that allows partial update, e.g. kernel only update.
+extern const uint32_t kPartialUpdateMinorPayloadVersion;
+
 // The minimum and maximum supported minor version.
 extern const uint32_t kMinSupportedMinorPayloadVersion;
 extern const uint32_t kMaxSupportedMinorPayloadVersion;
@@ -77,7 +80,7 @@
 const uint64_t kSparseHole = std::numeric_limits<uint64_t>::max();
 
 // Return the name of the operation type.
-const char* InstallOperationTypeName(InstallOperation_Type op_type);
+const char* InstallOperationTypeName(InstallOperation::Type op_type);
 
 }  // namespace chromeos_update_engine
 
diff --git a/payload_consumer/payload_metadata.cc b/payload_consumer/payload_metadata.cc
index b83001a..2cb73eb 100644
--- a/payload_consumer/payload_metadata.cc
+++ b/payload_consumer/payload_metadata.cc
@@ -18,8 +18,10 @@
 
 #include <endian.h>
 
+#include <base/strings/stringprintf.h>
 #include <brillo/data_encoding.h>
 
+#include "update_engine/common/constants.h"
 #include "update_engine/common/hash_calculator.h"
 #include "update_engine/common/utils.h"
 #include "update_engine/payload_consumer/payload_constants.h"
@@ -54,7 +56,18 @@
 
   // Validate the magic string.
   if (memcmp(payload.data(), kDeltaMagic, sizeof(kDeltaMagic)) != 0) {
-    LOG(ERROR) << "Bad payload format -- invalid delta magic.";
+    LOG(ERROR) << "Bad payload format -- invalid delta magic: "
+               << base::StringPrintf("%02x%02x%02x%02x",
+                                     payload[0],
+                                     payload[1],
+                                     payload[2],
+                                     payload[3])
+               << " Expected: "
+               << base::StringPrintf("%02x%02x%02x%02x",
+                                     kDeltaMagic[0],
+                                     kDeltaMagic[1],
+                                     kDeltaMagic[2],
+                                     kDeltaMagic[3]);
     *error = ErrorCode::kDownloadInvalidMetadataMagicString;
     return MetadataParseResult::kError;
   }
@@ -131,12 +144,16 @@
 
 ErrorCode PayloadMetadata::ValidateMetadataSignature(
     const brillo::Blob& payload,
-    const std::string& metadata_signature,
-    const std::string& pem_public_key) const {
+    const string& metadata_signature,
+    const PayloadVerifier& payload_verifier) const {
   if (payload.size() < metadata_size_ + metadata_signature_size_)
     return ErrorCode::kDownloadMetadataSignatureError;
 
-  brillo::Blob metadata_signature_blob, metadata_signature_protobuf_blob;
+  // A single signature in raw bytes.
+  brillo::Blob metadata_signature_blob;
+  // The serialized Signatures protobuf message stored in major version >=2
+  // payloads; it may contain multiple signatures.
+  string metadata_signature_protobuf;
   if (!metadata_signature.empty()) {
     // Convert base64-encoded signature to raw bytes.
     if (!brillo::data_encoding::Base64Decode(metadata_signature,
@@ -146,49 +163,43 @@
       return ErrorCode::kDownloadMetadataSignatureError;
     }
   } else {
-    metadata_signature_protobuf_blob.assign(
+    metadata_signature_protobuf.assign(
         payload.begin() + metadata_size_,
         payload.begin() + metadata_size_ + metadata_signature_size_);
   }
 
-  if (metadata_signature_blob.empty() &&
-      metadata_signature_protobuf_blob.empty()) {
+  if (metadata_signature_blob.empty() && metadata_signature_protobuf.empty()) {
     LOG(ERROR) << "Missing mandatory metadata signature in both Omaha "
                << "response and payload.";
     return ErrorCode::kDownloadMetadataSignatureMissingError;
   }
 
-  brillo::Blob calculated_metadata_hash;
+  brillo::Blob metadata_hash;
   if (!HashCalculator::RawHashOfBytes(
-          payload.data(), metadata_size_, &calculated_metadata_hash)) {
+          payload.data(), metadata_size_, &metadata_hash)) {
     LOG(ERROR) << "Unable to compute actual hash of manifest";
     return ErrorCode::kDownloadMetadataSignatureVerificationError;
   }
 
-  PayloadVerifier::PadRSA2048SHA256Hash(&calculated_metadata_hash);
-  if (calculated_metadata_hash.empty()) {
-    LOG(ERROR) << "Computed actual hash of metadata is empty.";
+  if (metadata_hash.size() != kSHA256Size) {
+    LOG(ERROR) << "Computed actual hash of metadata has incorrect size: "
+               << metadata_hash.size();
     return ErrorCode::kDownloadMetadataSignatureVerificationError;
   }
 
   if (!metadata_signature_blob.empty()) {
-    brillo::Blob expected_metadata_hash;
-    if (!PayloadVerifier::GetRawHashFromSignature(
-            metadata_signature_blob, pem_public_key, &expected_metadata_hash)) {
-      LOG(ERROR) << "Unable to compute expected hash from metadata signature";
-      return ErrorCode::kDownloadMetadataSignatureError;
-    }
-    if (calculated_metadata_hash != expected_metadata_hash) {
-      LOG(ERROR) << "Manifest hash verification failed. Expected hash = ";
-      utils::HexDumpVector(expected_metadata_hash);
-      LOG(ERROR) << "Calculated hash = ";
-      utils::HexDumpVector(calculated_metadata_hash);
+    brillo::Blob decrypted_signature;
+    if (!payload_verifier.VerifyRawSignature(
+            metadata_signature_blob, metadata_hash, &decrypted_signature)) {
+      LOG(ERROR) << "Manifest hash verification failed. Decrypted hash = ";
+      utils::HexDumpVector(decrypted_signature);
+      LOG(ERROR) << "Calculated hash before padding = ";
+      utils::HexDumpVector(metadata_hash);
       return ErrorCode::kDownloadMetadataSignatureMismatch;
     }
   } else {
-    if (!PayloadVerifier::VerifySignature(metadata_signature_protobuf_blob,
-                                          pem_public_key,
-                                          calculated_metadata_hash)) {
+    if (!payload_verifier.VerifySignature(metadata_signature_protobuf,
+                                          metadata_hash)) {
       LOG(ERROR) << "Manifest hash verification failed.";
       return ErrorCode::kDownloadMetadataSignatureMismatch;
     }
diff --git a/payload_consumer/payload_metadata.h b/payload_consumer/payload_metadata.h
index be43c41..8b36f53 100644
--- a/payload_consumer/payload_metadata.h
+++ b/payload_consumer/payload_metadata.h
@@ -27,6 +27,7 @@
 
 #include "update_engine/common/error_code.h"
 #include "update_engine/common/platform_constants.h"
+#include "update_engine/payload_consumer/payload_verifier.h"
 #include "update_engine/update_metadata.pb.h"
 
 namespace chromeos_update_engine {
@@ -62,12 +63,13 @@
   // |metadata_signature| (if present) or the metadata signature in payload
   // itself (if present). Returns ErrorCode::kSuccess on match or a suitable
   // error code otherwise. This method must be called before any part of the
-  // metadata is parsed so that a man-in-the-middle attack on the SSL connection
+  // metadata is parsed so that an on-path attack on the SSL connection
   // to the payload server doesn't exploit any vulnerability in the code that
   // parses the protocol buffer.
-  ErrorCode ValidateMetadataSignature(const brillo::Blob& payload,
-                                      const std::string& metadata_signature,
-                                      const std::string& pem_public_key) const;
+  ErrorCode ValidateMetadataSignature(
+      const brillo::Blob& payload,
+      const std::string& metadata_signature,
+      const PayloadVerifier& payload_verifier) const;
 
   // Returns the major payload version. If the version was not yet parsed,
   // returns zero.
diff --git a/payload_consumer/payload_verifier.cc b/payload_consumer/payload_verifier.cc
index 2f7c133..7fd2b8e 100644
--- a/payload_consumer/payload_verifier.cc
+++ b/payload_consumer/payload_verifier.cc
@@ -16,13 +16,16 @@
 
 #include "update_engine/payload_consumer/payload_verifier.h"
 
+#include <utility>
 #include <vector>
 
 #include <base/logging.h>
 #include <openssl/pem.h>
 
+#include "update_engine/common/constants.h"
 #include "update_engine/common/hash_calculator.h"
 #include "update_engine/common/utils.h"
+#include "update_engine/payload_consumer/certificate_parser_interface.h"
 #include "update_engine/update_metadata.pb.h"
 
 using std::string;
@@ -31,61 +34,73 @@
 
 namespace {
 
-// The following is a standard PKCS1-v1_5 padding for SHA256 signatures, as
-// defined in RFC3447. It is prepended to the actual signature (32 bytes) to
-// form a sequence of 256 bytes (2048 bits) that is amenable to RSA signing. The
-// padded hash will look as follows:
+// The ASN.1 DigestInfo prefix for encoding a SHA256 digest. The complete
+// 51-byte DigestInfo consists of the 19-byte kSHA256DigestInfoPrefix and the
+// 32-byte SHA256 digest.
 //
-//    0x00 0x01 0xff ... 0xff 0x00  ASN1HEADER  SHA256HASH
-//   |--------------205-----------||----19----||----32----|
-//
-// where ASN1HEADER is the ASN.1 description of the signed data. The complete 51
-// bytes of actual data (i.e. the ASN.1 header complete with the hash) are
-// packed as follows:
-//
-//  SEQUENCE(2+49) {
+// SEQUENCE(2+49) {
 //   SEQUENCE(2+13) {
-//    OBJECT(2+9) id-sha256
-//    NULL(2+0)
+//     OBJECT(2+9) id-sha256
+//     NULL(2+0)
 //   }
 //   OCTET STRING(2+32) <actual signature bytes...>
-//  }
-// clang-format off
-const uint8_t kRSA2048SHA256Padding[] = {
-    // PKCS1-v1_5 padding
-    0x00, 0x01, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-    0x00,
-    // ASN.1 header
-    0x30, 0x31, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03,
-    0x04, 0x02, 0x01, 0x05, 0x00, 0x04, 0x20,
+// }
+const uint8_t kSHA256DigestInfoPrefix[] = {
+    0x30, 0x31, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01,
+    0x65, 0x03, 0x04, 0x02, 0x01, 0x05, 0x00, 0x04, 0x20,
 };
-// clang-format on
 
 }  // namespace
 
-bool PayloadVerifier::VerifySignature(const brillo::Blob& signature_blob,
-                                      const string& pem_public_key,
-                                      const brillo::Blob& hash_data) {
+std::unique_ptr<PayloadVerifier> PayloadVerifier::CreateInstance(
+    const std::string& pem_public_key) {
+  std::unique_ptr<BIO, decltype(&BIO_free)> bp(
+      BIO_new_mem_buf(pem_public_key.data(), pem_public_key.size()), BIO_free);
+  if (!bp) {
+    LOG(ERROR) << "Failed to read " << pem_public_key << " into buffer.";
+    return nullptr;
+  }
+
+  auto pub_key = std::unique_ptr<EVP_PKEY, decltype(&EVP_PKEY_free)>(
+      PEM_read_bio_PUBKEY(bp.get(), nullptr, nullptr, nullptr), EVP_PKEY_free);
+  if (!pub_key) {
+    LOG(ERROR) << "Failed to parse the public key in: " << pem_public_key;
+    return nullptr;
+  }
+
+  std::vector<std::unique_ptr<EVP_PKEY, decltype(&EVP_PKEY_free)>> keys;
+  keys.emplace_back(std::move(pub_key));
+  return std::unique_ptr<PayloadVerifier>(new PayloadVerifier(std::move(keys)));
+}
+
+std::unique_ptr<PayloadVerifier> PayloadVerifier::CreateInstanceFromZipPath(
+    const std::string& certificate_zip_path) {
+  auto parser = CreateCertificateParser();
+  if (!parser) {
+    LOG(ERROR) << "Failed to create certificate parser from "
+               << certificate_zip_path;
+    return nullptr;
+  }
+
+  std::vector<std::unique_ptr<EVP_PKEY, decltype(&EVP_PKEY_free)>> public_keys;
+  if (!parser->ReadPublicKeysFromCertificates(certificate_zip_path,
+                                              &public_keys) ||
+      public_keys.empty()) {
+    LOG(ERROR) << "Failed to parse public keys in: " << certificate_zip_path;
+    return nullptr;
+  }
+
+  return std::unique_ptr<PayloadVerifier>(
+      new PayloadVerifier(std::move(public_keys)));
+}
+
+bool PayloadVerifier::VerifySignature(
+    const string& signature_proto, const brillo::Blob& sha256_hash_data) const {
+  TEST_AND_RETURN_FALSE(!public_keys_.empty());
+
   Signatures signatures;
-  LOG(INFO) << "signature blob size = " << signature_blob.size();
-  TEST_AND_RETURN_FALSE(
-      signatures.ParseFromArray(signature_blob.data(), signature_blob.size()));
+  LOG(INFO) << "signature blob size = " << signature_proto.size();
+  TEST_AND_RETURN_FALSE(signatures.ParseFromString(signature_proto));
 
   if (!signatures.signatures_size()) {
     LOG(ERROR) << "No signatures stored in the blob.";
@@ -95,41 +110,109 @@
   std::vector<brillo::Blob> tested_hashes;
   // Tries every signature in the signature blob.
   for (int i = 0; i < signatures.signatures_size(); i++) {
-    const Signatures_Signature& signature = signatures.signatures(i);
-    brillo::Blob sig_data(signature.data().begin(), signature.data().end());
-    brillo::Blob sig_hash_data;
-    if (!GetRawHashFromSignature(sig_data, pem_public_key, &sig_hash_data))
-      continue;
+    const Signatures::Signature& signature = signatures.signatures(i);
+    brillo::Blob sig_data;
+    if (signature.has_unpadded_signature_size()) {
+      TEST_AND_RETURN_FALSE(signature.unpadded_signature_size() <=
+                            signature.data().size());
+      LOG(INFO) << "Truncating the signature to its unpadded size: "
+                << signature.unpadded_signature_size() << ".";
+      sig_data.assign(
+          signature.data().begin(),
+          signature.data().begin() + signature.unpadded_signature_size());
+    } else {
+      sig_data.assign(signature.data().begin(), signature.data().end());
+    }
 
-    if (hash_data == sig_hash_data) {
+    brillo::Blob sig_hash_data;
+    if (VerifyRawSignature(sig_data, sha256_hash_data, &sig_hash_data)) {
       LOG(INFO) << "Verified correct signature " << i + 1 << " out of "
                 << signatures.signatures_size() << " signatures.";
       return true;
     }
-    tested_hashes.push_back(sig_hash_data);
+    if (!sig_hash_data.empty()) {
+      tested_hashes.push_back(sig_hash_data);
+    }
   }
   LOG(ERROR) << "None of the " << signatures.signatures_size()
-             << " signatures is correct. Expected:";
-  utils::HexDumpVector(hash_data);
-  LOG(ERROR) << "But found decrypted hashes:";
+             << " signatures is correct. Expected hash before padding:";
+  utils::HexDumpVector(sha256_hash_data);
+  LOG(ERROR) << "But found RSA decrypted hashes:";
   for (const auto& sig_hash_data : tested_hashes) {
     utils::HexDumpVector(sig_hash_data);
   }
   return false;
 }
 
-bool PayloadVerifier::GetRawHashFromSignature(const brillo::Blob& sig_data,
-                                              const string& pem_public_key,
-                                              brillo::Blob* out_hash_data) {
+bool PayloadVerifier::VerifyRawSignature(
+    const brillo::Blob& sig_data,
+    const brillo::Blob& sha256_hash_data,
+    brillo::Blob* decrypted_sig_data) const {
+  TEST_AND_RETURN_FALSE(!public_keys_.empty());
+
+  for (const auto& public_key : public_keys_) {
+    int key_type = EVP_PKEY_id(public_key.get());
+    if (key_type == EVP_PKEY_RSA) {
+      brillo::Blob sig_hash_data;
+      if (!GetRawHashFromSignature(
+              sig_data, public_key.get(), &sig_hash_data)) {
+        LOG(WARNING)
+            << "Failed to get the raw hash with RSA key. Trying other keys.";
+        continue;
+      }
+
+      if (decrypted_sig_data != nullptr) {
+        *decrypted_sig_data = sig_hash_data;
+      }
+
+      brillo::Blob padded_hash_data = sha256_hash_data;
+      TEST_AND_RETURN_FALSE(
+          PadRSASHA256Hash(&padded_hash_data, sig_hash_data.size()));
+
+      if (padded_hash_data == sig_hash_data) {
+        return true;
+      }
+    }
+
+    if (key_type == EVP_PKEY_EC) {
+      // TODO(b/158580694): Switch back to get0 version and remove manual
+      // freeing of the object once the bug is resolved or gale has been moved
+      // to informational.
+      EC_KEY* ec_key = EVP_PKEY_get1_EC_KEY(public_key.get());
+      TEST_AND_RETURN_FALSE(ec_key != nullptr);
+      if (ECDSA_verify(0,
+                       sha256_hash_data.data(),
+                       sha256_hash_data.size(),
+                       sig_data.data(),
+                       sig_data.size(),
+                       ec_key) == 1) {
+        EC_KEY_free(ec_key);
+        return true;
+      }
+      EC_KEY_free(ec_key);
+    }
+
+    LOG(ERROR) << "Unsupported key type " << key_type;
+    return false;
+  }
+  LOG(INFO) << "Failed to verify the signature with " << public_keys_.size()
+            << " keys.";
+  return false;
+}
+
+bool PayloadVerifier::GetRawHashFromSignature(
+    const brillo::Blob& sig_data,
+    const EVP_PKEY* public_key,
+    brillo::Blob* out_hash_data) const {
+  // TODO(b/158580694): Switch back to get0 version and remove manual freeing of
+  // the object once the bug is resolved or gale has been moved to
+  // informational.
+  //
   // The code below executes the equivalent of:
   //
-  // openssl rsautl -verify -pubin -inkey <(echo |pem_public_key|)
+  // openssl rsautl -verify -pubin -inkey <(echo pem_public_key)
   //   -in |sig_data| -out |out_hash_data|
-
-  BIO* bp = BIO_new_mem_buf(pem_public_key.data(), pem_public_key.size());
-  char dummy_password[] = {' ', 0};  // Ensure no password is read from stdin.
-  RSA* rsa = PEM_read_bio_RSA_PUBKEY(bp, nullptr, nullptr, dummy_password);
-  BIO_free(bp);
+  RSA* rsa = EVP_PKEY_get1_RSA(const_cast<EVP_PKEY*>(public_key));
 
   TEST_AND_RETURN_FALSE(rsa != nullptr);
   unsigned int keysize = RSA_size(rsa);
@@ -151,13 +234,30 @@
   return true;
 }
 
-bool PayloadVerifier::PadRSA2048SHA256Hash(brillo::Blob* hash) {
-  TEST_AND_RETURN_FALSE(hash->size() == 32);
-  hash->insert(hash->begin(),
-               reinterpret_cast<const char*>(kRSA2048SHA256Padding),
-               reinterpret_cast<const char*>(kRSA2048SHA256Padding +
-                                             sizeof(kRSA2048SHA256Padding)));
-  TEST_AND_RETURN_FALSE(hash->size() == 256);
+bool PayloadVerifier::PadRSASHA256Hash(brillo::Blob* hash, size_t rsa_size) {
+  TEST_AND_RETURN_FALSE(hash->size() == kSHA256Size);
+  TEST_AND_RETURN_FALSE(rsa_size == 256 || rsa_size == 512);
+
+  // The following is a standard PKCS1-v1_5 padding for SHA256 signatures, as
+  // defined in RFC3447 section 9.2. It is prepended to the actual hash
+  // (32 bytes) to form a sequence of 256|512 bytes (2048|4096 bits) that is
+  // amenable to RSA signing. The padded hash will look as follows:
+  //
+  //    0x00 0x01 0xff ... 0xff 0x00  ASN1HEADER  SHA256HASH
+  //   |-----------205|461----------||----19----||----32----|
+  size_t padding_string_size =
+      rsa_size - hash->size() - sizeof(kSHA256DigestInfoPrefix) - 3;
+  brillo::Blob padded_result = brillo::CombineBlobs({
+      {0x00, 0x01},
+      brillo::Blob(padding_string_size, 0xff),
+      {0x00},
+      brillo::Blob(kSHA256DigestInfoPrefix,
+                   kSHA256DigestInfoPrefix + sizeof(kSHA256DigestInfoPrefix)),
+      *hash,
+  });
+
+  *hash = std::move(padded_result);
+  TEST_AND_RETURN_FALSE(hash->size() == rsa_size);
   return true;
 }
 
diff --git a/payload_consumer/payload_verifier.h b/payload_consumer/payload_verifier.h
index ec23ef2..bc5231f 100644
--- a/payload_consumer/payload_verifier.h
+++ b/payload_consumer/payload_verifier.h
@@ -17,47 +17,72 @@
 #ifndef UPDATE_ENGINE_PAYLOAD_CONSUMER_PAYLOAD_VERIFIER_H_
 #define UPDATE_ENGINE_PAYLOAD_CONSUMER_PAYLOAD_VERIFIER_H_
 
+#include <memory>
 #include <string>
+#include <utility>
+#include <vector>
 
-#include <base/macros.h>
 #include <brillo/secure_blob.h>
+#include <openssl/evp.h>
 
 #include "update_engine/update_metadata.pb.h"
 
-// This class encapsulates methods used for payload signature verification.
-// See payload_generator/payload_signer.h for payload signing.
+// This class holds the public keys and implements methods used for payload
+// signature verification. See payload_generator/payload_signer.h for payload
+// signing.
 
 namespace chromeos_update_engine {
 
 class PayloadVerifier {
  public:
-  // Interprets |signature_blob| as a protocol buffer containing the Signatures
-  // message and decrypts each signature data using the |pem_public_key|.
-  // |pem_public_key| should be a PEM format RSA public key data.
-  // Returns whether *any* of the decrypted hashes matches the |hash_data|.
-  // In case of any error parsing the signatures or the public key, returns
-  // false.
-  static bool VerifySignature(const brillo::Blob& signature_blob,
-                              const std::string& pem_public_key,
-                              const brillo::Blob& hash_data);
+  // Pads a SHA256 hash so that it may be encrypted/signed with RSA2048 or
+  // RSA4096 using the PKCS#1 v1.5 scheme.
+  // hash should be a pointer to vector of exactly 256 bits. |rsa_size| must be
+  // one of 256 or 512 bytes. The vector will be modified in place and will
+  // result in having a length of 2048 or 4096 bits, depending on the rsa size.
+  // Returns true on success, false otherwise.
+  static bool PadRSASHA256Hash(brillo::Blob* hash, size_t rsa_size);
 
-  // Decrypts |sig_data| with the given |pem_public_key| and populates
-  // |out_hash_data| with the decoded raw hash. |pem_public_key| should be a PEM
-  // format RSA public key data. Returns true if successful, false otherwise.
-  static bool GetRawHashFromSignature(const brillo::Blob& sig_data,
-                                      const std::string& pem_public_key,
-                                      brillo::Blob* out_hash_data);
+  // Parses the input as a PEM encoded public key string, and creates a
+  // PayloadVerifier with that public key for signature verification.
+  static std::unique_ptr<PayloadVerifier> CreateInstance(
+      const std::string& pem_public_key);
 
-  // Pads a SHA256 hash so that it may be encrypted/signed with RSA2048
-  // using the PKCS#1 v1.5 scheme.
-  // hash should be a pointer to vector of exactly 256 bits. The vector
-  // will be modified in place and will result in having a length of
-  // 2048 bits. Returns true on success, false otherwise.
-  static bool PadRSA2048SHA256Hash(brillo::Blob* hash);
+  // Extracts the public keys from the certificates contained in the input
+  // zip file, and creates a PayloadVerifier with these public keys.
+  static std::unique_ptr<PayloadVerifier> CreateInstanceFromZipPath(
+      const std::string& certificate_zip_path);
+
+  // Interprets |signature_proto| as a protocol buffer containing the
+  // |Signatures| message and decrypts each signature data using the stored
+  // public key. Pads the 32 bytes |sha256_hash_data| to 256 or 512 bytes
+  // according to the PKCS#1 v1.5 standard; and returns whether *any* of the
+  // decrypted hashes matches the padded hash data. In case of any error parsing
+  // the signatures, returns false.
+  bool VerifySignature(const std::string& signature_proto,
+                       const brillo::Blob& sha256_hash_data) const;
+
+  // Verifies if |sig_data| is a raw signature of the hash |sha256_hash_data|.
+  // If PayloadVerifier is using RSA as the public key, further puts the
+  // decrypted data of |sig_data| into |decrypted_sig_data|.
+  bool VerifyRawSignature(const brillo::Blob& sig_data,
+                          const brillo::Blob& sha256_hash_data,
+                          brillo::Blob* decrypted_sig_data) const;
 
  private:
-  // This should never be constructed
-  DISALLOW_IMPLICIT_CONSTRUCTORS(PayloadVerifier);
+  explicit PayloadVerifier(
+      std::vector<std::unique_ptr<EVP_PKEY, decltype(&EVP_PKEY_free)>>&&
+          public_keys)
+      : public_keys_(std::move(public_keys)) {}
+
+  // Decrypts |sig_data| with the given |public_key| and populates
+  // |out_hash_data| with the decoded raw hash. Returns true if successful,
+  // false otherwise.
+  bool GetRawHashFromSignature(const brillo::Blob& sig_data,
+                               const EVP_PKEY* public_key,
+                               brillo::Blob* out_hash_data) const;
+
+  std::vector<std::unique_ptr<EVP_PKEY, decltype(&EVP_PKEY_free)>> public_keys_;
 };
 
 }  // namespace chromeos_update_engine
diff --git a/payload_consumer/postinstall_runner_action.cc b/payload_consumer/postinstall_runner_action.cc
index a0b67ea..e8fa81b 100644
--- a/payload_consumer/postinstall_runner_action.cc
+++ b/payload_consumer/postinstall_runner_action.cc
@@ -50,6 +50,7 @@
 
 namespace chromeos_update_engine {
 
+using brillo::MessageLoop;
 using std::string;
 using std::vector;
 
@@ -75,10 +76,17 @@
   partition_weight_.resize(install_plan_.partitions.size());
   total_weight_ = 0;
   for (size_t i = 0; i < install_plan_.partitions.size(); ++i) {
+    auto& partition = install_plan_.partitions[i];
+    if (!install_plan_.run_post_install && partition.postinstall_optional) {
+      partition.run_postinstall = false;
+      LOG(INFO) << "Skipping optional post-install for partition "
+                << partition.name << " according to install plan.";
+    }
+
     // TODO(deymo): This code sets the weight to all the postinstall commands,
     // but we could remember how long they took in the past and use those
     // values.
-    partition_weight_[i] = install_plan_.partitions[i].run_postinstall;
+    partition_weight_[i] = partition.run_postinstall;
     total_weight_ += partition_weight_[i];
   }
   accumulated_weight_ = 0;
@@ -88,11 +96,6 @@
 }
 
 void PostinstallRunnerAction::PerformPartitionPostinstall() {
-  if (!install_plan_.run_post_install) {
-    LOG(INFO) << "Skipping post-install according to install plan.";
-    return CompletePostinstall(ErrorCode::kSuccess);
-  }
-
   if (install_plan_.download_url.empty()) {
     LOG(INFO) << "Skipping post-install during rollback";
     return CompletePostinstall(ErrorCode::kSuccess);
@@ -290,6 +293,7 @@
 
   progress_fd_ = -1;
   progress_controller_.reset();
+
   progress_buffer_.clear();
 }
 
@@ -336,8 +340,13 @@
   // steps succeeded.
   if (error_code == ErrorCode::kSuccess) {
     if (install_plan_.switch_slot_on_reboot) {
-      if (!boot_control_->SetActiveBootSlot(install_plan_.target_slot)) {
+      if (!boot_control_->GetDynamicPartitionControl()->FinishUpdate(
+              install_plan_.powerwash_required) ||
+          !boot_control_->SetActiveBootSlot(install_plan_.target_slot)) {
         error_code = ErrorCode::kPostinstallRunnerError;
+      } else {
+        // Schedules warm reset on next reboot, ignores the error.
+        hardware_->SetWarmReset(true);
       }
     } else {
       error_code = ErrorCode::kUpdatedButNotActive;
diff --git a/payload_consumer/postinstall_runner_action.h b/payload_consumer/postinstall_runner_action.h
index 838b235..e404107 100644
--- a/payload_consumer/postinstall_runner_action.h
+++ b/payload_consumer/postinstall_runner_action.h
@@ -97,8 +97,6 @@
   // ready. Called when the post-install script was run for all the partitions.
   void CompletePostinstall(ErrorCode error_code);
 
-  InstallPlan install_plan_;
-
   // The path where the filesystem will be mounted during post-install.
   std::string fs_mount_dir_;
 
@@ -141,6 +139,7 @@
   // The parent progress file descriptor used to watch for progress reports from
   // the postinstall program and the task watching for them.
   int progress_fd_{-1};
+
   std::unique_ptr<base::FileDescriptorWatcher::Controller> progress_controller_;
 
   // A buffer of a partial read line from the progress file descriptor.
diff --git a/payload_consumer/postinstall_runner_action_unittest.cc b/payload_consumer/postinstall_runner_action_unittest.cc
index 84f2c2c..cf5158b 100644
--- a/payload_consumer/postinstall_runner_action_unittest.cc
+++ b/payload_consumer/postinstall_runner_action_unittest.cc
@@ -103,6 +103,8 @@
                             bool is_rollback,
                             bool save_rollback_data);
 
+  void RunPostinstallActionWithInstallPlan(const InstallPlan& install_plan);
+
  public:
   void ResumeRunningAction() {
     ASSERT_NE(nullptr, postinstall_action_);
@@ -180,9 +182,6 @@
     bool powerwash_required,
     bool is_rollback,
     bool save_rollback_data) {
-  ActionProcessor processor;
-  processor_ = &processor;
-  auto feeder_action = std::make_unique<ObjectFeederAction<InstallPlan>>();
   InstallPlan::Partition part;
   part.name = "part";
   part.target_path = device_path;
@@ -194,6 +193,14 @@
   install_plan.powerwash_required = powerwash_required;
   install_plan.is_rollback = is_rollback;
   install_plan.rollback_data_save_requested = save_rollback_data;
+  RunPostinstallActionWithInstallPlan(install_plan);
+}
+
+void PostinstallRunnerActionTest::RunPostinstallActionWithInstallPlan(
+    const chromeos_update_engine::InstallPlan& install_plan) {
+  ActionProcessor processor;
+  processor_ = &processor;
+  auto feeder_action = std::make_unique<ObjectFeederAction<InstallPlan>>();
   feeder_action->set_obj(install_plan);
   auto runner_action = std::make_unique<PostinstallRunnerAction>(
       &fake_boot_control_, &fake_hardware_);
@@ -220,7 +227,7 @@
   EXPECT_TRUE(processor_delegate_.processing_stopped_called_ ||
               processor_delegate_.processing_done_called_);
   if (processor_delegate_.processing_done_called_) {
-    // Sanity check that the code was set when the processor finishes.
+    // Validation check that the code was set when the processor finishes.
     EXPECT_TRUE(processor_delegate_.code_set_);
   }
 }
@@ -335,6 +342,27 @@
   EXPECT_FALSE(fake_hardware_.GetIsRollbackPowerwashScheduled());
 }
 
+TEST_F(PostinstallRunnerActionTest, RunAsRootSkipOptionalPostinstallTest) {
+  InstallPlan::Partition part;
+  part.name = "part";
+  part.target_path = "/dev/null";
+  part.run_postinstall = true;
+  part.postinstall_path = kPostinstallDefaultScript;
+  part.postinstall_optional = true;
+  InstallPlan install_plan;
+  install_plan.partitions = {part};
+  install_plan.download_url = "http://127.0.0.1:8080/update";
+
+  // Optional postinstalls will be skipped, and the postinstall action succeeds.
+  RunPostinstallActionWithInstallPlan(install_plan);
+  EXPECT_EQ(ErrorCode::kSuccess, processor_delegate_.code_);
+
+  part.postinstall_optional = false;
+  install_plan.partitions = {part};
+  RunPostinstallActionWithInstallPlan(install_plan);
+  EXPECT_EQ(ErrorCode::kPostinstallRunnerError, processor_delegate_.code_);
+}
+
 // Check that the failures from the postinstall script cause the action to
 // fail.
 TEST_F(PostinstallRunnerActionTest, RunAsRootErrScriptTest) {
diff --git a/payload_consumer/verity_writer_android.cc b/payload_consumer/verity_writer_android.cc
index 06d1489..d5437b6 100644
--- a/payload_consumer/verity_writer_android.cc
+++ b/payload_consumer/verity_writer_android.cc
@@ -41,6 +41,9 @@
 bool VerityWriterAndroid::Init(const InstallPlan::Partition& partition) {
   partition_ = &partition;
 
+  if (partition_->hash_tree_size != 0 || partition_->fec_size != 0) {
+    utils::SetBlockDeviceReadOnly(partition_->target_path, false);
+  }
   if (partition_->hash_tree_size != 0) {
     auto hash_function =
         HashTreeBuilder::HashFunction(partition_->hash_tree_algorithm);
diff --git a/payload_generator/ab_generator.cc b/payload_generator/ab_generator.cc
index f4cc9fb..d9b9d88 100644
--- a/payload_generator/ab_generator.cc
+++ b/payload_generator/ab_generator.cc
@@ -276,7 +276,7 @@
       target_part_path, dst_extents, &data, data.size(), kBlockSize));
 
   brillo::Blob blob;
-  InstallOperation_Type op_type;
+  InstallOperation::Type op_type;
   TEST_AND_RETURN_FALSE(
       diff_utils::GenerateBestFullOperation(data, version, &blob, &op_type));
 
diff --git a/payload_generator/ab_generator_unittest.cc b/payload_generator/ab_generator_unittest.cc
index 170e0e3..7a95284 100644
--- a/payload_generator/ab_generator_unittest.cc
+++ b/payload_generator/ab_generator_unittest.cc
@@ -49,7 +49,7 @@
 }
 
 // Tests splitting of a REPLACE/REPLACE_XZ operation.
-void TestSplitReplaceOrReplaceXzOperation(InstallOperation_Type orig_type,
+void TestSplitReplaceOrReplaceXzOperation(InstallOperation::Type orig_type,
                                           bool compressible) {
   const size_t op_ex1_start_block = 2;
   const size_t op_ex1_num_blocks = 2;
@@ -124,7 +124,7 @@
       version, aop, part_file.path(), &result_ops, &blob_file));
 
   // Check the result.
-  InstallOperation_Type expected_type =
+  InstallOperation::Type expected_type =
       compressible ? InstallOperation::REPLACE_XZ : InstallOperation::REPLACE;
 
   ASSERT_EQ(2U, result_ops.size());
@@ -200,7 +200,7 @@
 }
 
 // Tests merging of REPLACE/REPLACE_XZ operations.
-void TestMergeReplaceOrReplaceXzOperations(InstallOperation_Type orig_type,
+void TestMergeReplaceOrReplaceXzOperations(InstallOperation::Type orig_type,
                                            bool compressible) {
   const size_t first_op_num_blocks = 1;
   const size_t second_op_num_blocks = 2;
@@ -287,7 +287,7 @@
       &aops, version, 5, part_file.path(), &blob_file));
 
   // Check the result.
-  InstallOperation_Type expected_op_type =
+  InstallOperation::Type expected_op_type =
       compressible ? InstallOperation::REPLACE_XZ : InstallOperation::REPLACE;
   EXPECT_EQ(1U, aops.size());
   InstallOperation new_op = aops[0].op;
diff --git a/payload_generator/blob_file_writer.cc b/payload_generator/blob_file_writer.cc
index 7cdeb35..a1afe87 100644
--- a/payload_generator/blob_file_writer.cc
+++ b/payload_generator/blob_file_writer.cc
@@ -38,9 +38,9 @@
   return result;
 }
 
-void BlobFileWriter::SetTotalBlobs(size_t total_blobs) {
-  total_blobs_ = total_blobs;
-  stored_blobs_ = 0;
+void BlobFileWriter::IncTotalBlobs(size_t increment) {
+  base::AutoLock auto_lock(blob_mutex_);
+  total_blobs_ += increment;
 }
 
 }  // namespace chromeos_update_engine
diff --git a/payload_generator/blob_file_writer.h b/payload_generator/blob_file_writer.h
index 48553be..bdd4c08 100644
--- a/payload_generator/blob_file_writer.h
+++ b/payload_generator/blob_file_writer.h
@@ -35,10 +35,8 @@
   // was stored, or -1 in case of failure.
   off_t StoreBlob(const brillo::Blob& blob);
 
-  // The number of |total_blobs| is the number of blobs that will be stored but
-  // is only used for logging purposes. If not set or set to 0, logging will be
-  // skipped. This function will also reset the number of stored blobs to 0.
-  void SetTotalBlobs(size_t total_blobs);
+  // Increases |total_blobs| by |increment|. Thread safe.
+  void IncTotalBlobs(size_t increment);
 
  private:
   size_t total_blobs_{0};
diff --git a/payload_generator/boot_img_filesystem.cc b/payload_generator/boot_img_filesystem.cc
index 19de410..89b175e 100644
--- a/payload_generator/boot_img_filesystem.cc
+++ b/payload_generator/boot_img_filesystem.cc
@@ -17,6 +17,7 @@
 #include "update_engine/payload_generator/boot_img_filesystem.h"
 
 #include <base/logging.h>
+#include <bootimg.h>
 #include <brillo/secure_blob.h>
 #include <puffin/utils.h>
 
@@ -35,16 +36,61 @@
   if (filename.empty())
     return nullptr;
 
-  brillo::Blob header;
-  if (!utils::ReadFileChunk(filename, 0, sizeof(boot_img_hdr), &header) ||
-      header.size() != sizeof(boot_img_hdr) ||
-      memcmp(header.data(), BOOT_MAGIC, BOOT_MAGIC_SIZE) != 0) {
+  if (brillo::Blob header_magic;
+      !utils::ReadFileChunk(filename, 0, BOOT_MAGIC_SIZE, &header_magic) ||
+      memcmp(header_magic.data(), BOOT_MAGIC, BOOT_MAGIC_SIZE) != 0) {
+    return nullptr;
+  }
+
+  // The order of image header fields is different in version 3 from the
+  // previous versions. But the position of "header_version" is fixed at #9
+  // across all image headers.
+  // See details in system/tools/mkbootimg/include/bootimg/bootimg.h
+  constexpr size_t header_version_offset =
+      BOOT_MAGIC_SIZE + 8 * sizeof(uint32_t);
+  brillo::Blob header_version_blob;
+  if (!utils::ReadFileChunk(filename,
+                            header_version_offset,
+                            sizeof(uint32_t),
+                            &header_version_blob)) {
+    return nullptr;
+  }
+  uint32_t header_version =
+      *reinterpret_cast<uint32_t*>(header_version_blob.data());
+  if (header_version > 3) {
+    LOG(WARNING) << "Boot image header version " << header_version
+                 << " isn't supported for parsing";
+    return nullptr;
+  }
+
+  // Read the bytes of boot image header based on the header version.
+  size_t header_size =
+      header_version == 3 ? sizeof(boot_img_hdr_v3) : sizeof(boot_img_hdr_v0);
+  brillo::Blob header_blob;
+  if (!utils::ReadFileChunk(filename, 0, header_size, &header_blob)) {
     return nullptr;
   }
 
   unique_ptr<BootImgFilesystem> result(new BootImgFilesystem());
   result->filename_ = filename;
-  memcpy(&result->hdr_, header.data(), header.size());
+  if (header_version < 3) {
+    auto hdr_v0 = reinterpret_cast<boot_img_hdr_v0*>(header_blob.data());
+    CHECK_EQ(0, memcmp(hdr_v0->magic, BOOT_MAGIC, BOOT_MAGIC_SIZE));
+    CHECK_LT(hdr_v0->header_version, 3u);
+    result->kernel_size_ = hdr_v0->kernel_size;
+    result->ramdisk_size_ = hdr_v0->ramdisk_size;
+    result->page_size_ = hdr_v0->page_size;
+  } else {
+    auto hdr_v3 = reinterpret_cast<boot_img_hdr_v3*>(header_blob.data());
+    CHECK_EQ(0, memcmp(hdr_v3->magic, BOOT_MAGIC, BOOT_MAGIC_SIZE));
+    CHECK_EQ(3u, hdr_v3->header_version);
+    result->kernel_size_ = hdr_v3->kernel_size;
+    result->ramdisk_size_ = hdr_v3->ramdisk_size;
+    result->page_size_ = 4096;
+  }
+
+  CHECK_GT(result->page_size_, 0u);
+
   return result;
 }
 
@@ -87,13 +133,13 @@
   files->clear();
   const uint64_t file_size = utils::FileSize(filename_);
   // The first page is header.
-  uint64_t offset = hdr_.page_size;
-  if (hdr_.kernel_size > 0 && offset + hdr_.kernel_size <= file_size) {
-    files->emplace_back(GetFile("<kernel>", offset, hdr_.kernel_size));
+  uint64_t offset = page_size_;
+  if (kernel_size_ > 0 && offset + kernel_size_ <= file_size) {
+    files->emplace_back(GetFile("<kernel>", offset, kernel_size_));
   }
-  offset += utils::RoundUp(hdr_.kernel_size, hdr_.page_size);
-  if (hdr_.ramdisk_size > 0 && offset + hdr_.ramdisk_size <= file_size) {
-    files->emplace_back(GetFile("<ramdisk>", offset, hdr_.ramdisk_size));
+  offset += utils::RoundUp(kernel_size_, page_size_);
+  if (ramdisk_size_ > 0 && offset + ramdisk_size_ <= file_size) {
+    files->emplace_back(GetFile("<ramdisk>", offset, ramdisk_size_));
   }
   return true;
 }
diff --git a/payload_generator/boot_img_filesystem.h b/payload_generator/boot_img_filesystem.h
index 87725d4..61f755c 100644
--- a/payload_generator/boot_img_filesystem.h
+++ b/payload_generator/boot_img_filesystem.h
@@ -52,23 +52,9 @@
   // The boot.img file path.
   std::string filename_;
 
-// https://android.googlesource.com/platform/system/core/+/master/mkbootimg/include/bootimg/bootimg.h
-#define BOOT_MAGIC "ANDROID!"
-#define BOOT_MAGIC_SIZE 8
-  struct boot_img_hdr {
-    // Must be BOOT_MAGIC.
-    uint8_t magic[BOOT_MAGIC_SIZE];
-    uint32_t kernel_size;  /* size in bytes */
-    uint32_t kernel_addr;  /* physical load addr */
-    uint32_t ramdisk_size; /* size in bytes */
-    uint32_t ramdisk_addr; /* physical load addr */
-    uint32_t second_size;  /* size in bytes */
-    uint32_t second_addr;  /* physical load addr */
-    uint32_t tags_addr;    /* physical addr for kernel tags */
-    uint32_t page_size;    /* flash page size we assume */
-  } __attribute__((packed));
-  // The boot image header.
-  boot_img_hdr hdr_;
+  uint32_t kernel_size_;  /* size in bytes */
+  uint32_t ramdisk_size_; /* size in bytes */
+  uint32_t page_size_;    /* flash page size we assume */
 
   DISALLOW_COPY_AND_ASSIGN(BootImgFilesystem);
 };
diff --git a/payload_generator/boot_img_filesystem_stub.cc b/payload_generator/boot_img_filesystem_stub.cc
new file mode 100644
index 0000000..4928fa1
--- /dev/null
+++ b/payload_generator/boot_img_filesystem_stub.cc
@@ -0,0 +1,48 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_generator/boot_img_filesystem.h"
+
+namespace chromeos_update_engine {
+std::unique_ptr<BootImgFilesystem> BootImgFilesystem::CreateFromFile(
+    const std::string& /* filename */) {
+  return nullptr;
+}
+
+size_t BootImgFilesystem::GetBlockSize() const {
+  return 4096;
+}
+
+size_t BootImgFilesystem::GetBlockCount() const {
+  return 0;
+}
+
+FilesystemInterface::File BootImgFilesystem::GetFile(
+    const std::string& /* name */,
+    uint64_t /* offset */,
+    uint64_t /* size */) const {
+  return {};
+}
+
+bool BootImgFilesystem::GetFiles(std::vector<File>* /* files */) const {
+  return false;
+}
+
+bool BootImgFilesystem::LoadSettings(brillo::KeyValueStore* /* store */) const {
+  return false;
+}
+
+}  // namespace chromeos_update_engine
diff --git a/payload_generator/boot_img_filesystem_unittest.cc b/payload_generator/boot_img_filesystem_unittest.cc
index b1e0d99..0b115e0 100644
--- a/payload_generator/boot_img_filesystem_unittest.cc
+++ b/payload_generator/boot_img_filesystem_unittest.cc
@@ -18,6 +18,7 @@
 
 #include <vector>
 
+#include <bootimg.h>
 #include <brillo/secure_blob.h>
 #include <gtest/gtest.h>
 
@@ -32,18 +33,32 @@
 class BootImgFilesystemTest : public ::testing::Test {
  protected:
   brillo::Blob GetBootImg(const brillo::Blob& kernel,
-                          const brillo::Blob& ramdisk) {
+                          const brillo::Blob& ramdisk,
+                          bool header_version3 = false) {
     brillo::Blob boot_img(16 * 1024);
-    BootImgFilesystem::boot_img_hdr hdr;
-    memcpy(hdr.magic, BOOT_MAGIC, BOOT_MAGIC_SIZE);
-    hdr.kernel_size = kernel.size();
-    hdr.ramdisk_size = ramdisk.size();
-    hdr.page_size = 4096;
+    constexpr uint32_t page_size = 4096;
+
     size_t offset = 0;
-    memcpy(boot_img.data() + offset, &hdr, sizeof(hdr));
-    offset += utils::RoundUp(sizeof(hdr), hdr.page_size);
+    if (header_version3) {
+      boot_img_hdr_v3 hdr_v3{};
+      memcpy(hdr_v3.magic, BOOT_MAGIC, BOOT_MAGIC_SIZE);
+      hdr_v3.kernel_size = kernel.size();
+      hdr_v3.ramdisk_size = ramdisk.size();
+      hdr_v3.header_version = 3;
+      memcpy(boot_img.data() + offset, &hdr_v3, sizeof(hdr_v3));
+      offset += utils::RoundUp(sizeof(hdr_v3), page_size);
+    } else {
+      boot_img_hdr_v0 hdr_v0{};
+      memcpy(hdr_v0.magic, BOOT_MAGIC, BOOT_MAGIC_SIZE);
+      hdr_v0.kernel_size = kernel.size();
+      hdr_v0.ramdisk_size = ramdisk.size();
+      hdr_v0.page_size = page_size;
+      hdr_v0.header_version = 0;
+      memcpy(boot_img.data() + offset, &hdr_v0, sizeof(hdr_v0));
+      offset += utils::RoundUp(sizeof(hdr_v0), page_size);
+    }
     memcpy(boot_img.data() + offset, kernel.data(), kernel.size());
-    offset += utils::RoundUp(kernel.size(), hdr.page_size);
+    offset += utils::RoundUp(kernel.size(), page_size);
     memcpy(boot_img.data() + offset, ramdisk.data(), ramdisk.size());
     return boot_img;
   }
@@ -76,6 +91,31 @@
   EXPECT_TRUE(files[1].deflates.empty());
 }
 
+TEST_F(BootImgFilesystemTest, ImageHeaderVersion3) {
+  test_utils::WriteFileVector(
+      boot_file_.path(),
+      GetBootImg(brillo::Blob(1000, 'k'), brillo::Blob(5000, 'r'), true));
+  unique_ptr<BootImgFilesystem> fs =
+      BootImgFilesystem::CreateFromFile(boot_file_.path());
+  EXPECT_NE(nullptr, fs);
+
+  vector<FilesystemInterface::File> files;
+  EXPECT_TRUE(fs->GetFiles(&files));
+  ASSERT_EQ(2u, files.size());
+
+  EXPECT_EQ("<kernel>", files[0].name);
+  EXPECT_EQ(1u, files[0].extents.size());
+  EXPECT_EQ(1u, files[0].extents[0].start_block());
+  EXPECT_EQ(1u, files[0].extents[0].num_blocks());
+  EXPECT_TRUE(files[0].deflates.empty());
+
+  EXPECT_EQ("<ramdisk>", files[1].name);
+  EXPECT_EQ(1u, files[1].extents.size());
+  EXPECT_EQ(2u, files[1].extents[0].start_block());
+  EXPECT_EQ(2u, files[1].extents[0].num_blocks());
+  EXPECT_TRUE(files[1].deflates.empty());
+}
+
 TEST_F(BootImgFilesystemTest, BadImageTest) {
   brillo::Blob boot_img = GetBootImg({}, {});
   boot_img[7] = '?';
diff --git a/payload_generator/deflate_utils.cc b/payload_generator/deflate_utils.cc
index 5d7a766..c874bfd 100644
--- a/payload_generator/deflate_utils.cc
+++ b/payload_generator/deflate_utils.cc
@@ -74,6 +74,15 @@
   return false;
 }
 
+bool IsRegularFile(const FilesystemInterface::File& file) {
+  // If inode is 0, then stat information is invalid for some pseudo files
+  if (file.file_stat.st_ino != 0 &&
+      (file.file_stat.st_mode & S_IFMT) == S_IFREG) {
+    return true;
+  }
+  return false;
+}
+
 // Realigns subfiles |files| of a splitted file |file| into its correct
 // positions. This can be used for squashfs, zip, apk, etc.
 bool RealignSplittedFiles(const FilesystemInterface::File& file,
@@ -265,7 +274,9 @@
   result_files->reserve(tmp_files.size());
 
   for (auto& file : tmp_files) {
-    if (IsSquashfsImage(part.path, file)) {
+    auto is_regular_file = IsRegularFile(file);
+
+    if (is_regular_file && IsSquashfsImage(part.path, file)) {
       // Read the image into a file.
       base::FilePath path;
       TEST_AND_RETURN_FALSE(base::CreateTemporaryFile(&path));
@@ -296,7 +307,7 @@
       }
     }
 
-    if (extract_deflates && !file.is_compressed) {
+    if (is_regular_file && extract_deflates && !file.is_compressed) {
       // Search for deflates if the file is in zip or gzip format.
       // .zvoice files may eventually move out of rootfs. If that happens,
       // remove ".zvoice" (crbug.com/782918).
diff --git a/payload_generator/delta_diff_generator.cc b/payload_generator/delta_diff_generator.cc
index 595a41e..c2b35ee 100644
--- a/payload_generator/delta_diff_generator.cc
+++ b/payload_generator/delta_diff_generator.cc
@@ -29,14 +29,17 @@
 #include <vector>
 
 #include <base/logging.h>
+#include <base/threading/simple_thread.h>
 
 #include "update_engine/common/utils.h"
 #include "update_engine/payload_consumer/delta_performer.h"
 #include "update_engine/payload_consumer/payload_constants.h"
 #include "update_engine/payload_generator/ab_generator.h"
+#include "update_engine/payload_generator/annotated_operation.h"
 #include "update_engine/payload_generator/blob_file_writer.h"
 #include "update_engine/payload_generator/delta_diff_utils.h"
 #include "update_engine/payload_generator/full_update_generator.h"
+#include "update_engine/payload_generator/merge_sequence_generator.h"
 #include "update_engine/payload_generator/payload_file.h"
 
 using std::string;
@@ -49,6 +52,59 @@
 const size_t kRootFSPartitionSize = static_cast<size_t>(2) * 1024 * 1024 * 1024;
 const size_t kBlockSize = 4096;  // bytes
 
+class PartitionProcessor : public base::DelegateSimpleThread::Delegate {
+ public:
+  explicit PartitionProcessor(
+      const PayloadGenerationConfig& config,
+      const PartitionConfig& old_part,
+      const PartitionConfig& new_part,
+      BlobFileWriter* file_writer,
+      std::vector<AnnotatedOperation>* aops,
+      std::vector<CowMergeOperation>* cow_merge_sequence,
+      std::unique_ptr<chromeos_update_engine::OperationsGenerator> strategy)
+      : config_(config),
+        old_part_(old_part),
+        new_part_(new_part),
+        file_writer_(file_writer),
+        aops_(aops),
+        cow_merge_sequence_(cow_merge_sequence),
+        strategy_(std::move(strategy)) {}
+  PartitionProcessor(PartitionProcessor&&) noexcept = default;
+  void Run() override {
+    LOG(INFO) << "Started an async task to process partition "
+              << old_part_.name;
+    bool success = strategy_->GenerateOperations(
+        config_, old_part_, new_part_, file_writer_, aops_);
+    if (!success) {
+      // ABORT the entire process, so that the developer can look
+      // at recent logs and diagnose what happened.
+      LOG(FATAL) << "GenerateOperations(" << old_part_.name << ", "
+                 << new_part_.name << ") failed";
+    }
+
+    bool snapshot_enabled =
+        config_.target.dynamic_partition_metadata &&
+        config_.target.dynamic_partition_metadata->snapshot_enabled();
+    if (old_part_.path.empty() || !snapshot_enabled) {
+      return;
+    }
+    auto generator = MergeSequenceGenerator::Create(*aops_);
+    if (!generator || !generator->Generate(cow_merge_sequence_)) {
+      LOG(FATAL) << "Failed to generate merge sequence";
+    }
+  }
+
+ private:
+  const PayloadGenerationConfig& config_;
+  const PartitionConfig& old_part_;
+  const PartitionConfig& new_part_;
+  BlobFileWriter* file_writer_;
+  std::vector<AnnotatedOperation>* aops_;
+  std::vector<CowMergeOperation>* cow_merge_sequence_;
+  std::unique_ptr<chromeos_update_engine::OperationsGenerator> strategy_;
+  DISALLOW_COPY_AND_ASSIGN(PartitionProcessor);
+};
+
 bool GenerateUpdatePayloadFile(const PayloadGenerationConfig& config,
                                const string& output_path,
                                const string& private_key_path,
@@ -80,6 +136,15 @@
                             config.target.partitions.size());
     }
     PartitionConfig empty_part("");
+    std::vector<std::vector<AnnotatedOperation>> all_aops;
+    all_aops.resize(config.target.partitions.size());
+    std::vector<std::vector<CowMergeOperation>> all_merge_sequences;
+    all_merge_sequences.resize(config.target.partitions.size());
+    std::vector<PartitionProcessor> partition_tasks{};
+    auto thread_count = std::min<int>(diff_utils::GetMaxThreads(),
+                                      config.target.partitions.size());
+    base::DelegateSimpleThreadPool thread_pool{"partition-thread-pool",
+                                               thread_count};
     for (size_t i = 0; i < config.target.partitions.size(); i++) {
       const PartitionConfig& old_part =
           config.is_delta ? config.source.partitions[i] : empty_part;
@@ -99,12 +164,30 @@
         strategy.reset(new FullUpdateGenerator());
       }
 
-      vector<AnnotatedOperation> aops;
       // Generate the operations using the strategy we selected above.
-      TEST_AND_RETURN_FALSE(strategy->GenerateOperations(
-          config, old_part, new_part, &blob_file, &aops));
+      partition_tasks.push_back(PartitionProcessor(config,
+                                                   old_part,
+                                                   new_part,
+                                                   &blob_file,
+                                                   &all_aops[i],
+                                                   &all_merge_sequences[i],
+                                                   std::move(strategy)));
+    }
+    thread_pool.Start();
+    for (auto& processor : partition_tasks) {
+      thread_pool.AddWork(&processor);
+    }
+    thread_pool.JoinAll();
 
-      TEST_AND_RETURN_FALSE(payload.AddPartition(old_part, new_part, aops));
+    for (size_t i = 0; i < config.target.partitions.size(); i++) {
+      const PartitionConfig& old_part =
+          config.is_delta ? config.source.partitions[i] : empty_part;
+      const PartitionConfig& new_part = config.target.partitions[i];
+      TEST_AND_RETURN_FALSE(
+          payload.AddPartition(old_part,
+                               new_part,
+                               std::move(all_aops[i]),
+                               std::move(all_merge_sequences[i])));
     }
   }
 
diff --git a/payload_generator/delta_diff_utils.cc b/payload_generator/delta_diff_utils.cc
index ded30fb..220c7ae 100644
--- a/payload_generator/delta_diff_utils.cc
+++ b/payload_generator/delta_diff_utils.cc
@@ -637,7 +637,7 @@
 bool GenerateBestFullOperation(const brillo::Blob& new_data,
                                const PayloadVersion& version,
                                brillo::Blob* out_blob,
-                               InstallOperation_Type* out_type) {
+                               InstallOperation::Type* out_type) {
   if (new_data.empty())
     return false;
 
@@ -739,7 +739,7 @@
 
   // Try generating a full operation for the given new data, regardless of the
   // old_data.
-  InstallOperation_Type op_type;
+  InstallOperation::Type op_type;
   TEST_AND_RETURN_FALSE(
       GenerateBestFullOperation(new_data, version, &data_blob, &op_type));
   operation.set_type(op_type);
@@ -766,7 +766,7 @@
         ScopedPathUnlinker unlinker(patch.value());
 
         std::unique_ptr<bsdiff::PatchWriterInterface> bsdiff_patch_writer;
-        InstallOperation_Type operation_type = InstallOperation::SOURCE_BSDIFF;
+        InstallOperation::Type operation_type = InstallOperation::SOURCE_BSDIFF;
         if (version.OperationAllowed(InstallOperation::BROTLI_BSDIFF)) {
           bsdiff_patch_writer =
               bsdiff::CreateBSDF2PatchWriter(patch.value(),
@@ -872,13 +872,13 @@
   return true;
 }
 
-bool IsAReplaceOperation(InstallOperation_Type op_type) {
+bool IsAReplaceOperation(InstallOperation::Type op_type) {
   return (op_type == InstallOperation::REPLACE ||
           op_type == InstallOperation::REPLACE_BZ ||
           op_type == InstallOperation::REPLACE_XZ);
 }
 
-bool IsNoSourceOperation(InstallOperation_Type op_type) {
+bool IsNoSourceOperation(InstallOperation::Type op_type) {
   return (IsAReplaceOperation(op_type) || op_type == InstallOperation::ZERO ||
           op_type == InstallOperation::DISCARD);
 }
@@ -938,7 +938,7 @@
   if (magic != EXT2_SUPER_MAGIC)
     return false;
 
-  // Sanity check the parameters.
+  // Validation check the parameters.
   TEST_AND_RETURN_FALSE(log_block_size >= EXT2_MIN_BLOCK_LOG_SIZE &&
                         log_block_size <= EXT2_MAX_BLOCK_LOG_SIZE);
   TEST_AND_RETURN_FALSE(block_count > 0);
diff --git a/payload_generator/delta_diff_utils.h b/payload_generator/delta_diff_utils.h
index a062327..c75d16d 100644
--- a/payload_generator/delta_diff_utils.h
+++ b/payload_generator/delta_diff_utils.h
@@ -119,13 +119,13 @@
 bool GenerateBestFullOperation(const brillo::Blob& new_data,
                                const PayloadVersion& version,
                                brillo::Blob* out_blob,
-                               InstallOperation_Type* out_type);
+                               InstallOperation::Type* out_type);
 
 // Returns whether |op_type| is one of the REPLACE full operations.
-bool IsAReplaceOperation(InstallOperation_Type op_type);
+bool IsAReplaceOperation(InstallOperation::Type op_type);
 
 // Returns true if an operation with type |op_type| has no |src_extents|.
-bool IsNoSourceOperation(InstallOperation_Type op_type);
+bool IsNoSourceOperation(InstallOperation::Type op_type);
 
 bool InitializePartitionInfo(const PartitionConfig& partition,
                              PartitionInfo* info);
diff --git a/payload_generator/delta_diff_utils_unittest.cc b/payload_generator/delta_diff_utils_unittest.cc
index 8a97b1b..0857f9c 100644
--- a/payload_generator/delta_diff_utils_unittest.cc
+++ b/payload_generator/delta_diff_utils_unittest.cc
@@ -231,7 +231,7 @@
     EXPECT_FALSE(data.empty());
 
     EXPECT_TRUE(op.has_type());
-    const InstallOperation_Type expected_type =
+    const InstallOperation::Type expected_type =
         (i == 0 ? InstallOperation::REPLACE : InstallOperation::REPLACE_BZ);
     EXPECT_EQ(expected_type, op.type());
     EXPECT_FALSE(op.has_data_offset());
diff --git a/payload_generator/extent_ranges.cc b/payload_generator/extent_ranges.cc
index 4600efe..2098639 100644
--- a/payload_generator/extent_ranges.cc
+++ b/payload_generator/extent_ranges.cc
@@ -202,6 +202,15 @@
   }
 }
 
+bool ExtentRanges::OverlapsWithExtent(const Extent& extent) const {
+  for (const auto& entry : extent_set_) {
+    if (ExtentsOverlap(entry, extent)) {
+      return true;
+    }
+  }
+  return false;
+}
+
 bool ExtentRanges::ContainsBlock(uint64_t block) const {
   auto lower = extent_set_.lower_bound(ExtentForRange(block, 1));
   // The block could be on the extent before the one in |lower|.
diff --git a/payload_generator/extent_ranges.h b/payload_generator/extent_ranges.h
index 62ffff4..68aa27f 100644
--- a/payload_generator/extent_ranges.h
+++ b/payload_generator/extent_ranges.h
@@ -63,6 +63,9 @@
   void AddRanges(const ExtentRanges& ranges);
   void SubtractRanges(const ExtentRanges& ranges);
 
+  // Returns true if the input extent overlaps with the current ExtentRanges.
+  bool OverlapsWithExtent(const Extent& extent) const;
+
   // Returns whether the block |block| is in this ExtentRange.
   bool ContainsBlock(uint64_t block) const;
 
diff --git a/payload_generator/full_update_generator.cc b/payload_generator/full_update_generator.cc
index 4d8b2f9..4a5f63a 100644
--- a/payload_generator/full_update_generator.cc
+++ b/payload_generator/full_update_generator.cc
@@ -99,7 +99,7 @@
       fd_, buffer_in_.data(), buffer_in_.size(), offset_, &bytes_read));
   TEST_AND_RETURN_FALSE(bytes_read == static_cast<ssize_t>(size_));
 
-  InstallOperation_Type op_type;
+  InstallOperation::Type op_type;
   TEST_AND_RETURN_FALSE(diff_utils::GenerateBestFullOperation(
       buffer_in_, version_, &op_blob, &op_type));
 
@@ -153,7 +153,7 @@
   aops->resize(num_chunks);
   vector<ChunkProcessor> chunk_processors;
   chunk_processors.reserve(num_chunks);
-  blob_file->SetTotalBlobs(num_chunks);
+  blob_file->IncTotalBlobs(num_chunks);
 
   for (size_t i = 0; i < num_chunks; ++i) {
     size_t start_block = i * chunk_blocks;
@@ -187,9 +187,6 @@
     thread_pool.AddWork(&processor);
   thread_pool.JoinAll();
 
-  // All the work done, disable logging.
-  blob_file->SetTotalBlobs(0);
-
   // All the operations must have a type set at this point. Otherwise, a
   // ChunkProcessor failed to complete.
   for (const AnnotatedOperation& aop : *aops) {
diff --git a/payload_generator/generate_delta_main.cc b/payload_generator/generate_delta_main.cc
index fe0a10b..5c1fb47 100644
--- a/payload_generator/generate_delta_main.cc
+++ b/payload_generator/generate_delta_main.cc
@@ -14,6 +14,7 @@
 // limitations under the License.
 //
 
+#include <map>
 #include <string>
 #include <vector>
 
@@ -22,6 +23,7 @@
 #include <base/logging.h>
 #include <base/strings/string_number_conversions.h>
 #include <base/strings/string_split.h>
+#include <base/strings/string_util.h>
 #include <brillo/flag_helper.h>
 #include <brillo/key_value_store.h>
 #include <brillo/message_loops/base_message_loop.h>
@@ -47,6 +49,7 @@
 // and an output file as arguments and the path to an output file and
 // generates a delta that can be sent to Chrome OS clients.
 
+using std::map;
 using std::string;
 using std::vector;
 
@@ -58,18 +61,15 @@
 constexpr char kPayloadPropertiesFormatJson[] = "json";
 
 void ParseSignatureSizes(const string& signature_sizes_flag,
-                         vector<int>* signature_sizes) {
+                         vector<size_t>* signature_sizes) {
   signature_sizes->clear();
   vector<string> split_strings = base::SplitString(
       signature_sizes_flag, ":", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
   for (const string& str : split_strings) {
-    int size = 0;
-    bool parsing_successful = base::StringToInt(str, &size);
+    size_t size = 0;
+    bool parsing_successful = base::StringToSizeT(str, &size);
     LOG_IF(FATAL, !parsing_successful) << "Invalid signature size: " << str;
 
-    LOG_IF(FATAL, size != (2048 / 8))
-        << "Only signature sizes of 256 bytes are supported.";
-
     signature_sizes->push_back(size);
   }
 }
@@ -106,7 +106,7 @@
   return true;
 }
 
-void CalculateHashForSigning(const vector<int>& sizes,
+void CalculateHashForSigning(const vector<size_t>& sizes,
                              const string& out_hash_file,
                              const string& out_metadata_hash_file,
                              const string& in_file) {
@@ -142,6 +142,7 @@
 
 void SignPayload(const string& in_file,
                  const string& out_file,
+                 const vector<size_t>& signature_sizes,
                  const string& payload_signature_file,
                  const string& metadata_signature_file,
                  const string& out_metadata_size_file) {
@@ -155,6 +156,7 @@
   SignatureFileFlagToBlobs(metadata_signature_file, &metadata_signatures);
   uint64_t final_metadata_size;
   CHECK(PayloadSigner::AddSignatureToPayload(in_file,
+                                             signature_sizes,
                                              payload_signatures,
                                              metadata_signatures,
                                              out_file,
@@ -253,8 +255,8 @@
                                        nullptr,
                                        new FileFetcher(),
                                        true /* interactive */);
-  auto filesystem_verifier_action =
-      std::make_unique<FilesystemVerifierAction>();
+  auto filesystem_verifier_action = std::make_unique<FilesystemVerifierAction>(
+      fake_boot_control.GetDynamicPartitionControl());
 
   BondActions(install_plan_action.get(), download_action.get());
   BondActions(download_action.get(), filesystem_verifier_action.get());
@@ -295,6 +297,39 @@
   return true;
 }
 
+template <typename Key, typename Val>
+string ToString(const map<Key, Val>& map) {
+  vector<string> result;
+  result.reserve(map.size());
+  for (const auto& it : map) {
+    result.emplace_back(it.first + ": " + it.second);
+  }
+  return "{" + base::JoinString(result, ",") + "}";
+}
+
+bool ParsePerPartitionTimestamps(const string& partition_timestamps,
+                                 PayloadGenerationConfig* config) {
+  base::StringPairs pairs;
+  CHECK(base::SplitStringIntoKeyValuePairs(
+      partition_timestamps, ':', ',', &pairs))
+      << "--partition_timestamps accepts comma "
+         "separated pairs. e.g. system:1234,vendor:5678";
+  map<string, string> partition_timestamps_map{
+      std::move_iterator(pairs.begin()), std::move_iterator(pairs.end())};
+  for (auto&& partition : config->target.partitions) {
+    auto&& it = partition_timestamps_map.find(partition.name);
+    if (it != partition_timestamps_map.end()) {
+      partition.version = std::move(it->second);
+      partition_timestamps_map.erase(it);
+    }
+  }
+  if (!partition_timestamps_map.empty()) {
+    LOG(ERROR) << "Unused timestamps: " << ToString(partition_timestamps_map);
+    return false;
+  }
+  return true;
+}
+
 int Main(int argc, char** argv) {
   DEFINE_string(old_image, "", "Path to the old rootfs");
   DEFINE_string(new_image, "", "Path to the new rootfs");
@@ -385,6 +420,11 @@
                0,
                "The maximum timestamp of the OS allowed to apply this "
                "payload.");
+  DEFINE_string(
+      partition_timestamps,
+      "",
+      "The per-partition maximum timestamps which the OS allowed to apply this "
+      "payload. Passed in comma separated pairs, e.x. system:1234,vendor:5678");
 
   DEFINE_string(old_channel,
                 "",
@@ -439,6 +479,17 @@
                 "",
                 "An info file specifying dynamic partition metadata. "
                 "Only allowed in major version 2 or newer.");
+  DEFINE_bool(disable_fec_computation,
+              false,
+              "Disables the fec data computation on device.");
+  DEFINE_string(
+      out_maximum_signature_size_file,
+      "",
+      "Path to the output maximum signature size given a private key.");
+  DEFINE_bool(is_partial_update,
+              false,
+              "The payload only targets a subset of partitions on the device,"
+              "e.g. generic kernel image update.");
 
   brillo::FlagHelper::Init(
       argc,
@@ -464,8 +515,34 @@
   // Initialize the Xz compressor.
   XzCompressInit();
 
-  vector<int> signature_sizes;
-  ParseSignatureSizes(FLAGS_signature_size, &signature_sizes);
+  if (!FLAGS_out_maximum_signature_size_file.empty()) {
+    LOG_IF(FATAL, FLAGS_private_key.empty())
+        << "Private key is not provided when calculating the maximum signature "
+           "size.";
+
+    size_t maximum_signature_size;
+    if (!PayloadSigner::GetMaximumSignatureSize(FLAGS_private_key,
+                                                &maximum_signature_size)) {
+      LOG(ERROR) << "Failed to get the maximum signature size of private key: "
+                 << FLAGS_private_key;
+      return 1;
+    }
+    // Write the size string to output file.
+    string signature_size_string = std::to_string(maximum_signature_size);
+    if (!utils::WriteFile(FLAGS_out_maximum_signature_size_file.c_str(),
+                          signature_size_string.c_str(),
+                          signature_size_string.size())) {
+      PLOG(ERROR) << "Failed to write the maximum signature size to "
+                  << FLAGS_out_maximum_signature_size_file << ".";
+      return 1;
+    }
+    return 0;
+  }
+
+  vector<size_t> signature_sizes;
+  if (!FLAGS_signature_size.empty()) {
+    ParseSignatureSizes(FLAGS_signature_size, &signature_sizes);
+  }
 
   if (!FLAGS_out_hash_file.empty() || !FLAGS_out_metadata_hash_file.empty()) {
     CHECK(FLAGS_out_metadata_size_file.empty());
@@ -478,6 +555,7 @@
   if (!FLAGS_payload_signature_file.empty()) {
     SignPayload(FLAGS_in_file,
                 FLAGS_out_file,
+                signature_sizes,
                 FLAGS_payload_signature_file,
                 FLAGS_metadata_signature_file,
                 FLAGS_out_metadata_size_file);
@@ -546,6 +624,8 @@
         << "Partition name can't be empty, see --partition_names.";
     payload_config.target.partitions.emplace_back(partition_names[i]);
     payload_config.target.partitions.back().path = new_partitions[i];
+    payload_config.target.partitions.back().disable_fec_computation =
+        FLAGS_disable_fec_computation;
     if (i < new_mapfiles.size())
       payload_config.target.partitions.back().mapfile_path = new_mapfiles[i];
   }
@@ -598,6 +678,10 @@
     CHECK(payload_config.target.ValidateDynamicPartitionMetadata());
   }
 
+  if (FLAGS_is_partial_update) {
+    payload_config.is_partial_update = true;
+  }
+
   CHECK(!FLAGS_out_file.empty());
 
   // Ignore failures. These are optional arguments.
@@ -670,8 +754,13 @@
   }
 
   payload_config.max_timestamp = FLAGS_max_timestamp;
+  if (!FLAGS_partition_timestamps.empty()) {
+    CHECK(ParsePerPartitionTimestamps(FLAGS_partition_timestamps,
+                                      &payload_config));
+  }
 
-  if (payload_config.version.minor >= kVerityMinorPayloadVersion)
+  if (payload_config.is_delta &&
+      payload_config.version.minor >= kVerityMinorPayloadVersion)
     CHECK(payload_config.target.LoadVerityConfig());
 
   LOG(INFO) << "Generating " << (payload_config.is_delta ? "delta" : "full")
diff --git a/payload_generator/merge_sequence_generator.cc b/payload_generator/merge_sequence_generator.cc
new file mode 100644
index 0000000..eaffeac
--- /dev/null
+++ b/payload_generator/merge_sequence_generator.cc
@@ -0,0 +1,269 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_generator/merge_sequence_generator.h"
+
+#include <algorithm>
+
+#include "update_engine/payload_generator/extent_utils.h"
+
+namespace chromeos_update_engine {
+
+CowMergeOperation CreateCowMergeOperation(const Extent& src_extent,
+                                          const Extent& dst_extent) {
+  CowMergeOperation ret;
+  ret.set_type(CowMergeOperation::COW_COPY);
+  *ret.mutable_src_extent() = src_extent;
+  *ret.mutable_dst_extent() = dst_extent;
+  return ret;
+}
+
+std::ostream& operator<<(std::ostream& os,
+                         const CowMergeOperation& merge_operation) {
+  os << "CowMergeOperation src extent: "
+     << ExtentsToString({merge_operation.src_extent()})
+     << ", dst extent: " << ExtentsToString({merge_operation.dst_extent()});
+  return os;
+}
+
+// The OTA generation guarantees that all blocks in the dst extent will be
+// written only once. So we can use it to order the CowMergeOperation.
+bool operator<(const CowMergeOperation& op1, const CowMergeOperation& op2) {
+  return op1.dst_extent().start_block() < op2.dst_extent().start_block();
+}
+
+bool operator==(const CowMergeOperation& op1, const CowMergeOperation& op2) {
+  return op1.type() == op2.type() && op1.src_extent() == op2.src_extent() &&
+         op1.dst_extent() == op2.dst_extent();
+}
+
+std::unique_ptr<MergeSequenceGenerator> MergeSequenceGenerator::Create(
+    const std::vector<AnnotatedOperation>& aops) {
+  std::vector<CowMergeOperation> sequence;
+  for (const auto& aop : aops) {
+    // Only handle SOURCE_COPY now for the cow size optimization.
+    if (aop.op.type() != InstallOperation::SOURCE_COPY) {
+      continue;
+    }
+    if (aop.op.dst_extents().size() != 1) {
+      std::vector<Extent> out_extents;
+      ExtentsToVector(aop.op.dst_extents(), &out_extents);
+      LOG(ERROR) << "The dst extents for source_copy expects to be contiguous,"
+                 << " dst extents: " << ExtentsToString(out_extents);
+      return nullptr;
+    }
+
+    // Split the source extents.
+    size_t used_blocks = 0;
+    for (const auto& src_extent : aop.op.src_extents()) {
+      // The dst_extent in the merge sequence will be a subset of
+      // InstallOperation's dst_extent. This will simplify the OTA -> COW
+      // conversion when we install the payload.
+      Extent dst_extent =
+          ExtentForRange(aop.op.dst_extents(0).start_block() + used_blocks,
+                         src_extent.num_blocks());
+      sequence.emplace_back(CreateCowMergeOperation(src_extent, dst_extent));
+      used_blocks += src_extent.num_blocks();
+    }
+
+    if (used_blocks != aop.op.dst_extents(0).num_blocks()) {
+      LOG(ERROR) << "Number of blocks in src extents doesn't equal to the"
+                 << " ones in the dst extents, src blocks " << used_blocks
+                 << ", dst blocks " << aop.op.dst_extents(0).num_blocks();
+      return nullptr;
+    }
+  }
+
+  std::sort(sequence.begin(), sequence.end());
+  return std::unique_ptr<MergeSequenceGenerator>(
+      new MergeSequenceGenerator(sequence));
+}
+
+bool MergeSequenceGenerator::FindDependency(
+    std::map<CowMergeOperation, std::set<CowMergeOperation>>* result) const {
+  CHECK(result);
+  LOG(INFO) << "Finding dependencies";
+
+  // Since the OTA operation may reuse some source blocks, use the binary
+  // search on sorted dst extents to find overlaps.
+  std::map<CowMergeOperation, std::set<CowMergeOperation>> merge_after;
+  for (const auto& op : operations_) {
+    // lower bound (inclusive): dst extent's end block >= src extent's start
+    // block.
+    const auto lower_it = std::lower_bound(
+        operations_.begin(),
+        operations_.end(),
+        op,
+        [](const CowMergeOperation& it, const CowMergeOperation& op) {
+          auto dst_end_block =
+              it.dst_extent().start_block() + it.dst_extent().num_blocks() - 1;
+          return dst_end_block < op.src_extent().start_block();
+        });
+    // upper bound: dst extent's start block > src extent's end block
+    const auto upper_it = std::upper_bound(
+        lower_it,
+        operations_.end(),
+        op,
+        [](const CowMergeOperation& op, const CowMergeOperation& it) {
+          auto src_end_block =
+              op.src_extent().start_block() + op.src_extent().num_blocks() - 1;
+          return src_end_block < it.dst_extent().start_block();
+        });
+
+    // TODO(xunchang) skip inserting the empty set to merge_after.
+    if (lower_it == upper_it) {
+      merge_after.insert({op, {}});
+    } else {
+      std::set<CowMergeOperation> operations(lower_it, upper_it);
+      auto it = operations.find(op);
+      if (it != operations.end()) {
+        LOG(INFO) << "Self overlapping " << op;
+        operations.erase(it);
+      }
+      auto ret = merge_after.emplace(op, std::move(operations));
+      // Check the insertion indeed happens.
+      CHECK(ret.second);
+    }
+  }
+
+  *result = std::move(merge_after);
+  return true;
+}
+
+bool MergeSequenceGenerator::Generate(
+    std::vector<CowMergeOperation>* sequence) const {
+  sequence->clear();
+  std::map<CowMergeOperation, std::set<CowMergeOperation>> merge_after;
+  if (!FindDependency(&merge_after)) {
+    LOG(ERROR) << "Failed to find dependencies";
+    return false;
+  }
+
+  LOG(INFO) << "Generating sequence";
+
+  // Use the non-DFS version of the topology sort. So we can control the
+  // operations to discard to break cycles; thus yielding a deterministic
+  // sequence.
+  std::map<CowMergeOperation, int> incoming_edges;
+  for (const auto& it : merge_after) {
+    for (const auto& blocked : it.second) {
+      // Value is default initialized to 0.
+      incoming_edges[blocked] += 1;
+    }
+  }
+
+  std::set<CowMergeOperation> free_operations;
+  for (const auto& op : operations_) {
+    if (incoming_edges.find(op) == incoming_edges.end()) {
+      free_operations.insert(op);
+    }
+  }
+
+  std::vector<CowMergeOperation> merge_sequence;
+  std::set<CowMergeOperation> convert_to_raw;
+  while (!incoming_edges.empty()) {
+    if (!free_operations.empty()) {
+      merge_sequence.insert(
+          merge_sequence.end(), free_operations.begin(), free_operations.end());
+    } else {
+      auto to_convert = incoming_edges.begin()->first;
+      free_operations.insert(to_convert);
+      convert_to_raw.insert(to_convert);
+      LOG(INFO) << "Converting operation to raw " << to_convert;
+    }
+
+    std::set<CowMergeOperation> next_free_operations;
+    for (const auto& op : free_operations) {
+      incoming_edges.erase(op);
+
+      // Now that this particular operation is merged, other operations blocked
+      // by this one may be free. Decrement the count of blocking operations,
+      // and set up the free operations for the next iteration.
+      for (const auto& blocked : merge_after[op]) {
+        auto it = incoming_edges.find(blocked);
+        if (it == incoming_edges.end()) {
+          continue;
+        }
+
+        auto blocking_transfer_count = &it->second;
+        if (*blocking_transfer_count <= 0) {
+          LOG(ERROR) << "Unexpected count in merge after map "
+                     << *blocking_transfer_count;
+          return false;
+        }
+        // This operation is no longer blocked by anyone. Add it to the merge
+        // sequence in the next iteration.
+        *blocking_transfer_count -= 1;
+        if (*blocking_transfer_count == 0) {
+          next_free_operations.insert(blocked);
+        }
+      }
+    }
+
+    LOG(INFO) << "Remaining transfers " << incoming_edges.size()
+              << ", free transfers " << free_operations.size()
+              << ", merge_sequence size " << merge_sequence.size();
+    free_operations = std::move(next_free_operations);
+  }
+
+  if (!free_operations.empty()) {
+    merge_sequence.insert(
+        merge_sequence.end(), free_operations.begin(), free_operations.end());
+  }
+
+  CHECK_EQ(operations_.size(), merge_sequence.size() + convert_to_raw.size());
+
+  size_t blocks_in_sequence = 0;
+  for (const CowMergeOperation& transfer : merge_sequence) {
+    blocks_in_sequence += transfer.dst_extent().num_blocks();
+  }
+
+  size_t blocks_in_raw = 0;
+  for (const CowMergeOperation& transfer : convert_to_raw) {
+    blocks_in_raw += transfer.dst_extent().num_blocks();
+  }
+
+  LOG(INFO) << "Blocks in merge sequence " << blocks_in_sequence
+            << ", blocks in raw " << blocks_in_raw;
+  if (!ValidateSequence(merge_sequence)) {
+    return false;
+  }
+
+  *sequence = std::move(merge_sequence);
+  return true;
+}
+
+bool MergeSequenceGenerator::ValidateSequence(
+    const std::vector<CowMergeOperation>& sequence) {
+  LOG(INFO) << "Validating merge sequence";
+  ExtentRanges visited;
+  for (const auto& op : sequence) {
+    if (visited.OverlapsWithExtent(op.src_extent())) {
+      LOG(ERROR) << "Transfer violates the merge sequence " << op
+                 << ". Visited extent ranges: ";
+      visited.Dump();
+      return false;
+    }
+
+    CHECK(!visited.OverlapsWithExtent(op.dst_extent()))
+        << "dst extent should write only once.";
+    visited.AddExtent(op.dst_extent());
+  }
+
+  return true;
+}
+
+}  // namespace chromeos_update_engine
diff --git a/payload_generator/merge_sequence_generator.h b/payload_generator/merge_sequence_generator.h
new file mode 100644
index 0000000..bc0158e
--- /dev/null
+++ b/payload_generator/merge_sequence_generator.h
@@ -0,0 +1,74 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_PAYLOAD_GENERATOR_MERGE_SEQUENCE_GENERATOR_H_
+#define UPDATE_ENGINE_PAYLOAD_GENERATOR_MERGE_SEQUENCE_GENERATOR_H_
+
+#include <map>
+#include <memory>
+#include <set>
+#include <utility>
+#include <vector>
+
+#include "update_engine/payload_generator/annotated_operation.h"
+#include "update_engine/payload_generator/extent_ranges.h"
+#include "update_engine/payload_generator/extent_utils.h"
+#include "update_engine/update_metadata.pb.h"
+
+namespace chromeos_update_engine {
+// Constructs CowMergeOperation from src & dst extents
+CowMergeOperation CreateCowMergeOperation(const Extent& src_extent,
+                                          const Extent& dst_extent);
+
+// Comparator for CowMergeOperation.
+bool operator<(const CowMergeOperation& op1, const CowMergeOperation& op2);
+bool operator==(const CowMergeOperation& op1, const CowMergeOperation& op2);
+
+std::ostream& operator<<(std::ostream& os,
+                         const CowMergeOperation& merge_operation);
+
+// This class takes a list of CowMergeOperations; and sorts them so that no
+// read after write will happen by following the sequence. When there is a
+// cycle, we will omit some operations in the list. Therefore, the result
+// sequence may not contain all blocks in the input list.
+class MergeSequenceGenerator {
+ public:
+  // Creates an object from a list of OTA InstallOperations. Returns nullptr on
+  // failure.
+  static std::unique_ptr<MergeSequenceGenerator> Create(
+      const std::vector<AnnotatedOperation>& aops);
+  // Checks that no read after write happens in the given sequence.
+  static bool ValidateSequence(const std::vector<CowMergeOperation>& sequence);
+
+  // Generates a merge sequence from |operations_|, puts the result in
+  // |sequence|. Returns false on failure.
+  bool Generate(std::vector<CowMergeOperation>* sequence) const;
+
+ private:
+  friend class MergeSequenceGeneratorTest;
+  explicit MergeSequenceGenerator(std::vector<CowMergeOperation> transfers)
+      : operations_(std::move(transfers)) {}
+
+  // For a given merge operation, finds all the operations that should be
+  // merged after it. Puts the result in |merge_after|.
+  bool FindDependency(std::map<CowMergeOperation, std::set<CowMergeOperation>>*
+                          merge_after) const;
+  // The list of CowMergeOperations to sort.
+  std::vector<CowMergeOperation> operations_;
+};
+
+}  // namespace chromeos_update_engine
+#endif  // UPDATE_ENGINE_PAYLOAD_GENERATOR_MERGE_SEQUENCE_GENERATOR_H_
diff --git a/payload_generator/merge_sequence_generator_unittest.cc b/payload_generator/merge_sequence_generator_unittest.cc
new file mode 100644
index 0000000..567ede1
--- /dev/null
+++ b/payload_generator/merge_sequence_generator_unittest.cc
@@ -0,0 +1,196 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include <algorithm>
+#include <vector>
+
+#include <gtest/gtest.h>
+
+#include "update_engine/payload_consumer/payload_constants.h"
+#include "update_engine/payload_generator/extent_utils.h"
+#include "update_engine/payload_generator/merge_sequence_generator.h"
+
+namespace chromeos_update_engine {
+class MergeSequenceGeneratorTest : public ::testing::Test {
+ protected:
+  void VerifyTransfers(MergeSequenceGenerator* generator,
+                       const std::vector<CowMergeOperation>& expected) {
+    ASSERT_EQ(expected, generator->operations_);
+  }
+
+  void FindDependency(
+      std::vector<CowMergeOperation> transfers,
+      std::map<CowMergeOperation, std::set<CowMergeOperation>>* result) {
+    std::sort(transfers.begin(), transfers.end());
+    MergeSequenceGenerator generator(std::move(transfers));
+    ASSERT_TRUE(generator.FindDependency(result));
+  }
+
+  void GenerateSequence(std::vector<CowMergeOperation> transfers,
+                        const std::vector<CowMergeOperation>& expected) {
+    std::sort(transfers.begin(), transfers.end());
+    MergeSequenceGenerator generator(std::move(transfers));
+    std::vector<CowMergeOperation> sequence;
+    ASSERT_TRUE(generator.Generate(&sequence));
+    ASSERT_EQ(expected, sequence);
+  }
+};
+
+TEST_F(MergeSequenceGeneratorTest, Create) {
+  std::vector<AnnotatedOperation> aops{{"file1", {}}, {"file2", {}}};
+  aops[0].op.set_type(InstallOperation::SOURCE_COPY);
+  *aops[0].op.add_src_extents() = ExtentForRange(10, 10);
+  *aops[0].op.add_dst_extents() = ExtentForRange(30, 10);
+
+  aops[1].op.set_type(InstallOperation::SOURCE_COPY);
+  *aops[1].op.add_src_extents() = ExtentForRange(20, 10);
+  *aops[1].op.add_dst_extents() = ExtentForRange(40, 10);
+
+  auto generator = MergeSequenceGenerator::Create(aops);
+  ASSERT_TRUE(generator);
+  std::vector<CowMergeOperation> expected = {
+      CreateCowMergeOperation(ExtentForRange(10, 10), ExtentForRange(30, 10)),
+      CreateCowMergeOperation(ExtentForRange(20, 10), ExtentForRange(40, 10))};
+  VerifyTransfers(generator.get(), expected);
+
+  *aops[1].op.add_src_extents() = ExtentForRange(30, 5);
+  *aops[1].op.add_dst_extents() = ExtentForRange(50, 5);
+  generator = MergeSequenceGenerator::Create(aops);
+  ASSERT_FALSE(generator);
+}
+
+TEST_F(MergeSequenceGeneratorTest, Create_SplitSource) {
+  InstallOperation op;
+  op.set_type(InstallOperation::SOURCE_COPY);
+  *(op.add_src_extents()) = ExtentForRange(2, 3);
+  *(op.add_src_extents()) = ExtentForRange(6, 1);
+  *(op.add_src_extents()) = ExtentForRange(8, 4);
+  *(op.add_dst_extents()) = ExtentForRange(10, 8);
+
+  AnnotatedOperation aop{"file1", op};
+  auto generator = MergeSequenceGenerator::Create({aop});
+  ASSERT_TRUE(generator);
+  std::vector<CowMergeOperation> expected = {
+      CreateCowMergeOperation(ExtentForRange(2, 3), ExtentForRange(10, 3)),
+      CreateCowMergeOperation(ExtentForRange(6, 1), ExtentForRange(13, 1)),
+      CreateCowMergeOperation(ExtentForRange(8, 4), ExtentForRange(14, 4))};
+  VerifyTransfers(generator.get(), expected);
+}
+
+TEST_F(MergeSequenceGeneratorTest, FindDependency) {
+  std::vector<CowMergeOperation> transfers = {
+      CreateCowMergeOperation(ExtentForRange(10, 10), ExtentForRange(15, 10)),
+      CreateCowMergeOperation(ExtentForRange(40, 10), ExtentForRange(50, 10)),
+  };
+
+  std::map<CowMergeOperation, std::set<CowMergeOperation>> merge_after;
+  FindDependency(transfers, &merge_after);
+  ASSERT_EQ(std::set<CowMergeOperation>(), merge_after.at(transfers[0]));
+  ASSERT_EQ(std::set<CowMergeOperation>(), merge_after.at(transfers[1]));
+
+  transfers = {
+      CreateCowMergeOperation(ExtentForRange(10, 10), ExtentForRange(25, 10)),
+      CreateCowMergeOperation(ExtentForRange(24, 5), ExtentForRange(35, 5)),
+      CreateCowMergeOperation(ExtentForRange(30, 10), ExtentForRange(15, 10)),
+  };
+
+  FindDependency(transfers, &merge_after);
+  ASSERT_EQ(std::set<CowMergeOperation>({transfers[2]}),
+            merge_after.at(transfers[0]));
+  ASSERT_EQ(std::set<CowMergeOperation>({transfers[0], transfers[2]}),
+            merge_after.at(transfers[1]));
+  ASSERT_EQ(std::set<CowMergeOperation>({transfers[0], transfers[1]}),
+            merge_after.at(transfers[2]));
+}
+
+TEST_F(MergeSequenceGeneratorTest, FindDependency_ReusedSourceBlocks) {
+  std::vector<CowMergeOperation> transfers = {
+      CreateCowMergeOperation(ExtentForRange(5, 10), ExtentForRange(15, 10)),
+      CreateCowMergeOperation(ExtentForRange(6, 5), ExtentForRange(30, 5)),
+      CreateCowMergeOperation(ExtentForRange(50, 5), ExtentForRange(5, 5)),
+  };
+
+  std::map<CowMergeOperation, std::set<CowMergeOperation>> merge_after;
+  FindDependency(transfers, &merge_after);
+  ASSERT_EQ(std::set<CowMergeOperation>({transfers[2]}),
+            merge_after.at(transfers[0]));
+  ASSERT_EQ(std::set<CowMergeOperation>({transfers[2]}),
+            merge_after.at(transfers[1]));
+}
+
+TEST_F(MergeSequenceGeneratorTest, ValidateSequence) {
+  std::vector<CowMergeOperation> transfers = {
+      CreateCowMergeOperation(ExtentForRange(10, 10), ExtentForRange(15, 10)),
+      CreateCowMergeOperation(ExtentForRange(30, 10), ExtentForRange(40, 10)),
+  };
+
+  // Overlap between one operation's own src and dst extents is allowed.
+  ASSERT_TRUE(MergeSequenceGenerator::ValidateSequence(transfers));
+
+  transfers = {
+      CreateCowMergeOperation(ExtentForRange(30, 10), ExtentForRange(20, 10)),
+      CreateCowMergeOperation(ExtentForRange(15, 10), ExtentForRange(10, 10)),
+  };
+  ASSERT_FALSE(MergeSequenceGenerator::ValidateSequence(transfers));
+}
+
+TEST_F(MergeSequenceGeneratorTest, GenerateSequenceNoCycles) {
+  std::vector<CowMergeOperation> transfers = {
+      CreateCowMergeOperation(ExtentForRange(10, 10), ExtentForRange(15, 10)),
+      // file3 should merge before file2
+      CreateCowMergeOperation(ExtentForRange(40, 5), ExtentForRange(25, 5)),
+      CreateCowMergeOperation(ExtentForRange(25, 10), ExtentForRange(30, 10)),
+  };
+
+  std::vector<CowMergeOperation> expected{
+      transfers[0], transfers[2], transfers[1]};
+  GenerateSequence(transfers, expected);
+}
+
+TEST_F(MergeSequenceGeneratorTest, GenerateSequenceWithCycles) {
+  std::vector<CowMergeOperation> transfers = {
+      CreateCowMergeOperation(ExtentForRange(25, 10), ExtentForRange(30, 10)),
+      CreateCowMergeOperation(ExtentForRange(30, 10), ExtentForRange(40, 10)),
+      CreateCowMergeOperation(ExtentForRange(40, 10), ExtentForRange(25, 10)),
+      CreateCowMergeOperation(ExtentForRange(10, 10), ExtentForRange(15, 10)),
+  };
+
+  // Files 1, 2, 3 form a cycle; file 3, whose dst extent has the smallest
+  // offset, will be converted to raw blocks.
+  std::vector<CowMergeOperation> expected{
+      transfers[3], transfers[1], transfers[0]};
+  GenerateSequence(transfers, expected);
+}
+
+TEST_F(MergeSequenceGeneratorTest, GenerateSequenceMultipleCycles) {
+  std::vector<CowMergeOperation> transfers = {
+      // cycle 1
+      CreateCowMergeOperation(ExtentForRange(10, 10), ExtentForRange(25, 10)),
+      CreateCowMergeOperation(ExtentForRange(24, 5), ExtentForRange(35, 5)),
+      CreateCowMergeOperation(ExtentForRange(30, 10), ExtentForRange(15, 10)),
+      // cycle 2
+      CreateCowMergeOperation(ExtentForRange(55, 10), ExtentForRange(60, 10)),
+      CreateCowMergeOperation(ExtentForRange(60, 10), ExtentForRange(70, 10)),
+      CreateCowMergeOperation(ExtentForRange(70, 10), ExtentForRange(55, 10)),
+  };
+
+  // file 3, 6 will be converted to raw.
+  std::vector<CowMergeOperation> expected{
+      transfers[1], transfers[0], transfers[4], transfers[3]};
+  GenerateSequence(transfers, expected);
+}
+
+}  // namespace chromeos_update_engine
diff --git a/payload_generator/payload_file.cc b/payload_generator/payload_file.cc
index b55d03c..49dff4e 100644
--- a/payload_generator/payload_file.cc
+++ b/payload_generator/payload_file.cc
@@ -78,17 +78,23 @@
     *(manifest_.mutable_dynamic_partition_metadata()) =
         *(config.target.dynamic_partition_metadata);
 
+  if (config.is_partial_update) {
+    manifest_.set_partial_update(true);
+  }
   return true;
 }
 
 bool PayloadFile::AddPartition(const PartitionConfig& old_conf,
                                const PartitionConfig& new_conf,
-                               const vector<AnnotatedOperation>& aops) {
+                               vector<AnnotatedOperation> aops,
+                               vector<CowMergeOperation> merge_sequence) {
   Partition part;
   part.name = new_conf.name;
-  part.aops = aops;
+  part.aops = std::move(aops);
+  part.cow_merge_sequence = std::move(merge_sequence);
   part.postinstall = new_conf.postinstall;
   part.verity = new_conf.verity;
+  part.version = new_conf.version;
   // Initialize the PartitionInfo objects if present.
   if (!old_conf.path.empty())
     TEST_AND_RETURN_FALSE(
@@ -129,6 +135,9 @@
   for (const auto& part : part_vec_) {
     PartitionUpdate* partition = manifest_.add_partitions();
     partition->set_partition_name(part.name);
+    if (!part.version.empty()) {
+      partition->set_version(part.version);
+    }
     if (part.postinstall.run) {
       partition->set_run_postinstall(true);
       if (!part.postinstall.path.empty())
@@ -156,6 +165,10 @@
     for (const AnnotatedOperation& aop : part.aops) {
       *partition->add_operations() = aop.op;
     }
+    for (const auto& merge_op : part.cow_merge_sequence) {
+      *partition->add_merge_operations() = merge_op;
+    }
+
     if (part.old_info.has_size() || part.old_info.has_hash())
       *(partition->mutable_old_partition_info()) = part.old_info;
     if (part.new_info.has_size() || part.new_info.has_hash())
@@ -167,16 +180,14 @@
   uint64_t signature_blob_length = 0;
   if (!private_key_path.empty()) {
     TEST_AND_RETURN_FALSE(PayloadSigner::SignatureBlobLength(
-        vector<string>(1, private_key_path), &signature_blob_length));
+        {private_key_path}, &signature_blob_length));
     PayloadSigner::AddSignatureToManifest(
-        next_blob_offset,
-        signature_blob_length,
-        &manifest_);
+        next_blob_offset, signature_blob_length, &manifest_);
   }
 
   // Serialize protobuf
   string serialized_manifest;
-  TEST_AND_RETURN_FALSE(manifest_.AppendToString(&serialized_manifest));
+  TEST_AND_RETURN_FALSE(manifest_.SerializeToString(&serialized_manifest));
 
   uint64_t metadata_size =
       sizeof(kDeltaMagic) + 2 * sizeof(uint64_t) + serialized_manifest.size();
@@ -215,13 +226,12 @@
 
   // Write metadata signature blob.
   if (!private_key_path.empty()) {
-    brillo::Blob metadata_hash, metadata_signature;
+    brillo::Blob metadata_hash;
     TEST_AND_RETURN_FALSE(HashCalculator::RawHashOfFile(
         payload_file, metadata_size, &metadata_hash));
-    TEST_AND_RETURN_FALSE(
-        PayloadSigner::SignHashWithKeys(metadata_hash,
-                                        vector<string>(1, private_key_path),
-                                        &metadata_signature));
+    string metadata_signature;
+    TEST_AND_RETURN_FALSE(PayloadSigner::SignHashWithKeys(
+        metadata_hash, {private_key_path}, &metadata_signature));
     TEST_AND_RETURN_FALSE_ERRNO(
         writer.Write(metadata_signature.data(), metadata_signature.size()));
   }
@@ -245,16 +255,16 @@
   // Write payload signature blob.
   if (!private_key_path.empty()) {
     LOG(INFO) << "Signing the update...";
-    brillo::Blob signature_blob;
+    string signature;
     TEST_AND_RETURN_FALSE(PayloadSigner::SignPayload(
         payload_file,
-        vector<string>(1, private_key_path),
+        {private_key_path},
         metadata_size,
         metadata_signature_size,
         metadata_size + metadata_signature_size + manifest_.signatures_offset(),
-        &signature_blob));
+        &signature));
     TEST_AND_RETURN_FALSE_ERRNO(
-        writer.Write(signature_blob.data(), signature_blob.size()));
+        writer.Write(signature.data(), signature.size()));
   }
 
   ReportPayloadUsage(metadata_size);
@@ -330,15 +340,15 @@
     const DeltaObject& object = object_count.first;
     // Use printf() instead of LOG(INFO) because timestamp makes it difficult to
     // compare two reports.
-    printf(
-        kFormatString,
-        object.size * 100.0 / total_size,
-        object.size,
-        (object.type >= 0 ? InstallOperationTypeName(
-                                static_cast<InstallOperation_Type>(object.type))
-                          : "-"),
-        object.name.c_str(),
-        object_count.second);
+    printf(kFormatString,
+           object.size * 100.0 / total_size,
+           object.size,
+           (object.type >= 0
+                ? InstallOperationTypeName(
+                      static_cast<InstallOperation::Type>(object.type))
+                : "-"),
+           object.name.c_str(),
+           object_count.second);
   }
   printf(kFormatString, 100.0, total_size, "", "<total>", total_op);
   fflush(stdout);
diff --git a/payload_generator/payload_file.h b/payload_generator/payload_file.h
index 9dc80a7..8b17956 100644
--- a/payload_generator/payload_file.h
+++ b/payload_generator/payload_file.h
@@ -43,7 +43,8 @@
   // reference a blob stored in the file provided to WritePayload().
   bool AddPartition(const PartitionConfig& old_conf,
                     const PartitionConfig& new_conf,
-                    const std::vector<AnnotatedOperation>& aops);
+                    std::vector<AnnotatedOperation> aops,
+                    std::vector<CowMergeOperation> merge_sequence);
 
   // Write the payload to the |payload_file| file. The operations reference
   // blobs in the |data_blobs_path| file and the blobs will be reordered in the
@@ -60,9 +61,9 @@
   // Computes a SHA256 hash of the given buf and sets the hash value in the
   // operation so that update_engine could verify. This hash should be set
   // for all operations that have a non-zero data blob. One exception is the
-  // dummy operation for signature blob because the contents of the signature
+  // fake operation for signature blob because the contents of the signature
   // blob will not be available at payload creation time. So, update_engine will
-  // gracefully ignore the dummy signature operation.
+  // gracefully ignore the fake signature operation.
   static bool AddOperationHash(InstallOperation* op, const brillo::Blob& buf);
 
   // Install operations in the manifest may reference data blobs, which
@@ -90,12 +91,15 @@
 
     // The operations to be performed to this partition.
     std::vector<AnnotatedOperation> aops;
+    std::vector<CowMergeOperation> cow_merge_sequence;
 
     PartitionInfo old_info;
     PartitionInfo new_info;
 
     PostInstallConfig postinstall;
     VerityConfig verity;
+    // Per-partition timestamp, usually expressed as a number.
+    std::string version;
   };
 
   std::vector<Partition> part_vec_;
diff --git a/payload_generator/payload_generation_config.cc b/payload_generator/payload_generation_config.cc
index 7158796..35a95dc 100644
--- a/payload_generator/payload_generation_config.cc
+++ b/payload_generator/payload_generation_config.cc
@@ -150,25 +150,34 @@
   for (const auto& group_name : group_names) {
     DynamicPartitionGroup* group = metadata->add_groups();
     group->set_name(group_name);
-    if (!store.GetString(group_name + "_size", &buf)) {
-      LOG(ERROR) << "Missing " << group_name + "_size.";
+    if (!store.GetString("super_" + group_name + "_group_size", &buf) &&
+        !store.GetString(group_name + "_size", &buf)) {
+      LOG(ERROR) << "Missing super_" << group_name + "_group_size or "
+                 << group_name << "_size.";
       return false;
     }
 
     uint64_t max_size;
     if (!base::StringToUint64(buf, &max_size)) {
-      LOG(ERROR) << group_name << "_size=" << buf << " is not an integer.";
+      LOG(ERROR) << "Group size for " << group_name << " = " << buf
+                 << " is not an integer.";
       return false;
     }
     group->set_size(max_size);
 
-    if (store.GetString(group_name + "_partition_list", &buf)) {
+    if (store.GetString("super_" + group_name + "_partition_list", &buf) ||
+        store.GetString(group_name + "_partition_list", &buf)) {
       auto partition_names = brillo::string_utils::Split(buf, " ");
       for (const auto& partition_name : partition_names) {
         group->add_partition_names()->assign(partition_name);
       }
     }
   }
+
+  bool snapshot_enabled = false;
+  store.GetBoolean("virtual_ab", &snapshot_enabled);
+  metadata->set_snapshot_enabled(snapshot_enabled);
+
   dynamic_partition_metadata = std::move(metadata);
   return true;
 }
@@ -225,11 +234,12 @@
                         minor == kOpSrcHashMinorPayloadVersion ||
                         minor == kBrotliBsdiffMinorPayloadVersion ||
                         minor == kPuffdiffMinorPayloadVersion ||
-                        minor == kVerityMinorPayloadVersion);
+                        minor == kVerityMinorPayloadVersion ||
+                        minor == kPartialUpdateMinorPayloadVersion);
   return true;
 }
 
-bool PayloadVersion::OperationAllowed(InstallOperation_Type operation) const {
+bool PayloadVersion::OperationAllowed(InstallOperation::Type operation) const {
   switch (operation) {
     // Full operations:
     case InstallOperation::REPLACE:
@@ -260,13 +270,14 @@
   return false;
 }
 
-bool PayloadVersion::IsDelta() const {
+bool PayloadVersion::IsDeltaOrPartial() const {
   return minor != kFullPayloadMinorVersion;
 }
 
 bool PayloadGenerationConfig::Validate() const {
   TEST_AND_RETURN_FALSE(version.Validate());
-  TEST_AND_RETURN_FALSE(version.IsDelta() == is_delta);
+  TEST_AND_RETURN_FALSE(version.IsDeltaOrPartial() ==
+                        (is_delta || is_partial_update));
   if (is_delta) {
     for (const PartitionConfig& part : source.partitions) {
       if (!part.path.empty()) {
@@ -294,6 +305,10 @@
       TEST_AND_RETURN_FALSE(part.verity.IsEmpty());
   }
 
+  if (version.minor < kPartialUpdateMinorPayloadVersion) {
+    TEST_AND_RETURN_FALSE(!is_partial_update);
+  }
+
   TEST_AND_RETURN_FALSE(hard_chunk_size == -1 ||
                         hard_chunk_size % block_size == 0);
   TEST_AND_RETURN_FALSE(soft_chunk_size % block_size == 0);
diff --git a/payload_generator/payload_generation_config.h b/payload_generator/payload_generation_config.h
index 32f1229..ec63043 100644
--- a/payload_generator/payload_generation_config.h
+++ b/payload_generator/payload_generation_config.h
@@ -116,6 +116,12 @@
 
   PostInstallConfig postinstall;
   VerityConfig verity;
+
+  // Whether to skip on-device FEC data computation; enabled by default.
+  bool disable_fec_computation = false;
+
+  // Per-partition version, usually a number representing timestamp.
+  std::string version;
 };
 
 // The ImageConfig struct describes a pair of binaries kernel and rootfs and the
@@ -165,10 +171,10 @@
   bool Validate() const;
 
   // Return whether the passed |operation| is allowed by this payload.
-  bool OperationAllowed(InstallOperation_Type operation) const;
+  bool OperationAllowed(InstallOperation::Type operation) const;
 
-  // Whether this payload version is a delta payload.
-  bool IsDelta() const;
+  // Whether this payload version is a delta or partial payload.
+  bool IsDeltaOrPartial() const;
 
   // The major version of the payload.
   uint64_t major;
@@ -195,6 +201,10 @@
   // Whether the requested payload is a delta payload.
   bool is_delta = false;
 
+  // Whether the requested payload is a partial payload, i.e. only update a
+  // subset of partitions on device.
+  bool is_partial_update = false;
+
   // The major/minor version of the payload.
   PayloadVersion version;
 
diff --git a/payload_generator/payload_generation_config_android.cc b/payload_generator/payload_generation_config_android.cc
index 90c053f..d950092 100644
--- a/payload_generator/payload_generation_config_android.cc
+++ b/payload_generator/payload_generation_config_android.cc
@@ -63,11 +63,13 @@
   part->verity.hash_tree_extent = ExtentForBytes(
       hashtree.hash_block_size, hashtree.tree_offset, hashtree.tree_size);
 
-  part->verity.fec_data_extent =
-      ExtentForBytes(hashtree.data_block_size, 0, hashtree.fec_offset);
-  part->verity.fec_extent = ExtentForBytes(
-      hashtree.data_block_size, hashtree.fec_offset, hashtree.fec_size);
-  part->verity.fec_roots = hashtree.fec_num_roots;
+  if (!part->disable_fec_computation) {
+    part->verity.fec_data_extent =
+        ExtentForBytes(hashtree.data_block_size, 0, hashtree.fec_offset);
+    part->verity.fec_extent = ExtentForBytes(
+        hashtree.data_block_size, hashtree.fec_offset, hashtree.fec_size);
+    part->verity.fec_roots = hashtree.fec_num_roots;
+  }
   return true;
 }
 
@@ -205,7 +207,8 @@
               ExtentForRange(hash_start_block, tree_size / block_size);
         }
         fec_ecc_metadata ecc_data;
-        if (fh.get_ecc_metadata(ecc_data) && ecc_data.valid) {
+        if (!part.disable_fec_computation && fh.get_ecc_metadata(ecc_data) &&
+            ecc_data.valid) {
           TEST_AND_RETURN_FALSE(block_size == FEC_BLOCKSIZE);
           part.verity.fec_data_extent = ExtentForRange(0, ecc_data.blocks);
           part.verity.fec_extent =
diff --git a/payload_generator/payload_generation_config_android_unittest.cc b/payload_generator/payload_generation_config_android_unittest.cc
index 53378c2..44eaf55 100644
--- a/payload_generator/payload_generation_config_android_unittest.cc
+++ b/payload_generator/payload_generation_config_android_unittest.cc
@@ -160,6 +160,24 @@
   EXPECT_EQ(2u, verity.fec_roots);
 }
 
+TEST_F(PayloadGenerationConfigAndroidTest, LoadVerityConfigDisableFecTest) {
+  brillo::Blob part = GetAVBPartition();
+  test_utils::WriteFileVector(temp_file_.path(), part);
+  image_config_.partitions[0].disable_fec_computation = true;
+  EXPECT_TRUE(image_config_.LoadImageSize());
+  EXPECT_TRUE(image_config_.partitions[0].OpenFilesystem());
+  EXPECT_TRUE(image_config_.LoadVerityConfig());
+  const VerityConfig& verity = image_config_.partitions[0].verity;
+  EXPECT_FALSE(verity.IsEmpty());
+  EXPECT_EQ(ExtentForRange(0, 2), verity.hash_tree_data_extent);
+  EXPECT_EQ(ExtentForRange(2, 1), verity.hash_tree_extent);
+  EXPECT_EQ("sha1", verity.hash_tree_algorithm);
+  brillo::Blob salt(kHashTreeSalt, std::end(kHashTreeSalt));
+  EXPECT_EQ(salt, verity.hash_tree_salt);
+  EXPECT_EQ(0u, verity.fec_data_extent.num_blocks());
+  EXPECT_EQ(0u, verity.fec_extent.num_blocks());
+}
+
 TEST_F(PayloadGenerationConfigAndroidTest,
        LoadVerityConfigInvalidHashTreeTest) {
   brillo::Blob part = GetAVBPartition();
diff --git a/payload_generator/payload_generation_config_unittest.cc b/payload_generator/payload_generation_config_unittest.cc
index 70a3df3..aca9655 100644
--- a/payload_generator/payload_generation_config_unittest.cc
+++ b/payload_generator/payload_generation_config_unittest.cc
@@ -59,7 +59,7 @@
   ASSERT_TRUE(
       store.LoadFromString("super_partition_groups=group_a group_b\n"
                            "group_a_size=3221225472\n"
-                           "group_a_partition_list=system product_services\n"
+                           "group_a_partition_list=system system_ext\n"
                            "group_b_size=2147483648\n"
                            "group_b_partition_list=vendor\n"));
   EXPECT_TRUE(image_config.LoadDynamicPartitionMetadata(store));
@@ -72,7 +72,7 @@
   EXPECT_EQ(3221225472u, group_a.size());
   ASSERT_EQ(2, group_a.partition_names_size());
   EXPECT_EQ("system", group_a.partition_names(0));
-  EXPECT_EQ("product_services", group_a.partition_names(1));
+  EXPECT_EQ("system_ext", group_a.partition_names(1));
 
   const auto& group_b = image_config.dynamic_partition_metadata->groups(1);
   EXPECT_EQ("group_b", group_b.name());
@@ -108,17 +108,17 @@
 
   PartitionConfig system("system");
   system.size = 2147483648u;
-  PartitionConfig product_services("product_services");
-  product_services.size = 1073741824u;
+  PartitionConfig system_ext("system_ext");
+  system_ext.size = 1073741824u;
 
   image_config.partitions.push_back(std::move(system));
-  image_config.partitions.push_back(std::move(product_services));
+  image_config.partitions.push_back(std::move(system_ext));
 
   brillo::KeyValueStore store;
   ASSERT_TRUE(
       store.LoadFromString("super_partition_groups=foo\n"
                            "foo_size=3221225472\n"
-                           "foo_partition_list=system product_services\n"));
+                           "foo_partition_list=system system_ext\n"));
   EXPECT_TRUE(image_config.LoadDynamicPartitionMetadata(store));
   EXPECT_NE(nullptr, image_config.dynamic_partition_metadata);
 
diff --git a/payload_generator/payload_properties_unittest.cc b/payload_generator/payload_properties_unittest.cc
index db3902c..e0072fc 100644
--- a/payload_generator/payload_properties_unittest.cc
+++ b/payload_generator/payload_properties_unittest.cc
@@ -98,7 +98,7 @@
     EXPECT_TRUE(strategy->GenerateOperations(
         config, old_part, new_part, &blob_file_writer, &aops));
 
-    payload.AddPartition(old_part, new_part, aops);
+    payload.AddPartition(old_part, new_part, aops, {});
 
     uint64_t metadata_size;
     EXPECT_TRUE(payload.WritePayload(
diff --git a/payload_generator/payload_signer.cc b/payload_generator/payload_signer.cc
index 420329f..9a44f94 100644
--- a/payload_generator/payload_signer.cc
+++ b/payload_generator/payload_signer.cc
@@ -18,6 +18,7 @@
 
 #include <endian.h>
 
+#include <memory>
 #include <utility>
 
 #include <base/logging.h>
@@ -28,6 +29,7 @@
 #include <openssl/err.h>
 #include <openssl/pem.h>
 
+#include "update_engine/common/constants.h"
 #include "update_engine/common/hash_calculator.h"
 #include "update_engine/common/subprocess.h"
 #include "update_engine/common/utils.h"
@@ -45,45 +47,49 @@
 namespace chromeos_update_engine {
 
 namespace {
-
-// The payload verifier will check all the signatures included in the payload
-// regardless of the version field. Old version of the verifier require the
-// version field to be included and be 1.
-const uint32_t kSignatureMessageLegacyVersion = 1;
-
 // Given raw |signatures|, packs them into a protobuf and serializes it into a
-// binary blob. Returns true on success, false otherwise.
-bool ConvertSignatureToProtobufBlob(const vector<brillo::Blob>& signatures,
-                                    brillo::Blob* out_signature_blob) {
+// string. Returns true on success, false otherwise.
+bool ConvertSignaturesToProtobuf(const vector<brillo::Blob>& signatures,
+                                 const vector<size_t>& padded_signature_sizes,
+                                 string* out_serialized_signature) {
+  TEST_AND_RETURN_FALSE(signatures.size() == padded_signature_sizes.size());
   // Pack it into a protobuf
   Signatures out_message;
-  for (const brillo::Blob& signature : signatures) {
-    Signatures_Signature* sig_message = out_message.add_signatures();
-    // Set all the signatures with the same version number.
-    sig_message->set_version(kSignatureMessageLegacyVersion);
-    sig_message->set_data(signature.data(), signature.size());
+  for (size_t i = 0; i < signatures.size(); i++) {
+    const auto& signature = signatures[i];
+    const auto& padded_signature_size = padded_signature_sizes[i];
+    TEST_AND_RETURN_FALSE(padded_signature_size >= signature.size());
+    Signatures::Signature* sig_message = out_message.add_signatures();
+    // Skip assigning the same version number because we don't need to be
+    // compatible with old major version 1 client anymore.
+
+    // TODO(Xunchang) don't need to set the unpadded_signature_size field for
+    // RSA key signed signatures.
+    sig_message->set_unpadded_signature_size(signature.size());
+    brillo::Blob padded_signature = signature;
+    padded_signature.insert(
+        padded_signature.end(), padded_signature_size - signature.size(), 0);
+    sig_message->set_data(padded_signature.data(), padded_signature.size());
   }
 
   // Serialize protobuf
-  string serialized;
-  TEST_AND_RETURN_FALSE(out_message.AppendToString(&serialized));
-  out_signature_blob->insert(
-      out_signature_blob->end(), serialized.begin(), serialized.end());
-  LOG(INFO) << "Signature blob size: " << out_signature_blob->size();
+  TEST_AND_RETURN_FALSE(
+      out_message.SerializeToString(out_serialized_signature));
+  LOG(INFO) << "Signature blob size: " << out_serialized_signature->size();
   return true;
 }
 
-// Given an unsigned payload under |payload_path| and the |signature_blob| and
-// |metadata_signature_blob| generates an updated payload that includes the
+// Given an unsigned payload under |payload_path| and the |payload_signature|
+// and |metadata_signature| generates an updated payload that includes the
 // signatures. It populates |out_metadata_size| with the size of the final
-// manifest after adding the dummy signature operation, and
+// manifest after adding the fake signature operation, and
 // |out_signatures_offset| with the expected offset for the new blob, and
-// |out_metadata_signature_size| which will be size of |metadata_signature_blob|
+// |out_metadata_signature_size| which will be size of |metadata_signature|
 // if the payload major version supports metadata signature, 0 otherwise.
 // Returns true on success, false otherwise.
 bool AddSignatureBlobToPayload(const string& payload_path,
-                               const brillo::Blob& signature_blob,
-                               const brillo::Blob& metadata_signature_blob,
+                               const string& payload_signature,
+                               const string& metadata_signature,
                                brillo::Blob* out_payload,
                                uint64_t* out_metadata_size,
                                uint32_t* out_metadata_signature_size,
@@ -99,7 +105,7 @@
   uint32_t metadata_signature_size =
       payload_metadata.GetMetadataSignatureSize();
   // Write metadata signature size in header.
-  uint32_t metadata_signature_size_be = htobe32(metadata_signature_blob.size());
+  uint32_t metadata_signature_size_be = htobe32(metadata_signature.size());
   memcpy(payload.data() + manifest_offset,
          &metadata_signature_size_be,
          sizeof(metadata_signature_size_be));
@@ -108,9 +114,9 @@
   payload.erase(payload.begin() + metadata_size,
                 payload.begin() + metadata_size + metadata_signature_size);
   payload.insert(payload.begin() + metadata_size,
-                 metadata_signature_blob.begin(),
-                 metadata_signature_blob.end());
-  metadata_signature_size = metadata_signature_blob.size();
+                 metadata_signature.begin(),
+                 metadata_signature.end());
+  metadata_signature_size = metadata_signature.size();
   LOG(INFO) << "Metadata signature size: " << metadata_signature_size;
 
   DeltaArchiveManifest manifest;
@@ -122,10 +128,10 @@
     // contents. We don't allow the manifest to change if there is already an op
     // present, because that might invalidate previously generated
     // hashes/signatures.
-    if (manifest.signatures_size() != signature_blob.size()) {
+    if (manifest.signatures_size() != payload_signature.size()) {
       LOG(ERROR) << "Attempt to insert different signature sized blob. "
                  << "(current:" << manifest.signatures_size()
-                 << "new:" << signature_blob.size() << ")";
+                 << "new:" << payload_signature.size() << ")";
       return false;
     }
 
@@ -134,7 +140,7 @@
     // Updates the manifest to include the signature operation.
     PayloadSigner::AddSignatureToManifest(
         payload.size() - metadata_size - metadata_signature_size,
-        signature_blob.size(),
+        payload_signature.size(),
         &manifest);
 
     // Updates the payload to include the new manifest.
@@ -160,8 +166,8 @@
   LOG(INFO) << "Signature Blob Offset: " << signatures_offset;
   payload.resize(signatures_offset);
   payload.insert(payload.begin() + signatures_offset,
-                 signature_blob.begin(),
-                 signature_blob.end());
+                 payload_signature.begin(),
+                 payload_signature.end());
 
   *out_payload = std::move(payload);
   *out_metadata_size = metadata_size;
@@ -201,8 +207,35 @@
   return true;
 }
 
+std::unique_ptr<EVP_PKEY, decltype(&EVP_PKEY_free)> CreatePrivateKeyFromPath(
+    const string& private_key_path) {
+  FILE* fprikey = fopen(private_key_path.c_str(), "rb");
+  if (!fprikey) {
+    PLOG(ERROR) << "Failed to read " << private_key_path;
+    return {nullptr, nullptr};
+  }
+
+  auto private_key = std::unique_ptr<EVP_PKEY, decltype(&EVP_PKEY_free)>(
+      PEM_read_PrivateKey(fprikey, nullptr, nullptr, nullptr), EVP_PKEY_free);
+  fclose(fprikey);
+  return private_key;
+}
+
 }  // namespace
 
+bool PayloadSigner::GetMaximumSignatureSize(const string& private_key_path,
+                                            size_t* signature_size) {
+  *signature_size = 0;
+  auto private_key = CreatePrivateKeyFromPath(private_key_path);
+  if (!private_key) {
+    LOG(ERROR) << "Failed to create private key from " << private_key_path;
+    return false;
+  }
+
+  *signature_size = EVP_PKEY_size(private_key.get());
+  return true;
+}
+
 void PayloadSigner::AddSignatureToManifest(uint64_t signature_blob_offset,
                                            uint64_t signature_blob_length,
                                            DeltaArchiveManifest* manifest) {
@@ -236,21 +269,22 @@
                                                  signatures_offset,
                                                  &payload_hash,
                                                  &metadata_hash));
-  brillo::Blob signature_blob(payload.begin() + signatures_offset,
-                              payload.end());
+  string signature(payload.begin() + signatures_offset, payload.end());
   string public_key;
   TEST_AND_RETURN_FALSE(utils::ReadFile(public_key_path, &public_key));
-  TEST_AND_RETURN_FALSE(PayloadVerifier::PadRSA2048SHA256Hash(&payload_hash));
-  TEST_AND_RETURN_FALSE(PayloadVerifier::VerifySignature(
-      signature_blob, public_key, payload_hash));
+  TEST_AND_RETURN_FALSE(payload_hash.size() == kSHA256Size);
+
+  auto payload_verifier = PayloadVerifier::CreateInstance(public_key);
+  TEST_AND_RETURN_FALSE(payload_verifier != nullptr);
+
+  TEST_AND_RETURN_FALSE(
+      payload_verifier->VerifySignature(signature, payload_hash));
   if (metadata_signature_size) {
-    signature_blob.assign(
-        payload.begin() + metadata_size,
-        payload.begin() + metadata_size + metadata_signature_size);
+    signature.assign(payload.begin() + metadata_size,
+                     payload.begin() + metadata_size + metadata_signature_size);
+    TEST_AND_RETURN_FALSE(metadata_hash.size() == kSHA256Size);
     TEST_AND_RETURN_FALSE(
-        PayloadVerifier::PadRSA2048SHA256Hash(&metadata_hash));
-    TEST_AND_RETURN_FALSE(PayloadVerifier::VerifySignature(
-        signature_blob, public_key, metadata_hash));
+        payload_verifier->VerifySignature(signature, metadata_hash));
   }
   return true;
 }
@@ -260,49 +294,97 @@
                              brillo::Blob* out_signature) {
   LOG(INFO) << "Signing hash with private key: " << private_key_path;
   // We expect unpadded SHA256 hash coming in
-  TEST_AND_RETURN_FALSE(hash.size() == 32);
-  brillo::Blob padded_hash(hash);
-  PayloadVerifier::PadRSA2048SHA256Hash(&padded_hash);
-
+  TEST_AND_RETURN_FALSE(hash.size() == kSHA256Size);
   // The code below executes the equivalent of:
   //
   // openssl rsautl -raw -sign -inkey |private_key_path|
   //   -in |padded_hash| -out |out_signature|
 
-  FILE* fprikey = fopen(private_key_path.c_str(), "rb");
-  TEST_AND_RETURN_FALSE(fprikey != nullptr);
-  RSA* rsa = PEM_read_RSAPrivateKey(fprikey, nullptr, nullptr, nullptr);
-  fclose(fprikey);
-  TEST_AND_RETURN_FALSE(rsa != nullptr);
-  brillo::Blob signature(RSA_size(rsa));
-  ssize_t signature_size = RSA_private_encrypt(padded_hash.size(),
-                                               padded_hash.data(),
-                                               signature.data(),
-                                               rsa,
-                                               RSA_NO_PADDING);
-  RSA_free(rsa);
-  if (signature_size < 0) {
-    LOG(ERROR) << "Signing hash failed: "
-               << ERR_error_string(ERR_get_error(), nullptr);
+  auto private_key = CreatePrivateKeyFromPath(private_key_path);
+  if (!private_key) {
+    LOG(ERROR) << "Failed to create private key from " << private_key_path;
     return false;
   }
-  TEST_AND_RETURN_FALSE(static_cast<size_t>(signature_size) ==
-                        signature.size());
+
+  int key_type = EVP_PKEY_id(private_key.get());
+  brillo::Blob signature;
+  if (key_type == EVP_PKEY_RSA) {
+    // TODO(b/158580694): Switch back to get0 version and remove manual freeing
+    // of the object once the bug is resolved or gale has been moved to
+    // informational.
+    RSA* rsa = EVP_PKEY_get1_RSA(private_key.get());
+    TEST_AND_RETURN_FALSE(rsa != nullptr);
+
+    brillo::Blob padded_hash = hash;
+    PayloadVerifier::PadRSASHA256Hash(&padded_hash, RSA_size(rsa));
+
+    signature.resize(RSA_size(rsa));
+    ssize_t signature_size = RSA_private_encrypt(padded_hash.size(),
+                                                 padded_hash.data(),
+                                                 signature.data(),
+                                                 rsa,
+                                                 RSA_NO_PADDING);
+    if (signature_size < 0) {
+      LOG(ERROR) << "Signing hash failed: "
+                 << ERR_error_string(ERR_get_error(), nullptr);
+      RSA_free(rsa);
+      return false;
+    }
+    RSA_free(rsa);
+    TEST_AND_RETURN_FALSE(static_cast<size_t>(signature_size) ==
+                          signature.size());
+  } else if (key_type == EVP_PKEY_EC) {
+    // TODO(b/158580694): Switch back to get0 version and remove manual freeing
+    // of the object once the bug is resolved or gale has been moved to
+    // informational.
+    EC_KEY* ec_key = EVP_PKEY_get1_EC_KEY(private_key.get());
+    TEST_AND_RETURN_FALSE(ec_key != nullptr);
+
+    signature.resize(ECDSA_size(ec_key));
+    unsigned int signature_size;
+    if (ECDSA_sign(0,
+                   hash.data(),
+                   hash.size(),
+                   signature.data(),
+                   &signature_size,
+                   ec_key) != 1) {
+      LOG(ERROR) << "Signing hash failed: "
+                 << ERR_error_string(ERR_get_error(), nullptr);
+      EC_KEY_free(ec_key);
+      return false;
+    }
+    EC_KEY_free(ec_key);
+
+    // The DER-encoded ECDSA signature can be shorter than the maximum size
+    LOG(ERROR) << "signature max size " << signature.size() << " size "
+               << signature_size;
+    TEST_AND_RETURN_FALSE(signature.size() >= signature_size);
+    signature.resize(signature_size);
+  } else {
+    LOG(ERROR) << "key_type " << key_type << " isn't supported for signing";
+    return false;
+  }
   out_signature->swap(signature);
   return true;
 }
 
 bool PayloadSigner::SignHashWithKeys(const brillo::Blob& hash_data,
                                      const vector<string>& private_key_paths,
-                                     brillo::Blob* out_signature_blob) {
+                                     string* out_serialized_signature) {
   vector<brillo::Blob> signatures;
+  vector<size_t> padded_signature_sizes;
   for (const string& path : private_key_paths) {
     brillo::Blob signature;
     TEST_AND_RETURN_FALSE(SignHash(hash_data, path, &signature));
     signatures.push_back(signature);
+
+    size_t padded_signature_size;
+    TEST_AND_RETURN_FALSE(
+        GetMaximumSignatureSize(path, &padded_signature_size));
+    padded_signature_sizes.push_back(padded_signature_size);
   }
-  TEST_AND_RETURN_FALSE(
-      ConvertSignatureToProtobufBlob(signatures, out_signature_blob));
+  TEST_AND_RETURN_FALSE(ConvertSignaturesToProtobuf(
+      signatures, padded_signature_sizes, out_serialized_signature));
   return true;
 }
 
@@ -311,7 +393,7 @@
                                 const uint64_t metadata_size,
                                 const uint32_t metadata_signature_size,
                                 const uint64_t signatures_offset,
-                                brillo::Blob* out_signature_blob) {
+                                string* out_serialized_signature) {
   brillo::Blob payload;
   TEST_AND_RETURN_FALSE(utils::ReadFile(unsigned_payload_path, &payload));
   brillo::Blob hash_data;
@@ -322,16 +404,16 @@
                                                  &hash_data,
                                                  nullptr));
   TEST_AND_RETURN_FALSE(
-      SignHashWithKeys(hash_data, private_key_paths, out_signature_blob));
+      SignHashWithKeys(hash_data, private_key_paths, out_serialized_signature));
   return true;
 }
 
 bool PayloadSigner::SignatureBlobLength(const vector<string>& private_key_paths,
                                         uint64_t* out_length) {
   DCHECK(out_length);
-  brillo::Blob x_blob(1, 'x'), hash_blob, sig_blob;
-  TEST_AND_RETURN_FALSE(
-      HashCalculator::RawHashOfBytes(x_blob.data(), x_blob.size(), &hash_blob));
+  brillo::Blob hash_blob;
+  TEST_AND_RETURN_FALSE(HashCalculator::RawHashOfData({'x'}, &hash_blob));
+  string sig_blob;
   TEST_AND_RETURN_FALSE(
       SignHashWithKeys(hash_blob, private_key_paths, &sig_blob));
   *out_length = sig_blob.size();
@@ -339,7 +421,7 @@
 }
 
 bool PayloadSigner::HashPayloadForSigning(const string& payload_path,
-                                          const vector<int>& signature_sizes,
+                                          const vector<size_t>& signature_sizes,
                                           brillo::Blob* out_payload_hash_data,
                                           brillo::Blob* out_metadata_hash) {
   // Create a signature blob with signatures filled with 0.
@@ -348,17 +430,17 @@
   for (int signature_size : signature_sizes) {
     signatures.emplace_back(signature_size, 0);
   }
-  brillo::Blob signature_blob;
+  string signature;
   TEST_AND_RETURN_FALSE(
-      ConvertSignatureToProtobufBlob(signatures, &signature_blob));
+      ConvertSignaturesToProtobuf(signatures, signature_sizes, &signature));
 
   brillo::Blob payload;
   uint64_t metadata_size, signatures_offset;
   uint32_t metadata_signature_size;
   // Prepare payload for hashing.
   TEST_AND_RETURN_FALSE(AddSignatureBlobToPayload(payload_path,
-                                                  signature_blob,
-                                                  signature_blob,
+                                                  signature,
+                                                  signature,
                                                   &payload,
                                                   &metadata_size,
                                                   &metadata_signature_size,
@@ -374,6 +456,7 @@
 
 bool PayloadSigner::AddSignatureToPayload(
     const string& payload_path,
+    const vector<size_t>& padded_signature_sizes,
     const vector<brillo::Blob>& payload_signatures,
     const vector<brillo::Blob>& metadata_signatures,
     const string& signed_payload_path,
@@ -381,19 +464,19 @@
   // TODO(petkov): Reduce memory usage -- the payload is manipulated in memory.
 
   // Loads the payload and adds the signature op to it.
-  brillo::Blob signature_blob, metadata_signature_blob;
-  TEST_AND_RETURN_FALSE(
-      ConvertSignatureToProtobufBlob(payload_signatures, &signature_blob));
+  string payload_signature, metadata_signature;
+  TEST_AND_RETURN_FALSE(ConvertSignaturesToProtobuf(
+      payload_signatures, padded_signature_sizes, &payload_signature));
   if (!metadata_signatures.empty()) {
-    TEST_AND_RETURN_FALSE(ConvertSignatureToProtobufBlob(
-        metadata_signatures, &metadata_signature_blob));
+    TEST_AND_RETURN_FALSE(ConvertSignaturesToProtobuf(
+        metadata_signatures, padded_signature_sizes, &metadata_signature));
   }
   brillo::Blob payload;
   uint64_t signatures_offset;
   uint32_t metadata_signature_size;
   TEST_AND_RETURN_FALSE(AddSignatureBlobToPayload(payload_path,
-                                                  signature_blob,
-                                                  metadata_signature_blob,
+                                                  payload_signature,
+                                                  metadata_signature,
                                                   &payload,
                                                   out_metadata_size,
                                                   &metadata_signature_size,
diff --git a/payload_generator/payload_signer.h b/payload_generator/payload_signer.h
index 71f4983..9676b71 100644
--- a/payload_generator/payload_signer.h
+++ b/payload_generator/payload_signer.h
@@ -51,27 +51,27 @@
                        brillo::Blob* out_signature);
 
   // Sign |hash_data| blob with all private keys in |private_key_paths|, then
-  // convert the signatures to protobuf blob.
+  // convert the signatures to serialized protobuf.
   static bool SignHashWithKeys(
       const brillo::Blob& hash_data,
       const std::vector<std::string>& private_key_paths,
-      brillo::Blob* out_signature_blob);
+      std::string* out_serialized_signature);
 
   // Given an unsigned payload in |unsigned_payload_path|, private keys in
   // |private_key_path|, metadata size in |metadata_size|, metadata signature
   // size in |metadata_signature_size| and signatures offset in
   // |signatures_offset|, calculates the payload signature blob into
-  // |out_signature_blob|. Note that the payload must already have an
-  // updated manifest that includes the dummy signature op and correct metadata
+  // |out_serialized_signature|. Note that the payload must already have an
+  // updated manifest that includes the fake signature op and correct metadata
   // signature size in header. Returns true on success, false otherwise.
   static bool SignPayload(const std::string& unsigned_payload_path,
                           const std::vector<std::string>& private_key_paths,
                           const uint64_t metadata_size,
                           const uint32_t metadata_signature_size,
                           const uint64_t signatures_offset,
-                          brillo::Blob* out_signature_blob);
+                          std::string* out_serialized_signature);
 
-  // Returns the length of out_signature_blob that will result in a call
+  // Returns the length of out_serialized_signature that will result in a call
   // to SignPayload with the given private keys. Returns true on success.
   static bool SignatureBlobLength(
       const std::vector<std::string>& private_key_paths, uint64_t* out_length);
@@ -88,11 +88,11 @@
   //
   // The changes to payload are not preserved or written to disk.
   static bool HashPayloadForSigning(const std::string& payload_path,
-                                    const std::vector<int>& signature_sizes,
+                                    const std::vector<size_t>& signature_sizes,
                                     brillo::Blob* out_payload_hash_data,
                                     brillo::Blob* out_metadata_hash);
 
-  // Given an unsigned payload in |payload_path| (with no dummy signature op)
+  // Given an unsigned payload in |payload_path| (with no fake signature op)
   // and the raw |payload_signatures| and |metadata_signatures| updates the
   // payload to include the signature thus turning it into a signed payload. The
   // new payload is stored in |signed_payload_path|. |payload_path| and
@@ -102,6 +102,7 @@
   // otherwise.
   static bool AddSignatureToPayload(
       const std::string& payload_path,
+      const std::vector<size_t>& padded_signature_sizes,
       const std::vector<brillo::Blob>& payload_signatures,
       const std::vector<brillo::Blob>& metadata_signatures,
       const std::string& signed_payload_path,
@@ -116,6 +117,16 @@
                                    const std::string& private_key_path,
                                    std::string* out_signature);
 
+  static bool ExtractPayloadProperties(const std::string& payload_path,
+                                       brillo::KeyValueStore* properties);
+
+  // This function calculates the maximum size, in bytes, of a signature signed
+  // by private_key_path. For an RSA key, this returns the number of bytes
+  // needed to represent the modulus. For an EC key, this returns the maximum
+  // size of a DER-encoded ECDSA signature.
+  static bool GetMaximumSignatureSize(const std::string& private_key_path,
+                                      size_t* signature_size);
+
  private:
   // This should never be constructed
   DISALLOW_IMPLICIT_CONSTRUCTORS(PayloadSigner);
diff --git a/payload_generator/payload_signer_unittest.cc b/payload_generator/payload_signer_unittest.cc
index f7f9c69..fe62997 100644
--- a/payload_generator/payload_signer_unittest.cc
+++ b/payload_generator/payload_signer_unittest.cc
@@ -45,6 +45,10 @@
 const char* kUnittestPublicKeyPath = "unittest_key.pub.pem";
 const char* kUnittestPrivateKey2Path = "unittest_key2.pem";
 const char* kUnittestPublicKey2Path = "unittest_key2.pub.pem";
+const char* kUnittestPrivateKeyRSA4096Path = "unittest_key_RSA4096.pem";
+const char* kUnittestPublicKeyRSA4096Path = "unittest_key_RSA4096.pub.pem";
+const char* kUnittestPrivateKeyECPath = "unittest_key_EC.pem";
+const char* kUnittestPublicKeyECPath = "unittest_key_EC.pub.pem";
 
 // Some data and its corresponding hash and signature:
 const char kDataToSign[] = "This is some data to sign.";
@@ -87,44 +91,34 @@
     0x43, 0xb9, 0xab, 0x7d};
 
 namespace {
-void SignSampleData(brillo::Blob* out_signature_blob,
-                    const vector<string>& private_keys) {
-  brillo::Blob data_blob(std::begin(kDataToSign),
-                         std::begin(kDataToSign) + strlen(kDataToSign));
+void SignSampleData(string* out_signature, const vector<string>& private_keys) {
   uint64_t length = 0;
   EXPECT_TRUE(PayloadSigner::SignatureBlobLength(private_keys, &length));
   EXPECT_GT(length, 0U);
   brillo::Blob hash_blob;
   EXPECT_TRUE(HashCalculator::RawHashOfBytes(
-      data_blob.data(), data_blob.size(), &hash_blob));
-  EXPECT_TRUE(PayloadSigner::SignHashWithKeys(
-      hash_blob, private_keys, out_signature_blob));
-  EXPECT_EQ(length, out_signature_blob->size());
+      kDataToSign, strlen(kDataToSign), &hash_blob));
+  EXPECT_TRUE(
+      PayloadSigner::SignHashWithKeys(hash_blob, private_keys, out_signature));
+  EXPECT_EQ(length, out_signature->size());
 }
 }  // namespace
 
 class PayloadSignerTest : public ::testing::Test {
  protected:
-  void SetUp() override {
-    PayloadVerifier::PadRSA2048SHA256Hash(&padded_hash_data_);
-  }
-
-  brillo::Blob padded_hash_data_{std::begin(kDataHash), std::end(kDataHash)};
+  brillo::Blob hash_data_{std::begin(kDataHash), std::end(kDataHash)};
 };
 
 TEST_F(PayloadSignerTest, SignSimpleTextTest) {
-  brillo::Blob signature_blob;
-  SignSampleData(&signature_blob,
-                 {GetBuildArtifactsPath(kUnittestPrivateKeyPath)});
+  string signature;
+  SignSampleData(&signature, {GetBuildArtifactsPath(kUnittestPrivateKeyPath)});
 
   // Check the signature itself
   Signatures signatures;
-  EXPECT_TRUE(
-      signatures.ParseFromArray(signature_blob.data(), signature_blob.size()));
+  EXPECT_TRUE(signatures.ParseFromString(signature));
   EXPECT_EQ(1, signatures.signatures_size());
-  const Signatures_Signature& signature = signatures.signatures(0);
-  EXPECT_EQ(1U, signature.version());
-  const string& sig_data = signature.data();
+  const Signatures::Signature& sig = signatures.signatures(0);
+  const string& sig_data = sig.data();
   ASSERT_EQ(base::size(kDataSignature), sig_data.size());
   for (size_t i = 0; i < base::size(kDataSignature); i++) {
     EXPECT_EQ(kDataSignature[i], static_cast<uint8_t>(sig_data[i]));
@@ -132,38 +126,44 @@
 }
 
 TEST_F(PayloadSignerTest, VerifyAllSignatureTest) {
-  brillo::Blob signature_blob;
-  SignSampleData(&signature_blob,
+  string signature;
+  SignSampleData(&signature,
                  {GetBuildArtifactsPath(kUnittestPrivateKeyPath),
-                  GetBuildArtifactsPath(kUnittestPrivateKey2Path)});
+                  GetBuildArtifactsPath(kUnittestPrivateKey2Path),
+                  GetBuildArtifactsPath(kUnittestPrivateKeyRSA4096Path),
+                  GetBuildArtifactsPath(kUnittestPrivateKeyECPath)});
 
   // Either public key should pass the verification.
-  string public_key;
-  EXPECT_TRUE(utils::ReadFile(GetBuildArtifactsPath(kUnittestPublicKeyPath),
-                              &public_key));
-  EXPECT_TRUE(PayloadVerifier::VerifySignature(
-      signature_blob, public_key, padded_hash_data_));
-  EXPECT_TRUE(utils::ReadFile(GetBuildArtifactsPath(kUnittestPublicKey2Path),
-                              &public_key));
-  EXPECT_TRUE(PayloadVerifier::VerifySignature(
-      signature_blob, public_key, padded_hash_data_));
+  for (const auto& path : {kUnittestPublicKeyPath,
+                           kUnittestPublicKey2Path,
+                           kUnittestPublicKeyRSA4096Path,
+                           kUnittestPublicKeyECPath}) {
+    string public_key;
+    EXPECT_TRUE(utils::ReadFile(GetBuildArtifactsPath(path), &public_key));
+    auto payload_verifier = PayloadVerifier::CreateInstance(public_key);
+    EXPECT_TRUE(payload_verifier != nullptr);
+    EXPECT_TRUE(payload_verifier->VerifySignature(signature, hash_data_));
+  }
 }
 
 TEST_F(PayloadSignerTest, VerifySignatureTest) {
-  brillo::Blob signature_blob;
-  SignSampleData(&signature_blob,
-                 {GetBuildArtifactsPath(kUnittestPrivateKeyPath)});
+  string signature;
+  SignSampleData(&signature, {GetBuildArtifactsPath(kUnittestPrivateKeyPath)});
 
   string public_key;
   EXPECT_TRUE(utils::ReadFile(GetBuildArtifactsPath(kUnittestPublicKeyPath),
                               &public_key));
-  EXPECT_TRUE(PayloadVerifier::VerifySignature(
-      signature_blob, public_key, padded_hash_data_));
+  auto payload_verifier = PayloadVerifier::CreateInstance(public_key);
+  EXPECT_TRUE(payload_verifier != nullptr);
+  EXPECT_TRUE(payload_verifier->VerifySignature(signature, hash_data_));
+
   // Passing the invalid key should fail the verification.
+  public_key.clear();
   EXPECT_TRUE(utils::ReadFile(GetBuildArtifactsPath(kUnittestPublicKey2Path),
                               &public_key));
-  EXPECT_TRUE(PayloadVerifier::VerifySignature(
-      signature_blob, public_key, padded_hash_data_));
+  payload_verifier = PayloadVerifier::CreateInstance(public_key);
+  EXPECT_TRUE(payload_verifier != nullptr);
+  EXPECT_FALSE(payload_verifier->VerifySignature(signature, hash_data_));
 }
 
 TEST_F(PayloadSignerTest, SkipMetadataSignatureTest) {
@@ -175,7 +175,7 @@
   uint64_t metadata_size;
   EXPECT_TRUE(payload.WritePayload(
       payload_file.path(), "/dev/null", "", &metadata_size));
-  const vector<int> sizes = {256};
+  const vector<size_t> sizes = {256};
   brillo::Blob unsigned_payload_hash, unsigned_metadata_hash;
   EXPECT_TRUE(PayloadSigner::HashPayloadForSigning(payload_file.path(),
                                                    sizes,
diff --git a/payload_generator/squashfs_filesystem.cc b/payload_generator/squashfs_filesystem.cc
index eb4fda3..6152d7d 100644
--- a/payload_generator/squashfs_filesystem.cc
+++ b/payload_generator/squashfs_filesystem.cc
@@ -275,7 +275,7 @@
     auto last = std::unique(zlib_blks.begin(), zlib_blks.end());
     zlib_blks.erase(last, zlib_blks.end());
 
-    // Sanity check. Make sure zlib blocks are not overlapping.
+    // Make sure zlib blocks are not overlapping.
     auto result = std::adjacent_find(
         zlib_blks.begin(),
         zlib_blks.end(),
diff --git a/payload_state.cc b/payload_state.cc
index b6c054b..1d1583b 100644
--- a/payload_state.cc
+++ b/payload_state.cc
@@ -373,6 +373,8 @@
     case ErrorCode::kInternalLibCurlError:
     case ErrorCode::kUnresolvedHostError:
     case ErrorCode::kUnresolvedHostRecovered:
+    case ErrorCode::kNotEnoughSpace:
+    case ErrorCode::kDeviceCorrupted:
     case ErrorCode::kPackageExcludedFromUpdate:
       LOG(INFO) << "Not incrementing URL index or failure count for this error";
       break;
@@ -1062,7 +1064,7 @@
     stored_time = Time::FromInternalValue(stored_value);
   }
 
-  // Sanity check: If the time read from disk is in the future
+  // Validation check: If the time read from disk is in the future
   // (modulo some slack to account for possible NTP drift
   // adjustments), something is fishy and we should report and
   // reset.
@@ -1109,7 +1111,7 @@
     stored_delta = TimeDelta::FromInternalValue(stored_value);
   }
 
-  // Sanity-check: Uptime can never be greater than the wall-clock
+  // Validation check: Uptime can never be greater than the wall-clock
   // difference (modulo some slack). If it is, report and reset
   // to the wall-clock difference.
   TimeDelta diff = GetUpdateDuration() - stored_delta;
diff --git a/scripts/brillo_update_payload b/scripts/brillo_update_payload
index c88709c..3bc87bd 100755
--- a/scripts/brillo_update_payload
+++ b/scripts/brillo_update_payload
@@ -28,12 +28,16 @@
 #  check       verify a payload using paycheck (static testing)
 #
 #  Generate command arguments:
-#  --payload             generated unsigned payload output file
-#  --source_image        if defined, generate a delta payload from the specified
-#                        image to the target_image
-#  --target_image        the target image that should be sent to clients
-#  --metadata_size_file  if defined, generate a file containing the size of the
-#                        payload metadata in bytes to the specified file
+#  --payload                  generated unsigned payload output file
+#  --source_image             if defined, generate a delta payload from the
+#                             specified image to the target_image
+#  --target_image             the target image that should be sent to clients
+#  --metadata_size_file       if defined, generate a file containing the size
+#                             of the payload metadata in bytes to the specified
+#                             file
+#  --disable_fec_computation  Disable the on-device FEC data computation for
+#                             incremental updates. This feature is enabled by
+#                             default.
 #
 #  Hash command arguments:
 #  --unsigned_payload    the input unsigned payload to generate the hash from
@@ -182,6 +186,16 @@
     "Optional: The maximum unix timestamp of the OS allowed to apply this \
 payload, should be set to a number higher than the build timestamp of the \
 system running on the device, 0 if not specified."
+  DEFINE_string partition_timestamps "" \
+    "Optional: Per-partition maximum unix timestamp of the OS allowed to \
+apply this payload. Should be comma-separated key value pairs, e.g. \
+system:1234,vendor:456"
+  DEFINE_string disable_fec_computation "" \
+    "Optional: Disables the on device fec data computation for incremental \
+update. This feature is enabled by default."
+  DEFINE_string is_partial_update "" \
+    "Optional: True if the payload is for partial update. i.e. it only updates \
+a subset of partitions on device."
 fi
 if [[ "${COMMAND}" == "hash" || "${COMMAND}" == "sign" ]]; then
   DEFINE_string unsigned_payload "" "Path to the input unsigned payload."
@@ -248,6 +262,9 @@
 # List of partition names in order.
 declare -a PARTITIONS_ORDER
 
+# A list of PIDs of the extract_image workers.
+EXTRACT_IMAGE_PIDS=()
+
 # A list of temporary files to remove during cleanup.
 CLEANUP_FILES=()
 
@@ -271,7 +288,7 @@
   local option_key="$2"
   local default_value="${3:-}"
   local value
-  if value=$(look "${option_key}=" "${file_txt}" | tail -n 1); then
+  if value=$(grep "^${option_key}=" "${file_txt}" | tail -n 1); then
     if value=$(echo "${value}" | cut -f 2- -d "=" | grep -E "^[0-9]+$"); then
       echo "${value}"
       return
@@ -324,6 +341,25 @@
 trap cleanup_on_error INT TERM ERR
 trap cleanup_on_exit EXIT
 
+# extract_file <zip_file> <entry_name> <destination>
+#
+# Extracts |entry_name| from |zip_file| to |destination|.
+extract_file() {
+  local zip_file="$1"
+  local entry_name="$2"
+  local destination="$3"
+
+  # unzip -p won't report error upon ENOSPC. Therefore, create a temp directory
+  # as the destination of the unzip, and move the file to the intended
+  # destination.
+  local output_directory=$(
+    mktemp --directory --tmpdir="${FLAGS_work_dir}" "TEMP.XXXXXX")
+  unzip "${zip_file}" "${entry_name}" -d "${output_directory}" ||
+    { rm -rf "${output_directory}"; die "Failed to extract ${entry_name}"; }
+
+  mv "${output_directory}/${entry_name}" "${destination}"
+  rm -rf "${output_directory}"
+}
 
 # extract_image <image> <partitions_array> [partitions_order]
 #
@@ -414,7 +450,7 @@
     fi
   done
   [[ -n "${path_in_zip}" ]] || die "Failed to find ${part}.img"
-  unzip -p "${image}" "${path_in_zip}/${part}.img" >"${part_file}"
+  extract_file "${image}" "${path_in_zip}/${part}.img" "${part_file}"
 
   # If the partition is stored as an Android sparse image file, we need to
   # convert them to a raw image for the update.
@@ -428,8 +464,9 @@
   fi
 
   # Extract the .map file (if one is available).
-  unzip -p "${image}" "${path_in_zip}/${part}.map" >"${part_map_file}" \
-    2>/dev/null || true
+  if unzip -l "${image}" "${path_in_zip}/${part}.map" > /dev/null; then
+    extract_file "${image}" "${path_in_zip}/${part}.map" "${part_map_file}"
+  fi
 
   # delta_generator only supports images multiple of 4 KiB. For target images
   # we pad the data with zeros if needed, but for source images we truncate
@@ -463,7 +500,8 @@
   local ab_partitions_list
   ab_partitions_list=$(create_tempfile "ab_partitions_list.XXXXXX")
   CLEANUP_FILES+=("${ab_partitions_list}")
-  if unzip -p "${image}" "META/ab_partitions.txt" >"${ab_partitions_list}"; then
+  if unzip -l "${image}" "META/ab_partitions.txt" > /dev/null; then
+    extract_file "${image}" "META/ab_partitions.txt" "${ab_partitions_list}"
     if grep -v -E '^[a-zA-Z0-9_-]*$' "${ab_partitions_list}" >&2; then
       die "Invalid partition names found in the partition list."
     fi
@@ -488,8 +526,9 @@
     # Source image
     local ue_config=$(create_tempfile "ue_config.XXXXXX")
     CLEANUP_FILES+=("${ue_config}")
-    if ! unzip -p "${image}" "META/update_engine_config.txt" \
-        >"${ue_config}"; then
+    if unzip -l "${image}" "META/update_engine_config.txt" > /dev/null; then
+      extract_file "${image}" "META/update_engine_config.txt" "${ue_config}"
+    else
       warn "No update_engine_config.txt found. Assuming pre-release image, \
 using payload minor version 2"
     fi
@@ -510,14 +549,16 @@
     # Target image
     local postinstall_config=$(create_tempfile "postinstall_config.XXXXXX")
     CLEANUP_FILES+=("${postinstall_config}")
-    if unzip -p "${image}" "META/postinstall_config.txt" \
-        >"${postinstall_config}"; then
+    if unzip -l "${image}" "META/postinstall_config.txt" > /dev/null; then
+      extract_file "${image}" "META/postinstall_config.txt" \
+        "${postinstall_config}"
       POSTINSTALL_CONFIG_FILE="${postinstall_config}"
     fi
     local dynamic_partitions_info=$(create_tempfile "dynamic_partitions_info.XXXXXX")
     CLEANUP_FILES+=("${dynamic_partitions_info}")
-    if unzip -p "${image}" "META/dynamic_partitions_info.txt" \
-        >"${dynamic_partitions_info}"; then
+    if unzip -l "${image}" "META/dynamic_partitions_info.txt" > /dev/null; then
+      extract_file "${image}" "META/dynamic_partitions_info.txt" \
+        "${dynamic_partitions_info}"
       DYNAMIC_PARTITION_INFO_FILE="${dynamic_partitions_info}"
     fi
   fi
@@ -530,6 +571,7 @@
     # Extract partitions in background.
     extract_partition_brillo "${image}" "${partitions_array}" "${part}" \
         "${part_file}" "${part_map_file}" &
+    EXTRACT_IMAGE_PIDS+=("$!")
     eval "${partitions_array}[\"${part}\"]=\"${part_file}\""
     eval "${partitions_array}_MAP[\"${part}\"]=\"${part_map_file}\""
   done
@@ -559,8 +601,12 @@
     extract_image "${FLAGS_source_image}" SRC_PARTITIONS
   fi
   extract_image "${FLAGS_target_image}" DST_PARTITIONS PARTITIONS_ORDER
-  # Wait for all subprocesses.
-  wait
+  # Wait for all subprocesses to finish. Not using `wait` since it doesn't die
+  # on non-zero subprocess exit code. Not using `wait ${EXTRACT_IMAGE_PIDS[@]}`
+  # as it gives the status of the last process it has waited for.
+  for pid in ${EXTRACT_IMAGE_PIDS[@]}; do
+    wait ${pid}
+  done
   cleanup_partition_array SRC_PARTITIONS
   cleanup_partition_array SRC_PARTITIONS_MAP
   cleanup_partition_array DST_PARTITIONS
@@ -615,17 +661,33 @@
     --new_mapfiles="${new_mapfiles}"
   )
 
+  if [[ "${FLAGS_is_partial_update}" == "true" ]]; then
+    GENERATOR_ARGS+=( --is_partial_update="true" )
+    # Need at least minor version 7 for partial update, so generate with minor
+    # version 7 if we don't have a source image. Let the delta_generator fail
+    # on other incompatible minor versions.
+    if [[ -z "${FORCE_MINOR_VERSION}" ]]; then
+      FORCE_MINOR_VERSION="7"
+    fi
+  fi
+
   if [[ "${payload_type}" == "delta" ]]; then
     # Source image args:
     GENERATOR_ARGS+=(
       --old_partitions="${old_partitions}"
       --old_mapfiles="${old_mapfiles}"
     )
-    if [[ -n "${FORCE_MINOR_VERSION}" ]]; then
-      GENERATOR_ARGS+=( --minor_version="${FORCE_MINOR_VERSION}" )
+    if [[ -n "${FLAGS_disable_fec_computation}" ]]; then
+      GENERATOR_ARGS+=(
+        --disable_fec_computation="${FLAGS_disable_fec_computation}" )
     fi
   fi
 
+  # minor version is set only for delta or partial payload.
+  if [[ -n "${FORCE_MINOR_VERSION}" ]]; then
+    GENERATOR_ARGS+=( --minor_version="${FORCE_MINOR_VERSION}" )
+  fi
+
   if [[ -n "${FORCE_MAJOR_VERSION}" ]]; then
     GENERATOR_ARGS+=( --major_version="${FORCE_MAJOR_VERSION}" )
   fi
@@ -638,6 +700,10 @@
     GENERATOR_ARGS+=( --max_timestamp="${FLAGS_max_timestamp}" )
   fi
 
+  if [[ -n "${FLAGS_partition_timestamps}" ]]; then
+    GENERATOR_ARGS+=( --partition_timestamps="${FLAGS_partition_timestamps}" )
+  fi
+
   if [[ -n "${POSTINSTALL_CONFIG_FILE}" ]]; then
     GENERATOR_ARGS+=(
       --new_postinstall_config_file="${POSTINSTALL_CONFIG_FILE}"
@@ -838,8 +904,8 @@
   check_update_payload ${PAYCHECK_ARGS[@]} --check
 }
 
-# Sanity check that the real generator exists:
-GENERATOR="$(which delta_generator || true)"
+# Check that the real generator exists:
+[[ -x "${GENERATOR}" ]] || GENERATOR="$(which delta_generator || true)"
 [[ -x "${GENERATOR}" ]] || die "can't find delta_generator"
 
 case "$COMMAND" in
diff --git a/scripts/payload_info.py b/scripts/payload_info.py
index 965bb76..7625ee8 100755
--- a/scripts/payload_info.py
+++ b/scripts/payload_info.py
@@ -74,7 +74,9 @@
     for partition in manifest.partitions:
       DisplayValue('  Number of "%s" ops' % partition.partition_name,
                    len(partition.operations))
-
+    for partition in manifest.partitions:
+      DisplayValue("Timestamp for " +
+                   partition.partition_name, partition.version)
     DisplayValue('Block size', manifest.block_size)
     DisplayValue('Minor version', manifest.minor_version)
 
diff --git a/scripts/update_device.py b/scripts/update_device.py
index f970bd3..1cd4b6a 100755
--- a/scripts/update_device.py
+++ b/scripts/update_device.py
@@ -20,12 +20,14 @@
 from __future__ import absolute_import
 
 import argparse
+import binascii
 import hashlib
 import logging
 import os
 import socket
 import subprocess
 import sys
+import struct
 import threading
 import xml.etree.ElementTree
 import zipfile
@@ -87,17 +89,49 @@
   # Android OTA package file paths.
   OTA_PAYLOAD_BIN = 'payload.bin'
   OTA_PAYLOAD_PROPERTIES_TXT = 'payload_properties.txt'
+  SECONDARY_OTA_PAYLOAD_BIN = 'secondary/payload.bin'
+  SECONDARY_OTA_PAYLOAD_PROPERTIES_TXT = 'secondary/payload_properties.txt'
+  PAYLOAD_MAGIC_HEADER = b'CrAU'
 
-  def __init__(self, otafilename):
+  def __init__(self, otafilename, secondary_payload=False):
     self.otafilename = otafilename
 
     otazip = zipfile.ZipFile(otafilename, 'r')
-    payload_info = otazip.getinfo(self.OTA_PAYLOAD_BIN)
-    self.offset = payload_info.header_offset
-    self.offset += zipfile.sizeFileHeader
-    self.offset += len(payload_info.extra) + len(payload_info.filename)
-    self.size = payload_info.file_size
-    self.properties = otazip.read(self.OTA_PAYLOAD_PROPERTIES_TXT)
+    payload_entry = (self.SECONDARY_OTA_PAYLOAD_BIN if secondary_payload else
+                     self.OTA_PAYLOAD_BIN)
+    payload_info = otazip.getinfo(payload_entry)
+
+    if payload_info.compress_type != 0:
+      logging.error(
+          "Expected payload to be uncompressed, got compression method %d",
+          payload_info.compress_type)
+    # Don't use len(payload_info.extra). Because that returns size of extra
+    # fields in central directory. We need to look at local file directory,
+    # as these two might have different sizes.
+    with open(otafilename, "rb") as fp:
+      fp.seek(payload_info.header_offset)
+      data = fp.read(zipfile.sizeFileHeader)
+      fheader = struct.unpack(zipfile.structFileHeader, data)
+      # Last two fields of local file header are filename length and
+      # extra length
+      filename_len = fheader[-2]
+      extra_len = fheader[-1]
+      self.offset = payload_info.header_offset
+      self.offset += zipfile.sizeFileHeader
+      self.offset += filename_len + extra_len
+      self.size = payload_info.file_size
+      fp.seek(self.offset)
+      payload_header = fp.read(4)
+      if payload_header != self.PAYLOAD_MAGIC_HEADER:
+        logging.warning(
+            "Invalid header, expected %s, got %s. "
+            "Either the offset is not correct, or payload is corrupted",
+            binascii.hexlify(self.PAYLOAD_MAGIC_HEADER),
+            payload_header)
+
+    property_entry = (self.SECONDARY_OTA_PAYLOAD_PROPERTIES_TXT if
+                      secondary_payload else self.OTA_PAYLOAD_PROPERTIES_TXT)
+    self.properties = otazip.read(property_entry)
 
 
 class UpdateHandler(BaseHTTPServer.BaseHTTPRequestHandler):
@@ -280,9 +314,9 @@
   return t
 
 
-def AndroidUpdateCommand(ota_filename, payload_url, extra_headers):
+def AndroidUpdateCommand(ota_filename, secondary, payload_url, extra_headers):
   """Return the command to run to start the update in the Android device."""
-  ota = AndroidOTAPackage(ota_filename)
+  ota = AndroidOTAPackage(ota_filename, secondary)
   headers = ota.properties
   headers += 'USER_AGENT=Dalvik (something, something)\n'
   headers += 'NETWORK_ID=0\n'
@@ -365,6 +399,8 @@
                       help='Override the public key used to verify payload.')
   parser.add_argument('--extra-headers', type=str, default='',
                       help='Extra headers to pass to the device.')
+  parser.add_argument('--secondary', action='store_true',
+                      help='Update with the secondary payload in the package.')
   args = parser.parse_args()
   logging.basicConfig(
       level=logging.WARNING if args.no_verbose else logging.INFO)
@@ -400,7 +436,7 @@
     # command.
     payload_url = 'http://127.0.0.1:%d/payload' % DEVICE_PORT
     if use_omaha and zipfile.is_zipfile(args.otafile):
-      ota = AndroidOTAPackage(args.otafile)
+      ota = AndroidOTAPackage(args.otafile, args.secondary)
       serving_range = (ota.offset, ota.size)
     else:
       serving_range = (0, os.stat(args.otafile).st_size)
@@ -428,8 +464,8 @@
       update_cmd = \
           OmahaUpdateCommand('http://127.0.0.1:%d/update' % DEVICE_PORT)
     else:
-      update_cmd = \
-          AndroidUpdateCommand(args.otafile, payload_url, args.extra_headers)
+      update_cmd = AndroidUpdateCommand(args.otafile, args.secondary,
+                                        payload_url, args.extra_headers)
     cmds.append(['shell', 'su', '0'] + update_cmd)
 
     for cmd in cmds:
diff --git a/scripts/update_payload/payload.py b/scripts/update_payload/payload.py
index ea5ed30..78b8e2c 100644
--- a/scripts/update_payload/payload.py
+++ b/scripts/update_payload/payload.py
@@ -20,7 +20,9 @@
 from __future__ import print_function
 
 import hashlib
+import io
 import struct
+import zipfile
 
 from update_payload import applier
 from update_payload import checker
@@ -119,6 +121,10 @@
       payload_file: update payload file object open for reading
       payload_file_offset: the offset of the actual payload
     """
+    if zipfile.is_zipfile(payload_file):
+      with zipfile.ZipFile(payload_file) as zfp:
+        with zfp.open("payload.bin") as payload_fp:
+          payload_file = io.BytesIO(payload_fp.read())
     self.payload_file = payload_file
     self.payload_file_offset = payload_file_offset
     self.manifest_hasher = None
diff --git a/scripts/update_payload/update_metadata_pb2.py b/scripts/update_payload/update_metadata_pb2.py
index 907cc18..841cd22 100644
--- a/scripts/update_payload/update_metadata_pb2.py
+++ b/scripts/update_payload/update_metadata_pb2.py
@@ -2,8 +2,6 @@
 # Generated by the protocol buffer compiler.  DO NOT EDIT!
 # source: update_metadata.proto
 
-import sys
-_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
 from google.protobuf import descriptor as _descriptor
 from google.protobuf import message as _message
 from google.protobuf import reflection as _reflection
@@ -19,8 +17,8 @@
   name='update_metadata.proto',
   package='chromeos_update_engine',
   syntax='proto2',
-  serialized_options=_b('H\003'),
-  serialized_pb=_b('\n\x15update_metadata.proto\x12\x16\x63hromeos_update_engine\"1\n\x06\x45xtent\x12\x13\n\x0bstart_block\x18\x01 \x01(\x04\x12\x12\n\nnum_blocks\x18\x02 \x01(\x04\"z\n\nSignatures\x12@\n\nsignatures\x18\x01 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x1a*\n\tSignature\x12\x0f\n\x07version\x18\x01 \x01(\r\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\"+\n\rPartitionInfo\x12\x0c\n\x04size\x18\x01 \x01(\x04\x12\x0c\n\x04hash\x18\x02 \x01(\x0c\"w\n\tImageInfo\x12\r\n\x05\x62oard\x18\x01 \x01(\t\x12\x0b\n\x03key\x18\x02 \x01(\t\x12\x0f\n\x07\x63hannel\x18\x03 \x01(\t\x12\x0f\n\x07version\x18\x04 \x01(\t\x12\x15\n\rbuild_channel\x18\x05 \x01(\t\x12\x15\n\rbuild_version\x18\x06 \x01(\t\"\xee\x03\n\x10InstallOperation\x12;\n\x04type\x18\x01 \x02(\x0e\x32-.chromeos_update_engine.InstallOperation.Type\x12\x13\n\x0b\x64\x61ta_offset\x18\x02 \x01(\x04\x12\x13\n\x0b\x64\x61ta_length\x18\x03 \x01(\x04\x12\x33\n\x0bsrc_extents\x18\x04 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\nsrc_length\x18\x05 \x01(\x04\x12\x33\n\x0b\x64st_extents\x18\x06 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\ndst_length\x18\x07 \x01(\x04\x12\x18\n\x10\x64\x61ta_sha256_hash\x18\x08 \x01(\x0c\x12\x17\n\x0fsrc_sha256_hash\x18\t \x01(\x0c\"\xad\x01\n\x04Type\x12\x0b\n\x07REPLACE\x10\x00\x12\x0e\n\nREPLACE_BZ\x10\x01\x12\x0c\n\x04MOVE\x10\x02\x1a\x02\x08\x01\x12\x0e\n\x06\x42SDIFF\x10\x03\x1a\x02\x08\x01\x12\x0f\n\x0bSOURCE_COPY\x10\x04\x12\x11\n\rSOURCE_BSDIFF\x10\x05\x12\x0e\n\nREPLACE_XZ\x10\x08\x12\x08\n\x04ZERO\x10\x06\x12\x0b\n\x07\x44ISCARD\x10\x07\x12\x11\n\rBROTLI_BSDIFF\x10\n\x12\x0c\n\x08PUFFDIFF\x10\t\"\xd7\x05\n\x0fPartitionUpdate\x12\x16\n\x0epartition_name\x18\x01 \x02(\t\x12\x17\n\x0frun_postinstall\x18\x02 \x01(\x08\x12\x18\n\x10postinstall_path\x18\x03 \x01(\t\x12\x17\n\x0f\x66ilesystem_type\x18\x04 \x01(\t\x12M\n\x17new_partition_signature\x18\x05 
\x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x12\x41\n\x12old_partition_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12\x41\n\x12new_partition_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12<\n\noperations\x18\x08 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12\x1c\n\x14postinstall_optional\x18\t \x01(\x08\x12=\n\x15hash_tree_data_extent\x18\n \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x38\n\x10hash_tree_extent\x18\x0b \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x1b\n\x13hash_tree_algorithm\x18\x0c \x01(\t\x12\x16\n\x0ehash_tree_salt\x18\r \x01(\x0c\x12\x37\n\x0f\x66\x65\x63_data_extent\x18\x0e \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x32\n\nfec_extent\x18\x0f \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x14\n\tfec_roots\x18\x10 \x01(\r:\x01\x32\"L\n\x15\x44ynamicPartitionGroup\x12\x0c\n\x04name\x18\x01 \x02(\t\x12\x0c\n\x04size\x18\x02 \x01(\x04\x12\x17\n\x0fpartition_names\x18\x03 \x03(\t\"Y\n\x18\x44ynamicPartitionMetadata\x12=\n\x06groups\x18\x01 \x03(\x0b\x32-.chromeos_update_engine.DynamicPartitionGroup\"\xc9\x06\n\x14\x44\x65ltaArchiveManifest\x12H\n\x12install_operations\x18\x01 \x03(\x0b\x32(.chromeos_update_engine.InstallOperationB\x02\x18\x01\x12O\n\x19kernel_install_operations\x18\x02 \x03(\x0b\x32(.chromeos_update_engine.InstallOperationB\x02\x18\x01\x12\x18\n\nblock_size\x18\x03 \x01(\r:\x04\x34\x30\x39\x36\x12\x19\n\x11signatures_offset\x18\x04 \x01(\x04\x12\x17\n\x0fsignatures_size\x18\x05 \x01(\x04\x12\x42\n\x0fold_kernel_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfoB\x02\x18\x01\x12\x42\n\x0fnew_kernel_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfoB\x02\x18\x01\x12\x42\n\x0fold_rootfs_info\x18\x08 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfoB\x02\x18\x01\x12\x42\n\x0fnew_rootfs_info\x18\t \x01(\x0b\x32%.chromeos_update_engine.PartitionInfoB\x02\x18\x01\x12\x39\n\x0eold_image_info\x18\n 
\x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x39\n\x0enew_image_info\x18\x0b \x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x18\n\rminor_version\x18\x0c \x01(\r:\x01\x30\x12;\n\npartitions\x18\r \x03(\x0b\x32\'.chromeos_update_engine.PartitionUpdate\x12\x15\n\rmax_timestamp\x18\x0e \x01(\x03\x12T\n\x1a\x64ynamic_partition_metadata\x18\x0f \x01(\x0b\x32\x30.chromeos_update_engine.DynamicPartitionMetadataB\x02H\x03')
+  serialized_options=b'H\003',
+  serialized_pb=b'\n\x15update_metadata.proto\x12\x16\x63hromeos_update_engine\"1\n\x06\x45xtent\x12\x13\n\x0bstart_block\x18\x01 \x01(\x04\x12\x12\n\nnum_blocks\x18\x02 \x01(\x04\"\x9f\x01\n\nSignatures\x12@\n\nsignatures\x18\x01 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x1aO\n\tSignature\x12\x13\n\x07version\x18\x01 \x01(\rB\x02\x18\x01\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\x12\x1f\n\x17unpadded_signature_size\x18\x03 \x01(\x07\"+\n\rPartitionInfo\x12\x0c\n\x04size\x18\x01 \x01(\x04\x12\x0c\n\x04hash\x18\x02 \x01(\x0c\"w\n\tImageInfo\x12\r\n\x05\x62oard\x18\x01 \x01(\t\x12\x0b\n\x03key\x18\x02 \x01(\t\x12\x0f\n\x07\x63hannel\x18\x03 \x01(\t\x12\x0f\n\x07version\x18\x04 \x01(\t\x12\x15\n\rbuild_channel\x18\x05 \x01(\t\x12\x15\n\rbuild_version\x18\x06 \x01(\t\"\xee\x03\n\x10InstallOperation\x12;\n\x04type\x18\x01 \x02(\x0e\x32-.chromeos_update_engine.InstallOperation.Type\x12\x13\n\x0b\x64\x61ta_offset\x18\x02 \x01(\x04\x12\x13\n\x0b\x64\x61ta_length\x18\x03 \x01(\x04\x12\x33\n\x0bsrc_extents\x18\x04 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\nsrc_length\x18\x05 \x01(\x04\x12\x33\n\x0b\x64st_extents\x18\x06 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\ndst_length\x18\x07 \x01(\x04\x12\x18\n\x10\x64\x61ta_sha256_hash\x18\x08 \x01(\x0c\x12\x17\n\x0fsrc_sha256_hash\x18\t \x01(\x0c\"\xad\x01\n\x04Type\x12\x0b\n\x07REPLACE\x10\x00\x12\x0e\n\nREPLACE_BZ\x10\x01\x12\x0c\n\x04MOVE\x10\x02\x1a\x02\x08\x01\x12\x0e\n\x06\x42SDIFF\x10\x03\x1a\x02\x08\x01\x12\x0f\n\x0bSOURCE_COPY\x10\x04\x12\x11\n\rSOURCE_BSDIFF\x10\x05\x12\x0e\n\nREPLACE_XZ\x10\x08\x12\x08\n\x04ZERO\x10\x06\x12\x0b\n\x07\x44ISCARD\x10\x07\x12\x11\n\rBROTLI_BSDIFF\x10\n\x12\x0c\n\x08PUFFDIFF\x10\t\"\xe8\x05\n\x0fPartitionUpdate\x12\x16\n\x0epartition_name\x18\x01 \x02(\t\x12\x17\n\x0frun_postinstall\x18\x02 \x01(\x08\x12\x18\n\x10postinstall_path\x18\x03 \x01(\t\x12\x17\n\x0f\x66ilesystem_type\x18\x04 \x01(\t\x12M\n\x17new_partition_signature\x18\x05 
\x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x12\x41\n\x12old_partition_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12\x41\n\x12new_partition_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12<\n\noperations\x18\x08 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12\x1c\n\x14postinstall_optional\x18\t \x01(\x08\x12=\n\x15hash_tree_data_extent\x18\n \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x38\n\x10hash_tree_extent\x18\x0b \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x1b\n\x13hash_tree_algorithm\x18\x0c \x01(\t\x12\x16\n\x0ehash_tree_salt\x18\r \x01(\x0c\x12\x37\n\x0f\x66\x65\x63_data_extent\x18\x0e \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x32\n\nfec_extent\x18\x0f \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x14\n\tfec_roots\x18\x10 \x01(\r:\x01\x32\x12\x0f\n\x07version\x18\x11 \x01(\t\"L\n\x15\x44ynamicPartitionGroup\x12\x0c\n\x04name\x18\x01 \x02(\t\x12\x0c\n\x04size\x18\x02 \x01(\x04\x12\x17\n\x0fpartition_names\x18\x03 \x03(\t\"s\n\x18\x44ynamicPartitionMetadata\x12=\n\x06groups\x18\x01 \x03(\x0b\x32-.chromeos_update_engine.DynamicPartitionGroup\x12\x18\n\x10snapshot_enabled\x18\x02 \x01(\x08\"\xe1\x06\n\x14\x44\x65ltaArchiveManifest\x12H\n\x12install_operations\x18\x01 \x03(\x0b\x32(.chromeos_update_engine.InstallOperationB\x02\x18\x01\x12O\n\x19kernel_install_operations\x18\x02 \x03(\x0b\x32(.chromeos_update_engine.InstallOperationB\x02\x18\x01\x12\x18\n\nblock_size\x18\x03 \x01(\r:\x04\x34\x30\x39\x36\x12\x19\n\x11signatures_offset\x18\x04 \x01(\x04\x12\x17\n\x0fsignatures_size\x18\x05 \x01(\x04\x12\x42\n\x0fold_kernel_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfoB\x02\x18\x01\x12\x42\n\x0fnew_kernel_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfoB\x02\x18\x01\x12\x42\n\x0fold_rootfs_info\x18\x08 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfoB\x02\x18\x01\x12\x42\n\x0fnew_rootfs_info\x18\t 
\x01(\x0b\x32%.chromeos_update_engine.PartitionInfoB\x02\x18\x01\x12\x39\n\x0eold_image_info\x18\n \x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x39\n\x0enew_image_info\x18\x0b \x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x18\n\rminor_version\x18\x0c \x01(\r:\x01\x30\x12;\n\npartitions\x18\r \x03(\x0b\x32\'.chromeos_update_engine.PartitionUpdate\x12\x15\n\rmax_timestamp\x18\x0e \x01(\x03\x12T\n\x1a\x64ynamic_partition_metadata\x18\x0f \x01(\x0b\x32\x30.chromeos_update_engine.DynamicPartitionMetadata\x12\x16\n\x0epartial_update\x18\x10 \x01(\x08\x42\x02H\x03'
 )
 
 
@@ -41,11 +39,11 @@
       type=None),
     _descriptor.EnumValueDescriptor(
       name='MOVE', index=2, number=2,
-      serialized_options=_b('\010\001'),
+      serialized_options=b'\010\001',
       type=None),
     _descriptor.EnumValueDescriptor(
       name='BSDIFF', index=3, number=3,
-      serialized_options=_b('\010\001'),
+      serialized_options=b'\010\001',
       type=None),
     _descriptor.EnumValueDescriptor(
       name='SOURCE_COPY', index=4, number=4,
@@ -78,8 +76,8 @@
   ],
   containing_type=None,
   serialized_options=None,
-  serialized_start=712,
-  serialized_end=885,
+  serialized_start=750,
+  serialized_end=923,
 )
 _sym_db.RegisterEnumDescriptor(_INSTALLOPERATION_TYPE)
 
@@ -135,11 +133,18 @@
       has_default_value=False, default_value=0,
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
-      serialized_options=None, file=DESCRIPTOR),
+      serialized_options=b'\030\001', file=DESCRIPTOR),
     _descriptor.FieldDescriptor(
       name='data', full_name='chromeos_update_engine.Signatures.Signature.data', index=1,
       number=2, type=12, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b(""),
+      has_default_value=False, default_value=b"",
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='unpadded_signature_size', full_name='chromeos_update_engine.Signatures.Signature.unpadded_signature_size', index=2,
+      number=3, type=7, cpp_type=3, label=1,
+      has_default_value=False, default_value=0,
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
       serialized_options=None, file=DESCRIPTOR),
@@ -155,8 +160,8 @@
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=180,
-  serialized_end=222,
+  serialized_start=181,
+  serialized_end=260,
 )
 
 _SIGNATURES = _descriptor.Descriptor(
@@ -185,8 +190,8 @@
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=100,
-  serialized_end=222,
+  serialized_start=101,
+  serialized_end=260,
 )
 
 
@@ -207,7 +212,7 @@
     _descriptor.FieldDescriptor(
       name='hash', full_name='chromeos_update_engine.PartitionInfo.hash', index=1,
       number=2, type=12, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b(""),
+      has_default_value=False, default_value=b"",
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
       serialized_options=None, file=DESCRIPTOR),
@@ -223,8 +228,8 @@
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=224,
-  serialized_end=267,
+  serialized_start=262,
+  serialized_end=305,
 )
 
 
@@ -238,42 +243,42 @@
     _descriptor.FieldDescriptor(
       name='board', full_name='chromeos_update_engine.ImageInfo.board', index=0,
       number=1, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
+      has_default_value=False, default_value=b"".decode('utf-8'),
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
       serialized_options=None, file=DESCRIPTOR),
     _descriptor.FieldDescriptor(
       name='key', full_name='chromeos_update_engine.ImageInfo.key', index=1,
       number=2, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
+      has_default_value=False, default_value=b"".decode('utf-8'),
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
       serialized_options=None, file=DESCRIPTOR),
     _descriptor.FieldDescriptor(
       name='channel', full_name='chromeos_update_engine.ImageInfo.channel', index=2,
       number=3, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
+      has_default_value=False, default_value=b"".decode('utf-8'),
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
       serialized_options=None, file=DESCRIPTOR),
     _descriptor.FieldDescriptor(
       name='version', full_name='chromeos_update_engine.ImageInfo.version', index=3,
       number=4, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
+      has_default_value=False, default_value=b"".decode('utf-8'),
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
       serialized_options=None, file=DESCRIPTOR),
     _descriptor.FieldDescriptor(
       name='build_channel', full_name='chromeos_update_engine.ImageInfo.build_channel', index=4,
       number=5, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
+      has_default_value=False, default_value=b"".decode('utf-8'),
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
       serialized_options=None, file=DESCRIPTOR),
     _descriptor.FieldDescriptor(
       name='build_version', full_name='chromeos_update_engine.ImageInfo.build_version', index=5,
       number=6, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
+      has_default_value=False, default_value=b"".decode('utf-8'),
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
       serialized_options=None, file=DESCRIPTOR),
@@ -289,8 +294,8 @@
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=269,
-  serialized_end=388,
+  serialized_start=307,
+  serialized_end=426,
 )
 
 
@@ -353,14 +358,14 @@
     _descriptor.FieldDescriptor(
       name='data_sha256_hash', full_name='chromeos_update_engine.InstallOperation.data_sha256_hash', index=7,
       number=8, type=12, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b(""),
+      has_default_value=False, default_value=b"",
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
       serialized_options=None, file=DESCRIPTOR),
     _descriptor.FieldDescriptor(
       name='src_sha256_hash', full_name='chromeos_update_engine.InstallOperation.src_sha256_hash', index=8,
       number=9, type=12, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b(""),
+      has_default_value=False, default_value=b"",
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
       serialized_options=None, file=DESCRIPTOR),
@@ -377,8 +382,8 @@
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=391,
-  serialized_end=885,
+  serialized_start=429,
+  serialized_end=923,
 )
 
 
@@ -392,7 +397,7 @@
     _descriptor.FieldDescriptor(
       name='partition_name', full_name='chromeos_update_engine.PartitionUpdate.partition_name', index=0,
       number=1, type=9, cpp_type=9, label=2,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
+      has_default_value=False, default_value=b"".decode('utf-8'),
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
       serialized_options=None, file=DESCRIPTOR),
@@ -406,14 +411,14 @@
     _descriptor.FieldDescriptor(
       name='postinstall_path', full_name='chromeos_update_engine.PartitionUpdate.postinstall_path', index=2,
       number=3, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
+      has_default_value=False, default_value=b"".decode('utf-8'),
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
       serialized_options=None, file=DESCRIPTOR),
     _descriptor.FieldDescriptor(
       name='filesystem_type', full_name='chromeos_update_engine.PartitionUpdate.filesystem_type', index=3,
       number=4, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
+      has_default_value=False, default_value=b"".decode('utf-8'),
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
       serialized_options=None, file=DESCRIPTOR),
@@ -469,14 +474,14 @@
     _descriptor.FieldDescriptor(
       name='hash_tree_algorithm', full_name='chromeos_update_engine.PartitionUpdate.hash_tree_algorithm', index=11,
       number=12, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
+      has_default_value=False, default_value=b"".decode('utf-8'),
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
       serialized_options=None, file=DESCRIPTOR),
     _descriptor.FieldDescriptor(
       name='hash_tree_salt', full_name='chromeos_update_engine.PartitionUpdate.hash_tree_salt', index=12,
       number=13, type=12, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b(""),
+      has_default_value=False, default_value=b"",
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
       serialized_options=None, file=DESCRIPTOR),
@@ -501,6 +506,13 @@
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
       serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='version', full_name='chromeos_update_engine.PartitionUpdate.version', index=16,
+      number=17, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=b"".decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
   ],
   extensions=[
   ],
@@ -513,8 +525,8 @@
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=888,
-  serialized_end=1615,
+  serialized_start=926,
+  serialized_end=1670,
 )
 
 
@@ -528,7 +540,7 @@
     _descriptor.FieldDescriptor(
       name='name', full_name='chromeos_update_engine.DynamicPartitionGroup.name', index=0,
       number=1, type=9, cpp_type=9, label=2,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
+      has_default_value=False, default_value=b"".decode('utf-8'),
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
       serialized_options=None, file=DESCRIPTOR),
@@ -558,8 +570,8 @@
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=1617,
-  serialized_end=1693,
+  serialized_start=1672,
+  serialized_end=1748,
 )
 
 
@@ -577,6 +589,13 @@
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
       serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='snapshot_enabled', full_name='chromeos_update_engine.DynamicPartitionMetadata.snapshot_enabled', index=1,
+      number=2, type=8, cpp_type=7, label=1,
+      has_default_value=False, default_value=False,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
   ],
   extensions=[
   ],
@@ -589,8 +608,8 @@
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=1695,
-  serialized_end=1784,
+  serialized_start=1750,
+  serialized_end=1865,
 )
 
 
@@ -607,14 +626,14 @@
       has_default_value=False, default_value=[],
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
-      serialized_options=_b('\030\001'), file=DESCRIPTOR),
+      serialized_options=b'\030\001', file=DESCRIPTOR),
     _descriptor.FieldDescriptor(
       name='kernel_install_operations', full_name='chromeos_update_engine.DeltaArchiveManifest.kernel_install_operations', index=1,
       number=2, type=11, cpp_type=10, label=3,
       has_default_value=False, default_value=[],
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
-      serialized_options=_b('\030\001'), file=DESCRIPTOR),
+      serialized_options=b'\030\001', file=DESCRIPTOR),
     _descriptor.FieldDescriptor(
       name='block_size', full_name='chromeos_update_engine.DeltaArchiveManifest.block_size', index=2,
       number=3, type=13, cpp_type=3, label=1,
@@ -642,28 +661,28 @@
       has_default_value=False, default_value=None,
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
-      serialized_options=_b('\030\001'), file=DESCRIPTOR),
+      serialized_options=b'\030\001', file=DESCRIPTOR),
     _descriptor.FieldDescriptor(
       name='new_kernel_info', full_name='chromeos_update_engine.DeltaArchiveManifest.new_kernel_info', index=6,
       number=7, type=11, cpp_type=10, label=1,
       has_default_value=False, default_value=None,
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
-      serialized_options=_b('\030\001'), file=DESCRIPTOR),
+      serialized_options=b'\030\001', file=DESCRIPTOR),
     _descriptor.FieldDescriptor(
       name='old_rootfs_info', full_name='chromeos_update_engine.DeltaArchiveManifest.old_rootfs_info', index=7,
       number=8, type=11, cpp_type=10, label=1,
       has_default_value=False, default_value=None,
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
-      serialized_options=_b('\030\001'), file=DESCRIPTOR),
+      serialized_options=b'\030\001', file=DESCRIPTOR),
     _descriptor.FieldDescriptor(
       name='new_rootfs_info', full_name='chromeos_update_engine.DeltaArchiveManifest.new_rootfs_info', index=8,
       number=9, type=11, cpp_type=10, label=1,
       has_default_value=False, default_value=None,
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
-      serialized_options=_b('\030\001'), file=DESCRIPTOR),
+      serialized_options=b'\030\001', file=DESCRIPTOR),
     _descriptor.FieldDescriptor(
       name='old_image_info', full_name='chromeos_update_engine.DeltaArchiveManifest.old_image_info', index=9,
       number=10, type=11, cpp_type=10, label=1,
@@ -706,6 +725,13 @@
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
       serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='partial_update', full_name='chromeos_update_engine.DeltaArchiveManifest.partial_update', index=15,
+      number=16, type=8, cpp_type=7, label=1,
+      has_default_value=False, default_value=False,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
   ],
   extensions=[
   ],
@@ -718,8 +744,8 @@
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=1787,
-  serialized_end=2628,
+  serialized_start=1868,
+  serialized_end=2733,
 )
 
 _SIGNATURES_SIGNATURE.containing_type = _SIGNATURES
@@ -758,79 +784,80 @@
 DESCRIPTOR.message_types_by_name['DeltaArchiveManifest'] = _DELTAARCHIVEMANIFEST
 _sym_db.RegisterFileDescriptor(DESCRIPTOR)
 
-Extent = _reflection.GeneratedProtocolMessageType('Extent', (_message.Message,), dict(
-  DESCRIPTOR = _EXTENT,
-  __module__ = 'update_metadata_pb2'
+Extent = _reflection.GeneratedProtocolMessageType('Extent', (_message.Message,), {
+  'DESCRIPTOR' : _EXTENT,
+  '__module__' : 'update_metadata_pb2'
   # @@protoc_insertion_point(class_scope:chromeos_update_engine.Extent)
-  ))
+  })
 _sym_db.RegisterMessage(Extent)
 
-Signatures = _reflection.GeneratedProtocolMessageType('Signatures', (_message.Message,), dict(
+Signatures = _reflection.GeneratedProtocolMessageType('Signatures', (_message.Message,), {
 
-  Signature = _reflection.GeneratedProtocolMessageType('Signature', (_message.Message,), dict(
-    DESCRIPTOR = _SIGNATURES_SIGNATURE,
-    __module__ = 'update_metadata_pb2'
+  'Signature' : _reflection.GeneratedProtocolMessageType('Signature', (_message.Message,), {
+    'DESCRIPTOR' : _SIGNATURES_SIGNATURE,
+    '__module__' : 'update_metadata_pb2'
     # @@protoc_insertion_point(class_scope:chromeos_update_engine.Signatures.Signature)
-    ))
+    })
   ,
-  DESCRIPTOR = _SIGNATURES,
-  __module__ = 'update_metadata_pb2'
+  'DESCRIPTOR' : _SIGNATURES,
+  '__module__' : 'update_metadata_pb2'
   # @@protoc_insertion_point(class_scope:chromeos_update_engine.Signatures)
-  ))
+  })
 _sym_db.RegisterMessage(Signatures)
 _sym_db.RegisterMessage(Signatures.Signature)
 
-PartitionInfo = _reflection.GeneratedProtocolMessageType('PartitionInfo', (_message.Message,), dict(
-  DESCRIPTOR = _PARTITIONINFO,
-  __module__ = 'update_metadata_pb2'
+PartitionInfo = _reflection.GeneratedProtocolMessageType('PartitionInfo', (_message.Message,), {
+  'DESCRIPTOR' : _PARTITIONINFO,
+  '__module__' : 'update_metadata_pb2'
   # @@protoc_insertion_point(class_scope:chromeos_update_engine.PartitionInfo)
-  ))
+  })
 _sym_db.RegisterMessage(PartitionInfo)
 
-ImageInfo = _reflection.GeneratedProtocolMessageType('ImageInfo', (_message.Message,), dict(
-  DESCRIPTOR = _IMAGEINFO,
-  __module__ = 'update_metadata_pb2'
+ImageInfo = _reflection.GeneratedProtocolMessageType('ImageInfo', (_message.Message,), {
+  'DESCRIPTOR' : _IMAGEINFO,
+  '__module__' : 'update_metadata_pb2'
   # @@protoc_insertion_point(class_scope:chromeos_update_engine.ImageInfo)
-  ))
+  })
 _sym_db.RegisterMessage(ImageInfo)
 
-InstallOperation = _reflection.GeneratedProtocolMessageType('InstallOperation', (_message.Message,), dict(
-  DESCRIPTOR = _INSTALLOPERATION,
-  __module__ = 'update_metadata_pb2'
+InstallOperation = _reflection.GeneratedProtocolMessageType('InstallOperation', (_message.Message,), {
+  'DESCRIPTOR' : _INSTALLOPERATION,
+  '__module__' : 'update_metadata_pb2'
   # @@protoc_insertion_point(class_scope:chromeos_update_engine.InstallOperation)
-  ))
+  })
 _sym_db.RegisterMessage(InstallOperation)
 
-PartitionUpdate = _reflection.GeneratedProtocolMessageType('PartitionUpdate', (_message.Message,), dict(
-  DESCRIPTOR = _PARTITIONUPDATE,
-  __module__ = 'update_metadata_pb2'
+PartitionUpdate = _reflection.GeneratedProtocolMessageType('PartitionUpdate', (_message.Message,), {
+  'DESCRIPTOR' : _PARTITIONUPDATE,
+  '__module__' : 'update_metadata_pb2'
   # @@protoc_insertion_point(class_scope:chromeos_update_engine.PartitionUpdate)
-  ))
+  })
 _sym_db.RegisterMessage(PartitionUpdate)
 
-DynamicPartitionGroup = _reflection.GeneratedProtocolMessageType('DynamicPartitionGroup', (_message.Message,), dict(
-  DESCRIPTOR = _DYNAMICPARTITIONGROUP,
-  __module__ = 'update_metadata_pb2'
+DynamicPartitionGroup = _reflection.GeneratedProtocolMessageType('DynamicPartitionGroup', (_message.Message,), {
+  'DESCRIPTOR' : _DYNAMICPARTITIONGROUP,
+  '__module__' : 'update_metadata_pb2'
   # @@protoc_insertion_point(class_scope:chromeos_update_engine.DynamicPartitionGroup)
-  ))
+  })
 _sym_db.RegisterMessage(DynamicPartitionGroup)
 
-DynamicPartitionMetadata = _reflection.GeneratedProtocolMessageType('DynamicPartitionMetadata', (_message.Message,), dict(
-  DESCRIPTOR = _DYNAMICPARTITIONMETADATA,
-  __module__ = 'update_metadata_pb2'
+DynamicPartitionMetadata = _reflection.GeneratedProtocolMessageType('DynamicPartitionMetadata', (_message.Message,), {
+  'DESCRIPTOR' : _DYNAMICPARTITIONMETADATA,
+  '__module__' : 'update_metadata_pb2'
   # @@protoc_insertion_point(class_scope:chromeos_update_engine.DynamicPartitionMetadata)
-  ))
+  })
 _sym_db.RegisterMessage(DynamicPartitionMetadata)
 
-DeltaArchiveManifest = _reflection.GeneratedProtocolMessageType('DeltaArchiveManifest', (_message.Message,), dict(
-  DESCRIPTOR = _DELTAARCHIVEMANIFEST,
-  __module__ = 'update_metadata_pb2'
+DeltaArchiveManifest = _reflection.GeneratedProtocolMessageType('DeltaArchiveManifest', (_message.Message,), {
+  'DESCRIPTOR' : _DELTAARCHIVEMANIFEST,
+  '__module__' : 'update_metadata_pb2'
   # @@protoc_insertion_point(class_scope:chromeos_update_engine.DeltaArchiveManifest)
-  ))
+  })
 _sym_db.RegisterMessage(DeltaArchiveManifest)
 
 
 DESCRIPTOR._options = None
+_SIGNATURES_SIGNATURE.fields_by_name['version']._options = None
 _INSTALLOPERATION_TYPE.values_by_name["MOVE"]._options = None
 _INSTALLOPERATION_TYPE.values_by_name["BSDIFF"]._options = None
 _DELTAARCHIVEMANIFEST.fields_by_name['install_operations']._options = None
diff --git a/service_delegate_android_interface.h b/service_delegate_android_interface.h
index 5267bb0..34a9712 100644
--- a/service_delegate_android_interface.h
+++ b/service_delegate_android_interface.h
@@ -19,6 +19,7 @@
 
 #include <inttypes.h>
 
+#include <memory>
 #include <string>
 #include <vector>
 
@@ -26,6 +27,18 @@
 
 namespace chromeos_update_engine {
 
+// See ServiceDelegateAndroidInterface.CleanupSuccessfulUpdate
+// Wraps a IUpdateEngineCallback binder object used specifically for
+// CleanupSuccessfulUpdate.
+class CleanupSuccessfulUpdateCallbackInterface {
+ public:
+  virtual ~CleanupSuccessfulUpdateCallbackInterface() {}
+  virtual void OnCleanupProgressUpdate(double progress) = 0;
+  virtual void OnCleanupComplete(int32_t error_code) = 0;
+  // Call RegisterForDeathNotifications on the internal binder object.
+  virtual void RegisterForDeathNotifications(base::Closure unbind) = 0;
+};
+
 // This class defines the interface exposed by the Android version of the
 // daemon service. This interface only includes the method calls that such
 // daemon exposes. For asynchronous events initiated by a class implementing
@@ -47,6 +60,13 @@
       const std::vector<std::string>& key_value_pair_headers,
       brillo::ErrorPtr* error) = 0;
 
+  virtual bool ApplyPayload(
+      int fd,
+      int64_t payload_offset,
+      int64_t payload_size,
+      const std::vector<std::string>& key_value_pair_headers,
+      brillo::ErrorPtr* error) = 0;
+
   // Suspend an ongoing update. Returns true if there was an update ongoing and
   // it was suspended. In case of failure, it returns false and sets |error|
   // accordingly.
@@ -76,6 +96,28 @@
   virtual bool VerifyPayloadApplicable(const std::string& metadata_filename,
                                        brillo::ErrorPtr* error) = 0;
 
+  // Allocates space for a payload.
+  // Returns 0 if space is successfully preallocated.
+  // Returns non-zero if not enough space is available; returned value is
+  // the total space required (in bytes) to be free on the device for this
+  // update to be applied, and |error| is unset.
+  // In case of error, returns 0, and sets |error| accordingly.
+  //
+  // This function may block for several minutes in the worst case.
+  virtual uint64_t AllocateSpaceForPayload(
+      const std::string& metadata_filename,
+      const std::vector<std::string>& key_value_pair_headers,
+      brillo::ErrorPtr* error) = 0;
+
+  // Wait for merge to complete, then clean up merge after an update has been
+  // successful.
+  //
+  // This function returns immediately. Progress updates are provided in
+  // |callback|.
+  virtual void CleanupSuccessfulUpdate(
+      std::unique_ptr<CleanupSuccessfulUpdateCallbackInterface> callback,
+      brillo::ErrorPtr* error) = 0;
+
  protected:
   ServiceDelegateAndroidInterface() = default;
 };
diff --git a/sideload_main.cc b/sideload_main.cc
index 818fa5c..27967cd 100644
--- a/sideload_main.cc
+++ b/sideload_main.cc
@@ -20,7 +20,6 @@
 #include <vector>
 
 #include <base/command_line.h>
-#include <base/logging.h>
 #include <base/strings/string_split.h>
 #include <base/strings/stringprintf.h>
 #include <brillo/asynchronous_signal_handler.h>
@@ -36,6 +35,7 @@
 #include "update_engine/common/subprocess.h"
 #include "update_engine/common/terminator.h"
 #include "update_engine/common/utils.h"
+#include "update_engine/logging.h"
 #include "update_engine/update_attempter_android.h"
 
 using std::string;
@@ -46,17 +46,6 @@
 namespace chromeos_update_engine {
 namespace {
 
-void SetupLogging() {
-  string log_file;
-  logging::LoggingSettings log_settings;
-  log_settings.lock_log = logging::DONT_LOCK_LOG_FILE;
-  log_settings.delete_old = logging::APPEND_TO_OLD_LOG_FILE;
-  log_settings.log_file = nullptr;
-  log_settings.logging_dest = logging::LOG_TO_SYSTEM_DEBUG_LOG;
-
-  logging::InitLogging(log_settings);
-}
-
 class SideloadDaemonState : public DaemonStateInterface,
                             public ServiceObserverInterface {
  public:
@@ -195,7 +184,7 @@
   DEFINE_int64(status_fd, -1, "A file descriptor to notify the update status.");
 
   chromeos_update_engine::Terminator::Init();
-  chromeos_update_engine::SetupLogging();
+  chromeos_update_engine::SetupLogging(true /* stderr */, false /* file */);
   brillo::FlagHelper::Init(argc, argv, "Update Engine Sideload");
 
   LOG(INFO) << "Update Engine Sideloading starting";
diff --git a/stable/Android.bp b/stable/Android.bp
new file mode 100644
index 0000000..337ae96
--- /dev/null
+++ b/stable/Android.bp
@@ -0,0 +1,65 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// Stable AIDL interface between update_engine and other APEXes
+// ========================================================
+aidl_interface {
+    name: "libupdate_engine_stable",
+    srcs: [
+        "android/os/IUpdateEngineStable.aidl",
+        "android/os/IUpdateEngineStableCallback.aidl",
+    ],
+    backend: {
+        cpp: {
+            enabled: true,
+        },
+        java: {
+            enabled: false,
+        },
+        ndk: {
+            enabled: true,
+            apex_available: [
+                "com.android.gki.*",
+            ],
+        },
+    },
+}
+
+// update_engine_stable_client (type: executable)
+// ========================================================
+// update_engine console client installed to APEXes
+cc_binary {
+    name: "update_engine_stable_client",
+
+    header_libs: [
+        "libupdate_engine_headers",
+    ],
+    shared_libs: [
+        "libbinder_ndk",
+        "libbase",
+        "liblog",
+    ],
+    static_libs: [
+        "libgflags",
+        "libupdate_engine_stable-ndk_platform",
+    ],
+    srcs: [
+        "update_engine_stable_client.cc",
+    ],
+    apex_available: [
+        "com.android.gki.*",
+    ],
+}
diff --git a/stable/aidl_api/libupdate_engine_stable/current/android/os/IUpdateEngineStable.aidl b/stable/aidl_api/libupdate_engine_stable/current/android/os/IUpdateEngineStable.aidl
new file mode 100644
index 0000000..82c3ca5
--- /dev/null
+++ b/stable/aidl_api/libupdate_engine_stable/current/android/os/IUpdateEngineStable.aidl
@@ -0,0 +1,23 @@
+///////////////////////////////////////////////////////////////////////////////
+// THIS FILE IS IMMUTABLE. DO NOT EDIT IN ANY CASE.                          //
+///////////////////////////////////////////////////////////////////////////////
+
+// This file is a snapshot of an AIDL interface (or parcelable). Do not try to
+// edit this file. It looks like you are doing that because you have modified
+// an AIDL interface in a backward-incompatible way, e.g., deleting a function
+// from an interface or a field from a parcelable and it broke the build. That
+// breakage is intended.
+//
+// You must not make a backward incompatible changes to the AIDL files built
+// with the aidl_interface module type with versions property set. The module
+// type is used to build AIDL files in a way that they can be used across
+// independently updatable components of the system. If a device is shipped
+// with such a backward incompatible change, it has a high risk of breaking
+// later when a module using the interface is updated, e.g., Mainline modules.
+
+package android.os;
+interface IUpdateEngineStable {
+  void applyPayloadFd(in ParcelFileDescriptor pfd, in long payload_offset, in long payload_size, in String[] headerKeyValuePairs);
+  boolean bind(android.os.IUpdateEngineStableCallback callback);
+  boolean unbind(android.os.IUpdateEngineStableCallback callback);
+}
diff --git a/stable/aidl_api/libupdate_engine_stable/current/android/os/IUpdateEngineStableCallback.aidl b/stable/aidl_api/libupdate_engine_stable/current/android/os/IUpdateEngineStableCallback.aidl
new file mode 100644
index 0000000..4c72b49
--- /dev/null
+++ b/stable/aidl_api/libupdate_engine_stable/current/android/os/IUpdateEngineStableCallback.aidl
@@ -0,0 +1,22 @@
+///////////////////////////////////////////////////////////////////////////////
+// THIS FILE IS IMMUTABLE. DO NOT EDIT IN ANY CASE.                          //
+///////////////////////////////////////////////////////////////////////////////
+
+// This file is a snapshot of an AIDL interface (or parcelable). Do not try to
+// edit this file. It looks like you are doing that because you have modified
+// an AIDL interface in a backward-incompatible way, e.g., deleting a function
+// from an interface or a field from a parcelable and it broke the build. That
+// breakage is intended.
+//
+// You must not make a backward incompatible changes to the AIDL files built
+// with the aidl_interface module type with versions property set. The module
+// type is used to build AIDL files in a way that they can be used across
+// independently updatable components of the system. If a device is shipped
+// with such a backward incompatible change, it has a high risk of breaking
+// later when a module using the interface is updated, e.g., Mainline modules.
+
+package android.os;
+interface IUpdateEngineStableCallback {
+  oneway void onStatusUpdate(int status_code, float percentage);
+  oneway void onPayloadApplicationComplete(int error_code);
+}
diff --git a/stable/android/os/IUpdateEngineStable.aidl b/stable/android/os/IUpdateEngineStable.aidl
new file mode 100644
index 0000000..b3b6674
--- /dev/null
+++ b/stable/android/os/IUpdateEngineStable.aidl
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.os;
+
+import android.os.IUpdateEngineStableCallback;
+import android.os.ParcelFileDescriptor;
+
+/**
+ * The stable interface exposed by the update engine daemon.
+ */
+interface IUpdateEngineStable {
+  /**
+   * Apply the given payload as provided in the given file descriptor.
+   *
+   * See {@link #bind(IUpdateEngineCallback)} for status updates.
+   *
+   * @param pfd The file descriptor opened at the payload file. Note that the daemon must have
+   *   enough permission to operate on the file descriptor.
+   * @param payload_offset offset into pfd where the payload binary starts.
+   * @param payload_size length after payload_offset to read from pfd. If 0, it will be auto
+   *   detected.
+   * @param headerKeyValuePairs additional header key value pairs, in the format of "key=value".
+   * @see android.os.UpdateEngine#applyPayload(android.content.res.AssetFileDescriptor, String[])
+   */
+  void applyPayloadFd(in ParcelFileDescriptor pfd,
+                      in long payload_offset,
+                      in long payload_size,
+                      in String[] headerKeyValuePairs);
+
+  /**
+   * Bind a callback for status updates on payload application.
+   *
+   * At any given time, only one callback can be bound. If a callback is already bound,
+   * subsequent binding will fail and return false until the bound callback is unbound. That is,
+   * binding is first-come, first-serve.
+   *
+   * A bound callback may be unbound explicitly by calling
+   * {@link #unbind(IUpdateEngineStableCallback)}, or
+   * implicitly when the process implementing the callback dies.
+   *
+   * @param callback See {@link IUpdateEngineStableCallback}
+   * @return true if binding is successful, false otherwise.
+   * @see android.os.UpdateEngine#bind(android.os.UpdateEngineCallback)
+   */
+  boolean bind(IUpdateEngineStableCallback callback);
+
+  /**
+   * Unbind a possibly bound callback.
+   *
+   * If the provided callback does not match the previously bound callback, unbinding fails.
+   *
+   * Note that a callback may also be unbound when the process implementing the callback dies.
+   * Hence, a client usually does not need to explicitly unbind a callback unless it wants to change
+   * the bound callback.
+   *
+   * @param callback The callback to be unbound. See {@link IUpdateEngineStableCallback}.
+   * @return true if unbinding is successful, false otherwise.
+   * @see android.os.UpdateEngine#unbind(android.os.UpdateEngineCallback)
+   */
+  boolean unbind(IUpdateEngineStableCallback callback);
+}
diff --git a/stable/android/os/IUpdateEngineStableCallback.aidl b/stable/android/os/IUpdateEngineStableCallback.aidl
new file mode 100644
index 0000000..d8fc333
--- /dev/null
+++ b/stable/android/os/IUpdateEngineStableCallback.aidl
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.os;
+
+/**
+ * The stable Callback interface for IUpdateEngineStable.
+ */
+oneway interface IUpdateEngineStableCallback {
+  /**
+   * Invoked when a payload is being applied and there is a status update.
+   *
+   * @param status_code see {@link android.os.UpdateEngine.UpdateStatusConstants}.
+   * @param percentage percentage of progress of the current stage.
+   * @see android.os.UpdateEngineCallback#onStatusUpdate(int, float)
+   */
+  void onStatusUpdate(int status_code, float percentage);
+
+  /**
+   * Invoked when a payload has finished being applied.
+   *
+   * @param error_code see {@link android.os.UpdateEngine.ErrorCodeConstants}
+   * @see android.os.UpdateEngineCallback#onPayloadApplicationComplete(int)
+   */
+  void onPayloadApplicationComplete(int error_code);
+}
diff --git a/stable/update_engine_stable_client.cc b/stable/update_engine_stable_client.cc
new file mode 100644
index 0000000..da203c4
--- /dev/null
+++ b/stable/update_engine_stable_client.cc
@@ -0,0 +1,188 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// update_engine console client installed to APEXes for scripts to invoke
+// directly. Uses the stable API.
+
+#include <fcntl.h>
+#include <sysexits.h>
+#include <unistd.h>
+
+#include <vector>
+
+#include <aidl/android/os/BnUpdateEngineStableCallback.h>
+#include <aidl/android/os/IUpdateEngineStable.h>
+#include <android-base/logging.h>
+#include <android-base/strings.h>
+#include <android/binder_manager.h>
+#include <android/binder_process.h>
+#include <android/binder_ibinder.h>
+#include <common/error_code.h>
+#include <gflags/gflags.h>
+#include <utils/StrongPointer.h>
+
+namespace chromeos_update_engine::internal {
+
+DEFINE_string(payload,
+              "file:///path/to/payload.bin",
+              "The file URI to the update payload to use, or path to the file");
+DEFINE_int64(offset,
+             0,
+             "The offset in the payload where the CrAU update starts.");
+DEFINE_int64(size,
+             0,
+             "The size of the CrAU part of the payload. If 0 is passed, it "
+             "will be autodetected.");
+DEFINE_string(headers,
+              "",
+              "A list of key-value pairs, one element of the list per line.");
+
+[[noreturn]] int Exit(int return_code) {
+  LOG(INFO) << "Exit: " << return_code;
+  exit(return_code);
+}
+// Called whenever the UpdateEngine daemon dies.
+void UpdateEngineServiceDied(void*) {
+  LOG(ERROR) << "UpdateEngineService died.";
+  Exit(EX_SOFTWARE);
+}
+
+class UpdateEngineClientAndroid {
+ public:
+  UpdateEngineClientAndroid() = default;
+  int Run();
+
+ private:
+  class UECallback : public aidl::android::os::BnUpdateEngineStableCallback {
+   public:
+    UECallback() = default;
+
+    // android::os::BnUpdateEngineStableCallback overrides.
+    ndk::ScopedAStatus onStatusUpdate(int status_code, float progress) override;
+    ndk::ScopedAStatus onPayloadApplicationComplete(int error_code) override;
+  };
+
+  static std::vector<std::string> ParseHeaders(const std::string& arg);
+
+  const ndk::ScopedAIBinder_DeathRecipient death_recipient_{
+      AIBinder_DeathRecipient_new(&UpdateEngineServiceDied)};
+  std::shared_ptr<aidl::android::os::IUpdateEngineStable> service_;
+  std::shared_ptr<aidl::android::os::BnUpdateEngineStableCallback> callback_;
+};
+
+ndk::ScopedAStatus UpdateEngineClientAndroid::UECallback::onStatusUpdate(
+    int status_code, float progress) {
+  LOG(INFO) << "onStatusUpdate(" << status_code << ", " << progress << ")";
+  return ndk::ScopedAStatus::ok();
+}
+
+ndk::ScopedAStatus
+UpdateEngineClientAndroid::UECallback::onPayloadApplicationComplete(
+    int error_code) {
+  LOG(INFO) << "onPayloadApplicationComplete(" << error_code << ")";
+  auto code = static_cast<ErrorCode>(error_code);
+  Exit((code == ErrorCode::kSuccess || code == ErrorCode::kUpdatedButNotActive)
+           ? EX_OK
+           : EX_SOFTWARE);
+}
+
+int UpdateEngineClientAndroid::Run() {
+  service_ = aidl::android::os::IUpdateEngineStable::fromBinder(ndk::SpAIBinder(
+      AServiceManager_getService("android.os.UpdateEngineStableService")));
+  if (service_ == nullptr) {
+    LOG(ERROR)
+        << "Failed to get IUpdateEngineStable binder from service manager.";
+    return EX_SOFTWARE;
+  }
+
+  // Register a callback object with the service.
+  callback_ = ndk::SharedRefBase::make<UECallback>();
+  bool bound;
+  if (!service_->bind(callback_, &bound).isOk() || !bound) {
+    LOG(ERROR) << "Failed to bind() the UpdateEngine daemon.";
+    return EX_SOFTWARE;
+  }
+
+  auto headers = ParseHeaders(FLAGS_headers);
+  ndk::ScopedAStatus status;
+  const char* payload_path;
+  std::string file_prefix = "file://";
+  if (android::base::StartsWith(FLAGS_payload, file_prefix)) {
+    payload_path = FLAGS_payload.data() + file_prefix.length();
+  } else {
+    payload_path = FLAGS_payload.data();
+  }
+  ndk::ScopedFileDescriptor ufd(
+      TEMP_FAILURE_RETRY(open(payload_path, O_RDONLY)));
+  if (ufd.get() < 0) {
+    PLOG(ERROR) << "Can't open " << payload_path;
+    return EX_SOFTWARE;
+  }
+  status = service_->applyPayloadFd(ufd, FLAGS_offset, FLAGS_size, headers);
+  if (!status.isOk()) {
+    LOG(ERROR) << "Cannot apply payload: " << status.getDescription();
+    return EX_SOFTWARE;
+  }
+
+  // When following updates status changes, exit if the update_engine daemon
+  // dies.
+  if (AIBinder_linkToDeath(service_->asBinder().get(),
+                           death_recipient_.get(),
+                           nullptr) != STATUS_OK) {
+    return EX_SOFTWARE;
+  }
+
+  return EX_OK;
+}
+
+std::vector<std::string> UpdateEngineClientAndroid::ParseHeaders(
+    const std::string& arg) {
+  std::vector<std::string> lines = android::base::Split(arg, "\n");
+  std::vector<std::string> headers;
+  for (const auto& line : lines) {
+    auto header = android::base::Trim(line);
+    if (!header.empty()) {
+      headers.push_back(header);
+    }
+  }
+  return headers;
+}
+
+}  // namespace chromeos_update_engine::internal
+
+int main(int argc, char** argv) {
+  android::base::InitLogging(argv);
+  gflags::ParseCommandLineFlags(&argc, &argv, true);
+
+  // Unlike other update_engine* processes that use message loops,
+  // update_engine_stable_client uses a thread pool model. However, number of
+  // threads is limited to 1; that is, 0 additional threads should be spawned.
+  // This avoids some race conditions.
+  if (!ABinderProcess_setThreadPoolMaxThreadCount(0)) {
+    LOG(ERROR) << "Cannot set thread pool max thread count";
+    return EX_SOFTWARE;
+  }
+  ABinderProcess_startThreadPool();
+
+  chromeos_update_engine::internal::UpdateEngineClientAndroid client{};
+  int code = client.Run();
+  if (code != EX_OK)
+    return code;
+
+  ABinderProcess_joinThreadPool();
+  LOG(ERROR) << "Exited from joinThreadPool.";
+  return EX_SOFTWARE;
+}
diff --git a/test_config.xml b/test_config.xml
new file mode 100644
index 0000000..fe3cbfd
--- /dev/null
+++ b/test_config.xml
@@ -0,0 +1,34 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2019 The Android Open Source Project
+
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+-->
+<configuration description="Config to run update_engine_unittests on device">
+    <option name="test-suite-tag" value="apct" />
+    <option name="test-suite-tag" value="apct-native" />
+    <target_preparer class="com.android.tradefed.targetprep.RootTargetPreparer" />
+    <target_preparer class="com.android.tradefed.targetprep.PushFilePreparer">
+        <option name="cleanup" value="true" />
+        <option name="push" value="update_engine_unittests->/data/nativetest/update_engine_unittests" />
+    </target_preparer>
+
+    <test class="com.android.tradefed.testtype.GTest" >
+        <option name="native-test-device-path" value="/data/nativetest" />
+        <!-- The following rules prevent the test runner from invoking these helper
+             executables directly as gtests. -->
+        <option name="file-exclusion-filter-regex" value=".*/delta_generator$" />
+        <option name="file-exclusion-filter-regex" value=".*/test_http_server$" />
+        <option name="file-exclusion-filter-regex" value=".*/test_subprocess$" />
+        <option name="module-name" value="update_engine_unittests" />
+    </test>
+</configuration>
diff --git a/test_http_server.cc b/test_http_server.cc
index 4fc89e5..cf6f10d 100644
--- a/test_http_server.cc
+++ b/test_http_server.cc
@@ -189,7 +189,8 @@
   ret = WriteString(fd,
                     string("HTTP/1.1 ") + Itoa(return_code) + " " +
                         GetHttpResponseDescription(return_code) +
-                        EOL "Content-Type: application/octet-stream" EOL);
+                        EOL "Content-Type: application/octet-stream" EOL
+                        "Connection: close" EOL);
   if (ret < 0)
     return -1;
   written += ret;
@@ -406,6 +407,7 @@
   if ((ret = WriteString(fd, "HTTP/1.1 " + Itoa(code) + " " + status + EOL)) <
       0)
     return;
+  WriteString(fd, "Connection: close" EOL);
   WriteString(fd, "Location: " + url + EOL);
 }
 
diff --git a/unittest_key_EC.pem b/unittest_key_EC.pem
new file mode 100644
index 0000000..9e65a68
--- /dev/null
+++ b/unittest_key_EC.pem
@@ -0,0 +1,5 @@
+-----BEGIN PRIVATE KEY-----
+MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgGaguGj8Yb1KkqKHd
+ISblUsjtOCbzAuVpX81i02sm8FWhRANCAARBnuotwKOsuvjH6iwTDhOAi7Q5pLWz
+xDkZjg2pcfbfi9FFTvLYETas7B2W6fx9PUezUmHTFTDV2JZuMYYFdZOw
+-----END PRIVATE KEY-----
diff --git a/unittest_key_RSA4096.pem b/unittest_key_RSA4096.pem
new file mode 100644
index 0000000..5613910
--- /dev/null
+++ b/unittest_key_RSA4096.pem
@@ -0,0 +1,51 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIJKgIBAAKCAgEAu8vBB+DvDPWgO1IWli5VuXHkMWYtD+wzToKIP/NkiGYdf1b9
+EvCgrvS3J4CutKCHoz1N5Pc29DKZduDhLbqWzqvUldJ8kSKd967PRCdM5vJHmdox
+H8E7m3YROBrppcJG32B5TD5qbn6jqrsSnwjlGZN2RAuURQwalSxq3/ZttiEzEMDR
+o/7v5rQINlF0Rsud+HlNXrxzhR/LQyMV3d+/JvoEWPmz4unzpKyYAMOjdNTcMLB0
+ccULFYKfJtRCqTyfgEUbT+mGLTbDQSkzl6mcNfDg+3hu8lTjQH+6EjjDUSu6oOmV
+OmDf8tZPmGRdw/9R1PUx6A1yKQR6FkdkqRDOt1GvIpUpsfT36Gg59u1a6YyHKudB
+6igFzr3JcTgslQ9KUZK68j6Xr2AlbsffyJ5ltyuHT4gMtCppkuGUi1CZ6+i55LfA
+t1cVx6meE+zJYK46zu9GKgsXg72BNCi/3v1HievZwDcm5A04YQZMZDgtb74pN1Vz
+uCEzxCgpyx3b3Y/K8JI6n9xMeQNIPF2h9JeiO9qasV4vJ51PbnS61BHR1E9Sh845
+2QVT+mqJnA8uRp22+sF0wU5Ae33UAZxnKk9Ax+uz6iRP6wlILlUpl+Chsjla6+Ut
+RkE4NRJh2xdin+j9GCaXFVOzx0RPLAYrD9/cM6BOV7CKZ/2iqVnaXH6sY00CAwEA
+AQKCAgEAgf77ci68i6YD8sxSIkeUw7gZFDD8hIcmxPQKyOn874ZwaNOYf+Hd+P4h
+QtELHrH48oDfSN3wRn44SVhFx9ffyNSdZdC8teLWort5cl4aF8wi0Fd/pdGnJ2hF
+ZycKEdo4ISyxCpwyJKa5ONgifcA0hs3TEitJybolNJn4qWv2ahr1NGWlOPairnp1
+LNSZvt/4TCX77tZYyRBHLgQ9gMb/lUWAeO7xHOSB6b4nwm+q2Jb4jSO4l4CkuZEg
+BkrskiYK60nrLBgk72t3IcYZlqSsI5LIyoqFkm48mUtRTIfKfIfeusC2siCZJYpA
+subXGXPF+7p3f4C/Q7F7qaxl+7pMvN2UtnCY4lYppMKETquVT2vtEgZjSVkQTb0X
+wEYmipMsonVcSLL4xTQxT0KdY2JczH7xicIJwolED+eob64t4q64ljfC3tlPJakE
+O2GgyynLH+oojBsH8XCvuhODFL+/e1eQmV0gu0az3fcHY+lCKrYEhlVB9wVe5Afn
+2GH71deZKY+iF2E8RJXwGdmpN4PbLyWepqCm0TdMUrn5A37TBr++3B9d7/eyMt1o
+afMxDzAZ27HaT6eg6SB/LJdkezvs67jLcur18NFMevSxKc+G+B8g+7euc00sKaLu
+WIX2bf8kc6L2sLECERpXOBNRSQxY33vS72szF6P7rN0+Szb+aQECggEBAOSqtUSY
+DJbyjnxmhGodcbuubNyNZRrs/vXNcYHmBZbWvR9rxtWgA1LYJiAY4wvtc+Ciw/Y6
+468MG6ip4rLu/6lDkekH/ARxW6zErCfCDDg91cP1FXP/Cwo6COJ9V2imCbTsiMSi
+bVigG4Lb4FTgBkreOyxDjnIVzZwCrwtxZWpNwA2GlEBS6IsJqelnUi/roi4rLpCj
+Y5mLvL8YYPduead9TwesYsXdK2qBf6A034GNXxvzhV70HfhnI60ydi6pNRrWamru
+TBJEuY7CipzyevqM3drfkFZDKyEBEVnk7We4IpiaOkBfLsshvFqk9asWzts1eDa8
+GpOqM0RYRCXZya0CggEBANI+YWOrK2qSLxFBbCupNaxG++/tafOXsgjkmGaRxCHt
+IcvPTIzFBQOSCobF51KoZIriVEVKw1bXHFlibeQ1IEuh8yVBo3l6NO8JKlc/tdJf
+pfkUh5XjQWJW8UqWULb5CkJCEheenG0oy8zhjOERPDcQXRYOhIo8JSpWfJFtWSWk
+L/X7kfkEvQxV0omFCUg4sCxdBeqIEItYd0Td0SCmHPZIs2KgSmpLIPBH0BMibNkY
+ZeSaz5nWbw06Unhkas+ulm3S+IEjb7neuAWGPlIXnPch9hw2pdZf49XRW4fjc7Nr
++G+U2Jgjv81+Rn7nFK2Whh22XKL5aP2myoVESlvzdCECggEBAIc9DwgKhSehPPQG
+DbpUv7coaennFizejcwCPWd+C0AysJesvmQJxu1wONwy29VqEmaA3TT7jz0wBAu0
+rgb1ou8Qr3MK7doS0Q1VJBw/f8qjh5HlmVKJPJZHzIlnaBLUYFlIq3rgNZt81ciH
+Eh4ggJg25vg+3DhM/NWQIMa7wick5LkbJwMEBdR1WrBYExuUWM7FazzP5VAifPbo
+DDFKfVi5m8wGAETVkZ/kBv9RRf7xBZcaZ37JEhCfr1H3zj26hVXiCf5EAWmsi7IL
+DL/WCTW1qmCQaGUcRJ24a/KmmmIFXTCzxk/b+2jYAvX5KfKOArlS3k5A4dcDil6Z
+dXSNYeECggEBAIHzRMcSOde5W5ZS1cV25VIC3h5CpMaH8OdGRFzBpHVD2SvcifhI
+nvzB+/epw3130A14L5ZUy8CVXVRyXnI71CZrh5pzo9OmEaneBGnBW2UY8cGvSs7+
+lJ9wFdyAZIt0Cz9BD2XCB/YAzVdp4mYK/Skb2C2V855t5prwsjZBXGTDw1FLmcJN
+h3xkX6nYrRAS2fHR3aJFT9SRbccHRAfmJOilrxs68EQbA9UAzj/Fe3oEdpaCiecQ
+f7uxXOBFUS/lPd3MFQXdHWXJn/zqKQMczUyDlVeC/6YtxumWafjoQc+Y4Qo2+lmv
+XxJpBrHRqxpQe71JxqCFgLunqG4O89c594ECggEAcMlYhrO2+R7mEPFDZLwSvTMV
+BOof6hxLIW8PkLQ/8HHTTacC02sKFiR921sw9NadQ7G0cf23Fecmolg4brJUh9sQ
+evjdYMdqIYPJT5hYSkIkdTk0Ny+tN2Pt4fBTTv3N2D3Da/5ODfrVSj0ib89DXG5D
+bPahlFLIhKaVbXNe1RQL/8j4nFf8D9LwuEMOMYrUpSMw9ULT5dB34QN2TOnwW9JW
+Md7aSY5pK1j1Y8FoWCAFSw+o+yWq5DbTFvcEhttWrUoFl9YxTolbLt6sw6TLy12x
+9haQDvbfvRkg3Es31DEC8plsltfg5S9KwRqCchKKUm7cnAJFhB2/2C6JX2k0XQ==
+-----END RSA PRIVATE KEY-----
diff --git a/update_attempter.cc b/update_attempter.cc
index 0f0605d..24562e2 100644
--- a/update_attempter.cc
+++ b/update_attempter.cc
@@ -824,8 +824,8 @@
                                            system_state_->hardware()),
       false,
       session_id_);
-  auto filesystem_verifier_action =
-      std::make_unique<FilesystemVerifierAction>();
+  auto filesystem_verifier_action = std::make_unique<FilesystemVerifierAction>(
+      system_state_->boot_control()->GetDynamicPartitionControl());
   auto update_complete_action = std::make_unique<OmahaRequestAction>(
       system_state_,
       new OmahaEvent(OmahaEvent::kTypeUpdateComplete),
@@ -1383,6 +1383,7 @@
         case UpdateStatus::REPORTING_ERROR_EVENT:
         case UpdateStatus::ATTEMPTING_ROLLBACK:
         case UpdateStatus::DISABLED:
+        case UpdateStatus::CLEANUP_PREVIOUS_UPDATE:
           MarkDeltaUpdateFailure();
           break;
       }
diff --git a/update_attempter_android.cc b/update_attempter_android.cc
index c738e4e..7fc13e1 100644
--- a/update_attempter_android.cc
+++ b/update_attempter_android.cc
@@ -22,6 +22,7 @@
 #include <utility>
 
 #include <android-base/properties.h>
+#include <android-base/unique_fd.h>
 #include <base/bind.h>
 #include <base/logging.h>
 #include <base/strings/string_number_conversions.h>
@@ -30,6 +31,7 @@
 #include <brillo/strings/string_utils.h>
 #include <log/log_safetynet.h>
 
+#include "update_engine/cleanup_previous_update_action.h"
 #include "update_engine/common/constants.h"
 #include "update_engine/common/error_code_utils.h"
 #include "update_engine/common/file_fetcher.h"
@@ -38,6 +40,7 @@
 #include "update_engine/metrics_reporter_interface.h"
 #include "update_engine/metrics_utils.h"
 #include "update_engine/network_selector.h"
+#include "update_engine/payload_consumer/certificate_parser_interface.h"
 #include "update_engine/payload_consumer/delta_performer.h"
 #include "update_engine/payload_consumer/download_action.h"
 #include "update_engine/payload_consumer/file_descriptor.h"
@@ -45,6 +48,7 @@
 #include "update_engine/payload_consumer/filesystem_verifier_action.h"
 #include "update_engine/payload_consumer/payload_constants.h"
 #include "update_engine/payload_consumer/payload_metadata.h"
+#include "update_engine/payload_consumer/payload_verifier.h"
 #include "update_engine/payload_consumer/postinstall_runner_action.h"
 #include "update_engine/update_boot_flags_action.h"
 #include "update_engine/update_status_utils.h"
@@ -55,6 +59,7 @@
 #include "update_engine/libcurl_http_fetcher.h"
 #endif
 
+using android::base::unique_fd;
 using base::Bind;
 using base::Time;
 using base::TimeDelta;
@@ -94,6 +99,34 @@
   return default_value;
 }
 
+bool ParseKeyValuePairHeaders(const vector<string>& key_value_pair_headers,
+                              std::map<string, string>* headers,
+                              brillo::ErrorPtr* error) {
+  for (const string& key_value_pair : key_value_pair_headers) {
+    string key;
+    string value;
+    if (!brillo::string_utils::SplitAtFirst(
+            key_value_pair, "=", &key, &value, false)) {
+      return LogAndSetError(
+          error, FROM_HERE, "Passed invalid header: " + key_value_pair);
+    }
+    if (!headers->emplace(key, value).second)
+      return LogAndSetError(error, FROM_HERE, "Passed repeated key: " + key);
+  }
+  return true;
+}
+
+// Unique identifier for the payload. An empty string means that the payload
+// can't be resumed.
+string GetPayloadId(const std::map<string, string>& headers) {
+  return (headers.count(kPayloadPropertyFileHash)
+              ? headers.at(kPayloadPropertyFileHash)
+              : "") +
+         (headers.count(kPayloadPropertyMetadataHash)
+              ? headers.at(kPayloadPropertyMetadataHash)
+              : "");
+}
+
 }  // namespace
 
 UpdateAttempterAndroid::UpdateAttempterAndroid(
@@ -125,6 +158,12 @@
   } else {
     SetStatusAndNotify(UpdateStatus::IDLE);
     UpdatePrefsAndReportUpdateMetricsOnReboot();
+#ifdef _UE_SIDELOAD
+    LOG(INFO) << "Skip ScheduleCleanupPreviousUpdate in sideload because "
+              << "ApplyPayload will call it later.";
+#else
+    ScheduleCleanupPreviousUpdate();
+#endif
   }
 }
 
@@ -145,22 +184,11 @@
   DCHECK(status_ == UpdateStatus::IDLE);
 
   std::map<string, string> headers;
-  for (const string& key_value_pair : key_value_pair_headers) {
-    string key;
-    string value;
-    if (!brillo::string_utils::SplitAtFirst(
-            key_value_pair, "=", &key, &value, false)) {
-      return LogAndSetError(
-          error, FROM_HERE, "Passed invalid header: " + key_value_pair);
-    }
-    if (!headers.emplace(key, value).second)
-      return LogAndSetError(error, FROM_HERE, "Passed repeated key: " + key);
+  if (!ParseKeyValuePairHeaders(key_value_pair_headers, &headers, error)) {
+    return false;
   }
 
-  // Unique identifier for the payload. An empty string means that the payload
-  // can't be resumed.
-  string payload_id = (headers[kPayloadPropertyFileHash] +
-                       headers[kPayloadPropertyMetadataHash]);
+  string payload_id = GetPayloadId(headers);
 
   // Setup the InstallPlan based on the request.
   install_plan_ = InstallPlan();
@@ -196,15 +224,22 @@
   install_plan_.is_resume = !payload_id.empty() &&
                             DeltaPerformer::CanResumeUpdate(prefs_, payload_id);
   if (!install_plan_.is_resume) {
-    if (!DeltaPerformer::ResetUpdateProgress(prefs_, false)) {
+    // No need to reset dynamic_partititon_metadata_updated. If previous calls
+    // to AllocateSpaceForPayload uses the same payload_id, reuse preallocated
+    // space. Otherwise, DeltaPerformer re-allocates space when the payload is
+    // applied.
+    if (!DeltaPerformer::ResetUpdateProgress(
+            prefs_,
+            false /* quick */,
+            true /* skip_dynamic_partititon_metadata_updated */)) {
       LOG(WARNING) << "Unable to reset the update progress.";
     }
     if (!prefs_->SetString(kPrefsUpdateCheckResponseHash, payload_id)) {
       LOG(WARNING) << "Unable to save the update check response hash.";
     }
   }
-  install_plan_.source_slot = boot_control_->GetCurrentSlot();
-  install_plan_.target_slot = install_plan_.source_slot == 0 ? 1 : 0;
+  install_plan_.source_slot = GetCurrentSlot();
+  install_plan_.target_slot = GetTargetSlot();
 
   install_plan_.powerwash_required =
       GetHeaderAsBool(headers[kPayloadPropertyPowerwash], false);
@@ -212,20 +247,8 @@
   install_plan_.switch_slot_on_reboot =
       GetHeaderAsBool(headers[kPayloadPropertySwitchSlotOnReboot], true);
 
-  install_plan_.run_post_install = true;
-  // Optionally skip post install if and only if:
-  // a) we're resuming
-  // b) post install has already succeeded before
-  // c) RUN_POST_INSTALL is set to 0.
-  if (install_plan_.is_resume && prefs_->Exists(kPrefsPostInstallSucceeded)) {
-    bool post_install_succeeded = false;
-    if (prefs_->GetBoolean(kPrefsPostInstallSucceeded,
-                           &post_install_succeeded) &&
-        post_install_succeeded) {
-      install_plan_.run_post_install =
-          GetHeaderAsBool(headers[kPayloadPropertyRunPostInstall], true);
-    }
-  }
+  install_plan_.run_post_install =
+      GetHeaderAsBool(headers[kPayloadPropertyRunPostInstall], true);
 
   // Skip writing verity if we're resuming and verity has already been written.
   install_plan_.write_verity = true;
@@ -288,6 +311,19 @@
   return true;
 }
 
+bool UpdateAttempterAndroid::ApplyPayload(
+    int fd,
+    int64_t payload_offset,
+    int64_t payload_size,
+    const vector<string>& key_value_pair_headers,
+    brillo::ErrorPtr* error) {
+  payload_fd_.reset(dup(fd));
+  const string payload_url = "fd://" + std::to_string(payload_fd_.get());
+
+  return ApplyPayload(
+      payload_url, payload_offset, payload_size, key_value_pair_headers, error);
+}
+
 bool UpdateAttempterAndroid::SuspendUpdate(brillo::ErrorPtr* error) {
   if (!processor_->IsRunning())
     return LogAndSetError(error, FROM_HERE, "No ongoing update to suspend.");
@@ -314,18 +350,19 @@
             << UpdateStatusToString(status_) << " to UpdateStatus::IDLE";
 
   switch (status_) {
-    case UpdateStatus::IDLE:
+    case UpdateStatus::IDLE: {
+      if (!boot_control_->GetDynamicPartitionControl()->ResetUpdate(prefs_)) {
+        LOG(WARNING) << "Failed to reset snapshots. UpdateStatus is IDLE but"
+                     << "space might not be freed.";
+      }
       return true;
+    }
 
     case UpdateStatus::UPDATED_NEED_REBOOT: {
-      // Remove the reboot marker so that if the machine is rebooted
-      // after resetting to idle state, it doesn't go back to
-      // UpdateStatus::UPDATED_NEED_REBOOT state.
-      bool ret_value = prefs_->Delete(kPrefsUpdateCompletedOnBootId);
-      ClearMetricsPrefs();
+      bool ret_value = true;
 
       // Update the boot flags so the current slot has higher priority.
-      if (!boot_control_->SetActiveBootSlot(boot_control_->GetCurrentSlot()))
+      if (!boot_control_->SetActiveBootSlot(GetCurrentSlot()))
         ret_value = false;
 
       // Mark the current slot as successful again, since marking it as active
@@ -334,6 +371,20 @@
       if (!boot_control_->MarkBootSuccessfulAsync(Bind([](bool successful) {})))
         ret_value = false;
 
+      // Resets the warm reset property since we won't switch the slot.
+      hardware_->SetWarmReset(false);
+
+      // Remove update progress for DeltaPerformer and remove snapshots.
+      if (!boot_control_->GetDynamicPartitionControl()->ResetUpdate(prefs_))
+        ret_value = false;
+
+      // Remove the reboot marker so that if the machine is rebooted
+      // after resetting to idle state, it doesn't go back to
+      // UpdateStatus::UPDATED_NEED_REBOOT state.
+      if (!prefs_->Delete(kPrefsUpdateCompletedOnBootId))
+        ret_value = false;
+      ClearMetricsPrefs();
+
       if (!ret_value) {
         return LogAndSetError(
             error, FROM_HERE, "Failed to reset the status to ");
@@ -352,8 +403,10 @@
   }
 }
 
-bool UpdateAttempterAndroid::VerifyPayloadApplicable(
-    const std::string& metadata_filename, brillo::ErrorPtr* error) {
+bool UpdateAttempterAndroid::VerifyPayloadParseManifest(
+    const std::string& metadata_filename,
+    DeltaArchiveManifest* manifest,
+    brillo::ErrorPtr* error) {
   FileDescriptorPtr fd(new EintrSafeFileDescriptor);
   if (!fd->Open(metadata_filename.c_str(), O_RDONLY)) {
     return LogAndSetError(
@@ -395,24 +448,39 @@
   }
   fd->Close();
 
-  string public_key;
-  if (!utils::ReadFile(constants::kUpdatePayloadPublicKeyPath, &public_key)) {
-    return LogAndSetError(error, FROM_HERE, "Failed to read public key.");
+  auto payload_verifier = PayloadVerifier::CreateInstanceFromZipPath(
+      constants::kUpdateCertificatesPath);
+  if (!payload_verifier) {
+    return LogAndSetError(error,
+                          FROM_HERE,
+                          "Failed to create the payload verifier from " +
+                              std::string(constants::kUpdateCertificatesPath));
   }
-  errorcode =
-      payload_metadata.ValidateMetadataSignature(metadata, "", public_key);
+  errorcode = payload_metadata.ValidateMetadataSignature(
+      metadata, "", *payload_verifier);
   if (errorcode != ErrorCode::kSuccess) {
     return LogAndSetError(error,
                           FROM_HERE,
                           "Failed to validate metadata signature: " +
                               utils::ErrorCodeToString(errorcode));
   }
-  DeltaArchiveManifest manifest;
-  if (!payload_metadata.GetManifest(metadata, &manifest)) {
+  if (!payload_metadata.GetManifest(metadata, manifest)) {
     return LogAndSetError(error, FROM_HERE, "Failed to parse manifest.");
   }
 
-  BootControlInterface::Slot current_slot = boot_control_->GetCurrentSlot();
+  return true;
+}
+
+bool UpdateAttempterAndroid::VerifyPayloadApplicable(
+    const std::string& metadata_filename, brillo::ErrorPtr* error) {
+  DeltaArchiveManifest manifest;
+  TEST_AND_RETURN_FALSE(
+      VerifyPayloadParseManifest(metadata_filename, &manifest, error));
+
+  FileDescriptorPtr fd(new EintrSafeFileDescriptor);
+  ErrorCode errorcode;
+
+  BootControlInterface::Slot current_slot = GetCurrentSlot();
   for (const PartitionUpdate& partition : manifest.partitions()) {
     if (!partition.has_old_partition_info())
       continue;
@@ -453,6 +521,11 @@
                                             ErrorCode code) {
   LOG(INFO) << "Processing Done.";
 
+  if (status_ == UpdateStatus::CLEANUP_PREVIOUS_UPDATE) {
+    TerminateUpdateAndNotify(code);
+    return;
+  }
+
   switch (code) {
     case ErrorCode::kSuccess:
       // Update succeeded.
@@ -497,9 +570,15 @@
   // Reset download progress regardless of whether or not the download
   // action succeeded.
   const string type = action->Type();
-  if (type == DownloadAction::StaticType()) {
-    download_progress_ = 0;
+  if (type == CleanupPreviousUpdateAction::StaticType() ||
+      (type == NoOpAction::StaticType() &&
+       status_ == UpdateStatus::CLEANUP_PREVIOUS_UPDATE)) {
+    cleanup_previous_update_code_ = code;
+    NotifyCleanupPreviousUpdateCallbacksAndClear();
   }
+  // download_progress_ is actually used by other actions, such as
+  // filesystem_verifier_action. Therefore we always clear it.
+  download_progress_ = 0;
   if (type == PostinstallRunnerAction::StaticType()) {
     bool succeeded =
         code == ErrorCode::kSuccess || code == ErrorCode::kUpdatedButNotActive;
@@ -509,9 +588,15 @@
     // If an action failed, the ActionProcessor will cancel the whole thing.
     return;
   }
+  if (type == UpdateBootFlagsAction::StaticType()) {
+    SetStatusAndNotify(UpdateStatus::CLEANUP_PREVIOUS_UPDATE);
+  }
   if (type == DownloadAction::StaticType()) {
-    SetStatusAndNotify(UpdateStatus::FINALIZING);
+    auto download_action = static_cast<DownloadAction*>(action);
+    install_plan_ = *download_action->install_plan();
+    SetStatusAndNotify(UpdateStatus::VERIFYING);
   } else if (type == FilesystemVerifierAction::StaticType()) {
+    SetStatusAndNotify(UpdateStatus::FINALIZING);
     prefs_->SetBoolean(kPrefsVerityWritten, true);
   }
 }
@@ -562,6 +647,11 @@
   }
 }
 
+void UpdateAttempterAndroid::OnVerifyProgressUpdate(double progress) {
+  assert(status_ == UpdateStatus::VERIFYING);
+  ProgressUpdate(progress);
+}
+
 void UpdateAttempterAndroid::ScheduleProcessingStart() {
   LOG(INFO) << "Scheduling an action processor start.";
   brillo::MessageLoop::current()->PostTask(
@@ -576,13 +666,22 @@
     return;
   }
 
-  boot_control_->Cleanup();
+  if (status_ == UpdateStatus::CLEANUP_PREVIOUS_UPDATE) {
+    LOG(INFO) << "Terminating cleanup previous update.";
+    SetStatusAndNotify(UpdateStatus::IDLE);
+    for (auto observer : daemon_state_->service_observers())
+      observer->SendPayloadApplicationComplete(error_code);
+    return;
+  }
+
+  boot_control_->GetDynamicPartitionControl()->Cleanup();
 
   download_progress_ = 0;
   UpdateStatus new_status =
       (error_code == ErrorCode::kSuccess ? UpdateStatus::UPDATED_NEED_REBOOT
                                          : UpdateStatus::IDLE);
   SetStatusAndNotify(new_status);
+  payload_fd_.reset();
 
   // The network id is only applicable to one download attempt and once it's
   // done the network id should not be re-used anymore.
@@ -596,6 +695,9 @@
   CollectAndReportUpdateMetricsOnUpdateFinished(error_code);
   ClearMetricsPrefs();
   if (error_code == ErrorCode::kSuccess) {
+    // We should only reset the PayloadAttemptNumber if the update succeeds, or
+    // we switch to a different payload.
+    prefs_->Delete(kPrefsPayloadAttemptNumber);
     metrics_utils::SetSystemUpdatedMarker(clock_.get(), prefs_);
     // Clear the total bytes downloaded if and only if the update succeeds.
     prefs_->SetInt64(kPrefsTotalBytesDownloaded, 0);
@@ -623,6 +725,9 @@
   // Actions:
   auto update_boot_flags_action =
       std::make_unique<UpdateBootFlagsAction>(boot_control_);
+  auto cleanup_previous_update_action =
+      boot_control_->GetDynamicPartitionControl()
+          ->GetCleanupPreviousUpdateAction(boot_control_, prefs_, this);
   auto install_plan_action = std::make_unique<InstallPlanAction>(install_plan_);
   auto download_action =
       std::make_unique<DownloadAction>(prefs_,
@@ -633,10 +738,11 @@
                                        true /* interactive */);
   download_action->set_delegate(this);
   download_action->set_base_offset(base_offset_);
-  auto filesystem_verifier_action =
-      std::make_unique<FilesystemVerifierAction>();
+  auto filesystem_verifier_action = std::make_unique<FilesystemVerifierAction>(
+      boot_control_->GetDynamicPartitionControl());
   auto postinstall_runner_action =
       std::make_unique<PostinstallRunnerAction>(boot_control_, hardware_);
+  filesystem_verifier_action->set_delegate(this);
   postinstall_runner_action->set_delegate(this);
 
   // Bond them together. We have to use the leaf-types when calling
@@ -647,6 +753,7 @@
               postinstall_runner_action.get());
 
   processor_->EnqueueAction(std::move(update_boot_flags_action));
+  processor_->EnqueueAction(std::move(cleanup_previous_update_action));
   processor_->EnqueueAction(std::move(install_plan_action));
   processor_->EnqueueAction(std::move(download_action));
   processor_->EnqueueAction(std::move(filesystem_verifier_action));
@@ -731,11 +838,15 @@
         total_bytes_downloaded;
 
     int download_overhead_percentage = 0;
-    if (current_bytes_downloaded > 0) {
+    if (total_bytes_downloaded >= payload_size) {
+      CHECK_GT(payload_size, 0);
       download_overhead_percentage =
-          (total_bytes_downloaded - current_bytes_downloaded) * 100ull /
-          current_bytes_downloaded;
+          (total_bytes_downloaded - payload_size) * 100ull / payload_size;
+    } else {
+      LOG(WARNING) << "Downloaded bytes " << total_bytes_downloaded
+                   << " is smaller than the payload size " << payload_size;
     }
+
     metrics_reporter_->ReportSuccessfulUpdateMetrics(
         static_cast<int>(attempt_number),
         0,  // update abandoned count
@@ -826,10 +937,118 @@
   CHECK(prefs_);
   prefs_->Delete(kPrefsCurrentBytesDownloaded);
   prefs_->Delete(kPrefsNumReboots);
-  prefs_->Delete(kPrefsPayloadAttemptNumber);
   prefs_->Delete(kPrefsSystemUpdatedMarker);
   prefs_->Delete(kPrefsUpdateTimestampStart);
   prefs_->Delete(kPrefsUpdateBootTimestampStart);
 }
 
+BootControlInterface::Slot UpdateAttempterAndroid::GetCurrentSlot() const {
+  return boot_control_->GetCurrentSlot();
+}
+
+BootControlInterface::Slot UpdateAttempterAndroid::GetTargetSlot() const {
+  return GetCurrentSlot() == 0 ? 1 : 0;
+}
+
+uint64_t UpdateAttempterAndroid::AllocateSpaceForPayload(
+    const std::string& metadata_filename,
+    const vector<string>& key_value_pair_headers,
+    brillo::ErrorPtr* error) {
+  DeltaArchiveManifest manifest;
+  if (!VerifyPayloadParseManifest(metadata_filename, &manifest, error)) {
+    return 0;
+  }
+  std::map<string, string> headers;
+  if (!ParseKeyValuePairHeaders(key_value_pair_headers, &headers, error)) {
+    return 0;
+  }
+
+  string payload_id = GetPayloadId(headers);
+  uint64_t required_size = 0;
+  if (!DeltaPerformer::PreparePartitionsForUpdate(prefs_,
+                                                  boot_control_,
+                                                  GetTargetSlot(),
+                                                  manifest,
+                                                  payload_id,
+                                                  &required_size)) {
+    if (required_size == 0) {
+      LogAndSetError(error, FROM_HERE, "Failed to allocate space for payload.");
+      return 0;
+    } else {
+      LOG(ERROR) << "Insufficient space for payload: " << required_size
+                 << " bytes";
+      return required_size;
+    }
+  }
+
+  LOG(INFO) << "Successfully allocated space for payload.";
+  return 0;
+}
+
+void UpdateAttempterAndroid::CleanupSuccessfulUpdate(
+    std::unique_ptr<CleanupSuccessfulUpdateCallbackInterface> callback,
+    brillo::ErrorPtr* error) {
+  if (cleanup_previous_update_code_.has_value()) {
+    LOG(INFO) << "CleanupSuccessfulUpdate has previously completed with "
+              << utils::ErrorCodeToString(*cleanup_previous_update_code_);
+    if (callback) {
+      callback->OnCleanupComplete(
+          static_cast<int32_t>(*cleanup_previous_update_code_));
+    }
+    return;
+  }
+  if (callback) {
+    auto callback_ptr = callback.get();
+    cleanup_previous_update_callbacks_.emplace_back(std::move(callback));
+    callback_ptr->RegisterForDeathNotifications(
+        base::Bind(&UpdateAttempterAndroid::RemoveCleanupPreviousUpdateCallback,
+                   base::Unretained(this),
+                   base::Unretained(callback_ptr)));
+  }
+  ScheduleCleanupPreviousUpdate();
+}
+
+void UpdateAttempterAndroid::ScheduleCleanupPreviousUpdate() {
+  // If a previous CleanupSuccessfulUpdate call has not finished, or an update
+  // is in progress, skip enqueueing the action.
+  if (processor_->IsRunning()) {
+    LOG(INFO) << "Already processing an update. CleanupPreviousUpdate should "
+              << "be done when the current update finishes.";
+    return;
+  }
+  LOG(INFO) << "Scheduling CleanupPreviousUpdateAction.";
+  auto action =
+      boot_control_->GetDynamicPartitionControl()
+          ->GetCleanupPreviousUpdateAction(boot_control_, prefs_, this);
+  processor_->EnqueueAction(std::move(action));
+  processor_->set_delegate(this);
+  SetStatusAndNotify(UpdateStatus::CLEANUP_PREVIOUS_UPDATE);
+  processor_->StartProcessing();
+}
+
+void UpdateAttempterAndroid::OnCleanupProgressUpdate(double progress) {
+  for (auto&& callback : cleanup_previous_update_callbacks_) {
+    callback->OnCleanupProgressUpdate(progress);
+  }
+}
+
+void UpdateAttempterAndroid::NotifyCleanupPreviousUpdateCallbacksAndClear() {
+  CHECK(cleanup_previous_update_code_.has_value());
+  for (auto&& callback : cleanup_previous_update_callbacks_) {
+    callback->OnCleanupComplete(
+        static_cast<int32_t>(*cleanup_previous_update_code_));
+  }
+  cleanup_previous_update_callbacks_.clear();
+}
+
+void UpdateAttempterAndroid::RemoveCleanupPreviousUpdateCallback(
+    CleanupSuccessfulUpdateCallbackInterface* callback) {
+  auto end_it =
+      std::remove_if(cleanup_previous_update_callbacks_.begin(),
+                     cleanup_previous_update_callbacks_.end(),
+                     [&](const auto& e) { return e.get() == callback; });
+  cleanup_previous_update_callbacks_.erase(
+      end_it, cleanup_previous_update_callbacks_.end());
+}
+
 }  // namespace chromeos_update_engine
diff --git a/update_attempter_android.h b/update_attempter_android.h
index e4b40de..55003a0 100644
--- a/update_attempter_android.h
+++ b/update_attempter_android.h
@@ -23,6 +23,7 @@
 #include <string>
 #include <vector>
 
+#include <android-base/unique_fd.h>
 #include <base/time/time.h>
 
 #include "update_engine/client_library/include/update_engine/update_status.h"
@@ -36,6 +37,7 @@
 #include "update_engine/metrics_utils.h"
 #include "update_engine/network_selector_interface.h"
 #include "update_engine/payload_consumer/download_action.h"
+#include "update_engine/payload_consumer/filesystem_verifier_action.h"
 #include "update_engine/payload_consumer/postinstall_runner_action.h"
 #include "update_engine/service_delegate_android_interface.h"
 #include "update_engine/service_observer_interface.h"
@@ -46,7 +48,9 @@
     : public ServiceDelegateAndroidInterface,
       public ActionProcessorDelegate,
       public DownloadActionDelegate,
-      public PostinstallRunnerAction::DelegateInterface {
+      public FilesystemVerifyDelegate,
+      public PostinstallRunnerAction::DelegateInterface,
+      public CleanupPreviousUpdateActionDelegateInterface {
  public:
   using UpdateStatus = update_engine::UpdateStatus;
 
@@ -65,12 +69,24 @@
                     int64_t payload_size,
                     const std::vector<std::string>& key_value_pair_headers,
                     brillo::ErrorPtr* error) override;
+  bool ApplyPayload(int fd,
+                    int64_t payload_offset,
+                    int64_t payload_size,
+                    const std::vector<std::string>& key_value_pair_headers,
+                    brillo::ErrorPtr* error) override;
   bool SuspendUpdate(brillo::ErrorPtr* error) override;
   bool ResumeUpdate(brillo::ErrorPtr* error) override;
   bool CancelUpdate(brillo::ErrorPtr* error) override;
   bool ResetStatus(brillo::ErrorPtr* error) override;
   bool VerifyPayloadApplicable(const std::string& metadata_filename,
                                brillo::ErrorPtr* error) override;
+  uint64_t AllocateSpaceForPayload(
+      const std::string& metadata_filename,
+      const std::vector<std::string>& key_value_pair_headers,
+      brillo::ErrorPtr* error) override;
+  void CleanupSuccessfulUpdate(
+      std::unique_ptr<CleanupSuccessfulUpdateCallbackInterface> callback,
+      brillo::ErrorPtr* error) override;
 
   // ActionProcessorDelegate methods:
   void ProcessingDone(const ActionProcessor* processor,
@@ -87,9 +103,15 @@
   bool ShouldCancel(ErrorCode* cancel_reason) override;
   void DownloadComplete() override;
 
+  // FilesystemVerifyDelegate overrides
+  void OnVerifyProgressUpdate(double progress) override;
+
   // PostinstallRunnerAction::DelegateInterface
   void ProgressUpdate(double progress) override;
 
+  // CleanupPreviousUpdateActionDelegateInterface
+  void OnCleanupProgressUpdate(double progress) override;
+
  private:
   friend class UpdateAttempterAndroidTest;
 
@@ -151,11 +173,31 @@
   void UpdatePrefsOnUpdateStart(bool is_resume);
 
   // Prefs to delete:
-  //   |kPrefsNumReboots|, |kPrefsPayloadAttemptNumber|,
+  //   |kPrefsNumReboots|, |kPrefsCurrentBytesDownloaded|,
   //   |kPrefsSystemUpdatedMarker|, |kPrefsUpdateTimestampStart|,
-  //   |kPrefsUpdateBootTimestampStart|, |kPrefsCurrentBytesDownloaded|
+  //   |kPrefsUpdateBootTimestampStart|
   void ClearMetricsPrefs();
 
+  // Return source and target slots for update.
+  BootControlInterface::Slot GetCurrentSlot() const;
+  BootControlInterface::Slot GetTargetSlot() const;
+
+  // Helper of public VerifyPayloadApplicable. Return the parsed manifest in
+  // |manifest|.
+  static bool VerifyPayloadParseManifest(const std::string& metadata_filename,
+                                         DeltaArchiveManifest* manifest,
+                                         brillo::ErrorPtr* error);
+
+  // Enqueue and run a CleanupPreviousUpdateAction.
+  void ScheduleCleanupPreviousUpdate();
+
+  // Notify and clear |cleanup_previous_update_callbacks_|.
+  void NotifyCleanupPreviousUpdateCallbacksAndClear();
+
+  // Remove |callback| from |cleanup_previous_update_callbacks_|.
+  void RemoveCleanupPreviousUpdateCallback(
+      CleanupSuccessfulUpdateCallbackInterface* callback);
+
   DaemonStateInterface* daemon_state_;
 
   // DaemonStateAndroid pointers.
@@ -191,6 +233,14 @@
 
   std::unique_ptr<MetricsReporterInterface> metrics_reporter_;
 
+  ::android::base::unique_fd payload_fd_;
+
+  std::vector<std::unique_ptr<CleanupSuccessfulUpdateCallbackInterface>>
+      cleanup_previous_update_callbacks_;
+  // Result of previous CleanupPreviousUpdateAction. std::nullopt if
+  // CleanupPreviousUpdateAction has not been executed.
+  std::optional<ErrorCode> cleanup_previous_update_code_{std::nullopt};
+
   DISALLOW_COPY_AND_ASSIGN(UpdateAttempterAndroid);
 };
 
diff --git a/update_attempter_android_unittest.cc b/update_attempter_android_unittest.cc
index 2593d44..721b735 100644
--- a/update_attempter_android_unittest.cc
+++ b/update_attempter_android_unittest.cc
@@ -18,6 +18,7 @@
 
 #include <memory>
 #include <string>
+#include <utility>
 
 #include <android-base/properties.h>
 #include <base/time/time.h>
@@ -57,6 +58,11 @@
     update_attempter_android_.status_ = status;
   }
 
+  void AddPayload(InstallPlan::Payload&& payload) {
+    update_attempter_android_.install_plan_.payloads.push_back(
+        std::move(payload));
+  }
+
   UpdateAttempterAndroid update_attempter_android_{
       &daemon_state_, &prefs_, &boot_control_, &hardware_};
 
@@ -111,9 +117,10 @@
   update_attempter_android_.Init();
   // Check that we reset the metric prefs.
   EXPECT_FALSE(prefs_.Exists(kPrefsNumReboots));
-  EXPECT_FALSE(prefs_.Exists(kPrefsPayloadAttemptNumber));
   EXPECT_FALSE(prefs_.Exists(kPrefsUpdateTimestampStart));
   EXPECT_FALSE(prefs_.Exists(kPrefsSystemUpdatedMarker));
+  // PayloadAttemptNumber should persist across reboots.
+  EXPECT_TRUE(prefs_.Exists(kPrefsPayloadAttemptNumber));
 }
 
 TEST_F(UpdateAttempterAndroidTest, ReportMetricsOnUpdateTerminated) {
@@ -142,9 +149,13 @@
       .Times(1);
   EXPECT_CALL(*metrics_reporter_,
               ReportSuccessfulUpdateMetrics(
-                  2, 0, _, _, _, _, duration, duration_uptime, 3, _))
+                  2, 0, _, 50, _, _, duration, duration_uptime, 3, _))
       .Times(1);
 
+  // Adds a payload of 50 bytes to the InstallPlan.
+  InstallPlan::Payload payload;
+  payload.size = 50;
+  AddPayload(std::move(payload));
   SetUpdateStatus(UpdateStatus::UPDATE_AVAILABLE);
   update_attempter_android_.ProcessingDone(nullptr, ErrorCode::kSuccess);
 
@@ -178,15 +189,20 @@
                   _,
                   _,
                   _,
-                  _,
+                  50,
                   test_utils::DownloadSourceMatcher(total_bytes),
-                  125,
+                  80,
                   _,
                   _,
                   _,
                   _))
       .Times(1);
 
+  // Adds a payload of 50 bytes to the InstallPlan.
+  InstallPlan::Payload payload;
+  payload.size = 50;
+  AddPayload(std::move(payload));
+
   // The first update fails after receiving 50 bytes in total.
   update_attempter_android_.BytesReceived(30, 50, 200);
   update_attempter_android_.ProcessingDone(nullptr, ErrorCode::kError);
@@ -198,7 +214,7 @@
       metrics_utils::GetPersistedValue(kPrefsTotalBytesDownloaded, &prefs_));
 
   // The second update succeeds after receiving 40 bytes, which leads to a
-  // overhead of 50 / 40 = 125%.
+  // overhead of (90 - 50) / 50 = 80%.
   update_attempter_android_.BytesReceived(40, 40, 50);
   update_attempter_android_.ProcessingDone(nullptr, ErrorCode::kSuccess);
   // Both prefs should be cleared.
diff --git a/update_attempter_unittest.cc b/update_attempter_unittest.cc
index 7466aba..354416e 100644
--- a/update_attempter_unittest.cc
+++ b/update_attempter_unittest.cc
@@ -665,7 +665,8 @@
   EXPECT_EQ(
       ErrorCode::kOmahaResponseHandlerError,
       GetErrorCodeForAction(&omaha_response_handler_action, ErrorCode::kError));
-  FilesystemVerifierAction filesystem_verifier_action;
+  DynamicPartitionControlStub dynamic_control_stub;
+  FilesystemVerifierAction filesystem_verifier_action(&dynamic_control_stub);
   EXPECT_EQ(
       ErrorCode::kFilesystemVerifierError,
       GetErrorCodeForAction(&filesystem_verifier_action, ErrorCode::kError));
diff --git a/update_boot_flags_action.cc b/update_boot_flags_action.cc
index 97ef7f2..ee92ae0 100644
--- a/update_boot_flags_action.cc
+++ b/update_boot_flags_action.cc
@@ -50,8 +50,11 @@
   }
 }
 
-void UpdateBootFlagsAction::CompleteUpdateBootFlags(bool successful) {
+void UpdateBootFlagsAction::TerminateProcessing() {
   is_running_ = false;
+}
+
+void UpdateBootFlagsAction::CompleteUpdateBootFlags(bool successful) {
   if (!successful) {
     // We ignore the failure for now because if the updating boot flags is flaky
     // or has a bug in a specific release, then blocking the update can cause
@@ -61,6 +64,18 @@
     // TODO(ahassani): Add new error code metric for kUpdateBootFlagsFailed.
     LOG(ERROR) << "Updating boot flags failed, but ignoring its failure.";
   }
+
+  // As the callback to MarkBootSuccessfulAsync, this function can still be
+  // called even after the current UpdateBootFlagsAction object gets destroyed by
+  // the action processor. In this case, check the value of the static variable
+  // |is_running_| and skip executing the callback function.
+  if (!is_running_) {
+    LOG(INFO) << "UpdateBootFlagsAction is no longer running.";
+    return;
+  }
+
+  is_running_ = false;
+
   updated_boot_flags_ = true;
   processor_->ActionComplete(this, ErrorCode::kSuccess);
 }
diff --git a/update_boot_flags_action.h b/update_boot_flags_action.h
index afa2c3f..892aab7 100644
--- a/update_boot_flags_action.h
+++ b/update_boot_flags_action.h
@@ -30,6 +30,8 @@
 
   void PerformAction() override;
 
+  void TerminateProcessing() override;
+
   static std::string StaticType() { return "UpdateBootFlagsAction"; }
   std::string Type() const override { return StaticType(); }
 
diff --git a/update_engine.conf b/update_engine.conf
index af213ad..b6ca3c4 100644
--- a/update_engine.conf
+++ b/update_engine.conf
@@ -1,2 +1,2 @@
 PAYLOAD_MAJOR_VERSION=2
-PAYLOAD_MINOR_VERSION=6
+PAYLOAD_MINOR_VERSION=7
diff --git a/update_engine.rc b/update_engine.rc
index a7d6235..b9f80fc 100644
--- a/update_engine.rc
+++ b/update_engine.rc
@@ -1,8 +1,8 @@
 service update_engine /system/bin/update_engine --logtostderr --logtofile --foreground
     class late_start
     user root
-    group root system wakelock inet cache
-    writepid /dev/cpuset/system-background/tasks
+    group root system wakelock inet cache media_rw
+    writepid /dev/cpuset/system-background/tasks /dev/blkio/background/tasks
     disabled
 
 on property:ro.boot.slot_suffix=*
diff --git a/update_engine_client_android.cc b/update_engine_client_android.cc
index 6863799..1a68cf4 100644
--- a/update_engine_client_android.cc
+++ b/update_engine_client_android.cc
@@ -72,12 +72,15 @@
   // Called whenever the UpdateEngine daemon dies.
   void UpdateEngineServiceDied();
 
+  static std::vector<android::String16> ParseHeaders(const std::string& arg);
+
   // Copy of argc and argv passed to main().
   int argc_;
   char** argv_;
 
   android::sp<android::os::IUpdateEngine> service_;
   android::sp<android::os::BnUpdateEngineCallback> callback_;
+  android::sp<android::os::BnUpdateEngineCallback> cleanup_callback_;
 
   brillo::BinderWatcher binder_watcher_;
 };
@@ -123,15 +126,16 @@
   DEFINE_string(headers,
                 "",
                 "A list of key-value pairs, one element of the list per line. "
-                "Used when --update is passed.");
+                "Used when --update or --allocate is passed.");
 
   DEFINE_bool(verify,
               false,
               "Given payload metadata, verify if the payload is applicable.");
+  DEFINE_bool(allocate, false, "Given payload metadata, allocate space.");
   DEFINE_string(metadata,
                 "/data/ota_package/metadata",
                 "The path to the update payload metadata. "
-                "Used when --verify is passed.");
+                "Used when --verify or --allocate is passed.");
 
   DEFINE_bool(suspend, false, "Suspend an ongoing update and exit.");
   DEFINE_bool(resume, false, "Resume a suspended update.");
@@ -141,7 +145,10 @@
               false,
               "Follow status update changes until a final state is reached. "
               "Exit status is 0 if the update succeeded, and 1 otherwise.");
-
+  DEFINE_bool(merge,
+              false,
+              "Wait for previous update to merge. "
+              "Only available after rebooting to new slot.");
   // Boilerplate init commands.
   base::CommandLine::Init(argc_, argv_);
   brillo::FlagHelper::Init(argc_, argv_, "Android Update Engine Client");
@@ -200,6 +207,36 @@
     return ExitWhenIdle(status);
   }
 
+  if (FLAGS_allocate) {
+    auto headers = ParseHeaders(FLAGS_headers);
+    int64_t ret = 0;
+    Status status = service_->allocateSpaceForPayload(
+        android::String16{FLAGS_metadata.data(), FLAGS_metadata.size()},
+        headers,
+        &ret);
+    if (status.isOk()) {
+      if (ret == 0) {
+        LOG(INFO) << "Successfully allocated space for payload.";
+      } else {
+        LOG(INFO) << "Insufficient space; required " << ret << " bytes.";
+      }
+    } else {
+      LOG(INFO) << "Allocation failed.";
+    }
+    return ExitWhenIdle(status);
+  }
+
+  if (FLAGS_merge) {
+    // Register a callback object with the service.
+    cleanup_callback_ = new UECallback(this);
+    Status status = service_->cleanupSuccessfulUpdate(cleanup_callback_);
+    if (!status.isOk()) {
+      LOG(ERROR) << "Failed to call cleanupSuccessfulUpdate.";
+      return ExitWhenIdle(status);
+    }
+    keep_running = true;
+  }
+
   if (FLAGS_follow) {
     // Register a callback object with the service.
     callback_ = new UECallback(this);
@@ -212,12 +249,7 @@
   }
 
   if (FLAGS_update) {
-    std::vector<std::string> headers = base::SplitString(
-        FLAGS_headers, "\n", base::KEEP_WHITESPACE, base::SPLIT_WANT_NONEMPTY);
-    std::vector<android::String16> and_headers;
-    for (const auto& header : headers) {
-      and_headers.push_back(android::String16{header.data(), header.size()});
-    }
+    auto and_headers = ParseHeaders(FLAGS_headers);
     Status status = service_->applyPayload(
         android::String16{FLAGS_payload.data(), FLAGS_payload.size()},
         FLAGS_offset,
@@ -261,6 +293,17 @@
   QuitWithExitCode(1);
 }
 
+std::vector<android::String16> UpdateEngineClientAndroid::ParseHeaders(
+    const std::string& arg) {
+  std::vector<std::string> headers = base::SplitString(
+      arg, "\n", base::KEEP_WHITESPACE, base::SPLIT_WANT_NONEMPTY);
+  std::vector<android::String16> and_headers;
+  for (const auto& header : headers) {
+    and_headers.push_back(android::String16{header.data(), header.size()});
+  }
+  return and_headers;
+}
+
 }  // namespace internal
 }  // namespace chromeos_update_engine
 
diff --git a/update_manager/boxed_value.cc b/update_manager/boxed_value.cc
index ee7236c..ba84a41 100644
--- a/update_manager/boxed_value.cc
+++ b/update_manager/boxed_value.cc
@@ -178,6 +178,8 @@
       return "Reporting Error Event";
     case Stage::kAttemptingRollback:
       return "Attempting Rollback";
+    case Stage::kCleanupPreviousUpdate:
+      return "Cleanup Previous Update";
   }
   NOTREACHED();
   return "Unknown";
diff --git a/update_manager/chromeos_policy.cc b/update_manager/chromeos_policy.cc
index a4926f4..85cc3ae 100644
--- a/update_manager/chromeos_policy.cc
+++ b/update_manager/chromeos_policy.cc
@@ -154,6 +154,8 @@
     case ErrorCode::kInternalLibCurlError:
     case ErrorCode::kUnresolvedHostError:
     case ErrorCode::kUnresolvedHostRecovered:
+    case ErrorCode::kNotEnoughSpace:
+    case ErrorCode::kDeviceCorrupted:
     case ErrorCode::kPackageExcludedFromUpdate:
       LOG(INFO) << "Not changing URL index or failure count due to error "
                 << chromeos_update_engine::utils::ErrorCodeToString(err_code)
@@ -598,7 +600,6 @@
     string* error,
     UpdateBackoffAndDownloadUrlResult* result,
     const UpdateState& update_state) const {
-  // Sanity checks.
   DCHECK_GE(update_state.download_errors_max, 0);
 
   // Set default result values.
@@ -670,7 +671,7 @@
   Time prev_err_time;
   bool is_first = true;
   for (const auto& err_tuple : update_state.download_errors) {
-    // Do some sanity checks.
+    // Do some validation checks.
     int used_url_idx = get<0>(err_tuple);
     if (is_first && url_idx >= 0 && used_url_idx != url_idx) {
       LOG(WARNING) << "First URL in error log (" << used_url_idx
diff --git a/update_manager/real_updater_provider.cc b/update_manager/real_updater_provider.cc
index 8465718..1548d57 100644
--- a/update_manager/real_updater_provider.cc
+++ b/update_manager/real_updater_provider.cc
@@ -170,6 +170,8 @@
      Stage::kReportingErrorEvent},
     {update_engine::kUpdateStatusAttemptingRollback,
      Stage::kAttemptingRollback},
+    {update_engine::kUpdateStatusCleanupPreviousUpdate,
+     Stage::kCleanupPreviousUpdate},
 };
 
 const Stage* StageVariable::GetValue(TimeDelta /* timeout */, string* errmsg) {
diff --git a/update_manager/updater_provider.h b/update_manager/updater_provider.h
index 98fd6d1..86af1c8 100644
--- a/update_manager/updater_provider.h
+++ b/update_manager/updater_provider.h
@@ -36,6 +36,7 @@
   kUpdatedNeedReboot,
   kReportingErrorEvent,
   kAttemptingRollback,
+  kCleanupPreviousUpdate,
 };
 
 enum class UpdateRequestStatus {
diff --git a/update_metadata.proto b/update_metadata.proto
index 3d136ca..373ee5e 100644
--- a/update_metadata.proto
+++ b/update_metadata.proto
@@ -14,24 +14,26 @@
 // limitations under the License.
 //
 
-// Update file format: A delta update file contains all the deltas needed
-// to update a system from one specific version to another specific
-// version. The update format is represented by this struct pseudocode:
+// Update file format: An update file contains all the operations needed
+// to update a system to a specific version. It can be a full payload which
+// can update from any version, or a delta payload which can only update
+// from a specific version.
+// The update format is represented by this struct pseudocode:
 // struct delta_update_file {
 //   char magic[4] = "CrAU";
-//   uint64 file_format_version;
+//   uint64 file_format_version;  // payload major version
 //   uint64 manifest_size;  // Size of protobuf DeltaArchiveManifest
 //
-//   // Only present if format_version > 1:
+//   // Only present if format_version >= 2:
 //   uint32 metadata_signature_size;
 //
-//   // The Bzip2 compressed DeltaArchiveManifest
-//   char manifest[];
+//   // The DeltaArchiveManifest protobuf serialized, not compressed.
+//   char manifest[manifest_size];
 //
 //   // The signature of the metadata (from the beginning of the payload up to
 //   // this location, not including the signature itself). This is a serialized
 //   // Signatures message.
-//   char medatada_signature_message[metadata_signature_size];
+//   char metadata_signature_message[metadata_signature_size];
 //
 //   // Data blobs for files, no specific format. The specific offset
 //   // and length of each data blob is recorded in the DeltaArchiveManifest.
@@ -39,9 +41,12 @@
 //     char data[];
 //   } blobs[];
 //
-//   // These two are not signed:
+//   // The signature of the entire payload, everything up to this location,
+//   // except that metadata_signature_message is skipped to simplify signing
+//   // process. These two are not signed:
 //   uint64 payload_signatures_message_size;
-//   char payload_signatures_message[];
+//   // This is a serialized Signatures message.
+//   char payload_signatures_message[payload_signatures_message_size];
 //
 // };
 
@@ -61,13 +66,13 @@
 //   dst_extents on the drive, zero padding to block size.
 // - MOVE: Copy the data in src_extents to dst_extents. Extents may overlap,
 //   so it may be desirable to read all src_extents data into memory before
-//   writing it out.
+//   writing it out. (deprecated)
 // - SOURCE_COPY: Copy the data in src_extents in the old partition to
 //   dst_extents in the new partition. There's no overlapping of data because
 //   the extents are in different partitions.
 // - BSDIFF: Read src_length bytes from src_extents into memory, perform
 //   bspatch with attached data, write new data to dst_extents, zero padding
-//   to block size.
+//   to block size. (deprecated)
 // - SOURCE_BSDIFF: Read the data in src_extents in the old partition, perform
 //   bspatch with the attached data and write the new data to dst_extents in the
 //   new partition.
@@ -102,6 +107,11 @@
 // A sentinel value (kuint64max) as the start block denotes a sparse-hole
 // in a file whose block-length is specified by num_blocks.
 
+message Extent {
+  optional uint64 start_block = 1;
+  optional uint64 num_blocks = 2;
+}
+
 // Signatures: Updates may be signed by the OS vendor. The client verifies
 // an update's signature by hashing the entire download. The section of the
 // download that contains the signature is at the end of the file, so when
@@ -114,15 +124,19 @@
 // to verify the download. The public key is expected to be part of the
 // client.
 
-message Extent {
-  optional uint64 start_block = 1;
-  optional uint64 num_blocks = 2;
-}
-
 message Signatures {
   message Signature {
-    optional uint32 version = 1;
+    optional uint32 version = 1 [deprecated = true];
     optional bytes data = 2;
+
+    // The DER encoded signature size of EC keys is nondeterministic for
+    // different inputs of the sha256 hash. However, we need the size of the
+    // serialized signatures protobuf string to be fixed before signing;
+    // because this size is part of the content to be signed. Therefore, we
+    // always pad the signature data to the maximum possible signature size of
+    // a given key. And the payload verifier will truncate the signature to
+    // its correct size based on the value of |unpadded_signature_size|.
+    optional fixed32 unpadded_signature_size = 3;
   }
   repeated Signature signatures = 1;
 }
@@ -211,6 +225,22 @@
   optional bytes src_sha256_hash = 9;
 }
 
+// Hints to VAB snapshot to skip writing some blocks if these blocks are
+// identical to the ones on the source image. The src & dst extents for each
+// CowMergeOperation should be contiguous, and they're a subset of an OTA
+// InstallOperation.
+// During merge time, we need to follow the pre-computed sequence to avoid
+// read after write, similar to the inplace update schema.
+message CowMergeOperation {
+  enum Type {
+    COW_COPY = 0;  // identical blocks
+  }
+  optional Type type = 1;
+
+  optional Extent src_extent = 2;
+  optional Extent dst_extent = 3;
+}
+
 // Describes the update to apply to a single partition.
 message PartitionUpdate {
   // A platform-specific name to identify the partition set being updated. For
@@ -274,6 +304,16 @@
 
   // The number of FEC roots.
   optional uint32 fec_roots = 16 [default = 2];
+
+  // Per-partition version used for downgrade detection, added
+  // as an effort to support partial updates. For most partitions,
+  // this is the build timestamp.
+  optional string version = 17;
+
+  // A sorted list of CowMergeOperation. When writing cow, we can choose to
+  // skip writing the raw bytes for these extents. During snapshot merge, the
+  // bytes will be read from the source partitions instead.
+  repeated CowMergeOperation merge_operations = 18;
 }
 
 message DynamicPartitionGroup {
@@ -290,13 +330,19 @@
 
 // Metadata related to all dynamic partitions.
 message DynamicPartitionMetadata {
-  // All updateable groups present in |partitions| of this DeltaArchiveManifest.
+  // All updatable groups present in |partitions| of this DeltaArchiveManifest.
   // - If an updatable group is on the device but not in the manifest, it is
   //   not updated. Hence, the group will not be resized, and partitions cannot
   //   be added to or removed from the group.
   // - If an updatable group is in the manifest but not on the device, the group
   //   is added to the device.
   repeated DynamicPartitionGroup groups = 1;
+
+  // Whether dynamic partitions have snapshots during the update. If this is
+  // set to true, the update_engine daemon creates snapshots for all dynamic
+  // partitions if possible. If this is unset, the update_engine daemon MUST
+  // NOT create snapshots for dynamic partitions.
+  optional bool snapshot_enabled = 2;
 }
 
 message DeltaArchiveManifest {
@@ -330,6 +376,7 @@
   optional ImageInfo new_image_info = 11;
 
   // The minor version, also referred as "delta version", of the payload.
+  // Minor version 0 is full payload, everything else is delta payload.
   optional uint32 minor_version = 12 [default = 0];
 
   // Only present in major version >= 2. List of partitions that will be
@@ -346,4 +393,7 @@
 
   // Metadata related to all dynamic partitions.
   optional DynamicPartitionMetadata dynamic_partition_metadata = 15;
+
+  // If the payload only updates a subset of partitions on the device.
+  optional bool partial_update = 16;
 }
diff --git a/update_status_utils.cc b/update_status_utils.cc
index 6c618ec..a702c61 100644
--- a/update_status_utils.cc
+++ b/update_status_utils.cc
@@ -66,6 +66,8 @@
       return update_engine::kUpdateStatusAttemptingRollback;
     case UpdateStatus::DISABLED:
       return update_engine::kUpdateStatusDisabled;
+    case UpdateStatus::CLEANUP_PREVIOUS_UPDATE:
+      return update_engine::kUpdateStatusCleanupPreviousUpdate;
   }
 
   NOTREACHED();