Merge remote-tracking branch 'aosp/upstream-master' into merge

It's a merge from Chrome OS with some reverts:
1. The fd watcher change, because the libbrillo version in AOSP isn't
compatible with it.
commit 6955bcc4ffe4cc9d62a88186b9a7e75d095a7897
commit 493fecb3f48c8478fd3ef244d631d857730dd14d
2. Two libcurl unittests, because RunOnce() of the fake message loop
seems to behave differently in AOSP.
commit d3d84218cafbc1a95e7d6bbb775b495d1bebf4d2

Added preprocessor guards so that AOSP keeps using the old code; we can
switch to the new code path after adopting the new libbrillo &
libchrome (a minimal illustration of the guard pattern follows the
trailers below).

Test: unit tests pass, apply an OTA
Change-Id: Id613599834b0f44f92841dbeae6303601db5490d
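
A minimal, self-contained sketch of the guard pattern described above. The
macro name and the fd-watcher behavior are hypothetical placeholders, not the
project's actual code:

#include <iostream>

// In the real tree the guard would come from the build system; it is defined
// here only so this standalone sketch compiles. USE_OLD_FD_WATCHER is a
// hypothetical name, not the macro used by update_engine.
#define USE_OLD_FD_WATCHER 1

void StartWatchingFd(int fd) {
#if USE_OLD_FD_WATCHER
  // AOSP path: keep the implementation that works with the older libbrillo.
  std::cout << "watching fd " << fd << " with the old watcher\n";
#else
  // Chrome OS path: the new implementation, enabled once the updated
  // libbrillo/libchrome are adopted.
  std::cout << "watching fd " << fd << " with the new watcher\n";
#endif
}

int main() {
  StartWatchingFd(3);
  return 0;
}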
diff --git a/.clang-format b/.clang-format
index aed0ce8..3044f59 100644
--- a/.clang-format
+++ b/.clang-format
@@ -34,6 +34,7 @@
 BinPackParameters: false
 CommentPragmas: NOLINT:.*
 DerivePointerAlignment: false
+IncludeBlocks: Preserve
 PointerAlignment: Left
 TabWidth: 2
 
diff --git a/Android.bp b/Android.bp
index 47f0318..ecf3585 100644
--- a/Android.bp
+++ b/Android.bp
@@ -120,6 +120,7 @@
         "libbz",
         "libbspatch",
         "libbrotli",
+        "libc++fs",
         "libfec_rs",
         "libpuffpatch",
         "libverity_tree",
@@ -128,6 +129,7 @@
         "libbase",
         "libcrypto",
         "libfec",
+        "libziparchive",
     ],
 }
 
@@ -146,6 +148,7 @@
         "common/clock.cc",
         "common/constants.cc",
         "common/cpu_limiter.cc",
+        "common/dynamic_partition_control_stub.cc",
         "common/error_code_utils.cc",
         "common/file_fetcher.cc",
         "common/hash_calculator.cc",
@@ -161,6 +164,7 @@
         "common/utils.cc",
         "payload_consumer/bzip_extent_writer.cc",
         "payload_consumer/cached_file_descriptor.cc",
+        "payload_consumer/certificate_parser_android.cc",
         "payload_consumer/delta_performer.cc",
         "payload_consumer/download_action.cc",
         "payload_consumer/extent_reader.cc",
@@ -178,6 +182,7 @@
         "payload_consumer/verity_writer_android.cc",
         "payload_consumer/xz_extent_writer.cc",
         "payload_consumer/fec_file_descriptor.cc",
+        "payload_consumer/partition_update_generator_android.cc",
     ],
 }
 
@@ -188,16 +193,41 @@
     name: "libupdate_engine_boot_control_exports",
     defaults: ["update_metadata-protos_exports"],
 
-    static_libs: ["update_metadata-protos"],
+    static_libs: [
+        "libcutils",
+        "libfs_mgr_binder",
+        "libgsi",
+        "libpayload_consumer",
+        "libsnapshot",
+        "update_metadata-protos",
+    ],
     shared_libs: [
         "libbootloader_message",
-        "libfs_mgr",
-        "libhwbinder",
         "libhidlbase",
         "liblp",
+        "libstatslog",
         "libutils",
         "android.hardware.boot@1.0",
+        "android.hardware.boot@1.1",
     ],
+    header_libs: [
+        "avb_headers",
+    ],
+    target: {
+        recovery: {
+            static_libs: [
+                "libfs_mgr",
+                "libsnapshot_nobinder",
+            ],
+            exclude_static_libs: [
+                "libfs_mgr_binder",
+                "libsnapshot",
+            ],
+            exclude_shared_libs: [
+                "libstatslog",
+            ],
+        },
+    },
 }
 
 cc_library_static {
@@ -205,12 +235,15 @@
     defaults: [
         "ue_defaults",
         "libupdate_engine_boot_control_exports",
+        "libpayload_consumer_exports",
     ],
     recovery_available: true,
 
     srcs: [
         "boot_control_android.cc",
+        "cleanup_previous_update_action.cc",
         "dynamic_partition_control_android.cc",
+        "dynamic_partition_utils.cc",
     ],
 }
 
@@ -240,8 +273,8 @@
         "libcurl",
         "libcutils",
         "liblog",
-        "libmetricslogger",
         "libssl",
+        "libstatslog",
         "libutils",
     ],
 }
@@ -270,6 +303,7 @@
         "daemon_state_android.cc",
         "hardware_android.cc",
         "libcurl_http_fetcher.cc",
+        "logging_android.cc",
         "metrics_reporter_android.cc",
         "metrics_utils.cc",
         "network_selector_android.cc",
@@ -290,7 +324,10 @@
     ],
 
     static_libs: ["libupdate_engine_android"],
-    required: ["cacerts_google"],
+    required: [
+        "cacerts_google",
+        "otacerts",
+    ],
 
     srcs: ["main.cc"],
     init_rc: ["update_engine.rc"],
@@ -318,6 +355,7 @@
 
     srcs: [
         "hardware_android.cc",
+        "logging_android.cc",
         "metrics_reporter_stub.cc",
         "metrics_utils.cc",
         "network_selector_stub.cc",
@@ -355,7 +393,6 @@
         recovery: {
             exclude_shared_libs: [
                 "libprotobuf-cpp-lite",
-                "libhwbinder",
                 "libbrillo-stream",
                 "libbrillo",
                 "libchrome",
@@ -363,7 +400,9 @@
         },
     },
 
-    required: ["android.hardware.boot@1.0-impl-wrapper.recovery"],
+    required: [
+        "otacerts.recovery",
+    ],
 }
 
 // update_engine_client (type: executable)
@@ -407,6 +446,9 @@
         "update_metadata-protos_exports",
     ],
 
+    header_libs: [
+        "bootimg_headers",
+    ],
     static_libs: [
         "libavb",
         "libbrotli",
@@ -496,8 +538,6 @@
 
     gtest: false,
     stem: "delta_generator",
-    relative_install_path: "update_engine_unittests",
-    no_named_install_directory: true,
 }
 
 // test_http_server (type: executable)
@@ -512,8 +552,6 @@
     ],
 
     gtest: false,
-    relative_install_path: "update_engine_unittests",
-    no_named_install_directory: true,
 }
 
 // test_subprocess (type: executable)
@@ -525,8 +563,6 @@
     srcs: ["test_subprocess.cc"],
 
     gtest: false,
-    relative_install_path: "update_engine_unittests",
-    no_named_install_directory: true,
 }
 
 // Public keys for unittests.
@@ -534,14 +570,20 @@
 genrule {
     name: "ue_unittest_keys",
     cmd: "openssl rsa -in $(location unittest_key.pem) -pubout -out $(location unittest_key.pub.pem) &&" +
-        "openssl rsa -in $(location unittest_key2.pem) -pubout -out $(location unittest_key2.pub.pem)",
+        "openssl rsa -in $(location unittest_key2.pem) -pubout -out $(location unittest_key2.pub.pem) &&" +
+        "openssl rsa -in $(location unittest_key_RSA4096.pem) -pubout -out $(location unittest_key_RSA4096.pub.pem) &&" +
+        "openssl pkey -in $(location unittest_key_EC.pem) -pubout -out $(location unittest_key_EC.pub.pem)",
     srcs: [
         "unittest_key.pem",
         "unittest_key2.pem",
+        "unittest_key_RSA4096.pem",
+        "unittest_key_EC.pem",
     ],
     out: [
         "unittest_key.pub.pem",
         "unittest_key2.pub.pem",
+        "unittest_key_RSA4096.pub.pem",
+        "unittest_key_EC.pub.pem",
     ],
 }
 
@@ -571,11 +613,6 @@
         "libpayload_generator_exports",
         "libupdate_engine_android_exports",
     ],
-    required: [
-        "test_http_server",
-        "test_subprocess",
-        "ue_unittest_delta_generator",
-    ],
 
     static_libs: [
         "libpayload_generator",
@@ -583,21 +620,33 @@
         "libgmock",
         "libchrome_test_helpers",
         "libupdate_engine_android",
+        "libdm",
     ],
-    shared_libs: [
-        "libhidltransport",
+
+    header_libs: [
+        "libstorage_literals_headers",
     ],
 
     data: [
+        ":test_http_server",
+        ":test_subprocess",
+        ":ue_unittest_delta_generator",
         ":ue_unittest_disk_imgs",
         ":ue_unittest_keys",
+        "otacerts.zip",
         "unittest_key.pem",
         "unittest_key2.pem",
+        "unittest_key_RSA4096.pem",
+        "unittest_key_EC.pem",
         "update_engine.conf",
     ],
 
+    // We cannot use the default generated AndroidTest.xml because of the use of helper modules
+    // (i.e. test_http_server, test_subprocess, ue_unittest_delta_generator).
+    test_config: "test_config.xml",
+    test_suites: ["device-tests"],
+
     srcs: [
-        "boot_control_android_unittest.cc",
         "certificate_checker_unittest.cc",
         "common/action_pipe_unittest.cc",
         "common/action_processor_unittest.cc",
@@ -615,9 +664,11 @@
         "common/terminator_unittest.cc",
         "common/test_utils.cc",
         "common/utils_unittest.cc",
+        "dynamic_partition_control_android_unittest.cc",
         "libcurl_http_fetcher_unittest.cc",
         "payload_consumer/bzip_extent_writer_unittest.cc",
         "payload_consumer/cached_file_descriptor_unittest.cc",
+        "payload_consumer/certificate_parser_android_unittest.cc",
         "payload_consumer/delta_performer_integration_test.cc",
         "payload_consumer/delta_performer_unittest.cc",
         "payload_consumer/extent_reader_unittest.cc",
@@ -626,6 +677,7 @@
         "payload_consumer/file_descriptor_utils_unittest.cc",
         "payload_consumer/file_writer_unittest.cc",
         "payload_consumer/filesystem_verifier_action_unittest.cc",
+        "payload_consumer/partition_update_generator_android_unittest.cc",
         "payload_consumer/postinstall_runner_action_unittest.cc",
         "payload_consumer/verity_writer_android_unittest.cc",
         "payload_consumer/xz_extent_writer_unittest.cc",
diff --git a/METADATA b/METADATA
new file mode 100644
index 0000000..d97975c
--- /dev/null
+++ b/METADATA
@@ -0,0 +1,3 @@
+third_party {
+  license_type: NOTICE
+}
diff --git a/binder_bindings/android/os/IUpdateEngine.aidl b/binder_bindings/android/os/IUpdateEngine.aidl
index c0e29f5..c9580da 100644
--- a/binder_bindings/android/os/IUpdateEngine.aidl
+++ b/binder_bindings/android/os/IUpdateEngine.aidl
@@ -17,6 +17,7 @@
 package android.os;
 
 import android.os.IUpdateEngineCallback;
+import android.os.ParcelFileDescriptor;
 
 /** @hide */
 interface IUpdateEngine {
@@ -26,6 +27,11 @@
                     in long payload_size,
                     in String[] headerKeyValuePairs);
   /** @hide */
+  void applyPayloadFd(in ParcelFileDescriptor pfd,
+                      in long payload_offset,
+                      in long payload_size,
+                      in String[] headerKeyValuePairs);
+  /** @hide */
   boolean bind(IUpdateEngineCallback callback);
   /** @hide */
   boolean unbind(IUpdateEngineCallback callback);
@@ -39,4 +45,30 @@
   void resetStatus();
   /** @hide */
   boolean verifyPayloadApplicable(in String metadataFilename);
+  /**
+   * Allocate space on userdata partition.
+   *
+   * @return 0 indicates allocation is successful.
+   *   Non-zero indicates space is insufficient. The returned value is the
+   *   total required space (in bytes) on userdata partition.
+   *
+   * @throws ServiceSpecificException for other errors.
+   *
+   * @hide
+   */
+  long allocateSpaceForPayload(in String metadataFilename,
+                               in String[] headerKeyValuePairs);
+  /** @hide
+   *
+   * Wait for merge to finish, and clean up necessary files.
+   *
+   * @param callback Report status updates in callback (not the one previously
+   * bound with {@link #bind()}).
+   * {@link IUpdateEngineCallback#onStatusUpdate} is called with
+   * CLEANUP_PREVIOUS_UPDATE and a progress value during the cleanup.
+   * {@link IUpdateEngineCallback#onPayloadApplicationComplete} is called at
+   * the end with SUCCESS if successful. ERROR if transient errors (e.g. merged
+   * but needs reboot). DEVICE_CORRUPTED for permanent errors.
+   */
+  void cleanupSuccessfulUpdate(IUpdateEngineCallback callback);
 }
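
For context only (not part of this patch): an illustrative caller-side sketch
of the allocateSpaceForPayload() semantics documented above. It assumes the
IUpdateEngine binder proxy has already been obtained; the generated header
path and the helper name are assumptions.

#include <android/os/IUpdateEngine.h>
#include <utils/String16.h>
#include <utils/StrongPointer.h>
#include <vector>

// Returns true if enough space is available (or was allocated) for the update.
// 'ue' is an already-connected IUpdateEngine binder proxy.
bool EnsureSpaceForUpdate(const android::sp<android::os::IUpdateEngine>& ue,
                          const android::String16& metadata_filename,
                          const std::vector<android::String16>& headers) {
  int64_t required_bytes = 0;
  auto status =
      ue->allocateSpaceForPayload(metadata_filename, headers, &required_bytes);
  if (!status.isOk()) {
    // Other errors surface as a ServiceSpecificException.
    return false;
  }
  if (required_bytes == 0) {
    return true;  // allocation succeeded
  }
  // Non-zero return: insufficient space; required_bytes is the total space
  // (in bytes) needed on the userdata partition.
  return false;
}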
diff --git a/binder_bindings/android/os/IUpdateEngineCallback.aidl b/binder_bindings/android/os/IUpdateEngineCallback.aidl
index ee15c8b..4bacf9a 100644
--- a/binder_bindings/android/os/IUpdateEngineCallback.aidl
+++ b/binder_bindings/android/os/IUpdateEngineCallback.aidl
@@ -19,6 +19,7 @@
 /** @hide */
 oneway interface IUpdateEngineCallback {
   /** @hide */
+  @UnsupportedAppUsage
   void onStatusUpdate(int status_code, float percentage);
   /** @hide */
   void onPayloadApplicationComplete(int error_code);
diff --git a/binder_service_android.cc b/binder_service_android.cc
index 137694a..6b8a552 100644
--- a/binder_service_android.cc
+++ b/binder_service_android.cc
@@ -16,6 +16,8 @@
 
 #include "update_engine/binder_service_android.h"
 
+#include <memory>
+
 #include <base/bind.h>
 #include <base/logging.h>
 #include <binderwrapper/binder_wrapper.h>
@@ -24,6 +26,9 @@
 
 using android::binder::Status;
 using android::os::IUpdateEngineCallback;
+using android::os::ParcelFileDescriptor;
+using std::string;
+using std::vector;
 using update_engine::UpdateEngineStatus;
 
 namespace {
@@ -31,6 +36,16 @@
   return Status::fromServiceSpecificError(
       1, android::String8{error->GetMessage().c_str()});
 }
+
+vector<string> ToVecString(const vector<android::String16>& inp) {
+  vector<string> out;
+  out.reserve(inp.size());
+  for (const auto& e : inp) {
+    out.emplace_back(android::String8{e}.string());
+  }
+  return out;
+}
+
 }  // namespace
 
 namespace chromeos_update_engine {
@@ -57,6 +72,20 @@
 
 Status BinderUpdateEngineAndroidService::bind(
     const android::sp<IUpdateEngineCallback>& callback, bool* return_value) {
+  // Send a status update on connection (except when no update has been sent
+  // so far). Even though the status update is oneway, it still returns an
+  // erroneous status in case of an SELinux denial. We should at least check
+  // this status and fail the binding.
+  if (last_status_ != -1) {
+    auto status = callback->onStatusUpdate(last_status_, last_progress_);
+    if (!status.isOk()) {
+      LOG(ERROR) << "Failed to call onStatusUpdate() from callback: "
+                 << status.toString8();
+      *return_value = false;
+      return Status::ok();
+    }
+  }
+
   callbacks_.emplace_back(callback);
 
   const android::sp<IBinder>& callback_binder =
@@ -69,12 +98,6 @@
           base::Unretained(this),
           base::Unretained(callback_binder.get())));
 
-  // Send an status update on connection (except when no update sent so far),
-  // since the status update is oneway and we don't need to wait for the
-  // response.
-  if (last_status_ != -1)
-    callback->onStatusUpdate(last_status_, last_progress_);
-
   *return_value = true;
   return Status::ok();
 }
@@ -94,13 +117,9 @@
     const android::String16& url,
     int64_t payload_offset,
     int64_t payload_size,
-    const std::vector<android::String16>& header_kv_pairs) {
-  const std::string payload_url{android::String8{url}.string()};
-  std::vector<std::string> str_headers;
-  str_headers.reserve(header_kv_pairs.size());
-  for (const auto& header : header_kv_pairs) {
-    str_headers.emplace_back(android::String8{header}.string());
-  }
+    const vector<android::String16>& header_kv_pairs) {
+  const string payload_url{android::String8{url}.string()};
+  vector<string> str_headers = ToVecString(header_kv_pairs);
 
   brillo::ErrorPtr error;
   if (!service_delegate_->ApplyPayload(
@@ -110,6 +129,21 @@
   return Status::ok();
 }
 
+Status BinderUpdateEngineAndroidService::applyPayloadFd(
+    const ParcelFileDescriptor& pfd,
+    int64_t payload_offset,
+    int64_t payload_size,
+    const vector<android::String16>& header_kv_pairs) {
+  vector<string> str_headers = ToVecString(header_kv_pairs);
+
+  brillo::ErrorPtr error;
+  if (!service_delegate_->ApplyPayload(
+          pfd.get(), payload_offset, payload_size, str_headers, &error)) {
+    return ErrorPtrToStatus(error);
+  }
+  return Status::ok();
+}
+
 Status BinderUpdateEngineAndroidService::suspend() {
   brillo::ErrorPtr error;
   if (!service_delegate_->SuspendUpdate(&error))
@@ -167,4 +201,58 @@
   return true;
 }
 
+Status BinderUpdateEngineAndroidService::allocateSpaceForPayload(
+    const android::String16& metadata_filename,
+    const vector<android::String16>& header_kv_pairs,
+    int64_t* return_value) {
+  const std::string payload_metadata{
+      android::String8{metadata_filename}.string()};
+  vector<string> str_headers = ToVecString(header_kv_pairs);
+  LOG(INFO) << "Received a request of allocating space for " << payload_metadata
+            << ".";
+  brillo::ErrorPtr error;
+  *return_value =
+      static_cast<int64_t>(service_delegate_->AllocateSpaceForPayload(
+          payload_metadata, str_headers, &error));
+  if (error != nullptr)
+    return ErrorPtrToStatus(error);
+  return Status::ok();
+}
+
+class CleanupSuccessfulUpdateCallback
+    : public CleanupSuccessfulUpdateCallbackInterface {
+ public:
+  CleanupSuccessfulUpdateCallback(
+      const android::sp<IUpdateEngineCallback>& callback)
+      : callback_(callback) {}
+  void OnCleanupComplete(int32_t error_code) {
+    ignore_result(callback_->onPayloadApplicationComplete(error_code));
+  }
+  void OnCleanupProgressUpdate(double progress) {
+    ignore_result(callback_->onStatusUpdate(
+        static_cast<int32_t>(
+            update_engine::UpdateStatus::CLEANUP_PREVIOUS_UPDATE),
+        progress));
+  }
+  void RegisterForDeathNotifications(base::Closure unbind) {
+    const android::sp<android::IBinder>& callback_binder =
+        IUpdateEngineCallback::asBinder(callback_);
+    auto binder_wrapper = android::BinderWrapper::Get();
+    binder_wrapper->RegisterForDeathNotifications(callback_binder, unbind);
+  }
+
+ private:
+  android::sp<IUpdateEngineCallback> callback_;
+};
+
+Status BinderUpdateEngineAndroidService::cleanupSuccessfulUpdate(
+    const android::sp<IUpdateEngineCallback>& callback) {
+  brillo::ErrorPtr error;
+  service_delegate_->CleanupSuccessfulUpdate(
+      std::make_unique<CleanupSuccessfulUpdateCallback>(callback), &error);
+  if (error != nullptr)
+    return ErrorPtrToStatus(error);
+  return Status::ok();
+}
+
 }  // namespace chromeos_update_engine
diff --git a/binder_service_android.h b/binder_service_android.h
index d8c4e9c..5f28225 100644
--- a/binder_service_android.h
+++ b/binder_service_android.h
@@ -53,6 +53,11 @@
       int64_t payload_offset,
       int64_t payload_size,
       const std::vector<android::String16>& header_kv_pairs) override;
+  android::binder::Status applyPayloadFd(
+      const ::android::os::ParcelFileDescriptor& pfd,
+      int64_t payload_offset,
+      int64_t payload_size,
+      const std::vector<android::String16>& header_kv_pairs) override;
   android::binder::Status bind(
       const android::sp<android::os::IUpdateEngineCallback>& callback,
       bool* return_value) override;
@@ -65,6 +70,12 @@
   android::binder::Status resetStatus() override;
   android::binder::Status verifyPayloadApplicable(
       const android::String16& metadata_filename, bool* return_value) override;
+  android::binder::Status allocateSpaceForPayload(
+      const android::String16& metadata_filename,
+      const std::vector<android::String16>& header_kv_pairs,
+      int64_t* return_value) override;
+  android::binder::Status cleanupSuccessfulUpdate(
+      const android::sp<android::os::IUpdateEngineCallback>& callback) override;
 
  private:
   // Remove the passed |callback| from the list of registered callbacks. Called
diff --git a/boot_control_android.cc b/boot_control_android.cc
index 8909cd9..dee5fa8 100644
--- a/boot_control_android.cc
+++ b/boot_control_android.cc
@@ -22,11 +22,8 @@
 
 #include <base/bind.h>
 #include <base/logging.h>
-#include <base/strings/string_util.h>
 #include <bootloader_message/bootloader_message.h>
 #include <brillo/message_loops/message_loop.h>
-#include <fs_mgr.h>
-#include <fs_mgr_overlayfs.h>
 
 #include "update_engine/common/utils.h"
 #include "update_engine/dynamic_partition_control_android.h"
@@ -34,15 +31,12 @@
 using std::string;
 
 using android::dm::DmDeviceState;
-using android::fs_mgr::Partition;
 using android::hardware::hidl_string;
 using android::hardware::Return;
 using android::hardware::boot::V1_0::BoolResult;
 using android::hardware::boot::V1_0::CommandResult;
 using android::hardware::boot::V1_0::IBootControl;
 using Slot = chromeos_update_engine::BootControlInterface::Slot;
-using PartitionMetadata =
-    chromeos_update_engine::BootControlInterface::PartitionMetadata;
 
 namespace {
 
@@ -80,10 +74,6 @@
   return true;
 }
 
-void BootControlAndroid::Cleanup() {
-  dynamic_control_->Cleanup();
-}
-
 unsigned int BootControlAndroid::GetNumSlots() const {
   return module_->getNumberSlots();
 }
@@ -92,140 +82,24 @@
   return module_->getCurrentSlot();
 }
 
-bool BootControlAndroid::GetSuffix(Slot slot, string* suffix) const {
-  auto store_suffix_cb = [&suffix](hidl_string cb_suffix) {
-    *suffix = cb_suffix.c_str();
-  };
-  Return<void> ret = module_->getSuffix(slot, store_suffix_cb);
-
-  if (!ret.isOk()) {
-    LOG(ERROR) << "boot_control impl returned no suffix for slot "
-               << SlotName(slot);
-    return false;
-  }
-  return true;
-}
-
-bool BootControlAndroid::IsSuperBlockDevice(
-    const base::FilePath& device_dir,
-    Slot slot,
-    const string& partition_name_suffix) const {
-  string source_device =
-      device_dir.Append(fs_mgr_get_super_partition_name(slot)).value();
-  auto source_metadata = dynamic_control_->LoadMetadataBuilder(
-      source_device, slot, BootControlInterface::kInvalidSlot);
-  return source_metadata->HasBlockDevice(partition_name_suffix);
-}
-
-BootControlAndroid::DynamicPartitionDeviceStatus
-BootControlAndroid::GetDynamicPartitionDevice(
-    const base::FilePath& device_dir,
-    const string& partition_name_suffix,
-    Slot slot,
-    string* device) const {
-  string super_device =
-      device_dir.Append(fs_mgr_get_super_partition_name(slot)).value();
-
-  auto builder = dynamic_control_->LoadMetadataBuilder(
-      super_device, slot, BootControlInterface::kInvalidSlot);
-
-  if (builder == nullptr) {
-    LOG(ERROR) << "No metadata in slot "
-               << BootControlInterface::SlotName(slot);
-    return DynamicPartitionDeviceStatus::ERROR;
-  }
-
-  if (builder->FindPartition(partition_name_suffix) == nullptr) {
-    LOG(INFO) << partition_name_suffix
-              << " is not in super partition metadata.";
-
-    Slot current_slot = GetCurrentSlot();
-    if (IsSuperBlockDevice(device_dir, current_slot, partition_name_suffix)) {
-      LOG(ERROR) << "The static partition " << partition_name_suffix
-                 << " is a block device for current metadata ("
-                 << fs_mgr_get_super_partition_name(current_slot) << ", slot "
-                 << BootControlInterface::SlotName(current_slot)
-                 << "). It cannot be used as a logical partition.";
-      return DynamicPartitionDeviceStatus::ERROR;
-    }
-
-    return DynamicPartitionDeviceStatus::TRY_STATIC;
-  }
-
-  DmDeviceState state = dynamic_control_->GetState(partition_name_suffix);
-
-  // Device is mapped in the previous GetPartitionDevice() call. Just return
-  // the path.
-  if (state == DmDeviceState::ACTIVE) {
-    if (dynamic_control_->GetDmDevicePathByName(partition_name_suffix,
-                                                device)) {
-      LOG(INFO) << partition_name_suffix
-                << " is mapped on device mapper: " << *device;
-      return DynamicPartitionDeviceStatus::SUCCESS;
-    }
-    LOG(ERROR) << partition_name_suffix << " is mapped but path is unknown.";
-    return DynamicPartitionDeviceStatus::ERROR;
-  }
-
-  if (state == DmDeviceState::INVALID) {
-    bool force_writable = slot != GetCurrentSlot();
-    if (dynamic_control_->MapPartitionOnDeviceMapper(super_device,
-                                                     partition_name_suffix,
-                                                     slot,
-                                                     force_writable,
-                                                     device)) {
-      return DynamicPartitionDeviceStatus::SUCCESS;
-    }
-    return DynamicPartitionDeviceStatus::ERROR;
-  }
-
-  LOG(ERROR) << partition_name_suffix
-             << " is mapped on device mapper but state is unknown: "
-             << static_cast<std::underlying_type_t<DmDeviceState>>(state);
-  return DynamicPartitionDeviceStatus::ERROR;
+bool BootControlAndroid::GetPartitionDevice(const std::string& partition_name,
+                                            BootControlInterface::Slot slot,
+                                            bool not_in_payload,
+                                            std::string* device,
+                                            bool* is_dynamic) const {
+  return dynamic_control_->GetPartitionDevice(partition_name,
+                                              slot,
+                                              GetCurrentSlot(),
+                                              not_in_payload,
+                                              device,
+                                              is_dynamic);
 }
 
 bool BootControlAndroid::GetPartitionDevice(const string& partition_name,
-                                            Slot slot,
+                                            BootControlInterface::Slot slot,
                                             string* device) const {
-  string suffix;
-  if (!GetSuffix(slot, &suffix)) {
-    return false;
-  }
-  const string partition_name_suffix = partition_name + suffix;
-
-  string device_dir_str;
-  if (!dynamic_control_->GetDeviceDir(&device_dir_str)) {
-    return false;
-  }
-  base::FilePath device_dir(device_dir_str);
-
-  // When looking up target partition devices, treat them as static if the
-  // current payload doesn't encode them as dynamic partitions. This may happen
-  // when applying a retrofit update on top of a dynamic-partitions-enabled
-  // build.
-  if (dynamic_control_->IsDynamicPartitionsEnabled() &&
-      (slot == GetCurrentSlot() || is_target_dynamic_)) {
-    switch (GetDynamicPartitionDevice(
-        device_dir, partition_name_suffix, slot, device)) {
-      case DynamicPartitionDeviceStatus::SUCCESS:
-        return true;
-      case DynamicPartitionDeviceStatus::TRY_STATIC:
-        break;
-      case DynamicPartitionDeviceStatus::ERROR:  // fallthrough
-      default:
-        return false;
-    }
-  }
-
-  base::FilePath path = device_dir.Append(partition_name_suffix);
-  if (!dynamic_control_->DeviceExists(path.value())) {
-    LOG(ERROR) << "Device file " << path.value() << " does not exist.";
-    return false;
-  }
-
-  *device = path.value();
-  return true;
+  return GetPartitionDevice(
+      partition_name, slot, false /* not_in_payload */, device, nullptr);
 }
 
 bool BootControlAndroid::IsSlotBootable(Slot slot) const {
@@ -288,160 +162,25 @@
          brillo::MessageLoop::kTaskIdNull;
 }
 
-namespace {
-
-bool UpdatePartitionMetadata(DynamicPartitionControlInterface* dynamic_control,
-                             Slot source_slot,
-                             Slot target_slot,
-                             const string& target_suffix,
-                             const PartitionMetadata& partition_metadata) {
-  string device_dir_str;
-  if (!dynamic_control->GetDeviceDir(&device_dir_str)) {
+bool BootControlAndroid::IsSlotMarkedSuccessful(
+    BootControlInterface::Slot slot) const {
+  Return<BoolResult> ret = module_->isSlotMarkedSuccessful(slot);
+  CommandResult result;
+  if (!ret.isOk()) {
+    LOG(ERROR) << "Unable to determine if slot " << SlotName(slot)
+               << " is marked successful: " << ret.description();
     return false;
   }
-  base::FilePath device_dir(device_dir_str);
-  auto source_device =
-      device_dir.Append(fs_mgr_get_super_partition_name(source_slot)).value();
-
-  auto builder = dynamic_control->LoadMetadataBuilder(
-      source_device, source_slot, target_slot);
-  if (builder == nullptr) {
-    // TODO(elsk): allow reconstructing metadata from partition_metadata
-    // in recovery sideload.
-    LOG(ERROR) << "No metadata at "
-               << BootControlInterface::SlotName(source_slot);
+  if (ret == BoolResult::INVALID_SLOT) {
+    LOG(ERROR) << "Invalid slot: " << SlotName(slot);
     return false;
   }
-
-  std::vector<string> groups = builder->ListGroups();
-  for (const auto& group_name : groups) {
-    if (base::EndsWith(
-            group_name, target_suffix, base::CompareCase::SENSITIVE)) {
-      LOG(INFO) << "Removing group " << group_name;
-      builder->RemoveGroupAndPartitions(group_name);
-    }
-  }
-
-  uint64_t total_size = 0;
-  for (const auto& group : partition_metadata.groups) {
-    total_size += group.size;
-  }
-
-  string expr;
-  uint64_t allocatable_space = builder->AllocatableSpace();
-  if (!dynamic_control->IsDynamicPartitionsRetrofit()) {
-    allocatable_space /= 2;
-    expr = "half of ";
-  }
-  if (total_size > allocatable_space) {
-    LOG(ERROR) << "The maximum size of all groups with suffix " << target_suffix
-               << " (" << total_size << ") has exceeded " << expr
-               << " allocatable space for dynamic partitions "
-               << allocatable_space << ".";
-    return false;
-  }
-
-  for (const auto& group : partition_metadata.groups) {
-    auto group_name_suffix = group.name + target_suffix;
-    if (!builder->AddGroup(group_name_suffix, group.size)) {
-      LOG(ERROR) << "Cannot add group " << group_name_suffix << " with size "
-                 << group.size;
-      return false;
-    }
-    LOG(INFO) << "Added group " << group_name_suffix << " with size "
-              << group.size;
-
-    for (const auto& partition : group.partitions) {
-      auto partition_name_suffix = partition.name + target_suffix;
-      Partition* p = builder->AddPartition(
-          partition_name_suffix, group_name_suffix, LP_PARTITION_ATTR_READONLY);
-      if (!p) {
-        LOG(ERROR) << "Cannot add partition " << partition_name_suffix
-                   << " to group " << group_name_suffix;
-        return false;
-      }
-      if (!builder->ResizePartition(p, partition.size)) {
-        LOG(ERROR) << "Cannot resize partition " << partition_name_suffix
-                   << " to size " << partition.size << ". Not enough space?";
-        return false;
-      }
-      LOG(INFO) << "Added partition " << partition_name_suffix << " to group "
-                << group_name_suffix << " with size " << partition.size;
-    }
-  }
-
-  auto target_device =
-      device_dir.Append(fs_mgr_get_super_partition_name(target_slot)).value();
-  return dynamic_control->StoreMetadata(
-      target_device, builder.get(), target_slot);
+  return ret == BoolResult::TRUE;
 }
 
-bool UnmapTargetPartitions(DynamicPartitionControlInterface* dynamic_control,
-                           const string& target_suffix,
-                           const PartitionMetadata& partition_metadata) {
-  for (const auto& group : partition_metadata.groups) {
-    for (const auto& partition : group.partitions) {
-      if (!dynamic_control->UnmapPartitionOnDeviceMapper(
-              partition.name + target_suffix, true /* wait */)) {
-        return false;
-      }
-    }
-  }
-  return true;
-}
-
-}  // namespace
-
-bool BootControlAndroid::InitPartitionMetadata(
-    Slot target_slot,
-    const PartitionMetadata& partition_metadata,
-    bool update_metadata) {
-  if (fs_mgr_overlayfs_is_setup()) {
-    // Non DAP devices can use overlayfs as well.
-    LOG(WARNING)
-        << "overlayfs overrides are active and can interfere with our "
-           "resources.\n"
-        << "run adb enable-verity to deactivate if required and try again.";
-  }
-  if (!dynamic_control_->IsDynamicPartitionsEnabled()) {
-    return true;
-  }
-
-  auto source_slot = GetCurrentSlot();
-  if (target_slot == source_slot) {
-    LOG(ERROR) << "Cannot call InitPartitionMetadata on current slot.";
-    return false;
-  }
-
-  // Although the current build supports dynamic partitions, the given payload
-  // doesn't use it for target partitions. This could happen when applying a
-  // retrofit update. Skip updating the partition metadata for the target slot.
-  is_target_dynamic_ = !partition_metadata.groups.empty();
-  if (!is_target_dynamic_) {
-    return true;
-  }
-
-  if (!update_metadata) {
-    return true;
-  }
-
-  string target_suffix;
-  if (!GetSuffix(target_slot, &target_suffix)) {
-    return false;
-  }
-
-  // Unmap all the target dynamic partitions because they would become
-  // inconsistent with the new metadata.
-  if (!UnmapTargetPartitions(
-          dynamic_control_.get(), target_suffix, partition_metadata)) {
-    return false;
-  }
-
-  return UpdatePartitionMetadata(dynamic_control_.get(),
-                                 source_slot,
-                                 target_slot,
-                                 target_suffix,
-                                 partition_metadata);
+DynamicPartitionControlInterface*
+BootControlAndroid::GetDynamicPartitionControl() {
+  return dynamic_control_.get();
 }
 
 }  // namespace chromeos_update_engine
diff --git a/boot_control_android.h b/boot_control_android.h
index a6f33be..5009dbd 100644
--- a/boot_control_android.h
+++ b/boot_control_android.h
@@ -22,11 +22,11 @@
 #include <string>
 
 #include <android/hardware/boot/1.0/IBootControl.h>
-#include <base/files/file_util.h>
 #include <liblp/builder.h>
 
 #include "update_engine/common/boot_control.h"
-#include "update_engine/dynamic_partition_control_interface.h"
+#include "update_engine/common/dynamic_partition_control_interface.h"
+#include "update_engine/dynamic_partition_control_android.h"
 
 namespace chromeos_update_engine {
 
@@ -46,47 +46,25 @@
   BootControlInterface::Slot GetCurrentSlot() const override;
   bool GetPartitionDevice(const std::string& partition_name,
                           BootControlInterface::Slot slot,
+                          bool not_in_payload,
+                          std::string* device,
+                          bool* is_dynamic) const override;
+  bool GetPartitionDevice(const std::string& partition_name,
+                          BootControlInterface::Slot slot,
                           std::string* device) const override;
   bool IsSlotBootable(BootControlInterface::Slot slot) const override;
   bool MarkSlotUnbootable(BootControlInterface::Slot slot) override;
   bool SetActiveBootSlot(BootControlInterface::Slot slot) override;
   bool MarkBootSuccessfulAsync(base::Callback<void(bool)> callback) override;
-  bool InitPartitionMetadata(Slot slot,
-                             const PartitionMetadata& partition_metadata,
-                             bool update_metadata) override;
-  void Cleanup() override;
+  bool IsSlotMarkedSuccessful(BootControlInterface::Slot slot) const override;
+  DynamicPartitionControlInterface* GetDynamicPartitionControl() override;
 
  private:
   ::android::sp<::android::hardware::boot::V1_0::IBootControl> module_;
-  std::unique_ptr<DynamicPartitionControlInterface> dynamic_control_;
+  std::unique_ptr<DynamicPartitionControlAndroid> dynamic_control_;
 
   friend class BootControlAndroidTest;
 
-  // Wrapper method of IBootControl::getSuffix().
-  bool GetSuffix(Slot slot, std::string* out) const;
-
-  enum class DynamicPartitionDeviceStatus {
-    SUCCESS,
-    ERROR,
-    TRY_STATIC,
-  };
-
-  DynamicPartitionDeviceStatus GetDynamicPartitionDevice(
-      const base::FilePath& device_dir,
-      const std::string& partition_name_suffix,
-      Slot slot,
-      std::string* device) const;
-
-  // Return true if |partition_name_suffix| is a block device of
-  // super partition metadata slot |slot|.
-  bool IsSuperBlockDevice(const base::FilePath& device_dir,
-                          Slot slot,
-                          const std::string& partition_name_suffix) const;
-
-  // Whether the target partitions should be loaded as dynamic partitions. Set
-  // by InitPartitionMetadata() per each update.
-  bool is_target_dynamic_{false};
-
   DISALLOW_COPY_AND_ASSIGN(BootControlAndroid);
 };
 
diff --git a/boot_control_android_unittest.cc b/boot_control_android_unittest.cc
deleted file mode 100644
index bb9903e..0000000
--- a/boot_control_android_unittest.cc
+++ /dev/null
@@ -1,853 +0,0 @@
-//
-// Copyright (C) 2018 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#include "update_engine/boot_control_android.h"
-
-#include <set>
-#include <vector>
-
-#include <base/logging.h>
-#include <base/strings/string_util.h>
-#include <fs_mgr.h>
-#include <gmock/gmock.h>
-#include <gtest/gtest.h>
-#include <libdm/dm.h>
-
-#include "update_engine/mock_boot_control_hal.h"
-#include "update_engine/mock_dynamic_partition_control.h"
-
-using android::dm::DmDeviceState;
-using android::fs_mgr::MetadataBuilder;
-using android::hardware::Void;
-using std::string;
-using testing::_;
-using testing::AnyNumber;
-using testing::Contains;
-using testing::Eq;
-using testing::Invoke;
-using testing::Key;
-using testing::MakeMatcher;
-using testing::Matcher;
-using testing::MatcherInterface;
-using testing::MatchResultListener;
-using testing::NiceMock;
-using testing::Not;
-using testing::Return;
-
-namespace chromeos_update_engine {
-
-constexpr const uint32_t kMaxNumSlots = 2;
-constexpr const char* kSlotSuffixes[kMaxNumSlots] = {"_a", "_b"};
-constexpr const char* kFakeDevicePath = "/fake/dev/path/";
-constexpr const char* kFakeDmDevicePath = "/fake/dm/dev/path/";
-constexpr const uint32_t kFakeMetadataSize = 65536;
-constexpr const char* kDefaultGroup = "foo";
-
-// A map describing the size of each partition.
-// "{name, size}"
-using PartitionSizes = std::map<string, uint64_t>;
-
-// "{name_a, size}"
-using PartitionSuffixSizes = std::map<string, uint64_t>;
-
-using PartitionMetadata = BootControlInterface::PartitionMetadata;
-
-// C++ standards do not allow uint64_t (aka unsigned long) to be the parameter
-// of user-defined literal operators.
-constexpr unsigned long long operator"" _MiB(unsigned long long x) {  // NOLINT
-  return x << 20;
-}
-constexpr unsigned long long operator"" _GiB(unsigned long long x) {  // NOLINT
-  return x << 30;
-}
-
-constexpr uint64_t kDefaultGroupSize = 5_GiB;
-// Super device size. 1 MiB for metadata.
-constexpr uint64_t kDefaultSuperSize = kDefaultGroupSize * 2 + 1_MiB;
-
-template <typename U, typename V>
-std::ostream& operator<<(std::ostream& os, const std::map<U, V>& param) {
-  os << "{";
-  bool first = true;
-  for (const auto& pair : param) {
-    if (!first)
-      os << ", ";
-    os << pair.first << ":" << pair.second;
-    first = false;
-  }
-  return os << "}";
-}
-
-template <typename T>
-std::ostream& operator<<(std::ostream& os, const std::vector<T>& param) {
-  os << "[";
-  bool first = true;
-  for (const auto& e : param) {
-    if (!first)
-      os << ", ";
-    os << e;
-    first = false;
-  }
-  return os << "]";
-}
-
-std::ostream& operator<<(std::ostream& os,
-                         const PartitionMetadata::Partition& p) {
-  return os << "{" << p.name << ", " << p.size << "}";
-}
-
-std::ostream& operator<<(std::ostream& os, const PartitionMetadata::Group& g) {
-  return os << "{" << g.name << ", " << g.size << ", " << g.partitions << "}";
-}
-
-std::ostream& operator<<(std::ostream& os, const PartitionMetadata& m) {
-  return os << m.groups;
-}
-
-inline string GetDevice(const string& name) {
-  return kFakeDevicePath + name;
-}
-
-inline string GetDmDevice(const string& name) {
-  return kFakeDmDevicePath + name;
-}
-
-// TODO(elsk): fs_mgr_get_super_partition_name should be mocked.
-inline string GetSuperDevice(uint32_t slot) {
-  return GetDevice(fs_mgr_get_super_partition_name(slot));
-}
-
-struct TestParam {
-  uint32_t source;
-  uint32_t target;
-};
-std::ostream& operator<<(std::ostream& os, const TestParam& param) {
-  return os << "{source: " << param.source << ", target:" << param.target
-            << "}";
-}
-
-// To support legacy tests, auto-convert {name_a: size} map to
-// PartitionMetadata.
-PartitionMetadata partitionSuffixSizesToMetadata(
-    const PartitionSuffixSizes& partition_sizes) {
-  PartitionMetadata metadata;
-  for (const char* suffix : kSlotSuffixes) {
-    metadata.groups.push_back(
-        {string(kDefaultGroup) + suffix, kDefaultGroupSize, {}});
-  }
-  for (const auto& pair : partition_sizes) {
-    for (size_t suffix_idx = 0; suffix_idx < kMaxNumSlots; ++suffix_idx) {
-      if (base::EndsWith(pair.first,
-                         kSlotSuffixes[suffix_idx],
-                         base::CompareCase::SENSITIVE)) {
-        metadata.groups[suffix_idx].partitions.push_back(
-            {pair.first, pair.second});
-      }
-    }
-  }
-  return metadata;
-}
-
-// To support legacy tests, auto-convert {name: size} map to PartitionMetadata.
-PartitionMetadata partitionSizesToMetadata(
-    const PartitionSizes& partition_sizes) {
-  PartitionMetadata metadata;
-  metadata.groups.push_back({string{kDefaultGroup}, kDefaultGroupSize, {}});
-  for (const auto& pair : partition_sizes) {
-    metadata.groups[0].partitions.push_back({pair.first, pair.second});
-  }
-  return metadata;
-}
-
-std::unique_ptr<MetadataBuilder> NewFakeMetadata(
-    const PartitionMetadata& metadata) {
-  auto builder =
-      MetadataBuilder::New(kDefaultSuperSize, kFakeMetadataSize, kMaxNumSlots);
-  EXPECT_GE(builder->AllocatableSpace(), kDefaultGroupSize * 2);
-  EXPECT_NE(nullptr, builder);
-  if (builder == nullptr)
-    return nullptr;
-  for (const auto& group : metadata.groups) {
-    EXPECT_TRUE(builder->AddGroup(group.name, group.size));
-    for (const auto& partition : group.partitions) {
-      auto p = builder->AddPartition(partition.name, group.name, 0 /* attr */);
-      EXPECT_TRUE(p && builder->ResizePartition(p, partition.size));
-    }
-  }
-  return builder;
-}
-
-class MetadataMatcher : public MatcherInterface<MetadataBuilder*> {
- public:
-  explicit MetadataMatcher(const PartitionSuffixSizes& partition_sizes)
-      : partition_metadata_(partitionSuffixSizesToMetadata(partition_sizes)) {}
-  explicit MetadataMatcher(const PartitionMetadata& partition_metadata)
-      : partition_metadata_(partition_metadata) {}
-
-  bool MatchAndExplain(MetadataBuilder* metadata,
-                       MatchResultListener* listener) const override {
-    bool success = true;
-    for (const auto& group : partition_metadata_.groups) {
-      for (const auto& partition : group.partitions) {
-        auto p = metadata->FindPartition(partition.name);
-        if (p == nullptr) {
-          if (!success)
-            *listener << "; ";
-          *listener << "No partition " << partition.name;
-          success = false;
-          continue;
-        }
-        if (p->size() != partition.size) {
-          if (!success)
-            *listener << "; ";
-          *listener << "Partition " << partition.name << " has size "
-                    << p->size() << ", expected " << partition.size;
-          success = false;
-        }
-        if (p->group_name() != group.name) {
-          if (!success)
-            *listener << "; ";
-          *listener << "Partition " << partition.name << " has group "
-                    << p->group_name() << ", expected " << group.name;
-          success = false;
-        }
-      }
-    }
-    return success;
-  }
-
-  void DescribeTo(std::ostream* os) const override {
-    *os << "expect: " << partition_metadata_;
-  }
-
-  void DescribeNegationTo(std::ostream* os) const override {
-    *os << "expect not: " << partition_metadata_;
-  }
-
- private:
-  PartitionMetadata partition_metadata_;
-};
-
-inline Matcher<MetadataBuilder*> MetadataMatches(
-    const PartitionSuffixSizes& partition_sizes) {
-  return MakeMatcher(new MetadataMatcher(partition_sizes));
-}
-
-inline Matcher<MetadataBuilder*> MetadataMatches(
-    const PartitionMetadata& partition_metadata) {
-  return MakeMatcher(new MetadataMatcher(partition_metadata));
-}
-
-MATCHER_P(HasGroup, group, " has group " + group) {
-  auto groups = arg->ListGroups();
-  return std::find(groups.begin(), groups.end(), group) != groups.end();
-}
-
-class BootControlAndroidTest : public ::testing::Test {
- protected:
-  void SetUp() override {
-    // Fake init bootctl_
-    bootctl_.module_ = new NiceMock<MockBootControlHal>();
-    bootctl_.dynamic_control_ =
-        std::make_unique<NiceMock<MockDynamicPartitionControl>>();
-
-    ON_CALL(module(), getNumberSlots()).WillByDefault(Invoke([] {
-      return kMaxNumSlots;
-    }));
-    ON_CALL(module(), getSuffix(_, _))
-        .WillByDefault(Invoke([](auto slot, auto cb) {
-          EXPECT_LE(slot, kMaxNumSlots);
-          cb(slot < kMaxNumSlots ? kSlotSuffixes[slot] : "");
-          return Void();
-        }));
-
-    ON_CALL(dynamicControl(), IsDynamicPartitionsEnabled())
-        .WillByDefault(Return(true));
-    ON_CALL(dynamicControl(), IsDynamicPartitionsRetrofit())
-        .WillByDefault(Return(false));
-    ON_CALL(dynamicControl(), DeviceExists(_)).WillByDefault(Return(true));
-    ON_CALL(dynamicControl(), GetDeviceDir(_))
-        .WillByDefault(Invoke([](auto path) {
-          *path = kFakeDevicePath;
-          return true;
-        }));
-    ON_CALL(dynamicControl(), GetDmDevicePathByName(_, _))
-        .WillByDefault(Invoke([](auto partition_name_suffix, auto device) {
-          *device = GetDmDevice(partition_name_suffix);
-          return true;
-        }));
-  }
-
-  // Return the mocked HAL module.
-  NiceMock<MockBootControlHal>& module() {
-    return static_cast<NiceMock<MockBootControlHal>&>(*bootctl_.module_);
-  }
-
-  // Return the mocked DynamicPartitionControlInterface.
-  NiceMock<MockDynamicPartitionControl>& dynamicControl() {
-    return static_cast<NiceMock<MockDynamicPartitionControl>&>(
-        *bootctl_.dynamic_control_);
-  }
-
-  // Set the fake metadata to return when LoadMetadataBuilder is called on
-  // |slot|.
-  void SetMetadata(uint32_t slot, const PartitionSuffixSizes& sizes) {
-    SetMetadata(slot, partitionSuffixSizesToMetadata(sizes));
-  }
-
-  void SetMetadata(uint32_t slot, const PartitionMetadata& metadata) {
-    EXPECT_CALL(dynamicControl(),
-                LoadMetadataBuilder(GetSuperDevice(slot), slot, _))
-        .Times(AnyNumber())
-        .WillRepeatedly(Invoke([metadata](auto, auto, auto) {
-          return NewFakeMetadata(metadata);
-        }));
-  }
-
-  // Expect that UnmapPartitionOnDeviceMapper is called on target() metadata
-  // slot with each partition in |partitions|.
-  void ExpectUnmap(const std::set<string>& partitions) {
-    // Error when UnmapPartitionOnDeviceMapper is called on unknown arguments.
-    ON_CALL(dynamicControl(), UnmapPartitionOnDeviceMapper(_, _))
-        .WillByDefault(Return(false));
-
-    for (const auto& partition : partitions) {
-      EXPECT_CALL(dynamicControl(), UnmapPartitionOnDeviceMapper(partition, _))
-          .WillOnce(Invoke([this](auto partition, auto) {
-            mapped_devices_.erase(partition);
-            return true;
-          }));
-    }
-  }
-
-  void ExpectDevicesAreMapped(const std::set<string>& partitions) {
-    ASSERT_EQ(partitions.size(), mapped_devices_.size());
-    for (const auto& partition : partitions) {
-      EXPECT_THAT(mapped_devices_, Contains(Key(Eq(partition))))
-          << "Expect that " << partition << " is mapped, but it is not.";
-    }
-  }
-
-  void ExpectStoreMetadata(const PartitionSuffixSizes& partition_sizes) {
-    ExpectStoreMetadataMatch(MetadataMatches(partition_sizes));
-  }
-
-  virtual void ExpectStoreMetadataMatch(
-      const Matcher<MetadataBuilder*>& matcher) {
-    EXPECT_CALL(dynamicControl(),
-                StoreMetadata(GetSuperDevice(target()), matcher, target()))
-        .WillOnce(Return(true));
-  }
-
-  uint32_t source() { return slots_.source; }
-
-  uint32_t target() { return slots_.target; }
-
-  // Return partition names with suffix of source().
-  string S(const string& name) { return name + kSlotSuffixes[source()]; }
-
-  // Return partition names with suffix of target().
-  string T(const string& name) { return name + kSlotSuffixes[target()]; }
-
-  // Set source and target slots to use before testing.
-  void SetSlots(const TestParam& slots) {
-    slots_ = slots;
-
-    ON_CALL(module(), getCurrentSlot()).WillByDefault(Invoke([this] {
-      return source();
-    }));
-    // Should not store metadata to source slot.
-    EXPECT_CALL(dynamicControl(),
-                StoreMetadata(GetSuperDevice(source()), _, source()))
-        .Times(0);
-    // Should not load metadata from target slot.
-    EXPECT_CALL(dynamicControl(),
-                LoadMetadataBuilder(GetSuperDevice(target()), target(), _))
-        .Times(0);
-  }
-
-  bool InitPartitionMetadata(uint32_t slot,
-                             PartitionSizes partition_sizes,
-                             bool update_metadata = true) {
-    auto m = partitionSizesToMetadata(partition_sizes);
-    LOG(INFO) << m;
-    return bootctl_.InitPartitionMetadata(slot, m, update_metadata);
-  }
-
-  BootControlAndroid bootctl_;  // BootControlAndroid under test.
-  TestParam slots_;
-  // mapped devices through MapPartitionOnDeviceMapper.
-  std::map<string, string> mapped_devices_;
-};
-
-class BootControlAndroidTestP
-    : public BootControlAndroidTest,
-      public ::testing::WithParamInterface<TestParam> {
- public:
-  void SetUp() override {
-    BootControlAndroidTest::SetUp();
-    SetSlots(GetParam());
-  }
-};
-
-// Test resize case. Grow if target metadata contains a partition with a size
-// less than expected.
-TEST_P(BootControlAndroidTestP, NeedGrowIfSizeNotMatchWhenResizing) {
-  SetMetadata(source(),
-              {{S("system"), 2_GiB},
-               {S("vendor"), 1_GiB},
-               {T("system"), 2_GiB},
-               {T("vendor"), 1_GiB}});
-  ExpectStoreMetadata({{S("system"), 2_GiB},
-                       {S("vendor"), 1_GiB},
-                       {T("system"), 3_GiB},
-                       {T("vendor"), 1_GiB}});
-  ExpectUnmap({T("system"), T("vendor")});
-
-  EXPECT_TRUE(
-      InitPartitionMetadata(target(), {{"system", 3_GiB}, {"vendor", 1_GiB}}));
-}
-
-// Test resize case. Shrink if target metadata contains a partition with a size
-// greater than expected.
-TEST_P(BootControlAndroidTestP, NeedShrinkIfSizeNotMatchWhenResizing) {
-  SetMetadata(source(),
-              {{S("system"), 2_GiB},
-               {S("vendor"), 1_GiB},
-               {T("system"), 2_GiB},
-               {T("vendor"), 1_GiB}});
-  ExpectStoreMetadata({{S("system"), 2_GiB},
-                       {S("vendor"), 1_GiB},
-                       {T("system"), 2_GiB},
-                       {T("vendor"), 150_MiB}});
-  ExpectUnmap({T("system"), T("vendor")});
-
-  EXPECT_TRUE(InitPartitionMetadata(target(),
-                                    {{"system", 2_GiB}, {"vendor", 150_MiB}}));
-}
-
-// Test adding partitions on the first run.
-TEST_P(BootControlAndroidTestP, AddPartitionToEmptyMetadata) {
-  SetMetadata(source(), PartitionSuffixSizes{});
-  ExpectStoreMetadata({{T("system"), 2_GiB}, {T("vendor"), 1_GiB}});
-  ExpectUnmap({T("system"), T("vendor")});
-
-  EXPECT_TRUE(
-      InitPartitionMetadata(target(), {{"system", 2_GiB}, {"vendor", 1_GiB}}));
-}
-
-// Test subsequent add case.
-TEST_P(BootControlAndroidTestP, AddAdditionalPartition) {
-  SetMetadata(source(), {{S("system"), 2_GiB}, {T("system"), 2_GiB}});
-  ExpectStoreMetadata(
-      {{S("system"), 2_GiB}, {T("system"), 2_GiB}, {T("vendor"), 1_GiB}});
-  ExpectUnmap({T("system"), T("vendor")});
-
-  EXPECT_TRUE(
-      InitPartitionMetadata(target(), {{"system", 2_GiB}, {"vendor", 1_GiB}}));
-}
-
-// Test delete one partition.
-TEST_P(BootControlAndroidTestP, DeletePartition) {
-  SetMetadata(source(),
-              {{S("system"), 2_GiB},
-               {S("vendor"), 1_GiB},
-               {T("system"), 2_GiB},
-               {T("vendor"), 1_GiB}});
-  // No T("vendor")
-  ExpectStoreMetadata(
-      {{S("system"), 2_GiB}, {S("vendor"), 1_GiB}, {T("system"), 2_GiB}});
-  ExpectUnmap({T("system")});
-
-  EXPECT_TRUE(InitPartitionMetadata(target(), {{"system", 2_GiB}}));
-}
-
-// Test delete all partitions.
-TEST_P(BootControlAndroidTestP, DeleteAll) {
-  SetMetadata(source(),
-              {{S("system"), 2_GiB},
-               {S("vendor"), 1_GiB},
-               {T("system"), 2_GiB},
-               {T("vendor"), 1_GiB}});
-  ExpectStoreMetadata({{S("system"), 2_GiB}, {S("vendor"), 1_GiB}});
-
-  EXPECT_TRUE(InitPartitionMetadata(target(), {}));
-}
-
-// Test corrupt source metadata case.
-TEST_P(BootControlAndroidTestP, CorruptedSourceMetadata) {
-  EXPECT_CALL(dynamicControl(),
-              LoadMetadataBuilder(GetSuperDevice(source()), source(), _))
-      .WillOnce(Invoke([](auto, auto, auto) { return nullptr; }));
-  ExpectUnmap({T("system")});
-
-  EXPECT_FALSE(InitPartitionMetadata(target(), {{"system", 1_GiB}}))
-      << "Should not be able to continue with corrupt source metadata";
-}
-
-// Test that InitPartitionMetadata fail if there is not enough space on the
-// device.
-TEST_P(BootControlAndroidTestP, NotEnoughSpace) {
-  SetMetadata(source(),
-              {{S("system"), 3_GiB},
-               {S("vendor"), 2_GiB},
-               {T("system"), 0},
-               {T("vendor"), 0}});
-  EXPECT_FALSE(
-      InitPartitionMetadata(target(), {{"system", 3_GiB}, {"vendor", 3_GiB}}))
-      << "Should not be able to fit 11GiB data into 10GiB space";
-}
-
-TEST_P(BootControlAndroidTestP, NotEnoughSpaceForSlot) {
-  SetMetadata(source(),
-              {{S("system"), 1_GiB},
-               {S("vendor"), 1_GiB},
-               {T("system"), 0},
-               {T("vendor"), 0}});
-  EXPECT_FALSE(
-      InitPartitionMetadata(target(), {{"system", 3_GiB}, {"vendor", 3_GiB}}))
-      << "Should not be able to grow over size of super / 2";
-}
-
-// Test applying retrofit update on a build with dynamic partitions enabled.
-TEST_P(BootControlAndroidTestP,
-       ApplyRetrofitUpdateOnDynamicPartitionsEnabledBuild) {
-  SetMetadata(source(),
-              {{S("system"), 2_GiB},
-               {S("vendor"), 1_GiB},
-               {T("system"), 2_GiB},
-               {T("vendor"), 1_GiB}});
-  // Should not try to unmap any target partition.
-  EXPECT_CALL(dynamicControl(), UnmapPartitionOnDeviceMapper(_, _)).Times(0);
-  // Should not store metadata to target slot.
-  EXPECT_CALL(dynamicControl(),
-              StoreMetadata(GetSuperDevice(target()), _, target()))
-      .Times(0);
-
-  // Not calling through BootControlAndroidTest::InitPartitionMetadata(), since
-  // we don't want any default group in the PartitionMetadata.
-  EXPECT_TRUE(bootctl_.InitPartitionMetadata(target(), {}, true));
-
-  // Should use dynamic source partitions.
-  EXPECT_CALL(dynamicControl(), GetState(S("system")))
-      .Times(1)
-      .WillOnce(Return(DmDeviceState::ACTIVE));
-  string system_device;
-  EXPECT_TRUE(bootctl_.GetPartitionDevice("system", source(), &system_device));
-  EXPECT_EQ(GetDmDevice(S("system")), system_device);
-
-  // Should use static target partitions without querying dynamic control.
-  EXPECT_CALL(dynamicControl(), GetState(T("system"))).Times(0);
-  EXPECT_TRUE(bootctl_.GetPartitionDevice("system", target(), &system_device));
-  EXPECT_EQ(GetDevice(T("system")), system_device);
-
-  // Static partition "bar".
-  EXPECT_CALL(dynamicControl(), GetState(S("bar"))).Times(0);
-  std::string bar_device;
-  EXPECT_TRUE(bootctl_.GetPartitionDevice("bar", source(), &bar_device));
-  EXPECT_EQ(GetDevice(S("bar")), bar_device);
-
-  EXPECT_CALL(dynamicControl(), GetState(T("bar"))).Times(0);
-  EXPECT_TRUE(bootctl_.GetPartitionDevice("bar", target(), &bar_device));
-  EXPECT_EQ(GetDevice(T("bar")), bar_device);
-}
-
-TEST_P(BootControlAndroidTestP, GetPartitionDeviceWhenResumingUpdate) {
-  // Both of the two slots contain valid partition metadata, since this is
-  // resuming an update.
-  SetMetadata(source(),
-              {{S("system"), 2_GiB},
-               {S("vendor"), 1_GiB},
-               {T("system"), 2_GiB},
-               {T("vendor"), 1_GiB}});
-  SetMetadata(target(),
-              {{S("system"), 2_GiB},
-               {S("vendor"), 1_GiB},
-               {T("system"), 2_GiB},
-               {T("vendor"), 1_GiB}});
-  EXPECT_CALL(dynamicControl(),
-              StoreMetadata(GetSuperDevice(target()), _, target()))
-      .Times(0);
-  EXPECT_TRUE(InitPartitionMetadata(
-      target(), {{"system", 2_GiB}, {"vendor", 1_GiB}}, false));
-
-  // Dynamic partition "system".
-  EXPECT_CALL(dynamicControl(), GetState(S("system")))
-      .Times(1)
-      .WillOnce(Return(DmDeviceState::ACTIVE));
-  string system_device;
-  EXPECT_TRUE(bootctl_.GetPartitionDevice("system", source(), &system_device));
-  EXPECT_EQ(GetDmDevice(S("system")), system_device);
-
-  EXPECT_CALL(dynamicControl(), GetState(T("system")))
-      .Times(1)
-      .WillOnce(Return(DmDeviceState::ACTIVE));
-  EXPECT_TRUE(bootctl_.GetPartitionDevice("system", target(), &system_device));
-  EXPECT_EQ(GetDmDevice(T("system")), system_device);
-
-  // Static partition "bar".
-  EXPECT_CALL(dynamicControl(), GetState(S("bar"))).Times(0);
-  std::string bar_device;
-  EXPECT_TRUE(bootctl_.GetPartitionDevice("bar", source(), &bar_device));
-  EXPECT_EQ(GetDevice(S("bar")), bar_device);
-
-  EXPECT_CALL(dynamicControl(), GetState(T("bar"))).Times(0);
-  EXPECT_TRUE(bootctl_.GetPartitionDevice("bar", target(), &bar_device));
-  EXPECT_EQ(GetDevice(T("bar")), bar_device);
-}
-
-INSTANTIATE_TEST_CASE_P(BootControlAndroidTest,
-                        BootControlAndroidTestP,
-                        testing::Values(TestParam{0, 1}, TestParam{1, 0}));
-
-const PartitionSuffixSizes update_sizes_0() {
-  // Initial state is 0 for "other" slot.
-  return {
-      {"grown_a", 2_GiB},
-      {"shrunk_a", 1_GiB},
-      {"same_a", 100_MiB},
-      {"deleted_a", 150_MiB},
-      // no added_a
-      {"grown_b", 200_MiB},
-      // simulate system_other
-      {"shrunk_b", 0},
-      {"same_b", 0},
-      {"deleted_b", 0},
-      // no added_b
-  };
-}
-
-const PartitionSuffixSizes update_sizes_1() {
-  return {
-      {"grown_a", 2_GiB},
-      {"shrunk_a", 1_GiB},
-      {"same_a", 100_MiB},
-      {"deleted_a", 150_MiB},
-      // no added_a
-      {"grown_b", 3_GiB},
-      {"shrunk_b", 150_MiB},
-      {"same_b", 100_MiB},
-      {"added_b", 150_MiB},
-      // no deleted_b
-  };
-}
-
-const PartitionSuffixSizes update_sizes_2() {
-  return {
-      {"grown_a", 4_GiB},
-      {"shrunk_a", 100_MiB},
-      {"same_a", 100_MiB},
-      {"deleted_a", 64_MiB},
-      // no added_a
-      {"grown_b", 3_GiB},
-      {"shrunk_b", 150_MiB},
-      {"same_b", 100_MiB},
-      {"added_b", 150_MiB},
-      // no deleted_b
-  };
-}
-
-// Test case for first update after the device is manufactured, in which
-// case the "other" slot is likely of size "0" (except system, which is
-// non-zero because of system_other partition)
-TEST_F(BootControlAndroidTest, SimulatedFirstUpdate) {
-  SetSlots({0, 1});
-
-  SetMetadata(source(), update_sizes_0());
-  SetMetadata(target(), update_sizes_0());
-  ExpectStoreMetadata(update_sizes_1());
-  ExpectUnmap({"grown_b", "shrunk_b", "same_b", "added_b"});
-
-  EXPECT_TRUE(InitPartitionMetadata(target(),
-                                    {{"grown", 3_GiB},
-                                     {"shrunk", 150_MiB},
-                                     {"same", 100_MiB},
-                                     {"added", 150_MiB}}));
-}
-
-// After first update, test for the second update. In the second update, the
-// "added" partition is deleted and "deleted" partition is re-added.
-TEST_F(BootControlAndroidTest, SimulatedSecondUpdate) {
-  SetSlots({1, 0});
-
-  SetMetadata(source(), update_sizes_1());
-  SetMetadata(target(), update_sizes_0());
-
-  ExpectStoreMetadata(update_sizes_2());
-  ExpectUnmap({"grown_a", "shrunk_a", "same_a", "deleted_a"});
-
-  EXPECT_TRUE(InitPartitionMetadata(target(),
-                                    {{"grown", 4_GiB},
-                                     {"shrunk", 100_MiB},
-                                     {"same", 100_MiB},
-                                     {"deleted", 64_MiB}}));
-}
-
-TEST_F(BootControlAndroidTest, ApplyingToCurrentSlot) {
-  SetSlots({1, 1});
-  EXPECT_FALSE(InitPartitionMetadata(target(), {}))
-      << "Should not be able to apply to current slot.";
-}
-
-class BootControlAndroidGroupTestP : public BootControlAndroidTestP {
- public:
-  void SetUp() override {
-    BootControlAndroidTestP::SetUp();
-    SetMetadata(
-        source(),
-        {.groups = {SimpleGroup(S("android"), 3_GiB, S("system"), 2_GiB),
-                    SimpleGroup(S("oem"), 2_GiB, S("vendor"), 1_GiB),
-                    SimpleGroup(T("android"), 3_GiB, T("system"), 0),
-                    SimpleGroup(T("oem"), 2_GiB, T("vendor"), 0)}});
-  }
-
-  // Return a simple group with only one partition.
-  PartitionMetadata::Group SimpleGroup(const string& group,
-                                       uint64_t group_size,
-                                       const string& partition,
-                                       uint64_t partition_size) {
-    return {.name = group,
-            .size = group_size,
-            .partitions = {{.name = partition, .size = partition_size}}};
-  }
-
-  void ExpectStoreMetadata(const PartitionMetadata& partition_metadata) {
-    ExpectStoreMetadataMatch(MetadataMatches(partition_metadata));
-  }
-
-  // Expect that target slot is stored with target groups.
-  void ExpectStoreMetadataMatch(
-      const Matcher<MetadataBuilder*>& matcher) override {
-    BootControlAndroidTestP::ExpectStoreMetadataMatch(AllOf(
-        MetadataMatches(PartitionMetadata{
-            .groups = {SimpleGroup(S("android"), 3_GiB, S("system"), 2_GiB),
-                       SimpleGroup(S("oem"), 2_GiB, S("vendor"), 1_GiB)}}),
-        matcher));
-  }
-};
-
-// Allow to resize within group.
-TEST_P(BootControlAndroidGroupTestP, ResizeWithinGroup) {
-  ExpectStoreMetadata(PartitionMetadata{
-      .groups = {SimpleGroup(T("android"), 3_GiB, T("system"), 3_GiB),
-                 SimpleGroup(T("oem"), 2_GiB, T("vendor"), 2_GiB)}});
-  ExpectUnmap({T("system"), T("vendor")});
-
-  EXPECT_TRUE(bootctl_.InitPartitionMetadata(
-      target(),
-      PartitionMetadata{
-          .groups = {SimpleGroup("android", 3_GiB, "system", 3_GiB),
-                     SimpleGroup("oem", 2_GiB, "vendor", 2_GiB)}},
-      true));
-}
-
-TEST_P(BootControlAndroidGroupTestP, NotEnoughSpaceForGroup) {
-  EXPECT_FALSE(bootctl_.InitPartitionMetadata(
-      target(),
-      PartitionMetadata{
-          .groups = {SimpleGroup("android", 3_GiB, "system", 1_GiB),
-                     SimpleGroup("oem", 2_GiB, "vendor", 3_GiB)}},
-      true))
-      << "Should not be able to grow over maximum size of group";
-}
-
-TEST_P(BootControlAndroidGroupTestP, GroupTooBig) {
-  EXPECT_FALSE(bootctl_.InitPartitionMetadata(
-      target(),
-      PartitionMetadata{.groups = {{.name = "android", .size = 3_GiB},
-                                   {.name = "oem", .size = 3_GiB}}},
-      true))
-      << "Should not be able to grow over size of super / 2";
-}
-
-TEST_P(BootControlAndroidGroupTestP, AddPartitionToGroup) {
-  ExpectStoreMetadata(PartitionMetadata{
-      .groups = {
-          {.name = T("android"),
-           .size = 3_GiB,
-           .partitions = {{.name = T("system"), .size = 2_GiB},
-                          {.name = T("product_services"), .size = 1_GiB}}}}});
-  ExpectUnmap({T("system"), T("vendor"), T("product_services")});
-
-  EXPECT_TRUE(bootctl_.InitPartitionMetadata(
-      target(),
-      PartitionMetadata{
-          .groups = {{.name = "android",
-                      .size = 3_GiB,
-                      .partitions = {{.name = "system", .size = 2_GiB},
-                                     {.name = "product_services",
-                                      .size = 1_GiB}}},
-                     SimpleGroup("oem", 2_GiB, "vendor", 2_GiB)}},
-      true));
-}
-
-TEST_P(BootControlAndroidGroupTestP, RemovePartitionFromGroup) {
-  ExpectStoreMetadata(PartitionMetadata{
-      .groups = {{.name = T("android"), .size = 3_GiB, .partitions = {}}}});
-  ExpectUnmap({T("vendor")});
-
-  EXPECT_TRUE(bootctl_.InitPartitionMetadata(
-      target(),
-      PartitionMetadata{
-          .groups = {{.name = "android", .size = 3_GiB, .partitions = {}},
-                     SimpleGroup("oem", 2_GiB, "vendor", 2_GiB)}},
-      true));
-}
-
-TEST_P(BootControlAndroidGroupTestP, AddGroup) {
-  ExpectStoreMetadata(PartitionMetadata{
-      .groups = {
-          SimpleGroup(T("new_group"), 2_GiB, T("new_partition"), 2_GiB)}});
-  ExpectUnmap({T("system"), T("vendor"), T("new_partition")});
-
-  EXPECT_TRUE(bootctl_.InitPartitionMetadata(
-      target(),
-      PartitionMetadata{
-          .groups = {SimpleGroup("android", 2_GiB, "system", 2_GiB),
-                     SimpleGroup("oem", 1_GiB, "vendor", 1_GiB),
-                     SimpleGroup("new_group", 2_GiB, "new_partition", 2_GiB)}},
-      true));
-}
-
-TEST_P(BootControlAndroidGroupTestP, RemoveGroup) {
-  ExpectStoreMetadataMatch(Not(HasGroup(T("oem"))));
-  ExpectUnmap({T("system")});
-  EXPECT_TRUE(bootctl_.InitPartitionMetadata(
-      target(),
-      PartitionMetadata{
-          .groups = {SimpleGroup("android", 2_GiB, "system", 2_GiB)}},
-      true));
-}
-
-TEST_P(BootControlAndroidGroupTestP, ResizeGroup) {
-  ExpectStoreMetadata(PartitionMetadata{
-      .groups = {SimpleGroup(T("android"), 2_GiB, T("system"), 2_GiB),
-                 SimpleGroup(T("oem"), 3_GiB, T("vendor"), 3_GiB)}});
-  ExpectUnmap({T("system"), T("vendor")});
-
-  EXPECT_TRUE(bootctl_.InitPartitionMetadata(
-      target(),
-      PartitionMetadata{
-          .groups = {SimpleGroup("android", 2_GiB, "system", 2_GiB),
-                     SimpleGroup("oem", 3_GiB, "vendor", 3_GiB)}},
-      true));
-}
-
-INSTANTIATE_TEST_CASE_P(BootControlAndroidTest,
-                        BootControlAndroidGroupTestP,
-                        testing::Values(TestParam{0, 1}, TestParam{1, 0}));
-
-}  // namespace chromeos_update_engine
diff --git a/boot_control_chromeos.cc b/boot_control_chromeos.cc
index 3f1eac4..95456f0 100644
--- a/boot_control_chromeos.cc
+++ b/boot_control_chromeos.cc
@@ -34,6 +34,7 @@
 }
 
 #include "update_engine/common/boot_control.h"
+#include "update_engine/common/dynamic_partition_control_stub.h"
 #include "update_engine/common/subprocess.h"
 #include "update_engine/common/utils.h"
 
@@ -134,6 +135,8 @@
     return false;
   }
 
+  dynamic_partition_control_.reset(new DynamicPartitionControlStub());
+
   LOG(INFO) << "Booted from slot " << current_slot_ << " (slot "
             << SlotName(current_slot_) << ") of " << num_slots_
             << " slots present on disk " << boot_disk_name_;
@@ -173,9 +176,11 @@
   return true;
 }
 
-bool BootControlChromeOS::GetPartitionDevice(const string& partition_name,
-                                             unsigned int slot,
-                                             string* device) const {
+bool BootControlChromeOS::GetPartitionDevice(const std::string& partition_name,
+                                             BootControlInterface::Slot slot,
+                                             bool not_in_payload,
+                                             std::string* device,
+                                             bool* is_dynamic) const {
   // Partition name prefixed with |kPartitionNamePrefixDlc| is a DLC module.
   if (base::StartsWith(partition_name,
                        kPartitionNamePrefixDlc,
@@ -201,9 +206,18 @@
     return false;
 
   *device = part_device;
+  if (is_dynamic) {
+    *is_dynamic = false;
+  }
   return true;
 }
 
+bool BootControlChromeOS::GetPartitionDevice(const string& partition_name,
+                                             BootControlInterface::Slot slot,
+                                             string* device) const {
+  return GetPartitionDevice(partition_name, slot, false, device, nullptr);
+}
+
 bool BootControlChromeOS::IsSlotBootable(Slot slot) const {
   int partition_num = GetPartitionNumber(kChromeOSPartitionNameKernel, slot);
   if (partition_num < 0)
@@ -350,13 +364,14 @@
   return -1;
 }
 
-bool BootControlChromeOS::InitPartitionMetadata(
-    Slot slot,
-    const PartitionMetadata& partition_metadata,
-    bool update_metadata) {
-  return true;
+bool BootControlChromeOS::IsSlotMarkedSuccessful(Slot slot) const {
+  LOG(ERROR) << __func__ << " not supported.";
+  return false;
 }
 
-void BootControlChromeOS::Cleanup() {}
+DynamicPartitionControlInterface*
+BootControlChromeOS::GetDynamicPartitionControl() {
+  return dynamic_partition_control_.get();
+}
 
 }  // namespace chromeos_update_engine
diff --git a/boot_control_chromeos.h b/boot_control_chromeos.h
index 109197f..f90e65b 100644
--- a/boot_control_chromeos.h
+++ b/boot_control_chromeos.h
@@ -17,12 +17,14 @@
 #ifndef UPDATE_ENGINE_BOOT_CONTROL_CHROMEOS_H_
 #define UPDATE_ENGINE_BOOT_CONTROL_CHROMEOS_H_
 
+#include <memory>
 #include <string>
 
 #include <base/callback.h>
 #include <gtest/gtest_prod.h>  // for FRIEND_TEST
 
 #include "update_engine/common/boot_control_interface.h"
+#include "update_engine/common/dynamic_partition_control_interface.h"
 
 namespace chromeos_update_engine {
 
@@ -45,15 +47,18 @@
   BootControlInterface::Slot GetCurrentSlot() const override;
   bool GetPartitionDevice(const std::string& partition_name,
                           BootControlInterface::Slot slot,
+                          bool not_in_payload,
+                          std::string* device,
+                          bool* is_dynamic) const override;
+  bool GetPartitionDevice(const std::string& partition_name,
+                          BootControlInterface::Slot slot,
                           std::string* device) const override;
   bool IsSlotBootable(BootControlInterface::Slot slot) const override;
   bool MarkSlotUnbootable(BootControlInterface::Slot slot) override;
   bool SetActiveBootSlot(BootControlInterface::Slot slot) override;
   bool MarkBootSuccessfulAsync(base::Callback<void(bool)> callback) override;
-  bool InitPartitionMetadata(Slot slot,
-                             const PartitionMetadata& partition_metadata,
-                             bool update_metadata) override;
-  void Cleanup() override;
+  bool IsSlotMarkedSuccessful(BootControlInterface::Slot slot) const override;
+  DynamicPartitionControlInterface* GetDynamicPartitionControl() override;
 
  private:
   friend class BootControlChromeOSTest;
@@ -89,6 +94,8 @@
   // The block device of the disk we booted from, without the partition number.
   std::string boot_disk_name_;
 
+  std::unique_ptr<DynamicPartitionControlInterface> dynamic_partition_control_;
+
   DISALLOW_COPY_AND_ASSIGN(BootControlChromeOS);
 };
 
diff --git a/cleanup_previous_update_action.cc b/cleanup_previous_update_action.cc
new file mode 100644
index 0000000..dd9a1ca
--- /dev/null
+++ b/cleanup_previous_update_action.cc
@@ -0,0 +1,415 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "update_engine/cleanup_previous_update_action.h"
+
+#include <chrono>  // NOLINT(build/c++11) -- for merge times
+#include <functional>
+#include <string>
+#include <type_traits>
+
+#include <android-base/properties.h>
+#include <base/bind.h>
+
+#ifndef __ANDROID_RECOVERY__
+#include <statslog.h>
+#endif
+
+#include "update_engine/common/utils.h"
+#include "update_engine/payload_consumer/delta_performer.h"
+
+using android::base::GetBoolProperty;
+using android::snapshot::ISnapshotManager;
+using android::snapshot::SnapshotMergeStats;
+using android::snapshot::UpdateState;
+using brillo::MessageLoop;
+
+constexpr char kBootCompletedProp[] = "sys.boot_completed";
+// Interval to check sys.boot_completed.
+constexpr auto kCheckBootCompletedInterval = base::TimeDelta::FromSeconds(2);
+// Interval to check IBootControl::isSlotMarkedSuccessful
+constexpr auto kCheckSlotMarkedSuccessfulInterval =
+    base::TimeDelta::FromSeconds(2);
+// Interval to call SnapshotManager::ProcessUpdateState
+constexpr auto kWaitForMergeInterval = base::TimeDelta::FromSeconds(2);
+
+#ifdef __ANDROID_RECOVERY__
+static constexpr bool kIsRecovery = true;
+#else
+static constexpr bool kIsRecovery = false;
+#endif
+
+namespace chromeos_update_engine {
+
+CleanupPreviousUpdateAction::CleanupPreviousUpdateAction(
+    PrefsInterface* prefs,
+    BootControlInterface* boot_control,
+    android::snapshot::ISnapshotManager* snapshot,
+    CleanupPreviousUpdateActionDelegateInterface* delegate)
+    : prefs_(prefs),
+      boot_control_(boot_control),
+      snapshot_(snapshot),
+      delegate_(delegate),
+      running_(false),
+      cancel_failed_(false),
+      last_percentage_(0),
+      merge_stats_(nullptr) {}
+
+void CleanupPreviousUpdateAction::PerformAction() {
+  ResumeAction();
+}
+
+void CleanupPreviousUpdateAction::TerminateProcessing() {
+  SuspendAction();
+}
+
+void CleanupPreviousUpdateAction::ResumeAction() {
+  CHECK(prefs_);
+  CHECK(boot_control_);
+
+  LOG(INFO) << "Starting/resuming CleanupPreviousUpdateAction";
+  running_ = true;
+  StartActionInternal();
+}
+
+void CleanupPreviousUpdateAction::SuspendAction() {
+  LOG(INFO) << "Stopping/suspending CleanupPreviousUpdateAction";
+  running_ = false;
+}
+
+void CleanupPreviousUpdateAction::ActionCompleted(ErrorCode error_code) {
+  running_ = false;
+  ReportMergeStats();
+  metadata_device_ = nullptr;
+}
+
+std::string CleanupPreviousUpdateAction::Type() const {
+  return StaticType();
+}
+
+std::string CleanupPreviousUpdateAction::StaticType() {
+  return "CleanupPreviousUpdateAction";
+}
+
+void CleanupPreviousUpdateAction::StartActionInternal() {
+  // Do nothing on non-VAB device.
+  if (!boot_control_->GetDynamicPartitionControl()
+           ->GetVirtualAbFeatureFlag()
+           .IsEnabled()) {
+    processor_->ActionComplete(this, ErrorCode::kSuccess);
+    return;
+  }
+  // SnapshotManager must be available on VAB devices.
+  CHECK(snapshot_ != nullptr);
+  merge_stats_ = snapshot_->GetSnapshotMergeStatsInstance();
+  CHECK(merge_stats_ != nullptr);
+  WaitBootCompletedOrSchedule();
+}
+
+void CleanupPreviousUpdateAction::ScheduleWaitBootCompleted() {
+  TEST_AND_RETURN(running_);
+  MessageLoop::current()->PostDelayedTask(
+      FROM_HERE,
+      base::Bind(&CleanupPreviousUpdateAction::WaitBootCompletedOrSchedule,
+                 base::Unretained(this)),
+      kCheckBootCompletedInterval);
+}
+
+void CleanupPreviousUpdateAction::WaitBootCompletedOrSchedule() {
+  TEST_AND_RETURN(running_);
+  if (!kIsRecovery &&
+      !android::base::GetBoolProperty(kBootCompletedProp, false)) {
+    // repeat
+    ScheduleWaitBootCompleted();
+    return;
+  }
+
+  LOG(INFO) << "Boot completed, waiting on markBootSuccessful()";
+  CheckSlotMarkedSuccessfulOrSchedule();
+}
+
+void CleanupPreviousUpdateAction::ScheduleWaitMarkBootSuccessful() {
+  TEST_AND_RETURN(running_);
+  MessageLoop::current()->PostDelayedTask(
+      FROM_HERE,
+      base::Bind(
+          &CleanupPreviousUpdateAction::CheckSlotMarkedSuccessfulOrSchedule,
+          base::Unretained(this)),
+      kCheckSlotMarkedSuccessfulInterval);
+}
+
+void CleanupPreviousUpdateAction::CheckSlotMarkedSuccessfulOrSchedule() {
+  TEST_AND_RETURN(running_);
+  if (!kIsRecovery &&
+      !boot_control_->IsSlotMarkedSuccessful(boot_control_->GetCurrentSlot())) {
+    ScheduleWaitMarkBootSuccessful();
+  }
+
+  if (metadata_device_ == nullptr) {
+    metadata_device_ = snapshot_->EnsureMetadataMounted();
+  }
+
+  if (metadata_device_ == nullptr) {
+    LOG(ERROR) << "Failed to mount /metadata.";
+    // If metadata is erased but not formatted, it may fail to mount in
+    // recovery. It is safe to skip CleanupPreviousUpdateAction.
+    processor_->ActionComplete(
+        this, kIsRecovery ? ErrorCode::kSuccess : ErrorCode::kError);
+    return;
+  }
+
+  if (kIsRecovery) {
+    auto snapshots_created =
+        snapshot_->RecoveryCreateSnapshotDevices(metadata_device_);
+    switch (snapshots_created) {
+      case android::snapshot::CreateResult::CREATED: {
+        // If the previous update has not finished merging, snapshots exist
+        // and are created here so that ProcessUpdateState can proceed.
+        LOG(INFO) << "Snapshot devices are created";
+        break;
+      }
+      case android::snapshot::CreateResult::NOT_CREATED: {
+        // If there is no previous update, no snapshot devices are created and
+        // ProcessUpdateState will return immediately. Hence, NOT_CREATED is not
+        // considered an error.
+        LOG(INFO) << "Snapshot devices are not created";
+        break;
+      }
+      case android::snapshot::CreateResult::ERROR:
+      default: {
+        LOG(ERROR)
+            << "Failed to create snapshot devices (CreateResult = "
+            << static_cast<
+                   std::underlying_type_t<android::snapshot::CreateResult>>(
+                   snapshots_created);
+        processor_->ActionComplete(this, ErrorCode::kError);
+        return;
+      }
+    }
+  }
+
+  if (!merge_stats_->Start()) {
+    // Not an error because CleanupPreviousUpdateAction may be paused and
+    // resumed while the kernel continues merging snapshots in the background.
+    LOG(WARNING) << "SnapshotMergeStats::Start failed.";
+  }
+  LOG(INFO) << "Waiting for any previous merge request to complete. "
+            << "This can take up to several minutes.";
+  WaitForMergeOrSchedule();
+}
+
+void CleanupPreviousUpdateAction::ScheduleWaitForMerge() {
+  TEST_AND_RETURN(running_);
+  MessageLoop::current()->PostDelayedTask(
+      FROM_HERE,
+      base::Bind(&CleanupPreviousUpdateAction::WaitForMergeOrSchedule,
+                 base::Unretained(this)),
+      kWaitForMergeInterval);
+}
+
+void CleanupPreviousUpdateAction::WaitForMergeOrSchedule() {
+  TEST_AND_RETURN(running_);
+  auto state = snapshot_->ProcessUpdateState(
+      std::bind(&CleanupPreviousUpdateAction::OnMergePercentageUpdate, this),
+      std::bind(&CleanupPreviousUpdateAction::BeforeCancel, this));
+  merge_stats_->set_state(state);
+
+  switch (state) {
+    case UpdateState::None: {
+      LOG(INFO) << "Can't find any snapshot to merge.";
+      ErrorCode error_code = ErrorCode::kSuccess;
+      if (!snapshot_->CancelUpdate()) {
+        error_code = ErrorCode::kError;
+        LOG(INFO) << "Failed to call SnapshotManager::CancelUpdate().";
+      }
+      processor_->ActionComplete(this, error_code);
+      return;
+    }
+
+    case UpdateState::Initiated: {
+      LOG(ERROR) << "Previous update has not been completed, not cleaning up";
+      processor_->ActionComplete(this, ErrorCode::kSuccess);
+      return;
+    }
+
+    case UpdateState::Unverified: {
+      InitiateMergeAndWait();
+      return;
+    }
+
+    case UpdateState::Merging: {
+      ScheduleWaitForMerge();
+      return;
+    }
+
+    case UpdateState::MergeNeedsReboot: {
+      LOG(ERROR) << "Need reboot to finish merging.";
+      processor_->ActionComplete(this, ErrorCode::kError);
+      return;
+    }
+
+    case UpdateState::MergeCompleted: {
+      LOG(INFO) << "Merge finished with state MergeCompleted.";
+      processor_->ActionComplete(this, ErrorCode::kSuccess);
+      return;
+    }
+
+    case UpdateState::MergeFailed: {
+      LOG(ERROR) << "Merge failed. Device may be corrupted.";
+      processor_->ActionComplete(this, ErrorCode::kDeviceCorrupted);
+      return;
+    }
+
+    case UpdateState::Cancelled: {
+      // DeltaPerformer::ResetUpdateProgress failed, hence snapshots are
+      // not deleted to avoid inconsistency.
+      // Nothing can be done here; just try next time.
+      ErrorCode error_code =
+          cancel_failed_ ? ErrorCode::kError : ErrorCode::kSuccess;
+      processor_->ActionComplete(this, error_code);
+      return;
+    }
+
+    default: {
+      // Protobuf has some reserved enum values, so a default case is needed.
+      LOG(FATAL) << "SnapshotManager::ProcessUpdateState returns "
+                 << static_cast<int32_t>(state);
+    }
+  }
+}
+
+bool CleanupPreviousUpdateAction::OnMergePercentageUpdate() {
+  double percentage = 0.0;
+  snapshot_->GetUpdateState(&percentage);
+  if (delegate_) {
+    // libsnapshot uses [0, 100] percentage but update_engine uses [0, 1].
+    delegate_->OnCleanupProgressUpdate(percentage / 100);
+  }
+
+  // Log if percentage increments by at least 1.
+  if (last_percentage_ < static_cast<unsigned int>(percentage)) {
+    last_percentage_ = percentage;
+    LOG(INFO) << "Waiting for merge to complete: " << last_percentage_ << "%.";
+  }
+
+  // Do not continue to wait for merge. Instead, let ProcessUpdateState
+  // return Merging directly so that we can ScheduleWaitForMerge() in
+  // MessageLoop.
+  return false;
+}
+
+bool CleanupPreviousUpdateAction::BeforeCancel() {
+  if (DeltaPerformer::ResetUpdateProgress(
+          prefs_,
+          false /* quick */,
+          false /* skip dynamic partitions metadata*/)) {
+    return true;
+  }
+
+  // ResetUpdateProgress might not work on stub prefs. Do additional checks.
+  LOG(WARNING) << "ProcessUpdateState returns Cancelled but cleanup failed.";
+
+  std::string val;
+  ignore_result(prefs_->GetString(kPrefsDynamicPartitionMetadataUpdated, &val));
+  if (val.empty()) {
+    LOG(INFO) << kPrefsDynamicPartitionMetadataUpdated
+              << " is empty, assuming successful cleanup";
+    return true;
+  }
+  LOG(WARNING)
+      << kPrefsDynamicPartitionMetadataUpdated << " is " << val
+      << ", not deleting snapshots even though UpdateState is Cancelled.";
+  cancel_failed_ = true;
+  return false;
+}
+
+void CleanupPreviousUpdateAction::InitiateMergeAndWait() {
+  TEST_AND_RETURN(running_);
+  LOG(INFO) << "Attempting to initiate merge.";
+  // suspend the VAB merge when running a DSU
+  if (GetBoolProperty("ro.gsid.image_running", false)) {
+    LOG(WARNING) << "Suspend the VAB merge when running a DSU.";
+    processor_->ActionComplete(this, ErrorCode::kError);
+    return;
+  }
+
+  if (snapshot_->InitiateMerge()) {
+    WaitForMergeOrSchedule();
+    return;
+  }
+
+  LOG(WARNING) << "InitiateMerge failed.";
+  auto state = snapshot_->GetUpdateState();
+  merge_stats_->set_state(state);
+  if (state == UpdateState::Unverified) {
+    // We are stuck at the Unverified state. This can happen if the update has
+    // been applied but booting into the new slot has not even been attempted
+    // yet (in libsnapshot, the rollback indicator does not exist); for
+    // example, if update_engine restarts before the device reboots, this
+    // state may be reached. Nothing should be done here.
+    LOG(WARNING) << "InitiateMerge leaves the device at "
+                 << "UpdateState::Unverified. (Did update_engine "
+                 << "restart?)";
+    processor_->ActionComplete(this, ErrorCode::kSuccess);
+    return;
+  }
+
+  // The state does seem to have advanced.
+  // This is possibly racy: for example, on a userdebug build, the user may
+  // have manually initiated a merge with snapshotctl since the last time
+  // update_engine checked UpdateState. Hence, just call
+  // WaitForMergeOrSchedule one more time.
+  LOG(WARNING) << "InitiateMerge failed but GetUpdateState returned "
+               << android::snapshot::UpdateState_Name(state)
+               << ", try to wait for merge again.";
+  WaitForMergeOrSchedule();
+  return;
+}
+
+void CleanupPreviousUpdateAction::ReportMergeStats() {
+  auto result = merge_stats_->Finish();
+  if (result == nullptr) {
+    LOG(WARNING) << "Not reporting merge stats because "
+                    "SnapshotMergeStats::Finish failed.";
+    return;
+  }
+
+#ifdef __ANDROID_RECOVERY__
+  LOG(INFO) << "Skip reporting merge stats in recovery.";
+#else
+  const auto& report = result->report();
+
+  if (report.state() == UpdateState::None ||
+      report.state() == UpdateState::Initiated ||
+      report.state() == UpdateState::Unverified) {
+    LOG(INFO) << "Not reporting merge stats because state is "
+              << android::snapshot::UpdateState_Name(report.state());
+    return;
+  }
+
+  auto passed_ms = std::chrono::duration_cast<std::chrono::milliseconds>(
+      result->merge_time());
+  LOG(INFO) << "Reporting merge stats: "
+            << android::snapshot::UpdateState_Name(report.state()) << " in "
+            << passed_ms.count() << "ms (resumed " << report.resume_count()
+            << " times)";
+  android::util::stats_write(android::util::SNAPSHOT_MERGE_REPORTED,
+                             static_cast<int32_t>(report.state()),
+                             static_cast<int64_t>(passed_ms.count()),
+                             static_cast<int32_t>(report.resume_count()));
+#endif
+}
+
+}  // namespace chromeos_update_engine
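The action above never blocks the message loop: each Wait*OrSchedule() step either advances to the next phase or re-posts itself with a two-second delay. Below is a minimal sketch of that poll-or-reschedule pattern, reusing the same MessageLoop/Bind calls as the file; the class name, log text, and next-phase hook are illustrative only.

    #include <android-base/properties.h>
    #include <base/bind.h>
    #include <base/logging.h>
    #include <base/time/time.h>
    #include <brillo/message_loops/message_loop.h>

    // Illustrative poller: re-posts itself every two seconds until the
    // sys.boot_completed property flips to true, then stops.
    class BootCompletedPoller {
     public:
      void Start() { PollOnce(); }

     private:
      void PollOnce() {
        if (android::base::GetBoolProperty("sys.boot_completed", false)) {
          LOG(INFO) << "Boot completed; the next phase would start here.";
          return;
        }
        // Not ready yet: schedule another check instead of blocking the loop.
        brillo::MessageLoop::current()->PostDelayedTask(
            FROM_HERE,
            base::Bind(&BootCompletedPoller::PollOnce, base::Unretained(this)),
            base::TimeDelta::FromSeconds(2));
      }
    };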
diff --git a/cleanup_previous_update_action.h b/cleanup_previous_update_action.h
new file mode 100644
index 0000000..6f6ce07
--- /dev/null
+++ b/cleanup_previous_update_action.h
@@ -0,0 +1,95 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_CLEANUP_PREVIOUS_UPDATE_ACTION_H_
+#define UPDATE_ENGINE_CLEANUP_PREVIOUS_UPDATE_ACTION_H_
+
+#include <chrono>  // NOLINT(build/c++11) -- for merge times
+#include <memory>
+#include <string>
+
+#include <brillo/message_loops/message_loop.h>
+#include <libsnapshot/snapshot.h>
+#include <libsnapshot/snapshot_stats.h>
+
+#include "update_engine/common/action.h"
+#include "update_engine/common/boot_control_interface.h"
+#include "update_engine/common/cleanup_previous_update_action_delegate.h"
+#include "update_engine/common/error_code.h"
+#include "update_engine/common/prefs_interface.h"
+
+namespace chromeos_update_engine {
+
+class CleanupPreviousUpdateAction;
+
+template <>
+class ActionTraits<CleanupPreviousUpdateAction> {
+ public:
+  typedef NoneType InputObjectType;
+  typedef NoneType OutputObjectType;
+};
+
+// On Android Virtual A/B devices, clean up snapshots from previous update
+// attempt. See DynamicPartitionControlAndroid::CleanupSuccessfulUpdate.
+class CleanupPreviousUpdateAction : public Action<CleanupPreviousUpdateAction> {
+ public:
+  CleanupPreviousUpdateAction(
+      PrefsInterface* prefs,
+      BootControlInterface* boot_control,
+      android::snapshot::ISnapshotManager* snapshot,
+      CleanupPreviousUpdateActionDelegateInterface* delegate);
+
+  void PerformAction() override;
+  void SuspendAction() override;
+  void ResumeAction() override;
+  void TerminateProcessing() override;
+  void ActionCompleted(ErrorCode error_code) override;
+  std::string Type() const override;
+  static std::string StaticType();
+  typedef ActionTraits<CleanupPreviousUpdateAction>::InputObjectType
+      InputObjectType;
+  typedef ActionTraits<CleanupPreviousUpdateAction>::OutputObjectType
+      OutputObjectType;
+
+ private:
+  PrefsInterface* prefs_;
+  BootControlInterface* boot_control_;
+  android::snapshot::ISnapshotManager* snapshot_;
+  CleanupPreviousUpdateActionDelegateInterface* delegate_;
+  std::unique_ptr<android::snapshot::AutoDevice> metadata_device_;
+  bool running_{false};
+  bool cancel_failed_{false};
+  unsigned int last_percentage_{0};
+  android::snapshot::ISnapshotMergeStats* merge_stats_;
+
+  void StartActionInternal();
+  void ScheduleWaitBootCompleted();
+  void WaitBootCompletedOrSchedule();
+  void ScheduleWaitMarkBootSuccessful();
+  void CheckSlotMarkedSuccessfulOrSchedule();
+  void ScheduleWaitForMerge();
+  void WaitForMergeOrSchedule();
+  void InitiateMergeAndWait();
+  void ReportMergeStats();
+
+  // Callbacks to ProcessUpdateState.
+  bool OnMergePercentageUpdate();
+  bool BeforeCancel();
+};
+
+}  // namespace chromeos_update_engine
+
+#endif  // UPDATE_ENGINE_CLEANUP_PREVIOUS_UPDATE_ACTION_H_
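For orientation, a hedged sketch of constructing and enqueueing this action; the helper function, prefs, boot_control, snapshot_manager, and processor are assumptions, and passing a null delegate is fine because the action checks it before reporting progress.

    #include <memory>
    #include <utility>

    #include "update_engine/cleanup_previous_update_action.h"

    // Sketch only; the real wiring lives elsewhere in update_engine.
    void EnqueueCleanup(chromeos_update_engine::PrefsInterface* prefs,
                        chromeos_update_engine::BootControlInterface* boot_control,
                        android::snapshot::ISnapshotManager* snapshot_manager,
                        chromeos_update_engine::ActionProcessor* processor) {
      auto cleanup =
          std::make_unique<chromeos_update_engine::CleanupPreviousUpdateAction>(
              prefs, boot_control, snapshot_manager, /*delegate=*/nullptr);
      // Assumes the processor takes ownership of enqueued actions.
      processor->EnqueueAction(std::move(cleanup));
      processor->StartProcessing();
    }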
diff --git a/client_library/include/update_engine/update_status.h b/client_library/include/update_engine/update_status.h
index b1cf1f8..043a36e 100644
--- a/client_library/include/update_engine/update_status.h
+++ b/client_library/include/update_engine/update_status.h
@@ -48,6 +48,7 @@
   // Broadcast this state when an update aborts because user preferences do not
   // allow updates, e.g. over cellular network.
   NEED_PERMISSION_TO_UPDATE = 10,
+  CLEANUP_PREVIOUS_UPDATE = 11,
 
   // This value is exclusively used in Chrome. DO NOT define nor use it.
   // TODO(crbug.com/977320): Remove this value from chrome by refactoring the
diff --git a/common/action.h b/common/action.h
index 9e2f5ff..fd82c2d 100644
--- a/common/action.h
+++ b/common/action.h
@@ -222,6 +222,17 @@
       out_pipe_;
 };
 
+// An action that does nothing and completes with kSuccess immediately.
+class NoOpAction : public AbstractAction {
+ public:
+  ~NoOpAction() override {}
+  void PerformAction() override {
+    processor_->ActionComplete(this, ErrorCode::kSuccess);
+  }
+  static std::string StaticType() { return "NoOpAction"; }
+  std::string Type() const override { return StaticType(); }
+};
+
 };  // namespace chromeos_update_engine
 
 #endif  // UPDATE_ENGINE_COMMON_ACTION_H_
diff --git a/common/boot_control_interface.h b/common/boot_control_interface.h
index 392d785..c93de5c 100644
--- a/common/boot_control_interface.h
+++ b/common/boot_control_interface.h
@@ -25,6 +25,9 @@
 #include <base/callback.h>
 #include <base/macros.h>
 
+#include "update_engine/common/dynamic_partition_control_interface.h"
+#include "update_engine/update_metadata.pb.h"
+
 namespace chromeos_update_engine {
 
 // The abstract boot control interface defines the interaction with the
@@ -35,19 +38,6 @@
  public:
   using Slot = unsigned int;
 
-  struct PartitionMetadata {
-    struct Partition {
-      std::string name;
-      uint64_t size;
-    };
-    struct Group {
-      std::string name;
-      uint64_t size;
-      std::vector<Partition> partitions;
-    };
-    std::vector<Group> groups;
-  };
-
   static const Slot kInvalidSlot = UINT_MAX;
 
   virtual ~BootControlInterface() = default;
@@ -67,9 +57,20 @@
   // The |slot| number must be between 0 and GetNumSlots() - 1 and the
   // |partition_name| is a platform-specific name that identifies a partition on
   // every slot. In order to access the dynamic partitions in the target slot,
-  // InitPartitionMetadata() must be called (once per payload) prior to calling
-  // this function. On success, returns true and stores the block device in
-  // |device|.
+  // GetDynamicPartitionControl()->PreparePartitionsForUpdate() must be called
+  // (with |update| == true the first time for a payload, and false for the
+  // remaining times) prior to calling this function.
+  // The handling may be different based on whether the partition is included
+  // in the update payload. On success, returns true, stores the block device
+  // in |device|, and stores whether the partition is dynamic in |is_dynamic|.
+  virtual bool GetPartitionDevice(const std::string& partition_name,
+                                  Slot slot,
+                                  bool not_in_payload,
+                                  std::string* device,
+                                  bool* is_dynamic) const = 0;
+
+  // Overload of the above function. We assume the partition is always included
+  // in the payload.
   virtual bool GetPartitionDevice(const std::string& partition_name,
                                   Slot slot,
                                   std::string* device) const = 0;
@@ -94,17 +95,11 @@
   // of the operation.
   virtual bool MarkBootSuccessfulAsync(base::Callback<void(bool)> callback) = 0;
 
-  // Initializes the metadata of the underlying partitions for a given |slot|
-  // and sets up the states for accessing dynamic partitions.
-  // |partition_metadata| will be written to the specified |slot| if
-  // |update_metadata| is set.
-  virtual bool InitPartitionMetadata(
-      Slot slot,
-      const PartitionMetadata& partition_metadata,
-      bool update_metadata) = 0;
+  // Check whether |slot| has been marked as booted successfully.
+  virtual bool IsSlotMarkedSuccessful(Slot slot) const = 0;
 
-  // Do necessary clean-up operations after the whole update.
-  virtual void Cleanup() = 0;
+  // Return the dynamic partition control interface.
+  virtual DynamicPartitionControlInterface* GetDynamicPartitionControl() = 0;
 
   // Return a human-readable slot name used for logging.
   static std::string SlotName(Slot slot) {
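A hedged caller-side sketch of the new five-argument GetPartitionDevice() overload declared above; the wrapper function and its arguments are illustrative.

    #include <string>

    #include <base/logging.h>

    #include "update_engine/common/boot_control_interface.h"

    // |boot_control| is any implementation and |target_slot| was previously
    // prepared via GetDynamicPartitionControl()->PreparePartitionsForUpdate().
    void LogSystemDevice(
        const chromeos_update_engine::BootControlInterface& boot_control,
        chromeos_update_engine::BootControlInterface::Slot target_slot) {
      std::string device;
      bool is_dynamic = false;
      if (boot_control.GetPartitionDevice(
              "system", target_slot, false /* not_in_payload */, &device,
              &is_dynamic)) {
        LOG(INFO) << "system maps to " << device
                  << (is_dynamic ? " (dynamic)" : " (static)");
      }
    }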
diff --git a/common/boot_control_stub.cc b/common/boot_control_stub.cc
index 0fe8a98..907f670 100644
--- a/common/boot_control_stub.cc
+++ b/common/boot_control_stub.cc
@@ -15,6 +15,7 @@
 //
 
 #include "update_engine/common/boot_control_stub.h"
+#include "update_engine/common/dynamic_partition_control_stub.h"
 
 #include <base/logging.h>
 
@@ -22,6 +23,9 @@
 
 namespace chromeos_update_engine {
 
+BootControlStub::BootControlStub()
+    : dynamic_partition_control_(new DynamicPartitionControlStub()) {}
+
 unsigned int BootControlStub::GetNumSlots() const {
   return 0;
 }
@@ -31,6 +35,15 @@
   return 0;
 }
 
+bool BootControlStub::GetPartitionDevice(const std::string& partition_name,
+                                         BootControlInterface::Slot slot,
+                                         bool not_in_payload,
+                                         std::string* device,
+                                         bool* is_dynamic) const {
+  LOG(ERROR) << __FUNCTION__ << " should never be called.";
+  return false;
+}
+
 bool BootControlStub::GetPartitionDevice(const string& partition_name,
                                          Slot slot,
                                          string* device) const {
@@ -59,16 +72,14 @@
   return false;
 }
 
-bool BootControlStub::InitPartitionMetadata(
-    Slot slot,
-    const PartitionMetadata& partition_metadata,
-    bool update_metadata) {
+bool BootControlStub::IsSlotMarkedSuccessful(Slot slot) const {
   LOG(ERROR) << __FUNCTION__ << " should never be called.";
   return false;
 }
 
-void BootControlStub::Cleanup() {
-  LOG(ERROR) << __FUNCTION__ << " should never be called.";
+DynamicPartitionControlInterface*
+BootControlStub::GetDynamicPartitionControl() {
+  return dynamic_partition_control_.get();
 }
 
 }  // namespace chromeos_update_engine
diff --git a/common/boot_control_stub.h b/common/boot_control_stub.h
index 8dfaffc..a1bdb96 100644
--- a/common/boot_control_stub.h
+++ b/common/boot_control_stub.h
@@ -17,9 +17,11 @@
 #ifndef UPDATE_ENGINE_COMMON_BOOT_CONTROL_STUB_H_
 #define UPDATE_ENGINE_COMMON_BOOT_CONTROL_STUB_H_
 
+#include <memory>
 #include <string>
 
 #include "update_engine/common/boot_control_interface.h"
+#include "update_engine/common/dynamic_partition_control_interface.h"
 
 namespace chromeos_update_engine {
 
@@ -32,25 +34,30 @@
 // implementation is in use.
 class BootControlStub : public BootControlInterface {
  public:
-  BootControlStub() = default;
+  BootControlStub();
   ~BootControlStub() = default;
 
   // BootControlInterface overrides.
   unsigned int GetNumSlots() const override;
   BootControlInterface::Slot GetCurrentSlot() const override;
   bool GetPartitionDevice(const std::string& partition_name,
+                          Slot slot,
+                          bool not_in_payload,
+                          std::string* device,
+                          bool* is_dynamic) const override;
+  bool GetPartitionDevice(const std::string& partition_name,
                           BootControlInterface::Slot slot,
                           std::string* device) const override;
   bool IsSlotBootable(BootControlInterface::Slot slot) const override;
   bool MarkSlotUnbootable(BootControlInterface::Slot slot) override;
   bool SetActiveBootSlot(BootControlInterface::Slot slot) override;
   bool MarkBootSuccessfulAsync(base::Callback<void(bool)> callback) override;
-  bool InitPartitionMetadata(Slot slot,
-                             const PartitionMetadata& partition_metadata,
-                             bool update_metadata) override;
-  void Cleanup() override;
+  bool IsSlotMarkedSuccessful(BootControlInterface::Slot slot) const override;
+  DynamicPartitionControlInterface* GetDynamicPartitionControl() override;
 
  private:
+  std::unique_ptr<DynamicPartitionControlInterface> dynamic_partition_control_;
+
   DISALLOW_COPY_AND_ASSIGN(BootControlStub);
 };
 
diff --git a/common/cleanup_previous_update_action_delegate.h b/common/cleanup_previous_update_action_delegate.h
new file mode 100644
index 0000000..7dad9c5
--- /dev/null
+++ b/common/cleanup_previous_update_action_delegate.h
@@ -0,0 +1,32 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_CLEANUP_PREVIOUS_UPDATE_ACTION_DELEGETE_H_
+#define UPDATE_ENGINE_CLEANUP_PREVIOUS_UPDATE_ACTION_DELEGETE_H_
+
+namespace chromeos_update_engine {
+
+// Delegate interface for CleanupPreviousUpdateAction.
+class CleanupPreviousUpdateActionDelegateInterface {
+ public:
+  virtual ~CleanupPreviousUpdateActionDelegateInterface() {}
+  // |progress| is within [0, 1]
+  virtual void OnCleanupProgressUpdate(double progress) = 0;
+};
+
+}  // namespace chromeos_update_engine
+
+#endif  // UPDATE_ENGINE_CLEANUP_PREVIOUS_UPDATE_ACTION_DELEGETE_H_
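As a usage illustration, a delegate implementation can be as small as a progress logger; the class name below is made up for the example.

    #include <base/logging.h>

    #include "update_engine/common/cleanup_previous_update_action_delegate.h"

    // Hypothetical delegate that only logs cleanup progress.
    class LoggingCleanupDelegate
        : public chromeos_update_engine::
              CleanupPreviousUpdateActionDelegateInterface {
     public:
      void OnCleanupProgressUpdate(double progress) override {
        // |progress| is within [0, 1]; scale to percent for readability.
        LOG(INFO) << "Previous-update cleanup at " << progress * 100 << "%";
      }
    };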
diff --git a/common/constants.cc b/common/constants.cc
index ac652ea..fa13a38 100644
--- a/common/constants.cc
+++ b/common/constants.cc
@@ -128,8 +128,7 @@
 // Set "SWITCH_SLOT_ON_REBOOT=0" to skip marking the updated partitions active.
 // The default is 1 (always switch slot if update succeeded).
 const char kPayloadPropertySwitchSlotOnReboot[] = "SWITCH_SLOT_ON_REBOOT";
-// Set "RUN_POST_INSTALL=0" to skip running post install, this will only be
-// honored if we're resuming an update and post install has already succeeded.
+// Set "RUN_POST_INSTALL=0" to skip running optional post install.
 // The default is 1 (always run post install).
 const char kPayloadPropertyRunPostInstall[] = "RUN_POST_INSTALL";
 
diff --git a/common/constants.h b/common/constants.h
index 248fd05..eb489fc 100644
--- a/common/constants.h
+++ b/common/constants.h
@@ -214,6 +214,9 @@
 const int kDownloadConnectTimeoutSeconds = 30;
 const int kDownloadP2PConnectTimeoutSeconds = 5;
 
+// Size in bytes of SHA256 hash.
+const int kSHA256Size = 32;
+
 }  // namespace chromeos_update_engine
 
 #endif  // UPDATE_ENGINE_COMMON_CONSTANTS_H_
diff --git a/common/dynamic_partition_control_interface.h b/common/dynamic_partition_control_interface.h
new file mode 100644
index 0000000..7289dee
--- /dev/null
+++ b/common/dynamic_partition_control_interface.h
@@ -0,0 +1,137 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_COMMON_DYNAMIC_PARTITION_CONTROL_INTERFACE_H_
+#define UPDATE_ENGINE_COMMON_DYNAMIC_PARTITION_CONTROL_INTERFACE_H_
+
+#include <stdint.h>
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "update_engine/common/action.h"
+#include "update_engine/common/cleanup_previous_update_action_delegate.h"
+#include "update_engine/common/error_code.h"
+#include "update_engine/update_metadata.pb.h"
+
+namespace chromeos_update_engine {
+
+struct FeatureFlag {
+  enum class Value { NONE = 0, RETROFIT, LAUNCH };
+  constexpr explicit FeatureFlag(Value value) : value_(value) {}
+  constexpr bool IsEnabled() const { return value_ != Value::NONE; }
+  constexpr bool IsRetrofit() const { return value_ == Value::RETROFIT; }
+  constexpr bool IsLaunch() const { return value_ == Value::LAUNCH; }
+
+ private:
+  Value value_;
+};
+
+class BootControlInterface;
+class PrefsInterface;
+
+class DynamicPartitionControlInterface {
+ public:
+  virtual ~DynamicPartitionControlInterface() = default;
+
+  // Return the feature flags of dynamic partitions on this device.
+  // Return RETROFIT iff dynamic partitions are retrofitted on this device,
+  //        LAUNCH iff this device is launched with dynamic partitions,
+  //        NONE iff dynamic partitions are disabled on this device.
+  virtual FeatureFlag GetDynamicPartitionsFeatureFlag() = 0;
+
+  // Return the feature flags of Virtual A/B on this device.
+  virtual FeatureFlag GetVirtualAbFeatureFlag() = 0;
+
+  // Attempt to optimize |operation|.
+  // If successful, |optimized| contains an operation with the extents that
+  // need to be written.
+  // On failure, no optimization is available, and the caller should perform
+  // |operation| directly.
+  // |partition_name| should not have the slot suffix; implementation of
+  // DynamicPartitionControlInterface checks partition at the target slot
+  // previously set with PreparePartitionsForUpdate().
+  virtual bool OptimizeOperation(const std::string& partition_name,
+                                 const InstallOperation& operation,
+                                 InstallOperation* optimized) = 0;
+
+  // Do necessary cleanups before destroying the object.
+  virtual void Cleanup() = 0;
+
+  // Prepare all partitions for an update specified in |manifest|.
+  // This is needed before calling MapPartitionOnDeviceMapper(), otherwise the
+  // device would be mapped in an inconsistent way.
+  // If |update| is set, creates snapshots and writes super partition metadata.
+  // If |required_size| is not null and call fails due to insufficient space,
+  // |required_size| will be set to total free space required on userdata
+  // partition to apply the update. Otherwise (call succeeds, or fails
+  // due to other errors), |required_size| is set to zero.
+  virtual bool PreparePartitionsForUpdate(uint32_t source_slot,
+                                          uint32_t target_slot,
+                                          const DeltaArchiveManifest& manifest,
+                                          bool update,
+                                          uint64_t* required_size) = 0;
+
+  // After writing to new partitions, before rebooting into the new slot, call
+  // this function to indicate writes to new partitions are done.
+  virtual bool FinishUpdate(bool powerwash_required) = 0;
+
+  // Get an action to clean up previous update.
+  // Return NoOpAction on non-Virtual A/B devices.
+  // Before applying the next update, run this action to clean up the previous
+  // update's files. The returned action does not complete until delta files
+  // are merged into the current OS partitions and cleanup has finished.
+  // - If successful, action completes with kSuccess.
+  // - If any error, but caller should retry after reboot, action completes with
+  //   kError.
+  // - If any irrecoverable failures, action completes with kDeviceCorrupted.
+  //
+  // See ResetUpdate for differences between CleanupPreviousUpdateAction and
+  // ResetUpdate.
+  virtual std::unique_ptr<AbstractAction> GetCleanupPreviousUpdateAction(
+      BootControlInterface* boot_control,
+      PrefsInterface* prefs,
+      CleanupPreviousUpdateActionDelegateInterface* delegate) = 0;
+
+  // Called after an unwanted payload has been successfully applied and the
+  // device has not yet been rebooted.
+  //
+  // For snapshot updates (Virtual A/B), it calls
+  // DeltaPerformer::ResetUpdateProgress(false /* quick */) and
+  // frees previously allocated space; the next update will need to be
+  // started over.
+  //
+  // Note: CleanupPreviousUpdateAction does not do anything if an update is in
+  // progress, while ResetUpdate() forcefully frees previously
+  // allocated space for snapshot updates.
+  virtual bool ResetUpdate(PrefsInterface* prefs) = 0;
+
+  // Reads the dynamic partitions metadata from the current slot, and puts the
+  // names of the dynamic partitions with the current suffix into |partitions|.
+  // Returns true on success.
+  virtual bool ListDynamicPartitionsForSlot(
+      uint32_t current_slot, std::vector<std::string>* partitions) = 0;
+
+  // Finds a possible location that lists all block devices by name, and puts
+  // the result in |path|. Returns true on success.
+  // Sample result: /dev/block/by-name/
+  virtual bool GetDeviceDir(std::string* path) = 0;
+};
+
+}  // namespace chromeos_update_engine
+
+#endif  // UPDATE_ENGINE_COMMON_DYNAMIC_PARTITION_CONTROL_INTERFACE_H_
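A short sketch of how callers typically branch on the FeatureFlag values defined above; the function and its comments are illustrative, not part of the change.

    #include "update_engine/common/dynamic_partition_control_interface.h"

    // Illustrative branching on the feature flags; |dpc| is any implementation.
    void DescribeDevice(
        chromeos_update_engine::DynamicPartitionControlInterface* dpc) {
      if (!dpc->GetDynamicPartitionsFeatureFlag().IsEnabled()) {
        // No dynamic partitions: every partition is a plain block device.
      } else if (dpc->GetDynamicPartitionsFeatureFlag().IsRetrofit()) {
        // Retrofitted dynamic partitions: handled differently from devices
        // that launched with dynamic partitions.
      }
      if (dpc->GetVirtualAbFeatureFlag().IsEnabled()) {
        // Virtual A/B: writes go to snapshots that are merged after reboot.
      }
    }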
diff --git a/common/dynamic_partition_control_stub.cc b/common/dynamic_partition_control_stub.cc
new file mode 100644
index 0000000..cde36af
--- /dev/null
+++ b/common/dynamic_partition_control_stub.cc
@@ -0,0 +1,79 @@
+//
+// Copyright (C) 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include <stdint.h>
+
+#include <memory>
+#include <string>
+
+#include <base/logging.h>
+
+#include "update_engine/common/dynamic_partition_control_stub.h"
+
+namespace chromeos_update_engine {
+
+FeatureFlag DynamicPartitionControlStub::GetDynamicPartitionsFeatureFlag() {
+  return FeatureFlag(FeatureFlag::Value::NONE);
+}
+
+FeatureFlag DynamicPartitionControlStub::GetVirtualAbFeatureFlag() {
+  return FeatureFlag(FeatureFlag::Value::NONE);
+}
+
+bool DynamicPartitionControlStub::OptimizeOperation(
+    const std::string& partition_name,
+    const InstallOperation& operation,
+    InstallOperation* optimized) {
+  return false;
+}
+
+void DynamicPartitionControlStub::Cleanup() {}
+
+bool DynamicPartitionControlStub::PreparePartitionsForUpdate(
+    uint32_t source_slot,
+    uint32_t target_slot,
+    const DeltaArchiveManifest& manifest,
+    bool update,
+    uint64_t* required_size) {
+  return true;
+}
+
+bool DynamicPartitionControlStub::FinishUpdate(bool powerwash_required) {
+  return true;
+}
+
+std::unique_ptr<AbstractAction>
+DynamicPartitionControlStub::GetCleanupPreviousUpdateAction(
+    BootControlInterface* boot_control,
+    PrefsInterface* prefs,
+    CleanupPreviousUpdateActionDelegateInterface* delegate) {
+  return std::make_unique<NoOpAction>();
+}
+
+bool DynamicPartitionControlStub::ResetUpdate(PrefsInterface* prefs) {
+  return false;
+}
+
+bool DynamicPartitionControlStub::ListDynamicPartitionsForSlot(
+    uint32_t current_slot, std::vector<std::string>* partitions) {
+  return true;
+}
+
+bool DynamicPartitionControlStub::GetDeviceDir(std::string* path) {
+  return true;
+}
+
+}  // namespace chromeos_update_engine
diff --git a/common/dynamic_partition_control_stub.h b/common/dynamic_partition_control_stub.h
new file mode 100644
index 0000000..28e3e6a
--- /dev/null
+++ b/common/dynamic_partition_control_stub.h
@@ -0,0 +1,58 @@
+//
+// Copyright (C) 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_COMMON_DYNAMIC_PARTITION_CONTROL_STUB_H_
+#define UPDATE_ENGINE_COMMON_DYNAMIC_PARTITION_CONTROL_STUB_H_
+
+#include <stdint.h>
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "update_engine/common/dynamic_partition_control_interface.h"
+
+namespace chromeos_update_engine {
+
+class DynamicPartitionControlStub : public DynamicPartitionControlInterface {
+ public:
+  FeatureFlag GetDynamicPartitionsFeatureFlag() override;
+  FeatureFlag GetVirtualAbFeatureFlag() override;
+  bool OptimizeOperation(const std::string& partition_name,
+                         const InstallOperation& operation,
+                         InstallOperation* optimized) override;
+  void Cleanup() override;
+  bool PreparePartitionsForUpdate(uint32_t source_slot,
+                                  uint32_t target_slot,
+                                  const DeltaArchiveManifest& manifest,
+                                  bool update,
+                                  uint64_t* required_size) override;
+
+  bool FinishUpdate(bool powerwash_required) override;
+  std::unique_ptr<AbstractAction> GetCleanupPreviousUpdateAction(
+      BootControlInterface* boot_control,
+      PrefsInterface* prefs,
+      CleanupPreviousUpdateActionDelegateInterface* delegate) override;
+  bool ResetUpdate(PrefsInterface* prefs) override;
+
+  bool ListDynamicPartitionsForSlot(
+      uint32_t current_slot, std::vector<std::string>* partitions) override;
+  bool GetDeviceDir(std::string* path) override;
+};
+
+}  // namespace chromeos_update_engine
+
+#endif  // UPDATE_ENGINE_COMMON_DYNAMIC_PARTITION_CONTROL_STUB_H_
diff --git a/common/error_code.h b/common/error_code.h
index 3dd7402..e473a05 100644
--- a/common/error_code.h
+++ b/common/error_code.h
@@ -83,6 +83,8 @@
   kInternalLibCurlError = 57,
   kUnresolvedHostError = 58,
   kUnresolvedHostRecovered = 59,
+  kNotEnoughSpace = 60,
+  kDeviceCorrupted = 61,
 
   // VERY IMPORTANT! When adding new error codes:
   //
diff --git a/common/error_code_utils.cc b/common/error_code_utils.cc
index 397cdf2..64df24a 100644
--- a/common/error_code_utils.cc
+++ b/common/error_code_utils.cc
@@ -167,6 +167,10 @@
       return "ErrorCode::kUnresolvedHostError";
     case ErrorCode::kUnresolvedHostRecovered:
       return "ErrorCode::kUnresolvedHostRecovered";
+    case ErrorCode::kNotEnoughSpace:
+      return "ErrorCode::kNotEnoughSpace";
+    case ErrorCode::kDeviceCorrupted:
+      return "ErrorCode::kDeviceCorrupted";
       // Don't add a default case to let the compiler warn about newly added
       // error codes which should be added here.
   }
diff --git a/common/fake_boot_control.h b/common/fake_boot_control.h
index 3d65075..adbacd6 100644
--- a/common/fake_boot_control.h
+++ b/common/fake_boot_control.h
@@ -18,12 +18,14 @@
 #define UPDATE_ENGINE_COMMON_FAKE_BOOT_CONTROL_H_
 
 #include <map>
+#include <memory>
 #include <string>
 #include <vector>
 
 #include <base/time/time.h>
 
 #include "update_engine/common/boot_control_interface.h"
+#include "update_engine/common/dynamic_partition_control_stub.h"
 
 namespace chromeos_update_engine {
 
@@ -34,6 +36,8 @@
     SetNumSlots(num_slots_);
     // The current slot should be bootable.
     is_bootable_[current_slot_] = true;
+
+    dynamic_partition_control_.reset(new DynamicPartitionControlStub());
   }
 
   // BootControlInterface overrides.
@@ -44,7 +48,9 @@
 
   bool GetPartitionDevice(const std::string& partition_name,
                           BootControlInterface::Slot slot,
-                          std::string* device) const override {
+                          bool not_in_payload,
+                          std::string* device,
+                          bool* is_dynamic) const override {
     if (slot >= num_slots_)
       return false;
     auto part_it = devices_[slot].find(partition_name);
@@ -54,6 +60,12 @@
     return true;
   }
 
+  bool GetPartitionDevice(const std::string& partition_name,
+                          BootControlInterface::Slot slot,
+                          std::string* device) const override {
+    return GetPartitionDevice(partition_name, slot, false, device, nullptr);
+  }
+
   bool IsSlotBootable(BootControlInterface::Slot slot) const override {
     return slot < num_slots_ && is_bootable_[slot];
   }
@@ -70,22 +82,20 @@
   bool MarkBootSuccessfulAsync(base::Callback<void(bool)> callback) override {
     // We run the callback directly from here to avoid having to setup a message
     // loop in the test environment.
+    is_marked_successful_[GetCurrentSlot()] = true;
     callback.Run(true);
     return true;
   }
 
-  bool InitPartitionMetadata(Slot slot,
-                             const PartitionMetadata& partition_metadata,
-                             bool update_metadata) override {
-    return true;
+  bool IsSlotMarkedSuccessful(Slot slot) const override {
+    return slot < num_slots_ && is_marked_successful_[slot];
   }
 
-  void Cleanup() override {}
-
   // Setters
   void SetNumSlots(unsigned int num_slots) {
     num_slots_ = num_slots;
     is_bootable_.resize(num_slots_, false);
+    is_marked_successful_.resize(num_slots_, false);
     devices_.resize(num_slots_);
   }
 
@@ -103,13 +113,20 @@
     is_bootable_[slot] = bootable;
   }
 
+  DynamicPartitionControlInterface* GetDynamicPartitionControl() {
+    return dynamic_partition_control_.get();
+  }
+
  private:
   BootControlInterface::Slot num_slots_{2};
   BootControlInterface::Slot current_slot_{0};
 
   std::vector<bool> is_bootable_;
+  std::vector<bool> is_marked_successful_;
   std::vector<std::map<std::string, std::string>> devices_;
 
+  std::unique_ptr<DynamicPartitionControlInterface> dynamic_partition_control_;
+
   DISALLOW_COPY_AND_ASSIGN(FakeBootControl);
 };
 
diff --git a/common/fake_hardware.h b/common/fake_hardware.h
index 6604534..2a8e81d 100644
--- a/common/fake_hardware.h
+++ b/common/fake_hardware.h
@@ -132,6 +132,8 @@
 
   int64_t GetBuildTimestamp() const override { return build_timestamp_; }
 
+  bool AllowDowngrade() const override { return false; }
+
   bool GetFirstActiveOmahaPingSent() const override {
     return first_active_omaha_ping_sent_;
   }
@@ -197,6 +199,8 @@
     build_timestamp_ = build_timestamp;
   }
 
+  void SetWarmReset(bool warm_reset) { warm_reset_ = warm_reset; }
+
   // Getters to verify state.
   int GetMaxKernelKeyRollforward() const { return kernel_max_rollforward_; }
 
@@ -225,6 +229,7 @@
   bool save_rollback_data_{false};
   int64_t build_timestamp_{0};
   bool first_active_omaha_ping_sent_{false};
+  bool warm_reset_{false};
 
   DISALLOW_COPY_AND_ASSIGN(FakeHardware);
 };
diff --git a/common/file_fetcher.cc b/common/file_fetcher.cc
index 3836e54..7134fd6 100644
--- a/common/file_fetcher.cc
+++ b/common/file_fetcher.cc
@@ -43,8 +43,9 @@
 // static
 bool FileFetcher::SupportedUrl(const string& url) {
   // Note that we require the file path to start with a "/".
-  return base::StartsWith(
-      url, "file:///", base::CompareCase::INSENSITIVE_ASCII);
+  return (
+      base::StartsWith(url, "file:///", base::CompareCase::INSENSITIVE_ASCII) ||
+      base::StartsWith(url, "fd://", base::CompareCase::INSENSITIVE_ASCII));
 }
 
 FileFetcher::~FileFetcher() {
@@ -67,12 +68,20 @@
     return;
   }
 
-  string file_path = url.substr(strlen("file://"));
-  stream_ =
-      brillo::FileStream::Open(base::FilePath(file_path),
-                               brillo::Stream::AccessMode::READ,
-                               brillo::FileStream::Disposition::OPEN_EXISTING,
-                               nullptr);
+  string file_path;
+
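+  // "fd://<num>" URLs refer to an already-open file descriptor; wrap it
+  // directly instead of opening a path.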
+  if (base::StartsWith(url, "fd://", base::CompareCase::INSENSITIVE_ASCII)) {
+    int fd = std::stoi(url.substr(strlen("fd://")));
+    file_path = url;
+    stream_ = brillo::FileStream::FromFileDescriptor(fd, false, nullptr);
+  } else {
+    file_path = url.substr(strlen("file://"));
+    stream_ =
+        brillo::FileStream::Open(base::FilePath(file_path),
+                                 brillo::Stream::AccessMode::READ,
+                                 brillo::FileStream::Disposition::OPEN_EXISTING,
+                                 nullptr);
+  }
 
   if (!stream_) {
     LOG(ERROR) << "Couldn't open " << file_path;
@@ -183,5 +192,4 @@
   transfer_in_progress_ = false;
   transfer_paused_ = false;
 }
-
 }  // namespace chromeos_update_engine
diff --git a/common/hardware_interface.h b/common/hardware_interface.h
index da9f10e..4f0305f 100644
--- a/common/hardware_interface.h
+++ b/common/hardware_interface.h
@@ -126,6 +126,10 @@
   // Returns the timestamp of the current OS build.
   virtual int64_t GetBuildTimestamp() const = 0;
 
+  // Returns true if the current OS build allows installing the payload with an
+  // older timestamp.
+  virtual bool AllowDowngrade() const = 0;
+
   // Returns whether the first active ping was sent to Omaha at some point, and
   // that the value is persisted across recovery (and powerwash) once set with
   // |SetFirstActiveOmahaPingSent()|.
@@ -134,6 +138,10 @@
   // Persist the fact that first active ping was sent to omaha and returns false
   // if failed to persist it.
   virtual bool SetFirstActiveOmahaPingSent() = 0;
+
+  // If |warm_reset| is true, sets a flag indicating that a warm reset is
+  // needed on the next reboot. Otherwise, clears the flag.
+  virtual void SetWarmReset(bool warm_reset) = 0;
 };
 
 }  // namespace chromeos_update_engine
diff --git a/common/http_fetcher_unittest.cc b/common/http_fetcher_unittest.cc
index 3ecb996..589579e 100644
--- a/common/http_fetcher_unittest.cc
+++ b/common/http_fetcher_unittest.cc
@@ -37,7 +37,7 @@
 #include <brillo/message_loops/base_message_loop.h>
 #include <brillo/message_loops/message_loop.h>
 #include <brillo/message_loops/message_loop_utils.h>
-#include <brillo/process/process.h>
+#include <brillo/process.h>
 #include <brillo/streams/file_stream.h>
 #include <brillo/streams/stream.h>
 #include <gtest/gtest.h>
diff --git a/common/platform_constants.h b/common/platform_constants.h
index 6eaa940..243af69 100644
--- a/common/platform_constants.h
+++ b/common/platform_constants.h
@@ -38,6 +38,10 @@
 // whole payload.
 extern const char kUpdatePayloadPublicKeyPath[];
 
+// Path to the zip archive file that contains the PEM encoded X509
+// certificates, e.g. 'system/etc/security/otacerts.zip'.
+extern const char kUpdateCertificatesPath[];
+
 // Path to the directory containing all the SSL certificates accepted by
 // update_engine when sending requests to Omaha and the download server (if
 // HTTPS is used for that as well).
diff --git a/common/platform_constants_android.cc b/common/platform_constants_android.cc
index 9d8d30e..f468c3b 100644
--- a/common/platform_constants_android.cc
+++ b/common/platform_constants_android.cc
@@ -25,8 +25,8 @@
     "https://clients2.google.com/service/update2/brillo";
 const char kOmahaUpdaterID[] = "Brillo";
 const char kOmahaPlatformName[] = "Brillo";
-const char kUpdatePayloadPublicKeyPath[] =
-    "/etc/update_engine/update-payload-key.pub.pem";
+const char kUpdatePayloadPublicKeyPath[] = "";
+const char kUpdateCertificatesPath[] = "/system/etc/security/otacerts.zip";
 const char kCACertificatesPath[] = "/system/etc/security/cacerts_google";
 // No deadline file API support on Android.
 const char kOmahaResponseDeadlineFile[] = "";
diff --git a/common/platform_constants_chromeos.cc b/common/platform_constants_chromeos.cc
index f1ac490..fe94a45 100644
--- a/common/platform_constants_chromeos.cc
+++ b/common/platform_constants_chromeos.cc
@@ -27,6 +27,7 @@
 const char kOmahaPlatformName[] = "Chrome OS";
 const char kUpdatePayloadPublicKeyPath[] =
     "/usr/share/update_engine/update-payload-key.pub.pem";
+const char kUpdateCertificatesPath[] = "";
 const char kCACertificatesPath[] = "/usr/share/chromeos-ca-certificates";
 const char kOmahaResponseDeadlineFile[] = "/tmp/update-check-response-deadline";
 // This directory is wiped during powerwash.
diff --git a/common/subprocess.cc b/common/subprocess.cc
index ff37472..298a65c 100644
--- a/common/subprocess.cc
+++ b/common/subprocess.cc
@@ -32,11 +32,12 @@
 #include <base/stl_util.h>
 #include <base/strings/string_util.h>
 #include <base/strings/stringprintf.h>
-#include <brillo/process/process.h>
+#include <brillo/process.h>
 #include <brillo/secure_blob.h>
 
 #include "update_engine/common/utils.h"
 
+using brillo::MessageLoop;
 using std::string;
 using std::unique_ptr;
 using std::vector;
@@ -128,7 +129,12 @@
     if (!ok || eof) {
       // There was either an error or an EOF condition, so we are done watching
       // the file descriptor.
+#ifdef __ANDROID__
+      MessageLoop::current()->CancelTask(record->stdout_task_id);
+      record->stdout_task_id = MessageLoop::kTaskIdNull;
+#else
       record->stdout_controller.reset();
+#endif  // __ANDROID__
       return;
     }
   } while (bytes_read);
@@ -143,7 +149,12 @@
   // Make sure we read any remaining process output and then close the pipe.
   OnStdoutReady(record);
 
+#ifdef __ANDROID__
+  MessageLoop::current()->CancelTask(record->stdout_task_id);
+  record->stdout_task_id = MessageLoop::kTaskIdNull;
+#else
   record->stdout_controller.reset();
+#endif  // __ANDROID__
 
   // Don't print any log if the subprocess exited with exit code 0.
   if (info.si_code != CLD_EXITED) {
@@ -198,9 +209,18 @@
                << record->stdout_fd << ".";
   }
 
+#ifdef __ANDROID__
+  record->stdout_task_id = MessageLoop::current()->WatchFileDescriptor(
+      FROM_HERE,
+      record->stdout_fd,
+      MessageLoop::WatchMode::kWatchRead,
+      true,
+      base::Bind(&Subprocess::OnStdoutReady, record.get()));
+#else
   record->stdout_controller = base::FileDescriptorWatcher::WatchReadable(
       record->stdout_fd,
       base::BindRepeating(&Subprocess::OnStdoutReady, record.get()));
+#endif  // __ANDROID__
 
   subprocess_records_[pid] = std::move(record);
   return pid;
diff --git a/common/subprocess.h b/common/subprocess.h
index e1a7ce3..f1b9f1f 100644
--- a/common/subprocess.h
+++ b/common/subprocess.h
@@ -30,8 +30,8 @@
 #include <base/macros.h>
 #include <brillo/asynchronous_signal_handler_interface.h>
 #include <brillo/message_loops/message_loop.h>
-#include <brillo/process/process.h>
-#include <brillo/process/process_reaper.h>
+#include <brillo/process.h>
+#include <brillo/process_reaper.h>
 #include <gtest/gtest_prod.h>  // for FRIEND_TEST
 
 // The Subprocess class is a singleton. It's used to spawn off a subprocess
@@ -123,7 +123,12 @@
 
     // These are used to monitor the stdout of the running process, including
     // the stderr if it was redirected.
+#ifdef __ANDROID__
+    brillo::MessageLoop::TaskId stdout_task_id{
+        brillo::MessageLoop::kTaskIdNull};
+#else
     std::unique_ptr<base::FileDescriptorWatcher::Controller> stdout_controller;
+#endif  // __ANDROID__
     int stdout_fd{-1};
     std::string stdout;
   };
diff --git a/common/subprocess_unittest.cc b/common/subprocess_unittest.cc
index 19b24f4..bc52b83 100644
--- a/common/subprocess_unittest.cc
+++ b/common/subprocess_unittest.cc
@@ -74,7 +74,10 @@
   brillo::BaseMessageLoop loop_{&base_loop_};
   brillo::AsynchronousSignalHandler async_signal_handler_;
   Subprocess subprocess_;
+#ifndef __ANDROID__
   unique_ptr<base::FileDescriptorWatcher::Controller> watcher_;
+#endif  // __ANDROID__
+
 };
 
 namespace {
@@ -258,6 +261,23 @@
   int fifo_fd = HANDLE_EINTR(open(fifo_path.c_str(), O_RDONLY));
   EXPECT_GE(fifo_fd, 0);
 
+#ifdef __ANDROID__
+  loop_.WatchFileDescriptor(FROM_HERE,
+                            fifo_fd,
+                            MessageLoop::WatchMode::kWatchRead,
+                            false,
+                            base::Bind(
+                                [](int fifo_fd, uint32_t tag) {
+                                  char c;
+                                  EXPECT_EQ(1,
+                                            HANDLE_EINTR(read(fifo_fd, &c, 1)));
+                                  EXPECT_EQ('X', c);
+                                  LOG(INFO) << "Killing tag " << tag;
+                                  Subprocess::Get().KillExec(tag);
+                                },
+                                fifo_fd,
+                                tag));
+#else
   watcher_ = base::FileDescriptorWatcher::WatchReadable(
       fifo_fd,
       base::Bind(
@@ -275,6 +295,7 @@
           base::Unretained(&watcher_),
           fifo_fd,
           tag));
+#endif  // __ANDROID__
 
   // This test would leak a callback that runs when the child process exits
   // unless we wait for it to run.
diff --git a/common/test_utils.cc b/common/test_utils.cc
index 50b0962..bd69d03 100644
--- a/common/test_utils.cc
+++ b/common/test_utils.cc
@@ -37,6 +37,10 @@
 #include <base/files/file_util.h>
 #include <base/logging.h>
 
+#ifdef __ANDROID__
+#include <libdm/loop_control.h>
+#endif
+
 #include "update_engine/common/error_code_utils.h"
 #include "update_engine/common/utils.h"
 #include "update_engine/payload_consumer/file_writer.h"
@@ -44,16 +48,7 @@
 using std::set;
 using std::string;
 using std::vector;
-
-namespace {
-
-#ifdef __ANDROID__
-#define kLoopDevicePrefix "/dev/block/loop"
-#else
-#define kLoopDevicePrefix "/dev/loop"
-#endif  // __ANDROID__
-
-}  // namespace
+using namespace std::chrono_literals;
 
 namespace chromeos_update_engine {
 
@@ -112,17 +107,43 @@
   return utils::WriteFile(path.c_str(), data.data(), data.size());
 }
 
-bool BindToUnusedLoopDevice(const string& filename,
-                            bool writable,
-                            string* out_lo_dev_name) {
-  CHECK(out_lo_dev_name);
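+// Sets the backing file name and flags (LO_FLAGS_READ_ONLY when not writable)
+// on an already-bound loop device via LOOP_SET_STATUS64. For writable devices,
+// also clears the block-device read-only flag.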
+bool SetLoopDeviceStatus(int loop_device_fd,
+                         const std::string& filename,
+                         int loop_number,
+                         bool writable) {
+  struct loop_info64 device_info {};
+  device_info.lo_offset = 0;
+  device_info.lo_sizelimit = 0;  // 0 means whole file.
+  device_info.lo_flags = (writable ? 0 : LO_FLAGS_READ_ONLY);
+  device_info.lo_number = loop_number;
+  strncpy(reinterpret_cast<char*>(device_info.lo_file_name),
+          base::FilePath(filename).BaseName().value().c_str(),
+          LO_NAME_SIZE - 1);
+  device_info.lo_file_name[LO_NAME_SIZE - 1] = '\0';
+  TEST_AND_RETURN_FALSE_ERRNO(
+      ioctl(loop_device_fd, LOOP_SET_STATUS64, &device_info) == 0);
+  if (writable) {
+    // Make sure loop device isn't read only.
+    int ro = 0;
+    if (ioctl(loop_device_fd, BLKROSET, &ro) != 0) {
+      PLOG(WARNING) << "Failed to mark loop device writable.";
+    }
+  }
+
+  return true;
+}
+
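+// Legacy path: asks /dev/loop-control for a free loop device and binds
+// |data_fd| to it manually.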
+bool BindToUnusedLoopDeviceLegacy(int data_fd,
+                                  const string& filename,
+                                  bool writable,
+                                  string* out_lo_dev_name) {
   // Get the next available loop-device.
   int control_fd =
       HANDLE_EINTR(open("/dev/loop-control", O_RDWR | O_LARGEFILE));
   TEST_AND_RETURN_FALSE_ERRNO(control_fd >= 0);
   int loop_number = ioctl(control_fd, LOOP_CTL_GET_FREE);
   IGNORE_EINTR(close(control_fd));
-  *out_lo_dev_name = kLoopDevicePrefix + std::to_string(loop_number);
+  *out_lo_dev_name = "/dev/loop" + std::to_string(loop_number);
 
   // Double check that the loop exists and is free.
   int loop_device_fd =
@@ -146,32 +167,35 @@
     return false;
   }
 
-  // Open our data file and assign it to the loop device.
+  // Assign the data fd to the loop device.
+  TEST_AND_RETURN_FALSE_ERRNO(ioctl(loop_device_fd, LOOP_SET_FD, data_fd) == 0);
+  return SetLoopDeviceStatus(loop_device_fd, filename, loop_number, writable);
+}
+
+bool BindToUnusedLoopDevice(const string& filename,
+                            bool writable,
+                            string* out_lo_dev_name) {
+  CHECK(out_lo_dev_name);
   int data_fd = open(filename.c_str(),
                      (writable ? O_RDWR : O_RDONLY) | O_LARGEFILE | O_CLOEXEC);
   TEST_AND_RETURN_FALSE_ERRNO(data_fd >= 0);
   ScopedFdCloser data_fd_closer(&data_fd);
-  TEST_AND_RETURN_FALSE_ERRNO(ioctl(loop_device_fd, LOOP_SET_FD, data_fd) == 0);
 
-  memset(&device_info, 0, sizeof(device_info));
-  device_info.lo_offset = 0;
-  device_info.lo_sizelimit = 0;  // 0 means whole file.
-  device_info.lo_flags = (writable ? 0 : LO_FLAGS_READ_ONLY);
-  device_info.lo_number = loop_number;
-  strncpy(reinterpret_cast<char*>(device_info.lo_file_name),
-          base::FilePath(filename).BaseName().value().c_str(),
-          LO_NAME_SIZE - 1);
-  device_info.lo_file_name[LO_NAME_SIZE - 1] = '\0';
-  TEST_AND_RETURN_FALSE_ERRNO(
-      ioctl(loop_device_fd, LOOP_SET_STATUS64, &device_info) == 0);
-  if (writable) {
-    // Make sure loop device isn't read only.
-    int ro = 0;
-    if (ioctl(loop_device_fd, BLKROSET, &ro) != 0) {
-      PLOG(WARNING) << "Failed to mark loop device writable.";
-    }
-  }
-  return true;
+#ifdef __ANDROID__
+  // Use libdm to bind a free loop device. The library internally handles the
+  // race condition.
+  android::dm::LoopControl loop_control;
+  TEST_AND_RETURN_FALSE(loop_control.Attach(data_fd, 5s, out_lo_dev_name));
+  int loop_device_fd = open(out_lo_dev_name->c_str(), O_RDWR | O_CLOEXEC);
+  ScopedFdCloser loop_fd_closer(&loop_device_fd);
+  int loop_number;
+  TEST_AND_RETURN_FALSE(
+      sscanf(out_lo_dev_name->c_str(), "/dev/block/loop%d", &loop_number) == 1);
+  return SetLoopDeviceStatus(loop_device_fd, filename, loop_number, writable);
+#else
+  return BindToUnusedLoopDeviceLegacy(
+      data_fd, filename, writable, out_lo_dev_name);
+#endif
 }
 
 bool UnbindLoopDevice(const string& lo_dev_name) {
diff --git a/common/utils.cc b/common/utils.cc
index 644493d..3e3d830 100644
--- a/common/utils.cc
+++ b/common/utils.cc
@@ -30,6 +30,7 @@
 #include <sys/resource.h>
 #include <sys/stat.h>
 #include <sys/types.h>
+#include <time.h>
 #include <unistd.h>
 
 #include <algorithm>
@@ -959,6 +960,24 @@
   }
 }
 
+string GetFilePath(int fd) {
+  base::FilePath proc("/proc/self/fd/" + std::to_string(fd));
+  base::FilePath file_name;
+
+  if (!base::ReadSymbolicLink(proc, &file_name)) {
+    return "not found";
+  }
+  return file_name.value();
+}
+
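+// Formats |utime| in local time as "YYYYMMDD-HHMMSS".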
+string GetTimeAsString(time_t utime) {
+  struct tm tm;
+  CHECK_EQ(localtime_r(&utime, &tm), &tm);
+  char str[16];
+  CHECK_EQ(strftime(str, sizeof(str), "%Y%m%d-%H%M%S", &tm), 15u);
+  return str;
+}
+
 string GetExclusionName(const string& str_to_convert) {
   return base::NumberToString(base::StringPieceHash()(str_to_convert));
 }
diff --git a/common/utils.h b/common/utils.h
index ee2dce0..23ac03d 100644
--- a/common/utils.h
+++ b/common/utils.h
@@ -18,6 +18,7 @@
 #define UPDATE_ENGINE_COMMON_UTILS_H_
 
 #include <errno.h>
+#include <time.h>
 #include <unistd.h>
 
 #include <algorithm>
@@ -291,6 +292,9 @@
 // reboot. Returns whether it succeeded getting the boot_id.
 bool GetBootId(std::string* boot_id);
 
+// Returns the file path of the file that the file descriptor |fd| points to.
+std::string GetFilePath(int fd);
+
 // Divide |x| by |y| and round up to the nearest integer.
 constexpr uint64_t DivRoundUp(uint64_t x, uint64_t y) {
   return (x + y - 1) / y;
@@ -313,6 +317,8 @@
                              uint16_t* high_version,
                              uint16_t* low_version);
 
+// Return a string representation of |utime| for log file names.
+std::string GetTimeAsString(time_t utime);
 // Returns the string format of the hashed |str_to_convert| that can be used
 // with |Excluder| as the exclusion name.
 std::string GetExclusionName(const std::string& str_to_convert);
diff --git a/common/utils_unittest.cc b/common/utils_unittest.cc
index f9eb596..ebcc548 100644
--- a/common/utils_unittest.cc
+++ b/common/utils_unittest.cc
@@ -472,4 +472,13 @@
   ExpectInvalidParseRollbackKeyVersion("1.99999");
 }
 
+TEST(UtilsTest, GetFilePathTest) {
+  test_utils::ScopedTempFile file;
+  int fd = HANDLE_EINTR(open(file.path().c_str(), O_RDONLY));
+  EXPECT_GE(fd, 0);
+  EXPECT_EQ(file.path(), utils::GetFilePath(fd));
+  EXPECT_EQ("not found", utils::GetFilePath(-1));
+  IGNORE_EINTR(close(fd));
+}
+
 }  // namespace chromeos_update_engine
diff --git a/dynamic_partition_control_android.cc b/dynamic_partition_control_android.cc
index bd34ea9..6817c21 100644
--- a/dynamic_partition_control_android.cc
+++ b/dynamic_partition_control_android.cc
@@ -16,60 +16,162 @@
 
 #include "update_engine/dynamic_partition_control_android.h"
 
+#include <chrono>  // NOLINT(build/c++11) - using libsnapshot / liblp API
+#include <map>
 #include <memory>
 #include <set>
 #include <string>
+#include <string_view>
+#include <utility>
+#include <vector>
 
 #include <android-base/properties.h>
 #include <android-base/strings.h>
 #include <base/files/file_util.h>
 #include <base/logging.h>
+#include <base/strings/string_util.h>
 #include <bootloader_message/bootloader_message.h>
+#include <fs_mgr.h>
 #include <fs_mgr_dm_linear.h>
+#include <fs_mgr_overlayfs.h>
+#include <libavb/libavb.h>
+#include <libdm/dm.h>
+#include <libsnapshot/snapshot.h>
+#include <libsnapshot/snapshot_stub.h>
 
+#include "update_engine/cleanup_previous_update_action.h"
 #include "update_engine/common/boot_control_interface.h"
 #include "update_engine/common/utils.h"
+#include "update_engine/dynamic_partition_utils.h"
+#include "update_engine/payload_consumer/delta_performer.h"
 
 using android::base::GetBoolProperty;
+using android::base::GetProperty;
 using android::base::Join;
 using android::dm::DeviceMapper;
 using android::dm::DmDeviceState;
 using android::fs_mgr::CreateLogicalPartition;
+using android::fs_mgr::CreateLogicalPartitionParams;
 using android::fs_mgr::DestroyLogicalPartition;
+using android::fs_mgr::Fstab;
 using android::fs_mgr::MetadataBuilder;
+using android::fs_mgr::Partition;
 using android::fs_mgr::PartitionOpener;
+using android::fs_mgr::SlotSuffixForSlotNumber;
+using android::snapshot::OptimizeSourceCopyOperation;
+using android::snapshot::Return;
+using android::snapshot::SnapshotManager;
+using android::snapshot::SnapshotManagerStub;
+using android::snapshot::UpdateState;
 
 namespace chromeos_update_engine {
 
 constexpr char kUseDynamicPartitions[] = "ro.boot.dynamic_partitions";
 constexpr char kRetrfoitDynamicPartitions[] =
     "ro.boot.dynamic_partitions_retrofit";
-constexpr uint64_t kMapTimeoutMillis = 1000;
+constexpr char kVirtualAbEnabled[] = "ro.virtual_ab.enabled";
+constexpr char kVirtualAbRetrofit[] = "ro.virtual_ab.retrofit";
+constexpr char kPostinstallFstabPrefix[] = "ro.postinstall.fstab.prefix";
+// Map timeout for dynamic partitions.
+constexpr std::chrono::milliseconds kMapTimeout{1000};
+// Map timeout for dynamic partitions with snapshots. Since several devices
+// need to be mapped, this timeout is longer than |kMapTimeout|.
+constexpr std::chrono::milliseconds kMapSnapshotTimeout{5000};
+
+#ifdef __ANDROID_RECOVERY__
+constexpr bool kIsRecovery = true;
+#else
+constexpr bool kIsRecovery = false;
+#endif
 
 DynamicPartitionControlAndroid::~DynamicPartitionControlAndroid() {
-  CleanupInternal(false /* wait */);
+  Cleanup();
 }
 
-bool DynamicPartitionControlAndroid::IsDynamicPartitionsEnabled() {
-  return GetBoolProperty(kUseDynamicPartitions, false);
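+// Converts a pair of (enable, retrofit) sysprops into a FeatureFlag value:
+// RETROFIT if the retrofit prop is set, LAUNCH if only the enable prop is set,
+// NONE otherwise.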
+static FeatureFlag GetFeatureFlag(const char* enable_prop,
+                                  const char* retrofit_prop) {
+  bool retrofit = GetBoolProperty(retrofit_prop, false);
+  bool enabled = GetBoolProperty(enable_prop, false);
+  if (retrofit && !enabled) {
+    LOG(ERROR) << retrofit_prop << " is true but " << enable_prop
+               << " is not. These sysprops are inconsistent. Assume that "
+               << enable_prop << " is true from now on.";
+  }
+  if (retrofit) {
+    return FeatureFlag(FeatureFlag::Value::RETROFIT);
+  }
+  if (enabled) {
+    return FeatureFlag(FeatureFlag::Value::LAUNCH);
+  }
+  return FeatureFlag(FeatureFlag::Value::NONE);
 }
 
-bool DynamicPartitionControlAndroid::IsDynamicPartitionsRetrofit() {
-  return GetBoolProperty(kRetrfoitDynamicPartitions, false);
+DynamicPartitionControlAndroid::DynamicPartitionControlAndroid()
+    : dynamic_partitions_(
+          GetFeatureFlag(kUseDynamicPartitions, kRetrfoitDynamicPartitions)),
+      virtual_ab_(GetFeatureFlag(kVirtualAbEnabled, kVirtualAbRetrofit)) {
+  if (GetVirtualAbFeatureFlag().IsEnabled()) {
+    snapshot_ = SnapshotManager::New();
+  } else {
+    snapshot_ = SnapshotManagerStub::New();
+  }
+  CHECK(snapshot_ != nullptr) << "Cannot initialize SnapshotManager.";
 }
 
-bool DynamicPartitionControlAndroid::MapPartitionOnDeviceMapper(
+FeatureFlag DynamicPartitionControlAndroid::GetDynamicPartitionsFeatureFlag() {
+  return dynamic_partitions_;
+}
+
+FeatureFlag DynamicPartitionControlAndroid::GetVirtualAbFeatureFlag() {
+  return virtual_ab_;
+}
+
+bool DynamicPartitionControlAndroid::OptimizeOperation(
+    const std::string& partition_name,
+    const InstallOperation& operation,
+    InstallOperation* optimized) {
+  switch (operation.type()) {
+    case InstallOperation::SOURCE_COPY:
+      return target_supports_snapshot_ &&
+             GetVirtualAbFeatureFlag().IsEnabled() &&
+             mapped_devices_.count(partition_name +
+                                   SlotSuffixForSlotNumber(target_slot_)) > 0 &&
+             OptimizeSourceCopyOperation(operation, optimized);
+      break;
+    default:
+      break;
+  }
+  return false;
+}
+
+bool DynamicPartitionControlAndroid::MapPartitionInternal(
     const std::string& super_device,
     const std::string& target_partition_name,
     uint32_t slot,
     bool force_writable,
     std::string* path) {
-  if (!CreateLogicalPartition(super_device.c_str(),
-                              slot,
-                              target_partition_name,
-                              force_writable,
-                              std::chrono::milliseconds(kMapTimeoutMillis),
-                              path)) {
+  CreateLogicalPartitionParams params = {
+      .block_device = super_device,
+      .metadata_slot = slot,
+      .partition_name = target_partition_name,
+      .force_writable = force_writable,
+  };
+  bool success = false;
+  if (GetVirtualAbFeatureFlag().IsEnabled() && target_supports_snapshot_ &&
+      force_writable && ExpectMetadataMounted()) {
+    // Only target partitions are mapped with force_writable. On Virtual
+    // A/B devices, target partitions may overlap with source partitions, so
+    // they must be mapped as snapshots.
+    // One exception is when /metadata is not mounted: fall back to
+    // CreateLogicalPartition, as snapshots are not created in the first place.
+    params.timeout_ms = kMapSnapshotTimeout;
+    success = snapshot_->MapUpdateSnapshot(params, path);
+  } else {
+    params.timeout_ms = kMapTimeout;
+    success = CreateLogicalPartition(params, path);
+  }
+
+  if (!success) {
     LOG(ERROR) << "Cannot map " << target_partition_name << " in "
                << super_device << " on device mapper.";
     return false;
@@ -81,13 +183,71 @@
   return true;
 }
 
+bool DynamicPartitionControlAndroid::MapPartitionOnDeviceMapper(
+    const std::string& super_device,
+    const std::string& target_partition_name,
+    uint32_t slot,
+    bool force_writable,
+    std::string* path) {
+  DmDeviceState state = GetState(target_partition_name);
+  if (state == DmDeviceState::ACTIVE) {
+    if (mapped_devices_.find(target_partition_name) != mapped_devices_.end()) {
+      if (GetDmDevicePathByName(target_partition_name, path)) {
+        LOG(INFO) << target_partition_name
+                  << " is mapped on device mapper: " << *path;
+        return true;
+      }
+      LOG(ERROR) << target_partition_name << " is mapped but path is unknown.";
+      return false;
+    }
+    // If target_partition_name is not in mapped_devices_ but state is ACTIVE,
+    // the device might have been mapped incorrectly. Attempt to unmap it.
+    // Note that for source partitions, if GetState() == ACTIVE, callers (e.g.
+    // BootControlAndroid) should not call MapPartitionOnDeviceMapper, but
+    // should directly call GetDmDevicePathByName.
+    if (!UnmapPartitionOnDeviceMapper(target_partition_name)) {
+      LOG(ERROR) << target_partition_name
+                 << " is mapped before the update, and it cannot be unmapped.";
+      return false;
+    }
+    state = GetState(target_partition_name);
+    if (state != DmDeviceState::INVALID) {
+      LOG(ERROR) << target_partition_name << " is unmapped but state is "
+                 << static_cast<std::underlying_type_t<DmDeviceState>>(state);
+      return false;
+    }
+  }
+  if (state == DmDeviceState::INVALID) {
+    return MapPartitionInternal(
+        super_device, target_partition_name, slot, force_writable, path);
+  }
+
+  LOG(ERROR) << target_partition_name
+             << " is mapped on device mapper but state is unknown: "
+             << static_cast<std::underlying_type_t<DmDeviceState>>(state);
+  return false;
+}
+
 bool DynamicPartitionControlAndroid::UnmapPartitionOnDeviceMapper(
-    const std::string& target_partition_name, bool wait) {
+    const std::string& target_partition_name) {
   if (DeviceMapper::Instance().GetState(target_partition_name) !=
       DmDeviceState::INVALID) {
-    if (!DestroyLogicalPartition(
-            target_partition_name,
-            std::chrono::milliseconds(wait ? kMapTimeoutMillis : 0))) {
+    // Partitions at target slot on non-Virtual A/B devices are mapped as
+    // dm-linear. Also, on Virtual A/B devices, system_other may be mapped for
+    // preopt apps as dm-linear.
+    // Call DestroyLogicalPartition to handle these cases.
+    bool success = DestroyLogicalPartition(target_partition_name);
+
+    // On a Virtual A/B device, |target_partition_name| may be a leftover from
+    // a paused update. Clean up any underlying devices.
+    if (ExpectMetadataMounted()) {
+      success &= snapshot_->UnmapUpdateSnapshot(target_partition_name);
+    } else {
+      LOG(INFO) << "Skip UnmapUpdateSnapshot(" << target_partition_name
+                << ") because metadata is not mounted";
+    }
+
+    if (!success) {
       LOG(ERROR) << "Cannot unmap " << target_partition_name
                  << " from device mapper.";
       return false;
@@ -99,18 +259,22 @@
   return true;
 }
 
-void DynamicPartitionControlAndroid::CleanupInternal(bool wait) {
+void DynamicPartitionControlAndroid::UnmapAllPartitions() {
+  if (mapped_devices_.empty()) {
+    return;
+  }
   // UnmapPartitionOnDeviceMapper removes objects from mapped_devices_, hence
   // a copy is needed for the loop.
   std::set<std::string> mapped = mapped_devices_;
   LOG(INFO) << "Destroying [" << Join(mapped, ", ") << "] from device mapper";
   for (const auto& partition_name : mapped) {
-    ignore_result(UnmapPartitionOnDeviceMapper(partition_name, wait));
+    ignore_result(UnmapPartitionOnDeviceMapper(partition_name));
   }
 }
 
 void DynamicPartitionControlAndroid::Cleanup() {
-  CleanupInternal(true /* wait */);
+  UnmapAllPartitions();
+  metadata_device_.reset();
 }
 
 bool DynamicPartitionControlAndroid::DeviceExists(const std::string& path) {
@@ -129,18 +293,27 @@
 
 std::unique_ptr<MetadataBuilder>
 DynamicPartitionControlAndroid::LoadMetadataBuilder(
+    const std::string& super_device, uint32_t source_slot) {
+  return LoadMetadataBuilder(
+      super_device, source_slot, BootControlInterface::kInvalidSlot);
+}
+
+std::unique_ptr<MetadataBuilder>
+DynamicPartitionControlAndroid::LoadMetadataBuilder(
     const std::string& super_device,
     uint32_t source_slot,
     uint32_t target_slot) {
   std::unique_ptr<MetadataBuilder> builder;
-
-  if (target_slot != BootControlInterface::kInvalidSlot &&
-      IsDynamicPartitionsRetrofit()) {
-    builder = MetadataBuilder::NewForUpdate(
-        PartitionOpener(), super_device, source_slot, target_slot);
-  } else {
+  if (target_slot == BootControlInterface::kInvalidSlot) {
     builder =
         MetadataBuilder::New(PartitionOpener(), super_device, source_slot);
+  } else {
+    bool always_keep_source_slot = !target_supports_snapshot_;
+    builder = MetadataBuilder::NewForUpdate(PartitionOpener(),
+                                            super_device,
+                                            source_slot,
+                                            target_slot,
+                                            always_keep_source_slot);
   }
 
   if (builder == nullptr) {
@@ -167,7 +340,7 @@
     return false;
   }
 
-  if (IsDynamicPartitionsRetrofit()) {
+  if (GetDynamicPartitionsFeatureFlag().IsRetrofit()) {
     if (!FlashPartitionTable(super_device, *metadata)) {
       LOG(ERROR) << "Cannot write metadata to " << super_device;
       return false;
@@ -222,4 +395,749 @@
   *out = base::FilePath(misc_device).DirName().value();
   return true;
 }
+
+bool DynamicPartitionControlAndroid::PreparePartitionsForUpdate(
+    uint32_t source_slot,
+    uint32_t target_slot,
+    const DeltaArchiveManifest& manifest,
+    bool update,
+    uint64_t* required_size) {
+  source_slot_ = source_slot;
+  target_slot_ = target_slot;
+  if (required_size != nullptr) {
+    *required_size = 0;
+  }
+
+  if (fs_mgr_overlayfs_is_setup()) {
+    // Non-DAP devices can use overlayfs as well.
+    LOG(WARNING)
+        << "overlayfs overrides are active and can interfere with our "
+           "resources.\n"
+        << "run adb enable-verity to deactivate if required and try again.";
+  }
+
+  // If metadata is erased but not formatted, it may not be mountable in
+  // recovery. It is acceptable to skip mounting and choose the fallback path
+  // (PrepareDynamicPartitionsForUpdate) when sideloading full OTAs.
+  TEST_AND_RETURN_FALSE(EnsureMetadataMounted() || IsRecovery());
+
+  if (update) {
+    TEST_AND_RETURN_FALSE(EraseSystemOtherAvbFooter(source_slot, target_slot));
+  }
+
+  if (!GetDynamicPartitionsFeatureFlag().IsEnabled()) {
+    return true;
+  }
+
+  if (target_slot == source_slot) {
+    LOG(ERROR) << "Cannot call PreparePartitionsForUpdate on current slot.";
+    return false;
+  }
+
+  // Although the current build supports dynamic partitions, the given payload
+  // doesn't use them for target partitions. This could happen when applying a
+  // retrofit update. Skip updating the partition metadata for the target slot.
+  is_target_dynamic_ = !manifest.dynamic_partition_metadata().groups().empty();
+  if (!is_target_dynamic_) {
+    return true;
+  }
+
+  target_supports_snapshot_ =
+      manifest.dynamic_partition_metadata().snapshot_enabled();
+
+  if (!update)
+    return true;
+
+  bool delete_source = false;
+
+  if (GetVirtualAbFeatureFlag().IsEnabled()) {
+    // On a Virtual A/B device, either CancelUpdate() or BeginUpdate() must be
+    // called before calling UnmapUpdateSnapshot.
+    // - If target_supports_snapshot_, PrepareSnapshotPartitionsForUpdate()
+    //   calls BeginUpdate() which resets update state
+    // - If !target_supports_snapshot_ or PrepareSnapshotPartitionsForUpdate
+    //   failed in recovery, explicitly CancelUpdate().
+    if (target_supports_snapshot_) {
+      if (PrepareSnapshotPartitionsForUpdate(
+              source_slot, target_slot, manifest, required_size)) {
+        return true;
+      }
+
+      // A Virtual A/B device doing a Virtual A/B update in Android mode must
+      // use snapshots.
+      if (!IsRecovery()) {
+        LOG(ERROR) << "PrepareSnapshotPartitionsForUpdate failed in Android "
+                   << "mode";
+        return false;
+      }
+
+      delete_source = true;
+      LOG(INFO) << "PrepareSnapshotPartitionsForUpdate failed in recovery. "
+                << "Attempt to overwrite existing partitions if possible";
+    } else {
+      // Downgrading to a non-Virtual A/B build, or this is a secondary OTA.
+      LOG(INFO) << "Using regular A/B on Virtual A/B because package disabled "
+                << "snapshots.";
+    }
+
+    // In recovery, if /metadata is not mounted, it is likely that metadata
+    // partition is erased and not formatted yet. After sideloading, when
+    // rebooting into the new version, init will erase metadata partition,
+    // hence the failure of CancelUpdate() can be ignored here.
+    // However, if metadata is mounted and CancelUpdate fails, sideloading
+    // should not proceed because during next boot, snapshots will overlay on
+    // the devices incorrectly.
+    if (ExpectMetadataMounted()) {
+      TEST_AND_RETURN_FALSE(snapshot_->CancelUpdate());
+    } else {
+      LOG(INFO) << "Skip canceling previous update because metadata is not "
+                << "mounted";
+    }
+  }
+
+  TEST_AND_RETURN_FALSE(PrepareDynamicPartitionsForUpdate(
+      source_slot, target_slot, manifest, delete_source));
+
+  if (required_size != nullptr) {
+    *required_size = 0;
+  }
+  return true;
+}
+
+namespace {
+// Try our best to erase AVB footer.
+class AvbFooterEraser {
+ public:
+  explicit AvbFooterEraser(const std::string& path) : path_(path) {}
+  bool Erase() {
+    // Try to mark the block device writable. Ignore any
+    // failure since this won't work when passing regular files.
+    ignore_result(utils::SetBlockDeviceReadOnly(path_, false /* readonly */));
+
+    fd_.reset(new EintrSafeFileDescriptor());
+    int flags = O_WRONLY | O_TRUNC | O_CLOEXEC | O_SYNC;
+    TEST_AND_RETURN_FALSE(fd_->Open(path_.c_str(), flags));
+
+    // Need to zero out the last AVB_FOOTER_SIZE bytes of the file.
+    static_assert(AVB_FOOTER_SIZE > 0);
+    off64_t offset = fd_->Seek(-AVB_FOOTER_SIZE, SEEK_END);
+    TEST_AND_RETURN_FALSE_ERRNO(offset >= 0);
+    uint64_t write_size = AVB_FOOTER_SIZE;
+    LOG(INFO) << "Zeroing " << path_ << " @ [" << offset << ", "
+              << (offset + write_size) << "] (" << write_size << " bytes)";
+    brillo::Blob zeros(write_size);
+    TEST_AND_RETURN_FALSE(utils::WriteAll(fd_, zeros.data(), zeros.size()));
+    return true;
+  }
+  ~AvbFooterEraser() {
+    TEST_AND_RETURN(fd_ != nullptr && fd_->IsOpen());
+    if (!fd_->Close()) {
+      LOG(WARNING) << "Failed to close fd for " << path_;
+    }
+  }
+
+ private:
+  std::string path_;
+  FileDescriptorPtr fd_;
+};
+
+}  // namespace
+
+std::optional<bool>
+DynamicPartitionControlAndroid::IsAvbEnabledOnSystemOther() {
+  auto prefix = GetProperty(kPostinstallFstabPrefix, "");
+  if (prefix.empty()) {
+    LOG(WARNING) << "Cannot get " << kPostinstallFstabPrefix;
+    return std::nullopt;
+  }
+  auto path = base::FilePath(prefix).Append("etc/fstab.postinstall").value();
+  return IsAvbEnabledInFstab(path);
+}
+
+std::optional<bool> DynamicPartitionControlAndroid::IsAvbEnabledInFstab(
+    const std::string& path) {
+  Fstab fstab;
+  if (!ReadFstabFromFile(path, &fstab)) {
+    PLOG(WARNING) << "Cannot read fstab from " << path;
+    if (errno == ENOENT) {
+      return false;
+    }
+    return std::nullopt;
+  }
+  for (const auto& entry : fstab) {
+    if (!entry.avb_keys.empty()) {
+      return true;
+    }
+  }
+  return false;
+}
+
+bool DynamicPartitionControlAndroid::GetSystemOtherPath(
+    uint32_t source_slot,
+    uint32_t target_slot,
+    const std::string& partition_name_suffix,
+    std::string* path,
+    bool* should_unmap) {
+  path->clear();
+  *should_unmap = false;
+
+  // Check that AVB is enabled on system_other before erasing.
+  auto has_avb = IsAvbEnabledOnSystemOther();
+  TEST_AND_RETURN_FALSE(has_avb.has_value());
+  if (!has_avb.value()) {
+    LOG(INFO) << "AVB is not enabled on system_other. Skip erasing.";
+    return true;
+  }
+
+  if (!IsRecovery()) {
+    // Found unexpected avb_keys for system_other on devices retrofitting
+    // dynamic partitions. A previous crash in update_engine may have left
+    // logical partitions mapped on the physical system_other partition. These
+    // cases are difficult to handle, so just fail.
+    if (GetDynamicPartitionsFeatureFlag().IsRetrofit()) {
+      LOG(ERROR) << "Cannot erase AVB footer on system_other on devices with "
+                 << "retrofit dynamic partitions. They should not have AVB "
+                 << "enabled on system_other.";
+      return false;
+    }
+  }
+
+  std::string device_dir_str;
+  TEST_AND_RETURN_FALSE(GetDeviceDir(&device_dir_str));
+  base::FilePath device_dir(device_dir_str);
+
+  // On devices without dynamic partition, search for static partitions.
+  if (!GetDynamicPartitionsFeatureFlag().IsEnabled()) {
+    *path = device_dir.Append(partition_name_suffix).value();
+    TEST_AND_RETURN_FALSE(DeviceExists(*path));
+    return true;
+  }
+
+  auto source_super_device =
+      device_dir.Append(GetSuperPartitionName(source_slot)).value();
+
+  auto builder = LoadMetadataBuilder(source_super_device, source_slot);
+  if (builder == nullptr) {
+    if (IsRecovery()) {
+      // It might be corrupted for some reason. It should still be able to
+      // sideload.
+      LOG(WARNING) << "Super partition metadata cannot be read from the source "
+                   << "slot, skip erasing.";
+      return true;
+    } else {
+      // Device has booted into Android mode, indicating that the super
+      // partition metadata should be there.
+      LOG(ERROR) << "Super partition metadata cannot be read from the source "
+                 << "slot. This is unexpected on devices with dynamic "
+                 << "partitions enabled.";
+      return false;
+    }
+  }
+  auto p = builder->FindPartition(partition_name_suffix);
+  if (p == nullptr) {
+    // If the source slot is flashed without system_other, it does not exist
+    // in super partition metadata at source slot. It is safe to skip it.
+    LOG(INFO) << "Can't find " << partition_name_suffix
+              << " in metadata source slot, skip erasing.";
+    return true;
+  }
+  // A system_other partition created by flashing tools should be erased.
+  // If the partition was created by update_engine (via NewForUpdate), it is a
+  // left-over partition from the previous update and does not contain
+  // system_other, hence there is no need to erase.
+  // Note the reverse is not necessarily true. If the flag is not set, we don't
+  // know whether the partition was created by update_engine or by flashing
+  // tools, because older versions of the super partition metadata do not
+  // contain this flag. It is okay to erase the AVB footer anyway.
+  if (p->attributes() & LP_PARTITION_ATTR_UPDATED) {
+    LOG(INFO) << partition_name_suffix
+              << " does not contain system_other, skip erasing.";
+    return true;
+  }
+
+  if (p->size() < AVB_FOOTER_SIZE) {
+    LOG(INFO) << partition_name_suffix << " has length " << p->size()
+              << "( < AVB_FOOTER_SIZE " << AVB_FOOTER_SIZE
+              << "), skip erasing.";
+    return true;
+  }
+
+  // Delete any pre-existing device with name |partition_name_suffix| and
+  // also remove it from |mapped_devices_|.
+  // In recovery, metadata might not be mounted, and
+  // UnmapPartitionOnDeviceMapper might fail. However,
+  // it is unusual that system_other has already been mapped. Hence, just skip.
+  TEST_AND_RETURN_FALSE(UnmapPartitionOnDeviceMapper(partition_name_suffix));
+  // Use CreateLogicalPartition directly to avoid mapping with existing
+  // snapshots.
+  CreateLogicalPartitionParams params = {
+      .block_device = source_super_device,
+      .metadata_slot = source_slot,
+      .partition_name = partition_name_suffix,
+      .force_writable = true,
+      .timeout_ms = kMapTimeout,
+  };
+  TEST_AND_RETURN_FALSE(CreateLogicalPartition(params, path));
+  *should_unmap = true;
+  return true;
+}
+
+bool DynamicPartitionControlAndroid::EraseSystemOtherAvbFooter(
+    uint32_t source_slot, uint32_t target_slot) {
+  LOG(INFO) << "Erasing AVB footer of system_other partition before update.";
+
+  const std::string target_suffix = SlotSuffixForSlotNumber(target_slot);
+  const std::string partition_name_suffix = "system" + target_suffix;
+
+  std::string path;
+  bool should_unmap = false;
+
+  TEST_AND_RETURN_FALSE(GetSystemOtherPath(
+      source_slot, target_slot, partition_name_suffix, &path, &should_unmap));
+
+  if (path.empty()) {
+    return true;
+  }
+
+  bool ret = AvbFooterEraser(path).Erase();
+
+  // Delete |partition_name_suffix| from device mapper and from
+  // |mapped_devices_| again so that it does not interfere with the update.
+  // In recovery, metadata might not be mounted, and
+  // UnmapPartitionOnDeviceMapper might fail. However, DestroyLogicalPartition
+  // should be called. If DestroyLogicalPartition does fail, it is still okay
+  // to skip the error here and let Prepare*() fail later.
+  if (should_unmap) {
+    TEST_AND_RETURN_FALSE(UnmapPartitionOnDeviceMapper(partition_name_suffix));
+  }
+
+  return ret;
+}
+
+bool DynamicPartitionControlAndroid::PrepareDynamicPartitionsForUpdate(
+    uint32_t source_slot,
+    uint32_t target_slot,
+    const DeltaArchiveManifest& manifest,
+    bool delete_source) {
+  const std::string target_suffix = SlotSuffixForSlotNumber(target_slot);
+
+  // Unmap all the target dynamic partitions because they would become
+  // inconsistent with the new metadata.
+  for (const auto& group : manifest.dynamic_partition_metadata().groups()) {
+    for (const auto& partition_name : group.partition_names()) {
+      if (!UnmapPartitionOnDeviceMapper(partition_name + target_suffix)) {
+        return false;
+      }
+    }
+  }
+
+  std::string device_dir_str;
+  if (!GetDeviceDir(&device_dir_str)) {
+    return false;
+  }
+  base::FilePath device_dir(device_dir_str);
+  auto source_device =
+      device_dir.Append(GetSuperPartitionName(source_slot)).value();
+
+  auto builder = LoadMetadataBuilder(source_device, source_slot, target_slot);
+  if (builder == nullptr) {
+    LOG(ERROR) << "No metadata at "
+               << BootControlInterface::SlotName(source_slot);
+    return false;
+  }
+
+  if (delete_source) {
+    TEST_AND_RETURN_FALSE(
+        DeleteSourcePartitions(builder.get(), source_slot, manifest));
+  }
+
+  if (!UpdatePartitionMetadata(builder.get(), target_slot, manifest)) {
+    return false;
+  }
+
+  auto target_device =
+      device_dir.Append(GetSuperPartitionName(target_slot)).value();
+  return StoreMetadata(target_device, builder.get(), target_slot);
+}
+
+bool DynamicPartitionControlAndroid::PrepareSnapshotPartitionsForUpdate(
+    uint32_t source_slot,
+    uint32_t target_slot,
+    const DeltaArchiveManifest& manifest,
+    uint64_t* required_size) {
+  TEST_AND_RETURN_FALSE(ExpectMetadataMounted());
+  if (!snapshot_->BeginUpdate()) {
+    LOG(ERROR) << "Cannot begin new update.";
+    return false;
+  }
+  auto ret = snapshot_->CreateUpdateSnapshots(manifest);
+  if (!ret) {
+    LOG(ERROR) << "Cannot create update snapshots: " << ret.string();
+    if (required_size != nullptr &&
+        ret.error_code() == Return::ErrorCode::NO_SPACE) {
+      *required_size = ret.required_size();
+    }
+    return false;
+  }
+  return true;
+}
+
+std::string DynamicPartitionControlAndroid::GetSuperPartitionName(
+    uint32_t slot) {
+  return fs_mgr_get_super_partition_name(slot);
+}
+
+bool DynamicPartitionControlAndroid::UpdatePartitionMetadata(
+    MetadataBuilder* builder,
+    uint32_t target_slot,
+    const DeltaArchiveManifest& manifest) {
+  // If applying a downgrade from Virtual A/B to non-Virtual A/B, the left-over
+  // COW group needs to be deleted to ensure there is enough space to create
+  // the target partitions.
+  builder->RemoveGroupAndPartitions(android::snapshot::kCowGroupName);
+
+  const std::string target_suffix = SlotSuffixForSlotNumber(target_slot);
+  DeleteGroupsWithSuffix(builder, target_suffix);
+
+  uint64_t total_size = 0;
+  for (const auto& group : manifest.dynamic_partition_metadata().groups()) {
+    total_size += group.size();
+  }
+
+  std::string expr;
+  uint64_t allocatable_space = builder->AllocatableSpace();
+  if (!GetDynamicPartitionsFeatureFlag().IsRetrofit()) {
+    allocatable_space /= 2;
+    expr = "half of ";
+  }
+  if (total_size > allocatable_space) {
+    LOG(ERROR) << "The maximum size of all groups with suffix " << target_suffix
+               << " (" << total_size << ") has exceeded " << expr
+               << "allocatable space for dynamic partitions "
+               << allocatable_space << ".";
+    return false;
+  }
+
+  // Name of the partition (e.g. "system") -> size in bytes.
+  std::map<std::string, uint64_t> partition_sizes;
+  for (const auto& partition : manifest.partitions()) {
+    partition_sizes.emplace(partition.partition_name(),
+                            partition.new_partition_info().size());
+  }
+
+  for (const auto& group : manifest.dynamic_partition_metadata().groups()) {
+    auto group_name_suffix = group.name() + target_suffix;
+    if (!builder->AddGroup(group_name_suffix, group.size())) {
+      LOG(ERROR) << "Cannot add group " << group_name_suffix << " with size "
+                 << group.size();
+      return false;
+    }
+    LOG(INFO) << "Added group " << group_name_suffix << " with size "
+              << group.size();
+
+    for (const auto& partition_name : group.partition_names()) {
+      auto partition_sizes_it = partition_sizes.find(partition_name);
+      if (partition_sizes_it == partition_sizes.end()) {
+        // TODO(tbao): Support auto-filling partition info for framework-only
+        // OTA.
+        LOG(ERROR) << "dynamic_partition_metadata contains partition "
+                   << partition_name << " but it is not part of the manifest. "
+                   << "This is not supported.";
+        return false;
+      }
+      uint64_t partition_size = partition_sizes_it->second;
+
+      auto partition_name_suffix = partition_name + target_suffix;
+      Partition* p = builder->AddPartition(
+          partition_name_suffix, group_name_suffix, LP_PARTITION_ATTR_READONLY);
+      if (!p) {
+        LOG(ERROR) << "Cannot add partition " << partition_name_suffix
+                   << " to group " << group_name_suffix;
+        return false;
+      }
+      if (!builder->ResizePartition(p, partition_size)) {
+        LOG(ERROR) << "Cannot resize partition " << partition_name_suffix
+                   << " to size " << partition_size << ". Not enough space?";
+        return false;
+      }
+      LOG(INFO) << "Added partition " << partition_name_suffix << " to group "
+                << group_name_suffix << " with size " << partition_size;
+    }
+  }
+
+  return true;
+}
+
+bool DynamicPartitionControlAndroid::FinishUpdate(bool powerwash_required) {
+  if (ExpectMetadataMounted()) {
+    if (snapshot_->GetUpdateState() == UpdateState::Initiated) {
+      LOG(INFO) << "Snapshot writes are done.";
+      return snapshot_->FinishedSnapshotWrites(powerwash_required);
+    }
+  } else {
+    LOG(INFO) << "Skip FinishedSnapshotWrites() because /metadata is not "
+              << "mounted";
+  }
+  return true;
+}
+
+bool DynamicPartitionControlAndroid::GetPartitionDevice(
+    const std::string& partition_name,
+    uint32_t slot,
+    uint32_t current_slot,
+    bool not_in_payload,
+    std::string* device,
+    bool* is_dynamic) {
+  const auto& partition_name_suffix =
+      partition_name + SlotSuffixForSlotNumber(slot);
+  std::string device_dir_str;
+  TEST_AND_RETURN_FALSE(GetDeviceDir(&device_dir_str));
+  base::FilePath device_dir(device_dir_str);
+
+  if (is_dynamic) {
+    *is_dynamic = false;
+  }
+
+  // When looking up target partition devices, treat them as static if the
+  // current payload doesn't encode them as dynamic partitions. This may happen
+  // when applying a retrofit update on top of a dynamic-partitions-enabled
+  // build.
+  if (GetDynamicPartitionsFeatureFlag().IsEnabled() &&
+      (slot == current_slot || is_target_dynamic_)) {
+    switch (GetDynamicPartitionDevice(device_dir,
+                                      partition_name_suffix,
+                                      slot,
+                                      current_slot,
+                                      not_in_payload,
+                                      device)) {
+      case DynamicPartitionDeviceStatus::SUCCESS:
+        if (is_dynamic) {
+          *is_dynamic = true;
+        }
+        return true;
+      case DynamicPartitionDeviceStatus::TRY_STATIC:
+        break;
+      case DynamicPartitionDeviceStatus::ERROR:  // fallthrough
+      default:
+        return false;
+    }
+  }
+  base::FilePath path = device_dir.Append(partition_name_suffix);
+  if (!DeviceExists(path.value())) {
+    LOG(ERROR) << "Device file " << path.value() << " does not exist.";
+    return false;
+  }
+
+  *device = path.value();
+  return true;
+}
+
+bool DynamicPartitionControlAndroid::GetPartitionDevice(
+    const std::string& partition_name,
+    uint32_t slot,
+    uint32_t current_slot,
+    std::string* device) {
+  return GetPartitionDevice(
+      partition_name, slot, current_slot, false, device, nullptr);
+}
+
+bool DynamicPartitionControlAndroid::IsSuperBlockDevice(
+    const base::FilePath& device_dir,
+    uint32_t current_slot,
+    const std::string& partition_name_suffix) {
+  std::string source_device =
+      device_dir.Append(GetSuperPartitionName(current_slot)).value();
+  auto source_metadata = LoadMetadataBuilder(source_device, current_slot);
+  return source_metadata->HasBlockDevice(partition_name_suffix);
+}
+
+DynamicPartitionControlAndroid::DynamicPartitionDeviceStatus
+DynamicPartitionControlAndroid::GetDynamicPartitionDevice(
+    const base::FilePath& device_dir,
+    const std::string& partition_name_suffix,
+    uint32_t slot,
+    uint32_t current_slot,
+    bool not_in_payload,
+    std::string* device) {
+  std::string super_device =
+      device_dir.Append(GetSuperPartitionName(slot)).value();
+
+  auto builder = LoadMetadataBuilder(super_device, slot);
+  if (builder == nullptr) {
+    LOG(ERROR) << "No metadata in slot "
+               << BootControlInterface::SlotName(slot);
+    return DynamicPartitionDeviceStatus::ERROR;
+  }
+  if (builder->FindPartition(partition_name_suffix) == nullptr) {
+    LOG(INFO) << partition_name_suffix
+              << " is not in super partition metadata.";
+
+    if (IsSuperBlockDevice(device_dir, current_slot, partition_name_suffix)) {
+      LOG(ERROR) << "The static partition " << partition_name_suffix
+                 << " is a block device for current metadata."
+                 << "It cannot be used as a logical partition.";
+      return DynamicPartitionDeviceStatus::ERROR;
+    }
+
+    return DynamicPartitionDeviceStatus::TRY_STATIC;
+  }
+
+  if (slot == current_slot) {
+    if (GetState(partition_name_suffix) != DmDeviceState::ACTIVE) {
+      LOG(WARNING) << partition_name_suffix << " is at current slot but it is "
+                   << "not mapped. Now try to map it.";
+    } else {
+      if (GetDmDevicePathByName(partition_name_suffix, device)) {
+        LOG(INFO) << partition_name_suffix
+                  << " is mapped on device mapper: " << *device;
+        return DynamicPartitionDeviceStatus::SUCCESS;
+      }
+      LOG(ERROR) << partition_name_suffix << "is mapped but path is unknown.";
+      return DynamicPartitionDeviceStatus::ERROR;
+    }
+  }
+
+  bool force_writable = (slot != current_slot) && !not_in_payload;
+  if (MapPartitionOnDeviceMapper(
+          super_device, partition_name_suffix, slot, force_writable, device)) {
+    return DynamicPartitionDeviceStatus::SUCCESS;
+  }
+  return DynamicPartitionDeviceStatus::ERROR;
+}
+
+void DynamicPartitionControlAndroid::set_fake_mapped_devices(
+    const std::set<std::string>& fake) {
+  mapped_devices_ = fake;
+}
+
+bool DynamicPartitionControlAndroid::IsRecovery() {
+  return kIsRecovery;
+}
+
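+// An update is incremental if any partition in the manifest carries old
+// (source) partition info.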
+static bool IsIncrementalUpdate(const DeltaArchiveManifest& manifest) {
+  const auto& partitions = manifest.partitions();
+  return std::any_of(partitions.begin(), partitions.end(), [](const auto& p) {
+    return p.has_old_partition_info();
+  });
+}
+
+bool DynamicPartitionControlAndroid::DeleteSourcePartitions(
+    MetadataBuilder* builder,
+    uint32_t source_slot,
+    const DeltaArchiveManifest& manifest) {
+  TEST_AND_RETURN_FALSE(IsRecovery());
+
+  if (IsIncrementalUpdate(manifest)) {
+    LOG(ERROR) << "Cannot sideload an incremental OTA because snapshots "
+               << "cannot be created.";
+    if (GetVirtualAbFeatureFlag().IsLaunch()) {
+      LOG(ERROR) << "Sideloading incremental updates on devices that launch "
+                 << "with Virtual A/B is not supported.";
+    }
+    return false;
+  }
+
+  LOG(INFO) << "Will overwrite existing partitions. Slot "
+            << BootControlInterface::SlotName(source_slot)
+            << " may be unbootable until the update finishes!";
+  const std::string source_suffix = SlotSuffixForSlotNumber(source_slot);
+  DeleteGroupsWithSuffix(builder, source_suffix);
+
+  return true;
+}
+
+std::unique_ptr<AbstractAction>
+DynamicPartitionControlAndroid::GetCleanupPreviousUpdateAction(
+    BootControlInterface* boot_control,
+    PrefsInterface* prefs,
+    CleanupPreviousUpdateActionDelegateInterface* delegate) {
+  if (!GetVirtualAbFeatureFlag().IsEnabled()) {
+    return std::make_unique<NoOpAction>();
+  }
+  return std::make_unique<CleanupPreviousUpdateAction>(
+      prefs, boot_control, snapshot_.get(), delegate);
+}
+
+bool DynamicPartitionControlAndroid::ResetUpdate(PrefsInterface* prefs) {
+  if (!GetVirtualAbFeatureFlag().IsEnabled()) {
+    return true;
+  }
+
+  LOG(INFO) << __func__ << " resetting update state and deleting snapshots.";
+  TEST_AND_RETURN_FALSE(prefs != nullptr);
+
+  // If the device has already booted into the target slot,
+  // ResetUpdateProgress may pass but CancelUpdate fails.
+  // This is expected. A scheduled CleanupPreviousUpdateAction should free
+  // space when it is done.
+  TEST_AND_RETURN_FALSE(DeltaPerformer::ResetUpdateProgress(
+      prefs, false /* quick */, false /* skip dynamic partitions metadata */));
+
+  if (ExpectMetadataMounted()) {
+    TEST_AND_RETURN_FALSE(snapshot_->CancelUpdate());
+  } else {
+    LOG(INFO) << "Skip cancelling update in ResetUpdate because /metadata is "
+              << "not mounted";
+  }
+
+  return true;
+}
+
+bool DynamicPartitionControlAndroid::ListDynamicPartitionsForSlot(
+    uint32_t current_slot, std::vector<std::string>* partitions) {
+  if (!GetDynamicPartitionsFeatureFlag().IsEnabled()) {
+    LOG(ERROR) << "Dynamic partitions are not enabled";
+    return false;
+  }
+
+  std::string device_dir_str;
+  TEST_AND_RETURN_FALSE(GetDeviceDir(&device_dir_str));
+  base::FilePath device_dir(device_dir_str);
+  auto super_device =
+      device_dir.Append(GetSuperPartitionName(current_slot)).value();
+  auto builder = LoadMetadataBuilder(super_device, current_slot);
+  TEST_AND_RETURN_FALSE(builder != nullptr);
+
+  std::vector<std::string> result;
+  auto suffix = SlotSuffixForSlotNumber(current_slot);
+  for (const auto& group : builder->ListGroups()) {
+    for (const auto& partition : builder->ListPartitionsInGroup(group)) {
+      std::string_view partition_name = partition->name();
+      if (!android::base::ConsumeSuffix(&partition_name, suffix)) {
+        continue;
+      }
+      result.emplace_back(partition_name);
+    }
+  }
+  *partitions = std::move(result);
+  return true;
+}
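+
+// A minimal usage sketch for ListDynamicPartitionsForSlot (the
+// |dynamic_control| pointer is illustrative, not part of this change):
+//
+//   std::vector<std::string> dynamic_partitions;
+//   if (dynamic_control->ListDynamicPartitionsForSlot(current_slot,
+//                                                     &dynamic_partitions)) {
+//     for (const auto& name : dynamic_partitions)
+//       LOG(INFO) << "Dynamic partition: " << name;
+//   }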
+
+bool DynamicPartitionControlAndroid::ExpectMetadataMounted() {
+  // No need to mount metadata for non-Virtual A/B devices.
+  if (!GetVirtualAbFeatureFlag().IsEnabled()) {
+    return false;
+  }
+  // Intentionally not checking |metadata_device_| in Android mode.
+  // /metadata should always be mounted in Android mode. If it isn't, let the
+  // caller fail when calling into SnapshotManager.
+  if (!IsRecovery()) {
+    return true;
+  }
+  // In recovery mode, explicitly check |metadata_device_|.
+  return metadata_device_ != nullptr;
+}
+
+bool DynamicPartitionControlAndroid::EnsureMetadataMounted() {
+  // No need to mount metadata for non-Virtual A/B devices.
+  if (!GetVirtualAbFeatureFlag().IsEnabled()) {
+    return true;
+  }
+
+  if (metadata_device_ == nullptr) {
+    metadata_device_ = snapshot_->EnsureMetadataMounted();
+  }
+  return metadata_device_ != nullptr;
+}
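+
+// A minimal sketch of the guard pattern these two helpers support; the
+// CancelUpdate() call mirrors ResetUpdate() above and stands in for any
+// SnapshotManager operation:
+//
+//   TEST_AND_RETURN_FALSE(EnsureMetadataMounted());
+//   if (ExpectMetadataMounted()) {
+//     TEST_AND_RETURN_FALSE(snapshot_->CancelUpdate());
+//   } else {
+//     LOG(INFO) << "Skipping SnapshotManager call; /metadata is not mounted.";
+//   }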
+
 }  // namespace chromeos_update_engine
diff --git a/dynamic_partition_control_android.h b/dynamic_partition_control_android.h
index 0ccab4e..69026a4 100644
--- a/dynamic_partition_control_android.h
+++ b/dynamic_partition_control_android.h
@@ -17,45 +17,268 @@
 #ifndef UPDATE_ENGINE_DYNAMIC_PARTITION_CONTROL_ANDROID_H_
 #define UPDATE_ENGINE_DYNAMIC_PARTITION_CONTROL_ANDROID_H_
 
-#include "update_engine/dynamic_partition_control_interface.h"
-
 #include <memory>
 #include <set>
 #include <string>
+#include <vector>
+
+#include <base/files/file_util.h>
+#include <libsnapshot/auto_device.h>
+#include <libsnapshot/snapshot.h>
+
+#include "update_engine/common/dynamic_partition_control_interface.h"
 
 namespace chromeos_update_engine {
 
 class DynamicPartitionControlAndroid : public DynamicPartitionControlInterface {
  public:
-  DynamicPartitionControlAndroid() = default;
+  DynamicPartitionControlAndroid();
   ~DynamicPartitionControlAndroid();
-  bool IsDynamicPartitionsEnabled() override;
-  bool IsDynamicPartitionsRetrofit() override;
-  bool MapPartitionOnDeviceMapper(const std::string& super_device,
-                                  const std::string& target_partition_name,
-                                  uint32_t slot,
-                                  bool force_writable,
-                                  std::string* path) override;
-  bool UnmapPartitionOnDeviceMapper(const std::string& target_partition_name,
-                                    bool wait) override;
+  FeatureFlag GetDynamicPartitionsFeatureFlag() override;
+  FeatureFlag GetVirtualAbFeatureFlag() override;
+  bool OptimizeOperation(const std::string& partition_name,
+                         const InstallOperation& operation,
+                         InstallOperation* optimized) override;
   void Cleanup() override;
-  bool DeviceExists(const std::string& path) override;
-  android::dm::DmDeviceState GetState(const std::string& name) override;
-  bool GetDmDevicePathByName(const std::string& name,
-                             std::string* path) override;
-  std::unique_ptr<android::fs_mgr::MetadataBuilder> LoadMetadataBuilder(
-      const std::string& super_device,
-      uint32_t source_slot,
-      uint32_t target_slot) override;
-  bool StoreMetadata(const std::string& super_device,
-                     android::fs_mgr::MetadataBuilder* builder,
-                     uint32_t target_slot) override;
+
+  bool PreparePartitionsForUpdate(uint32_t source_slot,
+                                  uint32_t target_slot,
+                                  const DeltaArchiveManifest& manifest,
+                                  bool update,
+                                  uint64_t* required_size) override;
+  bool FinishUpdate(bool powerwash_required) override;
+  std::unique_ptr<AbstractAction> GetCleanupPreviousUpdateAction(
+      BootControlInterface* boot_control,
+      PrefsInterface* prefs,
+      CleanupPreviousUpdateActionDelegateInterface* delegate) override;
+
+  bool ResetUpdate(PrefsInterface* prefs) override;
+
+  bool ListDynamicPartitionsForSlot(
+      uint32_t current_slot, std::vector<std::string>* partitions) override;
+
   bool GetDeviceDir(std::string* path) override;
 
- private:
-  std::set<std::string> mapped_devices_;
+  // Return the device for partition |partition_name| at slot |slot|.
+  // |current_slot| should be set to the current active slot.
+  // Note: this function is only used by BootControl*::GetPartitionDevice.
+  // Other callers should prefer BootControl*::GetPartitionDevice over
+  // BootControl*::GetDynamicPartitionControl()->GetPartitionDevice().
+  bool GetPartitionDevice(const std::string& partition_name,
+                          uint32_t slot,
+                          uint32_t current_slot,
+                          bool not_in_payload,
+                          std::string* device,
+                          bool* is_dynamic);
 
-  void CleanupInternal(bool wait);
+  bool GetPartitionDevice(const std::string& partition_name,
+                          uint32_t slot,
+                          uint32_t current_slot,
+                          std::string* device);
+
+ protected:
+  // These functions are exposed for testing.
+
+  // Unmap logical partition on device mapper. This is the reverse operation
+  // of MapPartitionOnDeviceMapper.
+  // Returns true if unmapped successfully.
+  virtual bool UnmapPartitionOnDeviceMapper(
+      const std::string& target_partition_name);
+
+  // Retrieve metadata from |super_device| at slot |source_slot|.
+  //
+  // If |target_slot| != kInvalidSlot, before returning the metadata, this
+  // function modifies the metadata so that during updates, the metadata can be
+  // written to |target_slot|. In particular, on retrofit devices, the returned
+  // metadata automatically includes block devices at |target_slot|.
+  //
+  // If |target_slot| == kInvalidSlot, this function returns metadata at
+  // |source_slot| without modifying it. This is the same as
+  // LoadMetadataBuilder(const std::string&, uint32_t).
+  virtual std::unique_ptr<android::fs_mgr::MetadataBuilder> LoadMetadataBuilder(
+      const std::string& super_device,
+      uint32_t source_slot,
+      uint32_t target_slot);
+
+  // Write metadata |builder| to |super_device| at slot |target_slot|.
+  virtual bool StoreMetadata(const std::string& super_device,
+                             android::fs_mgr::MetadataBuilder* builder,
+                             uint32_t target_slot);
+
+  // Map logical partition on device-mapper.
+  // |super_device| is the device path of the physical partition ("super").
+  // |target_partition_name| is the identifier used in metadata; for example,
+  // "vendor_a"
+  // |slot| is the selected slot to mount; for example, 0 for "_a".
+  // Returns true if mapped successfully; if so, |path| is set to the device
+  // path of the mapped logical partition.
+  virtual bool MapPartitionOnDeviceMapper(
+      const std::string& super_device,
+      const std::string& target_partition_name,
+      uint32_t slot,
+      bool force_writable,
+      std::string* path);
+
+  // Return true if a static partition exists at device path |path|.
+  virtual bool DeviceExists(const std::string& path);
+
+  // Returns the current state of the underlying device mapper device
+  // with given name.
+  // One of INVALID, SUSPENDED or ACTIVE.
+  virtual android::dm::DmDeviceState GetState(const std::string& name);
+
+  // Returns the path to the device mapper device node in '/dev' corresponding
+  // to 'name'. If the device does not exist, false is returned, and the path
+  // parameter is not set.
+  virtual bool GetDmDevicePathByName(const std::string& name,
+                                     std::string* path);
+
+  // Retrieve metadata from |super_device| at slot |source_slot|.
+  virtual std::unique_ptr<android::fs_mgr::MetadataBuilder> LoadMetadataBuilder(
+      const std::string& super_device, uint32_t source_slot);
+
+  // Return the name of the super partition (which stores super partition
+  // metadata) for a given slot.
+  virtual std::string GetSuperPartitionName(uint32_t slot);
+
+  virtual void set_fake_mapped_devices(const std::set<std::string>& fake);
+
+  // Allow mock objects to override this to test recovery mode.
+  virtual bool IsRecovery();
+
+  // Determine path for system_other partition.
+  // |source_slot| should be current slot.
+  // |target_slot| should be "other" slot.
+  // |partition_name_suffix| should be "system" + suffix(|target_slot|).
+  // Return true and set |path| if successful.
+  // Set |path| to empty if no need to erase system_other.
+  // Set |should_unmap| to true if path needs to be unmapped later.
+  //
+  // Note: system_other cannot use GetPartitionDevice or
+  // GetDynamicPartitionDevice because:
+  // - super partition metadata may be loaded from the source slot
+  // - the UPDATED flag needs to be checked to skip erasing if the partition
+  //   was not created by flashing tools
+  // - Snapshots from previous update attempts should not be used.
+  virtual bool GetSystemOtherPath(uint32_t source_slot,
+                                  uint32_t target_slot,
+                                  const std::string& partition_name_suffix,
+                                  std::string* path,
+                                  bool* should_unmap);
+
+  // Returns true if any entry in the fstab file in |path| has AVB enabled,
+  // false if not enabled, and nullopt for any error.
+  virtual std::optional<bool> IsAvbEnabledInFstab(const std::string& path);
+
+  // Returns true if system_other has AVB enabled, false if not enabled, and
+  // nullopt for any error.
+  virtual std::optional<bool> IsAvbEnabledOnSystemOther();
+
+  // Erase the system_other partition that may contain system_other.img.
+  // After the update, the content of system_other may be corrupted but still
+  // carry a valid AVB footer. If the update is rolled back and a factory data
+  // reset is triggered, system_b fails to be mapped with verity errors (see
+  // b/152444348). Erase system_other so that mapping it is skipped.
+  virtual bool EraseSystemOtherAvbFooter(uint32_t source_slot,
+                                         uint32_t target_slot);
+
+  // Helper for PreparePartitionsForUpdate. Used for devices with dynamic
+  // partitions updating without snapshots.
+  // If |delete_source| is set, source partitions are deleted before resizing
+  // target partitions (using DeleteSourcePartitions).
+  virtual bool PrepareDynamicPartitionsForUpdate(
+      uint32_t source_slot,
+      uint32_t target_slot,
+      const DeltaArchiveManifest& manifest,
+      bool delete_source);
+
+ private:
+  friend class DynamicPartitionControlAndroidTest;
+  friend class SnapshotPartitionTestP;
+
+  void UnmapAllPartitions();
+  bool MapPartitionInternal(const std::string& super_device,
+                            const std::string& target_partition_name,
+                            uint32_t slot,
+                            bool force_writable,
+                            std::string* path);
+
+  // Update |builder| according to |manifest|, assuming the device does not
+  // have Virtual A/B.
+  bool UpdatePartitionMetadata(android::fs_mgr::MetadataBuilder* builder,
+                               uint32_t target_slot,
+                               const DeltaArchiveManifest& manifest);
+
+  // Helper for PreparePartitionsForUpdate. Used for snapshotted partitions for
+  // Virtual A/B update.
+  bool PrepareSnapshotPartitionsForUpdate(uint32_t source_slot,
+                                          uint32_t target_slot,
+                                          const DeltaArchiveManifest& manifest,
+                                          uint64_t* required_size);
+
+  enum class DynamicPartitionDeviceStatus {
+    SUCCESS,
+    ERROR,
+    TRY_STATIC,
+  };
+
+  // Return SUCCESS and path in |device| if partition is dynamic.
+  // Return ERROR if any error.
+  // Return TRY_STATIC if caller should resolve the partition as a static
+  // partition instead.
+  DynamicPartitionDeviceStatus GetDynamicPartitionDevice(
+      const base::FilePath& device_dir,
+      const std::string& partition_name_suffix,
+      uint32_t slot,
+      uint32_t current_slot,
+      bool not_in_payload,
+      std::string* device);
+
+  // Return true if |partition_name_suffix| is a block device of the super
+  // partition metadata at slot |current_slot|.
+  bool IsSuperBlockDevice(const base::FilePath& device_dir,
+                          uint32_t current_slot,
+                          const std::string& partition_name_suffix);
+
+  // If sideloading a full OTA, delete source partitions from |builder|.
+  bool DeleteSourcePartitions(android::fs_mgr::MetadataBuilder* builder,
+                              uint32_t source_slot,
+                              const DeltaArchiveManifest& manifest);
+
+  // Returns true if metadata is expected to be mounted, false otherwise.
+  // Note that it returns false on non-Virtual A/B devices.
+  //
+  // Almost all functions of SnapshotManager depend on metadata being mounted.
+  // - In Android mode for Virtual A/B devices, assume it is mounted. If not,
+  //   let the caller fail when calling into SnapshotManager.
+  // - In recovery for Virtual A/B devices, it is possible that metadata is not
+  //   formatted, hence it cannot be mounted. Caller should not call into
+  //   SnapshotManager.
+  // - On non-Virtual A/B devices, updates do not depend on metadata partition.
+  //   Caller should not call into SnapshotManager.
+  //
+  // This function does NOT mount metadata partition. Use EnsureMetadataMounted
+  // to mount metadata partition.
+  bool ExpectMetadataMounted();
+
+  // Ensure /metadata is mounted. Returns true if successful, false otherwise.
+  //
+  // Note that this function returns true on non-Virtual A/B devices without
+  // doing anything.
+  bool EnsureMetadataMounted();
+
+  std::set<std::string> mapped_devices_;
+  const FeatureFlag dynamic_partitions_;
+  const FeatureFlag virtual_ab_;
+  std::unique_ptr<android::snapshot::ISnapshotManager> snapshot_;
+  std::unique_ptr<android::snapshot::AutoDevice> metadata_device_;
+  bool target_supports_snapshot_ = false;
+  // Whether the target partitions should be loaded as dynamic partitions. Set
+  // by PreparePartitionsForUpdate() per each update.
+  bool is_target_dynamic_ = false;
+  uint32_t source_slot_ = UINT32_MAX;
+  uint32_t target_slot_ = UINT32_MAX;
 
   DISALLOW_COPY_AND_ASSIGN(DynamicPartitionControlAndroid);
 };
diff --git a/dynamic_partition_control_android_unittest.cc b/dynamic_partition_control_android_unittest.cc
new file mode 100644
index 0000000..3738170
--- /dev/null
+++ b/dynamic_partition_control_android_unittest.cc
@@ -0,0 +1,1016 @@
+//
+// Copyright (C) 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/dynamic_partition_control_android.h"
+
+#include <set>
+#include <vector>
+
+#include <base/logging.h>
+#include <base/strings/string_util.h>
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+#include <libavb/libavb.h>
+#include <libsnapshot/mock_snapshot.h>
+
+#include "update_engine/common/mock_prefs.h"
+#include "update_engine/common/test_utils.h"
+#include "update_engine/dynamic_partition_test_utils.h"
+#include "update_engine/mock_dynamic_partition_control.h"
+
+using android::dm::DmDeviceState;
+using android::snapshot::MockSnapshotManager;
+using chromeos_update_engine::test_utils::ScopedLoopbackDeviceBinder;
+using chromeos_update_engine::test_utils::ScopedTempFile;
+using std::string;
+using testing::_;
+using testing::AnyNumber;
+using testing::AnyOf;
+using testing::Invoke;
+using testing::NiceMock;
+using testing::Not;
+using testing::Optional;
+using testing::Return;
+
+namespace chromeos_update_engine {
+
+class DynamicPartitionControlAndroidTest : public ::testing::Test {
+ public:
+  void SetUp() override {
+    module_ = std::make_unique<NiceMock<MockDynamicPartitionControlAndroid>>();
+
+    ON_CALL(dynamicControl(), GetDynamicPartitionsFeatureFlag())
+        .WillByDefault(Return(FeatureFlag(FeatureFlag::Value::LAUNCH)));
+    ON_CALL(dynamicControl(), GetVirtualAbFeatureFlag())
+        .WillByDefault(Return(FeatureFlag(FeatureFlag::Value::NONE)));
+
+    ON_CALL(dynamicControl(), GetDeviceDir(_))
+        .WillByDefault(Invoke([](auto path) {
+          *path = kFakeDevicePath;
+          return true;
+        }));
+
+    ON_CALL(dynamicControl(), GetSuperPartitionName(_))
+        .WillByDefault(Return(kFakeSuper));
+
+    ON_CALL(dynamicControl(), GetDmDevicePathByName(_, _))
+        .WillByDefault(Invoke([](auto partition_name_suffix, auto device) {
+          *device = GetDmDevice(partition_name_suffix);
+          return true;
+        }));
+
+    ON_CALL(dynamicControl(), EraseSystemOtherAvbFooter(_, _))
+        .WillByDefault(Return(true));
+
+    ON_CALL(dynamicControl(), IsRecovery()).WillByDefault(Return(false));
+
+    ON_CALL(dynamicControl(), PrepareDynamicPartitionsForUpdate(_, _, _, _))
+        .WillByDefault(Invoke([&](uint32_t source_slot,
+                                  uint32_t target_slot,
+                                  const DeltaArchiveManifest& manifest,
+                                  bool delete_source) {
+          return dynamicControl().RealPrepareDynamicPartitionsForUpdate(
+              source_slot, target_slot, manifest, delete_source);
+        }));
+  }
+
+  // Return the mocked DynamicPartitionControlInterface.
+  NiceMock<MockDynamicPartitionControlAndroid>& dynamicControl() {
+    return static_cast<NiceMock<MockDynamicPartitionControlAndroid>&>(*module_);
+  }
+
+  std::string GetSuperDevice(uint32_t slot) {
+    return GetDevice(dynamicControl().GetSuperPartitionName(slot));
+  }
+
+  uint32_t source() { return slots_.source; }
+  uint32_t target() { return slots_.target; }
+
+  // Return partition names with suffix of source().
+  std::string S(const std::string& name) {
+    return name + kSlotSuffixes[source()];
+  }
+
+  // Return partition names with suffix of target().
+  std::string T(const std::string& name) {
+    return name + kSlotSuffixes[target()];
+  }
+
+  // Set the fake metadata to return when LoadMetadataBuilder is called on
+  // |slot|.
+  void SetMetadata(uint32_t slot,
+                   const PartitionSuffixSizes& sizes,
+                   uint32_t partition_attr = 0) {
+    EXPECT_CALL(dynamicControl(),
+                LoadMetadataBuilder(GetSuperDevice(slot), slot, _))
+        .Times(AnyNumber())
+        .WillRepeatedly(Invoke([sizes, partition_attr](auto, auto, auto) {
+          return NewFakeMetadata(PartitionSuffixSizesToManifest(sizes),
+                                 partition_attr);
+        }));
+  }
+
+  void ExpectStoreMetadata(const PartitionSuffixSizes& partition_sizes) {
+    EXPECT_CALL(dynamicControl(),
+                StoreMetadata(GetSuperDevice(target()),
+                              MetadataMatches(partition_sizes),
+                              target()))
+        .WillOnce(Return(true));
+  }
+
+  // Expect that UnmapPartitionOnDeviceMapper is called on target() metadata
+  // slot with each partition in |partitions|.
+  void ExpectUnmap(const std::set<std::string>& partitions) {
+    // Fail when UnmapPartitionOnDeviceMapper is called with unknown arguments.
+    ON_CALL(dynamicControl(), UnmapPartitionOnDeviceMapper(_))
+        .WillByDefault(Return(false));
+
+    for (const auto& partition : partitions) {
+      EXPECT_CALL(dynamicControl(), UnmapPartitionOnDeviceMapper(partition))
+          .WillOnce(Return(true));
+    }
+  }
+  bool PreparePartitionsForUpdate(const PartitionSizes& partition_sizes) {
+    return dynamicControl().PreparePartitionsForUpdate(
+        source(),
+        target(),
+        PartitionSizesToManifest(partition_sizes),
+        true,
+        nullptr);
+  }
+  void SetSlots(const TestParam& slots) { slots_ = slots; }
+
+  void SetSnapshotEnabled(bool enabled) {
+    dynamicControl().target_supports_snapshot_ = enabled;
+  }
+
+  struct Listener : public ::testing::MatchResultListener {
+    explicit Listener(std::ostream* os) : MatchResultListener(os) {}
+  };
+
+  testing::AssertionResult UpdatePartitionMetadata(
+      const PartitionSuffixSizes& source_metadata,
+      const PartitionSizes& update_metadata,
+      const PartitionSuffixSizes& expected) {
+    return UpdatePartitionMetadata(
+        PartitionSuffixSizesToManifest(source_metadata),
+        PartitionSizesToManifest(update_metadata),
+        PartitionSuffixSizesToManifest(expected));
+  }
+  testing::AssertionResult UpdatePartitionMetadata(
+      const DeltaArchiveManifest& source_manifest,
+      const DeltaArchiveManifest& update_manifest,
+      const DeltaArchiveManifest& expected) {
+    return UpdatePartitionMetadata(
+        source_manifest, update_manifest, MetadataMatches(expected));
+  }
+  testing::AssertionResult UpdatePartitionMetadata(
+      const DeltaArchiveManifest& source_manifest,
+      const DeltaArchiveManifest& update_manifest,
+      const Matcher<MetadataBuilder*>& matcher) {
+    auto super_metadata = NewFakeMetadata(source_manifest);
+    if (!module_->UpdatePartitionMetadata(
+            super_metadata.get(), target(), update_manifest)) {
+      return testing::AssertionFailure()
+             << "UpdatePartitionMetadataInternal failed";
+    }
+    std::stringstream ss;
+    Listener listener(&ss);
+    if (matcher.MatchAndExplain(super_metadata.get(), &listener)) {
+      return testing::AssertionSuccess() << ss.str();
+    } else {
+      return testing::AssertionFailure() << ss.str();
+    }
+  }
+
+  std::unique_ptr<DynamicPartitionControlAndroid> module_;
+  TestParam slots_;
+};
+
+class DynamicPartitionControlAndroidTestP
+    : public DynamicPartitionControlAndroidTest,
+      public ::testing::WithParamInterface<TestParam> {
+ public:
+  void SetUp() override {
+    DynamicPartitionControlAndroidTest::SetUp();
+    SetSlots(GetParam());
+  }
+};
+
+// Test resize case. Grow if target metadata contains a partition with a size
+// less than expected.
+TEST_P(DynamicPartitionControlAndroidTestP,
+       NeedGrowIfSizeNotMatchWhenResizing) {
+  PartitionSuffixSizes source_metadata{{S("system"), 2_GiB},
+                                       {S("vendor"), 1_GiB},
+                                       {T("system"), 2_GiB},
+                                       {T("vendor"), 1_GiB}};
+  PartitionSuffixSizes expected{{S("system"), 2_GiB},
+                                {S("vendor"), 1_GiB},
+                                {T("system"), 3_GiB},
+                                {T("vendor"), 1_GiB}};
+  PartitionSizes update_metadata{{"system", 3_GiB}, {"vendor", 1_GiB}};
+  EXPECT_TRUE(
+      UpdatePartitionMetadata(source_metadata, update_metadata, expected));
+}
+
+// Test resize case. Shrink if target metadata contains a partition with a size
+// greater than expected.
+TEST_P(DynamicPartitionControlAndroidTestP,
+       NeedShrinkIfSizeNotMatchWhenResizing) {
+  PartitionSuffixSizes source_metadata{{S("system"), 2_GiB},
+                                       {S("vendor"), 1_GiB},
+                                       {T("system"), 2_GiB},
+                                       {T("vendor"), 1_GiB}};
+  PartitionSuffixSizes expected{{S("system"), 2_GiB},
+                                {S("vendor"), 1_GiB},
+                                {T("system"), 2_GiB},
+                                {T("vendor"), 150_MiB}};
+  PartitionSizes update_metadata{{"system", 2_GiB}, {"vendor", 150_MiB}};
+  EXPECT_TRUE(
+      UpdatePartitionMetadata(source_metadata, update_metadata, expected));
+}
+
+// Test adding partitions on the first run.
+TEST_P(DynamicPartitionControlAndroidTestP, AddPartitionToEmptyMetadata) {
+  PartitionSuffixSizes source_metadata{};
+  PartitionSuffixSizes expected{{T("system"), 2_GiB}, {T("vendor"), 1_GiB}};
+  PartitionSizes update_metadata{{"system", 2_GiB}, {"vendor", 1_GiB}};
+  EXPECT_TRUE(
+      UpdatePartitionMetadata(source_metadata, update_metadata, expected));
+}
+
+// Test subsequent add case.
+TEST_P(DynamicPartitionControlAndroidTestP, AddAdditionalPartition) {
+  PartitionSuffixSizes source_metadata{{S("system"), 2_GiB},
+                                       {T("system"), 2_GiB}};
+  PartitionSuffixSizes expected{
+      {S("system"), 2_GiB}, {T("system"), 2_GiB}, {T("vendor"), 1_GiB}};
+  PartitionSizes update_metadata{{"system", 2_GiB}, {"vendor", 1_GiB}};
+  EXPECT_TRUE(
+      UpdatePartitionMetadata(source_metadata, update_metadata, expected));
+}
+
+// Test delete one partition.
+TEST_P(DynamicPartitionControlAndroidTestP, DeletePartition) {
+  PartitionSuffixSizes source_metadata{{S("system"), 2_GiB},
+                                       {S("vendor"), 1_GiB},
+                                       {T("system"), 2_GiB},
+                                       {T("vendor"), 1_GiB}};
+  // No T("vendor")
+  PartitionSuffixSizes expected{
+      {S("system"), 2_GiB}, {S("vendor"), 1_GiB}, {T("system"), 2_GiB}};
+  PartitionSizes update_metadata{{"system", 2_GiB}};
+  EXPECT_TRUE(
+      UpdatePartitionMetadata(source_metadata, update_metadata, expected));
+}
+
+// Test delete all partitions.
+TEST_P(DynamicPartitionControlAndroidTestP, DeleteAll) {
+  PartitionSuffixSizes source_metadata{{S("system"), 2_GiB},
+                                       {S("vendor"), 1_GiB},
+                                       {T("system"), 2_GiB},
+                                       {T("vendor"), 1_GiB}};
+  PartitionSuffixSizes expected{{S("system"), 2_GiB}, {S("vendor"), 1_GiB}};
+  PartitionSizes update_metadata{};
+  EXPECT_TRUE(
+      UpdatePartitionMetadata(source_metadata, update_metadata, expected));
+}
+
+// Test corrupt source metadata case.
+TEST_P(DynamicPartitionControlAndroidTestP, CorruptedSourceMetadata) {
+  EXPECT_CALL(dynamicControl(),
+              LoadMetadataBuilder(GetSuperDevice(source()), source(), _))
+      .WillOnce(Invoke([](auto, auto, auto) { return nullptr; }));
+  ExpectUnmap({T("system")});
+
+  EXPECT_FALSE(PreparePartitionsForUpdate({{"system", 1_GiB}}))
+      << "Should not be able to continue with corrupt source metadata";
+}
+
+// Test that UpdatePartitionMetadata fails if there is not enough space on the
+// device.
+TEST_P(DynamicPartitionControlAndroidTestP, NotEnoughSpace) {
+  PartitionSuffixSizes source_metadata{{S("system"), 3_GiB},
+                                       {S("vendor"), 2_GiB},
+                                       {T("system"), 0},
+                                       {T("vendor"), 0}};
+  PartitionSizes update_metadata{{"system", 3_GiB}, {"vendor", 3_GiB}};
+
+  EXPECT_FALSE(UpdatePartitionMetadata(source_metadata, update_metadata, {}))
+      << "Should not be able to fit 11GiB data into 10GiB space";
+}
+
+TEST_P(DynamicPartitionControlAndroidTestP, NotEnoughSpaceForSlot) {
+  PartitionSuffixSizes source_metadata{{S("system"), 1_GiB},
+                                       {S("vendor"), 1_GiB},
+                                       {T("system"), 0},
+                                       {T("vendor"), 0}};
+  PartitionSizes update_metadata{{"system", 3_GiB}, {"vendor", 3_GiB}};
+  EXPECT_FALSE(UpdatePartitionMetadata(source_metadata, update_metadata, {}))
+      << "Should not be able to grow over size of super / 2";
+}
+
+TEST_P(DynamicPartitionControlAndroidTestP,
+       ApplyRetrofitUpdateOnDynamicPartitionsEnabledBuild) {
+  ON_CALL(dynamicControl(), GetDynamicPartitionsFeatureFlag())
+      .WillByDefault(Return(FeatureFlag(FeatureFlag::Value::RETROFIT)));
+  // Static partition {system,bar}_{a,b} exists.
+  EXPECT_CALL(dynamicControl(),
+              DeviceExists(AnyOf(GetDevice(S("bar")),
+                                 GetDevice(T("bar")),
+                                 GetDevice(S("system")),
+                                 GetDevice(T("system")))))
+      .WillRepeatedly(Return(true));
+
+  SetMetadata(source(),
+              {{S("system"), 2_GiB},
+               {S("vendor"), 1_GiB},
+               {T("system"), 2_GiB},
+               {T("vendor"), 1_GiB}});
+
+  // Not calling through
+  // DynamicPartitionControlAndroidTest::PreparePartitionsForUpdate(), since we
+  // don't want any default group in the PartitionMetadata.
+  EXPECT_TRUE(dynamicControl().PreparePartitionsForUpdate(
+      source(), target(), {}, true, nullptr));
+
+  // Should use dynamic source partitions.
+  EXPECT_CALL(dynamicControl(), GetState(S("system")))
+      .Times(1)
+      .WillOnce(Return(DmDeviceState::ACTIVE));
+  string system_device;
+  EXPECT_TRUE(dynamicControl().GetPartitionDevice(
+      "system", source(), source(), &system_device));
+  EXPECT_EQ(GetDmDevice(S("system")), system_device);
+
+  // Should use static target partitions without querying dynamic control.
+  EXPECT_CALL(dynamicControl(), GetState(T("system"))).Times(0);
+  EXPECT_TRUE(dynamicControl().GetPartitionDevice(
+      "system", target(), source(), &system_device));
+  EXPECT_EQ(GetDevice(T("system")), system_device);
+
+  // Static partition "bar".
+  EXPECT_CALL(dynamicControl(), GetState(S("bar"))).Times(0);
+  std::string bar_device;
+  EXPECT_TRUE(dynamicControl().GetPartitionDevice(
+      "bar", source(), source(), &bar_device));
+  EXPECT_EQ(GetDevice(S("bar")), bar_device);
+
+  EXPECT_CALL(dynamicControl(), GetState(T("bar"))).Times(0);
+  EXPECT_TRUE(dynamicControl().GetPartitionDevice(
+      "bar", target(), source(), &bar_device));
+  EXPECT_EQ(GetDevice(T("bar")), bar_device);
+}
+
+TEST_P(DynamicPartitionControlAndroidTestP,
+       GetPartitionDeviceWhenResumingUpdate) {
+  // Static partition bar_{a,b} exists.
+  EXPECT_CALL(dynamicControl(),
+              DeviceExists(AnyOf(GetDevice(S("bar")), GetDevice(T("bar")))))
+      .WillRepeatedly(Return(true));
+
+  // Both slots contain valid partition metadata, since this is resuming an
+  // update.
+  SetMetadata(source(),
+              {{S("system"), 2_GiB},
+               {S("vendor"), 1_GiB},
+               {T("system"), 2_GiB},
+               {T("vendor"), 1_GiB}});
+  SetMetadata(target(),
+              {{S("system"), 2_GiB},
+               {S("vendor"), 1_GiB},
+               {T("system"), 2_GiB},
+               {T("vendor"), 1_GiB}});
+
+  EXPECT_TRUE(dynamicControl().PreparePartitionsForUpdate(
+      source(),
+      target(),
+      PartitionSizesToManifest({{"system", 2_GiB}, {"vendor", 1_GiB}}),
+      false,
+      nullptr));
+
+  // Dynamic partition "system".
+  EXPECT_CALL(dynamicControl(), GetState(S("system")))
+      .Times(1)
+      .WillOnce(Return(DmDeviceState::ACTIVE));
+  string system_device;
+  EXPECT_TRUE(dynamicControl().GetPartitionDevice(
+      "system", source(), source(), &system_device));
+  EXPECT_EQ(GetDmDevice(S("system")), system_device);
+
+  EXPECT_CALL(dynamicControl(), GetState(T("system")))
+      .Times(AnyNumber())
+      .WillOnce(Return(DmDeviceState::ACTIVE));
+  EXPECT_CALL(dynamicControl(),
+              MapPartitionOnDeviceMapper(
+                  GetSuperDevice(target()), T("system"), target(), _, _))
+      .Times(AnyNumber())
+      .WillRepeatedly(
+          Invoke([](const auto&, const auto& name, auto, auto, auto* device) {
+            *device = "/fake/remapped/" + name;
+            return true;
+          }));
+  EXPECT_TRUE(dynamicControl().GetPartitionDevice(
+      "system", target(), source(), &system_device));
+  EXPECT_EQ("/fake/remapped/" + T("system"), system_device);
+
+  // Static partition "bar".
+  EXPECT_CALL(dynamicControl(), GetState(S("bar"))).Times(0);
+  std::string bar_device;
+  EXPECT_TRUE(dynamicControl().GetPartitionDevice(
+      "bar", source(), source(), &bar_device));
+  EXPECT_EQ(GetDevice(S("bar")), bar_device);
+
+  EXPECT_CALL(dynamicControl(), GetState(T("bar"))).Times(0);
+  EXPECT_TRUE(dynamicControl().GetPartitionDevice(
+      "bar", target(), source(), &bar_device));
+  EXPECT_EQ(GetDevice(T("bar")), bar_device);
+}
+
+INSTANTIATE_TEST_CASE_P(DynamicPartitionControlAndroidTest,
+                        DynamicPartitionControlAndroidTestP,
+                        testing::Values(TestParam{0, 1}, TestParam{1, 0}));
+
+class DynamicPartitionControlAndroidGroupTestP
+    : public DynamicPartitionControlAndroidTestP {
+ public:
+  DeltaArchiveManifest source_manifest;
+  void SetUp() override {
+    DynamicPartitionControlAndroidTestP::SetUp();
+    AddGroupAndPartition(
+        &source_manifest, S("android"), 3_GiB, S("system"), 2_GiB);
+    AddGroupAndPartition(&source_manifest, S("oem"), 2_GiB, S("vendor"), 1_GiB);
+    AddGroupAndPartition(&source_manifest, T("android"), 3_GiB, T("system"), 0);
+    AddGroupAndPartition(&source_manifest, T("oem"), 2_GiB, T("vendor"), 0);
+  }
+
+  void AddGroupAndPartition(DeltaArchiveManifest* manifest,
+                            const string& group,
+                            uint64_t group_size,
+                            const string& partition,
+                            uint64_t partition_size) {
+    auto* g = AddGroup(manifest, group, group_size);
+    AddPartition(manifest, g, partition, partition_size);
+  }
+};
+
+// Allow resizing within a group.
+TEST_P(DynamicPartitionControlAndroidGroupTestP, ResizeWithinGroup) {
+  DeltaArchiveManifest expected;
+  AddGroupAndPartition(&expected, T("android"), 3_GiB, T("system"), 3_GiB);
+  AddGroupAndPartition(&expected, T("oem"), 2_GiB, T("vendor"), 2_GiB);
+
+  DeltaArchiveManifest update_manifest;
+  AddGroupAndPartition(&update_manifest, "android", 3_GiB, "system", 3_GiB);
+  AddGroupAndPartition(&update_manifest, "oem", 2_GiB, "vendor", 2_GiB);
+
+  EXPECT_TRUE(
+      UpdatePartitionMetadata(source_manifest, update_manifest, expected));
+}
+
+TEST_P(DynamicPartitionControlAndroidGroupTestP, NotEnoughSpaceForGroup) {
+  DeltaArchiveManifest update_manifest;
+  AddGroupAndPartition(&update_manifest, "android", 3_GiB, "system", 1_GiB),
+      AddGroupAndPartition(&update_manifest, "oem", 2_GiB, "vendor", 3_GiB);
+  EXPECT_FALSE(UpdatePartitionMetadata(source_manifest, update_manifest, {}))
+      << "Should not be able to grow over maximum size of group";
+}
+
+TEST_P(DynamicPartitionControlAndroidGroupTestP, GroupTooBig) {
+  DeltaArchiveManifest update_manifest;
+  AddGroup(&update_manifest, "android", 3_GiB);
+  AddGroup(&update_manifest, "oem", 3_GiB);
+  EXPECT_FALSE(UpdatePartitionMetadata(source_manifest, update_manifest, {}))
+      << "Should not be able to grow over size of super / 2";
+}
+
+TEST_P(DynamicPartitionControlAndroidGroupTestP, AddPartitionToGroup) {
+  DeltaArchiveManifest expected;
+  auto* g = AddGroup(&expected, T("android"), 3_GiB);
+  AddPartition(&expected, g, T("system"), 2_GiB);
+  AddPartition(&expected, g, T("system_ext"), 1_GiB);
+
+  DeltaArchiveManifest update_manifest;
+  g = AddGroup(&update_manifest, "android", 3_GiB);
+  AddPartition(&update_manifest, g, "system", 2_GiB);
+  AddPartition(&update_manifest, g, "system_ext", 1_GiB);
+  AddGroupAndPartition(&update_manifest, "oem", 2_GiB, "vendor", 2_GiB);
+
+  EXPECT_TRUE(
+      UpdatePartitionMetadata(source_manifest, update_manifest, expected));
+}
+
+TEST_P(DynamicPartitionControlAndroidGroupTestP, RemovePartitionFromGroup) {
+  DeltaArchiveManifest expected;
+  AddGroup(&expected, T("android"), 3_GiB);
+
+  DeltaArchiveManifest update_manifest;
+  AddGroup(&update_manifest, "android", 3_GiB);
+  AddGroupAndPartition(&update_manifest, "oem", 2_GiB, "vendor", 2_GiB);
+
+  EXPECT_TRUE(
+      UpdatePartitionMetadata(source_manifest, update_manifest, expected));
+}
+
+TEST_P(DynamicPartitionControlAndroidGroupTestP, AddGroup) {
+  DeltaArchiveManifest expected;
+  AddGroupAndPartition(
+      &expected, T("new_group"), 2_GiB, T("new_partition"), 2_GiB);
+
+  DeltaArchiveManifest update_manifest;
+  AddGroupAndPartition(&update_manifest, "android", 2_GiB, "system", 2_GiB);
+  AddGroupAndPartition(&update_manifest, "oem", 1_GiB, "vendor", 1_GiB);
+  AddGroupAndPartition(
+      &update_manifest, "new_group", 2_GiB, "new_partition", 2_GiB);
+  EXPECT_TRUE(
+      UpdatePartitionMetadata(source_manifest, update_manifest, expected));
+}
+
+TEST_P(DynamicPartitionControlAndroidGroupTestP, RemoveGroup) {
+  DeltaArchiveManifest update_manifest;
+  AddGroupAndPartition(&update_manifest, "android", 2_GiB, "system", 2_GiB);
+
+  EXPECT_TRUE(UpdatePartitionMetadata(
+      source_manifest, update_manifest, Not(HasGroup(T("oem")))));
+}
+
+TEST_P(DynamicPartitionControlAndroidGroupTestP, ResizeGroup) {
+  DeltaArchiveManifest expected;
+  AddGroupAndPartition(&expected, T("android"), 2_GiB, T("system"), 2_GiB);
+  AddGroupAndPartition(&expected, T("oem"), 3_GiB, T("vendor"), 3_GiB);
+  DeltaArchiveManifest update_manifest;
+  AddGroupAndPartition(&update_manifest, "android", 2_GiB, "system", 2_GiB),
+      AddGroupAndPartition(&update_manifest, "oem", 3_GiB, "vendor", 3_GiB);
+  EXPECT_TRUE(
+      UpdatePartitionMetadata(source_manifest, update_manifest, expected));
+}
+
+INSTANTIATE_TEST_CASE_P(DynamicPartitionControlAndroidTest,
+                        DynamicPartitionControlAndroidGroupTestP,
+                        testing::Values(TestParam{0, 1}, TestParam{1, 0}));
+
+const PartitionSuffixSizes update_sizes_0() {
+  // Initial state is 0 for "other" slot.
+  return {
+      {"grown_a", 2_GiB},
+      {"shrunk_a", 1_GiB},
+      {"same_a", 100_MiB},
+      {"deleted_a", 150_MiB},
+      // no added_a
+      {"grown_b", 200_MiB},
+      // simulate system_other
+      {"shrunk_b", 0},
+      {"same_b", 0},
+      {"deleted_b", 0},
+      // no added_b
+  };
+}
+
+const PartitionSuffixSizes update_sizes_1() {
+  return {
+      {"grown_a", 2_GiB},
+      {"shrunk_a", 1_GiB},
+      {"same_a", 100_MiB},
+      {"deleted_a", 150_MiB},
+      // no added_a
+      {"grown_b", 3_GiB},
+      {"shrunk_b", 150_MiB},
+      {"same_b", 100_MiB},
+      {"added_b", 150_MiB},
+      // no deleted_b
+  };
+}
+
+const PartitionSuffixSizes update_sizes_2() {
+  return {
+      {"grown_a", 4_GiB},
+      {"shrunk_a", 100_MiB},
+      {"same_a", 100_MiB},
+      {"deleted_a", 64_MiB},
+      // no added_a
+      {"grown_b", 3_GiB},
+      {"shrunk_b", 150_MiB},
+      {"same_b", 100_MiB},
+      {"added_b", 150_MiB},
+      // no deleted_b
+  };
+}
+
+// Test case for the first update after the device is manufactured, in which
+// case the "other" slot is likely of size "0" (except system, which is
+// non-zero because of the system_other partition).
+TEST_F(DynamicPartitionControlAndroidTest, SimulatedFirstUpdate) {
+  SetSlots({0, 1});
+
+  SetMetadata(source(), update_sizes_0());
+  SetMetadata(target(), update_sizes_0());
+  ExpectStoreMetadata(update_sizes_1());
+  ExpectUnmap({"grown_b", "shrunk_b", "same_b", "added_b"});
+
+  EXPECT_TRUE(PreparePartitionsForUpdate({{"grown", 3_GiB},
+                                          {"shrunk", 150_MiB},
+                                          {"same", 100_MiB},
+                                          {"added", 150_MiB}}));
+}
+
+// After first update, test for the second update. In the second update, the
+// "added" partition is deleted and "deleted" partition is re-added.
+TEST_F(DynamicPartitionControlAndroidTest, SimulatedSecondUpdate) {
+  SetSlots({1, 0});
+
+  SetMetadata(source(), update_sizes_1());
+  SetMetadata(target(), update_sizes_0());
+
+  ExpectStoreMetadata(update_sizes_2());
+  ExpectUnmap({"grown_a", "shrunk_a", "same_a", "deleted_a"});
+
+  EXPECT_TRUE(PreparePartitionsForUpdate({{"grown", 4_GiB},
+                                          {"shrunk", 100_MiB},
+                                          {"same", 100_MiB},
+                                          {"deleted", 64_MiB}}));
+}
+
+TEST_F(DynamicPartitionControlAndroidTest, ApplyingToCurrentSlot) {
+  SetSlots({1, 1});
+  EXPECT_FALSE(PreparePartitionsForUpdate({}))
+      << "Should not be able to apply to current slot.";
+}
+
+TEST_P(DynamicPartitionControlAndroidTestP, OptimizeOperationTest) {
+  ASSERT_TRUE(dynamicControl().PreparePartitionsForUpdate(
+      source(),
+      target(),
+      PartitionSizesToManifest({{"foo", 4_MiB}}),
+      false,
+      nullptr));
+  dynamicControl().set_fake_mapped_devices({T("foo")});
+
+  InstallOperation iop;
+  InstallOperation optimized;
+  Extent *se, *de;
+
+  // Not a SOURCE_COPY operation, cannot skip.
+  iop.set_type(InstallOperation::REPLACE);
+  EXPECT_FALSE(dynamicControl().OptimizeOperation("foo", iop, &optimized));
+
+  iop.set_type(InstallOperation::SOURCE_COPY);
+
+  // By default GetVirtualAbFeatureFlag is disabled. Cannot skip operation.
+  EXPECT_FALSE(dynamicControl().OptimizeOperation("foo", iop, &optimized));
+
+  // Enable GetVirtualAbFeatureFlag in the mock interface.
+  ON_CALL(dynamicControl(), GetVirtualAbFeatureFlag())
+      .WillByDefault(Return(FeatureFlag(FeatureFlag::Value::LAUNCH)));
+
+  // By default target_supports_snapshot_ is set to false. Cannot skip
+  // operation.
+  EXPECT_FALSE(dynamicControl().OptimizeOperation("foo", iop, &optimized));
+
+  SetSnapshotEnabled(true);
+
+  // Empty source and destination. Skip.
+  EXPECT_TRUE(dynamicControl().OptimizeOperation("foo", iop, &optimized));
+  EXPECT_TRUE(optimized.src_extents().empty());
+  EXPECT_TRUE(optimized.dst_extents().empty());
+
+  se = iop.add_src_extents();
+  se->set_start_block(0);
+  se->set_num_blocks(1);
+
+  // There is something in sources, but destinations are empty. Cannot skip.
+  EXPECT_FALSE(dynamicControl().OptimizeOperation("foo", iop, &optimized));
+
+  InstallOperation iop2;
+
+  de = iop2.add_dst_extents();
+  de->set_start_block(0);
+  de->set_num_blocks(1);
+
+  // There is something in destinations, but sources are empty. Cannot skip.
+  EXPECT_FALSE(dynamicControl().OptimizeOperation("foo", iop2, &optimized));
+
+  de = iop.add_dst_extents();
+  de->set_start_block(0);
+  de->set_num_blocks(1);
+
+  // Sources and destinations are identical. Skip.
+  EXPECT_TRUE(dynamicControl().OptimizeOperation("foo", iop, &optimized));
+  EXPECT_TRUE(optimized.src_extents().empty());
+  EXPECT_TRUE(optimized.dst_extents().empty());
+
+  se = iop.add_src_extents();
+  se->set_start_block(1);
+  se->set_num_blocks(5);
+
+  // There is something in source, but not in destination. Cannot skip.
+  EXPECT_FALSE(dynamicControl().OptimizeOperation("foo", iop, &optimized));
+
+  de = iop.add_dst_extents();
+  de->set_start_block(1);
+  de->set_num_blocks(5);
+
+  // Source and destination are equal. Skip.
+  EXPECT_TRUE(dynamicControl().OptimizeOperation("foo", iop, &optimized));
+  EXPECT_TRUE(optimized.src_extents().empty());
+  EXPECT_TRUE(optimized.dst_extents().empty());
+
+  de = iop.add_dst_extents();
+  de->set_start_block(6);
+  de->set_num_blocks(5);
+
+  // There is something extra in dest. Cannot skip.
+  EXPECT_FALSE(dynamicControl().OptimizeOperation("foo", iop, &optimized));
+
+  se = iop.add_src_extents();
+  se->set_start_block(6);
+  se->set_num_blocks(5);
+
+  // Source and dest are identical again. Skip.
+  EXPECT_TRUE(dynamicControl().OptimizeOperation("foo", iop, &optimized));
+  EXPECT_TRUE(optimized.src_extents().empty());
+  EXPECT_TRUE(optimized.dst_extents().empty());
+
+  iop.Clear();
+  iop.set_type(InstallOperation::SOURCE_COPY);
+  se = iop.add_src_extents();
+  se->set_start_block(1);
+  se->set_num_blocks(1);
+  se = iop.add_src_extents();
+  se->set_start_block(3);
+  se->set_num_blocks(2);
+  se = iop.add_src_extents();
+  se->set_start_block(7);
+  se->set_num_blocks(2);
+  de = iop.add_dst_extents();
+  de->set_start_block(2);
+  de->set_num_blocks(5);
+
+  // [1, 3, 4, 7, 8] -> [2, 3, 4, 5, 6] should return [1, 7, 8] -> [2, 5, 6]
+  EXPECT_TRUE(dynamicControl().OptimizeOperation("foo", iop, &optimized));
+  ASSERT_EQ(2, optimized.src_extents_size());
+  ASSERT_EQ(2, optimized.dst_extents_size());
+  EXPECT_EQ(1u, optimized.src_extents(0).start_block());
+  EXPECT_EQ(1u, optimized.src_extents(0).num_blocks());
+  EXPECT_EQ(2u, optimized.dst_extents(0).start_block());
+  EXPECT_EQ(1u, optimized.dst_extents(0).num_blocks());
+  EXPECT_EQ(7u, optimized.src_extents(1).start_block());
+  EXPECT_EQ(2u, optimized.src_extents(1).num_blocks());
+  EXPECT_EQ(5u, optimized.dst_extents(1).start_block());
+  EXPECT_EQ(2u, optimized.dst_extents(1).num_blocks());
+
+  // Don't skip for static partitions.
+  EXPECT_FALSE(dynamicControl().OptimizeOperation("bar", iop, &optimized));
+}
+
+TEST_F(DynamicPartitionControlAndroidTest, ResetUpdate) {
+  MockPrefs prefs;
+  ASSERT_TRUE(dynamicControl().ResetUpdate(&prefs));
+}
+
+TEST_F(DynamicPartitionControlAndroidTest, IsAvbNotEnabledInFstab) {
+  // clang-format off
+  std::string fstab_content =
+      "system /postinstall ext4 ro,nosuid,nodev,noexec slotselect_other,logical\n"  // NOLINT(whitespace/line_length)
+      "/dev/block/by-name/system /postinstall ext4 ro,nosuid,nodev,noexec slotselect_other\n";  // NOLINT(whitespace/line_length)
+  // clang-format on
+  ScopedTempFile fstab;
+  ASSERT_TRUE(test_utils::WriteFileString(fstab.path(), fstab_content));
+  ASSERT_THAT(dynamicControl().RealIsAvbEnabledInFstab(fstab.path()),
+              Optional(false));
+}
+
+TEST_F(DynamicPartitionControlAndroidTest, IsAvbEnabledInFstab) {
+  // clang-format off
+  std::string fstab_content =
+      "system /postinstall ext4 ro,nosuid,nodev,noexec slotselect_other,logical,avb_keys=/foo\n";  // NOLINT(whitespace/line_length)
+  // clang-format on
+  ScopedTempFile fstab;
+  ASSERT_TRUE(test_utils::WriteFileString(fstab.path(), fstab_content));
+  ASSERT_THAT(dynamicControl().RealIsAvbEnabledInFstab(fstab.path()),
+              Optional(true));
+}
+
+TEST_P(DynamicPartitionControlAndroidTestP, AvbNotEnabledOnSystemOther) {
+  ON_CALL(dynamicControl(), GetSystemOtherPath(_, _, _, _, _))
+      .WillByDefault(Invoke([&](auto source_slot,
+                                auto target_slot,
+                                const auto& name,
+                                auto path,
+                                auto should_unmap) {
+        return dynamicControl().RealGetSystemOtherPath(
+            source_slot, target_slot, name, path, should_unmap);
+      }));
+  ON_CALL(dynamicControl(), IsAvbEnabledOnSystemOther())
+      .WillByDefault(Return(false));
+  EXPECT_TRUE(
+      dynamicControl().RealEraseSystemOtherAvbFooter(source(), target()));
+}
+
+TEST_P(DynamicPartitionControlAndroidTestP, NoSystemOtherToErase) {
+  SetMetadata(source(), {{S("system"), 100_MiB}});
+  ON_CALL(dynamicControl(), IsAvbEnabledOnSystemOther())
+      .WillByDefault(Return(true));
+  std::string path;
+  bool should_unmap;
+  ASSERT_TRUE(dynamicControl().RealGetSystemOtherPath(
+      source(), target(), T("system"), &path, &should_unmap));
+  ASSERT_TRUE(path.empty()) << path;
+  ASSERT_FALSE(should_unmap);
+  ON_CALL(dynamicControl(), GetSystemOtherPath(_, _, _, _, _))
+      .WillByDefault(Invoke([&](auto source_slot,
+                                auto target_slot,
+                                const auto& name,
+                                auto path,
+                                auto should_unmap) {
+        return dynamicControl().RealGetSystemOtherPath(
+            source_slot, target_slot, name, path, should_unmap);
+      }));
+  EXPECT_TRUE(
+      dynamicControl().RealEraseSystemOtherAvbFooter(source(), target()));
+}
+
+TEST_P(DynamicPartitionControlAndroidTestP, SkipEraseUpdatedSystemOther) {
+  PartitionSuffixSizes sizes{{S("system"), 100_MiB}, {T("system"), 100_MiB}};
+  SetMetadata(source(), sizes, LP_PARTITION_ATTR_UPDATED);
+  ON_CALL(dynamicControl(), IsAvbEnabledOnSystemOther())
+      .WillByDefault(Return(true));
+  std::string path;
+  bool should_unmap;
+  ASSERT_TRUE(dynamicControl().RealGetSystemOtherPath(
+      source(), target(), T("system"), &path, &should_unmap));
+  ASSERT_TRUE(path.empty()) << path;
+  ASSERT_FALSE(should_unmap);
+  ON_CALL(dynamicControl(), GetSystemOtherPath(_, _, _, _, _))
+      .WillByDefault(Invoke([&](auto source_slot,
+                                auto target_slot,
+                                const auto& name,
+                                auto path,
+                                auto should_unmap) {
+        return dynamicControl().RealGetSystemOtherPath(
+            source_slot, target_slot, name, path, should_unmap);
+      }));
+  EXPECT_TRUE(
+      dynamicControl().RealEraseSystemOtherAvbFooter(source(), target()));
+}
+
+TEST_P(DynamicPartitionControlAndroidTestP, EraseSystemOtherAvbFooter) {
+  constexpr uint64_t file_size = 1_MiB;
+  static_assert(file_size > AVB_FOOTER_SIZE);
+  ScopedTempFile system_other;
+  brillo::Blob original(file_size, 'X');
+  ASSERT_TRUE(test_utils::WriteFileVector(system_other.path(), original));
+  std::string mnt_path;
+  ScopedLoopbackDeviceBinder dev(system_other.path(), true, &mnt_path);
+  ASSERT_TRUE(dev.is_bound());
+
+  brillo::Blob device_content;
+  ASSERT_TRUE(utils::ReadFile(mnt_path, &device_content));
+  ASSERT_EQ(original, device_content);
+
+  PartitionSuffixSizes sizes{{S("system"), 100_MiB}, {T("system"), file_size}};
+  SetMetadata(source(), sizes);
+  ON_CALL(dynamicControl(), IsAvbEnabledOnSystemOther())
+      .WillByDefault(Return(true));
+  EXPECT_CALL(dynamicControl(),
+              GetSystemOtherPath(source(), target(), T("system"), _, _))
+      .WillRepeatedly(
+          Invoke([&](auto, auto, const auto&, auto path, auto should_unmap) {
+            *path = mnt_path;
+            *should_unmap = false;
+            return true;
+          }));
+  ASSERT_TRUE(
+      dynamicControl().RealEraseSystemOtherAvbFooter(source(), target()));
+
+  device_content.clear();
+  ASSERT_TRUE(utils::ReadFile(mnt_path, &device_content));
+  brillo::Blob new_expected(original);
+  // Clear the last AVB_FOOTER_SIZE bytes.
+  new_expected.resize(file_size - AVB_FOOTER_SIZE);
+  new_expected.resize(file_size, '\0');
+  ASSERT_EQ(new_expected, device_content);
+}
+
+class FakeAutoDevice : public android::snapshot::AutoDevice {
+ public:
+  FakeAutoDevice() : AutoDevice("") {}
+};
+
+class SnapshotPartitionTestP : public DynamicPartitionControlAndroidTestP {
+ public:
+  void SetUp() override {
+    DynamicPartitionControlAndroidTestP::SetUp();
+    ON_CALL(dynamicControl(), GetVirtualAbFeatureFlag())
+        .WillByDefault(Return(FeatureFlag(FeatureFlag::Value::LAUNCH)));
+
+    snapshot_ = new NiceMock<MockSnapshotManager>();
+    dynamicControl().snapshot_.reset(snapshot_);  // takes ownership
+    EXPECT_CALL(*snapshot_, BeginUpdate()).WillOnce(Return(true));
+    EXPECT_CALL(*snapshot_, EnsureMetadataMounted())
+        .WillRepeatedly(
+            Invoke([]() { return std::make_unique<FakeAutoDevice>(); }));
+
+    manifest_ =
+        PartitionSizesToManifest({{"system", 3_GiB}, {"vendor", 1_GiB}});
+  }
+  void ExpectCreateUpdateSnapshots(android::snapshot::Return val) {
+    manifest_.mutable_dynamic_partition_metadata()->set_snapshot_enabled(true);
+    EXPECT_CALL(*snapshot_, CreateUpdateSnapshots(_))
+        .WillRepeatedly(Invoke([&, val](const auto& manifest) {
+          // Deep comparison requires the full protobuf library. Comparing the
+          // pointers is sufficient.
+          EXPECT_EQ(&manifest_, &manifest);
+          LOG(WARNING) << "CreateUpdateSnapshots returning " << val.string();
+          return val;
+        }));
+  }
+  bool PreparePartitionsForUpdate(uint64_t* required_size) {
+    return dynamicControl().PreparePartitionsForUpdate(
+        source(), target(), manifest_, true /* update */, required_size);
+  }
+  MockSnapshotManager* snapshot_ = nullptr;
+  DeltaArchiveManifest manifest_;
+};
+
+// Test happy path of PreparePartitionsForUpdate on a Virtual A/B device.
+TEST_P(SnapshotPartitionTestP, PreparePartitions) {
+  ExpectCreateUpdateSnapshots(android::snapshot::Return::Ok());
+  uint64_t required_size = 0;
+  EXPECT_TRUE(PreparePartitionsForUpdate(&required_size));
+  EXPECT_EQ(0u, required_size);
+}
+
+// Test that, if there is not enough space, the required size returned by
+// SnapshotManager is passed up.
+TEST_P(SnapshotPartitionTestP, PreparePartitionsNoSpace) {
+  ExpectCreateUpdateSnapshots(android::snapshot::Return::NoSpace(1_GiB));
+  uint64_t required_size = 0;
+  EXPECT_FALSE(PreparePartitionsForUpdate(&required_size));
+  EXPECT_EQ(1_GiB, required_size);
+}
+
+// Test that, in recovery, empty space in the super partition is used for a
+// snapshot update first.
+TEST_P(SnapshotPartitionTestP, RecoveryUseSuperEmpty) {
+  ExpectCreateUpdateSnapshots(android::snapshot::Return::Ok());
+  EXPECT_CALL(dynamicControl(), IsRecovery()).WillRepeatedly(Return(true));
+  // Must not call PrepareDynamicPartitionsForUpdate if
+  // PrepareSnapshotPartitionsForUpdate succeeds.
+  EXPECT_CALL(dynamicControl(), PrepareDynamicPartitionsForUpdate(_, _, _, _))
+      .Times(0);
+  uint64_t required_size = 0;
+  EXPECT_TRUE(PreparePartitionsForUpdate(&required_size));
+  EXPECT_EQ(0u, required_size);
+}
+
+// Test that, in recovery, if CreateUpdateSnapshots returns an error, the
+// flashing path for full updates is tried.
+TEST_P(SnapshotPartitionTestP, RecoveryErrorShouldDeleteSource) {
+  // Expectation on PreparePartitionsForUpdate
+  ExpectCreateUpdateSnapshots(android::snapshot::Return::NoSpace(1_GiB));
+  EXPECT_CALL(dynamicControl(), IsRecovery()).WillRepeatedly(Return(true));
+  EXPECT_CALL(*snapshot_, CancelUpdate()).WillOnce(Return(true));
+  EXPECT_CALL(dynamicControl(), PrepareDynamicPartitionsForUpdate(_, _, _, _))
+      .WillRepeatedly(Invoke([&](auto source_slot,
+                                 auto target_slot,
+                                 const auto& manifest,
+                                 auto delete_source) {
+        EXPECT_EQ(source(), source_slot);
+        EXPECT_EQ(target(), target_slot);
+        // Deep comparison requires the full protobuf library. Comparing the
+        // pointers is sufficient.
+        EXPECT_EQ(&manifest_, &manifest);
+        EXPECT_TRUE(delete_source);
+        return dynamicControl().RealPrepareDynamicPartitionsForUpdate(
+            source_slot, target_slot, manifest, delete_source);
+      }));
+  // Expectation on PrepareDynamicPartitionsForUpdate
+  SetMetadata(source(), {{S("system"), 2_GiB}, {S("vendor"), 1_GiB}});
+  ExpectUnmap({T("system"), T("vendor")});
+  // Expect that the source partitions aren't present in target super metadata.
+  ExpectStoreMetadata({{T("system"), 3_GiB}, {T("vendor"), 1_GiB}});
+
+  uint64_t required_size = 0;
+  EXPECT_TRUE(PreparePartitionsForUpdate(&required_size));
+  EXPECT_EQ(0u, required_size);
+}
+
+INSTANTIATE_TEST_CASE_P(DynamicPartitionControlAndroidTest,
+                        SnapshotPartitionTestP,
+                        testing::Values(TestParam{0, 1}, TestParam{1, 0}));
+
+}  // namespace chromeos_update_engine
diff --git a/dynamic_partition_control_interface.h b/dynamic_partition_control_interface.h
deleted file mode 100644
index 86a0730..0000000
--- a/dynamic_partition_control_interface.h
+++ /dev/null
@@ -1,98 +0,0 @@
-//
-// Copyright (C) 2018 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#ifndef UPDATE_ENGINE_DYNAMIC_PARTITION_CONTROL_INTERFACE_H_
-#define UPDATE_ENGINE_DYNAMIC_PARTITION_CONTROL_INTERFACE_H_
-
-#include <stdint.h>
-
-#include <memory>
-#include <string>
-
-#include <base/files/file_util.h>
-#include <libdm/dm.h>
-#include <liblp/builder.h>
-
-namespace chromeos_update_engine {
-
-class DynamicPartitionControlInterface {
- public:
-  virtual ~DynamicPartitionControlInterface() = default;
-
-  // Return true iff dynamic partitions is enabled on this device.
-  virtual bool IsDynamicPartitionsEnabled() = 0;
-
-  // Return true iff dynamic partitions is retrofitted on this device.
-  virtual bool IsDynamicPartitionsRetrofit() = 0;
-
-  // Map logical partition on device-mapper.
-  // |super_device| is the device path of the physical partition ("super").
-  // |target_partition_name| is the identifier used in metadata; for example,
-  // "vendor_a"
-  // |slot| is the selected slot to mount; for example, 0 for "_a".
-  // Returns true if mapped successfully; if so, |path| is set to the device
-  // path of the mapped logical partition.
-  virtual bool MapPartitionOnDeviceMapper(
-      const std::string& super_device,
-      const std::string& target_partition_name,
-      uint32_t slot,
-      bool force_writable,
-      std::string* path) = 0;
-
-  // Unmap logical partition on device mapper. This is the reverse operation
-  // of MapPartitionOnDeviceMapper.
-  // If |wait| is set, wait until the device is unmapped.
-  // Returns true if unmapped successfully.
-  virtual bool UnmapPartitionOnDeviceMapper(
-      const std::string& target_partition_name, bool wait) = 0;
-
-  // Do necessary cleanups before destroying the object.
-  virtual void Cleanup() = 0;
-
-  // Return true if a static partition exists at device path |path|.
-  virtual bool DeviceExists(const std::string& path) = 0;
-
-  // Returns the current state of the underlying device mapper device
-  // with given name.
-  // One of INVALID, SUSPENDED or ACTIVE.
-  virtual android::dm::DmDeviceState GetState(const std::string& name) = 0;
-
-  // Returns the path to the device mapper device node in '/dev' corresponding
-  // to 'name'. If the device does not exist, false is returned, and the path
-  // parameter is not set.
-  virtual bool GetDmDevicePathByName(const std::string& name,
-                                     std::string* path) = 0;
-
-  // Retrieve metadata from |super_device| at slot |source_slot|.
-  // On retrofit devices, if |target_slot| != kInvalidSlot, the returned
-  // metadata automatically includes block devices at |target_slot|.
-  virtual std::unique_ptr<android::fs_mgr::MetadataBuilder> LoadMetadataBuilder(
-      const std::string& super_device,
-      uint32_t source_slot,
-      uint32_t target_slot) = 0;
-
-  // Write metadata |builder| to |super_device| at slot |target_slot|.
-  virtual bool StoreMetadata(const std::string& super_device,
-                             android::fs_mgr::MetadataBuilder* builder,
-                             uint32_t target_slot) = 0;
-
-  // Return a possible location for devices listed by name.
-  virtual bool GetDeviceDir(std::string* path) = 0;
-};
-
-}  // namespace chromeos_update_engine
-
-#endif  // UPDATE_ENGINE_DYNAMIC_PARTITION_CONTROL_INTERFACE_H_
diff --git a/dynamic_partition_test_utils.h b/dynamic_partition_test_utils.h
new file mode 100644
index 0000000..70a176b
--- /dev/null
+++ b/dynamic_partition_test_utils.h
@@ -0,0 +1,286 @@
+//
+// Copyright (C) 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_DYNAMIC_PARTITION_TEST_UTILS_H_
+#define UPDATE_ENGINE_DYNAMIC_PARTITION_TEST_UTILS_H_
+
+#include <stdint.h>
+
+#include <iostream>
+#include <map>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include <base/strings/string_util.h>
+#include <fs_mgr.h>
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+#include <liblp/builder.h>
+#include <storage_literals/storage_literals.h>
+
+#include "update_engine/common/boot_control_interface.h"
+#include "update_engine/update_metadata.pb.h"
+
+namespace chromeos_update_engine {
+
+using android::fs_mgr::MetadataBuilder;
+using testing::_;
+using testing::MakeMatcher;
+using testing::Matcher;
+using testing::MatcherInterface;
+using testing::MatchResultListener;
+using namespace android::storage_literals;  // NOLINT(build/namespaces)
+
+constexpr const uint32_t kMaxNumSlots = 2;
+constexpr const char* kSlotSuffixes[kMaxNumSlots] = {"_a", "_b"};
+constexpr const char* kFakeDevicePath = "/fake/dev/path/";
+constexpr const char* kFakeDmDevicePath = "/fake/dm/dev/path/";
+constexpr const uint32_t kFakeMetadataSize = 65536;
+constexpr const char* kDefaultGroup = "foo";
+constexpr const char* kFakeSuper = "fake_super";
+
+// A map describing the size of each partition.
+// "{name, size}"
+using PartitionSizes = std::map<std::string, uint64_t>;
+
+// "{name_a, size}"
+using PartitionSuffixSizes = std::map<std::string, uint64_t>;
+
+constexpr uint64_t kDefaultGroupSize = 5_GiB;
+// Super device size. 1 MiB for metadata.
+constexpr uint64_t kDefaultSuperSize = kDefaultGroupSize * 2 + 1_MiB;
+
+template <typename U, typename V>
+inline std::ostream& operator<<(std::ostream& os, const std::map<U, V>& param) {
+  os << "{";
+  bool first = true;
+  for (const auto& pair : param) {
+    if (!first)
+      os << ", ";
+    os << pair.first << ":" << pair.second;
+    first = false;
+  }
+  return os << "}";
+}
+
+template <typename V>
+inline void VectorToStream(std::ostream& os, const V& param) {
+  os << "[";
+  bool first = true;
+  for (const auto& e : param) {
+    if (!first)
+      os << ", ";
+    os << e;
+    first = false;
+  }
+  os << "]";
+}
+
+inline std::ostream& operator<<(std::ostream& os, const PartitionUpdate& p) {
+  return os << "{" << p.partition_name() << ", "
+            << p.new_partition_info().size() << "}";
+}
+
+inline std::ostream& operator<<(std::ostream& os,
+                                const DynamicPartitionGroup& g) {
+  os << "{" << g.name() << ", " << g.size() << ", ";
+  VectorToStream(os, g.partition_names());
+  return os << "}";
+}
+
+inline std::ostream& operator<<(std::ostream& os,
+                                const DeltaArchiveManifest& m) {
+  os << "{.groups = ";
+  VectorToStream(os, m.dynamic_partition_metadata().groups());
+  os << ", .partitions = ";
+  VectorToStream(os, m.partitions());
+  return os << "}";
+}
+
+inline std::string GetDevice(const std::string& name) {
+  return kFakeDevicePath + name;
+}
+
+inline std::string GetDmDevice(const std::string& name) {
+  return kFakeDmDevicePath + name;
+}
+
+inline DynamicPartitionGroup* AddGroup(DeltaArchiveManifest* manifest,
+                                       const std::string& group,
+                                       uint64_t group_size) {
+  auto* g = manifest->mutable_dynamic_partition_metadata()->add_groups();
+  g->set_name(group);
+  g->set_size(group_size);
+  return g;
+}
+
+inline void AddPartition(DeltaArchiveManifest* manifest,
+                         DynamicPartitionGroup* group,
+                         const std::string& partition,
+                         uint64_t partition_size) {
+  group->add_partition_names(partition);
+  auto* p = manifest->add_partitions();
+  p->set_partition_name(partition);
+  p->mutable_new_partition_info()->set_size(partition_size);
+}
+
+// To support legacy tests, auto-convert {name_a: size} map to
+// DeltaArchiveManifest.
+inline DeltaArchiveManifest PartitionSuffixSizesToManifest(
+    const PartitionSuffixSizes& partition_sizes) {
+  DeltaArchiveManifest manifest;
+  for (const char* suffix : kSlotSuffixes) {
+    AddGroup(&manifest, std::string(kDefaultGroup) + suffix, kDefaultGroupSize);
+  }
+  for (const auto& pair : partition_sizes) {
+    for (size_t suffix_idx = 0; suffix_idx < kMaxNumSlots; ++suffix_idx) {
+      if (base::EndsWith(pair.first,
+                         kSlotSuffixes[suffix_idx],
+                         base::CompareCase::SENSITIVE)) {
+        AddPartition(
+            &manifest,
+            manifest.mutable_dynamic_partition_metadata()->mutable_groups(
+                suffix_idx),
+            pair.first,
+            pair.second);
+      }
+    }
+  }
+  return manifest;
+}
+
+// To support legacy tests, auto-convert {name: size} map to
+// DeltaArchiveManifest.
+inline DeltaArchiveManifest PartitionSizesToManifest(
+    const PartitionSizes& partition_sizes) {
+  DeltaArchiveManifest manifest;
+  auto* g = AddGroup(&manifest, std::string(kDefaultGroup), kDefaultGroupSize);
+  for (const auto& pair : partition_sizes) {
+    AddPartition(&manifest, g, pair.first, pair.second);
+  }
+  return manifest;
+}
+
+inline std::unique_ptr<MetadataBuilder> NewFakeMetadata(
+    const DeltaArchiveManifest& manifest, uint32_t partition_attr = 0) {
+  auto builder =
+      MetadataBuilder::New(kDefaultSuperSize, kFakeMetadataSize, kMaxNumSlots);
+  for (const auto& group : manifest.dynamic_partition_metadata().groups()) {
+    EXPECT_TRUE(builder->AddGroup(group.name(), group.size()));
+    for (const auto& partition_name : group.partition_names()) {
+      EXPECT_NE(
+          nullptr,
+          builder->AddPartition(partition_name, group.name(), partition_attr));
+    }
+  }
+  for (const auto& partition : manifest.partitions()) {
+    auto p = builder->FindPartition(partition.partition_name());
+    EXPECT_TRUE(p && builder->ResizePartition(
+                         p, partition.new_partition_info().size()));
+  }
+  return builder;
+}
+
+class MetadataMatcher : public MatcherInterface<MetadataBuilder*> {
+ public:
+  explicit MetadataMatcher(const PartitionSuffixSizes& partition_sizes)
+      : manifest_(PartitionSuffixSizesToManifest(partition_sizes)) {}
+  explicit MetadataMatcher(const DeltaArchiveManifest& manifest)
+      : manifest_(manifest) {}
+
+  bool MatchAndExplain(MetadataBuilder* metadata,
+                       MatchResultListener* listener) const override {
+    bool success = true;
+    for (const auto& group : manifest_.dynamic_partition_metadata().groups()) {
+      for (const auto& partition_name : group.partition_names()) {
+        auto p = metadata->FindPartition(partition_name);
+        if (p == nullptr) {
+          if (!success)
+            *listener << "; ";
+          *listener << "No partition " << partition_name;
+          success = false;
+          continue;
+        }
+        const auto& partition_updates = manifest_.partitions();
+        auto it = std::find_if(partition_updates.begin(),
+                               partition_updates.end(),
+                               [&](const auto& p) {
+                                 return p.partition_name() == partition_name;
+                               });
+        if (it == partition_updates.end()) {
+          *listener << "Can't find partition update " << partition_name;
+          success = false;
+          continue;
+        }
+        auto partition_size = it->new_partition_info().size();
+        if (p->size() != partition_size) {
+          if (!success)
+            *listener << "; ";
+          *listener << "Partition " << partition_name << " has size "
+                    << p->size() << ", expected " << partition_size;
+          success = false;
+        }
+        if (p->group_name() != group.name()) {
+          if (!success)
+            *listener << "; ";
+          *listener << "Partition " << partition_name << " has group "
+                    << p->group_name() << ", expected " << group.name();
+          success = false;
+        }
+      }
+    }
+    return success;
+  }
+
+  void DescribeTo(std::ostream* os) const override {
+    *os << "expect: " << manifest_;
+  }
+
+  void DescribeNegationTo(std::ostream* os) const override {
+    *os << "expect not: " << manifest_;
+  }
+
+ private:
+  DeltaArchiveManifest manifest_;
+};
+
+inline Matcher<MetadataBuilder*> MetadataMatches(
+    const PartitionSuffixSizes& partition_sizes) {
+  return MakeMatcher(new MetadataMatcher(partition_sizes));
+}
+
+inline Matcher<MetadataBuilder*> MetadataMatches(
+    const DeltaArchiveManifest& manifest) {
+  return MakeMatcher(new MetadataMatcher(manifest));
+}
+
+MATCHER_P(HasGroup, group, " has group " + group) {
+  auto groups = arg->ListGroups();
+  return std::find(groups.begin(), groups.end(), group) != groups.end();
+}
+
+struct TestParam {
+  uint32_t source;
+  uint32_t target;
+};
+inline std::ostream& operator<<(std::ostream& os, const TestParam& param) {
+  return os << "{source: " << param.source << ", target:" << param.target
+            << "}";
+}
+
+}  // namespace chromeos_update_engine
+
+#endif  // UPDATE_ENGINE_DYNAMIC_PARTITION_TEST_UTILS_H_
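
For reference, a minimal sketch of how these helpers combine in a test expectation; the fixture pieces (dynamicControl(), the StoreMetadata mock) are borrowed from the unittest code above and are assumptions here rather than part of this header:

    // Build a manifest from a {name: size} map, then check the stored super
    // metadata against per-suffix sizes with the MetadataMatches() matcher.
    DeltaArchiveManifest manifest =
        PartitionSizesToManifest({{"system", 3_GiB}, {"vendor", 1_GiB}});
    EXPECT_CALL(dynamicControl(),
                StoreMetadata(_,
                              MetadataMatches(PartitionSuffixSizes{
                                  {"system_b", 3_GiB}, {"vendor_b", 1_GiB}}),
                              _))
        .WillOnce(Return(true));
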
diff --git a/dynamic_partition_utils.cc b/dynamic_partition_utils.cc
new file mode 100644
index 0000000..f9bd886
--- /dev/null
+++ b/dynamic_partition_utils.cc
@@ -0,0 +1,39 @@
+//
+// Copyright (C) 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/dynamic_partition_utils.h"
+
+#include <vector>
+
+#include <base/logging.h>
+#include <base/strings/string_util.h>
+
+using android::fs_mgr::MetadataBuilder;
+
+namespace chromeos_update_engine {
+
+void DeleteGroupsWithSuffix(MetadataBuilder* builder,
+                            const std::string& suffix) {
+  std::vector<std::string> groups = builder->ListGroups();
+  for (const auto& group_name : groups) {
+    if (base::EndsWith(group_name, suffix, base::CompareCase::SENSITIVE)) {
+      LOG(INFO) << "Removing group " << group_name;
+      builder->RemoveGroupAndPartitions(group_name);
+    }
+  }
+}
+
+}  // namespace chromeos_update_engine
diff --git a/dynamic_partition_utils.h b/dynamic_partition_utils.h
new file mode 100644
index 0000000..09fce00
--- /dev/null
+++ b/dynamic_partition_utils.h
@@ -0,0 +1,33 @@
+//
+// Copyright (C) 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_DYNAMIC_PARTITION_UTILS_H_
+#define UPDATE_ENGINE_DYNAMIC_PARTITION_UTILS_H_
+
+#include <string>
+
+#include <liblp/builder.h>
+
+namespace chromeos_update_engine {
+
+// Delete all groups (and their partitions) in |builder| that have names
+// ending with |suffix|.
+void DeleteGroupsWithSuffix(android::fs_mgr::MetadataBuilder* builder,
+                            const std::string& suffix);
+
+}  // namespace chromeos_update_engine
+
+#endif  // UPDATE_ENGINE_DYNAMIC_PARTITION_UTILS_H_
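
A short sketch of the intended call pattern for DeleteGroupsWithSuffix(), assuming the LoadMetadataBuilder()/StoreMetadata() methods declared elsewhere in this change; it is illustrative only, not the exact call site:

    // Wipe stale groups carrying the target-slot suffix before re-adding them
    // from the manifest, then persist the rebuilt metadata.
    auto builder = LoadMetadataBuilder(super_device, source_slot, target_slot);
    if (builder != nullptr) {
      DeleteGroupsWithSuffix(builder.get(), "_b" /* target slot suffix */);
      // ... add target-slot groups and partitions from the manifest ...
      StoreMetadata(super_device, builder.get(), target_slot);
    }
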
diff --git a/hardware_android.cc b/hardware_android.cc
index 82f1b9a..ac6cf16 100644
--- a/hardware_android.cc
+++ b/hardware_android.cc
@@ -198,6 +198,13 @@
   return GetIntProperty<int64_t>(kPropBuildDateUTC, 0);
 }
 
+// Returns true if the device runs a userdebug build and explicitly allows OTA
+// downgrade.
+bool HardwareAndroid::AllowDowngrade() const {
+  return GetBoolProperty("ro.ota.allow_downgrade", false) &&
+         GetBoolProperty("ro.debuggable", false);
+}
+
 bool HardwareAndroid::GetFirstActiveOmahaPingSent() const {
   LOG(WARNING) << "STUB: Assuming first active omaha was never set.";
   return false;
@@ -209,4 +216,11 @@
   return true;
 }
 
+void HardwareAndroid::SetWarmReset(bool warm_reset) {
+  constexpr char warm_reset_prop[] = "ota.warm_reset";
+  if (!android::base::SetProperty(warm_reset_prop, warm_reset ? "1" : "0")) {
+    LOG(WARNING) << "Failed to set prop " << warm_reset_prop;
+  }
+}
+
 }  // namespace chromeos_update_engine
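
Both properties above must be set for a downgrade to be honored: the build has to be debuggable and the device has to opt in explicitly. A hedged sketch of a caller-side check (hardware_, new_timestamp and current_timestamp are illustrative names, not taken from this change):

    // Reject a payload whose build timestamp goes backwards unless the device
    // explicitly allows OTA downgrade.
    if (new_timestamp < current_timestamp && !hardware_->AllowDowngrade()) {
      return ErrorCode::kPayloadTimestampError;
    }
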
diff --git a/hardware_android.h b/hardware_android.h
index 6edf468..e0368f9 100644
--- a/hardware_android.h
+++ b/hardware_android.h
@@ -54,8 +54,10 @@
   bool GetNonVolatileDirectory(base::FilePath* path) const override;
   bool GetPowerwashSafeDirectory(base::FilePath* path) const override;
   int64_t GetBuildTimestamp() const override;
+  bool AllowDowngrade() const override;
   bool GetFirstActiveOmahaPingSent() const override;
   bool SetFirstActiveOmahaPingSent() override;
+  void SetWarmReset(bool warm_reset) override;
 
  private:
   DISALLOW_COPY_AND_ASSIGN(HardwareAndroid);
diff --git a/hardware_chromeos.cc b/hardware_chromeos.cc
index de1d7c0..5ff1b29 100644
--- a/hardware_chromeos.cc
+++ b/hardware_chromeos.cc
@@ -382,4 +382,6 @@
   return true;
 }
 
+void HardwareChromeOS::SetWarmReset(bool warm_reset) {}
+
 }  // namespace chromeos_update_engine
diff --git a/hardware_chromeos.h b/hardware_chromeos.h
index 230e864..e14ae9a 100644
--- a/hardware_chromeos.h
+++ b/hardware_chromeos.h
@@ -59,8 +59,10 @@
   bool GetNonVolatileDirectory(base::FilePath* path) const override;
   bool GetPowerwashSafeDirectory(base::FilePath* path) const override;
   int64_t GetBuildTimestamp() const override;
+  bool AllowDowngrade() const override { return false; }
   bool GetFirstActiveOmahaPingSent() const override;
   bool SetFirstActiveOmahaPingSent() override;
+  void SetWarmReset(bool warm_reset) override;
 
  private:
   friend class HardwareChromeOSTest;
diff --git a/libcurl_http_fetcher.cc b/libcurl_http_fetcher.cc
index d317d48..7c53a2d 100644
--- a/libcurl_http_fetcher.cc
+++ b/libcurl_http_fetcher.cc
@@ -78,11 +78,26 @@
 #ifdef __ANDROID__
   qtaguid_untagSocket(item);
 #endif  // __ANDROID__
+
   LibcurlHttpFetcher* fetcher = static_cast<LibcurlHttpFetcher*>(clientp);
   // Stop watching the socket before closing it.
+#ifdef __ANDROID__
+  for (size_t t = 0; t < arraysize(fetcher->fd_task_maps_); ++t) {
+    const auto fd_task_pair = fetcher->fd_task_maps_[t].find(item);
+    if (fd_task_pair != fetcher->fd_task_maps_[t].end()) {
+      if (!MessageLoop::current()->CancelTask(fd_task_pair->second)) {
+        LOG(WARNING) << "Error canceling the watch task "
+                     << fd_task_pair->second << " for "
+                     << (t ? "writing" : "reading") << " the fd " << item;
+      }
+      fetcher->fd_task_maps_[t].erase(item);
+    }
+  }
+#else
   for (size_t t = 0; t < base::size(fetcher->fd_controller_maps_); ++t) {
     fetcher->fd_controller_maps_[t].erase(item);
   }
+#endif  // __ANDROID__
 
   // Documentation for this callback says to return 0 on success or 1 on error.
   if (!IGNORE_EINTR(close(item)))
@@ -676,6 +691,63 @@
 
   // We should iterate through all file descriptors up to libcurl's fd_max or
   // the highest one we're tracking, whichever is larger.
+#ifdef __ANDROID__
+  for (size_t t = 0; t < arraysize(fd_task_maps_); ++t) {
+    if (!fd_task_maps_[t].empty())
+      fd_max = max(fd_max, fd_task_maps_[t].rbegin()->first);
+  }
+
+  // For each fd, if we're not tracking it, track it. If we are tracking it, but
+  // libcurl doesn't care about it anymore, stop tracking it. After this loop,
+  // there should be exactly as many tasks scheduled in fd_task_maps_[0|1] as
+  // there are read/write fds that we're tracking.
+  for (int fd = 0; fd <= fd_max; ++fd) {
+    // Note that fd_exc is unused in the current version of libcurl so is_exc
+    // should always be false.
+    bool is_exc = FD_ISSET(fd, &fd_exc) != 0;
+    bool must_track[2] = {
+        is_exc || (FD_ISSET(fd, &fd_read) != 0),  // track 0 -- read
+        is_exc || (FD_ISSET(fd, &fd_write) != 0)  // track 1 -- write
+    };
+    MessageLoop::WatchMode watch_modes[2] = {
+        MessageLoop::WatchMode::kWatchRead,
+        MessageLoop::WatchMode::kWatchWrite,
+    };
+
+    for (size_t t = 0; t < arraysize(fd_task_maps_); ++t) {
+      auto fd_task_it = fd_task_maps_[t].find(fd);
+      bool tracked = fd_task_it != fd_task_maps_[t].end();
+
+      if (!must_track[t]) {
+        // If we have an outstanding watch task, remove it.
+        if (tracked) {
+          MessageLoop::current()->CancelTask(fd_task_it->second);
+          fd_task_maps_[t].erase(fd_task_it);
+        }
+        continue;
+      }
+
+      // If we are already tracking this fd, continue -- nothing to do.
+      if (tracked)
+        continue;
+
+      // Track a new fd.
+      fd_task_maps_[t][fd] = MessageLoop::current()->WatchFileDescriptor(
+          FROM_HERE,
+          fd,
+          watch_modes[t],
+          true,  // persistent
+          base::Bind(&LibcurlHttpFetcher::CurlPerformOnce,
+                     base::Unretained(this)));
+
+      static int io_counter = 0;
+      io_counter++;
+      if (io_counter % 50 == 0) {
+        LOG(INFO) << "io_counter = " << io_counter;
+      }
+    }
+  }
+#else
   for (size_t t = 0; t < base::size(fd_controller_maps_); ++t) {
     if (!fd_controller_maps_[t].empty())
       fd_max = max(fd_max, fd_controller_maps_[t].rbegin()->first);
@@ -731,6 +803,7 @@
       }
     }
   }
+#endif  // __ANDROID__
 
   // Set up a timeout callback for libcurl.
   if (timeout_id_ == MessageLoop::kTaskIdNull) {
@@ -775,9 +848,22 @@
   MessageLoop::current()->CancelTask(timeout_id_);
   timeout_id_ = MessageLoop::kTaskIdNull;
 
+#ifdef __ANDROID__
+  for (size_t t = 0; t < arraysize(fd_task_maps_); ++t) {
+    for (const auto& fd_task_pair : fd_task_maps_[t]) {
+      if (!MessageLoop::current()->CancelTask(fd_task_pair.second)) {
+        LOG(WARNING) << "Error canceling the watch task " << fd_task_pair.second
+                     << " for " << (t ? "writing" : "reading") << " the fd "
+                     << fd_task_pair.first;
+      }
+    }
+    fd_task_maps_[t].clear();
+  }
+#else
   for (size_t t = 0; t < base::size(fd_controller_maps_); ++t) {
     fd_controller_maps_[t].clear();
   }
+#endif  // __ANDROID__
 
   if (curl_http_headers_) {
     curl_slist_free_all(curl_http_headers_);
diff --git a/libcurl_http_fetcher.h b/libcurl_http_fetcher.h
index 97a9a87..4854f40 100644
--- a/libcurl_http_fetcher.h
+++ b/libcurl_http_fetcher.h
@@ -218,7 +218,7 @@
   }
 
   // Cleans up the following if they are non-null:
-  // curl(m) handles, fd_controller_maps_, timeout_id_.
+  // curl(m) handles, fd_controller_maps_ (fd_task_maps_ on Android), timeout_id_.
   void CleanUp();
 
   // Force terminate the transfer. This will invoke the delegate's (if any)
@@ -255,8 +255,12 @@
   // the message loop. libcurl may open/close descriptors and switch their
   // directions so maintain two separate lists so that watch conditions can be
   // set appropriately.
+#ifdef __ANDROID__
+  std::map<int, brillo::MessageLoop::TaskId> fd_task_maps_[2];
+#else
   std::map<int, std::unique_ptr<base::FileDescriptorWatcher::Controller>>
       fd_controller_maps_[2];
+#endif  // __ANDROID__
 
   // The TaskId of the timer we're waiting on. kTaskIdNull if we are not waiting
   // on it.
diff --git a/libcurl_http_fetcher_unittest.cc b/libcurl_http_fetcher_unittest.cc
index 20f05b9..8064b99 100644
--- a/libcurl_http_fetcher_unittest.cc
+++ b/libcurl_http_fetcher_unittest.cc
@@ -94,6 +94,29 @@
             no_network_max_retries);
 }
 
+#ifdef __ANDROID__
+TEST_F(LibcurlHttpFetcherTest, CouldntResolveHostTest) {
+  int no_network_max_retries = 1;
+  libcurl_fetcher_.set_no_network_max_retries(no_network_max_retries);
+
+  // This test actually sends a request to the internet, but according to
+  // https://tools.ietf.org/html/rfc2606#section-2, .invalid domain names are
+  // reserved and guaranteed to be invalid. Ideally we should mock libcurl or
+  // reorganize LibcurlHttpFetcher so that the part that sends the request can
+  // be mocked easily.
+  // TODO(xiaochu) Refactor LibcurlHttpFetcher (and its related classes) so it's
+  // easier to mock the part that depends on internet connectivity.
+  libcurl_fetcher_.BeginTransfer("https://An-uNres0lvable-uRl.invalid");
+  while (loop_.PendingTasks()) {
+    loop_.RunOnce(true);
+  }
+
+  // If libcurl fails to resolve the name, we call res_init() to reload
+  // resolv.conf and retry exactly once more. See crbug.com/982813 for details.
+  EXPECT_EQ(libcurl_fetcher_.get_no_network_max_retries(),
+            no_network_max_retries + 1);
+}
+#else
 TEST_F(LibcurlHttpFetcherTest, CouldNotResolveHostTest) {
   int no_network_max_retries = 1;
   libcurl_fetcher_.set_no_network_max_retries(no_network_max_retries);
@@ -163,6 +186,7 @@
   EXPECT_EQ(libcurl_fetcher_.get_no_network_max_retries(),
             no_network_max_retries + 1);
 }
+#endif  // __ANDROID__
 
 TEST_F(LibcurlHttpFetcherTest, HttpFetcherStateMachineRetryFailedTest) {
   state_machine_.UpdateState(true);
diff --git a/logging.cc b/logging.cc
new file mode 100644
index 0000000..6320e36
--- /dev/null
+++ b/logging.cc
@@ -0,0 +1,87 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <string>
+
+#include <base/files/file_util.h>
+#include <base/logging.h>
+#include <base/strings/string_util.h>
+#include <base/strings/stringprintf.h>
+
+#include "update_engine/common/utils.h"
+#include "update_engine/logging.h"
+
+using std::string;
+
+namespace chromeos_update_engine {
+
+namespace {
+
+constexpr char kSystemLogsRoot[] = "/var/log";
+
+void SetupLogSymlink(const string& symlink_path, const string& log_path) {
+  // TODO(petkov): To ensure a smooth transition between non-timestamped and
+  // timestamped logs, move an existing log to start the first timestamped
+  // one. This code can go away once all clients are switched to this version or
+  // we stop caring about the old-style logs.
+  if (utils::FileExists(symlink_path.c_str()) &&
+      !utils::IsSymlink(symlink_path.c_str())) {
+    base::ReplaceFile(
+        base::FilePath(symlink_path), base::FilePath(log_path), nullptr);
+  }
+  base::DeleteFile(base::FilePath(symlink_path), true);
+  if (symlink(log_path.c_str(), symlink_path.c_str()) == -1) {
+    PLOG(ERROR) << "Unable to create symlink " << symlink_path
+                << " pointing at " << log_path;
+  }
+}
+
+string SetupLogFile(const string& kLogsRoot) {
+  const string kLogSymlink = kLogsRoot + "/update_engine.log";
+  const string kLogsDir = kLogsRoot + "/update_engine";
+  const string kLogPath =
+      base::StringPrintf("%s/update_engine.%s",
+                         kLogsDir.c_str(),
+                         utils::GetTimeAsString(::time(nullptr)).c_str());
+  mkdir(kLogsDir.c_str(), 0755);
+  SetupLogSymlink(kLogSymlink, kLogPath);
+  return kLogSymlink;
+}
+
+}  // namespace
+
+void SetupLogging(bool log_to_system, bool log_to_file) {
+  logging::LoggingSettings log_settings;
+  log_settings.lock_log = logging::DONT_LOCK_LOG_FILE;
+  log_settings.logging_dest = static_cast<logging::LoggingDestination>(
+      (log_to_system ? logging::LOG_TO_SYSTEM_DEBUG_LOG : 0) |
+      (log_to_file ? logging::LOG_TO_FILE : 0));
+  log_settings.log_file = nullptr;
+
+  string log_file;
+  if (log_to_file) {
+    log_file = SetupLogFile(kSystemLogsRoot);
+    log_settings.delete_old = logging::APPEND_TO_OLD_LOG_FILE;
+    log_settings.log_file = log_file.c_str();
+  }
+  logging::InitLogging(log_settings);
+}
+
+}  // namespace chromeos_update_engine
diff --git a/logging.h b/logging.h
new file mode 100644
index 0000000..c9e7483
--- /dev/null
+++ b/logging.h
@@ -0,0 +1,23 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_LOGGING_H_
+#define UPDATE_ENGINE_LOGGING_H_
+
+namespace chromeos_update_engine {
+
+// Set up logging. |log_to_system| and |log_to_file| specify
+// the destinations of the logs.
+void SetupLogging(bool log_to_system, bool log_to_file);
+
+}  // namespace chromeos_update_engine
+
+#endif  // UPDATE_ENGINE_LOGGING_H_
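
Splitting SetupLogging() out of main.cc keeps the platform-specific setup in logging.cc and logging_android.cc. A plausible call site is sketched below; the exact flag wiring lives in main.cc and is an assumption here:

    // Route logs to the system logger and, if requested, to a file under the
    // platform-specific log directory.
    chromeos_update_engine::SetupLogging(/*log_to_system=*/true,
                                         /*log_to_file=*/FLAGS_logtofile);
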
diff --git a/logging_android.cc b/logging_android.cc
new file mode 100644
index 0000000..0219075
--- /dev/null
+++ b/logging_android.cc
@@ -0,0 +1,276 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include <inttypes.h>
+#include <stdio.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+#include <algorithm>
+#include <functional>
+#include <iomanip>
+#include <sstream>
+#include <string>
+#include <string_view>
+#include <vector>
+
+#include <android-base/file.h>
+#include <android-base/strings.h>
+#include <android-base/unique_fd.h>
+#include <base/files/dir_reader_posix.h>
+#include <base/logging.h>
+#include <base/strings/string_util.h>
+#include <base/strings/stringprintf.h>
+#include <log/log.h>
+
+#include "update_engine/common/utils.h"
+
+using std::string;
+
+#ifdef _UE_SIDELOAD
+constexpr bool kSideload = true;
+#else
+constexpr bool kSideload = false;
+#endif
+
+namespace chromeos_update_engine {
+namespace {
+
+constexpr char kSystemLogsRoot[] = "/data/misc/update_engine_log";
+constexpr size_t kLogCount = 5;
+
+// Keep the most recent |kLogCount| logs but remove the old ones in
+// "/data/misc/update_engine_log/".
+void DeleteOldLogs(const string& kLogsRoot) {
+  base::DirReaderPosix reader(kLogsRoot.c_str());
+  if (!reader.IsValid()) {
+    LOG(ERROR) << "Failed to read " << kLogsRoot;
+    return;
+  }
+
+  std::vector<string> old_logs;
+  while (reader.Next()) {
+    if (reader.name()[0] == '.')
+      continue;
+
+    // Log files are in format "update_engine.%Y%m%d-%H%M%S",
+    // e.g. update_engine.20090103-231425
+    uint64_t date;
+    uint64_t local_time;
+    if (sscanf(reader.name(),
+               "update_engine.%" PRIu64 "-%" PRIu64 "",
+               &date,
+               &local_time) == 2) {
+      old_logs.push_back(reader.name());
+    } else {
+      LOG(WARNING) << "Unrecognized log file " << reader.name();
+    }
+  }
+
+  std::sort(old_logs.begin(), old_logs.end(), std::greater<string>());
+  for (size_t i = kLogCount; i < old_logs.size(); i++) {
+    string log_path = kLogsRoot + "/" + old_logs[i];
+    if (unlink(log_path.c_str()) == -1) {
+      PLOG(WARNING) << "Failed to unlink " << log_path;
+    }
+  }
+}
+
+string SetupLogFile(const string& kLogsRoot) {
+  DeleteOldLogs(kLogsRoot);
+
+  return base::StringPrintf("%s/update_engine.%s",
+                            kLogsRoot.c_str(),
+                            utils::GetTimeAsString(::time(nullptr)).c_str());
+}
+
+const char* LogPriorityToCString(int priority) {
+  switch (priority) {
+    case ANDROID_LOG_VERBOSE:
+      return "VERBOSE";
+    case ANDROID_LOG_DEBUG:
+      return "DEBUG";
+    case ANDROID_LOG_INFO:
+      return "INFO";
+    case ANDROID_LOG_WARN:
+      return "WARN";
+    case ANDROID_LOG_ERROR:
+      return "ERROR";
+    case ANDROID_LOG_FATAL:
+      return "FATAL";
+    default:
+      return "UNKNOWN";
+  }
+}
+
+using LoggerFunction = std::function<void(const struct __android_log_message*)>;
+
+class FileLogger {
+ public:
+  explicit FileLogger(const string& path) {
+    fd_.reset(TEMP_FAILURE_RETRY(
+        open(path.c_str(),
+             O_WRONLY | O_CREAT | O_TRUNC | O_CLOEXEC | O_NOFOLLOW | O_SYNC,
+             0644)));
+    if (fd_ == -1) {
+      // Use ALOGE that logs to logd before __android_log_set_logger.
+      ALOGE("Cannot open persistent log %s: %s", path.c_str(), strerror(errno));
+      return;
+    }
+    // The log file will have AID_LOG as group ID; this GID is inherited from
+    // the parent directory "/data/misc/update_engine_log" which sets the SGID
+    // bit.
+    if (fchmod(fd_.get(), 0640) == -1) {
+      // Use ALOGE that logs to logd before __android_log_set_logger.
+      ALOGE("Cannot chmod 0640 persistent log %s: %s",
+            path.c_str(),
+            strerror(errno));
+      return;
+    }
+  }
+  // Copy constructor is needed so this logger can be stored in a std::function.
+  FileLogger(const FileLogger& other) { fd_.reset(dup(other.fd_)); }
+  void operator()(const struct __android_log_message* log_message) {
+    if (fd_ == -1) {
+      return;
+    }
+
+    std::string_view message_str =
+        log_message->message != nullptr ? log_message->message : "";
+
+    WriteToFd(GetPrefix(log_message));
+    WriteToFd(message_str);
+    WriteToFd("\n");
+  }
+
+ private:
+  android::base::unique_fd fd_;
+  void WriteToFd(std::string_view message) {
+    ignore_result(
+        android::base::WriteFully(fd_, message.data(), message.size()));
+  }
+
+  string GetPrefix(const struct __android_log_message* log_message) {
+    std::stringstream ss;
+    timeval tv;
+    gettimeofday(&tv, nullptr);
+    time_t t = tv.tv_sec;
+    struct tm local_time;
+    localtime_r(&t, &local_time);
+    struct tm* tm_time = &local_time;
+    ss << "[" << std::setfill('0') << std::setw(2) << 1 + tm_time->tm_mon
+       << std::setw(2) << tm_time->tm_mday << '/' << std::setw(2)
+       << tm_time->tm_hour << std::setw(2) << tm_time->tm_min << std::setw(2)
+       << tm_time->tm_sec << '.' << std::setw(6) << tv.tv_usec << "] ";
+    // libchrome prepends |message| with the severity, file and line, but
+    // leaves log_message->file as nullptr.
+    // libbase / liblog messages don't, so add the prefix here to match the
+    // style. For liblog messages that don't set log_message->file, not
+    // printing the priority is acceptable.
+    if (log_message->file) {
+      ss << "[" << LogPriorityToCString(log_message->priority) << ':'
+         << log_message->file << '(' << log_message->line << ")] ";
+    }
+    return ss.str();
+  }
+};
+
+class CombinedLogger {
+ public:
+  CombinedLogger(bool log_to_system, bool log_to_file) {
+    if (log_to_system) {
+      if (kSideload) {
+        // No logd in sideload. Use stderr;
+        // recovery has already redirected stdio properly.
+        loggers_.push_back(__android_log_stderr_logger);
+      } else {
+        loggers_.push_back(__android_log_logd_logger);
+      }
+    }
+    if (log_to_file) {
+      loggers_.push_back(FileLogger(SetupLogFile(kSystemLogsRoot)));
+    }
+  }
+  void operator()(const struct __android_log_message* log_message) {
+    for (auto&& logger : loggers_) {
+      logger(log_message);
+    }
+  }
+
+ private:
+  std::vector<LoggerFunction> loggers_;
+};
+
+// Redirect all libchrome logs to liblog using our custom handler, which
+// forwards them via __android_log_write without also writing to stderr
+// explicitly. The installed CombinedLogger already writes to stderr properly.
+bool RedirectToLiblog(int severity,
+                      const char* file,
+                      int line,
+                      size_t message_start,
+                      const std::string& str_newline) {
+  android_LogPriority priority =
+      (severity < 0) ? ANDROID_LOG_VERBOSE : ANDROID_LOG_UNKNOWN;
+  switch (severity) {
+    case logging::LOG_INFO:
+      priority = ANDROID_LOG_INFO;
+      break;
+    case logging::LOG_WARNING:
+      priority = ANDROID_LOG_WARN;
+      break;
+    case logging::LOG_ERROR:
+      priority = ANDROID_LOG_ERROR;
+      break;
+    case logging::LOG_FATAL:
+      priority = ANDROID_LOG_FATAL;
+      break;
+  }
+  std::string_view sv = str_newline;
+  ignore_result(android::base::ConsumeSuffix(&sv, "\n"));
+  std::string str(sv.data(), sv.size());
+  // This will eventually be redirected to CombinedLogger.
+  // Use nullptr as tag so that liblog infers log tag from getprogname().
+  __android_log_write(priority, nullptr /* tag */, str.c_str());
+  return true;
+}
+
+}  // namespace
+
+void SetupLogging(bool log_to_system, bool log_to_file) {
+  // Note that libchrome logging uses liblog.
+  // By calling liblog's __android_log_set_logger function, all of libchrome
+  // (used by update_engine) / libbase / liblog (used by dependent modules)
+  // logging eventually redirects to CombinedLogger.
+  static auto g_logger =
+      std::make_unique<CombinedLogger>(log_to_system, log_to_file);
+  __android_log_set_logger([](const struct __android_log_message* log_message) {
+    (*g_logger)(log_message);
+  });
+
+  // libchrome logging should not log to file.
+  logging::LoggingSettings log_settings;
+  log_settings.lock_log = logging::DONT_LOCK_LOG_FILE;
+  log_settings.logging_dest =
+      static_cast<logging::LoggingDestination>(logging::LOG_NONE);
+  log_settings.log_file = nullptr;
+  logging::InitLogging(log_settings);
+  logging::SetLogItems(false /* enable_process_id */,
+                       false /* enable_thread_id */,
+                       false /* enable_timestamp */,
+                       false /* enable_tickcount */);
+  logging::SetLogMessageHandler(&RedirectToLiblog);
+}
+
+}  // namespace chromeos_update_engine
diff --git a/main.cc b/main.cc
index b435467..ceb5b56 100644
--- a/main.cc
+++ b/main.cc
@@ -14,150 +14,23 @@
 // limitations under the License.
 //
 
-#include <inttypes.h>
 #include <sys/stat.h>
 #include <sys/types.h>
-#include <unistd.h>
 #include <xz.h>
 
-#include <algorithm>
-#include <string>
-#include <vector>
-
 #include <base/at_exit.h>
 #include <base/command_line.h>
-#include <base/files/dir_reader_posix.h>
-#include <base/files/file_util.h>
 #include <base/logging.h>
-#include <base/strings/string_util.h>
-#include <base/strings/stringprintf.h>
 #include <brillo/flag_helper.h>
 
 #include "update_engine/common/subprocess.h"
 #include "update_engine/common/terminator.h"
 #include "update_engine/common/utils.h"
 #include "update_engine/daemon_base.h"
+#include "update_engine/logging.h"
 
 using std::string;
 
-namespace chromeos_update_engine {
-namespace {
-
-string GetTimeAsString(time_t utime) {
-  struct tm tm;
-  CHECK_EQ(localtime_r(&utime, &tm), &tm);
-  char str[16];
-  CHECK_EQ(strftime(str, sizeof(str), "%Y%m%d-%H%M%S", &tm), 15u);
-  return str;
-}
-
-#ifdef __ANDROID__
-constexpr char kSystemLogsRoot[] = "/data/misc/update_engine_log";
-constexpr size_t kLogCount = 5;
-
-// Keep the most recent |kLogCount| logs but remove the old ones in
-// "/data/misc/update_engine_log/".
-void DeleteOldLogs(const string& kLogsRoot) {
-  base::DirReaderPosix reader(kLogsRoot.c_str());
-  if (!reader.IsValid()) {
-    LOG(ERROR) << "Failed to read " << kLogsRoot;
-    return;
-  }
-
-  std::vector<string> old_logs;
-  while (reader.Next()) {
-    if (reader.name()[0] == '.')
-      continue;
-
-    // Log files are in format "update_engine.%Y%m%d-%H%M%S",
-    // e.g. update_engine.20090103-231425
-    uint64_t date;
-    uint64_t local_time;
-    if (sscanf(reader.name(),
-               "update_engine.%" PRIu64 "-%" PRIu64 "",
-               &date,
-               &local_time) == 2) {
-      old_logs.push_back(reader.name());
-    } else {
-      LOG(WARNING) << "Unrecognized log file " << reader.name();
-    }
-  }
-
-  std::sort(old_logs.begin(), old_logs.end(), std::greater<string>());
-  for (size_t i = kLogCount; i < old_logs.size(); i++) {
-    string log_path = kLogsRoot + "/" + old_logs[i];
-    if (unlink(log_path.c_str()) == -1) {
-      PLOG(WARNING) << "Failed to unlink " << log_path;
-    }
-  }
-}
-
-string SetupLogFile(const string& kLogsRoot) {
-  DeleteOldLogs(kLogsRoot);
-
-  return base::StringPrintf("%s/update_engine.%s",
-                            kLogsRoot.c_str(),
-                            GetTimeAsString(::time(nullptr)).c_str());
-}
-#else
-constexpr char kSystemLogsRoot[] = "/var/log";
-
-void SetupLogSymlink(const string& symlink_path, const string& log_path) {
-  // TODO(petkov): To ensure a smooth transition between non-timestamped and
-  // timestamped logs, move an existing log to start the first timestamped
-  // one. This code can go away once all clients are switched to this version or
-  // we stop caring about the old-style logs.
-  if (utils::FileExists(symlink_path.c_str()) &&
-      !utils::IsSymlink(symlink_path.c_str())) {
-    base::ReplaceFile(
-        base::FilePath(symlink_path), base::FilePath(log_path), nullptr);
-  }
-  base::DeleteFile(base::FilePath(symlink_path), true);
-  if (symlink(log_path.c_str(), symlink_path.c_str()) == -1) {
-    PLOG(ERROR) << "Unable to create symlink " << symlink_path
-                << " pointing at " << log_path;
-  }
-}
-
-string SetupLogFile(const string& kLogsRoot) {
-  const string kLogSymlink = kLogsRoot + "/update_engine.log";
-  const string kLogsDir = kLogsRoot + "/update_engine";
-  const string kLogPath =
-      base::StringPrintf("%s/update_engine.%s",
-                         kLogsDir.c_str(),
-                         GetTimeAsString(::time(nullptr)).c_str());
-  mkdir(kLogsDir.c_str(), 0755);
-  SetupLogSymlink(kLogSymlink, kLogPath);
-  return kLogSymlink;
-}
-#endif  // __ANDROID__
-
-void SetupLogging(bool log_to_system, bool log_to_file) {
-  logging::LoggingSettings log_settings;
-  log_settings.lock_log = logging::DONT_LOCK_LOG_FILE;
-  log_settings.logging_dest = static_cast<logging::LoggingDestination>(
-      (log_to_system ? logging::LOG_TO_SYSTEM_DEBUG_LOG : 0) |
-      (log_to_file ? logging::LOG_TO_FILE : 0));
-  log_settings.log_file = nullptr;
-
-  string log_file;
-  if (log_to_file) {
-    log_file = SetupLogFile(kSystemLogsRoot);
-    log_settings.delete_old = logging::APPEND_TO_OLD_LOG_FILE;
-    log_settings.log_file = log_file.c_str();
-  }
-  logging::InitLogging(log_settings);
-
-#ifdef __ANDROID__
-  // The log file will have AID_LOG as group ID; this GID is inherited from the
-  // parent directory "/data/misc/update_engine_log" which sets the SGID bit.
-  chmod(log_file.c_str(), 0640);
-#endif
-}
-
-}  // namespace
-}  // namespace chromeos_update_engine
-
 int main(int argc, char** argv) {
   DEFINE_bool(logtofile, false, "Write logs to a file in log_dir.");
   DEFINE_bool(logtostderr,
diff --git a/metrics_reporter_android.cc b/metrics_reporter_android.cc
index 9165f0d..9cef43c 100644
--- a/metrics_reporter_android.cc
+++ b/metrics_reporter_android.cc
@@ -16,17 +16,25 @@
 
 #include "update_engine/metrics_reporter_android.h"
 
+#include <stdint.h>
+
 #include <memory>
 #include <string>
 
-#include <metricslogger/metrics_logger.h>
+#include <android-base/properties.h>
+#include <statslog.h>
 
 #include "update_engine/common/constants.h"
 
 namespace {
-void LogHistogram(const std::string& metrics, int value) {
-  android::metricslogger::LogHistogram(metrics, value);
-  LOG(INFO) << "uploading " << value << " to histogram for metric " << metrics;
+// A numeric offset added on top of the enum value, e.g. ErrorCode::SUCCESS is
+// reported as 10000 and AttemptResult::UPDATE_CANCELED as 10011. This keeps
+// the ordering of update engine's enum definitions while the statsd atoms
+// reserve the value 0 for the unknown state.
+constexpr auto kMetricsReporterEnumOffset = 10000;
+
+int32_t GetStatsdEnumValue(int32_t value) {
+  return kMetricsReporterEnumOffset + value;
 }
 }  // namespace
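
Concretely, the statsd atoms reserve 0 for the unknown state, so every update_engine enum value is shifted by the offset before reporting; a small illustration, assuming ErrorCode::kSuccess has value 0 as the comment above implies:

    // kSuccess (0) becomes 10000; the relative ordering of all other enum
    // values is preserved by the constant shift.
    int32_t reported =
        GetStatsdEnumValue(static_cast<int32_t>(ErrorCode::kSuccess));
    // reported == 10000
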
 
@@ -34,41 +42,6 @@
 
 namespace metrics {
 
-// The histograms are defined in:
-// depot/google3/analysis/uma/configs/clearcut/TRON/histograms.xml
-constexpr char kMetricsUpdateEngineAttemptNumber[] =
-    "ota_update_engine_attempt_number";
-constexpr char kMetricsUpdateEngineAttemptResult[] =
-    "ota_update_engine_attempt_result";
-constexpr char kMetricsUpdateEngineAttemptDurationInMinutes[] =
-    "ota_update_engine_attempt_fixed_duration_boottime_in_minutes";
-constexpr char kMetricsUpdateEngineAttemptDurationUptimeInMinutes[] =
-    "ota_update_engine_attempt_duration_monotonic_in_minutes";
-constexpr char kMetricsUpdateEngineAttemptErrorCode[] =
-    "ota_update_engine_attempt_error_code";
-constexpr char kMetricsUpdateEngineAttemptPayloadSizeMiB[] =
-    "ota_update_engine_attempt_payload_size_mib";
-constexpr char kMetricsUpdateEngineAttemptPayloadType[] =
-    "ota_update_engine_attempt_payload_type";
-constexpr char kMetricsUpdateEngineAttemptCurrentBytesDownloadedMiB[] =
-    "ota_update_engine_attempt_fixed_current_bytes_downloaded_mib";
-
-constexpr char kMetricsUpdateEngineSuccessfulUpdateAttemptCount[] =
-    "ota_update_engine_successful_update_attempt_count";
-constexpr char kMetricsUpdateEngineSuccessfulUpdateTotalDurationInMinutes[] =
-    "ota_update_engine_successful_update_fixed_total_duration_in_minutes";
-constexpr char kMetricsUpdateEngineSuccessfulUpdatePayloadSizeMiB[] =
-    "ota_update_engine_successful_update_payload_size_mib";
-constexpr char kMetricsUpdateEngineSuccessfulUpdatePayloadType[] =
-    "ota_update_engine_successful_update_payload_type";
-constexpr char kMetricsUpdateEngineSuccessfulUpdateRebootCount[] =
-    "ota_update_engine_successful_update_reboot_count";
-constexpr char kMetricsUpdateEngineSuccessfulUpdateTotalBytesDownloadedMiB[] =
-    "ota_update_engine_successful_update_total_bytes_downloaded_mib";
-constexpr char
-    kMetricsUpdateEngineSuccessfulUpdateDownloadOverheadPercentage[] =
-        "ota_update_engine_successful_update_download_overhead_percentage";
-
 std::unique_ptr<MetricsReporterInterface> CreateMetricsReporter() {
   return std::make_unique<MetricsReporterAndroid>();
 }
@@ -84,22 +57,17 @@
     int64_t payload_size,
     metrics::AttemptResult attempt_result,
     ErrorCode error_code) {
-  LogHistogram(metrics::kMetricsUpdateEngineAttemptNumber, attempt_number);
-  LogHistogram(metrics::kMetricsUpdateEngineAttemptPayloadType,
-               static_cast<int>(payload_type));
-  LogHistogram(metrics::kMetricsUpdateEngineAttemptDurationInMinutes,
-               duration.InMinutes());
-  LogHistogram(metrics::kMetricsUpdateEngineAttemptDurationUptimeInMinutes,
-               duration_uptime.InMinutes());
-
   int64_t payload_size_mib = payload_size / kNumBytesInOneMiB;
-  LogHistogram(metrics::kMetricsUpdateEngineAttemptPayloadSizeMiB,
-               payload_size_mib);
-
-  LogHistogram(metrics::kMetricsUpdateEngineAttemptResult,
-               static_cast<int>(attempt_result));
-  LogHistogram(metrics::kMetricsUpdateEngineAttemptErrorCode,
-               static_cast<int>(error_code));
+  android::util::stats_write(
+      android::util::UPDATE_ENGINE_UPDATE_ATTEMPT_REPORTED,
+      attempt_number,
+      GetStatsdEnumValue(static_cast<int32_t>(payload_type)),
+      duration.InMinutes(),
+      duration_uptime.InMinutes(),
+      payload_size_mib,
+      GetStatsdEnumValue(static_cast<int32_t>(attempt_result)),
+      GetStatsdEnumValue(static_cast<int32_t>(error_code)),
+      android::base::GetProperty("ro.build.fingerprint", "").c_str());
 }
 
 void MetricsReporterAndroid::ReportUpdateAttemptDownloadMetrics(
@@ -108,8 +76,9 @@
     DownloadSource /* download_source */,
     metrics::DownloadErrorCode /* payload_download_error_code */,
     metrics::ConnectionType /* connection_type */) {
-  LogHistogram(metrics::kMetricsUpdateEngineAttemptCurrentBytesDownloadedMiB,
-               payload_bytes_downloaded / kNumBytesInOneMiB);
+  // TODO(xunchang) add statsd reporting
+  LOG(INFO) << "Current update attempt downloads "
+            << payload_bytes_downloaded / kNumBytesInOneMiB << " bytes data";
 }
 
 void MetricsReporterAndroid::ReportSuccessfulUpdateMetrics(
@@ -123,37 +92,28 @@
     base::TimeDelta /* total_duration_uptime */,
     int reboot_count,
     int /* url_switch_count */) {
-  LogHistogram(metrics::kMetricsUpdateEngineSuccessfulUpdateAttemptCount,
-               attempt_count);
-  LogHistogram(metrics::kMetricsUpdateEngineSuccessfulUpdatePayloadType,
-               static_cast<int>(payload_type));
-
   int64_t payload_size_mib = payload_size / kNumBytesInOneMiB;
-  LogHistogram(metrics::kMetricsUpdateEngineSuccessfulUpdatePayloadSizeMiB,
-               payload_size_mib);
-
   int64_t total_bytes_downloaded = 0;
   for (size_t i = 0; i < kNumDownloadSources; i++) {
     total_bytes_downloaded += num_bytes_downloaded[i] / kNumBytesInOneMiB;
   }
-  LogHistogram(
-      metrics::kMetricsUpdateEngineSuccessfulUpdateTotalBytesDownloadedMiB,
-      total_bytes_downloaded);
-  LogHistogram(
-      metrics::kMetricsUpdateEngineSuccessfulUpdateDownloadOverheadPercentage,
-      download_overhead_percentage);
 
-  LogHistogram(
-      metrics::kMetricsUpdateEngineSuccessfulUpdateTotalDurationInMinutes,
-      total_duration.InMinutes());
-  LogHistogram(metrics::kMetricsUpdateEngineSuccessfulUpdateRebootCount,
-               reboot_count);
+  android::util::stats_write(
+      android::util::UPDATE_ENGINE_SUCCESSFUL_UPDATE_REPORTED,
+      attempt_count,
+      GetStatsdEnumValue(static_cast<int32_t>(payload_type)),
+      payload_size_mib,
+      total_bytes_downloaded,
+      download_overhead_percentage,
+      total_duration.InMinutes(),
+      reboot_count);
 }
 
 void MetricsReporterAndroid::ReportAbnormallyTerminatedUpdateAttemptMetrics() {
   int attempt_result =
       static_cast<int>(metrics::AttemptResult::kAbnormalTermination);
-  LogHistogram(metrics::kMetricsUpdateEngineAttemptResult, attempt_result);
+  // TODO(xunchang) add statsd reporting
+  LOG(INFO) << "Abnormally terminated update attempt result " << attempt_result;
 }
 
 };  // namespace chromeos_update_engine
diff --git a/metrics_utils.cc b/metrics_utils.cc
index efbd067..da3a2c3 100644
--- a/metrics_utils.cc
+++ b/metrics_utils.cc
@@ -72,6 +72,8 @@
     case ErrorCode::kFilesystemCopierError:
     case ErrorCode::kFilesystemVerifierError:
     case ErrorCode::kVerityCalculationError:
+    case ErrorCode::kNotEnoughSpace:
+    case ErrorCode::kDeviceCorrupted:
       return metrics::AttemptResult::kOperationExecutionError;
 
     case ErrorCode::kDownloadMetadataSignatureMismatch:
@@ -236,6 +238,8 @@
     case ErrorCode::kRollbackNotPossible:
     case ErrorCode::kFirstActiveOmahaPingSentPersistenceError:
     case ErrorCode::kVerityCalculationError:
+    case ErrorCode::kNotEnoughSpace:
+    case ErrorCode::kDeviceCorrupted:
       break;
 
     // Special flags. These can't happen (we mask them out above) but
diff --git a/mock_dynamic_partition_control.h b/mock_dynamic_partition_control.h
index 24aca06..1aaebd8 100644
--- a/mock_dynamic_partition_control.h
+++ b/mock_dynamic_partition_control.h
@@ -17,37 +17,98 @@
 #include <stdint.h>
 
 #include <memory>
+#include <set>
 #include <string>
 
 #include <gmock/gmock.h>
 
-#include "update_engine/dynamic_partition_control_interface.h"
+#include "update_engine/common/boot_control_interface.h"
+#include "update_engine/common/dynamic_partition_control_interface.h"
+#include "update_engine/dynamic_partition_control_android.h"
 
 namespace chromeos_update_engine {
 
-class MockDynamicPartitionControl : public DynamicPartitionControlInterface {
+class MockDynamicPartitionControlAndroid
+    : public DynamicPartitionControlAndroid {
  public:
-  MOCK_METHOD5(MapPartitionOnDeviceMapper,
-               bool(const std::string&,
-                    const std::string&,
-                    uint32_t,
-                    bool,
-                    std::string*));
-  MOCK_METHOD2(UnmapPartitionOnDeviceMapper, bool(const std::string&, bool));
-  MOCK_METHOD0(Cleanup, void());
-  MOCK_METHOD1(DeviceExists, bool(const std::string&));
-  MOCK_METHOD1(GetState, ::android::dm::DmDeviceState(const std::string&));
-  MOCK_METHOD2(GetDmDevicePathByName, bool(const std::string&, std::string*));
-  MOCK_METHOD3(LoadMetadataBuilder,
-               std::unique_ptr<::android::fs_mgr::MetadataBuilder>(
-                   const std::string&, uint32_t, uint32_t));
-  MOCK_METHOD3(StoreMetadata,
-               bool(const std::string&,
-                    android::fs_mgr::MetadataBuilder*,
-                    uint32_t));
-  MOCK_METHOD1(GetDeviceDir, bool(std::string*));
-  MOCK_METHOD0(IsDynamicPartitionsEnabled, bool());
-  MOCK_METHOD0(IsDynamicPartitionsRetrofit, bool());
+  MOCK_METHOD(
+      bool,
+      MapPartitionOnDeviceMapper,
+      (const std::string&, const std::string&, uint32_t, bool, std::string*),
+      (override));
+  MOCK_METHOD(bool,
+              UnmapPartitionOnDeviceMapper,
+              (const std::string&),
+              (override));
+  MOCK_METHOD(void, Cleanup, (), (override));
+  MOCK_METHOD(bool, DeviceExists, (const std::string&), (override));
+  MOCK_METHOD(::android::dm::DmDeviceState,
+              GetState,
+              (const std::string&),
+              (override));
+  MOCK_METHOD(bool,
+              GetDmDevicePathByName,
+              (const std::string&, std::string*),
+              (override));
+  MOCK_METHOD(std::unique_ptr<::android::fs_mgr::MetadataBuilder>,
+              LoadMetadataBuilder,
+              (const std::string&, uint32_t, uint32_t),
+              (override));
+  MOCK_METHOD(bool,
+              StoreMetadata,
+              (const std::string&, android::fs_mgr::MetadataBuilder*, uint32_t),
+              (override));
+  MOCK_METHOD(bool, GetDeviceDir, (std::string*), (override));
+  MOCK_METHOD(FeatureFlag, GetDynamicPartitionsFeatureFlag, (), (override));
+  MOCK_METHOD(std::string, GetSuperPartitionName, (uint32_t), (override));
+  MOCK_METHOD(FeatureFlag, GetVirtualAbFeatureFlag, (), (override));
+  MOCK_METHOD(bool, FinishUpdate, (bool), (override));
+  MOCK_METHOD(bool,
+              GetSystemOtherPath,
+              (uint32_t, uint32_t, const std::string&, std::string*, bool*),
+              (override));
+  MOCK_METHOD(bool,
+              EraseSystemOtherAvbFooter,
+              (uint32_t, uint32_t),
+              (override));
+  MOCK_METHOD(std::optional<bool>, IsAvbEnabledOnSystemOther, (), (override));
+  MOCK_METHOD(bool, IsRecovery, (), (override));
+  MOCK_METHOD(bool,
+              PrepareDynamicPartitionsForUpdate,
+              (uint32_t, uint32_t, const DeltaArchiveManifest&, bool),
+              (override));
+
+  void set_fake_mapped_devices(const std::set<std::string>& fake) override {
+    DynamicPartitionControlAndroid::set_fake_mapped_devices(fake);
+  }
+
+  bool RealGetSystemOtherPath(uint32_t source_slot,
+                              uint32_t target_slot,
+                              const std::string& partition_name_suffix,
+                              std::string* path,
+                              bool* should_unmap) {
+    return DynamicPartitionControlAndroid::GetSystemOtherPath(
+        source_slot, target_slot, partition_name_suffix, path, should_unmap);
+  }
+
+  bool RealEraseSystemOtherAvbFooter(uint32_t source_slot,
+                                     uint32_t target_slot) {
+    return DynamicPartitionControlAndroid::EraseSystemOtherAvbFooter(
+        source_slot, target_slot);
+  }
+
+  std::optional<bool> RealIsAvbEnabledInFstab(const std::string& path) {
+    return DynamicPartitionControlAndroid::IsAvbEnabledInFstab(path);
+  }
+
+  bool RealPrepareDynamicPartitionsForUpdate(
+      uint32_t source_slot,
+      uint32_t target_slot,
+      const DeltaArchiveManifest& manifest,
+      bool delete_source) {
+    return DynamicPartitionControlAndroid::PrepareDynamicPartitionsForUpdate(
+        source_slot, target_slot, manifest, delete_source);
+  }
 };
 
 }  // namespace chromeos_update_engine
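
The mock above moves from the legacy MOCK_METHODn macros to the newer MOCK_METHOD syntax and now derives from DynamicPartitionControlAndroid, so tests can stub individual methods while forwarding others to the real implementation via the Real* helpers. A minimal sketch of that pattern, assuming gMock/gTest; the fstab path is a placeholder, not part of this change:

  #include <gmock/gmock.h>
  #include <gtest/gtest.h>

  using testing::Invoke;
  using testing::Return;

  TEST(MockDynamicPartitionControlAndroidExample, DelegateToReal) {
    chromeos_update_engine::MockDynamicPartitionControlAndroid dynamic_control;
    // Plain stub on a mocked method.
    EXPECT_CALL(dynamic_control, IsRecovery()).WillRepeatedly(Return(false));
    // Forward one call to the real fstab parsing helper.
    EXPECT_CALL(dynamic_control, IsAvbEnabledOnSystemOther())
        .WillOnce(Invoke([&] {
          // "/fstab.example" is an illustrative path.
          return dynamic_control.RealIsAvbEnabledInFstab("/fstab.example");
        }));
  }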
diff --git a/otacerts.zip b/otacerts.zip
new file mode 100644
index 0000000..00a5a51
--- /dev/null
+++ b/otacerts.zip
Binary files differ
diff --git a/payload_consumer/certificate_parser_android.cc b/payload_consumer/certificate_parser_android.cc
new file mode 100644
index 0000000..4a20547
--- /dev/null
+++ b/payload_consumer/certificate_parser_android.cc
@@ -0,0 +1,121 @@
+//
+// Copyright (C) 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_consumer/certificate_parser_android.h"
+
+#include <memory>
+#include <utility>
+
+#include <base/logging.h>
+#include <openssl/bio.h>
+#include <openssl/pem.h>
+#include <ziparchive/zip_archive.h>
+
+#include "update_engine/payload_consumer/certificate_parser_interface.h"
+
+namespace {
+bool IterateZipEntriesAndSearchForKeys(
+    const ZipArchiveHandle& handle, std::vector<std::vector<uint8_t>>* result) {
+  void* cookie;
+  int32_t iter_status = StartIteration(handle, &cookie, "", "x509.pem");
+  if (iter_status != 0) {
+    LOG(ERROR) << "Failed to iterate over entries in the certificate zipfile: "
+               << ErrorCodeString(iter_status);
+    return false;
+  }
+  std::unique_ptr<void, decltype(&EndIteration)> guard(cookie, EndIteration);
+
+  std::vector<std::vector<uint8_t>> pem_keys;
+  std::string_view name;
+  ZipEntry entry;
+  while ((iter_status = Next(cookie, &entry, &name)) == 0) {
+    std::vector<uint8_t> pem_content(entry.uncompressed_length);
+    if (int32_t extract_status = ExtractToMemory(
+            handle, &entry, pem_content.data(), pem_content.size());
+        extract_status != 0) {
+      LOG(ERROR) << "Failed to extract " << name << ": "
+                 << ErrorCodeString(extract_status);
+      return false;
+    }
+    pem_keys.push_back(pem_content);
+  }
+
+  if (iter_status != -1) {
+    LOG(ERROR) << "Error while iterating over zip entries: "
+               << ErrorCodeString(iter_status);
+    return false;
+  }
+
+  *result = std::move(pem_keys);
+  return true;
+}
+
+}  // namespace
+
+namespace chromeos_update_engine {
+bool CertificateParserAndroid::ReadPublicKeysFromCertificates(
+    const std::string& path,
+    std::vector<std::unique_ptr<EVP_PKEY, decltype(&EVP_PKEY_free)>>*
+        out_public_keys) {
+  out_public_keys->clear();
+
+  ZipArchiveHandle handle;
+  if (int32_t open_status = OpenArchive(path.c_str(), &handle);
+      open_status != 0) {
+    LOG(ERROR) << "Failed to open " << path << ": "
+               << ErrorCodeString(open_status);
+    return false;
+  }
+
+  std::vector<std::vector<uint8_t>> pem_certs;
+  if (!IterateZipEntriesAndSearchForKeys(handle, &pem_certs)) {
+    CloseArchive(handle);
+    return false;
+  }
+  CloseArchive(handle);
+
+  // Convert the certificates into public keys. Stop and return false if we
+  // encounter an error.
+  std::vector<std::unique_ptr<EVP_PKEY, decltype(&EVP_PKEY_free)>> result;
+  for (const auto& cert : pem_certs) {
+    std::unique_ptr<BIO, decltype(&BIO_free)> input(
+        BIO_new_mem_buf(cert.data(), cert.size()), BIO_free);
+
+    std::unique_ptr<X509, decltype(&X509_free)> x509(
+        PEM_read_bio_X509(input.get(), nullptr, nullptr, nullptr), X509_free);
+    if (!x509) {
+      LOG(ERROR) << "Failed to read x509 certificate";
+      return false;
+    }
+
+    std::unique_ptr<EVP_PKEY, decltype(&EVP_PKEY_free)> public_key(
+        X509_get_pubkey(x509.get()), EVP_PKEY_free);
+    if (!public_key) {
+      LOG(ERROR) << "Failed to extract the public key from x509 certificate";
+      return false;
+    }
+    result.push_back(std::move(public_key));
+  }
+
+  *out_public_keys = std::move(result);
+  return true;
+}
+
+std::unique_ptr<CertificateParserInterface> CreateCertificateParser() {
+  return std::make_unique<CertificateParserAndroid>();
+}
+
+}  // namespace chromeos_update_engine
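
For reference, callers obtain the platform-specific parser through the CreateCertificateParser() factory declared in certificate_parser_interface.h. A minimal usage sketch; the zip path below is illustrative, while production code uses constants::kUpdateCertificatesPath:

  #include <memory>
  #include <vector>

  #include <openssl/evp.h>

  #include "update_engine/payload_consumer/certificate_parser_interface.h"

  bool LoadOtaPublicKeys() {
    auto parser = chromeos_update_engine::CreateCertificateParser();
    std::vector<std::unique_ptr<EVP_PKEY, decltype(&EVP_PKEY_free)>> keys;
    // Illustrative path to the certificate archive shipped on the device.
    if (!parser->ReadPublicKeysFromCertificates("otacerts.zip", &keys))
      return false;
    return !keys.empty();
  }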
diff --git a/payload_consumer/certificate_parser_android.h b/payload_consumer/certificate_parser_android.h
new file mode 100644
index 0000000..ccb9293
--- /dev/null
+++ b/payload_consumer/certificate_parser_android.h
@@ -0,0 +1,46 @@
+//
+// Copyright (C) 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_CERTIFICATE_PARSER_ANDROID_H_
+#define UPDATE_ENGINE_CERTIFICATE_PARSER_ANDROID_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include <base/macros.h>
+
+#include "payload_consumer/certificate_parser_interface.h"
+
+namespace chromeos_update_engine {
+// This class parses the certificates from a zip file, since the Android
+// build system stores the certs in otacerts.zip.
+class CertificateParserAndroid : public CertificateParserInterface {
+ public:
+  CertificateParserAndroid() = default;
+
+  bool ReadPublicKeysFromCertificates(
+      const std::string& path,
+      std::vector<std::unique_ptr<EVP_PKEY, decltype(&EVP_PKEY_free)>>*
+          out_public_keys) override;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(CertificateParserAndroid);
+};
+
+}  // namespace chromeos_update_engine
+
+#endif  // UPDATE_ENGINE_CERTIFICATE_PARSER_ANDROID_H_
diff --git a/payload_consumer/certificate_parser_android_unittest.cc b/payload_consumer/certificate_parser_android_unittest.cc
new file mode 100644
index 0000000..e300414
--- /dev/null
+++ b/payload_consumer/certificate_parser_android_unittest.cc
@@ -0,0 +1,61 @@
+//
+// Copyright (C) 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_consumer/certificate_parser_interface.h"
+
+#include <string>
+
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+
+#include "update_engine/common/hash_calculator.h"
+#include "update_engine/common/test_utils.h"
+#include "update_engine/common/utils.h"
+#include "update_engine/payload_consumer/payload_verifier.h"
+#include "update_engine/payload_generator/payload_signer.h"
+
+namespace chromeos_update_engine {
+
+extern const char* kUnittestPrivateKeyPath;
+const char* kUnittestOtacertsPath = "otacerts.zip";
+
+TEST(CertificateParserAndroidTest, ParseZipArchive) {
+  std::string ota_cert =
+      test_utils::GetBuildArtifactsPath(kUnittestOtacertsPath);
+  ASSERT_TRUE(utils::FileExists(ota_cert.c_str()));
+
+  std::vector<std::unique_ptr<EVP_PKEY, decltype(&EVP_PKEY_free)>> keys;
+  auto parser = CreateCertificateParser();
+  ASSERT_TRUE(parser->ReadPublicKeysFromCertificates(ota_cert, &keys));
+  ASSERT_EQ(1u, keys.size());
+}
+
+TEST(CertificateParserAndroidTest, VerifySignature) {
+  brillo::Blob hash_blob;
+  ASSERT_TRUE(HashCalculator::RawHashOfData({'x'}, &hash_blob));
+  brillo::Blob sig_blob;
+  ASSERT_TRUE(PayloadSigner::SignHash(
+      hash_blob,
+      test_utils::GetBuildArtifactsPath(kUnittestPrivateKeyPath),
+      &sig_blob));
+
+  auto verifier = PayloadVerifier::CreateInstanceFromZipPath(
+      test_utils::GetBuildArtifactsPath(kUnittestOtacertsPath));
+  ASSERT_TRUE(verifier != nullptr);
+  ASSERT_TRUE(verifier->VerifyRawSignature(sig_blob, hash_blob, nullptr));
+}
+
+}  // namespace chromeos_update_engine
diff --git a/payload_consumer/certificate_parser_interface.h b/payload_consumer/certificate_parser_interface.h
new file mode 100644
index 0000000..dad23d2
--- /dev/null
+++ b/payload_consumer/certificate_parser_interface.h
@@ -0,0 +1,44 @@
+//
+// Copyright (C) 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_CERTIFICATE_PARSER_INTERFACE_H_
+#define UPDATE_ENGINE_CERTIFICATE_PARSER_INTERFACE_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include <openssl/pem.h>
+
+namespace chromeos_update_engine {
+
+// This class parses the PEM encoded X509 certificates from |path| and
+// passes the parsed public keys to the caller.
+class CertificateParserInterface {
+ public:
+  virtual ~CertificateParserInterface() = default;
+
+  virtual bool ReadPublicKeysFromCertificates(
+      const std::string& path,
+      std::vector<std::unique_ptr<EVP_PKEY, decltype(&EVP_PKEY_free)>>*
+          out_public_keys) = 0;
+};
+
+std::unique_ptr<CertificateParserInterface> CreateCertificateParser();
+
+}  // namespace chromeos_update_engine
+
+#endif  // UPDATE_ENGINE_CERTIFICATE_PARSER_INTERFACE_H_
diff --git a/payload_consumer/certificate_parser_stub.cc b/payload_consumer/certificate_parser_stub.cc
new file mode 100644
index 0000000..95fd6e8
--- /dev/null
+++ b/payload_consumer/certificate_parser_stub.cc
@@ -0,0 +1,31 @@
+//
+// Copyright (C) 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include <payload_consumer/certificate_parser_stub.h>
+
+namespace chromeos_update_engine {
+bool CertificateParserStub::ReadPublicKeysFromCertificates(
+    const std::string& path,
+    std::vector<std::unique_ptr<EVP_PKEY, decltype(&EVP_PKEY_free)>>*
+        out_public_keys) {
+  return true;
+}
+
+std::unique_ptr<CertificateParserInterface> CreateCertificateParser() {
+  return std::make_unique<CertificateParserStub>();
+}
+
+}  // namespace chromeos_update_engine
diff --git a/payload_consumer/certificate_parser_stub.h b/payload_consumer/certificate_parser_stub.h
new file mode 100644
index 0000000..f4f8825
--- /dev/null
+++ b/payload_consumer/certificate_parser_stub.h
@@ -0,0 +1,44 @@
+//
+// Copyright (C) 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_CERTIFICATE_PARSER_STUB_H_
+#define UPDATE_ENGINE_CERTIFICATE_PARSER_STUB_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include <base/macros.h>
+
+#include "payload_consumer/certificate_parser_interface.h"
+
+namespace chromeos_update_engine {
+class CertificateParserStub : public CertificateParserInterface {
+ public:
+  CertificateParserStub() = default;
+
+  bool ReadPublicKeysFromCertificates(
+      const std::string& path,
+      std::vector<std::unique_ptr<EVP_PKEY, decltype(&EVP_PKEY_free)>>*
+          out_public_keys) override;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(CertificateParserStub);
+};
+
+}  // namespace chromeos_update_engine
+
+#endif  // UPDATE_ENGINE_CERTIFICATE_PARSER_STUB_H_
diff --git a/payload_consumer/delta_performer.cc b/payload_consumer/delta_performer.cc
index 11cf006..af1baa4 100644
--- a/payload_consumer/delta_performer.cc
+++ b/payload_consumer/delta_performer.cc
@@ -23,6 +23,7 @@
 #include <cstring>
 #include <map>
 #include <memory>
+#include <set>
 #include <string>
 #include <utility>
 #include <vector>
@@ -46,9 +47,11 @@
 #include "update_engine/common/terminator.h"
 #include "update_engine/payload_consumer/bzip_extent_writer.h"
 #include "update_engine/payload_consumer/cached_file_descriptor.h"
+#include "update_engine/payload_consumer/certificate_parser_interface.h"
 #include "update_engine/payload_consumer/download_action.h"
 #include "update_engine/payload_consumer/extent_reader.h"
 #include "update_engine/payload_consumer/extent_writer.h"
+#include "update_engine/payload_consumer/partition_update_generator_interface.h"
 #if USE_FEC
 #include "update_engine/payload_consumer/fec_file_descriptor.h"
 #endif  // USE_FEC
@@ -315,8 +318,14 @@
       install_plan_->partitions.size() - partitions_.size();
   const InstallPlan::Partition& install_part =
       install_plan_->partitions[num_previous_partitions + current_partition_];
-  // Open source fds if we have a delta payload.
-  if (payload_->type == InstallPayloadType::kDelta) {
+  // Open source fds if we have a delta payload, or for partitions in the
+  // partial update.
+  bool source_may_exist = manifest_.partial_update() ||
+                          payload_->type == InstallPayloadType::kDelta;
+  // We shouldn't open the source partition in certain cases, e.g. for some
+  // dynamic partitions in a delta payload, or partitions included in the full
+  // payload of a partial update. Use the source size as the indicator.
+  if (source_may_exist && install_part.source_size > 0) {
     source_path_ = install_part.source_path;
     int err;
     source_fd_ = OpenFile(source_path_.c_str(), O_RDONLY, false, &err);
@@ -495,17 +504,19 @@
                  << "Trusting metadata size in payload = " << metadata_size_;
   }
 
-  string public_key;
-  if (!GetPublicKey(&public_key)) {
-    LOG(ERROR) << "Failed to get public key.";
+  auto [payload_verifier, perform_verification] = CreatePayloadVerifier();
+  if (!payload_verifier) {
+    LOG(ERROR) << "Failed to create payload verifier.";
     *error = ErrorCode::kDownloadMetadataSignatureVerificationError;
-    return MetadataParseResult::kError;
+    if (perform_verification) {
+      return MetadataParseResult::kError;
+    }
+  } else {
+    // We have the full metadata in |payload|. Verify its integrity
+    // and authenticity based on the information we have in Omaha response.
+    *error = payload_metadata_.ValidateMetadataSignature(
+        payload, payload_->metadata_signature, *payload_verifier);
   }
-
-  // We have the full metadata in |payload|. Verify its integrity
-  // and authenticity based on the information we have in Omaha response.
-  *error = payload_metadata_.ValidateMetadataSignature(
-      payload, payload_->metadata_signature, public_key);
   if (*error != ErrorCode::kSuccess) {
     if (install_plan_->hash_checks_mandatory) {
       // The autoupdate_CatchBadSignatures test checks for this string
@@ -767,7 +778,44 @@
   for (const PartitionUpdate& partition : manifest_.partitions()) {
     partitions_.push_back(partition);
   }
+
+  // For VAB and partial updates, the partition preparation will copy the
+  // dynamic partitions metadata to the target metadata slot, and rename the
+  // slot suffix of the partitions in the metadata.
+  if (install_plan_->target_slot != BootControlInterface::kInvalidSlot) {
+    uint64_t required_size = 0;
+    if (!PreparePartitionsForUpdate(&required_size)) {
+      if (required_size > 0) {
+        *error = ErrorCode::kNotEnoughSpace;
+      } else {
+        *error = ErrorCode::kInstallDeviceOpenError;
+      }
+      return false;
+    }
+  }
+
+  // Partitions in manifest are no longer needed after preparing partitions.
   manifest_.clear_partitions();
+  // TODO(xunchang) TBD: allow partial update only on devices with dynamic
+  // partitions.
+  if (manifest_.partial_update()) {
+    std::set<std::string> touched_partitions;
+    for (const auto& partition_update : partitions_) {
+      touched_partitions.insert(partition_update.partition_name());
+    }
+
+    auto generator = partition_update_generator::Create(boot_control_,
+                                                        manifest_.block_size());
+    std::vector<PartitionUpdate> other_partitions;
+    TEST_AND_RETURN_FALSE(
+        generator->GenerateOperationsForPartitionsNotInPayload(
+            install_plan_->source_slot,
+            install_plan_->target_slot,
+            touched_partitions,
+            &other_partitions));
+    partitions_.insert(
+        partitions_.end(), other_partitions.begin(), other_partitions.end());
+  }
 
   // Fill in the InstallPlan::partitions based on the partitions from the
   // payload.
@@ -842,13 +890,9 @@
     install_plan_->partitions.push_back(install_part);
   }
 
-  if (install_plan_->target_slot != BootControlInterface::kInvalidSlot) {
-    if (!InitPartitionMetadata()) {
-      *error = ErrorCode::kInstallDeviceOpenError;
-      return false;
-    }
-  }
-
+  // TODO(xunchang) only need to load the partitions for those in the payload,
+  // because we have already loaded the others once when generating SOURCE_COPY
+  // operations.
   if (!install_plan_->LoadPartitionsFromSlots(boot_control_)) {
     LOG(ERROR) << "Unable to determine all the partition devices.";
     *error = ErrorCode::kInstallDeviceOpenError;
@@ -858,45 +902,57 @@
   return true;
 }
 
-bool DeltaPerformer::InitPartitionMetadata() {
-  BootControlInterface::PartitionMetadata partition_metadata;
-  if (manifest_.has_dynamic_partition_metadata()) {
-    std::map<string, uint64_t> partition_sizes;
-    for (const auto& partition : install_plan_->partitions) {
-      partition_sizes.emplace(partition.name, partition.target_size);
-    }
-    for (const auto& group : manifest_.dynamic_partition_metadata().groups()) {
-      BootControlInterface::PartitionMetadata::Group e;
-      e.name = group.name();
-      e.size = group.size();
-      for (const auto& partition_name : group.partition_names()) {
-        auto it = partition_sizes.find(partition_name);
-        if (it == partition_sizes.end()) {
-          // TODO(tbao): Support auto-filling partition info for framework-only
-          // OTA.
-          LOG(ERROR) << "dynamic_partition_metadata contains partition "
-                     << partition_name
-                     << " but it is not part of the manifest. "
-                     << "This is not supported.";
-          return false;
-        }
-        e.partitions.push_back({partition_name, it->second});
-      }
-      partition_metadata.groups.push_back(std::move(e));
-    }
+bool DeltaPerformer::PreparePartitionsForUpdate(uint64_t* required_size) {
+  // Call static PreparePartitionsForUpdate with hash from
+  // kPrefsUpdateCheckResponseHash to ensure hash of payload that space is
+  // preallocated for is the same as the hash of payload being applied.
+  string update_check_response_hash;
+  ignore_result(prefs_->GetString(kPrefsUpdateCheckResponseHash,
+                                  &update_check_response_hash));
+  return PreparePartitionsForUpdate(prefs_,
+                                    boot_control_,
+                                    install_plan_->target_slot,
+                                    manifest_,
+                                    update_check_response_hash,
+                                    required_size);
+}
+
+bool DeltaPerformer::PreparePartitionsForUpdate(
+    PrefsInterface* prefs,
+    BootControlInterface* boot_control,
+    BootControlInterface::Slot target_slot,
+    const DeltaArchiveManifest& manifest,
+    const std::string& update_check_response_hash,
+    uint64_t* required_size) {
+  string last_hash;
+  ignore_result(
+      prefs->GetString(kPrefsDynamicPartitionMetadataUpdated, &last_hash));
+
+  bool is_resume = !update_check_response_hash.empty() &&
+                   last_hash == update_check_response_hash;
+
+  if (is_resume) {
+    LOG(INFO) << "Using previously prepared partitions for update. hash = "
+              << last_hash;
+  } else {
+    LOG(INFO) << "Preparing partitions for new update. last hash = "
+              << last_hash << ", new hash = " << update_check_response_hash;
   }
 
-  bool metadata_updated = false;
-  prefs_->GetBoolean(kPrefsDynamicPartitionMetadataUpdated, &metadata_updated);
-  if (!boot_control_->InitPartitionMetadata(
-          install_plan_->target_slot, partition_metadata, !metadata_updated)) {
+  if (!boot_control->GetDynamicPartitionControl()->PreparePartitionsForUpdate(
+          boot_control->GetCurrentSlot(),
+          target_slot,
+          manifest,
+          !is_resume /* should update */,
+          required_size)) {
     LOG(ERROR) << "Unable to initialize partition metadata for slot "
-               << BootControlInterface::SlotName(install_plan_->target_slot);
+               << BootControlInterface::SlotName(target_slot);
     return false;
   }
-  TEST_AND_RETURN_FALSE(
-      prefs_->SetBoolean(kPrefsDynamicPartitionMetadataUpdated, true));
-  LOG(INFO) << "InitPartitionMetadata done.";
+
+  TEST_AND_RETURN_FALSE(prefs->SetString(kPrefsDynamicPartitionMetadataUpdated,
+                                         update_check_response_hash));
+  LOG(INFO) << "PreparePartitionsForUpdate done.";
 
   return true;
 }
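
The resume decision now keys partition preparation on the payload hash instead of a boolean pref, so switching payloads mid-update forces the dynamic partition metadata to be rewritten. A condensed sketch of the decision, using the same pref names as above:

  // Sketch of the resume check performed by PreparePartitionsForUpdate().
  std::string last_hash;  // stored under kPrefsDynamicPartitionMetadataUpdated
  std::string new_hash;   // from kPrefsUpdateCheckResponseHash
  const bool is_resume = !new_hash.empty() && last_hash == new_hash;
  // The "should update" argument is !is_resume: a resume keeps the already
  // prepared target metadata, a new payload rewrites it and records the new
  // hash on success.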
@@ -1031,7 +1087,21 @@
   if (operation.has_dst_length())
     TEST_AND_RETURN_FALSE(operation.dst_length() % block_size_ == 0);
 
+  TEST_AND_RETURN_FALSE(source_fd_ != nullptr);
+
+  // The device may optimize the SOURCE_COPY operation.
+  // Since this is a device-specific optimization, let the
+  // DynamicPartitionController decide whether the operation should be skipped.
+  const PartitionUpdate& partition = partitions_[current_partition_];
+  const auto& partition_control = boot_control_->GetDynamicPartitionControl();
+
+  InstallOperation buf;
+  bool should_optimize = partition_control->OptimizeOperation(
+      partition.partition_name(), operation, &buf);
+  const InstallOperation& optimized = should_optimize ? buf : operation;
+
   if (operation.has_src_sha256_hash()) {
+    bool read_ok;
     brillo::Blob source_hash;
     brillo::Blob expected_source_hash(operation.src_sha256_hash().begin(),
                                       operation.src_sha256_hash().end());
@@ -1040,12 +1110,26 @@
     // device doesn't match or there was an error reading the source partition.
     // Note that this code will also fall back if writing the target partition
     // fails.
-    bool read_ok = fd_utils::CopyAndHashExtents(source_fd_,
-                                                operation.src_extents(),
-                                                target_fd_,
-                                                operation.dst_extents(),
-                                                block_size_,
-                                                &source_hash);
+    if (should_optimize) {
+      // Hash operation.src_extents(), then copy optimized.src_extents to
+      // optimized.dst_extents.
+      read_ok =
+          fd_utils::ReadAndHashExtents(
+              source_fd_, operation.src_extents(), block_size_, &source_hash) &&
+          fd_utils::CopyAndHashExtents(source_fd_,
+                                       optimized.src_extents(),
+                                       target_fd_,
+                                       optimized.dst_extents(),
+                                       block_size_,
+                                       nullptr /* skip hashing */);
+    } else {
+      read_ok = fd_utils::CopyAndHashExtents(source_fd_,
+                                             operation.src_extents(),
+                                             target_fd_,
+                                             operation.dst_extents(),
+                                             block_size_,
+                                             &source_hash);
+    }
     if (read_ok && expected_source_hash == source_hash)
       return true;
 
@@ -1062,12 +1146,25 @@
                  << base::HexEncode(expected_source_hash.data(),
                                     expected_source_hash.size());
 
-    TEST_AND_RETURN_FALSE(fd_utils::CopyAndHashExtents(source_ecc_fd_,
-                                                       operation.src_extents(),
-                                                       target_fd_,
-                                                       operation.dst_extents(),
-                                                       block_size_,
-                                                       &source_hash));
+    if (should_optimize) {
+      TEST_AND_RETURN_FALSE(fd_utils::ReadAndHashExtents(
+          source_ecc_fd_, operation.src_extents(), block_size_, &source_hash));
+      TEST_AND_RETURN_FALSE(
+          fd_utils::CopyAndHashExtents(source_ecc_fd_,
+                                       optimized.src_extents(),
+                                       target_fd_,
+                                       optimized.dst_extents(),
+                                       block_size_,
+                                       nullptr /* skip hashing */));
+    } else {
+      TEST_AND_RETURN_FALSE(
+          fd_utils::CopyAndHashExtents(source_ecc_fd_,
+                                       operation.src_extents(),
+                                       target_fd_,
+                                       operation.dst_extents(),
+                                       block_size_,
+                                       &source_hash));
+    }
     TEST_AND_RETURN_FALSE(
         ValidateSourceHash(source_hash, operation, source_ecc_fd_, error));
     // At this point reading from the the error corrected device worked, but
@@ -1079,19 +1176,20 @@
     // corrected device first since we can't verify the block in the raw device
     // at this point, but we fall back to the raw device since the error
     // corrected device can be shorter or not available.
+
     if (OpenCurrentECCPartition() &&
         fd_utils::CopyAndHashExtents(source_ecc_fd_,
-                                     operation.src_extents(),
+                                     optimized.src_extents(),
                                      target_fd_,
-                                     operation.dst_extents(),
+                                     optimized.dst_extents(),
                                      block_size_,
                                      nullptr)) {
       return true;
     }
     TEST_AND_RETURN_FALSE(fd_utils::CopyAndHashExtents(source_fd_,
-                                                       operation.src_extents(),
+                                                       optimized.src_extents(),
                                                        target_fd_,
-                                                       operation.dst_extents(),
+                                                       optimized.dst_extents(),
                                                        block_size_,
                                                        nullptr));
   }
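
The invariant of the optimized SOURCE_COPY path is that the source hash is always computed over the operation's original extents, while the copy itself may use the smaller optimized extents. Condensed, with the same helpers as above:

  brillo::Blob source_hash;
  if (should_optimize) {
    // Verify against the full set of source extents...
    fd_utils::ReadAndHashExtents(
        source_fd_, operation.src_extents(), block_size_, &source_hash);
    // ...but only copy the blocks the optimization did not skip.
    fd_utils::CopyAndHashExtents(source_fd_,
                                 optimized.src_extents(),
                                 target_fd_,
                                 optimized.dst_extents(),
                                 block_size_,
                                 nullptr /* skip hashing */);
  } else {
    fd_utils::CopyAndHashExtents(source_fd_,
                                 operation.src_extents(),
                                 target_fd_,
                                 operation.dst_extents(),
                                 block_size_,
                                 &source_hash);
  }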
@@ -1100,6 +1198,11 @@
 
 FileDescriptorPtr DeltaPerformer::ChooseSourceFD(
     const InstallOperation& operation, ErrorCode* error) {
+  if (source_fd_ == nullptr) {
+    LOG(ERROR) << "ChooseSourceFD fail: source_fd_ == nullptr";
+    return nullptr;
+  }
+
   if (!operation.has_src_sha256_hash()) {
     // When the operation doesn't include a source hash, we attempt the error
     // corrected device first since we can't verify the block in the raw device
@@ -1396,8 +1499,7 @@
   // blob and the signed sha-256 context.
   LOG_IF(WARNING,
          !prefs_->SetString(kPrefsUpdateStateSignatureBlob,
-                            string(signatures_message_data_.begin(),
-                                   signatures_message_data_.end())))
+                            signatures_message_data_))
       << "Unable to store the signature blob.";
 
   LOG(INFO) << "Extracted signature data of size "
@@ -1421,14 +1523,35 @@
     return brillo::data_encoding::Base64Decode(install_plan_->public_key_rsa,
                                                out_public_key);
   }
-
+  LOG(INFO) << "No public keys found for verification.";
   return true;
 }
 
+std::pair<std::unique_ptr<PayloadVerifier>, bool>
+DeltaPerformer::CreatePayloadVerifier() {
+  if (utils::FileExists(update_certificates_path_.c_str())) {
+    LOG(INFO) << "Verifying using certificates: " << update_certificates_path_;
+    return {
+        PayloadVerifier::CreateInstanceFromZipPath(update_certificates_path_),
+        true};
+  }
+
+  string public_key;
+  if (!GetPublicKey(&public_key)) {
+    LOG(ERROR) << "Failed to read public key";
+    return {nullptr, true};
+  }
+
+  // Skips the verification if the public key is empty.
+  if (public_key.empty()) {
+    return {nullptr, false};
+  }
+  return {PayloadVerifier::CreateInstance(public_key), true};
+}
+
 ErrorCode DeltaPerformer::ValidateManifest() {
   // Perform assorted checks to sanity check the manifest, make sure it
   // matches data from other sources, and that it is a supported version.
-
   bool has_old_fields = std::any_of(manifest_.partitions().begin(),
                                     manifest_.partitions().end(),
                                     [](const PartitionUpdate& partition) {
@@ -1453,8 +1576,8 @@
                << "' payload.";
     return ErrorCode::kPayloadMismatchedType;
   }
-
   // Check that the minor version is compatible.
+  // TODO(xunchang) increment minor version & add check for partial update
   if (actual_payload_type == InstallPayloadType::kFull) {
     if (manifest_.minor_version() != kFullPayloadMinorVersion) {
       LOG(ERROR) << "Manifest contains minor version "
@@ -1488,7 +1611,11 @@
                << hardware_->GetBuildTimestamp()
                << ") is newer than the maximum timestamp in the manifest ("
                << manifest_.max_timestamp() << ")";
-    return ErrorCode::kPayloadTimestampError;
+    if (!hardware_->AllowDowngrade()) {
+      return ErrorCode::kPayloadTimestampError;
+    }
+    LOG(INFO) << "The current OS build allows downgrade, continuing to apply"
+                 " the payload with an older timestamp.";
   }
 
   // TODO(crbug.com/37661) we should be adding more and more manifest checks,
@@ -1571,12 +1698,6 @@
 ErrorCode DeltaPerformer::VerifyPayload(
     const brillo::Blob& update_check_response_hash,
     const uint64_t update_check_response_size) {
-  string public_key;
-  if (!GetPublicKey(&public_key)) {
-    LOG(ERROR) << "Failed to get public key.";
-    return ErrorCode::kDownloadPayloadPubKeyVerificationError;
-  }
-
   // Verifies the download size.
   if (update_check_response_size !=
       metadata_size_ + metadata_signature_size_ + buffer_offset_) {
@@ -1594,21 +1715,22 @@
       ErrorCode::kPayloadHashMismatchError,
       payload_hash_calculator_.raw_hash() == update_check_response_hash);
 
-  // Verifies the signed payload hash.
-  if (public_key.empty()) {
-    LOG(WARNING) << "Not verifying signed delta payload -- missing public key.";
-    return ErrorCode::kSuccess;
-  }
   TEST_AND_RETURN_VAL(ErrorCode::kSignedDeltaPayloadExpectedError,
                       !signatures_message_data_.empty());
   brillo::Blob hash_data = signed_hash_calculator_.raw_hash();
   TEST_AND_RETURN_VAL(ErrorCode::kDownloadPayloadPubKeyVerificationError,
-                      PayloadVerifier::PadRSA2048SHA256Hash(&hash_data));
-  TEST_AND_RETURN_VAL(ErrorCode::kDownloadPayloadPubKeyVerificationError,
-                      !hash_data.empty());
+                      hash_data.size() == kSHA256Size);
 
-  if (!PayloadVerifier::VerifySignature(
-          signatures_message_data_, public_key, hash_data)) {
+  auto [payload_verifier, perform_verification] = CreatePayloadVerifier();
+  if (!perform_verification) {
+    LOG(WARNING) << "Not verifying signed delta payload -- missing public key.";
+    return ErrorCode::kSuccess;
+  }
+  if (!payload_verifier) {
+    LOG(ERROR) << "Failed to create the payload verifier.";
+    return ErrorCode::kDownloadPayloadPubKeyVerificationError;
+  }
+  if (!payload_verifier->VerifySignature(signatures_message_data_, hash_data)) {
     // The autoupdate_CatchBadSignatures test checks for this string
     // in log-files. Keep in sync.
     LOG(ERROR) << "Public key verification failed, thus update failed.";
@@ -1678,7 +1800,10 @@
   return true;
 }
 
-bool DeltaPerformer::ResetUpdateProgress(PrefsInterface* prefs, bool quick) {
+bool DeltaPerformer::ResetUpdateProgress(
+    PrefsInterface* prefs,
+    bool quick,
+    bool skip_dynamic_partition_metadata_updated) {
   TEST_AND_RETURN_FALSE(prefs->SetInt64(kPrefsUpdateStateNextOperation,
                                         kUpdateStateOperationInvalid));
   if (!quick) {
@@ -1692,7 +1817,11 @@
     prefs->SetInt64(kPrefsResumedUpdateFailures, 0);
     prefs->Delete(kPrefsPostInstallSucceeded);
     prefs->Delete(kPrefsVerityWritten);
-    prefs->Delete(kPrefsDynamicPartitionMetadataUpdated);
+
+    if (!skip_dynamic_partition_metadata_updated) {
+      LOG(INFO) << "Resetting recorded hash for prepared partitions.";
+      prefs->Delete(kPrefsDynamicPartitionMetadataUpdated);
+    }
   }
   return true;
 }
@@ -1766,11 +1895,7 @@
         signed_hash_calculator_.SetContext(signed_hash_context));
   }
 
-  string signature_blob;
-  if (prefs_->GetString(kPrefsUpdateStateSignatureBlob, &signature_blob)) {
-    signatures_message_data_.assign(signature_blob.begin(),
-                                    signature_blob.end());
-  }
+  prefs_->GetString(kPrefsUpdateStateSignatureBlob, &signatures_message_data_);
 
   string hash_context;
   TEST_AND_RETURN_FALSE(
diff --git a/payload_consumer/delta_performer.h b/payload_consumer/delta_performer.h
index 7860747..7b30a83 100644
--- a/payload_consumer/delta_performer.h
+++ b/payload_consumer/delta_performer.h
@@ -20,7 +20,9 @@
 #include <inttypes.h>
 
 #include <limits>
+#include <memory>
 #include <string>
+#include <utility>
 #include <vector>
 
 #include <base/time/time.h>
@@ -34,6 +36,7 @@
 #include "update_engine/payload_consumer/file_writer.h"
 #include "update_engine/payload_consumer/install_plan.h"
 #include "update_engine/payload_consumer/payload_metadata.h"
+#include "update_engine/payload_consumer/payload_verifier.h"
 #include "update_engine/update_metadata.pb.h"
 
 namespace chromeos_update_engine {
@@ -140,9 +143,14 @@
 
   // Resets the persistent update progress state to indicate that an update
   // can't be resumed. Performs a quick update-in-progress reset if |quick| is
-  // true, otherwise resets all progress-related update state. Returns true on
-  // success, false otherwise.
-  static bool ResetUpdateProgress(PrefsInterface* prefs, bool quick);
+  // true, otherwise resets all progress-related update state.
+  // If |skip_dynamic_partition_metadata_updated| is true, do not clear the
+  // |kPrefsDynamicPartitionMetadataUpdated| pref.
+  // Returns true on success, false otherwise.
+  static bool ResetUpdateProgress(
+      PrefsInterface* prefs,
+      bool quick,
+      bool skip_dynamic_partition_metadata_updated = false);
 
   // Attempts to parse the update metadata starting from the beginning of
   // |payload|. On success, returns kMetadataParseSuccess. Returns
@@ -156,6 +164,11 @@
     public_key_path_ = public_key_path;
   }
 
+  void set_update_certificates_path(
+      const std::string& update_certificates_path) {
+    update_certificates_path_ = update_certificates_path;
+  }
+
   // Return true if header parsing is finished and no errors occurred.
   bool IsHeaderParsed() const;
 
@@ -171,6 +184,24 @@
                                  const FileDescriptorPtr source_fd,
                                  ErrorCode* error);
 
+  // Initialize partitions and allocate required space for an update with the
+  // given |manifest|. |update_check_response_hash| is used to check if the
+  // previous call to this function corresponds to the same payload.
+  // - Same payload: makes no persistent modifications (does not write to disk)
+  // - Different payload: makes persistent modifications (writes to disk)
+  // In both cases, in-memory flags are updated. This function must be called
+  // on the payload at least once (to update in-memory flags) before writing
+  // (applying) the payload.
+  // If the call fails due to insufficient space, |required_size| is set to
+  // the size required on the device to apply the payload.
+  static bool PreparePartitionsForUpdate(
+      PrefsInterface* prefs,
+      BootControlInterface* boot_control,
+      BootControlInterface::Slot target_slot,
+      const DeltaArchiveManifest& manifest,
+      const std::string& update_check_response_hash,
+      uint64_t* required_size);
+
  private:
   friend class DeltaPerformerTest;
   friend class DeltaPerformerIntegrationTest;
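
The static overload makes partition preparation callable outside DeltaPerformer, for example from a space-allocation step that runs before the payload is written. A minimal calling sketch, assuming the caller already holds prefs, boot_control, the target slot and a parsed DeltaArchiveManifest:

  uint64_t required_size = 0;
  if (!DeltaPerformer::PreparePartitionsForUpdate(prefs,
                                                  boot_control,
                                                  target_slot,
                                                  manifest,
                                                  update_check_response_hash,
                                                  &required_size)) {
    // A non-zero |required_size| signals insufficient space (the caller can
    // surface kNotEnoughSpace); zero means preparation failed for another
    // reason.
  }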
@@ -266,9 +297,16 @@
   // |out_public_key|. Returns false on failures.
   bool GetPublicKey(std::string* out_public_key);
 
+  // Creates a PayloadVerifier from the zip file containing certificates. If
+  // the path to the zip file doesn't exist, falls back to using the public
+  // key. Returns a pair of the created PayloadVerifier and whether we should
+  // perform the verification.
+  std::pair<std::unique_ptr<PayloadVerifier>, bool> CreatePayloadVerifier();
+
   // After install_plan_ is filled with partition names and sizes, initialize
   // metadata of partitions and map necessary devices before opening devices.
-  bool InitPartitionMetadata();
+  // Also see comment for the static PreparePartitionsForUpdate().
+  bool PreparePartitionsForUpdate(uint64_t* required_size);
 
   // Update Engine preference store.
   PrefsInterface* prefs_;
@@ -370,12 +408,15 @@
   HashCalculator signed_hash_calculator_;
 
   // Signatures message blob extracted directly from the payload.
-  brillo::Blob signatures_message_data_;
+  std::string signatures_message_data_;
 
   // The public key to be used. Provided as a member so that tests can
   // override with test keys.
   std::string public_key_path_{constants::kUpdatePayloadPublicKeyPath};
 
+  // The path to the zip file with X509 certificates.
+  std::string update_certificates_path_{constants::kUpdateCertificatesPath};
+
   // The number of bytes received so far, used for progress tracking.
   size_t total_bytes_received_{0};
 
diff --git a/payload_consumer/delta_performer_integration_test.cc b/payload_consumer/delta_performer_integration_test.cc
index af6682a..16641c6 100644
--- a/payload_consumer/delta_performer_integration_test.cc
+++ b/payload_consumer/delta_performer_integration_test.cc
@@ -61,6 +61,8 @@
 extern const char* kUnittestPublicKeyPath;
 extern const char* kUnittestPrivateKey2Path;
 extern const char* kUnittestPublicKey2Path;
+extern const char* kUnittestPrivateKeyECPath;
+extern const char* kUnittestPublicKeyECPath;
 
 static const uint32_t kDefaultKernelSize = 4096;  // Something small for a test
 // clang-format off
@@ -109,6 +111,7 @@
   kSignatureGeneratedPlaceholder,  // Insert placeholder signatures, then real.
   kSignatureGeneratedPlaceholderMismatch,  // Insert a wrong sized placeholder.
   kSignatureGeneratedShell,  // Sign the generated payload through shell cmds.
+  kSignatureGeneratedShellECKey,      // Sign with an EC key through shell cmds.
   kSignatureGeneratedShellBadKey,     // Sign with a bad key through shell cmds.
   kSignatureGeneratedShellRotateCl1,  // Rotate key, test client v1
   kSignatureGeneratedShellRotateCl2,  // Rotate key, test client v2
@@ -166,29 +169,26 @@
   return true;
 }
 
-static size_t GetSignatureSize(const string& private_key_path) {
-  const brillo::Blob data(1, 'x');
-  brillo::Blob hash;
-  EXPECT_TRUE(HashCalculator::RawHashOfData(data, &hash));
-  brillo::Blob signature;
-  EXPECT_TRUE(PayloadSigner::SignHash(hash, private_key_path, &signature));
-  return signature.size();
-}
-
-static bool InsertSignaturePlaceholder(int signature_size,
+static bool InsertSignaturePlaceholder(size_t signature_size,
                                        const string& payload_path,
                                        uint64_t* out_metadata_size) {
   vector<brillo::Blob> signatures;
   signatures.push_back(brillo::Blob(signature_size, 0));
 
-  return PayloadSigner::AddSignatureToPayload(
-      payload_path, signatures, {}, payload_path, out_metadata_size);
+  return PayloadSigner::AddSignatureToPayload(payload_path,
+                                              {signature_size},
+                                              signatures,
+                                              {},
+                                              payload_path,
+                                              out_metadata_size);
 }
 
 static void SignGeneratedPayload(const string& payload_path,
                                  uint64_t* out_metadata_size) {
   string private_key_path = GetBuildArtifactsPath(kUnittestPrivateKeyPath);
-  int signature_size = GetSignatureSize(private_key_path);
+  size_t signature_size;
+  ASSERT_TRUE(PayloadSigner::GetMaximumSignatureSize(private_key_path,
+                                                      &signature_size));
   brillo::Blob metadata_hash, payload_hash;
   ASSERT_TRUE(PayloadSigner::HashPayloadForSigning(
       payload_path, {signature_size}, &payload_hash, &metadata_hash));
@@ -198,6 +198,7 @@
   ASSERT_TRUE(PayloadSigner::SignHash(
       metadata_hash, private_key_path, &metadata_signature));
   ASSERT_TRUE(PayloadSigner::AddSignatureToPayload(payload_path,
+                                                   {signature_size},
                                                    {payload_signature},
                                                    {metadata_signature},
                                                    payload_path,
@@ -206,28 +207,108 @@
       payload_path, GetBuildArtifactsPath(kUnittestPublicKeyPath)));
 }
 
-static void SignHashToFile(const string& hash_file,
-                           const string& signature_file,
-                           const string& private_key_file) {
-  brillo::Blob hash, signature;
-  ASSERT_TRUE(utils::ReadFile(hash_file, &hash));
-  ASSERT_TRUE(PayloadSigner::SignHash(hash, private_key_file, &signature));
-  ASSERT_TRUE(test_utils::WriteFileVector(signature_file, signature));
+static void SignGeneratedShellPayloadWithKeys(
+    const string& payload_path,
+    const vector<string>& private_key_paths,
+    const string& public_key_path,
+    bool verification_success) {
+  vector<string> signature_size_strings;
+  for (const auto& key_path : private_key_paths) {
+    size_t signature_size;
+    ASSERT_TRUE(
+        PayloadSigner::GetMaximumSignatureSize(key_path, &signature_size));
+    signature_size_strings.push_back(base::StringPrintf("%zu", signature_size));
+  }
+  string signature_size_string = base::JoinString(signature_size_strings, ":");
+
+  test_utils::ScopedTempFile hash_file("hash.XXXXXX"),
+      metadata_hash_file("hash.XXXXXX");
+  string delta_generator_path = GetBuildArtifactsPath("delta_generator");
+  ASSERT_EQ(0,
+            System(base::StringPrintf(
+                "%s -in_file=%s -signature_size=%s -out_hash_file=%s "
+                "-out_metadata_hash_file=%s",
+                delta_generator_path.c_str(),
+                payload_path.c_str(),
+                signature_size_string.c_str(),
+                hash_file.path().c_str(),
+                metadata_hash_file.path().c_str())));
+
+  // Sign the hash with all private keys.
+  vector<test_utils::ScopedTempFile> sig_files, metadata_sig_files;
+  vector<string> sig_file_paths, metadata_sig_file_paths;
+  for (const auto& key_path : private_key_paths) {
+    brillo::Blob hash, signature;
+    ASSERT_TRUE(utils::ReadFile(hash_file.path(), &hash));
+    ASSERT_TRUE(PayloadSigner::SignHash(hash, key_path, &signature));
+
+    test_utils::ScopedTempFile sig_file("signature.XXXXXX");
+    ASSERT_TRUE(test_utils::WriteFileVector(sig_file.path(), signature));
+    sig_file_paths.push_back(sig_file.path());
+    sig_files.push_back(std::move(sig_file));
+
+    brillo::Blob metadata_hash, metadata_signature;
+    ASSERT_TRUE(utils::ReadFile(metadata_hash_file.path(), &metadata_hash));
+    ASSERT_TRUE(
+        PayloadSigner::SignHash(metadata_hash, key_path, &metadata_signature));
+
+    test_utils::ScopedTempFile metadata_sig_file("signature.XXXXXX");
+    ASSERT_TRUE(test_utils::WriteFileVector(metadata_sig_file.path(),
+                                            metadata_signature));
+
+    metadata_sig_file_paths.push_back(metadata_sig_file.path());
+    metadata_sig_files.push_back(std::move(metadata_sig_file));
+  }
+  string sig_files_string = base::JoinString(sig_file_paths, ":");
+  string metadata_sig_files_string =
+      base::JoinString(metadata_sig_file_paths, ":");
+
+  // Add the signature to the payload.
+  ASSERT_EQ(0,
+            System(base::StringPrintf("%s --signature_size=%s -in_file=%s "
+                                      "-payload_signature_file=%s "
+                                      "-metadata_signature_file=%s "
+                                      "-out_file=%s",
+                                      delta_generator_path.c_str(),
+                                      signature_size_string.c_str(),
+                                      payload_path.c_str(),
+                                      sig_files_string.c_str(),
+                                      metadata_sig_files_string.c_str(),
+                                      payload_path.c_str())));
+
+  int verify_result = System(base::StringPrintf("%s -in_file=%s -public_key=%s",
+                                                delta_generator_path.c_str(),
+                                                payload_path.c_str(),
+                                                public_key_path.c_str()));
+
+  if (verification_success) {
+    ASSERT_EQ(0, verify_result);
+  } else {
+    ASSERT_NE(0, verify_result);
+  }
 }
 
 static void SignGeneratedShellPayload(SignatureTest signature_test,
                                       const string& payload_path) {
-  string private_key_path = GetBuildArtifactsPath(kUnittestPrivateKeyPath);
+  vector<SignatureTest> supported_test = {
+      kSignatureGeneratedShell,
+      kSignatureGeneratedShellBadKey,
+      kSignatureGeneratedShellECKey,
+      kSignatureGeneratedShellRotateCl1,
+      kSignatureGeneratedShellRotateCl2,
+  };
+  ASSERT_TRUE(std::find(supported_test.begin(),
+                        supported_test.end(),
+                        signature_test) != supported_test.end());
+
+  string private_key_path;
   if (signature_test == kSignatureGeneratedShellBadKey) {
     ASSERT_TRUE(utils::MakeTempFile("key.XXXXXX", &private_key_path, nullptr));
+  } else if (signature_test == kSignatureGeneratedShellECKey) {
+    private_key_path = GetBuildArtifactsPath(kUnittestPrivateKeyECPath);
   } else {
-    ASSERT_TRUE(signature_test == kSignatureGeneratedShell ||
-                signature_test == kSignatureGeneratedShellRotateCl1 ||
-                signature_test == kSignatureGeneratedShellRotateCl2);
+    private_key_path = GetBuildArtifactsPath(kUnittestPrivateKeyPath);
   }
   ScopedPathUnlinker key_unlinker(private_key_path);
   key_unlinker.set_should_remove(signature_test ==
                                  kSignatureGeneratedShellBadKey);
+
   // Generates a new private key that will not match the public key.
   if (signature_test == kSignatureGeneratedShellBadKey) {
     LOG(INFO) << "Generating a mismatched private key.";
@@ -246,78 +327,26 @@
     fclose(fprikey);
     RSA_free(rsa);
   }
-  int signature_size = GetSignatureSize(private_key_path);
-  test_utils::ScopedTempFile payload_hash_file("hash.XXXXXX"),
-      metadata_hash_file("hash.XXXXXX");
-  string signature_size_string;
-  if (signature_test == kSignatureGeneratedShellRotateCl1 ||
-      signature_test == kSignatureGeneratedShellRotateCl2)
-    signature_size_string =
-        base::StringPrintf("%d:%d", signature_size, signature_size);
-  else
-    signature_size_string = base::StringPrintf("%d", signature_size);
-  string delta_generator_path = GetBuildArtifactsPath("delta_generator");
-  ASSERT_EQ(0,
-            System(base::StringPrintf(
-                "%s -in_file=%s -signature_size=%s -out_hash_file=%s "
-                "-out_metadata_hash_file=%s",
-                delta_generator_path.c_str(),
-                payload_path.c_str(),
-                signature_size_string.c_str(),
-                payload_hash_file.path().c_str(),
-                metadata_hash_file.path().c_str())));
 
-  // Sign the payload hash.
-  test_utils::ScopedTempFile payload_signature_file("signature.XXXXXX");
-  SignHashToFile(payload_hash_file.path(),
-                 payload_signature_file.path(),
-                 private_key_path);
-  string payload_sig_files = payload_signature_file.path();
-  // Sign the metadata hash.
-  test_utils::ScopedTempFile metadata_signature_file("signature.XXXXXX");
-  SignHashToFile(metadata_hash_file.path(),
-                 metadata_signature_file.path(),
-                 private_key_path);
-  string metadata_sig_files = metadata_signature_file.path();
-
-  test_utils::ScopedTempFile payload_signature_file2("signature.XXXXXX");
-  test_utils::ScopedTempFile metadata_signature_file2("signature.XXXXXX");
+  vector<string> private_key_paths = {private_key_path};
   if (signature_test == kSignatureGeneratedShellRotateCl1 ||
       signature_test == kSignatureGeneratedShellRotateCl2) {
-    SignHashToFile(payload_hash_file.path(),
-                   payload_signature_file2.path(),
-                   GetBuildArtifactsPath(kUnittestPrivateKey2Path));
-    SignHashToFile(metadata_hash_file.path(),
-                   metadata_signature_file2.path(),
-                   GetBuildArtifactsPath(kUnittestPrivateKey2Path));
-    // Append second sig file to first path
-    payload_sig_files += ":" + payload_signature_file2.path();
-    metadata_sig_files += ":" + metadata_signature_file2.path();
+    private_key_paths.push_back(
+        GetBuildArtifactsPath(kUnittestPrivateKey2Path));
   }
 
-  ASSERT_EQ(
-      0,
-      System(base::StringPrintf("%s -in_file=%s -payload_signature_file=%s "
-                                "-metadata_signature_file=%s -out_file=%s",
-                                delta_generator_path.c_str(),
-                                payload_path.c_str(),
-                                payload_sig_files.c_str(),
-                                metadata_sig_files.c_str(),
-                                payload_path.c_str())));
-  int verify_result = System(base::StringPrintf(
-      "%s -in_file=%s -public_key=%s -public_key_version=%d",
-      delta_generator_path.c_str(),
-      payload_path.c_str(),
-      (signature_test == kSignatureGeneratedShellRotateCl2
-           ? GetBuildArtifactsPath(kUnittestPublicKey2Path)
-           : GetBuildArtifactsPath(kUnittestPublicKeyPath))
-          .c_str(),
-      signature_test == kSignatureGeneratedShellRotateCl2 ? 2 : 1));
-  if (signature_test == kSignatureGeneratedShellBadKey) {
-    ASSERT_NE(0, verify_result);
+  std::string public_key;
+  if (signature_test == kSignatureGeneratedShellRotateCl2) {
+    public_key = GetBuildArtifactsPath(kUnittestPublicKey2Path);
+  } else if (signature_test == kSignatureGeneratedShellECKey) {
+    public_key = GetBuildArtifactsPath(kUnittestPublicKeyECPath);
   } else {
-    ASSERT_EQ(0, verify_result);
+    public_key = GetBuildArtifactsPath(kUnittestPublicKeyPath);
   }
+
+  bool verification_success = signature_test != kSignatureGeneratedShellBadKey;
+  SignGeneratedShellPayloadWithKeys(
+      payload_path, private_key_paths, public_key, verification_success);
 }
 
 static void GenerateDeltaFile(bool full_kernel,
@@ -549,8 +578,9 @@
 
   if (signature_test == kSignatureGeneratedPlaceholder ||
       signature_test == kSignatureGeneratedPlaceholderMismatch) {
-    int signature_size =
-        GetSignatureSize(GetBuildArtifactsPath(kUnittestPrivateKeyPath));
+    size_t signature_size;
+    ASSERT_TRUE(PayloadSigner::GetMaximumSignatureSize(
+        GetBuildArtifactsPath(kUnittestPrivateKeyPath), &signature_size));
     LOG(INFO) << "Inserting placeholder signature.";
     ASSERT_TRUE(InsertSignaturePlaceholder(
         signature_size, state->delta_path, &state->metadata_size));
@@ -573,6 +603,7 @@
     LOG(INFO) << "Signing payload.";
     SignGeneratedPayload(state->delta_path, &state->metadata_size);
   } else if (signature_test == kSignatureGeneratedShell ||
+             signature_test == kSignatureGeneratedShellECKey ||
              signature_test == kSignatureGeneratedShellBadKey ||
              signature_test == kSignatureGeneratedShellRotateCl1 ||
              signature_test == kSignatureGeneratedShellRotateCl2) {
@@ -617,15 +648,16 @@
         EXPECT_EQ(2, sigs_message.signatures_size());
       else
         EXPECT_EQ(1, sigs_message.signatures_size());
-      const Signatures_Signature& signature = sigs_message.signatures(0);
-      EXPECT_EQ(1U, signature.version());
+      const Signatures::Signature& signature = sigs_message.signatures(0);
 
-      uint64_t expected_sig_data_length = 0;
       vector<string> key_paths{GetBuildArtifactsPath(kUnittestPrivateKeyPath)};
-      if (signature_test == kSignatureGeneratedShellRotateCl1 ||
-          signature_test == kSignatureGeneratedShellRotateCl2) {
+      if (signature_test == kSignatureGeneratedShellECKey) {
+        key_paths = {GetBuildArtifactsPath(kUnittestPrivateKeyECPath)};
+      } else if (signature_test == kSignatureGeneratedShellRotateCl1 ||
+                 signature_test == kSignatureGeneratedShellRotateCl2) {
         key_paths.push_back(GetBuildArtifactsPath(kUnittestPrivateKey2Path));
       }
+      uint64_t expected_sig_data_length = 0;
       EXPECT_TRUE(PayloadSigner::SignatureBlobLength(
           key_paths, &expected_sig_data_length));
       EXPECT_EQ(expected_sig_data_length, manifest.signatures_size());
@@ -701,7 +733,7 @@
       .WillRepeatedly(Return(true));
   EXPECT_CALL(prefs, SetString(kPrefsUpdateStateSignedSHA256Context, _))
       .WillRepeatedly(Return(true));
-  EXPECT_CALL(prefs, SetBoolean(kPrefsDynamicPartitionMetadataUpdated, _))
+  EXPECT_CALL(prefs, SetString(kPrefsDynamicPartitionMetadataUpdated, _))
       .WillRepeatedly(Return(true));
   if (op_hash_test == kValidOperationData && signature_test != kSignatureNone) {
     EXPECT_CALL(prefs, SetString(kPrefsUpdateStateSignatureBlob, _))
@@ -733,7 +765,9 @@
   ASSERT_TRUE(PayloadSigner::GetMetadataSignature(
       state->delta.data(),
       state->metadata_size,
-      GetBuildArtifactsPath(kUnittestPrivateKeyPath),
+      (signature_test == kSignatureGeneratedShellECKey)
+          ? GetBuildArtifactsPath(kUnittestPrivateKeyECPath)
+          : GetBuildArtifactsPath(kUnittestPrivateKeyPath),
       &install_plan->payloads[0].metadata_signature));
   EXPECT_FALSE(install_plan->payloads[0].metadata_signature.empty());
 
@@ -744,9 +778,12 @@
                                   install_plan,
                                   &install_plan->payloads[0],
                                   false /* interactive */);
-  string public_key_path = GetBuildArtifactsPath(kUnittestPublicKeyPath);
+  string public_key_path = signature_test == kSignatureGeneratedShellECKey
+                               ? GetBuildArtifactsPath(kUnittestPublicKeyECPath)
+                               : GetBuildArtifactsPath(kUnittestPublicKeyPath);
   EXPECT_TRUE(utils::FileExists(public_key_path.c_str()));
   (*performer)->set_public_key_path(public_key_path);
+  (*performer)->set_update_certificates_path("");
 
   EXPECT_EQ(static_cast<off_t>(state->image_size),
             HashCalculator::RawHashOfFile(
@@ -1017,6 +1054,16 @@
 }
 
 TEST(DeltaPerformerIntegrationTest,
+     RunAsRootSmallImageSignGeneratedShellECKeyTest) {
+  DoSmallImageTest(false,
+                   false,
+                   -1,
+                   kSignatureGeneratedShellECKey,
+                   false,
+                   kSourceMinorPayloadVersion);
+}
+
+TEST(DeltaPerformerIntegrationTest,
      RunAsRootSmallImageSignGeneratedShellBadKeyTest) {
   DoSmallImageTest(false,
                    false,
diff --git a/payload_consumer/delta_performer_unittest.cc b/payload_consumer/delta_performer_unittest.cc
index 3901195..44107cd 100644
--- a/payload_consumer/delta_performer_unittest.cc
+++ b/payload_consumer/delta_performer_unittest.cc
@@ -161,6 +161,11 @@
     install_plan_.target_slot = 1;
     EXPECT_CALL(mock_delegate_, ShouldCancel(_))
         .WillRepeatedly(testing::Return(false));
+    performer_.set_update_certificates_path("");
+    // Set the public key corresponding to the unittest private key.
+    string public_key_path = GetBuildArtifactsPath(kUnittestPublicKeyPath);
+    EXPECT_TRUE(utils::FileExists(public_key_path.c_str()));
+    performer_.set_public_key_path(public_key_path);
   }
 
   // Test helper placed where it can easily be friended from DeltaPerformer.
@@ -179,19 +184,22 @@
 
   brillo::Blob GeneratePayload(const brillo::Blob& blob_data,
                                const vector<AnnotatedOperation>& aops,
-                               bool sign_payload) {
+                               bool sign_payload,
+                               PartitionConfig* old_part = nullptr) {
     return GeneratePayload(blob_data,
                            aops,
                            sign_payload,
                            kMaxSupportedMajorPayloadVersion,
-                           kMaxSupportedMinorPayloadVersion);
+                           kMaxSupportedMinorPayloadVersion,
+                           old_part);
   }
 
   brillo::Blob GeneratePayload(const brillo::Blob& blob_data,
                                const vector<AnnotatedOperation>& aops,
                                bool sign_payload,
                                uint64_t major_version,
-                               uint32_t minor_version) {
+                               uint32_t minor_version,
+                               PartitionConfig* old_part = nullptr) {
     test_utils::ScopedTempFile blob_file("Blob-XXXXXX");
     EXPECT_TRUE(test_utils::WriteFileVector(blob_file.path(), blob_data));
 
@@ -202,24 +210,29 @@
     PayloadFile payload;
     EXPECT_TRUE(payload.Init(config));
 
-    PartitionConfig old_part(kPartitionNameRoot);
+    std::unique_ptr<PartitionConfig> old_part_uptr;
+    if (!old_part) {
+      old_part_uptr = std::make_unique<PartitionConfig>(kPartitionNameRoot);
+      old_part = old_part_uptr.get();
+    }
     if (minor_version != kFullPayloadMinorVersion) {
       // When generating a delta payload we need to include the old partition
       // information to mark it as a delta payload.
-      old_part.path = "/dev/null";
-      old_part.size = 0;
+      if (old_part->path.empty()) {
+        old_part->path = "/dev/null";
+      }
     }
     PartitionConfig new_part(kPartitionNameRoot);
     new_part.path = "/dev/zero";
     new_part.size = 1234;
 
-    payload.AddPartition(old_part, new_part, aops);
+    payload.AddPartition(*old_part, new_part, aops);
 
     // We include a kernel partition without operations.
-    old_part.name = kPartitionNameKernel;
+    old_part->name = kPartitionNameKernel;
     new_part.name = kPartitionNameKernel;
     new_part.size = 0;
-    payload.AddPartition(old_part, new_part, {});
+    payload.AddPartition(*old_part, new_part, {});
 
     test_utils::ScopedTempFile payload_file("Payload-XXXXXX");
     string private_key =
@@ -235,7 +248,8 @@
   }
 
   brillo::Blob GenerateSourceCopyPayload(const brillo::Blob& copied_data,
-                                         bool add_hash) {
+                                         bool add_hash,
+                                         PartitionConfig* old_part = nullptr) {
     PayloadGenerationConfig config;
     const uint64_t kDefaultBlockSize = config.block_size;
     EXPECT_EQ(0U, copied_data.size() % kDefaultBlockSize);
@@ -249,7 +263,7 @@
     if (add_hash)
       aop.op.set_src_sha256_hash(src_hash.data(), src_hash.size());
 
-    return GeneratePayload(brillo::Blob(), {aop}, false);
+    return GeneratePayload(brillo::Blob(), {aop}, false, old_part);
   }
 
   // Apply |payload_data| on partition specified in |source_path|.
@@ -316,7 +330,7 @@
     // When filling in size in manifest, exclude the size of the 24-byte header.
     uint64_t size_in_manifest = htobe64(actual_metadata_size - 24);
     performer_.Write(&size_in_manifest, 8, &error_code);
-    uint32_t signature_size = htobe64(10);
+    auto signature_size = htobe64(10);
     bool result = performer_.Write(&signature_size, 4, &error_code);
     if (expected_metadata_size == actual_metadata_size ||
         !hash_checks_mandatory) {
@@ -389,12 +403,6 @@
       expected_error = ErrorCode::kSuccess;
     }
 
-    // Use the public key corresponding to the private key used above to
-    // sign the metadata.
-    string public_key_path = GetBuildArtifactsPath(kUnittestPublicKeyPath);
-    EXPECT_TRUE(utils::FileExists(public_key_path.c_str()));
-    performer_.set_public_key_path(public_key_path);
-
     // Init actual_error with an invalid value so that we make sure
     // ParsePayloadMetadata properly populates it in all cases.
     actual_error = ErrorCode::kUmaReportedMax;
@@ -581,11 +589,16 @@
   EXPECT_TRUE(HashCalculator::RawHashOfData(expected_data, &src_hash));
   aop.op.set_src_sha256_hash(src_hash.data(), src_hash.size());
 
-  brillo::Blob payload_data = GeneratePayload(brillo::Blob(), {aop}, false);
-
   test_utils::ScopedTempFile source("Source-XXXXXX");
   EXPECT_TRUE(test_utils::WriteFileVector(source.path(), expected_data));
 
+  PartitionConfig old_part(kPartitionNameRoot);
+  old_part.path = source.path();
+  old_part.size = expected_data.size();
+
+  brillo::Blob payload_data =
+      GeneratePayload(brillo::Blob(), {aop}, false, &old_part);
+
   EXPECT_EQ(expected_data, ApplyPayload(payload_data, source.path(), true));
 }
 
@@ -604,11 +617,16 @@
   EXPECT_TRUE(HashCalculator::RawHashOfData(src, &src_hash));
   aop.op.set_src_sha256_hash(src_hash.data(), src_hash.size());
 
-  brillo::Blob payload_data = GeneratePayload(puffdiff_payload, {aop}, false);
-
   test_utils::ScopedTempFile source("Source-XXXXXX");
   EXPECT_TRUE(test_utils::WriteFileVector(source.path(), src));
 
+  PartitionConfig old_part(kPartitionNameRoot);
+  old_part.path = source.path();
+  old_part.size = src.size();
+
+  brillo::Blob payload_data =
+      GeneratePayload(puffdiff_payload, {aop}, false, &old_part);
+
   brillo::Blob dst(std::begin(dst_deflates), std::end(dst_deflates));
   EXPECT_EQ(dst, ApplyPayload(payload_data, source.path(), true));
 }
@@ -627,11 +645,16 @@
   EXPECT_TRUE(HashCalculator::RawHashOfData(expected_data, &src_hash));
   aop.op.set_src_sha256_hash(src_hash.data(), src_hash.size());
 
-  brillo::Blob payload_data = GeneratePayload(brillo::Blob(), {aop}, false);
-
   test_utils::ScopedTempFile source("Source-XXXXXX");
   EXPECT_TRUE(test_utils::WriteFileVector(source.path(), actual_data));
 
+  PartitionConfig old_part(kPartitionNameRoot);
+  old_part.path = source.path();
+  old_part.size = actual_data.size();
+
+  brillo::Blob payload_data =
+      GeneratePayload(brillo::Blob(), {aop}, false, &old_part);
+
   EXPECT_EQ(actual_data, ApplyPayload(payload_data, source.path(), false));
 }
 
@@ -650,7 +673,12 @@
   FakeFileDescriptor* fake_fec = SetFakeECCFile(kCopyOperationSize);
   brillo::Blob expected_data = FakeFileDescriptorData(kCopyOperationSize);
 
-  brillo::Blob payload_data = GenerateSourceCopyPayload(expected_data, true);
+  PartitionConfig old_part(kPartitionNameRoot);
+  old_part.path = source.path();
+  old_part.size = invalid_data.size();
+
+  brillo::Blob payload_data =
+      GenerateSourceCopyPayload(expected_data, true, &old_part);
   EXPECT_EQ(expected_data, ApplyPayload(payload_data, source.path(), true));
   // Verify that the fake_fec was actually used.
   EXPECT_EQ(1U, fake_fec->GetReadOps().size());
@@ -671,8 +699,13 @@
   // the expected.
   FakeFileDescriptor* fake_fec = SetFakeECCFile(kCopyOperationSize / 2);
 
+  PartitionConfig old_part(kPartitionNameRoot);
+  old_part.path = source.path();
+  old_part.size = expected_data.size();
+
   // The payload operation doesn't include an operation hash.
-  brillo::Blob payload_data = GenerateSourceCopyPayload(expected_data, false);
+  brillo::Blob payload_data =
+      GenerateSourceCopyPayload(expected_data, false, &old_part);
   EXPECT_EQ(expected_data, ApplyPayload(payload_data, source.path(), true));
   // Verify that the fake_fec was attempted to be used. Since the file
   // descriptor is shorter it can actually do more than one read to realize it
@@ -968,7 +1001,6 @@
   brillo::Blob payload_data = GeneratePayload(
       {}, {}, true, kBrilloMajorPayloadVersion, kSourceMinorPayloadVersion);
   install_plan_.hash_checks_mandatory = true;
-  performer_.set_public_key_path(GetBuildArtifactsPath(kUnittestPublicKeyPath));
   payload_.size = payload_data.size();
   ErrorCode error;
   EXPECT_EQ(MetadataParseResult::kSuccess,
diff --git a/payload_consumer/install_plan.cc b/payload_consumer/install_plan.cc
index 4638fbe..a313627 100644
--- a/payload_consumer/install_plan.cc
+++ b/payload_consumer/install_plan.cc
@@ -87,11 +87,18 @@
         base::StringPrintf(", system_version: %s", system_version.c_str());
   }
 
+  string url_str = download_url;
+  if (base::StartsWith(
+          url_str, "fd://", base::CompareCase::INSENSITIVE_ASCII)) {
+    int fd = std::stoi(url_str.substr(strlen("fd://")));
+    url_str = utils::GetFilePath(fd);
+  }
+
   LOG(INFO) << "InstallPlan: " << (is_resume ? "resume" : "new_update")
             << version_str
             << ", source_slot: " << BootControlInterface::SlotName(source_slot)
             << ", target_slot: " << BootControlInterface::SlotName(target_slot)
-            << ", initial url: " << download_url << payloads_str
+            << ", initial url: " << url_str << payloads_str
             << partitions_str << ", hash_checks_mandatory: "
             << utils::ToString(hash_checks_mandatory)
             << ", powerwash_required: " << utils::ToString(powerwash_required)
@@ -105,7 +112,8 @@
 bool InstallPlan::LoadPartitionsFromSlots(BootControlInterface* boot_control) {
   bool result = true;
   for (Partition& partition : partitions) {
-    if (source_slot != BootControlInterface::kInvalidSlot) {
+    if (source_slot != BootControlInterface::kInvalidSlot &&
+        partition.source_size > 0) {
       result = boot_control->GetPartitionDevice(
                    partition.name, source_slot, &partition.source_path) &&
                result;
diff --git a/payload_consumer/partition_update_generator_android.cc b/payload_consumer/partition_update_generator_android.cc
new file mode 100644
index 0000000..aa3f2e5
--- /dev/null
+++ b/payload_consumer/partition_update_generator_android.cc
@@ -0,0 +1,257 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_consumer/partition_update_generator_android.h"
+
+#include <filesystem>
+#include <memory>
+#include <set>
+#include <string_view>
+#include <utility>
+
+#include <android-base/strings.h>
+#include <base/logging.h>
+
+#include "update_engine/common/hash_calculator.h"
+#include "update_engine/common/utils.h"
+
+namespace {
+// TODO(xunchang) use definition in fs_mgr, e.g. fs_mgr_get_slot_suffix
+const char* SUFFIX_A = "_a";
+const char* SUFFIX_B = "_b";
+}  // namespace
+
+namespace chromeos_update_engine {
+
+PartitionUpdateGeneratorAndroid::PartitionUpdateGeneratorAndroid(
+    BootControlInterface* boot_control,
+    std::string device_dir,
+    size_t block_size)
+    : boot_control_(boot_control),
+      block_device_dir_(std::move(device_dir)),
+      block_size_(block_size) {}
+
+bool PartitionUpdateGeneratorAndroid::
+    GenerateOperationsForPartitionsNotInPayload(
+        BootControlInterface::Slot source_slot,
+        BootControlInterface::Slot target_slot,
+        const std::set<std::string>& partitions_in_payload,
+        std::vector<PartitionUpdate>* update_list) {
+  auto ret = GetStaticAbPartitionsOnDevice();
+  if (!ret.has_value()) {
+    LOG(ERROR) << "Failed to load static a/b partitions";
+    return false;
+  }
+  auto ab_partitions = ret.value();
+
+  // Add the dynamic partitions.
+  auto dynamic_control = boot_control_->GetDynamicPartitionControl();
+  std::vector<std::string> dynamic_partitions;
+  if (!dynamic_control->ListDynamicPartitionsForSlot(source_slot,
+                                                     &dynamic_partitions)) {
+    LOG(ERROR) << "Failed to load dynamic partitions from slot " << source_slot;
+    return false;
+  }
+  ab_partitions.insert(dynamic_partitions.begin(), dynamic_partitions.end());
+
+  std::vector<PartitionUpdate> partition_updates;
+  for (const auto& partition_name : ab_partitions) {
+    if (partitions_in_payload.find(partition_name) !=
+        partitions_in_payload.end()) {
+      LOG(INFO) << partition_name << " is already included in the payload";
+      continue;
+    }
+
+    auto partition_update =
+        CreatePartitionUpdate(partition_name, source_slot, target_slot);
+    if (!partition_update.has_value()) {
+      LOG(ERROR) << "Failed to create partition update for " << partition_name;
+      return false;
+    }
+    partition_updates.push_back(partition_update.value());
+  }
+  *update_list = std::move(partition_updates);
+  return true;
+}
+
+std::optional<std::set<std::string>>
+PartitionUpdateGeneratorAndroid::GetStaticAbPartitionsOnDevice() {
+  if (std::error_code error_code;
+      !std::filesystem::exists(block_device_dir_, error_code) || error_code) {
+    LOG(ERROR) << "Failed to find " << block_device_dir_ << " "
+               << error_code.message();
+    return std::nullopt;
+  }
+
+  std::error_code error_code;
+  auto it = std::filesystem::directory_iterator(block_device_dir_, error_code);
+  if (error_code) {
+    LOG(ERROR) << "Failed to iterate " << block_device_dir_ << " "
+               << error_code.message();
+    return std::nullopt;
+  }
+
+  std::set<std::string> partitions_with_suffix;
+  for (const auto& entry : it) {
+    auto partition_name = entry.path().filename().string();
+    if (android::base::EndsWith(partition_name, SUFFIX_A) ||
+        android::base::EndsWith(partition_name, SUFFIX_B)) {
+      partitions_with_suffix.insert(partition_name);
+    }
+  }
+
+  // Second iteration to add the partition names without the suffixes.
+  std::set<std::string> ab_partitions;
+  for (std::string_view name : partitions_with_suffix) {
+    if (!android::base::ConsumeSuffix(&name, SUFFIX_A)) {
+      continue;
+    }
+
+    // Add to the output list if the partition exists for both slots a and b.
+    auto base_name = std::string(name);
+    if (partitions_with_suffix.find(base_name + SUFFIX_B) !=
+        partitions_with_suffix.end()) {
+      ab_partitions.insert(base_name);
+    } else {
+      LOG(WARNING) << "Failed to find the b partition for " << base_name;
+    }
+  }
+
+  return ab_partitions;
+}
+
+std::optional<PartitionUpdate>
+PartitionUpdateGeneratorAndroid::CreatePartitionUpdate(
+    const std::string& partition_name,
+    BootControlInterface::Slot source_slot,
+    BootControlInterface::Slot target_slot) {
+  bool is_source_dynamic = false;
+  std::string source_device;
+  if (!boot_control_->GetPartitionDevice(partition_name,
+                                         source_slot,
+                                         true, /* not_in_payload */
+                                         &source_device,
+                                         &is_source_dynamic)) {
+    LOG(ERROR) << "Failed to load source " << partition_name;
+    return std::nullopt;
+  }
+  bool is_target_dynamic = false;
+  std::string target_device;
+  if (!boot_control_->GetPartitionDevice(partition_name,
+                                         target_slot,
+                                         true,
+                                         &target_device,
+                                         &is_target_dynamic)) {
+    LOG(ERROR) << "Failed to load target " << partition_name;
+    return std::nullopt;
+  }
+
+  if (is_source_dynamic != is_target_dynamic) {
+    LOG(ERROR) << "Source slot " << source_slot << " for partition "
+               << partition_name << " is " << (is_source_dynamic ? "" : "not")
+               << " dynamic, but target slot " << target_slot << " is "
+               << (is_target_dynamic ? "" : "not") << " dynamic.";
+    return std::nullopt;
+  }
+  auto source_size = utils::FileSize(source_device);
+  auto target_size = utils::FileSize(target_device);
+  if (source_size == -1 || target_size == -1 || source_size != target_size ||
+      source_size % block_size_ != 0) {
+    LOG(ERROR) << "Invalid partition size. source size " << source_size
+               << ", target size " << target_size;
+    return std::nullopt;
+  }
+
+  return CreatePartitionUpdate(partition_name,
+                               source_device,
+                               target_device,
+                               source_size,
+                               is_source_dynamic);
+}
+
+std::optional<PartitionUpdate>
+PartitionUpdateGeneratorAndroid::CreatePartitionUpdate(
+    const std::string& partition_name,
+    const std::string& source_device,
+    const std::string& target_device,
+    int64_t partition_size,
+    bool is_dynamic) {
+  PartitionUpdate partition_update;
+  partition_update.set_partition_name(partition_name);
+  auto old_partition_info = partition_update.mutable_old_partition_info();
+  old_partition_info->set_size(partition_size);
+
+  auto raw_hash = CalculateHashForPartition(source_device, partition_size);
+  if (!raw_hash.has_value()) {
+    return {};
+  }
+  old_partition_info->set_hash(raw_hash->data(), raw_hash->size());
+  auto new_partition_info = partition_update.mutable_new_partition_info();
+  new_partition_info->set_size(partition_size);
+  new_partition_info->set_hash(raw_hash->data(), raw_hash->size());
+  // TODO(xunchang) TBD, should we skip hashing and verification of the
+  // dynamic partitions not in payload?
+  if (!is_dynamic) {
+    auto copy_operation = partition_update.add_operations();
+    copy_operation->set_type(InstallOperation::SOURCE_COPY);
+    Extent copy_extent;
+    copy_extent.set_start_block(0);
+    copy_extent.set_num_blocks(partition_size / block_size_);
+
+    *copy_operation->add_src_extents() = copy_extent;
+    *copy_operation->add_dst_extents() = copy_extent;
+  }
+
+  return partition_update;
+}
+
+std::optional<brillo::Blob>
+PartitionUpdateGeneratorAndroid::CalculateHashForPartition(
+    const std::string& block_device, int64_t partition_size) {
+  // TODO(xunchang) Compute the hash with the ECC partitions first; the
+  // hashing behavior should match the one in SOURCE_COPY. Also, we don't have
+  // the correct hash for the source partition.
+  // An alternative is to verify that the written bytes match the read bytes
+  // during filesystem verification, which could probably save us a read of
+  // the partitions here.
+  brillo::Blob raw_hash;
+  if (HashCalculator::RawHashOfFile(block_device, partition_size, &raw_hash) !=
+      partition_size) {
+    LOG(ERROR) << "Failed to calculate hash for " << block_device;
+    return std::nullopt;
+  }
+
+  return raw_hash;
+}
+
+namespace partition_update_generator {
+std::unique_ptr<PartitionUpdateGeneratorInterface> Create(
+    BootControlInterface* boot_control, size_t block_size) {
+  CHECK(boot_control);
+  auto dynamic_control = boot_control->GetDynamicPartitionControl();
+  CHECK(dynamic_control);
+  std::string dir_path;
+  if (!dynamic_control->GetDeviceDir(&dir_path)) {
+    return nullptr;
+  }
+
+  return std::unique_ptr<PartitionUpdateGeneratorInterface>(
+      new PartitionUpdateGeneratorAndroid(
+          boot_control, std::move(dir_path), block_size));
+}
+}  // namespace partition_update_generator
+
+}  // namespace chromeos_update_engine
diff --git a/payload_consumer/partition_update_generator_android.h b/payload_consumer/partition_update_generator_android.h
new file mode 100644
index 0000000..8f33077
--- /dev/null
+++ b/payload_consumer/partition_update_generator_android.h
@@ -0,0 +1,78 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_PAYLOAD_CONSUMER_PARTITION_UPDATE_GENERATOR_ANDROID_H_
+#define UPDATE_ENGINE_PAYLOAD_CONSUMER_PARTITION_UPDATE_GENERATOR_ANDROID_H_
+
+#include <optional>
+#include <set>
+#include <string>
+#include <vector>
+
+#include <brillo/secure_blob.h>
+#include <gtest/gtest_prod.h>  // for FRIEND_TEST
+
+#include "update_engine/common/boot_control_interface.h"
+#include "update_engine/payload_consumer/partition_update_generator_interface.h"
+
+namespace chromeos_update_engine {
+class PartitionUpdateGeneratorAndroid
+    : public PartitionUpdateGeneratorInterface {
+ public:
+  PartitionUpdateGeneratorAndroid(BootControlInterface* boot_control,
+                                  std::string device_dir,
+                                  size_t block_size);
+
+  bool GenerateOperationsForPartitionsNotInPayload(
+      BootControlInterface::Slot source_slot,
+      BootControlInterface::Slot target_slot,
+      const std::set<std::string>& partitions_in_payload,
+      std::vector<PartitionUpdate>* update_list) override;
+
+ private:
+  friend class PartitionUpdateGeneratorAndroidTest;
+  FRIEND_TEST(PartitionUpdateGeneratorAndroidTest, GetStaticPartitions);
+  FRIEND_TEST(PartitionUpdateGeneratorAndroidTest, CreatePartitionUpdate);
+
+  // Gets the names of the static a/b partitions on the device.
+  std::optional<std::set<std::string>> GetStaticAbPartitionsOnDevice();
+
+  // Creates a PartitionUpdate object for a given partition to update from
+  // source to target. Returns std::nullopt on failure.
+  std::optional<PartitionUpdate> CreatePartitionUpdate(
+      const std::string& partition_name,
+      const std::string& source_device,
+      const std::string& target_device,
+      int64_t partition_size,
+      bool is_dynamic);
+
+  std::optional<PartitionUpdate> CreatePartitionUpdate(
+      const std::string& partition_name,
+      BootControlInterface::Slot source_slot,
+      BootControlInterface::Slot target_slot);
+
+  std::optional<brillo::Blob> CalculateHashForPartition(
+      const std::string& block_device, int64_t partition_size);
+
+  BootControlInterface* boot_control_;
+  // Path to look for the a/b partitions.
+  std::string block_device_dir_;
+  size_t block_size_;
+};
+
+}  // namespace chromeos_update_engine
+
+#endif
diff --git a/payload_consumer/partition_update_generator_android_unittest.cc b/payload_consumer/partition_update_generator_android_unittest.cc
new file mode 100644
index 0000000..c3be9db
--- /dev/null
+++ b/payload_consumer/partition_update_generator_android_unittest.cc
@@ -0,0 +1,162 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_consumer/partition_update_generator_android.h"
+
+#include <map>
+#include <memory>
+#include <set>
+#include <vector>
+
+#include <android-base/strings.h>
+#include <brillo/secure_blob.h>
+#include <gtest/gtest.h>
+
+#include "update_engine/common/fake_boot_control.h"
+#include "update_engine/common/hash_calculator.h"
+#include "update_engine/common/test_utils.h"
+#include "update_engine/common/utils.h"
+
+namespace chromeos_update_engine {
+
+class PartitionUpdateGeneratorAndroidTest : public ::testing::Test {
+ protected:
+  void SetUp() override {
+    ASSERT_TRUE(device_dir_.CreateUniqueTempDir());
+    boot_control_ = std::make_unique<FakeBootControl>();
+    boot_control_->SetNumSlots(2);
+    auto generator =
+        partition_update_generator::Create(boot_control_.get(), 4096);
+    generator_.reset(
+        static_cast<PartitionUpdateGeneratorAndroid*>(generator.release()));
+    ASSERT_TRUE(boot_control_);
+    ASSERT_TRUE(generator_);
+    generator_->block_device_dir_ = device_dir_.GetPath().value();
+  }
+
+  std::unique_ptr<PartitionUpdateGeneratorAndroid> generator_;
+  std::unique_ptr<FakeBootControl> boot_control_;
+
+  base::ScopedTempDir device_dir_;
+
+  void SetUpBlockDevice(const std::map<std::string, std::string>& contents) {
+    for (const auto& [name, content] : contents) {
+      auto path = generator_->block_device_dir_ + "/" + name;
+      ASSERT_TRUE(
+          utils::WriteFile(path.c_str(), content.data(), content.size()));
+
+      if (android::base::EndsWith(name, "_a")) {
+        boot_control_->SetPartitionDevice(
+            name.substr(0, name.size() - 2), 0, path);
+      } else if (android::base::EndsWith(name, "_b")) {
+        boot_control_->SetPartitionDevice(
+            name.substr(0, name.size() - 2), 1, path);
+      }
+    }
+  }
+
+  void CheckPartitionUpdate(const std::string& name,
+                            const std::string& content,
+                            const PartitionUpdate& partition_update) {
+    ASSERT_EQ(name, partition_update.partition_name());
+
+    brillo::Blob out_hash;
+    ASSERT_TRUE(HashCalculator::RawHashOfBytes(
+        content.data(), content.size(), &out_hash));
+    ASSERT_EQ(std::string(out_hash.begin(), out_hash.end()),
+              partition_update.old_partition_info().hash());
+    ASSERT_EQ(std::string(out_hash.begin(), out_hash.end()),
+              partition_update.new_partition_info().hash());
+
+    ASSERT_EQ(1, partition_update.operations_size());
+    const auto& operation = partition_update.operations(0);
+    ASSERT_EQ(InstallOperation::SOURCE_COPY, operation.type());
+
+    ASSERT_EQ(1, operation.src_extents_size());
+    ASSERT_EQ(0u, operation.src_extents(0).start_block());
+    ASSERT_EQ(content.size() / 4096, operation.src_extents(0).num_blocks());
+
+    ASSERT_EQ(1, operation.dst_extents_size());
+    ASSERT_EQ(0u, operation.dst_extents(0).start_block());
+    ASSERT_EQ(content.size() / 4096, operation.dst_extents(0).num_blocks());
+  }
+};
+
+TEST_F(PartitionUpdateGeneratorAndroidTest, GetStaticPartitions) {
+  std::map<std::string, std::string> contents = {
+      {"system_a", ""},
+      {"system_b", ""},
+      {"vendor_a", ""},
+      {"vendor_b", ""},
+      {"persist", ""},
+      {"vbmeta_a", ""},
+      {"vbmeta_b", ""},
+      {"boot_a", ""},
+      {"boot_b", ""},
+  };
+
+  SetUpBlockDevice(contents);
+  auto partitions = generator_->GetStaticAbPartitionsOnDevice();
+  ASSERT_EQ(std::set<std::string>({"system", "vendor", "vbmeta", "boot"}),
+            partitions);
+}
+
+TEST_F(PartitionUpdateGeneratorAndroidTest, CreatePartitionUpdate) {
+  auto system_contents = std::string(4096 * 2, '1');
+  auto boot_contents = std::string(4096 * 5, 'b');
+  std::map<std::string, std::string> contents = {
+      {"system_a", system_contents},
+      {"system_b", std::string(4096 * 2, 0)},
+      {"boot_a", boot_contents},
+      {"boot_b", std::string(4096 * 5, 0)},
+  };
+  SetUpBlockDevice(contents);
+
+  auto system_partition_update =
+      generator_->CreatePartitionUpdate("system", 0, 1);
+  ASSERT_TRUE(system_partition_update.has_value());
+  CheckPartitionUpdate(
+      "system", system_contents, system_partition_update.value());
+
+  auto boot_partition_update = generator_->CreatePartitionUpdate("boot", 0, 1);
+  ASSERT_TRUE(boot_partition_update.has_value());
+  CheckPartitionUpdate("boot", boot_contents, boot_partition_update.value());
+}
+
+TEST_F(PartitionUpdateGeneratorAndroidTest, GenerateOperations) {
+  auto system_contents = std::string(4096 * 10, '2');
+  auto boot_contents = std::string(4096 * 5, 'b');
+  std::map<std::string, std::string> contents = {
+      {"system_a", system_contents},
+      {"system_b", std::string(4096 * 10, 0)},
+      {"boot_a", boot_contents},
+      {"boot_b", std::string(4096 * 5, 0)},
+      {"vendor_a", ""},
+      {"vendor_b", ""},
+      {"persist", ""},
+  };
+  SetUpBlockDevice(contents);
+
+  std::vector<PartitionUpdate> update_list;
+  ASSERT_TRUE(generator_->GenerateOperationsForPartitionsNotInPayload(
+      0, 1, std::set<std::string>{"vendor"}, &update_list));
+
+  ASSERT_EQ(2u, update_list.size());
+  CheckPartitionUpdate("boot", boot_contents, update_list[0]);
+  CheckPartitionUpdate("system", system_contents, update_list[1]);
+}
+
+}  // namespace chromeos_update_engine
diff --git a/payload_consumer/partition_update_generator_interface.h b/payload_consumer/partition_update_generator_interface.h
new file mode 100644
index 0000000..3fa3dfb
--- /dev/null
+++ b/payload_consumer/partition_update_generator_interface.h
@@ -0,0 +1,55 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_PAYLOAD_CONSUMER_PARTITION_UPDATE_GENERATOR_INTERFACE_H_
+#define UPDATE_ENGINE_PAYLOAD_CONSUMER_PARTITION_UPDATE_GENERATOR_INTERFACE_H_
+
+#include <memory>
+#include <set>
+#include <string>
+#include <vector>
+
+#include "update_engine/common/boot_control_interface.h"
+
+namespace chromeos_update_engine {
+class PartitionUpdate;
+
+// This class parses the partitions that are not included in the payload of a
+// partial A/B update, and generates additional operations for these
+// partitions to make the update complete.
+class PartitionUpdateGeneratorInterface {
+ public:
+  virtual ~PartitionUpdateGeneratorInterface() = default;
+
+  // Adds a PartitionUpdate for each partition not included in the payload.
+  // For static partitions, it generates SOURCE_COPY operations to copy the
+  // bytes from the source slot to the target slot. For dynamic partitions, it
+  // only calculates the partition hash for later filesystem verification.
+  virtual bool GenerateOperationsForPartitionsNotInPayload(
+      BootControlInterface::Slot source_slot,
+      BootControlInterface::Slot target_slot,
+      const std::set<std::string>& partitions_in_payload,
+      std::vector<PartitionUpdate>* update_list) = 0;
+};
+
+namespace partition_update_generator {
+std::unique_ptr<PartitionUpdateGeneratorInterface> Create(
+    BootControlInterface* boot_control, size_t block_size);
+}  // namespace partition_update_generator
+
+}  // namespace chromeos_update_engine
+
+#endif
diff --git a/payload_consumer/partition_update_generator_stub.cc b/payload_consumer/partition_update_generator_stub.cc
new file mode 100644
index 0000000..e2b64ec
--- /dev/null
+++ b/payload_consumer/partition_update_generator_stub.cc
@@ -0,0 +1,38 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_consumer/partition_update_generator_stub.h"
+
+#include <memory>
+
+namespace chromeos_update_engine {
+
+bool PartitionUpdateGeneratorStub::GenerateOperationsForPartitionsNotInPayload(
+    chromeos_update_engine::BootControlInterface::Slot source_slot,
+    chromeos_update_engine::BootControlInterface::Slot target_slot,
+    const std::set<std::string>& partitions_in_payload,
+    std::vector<PartitionUpdate>* update_list) {
+  return true;
+}
+
+namespace partition_update_generator {
+std::unique_ptr<PartitionUpdateGeneratorInterface> Create(
+    BootControlInterface* boot_control, size_t block_size) {
+  return std::make_unique<PartitionUpdateGeneratorStub>();
+}
+}  // namespace partition_update_generator
+
+}  // namespace chromeos_update_engine
diff --git a/payload_consumer/partition_update_generator_stub.h b/payload_consumer/partition_update_generator_stub.h
new file mode 100644
index 0000000..282875e
--- /dev/null
+++ b/payload_consumer/partition_update_generator_stub.h
@@ -0,0 +1,40 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_PAYLOAD_CONSUMER_PARTITION_UPDATE_GENERATOR_STUB_H_
+#define UPDATE_ENGINE_PAYLOAD_CONSUMER_PARTITION_UPDATE_GENERATOR_STUB_H_
+
+#include <set>
+#include <string>
+#include <vector>
+
+#include "update_engine/common/boot_control_interface.h"
+#include "update_engine/payload_consumer/partition_update_generator_interface.h"
+
+namespace chromeos_update_engine {
+class PartitionUpdateGeneratorStub : public PartitionUpdateGeneratorInterface {
+ public:
+  PartitionUpdateGeneratorStub() = default;
+  bool GenerateOperationsForPartitionsNotInPayload(
+      BootControlInterface::Slot source_slot,
+      BootControlInterface::Slot target_slot,
+      const std::set<std::string>& partitions_in_payload,
+      std::vector<PartitionUpdate>* update_list) override;
+};
+
+}  // namespace chromeos_update_engine
+
+#endif
diff --git a/payload_consumer/payload_constants.cc b/payload_consumer/payload_constants.cc
index 908a893..1c987bd 100644
--- a/payload_consumer/payload_constants.cc
+++ b/payload_consumer/payload_constants.cc
@@ -44,7 +44,7 @@
 
 const char kDeltaMagic[4] = {'C', 'r', 'A', 'U'};
 
-const char* InstallOperationTypeName(InstallOperation_Type op_type) {
+const char* InstallOperationTypeName(InstallOperation::Type op_type) {
   switch (op_type) {
     case InstallOperation::REPLACE:
       return "REPLACE";
diff --git a/payload_consumer/payload_constants.h b/payload_consumer/payload_constants.h
index 888fa2a..5c2d17c 100644
--- a/payload_consumer/payload_constants.h
+++ b/payload_consumer/payload_constants.h
@@ -77,7 +77,7 @@
 const uint64_t kSparseHole = std::numeric_limits<uint64_t>::max();
 
 // Return the name of the operation type.
-const char* InstallOperationTypeName(InstallOperation_Type op_type);
+const char* InstallOperationTypeName(InstallOperation::Type op_type);
 
 }  // namespace chromeos_update_engine
 
diff --git a/payload_consumer/payload_metadata.cc b/payload_consumer/payload_metadata.cc
index b83001a..01f3b62 100644
--- a/payload_consumer/payload_metadata.cc
+++ b/payload_consumer/payload_metadata.cc
@@ -20,6 +20,7 @@
 
 #include <brillo/data_encoding.h>
 
+#include "update_engine/common/constants.h"
 #include "update_engine/common/hash_calculator.h"
 #include "update_engine/common/utils.h"
 #include "update_engine/payload_consumer/payload_constants.h"
@@ -131,12 +132,16 @@
 
 ErrorCode PayloadMetadata::ValidateMetadataSignature(
     const brillo::Blob& payload,
-    const std::string& metadata_signature,
-    const std::string& pem_public_key) const {
+    const string& metadata_signature,
+    const PayloadVerifier& payload_verifier) const {
   if (payload.size() < metadata_size_ + metadata_signature_size_)
     return ErrorCode::kDownloadMetadataSignatureError;
 
-  brillo::Blob metadata_signature_blob, metadata_signature_protobuf_blob;
+  // A single signature in raw bytes.
+  brillo::Blob metadata_signature_blob;
+  // The serialized Signatures protobuf message stored in payloads with major
+  // version >= 2; it may contain multiple signatures.
+  string metadata_signature_protobuf;
   if (!metadata_signature.empty()) {
     // Convert base64-encoded signature to raw bytes.
     if (!brillo::data_encoding::Base64Decode(metadata_signature,
@@ -146,49 +151,43 @@
       return ErrorCode::kDownloadMetadataSignatureError;
     }
   } else {
-    metadata_signature_protobuf_blob.assign(
+    metadata_signature_protobuf.assign(
         payload.begin() + metadata_size_,
         payload.begin() + metadata_size_ + metadata_signature_size_);
   }
 
-  if (metadata_signature_blob.empty() &&
-      metadata_signature_protobuf_blob.empty()) {
+  if (metadata_signature_blob.empty() && metadata_signature_protobuf.empty()) {
     LOG(ERROR) << "Missing mandatory metadata signature in both Omaha "
                << "response and payload.";
     return ErrorCode::kDownloadMetadataSignatureMissingError;
   }
 
-  brillo::Blob calculated_metadata_hash;
+  brillo::Blob metadata_hash;
   if (!HashCalculator::RawHashOfBytes(
-          payload.data(), metadata_size_, &calculated_metadata_hash)) {
+          payload.data(), metadata_size_, &metadata_hash)) {
     LOG(ERROR) << "Unable to compute actual hash of manifest";
     return ErrorCode::kDownloadMetadataSignatureVerificationError;
   }
 
-  PayloadVerifier::PadRSA2048SHA256Hash(&calculated_metadata_hash);
-  if (calculated_metadata_hash.empty()) {
-    LOG(ERROR) << "Computed actual hash of metadata is empty.";
+  if (metadata_hash.size() != kSHA256Size) {
+    LOG(ERROR) << "Computed actual hash of metadata has incorrect size: "
+               << metadata_hash.size();
     return ErrorCode::kDownloadMetadataSignatureVerificationError;
   }
 
   if (!metadata_signature_blob.empty()) {
-    brillo::Blob expected_metadata_hash;
-    if (!PayloadVerifier::GetRawHashFromSignature(
-            metadata_signature_blob, pem_public_key, &expected_metadata_hash)) {
-      LOG(ERROR) << "Unable to compute expected hash from metadata signature";
-      return ErrorCode::kDownloadMetadataSignatureError;
-    }
-    if (calculated_metadata_hash != expected_metadata_hash) {
-      LOG(ERROR) << "Manifest hash verification failed. Expected hash = ";
-      utils::HexDumpVector(expected_metadata_hash);
-      LOG(ERROR) << "Calculated hash = ";
-      utils::HexDumpVector(calculated_metadata_hash);
+    brillo::Blob decrypted_signature;
+    if (!payload_verifier.VerifyRawSignature(
+            metadata_signature_blob, metadata_hash, &decrypted_signature)) {
+      LOG(ERROR) << "Manifest hash verification failed. Decrypted hash = ";
+      utils::HexDumpVector(decrypted_signature);
+      LOG(ERROR) << "Calculated hash before padding = ";
+      utils::HexDumpVector(metadata_hash);
       return ErrorCode::kDownloadMetadataSignatureMismatch;
     }
   } else {
-    if (!PayloadVerifier::VerifySignature(metadata_signature_protobuf_blob,
-                                          pem_public_key,
-                                          calculated_metadata_hash)) {
+    if (!payload_verifier.VerifySignature(metadata_signature_protobuf,
+                                          metadata_hash)) {
       LOG(ERROR) << "Manifest hash verification failed.";
       return ErrorCode::kDownloadMetadataSignatureMismatch;
     }
diff --git a/payload_consumer/payload_metadata.h b/payload_consumer/payload_metadata.h
index be43c41..cc42253 100644
--- a/payload_consumer/payload_metadata.h
+++ b/payload_consumer/payload_metadata.h
@@ -27,6 +27,7 @@
 
 #include "update_engine/common/error_code.h"
 #include "update_engine/common/platform_constants.h"
+#include "update_engine/payload_consumer/payload_verifier.h"
 #include "update_engine/update_metadata.pb.h"
 
 namespace chromeos_update_engine {
@@ -65,9 +66,10 @@
   // metadata is parsed so that a man-in-the-middle attack on the SSL connection
   // to the payload server doesn't exploit any vulnerability in the code that
   // parses the protocol buffer.
-  ErrorCode ValidateMetadataSignature(const brillo::Blob& payload,
-                                      const std::string& metadata_signature,
-                                      const std::string& pem_public_key) const;
+  ErrorCode ValidateMetadataSignature(
+      const brillo::Blob& payload,
+      const std::string& metadata_signature,
+      const PayloadVerifier& payload_verifier) const;
 
   // Returns the major payload version. If the version was not yet parsed,
   // returns zero.
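
Editorial sketch, not part of this change: the new ValidateMetadataSignature overload takes a PayloadVerifier rather than a PEM key string, so a caller is expected to build the verifier first. A minimal illustration of that call pattern, assuming pem_public_key, payload_blob, metadata_signature, and an already-parsed payload_metadata object are available (all hypothetical names):

    // Build a verifier from a PEM public key; CreateInstance() returns nullptr
    // if the key cannot be parsed.
    auto verifier = PayloadVerifier::CreateInstance(pem_public_key);
    if (!verifier) {
      return ErrorCode::kDownloadMetadataSignatureVerificationError;
    }
    // The verifier, not the raw key string, is now passed to the metadata
    // signature check.
    return payload_metadata.ValidateMetadataSignature(
        payload_blob, metadata_signature, *verifier);
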
diff --git a/payload_consumer/payload_verifier.cc b/payload_consumer/payload_verifier.cc
index 2f7c133..24e337e 100644
--- a/payload_consumer/payload_verifier.cc
+++ b/payload_consumer/payload_verifier.cc
@@ -16,13 +16,16 @@
 
 #include "update_engine/payload_consumer/payload_verifier.h"
 
+#include <utility>
 #include <vector>
 
 #include <base/logging.h>
 #include <openssl/pem.h>
 
+#include "update_engine/common/constants.h"
 #include "update_engine/common/hash_calculator.h"
 #include "update_engine/common/utils.h"
+#include "update_engine/payload_consumer/certificate_parser_interface.h"
 #include "update_engine/update_metadata.pb.h"
 
 using std::string;
@@ -31,61 +34,73 @@
 
 namespace {
 
-// The following is a standard PKCS1-v1_5 padding for SHA256 signatures, as
-// defined in RFC3447. It is prepended to the actual signature (32 bytes) to
-// form a sequence of 256 bytes (2048 bits) that is amenable to RSA signing. The
-// padded hash will look as follows:
+// The ASN.1 DigestInfo prefix for encoding a SHA256 digest. The complete
+// 51-byte DigestInfo consists of the 19-byte kSHA256DigestInfoPrefix below
+// followed by the 32-byte SHA256 digest.
 //
-//    0x00 0x01 0xff ... 0xff 0x00  ASN1HEADER  SHA256HASH
-//   |--------------205-----------||----19----||----32----|
-//
-// where ASN1HEADER is the ASN.1 description of the signed data. The complete 51
-// bytes of actual data (i.e. the ASN.1 header complete with the hash) are
-// packed as follows:
-//
-//  SEQUENCE(2+49) {
+// SEQUENCE(2+49) {
 //   SEQUENCE(2+13) {
-//    OBJECT(2+9) id-sha256
-//    NULL(2+0)
+//     OBJECT(2+9) id-sha256
+//     NULL(2+0)
 //   }
 //   OCTET STRING(2+32) <actual signature bytes...>
-//  }
-// clang-format off
-const uint8_t kRSA2048SHA256Padding[] = {
-    // PKCS1-v1_5 padding
-    0x00, 0x01, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-    0x00,
-    // ASN.1 header
-    0x30, 0x31, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03,
-    0x04, 0x02, 0x01, 0x05, 0x00, 0x04, 0x20,
+// }
+const uint8_t kSHA256DigestInfoPrefix[] = {
+    0x30, 0x31, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01,
+    0x65, 0x03, 0x04, 0x02, 0x01, 0x05, 0x00, 0x04, 0x20,
 };
-// clang-format on
 
 }  // namespace
 
-bool PayloadVerifier::VerifySignature(const brillo::Blob& signature_blob,
-                                      const string& pem_public_key,
-                                      const brillo::Blob& hash_data) {
+std::unique_ptr<PayloadVerifier> PayloadVerifier::CreateInstance(
+    const std::string& pem_public_key) {
+  std::unique_ptr<BIO, decltype(&BIO_free)> bp(
+      BIO_new_mem_buf(pem_public_key.data(), pem_public_key.size()), BIO_free);
+  if (!bp) {
+    LOG(ERROR) << "Failed to read " << pem_public_key << " into buffer.";
+    return nullptr;
+  }
+
+  auto pub_key = std::unique_ptr<EVP_PKEY, decltype(&EVP_PKEY_free)>(
+      PEM_read_bio_PUBKEY(bp.get(), nullptr, nullptr, nullptr), EVP_PKEY_free);
+  if (!pub_key) {
+    LOG(ERROR) << "Failed to parse the public key in: " << pem_public_key;
+    return nullptr;
+  }
+
+  std::vector<std::unique_ptr<EVP_PKEY, decltype(&EVP_PKEY_free)>> keys;
+  keys.emplace_back(std::move(pub_key));
+  return std::unique_ptr<PayloadVerifier>(new PayloadVerifier(std::move(keys)));
+}
+
+std::unique_ptr<PayloadVerifier> PayloadVerifier::CreateInstanceFromZipPath(
+    const std::string& certificate_zip_path) {
+  auto parser = CreateCertificateParser();
+  if (!parser) {
+    LOG(ERROR) << "Failed to create certificate parser from "
+               << certificate_zip_path;
+    return nullptr;
+  }
+
+  std::vector<std::unique_ptr<EVP_PKEY, decltype(&EVP_PKEY_free)>> public_keys;
+  if (!parser->ReadPublicKeysFromCertificates(certificate_zip_path,
+                                              &public_keys) ||
+      public_keys.empty()) {
+    LOG(ERROR) << "Failed to parse public keys in: " << certificate_zip_path;
+    return nullptr;
+  }
+
+  return std::unique_ptr<PayloadVerifier>(
+      new PayloadVerifier(std::move(public_keys)));
+}
+
+bool PayloadVerifier::VerifySignature(
+    const string& signature_proto, const brillo::Blob& sha256_hash_data) const {
+  TEST_AND_RETURN_FALSE(!public_keys_.empty());
+
   Signatures signatures;
-  LOG(INFO) << "signature blob size = " << signature_blob.size();
-  TEST_AND_RETURN_FALSE(
-      signatures.ParseFromArray(signature_blob.data(), signature_blob.size()));
+  LOG(INFO) << "signature blob size = " << signature_proto.size();
+  TEST_AND_RETURN_FALSE(signatures.ParseFromString(signature_proto));
 
   if (!signatures.signatures_size()) {
     LOG(ERROR) << "No signatures stored in the blob.";
@@ -95,47 +110,105 @@
   std::vector<brillo::Blob> tested_hashes;
   // Tries every signature in the signature blob.
   for (int i = 0; i < signatures.signatures_size(); i++) {
-    const Signatures_Signature& signature = signatures.signatures(i);
-    brillo::Blob sig_data(signature.data().begin(), signature.data().end());
-    brillo::Blob sig_hash_data;
-    if (!GetRawHashFromSignature(sig_data, pem_public_key, &sig_hash_data))
-      continue;
+    const Signatures::Signature& signature = signatures.signatures(i);
+    brillo::Blob sig_data;
+    if (signature.has_unpadded_signature_size()) {
+      TEST_AND_RETURN_FALSE(signature.unpadded_signature_size() <=
+                            signature.data().size());
+      LOG(INFO) << "Truncating the signature to its unpadded size: "
+                << signature.unpadded_signature_size() << ".";
+      sig_data.assign(
+          signature.data().begin(),
+          signature.data().begin() + signature.unpadded_signature_size());
+    } else {
+      sig_data.assign(signature.data().begin(), signature.data().end());
+    }
 
-    if (hash_data == sig_hash_data) {
+    brillo::Blob sig_hash_data;
+    if (VerifyRawSignature(sig_data, sha256_hash_data, &sig_hash_data)) {
       LOG(INFO) << "Verified correct signature " << i + 1 << " out of "
                 << signatures.signatures_size() << " signatures.";
       return true;
     }
-    tested_hashes.push_back(sig_hash_data);
+    if (!sig_hash_data.empty()) {
+      tested_hashes.push_back(sig_hash_data);
+    }
   }
   LOG(ERROR) << "None of the " << signatures.signatures_size()
-             << " signatures is correct. Expected:";
-  utils::HexDumpVector(hash_data);
-  LOG(ERROR) << "But found decrypted hashes:";
+             << " signatures is correct. Expected hash before padding:";
+  utils::HexDumpVector(sha256_hash_data);
+  LOG(ERROR) << "But found RSA decrypted hashes:";
   for (const auto& sig_hash_data : tested_hashes) {
     utils::HexDumpVector(sig_hash_data);
   }
   return false;
 }
 
-bool PayloadVerifier::GetRawHashFromSignature(const brillo::Blob& sig_data,
-                                              const string& pem_public_key,
-                                              brillo::Blob* out_hash_data) {
+bool PayloadVerifier::VerifyRawSignature(
+    const brillo::Blob& sig_data,
+    const brillo::Blob& sha256_hash_data,
+    brillo::Blob* decrypted_sig_data) const {
+  TEST_AND_RETURN_FALSE(!public_keys_.empty());
+
+  for (const auto& public_key : public_keys_) {
+    int key_type = EVP_PKEY_id(public_key.get());
+    if (key_type == EVP_PKEY_RSA) {
+      brillo::Blob sig_hash_data;
+      if (!GetRawHashFromSignature(
+              sig_data, public_key.get(), &sig_hash_data)) {
+        LOG(WARNING)
+            << "Failed to get the raw hash with RSA key. Trying other keys.";
+        continue;
+      }
+
+      if (decrypted_sig_data != nullptr) {
+        *decrypted_sig_data = sig_hash_data;
+      }
+
+      brillo::Blob padded_hash_data = sha256_hash_data;
+      TEST_AND_RETURN_FALSE(
+          PadRSASHA256Hash(&padded_hash_data, sig_hash_data.size()));
+
+      if (padded_hash_data == sig_hash_data) {
+        return true;
+      }
+    }
+
+    if (key_type == EVP_PKEY_EC) {
+      EC_KEY* ec_key = EVP_PKEY_get0_EC_KEY(public_key.get());
+      TEST_AND_RETURN_FALSE(ec_key != nullptr);
+      if (ECDSA_verify(0,
+                       sha256_hash_data.data(),
+                       sha256_hash_data.size(),
+                       sig_data.data(),
+                       sig_data.size(),
+                       ec_key) == 1) {
+        return true;
+      }
+    }
+
+    LOG(ERROR) << "Unsupported key type " << key_type;
+    return false;
+  }
+  LOG(INFO) << "Failed to verify the signature with " << public_keys_.size()
+            << " keys.";
+  return false;
+}
+
+bool PayloadVerifier::GetRawHashFromSignature(
+    const brillo::Blob& sig_data,
+    const EVP_PKEY* public_key,
+    brillo::Blob* out_hash_data) const {
   // The code below executes the equivalent of:
   //
-  // openssl rsautl -verify -pubin -inkey <(echo |pem_public_key|)
+  // openssl rsautl -verify -pubin -inkey <(echo pem_public_key)
   //   -in |sig_data| -out |out_hash_data|
-
-  BIO* bp = BIO_new_mem_buf(pem_public_key.data(), pem_public_key.size());
-  char dummy_password[] = {' ', 0};  // Ensure no password is read from stdin.
-  RSA* rsa = PEM_read_bio_RSA_PUBKEY(bp, nullptr, nullptr, dummy_password);
-  BIO_free(bp);
+  RSA* rsa = EVP_PKEY_get0_RSA(public_key);
 
   TEST_AND_RETURN_FALSE(rsa != nullptr);
   unsigned int keysize = RSA_size(rsa);
   if (sig_data.size() > 2 * keysize) {
     LOG(ERROR) << "Signature size is too big for public key size.";
-    RSA_free(rsa);
     return false;
   }
 
@@ -143,7 +216,6 @@
   brillo::Blob hash_data(keysize);
   int decrypt_size = RSA_public_decrypt(
       sig_data.size(), sig_data.data(), hash_data.data(), rsa, RSA_NO_PADDING);
-  RSA_free(rsa);
   TEST_AND_RETURN_FALSE(decrypt_size > 0 &&
                         decrypt_size <= static_cast<int>(hash_data.size()));
   hash_data.resize(decrypt_size);
@@ -151,13 +223,30 @@
   return true;
 }
 
-bool PayloadVerifier::PadRSA2048SHA256Hash(brillo::Blob* hash) {
-  TEST_AND_RETURN_FALSE(hash->size() == 32);
-  hash->insert(hash->begin(),
-               reinterpret_cast<const char*>(kRSA2048SHA256Padding),
-               reinterpret_cast<const char*>(kRSA2048SHA256Padding +
-                                             sizeof(kRSA2048SHA256Padding)));
-  TEST_AND_RETURN_FALSE(hash->size() == 256);
+bool PayloadVerifier::PadRSASHA256Hash(brillo::Blob* hash, size_t rsa_size) {
+  TEST_AND_RETURN_FALSE(hash->size() == kSHA256Size);
+  TEST_AND_RETURN_FALSE(rsa_size == 256 || rsa_size == 512);
+
+  // The following is a standard PKCS1-v1_5 padding for SHA256 signatures, as
+  // defined in RFC3447 section 9.2. It is prepended to the actual hash
+  // (32 bytes) to form a sequence of 256|512 bytes (2048|4096 bits) that is
+  // amenable to RSA signing. The padded hash will look as follows:
+  //
+  //    0x00 0x01 0xff ... 0xff 0x00  ASN1HEADER  SHA256HASH
+  //   |-----------205|461----------||----19----||----32----|
+  size_t padding_string_size =
+      rsa_size - hash->size() - sizeof(kSHA256DigestInfoPrefix) - 3;
+  brillo::Blob padded_result = brillo::CombineBlobs({
+      {0x00, 0x01},
+      brillo::Blob(padding_string_size, 0xff),
+      {0x00},
+      brillo::Blob(kSHA256DigestInfoPrefix,
+                   kSHA256DigestInfoPrefix + sizeof(kSHA256DigestInfoPrefix)),
+      *hash,
+  });
+
+  *hash = std::move(padded_result);
+  TEST_AND_RETURN_FALSE(hash->size() == rsa_size);
   return true;
 }
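
A quick check of the layout documented in PadRSASHA256Hash() above: with a 2048-bit key (rsa_size = 256) the 0xff filler is 256 - 32 - 19 - 3 = 202 bytes, so the 0x00 0x01 ... 0xff 0x00 run is 2 + 202 + 1 = 205 bytes; with a 4096-bit key (rsa_size = 512) the filler is 458 bytes and the run is 461 bytes, matching the 205|461 figure in the diagram. The 19 bytes are the SHA-256 DigestInfo (ASN.1) prefix.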
 
diff --git a/payload_consumer/payload_verifier.h b/payload_consumer/payload_verifier.h
index ec23ef2..bc5231f 100644
--- a/payload_consumer/payload_verifier.h
+++ b/payload_consumer/payload_verifier.h
@@ -17,47 +17,72 @@
 #ifndef UPDATE_ENGINE_PAYLOAD_CONSUMER_PAYLOAD_VERIFIER_H_
 #define UPDATE_ENGINE_PAYLOAD_CONSUMER_PAYLOAD_VERIFIER_H_
 
+#include <memory>
 #include <string>
+#include <utility>
+#include <vector>
 
-#include <base/macros.h>
 #include <brillo/secure_blob.h>
+#include <openssl/evp.h>
 
 #include "update_engine/update_metadata.pb.h"
 
-// This class encapsulates methods used for payload signature verification.
-// See payload_generator/payload_signer.h for payload signing.
+// This class holds the public keys and implements methods used for payload
+// signature verification. See payload_generator/payload_signer.h for payload
+// signing.
 
 namespace chromeos_update_engine {
 
 class PayloadVerifier {
  public:
-  // Interprets |signature_blob| as a protocol buffer containing the Signatures
-  // message and decrypts each signature data using the |pem_public_key|.
-  // |pem_public_key| should be a PEM format RSA public key data.
-  // Returns whether *any* of the decrypted hashes matches the |hash_data|.
-  // In case of any error parsing the signatures or the public key, returns
-  // false.
-  static bool VerifySignature(const brillo::Blob& signature_blob,
-                              const std::string& pem_public_key,
-                              const brillo::Blob& hash_data);
+  // Pads a SHA256 hash so that it may be encrypted/signed with RSA2048 or
+  // RSA4096 using the PKCS#1 v1.5 scheme.
+  // |hash| should point to a vector of exactly 256 bits (32 bytes). |rsa_size|
+  // must be either 256 or 512 bytes. The vector is modified in place and ends
+  // up 2048 or 4096 bits long, depending on |rsa_size|.
+  // Returns true on success, false otherwise.
+  static bool PadRSASHA256Hash(brillo::Blob* hash, size_t rsa_size);
 
-  // Decrypts |sig_data| with the given |pem_public_key| and populates
-  // |out_hash_data| with the decoded raw hash. |pem_public_key| should be a PEM
-  // format RSA public key data. Returns true if successful, false otherwise.
-  static bool GetRawHashFromSignature(const brillo::Blob& sig_data,
-                                      const std::string& pem_public_key,
-                                      brillo::Blob* out_hash_data);
+  // Parses the input as a PEM encoded public key string, and creates a
+  // PayloadVerifier with that public key for signature verification.
+  static std::unique_ptr<PayloadVerifier> CreateInstance(
+      const std::string& pem_public_key);
 
-  // Pads a SHA256 hash so that it may be encrypted/signed with RSA2048
-  // using the PKCS#1 v1.5 scheme.
-  // hash should be a pointer to vector of exactly 256 bits. The vector
-  // will be modified in place and will result in having a length of
-  // 2048 bits. Returns true on success, false otherwise.
-  static bool PadRSA2048SHA256Hash(brillo::Blob* hash);
+  // Extracts the public keys from the certificates contained in the input
+  // zip file, and creates a PayloadVerifier with these public keys.
+  static std::unique_ptr<PayloadVerifier> CreateInstanceFromZipPath(
+      const std::string& certificate_zip_path);
+
+  // Interprets |signature_proto| as a protocol buffer containing the
+  // |Signatures| message and decrypts each signature using the stored
+  // public key. Pads the 32-byte |sha256_hash_data| to 256 or 512 bytes
+  // according to the PKCS#1 v1.5 standard, and returns whether *any* of the
+  // decrypted hashes matches the padded hash data. In case of any error parsing
+  // the signatures, returns false.
+  bool VerifySignature(const std::string& signature_proto,
+                       const brillo::Blob& sha256_hash_data) const;
+
+  // Verifies whether |sig_data| is a raw signature of the hash
+  // |sha256_hash_data|. If the PayloadVerifier holds an RSA public key, it
+  // also stores the RSA-decrypted |sig_data| in |decrypted_sig_data|.
+  bool VerifyRawSignature(const brillo::Blob& sig_data,
+                          const brillo::Blob& sha256_hash_data,
+                          brillo::Blob* decrypted_sig_data) const;
 
  private:
-  // This should never be constructed
-  DISALLOW_IMPLICIT_CONSTRUCTORS(PayloadVerifier);
+  explicit PayloadVerifier(
+      std::vector<std::unique_ptr<EVP_PKEY, decltype(&EVP_PKEY_free)>>&&
+          public_keys)
+      : public_keys_(std::move(public_keys)) {}
+
+  // Decrypts |sig_data| with the given |public_key| and populates
+  // |out_hash_data| with the decoded raw hash. Returns true if successful,
+  // false otherwise.
+  bool GetRawHashFromSignature(const brillo::Blob& sig_data,
+                               const EVP_PKEY* public_key,
+                               brillo::Blob* out_hash_data) const;
+
+  std::vector<std::unique_ptr<EVP_PKEY, decltype(&EVP_PKEY_free)>> public_keys_;
 };
 
 }  // namespace chromeos_update_engine
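
For reference, a minimal sketch of how a caller might use the instance-based interface declared above (it assumes the header is included; |pem_key|, |signature_proto|, and |payload_hash| are placeholder names for the caller's data):

    // |pem_key| is a PEM-encoded public key string; |payload_hash| is the
    // unpadded 32-byte SHA-256 hash of the signed data; |signature_proto| is
    // the serialized Signatures message.
    auto verifier =
        chromeos_update_engine::PayloadVerifier::CreateInstance(pem_key);
    if (verifier != nullptr &&
        verifier->VerifySignature(signature_proto, payload_hash)) {
      // The signature matched one of the loaded public keys.
    }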
diff --git a/payload_consumer/postinstall_runner_action.cc b/payload_consumer/postinstall_runner_action.cc
index a0b67ea..c520c7e 100644
--- a/payload_consumer/postinstall_runner_action.cc
+++ b/payload_consumer/postinstall_runner_action.cc
@@ -50,6 +50,7 @@
 
 namespace chromeos_update_engine {
 
+using brillo::MessageLoop;
 using std::string;
 using std::vector;
 
@@ -75,10 +76,17 @@
   partition_weight_.resize(install_plan_.partitions.size());
   total_weight_ = 0;
   for (size_t i = 0; i < install_plan_.partitions.size(); ++i) {
+    auto& partition = install_plan_.partitions[i];
+    if (!install_plan_.run_post_install && partition.postinstall_optional) {
+      partition.run_postinstall = false;
+      LOG(INFO) << "Skipping optional post-install for partition "
+                << partition.name << " according to install plan.";
+    }
+
     // TODO(deymo): This code sets the weight to all the postinstall commands,
     // but we could remember how long they took in the past and use those
     // values.
-    partition_weight_[i] = install_plan_.partitions[i].run_postinstall;
+    partition_weight_[i] = partition.run_postinstall;
     total_weight_ += partition_weight_[i];
   }
   accumulated_weight_ = 0;
@@ -88,11 +96,6 @@
 }
 
 void PostinstallRunnerAction::PerformPartitionPostinstall() {
-  if (!install_plan_.run_post_install) {
-    LOG(INFO) << "Skipping post-install according to install plan.";
-    return CompletePostinstall(ErrorCode::kSuccess);
-  }
-
   if (install_plan_.download_url.empty()) {
     LOG(INFO) << "Skipping post-install during rollback";
     return CompletePostinstall(ErrorCode::kSuccess);
@@ -217,10 +220,21 @@
     PLOG(ERROR) << "Unable to set non-blocking I/O mode on fd " << progress_fd_;
   }
 
+#ifdef __ANDROID__
+  progress_task_ = MessageLoop::current()->WatchFileDescriptor(
+      FROM_HERE,
+      progress_fd_,
+      MessageLoop::WatchMode::kWatchRead,
+      true,
+      base::Bind(&PostinstallRunnerAction::OnProgressFdReady,
+                 base::Unretained(this)));
+#else
   progress_controller_ = base::FileDescriptorWatcher::WatchReadable(
       progress_fd_,
       base::BindRepeating(&PostinstallRunnerAction::OnProgressFdReady,
                           base::Unretained(this)));
+#endif  // __ANDROID__
+
 }
 
 void PostinstallRunnerAction::OnProgressFdReady() {
@@ -245,7 +259,12 @@
     if (!ok || eof) {
       // There was either an error or an EOF condition, so we are done watching
       // the file descriptor.
+#ifdef __ANDROID__
+      MessageLoop::current()->CancelTask(progress_task_);
+      progress_task_ = MessageLoop::kTaskIdNull;
+#else
       progress_controller_.reset();
+#endif  // __ANDROID__
       return;
     }
   } while (bytes_read);
@@ -289,7 +308,15 @@
   fs_mount_dir_.clear();
 
   progress_fd_ = -1;
+#ifdef __ANDROID__
+  if (progress_task_ != MessageLoop::kTaskIdNull) {
+    MessageLoop::current()->CancelTask(progress_task_);
+    progress_task_ = MessageLoop::kTaskIdNull;
+  }
+#else
   progress_controller_.reset();
+#endif  // __ANDROID__
+
   progress_buffer_.clear();
 }
 
@@ -336,8 +363,13 @@
   // steps succeeded.
   if (error_code == ErrorCode::kSuccess) {
     if (install_plan_.switch_slot_on_reboot) {
-      if (!boot_control_->SetActiveBootSlot(install_plan_.target_slot)) {
+      if (!boot_control_->GetDynamicPartitionControl()->FinishUpdate(
+              install_plan_.powerwash_required) ||
+          !boot_control_->SetActiveBootSlot(install_plan_.target_slot)) {
         error_code = ErrorCode::kPostinstallRunnerError;
+      } else {
+        // Schedules warm reset on next reboot, ignores the error.
+        hardware_->SetWarmReset(true);
       }
     } else {
       error_code = ErrorCode::kUpdatedButNotActive;
diff --git a/payload_consumer/postinstall_runner_action.h b/payload_consumer/postinstall_runner_action.h
index 838b235..e5dfc40 100644
--- a/payload_consumer/postinstall_runner_action.h
+++ b/payload_consumer/postinstall_runner_action.h
@@ -141,7 +141,12 @@
   // The parent progress file descriptor used to watch for progress reports from
   // the postinstall program and the task watching for them.
   int progress_fd_{-1};
+
+#ifdef __ANDROID__
+  brillo::MessageLoop::TaskId progress_task_{brillo::MessageLoop::kTaskIdNull};
+#else
   std::unique_ptr<base::FileDescriptorWatcher::Controller> progress_controller_;
+#endif  // __ANDROID__
 
   // A buffer of a partial read line from the progress file descriptor.
   std::string progress_buffer_;
diff --git a/payload_consumer/postinstall_runner_action_unittest.cc b/payload_consumer/postinstall_runner_action_unittest.cc
index 84f2c2c..0041d31 100644
--- a/payload_consumer/postinstall_runner_action_unittest.cc
+++ b/payload_consumer/postinstall_runner_action_unittest.cc
@@ -103,6 +103,8 @@
                             bool is_rollback,
                             bool save_rollback_data);
 
+  void RunPostinstallActionWithInstallPlan(const InstallPlan& install_plan);
+
  public:
   void ResumeRunningAction() {
     ASSERT_NE(nullptr, postinstall_action_);
@@ -180,9 +182,6 @@
     bool powerwash_required,
     bool is_rollback,
     bool save_rollback_data) {
-  ActionProcessor processor;
-  processor_ = &processor;
-  auto feeder_action = std::make_unique<ObjectFeederAction<InstallPlan>>();
   InstallPlan::Partition part;
   part.name = "part";
   part.target_path = device_path;
@@ -194,6 +193,14 @@
   install_plan.powerwash_required = powerwash_required;
   install_plan.is_rollback = is_rollback;
   install_plan.rollback_data_save_requested = save_rollback_data;
+  RunPostinstallActionWithInstallPlan(install_plan);
+}
+
+void PostinstallRunnerActionTest::RunPostinstallActionWithInstallPlan(
+    const chromeos_update_engine::InstallPlan& install_plan) {
+  ActionProcessor processor;
+  processor_ = &processor;
+  auto feeder_action = std::make_unique<ObjectFeederAction<InstallPlan>>();
   feeder_action->set_obj(install_plan);
   auto runner_action = std::make_unique<PostinstallRunnerAction>(
       &fake_boot_control_, &fake_hardware_);
@@ -335,6 +342,27 @@
   EXPECT_FALSE(fake_hardware_.GetIsRollbackPowerwashScheduled());
 }
 
+TEST_F(PostinstallRunnerActionTest, RunAsRootSkipOptionalPostinstallTest) {
+  InstallPlan::Partition part;
+  part.name = "part";
+  part.target_path = "/dev/null";
+  part.run_postinstall = true;
+  part.postinstall_path = kPostinstallDefaultScript;
+  part.postinstall_optional = true;
+  InstallPlan install_plan;
+  install_plan.partitions = {part};
+  install_plan.download_url = "http://127.0.0.1:8080/update";
+
+  // Optional postinstalls will be skipped, and the postinstall action succeeds.
+  RunPostinstallActionWithInstallPlan(install_plan);
+  EXPECT_EQ(ErrorCode::kSuccess, processor_delegate_.code_);
+
+  part.postinstall_optional = false;
+  install_plan.partitions = {part};
+  RunPostinstallActionWithInstallPlan(install_plan);
+  EXPECT_EQ(ErrorCode::kPostinstallRunnerError, processor_delegate_.code_);
+}
+
 // Check that the failures from the postinstall script cause the action to
 // fail.
 TEST_F(PostinstallRunnerActionTest, RunAsRootErrScriptTest) {
diff --git a/payload_consumer/verity_writer_android.cc b/payload_consumer/verity_writer_android.cc
index 06d1489..d5437b6 100644
--- a/payload_consumer/verity_writer_android.cc
+++ b/payload_consumer/verity_writer_android.cc
@@ -41,6 +41,9 @@
 bool VerityWriterAndroid::Init(const InstallPlan::Partition& partition) {
   partition_ = &partition;
 
+  if (partition_->hash_tree_size != 0 || partition_->fec_size != 0) {
+    utils::SetBlockDeviceReadOnly(partition_->target_path, false);
+  }
   if (partition_->hash_tree_size != 0) {
     auto hash_function =
         HashTreeBuilder::HashFunction(partition_->hash_tree_algorithm);
diff --git a/payload_generator/ab_generator.cc b/payload_generator/ab_generator.cc
index f4cc9fb..d9b9d88 100644
--- a/payload_generator/ab_generator.cc
+++ b/payload_generator/ab_generator.cc
@@ -276,7 +276,7 @@
       target_part_path, dst_extents, &data, data.size(), kBlockSize));
 
   brillo::Blob blob;
-  InstallOperation_Type op_type;
+  InstallOperation::Type op_type;
   TEST_AND_RETURN_FALSE(
       diff_utils::GenerateBestFullOperation(data, version, &blob, &op_type));
 
diff --git a/payload_generator/ab_generator_unittest.cc b/payload_generator/ab_generator_unittest.cc
index 170e0e3..7a95284 100644
--- a/payload_generator/ab_generator_unittest.cc
+++ b/payload_generator/ab_generator_unittest.cc
@@ -49,7 +49,7 @@
 }
 
 // Tests splitting of a REPLACE/REPLACE_XZ operation.
-void TestSplitReplaceOrReplaceXzOperation(InstallOperation_Type orig_type,
+void TestSplitReplaceOrReplaceXzOperation(InstallOperation::Type orig_type,
                                           bool compressible) {
   const size_t op_ex1_start_block = 2;
   const size_t op_ex1_num_blocks = 2;
@@ -124,7 +124,7 @@
       version, aop, part_file.path(), &result_ops, &blob_file));
 
   // Check the result.
-  InstallOperation_Type expected_type =
+  InstallOperation::Type expected_type =
       compressible ? InstallOperation::REPLACE_XZ : InstallOperation::REPLACE;
 
   ASSERT_EQ(2U, result_ops.size());
@@ -200,7 +200,7 @@
 }
 
 // Tests merging of REPLACE/REPLACE_XZ operations.
-void TestMergeReplaceOrReplaceXzOperations(InstallOperation_Type orig_type,
+void TestMergeReplaceOrReplaceXzOperations(InstallOperation::Type orig_type,
                                            bool compressible) {
   const size_t first_op_num_blocks = 1;
   const size_t second_op_num_blocks = 2;
@@ -287,7 +287,7 @@
       &aops, version, 5, part_file.path(), &blob_file));
 
   // Check the result.
-  InstallOperation_Type expected_op_type =
+  InstallOperation::Type expected_op_type =
       compressible ? InstallOperation::REPLACE_XZ : InstallOperation::REPLACE;
   EXPECT_EQ(1U, aops.size());
   InstallOperation new_op = aops[0].op;
diff --git a/payload_generator/boot_img_filesystem.cc b/payload_generator/boot_img_filesystem.cc
index 19de410..89b175e 100644
--- a/payload_generator/boot_img_filesystem.cc
+++ b/payload_generator/boot_img_filesystem.cc
@@ -17,6 +17,7 @@
 #include "update_engine/payload_generator/boot_img_filesystem.h"
 
 #include <base/logging.h>
+#include <bootimg.h>
 #include <brillo/secure_blob.h>
 #include <puffin/utils.h>
 
@@ -35,16 +36,61 @@
   if (filename.empty())
     return nullptr;
 
-  brillo::Blob header;
-  if (!utils::ReadFileChunk(filename, 0, sizeof(boot_img_hdr), &header) ||
-      header.size() != sizeof(boot_img_hdr) ||
-      memcmp(header.data(), BOOT_MAGIC, BOOT_MAGIC_SIZE) != 0) {
+  if (brillo::Blob header_magic;
+      !utils::ReadFileChunk(filename, 0, BOOT_MAGIC_SIZE, &header_magic) ||
+      memcmp(header_magic.data(), BOOT_MAGIC, BOOT_MAGIC_SIZE) != 0) {
+    return nullptr;
+  }
+
+  // The image header fields are laid out differently in version 3 than in
+  // the previous versions, but the position of "header_version" is fixed at
+  // field #9 across all image headers.
+  // See system/tools/mkbootimg/include/bootimg/bootimg.h for details.
+  constexpr size_t header_version_offset =
+      BOOT_MAGIC_SIZE + 8 * sizeof(uint32_t);
+  brillo::Blob header_version_blob;
+  if (!utils::ReadFileChunk(filename,
+                            header_version_offset,
+                            sizeof(uint32_t),
+                            &header_version_blob)) {
+    return nullptr;
+  }
+  uint32_t header_version =
+      *reinterpret_cast<uint32_t*>(header_version_blob.data());
+  if (header_version > 3) {
+    LOG(WARNING) << "Boot image header version " << header_version
+                 << " isn't supported for parsing";
+    return nullptr;
+  }
+
+  // Read the bytes of boot image header based on the header version.
+  size_t header_size =
+      header_version == 3 ? sizeof(boot_img_hdr_v3) : sizeof(boot_img_hdr_v0);
+  brillo::Blob header_blob;
+  if (!utils::ReadFileChunk(filename, 0, header_size, &header_blob)) {
     return nullptr;
   }
 
   unique_ptr<BootImgFilesystem> result(new BootImgFilesystem());
   result->filename_ = filename;
-  memcpy(&result->hdr_, header.data(), header.size());
+  if (header_version < 3) {
+    auto hdr_v0 = reinterpret_cast<boot_img_hdr_v0*>(header_blob.data());
+    CHECK_EQ(0, memcmp(hdr_v0->magic, BOOT_MAGIC, BOOT_MAGIC_SIZE));
+    CHECK_LT(hdr_v0->header_version, 3u);
+    result->kernel_size_ = hdr_v0->kernel_size;
+    result->ramdisk_size_ = hdr_v0->ramdisk_size;
+    result->page_size_ = hdr_v0->page_size;
+  } else {
+    auto hdr_v3 = reinterpret_cast<boot_img_hdr_v3*>(header_blob.data());
+    CHECK_EQ(0, memcmp(hdr_v3->magic, BOOT_MAGIC, BOOT_MAGIC_SIZE));
+    CHECK_EQ(3u, hdr_v3->header_version);
+    result->kernel_size_ = hdr_v3->kernel_size;
+    result->ramdisk_size_ = hdr_v3->ramdisk_size;
+    result->page_size_ = 4096;
+  }
+
+  CHECK_GT(result->page_size_, 0u);
+
   return result;
 }
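
Concretely, the fixed "header_version" offset used in CreateFromFile() above works out to BOOT_MAGIC_SIZE + 8 * sizeof(uint32_t) = 8 + 32 = 40 bytes from the start of the image, and it is the same in the v0/v1/v2 and v3 header layouts.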
 
@@ -87,13 +133,13 @@
   files->clear();
   const uint64_t file_size = utils::FileSize(filename_);
   // The first page is header.
-  uint64_t offset = hdr_.page_size;
-  if (hdr_.kernel_size > 0 && offset + hdr_.kernel_size <= file_size) {
-    files->emplace_back(GetFile("<kernel>", offset, hdr_.kernel_size));
+  uint64_t offset = page_size_;
+  if (kernel_size_ > 0 && offset + kernel_size_ <= file_size) {
+    files->emplace_back(GetFile("<kernel>", offset, kernel_size_));
   }
-  offset += utils::RoundUp(hdr_.kernel_size, hdr_.page_size);
-  if (hdr_.ramdisk_size > 0 && offset + hdr_.ramdisk_size <= file_size) {
-    files->emplace_back(GetFile("<ramdisk>", offset, hdr_.ramdisk_size));
+  offset += utils::RoundUp(kernel_size_, page_size_);
+  if (ramdisk_size_ > 0 && offset + ramdisk_size_ <= file_size) {
+    files->emplace_back(GetFile("<ramdisk>", offset, ramdisk_size_));
   }
   return true;
 }
diff --git a/payload_generator/boot_img_filesystem.h b/payload_generator/boot_img_filesystem.h
index 87725d4..61f755c 100644
--- a/payload_generator/boot_img_filesystem.h
+++ b/payload_generator/boot_img_filesystem.h
@@ -52,23 +52,9 @@
   // The boot.img file path.
   std::string filename_;
 
-// https://android.googlesource.com/platform/system/core/+/master/mkbootimg/include/bootimg/bootimg.h
-#define BOOT_MAGIC "ANDROID!"
-#define BOOT_MAGIC_SIZE 8
-  struct boot_img_hdr {
-    // Must be BOOT_MAGIC.
-    uint8_t magic[BOOT_MAGIC_SIZE];
-    uint32_t kernel_size;  /* size in bytes */
-    uint32_t kernel_addr;  /* physical load addr */
-    uint32_t ramdisk_size; /* size in bytes */
-    uint32_t ramdisk_addr; /* physical load addr */
-    uint32_t second_size;  /* size in bytes */
-    uint32_t second_addr;  /* physical load addr */
-    uint32_t tags_addr;    /* physical addr for kernel tags */
-    uint32_t page_size;    /* flash page size we assume */
-  } __attribute__((packed));
-  // The boot image header.
-  boot_img_hdr hdr_;
+  uint32_t kernel_size_;  /* size in bytes */
+  uint32_t ramdisk_size_; /* size in bytes */
+  uint32_t page_size_;    /* flash page size we assume */
 
   DISALLOW_COPY_AND_ASSIGN(BootImgFilesystem);
 };
diff --git a/payload_generator/boot_img_filesystem_stub.cc b/payload_generator/boot_img_filesystem_stub.cc
new file mode 100644
index 0000000..4928fa1
--- /dev/null
+++ b/payload_generator/boot_img_filesystem_stub.cc
@@ -0,0 +1,48 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_generator/boot_img_filesystem.h"
+
+namespace chromeos_update_engine {
+std::unique_ptr<BootImgFilesystem> BootImgFilesystem::CreateFromFile(
+    const std::string& /* filename */) {
+  return nullptr;
+}
+
+size_t BootImgFilesystem::GetBlockSize() const {
+  return 4096;
+}
+
+size_t BootImgFilesystem::GetBlockCount() const {
+  return 0;
+}
+
+FilesystemInterface::File BootImgFilesystem::GetFile(
+    const std::string& /* name */,
+    uint64_t /* offset */,
+    uint64_t /* size */) const {
+  return {};
+}
+
+bool BootImgFilesystem::GetFiles(std::vector<File>* /* files */) const {
+  return false;
+}
+
+bool BootImgFilesystem::LoadSettings(brillo::KeyValueStore* /* store */) const {
+  return false;
+}
+
+}  // namespace chromeos_update_engine
diff --git a/payload_generator/boot_img_filesystem_unittest.cc b/payload_generator/boot_img_filesystem_unittest.cc
index b1e0d99..0b115e0 100644
--- a/payload_generator/boot_img_filesystem_unittest.cc
+++ b/payload_generator/boot_img_filesystem_unittest.cc
@@ -18,6 +18,7 @@
 
 #include <vector>
 
+#include <bootimg.h>
 #include <brillo/secure_blob.h>
 #include <gtest/gtest.h>
 
@@ -32,18 +33,32 @@
 class BootImgFilesystemTest : public ::testing::Test {
  protected:
   brillo::Blob GetBootImg(const brillo::Blob& kernel,
-                          const brillo::Blob& ramdisk) {
+                          const brillo::Blob& ramdisk,
+                          bool header_version3 = false) {
     brillo::Blob boot_img(16 * 1024);
-    BootImgFilesystem::boot_img_hdr hdr;
-    memcpy(hdr.magic, BOOT_MAGIC, BOOT_MAGIC_SIZE);
-    hdr.kernel_size = kernel.size();
-    hdr.ramdisk_size = ramdisk.size();
-    hdr.page_size = 4096;
+    constexpr uint32_t page_size = 4096;
+
     size_t offset = 0;
-    memcpy(boot_img.data() + offset, &hdr, sizeof(hdr));
-    offset += utils::RoundUp(sizeof(hdr), hdr.page_size);
+    if (header_version3) {
+      boot_img_hdr_v3 hdr_v3{};
+      memcpy(hdr_v3.magic, BOOT_MAGIC, BOOT_MAGIC_SIZE);
+      hdr_v3.kernel_size = kernel.size();
+      hdr_v3.ramdisk_size = ramdisk.size();
+      hdr_v3.header_version = 3;
+      memcpy(boot_img.data() + offset, &hdr_v3, sizeof(hdr_v3));
+      offset += utils::RoundUp(sizeof(hdr_v3), page_size);
+    } else {
+      boot_img_hdr_v0 hdr_v0{};
+      memcpy(hdr_v0.magic, BOOT_MAGIC, BOOT_MAGIC_SIZE);
+      hdr_v0.kernel_size = kernel.size();
+      hdr_v0.ramdisk_size = ramdisk.size();
+      hdr_v0.page_size = page_size;
+      hdr_v0.header_version = 0;
+      memcpy(boot_img.data() + offset, &hdr_v0, sizeof(hdr_v0));
+      offset += utils::RoundUp(sizeof(hdr_v0), page_size);
+    }
     memcpy(boot_img.data() + offset, kernel.data(), kernel.size());
-    offset += utils::RoundUp(kernel.size(), hdr.page_size);
+    offset += utils::RoundUp(kernel.size(), page_size);
     memcpy(boot_img.data() + offset, ramdisk.data(), ramdisk.size());
     return boot_img;
   }
@@ -76,6 +91,31 @@
   EXPECT_TRUE(files[1].deflates.empty());
 }
 
+TEST_F(BootImgFilesystemTest, ImageHeaderVersion3) {
+  test_utils::WriteFileVector(
+      boot_file_.path(),
+      GetBootImg(brillo::Blob(1000, 'k'), brillo::Blob(5000, 'r'), true));
+  unique_ptr<BootImgFilesystem> fs =
+      BootImgFilesystem::CreateFromFile(boot_file_.path());
+  EXPECT_NE(nullptr, fs);
+
+  vector<FilesystemInterface::File> files;
+  EXPECT_TRUE(fs->GetFiles(&files));
+  ASSERT_EQ(2u, files.size());
+
+  EXPECT_EQ("<kernel>", files[0].name);
+  EXPECT_EQ(1u, files[0].extents.size());
+  EXPECT_EQ(1u, files[0].extents[0].start_block());
+  EXPECT_EQ(1u, files[0].extents[0].num_blocks());
+  EXPECT_TRUE(files[0].deflates.empty());
+
+  EXPECT_EQ("<ramdisk>", files[1].name);
+  EXPECT_EQ(1u, files[1].extents.size());
+  EXPECT_EQ(2u, files[1].extents[0].start_block());
+  EXPECT_EQ(2u, files[1].extents[0].num_blocks());
+  EXPECT_TRUE(files[1].deflates.empty());
+}
+
 TEST_F(BootImgFilesystemTest, BadImageTest) {
   brillo::Blob boot_img = GetBootImg({}, {});
   boot_img[7] = '?';
diff --git a/payload_generator/deflate_utils.cc b/payload_generator/deflate_utils.cc
index 5d7a766..c874bfd 100644
--- a/payload_generator/deflate_utils.cc
+++ b/payload_generator/deflate_utils.cc
@@ -74,6 +74,15 @@
   return false;
 }
 
+bool IsRegularFile(const FilesystemInterface::File& file) {
+  // If the inode is 0, the stat information is invalid for some pseudo files.
+  if (file.file_stat.st_ino != 0 &&
+      (file.file_stat.st_mode & S_IFMT) == S_IFREG) {
+    return true;
+  }
+  return false;
+}
+
 // Realigns subfiles |files| of a splitted file |file| into its correct
 // positions. This can be used for squashfs, zip, apk, etc.
 bool RealignSplittedFiles(const FilesystemInterface::File& file,
@@ -265,7 +274,9 @@
   result_files->reserve(tmp_files.size());
 
   for (auto& file : tmp_files) {
-    if (IsSquashfsImage(part.path, file)) {
+    auto is_regular_file = IsRegularFile(file);
+
+    if (is_regular_file && IsSquashfsImage(part.path, file)) {
       // Read the image into a file.
       base::FilePath path;
       TEST_AND_RETURN_FALSE(base::CreateTemporaryFile(&path));
@@ -296,7 +307,7 @@
       }
     }
 
-    if (extract_deflates && !file.is_compressed) {
+    if (is_regular_file && extract_deflates && !file.is_compressed) {
       // Search for deflates if the file is in zip or gzip format.
       // .zvoice files may eventually move out of rootfs. If that happens,
       // remove ".zvoice" (crbug.com/782918).
diff --git a/payload_generator/delta_diff_utils.cc b/payload_generator/delta_diff_utils.cc
index 53a3cf1..22752e8 100644
--- a/payload_generator/delta_diff_utils.cc
+++ b/payload_generator/delta_diff_utils.cc
@@ -637,7 +637,7 @@
 bool GenerateBestFullOperation(const brillo::Blob& new_data,
                                const PayloadVersion& version,
                                brillo::Blob* out_blob,
-                               InstallOperation_Type* out_type) {
+                               InstallOperation::Type* out_type) {
   if (new_data.empty())
     return false;
 
@@ -739,7 +739,7 @@
 
   // Try generating a full operation for the given new data, regardless of the
   // old_data.
-  InstallOperation_Type op_type;
+  InstallOperation::Type op_type;
   TEST_AND_RETURN_FALSE(
       GenerateBestFullOperation(new_data, version, &data_blob, &op_type));
   operation.set_type(op_type);
@@ -766,7 +766,7 @@
         ScopedPathUnlinker unlinker(patch.value());
 
         std::unique_ptr<bsdiff::PatchWriterInterface> bsdiff_patch_writer;
-        InstallOperation_Type operation_type = InstallOperation::SOURCE_BSDIFF;
+        InstallOperation::Type operation_type = InstallOperation::SOURCE_BSDIFF;
         if (version.OperationAllowed(InstallOperation::BROTLI_BSDIFF)) {
           bsdiff_patch_writer =
               bsdiff::CreateBSDF2PatchWriter(patch.value(),
@@ -872,13 +872,13 @@
   return true;
 }
 
-bool IsAReplaceOperation(InstallOperation_Type op_type) {
+bool IsAReplaceOperation(InstallOperation::Type op_type) {
   return (op_type == InstallOperation::REPLACE ||
           op_type == InstallOperation::REPLACE_BZ ||
           op_type == InstallOperation::REPLACE_XZ);
 }
 
-bool IsNoSourceOperation(InstallOperation_Type op_type) {
+bool IsNoSourceOperation(InstallOperation::Type op_type) {
   return (IsAReplaceOperation(op_type) || op_type == InstallOperation::ZERO ||
           op_type == InstallOperation::DISCARD);
 }
diff --git a/payload_generator/delta_diff_utils.h b/payload_generator/delta_diff_utils.h
index a062327..c75d16d 100644
--- a/payload_generator/delta_diff_utils.h
+++ b/payload_generator/delta_diff_utils.h
@@ -119,13 +119,13 @@
 bool GenerateBestFullOperation(const brillo::Blob& new_data,
                                const PayloadVersion& version,
                                brillo::Blob* out_blob,
-                               InstallOperation_Type* out_type);
+                               InstallOperation::Type* out_type);
 
 // Returns whether |op_type| is one of the REPLACE full operations.
-bool IsAReplaceOperation(InstallOperation_Type op_type);
+bool IsAReplaceOperation(InstallOperation::Type op_type);
 
 // Returns true if an operation with type |op_type| has no |src_extents|.
-bool IsNoSourceOperation(InstallOperation_Type op_type);
+bool IsNoSourceOperation(InstallOperation::Type op_type);
 
 bool InitializePartitionInfo(const PartitionConfig& partition,
                              PartitionInfo* info);
diff --git a/payload_generator/delta_diff_utils_unittest.cc b/payload_generator/delta_diff_utils_unittest.cc
index 8a97b1b..0857f9c 100644
--- a/payload_generator/delta_diff_utils_unittest.cc
+++ b/payload_generator/delta_diff_utils_unittest.cc
@@ -231,7 +231,7 @@
     EXPECT_FALSE(data.empty());
 
     EXPECT_TRUE(op.has_type());
-    const InstallOperation_Type expected_type =
+    const InstallOperation::Type expected_type =
         (i == 0 ? InstallOperation::REPLACE : InstallOperation::REPLACE_BZ);
     EXPECT_EQ(expected_type, op.type());
     EXPECT_FALSE(op.has_data_offset());
diff --git a/payload_generator/full_update_generator.cc b/payload_generator/full_update_generator.cc
index 4d8b2f9..94a43ab 100644
--- a/payload_generator/full_update_generator.cc
+++ b/payload_generator/full_update_generator.cc
@@ -99,7 +99,7 @@
       fd_, buffer_in_.data(), buffer_in_.size(), offset_, &bytes_read));
   TEST_AND_RETURN_FALSE(bytes_read == static_cast<ssize_t>(size_));
 
-  InstallOperation_Type op_type;
+  InstallOperation::Type op_type;
   TEST_AND_RETURN_FALSE(diff_utils::GenerateBestFullOperation(
       buffer_in_, version_, &op_blob, &op_type));
 
diff --git a/payload_generator/generate_delta_main.cc b/payload_generator/generate_delta_main.cc
index 69ac8bb..f7df211 100644
--- a/payload_generator/generate_delta_main.cc
+++ b/payload_generator/generate_delta_main.cc
@@ -58,18 +58,15 @@
 constexpr char kPayloadPropertiesFormatJson[] = "json";
 
 void ParseSignatureSizes(const string& signature_sizes_flag,
-                         vector<int>* signature_sizes) {
+                         vector<size_t>* signature_sizes) {
   signature_sizes->clear();
   vector<string> split_strings = base::SplitString(
       signature_sizes_flag, ":", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
   for (const string& str : split_strings) {
-    int size = 0;
-    bool parsing_successful = base::StringToInt(str, &size);
+    size_t size = 0;
+    bool parsing_successful = base::StringToSizeT(str, &size);
     LOG_IF(FATAL, !parsing_successful) << "Invalid signature size: " << str;
 
-    LOG_IF(FATAL, size != (2048 / 8))
-        << "Only signature sizes of 256 bytes are supported.";
-
     signature_sizes->push_back(size);
   }
 }
@@ -106,7 +103,7 @@
   return true;
 }
 
-void CalculateHashForSigning(const vector<int>& sizes,
+void CalculateHashForSigning(const vector<size_t>& sizes,
                              const string& out_hash_file,
                              const string& out_metadata_hash_file,
                              const string& in_file) {
@@ -142,6 +139,7 @@
 
 void SignPayload(const string& in_file,
                  const string& out_file,
+                 const vector<size_t>& signature_sizes,
                  const string& payload_signature_file,
                  const string& metadata_signature_file,
                  const string& out_metadata_size_file) {
@@ -155,6 +153,7 @@
   SignatureFileFlagToBlobs(metadata_signature_file, &metadata_signatures);
   uint64_t final_metadata_size;
   CHECK(PayloadSigner::AddSignatureToPayload(in_file,
+                                             signature_sizes,
                                              payload_signatures,
                                              metadata_signatures,
                                              out_file,
@@ -439,6 +438,13 @@
                 "",
                 "An info file specifying dynamic partition metadata. "
                 "Only allowed in major version 2 or newer.");
+  DEFINE_bool(disable_fec_computation,
+              false,
+              "Disables the fec data computation on device.");
+  DEFINE_string(
+      out_maximum_signature_size_file,
+      "",
+      "Path to the output maximum signature size given a private key.");
 
   brillo::FlagHelper::Init(
       argc,
@@ -460,8 +466,34 @@
   // Initialize the Xz compressor.
   XzCompressInit();
 
-  vector<int> signature_sizes;
-  ParseSignatureSizes(FLAGS_signature_size, &signature_sizes);
+  if (!FLAGS_out_maximum_signature_size_file.empty()) {
+    LOG_IF(FATAL, FLAGS_private_key.empty())
+        << "Private key is not provided when calculating the maximum signature "
+           "size.";
+
+    size_t maximum_signature_size;
+    if (!PayloadSigner::GetMaximumSignatureSize(FLAGS_private_key,
+                                                &maximum_signature_size)) {
+      LOG(ERROR) << "Failed to get the maximum signature size of private key: "
+                 << FLAGS_private_key;
+      return 1;
+    }
+    // Write the size string to output file.
+    string signature_size_string = std::to_string(maximum_signature_size);
+    if (!utils::WriteFile(FLAGS_out_maximum_signature_size_file.c_str(),
+                          signature_size_string.c_str(),
+                          signature_size_string.size())) {
+      PLOG(ERROR) << "Failed to write the maximum signature size to "
+                  << FLAGS_out_maximum_signature_size_file << ".";
+      return 1;
+    }
+    return 0;
+  }
+
+  vector<size_t> signature_sizes;
+  if (!FLAGS_signature_size.empty()) {
+    ParseSignatureSizes(FLAGS_signature_size, &signature_sizes);
+  }
 
   if (!FLAGS_out_hash_file.empty() || !FLAGS_out_metadata_hash_file.empty()) {
     CHECK(FLAGS_out_metadata_size_file.empty());
@@ -474,6 +506,7 @@
   if (!FLAGS_payload_signature_file.empty()) {
     SignPayload(FLAGS_in_file,
                 FLAGS_out_file,
+                signature_sizes,
                 FLAGS_payload_signature_file,
                 FLAGS_metadata_signature_file,
                 FLAGS_out_metadata_size_file);
@@ -542,6 +575,8 @@
         << "Partition name can't be empty, see --partition_names.";
     payload_config.target.partitions.emplace_back(partition_names[i]);
     payload_config.target.partitions.back().path = new_partitions[i];
+    payload_config.target.partitions.back().disable_fec_computation =
+        FLAGS_disable_fec_computation;
     if (i < new_mapfiles.size())
       payload_config.target.partitions.back().mapfile_path = new_mapfiles[i];
   }
diff --git a/payload_generator/payload_file.cc b/payload_generator/payload_file.cc
index b55d03c..69325d7 100644
--- a/payload_generator/payload_file.cc
+++ b/payload_generator/payload_file.cc
@@ -167,7 +167,7 @@
   uint64_t signature_blob_length = 0;
   if (!private_key_path.empty()) {
     TEST_AND_RETURN_FALSE(PayloadSigner::SignatureBlobLength(
-        vector<string>(1, private_key_path), &signature_blob_length));
+        {private_key_path}, &signature_blob_length));
     PayloadSigner::AddSignatureToManifest(
         next_blob_offset,
         signature_blob_length,
@@ -176,7 +176,7 @@
 
   // Serialize protobuf
   string serialized_manifest;
-  TEST_AND_RETURN_FALSE(manifest_.AppendToString(&serialized_manifest));
+  TEST_AND_RETURN_FALSE(manifest_.SerializeToString(&serialized_manifest));
 
   uint64_t metadata_size =
       sizeof(kDeltaMagic) + 2 * sizeof(uint64_t) + serialized_manifest.size();
@@ -215,13 +215,12 @@
 
   // Write metadata signature blob.
   if (!private_key_path.empty()) {
-    brillo::Blob metadata_hash, metadata_signature;
+    brillo::Blob metadata_hash;
     TEST_AND_RETURN_FALSE(HashCalculator::RawHashOfFile(
         payload_file, metadata_size, &metadata_hash));
-    TEST_AND_RETURN_FALSE(
-        PayloadSigner::SignHashWithKeys(metadata_hash,
-                                        vector<string>(1, private_key_path),
-                                        &metadata_signature));
+    string metadata_signature;
+    TEST_AND_RETURN_FALSE(PayloadSigner::SignHashWithKeys(
+        metadata_hash, {private_key_path}, &metadata_signature));
     TEST_AND_RETURN_FALSE_ERRNO(
         writer.Write(metadata_signature.data(), metadata_signature.size()));
   }
@@ -245,16 +244,16 @@
   // Write payload signature blob.
   if (!private_key_path.empty()) {
     LOG(INFO) << "Signing the update...";
-    brillo::Blob signature_blob;
+    string signature;
     TEST_AND_RETURN_FALSE(PayloadSigner::SignPayload(
         payload_file,
-        vector<string>(1, private_key_path),
+        {private_key_path},
         metadata_size,
         metadata_signature_size,
         metadata_size + metadata_signature_size + manifest_.signatures_offset(),
-        &signature_blob));
+        &signature));
     TEST_AND_RETURN_FALSE_ERRNO(
-        writer.Write(signature_blob.data(), signature_blob.size()));
+        writer.Write(signature.data(), signature.size()));
   }
 
   ReportPayloadUsage(metadata_size);
@@ -330,15 +329,15 @@
     const DeltaObject& object = object_count.first;
     // Use printf() instead of LOG(INFO) because timestamp makes it difficult to
     // compare two reports.
-    printf(
-        kFormatString,
-        object.size * 100.0 / total_size,
-        object.size,
-        (object.type >= 0 ? InstallOperationTypeName(
-                                static_cast<InstallOperation_Type>(object.type))
-                          : "-"),
-        object.name.c_str(),
-        object_count.second);
+    printf(kFormatString,
+           object.size * 100.0 / total_size,
+           object.size,
+           (object.type >= 0
+                ? InstallOperationTypeName(
+                      static_cast<InstallOperation::Type>(object.type))
+                : "-"),
+           object.name.c_str(),
+           object_count.second);
   }
   printf(kFormatString, 100.0, total_size, "", "<total>", total_op);
   fflush(stdout);
diff --git a/payload_generator/payload_generation_config.cc b/payload_generator/payload_generation_config.cc
index e1f700a..b653a03 100644
--- a/payload_generator/payload_generation_config.cc
+++ b/payload_generator/payload_generation_config.cc
@@ -150,25 +150,34 @@
   for (const auto& group_name : group_names) {
     DynamicPartitionGroup* group = metadata->add_groups();
     group->set_name(group_name);
-    if (!store.GetString(group_name + "_size", &buf)) {
-      LOG(ERROR) << "Missing " << group_name + "_size.";
+    if (!store.GetString("super_" + group_name + "_group_size", &buf) &&
+        !store.GetString(group_name + "_size", &buf)) {
+      LOG(ERROR) << "Missing super_" << group_name + "_group_size or "
+                 << group_name << "_size.";
       return false;
     }
 
     uint64_t max_size;
     if (!base::StringToUint64(buf, &max_size)) {
-      LOG(ERROR) << group_name << "_size=" << buf << " is not an integer.";
+      LOG(ERROR) << "Group size for " << group_name << " = " << buf
+                 << " is not an integer.";
       return false;
     }
     group->set_size(max_size);
 
-    if (store.GetString(group_name + "_partition_list", &buf)) {
+    if (store.GetString("super_" + group_name + "_partition_list", &buf) ||
+        store.GetString(group_name + "_partition_list", &buf)) {
       auto partition_names = brillo::string_utils::Split(buf, " ");
       for (const auto& partition_name : partition_names) {
         group->add_partition_names()->assign(partition_name);
       }
     }
   }
+
+  bool snapshot_enabled = false;
+  store.GetBoolean("virtual_ab", &snapshot_enabled);
+  metadata->set_snapshot_enabled(snapshot_enabled);
+
   dynamic_partition_metadata = std::move(metadata);
   return true;
 }
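
For illustration, a dynamic partition info file that exercises the new key names and the Virtual A/B flag accepted above could look like the following (group name, size, and partition list are placeholder values):

    super_partition_groups=group_a
    super_group_a_group_size=3221225472
    super_group_a_partition_list=system system_ext
    virtual_ab=true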
@@ -229,7 +238,7 @@
   return true;
 }
 
-bool PayloadVersion::OperationAllowed(InstallOperation_Type operation) const {
+bool PayloadVersion::OperationAllowed(InstallOperation::Type operation) const {
   switch (operation) {
     // Full operations:
     case InstallOperation::REPLACE:
diff --git a/payload_generator/payload_generation_config.h b/payload_generator/payload_generation_config.h
index 32f1229..af6f181 100644
--- a/payload_generator/payload_generation_config.h
+++ b/payload_generator/payload_generation_config.h
@@ -116,6 +116,9 @@
 
   PostInstallConfig postinstall;
   VerityConfig verity;
+
+  // On-device FEC data computation is enabled by default.
+  bool disable_fec_computation = false;
 };
 
 // The ImageConfig struct describes a pair of binaries kernel and rootfs and the
@@ -165,7 +168,7 @@
   bool Validate() const;
 
   // Return whether the passed |operation| is allowed by this payload.
-  bool OperationAllowed(InstallOperation_Type operation) const;
+  bool OperationAllowed(InstallOperation::Type operation) const;
 
   // Whether this payload version is a delta payload.
   bool IsDelta() const;
diff --git a/payload_generator/payload_generation_config_android.cc b/payload_generator/payload_generation_config_android.cc
index 90c053f..d950092 100644
--- a/payload_generator/payload_generation_config_android.cc
+++ b/payload_generator/payload_generation_config_android.cc
@@ -63,11 +63,13 @@
   part->verity.hash_tree_extent = ExtentForBytes(
       hashtree.hash_block_size, hashtree.tree_offset, hashtree.tree_size);
 
-  part->verity.fec_data_extent =
-      ExtentForBytes(hashtree.data_block_size, 0, hashtree.fec_offset);
-  part->verity.fec_extent = ExtentForBytes(
-      hashtree.data_block_size, hashtree.fec_offset, hashtree.fec_size);
-  part->verity.fec_roots = hashtree.fec_num_roots;
+  if (!part->disable_fec_computation) {
+    part->verity.fec_data_extent =
+        ExtentForBytes(hashtree.data_block_size, 0, hashtree.fec_offset);
+    part->verity.fec_extent = ExtentForBytes(
+        hashtree.data_block_size, hashtree.fec_offset, hashtree.fec_size);
+    part->verity.fec_roots = hashtree.fec_num_roots;
+  }
   return true;
 }
 
@@ -205,7 +207,8 @@
               ExtentForRange(hash_start_block, tree_size / block_size);
         }
         fec_ecc_metadata ecc_data;
-        if (fh.get_ecc_metadata(ecc_data) && ecc_data.valid) {
+        if (!part.disable_fec_computation && fh.get_ecc_metadata(ecc_data) &&
+            ecc_data.valid) {
           TEST_AND_RETURN_FALSE(block_size == FEC_BLOCKSIZE);
           part.verity.fec_data_extent = ExtentForRange(0, ecc_data.blocks);
           part.verity.fec_extent =
diff --git a/payload_generator/payload_generation_config_android_unittest.cc b/payload_generator/payload_generation_config_android_unittest.cc
index 53378c2..44eaf55 100644
--- a/payload_generator/payload_generation_config_android_unittest.cc
+++ b/payload_generator/payload_generation_config_android_unittest.cc
@@ -160,6 +160,24 @@
   EXPECT_EQ(2u, verity.fec_roots);
 }
 
+TEST_F(PayloadGenerationConfigAndroidTest, LoadVerityConfigDisableFecTest) {
+  brillo::Blob part = GetAVBPartition();
+  test_utils::WriteFileVector(temp_file_.path(), part);
+  image_config_.partitions[0].disable_fec_computation = true;
+  EXPECT_TRUE(image_config_.LoadImageSize());
+  EXPECT_TRUE(image_config_.partitions[0].OpenFilesystem());
+  EXPECT_TRUE(image_config_.LoadVerityConfig());
+  const VerityConfig& verity = image_config_.partitions[0].verity;
+  EXPECT_FALSE(verity.IsEmpty());
+  EXPECT_EQ(ExtentForRange(0, 2), verity.hash_tree_data_extent);
+  EXPECT_EQ(ExtentForRange(2, 1), verity.hash_tree_extent);
+  EXPECT_EQ("sha1", verity.hash_tree_algorithm);
+  brillo::Blob salt(kHashTreeSalt, std::end(kHashTreeSalt));
+  EXPECT_EQ(salt, verity.hash_tree_salt);
+  EXPECT_EQ(0u, verity.fec_data_extent.num_blocks());
+  EXPECT_EQ(0u, verity.fec_extent.num_blocks());
+}
+
 TEST_F(PayloadGenerationConfigAndroidTest,
        LoadVerityConfigInvalidHashTreeTest) {
   brillo::Blob part = GetAVBPartition();
diff --git a/payload_generator/payload_generation_config_unittest.cc b/payload_generator/payload_generation_config_unittest.cc
index 70a3df3..aca9655 100644
--- a/payload_generator/payload_generation_config_unittest.cc
+++ b/payload_generator/payload_generation_config_unittest.cc
@@ -59,7 +59,7 @@
   ASSERT_TRUE(
       store.LoadFromString("super_partition_groups=group_a group_b\n"
                            "group_a_size=3221225472\n"
-                           "group_a_partition_list=system product_services\n"
+                           "group_a_partition_list=system system_ext\n"
                            "group_b_size=2147483648\n"
                            "group_b_partition_list=vendor\n"));
   EXPECT_TRUE(image_config.LoadDynamicPartitionMetadata(store));
@@ -72,7 +72,7 @@
   EXPECT_EQ(3221225472u, group_a.size());
   ASSERT_EQ(2, group_a.partition_names_size());
   EXPECT_EQ("system", group_a.partition_names(0));
-  EXPECT_EQ("product_services", group_a.partition_names(1));
+  EXPECT_EQ("system_ext", group_a.partition_names(1));
 
   const auto& group_b = image_config.dynamic_partition_metadata->groups(1);
   EXPECT_EQ("group_b", group_b.name());
@@ -108,17 +108,17 @@
 
   PartitionConfig system("system");
   system.size = 2147483648u;
-  PartitionConfig product_services("product_services");
-  product_services.size = 1073741824u;
+  PartitionConfig system_ext("system_ext");
+  system_ext.size = 1073741824u;
 
   image_config.partitions.push_back(std::move(system));
-  image_config.partitions.push_back(std::move(product_services));
+  image_config.partitions.push_back(std::move(system_ext));
 
   brillo::KeyValueStore store;
   ASSERT_TRUE(
       store.LoadFromString("super_partition_groups=foo\n"
                            "foo_size=3221225472\n"
-                           "foo_partition_list=system product_services\n"));
+                           "foo_partition_list=system system_ext\n"));
   EXPECT_TRUE(image_config.LoadDynamicPartitionMetadata(store));
   EXPECT_NE(nullptr, image_config.dynamic_partition_metadata);
 
diff --git a/payload_generator/payload_signer.cc b/payload_generator/payload_signer.cc
index 420329f..7e5fd4e 100644
--- a/payload_generator/payload_signer.cc
+++ b/payload_generator/payload_signer.cc
@@ -18,6 +18,7 @@
 
 #include <endian.h>
 
+#include <memory>
 #include <utility>
 
 #include <base/logging.h>
@@ -28,6 +29,7 @@
 #include <openssl/err.h>
 #include <openssl/pem.h>
 
+#include "update_engine/common/constants.h"
 #include "update_engine/common/hash_calculator.h"
 #include "update_engine/common/subprocess.h"
 #include "update_engine/common/utils.h"
@@ -45,45 +47,49 @@
 namespace chromeos_update_engine {
 
 namespace {
-
-// The payload verifier will check all the signatures included in the payload
-// regardless of the version field. Old version of the verifier require the
-// version field to be included and be 1.
-const uint32_t kSignatureMessageLegacyVersion = 1;
-
 // Given raw |signatures|, packs them into a protobuf and serializes it into a
-// binary blob. Returns true on success, false otherwise.
-bool ConvertSignatureToProtobufBlob(const vector<brillo::Blob>& signatures,
-                                    brillo::Blob* out_signature_blob) {
+// string. Returns true on success, false otherwise.
+bool ConvertSignaturesToProtobuf(const vector<brillo::Blob>& signatures,
+                                 const vector<size_t>& padded_signature_sizes,
+                                 string* out_serialized_signature) {
+  TEST_AND_RETURN_FALSE(signatures.size() == padded_signature_sizes.size());
   // Pack it into a protobuf
   Signatures out_message;
-  for (const brillo::Blob& signature : signatures) {
-    Signatures_Signature* sig_message = out_message.add_signatures();
-    // Set all the signatures with the same version number.
-    sig_message->set_version(kSignatureMessageLegacyVersion);
-    sig_message->set_data(signature.data(), signature.size());
+  for (size_t i = 0; i < signatures.size(); i++) {
+    const auto& signature = signatures[i];
+    const auto& padded_signature_size = padded_signature_sizes[i];
+    TEST_AND_RETURN_FALSE(padded_signature_size >= signature.size());
+    Signatures::Signature* sig_message = out_message.add_signatures();
+    // Skip assigning the legacy version number because we no longer need to
+    // be compatible with the old major version 1 client.
+
+    // TODO(Xunchang) don't need to set the unpadded_signature_size field for
+    // RSA key signed signatures.
+    sig_message->set_unpadded_signature_size(signature.size());
+    brillo::Blob padded_signature = signature;
+    padded_signature.insert(
+        padded_signature.end(), padded_signature_size - signature.size(), 0);
+    sig_message->set_data(padded_signature.data(), padded_signature.size());
   }
 
   // Serialize protobuf
-  string serialized;
-  TEST_AND_RETURN_FALSE(out_message.AppendToString(&serialized));
-  out_signature_blob->insert(
-      out_signature_blob->end(), serialized.begin(), serialized.end());
-  LOG(INFO) << "Signature blob size: " << out_signature_blob->size();
+  TEST_AND_RETURN_FALSE(
+      out_message.SerializeToString(out_serialized_signature));
+  LOG(INFO) << "Signature blob size: " << out_serialized_signature->size();
   return true;
 }
 
-// Given an unsigned payload under |payload_path| and the |signature_blob| and
-// |metadata_signature_blob| generates an updated payload that includes the
+// Given an unsigned payload under |payload_path|, and the |payload_signature|
+// and |metadata_signature|, generates an updated payload that includes the
 // signatures. It populates |out_metadata_size| with the size of the final
 // manifest after adding the dummy signature operation, and
 // |out_signatures_offset| with the expected offset for the new blob, and
-// |out_metadata_signature_size| which will be size of |metadata_signature_blob|
+// |out_metadata_signature_size| which will be size of |metadata_signature|
 // if the payload major version supports metadata signature, 0 otherwise.
 // Returns true on success, false otherwise.
 bool AddSignatureBlobToPayload(const string& payload_path,
-                               const brillo::Blob& signature_blob,
-                               const brillo::Blob& metadata_signature_blob,
+                               const string& payload_signature,
+                               const string& metadata_signature,
                                brillo::Blob* out_payload,
                                uint64_t* out_metadata_size,
                                uint32_t* out_metadata_signature_size,
@@ -99,7 +105,7 @@
   uint32_t metadata_signature_size =
       payload_metadata.GetMetadataSignatureSize();
   // Write metadata signature size in header.
-  uint32_t metadata_signature_size_be = htobe32(metadata_signature_blob.size());
+  uint32_t metadata_signature_size_be = htobe32(metadata_signature.size());
   memcpy(payload.data() + manifest_offset,
          &metadata_signature_size_be,
          sizeof(metadata_signature_size_be));
@@ -108,9 +114,9 @@
   payload.erase(payload.begin() + metadata_size,
                 payload.begin() + metadata_size + metadata_signature_size);
   payload.insert(payload.begin() + metadata_size,
-                 metadata_signature_blob.begin(),
-                 metadata_signature_blob.end());
-  metadata_signature_size = metadata_signature_blob.size();
+                 metadata_signature.begin(),
+                 metadata_signature.end());
+  metadata_signature_size = metadata_signature.size();
   LOG(INFO) << "Metadata signature size: " << metadata_signature_size;
 
   DeltaArchiveManifest manifest;
@@ -122,10 +128,10 @@
     // contents. We don't allow the manifest to change if there is already an op
     // present, because that might invalidate previously generated
     // hashes/signatures.
-    if (manifest.signatures_size() != signature_blob.size()) {
+    if (manifest.signatures_size() != payload_signature.size()) {
       LOG(ERROR) << "Attempt to insert different signature sized blob. "
                  << "(current:" << manifest.signatures_size()
-                 << "new:" << signature_blob.size() << ")";
+                 << "new:" << payload_signature.size() << ")";
       return false;
     }
 
@@ -134,7 +140,7 @@
     // Updates the manifest to include the signature operation.
     PayloadSigner::AddSignatureToManifest(
         payload.size() - metadata_size - metadata_signature_size,
-        signature_blob.size(),
+        payload_signature.size(),
         &manifest);
 
     // Updates the payload to include the new manifest.
@@ -160,8 +166,8 @@
   LOG(INFO) << "Signature Blob Offset: " << signatures_offset;
   payload.resize(signatures_offset);
   payload.insert(payload.begin() + signatures_offset,
-                 signature_blob.begin(),
-                 signature_blob.end());
+                 payload_signature.begin(),
+                 payload_signature.end());
 
   *out_payload = std::move(payload);
   *out_metadata_size = metadata_size;
@@ -201,8 +207,35 @@
   return true;
 }
 
+std::unique_ptr<EVP_PKEY, decltype(&EVP_PKEY_free)> CreatePrivateKeyFromPath(
+    const string& private_key_path) {
+  FILE* fprikey = fopen(private_key_path.c_str(), "rb");
+  if (!fprikey) {
+    PLOG(ERROR) << "Failed to read " << private_key_path;
+    return {nullptr, nullptr};
+  }
+
+  auto private_key = std::unique_ptr<EVP_PKEY, decltype(&EVP_PKEY_free)>(
+      PEM_read_PrivateKey(fprikey, nullptr, nullptr, nullptr), EVP_PKEY_free);
+  fclose(fprikey);
+  return private_key;
+}
+
 }  // namespace
 
+bool PayloadSigner::GetMaximumSignatureSize(const string& private_key_path,
+                                            size_t* signature_size) {
+  *signature_size = 0;
+  auto private_key = CreatePrivateKeyFromPath(private_key_path);
+  if (!private_key) {
+    LOG(ERROR) << "Failed to create private key from " << private_key_path;
+    return false;
+  }
+
+  *signature_size = EVP_PKEY_size(private_key.get());
+  return true;
+}
+
 void PayloadSigner::AddSignatureToManifest(uint64_t signature_blob_offset,
                                            uint64_t signature_blob_length,
                                            DeltaArchiveManifest* manifest) {
@@ -236,21 +269,22 @@
                                                  signatures_offset,
                                                  &payload_hash,
                                                  &metadata_hash));
-  brillo::Blob signature_blob(payload.begin() + signatures_offset,
-                              payload.end());
+  string signature(payload.begin() + signatures_offset, payload.end());
   string public_key;
   TEST_AND_RETURN_FALSE(utils::ReadFile(public_key_path, &public_key));
-  TEST_AND_RETURN_FALSE(PayloadVerifier::PadRSA2048SHA256Hash(&payload_hash));
-  TEST_AND_RETURN_FALSE(PayloadVerifier::VerifySignature(
-      signature_blob, public_key, payload_hash));
+  TEST_AND_RETURN_FALSE(payload_hash.size() == kSHA256Size);
+
+  auto payload_verifier = PayloadVerifier::CreateInstance(public_key);
+  TEST_AND_RETURN_FALSE(payload_verifier != nullptr);
+
+  TEST_AND_RETURN_FALSE(
+      payload_verifier->VerifySignature(signature, payload_hash));
   if (metadata_signature_size) {
-    signature_blob.assign(
-        payload.begin() + metadata_size,
-        payload.begin() + metadata_size + metadata_signature_size);
+    signature.assign(payload.begin() + metadata_size,
+                     payload.begin() + metadata_size + metadata_signature_size);
+    TEST_AND_RETURN_FALSE(metadata_hash.size() == kSHA256Size);
     TEST_AND_RETURN_FALSE(
-        PayloadVerifier::PadRSA2048SHA256Hash(&metadata_hash));
-    TEST_AND_RETURN_FALSE(PayloadVerifier::VerifySignature(
-        signature_blob, public_key, metadata_hash));
+        payload_verifier->VerifySignature(signature, metadata_hash));
   }
   return true;
 }
@@ -260,49 +294,88 @@
                              brillo::Blob* out_signature) {
   LOG(INFO) << "Signing hash with private key: " << private_key_path;
   // We expect unpadded SHA256 hash coming in
-  TEST_AND_RETURN_FALSE(hash.size() == 32);
-  brillo::Blob padded_hash(hash);
-  PayloadVerifier::PadRSA2048SHA256Hash(&padded_hash);
-
+  TEST_AND_RETURN_FALSE(hash.size() == kSHA256Size);
   // The code below executes the equivalent of:
   //
   // openssl rsautl -raw -sign -inkey |private_key_path|
   //   -in |padded_hash| -out |out_signature|
 
-  FILE* fprikey = fopen(private_key_path.c_str(), "rb");
-  TEST_AND_RETURN_FALSE(fprikey != nullptr);
-  RSA* rsa = PEM_read_RSAPrivateKey(fprikey, nullptr, nullptr, nullptr);
-  fclose(fprikey);
-  TEST_AND_RETURN_FALSE(rsa != nullptr);
-  brillo::Blob signature(RSA_size(rsa));
-  ssize_t signature_size = RSA_private_encrypt(padded_hash.size(),
-                                               padded_hash.data(),
-                                               signature.data(),
-                                               rsa,
-                                               RSA_NO_PADDING);
-  RSA_free(rsa);
-  if (signature_size < 0) {
-    LOG(ERROR) << "Signing hash failed: "
-               << ERR_error_string(ERR_get_error(), nullptr);
+  auto private_key = CreatePrivateKeyFromPath(private_key_path);
+  if (!private_key) {
+    LOG(ERROR) << "Failed to create private key from " << private_key_path;
     return false;
   }
-  TEST_AND_RETURN_FALSE(static_cast<size_t>(signature_size) ==
-                        signature.size());
+
+  int key_type = EVP_PKEY_id(private_key.get());
+  brillo::Blob signature;
+  if (key_type == EVP_PKEY_RSA) {
+    RSA* rsa = EVP_PKEY_get0_RSA(private_key.get());
+    TEST_AND_RETURN_FALSE(rsa != nullptr);
+
+    brillo::Blob padded_hash = hash;
+    PayloadVerifier::PadRSASHA256Hash(&padded_hash, RSA_size(rsa));
+
+    signature.resize(RSA_size(rsa));
+    ssize_t signature_size = RSA_private_encrypt(padded_hash.size(),
+                                                 padded_hash.data(),
+                                                 signature.data(),
+                                                 rsa,
+                                                 RSA_NO_PADDING);
+
+    if (signature_size < 0) {
+      LOG(ERROR) << "Signing hash failed: "
+                 << ERR_error_string(ERR_get_error(), nullptr);
+      return false;
+    }
+    TEST_AND_RETURN_FALSE(static_cast<size_t>(signature_size) ==
+                          signature.size());
+  } else if (key_type == EVP_PKEY_EC) {
+    EC_KEY* ec_key = EVP_PKEY_get0_EC_KEY(private_key.get());
+    TEST_AND_RETURN_FALSE(ec_key != nullptr);
+
+    signature.resize(ECDSA_size(ec_key));
+    unsigned int signature_size;
+    if (ECDSA_sign(0,
+                   hash.data(),
+                   hash.size(),
+                   signature.data(),
+                   &signature_size,
+                   ec_key) != 1) {
+      LOG(ERROR) << "Signing hash failed: "
+                 << ERR_error_string(ERR_get_error(), nullptr);
+      return false;
+    }
+
+    // NIST P-256
+    LOG(ERROR) << "signature max size " << signature.size() << " size "
+               << signature_size;
+    TEST_AND_RETURN_FALSE(signature.size() >= signature_size);
+    signature.resize(signature_size);
+  } else {
+    LOG(ERROR) << "key_type " << key_type << " isn't supported for signing";
+    return false;
+  }
   out_signature->swap(signature);
   return true;
 }
 
 bool PayloadSigner::SignHashWithKeys(const brillo::Blob& hash_data,
                                      const vector<string>& private_key_paths,
-                                     brillo::Blob* out_signature_blob) {
+                                     string* out_serialized_signature) {
   vector<brillo::Blob> signatures;
+  vector<size_t> padded_signature_sizes;
   for (const string& path : private_key_paths) {
     brillo::Blob signature;
     TEST_AND_RETURN_FALSE(SignHash(hash_data, path, &signature));
     signatures.push_back(signature);
+
+    size_t padded_signature_size;
+    TEST_AND_RETURN_FALSE(
+        GetMaximumSignatureSize(path, &padded_signature_size));
+    padded_signature_sizes.push_back(padded_signature_size);
   }
-  TEST_AND_RETURN_FALSE(
-      ConvertSignatureToProtobufBlob(signatures, out_signature_blob));
+  TEST_AND_RETURN_FALSE(ConvertSignaturesToProtobuf(
+      signatures, padded_signature_sizes, out_serialized_signature));
   return true;
 }
 
@@ -311,7 +384,7 @@
                                 const uint64_t metadata_size,
                                 const uint32_t metadata_signature_size,
                                 const uint64_t signatures_offset,
-                                brillo::Blob* out_signature_blob) {
+                                string* out_serialized_signature) {
   brillo::Blob payload;
   TEST_AND_RETURN_FALSE(utils::ReadFile(unsigned_payload_path, &payload));
   brillo::Blob hash_data;
@@ -322,16 +395,16 @@
                                                  &hash_data,
                                                  nullptr));
   TEST_AND_RETURN_FALSE(
-      SignHashWithKeys(hash_data, private_key_paths, out_signature_blob));
+      SignHashWithKeys(hash_data, private_key_paths, out_serialized_signature));
   return true;
 }
 
 bool PayloadSigner::SignatureBlobLength(const vector<string>& private_key_paths,
                                         uint64_t* out_length) {
   DCHECK(out_length);
-  brillo::Blob x_blob(1, 'x'), hash_blob, sig_blob;
-  TEST_AND_RETURN_FALSE(
-      HashCalculator::RawHashOfBytes(x_blob.data(), x_blob.size(), &hash_blob));
+  brillo::Blob hash_blob;
+  TEST_AND_RETURN_FALSE(HashCalculator::RawHashOfData({'x'}, &hash_blob));
+  string sig_blob;
   TEST_AND_RETURN_FALSE(
       SignHashWithKeys(hash_blob, private_key_paths, &sig_blob));
   *out_length = sig_blob.size();
@@ -339,7 +412,7 @@
 }
 
 bool PayloadSigner::HashPayloadForSigning(const string& payload_path,
-                                          const vector<int>& signature_sizes,
+                                          const vector<size_t>& signature_sizes,
                                           brillo::Blob* out_payload_hash_data,
                                           brillo::Blob* out_metadata_hash) {
   // Create a signature blob with signatures filled with 0.
@@ -348,17 +421,17 @@
   for (int signature_size : signature_sizes) {
     signatures.emplace_back(signature_size, 0);
   }
-  brillo::Blob signature_blob;
+  string signature;
   TEST_AND_RETURN_FALSE(
-      ConvertSignatureToProtobufBlob(signatures, &signature_blob));
+      ConvertSignaturesToProtobuf(signatures, signature_sizes, &signature));
 
   brillo::Blob payload;
   uint64_t metadata_size, signatures_offset;
   uint32_t metadata_signature_size;
   // Prepare payload for hashing.
   TEST_AND_RETURN_FALSE(AddSignatureBlobToPayload(payload_path,
-                                                  signature_blob,
-                                                  signature_blob,
+                                                  signature,
+                                                  signature,
                                                   &payload,
                                                   &metadata_size,
                                                   &metadata_signature_size,
@@ -374,6 +447,7 @@
 
 bool PayloadSigner::AddSignatureToPayload(
     const string& payload_path,
+    const vector<size_t>& padded_signature_sizes,
     const vector<brillo::Blob>& payload_signatures,
     const vector<brillo::Blob>& metadata_signatures,
     const string& signed_payload_path,
@@ -381,19 +455,19 @@
   // TODO(petkov): Reduce memory usage -- the payload is manipulated in memory.
 
   // Loads the payload and adds the signature op to it.
-  brillo::Blob signature_blob, metadata_signature_blob;
-  TEST_AND_RETURN_FALSE(
-      ConvertSignatureToProtobufBlob(payload_signatures, &signature_blob));
+  string payload_signature, metadata_signature;
+  TEST_AND_RETURN_FALSE(ConvertSignaturesToProtobuf(
+      payload_signatures, padded_signature_sizes, &payload_signature));
   if (!metadata_signatures.empty()) {
-    TEST_AND_RETURN_FALSE(ConvertSignatureToProtobufBlob(
-        metadata_signatures, &metadata_signature_blob));
+    TEST_AND_RETURN_FALSE(ConvertSignaturesToProtobuf(
+        metadata_signatures, padded_signature_sizes, &metadata_signature));
   }
   brillo::Blob payload;
   uint64_t signatures_offset;
   uint32_t metadata_signature_size;
   TEST_AND_RETURN_FALSE(AddSignatureBlobToPayload(payload_path,
-                                                  signature_blob,
-                                                  metadata_signature_blob,
+                                                  payload_signature,
+                                                  metadata_signature,
                                                   &payload,
                                                   out_metadata_size,
                                                   &metadata_signature_size,
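
For context, the hunks above replace the RSA-only RSA_private_encrypt path with a dispatch on the key type loaded via EVP_PKEY. Below is a minimal, standalone sketch of that dispatch (not part of the patch; it assumes OpenSSL/BoringSSL and uses RSA_sign, which applies the PKCS#1 v1.5 padding that the patch performs explicitly via PayloadVerifier::PadRSASHA256Hash; SignSha256Digest is a hypothetical helper name):

#include <cstdint>
#include <cstdio>
#include <vector>

#include <openssl/ec.h>
#include <openssl/ecdsa.h>
#include <openssl/evp.h>
#include <openssl/objects.h>
#include <openssl/pem.h>
#include <openssl/rsa.h>

// Signs a 32-byte SHA-256 digest with the PEM private key at |key_path|.
// RSA keys yield a PKCS#1 v1.5 signature; EC keys yield a DER-encoded
// ECDSA signature whose size is bounded by ECDSA_size().
bool SignSha256Digest(const char* key_path,
                      const std::vector<uint8_t>& digest,
                      std::vector<uint8_t>* signature) {
  FILE* file = fopen(key_path, "rb");
  if (!file)
    return false;
  EVP_PKEY* key = PEM_read_PrivateKey(file, nullptr, nullptr, nullptr);
  fclose(file);
  if (!key)
    return false;

  bool ok = false;
  unsigned int sig_len = 0;
  if (EVP_PKEY_id(key) == EVP_PKEY_RSA) {
    RSA* rsa = EVP_PKEY_get0_RSA(key);  // Borrowed reference; do not free.
    signature->resize(RSA_size(rsa));
    ok = RSA_sign(NID_sha256, digest.data(), digest.size(),
                  signature->data(), &sig_len, rsa) == 1;
  } else if (EVP_PKEY_id(key) == EVP_PKEY_EC) {
    EC_KEY* ec_key = EVP_PKEY_get0_EC_KEY(key);  // Borrowed reference.
    signature->resize(ECDSA_size(ec_key));       // Upper bound (~72 for P-256).
    ok = ECDSA_sign(0, digest.data(), digest.size(),
                    signature->data(), &sig_len, ec_key) == 1;
  }
  signature->resize(ok ? sig_len : 0);
  EVP_PKEY_free(key);
  return ok;
}

The maximum size reported by the new GetMaximumSignatureSize corresponds to EVP_PKEY_size() on the same key, which is what the per-key signature slots are padded to.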
diff --git a/payload_generator/payload_signer.h b/payload_generator/payload_signer.h
index 71f4983..06e4823 100644
--- a/payload_generator/payload_signer.h
+++ b/payload_generator/payload_signer.h
@@ -51,17 +51,17 @@
                        brillo::Blob* out_signature);
 
   // Sign |hash_data| blob with all private keys in |private_key_paths|, then
-  // convert the signatures to protobuf blob.
+  // convert the signatures to serialized protobuf.
   static bool SignHashWithKeys(
       const brillo::Blob& hash_data,
       const std::vector<std::string>& private_key_paths,
-      brillo::Blob* out_signature_blob);
+      std::string* out_serialized_signature);
 
   // Given an unsigned payload in |unsigned_payload_path|, private keys in
   // |private_key_path|, metadata size in |metadata_size|, metadata signature
   // size in |metadata_signature_size| and signatures offset in
   // |signatures_offset|, calculates the payload signature blob into
-  // |out_signature_blob|. Note that the payload must already have an
+  // |out_serialized_signature|. Note that the payload must already have an
   // updated manifest that includes the dummy signature op and correct metadata
   // signature size in header. Returns true on success, false otherwise.
   static bool SignPayload(const std::string& unsigned_payload_path,
@@ -69,9 +69,9 @@
                           const uint64_t metadata_size,
                           const uint32_t metadata_signature_size,
                           const uint64_t signatures_offset,
-                          brillo::Blob* out_signature_blob);
+                          std::string* out_serialized_signature);
 
-  // Returns the length of out_signature_blob that will result in a call
+  // Returns the length of out_serialized_signature that will result from a call
   // to SignPayload with the given private keys. Returns true on success.
   static bool SignatureBlobLength(
       const std::vector<std::string>& private_key_paths, uint64_t* out_length);
@@ -88,7 +88,7 @@
   //
   // The changes to payload are not preserved or written to disk.
   static bool HashPayloadForSigning(const std::string& payload_path,
-                                    const std::vector<int>& signature_sizes,
+                                    const std::vector<size_t>& signature_sizes,
                                     brillo::Blob* out_payload_hash_data,
                                     brillo::Blob* out_metadata_hash);
 
@@ -102,6 +102,7 @@
   // otherwise.
   static bool AddSignatureToPayload(
       const std::string& payload_path,
+      const std::vector<size_t>& padded_signature_sizes,
       const std::vector<brillo::Blob>& payload_signatures,
       const std::vector<brillo::Blob>& metadata_signatures,
       const std::string& signed_payload_path,
@@ -116,6 +117,16 @@
                                    const std::string& private_key_path,
                                    std::string* out_signature);
 
+  static bool ExtractPayloadProperties(const std::string& payload_path,
+                                       brillo::KeyValueStore* properties);
+
+  // This function calculates the maximum size, in bytes, of a signature signed
+  // by private_key_path. For an RSA key, this returns the number of bytes
+  // needed to represent the modulus. For an EC key, this returns the maximum
+  // size of a DER-encoded ECDSA signature.
+  static bool GetMaximumSignatureSize(const std::string& private_key_path,
+                                      size_t* signature_size);
+
  private:
   // This should never be constructed
   DISALLOW_IMPLICIT_CONSTRUCTORS(PayloadSigner);
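
The header changes above thread a vector of padded signature sizes through the hashing and re-signing flow. The following is a sketch of the intended end-to-end call sequence, assuming only the declarations in this header (the trailing out_metadata_size argument of AddSignatureToPayload is inferred from the .cc changes, and error handling is abbreviated):

#include <cstdint>
#include <string>
#include <vector>

#include <brillo/secure_blob.h>  // brillo::Blob

#include "update_engine/payload_generator/payload_signer.h"

using chromeos_update_engine::PayloadSigner;

bool SignPayloadWithKeys(const std::string& payload_path,
                         const std::vector<std::string>& key_paths,
                         const std::string& signed_payload_path) {
  // 1. Reserve a signature slot of the maximum size for each key.
  std::vector<size_t> padded_sizes;
  for (const auto& key : key_paths) {
    size_t size = 0;
    if (!PayloadSigner::GetMaximumSignatureSize(key, &size))
      return false;
    padded_sizes.push_back(size);
  }

  // 2. Hash the payload with zero-filled placeholder signatures of those sizes.
  brillo::Blob payload_hash, metadata_hash;
  if (!PayloadSigner::HashPayloadForSigning(
          payload_path, padded_sizes, &payload_hash, &metadata_hash))
    return false;

  // 3. Sign both hashes with every key (this step can also happen offline).
  std::vector<brillo::Blob> payload_sigs, metadata_sigs;
  for (const auto& key : key_paths) {
    brillo::Blob sig;
    if (!PayloadSigner::SignHash(payload_hash, key, &sig))
      return false;
    payload_sigs.push_back(sig);
    if (!PayloadSigner::SignHash(metadata_hash, key, &sig))
      return false;
    metadata_sigs.push_back(sig);
  }

  // 4. Splice the real signatures back in, padded to the reserved sizes.
  uint64_t metadata_size = 0;
  return PayloadSigner::AddSignatureToPayload(payload_path,
                                              padded_sizes,
                                              payload_sigs,
                                              metadata_sigs,
                                              signed_payload_path,
                                              &metadata_size);
}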
diff --git a/payload_generator/payload_signer_unittest.cc b/payload_generator/payload_signer_unittest.cc
index f7f9c69..fe62997 100644
--- a/payload_generator/payload_signer_unittest.cc
+++ b/payload_generator/payload_signer_unittest.cc
@@ -45,6 +45,10 @@
 const char* kUnittestPublicKeyPath = "unittest_key.pub.pem";
 const char* kUnittestPrivateKey2Path = "unittest_key2.pem";
 const char* kUnittestPublicKey2Path = "unittest_key2.pub.pem";
+const char* kUnittestPrivateKeyRSA4096Path = "unittest_key_RSA4096.pem";
+const char* kUnittestPublicKeyRSA4096Path = "unittest_key_RSA4096.pub.pem";
+const char* kUnittestPrivateKeyECPath = "unittest_key_EC.pem";
+const char* kUnittestPublicKeyECPath = "unittest_key_EC.pub.pem";
 
 // Some data and its corresponding hash and signature:
 const char kDataToSign[] = "This is some data to sign.";
@@ -87,44 +91,34 @@
     0x43, 0xb9, 0xab, 0x7d};
 
 namespace {
-void SignSampleData(brillo::Blob* out_signature_blob,
-                    const vector<string>& private_keys) {
-  brillo::Blob data_blob(std::begin(kDataToSign),
-                         std::begin(kDataToSign) + strlen(kDataToSign));
+void SignSampleData(string* out_signature, const vector<string>& private_keys) {
   uint64_t length = 0;
   EXPECT_TRUE(PayloadSigner::SignatureBlobLength(private_keys, &length));
   EXPECT_GT(length, 0U);
   brillo::Blob hash_blob;
   EXPECT_TRUE(HashCalculator::RawHashOfBytes(
-      data_blob.data(), data_blob.size(), &hash_blob));
-  EXPECT_TRUE(PayloadSigner::SignHashWithKeys(
-      hash_blob, private_keys, out_signature_blob));
-  EXPECT_EQ(length, out_signature_blob->size());
+      kDataToSign, strlen(kDataToSign), &hash_blob));
+  EXPECT_TRUE(
+      PayloadSigner::SignHashWithKeys(hash_blob, private_keys, out_signature));
+  EXPECT_EQ(length, out_signature->size());
 }
 }  // namespace
 
 class PayloadSignerTest : public ::testing::Test {
  protected:
-  void SetUp() override {
-    PayloadVerifier::PadRSA2048SHA256Hash(&padded_hash_data_);
-  }
-
-  brillo::Blob padded_hash_data_{std::begin(kDataHash), std::end(kDataHash)};
+  brillo::Blob hash_data_{std::begin(kDataHash), std::end(kDataHash)};
 };
 
 TEST_F(PayloadSignerTest, SignSimpleTextTest) {
-  brillo::Blob signature_blob;
-  SignSampleData(&signature_blob,
-                 {GetBuildArtifactsPath(kUnittestPrivateKeyPath)});
+  string signature;
+  SignSampleData(&signature, {GetBuildArtifactsPath(kUnittestPrivateKeyPath)});
 
   // Check the signature itself
   Signatures signatures;
-  EXPECT_TRUE(
-      signatures.ParseFromArray(signature_blob.data(), signature_blob.size()));
+  EXPECT_TRUE(signatures.ParseFromString(signature));
   EXPECT_EQ(1, signatures.signatures_size());
-  const Signatures_Signature& signature = signatures.signatures(0);
-  EXPECT_EQ(1U, signature.version());
-  const string& sig_data = signature.data();
+  const Signatures::Signature& sig = signatures.signatures(0);
+  const string& sig_data = sig.data();
   ASSERT_EQ(base::size(kDataSignature), sig_data.size());
   for (size_t i = 0; i < base::size(kDataSignature); i++) {
     EXPECT_EQ(kDataSignature[i], static_cast<uint8_t>(sig_data[i]));
@@ -132,38 +126,44 @@
 }
 
 TEST_F(PayloadSignerTest, VerifyAllSignatureTest) {
-  brillo::Blob signature_blob;
-  SignSampleData(&signature_blob,
+  string signature;
+  SignSampleData(&signature,
                  {GetBuildArtifactsPath(kUnittestPrivateKeyPath),
-                  GetBuildArtifactsPath(kUnittestPrivateKey2Path)});
+                  GetBuildArtifactsPath(kUnittestPrivateKey2Path),
+                  GetBuildArtifactsPath(kUnittestPrivateKeyRSA4096Path),
+                  GetBuildArtifactsPath(kUnittestPrivateKeyECPath)});
 
   // Either public key should pass the verification.
-  string public_key;
-  EXPECT_TRUE(utils::ReadFile(GetBuildArtifactsPath(kUnittestPublicKeyPath),
-                              &public_key));
-  EXPECT_TRUE(PayloadVerifier::VerifySignature(
-      signature_blob, public_key, padded_hash_data_));
-  EXPECT_TRUE(utils::ReadFile(GetBuildArtifactsPath(kUnittestPublicKey2Path),
-                              &public_key));
-  EXPECT_TRUE(PayloadVerifier::VerifySignature(
-      signature_blob, public_key, padded_hash_data_));
+  for (const auto& path : {kUnittestPublicKeyPath,
+                           kUnittestPublicKey2Path,
+                           kUnittestPublicKeyRSA4096Path,
+                           kUnittestPublicKeyECPath}) {
+    string public_key;
+    EXPECT_TRUE(utils::ReadFile(GetBuildArtifactsPath(path), &public_key));
+    auto payload_verifier = PayloadVerifier::CreateInstance(public_key);
+    EXPECT_TRUE(payload_verifier != nullptr);
+    EXPECT_TRUE(payload_verifier->VerifySignature(signature, hash_data_));
+  }
 }
 
 TEST_F(PayloadSignerTest, VerifySignatureTest) {
-  brillo::Blob signature_blob;
-  SignSampleData(&signature_blob,
-                 {GetBuildArtifactsPath(kUnittestPrivateKeyPath)});
+  string signature;
+  SignSampleData(&signature, {GetBuildArtifactsPath(kUnittestPrivateKeyPath)});
 
   string public_key;
   EXPECT_TRUE(utils::ReadFile(GetBuildArtifactsPath(kUnittestPublicKeyPath),
                               &public_key));
-  EXPECT_TRUE(PayloadVerifier::VerifySignature(
-      signature_blob, public_key, padded_hash_data_));
+  auto payload_verifier = PayloadVerifier::CreateInstance(public_key);
+  EXPECT_TRUE(payload_verifier != nullptr);
+  EXPECT_TRUE(payload_verifier->VerifySignature(signature, hash_data_));
+
   // Passing the invalid key should fail the verification.
+  public_key.clear();
   EXPECT_TRUE(utils::ReadFile(GetBuildArtifactsPath(kUnittestPublicKey2Path),
                               &public_key));
-  EXPECT_TRUE(PayloadVerifier::VerifySignature(
-      signature_blob, public_key, padded_hash_data_));
+  payload_verifier = PayloadVerifier::CreateInstance(public_key);
+  EXPECT_TRUE(payload_verifier != nullptr);
+  EXPECT_FALSE(payload_verifier->VerifySignature(signature, hash_data_));
 }
 
 TEST_F(PayloadSignerTest, SkipMetadataSignatureTest) {
@@ -175,7 +175,7 @@
   uint64_t metadata_size;
   EXPECT_TRUE(payload.WritePayload(
       payload_file.path(), "/dev/null", "", &metadata_size));
-  const vector<int> sizes = {256};
+  const vector<size_t> sizes = {256};
   brillo::Blob unsigned_payload_hash, unsigned_metadata_hash;
   EXPECT_TRUE(PayloadSigner::HashPayloadForSigning(payload_file.path(),
                                                    sizes,
diff --git a/payload_state.cc b/payload_state.cc
index cf3aab9..bde7999 100644
--- a/payload_state.cc
+++ b/payload_state.cc
@@ -373,6 +373,8 @@
     case ErrorCode::kInternalLibCurlError:
     case ErrorCode::kUnresolvedHostError:
     case ErrorCode::kUnresolvedHostRecovered:
+    case ErrorCode::kNotEnoughSpace:
+    case ErrorCode::kDeviceCorrupted:
       LOG(INFO) << "Not incrementing URL index or failure count for this error";
       break;
 
diff --git a/scripts/brillo_update_payload b/scripts/brillo_update_payload
index c88709c..d9c18ff 100755
--- a/scripts/brillo_update_payload
+++ b/scripts/brillo_update_payload
@@ -28,12 +28,16 @@
 #  check       verify a payload using paycheck (static testing)
 #
 #  Generate command arguments:
-#  --payload             generated unsigned payload output file
-#  --source_image        if defined, generate a delta payload from the specified
-#                        image to the target_image
-#  --target_image        the target image that should be sent to clients
-#  --metadata_size_file  if defined, generate a file containing the size of the
-#                        payload metadata in bytes to the specified file
+#  --payload                  generated unsigned payload output file
+#  --source_image             if defined, generate a delta payload from the
+#                             specified image to the target_image
+#  --target_image             the target image that should be sent to clients
+#  --metadata_size_file       if defined, generate a file containing the size
+#                             of the payload metadata in bytes to the specified
+#                             file
+#  --disable_fec_computation  Disable the on device fec data computation for
+#                             incremental update. This feature is enabled by
+#                             default
 #
 #  Hash command arguments:
 #  --unsigned_payload    the input unsigned payload to generate the hash from
@@ -182,6 +186,9 @@
     "Optional: The maximum unix timestamp of the OS allowed to apply this \
 payload, should be set to a number higher than the build timestamp of the \
 system running on the device, 0 if not specified."
+  DEFINE_string disable_fec_computation "" \
+    "Optional: Disables the on device fec data computation for incremental \
+update. This feature is enabled by default."
 fi
 if [[ "${COMMAND}" == "hash" || "${COMMAND}" == "sign" ]]; then
   DEFINE_string unsigned_payload "" "Path to the input unsigned payload."
@@ -248,6 +255,9 @@
 # List of partition names in order.
 declare -a PARTITIONS_ORDER
 
+# A list of PIDs of the extract_image workers.
+EXTRACT_IMAGE_PIDS=()
+
 # A list of temporary files to remove during cleanup.
 CLEANUP_FILES=()
 
@@ -271,7 +281,7 @@
   local option_key="$2"
   local default_value="${3:-}"
   local value
-  if value=$(look "${option_key}=" "${file_txt}" | tail -n 1); then
+  if value=$(grep "^${option_key}=" "${file_txt}" | tail -n 1); then
     if value=$(echo "${value}" | cut -f 2- -d "=" | grep -E "^[0-9]+$"); then
       echo "${value}"
       return
@@ -324,6 +334,25 @@
 trap cleanup_on_error INT TERM ERR
 trap cleanup_on_exit EXIT
 
+# extract_file <zip_file> <entry_name> <destination>
+#
+# Extracts |entry_name| from |zip_file| to |destination|.
+extract_file() {
+  local zip_file="$1"
+  local entry_name="$2"
+  local destination="$3"
+
+  # unzip -p won't report an error upon ENOSPC. Therefore, create a temp
+  # directory as the destination of the unzip, and move the file to the
+  # intended destination.
+  local output_directory=$(
+    mktemp --directory --tmpdir="${FLAGS_work_dir}" "TEMP.XXXXXX")
+  unzip "${zip_file}" "${entry_name}" -d "${output_directory}" ||
+    { rm -rf "${output_directory}"; die "Failed to extract ${entry_name}"; }
+
+  mv "${output_directory}/${entry_name}" "${destination}"
+  rm -rf "${output_directory}"
+}
 
 # extract_image <image> <partitions_array> [partitions_order]
 #
@@ -414,7 +443,7 @@
     fi
   done
   [[ -n "${path_in_zip}" ]] || die "Failed to find ${part}.img"
-  unzip -p "${image}" "${path_in_zip}/${part}.img" >"${part_file}"
+  extract_file "${image}" "${path_in_zip}/${part}.img" "${part_file}"
 
   # If the partition is stored as an Android sparse image file, we need to
   # convert them to a raw image for the update.
@@ -428,8 +457,9 @@
   fi
 
   # Extract the .map file (if one is available).
-  unzip -p "${image}" "${path_in_zip}/${part}.map" >"${part_map_file}" \
-    2>/dev/null || true
+  if unzip -l "${image}" "${path_in_zip}/${part}.map" > /dev/null; then
+    extract_file "${image}" "${path_in_zip}/${part}.map" "${part_map_file}"
+  fi
 
   # delta_generator only supports images multiple of 4 KiB. For target images
   # we pad the data with zeros if needed, but for source images we truncate
@@ -463,7 +493,8 @@
   local ab_partitions_list
   ab_partitions_list=$(create_tempfile "ab_partitions_list.XXXXXX")
   CLEANUP_FILES+=("${ab_partitions_list}")
-  if unzip -p "${image}" "META/ab_partitions.txt" >"${ab_partitions_list}"; then
+  if unzip -l "${image}" "META/ab_partitions.txt" > /dev/null; then
+    extract_file "${image}" "META/ab_partitions.txt" "${ab_partitions_list}"
     if grep -v -E '^[a-zA-Z0-9_-]*$' "${ab_partitions_list}" >&2; then
       die "Invalid partition names found in the partition list."
     fi
@@ -488,8 +519,9 @@
     # Source image
     local ue_config=$(create_tempfile "ue_config.XXXXXX")
     CLEANUP_FILES+=("${ue_config}")
-    if ! unzip -p "${image}" "META/update_engine_config.txt" \
-        >"${ue_config}"; then
+    if unzip -l "${image}" "META/update_engine_config.txt" > /dev/null; then
+      extract_file "${image}" "META/update_engine_config.txt" "${ue_config}"
+    else
       warn "No update_engine_config.txt found. Assuming pre-release image, \
 using payload minor version 2"
     fi
@@ -510,14 +542,16 @@
     # Target image
     local postinstall_config=$(create_tempfile "postinstall_config.XXXXXX")
     CLEANUP_FILES+=("${postinstall_config}")
-    if unzip -p "${image}" "META/postinstall_config.txt" \
-        >"${postinstall_config}"; then
+    if unzip -l "${image}" "META/postinstall_config.txt" > /dev/null; then
+      extract_file "${image}" "META/postinstall_config.txt" \
+        "${postinstall_config}"
       POSTINSTALL_CONFIG_FILE="${postinstall_config}"
     fi
     local dynamic_partitions_info=$(create_tempfile "dynamic_partitions_info.XXXXXX")
     CLEANUP_FILES+=("${dynamic_partitions_info}")
-    if unzip -p "${image}" "META/dynamic_partitions_info.txt" \
-        >"${dynamic_partitions_info}"; then
+    if unzip -l "${image}" "META/dynamic_partitions_info.txt" > /dev/null; then
+      extract_file "${image}" "META/dynamic_partitions_info.txt" \
+        "${dynamic_partitions_info}"
       DYNAMIC_PARTITION_INFO_FILE="${dynamic_partitions_info}"
     fi
   fi
@@ -530,6 +564,7 @@
     # Extract partitions in background.
     extract_partition_brillo "${image}" "${partitions_array}" "${part}" \
         "${part_file}" "${part_map_file}" &
+    EXTRACT_IMAGE_PIDS+=("$!")
     eval "${partitions_array}[\"${part}\"]=\"${part_file}\""
     eval "${partitions_array}_MAP[\"${part}\"]=\"${part_map_file}\""
   done
@@ -559,8 +594,12 @@
     extract_image "${FLAGS_source_image}" SRC_PARTITIONS
   fi
   extract_image "${FLAGS_target_image}" DST_PARTITIONS PARTITIONS_ORDER
-  # Wait for all subprocesses.
-  wait
+  # Wait for all subprocesses to finish. Not using `wait` since it doesn't die
+  # on non-zero subprocess exit code. Not using `wait ${EXTRACT_IMAGE_PIDS[@]}`
+  # as it gives the status of the last process it has waited for.
+  for pid in ${EXTRACT_IMAGE_PIDS[@]}; do
+    wait ${pid}
+  done
   cleanup_partition_array SRC_PARTITIONS
   cleanup_partition_array SRC_PARTITIONS_MAP
   cleanup_partition_array DST_PARTITIONS
@@ -624,6 +663,10 @@
     if [[ -n "${FORCE_MINOR_VERSION}" ]]; then
       GENERATOR_ARGS+=( --minor_version="${FORCE_MINOR_VERSION}" )
     fi
+    if [[ -n "${FLAGS_disable_fec_computation}" ]]; then
+      GENERATOR_ARGS+=(
+        --disable_fec_computation="${FLAGS_disable_fec_computation}" )
+    fi
   fi
 
   if [[ -n "${FORCE_MAJOR_VERSION}" ]]; then
diff --git a/scripts/update_device.py b/scripts/update_device.py
index f970bd3..7be3edb 100755
--- a/scripts/update_device.py
+++ b/scripts/update_device.py
@@ -87,17 +87,24 @@
   # Android OTA package file paths.
   OTA_PAYLOAD_BIN = 'payload.bin'
   OTA_PAYLOAD_PROPERTIES_TXT = 'payload_properties.txt'
+  SECONDARY_OTA_PAYLOAD_BIN = 'secondary/payload.bin'
+  SECONDARY_OTA_PAYLOAD_PROPERTIES_TXT = 'secondary/payload_properties.txt'
 
-  def __init__(self, otafilename):
+  def __init__(self, otafilename, secondary_payload=False):
     self.otafilename = otafilename
 
     otazip = zipfile.ZipFile(otafilename, 'r')
-    payload_info = otazip.getinfo(self.OTA_PAYLOAD_BIN)
+    payload_entry = (self.SECONDARY_OTA_PAYLOAD_BIN if secondary_payload else
+                     self.OTA_PAYLOAD_BIN)
+    payload_info = otazip.getinfo(payload_entry)
     self.offset = payload_info.header_offset
     self.offset += zipfile.sizeFileHeader
     self.offset += len(payload_info.extra) + len(payload_info.filename)
     self.size = payload_info.file_size
-    self.properties = otazip.read(self.OTA_PAYLOAD_PROPERTIES_TXT)
+
+    property_entry = (self.SECONDARY_OTA_PAYLOAD_PROPERTIES_TXT if
+                      secondary_payload else self.OTA_PAYLOAD_PROPERTIES_TXT)
+    self.properties = otazip.read(property_entry)
 
 
 class UpdateHandler(BaseHTTPServer.BaseHTTPRequestHandler):
@@ -280,9 +287,9 @@
   return t
 
 
-def AndroidUpdateCommand(ota_filename, payload_url, extra_headers):
+def AndroidUpdateCommand(ota_filename, secondary, payload_url, extra_headers):
   """Return the command to run to start the update in the Android device."""
-  ota = AndroidOTAPackage(ota_filename)
+  ota = AndroidOTAPackage(ota_filename, secondary)
   headers = ota.properties
   headers += 'USER_AGENT=Dalvik (something, something)\n'
   headers += 'NETWORK_ID=0\n'
@@ -365,6 +372,8 @@
                       help='Override the public key used to verify payload.')
   parser.add_argument('--extra-headers', type=str, default='',
                       help='Extra headers to pass to the device.')
+  parser.add_argument('--secondary', action='store_true',
+                      help='Update with the secondary payload in the package.')
   args = parser.parse_args()
   logging.basicConfig(
       level=logging.WARNING if args.no_verbose else logging.INFO)
@@ -400,7 +409,7 @@
     # command.
     payload_url = 'http://127.0.0.1:%d/payload' % DEVICE_PORT
     if use_omaha and zipfile.is_zipfile(args.otafile):
-      ota = AndroidOTAPackage(args.otafile)
+      ota = AndroidOTAPackage(args.otafile, args.secondary)
       serving_range = (ota.offset, ota.size)
     else:
       serving_range = (0, os.stat(args.otafile).st_size)
@@ -428,8 +437,8 @@
       update_cmd = \
           OmahaUpdateCommand('http://127.0.0.1:%d/update' % DEVICE_PORT)
     else:
-      update_cmd = \
-          AndroidUpdateCommand(args.otafile, payload_url, args.extra_headers)
+      update_cmd = AndroidUpdateCommand(args.otafile, args.secondary,
+                                        payload_url, args.extra_headers)
     cmds.append(['shell', 'su', '0'] + update_cmd)
 
     for cmd in cmds:
diff --git a/scripts/update_payload/update_metadata_pb2.py b/scripts/update_payload/update_metadata_pb2.py
index 907cc18..d41c1da 100644
--- a/scripts/update_payload/update_metadata_pb2.py
+++ b/scripts/update_payload/update_metadata_pb2.py
@@ -20,7 +20,7 @@
   package='chromeos_update_engine',
   syntax='proto2',
   serialized_options=_b('H\003'),
-  serialized_pb=_b('\n\x15update_metadata.proto\x12\x16\x63hromeos_update_engine\"1\n\x06\x45xtent\x12\x13\n\x0bstart_block\x18\x01 \x01(\x04\x12\x12\n\nnum_blocks\x18\x02 \x01(\x04\"z\n\nSignatures\x12@\n\nsignatures\x18\x01 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x1a*\n\tSignature\x12\x0f\n\x07version\x18\x01 \x01(\r\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\"+\n\rPartitionInfo\x12\x0c\n\x04size\x18\x01 \x01(\x04\x12\x0c\n\x04hash\x18\x02 \x01(\x0c\"w\n\tImageInfo\x12\r\n\x05\x62oard\x18\x01 \x01(\t\x12\x0b\n\x03key\x18\x02 \x01(\t\x12\x0f\n\x07\x63hannel\x18\x03 \x01(\t\x12\x0f\n\x07version\x18\x04 \x01(\t\x12\x15\n\rbuild_channel\x18\x05 \x01(\t\x12\x15\n\rbuild_version\x18\x06 \x01(\t\"\xee\x03\n\x10InstallOperation\x12;\n\x04type\x18\x01 \x02(\x0e\x32-.chromeos_update_engine.InstallOperation.Type\x12\x13\n\x0b\x64\x61ta_offset\x18\x02 \x01(\x04\x12\x13\n\x0b\x64\x61ta_length\x18\x03 \x01(\x04\x12\x33\n\x0bsrc_extents\x18\x04 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\nsrc_length\x18\x05 \x01(\x04\x12\x33\n\x0b\x64st_extents\x18\x06 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\ndst_length\x18\x07 \x01(\x04\x12\x18\n\x10\x64\x61ta_sha256_hash\x18\x08 \x01(\x0c\x12\x17\n\x0fsrc_sha256_hash\x18\t \x01(\x0c\"\xad\x01\n\x04Type\x12\x0b\n\x07REPLACE\x10\x00\x12\x0e\n\nREPLACE_BZ\x10\x01\x12\x0c\n\x04MOVE\x10\x02\x1a\x02\x08\x01\x12\x0e\n\x06\x42SDIFF\x10\x03\x1a\x02\x08\x01\x12\x0f\n\x0bSOURCE_COPY\x10\x04\x12\x11\n\rSOURCE_BSDIFF\x10\x05\x12\x0e\n\nREPLACE_XZ\x10\x08\x12\x08\n\x04ZERO\x10\x06\x12\x0b\n\x07\x44ISCARD\x10\x07\x12\x11\n\rBROTLI_BSDIFF\x10\n\x12\x0c\n\x08PUFFDIFF\x10\t\"\xd7\x05\n\x0fPartitionUpdate\x12\x16\n\x0epartition_name\x18\x01 \x02(\t\x12\x17\n\x0frun_postinstall\x18\x02 \x01(\x08\x12\x18\n\x10postinstall_path\x18\x03 \x01(\t\x12\x17\n\x0f\x66ilesystem_type\x18\x04 \x01(\t\x12M\n\x17new_partition_signature\x18\x05 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x12\x41\n\x12old_partition_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12\x41\n\x12new_partition_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12<\n\noperations\x18\x08 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12\x1c\n\x14postinstall_optional\x18\t \x01(\x08\x12=\n\x15hash_tree_data_extent\x18\n \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x38\n\x10hash_tree_extent\x18\x0b \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x1b\n\x13hash_tree_algorithm\x18\x0c \x01(\t\x12\x16\n\x0ehash_tree_salt\x18\r \x01(\x0c\x12\x37\n\x0f\x66\x65\x63_data_extent\x18\x0e \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x32\n\nfec_extent\x18\x0f \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x14\n\tfec_roots\x18\x10 \x01(\r:\x01\x32\"L\n\x15\x44ynamicPartitionGroup\x12\x0c\n\x04name\x18\x01 \x02(\t\x12\x0c\n\x04size\x18\x02 \x01(\x04\x12\x17\n\x0fpartition_names\x18\x03 \x03(\t\"Y\n\x18\x44ynamicPartitionMetadata\x12=\n\x06groups\x18\x01 \x03(\x0b\x32-.chromeos_update_engine.DynamicPartitionGroup\"\xc9\x06\n\x14\x44\x65ltaArchiveManifest\x12H\n\x12install_operations\x18\x01 \x03(\x0b\x32(.chromeos_update_engine.InstallOperationB\x02\x18\x01\x12O\n\x19kernel_install_operations\x18\x02 \x03(\x0b\x32(.chromeos_update_engine.InstallOperationB\x02\x18\x01\x12\x18\n\nblock_size\x18\x03 \x01(\r:\x04\x34\x30\x39\x36\x12\x19\n\x11signatures_offset\x18\x04 \x01(\x04\x12\x17\n\x0fsignatures_size\x18\x05 \x01(\x04\x12\x42\n\x0fold_kernel_info\x18\x06 
\x01(\x0b\x32%.chromeos_update_engine.PartitionInfoB\x02\x18\x01\x12\x42\n\x0fnew_kernel_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfoB\x02\x18\x01\x12\x42\n\x0fold_rootfs_info\x18\x08 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfoB\x02\x18\x01\x12\x42\n\x0fnew_rootfs_info\x18\t \x01(\x0b\x32%.chromeos_update_engine.PartitionInfoB\x02\x18\x01\x12\x39\n\x0eold_image_info\x18\n \x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x39\n\x0enew_image_info\x18\x0b \x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x18\n\rminor_version\x18\x0c \x01(\r:\x01\x30\x12;\n\npartitions\x18\r \x03(\x0b\x32\'.chromeos_update_engine.PartitionUpdate\x12\x15\n\rmax_timestamp\x18\x0e \x01(\x03\x12T\n\x1a\x64ynamic_partition_metadata\x18\x0f \x01(\x0b\x32\x30.chromeos_update_engine.DynamicPartitionMetadataB\x02H\x03')
+  serialized_pb=_b('\n\x15update_metadata.proto\x12\x16\x63hromeos_update_engine\"1\n\x06\x45xtent\x12\x13\n\x0bstart_block\x18\x01 \x01(\x04\x12\x12\n\nnum_blocks\x18\x02 \x01(\x04\"\x9f\x01\n\nSignatures\x12@\n\nsignatures\x18\x01 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x1aO\n\tSignature\x12\x13\n\x07version\x18\x01 \x01(\rB\x02\x18\x01\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\x12\x1f\n\x17unpadded_signature_size\x18\x03 \x01(\x07\"+\n\rPartitionInfo\x12\x0c\n\x04size\x18\x01 \x01(\x04\x12\x0c\n\x04hash\x18\x02 \x01(\x0c\"w\n\tImageInfo\x12\r\n\x05\x62oard\x18\x01 \x01(\t\x12\x0b\n\x03key\x18\x02 \x01(\t\x12\x0f\n\x07\x63hannel\x18\x03 \x01(\t\x12\x0f\n\x07version\x18\x04 \x01(\t\x12\x15\n\rbuild_channel\x18\x05 \x01(\t\x12\x15\n\rbuild_version\x18\x06 \x01(\t\"\xee\x03\n\x10InstallOperation\x12;\n\x04type\x18\x01 \x02(\x0e\x32-.chromeos_update_engine.InstallOperation.Type\x12\x13\n\x0b\x64\x61ta_offset\x18\x02 \x01(\x04\x12\x13\n\x0b\x64\x61ta_length\x18\x03 \x01(\x04\x12\x33\n\x0bsrc_extents\x18\x04 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\nsrc_length\x18\x05 \x01(\x04\x12\x33\n\x0b\x64st_extents\x18\x06 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\ndst_length\x18\x07 \x01(\x04\x12\x18\n\x10\x64\x61ta_sha256_hash\x18\x08 \x01(\x0c\x12\x17\n\x0fsrc_sha256_hash\x18\t \x01(\x0c\"\xad\x01\n\x04Type\x12\x0b\n\x07REPLACE\x10\x00\x12\x0e\n\nREPLACE_BZ\x10\x01\x12\x0c\n\x04MOVE\x10\x02\x1a\x02\x08\x01\x12\x0e\n\x06\x42SDIFF\x10\x03\x1a\x02\x08\x01\x12\x0f\n\x0bSOURCE_COPY\x10\x04\x12\x11\n\rSOURCE_BSDIFF\x10\x05\x12\x0e\n\nREPLACE_XZ\x10\x08\x12\x08\n\x04ZERO\x10\x06\x12\x0b\n\x07\x44ISCARD\x10\x07\x12\x11\n\rBROTLI_BSDIFF\x10\n\x12\x0c\n\x08PUFFDIFF\x10\t\"\xd7\x05\n\x0fPartitionUpdate\x12\x16\n\x0epartition_name\x18\x01 \x02(\t\x12\x17\n\x0frun_postinstall\x18\x02 \x01(\x08\x12\x18\n\x10postinstall_path\x18\x03 \x01(\t\x12\x17\n\x0f\x66ilesystem_type\x18\x04 \x01(\t\x12M\n\x17new_partition_signature\x18\x05 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x12\x41\n\x12old_partition_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12\x41\n\x12new_partition_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12<\n\noperations\x18\x08 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12\x1c\n\x14postinstall_optional\x18\t \x01(\x08\x12=\n\x15hash_tree_data_extent\x18\n \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x38\n\x10hash_tree_extent\x18\x0b \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x1b\n\x13hash_tree_algorithm\x18\x0c \x01(\t\x12\x16\n\x0ehash_tree_salt\x18\r \x01(\x0c\x12\x37\n\x0f\x66\x65\x63_data_extent\x18\x0e \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x32\n\nfec_extent\x18\x0f \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x14\n\tfec_roots\x18\x10 \x01(\r:\x01\x32\"L\n\x15\x44ynamicPartitionGroup\x12\x0c\n\x04name\x18\x01 \x02(\t\x12\x0c\n\x04size\x18\x02 \x01(\x04\x12\x17\n\x0fpartition_names\x18\x03 \x03(\t\"s\n\x18\x44ynamicPartitionMetadata\x12=\n\x06groups\x18\x01 \x03(\x0b\x32-.chromeos_update_engine.DynamicPartitionGroup\x12\x18\n\x10snapshot_enabled\x18\x02 \x01(\x08\"\xe1\x06\n\x14\x44\x65ltaArchiveManifest\x12H\n\x12install_operations\x18\x01 \x03(\x0b\x32(.chromeos_update_engine.InstallOperationB\x02\x18\x01\x12O\n\x19kernel_install_operations\x18\x02 \x03(\x0b\x32(.chromeos_update_engine.InstallOperationB\x02\x18\x01\x12\x18\n\nblock_size\x18\x03 \x01(\r:\x04\x34\x30\x39\x36\x12\x19\n\x11signatures_offset\x18\x04 
\x01(\x04\x12\x17\n\x0fsignatures_size\x18\x05 \x01(\x04\x12\x42\n\x0fold_kernel_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfoB\x02\x18\x01\x12\x42\n\x0fnew_kernel_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfoB\x02\x18\x01\x12\x42\n\x0fold_rootfs_info\x18\x08 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfoB\x02\x18\x01\x12\x42\n\x0fnew_rootfs_info\x18\t \x01(\x0b\x32%.chromeos_update_engine.PartitionInfoB\x02\x18\x01\x12\x39\n\x0eold_image_info\x18\n \x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x39\n\x0enew_image_info\x18\x0b \x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x18\n\rminor_version\x18\x0c \x01(\r:\x01\x30\x12;\n\npartitions\x18\r \x03(\x0b\x32\'.chromeos_update_engine.PartitionUpdate\x12\x15\n\rmax_timestamp\x18\x0e \x01(\x03\x12T\n\x1a\x64ynamic_partition_metadata\x18\x0f \x01(\x0b\x32\x30.chromeos_update_engine.DynamicPartitionMetadata\x12\x16\n\x0epartial_update\x18\x10 \x01(\x08\x42\x02H\x03')
 )
 
 
@@ -78,8 +78,8 @@
   ],
   containing_type=None,
   serialized_options=None,
-  serialized_start=712,
-  serialized_end=885,
+  serialized_start=750,
+  serialized_end=923,
 )
 _sym_db.RegisterEnumDescriptor(_INSTALLOPERATION_TYPE)
 
@@ -135,7 +135,7 @@
       has_default_value=False, default_value=0,
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
-      serialized_options=None, file=DESCRIPTOR),
+      serialized_options=_b('\030\001'), file=DESCRIPTOR),
     _descriptor.FieldDescriptor(
       name='data', full_name='chromeos_update_engine.Signatures.Signature.data', index=1,
       number=2, type=12, cpp_type=9, label=1,
@@ -143,6 +143,13 @@
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
       serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='unpadded_signature_size', full_name='chromeos_update_engine.Signatures.Signature.unpadded_signature_size', index=2,
+      number=3, type=7, cpp_type=3, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
   ],
   extensions=[
   ],
@@ -155,8 +162,8 @@
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=180,
-  serialized_end=222,
+  serialized_start=181,
+  serialized_end=260,
 )
 
 _SIGNATURES = _descriptor.Descriptor(
@@ -185,8 +192,8 @@
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=100,
-  serialized_end=222,
+  serialized_start=101,
+  serialized_end=260,
 )
 
 
@@ -223,8 +230,8 @@
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=224,
-  serialized_end=267,
+  serialized_start=262,
+  serialized_end=305,
 )
 
 
@@ -289,8 +296,8 @@
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=269,
-  serialized_end=388,
+  serialized_start=307,
+  serialized_end=426,
 )
 
 
@@ -377,8 +384,8 @@
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=391,
-  serialized_end=885,
+  serialized_start=429,
+  serialized_end=923,
 )
 
 
@@ -513,8 +520,8 @@
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=888,
-  serialized_end=1615,
+  serialized_start=926,
+  serialized_end=1653,
 )
 
 
@@ -558,8 +565,8 @@
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=1617,
-  serialized_end=1693,
+  serialized_start=1655,
+  serialized_end=1731,
 )
 
 
@@ -577,6 +584,13 @@
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
       serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='snapshot_enabled', full_name='chromeos_update_engine.DynamicPartitionMetadata.snapshot_enabled', index=1,
+      number=2, type=8, cpp_type=7, label=1,
+      has_default_value=False, default_value=False,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
   ],
   extensions=[
   ],
@@ -589,8 +603,8 @@
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=1695,
-  serialized_end=1784,
+  serialized_start=1733,
+  serialized_end=1848,
 )
 
 
@@ -706,6 +720,13 @@
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
       serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='partial_update', full_name='chromeos_update_engine.DeltaArchiveManifest.partial_update', index=15,
+      number=16, type=8, cpp_type=7, label=1,
+      has_default_value=False, default_value=False,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
   ],
   extensions=[
   ],
@@ -718,8 +739,8 @@
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=1787,
-  serialized_end=2628,
+  serialized_start=1851,
+  serialized_end=2716,
 )
 
 _SIGNATURES_SIGNATURE.containing_type = _SIGNATURES
@@ -758,79 +779,80 @@
 DESCRIPTOR.message_types_by_name['DeltaArchiveManifest'] = _DELTAARCHIVEMANIFEST
 _sym_db.RegisterFileDescriptor(DESCRIPTOR)
 
-Extent = _reflection.GeneratedProtocolMessageType('Extent', (_message.Message,), dict(
-  DESCRIPTOR = _EXTENT,
-  __module__ = 'update_metadata_pb2'
+Extent = _reflection.GeneratedProtocolMessageType('Extent', (_message.Message,), {
+  'DESCRIPTOR' : _EXTENT,
+  '__module__' : 'update_metadata_pb2'
   # @@protoc_insertion_point(class_scope:chromeos_update_engine.Extent)
-  ))
+  })
 _sym_db.RegisterMessage(Extent)
 
-Signatures = _reflection.GeneratedProtocolMessageType('Signatures', (_message.Message,), dict(
+Signatures = _reflection.GeneratedProtocolMessageType('Signatures', (_message.Message,), {
 
-  Signature = _reflection.GeneratedProtocolMessageType('Signature', (_message.Message,), dict(
-    DESCRIPTOR = _SIGNATURES_SIGNATURE,
-    __module__ = 'update_metadata_pb2'
+  'Signature' : _reflection.GeneratedProtocolMessageType('Signature', (_message.Message,), {
+    'DESCRIPTOR' : _SIGNATURES_SIGNATURE,
+    '__module__' : 'update_metadata_pb2'
     # @@protoc_insertion_point(class_scope:chromeos_update_engine.Signatures.Signature)
-    ))
+    })
   ,
-  DESCRIPTOR = _SIGNATURES,
-  __module__ = 'update_metadata_pb2'
+  'DESCRIPTOR' : _SIGNATURES,
+  '__module__' : 'update_metadata_pb2'
   # @@protoc_insertion_point(class_scope:chromeos_update_engine.Signatures)
-  ))
+  })
 _sym_db.RegisterMessage(Signatures)
 _sym_db.RegisterMessage(Signatures.Signature)
 
-PartitionInfo = _reflection.GeneratedProtocolMessageType('PartitionInfo', (_message.Message,), dict(
-  DESCRIPTOR = _PARTITIONINFO,
-  __module__ = 'update_metadata_pb2'
+PartitionInfo = _reflection.GeneratedProtocolMessageType('PartitionInfo', (_message.Message,), {
+  'DESCRIPTOR' : _PARTITIONINFO,
+  '__module__' : 'update_metadata_pb2'
   # @@protoc_insertion_point(class_scope:chromeos_update_engine.PartitionInfo)
-  ))
+  })
 _sym_db.RegisterMessage(PartitionInfo)
 
-ImageInfo = _reflection.GeneratedProtocolMessageType('ImageInfo', (_message.Message,), dict(
-  DESCRIPTOR = _IMAGEINFO,
-  __module__ = 'update_metadata_pb2'
+ImageInfo = _reflection.GeneratedProtocolMessageType('ImageInfo', (_message.Message,), {
+  'DESCRIPTOR' : _IMAGEINFO,
+  '__module__' : 'update_metadata_pb2'
   # @@protoc_insertion_point(class_scope:chromeos_update_engine.ImageInfo)
-  ))
+  })
 _sym_db.RegisterMessage(ImageInfo)
 
-InstallOperation = _reflection.GeneratedProtocolMessageType('InstallOperation', (_message.Message,), dict(
-  DESCRIPTOR = _INSTALLOPERATION,
-  __module__ = 'update_metadata_pb2'
+InstallOperation = _reflection.GeneratedProtocolMessageType('InstallOperation', (_message.Message,), {
+  'DESCRIPTOR' : _INSTALLOPERATION,
+  '__module__' : 'update_metadata_pb2'
   # @@protoc_insertion_point(class_scope:chromeos_update_engine.InstallOperation)
-  ))
+  })
 _sym_db.RegisterMessage(InstallOperation)
 
-PartitionUpdate = _reflection.GeneratedProtocolMessageType('PartitionUpdate', (_message.Message,), dict(
-  DESCRIPTOR = _PARTITIONUPDATE,
-  __module__ = 'update_metadata_pb2'
+PartitionUpdate = _reflection.GeneratedProtocolMessageType('PartitionUpdate', (_message.Message,), {
+  'DESCRIPTOR' : _PARTITIONUPDATE,
+  '__module__' : 'update_metadata_pb2'
   # @@protoc_insertion_point(class_scope:chromeos_update_engine.PartitionUpdate)
-  ))
+  })
 _sym_db.RegisterMessage(PartitionUpdate)
 
-DynamicPartitionGroup = _reflection.GeneratedProtocolMessageType('DynamicPartitionGroup', (_message.Message,), dict(
-  DESCRIPTOR = _DYNAMICPARTITIONGROUP,
-  __module__ = 'update_metadata_pb2'
+DynamicPartitionGroup = _reflection.GeneratedProtocolMessageType('DynamicPartitionGroup', (_message.Message,), {
+  'DESCRIPTOR' : _DYNAMICPARTITIONGROUP,
+  '__module__' : 'update_metadata_pb2'
   # @@protoc_insertion_point(class_scope:chromeos_update_engine.DynamicPartitionGroup)
-  ))
+  })
 _sym_db.RegisterMessage(DynamicPartitionGroup)
 
-DynamicPartitionMetadata = _reflection.GeneratedProtocolMessageType('DynamicPartitionMetadata', (_message.Message,), dict(
-  DESCRIPTOR = _DYNAMICPARTITIONMETADATA,
-  __module__ = 'update_metadata_pb2'
+DynamicPartitionMetadata = _reflection.GeneratedProtocolMessageType('DynamicPartitionMetadata', (_message.Message,), {
+  'DESCRIPTOR' : _DYNAMICPARTITIONMETADATA,
+  '__module__' : 'update_metadata_pb2'
   # @@protoc_insertion_point(class_scope:chromeos_update_engine.DynamicPartitionMetadata)
-  ))
+  })
 _sym_db.RegisterMessage(DynamicPartitionMetadata)
 
-DeltaArchiveManifest = _reflection.GeneratedProtocolMessageType('DeltaArchiveManifest', (_message.Message,), dict(
-  DESCRIPTOR = _DELTAARCHIVEMANIFEST,
-  __module__ = 'update_metadata_pb2'
+DeltaArchiveManifest = _reflection.GeneratedProtocolMessageType('DeltaArchiveManifest', (_message.Message,), {
+  'DESCRIPTOR' : _DELTAARCHIVEMANIFEST,
+  '__module__' : 'update_metadata_pb2'
   # @@protoc_insertion_point(class_scope:chromeos_update_engine.DeltaArchiveManifest)
-  ))
+  })
 _sym_db.RegisterMessage(DeltaArchiveManifest)
 
 
 DESCRIPTOR._options = None
+_SIGNATURES_SIGNATURE.fields_by_name['version']._options = None
 _INSTALLOPERATION_TYPE.values_by_name["MOVE"]._options = None
 _INSTALLOPERATION_TYPE.values_by_name["BSDIFF"]._options = None
 _DELTAARCHIVEMANIFEST.fields_by_name['install_operations']._options = None
diff --git a/service_delegate_android_interface.h b/service_delegate_android_interface.h
index 5267bb0..34a9712 100644
--- a/service_delegate_android_interface.h
+++ b/service_delegate_android_interface.h
@@ -19,6 +19,7 @@
 
 #include <inttypes.h>
 
+#include <memory>
 #include <string>
 #include <vector>
 
@@ -26,6 +27,18 @@
 
 namespace chromeos_update_engine {
 
+// See ServiceDelegateAndroidInterface.CleanupSuccessfulUpdate
+// Wraps a IUpdateEngineCallback binder object used specifically for
+// CleanupSuccessfulUpdate.
+class CleanupSuccessfulUpdateCallbackInterface {
+ public:
+  virtual ~CleanupSuccessfulUpdateCallbackInterface() {}
+  virtual void OnCleanupProgressUpdate(double progress) = 0;
+  virtual void OnCleanupComplete(int32_t error_code) = 0;
+  // Call RegisterForDeathNotifications on the internal binder object.
+  virtual void RegisterForDeathNotifications(base::Closure unbind) = 0;
+};
+
 // This class defines the interface exposed by the Android version of the
 // daemon service. This interface only includes the method calls that such
 // daemon exposes. For asynchronous events initiated by a class implementing
@@ -47,6 +60,13 @@
       const std::vector<std::string>& key_value_pair_headers,
       brillo::ErrorPtr* error) = 0;
 
+  virtual bool ApplyPayload(
+      int fd,
+      int64_t payload_offset,
+      int64_t payload_size,
+      const std::vector<std::string>& key_value_pair_headers,
+      brillo::ErrorPtr* error) = 0;
+
   // Suspend an ongoing update. Returns true if there was an update ongoing and
   // it was suspended. In case of failure, it returns false and sets |error|
   // accordingly.
@@ -76,6 +96,28 @@
   virtual bool VerifyPayloadApplicable(const std::string& metadata_filename,
                                        brillo::ErrorPtr* error) = 0;
 
+  // Allocates space for a payload.
+  // Returns 0 if space is successfully preallocated.
+  // Returns non-zero if not enough space is available; the returned value is
+  // the total space required (in bytes) to be free on the device for this
+  // update to be applied, and |error| is unset.
+  // In case of error, returns 0, and sets |error| accordingly.
+  //
+  // This function may block for several minutes in the worst case.
+  virtual uint64_t AllocateSpaceForPayload(
+      const std::string& metadata_filename,
+      const std::vector<std::string>& key_value_pair_headers,
+      brillo::ErrorPtr* error) = 0;
+
+  // Waits for the merge to complete, then cleans up the merge after a
+  // successful update.
+  //
+  // This function returns immediately. Progress updates are provided in
+  // |callback|.
+  virtual void CleanupSuccessfulUpdate(
+      std::unique_ptr<CleanupSuccessfulUpdateCallbackInterface> callback,
+      brillo::ErrorPtr* error) = 0;
+
  protected:
   ServiceDelegateAndroidInterface() = default;
 };
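
For illustration, a hypothetical implementation of the new CleanupSuccessfulUpdateCallbackInterface; only the interface and its three methods come from the patch, the class name and behavior here are illustrative:

#include <cstdint>
#include <utility>

#include <base/callback.h>
#include <base/logging.h>

#include "update_engine/service_delegate_android_interface.h"

// Logs cleanup progress; a real implementation would forward these events
// over the IUpdateEngineCallback binder.
class LoggingCleanupCallback
    : public chromeos_update_engine::CleanupSuccessfulUpdateCallbackInterface {
 public:
  void OnCleanupProgressUpdate(double progress) override {
    LOG(INFO) << "Merge/cleanup progress: " << progress * 100 << "%";
  }
  void OnCleanupComplete(int32_t error_code) override {
    LOG(INFO) << "Cleanup finished, error code " << error_code;
  }
  void RegisterForDeathNotifications(base::Closure unbind) override {
    // A binder-backed implementation would run |unbind| when the remote
    // callback object dies; this sketch only stores it.
    unbind_ = std::move(unbind);
  }

 private:
  base::Closure unbind_;
};

A caller would then hand an instance to the delegate, e.g. delegate->CleanupSuccessfulUpdate(std::make_unique<LoggingCleanupCallback>(), &error).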
diff --git a/sideload_main.cc b/sideload_main.cc
index 818fa5c..27967cd 100644
--- a/sideload_main.cc
+++ b/sideload_main.cc
@@ -20,7 +20,6 @@
 #include <vector>
 
 #include <base/command_line.h>
-#include <base/logging.h>
 #include <base/strings/string_split.h>
 #include <base/strings/stringprintf.h>
 #include <brillo/asynchronous_signal_handler.h>
@@ -36,6 +35,7 @@
 #include "update_engine/common/subprocess.h"
 #include "update_engine/common/terminator.h"
 #include "update_engine/common/utils.h"
+#include "update_engine/logging.h"
 #include "update_engine/update_attempter_android.h"
 
 using std::string;
@@ -46,17 +46,6 @@
 namespace chromeos_update_engine {
 namespace {
 
-void SetupLogging() {
-  string log_file;
-  logging::LoggingSettings log_settings;
-  log_settings.lock_log = logging::DONT_LOCK_LOG_FILE;
-  log_settings.delete_old = logging::APPEND_TO_OLD_LOG_FILE;
-  log_settings.log_file = nullptr;
-  log_settings.logging_dest = logging::LOG_TO_SYSTEM_DEBUG_LOG;
-
-  logging::InitLogging(log_settings);
-}
-
 class SideloadDaemonState : public DaemonStateInterface,
                             public ServiceObserverInterface {
  public:
@@ -195,7 +184,7 @@
   DEFINE_int64(status_fd, -1, "A file descriptor to notify the update status.");
 
   chromeos_update_engine::Terminator::Init();
-  chromeos_update_engine::SetupLogging();
+  chromeos_update_engine::SetupLogging(true /* stderr */, false /* file */);
   brillo::FlagHelper::Init(argc, argv, "Update Engine Sideload");
 
   LOG(INFO) << "Update Engine Sideloading starting";
diff --git a/test_config.xml b/test_config.xml
new file mode 100644
index 0000000..fe3cbfd
--- /dev/null
+++ b/test_config.xml
@@ -0,0 +1,34 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2019 The Android Open Source Project
+
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+-->
+<configuration description="Config to run update_engine_unittests on device">
+    <option name="test-suite-tag" value="apct" />
+    <option name="test-suite-tag" value="apct-native" />
+    <target_preparer class="com.android.tradefed.targetprep.RootTargetPreparer" />
+    <target_preparer class="com.android.tradefed.targetprep.PushFilePreparer">
+        <option name="cleanup" value="true" />
+        <option name="push" value="update_engine_unittests->/data/nativetest/update_engine_unittests" />
+    </target_preparer>
+
+    <test class="com.android.tradefed.testtype.GTest" >
+        <option name="native-test-device-path" value="/data/nativetest" />
+        <!-- The following rules prevent the test runner from calling these helper
+             executables directly as gtests. -->
+        <option name="file-exclusion-filter-regex" value=".*/delta_generator$" />
+        <option name="file-exclusion-filter-regex" value=".*/test_http_server$" />
+        <option name="file-exclusion-filter-regex" value=".*/test_subprocess$" />
+        <option name="module-name" value="update_engine_unittests" />
+    </test>
+</configuration>
diff --git a/unittest_key_EC.pem b/unittest_key_EC.pem
new file mode 100644
index 0000000..9e65a68
--- /dev/null
+++ b/unittest_key_EC.pem
@@ -0,0 +1,5 @@
+-----BEGIN PRIVATE KEY-----
+MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgGaguGj8Yb1KkqKHd
+ISblUsjtOCbzAuVpX81i02sm8FWhRANCAARBnuotwKOsuvjH6iwTDhOAi7Q5pLWz
+xDkZjg2pcfbfi9FFTvLYETas7B2W6fx9PUezUmHTFTDV2JZuMYYFdZOw
+-----END PRIVATE KEY-----
diff --git a/unittest_key_RSA4096.pem b/unittest_key_RSA4096.pem
new file mode 100644
index 0000000..5613910
--- /dev/null
+++ b/unittest_key_RSA4096.pem
@@ -0,0 +1,51 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIJKgIBAAKCAgEAu8vBB+DvDPWgO1IWli5VuXHkMWYtD+wzToKIP/NkiGYdf1b9
+EvCgrvS3J4CutKCHoz1N5Pc29DKZduDhLbqWzqvUldJ8kSKd967PRCdM5vJHmdox
+H8E7m3YROBrppcJG32B5TD5qbn6jqrsSnwjlGZN2RAuURQwalSxq3/ZttiEzEMDR
+o/7v5rQINlF0Rsud+HlNXrxzhR/LQyMV3d+/JvoEWPmz4unzpKyYAMOjdNTcMLB0
+ccULFYKfJtRCqTyfgEUbT+mGLTbDQSkzl6mcNfDg+3hu8lTjQH+6EjjDUSu6oOmV
+OmDf8tZPmGRdw/9R1PUx6A1yKQR6FkdkqRDOt1GvIpUpsfT36Gg59u1a6YyHKudB
+6igFzr3JcTgslQ9KUZK68j6Xr2AlbsffyJ5ltyuHT4gMtCppkuGUi1CZ6+i55LfA
+t1cVx6meE+zJYK46zu9GKgsXg72BNCi/3v1HievZwDcm5A04YQZMZDgtb74pN1Vz
+uCEzxCgpyx3b3Y/K8JI6n9xMeQNIPF2h9JeiO9qasV4vJ51PbnS61BHR1E9Sh845
+2QVT+mqJnA8uRp22+sF0wU5Ae33UAZxnKk9Ax+uz6iRP6wlILlUpl+Chsjla6+Ut
+RkE4NRJh2xdin+j9GCaXFVOzx0RPLAYrD9/cM6BOV7CKZ/2iqVnaXH6sY00CAwEA
+AQKCAgEAgf77ci68i6YD8sxSIkeUw7gZFDD8hIcmxPQKyOn874ZwaNOYf+Hd+P4h
+QtELHrH48oDfSN3wRn44SVhFx9ffyNSdZdC8teLWort5cl4aF8wi0Fd/pdGnJ2hF
+ZycKEdo4ISyxCpwyJKa5ONgifcA0hs3TEitJybolNJn4qWv2ahr1NGWlOPairnp1
+LNSZvt/4TCX77tZYyRBHLgQ9gMb/lUWAeO7xHOSB6b4nwm+q2Jb4jSO4l4CkuZEg
+BkrskiYK60nrLBgk72t3IcYZlqSsI5LIyoqFkm48mUtRTIfKfIfeusC2siCZJYpA
+subXGXPF+7p3f4C/Q7F7qaxl+7pMvN2UtnCY4lYppMKETquVT2vtEgZjSVkQTb0X
+wEYmipMsonVcSLL4xTQxT0KdY2JczH7xicIJwolED+eob64t4q64ljfC3tlPJakE
+O2GgyynLH+oojBsH8XCvuhODFL+/e1eQmV0gu0az3fcHY+lCKrYEhlVB9wVe5Afn
+2GH71deZKY+iF2E8RJXwGdmpN4PbLyWepqCm0TdMUrn5A37TBr++3B9d7/eyMt1o
+afMxDzAZ27HaT6eg6SB/LJdkezvs67jLcur18NFMevSxKc+G+B8g+7euc00sKaLu
+WIX2bf8kc6L2sLECERpXOBNRSQxY33vS72szF6P7rN0+Szb+aQECggEBAOSqtUSY
+DJbyjnxmhGodcbuubNyNZRrs/vXNcYHmBZbWvR9rxtWgA1LYJiAY4wvtc+Ciw/Y6
+468MG6ip4rLu/6lDkekH/ARxW6zErCfCDDg91cP1FXP/Cwo6COJ9V2imCbTsiMSi
+bVigG4Lb4FTgBkreOyxDjnIVzZwCrwtxZWpNwA2GlEBS6IsJqelnUi/roi4rLpCj
+Y5mLvL8YYPduead9TwesYsXdK2qBf6A034GNXxvzhV70HfhnI60ydi6pNRrWamru
+TBJEuY7CipzyevqM3drfkFZDKyEBEVnk7We4IpiaOkBfLsshvFqk9asWzts1eDa8
+GpOqM0RYRCXZya0CggEBANI+YWOrK2qSLxFBbCupNaxG++/tafOXsgjkmGaRxCHt
+IcvPTIzFBQOSCobF51KoZIriVEVKw1bXHFlibeQ1IEuh8yVBo3l6NO8JKlc/tdJf
+pfkUh5XjQWJW8UqWULb5CkJCEheenG0oy8zhjOERPDcQXRYOhIo8JSpWfJFtWSWk
+L/X7kfkEvQxV0omFCUg4sCxdBeqIEItYd0Td0SCmHPZIs2KgSmpLIPBH0BMibNkY
+ZeSaz5nWbw06Unhkas+ulm3S+IEjb7neuAWGPlIXnPch9hw2pdZf49XRW4fjc7Nr
++G+U2Jgjv81+Rn7nFK2Whh22XKL5aP2myoVESlvzdCECggEBAIc9DwgKhSehPPQG
+DbpUv7coaennFizejcwCPWd+C0AysJesvmQJxu1wONwy29VqEmaA3TT7jz0wBAu0
+rgb1ou8Qr3MK7doS0Q1VJBw/f8qjh5HlmVKJPJZHzIlnaBLUYFlIq3rgNZt81ciH
+Eh4ggJg25vg+3DhM/NWQIMa7wick5LkbJwMEBdR1WrBYExuUWM7FazzP5VAifPbo
+DDFKfVi5m8wGAETVkZ/kBv9RRf7xBZcaZ37JEhCfr1H3zj26hVXiCf5EAWmsi7IL
+DL/WCTW1qmCQaGUcRJ24a/KmmmIFXTCzxk/b+2jYAvX5KfKOArlS3k5A4dcDil6Z
+dXSNYeECggEBAIHzRMcSOde5W5ZS1cV25VIC3h5CpMaH8OdGRFzBpHVD2SvcifhI
+nvzB+/epw3130A14L5ZUy8CVXVRyXnI71CZrh5pzo9OmEaneBGnBW2UY8cGvSs7+
+lJ9wFdyAZIt0Cz9BD2XCB/YAzVdp4mYK/Skb2C2V855t5prwsjZBXGTDw1FLmcJN
+h3xkX6nYrRAS2fHR3aJFT9SRbccHRAfmJOilrxs68EQbA9UAzj/Fe3oEdpaCiecQ
+f7uxXOBFUS/lPd3MFQXdHWXJn/zqKQMczUyDlVeC/6YtxumWafjoQc+Y4Qo2+lmv
+XxJpBrHRqxpQe71JxqCFgLunqG4O89c594ECggEAcMlYhrO2+R7mEPFDZLwSvTMV
+BOof6hxLIW8PkLQ/8HHTTacC02sKFiR921sw9NadQ7G0cf23Fecmolg4brJUh9sQ
+evjdYMdqIYPJT5hYSkIkdTk0Ny+tN2Pt4fBTTv3N2D3Da/5ODfrVSj0ib89DXG5D
+bPahlFLIhKaVbXNe1RQL/8j4nFf8D9LwuEMOMYrUpSMw9ULT5dB34QN2TOnwW9JW
+Md7aSY5pK1j1Y8FoWCAFSw+o+yWq5DbTFvcEhttWrUoFl9YxTolbLt6sw6TLy12x
+9haQDvbfvRkg3Es31DEC8plsltfg5S9KwRqCchKKUm7cnAJFhB2/2C6JX2k0XQ==
+-----END RSA PRIVATE KEY-----
diff --git a/update_attempter_android.cc b/update_attempter_android.cc
index c738e4e..b7d119f 100644
--- a/update_attempter_android.cc
+++ b/update_attempter_android.cc
@@ -22,6 +22,7 @@
 #include <utility>
 
 #include <android-base/properties.h>
+#include <android-base/unique_fd.h>
 #include <base/bind.h>
 #include <base/logging.h>
 #include <base/strings/string_number_conversions.h>
@@ -30,6 +31,7 @@
 #include <brillo/strings/string_utils.h>
 #include <log/log_safetynet.h>
 
+#include "update_engine/cleanup_previous_update_action.h"
 #include "update_engine/common/constants.h"
 #include "update_engine/common/error_code_utils.h"
 #include "update_engine/common/file_fetcher.h"
@@ -38,6 +40,7 @@
 #include "update_engine/metrics_reporter_interface.h"
 #include "update_engine/metrics_utils.h"
 #include "update_engine/network_selector.h"
+#include "update_engine/payload_consumer/certificate_parser_interface.h"
 #include "update_engine/payload_consumer/delta_performer.h"
 #include "update_engine/payload_consumer/download_action.h"
 #include "update_engine/payload_consumer/file_descriptor.h"
@@ -45,6 +48,7 @@
 #include "update_engine/payload_consumer/filesystem_verifier_action.h"
 #include "update_engine/payload_consumer/payload_constants.h"
 #include "update_engine/payload_consumer/payload_metadata.h"
+#include "update_engine/payload_consumer/payload_verifier.h"
 #include "update_engine/payload_consumer/postinstall_runner_action.h"
 #include "update_engine/update_boot_flags_action.h"
 #include "update_engine/update_status_utils.h"
@@ -55,6 +59,7 @@
 #include "update_engine/libcurl_http_fetcher.h"
 #endif
 
+using android::base::unique_fd;
 using base::Bind;
 using base::Time;
 using base::TimeDelta;
@@ -94,6 +99,34 @@
   return default_value;
 }
 
+bool ParseKeyValuePairHeaders(const vector<string>& key_value_pair_headers,
+                              std::map<string, string>* headers,
+                              brillo::ErrorPtr* error) {
+  for (const string& key_value_pair : key_value_pair_headers) {
+    string key;
+    string value;
+    if (!brillo::string_utils::SplitAtFirst(
+            key_value_pair, "=", &key, &value, false)) {
+      return LogAndSetError(
+          error, FROM_HERE, "Passed invalid header: " + key_value_pair);
+    }
+    if (!headers->emplace(key, value).second)
+      return LogAndSetError(error, FROM_HERE, "Passed repeated key: " + key);
+  }
+  return true;
+}
+
+// Unique identifier for the payload. An empty string means that the payload
+// can't be resumed.
+string GetPayloadId(const std::map<string, string>& headers) {
+  return (headers.count(kPayloadPropertyFileHash)
+              ? headers.at(kPayloadPropertyFileHash)
+              : "") +
+         (headers.count(kPayloadPropertyMetadataHash)
+              ? headers.at(kPayloadPropertyMetadataHash)
+              : "");
+}
+
 }  // namespace
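For illustration, a rough sketch of how the two helpers above combine. It assumes the snippet lives in this translation unit, so the anonymous-namespace helpers and the kPayloadProperty* constants are visible; the literal hash values are made up:

  std::map<string, string> headers;
  brillo::ErrorPtr error;
  std::vector<string> raw_headers = {
      string(kPayloadPropertyFileHash) + "=abc123",
      string(kPayloadPropertyMetadataHash) + "=def456",
  };
  if (ParseKeyValuePairHeaders(raw_headers, &headers, &error)) {
    // payload_id becomes "abc123def456"; an empty string would mean the
    // payload cannot be resumed.
    string payload_id = GetPayloadId(headers);
  }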
 
 UpdateAttempterAndroid::UpdateAttempterAndroid(
@@ -125,6 +158,12 @@
   } else {
     SetStatusAndNotify(UpdateStatus::IDLE);
     UpdatePrefsAndReportUpdateMetricsOnReboot();
+#ifdef _UE_SIDELOAD
+    LOG(INFO) << "Skip ScheduleCleanupPreviousUpdate in sideload because "
+              << "ApplyPayload will call it later.";
+#else
+    ScheduleCleanupPreviousUpdate();
+#endif
   }
 }
 
@@ -145,22 +184,11 @@
   DCHECK(status_ == UpdateStatus::IDLE);
 
   std::map<string, string> headers;
-  for (const string& key_value_pair : key_value_pair_headers) {
-    string key;
-    string value;
-    if (!brillo::string_utils::SplitAtFirst(
-            key_value_pair, "=", &key, &value, false)) {
-      return LogAndSetError(
-          error, FROM_HERE, "Passed invalid header: " + key_value_pair);
-    }
-    if (!headers.emplace(key, value).second)
-      return LogAndSetError(error, FROM_HERE, "Passed repeated key: " + key);
+  if (!ParseKeyValuePairHeaders(key_value_pair_headers, &headers, error)) {
+    return false;
   }
 
-  // Unique identifier for the payload. An empty string means that the payload
-  // can't be resumed.
-  string payload_id = (headers[kPayloadPropertyFileHash] +
-                       headers[kPayloadPropertyMetadataHash]);
+  string payload_id = GetPayloadId(headers);
 
   // Setup the InstallPlan based on the request.
   install_plan_ = InstallPlan();
@@ -196,15 +224,22 @@
   install_plan_.is_resume = !payload_id.empty() &&
                             DeltaPerformer::CanResumeUpdate(prefs_, payload_id);
   if (!install_plan_.is_resume) {
-    if (!DeltaPerformer::ResetUpdateProgress(prefs_, false)) {
+    // No need to reset dynamic_partititon_metadata_updated. If a previous
+    // call to AllocateSpaceForPayload used the same payload_id, the
+    // preallocated space is reused. Otherwise, DeltaPerformer re-allocates
+    // space when the payload is applied.
+    if (!DeltaPerformer::ResetUpdateProgress(
+            prefs_,
+            false /* quick */,
+            true /* skip_dynamic_partititon_metadata_updated */)) {
       LOG(WARNING) << "Unable to reset the update progress.";
     }
     if (!prefs_->SetString(kPrefsUpdateCheckResponseHash, payload_id)) {
       LOG(WARNING) << "Unable to save the update check response hash.";
     }
   }
-  install_plan_.source_slot = boot_control_->GetCurrentSlot();
-  install_plan_.target_slot = install_plan_.source_slot == 0 ? 1 : 0;
+  install_plan_.source_slot = GetCurrentSlot();
+  install_plan_.target_slot = GetTargetSlot();
 
   install_plan_.powerwash_required =
       GetHeaderAsBool(headers[kPayloadPropertyPowerwash], false);
@@ -212,20 +247,8 @@
   install_plan_.switch_slot_on_reboot =
       GetHeaderAsBool(headers[kPayloadPropertySwitchSlotOnReboot], true);
 
-  install_plan_.run_post_install = true;
-  // Optionally skip post install if and only if:
-  // a) we're resuming
-  // b) post install has already succeeded before
-  // c) RUN_POST_INSTALL is set to 0.
-  if (install_plan_.is_resume && prefs_->Exists(kPrefsPostInstallSucceeded)) {
-    bool post_install_succeeded = false;
-    if (prefs_->GetBoolean(kPrefsPostInstallSucceeded,
-                           &post_install_succeeded) &&
-        post_install_succeeded) {
-      install_plan_.run_post_install =
-          GetHeaderAsBool(headers[kPayloadPropertyRunPostInstall], true);
-    }
-  }
+  install_plan_.run_post_install =
+      GetHeaderAsBool(headers[kPayloadPropertyRunPostInstall], true);
 
   // Skip writing verity if we're resuming and verity has already been written.
   install_plan_.write_verity = true;
@@ -288,6 +311,19 @@
   return true;
 }
 
+bool UpdateAttempterAndroid::ApplyPayload(
+    int fd,
+    int64_t payload_offset,
+    int64_t payload_size,
+    const vector<string>& key_value_pair_headers,
+    brillo::ErrorPtr* error) {
+  payload_fd_.reset(dup(fd));
+  const string payload_url = "fd://" + std::to_string(payload_fd_.get());
+
+  return ApplyPayload(
+      payload_url, payload_offset, payload_size, key_value_pair_headers, error);
+}
+
 bool UpdateAttempterAndroid::SuspendUpdate(brillo::ErrorPtr* error) {
   if (!processor_->IsRunning())
     return LogAndSetError(error, FROM_HERE, "No ongoing update to suspend.");
@@ -314,18 +350,19 @@
             << UpdateStatusToString(status_) << " to UpdateStatus::IDLE";
 
   switch (status_) {
-    case UpdateStatus::IDLE:
+    case UpdateStatus::IDLE: {
+      if (!boot_control_->GetDynamicPartitionControl()->ResetUpdate(prefs_)) {
+        LOG(WARNING) << "Failed to reset snapshots. UpdateStatus is IDLE but"
+                     << "space might not be freed.";
+      }
       return true;
+    }
 
     case UpdateStatus::UPDATED_NEED_REBOOT: {
-      // Remove the reboot marker so that if the machine is rebooted
-      // after resetting to idle state, it doesn't go back to
-      // UpdateStatus::UPDATED_NEED_REBOOT state.
-      bool ret_value = prefs_->Delete(kPrefsUpdateCompletedOnBootId);
-      ClearMetricsPrefs();
+      bool ret_value = true;
 
       // Update the boot flags so the current slot has higher priority.
-      if (!boot_control_->SetActiveBootSlot(boot_control_->GetCurrentSlot()))
+      if (!boot_control_->SetActiveBootSlot(GetCurrentSlot()))
         ret_value = false;
 
       // Mark the current slot as successful again, since marking it as active
@@ -334,6 +371,20 @@
       if (!boot_control_->MarkBootSuccessfulAsync(Bind([](bool successful) {})))
         ret_value = false;
 
+      // Resets the warm reset property since we won't switch the slot.
+      hardware_->SetWarmReset(false);
+
+      // Remove update progress for DeltaPerformer and remove snapshots.
+      if (!boot_control_->GetDynamicPartitionControl()->ResetUpdate(prefs_))
+        ret_value = false;
+
+      // Remove the reboot marker so that if the machine is rebooted
+      // after resetting to idle state, it doesn't go back to
+      // UpdateStatus::UPDATED_NEED_REBOOT state.
+      if (!prefs_->Delete(kPrefsUpdateCompletedOnBootId))
+        ret_value = false;
+      ClearMetricsPrefs();
+
       if (!ret_value) {
         return LogAndSetError(
             error, FROM_HERE, "Failed to reset the status to ");
@@ -352,8 +403,10 @@
   }
 }
 
-bool UpdateAttempterAndroid::VerifyPayloadApplicable(
-    const std::string& metadata_filename, brillo::ErrorPtr* error) {
+bool UpdateAttempterAndroid::VerifyPayloadParseManifest(
+    const std::string& metadata_filename,
+    DeltaArchiveManifest* manifest,
+    brillo::ErrorPtr* error) {
   FileDescriptorPtr fd(new EintrSafeFileDescriptor);
   if (!fd->Open(metadata_filename.c_str(), O_RDONLY)) {
     return LogAndSetError(
@@ -395,24 +448,39 @@
   }
   fd->Close();
 
-  string public_key;
-  if (!utils::ReadFile(constants::kUpdatePayloadPublicKeyPath, &public_key)) {
-    return LogAndSetError(error, FROM_HERE, "Failed to read public key.");
+  auto payload_verifier = PayloadVerifier::CreateInstanceFromZipPath(
+      constants::kUpdateCertificatesPath);
+  if (!payload_verifier) {
+    return LogAndSetError(error,
+                          FROM_HERE,
+                          "Failed to create the payload verifier from " +
+                              std::string(constants::kUpdateCertificatesPath));
   }
-  errorcode =
-      payload_metadata.ValidateMetadataSignature(metadata, "", public_key);
+  errorcode = payload_metadata.ValidateMetadataSignature(
+      metadata, "", *payload_verifier);
   if (errorcode != ErrorCode::kSuccess) {
     return LogAndSetError(error,
                           FROM_HERE,
                           "Failed to validate metadata signature: " +
                               utils::ErrorCodeToString(errorcode));
   }
-  DeltaArchiveManifest manifest;
-  if (!payload_metadata.GetManifest(metadata, &manifest)) {
+  if (!payload_metadata.GetManifest(metadata, manifest)) {
     return LogAndSetError(error, FROM_HERE, "Failed to parse manifest.");
   }
 
-  BootControlInterface::Slot current_slot = boot_control_->GetCurrentSlot();
+  return true;
+}
+
+bool UpdateAttempterAndroid::VerifyPayloadApplicable(
+    const std::string& metadata_filename, brillo::ErrorPtr* error) {
+  DeltaArchiveManifest manifest;
+  TEST_AND_RETURN_FALSE(
+      VerifyPayloadParseManifest(metadata_filename, &manifest, error));
+
+  FileDescriptorPtr fd(new EintrSafeFileDescriptor);
+  ErrorCode errorcode;
+
+  BootControlInterface::Slot current_slot = GetCurrentSlot();
   for (const PartitionUpdate& partition : manifest.partitions()) {
     if (!partition.has_old_partition_info())
       continue;
@@ -453,6 +521,11 @@
                                             ErrorCode code) {
   LOG(INFO) << "Processing Done.";
 
+  if (status_ == UpdateStatus::CLEANUP_PREVIOUS_UPDATE) {
+    TerminateUpdateAndNotify(code);
+    return;
+  }
+
   switch (code) {
     case ErrorCode::kSuccess:
       // Update succeeded.
@@ -497,6 +570,12 @@
   // Reset download progress regardless of whether or not the download
   // action succeeded.
   const string type = action->Type();
+  if (type == CleanupPreviousUpdateAction::StaticType() ||
+      (type == NoOpAction::StaticType() &&
+       status_ == UpdateStatus::CLEANUP_PREVIOUS_UPDATE)) {
+    cleanup_previous_update_code_ = code;
+    NotifyCleanupPreviousUpdateCallbacksAndClear();
+  }
   if (type == DownloadAction::StaticType()) {
     download_progress_ = 0;
   }
@@ -509,6 +588,9 @@
     // If an action failed, the ActionProcessor will cancel the whole thing.
     return;
   }
+  if (type == UpdateBootFlagsAction::StaticType()) {
+    SetStatusAndNotify(UpdateStatus::CLEANUP_PREVIOUS_UPDATE);
+  }
   if (type == DownloadAction::StaticType()) {
     SetStatusAndNotify(UpdateStatus::FINALIZING);
   } else if (type == FilesystemVerifierAction::StaticType()) {
@@ -576,13 +658,22 @@
     return;
   }
 
-  boot_control_->Cleanup();
+  if (status_ == UpdateStatus::CLEANUP_PREVIOUS_UPDATE) {
+    LOG(INFO) << "Terminating cleanup previous update.";
+    SetStatusAndNotify(UpdateStatus::IDLE);
+    for (auto observer : daemon_state_->service_observers())
+      observer->SendPayloadApplicationComplete(error_code);
+    return;
+  }
+
+  boot_control_->GetDynamicPartitionControl()->Cleanup();
 
   download_progress_ = 0;
   UpdateStatus new_status =
       (error_code == ErrorCode::kSuccess ? UpdateStatus::UPDATED_NEED_REBOOT
                                          : UpdateStatus::IDLE);
   SetStatusAndNotify(new_status);
+  payload_fd_.reset();
 
   // The network id is only applicable to one download attempt and once it's
   // done the network id should not be re-used anymore.
@@ -596,6 +687,9 @@
   CollectAndReportUpdateMetricsOnUpdateFinished(error_code);
   ClearMetricsPrefs();
   if (error_code == ErrorCode::kSuccess) {
+    // We should only reset the PayloadAttemptNumber if the update succeeds, or
+    // we switch to a different payload.
+    prefs_->Delete(kPrefsPayloadAttemptNumber);
     metrics_utils::SetSystemUpdatedMarker(clock_.get(), prefs_);
     // Clear the total bytes downloaded if and only if the update succeeds.
     prefs_->SetInt64(kPrefsTotalBytesDownloaded, 0);
@@ -623,6 +717,9 @@
   // Actions:
   auto update_boot_flags_action =
       std::make_unique<UpdateBootFlagsAction>(boot_control_);
+  auto cleanup_previous_update_action =
+      boot_control_->GetDynamicPartitionControl()
+          ->GetCleanupPreviousUpdateAction(boot_control_, prefs_, this);
   auto install_plan_action = std::make_unique<InstallPlanAction>(install_plan_);
   auto download_action =
       std::make_unique<DownloadAction>(prefs_,
@@ -647,6 +744,7 @@
               postinstall_runner_action.get());
 
   processor_->EnqueueAction(std::move(update_boot_flags_action));
+  processor_->EnqueueAction(std::move(cleanup_previous_update_action));
   processor_->EnqueueAction(std::move(install_plan_action));
   processor_->EnqueueAction(std::move(download_action));
   processor_->EnqueueAction(std::move(filesystem_verifier_action));
@@ -731,11 +829,15 @@
         total_bytes_downloaded;
 
     int download_overhead_percentage = 0;
-    if (current_bytes_downloaded > 0) {
+    if (total_bytes_downloaded >= payload_size) {
+      CHECK_GT(payload_size, 0);
       download_overhead_percentage =
-          (total_bytes_downloaded - current_bytes_downloaded) * 100ull /
-          current_bytes_downloaded;
+          (total_bytes_downloaded - payload_size) * 100ull / payload_size;
+    } else {
+      LOG(WARNING) << "Downloaded bytes " << total_bytes_downloaded
+                   << " is smaller than the payload size " << payload_size;
     }
+
     metrics_reporter_->ReportSuccessfulUpdateMetrics(
         static_cast<int>(attempt_number),
         0,  // update abandoned count
@@ -826,10 +928,118 @@
   CHECK(prefs_);
   prefs_->Delete(kPrefsCurrentBytesDownloaded);
   prefs_->Delete(kPrefsNumReboots);
-  prefs_->Delete(kPrefsPayloadAttemptNumber);
   prefs_->Delete(kPrefsSystemUpdatedMarker);
   prefs_->Delete(kPrefsUpdateTimestampStart);
   prefs_->Delete(kPrefsUpdateBootTimestampStart);
 }
 
+BootControlInterface::Slot UpdateAttempterAndroid::GetCurrentSlot() const {
+  return boot_control_->GetCurrentSlot();
+}
+
+BootControlInterface::Slot UpdateAttempterAndroid::GetTargetSlot() const {
+  return GetCurrentSlot() == 0 ? 1 : 0;
+}
+
+uint64_t UpdateAttempterAndroid::AllocateSpaceForPayload(
+    const std::string& metadata_filename,
+    const vector<string>& key_value_pair_headers,
+    brillo::ErrorPtr* error) {
+  DeltaArchiveManifest manifest;
+  if (!VerifyPayloadParseManifest(metadata_filename, &manifest, error)) {
+    return 0;
+  }
+  std::map<string, string> headers;
+  if (!ParseKeyValuePairHeaders(key_value_pair_headers, &headers, error)) {
+    return 0;
+  }
+
+  string payload_id = GetPayloadId(headers);
+  uint64_t required_size = 0;
+  if (!DeltaPerformer::PreparePartitionsForUpdate(prefs_,
+                                                  boot_control_,
+                                                  GetTargetSlot(),
+                                                  manifest,
+                                                  payload_id,
+                                                  &required_size)) {
+    if (required_size == 0) {
+      LogAndSetError(error, FROM_HERE, "Failed to allocate space for payload.");
+      return 0;
+    } else {
+      LOG(ERROR) << "Insufficient space for payload: " << required_size
+                 << " bytes";
+      return required_size;
+    }
+  }
+
+  LOG(INFO) << "Successfully allocated space for payload.";
+  return 0;
+}
+
+void UpdateAttempterAndroid::CleanupSuccessfulUpdate(
+    std::unique_ptr<CleanupSuccessfulUpdateCallbackInterface> callback,
+    brillo::ErrorPtr* error) {
+  if (cleanup_previous_update_code_.has_value()) {
+    LOG(INFO) << "CleanupSuccessfulUpdate has previously completed with "
+              << utils::ErrorCodeToString(*cleanup_previous_update_code_);
+    if (callback) {
+      callback->OnCleanupComplete(
+          static_cast<int32_t>(*cleanup_previous_update_code_));
+    }
+    return;
+  }
+  if (callback) {
+    auto callback_ptr = callback.get();
+    cleanup_previous_update_callbacks_.emplace_back(std::move(callback));
+    callback_ptr->RegisterForDeathNotifications(
+        base::Bind(&UpdateAttempterAndroid::RemoveCleanupPreviousUpdateCallback,
+                   base::Unretained(this),
+                   base::Unretained(callback_ptr)));
+  }
+  ScheduleCleanupPreviousUpdate();
+}
+
+void UpdateAttempterAndroid::ScheduleCleanupPreviousUpdate() {
+  // If a previous CleanupSuccessfulUpdate call has not finished, or an update
+  // is in progress, skip enqueueing the action.
+  if (processor_->IsRunning()) {
+    LOG(INFO) << "Already processing an update. CleanupPreviousUpdate should "
+              << "be done when the current update finishes.";
+    return;
+  }
+  LOG(INFO) << "Scheduling CleanupPreviousUpdateAction.";
+  auto action =
+      boot_control_->GetDynamicPartitionControl()
+          ->GetCleanupPreviousUpdateAction(boot_control_, prefs_, this);
+  processor_->EnqueueAction(std::move(action));
+  processor_->set_delegate(this);
+  SetStatusAndNotify(UpdateStatus::CLEANUP_PREVIOUS_UPDATE);
+  processor_->StartProcessing();
+}
+
+void UpdateAttempterAndroid::OnCleanupProgressUpdate(double progress) {
+  for (auto&& callback : cleanup_previous_update_callbacks_) {
+    callback->OnCleanupProgressUpdate(progress);
+  }
+}
+
+void UpdateAttempterAndroid::NotifyCleanupPreviousUpdateCallbacksAndClear() {
+  CHECK(cleanup_previous_update_code_.has_value());
+  for (auto&& callback : cleanup_previous_update_callbacks_) {
+    callback->OnCleanupComplete(
+        static_cast<int32_t>(*cleanup_previous_update_code_));
+  }
+  cleanup_previous_update_callbacks_.clear();
+}
+
+void UpdateAttempterAndroid::RemoveCleanupPreviousUpdateCallback(
+    CleanupSuccessfulUpdateCallbackInterface* callback) {
+  auto end_it =
+      std::remove_if(cleanup_previous_update_callbacks_.begin(),
+                     cleanup_previous_update_callbacks_.end(),
+                     [&](const auto& e) { return e.get() == callback; });
+  cleanup_previous_update_callbacks_.erase(
+      end_it, cleanup_previous_update_callbacks_.end());
+}
+
 }  // namespace chromeos_update_engine
diff --git a/update_attempter_android.h b/update_attempter_android.h
index e4b40de..f8c78de 100644
--- a/update_attempter_android.h
+++ b/update_attempter_android.h
@@ -23,6 +23,7 @@
 #include <string>
 #include <vector>
 
+#include <android-base/unique_fd.h>
 #include <base/time/time.h>
 
 #include "update_engine/client_library/include/update_engine/update_status.h"
@@ -46,7 +47,8 @@
     : public ServiceDelegateAndroidInterface,
       public ActionProcessorDelegate,
       public DownloadActionDelegate,
-      public PostinstallRunnerAction::DelegateInterface {
+      public PostinstallRunnerAction::DelegateInterface,
+      public CleanupPreviousUpdateActionDelegateInterface {
  public:
   using UpdateStatus = update_engine::UpdateStatus;
 
@@ -65,12 +67,24 @@
                     int64_t payload_size,
                     const std::vector<std::string>& key_value_pair_headers,
                     brillo::ErrorPtr* error) override;
+  bool ApplyPayload(int fd,
+                    int64_t payload_offset,
+                    int64_t payload_size,
+                    const std::vector<std::string>& key_value_pair_headers,
+                    brillo::ErrorPtr* error) override;
   bool SuspendUpdate(brillo::ErrorPtr* error) override;
   bool ResumeUpdate(brillo::ErrorPtr* error) override;
   bool CancelUpdate(brillo::ErrorPtr* error) override;
   bool ResetStatus(brillo::ErrorPtr* error) override;
   bool VerifyPayloadApplicable(const std::string& metadata_filename,
                                brillo::ErrorPtr* error) override;
+  uint64_t AllocateSpaceForPayload(
+      const std::string& metadata_filename,
+      const std::vector<std::string>& key_value_pair_headers,
+      brillo::ErrorPtr* error) override;
+  void CleanupSuccessfulUpdate(
+      std::unique_ptr<CleanupSuccessfulUpdateCallbackInterface> callback,
+      brillo::ErrorPtr* error) override;
 
   // ActionProcessorDelegate methods:
   void ProcessingDone(const ActionProcessor* processor,
@@ -90,6 +104,9 @@
   // PostinstallRunnerAction::DelegateInterface
   void ProgressUpdate(double progress) override;
 
+  // CleanupPreviousUpdateActionDelegateInterface
+  void OnCleanupProgressUpdate(double progress) override;
+
  private:
   friend class UpdateAttempterAndroidTest;
 
@@ -151,11 +168,31 @@
   void UpdatePrefsOnUpdateStart(bool is_resume);
 
   // Prefs to delete:
-  //   |kPrefsNumReboots|, |kPrefsPayloadAttemptNumber|,
+  //   |kPrefsNumReboots|, |kPrefsCurrentBytesDownloaded|
   //   |kPrefsSystemUpdatedMarker|, |kPrefsUpdateTimestampStart|,
-  //   |kPrefsUpdateBootTimestampStart|, |kPrefsCurrentBytesDownloaded|
+  //   |kPrefsUpdateBootTimestampStart|
   void ClearMetricsPrefs();
 
+  // Return source and target slots for update.
+  BootControlInterface::Slot GetCurrentSlot() const;
+  BootControlInterface::Slot GetTargetSlot() const;
+
+  // Helper for the public VerifyPayloadApplicable(). Returns the parsed
+  // manifest in |manifest|.
+  static bool VerifyPayloadParseManifest(const std::string& metadata_filename,
+                                         DeltaArchiveManifest* manifest,
+                                         brillo::ErrorPtr* error);
+
+  // Enqueue and run a CleanupPreviousUpdateAction.
+  void ScheduleCleanupPreviousUpdate();
+
+  // Notify and clear |cleanup_previous_update_callbacks_|.
+  void NotifyCleanupPreviousUpdateCallbacksAndClear();
+
+  // Remove |callback| from |cleanup_previous_update_callbacks_|.
+  void RemoveCleanupPreviousUpdateCallback(
+      CleanupSuccessfulUpdateCallbackInterface* callback);
+
   DaemonStateInterface* daemon_state_;
 
   // DaemonStateAndroid pointers.
@@ -191,6 +228,14 @@
 
   std::unique_ptr<MetricsReporterInterface> metrics_reporter_;
 
+  ::android::base::unique_fd payload_fd_;
+
+  std::vector<std::unique_ptr<CleanupSuccessfulUpdateCallbackInterface>>
+      cleanup_previous_update_callbacks_;
+  // Result of the previous CleanupPreviousUpdateAction. nullopt if
+  // CleanupPreviousUpdateAction has not been executed.
+  std::optional<ErrorCode> cleanup_previous_update_code_{std::nullopt};
+
   DISALLOW_COPY_AND_ASSIGN(UpdateAttempterAndroid);
 };
 
diff --git a/update_attempter_android_unittest.cc b/update_attempter_android_unittest.cc
index 2593d44..721b735 100644
--- a/update_attempter_android_unittest.cc
+++ b/update_attempter_android_unittest.cc
@@ -18,6 +18,7 @@
 
 #include <memory>
 #include <string>
+#include <utility>
 
 #include <android-base/properties.h>
 #include <base/time/time.h>
@@ -57,6 +58,11 @@
     update_attempter_android_.status_ = status;
   }
 
+  void AddPayload(InstallPlan::Payload&& payload) {
+    update_attempter_android_.install_plan_.payloads.push_back(
+        std::move(payload));
+  }
+
   UpdateAttempterAndroid update_attempter_android_{
       &daemon_state_, &prefs_, &boot_control_, &hardware_};
 
@@ -111,9 +117,10 @@
   update_attempter_android_.Init();
   // Check that we reset the metric prefs.
   EXPECT_FALSE(prefs_.Exists(kPrefsNumReboots));
-  EXPECT_FALSE(prefs_.Exists(kPrefsPayloadAttemptNumber));
   EXPECT_FALSE(prefs_.Exists(kPrefsUpdateTimestampStart));
   EXPECT_FALSE(prefs_.Exists(kPrefsSystemUpdatedMarker));
+  // PayloadAttemptNumber should persist across reboots.
+  EXPECT_TRUE(prefs_.Exists(kPrefsPayloadAttemptNumber));
 }
 
 TEST_F(UpdateAttempterAndroidTest, ReportMetricsOnUpdateTerminated) {
@@ -142,9 +149,13 @@
       .Times(1);
   EXPECT_CALL(*metrics_reporter_,
               ReportSuccessfulUpdateMetrics(
-                  2, 0, _, _, _, _, duration, duration_uptime, 3, _))
+                  2, 0, _, 50, _, _, duration, duration_uptime, 3, _))
       .Times(1);
 
+  // Adds a payload of 50 bytes to the InstallPlan.
+  InstallPlan::Payload payload;
+  payload.size = 50;
+  AddPayload(std::move(payload));
   SetUpdateStatus(UpdateStatus::UPDATE_AVAILABLE);
   update_attempter_android_.ProcessingDone(nullptr, ErrorCode::kSuccess);
 
@@ -178,15 +189,20 @@
                   _,
                   _,
                   _,
-                  _,
+                  50,
                   test_utils::DownloadSourceMatcher(total_bytes),
-                  125,
+                  80,
                   _,
                   _,
                   _,
                   _))
       .Times(1);
 
+  // Adds a payload of 50 bytes to the InstallPlan.
+  InstallPlan::Payload payload;
+  payload.size = 50;
+  AddPayload(std::move(payload));
+
   // The first update fails after receiving 50 bytes in total.
   update_attempter_android_.BytesReceived(30, 50, 200);
   update_attempter_android_.ProcessingDone(nullptr, ErrorCode::kError);
@@ -198,7 +214,7 @@
       metrics_utils::GetPersistedValue(kPrefsTotalBytesDownloaded, &prefs_));
 
   // The second update succeeds after receiving 40 bytes, which leads to a
-  // overhead of 50 / 40 = 125%.
+  // overhead of (90 - 50) / 50 = 80%.
   update_attempter_android_.BytesReceived(40, 40, 50);
   update_attempter_android_.ProcessingDone(nullptr, ErrorCode::kSuccess);
   // Both prefs should be cleared.
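To make the overhead arithmetic in this test concrete, here is a tiny stand-alone sketch of the same computation the attempter now performs; the numbers come from the test (two attempts download 50 + 40 = 90 bytes for a 50-byte payload):

#include <cstdint>
#include <iostream>

int main() {
  uint64_t total_bytes_downloaded = 90;  // across both attempts
  uint64_t payload_size = 50;
  int download_overhead_percentage = 0;
  if (total_bytes_downloaded >= payload_size && payload_size > 0) {
    download_overhead_percentage =
        (total_bytes_downloaded - payload_size) * 100ull / payload_size;
  }
  std::cout << download_overhead_percentage << "%\n";  // prints "80%"
  return 0;
}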
diff --git a/update_boot_flags_action.cc b/update_boot_flags_action.cc
index 97ef7f2..ee92ae0 100644
--- a/update_boot_flags_action.cc
+++ b/update_boot_flags_action.cc
@@ -50,8 +50,11 @@
   }
 }
 
-void UpdateBootFlagsAction::CompleteUpdateBootFlags(bool successful) {
+void UpdateBootFlagsAction::TerminateProcessing() {
   is_running_ = false;
+}
+
+void UpdateBootFlagsAction::CompleteUpdateBootFlags(bool successful) {
   if (!successful) {
     // We ignore the failure for now because if the updating boot flags is flaky
     // or has a bug in a specific release, then blocking the update can cause
@@ -61,6 +64,18 @@
     // TODO(ahassani): Add new error code metric for kUpdateBootFlagsFailed.
     LOG(ERROR) << "Updating boot flags failed, but ignoring its failure.";
   }
+
+  // As the callback of MarkBootSuccessfulAsync, this function can still be
+  // called even after the current UpdateBootFlagsAction object has been
+  // destroyed by the action processor. In this case, check the value of the
+  // static variable |is_running_| and skip executing the rest of the callback.
+  if (!is_running_) {
+    LOG(INFO) << "UpdateBootFlagsAction is no longer running.";
+    return;
+  }
+
+  is_running_ = false;
+
   updated_boot_flags_ = true;
   processor_->ActionComplete(this, ErrorCode::kSuccess);
 }
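The guard above works because |is_running_| is a static member: the MarkBootSuccessfulAsync callback never dereferences the destroyed action, it only consults the flag. A stand-alone sketch of that pattern, with purely illustrative names (not the real classes):

#include <functional>
#include <iostream>

class OneShotAction {
 public:
  // |async_op| eventually invokes the completion callback, possibly after
  // this object has been destroyed by its owner.
  void Start(std::function<void(std::function<void(bool)>)> async_op) {
    is_running_ = true;
    async_op([](bool ok) { OnDone(ok); });  // callback never touches |this|
  }
  void Terminate() { is_running_ = false; }  // processor cancelled the action

 private:
  static void OnDone(bool ok) {
    if (!is_running_) {
      std::cout << "Action no longer running; ignoring late callback.\n";
      return;
    }
    is_running_ = false;
    std::cout << (ok ? "boot flags updated\n" : "failed, but ignored\n");
  }
  static bool is_running_;
};

bool OneShotAction::is_running_ = false;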
diff --git a/update_boot_flags_action.h b/update_boot_flags_action.h
index afa2c3f..892aab7 100644
--- a/update_boot_flags_action.h
+++ b/update_boot_flags_action.h
@@ -30,6 +30,8 @@
 
   void PerformAction() override;
 
+  void TerminateProcessing() override;
+
   static std::string StaticType() { return "UpdateBootFlagsAction"; }
   std::string Type() const override { return StaticType(); }
 
diff --git a/update_engine.rc b/update_engine.rc
index a7d6235..b9f80fc 100644
--- a/update_engine.rc
+++ b/update_engine.rc
@@ -1,8 +1,8 @@
 service update_engine /system/bin/update_engine --logtostderr --logtofile --foreground
     class late_start
     user root
-    group root system wakelock inet cache
-    writepid /dev/cpuset/system-background/tasks
+    group root system wakelock inet cache media_rw
+    writepid /dev/cpuset/system-background/tasks /dev/blkio/background/tasks
     disabled
 
 on property:ro.boot.slot_suffix=*
diff --git a/update_engine_client_android.cc b/update_engine_client_android.cc
index 6863799..1a68cf4 100644
--- a/update_engine_client_android.cc
+++ b/update_engine_client_android.cc
@@ -72,12 +72,15 @@
   // Called whenever the UpdateEngine daemon dies.
   void UpdateEngineServiceDied();
 
+  static std::vector<android::String16> ParseHeaders(const std::string& arg);
+
   // Copy of argc and argv passed to main().
   int argc_;
   char** argv_;
 
   android::sp<android::os::IUpdateEngine> service_;
   android::sp<android::os::BnUpdateEngineCallback> callback_;
+  android::sp<android::os::BnUpdateEngineCallback> cleanup_callback_;
 
   brillo::BinderWatcher binder_watcher_;
 };
@@ -123,15 +126,16 @@
   DEFINE_string(headers,
                 "",
                 "A list of key-value pairs, one element of the list per line. "
-                "Used when --update is passed.");
+                "Used when --update or --allocate is passed.");
 
   DEFINE_bool(verify,
               false,
               "Given payload metadata, verify if the payload is applicable.");
+  DEFINE_bool(allocate, false, "Given payload metadata, allocate space.");
   DEFINE_string(metadata,
                 "/data/ota_package/metadata",
                 "The path to the update payload metadata. "
-                "Used when --verify is passed.");
+                "Used when --verify or --allocate is passed.");
 
   DEFINE_bool(suspend, false, "Suspend an ongoing update and exit.");
   DEFINE_bool(resume, false, "Resume a suspended update.");
@@ -141,7 +145,10 @@
               false,
               "Follow status update changes until a final state is reached. "
               "Exit status is 0 if the update succeeded, and 1 otherwise.");
-
+  DEFINE_bool(merge,
+              false,
+              "Wait for previous update to merge. "
+              "Only available after rebooting to new slot.");
   // Boilerplate init commands.
   base::CommandLine::Init(argc_, argv_);
   brillo::FlagHelper::Init(argc_, argv_, "Android Update Engine Client");
@@ -200,6 +207,36 @@
     return ExitWhenIdle(status);
   }
 
+  if (FLAGS_allocate) {
+    auto headers = ParseHeaders(FLAGS_headers);
+    int64_t ret = 0;
+    Status status = service_->allocateSpaceForPayload(
+        android::String16{FLAGS_metadata.data(), FLAGS_metadata.size()},
+        headers,
+        &ret);
+    if (status.isOk()) {
+      if (ret == 0) {
+        LOG(INFO) << "Successfully allocated space for payload.";
+      } else {
+        LOG(INFO) << "Insufficient space; required " << ret << " bytes.";
+      }
+    } else {
+      LOG(INFO) << "Allocation failed.";
+    }
+    return ExitWhenIdle(status);
+  }
+
+  if (FLAGS_merge) {
+    // Register a callback object with the service.
+    cleanup_callback_ = new UECallback(this);
+    Status status = service_->cleanupSuccessfulUpdate(cleanup_callback_);
+    if (!status.isOk()) {
+      LOG(ERROR) << "Failed to call cleanupSuccessfulUpdate.";
+      return ExitWhenIdle(status);
+    }
+    keep_running = true;
+  }
+
   if (FLAGS_follow) {
     // Register a callback object with the service.
     callback_ = new UECallback(this);
@@ -212,12 +249,7 @@
   }
 
   if (FLAGS_update) {
-    std::vector<std::string> headers = base::SplitString(
-        FLAGS_headers, "\n", base::KEEP_WHITESPACE, base::SPLIT_WANT_NONEMPTY);
-    std::vector<android::String16> and_headers;
-    for (const auto& header : headers) {
-      and_headers.push_back(android::String16{header.data(), header.size()});
-    }
+    auto and_headers = ParseHeaders(FLAGS_headers);
     Status status = service_->applyPayload(
         android::String16{FLAGS_payload.data(), FLAGS_payload.size()},
         FLAGS_offset,
@@ -261,6 +293,17 @@
   QuitWithExitCode(1);
 }
 
+std::vector<android::String16> UpdateEngineClientAndroid::ParseHeaders(
+    const std::string& arg) {
+  std::vector<std::string> headers = base::SplitString(
+      arg, "\n", base::KEEP_WHITESPACE, base::SPLIT_WANT_NONEMPTY);
+  std::vector<android::String16> and_headers;
+  for (const auto& header : headers) {
+    and_headers.push_back(android::String16{header.data(), header.size()});
+  }
+  return and_headers;
+}
+
 }  // namespace internal
 }  // namespace chromeos_update_engine
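With the flags added above, the new service calls can be exercised directly from a device shell. A rough usage example, assuming the client is installed as update_engine_client and using the default metadata path declared above (header contents omitted):

  # Preallocate space for the payload described by the default metadata path.
  update_engine_client --allocate --metadata=/data/ota_package/metadata
  # After rebooting into the updated slot, wait for the snapshot merge to finish.
  update_engine_client --merge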
 
diff --git a/update_manager/chromeos_policy.cc b/update_manager/chromeos_policy.cc
index dd6cc8d..b96e29d 100644
--- a/update_manager/chromeos_policy.cc
+++ b/update_manager/chromeos_policy.cc
@@ -154,6 +154,8 @@
     case ErrorCode::kInternalLibCurlError:
     case ErrorCode::kUnresolvedHostError:
     case ErrorCode::kUnresolvedHostRecovered:
+    case ErrorCode::kNotEnoughSpace:
+    case ErrorCode::kDeviceCorrupted:
       LOG(INFO) << "Not changing URL index or failure count due to error "
                 << chromeos_update_engine::utils::ErrorCodeToString(err_code)
                 << " (" << static_cast<int>(err_code) << ")";
diff --git a/update_metadata.proto b/update_metadata.proto
index 3d136ca..e6a067e 100644
--- a/update_metadata.proto
+++ b/update_metadata.proto
@@ -14,24 +14,26 @@
 // limitations under the License.
 //
 
-// Update file format: A delta update file contains all the deltas needed
-// to update a system from one specific version to another specific
-// version. The update format is represented by this struct pseudocode:
+// Update file format: An update file contains all the operations needed
+// to update a system to a specific version. It can be a full payload which
+// can update from any version, or a delta payload which can only update
+// from a specific version.
+// The update format is represented by this struct pseudocode:
 // struct delta_update_file {
 //   char magic[4] = "CrAU";
-//   uint64 file_format_version;
+//   uint64 file_format_version;  // payload major version
 //   uint64 manifest_size;  // Size of protobuf DeltaArchiveManifest
 //
-//   // Only present if format_version > 1:
+//   // Only present if format_version >= 2:
 //   uint32 metadata_signature_size;
 //
-//   // The Bzip2 compressed DeltaArchiveManifest
-//   char manifest[];
+//   // The DeltaArchiveManifest protobuf serialized, not compressed.
+//   char manifest[manifest_size];
 //
 //   // The signature of the metadata (from the beginning of the payload up to
 //   // this location, not including the signature itself). This is a serialized
 //   // Signatures message.
-//   char medatada_signature_message[metadata_signature_size];
+//   char metadata_signature_message[metadata_signature_size];
 //
 //   // Data blobs for files, no specific format. The specific offset
 //   // and length of each data blob is recorded in the DeltaArchiveManifest.
@@ -39,9 +41,12 @@
 //     char data[];
 //   } blobs[];
 //
-//   // These two are not signed:
+//   // The signature of the entire payload (everything up to this location),
+//   // except that metadata_signature_message is skipped to simplify the
+//   // signing process. These two fields are not signed:
 //   uint64 payload_signatures_message_size;
-//   char payload_signatures_message[];
+//   // This is a serialized Signatures message.
+//   char payload_signatures_message[payload_signatures_message_size];
 //
 // };
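A minimal reader for the fixed-size header fields in the pseudocode above. This is only a sketch: it assumes a major version >= 2 payload, that the integers are stored big-endian as in the existing parser, and it does no validation beyond the magic check:

#include <cstdint>
#include <cstring>
#include <fstream>
#include <string>

struct PayloadHeader {
  uint64_t file_format_version = 0;      // payload major version
  uint64_t manifest_size = 0;            // size of DeltaArchiveManifest
  uint32_t metadata_signature_size = 0;  // only meaningful when version >= 2
};

static uint64_t ReadBE64(const unsigned char* p) {
  uint64_t v = 0;
  for (int i = 0; i < 8; ++i) v = (v << 8) | p[i];
  return v;
}

static uint32_t ReadBE32(const unsigned char* p) {
  uint32_t v = 0;
  for (int i = 0; i < 4; ++i) v = (v << 8) | p[i];
  return v;
}

bool ReadPayloadHeader(const std::string& path, PayloadHeader* out) {
  std::ifstream f(path, std::ios::binary);
  unsigned char buf[4 + 8 + 8 + 4];
  if (!f.read(reinterpret_cast<char*>(buf), sizeof(buf)))
    return false;
  if (std::memcmp(buf, "CrAU", 4) != 0)  // magic
    return false;
  out->file_format_version = ReadBE64(buf + 4);
  out->manifest_size = ReadBE64(buf + 12);
  if (out->file_format_version >= 2)
    out->metadata_signature_size = ReadBE32(buf + 20);
  return true;
}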
 
@@ -61,13 +66,13 @@
 //   dst_extents on the drive, zero padding to block size.
 // - MOVE: Copy the data in src_extents to dst_extents. Extents may overlap,
 //   so it may be desirable to read all src_extents data into memory before
-//   writing it out.
+//   writing it out. (deprecated)
 // - SOURCE_COPY: Copy the data in src_extents in the old partition to
 //   dst_extents in the new partition. There's no overlapping of data because
 //   the extents are in different partitions.
 // - BSDIFF: Read src_length bytes from src_extents into memory, perform
 //   bspatch with attached data, write new data to dst_extents, zero padding
-//   to block size.
+//   to block size. (deprecated)
 // - SOURCE_BSDIFF: Read the data in src_extents in the old partition, perform
 //   bspatch with the attached data and write the new data to dst_extents in the
 //   new partition.
@@ -102,6 +107,11 @@
 // A sentinel value (kuint64max) as the start block denotes a sparse-hole
 // in a file whose block-length is specified by num_blocks.
 
+message Extent {
+  optional uint64 start_block = 1;
+  optional uint64 num_blocks = 2;
+}
+
 // Signatures: Updates may be signed by the OS vendor. The client verifies
 // an update's signature by hashing the entire download. The section of the
 // download that contains the signature is at the end of the file, so when
@@ -114,15 +124,19 @@
 // to verify the download. The public key is expected to be part of the
 // client.
 
-message Extent {
-  optional uint64 start_block = 1;
-  optional uint64 num_blocks = 2;
-}
-
 message Signatures {
   message Signature {
-    optional uint32 version = 1;
+    optional uint32 version = 1 [deprecated = true];
     optional bytes data = 2;
+
+    // The DER-encoded signature size of an EC key varies with the sha256 hash
+    // being signed. However, the size of the serialized Signatures protobuf
+    // string needs to be fixed before signing, because this size is part of
+    // the content to be signed. Therefore, we always pad the signature data
+    // to the maximum possible signature size of a given key, and the payload
+    // verifier truncates the signature back to its correct size based on the
+    // value of |unpadded_signature_size|.
   }
   repeated Signature signatures = 1;
 }
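The padding scheme in the comment above can be pictured with a short sketch; the helper names are illustrative, and the real logic lives in the payload signer and verifier:

#include <cstdint>
#include <string>

// When producing Signatures.signatures[i].data, pad the freshly produced DER
// signature up to the maximum size the key can ever emit, so the serialized
// Signatures message has a fixed length that can be signed over.
std::string PadSignature(const std::string& der_sig, size_t max_sig_size) {
  std::string padded = der_sig;
  padded.resize(max_sig_size, '\0');
  return padded;
}

// When verifying, |unpadded_signature_size| from the proto says how many
// leading bytes of |data| form the real signature.
std::string UnpadSignature(const std::string& padded_sig,
                           uint32_t unpadded_signature_size) {
  return padded_sig.substr(0, unpadded_signature_size);
}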
@@ -290,13 +304,19 @@
 
 // Metadata related to all dynamic partitions.
 message DynamicPartitionMetadata {
-  // All updateable groups present in |partitions| of this DeltaArchiveManifest.
+  // All updatable groups present in |partitions| of this DeltaArchiveManifest.
   // - If an updatable group is on the device but not in the manifest, it is
   //   not updated. Hence, the group will not be resized, and partitions cannot
   //   be added to or removed from the group.
   // - If an updatable group is in the manifest but not on the device, the group
   //   is added to the device.
   repeated DynamicPartitionGroup groups = 1;
+
+  // Whether dynamic partitions have snapshots during the update. If this is
+  // set to true, the update_engine daemon creates snapshots for all dynamic
+  // partitions if possible. If this is unset, the update_engine daemon MUST
+  // NOT create snapshots for dynamic partitions.
+  optional bool snapshot_enabled = 2;
 }
 
 message DeltaArchiveManifest {
@@ -330,6 +350,7 @@
   optional ImageInfo new_image_info = 11;
 
   // The minor version, also referred as "delta version", of the payload.
+  // Minor version 0 is a full payload; everything else is a delta payload.
   optional uint32 minor_version = 12 [default = 0];
 
   // Only present in major version >= 2. List of partitions that will be
@@ -346,4 +367,7 @@
 
   // Metadata related to all dynamic partitions.
   optional DynamicPartitionMetadata dynamic_partition_metadata = 15;
+
+  // Set to true if the payload only updates a subset of partitions on the
+  // device.
+  optional bool partial_update = 16;
 }
diff --git a/update_status_utils.cc b/update_status_utils.cc
index 6c618ec..a702c61 100644
--- a/update_status_utils.cc
+++ b/update_status_utils.cc
@@ -66,6 +66,8 @@
       return update_engine::kUpdateStatusAttemptingRollback;
     case UpdateStatus::DISABLED:
       return update_engine::kUpdateStatusDisabled;
+    case UpdateStatus::CLEANUP_PREVIOUS_UPDATE:
+      return update_engine::kUpdateStatusCleanupPreviousUpdate;
   }
 
   NOTREACHED();