Merge remote-tracking branch 'cros/upstream' into cros/master

BUG=chromium:815356
TEST=unittest
TEST=precq
TEST=cros flash

Change-Id: I8d9c37411708d0cae61613b285123a090ce6deb1
diff --git a/Android.mk b/Android.mk
index d2a83bc..1beada9 100644
--- a/Android.mk
+++ b/Android.mk
@@ -22,6 +22,7 @@
 # by setting BRILLO_USE_* values. Note that we define local variables like
 # local_use_* to prevent leaking our default setting for other packages.
 local_use_binder := $(if $(BRILLO_USE_BINDER),$(BRILLO_USE_BINDER),1)
+local_use_fec := 1
 local_use_hwid_override := \
     $(if $(BRILLO_USE_HWID_OVERRIDE),$(BRILLO_USE_HWID_OVERRIDE),0)
 local_use_mtd := $(if $(BRILLO_USE_MTD),$(BRILLO_USE_MTD),0)
@@ -35,6 +36,7 @@
     -DUSE_BINDER=$(local_use_binder) \
     -DUSE_CHROME_NETWORK_PROXY=$(local_use_chrome_network_proxy) \
     -DUSE_CHROME_KIOSK_APP=$(local_use_chrome_kiosk_app) \
+    -DUSE_FEC=$(local_use_fec) \
     -DUSE_HWID_OVERRIDE=$(local_use_hwid_override) \
     -DUSE_MTD=$(local_use_mtd) \
     -DUSE_OMAHA=$(local_use_omaha) \
@@ -143,17 +145,25 @@
     payload_consumer/install_plan.cc \
     payload_consumer/mount_history.cc \
     payload_consumer/payload_constants.cc \
+    payload_consumer/payload_metadata.cc \
     payload_consumer/payload_verifier.cc \
     payload_consumer/postinstall_runner_action.cc \
     payload_consumer/xz_extent_writer.cc
 
+ifeq ($(local_use_fec),1)
+ue_libpayload_consumer_src_files += \
+    payload_consumer/fec_file_descriptor.cc
+ue_libpayload_consumer_exported_shared_libraries += \
+    libfec
+endif  # local_use_fec == 1
+
 ifeq ($(HOST_OS),linux)
 # Build for the host.
 include $(CLEAR_VARS)
 LOCAL_MODULE := libpayload_consumer
 LOCAL_MODULE_CLASS := STATIC_LIBRARIES
 LOCAL_CPP_EXTENSION := .cc
-LOCAL_CFLAGS := $(ue_common_cflags)
+LOCAL_CFLAGS := $(filter-out -DUSE_FEC=%,$(ue_common_cflags)) -DUSE_FEC=0
 LOCAL_CPPFLAGS := $(ue_common_cppflags)
 LOCAL_LDFLAGS := $(ue_common_ldflags)
 LOCAL_C_INCLUDES := \
@@ -215,8 +225,7 @@
 LOCAL_CPPFLAGS := $(ue_common_cppflags)
 LOCAL_LDFLAGS := $(ue_common_ldflags)
 LOCAL_C_INCLUDES := \
-    $(ue_common_c_includes) \
-    bootable/recovery
+    $(ue_common_c_includes)
 LOCAL_STATIC_LIBRARIES := \
     $(ue_common_static_libraries) \
     $(ue_libupdate_engine_boot_control_exported_static_libraries)
@@ -238,6 +247,7 @@
 ue_libupdate_engine_exported_static_libraries := \
     libpayload_consumer \
     update_metadata-protos \
+    libbootloader_message \
     libbz \
     libfs_mgr \
     libbase \
@@ -274,8 +284,7 @@
 LOCAL_LDFLAGS := $(ue_common_ldflags)
 LOCAL_C_INCLUDES := \
     $(ue_common_c_includes) \
-    $(ue_libupdate_engine_exported_c_includes) \
-    bootable/recovery
+    $(ue_libupdate_engine_exported_c_includes)
 LOCAL_STATIC_LIBRARIES := \
     libpayload_consumer \
     update_metadata-protos \
@@ -355,6 +364,7 @@
 # loop to apply payloads provided by the upper layer via a Binder interface.
 ue_libupdate_engine_android_exported_static_libraries := \
     libpayload_consumer \
+    libbootloader_message \
     libfs_mgr \
     libbase \
     liblog \
@@ -382,8 +392,7 @@
 LOCAL_CPPFLAGS := $(ue_common_cppflags)
 LOCAL_LDFLAGS := $(ue_common_ldflags)
 LOCAL_C_INCLUDES := \
-    $(ue_common_c_includes) \
-    bootable/recovery
+    $(ue_common_c_includes)
 #TODO(deymo): Remove external/cros/system_api/dbus once the strings are moved
 # out of the DBus interface.
 LOCAL_C_INCLUDES += \
@@ -471,8 +480,7 @@
 LOCAL_CPPFLAGS := $(ue_common_cppflags)
 LOCAL_LDFLAGS := $(ue_common_ldflags)
 LOCAL_C_INCLUDES := \
-    $(ue_common_c_includes) \
-    bootable/recovery
+    $(ue_common_c_includes)
 #TODO(deymo): Remove external/cros/system_api/dbus once the strings are moved
 # out of the DBus interface.
 LOCAL_C_INCLUDES += \
@@ -489,6 +497,7 @@
     update_status_utils.cc \
     utils_android.cc
 LOCAL_STATIC_LIBRARIES := \
+    libbootloader_message \
     libfs_mgr \
     libbase \
     liblog \
@@ -510,6 +519,21 @@
     libmodpb64 \
     libgtest_prod
 
+ifeq ($(local_use_fec),1)
+# The static library "libfec" depends on a bunch of other static libraries, but
+# such dependency is not handled by the build system, so we need to add them
+# here.
+LOCAL_STATIC_LIBRARIES += \
+    libext4_utils \
+    libsquashfs_utils \
+    libcutils \
+    libcrypto_utils \
+    libcrypto \
+    libcutils \
+    libbase \
+    libfec_rs
+endif  # local_use_fec == 1
+
 ifeq ($(strip $(PRODUCT_STATIC_BOOT_CONTROL_HAL)),)
 # No static boot_control HAL defined, so no sideload support. We use a fake
 # boot_control HAL to allow compiling update_engine_sideload for test purposes.
@@ -736,6 +760,7 @@
     $(ue_common_shared_libraries) \
     $(ue_libpayload_consumer_exported_shared_libraries) \
     $(ue_libpayload_generator_exported_shared_libraries)
+LOCAL_SHARED_LIBRARIES := $(filter-out libfec,$(LOCAL_SHARED_LIBRARIES))
 LOCAL_SRC_FILES := $(ue_delta_generator_src_files)
 include $(BUILD_HOST_EXECUTABLE)
 endif  # HOST_OS == linux
diff --git a/CPPLINT.cfg b/CPPLINT.cfg
index 3dd0f35..259fb2f 100644
--- a/CPPLINT.cfg
+++ b/CPPLINT.cfg
@@ -1,3 +1,7 @@
 # This should be kept in sync with platform2/CPPLINT.cfg
 set noparent
-filter=-build/include_order,+build/include_alpha,-build/header_guard
+
+# Header guard should start with UPDATE_ENGINE_
+root=..
+
+filter=-build/include_order,+build/include_alpha
diff --git a/binder_bindings/android/os/IUpdateEngine.aidl b/binder_bindings/android/os/IUpdateEngine.aidl
index 7e26752..c0e29f5 100644
--- a/binder_bindings/android/os/IUpdateEngine.aidl
+++ b/binder_bindings/android/os/IUpdateEngine.aidl
@@ -37,4 +37,6 @@
   void cancel();
   /** @hide */
   void resetStatus();
+  /** @hide */
+  boolean verifyPayloadApplicable(in String metadataFilename);
 }
diff --git a/binder_service_android.cc b/binder_service_android.cc
index 0305727..1702ead 100644
--- a/binder_service_android.cc
+++ b/binder_service_android.cc
@@ -139,6 +139,20 @@
   return Status::ok();
 }
 
+Status BinderUpdateEngineAndroidService::verifyPayloadApplicable(
+    const android::String16& metadata_filename, bool* return_value) {
+  const std::string payload_metadata{
+      android::String8{metadata_filename}.string()};
+  LOG(INFO) << "Received a request of verifying payload metadata in "
+            << payload_metadata << ".";
+  brillo::ErrorPtr error;
+  *return_value =
+      service_delegate_->VerifyPayloadApplicable(payload_metadata, &error);
+  if (error != nullptr)
+    return ErrorPtrToStatus(error);
+  return Status::ok();
+}
+
 bool BinderUpdateEngineAndroidService::UnbindCallback(const IBinder* callback) {
   auto it = std::find_if(
       callbacks_.begin(),
diff --git a/binder_service_android.h b/binder_service_android.h
index eb36e4c..694b80a 100644
--- a/binder_service_android.h
+++ b/binder_service_android.h
@@ -65,6 +65,8 @@
   android::binder::Status resume() override;
   android::binder::Status cancel() override;
   android::binder::Status resetStatus() override;
+  android::binder::Status verifyPayloadApplicable(
+      const android::String16& metadata_filename, bool* return_value) override;
 
  private:
   // Remove the passed |callback| from the list of registered callbacks. Called
diff --git a/client_library/include/update_engine/status_update_handler.h b/client_library/include/update_engine/status_update_handler.h
index d5b8cdb..d2fad34 100644
--- a/client_library/include/update_engine/status_update_handler.h
+++ b/client_library/include/update_engine/status_update_handler.h
@@ -14,8 +14,8 @@
 // limitations under the License.
 //
 
-#ifndef UPDATE_ENGINE_CLIENT_LIBRARY_INCLUDE_STATUS_UPDATE_HANDLER_H_
-#define UPDATE_ENGINE_CLIENT_LIBRARY_INCLUDE_STATUS_UPDATE_HANDLER_H_
+#ifndef UPDATE_ENGINE_CLIENT_LIBRARY_INCLUDE_UPDATE_ENGINE_STATUS_UPDATE_HANDLER_H_
+#define UPDATE_ENGINE_CLIENT_LIBRARY_INCLUDE_UPDATE_ENGINE_STATUS_UPDATE_HANDLER_H_
 
 #include <string>
 
@@ -44,4 +44,4 @@
 
 }  // namespace update_engine
 
-#endif  // UPDATE_ENGINE_CLIENT_LIBRARY_INCLUDE_STATUS_UPDATE_HANDLER_H_
+#endif  // UPDATE_ENGINE_CLIENT_LIBRARY_INCLUDE_UPDATE_ENGINE_STATUS_UPDATE_HANDLER_H_
diff --git a/common/boot_control_stub.h b/common/boot_control_stub.h
index 7832adc..9e3b05c 100644
--- a/common/boot_control_stub.h
+++ b/common/boot_control_stub.h
@@ -27,7 +27,7 @@
 // typically used when e.g. an underlying HAL implementation cannot be
 // loaded or doesn't exist.
 //
-// You are gauranteed that the implementation of GetNumSlots() method
+// You are guaranteed that the implementation of GetNumSlots() method
 // always returns 0. This can be used to identify that this
 // implementation is in use.
 class BootControlStub : public BootControlInterface {
diff --git a/common/constants.h b/common/constants.h
index a3d01c9..78353d8 100644
--- a/common/constants.h
+++ b/common/constants.h
@@ -168,9 +168,10 @@
 //
 // For non-official builds (e.g. typically built on a developer's
 // workstation and served via devserver) bump this since it takes time
-// for the workstation to generate the payload. For p2p, make this
-// relatively low since we want to fail fast.
-const int kDownloadLowSpeedTimeSeconds = 90;
+// for the workstation to generate the payload. For normal operation
+// and p2p, make this relatively low since we want to fail fast in
+// those cases.
+const int kDownloadLowSpeedTimeSeconds = 30;
 const int kDownloadDevModeLowSpeedTimeSeconds = 180;
 const int kDownloadP2PLowSpeedTimeSeconds = 60;
 
diff --git a/common/error_code.h b/common/error_code.h
index a7fee2a..0d86a7b 100644
--- a/common/error_code.h
+++ b/common/error_code.h
@@ -118,7 +118,7 @@
   // modify the implementation of ErrorCode into a properly encapsulated class.
   kDevModeFlag = 1 << 31,
 
-  // Set if resuming an interruped update.
+  // Set if resuming an interrupted update.
   kResumedFlag = 1 << 30,
 
   // Set if using a dev/test image as opposed to an MP-signed image.
diff --git a/common/utils.cc b/common/utils.cc
index 68cad51..b06954b 100644
--- a/common/utils.cc
+++ b/common/utils.cc
@@ -777,7 +777,7 @@
   if (size < offsetof(Elf32_Ehdr, e_machine) + sizeof(hdr->e_machine))
     return true;
   uint16_t e_machine;
-  // Fix endianess regardless of the host endianess.
+  // Fix endianness regardless of the host endianness.
   if (ei_data == ELFDATA2LSB)
     e_machine = le16toh(hdr->e_machine);
   else
diff --git a/common/utils.h b/common/utils.h
index 8db0cf8..cbc5eb9 100644
--- a/common/utils.h
+++ b/common/utils.h
@@ -110,7 +110,7 @@
 // occurs, -1 is returned.
 off_t BlockDevSize(int fd);
 
-// Returns the size of the file at path, or the file desciptor fd. If the file
+// Returns the size of the file at path, or the file descriptor fd. If the file
 // is actually a block device, this function will automatically call
 // BlockDevSize. If the file doesn't exist or some error occurrs, -1 is
 // returned.
diff --git a/daemon_state_android.cc b/daemon_state_android.cc
index 0960b1a..c9c09b8 100644
--- a/daemon_state_android.cc
+++ b/daemon_state_android.cc
@@ -36,7 +36,7 @@
 
   hardware_ = hardware::CreateHardware();
   if (!hardware_) {
-    LOG(ERROR) << "Error intializing the HardwareInterface.";
+    LOG(ERROR) << "Error initializing the HardwareInterface.";
     return false;
   }
 
diff --git a/hardware_android.cc b/hardware_android.cc
index 0e5abaa..3f0fb59 100644
--- a/hardware_android.cc
+++ b/hardware_android.cc
@@ -23,11 +23,10 @@
 #include <algorithm>
 #include <memory>
 
-#include <bootloader.h>
-
 #include <android-base/properties.h>
 #include <base/files/file_util.h>
 #include <base/strings/stringprintf.h>
+#include <bootloader_message/bootloader_message.h>
 
 #include "update_engine/common/hardware.h"
 #include "update_engine/common/platform_constants.h"
diff --git a/image_properties_android.cc b/image_properties_android.cc
index 4dc2c02..1d82feb 100644
--- a/image_properties_android.cc
+++ b/image_properties_android.cc
@@ -23,7 +23,7 @@
 #include <android-base/properties.h>
 #include <base/logging.h>
 #include <base/strings/string_util.h>
-#include <bootloader.h>
+#include <bootloader_message/bootloader_message.h>
 #include <brillo/osrelease_reader.h>
 #include <brillo/strings/string_utils.h>
 
diff --git a/main.cc b/main.cc
index e7df4f8..67a150e 100644
--- a/main.cc
+++ b/main.cc
@@ -166,7 +166,7 @@
               "Don't daemon()ize; run in foreground.");
 
   chromeos_update_engine::Terminator::Init();
-  brillo::FlagHelper::Init(argc, argv, "Chromium OS Update Engine");
+  brillo::FlagHelper::Init(argc, argv, "A/B Update Engine");
 
   // We have two logging flags "--logtostderr" and "--logtofile"; and the logic
   // to choose the logging destination is:
@@ -179,7 +179,7 @@
   if (!FLAGS_foreground)
     PLOG_IF(FATAL, daemon(0, 0) == 1) << "daemon() failed";
 
-  LOG(INFO) << "Chrome OS Update Engine starting";
+  LOG(INFO) << "A/B Update Engine starting";
 
   // xz-embedded requires to initialize its CRC-32 table once on startup.
   xz_crc32_init();
@@ -196,7 +196,6 @@
 
   chromeos_update_engine::Subprocess::Get().FlushBufferedLogsAtExit();
 
-  LOG(INFO) << "Chrome OS Update Engine terminating with exit code "
-            << exit_code;
+  LOG(INFO) << "A/B Update Engine terminating with exit code " << exit_code;
   return exit_code;
 }
diff --git a/metrics_reporter_android.cc b/metrics_reporter_android.cc
index 3cb356f..a5877cb 100644
--- a/metrics_reporter_android.cc
+++ b/metrics_reporter_android.cc
@@ -120,6 +120,7 @@
     int64_t num_bytes_downloaded[kNumDownloadSources],
     int download_overhead_percentage,
     base::TimeDelta total_duration,
+    base::TimeDelta /* total_duration_uptime */,
     int reboot_count,
     int /* url_switch_count */) {
   LogHistogram(metrics::kMetricsUpdateEngineSuccessfulUpdateAttemptCount,
diff --git a/metrics_reporter_android.h b/metrics_reporter_android.h
index 847e509..8a27ef6 100644
--- a/metrics_reporter_android.h
+++ b/metrics_reporter_android.h
@@ -72,6 +72,7 @@
       int64_t num_bytes_downloaded[kNumDownloadSources],
       int download_overhead_percentage,
       base::TimeDelta total_duration,
+      base::TimeDelta total_duration_uptime,
       int reboot_count,
       int url_switch_count) override;
 
diff --git a/metrics_reporter_interface.h b/metrics_reporter_interface.h
index 85ffb85..b677aaa 100644
--- a/metrics_reporter_interface.h
+++ b/metrics_reporter_interface.h
@@ -165,6 +165,7 @@
   //  |kMetricSuccessfulUpdateDownloadSourcesUsed|
   //  |kMetricSuccessfulUpdateDownloadOverheadPercentage|
   //  |kMetricSuccessfulUpdateTotalDurationMinutes|
+  //  |kMetricSuccessfulUpdateTotalDurationUptimeMinutes|
   //  |kMetricSuccessfulUpdateRebootCount|
   //  |kMetricSuccessfulUpdateUrlSwitchCount|
   //
@@ -179,6 +180,7 @@
       int64_t num_bytes_downloaded[kNumDownloadSources],
       int download_overhead_percentage,
       base::TimeDelta total_duration,
+      base::TimeDelta total_duration_uptime,
       int reboot_count,
       int url_switch_count) = 0;
 
diff --git a/metrics_reporter_omaha.cc b/metrics_reporter_omaha.cc
index 5e14cca..f0c6643 100644
--- a/metrics_reporter_omaha.cc
+++ b/metrics_reporter_omaha.cc
@@ -96,6 +96,8 @@
     "UpdateEngine.SuccessfulUpdate.RebootCount";
 const char kMetricSuccessfulUpdateTotalDurationMinutes[] =
     "UpdateEngine.SuccessfulUpdate.TotalDurationMinutes";
+const char kMetricSuccessfulUpdateTotalDurationUptimeMinutes[] =
+    "UpdateEngine.SuccessfulUpdate.TotalDurationUptimeMinutes";
 const char kMetricSuccessfulUpdateUpdatesAbandonedCount[] =
     "UpdateEngine.SuccessfulUpdate.UpdatesAbandonedCount";
 const char kMetricSuccessfulUpdateUrlSwitchCount[] =
@@ -394,6 +396,7 @@
     int64_t num_bytes_downloaded[kNumDownloadSources],
     int download_overhead_percentage,
     base::TimeDelta total_duration,
+    base::TimeDelta total_duration_uptime,
     int reboot_count,
     int url_switch_count) {
   string metric = metrics::kMetricSuccessfulUpdatePayloadSizeMiB;
@@ -473,6 +476,15 @@
                           365 * 24 * 60,  // max: 365 days ~= 1 year
                           50);            // num_buckets
 
+  metric = metrics::kMetricSuccessfulUpdateTotalDurationUptimeMinutes;
+  LOG(INFO) << "Uploading " << utils::FormatTimeDelta(total_duration_uptime)
+            << " for metric " << metric;
+  metrics_lib_->SendToUMA(metric,
+                          static_cast<int>(total_duration_uptime.InMinutes()),
+                          0,             // min: 0 min
+                          30 * 24 * 60,  // max: 30 days
+                          50);           // num_buckets
+
   metric = metrics::kMetricSuccessfulUpdateRebootCount;
   LOG(INFO) << "Uploading reboot count of " << reboot_count << " for metric "
             << metric;
diff --git a/metrics_reporter_omaha.h b/metrics_reporter_omaha.h
index d348cdc..10aef86 100644
--- a/metrics_reporter_omaha.h
+++ b/metrics_reporter_omaha.h
@@ -73,6 +73,7 @@
 extern const char kMetricSuccessfulUpdatePayloadSizeMiB[];
 extern const char kMetricSuccessfulUpdateRebootCount[];
 extern const char kMetricSuccessfulUpdateTotalDurationMinutes[];
+extern const char kMetricSuccessfulUpdateTotalDurationUptimeMinutes[];
 extern const char kMetricSuccessfulUpdateUpdatesAbandonedCount[];
 extern const char kMetricSuccessfulUpdateUrlSwitchCount[];
 
@@ -146,6 +147,7 @@
       int64_t num_bytes_downloaded[kNumDownloadSources],
       int download_overhead_percentage,
       base::TimeDelta total_duration,
+      base::TimeDelta total_duration_uptime,
       int reboot_count,
       int url_switch_count) override;
 
diff --git a/metrics_reporter_omaha_unittest.cc b/metrics_reporter_omaha_unittest.cc
index ea72104..878a323 100644
--- a/metrics_reporter_omaha_unittest.cc
+++ b/metrics_reporter_omaha_unittest.cc
@@ -323,6 +323,7 @@
   num_bytes_downloaded[0] = 200 * kNumBytesInOneMiB;
   int download_overhead_percentage = 20;
   TimeDelta total_duration = TimeDelta::FromMinutes(30);
+  TimeDelta total_duration_uptime = TimeDelta::FromMinutes(20);
   int reboot_count = 2;
   int url_switch_count = 2;
 
@@ -371,6 +372,14 @@
       .Times(1);
   EXPECT_CALL(
       *mock_metrics_lib_,
+      SendToUMA(metrics::kMetricSuccessfulUpdateTotalDurationUptimeMinutes,
+                20,
+                _,
+                _,
+                _))
+      .Times(1);
+  EXPECT_CALL(
+      *mock_metrics_lib_,
       SendToUMA(
           metrics::kMetricSuccessfulUpdateRebootCount, reboot_count, _, _, _))
       .Times(1);
@@ -398,6 +407,7 @@
                                           num_bytes_downloaded,
                                           download_overhead_percentage,
                                           total_duration,
+                                          total_duration_uptime,
                                           reboot_count,
                                           url_switch_count);
 }
diff --git a/metrics_reporter_stub.h b/metrics_reporter_stub.h
index 7112385..486dc2f 100644
--- a/metrics_reporter_stub.h
+++ b/metrics_reporter_stub.h
@@ -72,6 +72,7 @@
       int64_t num_bytes_downloaded[kNumDownloadSources],
       int download_overhead_percentage,
       base::TimeDelta total_duration,
+      base::TimeDelta total_duration_uptime,
       int reboot_count,
       int url_switch_count) override {}
 
diff --git a/mock_metrics_reporter.h b/mock_metrics_reporter.h
index e3c1634..c678a80 100644
--- a/mock_metrics_reporter.h
+++ b/mock_metrics_reporter.h
@@ -14,8 +14,8 @@
 // limitations under the License.
 //
 
-#ifndef UPDATE_ENGINE_MOCK_METRICS_REPORTER_H
-#define UPDATE_ENGINE_MOCK_METRICS_REPORTER_H
+#ifndef UPDATE_ENGINE_MOCK_METRICS_REPORTER_H_
+#define UPDATE_ENGINE_MOCK_METRICS_REPORTER_H_
 
 #include <string>
 
@@ -61,16 +61,17 @@
 
   MOCK_METHOD0(ReportAbnormallyTerminatedUpdateAttemptMetrics, void());
 
-  MOCK_METHOD9(ReportSuccessfulUpdateMetrics,
-               void(int attempt_count,
-                    int updates_abandoned_count,
-                    PayloadType payload_type,
-                    int64_t payload_size,
-                    int64_t num_bytes_downloaded[kNumDownloadSources],
-                    int download_overhead_percentage,
-                    base::TimeDelta total_duration,
-                    int reboot_count,
-                    int url_switch_count));
+  MOCK_METHOD10(ReportSuccessfulUpdateMetrics,
+                void(int attempt_count,
+                     int updates_abandoned_count,
+                     PayloadType payload_type,
+                     int64_t payload_size,
+                     int64_t num_bytes_downloaded[kNumDownloadSources],
+                     int download_overhead_percentage,
+                     base::TimeDelta total_duration,
+                     base::TimeDelta total_duration_uptime,
+                     int reboot_count,
+                     int url_switch_count));
 
   MOCK_METHOD2(ReportCertificateCheckMetrics,
                void(ServerToCheck server_to_check,
@@ -92,4 +93,4 @@
 
 }  // namespace chromeos_update_engine
 
-#endif  // UPDATE_ENGINE_MOCK_METRICS_REPORTER_H
+#endif  // UPDATE_ENGINE_MOCK_METRICS_REPORTER_H_
diff --git a/omaha_request_action.cc b/omaha_request_action.cc
index 03288ff..ca6e348 100644
--- a/omaha_request_action.cc
+++ b/omaha_request_action.cc
@@ -78,6 +78,7 @@
 static const char* kTagDisableP2PForDownloading = "DisableP2PForDownloading";
 static const char* kTagDisableP2PForSharing = "DisableP2PForSharing";
 static const char* kTagPublicKeyRsa = "PublicKeyRsa";
+static const char* kTagPowerwash = "Powerwash";
 
 static const char* kOmahaUpdaterVersion = "0.1.0.0";
 
@@ -191,7 +192,7 @@
                        const string arg_name,
                        const string prefs_key) {
   // There's nothing wrong with not having a given cohort setting, so we check
-  // existance first to avoid the warning log message.
+  // existence first to avoid the warning log message.
   if (!prefs->Exists(prefs_key))
     return "";
   string cohort_value;
@@ -1111,6 +1112,7 @@
 
   output_object->disable_payload_backoff =
       ParseBool(attrs[kTagDisablePayloadBackoff]);
+  output_object->powerwash_required = ParseBool(attrs[kTagPowerwash]);
 
   return true;
 }
diff --git a/omaha_request_action_unittest.cc b/omaha_request_action_unittest.cc
index f2a83f7..7c9fe41 100644
--- a/omaha_request_action_unittest.cc
+++ b/omaha_request_action_unittest.cc
@@ -153,6 +153,7 @@
            (disable_p2p_for_downloading ? "DisableP2PForDownloading=\"true\" "
                                         : "") +
            (disable_p2p_for_sharing ? "DisableP2PForSharing=\"true\" " : "") +
+           (powerwash ? "Powerwash=\"true\" " : "") +
            "/></actions></manifest></updatecheck></app>" +
            (multi_app
                 ? "<app appid=\"" + app_id2 + "\"" +
@@ -200,6 +201,8 @@
   bool disable_p2p_for_downloading = false;
   bool disable_p2p_for_sharing = false;
 
+  bool powerwash = false;
+
   // Omaha cohorts settings.
   bool include_cohorts = false;
   string cohort = "";
@@ -640,6 +643,7 @@
   EXPECT_EQ(true, response.packages[0].is_delta);
   EXPECT_EQ(fake_update_response_.prompt == "true", response.prompt);
   EXPECT_EQ(fake_update_response_.deadline, response.deadline);
+  EXPECT_FALSE(response.powerwash_required);
   // Omaha cohort attributes are not set in the response, so they should not be
   // persisted.
   EXPECT_FALSE(fake_prefs_.Exists(kPrefsOmahaCohort));
@@ -803,6 +807,22 @@
   EXPECT_EQ(false, response.packages[2].is_delta);
 }
 
+TEST_F(OmahaRequestActionTest, PowerwashTest) {
+  OmahaResponse response;
+  fake_update_response_.powerwash = true;
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
+                              -1,
+                              false,  // ping_only
+                              ErrorCode::kSuccess,
+                              metrics::CheckResult::kUpdateAvailable,
+                              metrics::CheckReaction::kUpdating,
+                              metrics::DownloadErrorCode::kUnset,
+                              &response,
+                              nullptr));
+  EXPECT_TRUE(response.update_exists);
+  EXPECT_TRUE(response.powerwash_required);
+}
+
 TEST_F(OmahaRequestActionTest, ExtraHeadersSentTest) {
   const string http_response = "<?xml invalid response";
   request_params_.set_interactive(true);
diff --git a/omaha_response.h b/omaha_response.h
index e57f291..ef69de2 100644
--- a/omaha_response.h
+++ b/omaha_response.h
@@ -72,6 +72,9 @@
   // True if the Omaha rule instructs us to disable p2p for sharing.
   bool disable_p2p_for_sharing = false;
 
+  // True if the Omaha rule instructs us to powerwash.
+  bool powerwash_required = false;
+
   // If not blank, a base-64 encoded representation of the PEM-encoded
   // public key in the response.
   std::string public_key_rsa;
diff --git a/omaha_response_handler_action.cc b/omaha_response_handler_action.cc
index 3007f29..775c0a8 100644
--- a/omaha_response_handler_action.cc
+++ b/omaha_response_handler_action.cc
@@ -48,7 +48,6 @@
 OmahaResponseHandlerAction::OmahaResponseHandlerAction(
     SystemState* system_state, const string& deadline_file)
     : system_state_(system_state),
-      got_no_update_response_(false),
       key_path_(constants::kUpdatePayloadPublicKeyPath),
       deadline_file_(deadline_file) {}
 
@@ -57,7 +56,6 @@
   ScopedActionCompleter completer(processor_, this);
   const OmahaResponse& response = GetInputObject();
   if (!response.update_exists) {
-    got_no_update_response_ = true;
     LOG(INFO) << "There are no updates. Aborting.";
     completer.set_code(ErrorCode::kNoUpdate);
     return;
@@ -160,7 +158,7 @@
     install_plan_.is_rollback = true;
   }
 
-  if (params->ShouldPowerwash())
+  if (response.powerwash_required || params->ShouldPowerwash())
     install_plan_.powerwash_required = true;
 
   TEST_AND_RETURN(HasOutputPipe());
diff --git a/omaha_response_handler_action.h b/omaha_response_handler_action.h
index f5cc1a6..0a001b8 100644
--- a/omaha_response_handler_action.h
+++ b/omaha_response_handler_action.h
@@ -54,7 +54,6 @@
   // never be called
   void TerminateProcessing() override { CHECK(false); }
 
-  bool GotNoUpdateResponse() const { return got_no_update_response_; }
   const InstallPlan& install_plan() const { return install_plan_; }
 
   // Debugging/logging
@@ -73,9 +72,6 @@
   // The install plan, if we have an update.
   InstallPlan install_plan_;
 
-  // True only if we got a response and the response said no updates
-  bool got_no_update_response_;
-
   // Public key path to use for payload verification.
   std::string key_path_;
 
diff --git a/p2p_manager_unittest.cc b/p2p_manager_unittest.cc
index 5ffb358..01bdc35 100644
--- a/p2p_manager_unittest.cc
+++ b/p2p_manager_unittest.cc
@@ -183,7 +183,7 @@
 }
 
 // Check that we keep files with the .$EXT.p2p extension not older
-// than some specificed age (5 days, in this test).
+// than some specified age (5 days, in this test).
 TEST_F(P2PManagerTest, HousekeepingAgeLimit) {
   // We set the cutoff time to be 1 billion seconds (01:46:40 UTC on 9
   // September 2001 - arbitrary number, but constant to avoid test
diff --git a/parcelable_update_engine_status.h b/parcelable_update_engine_status.h
index 82006e4..3feac76 100644
--- a/parcelable_update_engine_status.h
+++ b/parcelable_update_engine_status.h
@@ -14,8 +14,8 @@
 // limitations under the License.
 //
 
-#ifndef UPDATE_ENGINE_UPDATE_ENGINE_STATUS_H_
-#define UPDATE_ENGINE_UPDATE_ENGINE_STATUS_H_
+#ifndef UPDATE_ENGINE_PARCELABLE_UPDATE_ENGINE_STATUS_H_
+#define UPDATE_ENGINE_PARCELABLE_UPDATE_ENGINE_STATUS_H_
 
 #include <binder/Parcelable.h>
 #include <utils/String16.h>
@@ -60,4 +60,4 @@
 }  // namespace brillo
 }  // namespace android
 
-#endif  // UPDATE_ENGINE_UPDATE_ENGINE_STATUS_H_
+#endif  // UPDATE_ENGINE_PARCELABLE_UPDATE_ENGINE_STATUS_H_
diff --git a/payload_consumer/bzip_extent_writer.cc b/payload_consumer/bzip_extent_writer.cc
index 39d9d67..7828589 100644
--- a/payload_consumer/bzip_extent_writer.cc
+++ b/payload_consumer/bzip_extent_writer.cc
@@ -24,6 +24,10 @@
 const brillo::Blob::size_type kOutputBufferLength = 16 * 1024;
 }
 
+BzipExtentWriter::~BzipExtentWriter() {
+  TEST_AND_RETURN(BZ2_bzDecompressEnd(&stream_) == BZ_OK);
+}
+
 bool BzipExtentWriter::Init(FileDescriptorPtr fd,
                             const RepeatedPtrField<Extent>& extents,
                             uint32_t block_size) {
@@ -84,7 +88,6 @@
 
 bool BzipExtentWriter::EndImpl() {
   TEST_AND_RETURN_FALSE(input_buffer_.empty());
-  TEST_AND_RETURN_FALSE(BZ2_bzDecompressEnd(&stream_) == BZ_OK);
   return next_->End();
 }
 
diff --git a/payload_consumer/bzip_extent_writer.h b/payload_consumer/bzip_extent_writer.h
index 86b346a..710727f 100644
--- a/payload_consumer/bzip_extent_writer.h
+++ b/payload_consumer/bzip_extent_writer.h
@@ -38,7 +38,7 @@
       : next_(std::move(next)) {
     memset(&stream_, 0, sizeof(stream_));
   }
-  ~BzipExtentWriter() override = default;
+  ~BzipExtentWriter() override;
 
   bool Init(FileDescriptorPtr fd,
             const google::protobuf::RepeatedPtrField<Extent>& extents,
diff --git a/payload_consumer/cached_file_descriptor_unittest.cc b/payload_consumer/cached_file_descriptor_unittest.cc
index 6a6302a..d2965fc 100644
--- a/payload_consumer/cached_file_descriptor_unittest.cc
+++ b/payload_consumer/cached_file_descriptor_unittest.cc
@@ -159,7 +159,7 @@
   off64_t seek = 10;
   brillo::Blob blob_in(kFileSize, 0);
   std::fill_n(&blob_in[seek], kCacheSize, value_);
-  // We are writing exactly one cache size; Then it should be commited.
+  // We are writing exactly one cache size; Then it should be committed.
   EXPECT_EQ(cfd_->Seek(seek, SEEK_SET), seek);
   Write(&blob_in[seek], kCacheSize);
 
@@ -174,7 +174,7 @@
   EXPECT_EQ(cfd_->Seek(seek, SEEK_SET), seek);
   brillo::Blob blob_in(kFileSize, 0);
   std::fill_n(&blob_in[seek], less_than_cache_size, value_);
-  // We are writing less than one cache size; then it should not be commited.
+  // We are writing less than one cache size; then it should not be committed.
   Write(&blob_in[seek], less_than_cache_size);
 
   // Revert the changes in |blob_in|.
@@ -190,7 +190,7 @@
   EXPECT_EQ(cfd_->Seek(seek, SEEK_SET), seek);
   brillo::Blob blob_in(kFileSize, 0);
   std::fill_n(&blob_in[seek], less_than_cache_size, value_);
-  // We are writing less than  one cache size; then it should not be commited.
+  // We are writing less than  one cache size; then it should not be committed.
   Write(&blob_in[seek], less_than_cache_size);
 
   // Then we seek, it should've written the cache after seek.
diff --git a/payload_consumer/delta_performer.cc b/payload_consumer/delta_performer.cc
index 0a12801..a986b07 100644
--- a/payload_consumer/delta_performer.cc
+++ b/payload_consumer/delta_performer.cc
@@ -16,7 +16,6 @@
 
 #include "update_engine/payload_consumer/delta_performer.h"
 
-#include <endian.h>
 #include <errno.h>
 #include <linux/fs.h>
 
@@ -49,11 +48,14 @@
 #include "update_engine/payload_consumer/download_action.h"
 #include "update_engine/payload_consumer/extent_reader.h"
 #include "update_engine/payload_consumer/extent_writer.h"
+#if USE_FEC
+#include "update_engine/payload_consumer/fec_file_descriptor.h"
+#endif  // USE_FEC
 #include "update_engine/payload_consumer/file_descriptor_utils.h"
 #include "update_engine/payload_consumer/mount_history.h"
 #if USE_MTD
 #include "update_engine/payload_consumer/mtd_file_descriptor.h"
-#endif
+#endif  // USE_MTD
 #include "update_engine/payload_consumer/payload_constants.h"
 #include "update_engine/payload_consumer/payload_verifier.h"
 #include "update_engine/payload_consumer/xz_extent_writer.h"
@@ -64,17 +66,6 @@
 using std::vector;
 
 namespace chromeos_update_engine {
-
-const uint64_t DeltaPerformer::kDeltaVersionOffset = sizeof(kDeltaMagic);
-const uint64_t DeltaPerformer::kDeltaVersionSize = 8;
-const uint64_t DeltaPerformer::kDeltaManifestSizeOffset =
-    kDeltaVersionOffset + kDeltaVersionSize;
-const uint64_t DeltaPerformer::kDeltaManifestSizeSize = 8;
-const uint64_t DeltaPerformer::kDeltaMetadataSignatureSizeSize = 4;
-const uint64_t DeltaPerformer::kMaxPayloadHeaderSize = 24;
-const uint64_t DeltaPerformer::kSupportedMajorPayloadVersion = 2;
-const uint32_t DeltaPerformer::kSupportedMinorPayloadVersion = 5;
-
 const unsigned DeltaPerformer::kProgressLogMaxChunks = 10;
 const unsigned DeltaPerformer::kProgressLogTimeoutSeconds = 30;
 const unsigned DeltaPerformer::kProgressDownloadWeight = 50;
@@ -335,6 +326,14 @@
       err = 1;
   }
   source_fd_.reset();
+  if (source_ecc_fd_ && !source_ecc_fd_->Close()) {
+    err = errno;
+    PLOG(ERROR) << "Error closing ECC source partition";
+    if (!err)
+      err = 1;
+  }
+  source_ecc_fd_.reset();
+  source_ecc_open_failure_ = false;
   source_path_.clear();
 
   if (target_fd_ && !target_fd_->Close()) {
@@ -376,11 +375,11 @@
   int err;
 
   int flags = O_RDWR;
-  if (!is_interactive_)
+  if (!interactive_)
     flags |= O_DSYNC;
 
   LOG(INFO) << "Opening " << target_path_ << " partition with"
-            << (is_interactive_ ? "out" : "") << " O_DSYNC";
+            << (interactive_ ? "out" : "") << " O_DSYNC";
 
   target_fd_ = OpenFile(target_path_.c_str(), flags, true, &err);
   if (!target_fd_) {
@@ -401,6 +400,46 @@
   return true;
 }
 
+bool DeltaPerformer::OpenCurrentECCPartition() {
+  if (source_ecc_fd_)
+    return true;
+
+  if (source_ecc_open_failure_)
+    return false;
+
+  if (current_partition_ >= partitions_.size())
+    return false;
+
+  // No support for ECC in minor version 1 or full payloads.
+  if (payload_->type == InstallPayloadType::kFull ||
+      GetMinorVersion() == kInPlaceMinorPayloadVersion)
+    return false;
+
+#if USE_FEC
+  const PartitionUpdate& partition = partitions_[current_partition_];
+  size_t num_previous_partitions =
+      install_plan_->partitions.size() - partitions_.size();
+  const InstallPlan::Partition& install_part =
+      install_plan_->partitions[num_previous_partitions + current_partition_];
+  string path = install_part.source_path;
+  FileDescriptorPtr fd(new FecFileDescriptor());
+  if (!fd->Open(path.c_str(), O_RDONLY, 0)) {
+    PLOG(ERROR) << "Unable to open ECC source partition "
+                << partition.partition_name() << " on slot "
+                << BootControlInterface::SlotName(install_plan_->source_slot)
+                << ", file " << path;
+    source_ecc_open_failure_ = true;
+    return false;
+  }
+  source_ecc_fd_ = fd;
+#else
+  // No support for ECC compiled.
+  source_ecc_open_failure_ = true;
+#endif  // USE_FEC
+
+  return !source_ecc_open_failure_;
+}
+
 namespace {
 
 void LogPartitionInfoHash(const PartitionInfo& info, const string& tag) {
@@ -422,139 +461,44 @@
 
 }  // namespace
 
-bool DeltaPerformer::GetMetadataSignatureSizeOffset(
-    uint64_t* out_offset) const {
-  if (GetMajorVersion() == kBrilloMajorPayloadVersion) {
-    *out_offset = kDeltaManifestSizeOffset + kDeltaManifestSizeSize;
-    return true;
-  }
-  return false;
-}
-
-bool DeltaPerformer::GetManifestOffset(uint64_t* out_offset) const {
-  // Actual manifest begins right after the manifest size field or
-  // metadata signature size field if major version >= 2.
-  if (major_payload_version_ == kChromeOSMajorPayloadVersion) {
-    *out_offset = kDeltaManifestSizeOffset + kDeltaManifestSizeSize;
-    return true;
-  }
-  if (major_payload_version_ == kBrilloMajorPayloadVersion) {
-    *out_offset = kDeltaManifestSizeOffset + kDeltaManifestSizeSize +
-                  kDeltaMetadataSignatureSizeSize;
-    return true;
-  }
-  LOG(ERROR) << "Unknown major payload version: " << major_payload_version_;
-  return false;
-}
-
-uint64_t DeltaPerformer::GetMetadataSize() const {
-  return metadata_size_;
-}
-
-uint64_t DeltaPerformer::GetMajorVersion() const {
-  return major_payload_version_;
-}
-
 uint32_t DeltaPerformer::GetMinorVersion() const {
   if (manifest_.has_minor_version()) {
     return manifest_.minor_version();
-  } else {
-    return payload_->type == InstallPayloadType::kDelta
-               ? kSupportedMinorPayloadVersion
-               : kFullPayloadMinorVersion;
   }
-}
-
-bool DeltaPerformer::GetManifest(DeltaArchiveManifest* out_manifest_p) const {
-  if (!manifest_parsed_)
-    return false;
-  *out_manifest_p = manifest_;
-  return true;
+  return payload_->type == InstallPayloadType::kDelta
+             ? kMaxSupportedMinorPayloadVersion
+             : kFullPayloadMinorVersion;
 }
 
 bool DeltaPerformer::IsHeaderParsed() const {
   return metadata_size_ != 0;
 }
 
-DeltaPerformer::MetadataParseResult DeltaPerformer::ParsePayloadMetadata(
+MetadataParseResult DeltaPerformer::ParsePayloadMetadata(
     const brillo::Blob& payload, ErrorCode* error) {
   *error = ErrorCode::kSuccess;
-  uint64_t manifest_offset;
 
   if (!IsHeaderParsed()) {
-    // Ensure we have data to cover the major payload version.
-    if (payload.size() < kDeltaManifestSizeOffset)
-      return kMetadataParseInsufficientData;
+    MetadataParseResult result =
+        payload_metadata_.ParsePayloadHeader(payload, error);
+    if (result != MetadataParseResult::kSuccess)
+      return result;
 
-    // Validate the magic string.
-    if (memcmp(payload.data(), kDeltaMagic, sizeof(kDeltaMagic)) != 0) {
-      LOG(ERROR) << "Bad payload format -- invalid delta magic.";
-      *error = ErrorCode::kDownloadInvalidMetadataMagicString;
-      return kMetadataParseError;
-    }
-
-    // Extract the payload version from the metadata.
-    static_assert(sizeof(major_payload_version_) == kDeltaVersionSize,
-                  "Major payload version size mismatch");
-    memcpy(&major_payload_version_,
-           &payload[kDeltaVersionOffset],
-           kDeltaVersionSize);
-    // switch big endian to host
-    major_payload_version_ = be64toh(major_payload_version_);
-
-    if (major_payload_version_ != supported_major_version_ &&
-        major_payload_version_ != kChromeOSMajorPayloadVersion) {
-      LOG(ERROR) << "Bad payload format -- unsupported payload version: "
-          << major_payload_version_;
-      *error = ErrorCode::kUnsupportedMajorPayloadVersion;
-      return kMetadataParseError;
-    }
-
-    // Get the manifest offset now that we have payload version.
-    if (!GetManifestOffset(&manifest_offset)) {
-      *error = ErrorCode::kUnsupportedMajorPayloadVersion;
-      return kMetadataParseError;
-    }
-    // Check again with the manifest offset.
-    if (payload.size() < manifest_offset)
-      return kMetadataParseInsufficientData;
-
-    // Next, parse the manifest size.
-    static_assert(sizeof(manifest_size_) == kDeltaManifestSizeSize,
-                  "manifest_size size mismatch");
-    memcpy(&manifest_size_,
-           &payload[kDeltaManifestSizeOffset],
-           kDeltaManifestSizeSize);
-    manifest_size_ = be64toh(manifest_size_);  // switch big endian to host
-
-    if (GetMajorVersion() == kBrilloMajorPayloadVersion) {
-      // Parse the metadata signature size.
-      static_assert(sizeof(metadata_signature_size_) ==
-                    kDeltaMetadataSignatureSizeSize,
-                    "metadata_signature_size size mismatch");
-      uint64_t metadata_signature_size_offset;
-      if (!GetMetadataSignatureSizeOffset(&metadata_signature_size_offset)) {
-        *error = ErrorCode::kError;
-        return kMetadataParseError;
-      }
-      memcpy(&metadata_signature_size_,
-             &payload[metadata_signature_size_offset],
-             kDeltaMetadataSignatureSizeSize);
-      metadata_signature_size_ = be32toh(metadata_signature_size_);
-    }
+    metadata_size_ = payload_metadata_.GetMetadataSize();
+    metadata_signature_size_ = payload_metadata_.GetMetadataSignatureSize();
+    major_payload_version_ = payload_metadata_.GetMajorVersion();
 
     // If the metadata size is present in install plan, check for it immediately
     // even before waiting for that many number of bytes to be downloaded in the
     // payload. This will prevent any attack which relies on us downloading data
     // beyond the expected metadata size.
-    metadata_size_ = manifest_offset + manifest_size_;
     if (install_plan_->hash_checks_mandatory) {
       if (payload_->metadata_size != metadata_size_) {
         LOG(ERROR) << "Mandatory metadata size in Omaha response ("
                    << payload_->metadata_size
                    << ") is missing/incorrect, actual = " << metadata_size_;
         *error = ErrorCode::kDownloadInvalidMetadataSize;
-        return kMetadataParseError;
+        return MetadataParseResult::kError;
       }
     }
   }
@@ -562,7 +506,7 @@
   // Now that we have validated the metadata size, we should wait for the full
   // metadata and its signature (if exist) to be read in before we can parse it.
   if (payload.size() < metadata_size_ + metadata_signature_size_)
-    return kMetadataParseInsufficientData;
+    return MetadataParseResult::kInsufficientData;
 
   // Log whether we validated the size or simply trusting what's in the payload
   // here. This is logged here (after we received the full metadata data) so
@@ -579,15 +523,25 @@
                  << "Trusting metadata size in payload = " << metadata_size_;
   }
 
+  // See if we should use the public RSA key in the Omaha response.
+  base::FilePath path_to_public_key(public_key_path_);
+  base::FilePath tmp_key;
+  if (GetPublicKeyFromResponse(&tmp_key))
+    path_to_public_key = tmp_key;
+  ScopedPathUnlinker tmp_key_remover(tmp_key.value());
+  if (tmp_key.empty())
+    tmp_key_remover.set_should_remove(false);
+
   // We have the full metadata in |payload|. Verify its integrity
   // and authenticity based on the information we have in Omaha response.
-  *error = ValidateMetadataSignature(payload);
+  *error = payload_metadata_.ValidateMetadataSignature(
+      payload, payload_->metadata_signature, path_to_public_key);
   if (*error != ErrorCode::kSuccess) {
     if (install_plan_->hash_checks_mandatory) {
       // The autoupdate_CatchBadSignatures test checks for this string
       // in log-files. Keep in sync.
       LOG(ERROR) << "Mandatory metadata signature validation failed";
-      return kMetadataParseError;
+      return MetadataParseResult::kError;
     }
 
     // For non-mandatory cases, just send a UMA stat.
@@ -595,19 +549,15 @@
     *error = ErrorCode::kSuccess;
   }
 
-  if (!GetManifestOffset(&manifest_offset)) {
-    *error = ErrorCode::kUnsupportedMajorPayloadVersion;
-    return kMetadataParseError;
-  }
   // The payload metadata is deemed valid, it's safe to parse the protobuf.
-  if (!manifest_.ParseFromArray(&payload[manifest_offset], manifest_size_)) {
+  if (!payload_metadata_.GetManifest(payload, &manifest_)) {
     LOG(ERROR) << "Unable to parse manifest in update file.";
     *error = ErrorCode::kDownloadManifestParseError;
-    return kMetadataParseError;
+    return MetadataParseResult::kError;
   }
 
   manifest_parsed_ = true;
-  return kMetadataParseSuccess;
+  return MetadataParseResult::kSuccess;
 }
 
 #define OP_DURATION_HISTOGRAM(_op_name, _start_time)      \
@@ -639,9 +589,9 @@
                       metadata_size_ + metadata_signature_size_));
 
     MetadataParseResult result = ParsePayloadMetadata(buffer_, error);
-    if (result == kMetadataParseError)
+    if (result == MetadataParseResult::kError)
       return false;
-    if (result == kMetadataParseInsufficientData) {
+    if (result == MetadataParseResult::kInsufficientData) {
       // If we just processed the header, make an attempt on the manifest.
       if (do_read_header && IsHeaderParsed())
         continue;
@@ -1076,15 +1026,10 @@
   return true;
 }
 
-namespace {
-
-// Compare |calculated_hash| with source hash in |operation|, return false and
-// dump hash and set |error| if don't match.
-// |source_fd| is the file descriptor of the source partition.
-bool ValidateSourceHash(const brillo::Blob& calculated_hash,
-                        const InstallOperation& operation,
-                        const FileDescriptorPtr source_fd,
-                        ErrorCode* error) {
+bool DeltaPerformer::ValidateSourceHash(const brillo::Blob& calculated_hash,
+                                        const InstallOperation& operation,
+                                        const FileDescriptorPtr source_fd,
+                                        ErrorCode* error) {
   brillo::Blob expected_source_hash(operation.src_sha256_hash().begin(),
                                     operation.src_sha256_hash().end());
   if (calculated_hash != expected_source_hash) {
@@ -1119,8 +1064,6 @@
   return true;
 }
 
-}  // namespace
-
 bool DeltaPerformer::PerformSourceCopyOperation(
     const InstallOperation& operation, ErrorCode* error) {
   if (operation.has_src_length())
@@ -1128,20 +1071,121 @@
   if (operation.has_dst_length())
     TEST_AND_RETURN_FALSE(operation.dst_length() % block_size_ == 0);
 
-  brillo::Blob source_hash;
-  TEST_AND_RETURN_FALSE(fd_utils::CopyAndHashExtents(source_fd_,
-                                                     operation.src_extents(),
-                                                     target_fd_,
-                                                     operation.dst_extents(),
-                                                     block_size_,
-                                                     &source_hash));
-
   if (operation.has_src_sha256_hash()) {
+    brillo::Blob source_hash;
+    brillo::Blob expected_source_hash(operation.src_sha256_hash().begin(),
+                                      operation.src_sha256_hash().end());
+
+    // We fall back to use the error corrected device if the hash of the raw
+    // device doesn't match or there was an error reading the source partition.
+    // Note that this code will also fall back if writing the target partition
+    // fails.
+    bool read_ok = fd_utils::CopyAndHashExtents(source_fd_,
+                                                operation.src_extents(),
+                                                target_fd_,
+                                                operation.dst_extents(),
+                                                block_size_,
+                                                &source_hash);
+    if (read_ok && expected_source_hash == source_hash)
+      return true;
+
+    if (!OpenCurrentECCPartition()) {
+      // The following function call will return false since the source hash
+      // mismatches, but we still want to call it so it prints the appropriate
+      // log message.
+      return ValidateSourceHash(source_hash, operation, source_fd_, error);
+    }
+
+    LOG(WARNING) << "Source hash from RAW device mismatched: found "
+                 << base::HexEncode(source_hash.data(), source_hash.size())
+                 << ", expected "
+                 << base::HexEncode(expected_source_hash.data(),
+                                    expected_source_hash.size());
+
+    TEST_AND_RETURN_FALSE(fd_utils::CopyAndHashExtents(source_ecc_fd_,
+                                                       operation.src_extents(),
+                                                       target_fd_,
+                                                       operation.dst_extents(),
+                                                       block_size_,
+                                                       &source_hash));
     TEST_AND_RETURN_FALSE(
-        ValidateSourceHash(source_hash, operation, source_fd_, error));
+        ValidateSourceHash(source_hash, operation, source_ecc_fd_, error));
+    // At this point reading from the error corrected device worked, but
+    // reading from the raw device failed, so this is considered a recovered
+    // failure.
+    source_ecc_recovered_failures_++;
+  } else {
+    // When the operation doesn't include a source hash, we attempt the error
+    // corrected device first since we can't verify the block in the raw device
+    // at this point, but we fall back to the raw device since the error
+    // corrected device can be shorter or not available.
+    if (OpenCurrentECCPartition() &&
+        fd_utils::CopyAndHashExtents(source_ecc_fd_,
+                                     operation.src_extents(),
+                                     target_fd_,
+                                     operation.dst_extents(),
+                                     block_size_,
+                                     nullptr)) {
+      return true;
+    }
+    TEST_AND_RETURN_FALSE(fd_utils::CopyAndHashExtents(source_fd_,
+                                                       operation.src_extents(),
+                                                       target_fd_,
+                                                       operation.dst_extents(),
+                                                       block_size_,
+                                                       nullptr));
+  }
+  return true;
+}
+
+FileDescriptorPtr DeltaPerformer::ChooseSourceFD(
+    const InstallOperation& operation, ErrorCode* error) {
+  if (!operation.has_src_sha256_hash()) {
+    // When the operation doesn't include a source hash, we attempt the error
+    // corrected device first since we can't verify the block in the raw device
+    // at this point, but we first need to make sure all extents are readable
+    // since the error corrected device can be shorter or not available.
+    if (OpenCurrentECCPartition() &&
+        fd_utils::ReadAndHashExtents(
+            source_ecc_fd_, operation.src_extents(), block_size_, nullptr)) {
+      return source_ecc_fd_;
+    }
+    return source_fd_;
   }
 
-  return true;
+  brillo::Blob source_hash;
+  brillo::Blob expected_source_hash(operation.src_sha256_hash().begin(),
+                                    operation.src_sha256_hash().end());
+  if (fd_utils::ReadAndHashExtents(
+          source_fd_, operation.src_extents(), block_size_, &source_hash) &&
+      source_hash == expected_source_hash) {
+    return source_fd_;
+  }
+  // We fall back to use the error corrected device if the hash of the raw
+  // device doesn't match or there was an error reading the source partition.
+  if (!OpenCurrentECCPartition()) {
+    // The following function call will return false since the source hash
+    // mismatches, but we still want to call it so it prints the appropriate
+    // log message.
+    ValidateSourceHash(source_hash, operation, source_fd_, error);
+    return nullptr;
+  }
+  LOG(WARNING) << "Source hash from RAW device mismatched: found "
+               << base::HexEncode(source_hash.data(), source_hash.size())
+               << ", expected "
+               << base::HexEncode(expected_source_hash.data(),
+                                  expected_source_hash.size());
+
+  if (fd_utils::ReadAndHashExtents(
+          source_ecc_fd_, operation.src_extents(), block_size_, &source_hash) &&
+      ValidateSourceHash(source_hash, operation, source_ecc_fd_, error)) {
+    // At this point reading from the error corrected device worked, but
+    // reading from the raw device failed, so this is considered a recovered
+    // failure.
+    source_ecc_recovered_failures_++;
+    return source_ecc_fd_;
+  }
+  return nullptr;
 }
 
 bool DeltaPerformer::ExtentsToBsdiffPositionsString(
@@ -1286,17 +1330,12 @@
   if (operation.has_dst_length())
     TEST_AND_RETURN_FALSE(operation.dst_length() % block_size_ == 0);
 
-  if (operation.has_src_sha256_hash()) {
-    brillo::Blob source_hash;
-    TEST_AND_RETURN_FALSE(fd_utils::ReadAndHashExtents(
-        source_fd_, operation.src_extents(), block_size_, &source_hash));
-    TEST_AND_RETURN_FALSE(
-        ValidateSourceHash(source_hash, operation, source_fd_, error));
-  }
+  FileDescriptorPtr source_fd = ChooseSourceFD(operation, error);
+  TEST_AND_RETURN_FALSE(source_fd != nullptr);
 
   auto reader = std::make_unique<DirectExtentReader>();
   TEST_AND_RETURN_FALSE(
-      reader->Init(source_fd_, operation.src_extents(), block_size_));
+      reader->Init(source_fd, operation.src_extents(), block_size_));
   auto src_file = std::make_unique<BsdiffExtentFile>(
       std::move(reader),
       utils::BlocksInExtents(operation.src_extents()) * block_size_);
@@ -1403,17 +1442,12 @@
   TEST_AND_RETURN_FALSE(buffer_offset_ == operation.data_offset());
   TEST_AND_RETURN_FALSE(buffer_.size() >= operation.data_length());
 
-  if (operation.has_src_sha256_hash()) {
-    brillo::Blob source_hash;
-    TEST_AND_RETURN_FALSE(fd_utils::ReadAndHashExtents(
-        source_fd_, operation.src_extents(), block_size_, &source_hash));
-    TEST_AND_RETURN_FALSE(
-        ValidateSourceHash(source_hash, operation, source_fd_, error));
-  }
+  FileDescriptorPtr source_fd = ChooseSourceFD(operation, error);
+  TEST_AND_RETURN_FALSE(source_fd != nullptr);
 
   auto reader = std::make_unique<DirectExtentReader>();
   TEST_AND_RETURN_FALSE(
-      reader->Init(source_fd_, operation.src_extents(), block_size_));
+      reader->Init(source_fd, operation.src_extents(), block_size_));
   puffin::UniqueStreamPtr src_stream(new PuffinExtentStream(
       std::move(reader),
       utils::BlocksInExtents(operation.src_extents()) * block_size_));
@@ -1489,93 +1523,6 @@
   return true;
 }
 
-ErrorCode DeltaPerformer::ValidateMetadataSignature(
-    const brillo::Blob& payload) {
-  if (payload.size() < metadata_size_ + metadata_signature_size_)
-    return ErrorCode::kDownloadMetadataSignatureError;
-
-  brillo::Blob metadata_signature_blob, metadata_signature_protobuf_blob;
-  if (!payload_->metadata_signature.empty()) {
-    // Convert base64-encoded signature to raw bytes.
-    if (!brillo::data_encoding::Base64Decode(payload_->metadata_signature,
-                                             &metadata_signature_blob)) {
-      LOG(ERROR) << "Unable to decode base64 metadata signature: "
-                 << payload_->metadata_signature;
-      return ErrorCode::kDownloadMetadataSignatureError;
-    }
-  } else if (major_payload_version_ == kBrilloMajorPayloadVersion) {
-    metadata_signature_protobuf_blob.assign(
-        payload.begin() + metadata_size_,
-        payload.begin() + metadata_size_ + metadata_signature_size_);
-  }
-
-  if (metadata_signature_blob.empty() &&
-      metadata_signature_protobuf_blob.empty()) {
-    if (install_plan_->hash_checks_mandatory) {
-      LOG(ERROR) << "Missing mandatory metadata signature in both Omaha "
-                 << "response and payload.";
-      return ErrorCode::kDownloadMetadataSignatureMissingError;
-    }
-
-    LOG(WARNING) << "Cannot validate metadata as the signature is empty";
-    return ErrorCode::kSuccess;
-  }
-
-  // See if we should use the public RSA key in the Omaha response.
-  base::FilePath path_to_public_key(public_key_path_);
-  base::FilePath tmp_key;
-  if (GetPublicKeyFromResponse(&tmp_key))
-    path_to_public_key = tmp_key;
-  ScopedPathUnlinker tmp_key_remover(tmp_key.value());
-  if (tmp_key.empty())
-    tmp_key_remover.set_should_remove(false);
-
-  LOG(INFO) << "Verifying metadata hash signature using public key: "
-            << path_to_public_key.value();
-
-  brillo::Blob calculated_metadata_hash;
-  if (!HashCalculator::RawHashOfBytes(
-          payload.data(), metadata_size_, &calculated_metadata_hash)) {
-    LOG(ERROR) << "Unable to compute actual hash of manifest";
-    return ErrorCode::kDownloadMetadataSignatureVerificationError;
-  }
-
-  PayloadVerifier::PadRSA2048SHA256Hash(&calculated_metadata_hash);
-  if (calculated_metadata_hash.empty()) {
-    LOG(ERROR) << "Computed actual hash of metadata is empty.";
-    return ErrorCode::kDownloadMetadataSignatureVerificationError;
-  }
-
-  if (!metadata_signature_blob.empty()) {
-    brillo::Blob expected_metadata_hash;
-    if (!PayloadVerifier::GetRawHashFromSignature(metadata_signature_blob,
-                                                  path_to_public_key.value(),
-                                                  &expected_metadata_hash)) {
-      LOG(ERROR) << "Unable to compute expected hash from metadata signature";
-      return ErrorCode::kDownloadMetadataSignatureError;
-    }
-    if (calculated_metadata_hash != expected_metadata_hash) {
-      LOG(ERROR) << "Manifest hash verification failed. Expected hash = ";
-      utils::HexDumpVector(expected_metadata_hash);
-      LOG(ERROR) << "Calculated hash = ";
-      utils::HexDumpVector(calculated_metadata_hash);
-      return ErrorCode::kDownloadMetadataSignatureMismatch;
-    }
-  } else {
-    if (!PayloadVerifier::VerifySignature(metadata_signature_protobuf_blob,
-                                          path_to_public_key.value(),
-                                          calculated_metadata_hash)) {
-      LOG(ERROR) << "Manifest hash verification failed.";
-      return ErrorCode::kDownloadMetadataSignatureMismatch;
-    }
-  }
-
-  // The autoupdate_CatchBadSignatures test checks for this string in
-  // log-files. Keep in sync.
-  LOG(INFO) << "Metadata hash signature matches value in Omaha response.";
-  return ErrorCode::kSuccess;
-}
-
 ErrorCode DeltaPerformer::ValidateManifest() {
   // Perform assorted checks to sanity check the manifest, make sure it
   // matches data from other sources, and that it is a supported version.
@@ -1615,11 +1562,13 @@
       return ErrorCode::kUnsupportedMinorPayloadVersion;
     }
   } else {
-    if (manifest_.minor_version() != supported_minor_version_) {
+    if (manifest_.minor_version() < kMinSupportedMinorPayloadVersion ||
+        manifest_.minor_version() > kMaxSupportedMinorPayloadVersion) {
       LOG(ERROR) << "Manifest contains minor version "
                  << manifest_.minor_version()
-                 << " not the supported "
-                 << supported_minor_version_;
+                 << " not in the range of supported minor versions ["
+                 << kMinSupportedMinorPayloadVersion << ", "
+                 << kMaxSupportedMinorPayloadVersion << "].";
       return ErrorCode::kUnsupportedMinorPayloadVersion;
     }
   }
@@ -1766,16 +1715,6 @@
   }
 
   LOG(INFO) << "Payload hash matches value in payload.";
-
-  // At this point, we are guaranteed to have downloaded a full payload, i.e
-  // the one whose size matches the size mentioned in Omaha response. If any
-  // errors happen after this, it's likely a problem with the payload itself or
-  // the state of the system and not a problem with the URL or network.  So,
-  // indicate that to the download delegate so that AU can backoff
-  // appropriately.
-  if (download_delegate_)
-    download_delegate_->DownloadComplete();
-
   return ErrorCode::kSuccess;
 }
 
diff --git a/payload_consumer/delta_performer.h b/payload_consumer/delta_performer.h
index d5ca799..38d2c43 100644
--- a/payload_consumer/delta_performer.h
+++ b/payload_consumer/delta_performer.h
@@ -33,6 +33,7 @@
 #include "update_engine/payload_consumer/file_descriptor.h"
 #include "update_engine/payload_consumer/file_writer.h"
 #include "update_engine/payload_consumer/install_plan.h"
+#include "update_engine/payload_consumer/payload_metadata.h"
 #include "update_engine/update_metadata.pb.h"
 
 namespace chromeos_update_engine {
@@ -47,21 +48,6 @@
 
 class DeltaPerformer : public FileWriter {
  public:
-  enum MetadataParseResult {
-    kMetadataParseSuccess,
-    kMetadataParseError,
-    kMetadataParseInsufficientData,
-  };
-
-  static const uint64_t kDeltaVersionOffset;
-  static const uint64_t kDeltaVersionSize;
-  static const uint64_t kDeltaManifestSizeOffset;
-  static const uint64_t kDeltaManifestSizeSize;
-  static const uint64_t kDeltaMetadataSignatureSizeSize;
-  static const uint64_t kMaxPayloadHeaderSize;
-  static const uint64_t kSupportedMajorPayloadVersion;
-  static const uint32_t kSupportedMinorPayloadVersion;
-
   // Defines the granularity of progress logging in terms of how many "completed
   // chunks" we want to report at the most.
   static const unsigned kProgressLogMaxChunks;
@@ -81,14 +67,14 @@
                  DownloadActionDelegate* download_delegate,
                  InstallPlan* install_plan,
                  InstallPlan::Payload* payload,
-                 bool is_interactive)
+                 bool interactive)
       : prefs_(prefs),
         boot_control_(boot_control),
         hardware_(hardware),
         download_delegate_(download_delegate),
         install_plan_(install_plan),
         payload_(payload),
-        is_interactive_(is_interactive) {}
+        interactive_(interactive) {}
 
   // FileWriter's Write implementation where caller doesn't care about
   // error codes.
@@ -110,6 +96,10 @@
   // work. Returns whether the required file descriptors were successfully open.
   bool OpenCurrentPartition();
 
+  // Attempt to open the error-corrected device for the current partition.
+  // Returns whether the operation succeeded.
+  bool OpenCurrentECCPartition();
+
   // Closes the current partition file descriptors if open. Returns 0 on success
   // or -errno on error.
   int CloseCurrentPartition();
@@ -165,39 +155,27 @@
     public_key_path_ = public_key_path;
   }
 
-  // Set |*out_offset| to the byte offset where the size of the metadata
-  // signature is stored in a payload. Return true on success, if this field is
-  // not present in the payload, return false.
-  bool GetMetadataSignatureSizeOffset(uint64_t* out_offset) const;
-
-  // Set |*out_offset| to the byte offset at which the manifest protobuf begins
-  // in a payload. Return true on success, false if the offset is unknown.
-  bool GetManifestOffset(uint64_t* out_offset) const;
-
-  // Returns the size of the payload metadata, which includes the payload header
-  // and the manifest. If the header was not yet parsed, returns zero.
-  uint64_t GetMetadataSize() const;
-
-  // If the manifest was successfully parsed, copies it to |*out_manifest_p|.
-  // Returns true on success.
-  bool GetManifest(DeltaArchiveManifest* out_manifest_p) const;
-
   // Return true if header parsing is finished and no errors occurred.
   bool IsHeaderParsed() const;
 
-  // Returns the major payload version. If the version was not yet parsed,
-  // returns zero.
-  uint64_t GetMajorVersion() const;
-
   // Returns the delta minor version. If this value is defined in the manifest,
   // it returns that value, otherwise it returns the default value.
   uint32_t GetMinorVersion() const;
 
+  // Compare |calculated_hash| with source hash in |operation|, return false and
+  // dump hash and set |error| if they don't match.
+  // |source_fd| is the file descriptor of the source partition.
+  static bool ValidateSourceHash(const brillo::Blob& calculated_hash,
+                                 const InstallOperation& operation,
+                                 const FileDescriptorPtr source_fd,
+                                 ErrorCode* error);
+
  private:
   friend class DeltaPerformerTest;
   friend class DeltaPerformerIntegrationTest;
   FRIEND_TEST(DeltaPerformerTest, BrilloMetadataSignatureSizeTest);
-  FRIEND_TEST(DeltaPerformerTest, BrilloVerifyMetadataSignatureTest);
+  FRIEND_TEST(DeltaPerformerTest, BrilloParsePayloadMetadataTest);
+  FRIEND_TEST(DeltaPerformerTest, ChooseSourceFDTest);
   FRIEND_TEST(DeltaPerformerTest, UsePublicKeyFromResponse);
 
   // Parse and move the update instructions of all partitions into our local
@@ -235,16 +213,6 @@
   // Returns ErrorCode::kSuccess on match or a suitable error code otherwise.
   ErrorCode ValidateOperationHash(const InstallOperation& operation);
 
-  // Given the |payload|, verifies that the signed hash of its metadata matches
-  // what's specified in the install plan from Omaha (if present) or the
-  // metadata signature in payload itself (if present). Returns
-  // ErrorCode::kSuccess on match or a suitable error code otherwise. This
-  // method must be called before any part of the metadata is parsed so that a
-  // man-in-the-middle attack on the SSL connection to the payload server
-  // doesn't exploit any vulnerability in the code that parses the protocol
-  // buffer.
-  ErrorCode ValidateMetadataSignature(const brillo::Blob& payload);
-
   // Returns true on success.
   bool PerformInstallOperation(const InstallOperation& operation);
 
@@ -262,6 +230,13 @@
   bool PerformPuffDiffOperation(const InstallOperation& operation,
                                 ErrorCode* error);
 
+  // For a given operation, choose the source fd to be used (raw device or error
+  // correction device) based on the source operation hash.
+  // Returns nullptr if the source hash mismatch cannot be corrected, and set
+  // the |error| accordingly.
+  FileDescriptorPtr ChooseSourceFD(const InstallOperation& operation,
+                                   ErrorCode* error);
+
   // Extracts the payload signature message from the blob on the |operation| if
   // the offset matches the one specified by the manifest. Returns whether the
   // signature was extracted.
@@ -317,6 +292,22 @@
   // partition when using a delta payload.
   FileDescriptorPtr source_fd_{nullptr};
 
+  // File descriptor of the error corrected source partition. Only set while
+  // updating partition using a delta payload for a partition where error
+  // correction is available. The size of the error corrected device is smaller
+  // than the underlying raw device, since it doesn't include the error
+  // correction blocks.
+  FileDescriptorPtr source_ecc_fd_{nullptr};
+
+  // The total number of operations that failed source hash verification but
+  // passed after falling back to the error-corrected |source_ecc_fd_| device.
+  uint64_t source_ecc_recovered_failures_{0};
+
+  // Whether opening the current partition as an error-corrected device failed.
+  // Used to avoid re-opening the same source partition if it is not actually
+  // error corrected.
+  bool source_ecc_open_failure_{false};
+
   // File descriptor of the target partition. Only set while performing the
   // operations of a given partition.
   FileDescriptorPtr target_fd_{nullptr};
@@ -325,13 +316,14 @@
   std::string source_path_;
   std::string target_path_;
 
+  PayloadMetadata payload_metadata_;
+
   // Parsed manifest. Set after enough bytes to parse the manifest were
   // downloaded.
   DeltaArchiveManifest manifest_;
   bool manifest_parsed_{false};
   bool manifest_valid_{false};
   uint64_t metadata_size_{0};
-  uint64_t manifest_size_{0};
   uint32_t metadata_signature_size_{0};
   uint64_t major_payload_version_{0};
 
@@ -396,7 +388,7 @@
   unsigned last_progress_chunk_{0};
 
   // If |true|, the update is user initiated (vs. periodic update checks).
-  bool is_interactive_{false};
+  bool interactive_{false};
 
   // The timeout after which we should force emitting a progress log (constant),
   // and the actual point in time for the next forced log to be emitted.
@@ -404,12 +396,6 @@
       base::TimeDelta::FromSeconds(kProgressLogTimeoutSeconds)};
   base::Time forced_progress_log_time_;
 
-  // The payload major payload version supported by DeltaPerformer.
-  uint64_t supported_major_version_{kSupportedMajorPayloadVersion};
-
-  // The delta minor payload version supported by DeltaPerformer.
-  uint32_t supported_minor_version_{kSupportedMinorPayloadVersion};
-
   DISALLOW_COPY_AND_ASSIGN(DeltaPerformer);
 };
 
diff --git a/payload_consumer/delta_performer_integration_test.cc b/payload_consumer/delta_performer_integration_test.cc
index 3572a6d..ef99cc9 100644
--- a/payload_consumer/delta_performer_integration_test.cc
+++ b/payload_consumer/delta_performer_integration_test.cc
@@ -116,13 +116,7 @@
 
 }  // namespace
 
-class DeltaPerformerIntegrationTest : public ::testing::Test {
- public:
-  static void SetSupportedVersion(DeltaPerformer* performer,
-                                  uint64_t minor_version) {
-    performer->supported_minor_version_ = minor_version;
-  }
-};
+class DeltaPerformerIntegrationTest : public ::testing::Test {};
 
 static void CompareFilesByBlock(const string& a_file, const string& b_file,
                                 size_t image_size) {
@@ -748,11 +742,10 @@
                                   &state->mock_delegate_,
                                   install_plan,
                                   &install_plan->payloads[0],
-                                  false /* is_interactive */);
+                                  false /* interactive */);
   string public_key_path = GetBuildArtifactsPath(kUnittestPublicKeyPath);
   EXPECT_TRUE(utils::FileExists(public_key_path.c_str()));
   (*performer)->set_public_key_path(public_key_path);
-  DeltaPerformerIntegrationTest::SetSupportedVersion(*performer, minor_version);
 
   EXPECT_EQ(static_cast<off_t>(state->image_size),
             HashCalculator::RawHashOfFile(
@@ -852,9 +845,6 @@
     return;
   }
 
-  int expected_times = (expected_result == ErrorCode::kSuccess) ? 1 : 0;
-  EXPECT_CALL(state->mock_delegate_, DownloadComplete()).Times(expected_times);
-
   LOG(INFO) << "Verifying payload for expected result " << expected_result;
   brillo::Blob expected_hash;
   HashCalculator::RawHashOfData(state->delta, &expected_hash);
@@ -972,12 +962,14 @@
                    false, kInPlaceMinorPayloadVersion);
 }
 
-TEST(DeltaPerformerIntegrationTest, RunAsRootSmallImageSignaturePlaceholderTest) {
+TEST(DeltaPerformerIntegrationTest,
+     RunAsRootSmallImageSignaturePlaceholderTest) {
   DoSmallImageTest(false, false, false, -1, kSignatureGeneratedPlaceholder,
                    false, kInPlaceMinorPayloadVersion);
 }
 
-TEST(DeltaPerformerIntegrationTest, RunAsRootSmallImageSignaturePlaceholderMismatchTest) {
+TEST(DeltaPerformerIntegrationTest,
+     RunAsRootSmallImageSignaturePlaceholderMismatchTest) {
   DeltaState state;
   GenerateDeltaFile(false, false, false, -1,
                     kSignatureGeneratedPlaceholderMismatch, &state,
@@ -1019,17 +1011,20 @@
                    false, kInPlaceMinorPayloadVersion);
 }
 
-TEST(DeltaPerformerIntegrationTest, RunAsRootSmallImageSignGeneratedShellBadKeyTest) {
+TEST(DeltaPerformerIntegrationTest,
+     RunAsRootSmallImageSignGeneratedShellBadKeyTest) {
   DoSmallImageTest(false, false, false, -1, kSignatureGeneratedShellBadKey,
                    false, kInPlaceMinorPayloadVersion);
 }
 
-TEST(DeltaPerformerIntegrationTest, RunAsRootSmallImageSignGeneratedShellRotateCl1Test) {
+TEST(DeltaPerformerIntegrationTest,
+     RunAsRootSmallImageSignGeneratedShellRotateCl1Test) {
   DoSmallImageTest(false, false, false, -1, kSignatureGeneratedShellRotateCl1,
                    false, kInPlaceMinorPayloadVersion);
 }
 
-TEST(DeltaPerformerIntegrationTest, RunAsRootSmallImageSignGeneratedShellRotateCl2Test) {
+TEST(DeltaPerformerIntegrationTest,
+     RunAsRootSmallImageSignGeneratedShellRotateCl2Test) {
   DoSmallImageTest(false, false, false, -1, kSignatureGeneratedShellRotateCl2,
                    false, kInPlaceMinorPayloadVersion);
 }
@@ -1039,7 +1034,8 @@
                    false, kSourceMinorPayloadVersion);
 }
 
-TEST(DeltaPerformerIntegrationTest, RunAsRootMandatoryOperationHashMismatchTest) {
+TEST(DeltaPerformerIntegrationTest,
+     RunAsRootMandatoryOperationHashMismatchTest) {
   DoOperationHashMismatchTest(kInvalidOperationData, true);
 }
 
diff --git a/payload_consumer/delta_performer_unittest.cc b/payload_consumer/delta_performer_unittest.cc
index bad7bf0..c3e4fdb 100644
--- a/payload_consumer/delta_performer_unittest.cc
+++ b/payload_consumer/delta_performer_unittest.cc
@@ -20,6 +20,7 @@
 #include <inttypes.h>
 #include <time.h>
 
+#include <memory>
 #include <string>
 #include <vector>
 
@@ -39,6 +40,7 @@
 #include "update_engine/common/fake_prefs.h"
 #include "update_engine/common/test_utils.h"
 #include "update_engine/common/utils.h"
+#include "update_engine/payload_consumer/fake_file_descriptor.h"
 #include "update_engine/payload_consumer/mock_download_action.h"
 #include "update_engine/payload_consumer/payload_constants.h"
 #include "update_engine/payload_generator/bzip.h"
@@ -171,9 +173,11 @@
   brillo::Blob GeneratePayload(const brillo::Blob& blob_data,
                                const vector<AnnotatedOperation>& aops,
                                bool sign_payload) {
-    return GeneratePayload(blob_data, aops, sign_payload,
-                           DeltaPerformer::kSupportedMajorPayloadVersion,
-                           DeltaPerformer::kSupportedMinorPayloadVersion);
+    return GeneratePayload(blob_data,
+                           aops,
+                           sign_payload,
+                           kMaxSupportedMajorPayloadVersion,
+                           kMaxSupportedMinorPayloadVersion);
   }
 
   brillo::Blob GeneratePayload(const brillo::Blob& blob_data,
@@ -227,6 +231,24 @@
     return payload_data;
   }
 
+  brillo::Blob GenerateSourceCopyPayload(const brillo::Blob& copied_data,
+                                         bool add_hash) {
+    PayloadGenerationConfig config;
+    const uint64_t kDefaultBlockSize = config.block_size;
+    EXPECT_EQ(0U, copied_data.size() % kDefaultBlockSize);
+    uint64_t num_blocks = copied_data.size() / kDefaultBlockSize;
+    AnnotatedOperation aop;
+    *(aop.op.add_src_extents()) = ExtentForRange(0, num_blocks);
+    *(aop.op.add_dst_extents()) = ExtentForRange(0, num_blocks);
+    aop.op.set_type(InstallOperation::SOURCE_COPY);
+    brillo::Blob src_hash;
+    EXPECT_TRUE(HashCalculator::RawHashOfData(copied_data, &src_hash));
+    if (add_hash)
+      aop.op.set_src_sha256_hash(src_hash.data(), src_hash.size());
+
+    return GeneratePayload(brillo::Blob(), {aop}, false);
+  }
+
   // Apply |payload_data| on partition specified in |source_path|.
   // Expect result of performer_.Write() to be |expect_success|.
   // Returns the result of the payload application.
@@ -318,20 +340,20 @@
 
     install_plan_.hash_checks_mandatory = hash_checks_mandatory;
 
-    DeltaPerformer::MetadataParseResult expected_result, actual_result;
+    MetadataParseResult expected_result, actual_result;
     ErrorCode expected_error, actual_error;
 
     // Fill up the metadata signature in install plan according to the test.
     switch (metadata_signature_test) {
       case kEmptyMetadataSignature:
         payload_.metadata_signature.clear();
-        expected_result = DeltaPerformer::kMetadataParseError;
+        expected_result = MetadataParseResult::kError;
         expected_error = ErrorCode::kDownloadMetadataSignatureMissingError;
         break;
 
       case kInvalidMetadataSignature:
         payload_.metadata_signature = kBogusMetadataSignature1;
-        expected_result = DeltaPerformer::kMetadataParseError;
+        expected_result = MetadataParseResult::kError;
         expected_error = ErrorCode::kDownloadMetadataSignatureMismatch;
         break;
 
@@ -346,14 +368,14 @@
             GetBuildArtifactsPath(kUnittestPrivateKeyPath),
             &payload_.metadata_signature));
         EXPECT_FALSE(payload_.metadata_signature.empty());
-        expected_result = DeltaPerformer::kMetadataParseSuccess;
+        expected_result = MetadataParseResult::kSuccess;
         expected_error = ErrorCode::kSuccess;
         break;
     }
 
     // Ignore the expected result/error if hash checks are not mandatory.
     if (!hash_checks_mandatory) {
-      expected_result = DeltaPerformer::kMetadataParseSuccess;
+      expected_result = MetadataParseResult::kSuccess;
       expected_error = ErrorCode::kSuccess;
     }
 
@@ -373,25 +395,40 @@
 
     // Check that the parsed metadata size is what's expected. This test
     // implicitly confirms that the metadata signature is valid, if required.
-    EXPECT_EQ(payload_.metadata_size, performer_.GetMetadataSize());
+    EXPECT_EQ(payload_.metadata_size, performer_.metadata_size_);
   }
 
-  void SetSupportedMajorVersion(uint64_t major_version) {
-    performer_.supported_major_version_ = major_version;
+  // Helper function to pretend that the ECC file descriptor was already opened.
+  // Returns a pointer to the created file descriptor.
+  FakeFileDescriptor* SetFakeECCFile(size_t size) {
+    EXPECT_FALSE(performer_.source_ecc_fd_) << "source_ecc_fd_ already open.";
+    FakeFileDescriptor* ret = new FakeFileDescriptor();
+    fake_ecc_fd_.reset(ret);
+    // Call open to simulate it was already opened.
+    ret->Open("", 0);
+    ret->SetFileSize(size);
+    performer_.source_ecc_fd_ = fake_ecc_fd_;
+    return ret;
   }
+
+  uint64_t GetSourceEccRecoveredFailures() const {
+    return performer_.source_ecc_recovered_failures_;
+  }
+
   FakePrefs prefs_;
   InstallPlan install_plan_;
   InstallPlan::Payload payload_;
   FakeBootControl fake_boot_control_;
   FakeHardware fake_hardware_;
   MockDownloadActionDelegate mock_delegate_;
+  FileDescriptorPtr fake_ecc_fd_;
   DeltaPerformer performer_{&prefs_,
                             &fake_boot_control_,
                             &fake_hardware_,
                             &mock_delegate_,
                             &install_plan_,
                             &payload_,
-                            false /* is_interactive*/};
+                            false /* interactive*/};
 };
 
 TEST_F(DeltaPerformerTest, FullPayloadWriteTest) {
@@ -593,6 +630,94 @@
   EXPECT_EQ(actual_data, ApplyPayload(payload_data, source_path, false));
 }
 
+// Test that the error-corrected file descriptor is used to read the partition
+// since the source partition doesn't match the operation hash.
+TEST_F(DeltaPerformerTest, ErrorCorrectionSourceCopyFallbackTest) {
+  const size_t kCopyOperationSize = 4 * 4096;
+  string source_path;
+  EXPECT_TRUE(utils::MakeTempFile("Source-XXXXXX", &source_path, nullptr));
+  ScopedPathUnlinker path_unlinker(source_path);
+  // Write invalid data to the source image, which doesn't match the expected
+  // hash.
+  brillo::Blob invalid_data(kCopyOperationSize, 0x55);
+  EXPECT_TRUE(utils::WriteFile(
+      source_path.c_str(), invalid_data.data(), invalid_data.size()));
+
+  // Setup the fec file descriptor as the fake stream, which matches
+  // |expected_data|.
+  FakeFileDescriptor* fake_fec = SetFakeECCFile(kCopyOperationSize);
+  brillo::Blob expected_data = FakeFileDescriptorData(kCopyOperationSize);
+
+  brillo::Blob payload_data = GenerateSourceCopyPayload(expected_data, true);
+  EXPECT_EQ(expected_data, ApplyPayload(payload_data, source_path, true));
+  // Verify that the fake_fec was actually used.
+  EXPECT_EQ(1U, fake_fec->GetReadOps().size());
+  EXPECT_EQ(1U, GetSourceEccRecoveredFailures());
+}
+
+// Test that the error-corrected file descriptor is used to read a partition
+// when no hash is available for SOURCE_COPY but it falls back to the normal
+// file descriptor when the size of the error corrected one is too small.
+TEST_F(DeltaPerformerTest, ErrorCorrectionSourceCopyWhenNoHashFallbackTest) {
+  const size_t kCopyOperationSize = 4 * 4096;
+  string source_path;
+  EXPECT_TRUE(utils::MakeTempFile("Source-XXXXXX", &source_path, nullptr));
+  ScopedPathUnlinker path_unlinker(source_path);
+  // Setup the source path with the right expected data.
+  brillo::Blob expected_data = FakeFileDescriptorData(kCopyOperationSize);
+  EXPECT_TRUE(utils::WriteFile(
+      source_path.c_str(), expected_data.data(), expected_data.size()));
+
+  // Setup the fec file descriptor as the fake stream, with smaller data than
+  // the expected.
+  FakeFileDescriptor* fake_fec = SetFakeECCFile(kCopyOperationSize / 2);
+
+  // The payload operation doesn't include an operation hash.
+  brillo::Blob payload_data = GenerateSourceCopyPayload(expected_data, false);
+  EXPECT_EQ(expected_data, ApplyPayload(payload_data, source_path, true));
+  // Verify that the fake_fec was attempted to be used. Since the file
+  // descriptor is shorter it can actually do more than one read to realize it
+  // reached the EOF.
+  EXPECT_LE(1U, fake_fec->GetReadOps().size());
+  // This fallback doesn't count as an error-corrected operation since the
+  // operation hash was not available.
+  EXPECT_EQ(0U, GetSourceEccRecoveredFailures());
+}
+
+TEST_F(DeltaPerformerTest, ChooseSourceFDTest) {
+  const size_t kSourceSize = 4 * 4096;
+  string source_path;
+  EXPECT_TRUE(utils::MakeTempFile("Source-XXXXXX", &source_path, nullptr));
+  ScopedPathUnlinker path_unlinker(source_path);
+  // Write invalid data to the source image, which doesn't match the expected
+  // hash.
+  brillo::Blob invalid_data(kSourceSize, 0x55);
+  EXPECT_TRUE(utils::WriteFile(
+      source_path.c_str(), invalid_data.data(), invalid_data.size()));
+
+  performer_.source_fd_ = std::make_shared<EintrSafeFileDescriptor>();
+  performer_.source_fd_->Open(source_path.c_str(), O_RDONLY);
+  performer_.block_size_ = 4096;
+
+  // Setup the fec file descriptor as the fake stream, which matches
+  // |expected_data|.
+  FakeFileDescriptor* fake_fec = SetFakeECCFile(kSourceSize);
+  brillo::Blob expected_data = FakeFileDescriptorData(kSourceSize);
+
+  InstallOperation op;
+  *(op.add_src_extents()) = ExtentForRange(0, kSourceSize / 4096);
+  brillo::Blob src_hash;
+  EXPECT_TRUE(HashCalculator::RawHashOfData(expected_data, &src_hash));
+  op.set_src_sha256_hash(src_hash.data(), src_hash.size());
+
+  ErrorCode error = ErrorCode::kSuccess;
+  EXPECT_EQ(performer_.source_ecc_fd_, performer_.ChooseSourceFD(op, &error));
+  EXPECT_EQ(ErrorCode::kSuccess, error);
+  // Verify that the fake_fec was actually used.
+  EXPECT_EQ(1U, fake_fec->GetReadOps().size());
+  EXPECT_EQ(1U, GetSourceEccRecoveredFailures());
+}
+
 TEST_F(DeltaPerformerTest, ExtentsToByteStringTest) {
   uint64_t test[] = {1, 1, 4, 2, 0, 1};
   static_assert(arraysize(test) % 2 == 0, "Array size uneven");
@@ -633,7 +758,22 @@
   manifest.mutable_old_rootfs_info();
   manifest.mutable_new_kernel_info();
   manifest.mutable_new_rootfs_info();
-  manifest.set_minor_version(DeltaPerformer::kSupportedMinorPayloadVersion);
+  manifest.set_minor_version(kMaxSupportedMinorPayloadVersion);
+
+  RunManifestValidation(manifest,
+                        kChromeOSMajorPayloadVersion,
+                        InstallPayloadType::kDelta,
+                        ErrorCode::kSuccess);
+}
+
+TEST_F(DeltaPerformerTest, ValidateManifestDeltaMinGoodTest) {
+  // The Manifest we are validating.
+  DeltaArchiveManifest manifest;
+  manifest.mutable_old_kernel_info();
+  manifest.mutable_old_rootfs_info();
+  manifest.mutable_new_kernel_info();
+  manifest.mutable_new_rootfs_info();
+  manifest.set_minor_version(kMinSupportedMinorPayloadVersion);
 
   RunManifestValidation(manifest,
                         kChromeOSMajorPayloadVersion,
@@ -646,7 +786,7 @@
   DeltaArchiveManifest manifest;
 
   RunManifestValidation(manifest,
-                        DeltaPerformer::kSupportedMajorPayloadVersion,
+                        kMaxSupportedMajorPayloadVersion,
                         InstallPayloadType::kFull,
                         ErrorCode::kSuccess);
 }
@@ -659,7 +799,7 @@
   manifest.mutable_old_rootfs_info();
 
   RunManifestValidation(manifest,
-                        DeltaPerformer::kSupportedMajorPayloadVersion,
+                        kMaxSupportedMajorPayloadVersion,
                         InstallPayloadType::kDelta,
                         ErrorCode::kUnsupportedMinorPayloadVersion);
 }
@@ -670,7 +810,7 @@
   manifest.mutable_old_kernel_info();
   manifest.mutable_new_kernel_info();
   manifest.mutable_new_rootfs_info();
-  manifest.set_minor_version(DeltaPerformer::kSupportedMinorPayloadVersion);
+  manifest.set_minor_version(kMaxSupportedMinorPayloadVersion);
 
   RunManifestValidation(manifest,
                         kChromeOSMajorPayloadVersion,
@@ -684,7 +824,7 @@
   manifest.mutable_old_rootfs_info();
   manifest.mutable_new_kernel_info();
   manifest.mutable_new_rootfs_info();
-  manifest.set_minor_version(DeltaPerformer::kSupportedMinorPayloadVersion);
+  manifest.set_minor_version(kMaxSupportedMinorPayloadVersion);
 
   RunManifestValidation(manifest,
                         kChromeOSMajorPayloadVersion,
@@ -698,7 +838,7 @@
   PartitionUpdate* partition = manifest.add_partitions();
   partition->mutable_old_partition_info();
   partition->mutable_new_partition_info();
-  manifest.set_minor_version(DeltaPerformer::kSupportedMinorPayloadVersion);
+  manifest.set_minor_version(kMaxSupportedMinorPayloadVersion);
 
   RunManifestValidation(manifest,
                         kBrilloMajorPayloadVersion,
@@ -711,13 +851,12 @@
   DeltaArchiveManifest manifest;
 
   // Generate a bad version number.
-  manifest.set_minor_version(DeltaPerformer::kSupportedMinorPayloadVersion +
-                             10000);
+  manifest.set_minor_version(kMaxSupportedMinorPayloadVersion + 10000);
   // Mark the manifest as a delta payload by setting old_rootfs_info.
   manifest.mutable_old_rootfs_info();
 
   RunManifestValidation(manifest,
-                        DeltaPerformer::kSupportedMajorPayloadVersion,
+                        kMaxSupportedMajorPayloadVersion,
                         InstallPayloadType::kDelta,
                         ErrorCode::kUnsupportedMinorPayloadVersion);
 }
@@ -740,29 +879,21 @@
   EXPECT_LT(performer_.Close(), 0);
 
   EXPECT_TRUE(performer_.IsHeaderParsed());
-  EXPECT_EQ(kBrilloMajorPayloadVersion, performer_.GetMajorVersion());
-  uint64_t manifest_offset;
-  EXPECT_TRUE(performer_.GetManifestOffset(&manifest_offset));
-  EXPECT_EQ(24U, manifest_offset);  // 4 + 8 + 8 + 4
-  EXPECT_EQ(manifest_offset + manifest_size, performer_.GetMetadataSize());
+  EXPECT_EQ(kBrilloMajorPayloadVersion, performer_.major_payload_version_);
+  EXPECT_EQ(24 + manifest_size, performer_.metadata_size_);  // 4 + 8 + 8 + 4
   EXPECT_EQ(metadata_signature_size, performer_.metadata_signature_size_);
 }
 
-TEST_F(DeltaPerformerTest, BrilloVerifyMetadataSignatureTest) {
+TEST_F(DeltaPerformerTest, BrilloParsePayloadMetadataTest) {
   brillo::Blob payload_data = GeneratePayload({}, {}, true,
                                               kBrilloMajorPayloadVersion,
                                               kSourceMinorPayloadVersion);
   install_plan_.hash_checks_mandatory = true;
-  // Just set these value so that we can use ValidateMetadataSignature directly.
-  performer_.major_payload_version_ = kBrilloMajorPayloadVersion;
-  performer_.metadata_size_ = payload_.metadata_size;
-  uint64_t signature_length;
-  EXPECT_TRUE(PayloadSigner::SignatureBlobLength(
-      {GetBuildArtifactsPath(kUnittestPrivateKeyPath)}, &signature_length));
-  performer_.metadata_signature_size_ = signature_length;
   performer_.set_public_key_path(GetBuildArtifactsPath(kUnittestPublicKeyPath));
-  EXPECT_EQ(ErrorCode::kSuccess,
-            performer_.ValidateMetadataSignature(payload_data));
+  ErrorCode error;
+  EXPECT_EQ(MetadataParseResult::kSuccess,
+            performer_.ParsePayloadMetadata(payload_data, &error));
+  EXPECT_EQ(ErrorCode::kSuccess, error);
 }
 
 TEST_F(DeltaPerformerTest, BadDeltaMagicTest) {
@@ -891,18 +1022,18 @@
 
 TEST_F(DeltaPerformerTest, ConfVersionsMatch) {
   // Test that the versions in update_engine.conf that is installed to the
-  // image match the supported delta versions in the update engine.
+  // image match the maximum supported delta versions in the update engine.
   uint32_t minor_version;
   brillo::KeyValueStore store;
   EXPECT_TRUE(store.Load(GetBuildArtifactsPath().Append("update_engine.conf")));
   EXPECT_TRUE(utils::GetMinorVersion(store, &minor_version));
-  EXPECT_EQ(DeltaPerformer::kSupportedMinorPayloadVersion, minor_version);
+  EXPECT_EQ(kMaxSupportedMinorPayloadVersion, minor_version);
 
   string major_version_str;
   uint64_t major_version;
   EXPECT_TRUE(store.GetString("PAYLOAD_MAJOR_VERSION", &major_version_str));
   EXPECT_TRUE(base::StringToUint64(major_version_str, &major_version));
-  EXPECT_EQ(DeltaPerformer::kSupportedMajorPayloadVersion, major_version);
+  EXPECT_EQ(kMaxSupportedMajorPayloadVersion, major_version);
 }
 
 }  // namespace chromeos_update_engine
diff --git a/payload_consumer/download_action.cc b/payload_consumer/download_action.cc
index f1b6e33..1654c2a 100644
--- a/payload_consumer/download_action.cc
+++ b/payload_consumer/download_action.cc
@@ -44,13 +44,13 @@
                                HardwareInterface* hardware,
                                SystemState* system_state,
                                HttpFetcher* http_fetcher,
-                               bool is_interactive)
+                               bool interactive)
     : prefs_(prefs),
       boot_control_(boot_control),
       hardware_(hardware),
       system_state_(system_state),
       http_fetcher_(new MultiRangeHttpFetcher(http_fetcher)),
-      is_interactive_(is_interactive),
+      interactive_(interactive),
       writer_(nullptr),
       code_(ErrorCode::kSuccess),
       delegate_(nullptr),
@@ -251,7 +251,7 @@
                                               delegate_,
                                               &install_plan_,
                                               payload_,
-                                              is_interactive_));
+                                              interactive_));
     writer_ = delta_performer_.get();
   }
   if (system_state_ != nullptr) {
@@ -387,8 +387,13 @@
         StartDownloading();
         return;
       }
+
+      // All payloads have been applied and verified.
+      if (delegate_)
+        delegate_->DownloadComplete();
+
       // Log UpdateEngine.DownloadAction.* histograms to help diagnose
-      // long-blocking oeprations.
+      // long-blocking operations.
       std::string histogram_output;
       base::StatisticsRecorder::WriteGraph(
           "UpdateEngine.DownloadAction.", &histogram_output);
diff --git a/payload_consumer/download_action.h b/payload_consumer/download_action.h
index 81d7333..6e6f057 100644
--- a/payload_consumer/download_action.h
+++ b/payload_consumer/download_action.h
@@ -79,7 +79,7 @@
                  HardwareInterface* hardware,
                  SystemState* system_state,
                  HttpFetcher* http_fetcher,
-                 bool is_interactive);
+                 bool interactive);
   ~DownloadAction() override;
 
   // InstallPlanAction overrides.
@@ -158,7 +158,7 @@
   // If |true|, the update is user initiated (vs. periodic update checks). Hence
   // the |delta_performer_| can decide not to use O_DSYNC flag for faster
   // update.
-  bool is_interactive_;
+  bool interactive_;
 
   // The FileWriter that downloaded data should be written to. It will
   // either point to *decompressing_file_writer_ or *delta_performer_.
diff --git a/payload_consumer/download_action_unittest.cc b/payload_consumer/download_action_unittest.cc
index 726972d..a3ad5b9 100644
--- a/payload_consumer/download_action_unittest.cc
+++ b/payload_consumer/download_action_unittest.cc
@@ -167,7 +167,7 @@
                                  fake_system_state.hardware(),
                                  &fake_system_state,
                                  http_fetcher,
-                                 false /* is_interactive */);
+                                 false /* interactive */);
   download_action.SetTestFileWriter(&writer);
   BondActions(&feeder_action, &download_action);
   MockDownloadActionDelegate download_delegate;
@@ -178,6 +178,8 @@
       EXPECT_CALL(download_delegate,
                   BytesReceived(_, kMockHttpFetcherChunkSize, _));
     EXPECT_CALL(download_delegate, BytesReceived(_, _, _)).Times(AtLeast(1));
+    EXPECT_CALL(download_delegate, DownloadComplete())
+        .Times(fail_write == 0 ? 1 : 0);
   }
   ErrorCode expected_code = ErrorCode::kSuccess;
   if (fail_write > 0)
@@ -281,7 +283,7 @@
                                  fake_system_state.hardware(),
                                  &fake_system_state,
                                  http_fetcher,
-                                 false /* is_interactive */);
+                                 false /* interactive */);
   download_action.SetTestFileWriter(&mock_file_writer);
   BondActions(&feeder_action, &download_action);
   MockDownloadActionDelegate download_delegate;
@@ -371,7 +373,7 @@
         fake_system_state_.hardware(),
         &fake_system_state_,
         new MockHttpFetcher(data.data(), data.size(), nullptr),
-        false /* is_interactive */);
+        false /* interactive */);
     download_action.SetTestFileWriter(&writer);
     MockDownloadActionDelegate download_delegate;
     if (use_download_delegate) {
@@ -473,7 +475,7 @@
                                  fake_system_state_.hardware(),
                                  &fake_system_state_,
                                  new MockHttpFetcher("x", 1, nullptr),
-                                 false /* is_interactive */);
+                                 false /* interactive */);
   download_action.SetTestFileWriter(&writer);
 
   DownloadActionTestAction test_action;
@@ -563,7 +565,7 @@
                                               fake_system_state_.hardware(),
                                               &fake_system_state_,
                                               http_fetcher_,
-                                              false /* is_interactive */));
+                                              false /* interactive */));
     download_action_->SetTestFileWriter(&writer);
     BondActions(&feeder_action, download_action_.get());
     DownloadActionTestProcessorDelegate delegate(ErrorCode::kSuccess);
diff --git a/payload_consumer/fake_file_descriptor.cc b/payload_consumer/fake_file_descriptor.cc
index d54856b..63af181 100644
--- a/payload_consumer/fake_file_descriptor.cc
+++ b/payload_consumer/fake_file_descriptor.cc
@@ -73,4 +73,12 @@
   return offset_;
 }
 
+brillo::Blob FakeFileDescriptorData(size_t size) {
+  brillo::Blob ret(size);
+  FakeFileDescriptor fd;
+  fd.SetFileSize(size);
+  fd.Read(ret.data(), size);
+  return ret;
+}
+
 }  // namespace chromeos_update_engine
diff --git a/payload_consumer/fake_file_descriptor.h b/payload_consumer/fake_file_descriptor.h
index f17820b..c9fea7d 100644
--- a/payload_consumer/fake_file_descriptor.h
+++ b/payload_consumer/fake_file_descriptor.h
@@ -22,6 +22,8 @@
 #include <utility>
 #include <vector>
 
+#include <brillo/secure_blob.h>
+
 #include "update_engine/payload_consumer/file_descriptor.h"
 
 namespace chromeos_update_engine {
@@ -121,6 +123,9 @@
   DISALLOW_COPY_AND_ASSIGN(FakeFileDescriptor);
 };
 
+// Return a blob with the first |size| bytes of a FakeFileDescriptor stream.
+brillo::Blob FakeFileDescriptorData(size_t size);
+
 }  // namespace chromeos_update_engine
 
 #endif  // UPDATE_ENGINE_PAYLOAD_CONSUMER_FAKE_FILE_DESCRIPTOR_H_
diff --git a/payload_consumer/fec_file_descriptor.cc b/payload_consumer/fec_file_descriptor.cc
new file mode 100644
index 0000000..de22cf3
--- /dev/null
+++ b/payload_consumer/fec_file_descriptor.cc
@@ -0,0 +1,78 @@
+//
+// Copyright (C) 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_consumer/fec_file_descriptor.h"
+
+namespace chromeos_update_engine {
+
+bool FecFileDescriptor::Open(const char* path, int flags) {
+  return Open(path, flags, 0600);
+}
+
+bool FecFileDescriptor::Open(const char* path, int flags, mode_t mode) {
+  if (!fh_.open(path, flags, mode))
+    return false;
+
+  if (!fh_.has_ecc()) {
+    LOG(ERROR) << "No ECC data in the passed file";
+    fh_.close();
+    return false;
+  }
+
+  fec_status status;
+  if (!fh_.get_status(status)) {
+    LOG(ERROR) << "Couldn't load ECC status";
+    fh_.close();
+    return false;
+  }
+
+  dev_size_ = status.data_size;
+  return true;
+}
+
+ssize_t FecFileDescriptor::Read(void* buf, size_t count) {
+  return fh_.read(buf, count);
+}
+
+ssize_t FecFileDescriptor::Write(const void* buf, size_t count) {
+  errno = EROFS;
+  return -1;
+}
+
+off64_t FecFileDescriptor::Seek(off64_t offset, int whence) {
+  if (fh_.seek(offset, whence)) {
+    return offset;
+  }
+  return -1;
+}
+
+uint64_t FecFileDescriptor::BlockDevSize() {
+  return dev_size_;
+}
+
+bool FecFileDescriptor::BlkIoctl(int request,
+                                 uint64_t start,
+                                 uint64_t length,
+                                 int* result) {
+  // No IOCTL pass-through in this mode.
+  return false;
+}
+
+bool FecFileDescriptor::Close() {
+  return fh_.close();
+}
+
+}  // namespace chromeos_update_engine
diff --git a/payload_consumer/fec_file_descriptor.h b/payload_consumer/fec_file_descriptor.h
new file mode 100644
index 0000000..e7f2e40
--- /dev/null
+++ b/payload_consumer/fec_file_descriptor.h
@@ -0,0 +1,65 @@
+//
+// Copyright (C) 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_PAYLOAD_CONSUMER_FEC_FILE_DESCRIPTOR_H_
+#define UPDATE_ENGINE_PAYLOAD_CONSUMER_FEC_FILE_DESCRIPTOR_H_
+
+#include <fec/io.h>
+
+#include "update_engine/payload_consumer/file_descriptor.h"
+
+// A FileDescriptor implementation with error correction based on the "libfec"
+// library. The libfec library on the running system allows parsing the error
+// correction blocks stored in partitions that have verity and error correction
+// enabled. This information is present in the raw block device, but of course
+// not available via the dm-verity block device.
+
+namespace chromeos_update_engine {
+
+// An error corrected file based on FEC.
+class FecFileDescriptor : public FileDescriptor {
+ public:
+  FecFileDescriptor() = default;
+  ~FecFileDescriptor() = default;
+
+  // Interface methods.
+  bool Open(const char* path, int flags, mode_t mode) override;
+  bool Open(const char* path, int flags) override;
+  ssize_t Read(void* buf, size_t count) override;
+  ssize_t Write(const void* buf, size_t count) override;
+  off64_t Seek(off64_t offset, int whence) override;
+  uint64_t BlockDevSize() override;
+  bool BlkIoctl(int request,
+                uint64_t start,
+                uint64_t length,
+                int* result) override;
+  bool Flush() override { return true; }
+  bool Close() override;
+  bool IsSettingErrno() override { return true; }
+  bool IsOpen() override {
+    // The bool operator on the fec::io class tells whether the internal
+    // handle is open.
+    return static_cast<bool>(fh_);
+  }
+
+ protected:
+  fec::io fh_;
+  uint64_t dev_size_{0};
+};
+
+}  // namespace chromeos_update_engine
+
+#endif  // UPDATE_ENGINE_PAYLOAD_CONSUMER_FEC_FILE_DESCRIPTOR_H_
diff --git a/payload_consumer/file_descriptor_utils.cc b/payload_consumer/file_descriptor_utils.cc
index b1902de..ebfb977 100644
--- a/payload_consumer/file_descriptor_utils.cc
+++ b/payload_consumer/file_descriptor_utils.cc
@@ -96,10 +96,7 @@
                         const RepeatedPtrField<Extent>& extents,
                         uint64_t block_size,
                         brillo::Blob* hash_out) {
-  TEST_AND_RETURN_FALSE(hash_out != nullptr);
-  TEST_AND_RETURN_FALSE(
-      CommonHashExtents(source, extents, nullptr, block_size, hash_out));
-  return true;
+  return CommonHashExtents(source, extents, nullptr, block_size, hash_out);
 }
 
 }  // namespace fd_utils
diff --git a/payload_consumer/file_descriptor_utils.h b/payload_consumer/file_descriptor_utils.h
index 397c35e..68fb001 100644
--- a/payload_consumer/file_descriptor_utils.h
+++ b/payload_consumer/file_descriptor_utils.h
@@ -42,7 +42,7 @@
     uint64_t block_size,
     brillo::Blob* hash_out);
 
-// Reads blocks from |source| and caculates the hash. The blocks to read are
+// Reads blocks from |source| and calculates the hash. The blocks to read are
 // specified by |extents|. Stores the hash in |hash_out| if it is not null. The
 // block sizes are passed as |block_size|. In case of error reading, it returns
 // false and the value pointed by |hash_out| is undefined.
diff --git a/payload_consumer/file_descriptor_utils_unittest.cc b/payload_consumer/file_descriptor_utils_unittest.cc
index 79d2184..48e610f 100644
--- a/payload_consumer/file_descriptor_utils_unittest.cc
+++ b/payload_consumer/file_descriptor_utils_unittest.cc
@@ -175,10 +175,10 @@
   EXPECT_FALSE(fd_utils::ReadAndHashExtents(source_, extents, 4, &hash_out));
 }
 
-// Test that if hash_out is null, then it should fail.
+// Test that if hash_out is null, it still works.
 TEST_F(FileDescriptorUtilsTest, ReadAndHashExtentsWithoutHashingTest) {
   auto extents = CreateExtentList({{0, 5}});
-  EXPECT_FALSE(fd_utils::ReadAndHashExtents(source_, extents, 4, nullptr));
+  EXPECT_TRUE(fd_utils::ReadAndHashExtents(source_, extents, 4, nullptr));
 }
 
 // Tests that it can calculate the hash properly.
diff --git a/payload_consumer/payload_constants.cc b/payload_consumer/payload_constants.cc
index 29daddc..797e76d 100644
--- a/payload_consumer/payload_constants.cc
+++ b/payload_consumer/payload_constants.cc
@@ -21,6 +21,9 @@
 const uint64_t kChromeOSMajorPayloadVersion = 1;
 const uint64_t kBrilloMajorPayloadVersion = 2;
 
+const uint32_t kMinSupportedMinorPayloadVersion = 1;
+const uint32_t kMaxSupportedMinorPayloadVersion = 5;
+
 const uint32_t kFullPayloadMinorVersion = 0;
 const uint32_t kInPlaceMinorPayloadVersion = 1;
 const uint32_t kSourceMinorPayloadVersion = 2;
@@ -28,6 +31,11 @@
 const uint32_t kBrotliBsdiffMinorPayloadVersion = 4;
 const uint32_t kPuffdiffMinorPayloadVersion = 5;
 
+const uint64_t kMinSupportedMajorPayloadVersion = 1;
+const uint64_t kMaxSupportedMajorPayloadVersion = 2;
+
+const uint64_t kMaxPayloadHeaderSize = 24;
+
 const char kLegacyPartitionNameKernel[] = "boot";
 const char kLegacyPartitionNameRoot[] = "system";
 
diff --git a/payload_consumer/payload_constants.h b/payload_consumer/payload_constants.h
index 9d29afd..43c3137 100644
--- a/payload_consumer/payload_constants.h
+++ b/payload_consumer/payload_constants.h
@@ -31,6 +31,10 @@
 // The major version used by Brillo.
 extern const uint64_t kBrilloMajorPayloadVersion;
 
+// The minimum and maximum supported major version.
+extern const uint64_t kMinSupportedMajorPayloadVersion;
+extern const uint64_t kMaxSupportedMajorPayloadVersion;
+
 // The minor version used for all full payloads.
 extern const uint32_t kFullPayloadMinorVersion;
 
@@ -49,6 +53,13 @@
 // The minor version that allows PUFFDIFF operation.
 extern const uint32_t kPuffdiffMinorPayloadVersion;
 
+// The minimum and maximum supported minor version.
+extern const uint32_t kMinSupportedMinorPayloadVersion;
+extern const uint32_t kMaxSupportedMinorPayloadVersion;
+
+// The maximum size of the payload header (anything before the protobuf).
+extern const uint64_t kMaxPayloadHeaderSize;
+
 // The kernel and rootfs partition names used by the BootControlInterface when
 // handling update payloads with a major version 1. The names of the updated
 // partitions are include in the payload itself for major version 2.
diff --git a/payload_consumer/payload_metadata.cc b/payload_consumer/payload_metadata.cc
new file mode 100644
index 0000000..f700228
--- /dev/null
+++ b/payload_consumer/payload_metadata.cc
@@ -0,0 +1,214 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_consumer/payload_metadata.h"
+
+#include <endian.h>
+
+#include <brillo/data_encoding.h>
+
+#include "update_engine/common/hash_calculator.h"
+#include "update_engine/common/utils.h"
+#include "update_engine/payload_consumer/payload_constants.h"
+#include "update_engine/payload_consumer/payload_verifier.h"
+
+namespace chromeos_update_engine {
+
+const uint64_t PayloadMetadata::kDeltaVersionOffset = sizeof(kDeltaMagic);
+const uint64_t PayloadMetadata::kDeltaVersionSize = 8;
+const uint64_t PayloadMetadata::kDeltaManifestSizeOffset =
+    kDeltaVersionOffset + kDeltaVersionSize;
+const uint64_t PayloadMetadata::kDeltaManifestSizeSize = 8;
+const uint64_t PayloadMetadata::kDeltaMetadataSignatureSizeSize = 4;
+
+bool PayloadMetadata::GetMetadataSignatureSizeOffset(
+    uint64_t* out_offset) const {
+  if (GetMajorVersion() == kBrilloMajorPayloadVersion) {
+    *out_offset = kDeltaManifestSizeOffset + kDeltaManifestSizeSize;
+    return true;
+  }
+  return false;
+}
+
+bool PayloadMetadata::GetManifestOffset(uint64_t* out_offset) const {
+  // Actual manifest begins right after the manifest size field or
+  // metadata signature size field if major version >= 2.
+  if (major_payload_version_ == kChromeOSMajorPayloadVersion) {
+    *out_offset = kDeltaManifestSizeOffset + kDeltaManifestSizeSize;
+    return true;
+  }
+  if (major_payload_version_ == kBrilloMajorPayloadVersion) {
+    *out_offset = kDeltaManifestSizeOffset + kDeltaManifestSizeSize +
+                  kDeltaMetadataSignatureSizeSize;
+    return true;
+  }
+  LOG(ERROR) << "Unknown major payload version: " << major_payload_version_;
+  return false;
+}
+
+MetadataParseResult PayloadMetadata::ParsePayloadHeader(
+    const brillo::Blob& payload, ErrorCode* error) {
+  uint64_t manifest_offset;
+  // Ensure we have data to cover the major payload version.
+  if (payload.size() < kDeltaManifestSizeOffset)
+    return MetadataParseResult::kInsufficientData;
+
+  // Validate the magic string.
+  if (memcmp(payload.data(), kDeltaMagic, sizeof(kDeltaMagic)) != 0) {
+    LOG(ERROR) << "Bad payload format -- invalid delta magic.";
+    *error = ErrorCode::kDownloadInvalidMetadataMagicString;
+    return MetadataParseResult::kError;
+  }
+
+  // Extract the payload version from the metadata.
+  static_assert(sizeof(major_payload_version_) == kDeltaVersionSize,
+                "Major payload version size mismatch");
+  memcpy(&major_payload_version_,
+         &payload[kDeltaVersionOffset],
+         kDeltaVersionSize);
+  // Switch big endian to host.
+  major_payload_version_ = be64toh(major_payload_version_);
+
+  if (major_payload_version_ < kMinSupportedMajorPayloadVersion ||
+      major_payload_version_ > kMaxSupportedMajorPayloadVersion) {
+    LOG(ERROR) << "Bad payload format -- unsupported payload version: "
+               << major_payload_version_;
+    *error = ErrorCode::kUnsupportedMajorPayloadVersion;
+    return MetadataParseResult::kError;
+  }
+
+  // Get the manifest offset now that we have payload version.
+  if (!GetManifestOffset(&manifest_offset)) {
+    *error = ErrorCode::kUnsupportedMajorPayloadVersion;
+    return MetadataParseResult::kError;
+  }
+  // Check again with the manifest offset.
+  if (payload.size() < manifest_offset)
+    return MetadataParseResult::kInsufficientData;
+
+  // Next, parse the manifest size.
+  static_assert(sizeof(manifest_size_) == kDeltaManifestSizeSize,
+                "manifest_size size mismatch");
+  memcpy(&manifest_size_,
+         &payload[kDeltaManifestSizeOffset],
+         kDeltaManifestSizeSize);
+  manifest_size_ = be64toh(manifest_size_);  // switch big endian to host
+
+  if (GetMajorVersion() == kBrilloMajorPayloadVersion) {
+    // Parse the metadata signature size.
+    static_assert(
+        sizeof(metadata_signature_size_) == kDeltaMetadataSignatureSizeSize,
+        "metadata_signature_size size mismatch");
+    uint64_t metadata_signature_size_offset;
+    if (!GetMetadataSignatureSizeOffset(&metadata_signature_size_offset)) {
+      *error = ErrorCode::kError;
+      return MetadataParseResult::kError;
+    }
+    memcpy(&metadata_signature_size_,
+           &payload[metadata_signature_size_offset],
+           kDeltaMetadataSignatureSizeSize);
+    metadata_signature_size_ = be32toh(metadata_signature_size_);
+  }
+  metadata_size_ = manifest_offset + manifest_size_;
+  return MetadataParseResult::kSuccess;
+}
+
+bool PayloadMetadata::GetManifest(const brillo::Blob& payload,
+                                  DeltaArchiveManifest* out_manifest) const {
+  uint64_t manifest_offset;
+  if (!GetManifestOffset(&manifest_offset))
+    return false;
+  CHECK_GE(payload.size(), manifest_offset + manifest_size_);
+  return out_manifest->ParseFromArray(&payload[manifest_offset],
+                                      manifest_size_);
+}
+
+ErrorCode PayloadMetadata::ValidateMetadataSignature(
+    const brillo::Blob& payload,
+    std::string metadata_signature,
+    base::FilePath path_to_public_key) const {
+  if (payload.size() < metadata_size_ + metadata_signature_size_)
+    return ErrorCode::kDownloadMetadataSignatureError;
+
+  brillo::Blob metadata_signature_blob, metadata_signature_protobuf_blob;
+  if (!metadata_signature.empty()) {
+    // Convert base64-encoded signature to raw bytes.
+    if (!brillo::data_encoding::Base64Decode(metadata_signature,
+                                             &metadata_signature_blob)) {
+      LOG(ERROR) << "Unable to decode base64 metadata signature: "
+                 << metadata_signature;
+      return ErrorCode::kDownloadMetadataSignatureError;
+    }
+  } else if (major_payload_version_ == kBrilloMajorPayloadVersion) {
+    metadata_signature_protobuf_blob.assign(
+        payload.begin() + metadata_size_,
+        payload.begin() + metadata_size_ + metadata_signature_size_);
+  }
+
+  if (metadata_signature_blob.empty() &&
+      metadata_signature_protobuf_blob.empty()) {
+    LOG(ERROR) << "Missing mandatory metadata signature in both Omaha "
+               << "response and payload.";
+    return ErrorCode::kDownloadMetadataSignatureMissingError;
+  }
+
+  LOG(INFO) << "Verifying metadata hash signature using public key: "
+            << path_to_public_key.value();
+
+  brillo::Blob calculated_metadata_hash;
+  if (!HashCalculator::RawHashOfBytes(
+          payload.data(), metadata_size_, &calculated_metadata_hash)) {
+    LOG(ERROR) << "Unable to compute actual hash of manifest";
+    return ErrorCode::kDownloadMetadataSignatureVerificationError;
+  }
+
+  PayloadVerifier::PadRSA2048SHA256Hash(&calculated_metadata_hash);
+  if (calculated_metadata_hash.empty()) {
+    LOG(ERROR) << "Computed actual hash of metadata is empty.";
+    return ErrorCode::kDownloadMetadataSignatureVerificationError;
+  }
+
+  if (!metadata_signature_blob.empty()) {
+    brillo::Blob expected_metadata_hash;
+    if (!PayloadVerifier::GetRawHashFromSignature(metadata_signature_blob,
+                                                  path_to_public_key.value(),
+                                                  &expected_metadata_hash)) {
+      LOG(ERROR) << "Unable to compute expected hash from metadata signature";
+      return ErrorCode::kDownloadMetadataSignatureError;
+    }
+    if (calculated_metadata_hash != expected_metadata_hash) {
+      LOG(ERROR) << "Manifest hash verification failed. Expected hash = ";
+      utils::HexDumpVector(expected_metadata_hash);
+      LOG(ERROR) << "Calculated hash = ";
+      utils::HexDumpVector(calculated_metadata_hash);
+      return ErrorCode::kDownloadMetadataSignatureMismatch;
+    }
+  } else {
+    if (!PayloadVerifier::VerifySignature(metadata_signature_protobuf_blob,
+                                          path_to_public_key.value(),
+                                          calculated_metadata_hash)) {
+      LOG(ERROR) << "Manifest hash verification failed.";
+      return ErrorCode::kDownloadMetadataSignatureMismatch;
+    }
+  }
+
+  // The autoupdate_CatchBadSignatures test checks for this string in
+  // log-files. Keep in sync.
+  LOG(INFO) << "Metadata hash signature matches value in Omaha response.";
+  return ErrorCode::kSuccess;
+}
+
+}  // namespace chromeos_update_engine
diff --git a/payload_consumer/payload_metadata.h b/payload_consumer/payload_metadata.h
new file mode 100644
index 0000000..fc1d128
--- /dev/null
+++ b/payload_consumer/payload_metadata.h
@@ -0,0 +1,107 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_PAYLOAD_CONSUMER_PAYLOAD_METADATA_H_
+#define UPDATE_ENGINE_PAYLOAD_CONSUMER_PAYLOAD_METADATA_H_
+
+#include <inttypes.h>
+
+#include <string>
+#include <vector>
+
+#include <base/files/file_path.h>
+#include <brillo/secure_blob.h>
+
+#include "update_engine/common/error_code.h"
+#include "update_engine/common/platform_constants.h"
+#include "update_engine/update_metadata.pb.h"
+
+namespace chromeos_update_engine {
+
+enum class MetadataParseResult {
+  kSuccess,
+  kError,
+  kInsufficientData,
+};
+
+// This class parses payload metadata and validates its signature.
+class PayloadMetadata {
+ public:
+  static const uint64_t kDeltaVersionOffset;
+  static const uint64_t kDeltaVersionSize;
+  static const uint64_t kDeltaManifestSizeOffset;
+  static const uint64_t kDeltaManifestSizeSize;
+  static const uint64_t kDeltaMetadataSignatureSizeSize;
+
+  PayloadMetadata() = default;
+
+  // Attempts to parse the update payload header starting from the beginning of
+  // |payload|. On success, returns MetadataParseResult::kSuccess. Returns
+  // MetadataParseResult::kInsufficientData if more data is needed to parse the
+  // complete metadata. Returns MetadataParseResult::kError if the metadata
+  // can't be parsed given the payload.
+  MetadataParseResult ParsePayloadHeader(const brillo::Blob& payload,
+                                         ErrorCode* error);
+
+  // Given the |payload|, verifies that the signed hash of its metadata matches
+  // |metadata_signature| (if present) or the metadata signature in payload
+  // itself (if present). Returns ErrorCode::kSuccess on match or a suitable
+  // error code otherwise. This method must be called before any part of the
+  // metadata is parsed so that a man-in-the-middle attack on the SSL connection
+  // to the payload server doesn't exploit any vulnerability in the code that
+  // parses the protocol buffer.
+  ErrorCode ValidateMetadataSignature(const brillo::Blob& payload,
+                                      std::string metadata_signature,
+                                      base::FilePath path_to_public_key) const;
+
+  // Returns the major payload version. If the version was not yet parsed,
+  // returns zero.
+  uint64_t GetMajorVersion() const { return major_payload_version_; }
+
+  // Returns the size of the payload metadata, which includes the payload header
+  // and the manifest. If the header was not yet parsed, returns zero.
+  uint64_t GetMetadataSize() const { return metadata_size_; }
+
+  // Returns the size of the payload metadata signature. If the header was not
+  // yet parsed, returns zero.
+  uint32_t GetMetadataSignatureSize() const { return metadata_signature_size_; }
+
+  // Set |*out_manifest| to the manifest in |payload|.
+  // Returns true on success.
+  bool GetManifest(const brillo::Blob& payload,
+                   DeltaArchiveManifest* out_manifest) const;
+
+ private:
+  // Set |*out_offset| to the byte offset at which the manifest protobuf begins
+  // in a payload. Return true on success, false if the offset is unknown.
+  bool GetManifestOffset(uint64_t* out_offset) const;
+
+  // Set |*out_offset| to the byte offset where the size of the metadata
+  // signature is stored in a payload. Return true on success; return false if
+  // this field is not present in the payload.
+  bool GetMetadataSignatureSizeOffset(uint64_t* out_offset) const;
+
+  uint64_t metadata_size_{0};
+  uint64_t manifest_size_{0};
+  uint32_t metadata_signature_size_{0};
+  uint64_t major_payload_version_{0};
+
+  DISALLOW_COPY_AND_ASSIGN(PayloadMetadata);
+};
+
+}  // namespace chromeos_update_engine
+
+#endif  // UPDATE_ENGINE_PAYLOAD_CONSUMER_PAYLOAD_METADATA_H_
diff --git a/payload_consumer/postinstall_runner_action.cc b/payload_consumer/postinstall_runner_action.cc
index c672fef..83d910f 100644
--- a/payload_consumer/postinstall_runner_action.cc
+++ b/payload_consumer/postinstall_runner_action.cc
@@ -265,7 +265,7 @@
 void PostinstallRunnerAction::ReportProgress(double frac) {
   if (!delegate_)
     return;
-  if (current_partition_ >= partition_weight_.size()) {
+  if (current_partition_ >= partition_weight_.size() || total_weight_ == 0) {
     delegate_->ProgressUpdate(1.);
     return;
   }
diff --git a/payload_generator/ab_generator.cc b/payload_generator/ab_generator.cc
index 089dfd9..f24f6c3 100644
--- a/payload_generator/ab_generator.cc
+++ b/payload_generator/ab_generator.cc
@@ -57,8 +57,6 @@
                                                        blob_file));
   LOG(INFO) << "done reading " << new_part.name;
 
-  TEST_AND_RETURN_FALSE(
-      FragmentOperations(config.version, aops, new_part.path, blob_file));
   SortOperationsByDestination(aops);
 
   // Use the soft_chunk_size when merging operations to prevent merging all
@@ -69,8 +67,10 @@
     merge_chunk_blocks = hard_chunk_blocks;
   }
 
+  LOG(INFO) << "Merging " << aops->size() << " operations.";
   TEST_AND_RETURN_FALSE(MergeOperations(
       aops, config.version, merge_chunk_blocks, new_part.path, blob_file));
+  LOG(INFO) << aops->size() << " operations after merge.";
 
   if (config.version.minor >= kOpSrcHashMinorPayloadVersion)
     TEST_AND_RETURN_FALSE(AddSourceHash(aops, old_part.path));
diff --git a/payload_generator/delta_diff_utils.cc b/payload_generator/delta_diff_utils.cc
index 877e13f..75d1016 100644
--- a/payload_generator/delta_diff_utils.cc
+++ b/payload_generator/delta_diff_utils.cc
@@ -29,6 +29,8 @@
 #include <unistd.h>
 
 #include <algorithm>
+#include <functional>
+#include <list>
 #include <map>
 #include <memory>
 #include <utility>
@@ -38,14 +40,17 @@
 #include <base/strings/string_util.h>
 #include <base/strings/stringprintf.h>
 #include <base/threading/simple_thread.h>
+#include <base/time/time.h>
 #include <brillo/data_encoding.h>
 #include <bsdiff/bsdiff.h>
 #include <bsdiff/patch_writer_factory.h>
+#include <puffin/utils.h>
 
 #include "update_engine/common/hash_calculator.h"
 #include "update_engine/common/subprocess.h"
 #include "update_engine/common/utils.h"
 #include "update_engine/payload_consumer/payload_constants.h"
+#include "update_engine/payload_generator/ab_generator.h"
 #include "update_engine/payload_generator/block_mapping.h"
 #include "update_engine/payload_generator/bzip.h"
 #include "update_engine/payload_generator/deflate_utils.h"
@@ -55,6 +60,7 @@
 #include "update_engine/payload_generator/squashfs_filesystem.h"
 #include "update_engine/payload_generator/xz.h"
 
+using std::list;
 using std::map;
 using std::string;
 using std::vector;
@@ -195,13 +201,16 @@
         version_(version),
         old_extents_(old_extents),
         new_extents_(new_extents),
+        new_extents_blocks_(utils::BlocksInExtents(new_extents)),
         old_deflates_(old_deflates),
         new_deflates_(new_deflates),
         name_(name),
         chunk_blocks_(chunk_blocks),
         blob_file_(blob_file) {}
 
-  FileDeltaProcessor(FileDeltaProcessor&& processor) = default;
+  bool operator>(const FileDeltaProcessor& other) const {
+    return new_extents_blocks_ > other.new_extents_blocks_;
+  }
 
   ~FileDeltaProcessor() override = default;
 
@@ -211,34 +220,35 @@
   void Run() override;
 
   // Merge each file processor's ops list to aops.
-  void MergeOperation(vector<AnnotatedOperation>* aops);
+  bool MergeOperation(vector<AnnotatedOperation>* aops);
 
  private:
-  const string& old_part_;
-  const string& new_part_;
+  const string& old_part_;  // NOLINT(runtime/member_string_references)
+  const string& new_part_;  // NOLINT(runtime/member_string_references)
   const PayloadVersion& version_;
 
   // The block ranges of the old/new file within the src/tgt image
   const vector<Extent> old_extents_;
   const vector<Extent> new_extents_;
+  const size_t new_extents_blocks_;
   const vector<puffin::BitExtent> old_deflates_;
   const vector<puffin::BitExtent> new_deflates_;
   const string name_;
   // Block limit of one aop.
-  ssize_t chunk_blocks_;
+  const ssize_t chunk_blocks_;
   BlobFileWriter* blob_file_;
 
   // The list of ops to reach the new file from the old file.
   vector<AnnotatedOperation> file_aops_;
 
+  bool failed_ = false;
+
   DISALLOW_COPY_AND_ASSIGN(FileDeltaProcessor);
 };
 
 void FileDeltaProcessor::Run() {
   TEST_AND_RETURN(blob_file_ != nullptr);
-
-  LOG(INFO) << "Encoding file " << name_ << " ("
-            << utils::BlocksInExtents(new_extents_) << " blocks)";
+  base::Time start = base::Time::Now();
 
   if (!DeltaReadFile(&file_aops_,
                      old_part_,
@@ -252,13 +262,31 @@
                      version_,
                      blob_file_)) {
     LOG(ERROR) << "Failed to generate delta for " << name_ << " ("
-               << utils::BlocksInExtents(new_extents_) << " blocks)";
+               << new_extents_blocks_ << " blocks)";
+    failed_ = true;
+    return;
   }
+
+  if (!version_.InplaceUpdate()) {
+    if (!ABGenerator::FragmentOperations(
+            version_, &file_aops_, new_part_, blob_file_)) {
+      LOG(ERROR) << "Failed to fragment operations for " << name_;
+      failed_ = true;
+      return;
+    }
+  }
+
+  LOG(INFO) << "Encoded file " << name_ << " (" << new_extents_blocks_
+            << " blocks) in " << (base::Time::Now() - start).InSecondsF()
+            << " seconds.";
 }
 
-void FileDeltaProcessor::MergeOperation(vector<AnnotatedOperation>* aops) {
+bool FileDeltaProcessor::MergeOperation(vector<AnnotatedOperation>* aops) {
+  if (failed_)
+    return false;
   aops->reserve(aops->size() + file_aops_.size());
   std::move(file_aops_.begin(), file_aops_.end(), std::back_inserter(*aops));
+  return true;
 }
 
 bool DeltaReadPartition(vector<AnnotatedOperation>* aops,
@@ -298,7 +326,7 @@
   TEST_AND_RETURN_FALSE(deflate_utils::PreprocessParitionFiles(
       new_part, &new_files, puffdiff_allowed));
 
-  vector<FileDeltaProcessor> file_delta_processors;
+  list<FileDeltaProcessor> file_delta_processors;
 
   // The processing is very straightforward here, we generate operations for
   // every file (and pseudo-file such as the metadata) in the new filesystem
@@ -343,8 +371,45 @@
                                        hard_chunk_blocks,
                                        blob_file);
   }
+  // Process all the blocks not included in any file. We provided all the unused
+  // blocks in the old partition as available data.
+  vector<Extent> new_unvisited = {
+      ExtentForRange(0, new_part.size / kBlockSize)};
+  new_unvisited = FilterExtentRanges(new_unvisited, new_visited_blocks);
+  if (!new_unvisited.empty()) {
+    vector<Extent> old_unvisited;
+    if (old_part.fs_interface) {
+      old_unvisited.push_back(ExtentForRange(0, old_part.size / kBlockSize));
+      old_unvisited = FilterExtentRanges(old_unvisited, old_visited_blocks);
+    }
+
+    LOG(INFO) << "Scanning " << utils::BlocksInExtents(new_unvisited)
+              << " unwritten blocks using chunk size of " << soft_chunk_blocks
+              << " blocks.";
+    // We use the soft_chunk_blocks limit for the <non-file-data> as we don't
+    // really know the structure of this data and we should not expect it to
+    // have redundancy between partitions.
+    file_delta_processors.emplace_back(
+        old_part.path,
+        new_part.path,
+        version,
+        std::move(old_unvisited),
+        std::move(new_unvisited),
+        vector<puffin::BitExtent>{},  // old_deflates,
+        vector<puffin::BitExtent>{},  // new_deflates
+        "<non-file-data>",            // operation name
+        soft_chunk_blocks,
+        blob_file);
+  }
 
   size_t max_threads = GetMaxThreads();
+
+  // Sort the files in descending order based on number of new blocks to make
+  // sure we start the largest ones first.
+  if (file_delta_processors.size() > max_threads) {
+    file_delta_processors.sort(std::greater<FileDeltaProcessor>());
+  }
+
   base::DelegateSimpleThreadPool thread_pool("incremental-update-generator",
                                              max_threads);
   thread_pool.Start();
@@ -354,41 +419,9 @@
   thread_pool.JoinAll();
 
   for (auto& processor : file_delta_processors) {
-    processor.MergeOperation(aops);
+    TEST_AND_RETURN_FALSE(processor.MergeOperation(aops));
   }
 
-  // Process all the blocks not included in any file. We provided all the unused
-  // blocks in the old partition as available data.
-  vector<Extent> new_unvisited = {
-      ExtentForRange(0, new_part.size / kBlockSize)};
-  new_unvisited = FilterExtentRanges(new_unvisited, new_visited_blocks);
-  if (new_unvisited.empty())
-    return true;
-
-  vector<Extent> old_unvisited;
-  if (old_part.fs_interface) {
-    old_unvisited.push_back(ExtentForRange(0, old_part.size / kBlockSize));
-    old_unvisited = FilterExtentRanges(old_unvisited, old_visited_blocks);
-  }
-
-  LOG(INFO) << "Scanning " << utils::BlocksInExtents(new_unvisited)
-            << " unwritten blocks using chunk size of " << soft_chunk_blocks
-            << " blocks.";
-  // We use the soft_chunk_blocks limit for the <non-file-data> as we don't
-  // really know the structure of this data and we should not expect it to have
-  // redundancy between partitions.
-  TEST_AND_RETURN_FALSE(DeltaReadFile(aops,
-                                      old_part.path,
-                                      new_part.path,
-                                      old_unvisited,
-                                      new_unvisited,
-                                      {},                 // old_deflates,
-                                      {},                 // new_deflates
-                                      "<non-file-data>",  // operation name
-                                      soft_chunk_blocks,
-                                      version,
-                                      blob_file));
-
   return true;
 }
 
@@ -478,30 +511,44 @@
       old_blocks_map_it->second.pop_back();
   }
 
+  if (chunk_blocks == -1)
+    chunk_blocks = new_num_blocks;
+
   // Produce operations for the zero blocks split per output extent.
-  // TODO(deymo): Produce ZERO operations instead of calling DeltaReadFile().
   size_t num_ops = aops->size();
   new_visited_blocks->AddExtents(new_zeros);
   for (const Extent& extent : new_zeros) {
-    TEST_AND_RETURN_FALSE(DeltaReadFile(aops,
-                                        "",
-                                        new_part,
-                                        vector<Extent>(),        // old_extents
-                                        vector<Extent>{extent},  // new_extents
-                                        {},                      // old_deflates
-                                        {},                      // new_deflates
-                                        "<zeros>",
-                                        chunk_blocks,
-                                        version,
-                                        blob_file));
+    if (version.OperationAllowed(InstallOperation::ZERO)) {
+      for (uint64_t offset = 0; offset < extent.num_blocks();
+           offset += chunk_blocks) {
+        uint64_t num_blocks =
+            std::min(static_cast<uint64_t>(extent.num_blocks()) - offset,
+                     static_cast<uint64_t>(chunk_blocks));
+        InstallOperation operation;
+        operation.set_type(InstallOperation::ZERO);
+        *(operation.add_dst_extents()) =
+            ExtentForRange(extent.start_block() + offset, num_blocks);
+        aops->push_back({.name = "<zeros>", .op = operation});
+      }
+    } else {
+      TEST_AND_RETURN_FALSE(DeltaReadFile(aops,
+                                          "",
+                                          new_part,
+                                          {},        // old_extents
+                                          {extent},  // new_extents
+                                          {},        // old_deflates
+                                          {},        // new_deflates
+                                          "<zeros>",
+                                          chunk_blocks,
+                                          version,
+                                          blob_file));
+    }
   }
   LOG(INFO) << "Produced " << (aops->size() - num_ops) << " operations for "
             << utils::BlocksInExtents(new_zeros) << " zeroed blocks";
 
   // Produce MOVE/SOURCE_COPY operations for the moved blocks.
   num_ops = aops->size();
-  if (chunk_blocks == -1)
-    chunk_blocks = new_num_blocks;
   uint64_t used_blocks = 0;
   old_visited_blocks->AddExtents(old_identical_blocks);
   new_visited_blocks->AddExtents(new_identical_blocks);
@@ -785,25 +832,8 @@
         TEST_AND_RETURN_FALSE(deflate_utils::FindAndCompactDeflates(
             dst_extents, new_deflates, &dst_deflates));
 
-        // Remove equal deflates. TODO(*): We can do a N*N check using
-        // hashing. It will not reduce the payload size, but it will speeds up
-        // the puffing on the client device.
-        auto src = src_deflates.begin();
-        auto dst = dst_deflates.begin();
-        for (; src != src_deflates.end() && dst != dst_deflates.end();) {
-          auto src_in_bytes = deflate_utils::ExpandToByteExtent(*src);
-          auto dst_in_bytes = deflate_utils::ExpandToByteExtent(*dst);
-          if (src_in_bytes.length == dst_in_bytes.length &&
-              !memcmp(old_data.data() + src_in_bytes.offset,
-                      new_data.data() + dst_in_bytes.offset,
-                      src_in_bytes.length)) {
-            src = src_deflates.erase(src);
-            dst = dst_deflates.erase(dst);
-          } else {
-            src++;
-            dst++;
-          }
-        }
+        puffin::RemoveEqualBitExtents(
+            old_data, new_data, &src_deflates, &dst_deflates);
 
         // Only Puffdiff if both files have at least one deflate left.
         if (!src_deflates.empty() && !dst_deflates.empty()) {
diff --git a/payload_generator/extent_ranges_unittest.cc b/payload_generator/extent_ranges_unittest.cc
index 3705bac..d9dd467 100644
--- a/payload_generator/extent_ranges_unittest.cc
+++ b/payload_generator/extent_ranges_unittest.cc
@@ -304,7 +304,7 @@
 }
 
 TEST(ExtentRangesTest, FilterExtentRangesMultipleRanges) {
-  // Two overlaping extents, with three ranges to remove.
+  // Two overlapping extents, with three ranges to remove.
   vector<Extent> extents {
       ExtentForRange(10, 100),
       ExtentForRange(30, 100) };
diff --git a/payload_generator/filesystem_interface.h b/payload_generator/filesystem_interface.h
index b1506e4..08dfd19 100644
--- a/payload_generator/filesystem_interface.h
+++ b/payload_generator/filesystem_interface.h
@@ -19,9 +19,9 @@
 
 // This class is used to abstract a filesystem and iterate the blocks
 // associated with the files and filesystem structures.
-// For the purposes of the update payload generation, a filesystem is a formated
-// partition composed by fixed-size blocks, since that's the interface used in
-// the update payload.
+// For the purposes of the update payload generation, a filesystem is a
+// formatted partition composed by fixed-size blocks, since that's the interface
+// used in the update payload.
 
 #include <sys/stat.h>
 #include <sys/types.h>
diff --git a/payload_generator/generate_delta_main.cc b/payload_generator/generate_delta_main.cc
index a20a567..f6409a2 100644
--- a/payload_generator/generate_delta_main.cc
+++ b/payload_generator/generate_delta_main.cc
@@ -226,7 +226,7 @@
                            nullptr,
                            &install_plan,
                            &payload,
-                           true);  // is_interactive
+                           true);  // interactive
 
   brillo::Blob buf(1024 * 1024);
   int fd = open(payload_file.c_str(), O_RDONLY, 0);
diff --git a/payload_generator/payload_generation_config.h b/payload_generator/payload_generation_config.h
index ca6fb04..d64bf35 100644
--- a/payload_generator/payload_generation_config.h
+++ b/payload_generator/payload_generation_config.h
@@ -154,7 +154,7 @@
   // if is_full is false, so we are requested a delta payload.
   ImageConfig source;
 
-  // Wheter the requested payload is a delta payload.
+  // Whether the requested payload is a delta payload.
   bool is_delta = false;
 
   // The major/minor version of the payload.
diff --git a/payload_generator/payload_signer.cc b/payload_generator/payload_signer.cc
index 824195d..0b47dd4 100644
--- a/payload_generator/payload_signer.cc
+++ b/payload_generator/payload_signer.cc
@@ -18,6 +18,8 @@
 
 #include <endian.h>
 
+#include <utility>
+
 #include <base/logging.h>
 #include <base/strings/string_number_conversions.h>
 #include <base/strings/string_split.h>
@@ -248,7 +250,7 @@
   TEST_AND_RETURN_FALSE(payload_file);
   brillo::Blob payload_metadata;
 
-  payload_metadata.resize(DeltaPerformer::kMaxPayloadHeaderSize);
+  payload_metadata.resize(kMaxPayloadHeaderSize);
   TEST_AND_RETURN_FALSE(payload_file->ReadAllBlocking(
       payload_metadata.data(), payload_metadata.size(), nullptr));
 
diff --git a/payload_generator/xz_android.cc b/payload_generator/xz_android.cc
index f3b836d..b2b74b1 100644
--- a/payload_generator/xz_android.cc
+++ b/payload_generator/xz_android.cc
@@ -34,9 +34,8 @@
     Read = &BlobReaderStream::ReadStatic;
   }
 
-  static SRes ReadStatic(void* p, void* buf, size_t* size) {
-    auto* self =
-        static_cast<BlobReaderStream*>(reinterpret_cast<ISeqInStream*>(p));
+  static SRes ReadStatic(const ISeqInStream* p, void* buf, size_t* size) {
+    auto* self = static_cast<BlobReaderStream*>(const_cast<ISeqInStream*>(p));
     *size = std::min(*size, self->data_.size() - self->pos_);
     memcpy(buf, self->data_.data() + self->pos_, *size);
     self->pos_ += *size;
@@ -55,9 +54,10 @@
     Write = &BlobWriterStream::WriteStatic;
   }
 
-  static size_t WriteStatic(void* p, const void* buf, size_t size) {
-    auto* self =
-        static_cast<BlobWriterStream*>(reinterpret_cast<ISeqOutStream*>(p));
+  static size_t WriteStatic(const ISeqOutStream* p,
+                            const void* buf,
+                            size_t size) {
+    auto* self = static_cast<const BlobWriterStream*>(p);
     const uint8_t* buffer = reinterpret_cast<const uint8_t*>(buf);
     self->data_->reserve(self->data_->size() + size);
     self->data_->insert(self->data_->end(), buffer, buffer + size);
@@ -97,7 +97,6 @@
 
   // LZMA2 compression properties.
   CLzma2EncProps lzma2Props;
-  props.lzma2Props = &lzma2Props;
   Lzma2EncProps_Init(&lzma2Props);
   // LZMA compression "level 6" requires 9 MB of RAM to decompress in the worst
   // case.
@@ -106,6 +105,7 @@
   // The input size data is used to reduce the dictionary size if possible.
   lzma2Props.lzmaProps.reduceSize = in.size();
   Lzma2EncProps_Normalize(&lzma2Props);
+  props.lzma2Props = lzma2Props;
 
   BlobWriterStream out_writer(out);
   BlobReaderStream in_reader(in);
diff --git a/payload_state.cc b/payload_state.cc
index c07fe7a..d891da0 100644
--- a/payload_state.cc
+++ b/payload_state.cc
@@ -736,6 +736,7 @@
   SetNumReboots(0);
 
   TimeDelta duration = GetUpdateDuration();
+  TimeDelta duration_uptime = GetUpdateDurationUptime();
 
   prefs_->Delete(kPrefsUpdateTimestampStart);
   prefs_->Delete(kPrefsUpdateDurationUptime);
@@ -756,6 +757,7 @@
       total_bytes_by_source,
       download_overhead_percentage,
       duration,
+      duration_uptime,
       reboot_count,
       url_switch_count);
 }
diff --git a/payload_state.h b/payload_state.h
index 4ea7fdd..e8b0db0 100644
--- a/payload_state.h
+++ b/payload_state.h
@@ -343,7 +343,7 @@
 
   // Loads the number of bytes that have been currently downloaded through the
   // previous attempts from the persisted state for the given source. It's
-  // reset to 0 everytime we begin a full update and is continued from previous
+  // reset to 0 every time we begin a full update and is continued from previous
   // attempt if we're resuming the update.
   void LoadCurrentBytesDownloaded(DownloadSource source);
 
@@ -355,7 +355,7 @@
 
   // Loads the total number of bytes that have been downloaded (since the last
   // successful update) from the persisted state for the given source. It's
-  // reset to 0 everytime we successfully apply an update and counts the bytes
+  // reset to 0 every time we successfully apply an update and counts the bytes
   // downloaded for both successful and failed attempts since then.
   void LoadTotalBytesDownloaded(DownloadSource source);
 
@@ -496,7 +496,7 @@
   int32_t url_switch_count_;
 
   // The current download source based on the current URL. This value is
-  // not persisted as it can be recomputed everytime we update the URL.
+  // not persisted as it can be recomputed every time we update the URL.
   // We're storing this so as not to recompute this on every few bytes of
   // data we read from the socket.
   DownloadSource current_download_source_;
diff --git a/payload_state_unittest.cc b/payload_state_unittest.cc
index bcb196a..637891b 100644
--- a/payload_state_unittest.cc
+++ b/payload_state_unittest.cc
@@ -639,7 +639,7 @@
   PayloadState payload_state;
   FakeSystemState fake_system_state;
   OmahaRequestParams params(&fake_system_state);
-  params.Init("", "", true);  // is_interactive = True.
+  params.Init("", "", true);  // interactive = True.
   fake_system_state.set_request_params(&params);
 
   EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
@@ -662,7 +662,7 @@
   PayloadState payload_state;
   FakeSystemState fake_system_state;
   OmahaRequestParams params(&fake_system_state);
-  params.Init("", "", false);  // is_interactive = False.
+  params.Init("", "", false);  // interactive = False.
   fake_system_state.set_request_params(&params);
 
   EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
@@ -871,7 +871,7 @@
 
   EXPECT_CALL(*fake_system_state.mock_metrics_reporter(),
               ReportSuccessfulUpdateMetrics(
-                  1, _, kPayloadTypeFull, _, _, 314, _, _, 3));
+                  1, _, kPayloadTypeFull, _, _, 314, _, _, _, 3));
 
   payload_state.UpdateSucceeded();
 
@@ -920,6 +920,7 @@
                   _,
                   _,
                   _,
+                  _,
                   _))
       .Times(1);
 
@@ -1336,9 +1337,9 @@
 
   // Simulate a successful download and update.
   payload_state.DownloadComplete();
-  EXPECT_CALL(
-      *fake_system_state.mock_metrics_reporter(),
-      ReportSuccessfulUpdateMetrics(_, _, kPayloadTypeDelta, _, _, _, _, _, _));
+  EXPECT_CALL(*fake_system_state.mock_metrics_reporter(),
+              ReportSuccessfulUpdateMetrics(
+                  _, _, kPayloadTypeDelta, _, _, _, _, _, _, _));
   payload_state.UpdateSucceeded();
 
   // Mock the request to a request where the delta was disabled but Omaha sends
@@ -1352,9 +1353,9 @@
 
   payload_state.DownloadComplete();
 
-  EXPECT_CALL(
-      *fake_system_state.mock_metrics_reporter(),
-      ReportSuccessfulUpdateMetrics(_, _, kPayloadTypeDelta, _, _, _, _, _, _));
+  EXPECT_CALL(*fake_system_state.mock_metrics_reporter(),
+              ReportSuccessfulUpdateMetrics(
+                  _, _, kPayloadTypeDelta, _, _, _, _, _, _, _));
   payload_state.UpdateSucceeded();
 }
 
@@ -1377,7 +1378,7 @@
 
   EXPECT_CALL(*fake_system_state.mock_metrics_reporter(),
               ReportSuccessfulUpdateMetrics(
-                  _, _, kPayloadTypeForcedFull, _, _, _, _, _, _));
+                  _, _, kPayloadTypeForcedFull, _, _, _, _, _, _, _));
   payload_state.UpdateSucceeded();
 }
 
@@ -1399,9 +1400,9 @@
   // Simulate a successful download and update.
   payload_state.DownloadComplete();
 
-  EXPECT_CALL(
-      *fake_system_state.mock_metrics_reporter(),
-      ReportSuccessfulUpdateMetrics(_, _, kPayloadTypeFull, _, _, _, _, _, _));
+  EXPECT_CALL(*fake_system_state.mock_metrics_reporter(),
+              ReportSuccessfulUpdateMetrics(
+                  _, _, kPayloadTypeFull, _, _, _, _, _, _, _));
   payload_state.UpdateSucceeded();
 }
 
diff --git a/power_manager_chromeos.h b/power_manager_chromeos.h
index ad49889..eeb14d8 100644
--- a/power_manager_chromeos.h
+++ b/power_manager_chromeos.h
@@ -14,8 +14,8 @@
 // limitations under the License.
 //
 
-#ifndef UPDATE_ENGINE_POWER_MANAGER_H_
-#define UPDATE_ENGINE_POWER_MANAGER_H_
+#ifndef UPDATE_ENGINE_POWER_MANAGER_CHROMEOS_H_
+#define UPDATE_ENGINE_POWER_MANAGER_CHROMEOS_H_
 
 #include <base/macros.h>
 #include <power_manager/dbus-proxies.h>
@@ -41,4 +41,4 @@
 
 }  // namespace chromeos_update_engine
 
-#endif  // UPDATE_ENGINE_POWER_MANAGER_H_
+#endif  // UPDATE_ENGINE_POWER_MANAGER_CHROMEOS_H_
diff --git a/real_system_state.cc b/real_system_state.cc
index b39b7b3..8f4c731 100644
--- a/real_system_state.cc
+++ b/real_system_state.cc
@@ -62,7 +62,7 @@
 
   hardware_ = hardware::CreateHardware();
   if (!hardware_) {
-    LOG(ERROR) << "Error intializing the HardwareInterface.";
+    LOG(ERROR) << "Error initializing the HardwareInterface.";
     return false;
   }
 
@@ -76,13 +76,13 @@
 
   connection_manager_ = connection_manager::CreateConnectionManager(this);
   if (!connection_manager_) {
-    LOG(ERROR) << "Error intializing the ConnectionManagerInterface.";
+    LOG(ERROR) << "Error initializing the ConnectionManagerInterface.";
     return false;
   }
 
   power_manager_ = power_manager::CreatePowerManager();
   if (!power_manager_) {
-    LOG(ERROR) << "Error intializing the PowerManagerInterface.";
+    LOG(ERROR) << "Error initializing the PowerManagerInterface.";
     return false;
   }
 
diff --git a/scripts/brillo_update_payload b/scripts/brillo_update_payload
index 61ecadf..0ae4e88 100755
--- a/scripts/brillo_update_payload
+++ b/scripts/brillo_update_payload
@@ -450,7 +450,18 @@
   for part in "${partitions[@]}"; do
     part_file=$(create_tempfile "${part}.img.XXXXXX")
     CLEANUP_FILES+=("${part_file}")
-    unzip -p "${image}" "IMAGES/${part}.img" >"${part_file}"
+
+    # For each partition, we in turn look for its image file under IMAGES/ and
+    # RADIO/ in the given target_files zip file.
+    local path path_in_zip
+    for path in IMAGES RADIO; do
+      if unzip -l "${image}" "${path}/${part}.img" >/dev/null; then
+        path_in_zip="${path}"
+        break
+      fi
+    done
+    [[ -n "${path_in_zip}" ]] || die "Failed to find ${part}.img"
+    unzip -p "${image}" "${path_in_zip}/${part}.img" >"${part_file}"
 
     # If the partition is stored as an Android sparse image file, we need to
     # convert them to a raw image for the update.
@@ -469,7 +480,7 @@
     # Extract the .map file (if one is available).
     part_map_file=$(create_tempfile "${part}.map.XXXXXX")
     CLEANUP_FILES+=("${part_map_file}")
-    unzip -p "${image}" "IMAGES/${part}.map" >"${part_map_file}" || \
+    unzip -p "${image}" "${path_in_zip}/${part}.map" >"${part_map_file}" || \
       part_map_file=""
 
     # delta_generator only supports images multiple of 4 KiB. For target images
diff --git a/scripts/payload_info.py b/scripts/payload_info.py
new file mode 100755
index 0000000..09a7cf7
--- /dev/null
+++ b/scripts/payload_info.py
@@ -0,0 +1,249 @@
+#!/usr/bin/env python2
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""payload_info: Show information about an update payload."""
+
+from __future__ import print_function
+
+import argparse
+import itertools
+import sys
+import textwrap
+
+import update_payload
+
+MAJOR_PAYLOAD_VERSION_CHROMEOS = 1
+MAJOR_PAYLOAD_VERSION_BRILLO = 2
+
+def DisplayValue(key, value):
+  """Print out a key, value pair with values left-aligned."""
+  if value != None:
+    print('%-*s %s' % (28, key + ':', value))
+  else:
+    raise ValueError('Cannot display an empty value.')
+
+
+def DisplayHexData(data, indent=0):
+  """Print out binary data as a hex values."""
+  for off in range(0, len(data), 16):
+    chunk = data[off:off + 16]
+    print(' ' * indent +
+          ' '.join('%.2x' % ord(c) for c in chunk) +
+          '   ' * (16 - len(chunk)) +
+          ' | ' +
+          ''.join(c if 32 <= ord(c) < 127 else '.' for c in chunk))
+
+
+class PayloadCommand(object):
+  """Show basic information about an update payload.
+
+  This command parses an update payload and displays information from
+  its header and manifest.
+  """
+
+  def __init__(self, options):
+    self.options = options
+    self.payload = None
+
+  def _DisplayHeader(self):
+    """Show information from the payload header."""
+    header = self.payload.header
+    DisplayValue('Payload version', header.version)
+    DisplayValue('Manifest length', header.manifest_len)
+
+  def _DisplayManifest(self):
+    """Show information from the payload manifest."""
+    manifest = self.payload.manifest
+    if self.payload.header.version == MAJOR_PAYLOAD_VERSION_BRILLO:
+      DisplayValue('Number of partitions', len(manifest.partitions))
+      for partition in manifest.partitions:
+        DisplayValue('  Number of "%s" ops' % partition.partition_name,
+                     len(partition.operations))
+    else:
+      DisplayValue('Number of operations', len(manifest.install_operations))
+      DisplayValue('Number of kernel ops',
+                   len(manifest.kernel_install_operations))
+    DisplayValue('Block size', manifest.block_size)
+    DisplayValue('Minor version', manifest.minor_version)
+
+  def _DisplaySignatures(self):
+    """Show information about the signatures from the manifest."""
+    header = self.payload.header
+    if header.metadata_signature_len:
+      offset = header.size + header.manifest_len
+      DisplayValue('Metadata signatures blob',
+                   'file_offset=%d (%d bytes)' %
+                   (offset, header.metadata_signature_len))
+      # pylint: disable=invalid-unary-operand-type
+      signatures_blob = self.payload.ReadDataBlob(
+          -header.metadata_signature_len,
+          header.metadata_signature_len)
+      self._DisplaySignaturesBlob('Metadata', signatures_blob)
+    else:
+      print('No metadata signatures stored in the payload')
+
+    manifest = self.payload.manifest
+    if manifest.HasField('signatures_offset'):
+      signature_msg = 'blob_offset=%d' % manifest.signatures_offset
+      if manifest.signatures_size:
+        signature_msg += ' (%d bytes)' % manifest.signatures_size
+      DisplayValue('Payload signatures blob', signature_msg)
+      signatures_blob = self.payload.ReadDataBlob(manifest.signatures_offset,
+                                                  manifest.signatures_size)
+      self._DisplaySignaturesBlob('Payload', signatures_blob)
+    else:
+      print('No payload signatures stored in the payload')
+
+  @staticmethod
+  def _DisplaySignaturesBlob(signature_name, signatures_blob):
+    """Show information about the signatures blob."""
+    signatures = update_payload.update_metadata_pb2.Signatures()
+    signatures.ParseFromString(signatures_blob)
+    print('%s signatures: (%d entries)' %
+          (signature_name, len(signatures.signatures)))
+    for signature in signatures.signatures:
+      print('  version=%s, hex_data: (%d bytes)' %
+            (signature.version if signature.HasField('version') else None,
+             len(signature.data)))
+      DisplayHexData(signature.data, indent=4)
+
+
+  def _DisplayOps(self, name, operations):
+    """Show information about the install operations from the manifest.
+
+    The list shown includes operation type, data offset, data length, source
+    extents, source length, destination extents, and destinations length.
+
+    Args:
+      name: The name you want displayed above the operation table.
+      operations: The install_operations object that you want to display
+                  information about.
+    """
+    def _DisplayExtents(extents, name):
+      """Show information about extents."""
+      num_blocks = sum([ext.num_blocks for ext in extents])
+      ext_str = ' '.join(
+          '(%s,%s)' % (ext.start_block, ext.num_blocks) for ext in extents)
+      # Make extent list wrap around at 80 chars.
+      ext_str = '\n      '.join(textwrap.wrap(ext_str, 74))
+      extent_plural = 's' if len(extents) > 1 else ''
+      block_plural = 's' if num_blocks > 1 else ''
+      print('    %s: %d extent%s (%d block%s)' %
+            (name, len(extents), extent_plural, num_blocks, block_plural))
+      print('      %s' % ext_str)
+
+    op_dict = update_payload.common.OpType.NAMES
+    print('%s:' % name)
+    for op, op_count in itertools.izip(operations, itertools.count()):
+      print('  %d: %s' % (op_count, op_dict[op.type]))
+      if op.HasField('data_offset'):
+        print('    Data offset: %s' % op.data_offset)
+      if op.HasField('data_length'):
+        print('    Data length: %s' % op.data_length)
+      if op.src_extents:
+        _DisplayExtents(op.src_extents, 'Source')
+      if op.dst_extents:
+        _DisplayExtents(op.dst_extents, 'Destination')
+
+  def _GetStats(self, manifest):
+    """Returns various statistics about a payload file.
+
+    Returns a dictionary containing the number of blocks read during payload
+    application, the number of blocks written, and the number of seeks done
+    when writing during operation application.
+    """
+    read_blocks = 0
+    written_blocks = 0
+    num_write_seeks = 0
+    if self.payload.header.version == MAJOR_PAYLOAD_VERSION_BRILLO:
+      partitions_operations = [part.operations for part in manifest.partitions]
+    else:
+      partitions_operations = [manifest.install_operations,
+                               manifest.kernel_install_operations]
+    for operations in partitions_operations:
+      last_ext = None
+      for curr_op in operations:
+        read_blocks += sum([ext.num_blocks for ext in curr_op.src_extents])
+        written_blocks += sum([ext.num_blocks for ext in curr_op.dst_extents])
+        for curr_ext in curr_op.dst_extents:
+          # See if the extent is contiguous with the last extent seen.
+          if last_ext and (curr_ext.start_block !=
+                           last_ext.start_block + last_ext.num_blocks):
+            num_write_seeks += 1
+          last_ext = curr_ext
+
+    if manifest.minor_version == 1:
+      # Rootfs and kernel are written during the filesystem copy in version 1.
+      written_blocks += manifest.old_rootfs_info.size / manifest.block_size
+      written_blocks += manifest.old_kernel_info.size / manifest.block_size
+    # Old and new rootfs and kernel are read once during verification
+    read_blocks += manifest.old_rootfs_info.size / manifest.block_size
+    read_blocks += manifest.old_kernel_info.size / manifest.block_size
+    read_blocks += manifest.new_rootfs_info.size / manifest.block_size
+    read_blocks += manifest.new_kernel_info.size / manifest.block_size
+    stats = {'read_blocks': read_blocks,
+             'written_blocks': written_blocks,
+             'num_write_seeks': num_write_seeks}
+    return stats
+
+  def _DisplayStats(self, manifest):
+    stats = self._GetStats(manifest)
+    DisplayValue('Blocks read', stats['read_blocks'])
+    DisplayValue('Blocks written', stats['written_blocks'])
+    DisplayValue('Seeks when writing', stats['num_write_seeks'])
+
+  def Run(self):
+    """Parse the update payload and display information from it."""
+    self.payload = update_payload.Payload(self.options.payload_file)
+    self.payload.Init()
+    self._DisplayHeader()
+    self._DisplayManifest()
+    if self.options.signatures:
+      self._DisplaySignatures()
+    if self.options.stats:
+      self._DisplayStats(self.payload.manifest)
+    if self.options.list_ops:
+      print()
+      if self.payload.header.version == MAJOR_PAYLOAD_VERSION_BRILLO:
+        for partition in self.payload.manifest.partitions:
+          self._DisplayOps('%s install operations' % partition.partition_name,
+                           partition.operations)
+      else:
+        self._DisplayOps('Install operations',
+                         self.payload.manifest.install_operations)
+        self._DisplayOps('Kernel install operations',
+                         self.payload.manifest.kernel_install_operations)
+
+
+def main():
+  parser = argparse.ArgumentParser(
+      description='Show information about an update payload.')
+  parser.add_argument('payload_file', type=file,
+                      help='The update payload file.')
+  parser.add_argument('--list_ops', default=False, action='store_true',
+                      help='List the install operations and their extents.')
+  parser.add_argument('--stats', default=False, action='store_true',
+                      help='Show information about overall input/output.')
+  parser.add_argument('--signatures', default=False, action='store_true',
+                      help='Show signatures stored in the payload.')
+  args = parser.parse_args()
+
+  PayloadCommand(args).Run()
+
+if __name__ == '__main__':
+  sys.exit(main())
diff --git a/scripts/update_payload/checker.py b/scripts/update_payload/checker.py
index 48ed0f4..68c70d4 100644
--- a/scripts/update_payload/checker.py
+++ b/scripts/update_payload/checker.py
@@ -336,7 +336,7 @@
     self.new_kernel_fs_size = 0
     self.minor_version = None
     # TODO(*): When fixing crbug.com/794404, the major version should be
-    # correclty handled in update_payload scripts. So stop forcing
+    # correctly handled in update_payload scripts. So stop forcing
     # major_verions=1 here and set it to the correct value.
     self.major_version = 1
 
@@ -742,7 +742,7 @@
                                            self.block_size,
                                            op_name + '.data_length', 'dst')
     else:
-      # Check: data_length must be smaller than the alotted dst blocks.
+      # Check: data_length must be smaller than the allotted dst blocks.
       if data_length >= total_dst_blocks * self.block_size:
         raise error.PayloadError(
             '%s: data_length (%d) must be less than allotted dst block '
@@ -867,7 +867,7 @@
     if data_length is None:
       raise error.PayloadError('%s: missing data_{offset,length}.' % op_name)
 
-    # Check: data_length is strictly smaller than the alotted dst blocks.
+    # Check: data_length is strictly smaller than the allotted dst blocks.
     if data_length >= total_dst_blocks * self.block_size:
       raise error.PayloadError(
           '%s: data_length (%d) must be smaller than allotted dst space '
diff --git a/service_delegate_android_interface.h b/service_delegate_android_interface.h
index 7dae40f..5267bb0 100644
--- a/service_delegate_android_interface.h
+++ b/service_delegate_android_interface.h
@@ -70,6 +70,12 @@
   // of error, returns false and sets |error| accordingly.
   virtual bool ResetStatus(brillo::ErrorPtr* error) = 0;
 
+  // Verifies whether a payload (delegated by the payload metadata) can be
+  // applied to the current device. Returns whether the payload is applicable.
+  // In case of error, returns false and sets |error| accordingly.
+  virtual bool VerifyPayloadApplicable(const std::string& metadata_filename,
+                                       brillo::ErrorPtr* error) = 0;
+
  protected:
   ServiceDelegateAndroidInterface() = default;
 };
diff --git a/update_attempter.cc b/update_attempter.cc
index 67391a3..67b7471 100644
--- a/update_attempter.cc
+++ b/update_attempter.cc
@@ -880,11 +880,10 @@
       return;
     }
 
-    LOG(INFO) << "Running "
-              << (params.is_interactive ? "interactive" : "periodic")
+    LOG(INFO) << "Running " << (params.interactive ? "interactive" : "periodic")
               << " update.";
 
-    if (!params.is_interactive) {
+    if (!params.interactive) {
       // Cache the update attempt flags that will be used by this update attempt
       // so that they can't be changed mid-way through.
       current_update_attempt_flags_ = update_attempt_flags_;
@@ -899,7 +898,7 @@
            params.target_version_prefix,
            params.rollback_allowed,
            /*obey_proxies=*/false,
-           params.is_interactive);
+           params.interactive);
     // Always clear the forced app_version and omaha_url after an update attempt
     // so the next update uses the defaults.
     forced_app_version_.clear();
@@ -1501,7 +1500,7 @@
 
     // Write out the new value of update_check_count_value.
     if (prefs_->SetInt64(kPrefsUpdateCheckCount, update_check_count_value)) {
-      // We successfully wrote out te new value, so enable the
+      // We successfully wrote out the new value, so enable the
       // update check based wait.
       LOG(INFO) << "New update check count = " << update_check_count_value;
       return true;
diff --git a/update_attempter.h b/update_attempter.h
index 9504f88..44255b0 100644
--- a/update_attempter.h
+++ b/update_attempter.h
@@ -161,7 +161,7 @@
   BootControlInterface::Slot GetRollbackSlot() const;
 
   // Initiates a reboot if the current state is
-  // UPDATED_NEED_REBOOT. Returns true on sucess, false otherwise.
+  // UPDATED_NEED_REBOOT. Returns true on success, false otherwise.
   bool RebootIfNeeded();
 
   // DownloadActionDelegate methods:
@@ -199,7 +199,7 @@
   virtual bool GetBootTimeAtUpdate(base::Time *out_boot_time);
 
   // Returns a version OS version that was being used before the last reboot,
-  // and if that reboot happended to be into an update (current version).
+  // and if that reboot happened to be into an update (current version).
   // This will return an empty string otherwise.
   std::string const& GetPrevVersion() const { return prev_version_; }
 
@@ -342,7 +342,7 @@
 
   // Helper method of Update() to calculate the update-related parameters
   // from various sources and set the appropriate state. Please refer to
-  // Update() method for the meaning of the parametes.
+  // Update() method for the meaning of the parameters.
   bool CalculateUpdateParams(const std::string& app_version,
                              const std::string& omaha_url,
                              const std::string& target_channel,
@@ -355,7 +355,7 @@
   // which type of scattering is enabled, etc.) and also updates/deletes
   // the corresponding prefs file used in scattering. Should be called
   // only after the device policy has been loaded and set in the system_state_.
-  void CalculateScatteringParams(bool is_interactive);
+  void CalculateScatteringParams(bool interactive);
 
   // Sets a random value for the waiting period to wait for before downloading
   // an update, if one available. This value will be upperbounded by the
diff --git a/update_attempter_android.cc b/update_attempter_android.cc
index 5ee0584..2f842ac 100644
--- a/update_attempter_android.cc
+++ b/update_attempter_android.cc
@@ -31,14 +31,20 @@
 #include <brillo/strings/string_utils.h>
 
 #include "update_engine/common/constants.h"
+#include "update_engine/common/error_code_utils.h"
 #include "update_engine/common/file_fetcher.h"
 #include "update_engine/common/utils.h"
 #include "update_engine/daemon_state_interface.h"
 #include "update_engine/metrics_reporter_interface.h"
 #include "update_engine/metrics_utils.h"
 #include "update_engine/network_selector.h"
+#include "update_engine/payload_consumer/delta_performer.h"
 #include "update_engine/payload_consumer/download_action.h"
+#include "update_engine/payload_consumer/file_descriptor.h"
+#include "update_engine/payload_consumer/file_descriptor_utils.h"
 #include "update_engine/payload_consumer/filesystem_verifier_action.h"
+#include "update_engine/payload_consumer/payload_constants.h"
+#include "update_engine/payload_consumer/payload_metadata.h"
 #include "update_engine/payload_consumer/postinstall_runner_action.h"
 #include "update_engine/update_status_utils.h"
 
@@ -326,6 +332,95 @@
   }
 }
 
+bool UpdateAttempterAndroid::VerifyPayloadApplicable(
+    const std::string& metadata_filename, brillo::ErrorPtr* error) {
+  FileDescriptorPtr fd(new EintrSafeFileDescriptor);
+  if (!fd->Open(metadata_filename.c_str(), O_RDONLY)) {
+    return LogAndSetError(
+        error, FROM_HERE, "Failed to open " + metadata_filename);
+  }
+  brillo::Blob metadata(kMaxPayloadHeaderSize);
+  if (!fd->Read(metadata.data(), metadata.size())) {
+    return LogAndSetError(
+        error,
+        FROM_HERE,
+        "Failed to read payload header from " + metadata_filename);
+  }
+  ErrorCode errorcode;
+  PayloadMetadata payload_metadata;
+  if (payload_metadata.ParsePayloadHeader(metadata, &errorcode) !=
+      MetadataParseResult::kSuccess) {
+    return LogAndSetError(error,
+                          FROM_HERE,
+                          "Failed to parse payload header: " +
+                              utils::ErrorCodeToString(errorcode));
+  }
+  metadata.resize(payload_metadata.GetMetadataSize() +
+                  payload_metadata.GetMetadataSignatureSize());
+  if (metadata.size() < kMaxPayloadHeaderSize) {
+    return LogAndSetError(
+        error,
+        FROM_HERE,
+        "Metadata size too small: " + std::to_string(metadata.size()));
+  }
+  if (!fd->Read(metadata.data() + kMaxPayloadHeaderSize,
+                metadata.size() - kMaxPayloadHeaderSize)) {
+    return LogAndSetError(
+        error,
+        FROM_HERE,
+        "Failed to read metadata and signature from " + metadata_filename);
+  }
+  fd->Close();
+  errorcode = payload_metadata.ValidateMetadataSignature(
+      metadata, "", base::FilePath(constants::kUpdatePayloadPublicKeyPath));
+  if (errorcode != ErrorCode::kSuccess) {
+    return LogAndSetError(error,
+                          FROM_HERE,
+                          "Failed to validate metadata signature: " +
+                              utils::ErrorCodeToString(errorcode));
+  }
+  DeltaArchiveManifest manifest;
+  if (!payload_metadata.GetManifest(metadata, &manifest)) {
+    return LogAndSetError(error, FROM_HERE, "Failed to parse manifest.");
+  }
+
+  BootControlInterface::Slot current_slot = boot_control_->GetCurrentSlot();
+  for (const PartitionUpdate& partition : manifest.partitions()) {
+    if (!partition.has_old_partition_info())
+      continue;
+    string partition_path;
+    if (!boot_control_->GetPartitionDevice(
+            partition.partition_name(), current_slot, &partition_path)) {
+      return LogAndSetError(
+          error,
+          FROM_HERE,
+          "Failed to get partition device for " + partition.partition_name());
+    }
+    if (!fd->Open(partition_path.c_str(), O_RDONLY)) {
+      return LogAndSetError(
+          error, FROM_HERE, "Failed to open " + partition_path);
+    }
+    for (const InstallOperation& operation : partition.operations()) {
+      if (!operation.has_src_sha256_hash())
+        continue;
+      brillo::Blob source_hash;
+      if (!fd_utils::ReadAndHashExtents(fd,
+                                        operation.src_extents(),
+                                        manifest.block_size(),
+                                        &source_hash)) {
+        return LogAndSetError(
+            error, FROM_HERE, "Failed to hash " + partition_path);
+      }
+      if (!DeltaPerformer::ValidateSourceHash(
+              source_hash, operation, fd, &errorcode)) {
+        return false;
+      }
+    }
+    fd->Close();
+  }
+  return true;
+}
+
 void UpdateAttempterAndroid::ProcessingDone(const ActionProcessor* processor,
                                             ErrorCode code) {
   LOG(INFO) << "Processing Done.";
@@ -535,7 +630,7 @@
                          hardware_,
                          nullptr,           // system_state, not used.
                          download_fetcher,  // passes ownership
-                         true /* is_interactive */));
+                         true /* interactive */));
   shared_ptr<FilesystemVerifierAction> filesystem_verifier_action(
       new FilesystemVerifierAction());
 
@@ -653,6 +748,7 @@
         num_bytes_downloaded,
         download_overhead_percentage,
         duration,
+        duration_uptime,
         static_cast<int>(reboot_count),
         0);  // url_switch_count
   }
diff --git a/update_attempter_android.h b/update_attempter_android.h
index 28bf90a..f00692e 100644
--- a/update_attempter_android.h
+++ b/update_attempter_android.h
@@ -69,6 +69,8 @@
   bool ResumeUpdate(brillo::ErrorPtr* error) override;
   bool CancelUpdate(brillo::ErrorPtr* error) override;
   bool ResetStatus(brillo::ErrorPtr* error) override;
+  bool VerifyPayloadApplicable(const std::string& metadata_filename,
+                               brillo::ErrorPtr* error) override;
 
   // ActionProcessorDelegate methods:
   void ProcessingDone(const ActionProcessor* processor,
diff --git a/update_attempter_android_unittest.cc b/update_attempter_android_unittest.cc
index 94452df..6b53a21 100644
--- a/update_attempter_android_unittest.cc
+++ b/update_attempter_android_unittest.cc
@@ -140,7 +140,8 @@
                                  ErrorCode::kSuccess))
       .Times(1);
   EXPECT_CALL(*metrics_reporter_,
-              ReportSuccessfulUpdateMetrics(2, 0, _, _, _, _, duration, 3, _))
+              ReportSuccessfulUpdateMetrics(
+                  2, 0, _, _, _, _, duration, duration_uptime, 3, _))
       .Times(1);
 
   SetUpdateStatus(UpdateStatus::UPDATE_AVAILABLE);
@@ -181,10 +182,11 @@
                   125,
                   _,
                   _,
+                  _,
                   _))
       .Times(1);
 
-  // The first update fails after receving 50 bytes in total.
+  // The first update fails after receiving 50 bytes in total.
   update_attempter_android_.BytesReceived(30, 50, 200);
   update_attempter_android_.ProcessingDone(nullptr, ErrorCode::kError);
   EXPECT_EQ(
diff --git a/update_attempter_unittest.cc b/update_attempter_unittest.cc
index 29c1971..80c9638 100644
--- a/update_attempter_unittest.cc
+++ b/update_attempter_unittest.cc
@@ -212,7 +212,7 @@
                         nullptr,
                         nullptr,
                         fetcher.release(),
-                        false /* is_interactive */);
+                        false /* interactive */);
   EXPECT_CALL(*prefs_, GetInt64(kPrefsDeltaUpdateFailures, _)).Times(0);
   attempter_.ActionCompleted(nullptr, &action, ErrorCode::kSuccess);
   EXPECT_EQ(UpdateStatus::FINALIZING, attempter_.status());
diff --git a/update_engine.gyp b/update_engine.gyp
index 6da3a28..30daf2a 100644
--- a/update_engine.gyp
+++ b/update_engine.gyp
@@ -50,6 +50,7 @@
       '_POSIX_C_SOURCE=199309L',
       'USE_BINDER=<(USE_binder)',
       'USE_DBUS=<(USE_dbus)',
+      'USE_FEC=0',
       'USE_HWID_OVERRIDE=<(USE_hwid_override)',
       'USE_CHROME_KIOSK_APP=<(USE_chrome_kiosk_app)',
       'USE_CHROME_NETWORK_PROXY=<(USE_chrome_network_proxy)',
@@ -183,6 +184,7 @@
         'payload_consumer/install_plan.cc',
         'payload_consumer/mount_history.cc',
         'payload_consumer/payload_constants.cc',
+        'payload_consumer/payload_metadata.cc',
         'payload_consumer/payload_verifier.cc',
         'payload_consumer/postinstall_runner_action.cc',
         'payload_consumer/xz_extent_writer.cc',
diff --git a/update_engine_client.cc b/update_engine_client.cc
index c41916d..b7096c5 100644
--- a/update_engine_client.cc
+++ b/update_engine_client.cc
@@ -297,7 +297,7 @@
 
   // Boilerplate init commands.
   base::CommandLine::Init(argc_, argv_);
-  brillo::FlagHelper::Init(argc_, argv_, "Chromium OS Update Engine Client");
+  brillo::FlagHelper::Init(argc_, argv_, "A/B Update Engine Client");
 
   // Ensure there are no positional arguments.
   const vector<string> positional_args =
@@ -396,7 +396,7 @@
     string rollback_partition;
 
     if (!client_->GetRollbackPartition(&rollback_partition)) {
-      LOG(ERROR) << "Error while querying rollback partition availabilty.";
+      LOG(ERROR) << "Error while querying rollback partition availability.";
       return 1;
     }
 
diff --git a/update_manager/android_things_policy.cc b/update_manager/android_things_policy.cc
index 259ca2b..4afcf12 100644
--- a/update_manager/android_things_policy.cc
+++ b/update_manager/android_things_policy.cc
@@ -55,7 +55,7 @@
   result->target_version_prefix.clear();
   result->rollback_allowed = false;
   result->rollback_allowed_milestones = -1;
-  result->is_interactive = false;
+  result->interactive = false;
 
   // Build a list of policies to consult.  Note that each policy may modify the
   // result structure, even if it signals kContinue.
@@ -70,12 +70,12 @@
       // A/B updates
       &enough_slots_ab_updates_policy,
 
-      // Unofficial builds should not perform periodic update checks.
-      &only_update_official_builds_policy,
-
       // Check to see if an interactive update was requested.
       &interactive_update_policy,
 
+      // Unofficial builds should not perform periodic update checks.
+      &only_update_official_builds_policy,
+
       // Ensure that periodic update checks are timed properly.
       &next_update_check_time_policy,
   };
diff --git a/update_manager/android_things_policy_unittest.cc b/update_manager/android_things_policy_unittest.cc
index 8a50bc2..6961efc 100644
--- a/update_manager/android_things_policy_unittest.cc
+++ b/update_manager/android_things_policy_unittest.cc
@@ -97,7 +97,7 @@
   ExpectPolicyStatus(
       EvalStatus::kSucceeded, &Policy::UpdateCheckAllowed, &result);
   EXPECT_TRUE(result.updates_enabled);
-  EXPECT_FALSE(result.is_interactive);
+  EXPECT_FALSE(result.interactive);
 }
 
 TEST_F(UmAndroidThingsPolicyTest,
@@ -140,7 +140,7 @@
   ExpectPolicyStatus(
       EvalStatus::kSucceeded, &Policy::UpdateCheckAllowed, &result);
   EXPECT_TRUE(result.updates_enabled);
-  EXPECT_TRUE(result.is_interactive);
+  EXPECT_TRUE(result.interactive);
 }
 
 TEST_F(UmAndroidThingsPolicyTest,
@@ -156,7 +156,7 @@
   ExpectPolicyStatus(
       EvalStatus::kSucceeded, &Policy::UpdateCheckAllowed, &result);
   EXPECT_TRUE(result.updates_enabled);
-  EXPECT_FALSE(result.is_interactive);
+  EXPECT_FALSE(result.interactive);
 }
 
 TEST_F(UmAndroidThingsPolicyTest, UpdateCanBeAppliedOk) {
diff --git a/update_manager/chromeos_policy.cc b/update_manager/chromeos_policy.cc
index 04d9680..95c47aa 100644
--- a/update_manager/chromeos_policy.cc
+++ b/update_manager/chromeos_policy.cc
@@ -204,7 +204,7 @@
   result->target_version_prefix.clear();
   result->rollback_allowed = false;
   result->rollback_allowed_milestones = -1;
-  result->is_interactive = false;
+  result->interactive = false;
 
   EnoughSlotsAbUpdatesPolicyImpl enough_slots_ab_updates_policy;
   EnterpriseDevicePolicyImpl enterprise_device_policy;
@@ -329,7 +329,7 @@
     bool is_scattering_applicable = false;
     result->scatter_wait_period = kZeroInterval;
     result->scatter_check_threshold = 0;
-    if (!update_state.is_interactive) {
+    if (!update_state.interactive) {
       const bool* is_oobe_enabled_p = ec->GetValue(
           state->config_provider()->var_is_oobe_enabled());
       if (is_oobe_enabled_p && !(*is_oobe_enabled_p)) {
@@ -377,7 +377,7 @@
     // interactive, and other limits haven't been reached.
     if (update_state.p2p_downloading_disabled) {
       LOG(INFO) << "Blocked P2P downloading because it is disabled by Omaha.";
-    } else if (update_state.is_interactive) {
+    } else if (update_state.interactive) {
       LOG(INFO) << "Blocked P2P downloading because update is interactive.";
     } else if (update_state.p2p_num_attempts >= kMaxP2PAttempts) {
       LOG(INFO) << "Blocked P2P downloading as it was attempted too many "
@@ -577,7 +577,7 @@
   bool may_backoff = false;
   if (update_state.is_backoff_disabled) {
     LOG(INFO) << "Backoff disabled by Omaha.";
-  } else if (update_state.is_interactive) {
+  } else if (update_state.interactive) {
     LOG(INFO) << "No backoff for interactive updates.";
   } else if (update_state.is_delta_payload) {
     LOG(INFO) << "No backoff for delta payloads.";
diff --git a/update_manager/chromeos_policy_unittest.cc b/update_manager/chromeos_policy_unittest.cc
index c7380e8..bed4d53 100644
--- a/update_manager/chromeos_policy_unittest.cc
+++ b/update_manager/chromeos_policy_unittest.cc
@@ -140,7 +140,7 @@
   ExpectPolicyStatus(EvalStatus::kSucceeded,
                      &Policy::UpdateCheckAllowed, &result);
   EXPECT_TRUE(result.updates_enabled);
-  EXPECT_FALSE(result.is_interactive);
+  EXPECT_FALSE(result.interactive);
 }
 
 TEST_F(UmChromeOSPolicyTest, UpdateCheckAllowedWaitsForOOBE) {
@@ -178,7 +178,7 @@
   ExpectPolicyStatus(EvalStatus::kSucceeded,
                      &Policy::UpdateCheckAllowed, &result);
   EXPECT_TRUE(result.updates_enabled);
-  EXPECT_FALSE(result.is_interactive);
+  EXPECT_FALSE(result.interactive);
 }
 
 TEST_F(UmChromeOSPolicyTest, UpdateCheckAllowedWithAttributes) {
@@ -204,7 +204,7 @@
   EXPECT_EQ("1.2", result.target_version_prefix);
   EXPECT_EQ(5, result.rollback_allowed_milestones);
   EXPECT_EQ("foo-channel", result.target_channel);
-  EXPECT_FALSE(result.is_interactive);
+  EXPECT_FALSE(result.interactive);
 }
 
 TEST_F(UmChromeOSPolicyTest, UpdateCheckAllowedRollbackAllowed) {
@@ -319,7 +319,7 @@
   ExpectPolicyStatus(EvalStatus::kSucceeded,
                      &Policy::UpdateCheckAllowed, &result);
   EXPECT_TRUE(result.updates_enabled);
-  EXPECT_TRUE(result.is_interactive);
+  EXPECT_TRUE(result.interactive);
 }
 
 TEST_F(UmChromeOSPolicyTest, UpdateCheckAllowedForcedUpdateRequestedPeriodic) {
@@ -334,7 +334,7 @@
   ExpectPolicyStatus(EvalStatus::kSucceeded,
                      &Policy::UpdateCheckAllowed, &result);
   EXPECT_TRUE(result.updates_enabled);
-  EXPECT_FALSE(result.is_interactive);
+  EXPECT_FALSE(result.interactive);
 }
 
 TEST_F(UmChromeOSPolicyTest, UpdateCheckAllowedKioskPin) {
@@ -356,7 +356,7 @@
                      &Policy::UpdateCheckAllowed, &result);
   EXPECT_TRUE(result.updates_enabled);
   EXPECT_EQ("1234.0.0", result.target_version_prefix);
-  EXPECT_FALSE(result.is_interactive);
+  EXPECT_FALSE(result.interactive);
 }
 
 TEST_F(UmChromeOSPolicyTest, UpdateCheckAllowedDisabledWhenNoKioskPin) {
@@ -396,7 +396,7 @@
                      &Policy::UpdateCheckAllowed, &result);
   EXPECT_TRUE(result.updates_enabled);
   EXPECT_TRUE(result.target_version_prefix.empty());
-  EXPECT_FALSE(result.is_interactive);
+  EXPECT_FALSE(result.interactive);
 }
 
 TEST_F(UmChromeOSPolicyTest,
@@ -624,7 +624,7 @@
   update_state.download_errors.emplace_back(
       0, ErrorCode::kDownloadTransferError,
       curr_time - TimeDelta::FromSeconds(2));
-  update_state.is_interactive = true;
+  update_state.interactive = true;
 
   // Check that UpdateCanStart returns false and a new backoff expiry is
   // generated.
@@ -861,7 +861,7 @@
       new TimeDelta(TimeDelta::FromSeconds(1)));
 
   UpdateState update_state = GetDefaultUpdateState(TimeDelta::FromSeconds(1));
-  update_state.is_interactive = true;
+  update_state.interactive = true;
   update_state.scatter_check_threshold = 0;
   update_state.scatter_check_threshold_min = 2;
   update_state.scatter_check_threshold_max = 5;
@@ -891,7 +891,7 @@
   fake_state_.system_provider()->var_is_oobe_complete()->reset(new bool(false));
 
   UpdateState update_state = GetDefaultUpdateState(TimeDelta::FromSeconds(1));
-  update_state.is_interactive = true;
+  update_state.interactive = true;
   update_state.scatter_check_threshold = 0;
   update_state.scatter_check_threshold_min = 2;
   update_state.scatter_check_threshold_max = 5;
diff --git a/update_manager/default_policy.cc b/update_manager/default_policy.cc
index 17d0e3e..5509abc 100644
--- a/update_manager/default_policy.cc
+++ b/update_manager/default_policy.cc
@@ -42,7 +42,7 @@
   result->target_version_prefix.clear();
   result->rollback_allowed = false;
   result->rollback_allowed_milestones = -1;  // No version rolls should happen.
-  result->is_interactive = false;
+  result->interactive = false;
 
   // Ensure that the minimum interval is set. If there's no clock, this defaults
   // to always allowing the update.
diff --git a/update_manager/enterprise_device_policy_impl.cc b/update_manager/enterprise_device_policy_impl.cc
index 94ae5e7..e2a0c87 100644
--- a/update_manager/enterprise_device_policy_impl.cc
+++ b/update_manager/enterprise_device_policy_impl.cc
@@ -66,11 +66,8 @@
       }
 
       result->target_version_prefix = *kiosk_required_platform_version_p;
-      LOG(INFO) << "Allow kiosk app to control Chrome version policy is set,"
-                << ", target version is "
-                << (kiosk_required_platform_version_p
-                        ? *kiosk_required_platform_version_p
-                        : std::string("latest"));
+      LOG(INFO) << "Allow kiosk app to control Chrome version policy is set, "
+                << "target version is " << result->target_version_prefix;
       // TODO(hunyadym): Add support of rollback for kiosk apps.
     } else {
       // Determine whether a target version prefix is dictated by policy.
diff --git a/update_manager/evaluation_context.h b/update_manager/evaluation_context.h
index df5816a..0bdbaec 100644
--- a/update_manager/evaluation_context.h
+++ b/update_manager/evaluation_context.h
@@ -114,7 +114,7 @@
   // there's no cached variable, this method returns false.
   //
   // Right before the passed closure is called the EvaluationContext is
-  // reseted, removing all the non-const cached values.
+  // reset, removing all the non-const cached values.
   bool RunOnValueChangeOrTimeout(base::Closure callback);
 
   // Returns a textual representation of the evaluation context,
diff --git a/update_manager/interactive_update_policy_impl.cc b/update_manager/interactive_update_policy_impl.cc
index df7f17b..03af435 100644
--- a/update_manager/interactive_update_policy_impl.cc
+++ b/update_manager/interactive_update_policy_impl.cc
@@ -31,10 +31,10 @@
       ec->GetValue(updater_provider->var_forced_update_requested());
   if (forced_update_requested_p != nullptr &&
       *forced_update_requested_p != UpdateRequestStatus::kNone) {
-    result->is_interactive =
+    result->interactive =
         (*forced_update_requested_p == UpdateRequestStatus::kInteractive);
     LOG(INFO) << "Forced update signaled ("
-              << (result->is_interactive ? "interactive" : "periodic")
+              << (result->interactive ? "interactive" : "periodic")
               << "), allowing update check.";
     return EvalStatus::kSucceeded;
   }
diff --git a/update_manager/interactive_update_policy_impl.h b/update_manager/interactive_update_policy_impl.h
index a431456..18cf565 100644
--- a/update_manager/interactive_update_policy_impl.h
+++ b/update_manager/interactive_update_policy_impl.h
@@ -46,4 +46,4 @@
 
 }  // namespace chromeos_update_manager
 
-#endif  // UPDATE_ENGINE_UPDATE_MANAGER_OFFICIAL_BUILD_CHECK_POLICY_IMPL_H_
+#endif  // UPDATE_ENGINE_UPDATE_MANAGER_INTERACTIVE_UPDATE_POLICY_IMPL_H_
diff --git a/update_manager/policy.h b/update_manager/policy.h
index 7089c30..ee163b3 100644
--- a/update_manager/policy.h
+++ b/update_manager/policy.h
@@ -59,7 +59,7 @@
   std::string target_channel;
 
   // Whether the allowed update is interactive (user-initiated) or periodic.
-  bool is_interactive;
+  bool interactive;
 };
 
 // Input arguments to UpdateCanStart.
@@ -72,7 +72,7 @@
   //
   // Whether the current update check is an interactive one. The caller should
   // feed the value returned by the preceding call to UpdateCheckAllowed().
-  bool is_interactive;
+  bool interactive;
   // Whether it is a delta payload.
   bool is_delta_payload;
   // Wallclock time when payload was first (consecutively) offered by Omaha.
@@ -126,7 +126,7 @@
 
   // Information pertaining to update scattering.
   //
-  // The currently knwon (persisted) scattering wallclock-based wait period and
+  // The currently known (persisted) scattering wallclock-based wait period and
   // update check threshold; zero if none.
   base::TimeDelta scatter_wait_period;
   int scatter_check_threshold;
diff --git a/update_manager/policy_test_utils.cc b/update_manager/policy_test_utils.cc
index fbfcb82..d9a9857 100644
--- a/update_manager/policy_test_utils.cc
+++ b/update_manager/policy_test_utils.cc
@@ -73,7 +73,7 @@
   // This is a non-interactive check returning a delta payload, seen for the
   // first time (|first_seen_period| ago). Clearly, there were no failed
   // attempts so far.
-  update_state.is_interactive = false;
+  update_state.interactive = false;
   update_state.is_delta_payload = false;
   update_state.first_seen = first_seen_time;
   update_state.num_checks = 1;
diff --git a/update_manager/real_updater_provider.cc b/update_manager/real_updater_provider.cc
index 050bd42..094e79c 100644
--- a/update_manager/real_updater_provider.cc
+++ b/update_manager/real_updater_provider.cc
@@ -401,11 +401,11 @@
     return new UpdateRequestStatus(update_request_status_);
   }
 
-  void Reset(bool forced_update_requested, bool is_interactive) {
+  void Reset(bool forced_update_requested, bool interactive) {
     UpdateRequestStatus new_value = UpdateRequestStatus::kNone;
     if (forced_update_requested)
-      new_value = (is_interactive ? UpdateRequestStatus::kInteractive :
-                   UpdateRequestStatus::kPeriodic);
+      new_value = (interactive ? UpdateRequestStatus::kInteractive
+                               : UpdateRequestStatus::kPeriodic);
     if (update_request_status_ != new_value) {
       update_request_status_ = new_value;
       NotifyValueChanged();
diff --git a/update_manager/update_manager_unittest.cc b/update_manager/update_manager_unittest.cc
index 03f1610..9625b53 100644
--- a/update_manager/update_manager_unittest.cc
+++ b/update_manager/update_manager_unittest.cc
@@ -187,7 +187,7 @@
 
 TEST_F(UmUpdateManagerTest, PolicyRequestCallUpdateCanStart) {
   UpdateState update_state = UpdateState();
-  update_state.is_interactive = true;
+  update_state.interactive = true;
   update_state.is_delta_payload = false;
   update_state.first_seen = FixedTime();
   update_state.num_checks = 1;
diff --git a/update_metadata.proto b/update_metadata.proto
index 7ff032a..fe81efb 100644
--- a/update_metadata.proto
+++ b/update_metadata.proto
@@ -82,7 +82,7 @@
 //   the new partition.
 //
 // The operations allowed in the payload (supported by the client) depend on the
-// major and minor version. See InstallOperation.Type bellow for details.
+// major and minor version. See InstallOperation.Type below for details.
 
 syntax = "proto2";