Merge pie-platform-release to aosp-master - DO NOT MERGE

Change-Id: Ie31ce934b9f29261bd37b3681f5325846572daf0
diff --git a/.clang-format b/.clang-format
deleted file mode 120000
index f412743..0000000
--- a/.clang-format
+++ /dev/null
@@ -1 +0,0 @@
-../../build/tools/brillo-clang-format
\ No newline at end of file
diff --git a/.clang-format b/.clang-format
new file mode 100644
index 0000000..c1244fe
--- /dev/null
+++ b/.clang-format
@@ -0,0 +1,38 @@
+#
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#
+# This is the .clang-format file used by all Brillo projects, conforming to the
+# style guide defined by Brillo. To use this file create a *relative* symlink in
+# your project pointing to this file, as this repository is expected to be
+# present in all manifests.
+#
+# See go/brillo-c++-style for details about the style guide.
+#
+
+# WARN: We do not symlink this file to the original file because its location
+# differs between AOSP and CrOS. Keep this in sync with the original file if
+# possible.
+
+BasedOnStyle: Google
+AllowShortFunctionsOnASingleLine: Inline
+AllowShortIfStatementsOnASingleLine: false
+AllowShortLoopsOnASingleLine: false
+BinPackArguments: false
+BinPackParameters: false
+CommentPragmas: NOLINT:.*
+DerivePointerAlignment: false
+PointerAlignment: Left
+TabWidth: 2
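
As a quick orientation (editorial illustration, not part of the patch): assuming the standard clang-format meaning of the keys above, the settings lay code out roughly as in the C++ sketch below — pointers bind to the type, parameters are not bin-packed once a declaration overflows the line, short ifs are always broken onto two lines, and only functions defined inside a class body may stay on one line.

#include <cstddef>
#include <cstdint>

class Counter {
 public:
  // AllowShortFunctionsOnASingleLine: Inline lets this in-class definition
  // stay on one line.
  int value() const { return value_; }

 private:
  int value_ = 0;
};

// PointerAlignment: Left attaches '*' to the type; BinPackParameters: false
// gives each parameter its own line once the declaration no longer fits.
void CopyBuffer(const uint8_t* source_buffer,
                uint8_t* destination_buffer,
                size_t destination_length,
                size_t bytes_to_copy);

void Demo(bool flag) {
  // AllowShortIfStatementsOnASingleLine: false keeps the body on its own line.
  if (flag)
    return;
}
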
diff --git a/Android.mk b/Android.mk
index 8f2c8fa..e9c716d 100644
--- a/Android.mk
+++ b/Android.mk
@@ -22,6 +22,7 @@
 # by setting BRILLO_USE_* values. Note that we define local variables like
 # local_use_* to prevent leaking our default setting for other packages.
 local_use_binder := $(if $(BRILLO_USE_BINDER),$(BRILLO_USE_BINDER),1)
+local_use_fec := 1
 local_use_hwid_override := \
     $(if $(BRILLO_USE_HWID_OVERRIDE),$(BRILLO_USE_HWID_OVERRIDE),0)
 local_use_mtd := $(if $(BRILLO_USE_MTD),$(BRILLO_USE_MTD),0)
@@ -35,6 +36,7 @@
     -DUSE_BINDER=$(local_use_binder) \
     -DUSE_CHROME_NETWORK_PROXY=$(local_use_chrome_network_proxy) \
     -DUSE_CHROME_KIOSK_APP=$(local_use_chrome_kiosk_app) \
+    -DUSE_FEC=$(local_use_fec) \
     -DUSE_HWID_OVERRIDE=$(local_use_hwid_override) \
     -DUSE_MTD=$(local_use_mtd) \
     -DUSE_OMAHA=$(local_use_omaha) \
@@ -106,10 +108,14 @@
     libbz \
     libbspatch \
     libbrotli \
+    libfec_rs \
     libpuffpatch \
+    libverity_tree \
     $(ue_update_metadata_protos_exported_static_libraries)
 ue_libpayload_consumer_exported_shared_libraries := \
+    libbase \
     libcrypto \
+    libfec \
     $(ue_update_metadata_protos_exported_shared_libraries)
 
 ue_libpayload_consumer_src_files := \
@@ -127,6 +133,7 @@
     common/multi_range_http_fetcher.cc \
     common/platform_constants_android.cc \
     common/prefs.cc \
+    common/proxy_resolver.cc \
     common/subprocess.cc \
     common/terminator.cc \
     common/utils.cc \
@@ -146,15 +153,21 @@
     payload_consumer/payload_metadata.cc \
     payload_consumer/payload_verifier.cc \
     payload_consumer/postinstall_runner_action.cc \
+    payload_consumer/verity_writer_android.cc \
     payload_consumer/xz_extent_writer.cc
 
+ifeq ($(local_use_fec),1)
+ue_libpayload_consumer_src_files += \
+    payload_consumer/fec_file_descriptor.cc
+endif  # local_use_fec == 1
+
 ifeq ($(HOST_OS),linux)
 # Build for the host.
 include $(CLEAR_VARS)
 LOCAL_MODULE := libpayload_consumer
 LOCAL_MODULE_CLASS := STATIC_LIBRARIES
 LOCAL_CPP_EXTENSION := .cc
-LOCAL_CFLAGS := $(ue_common_cflags)
+LOCAL_CFLAGS := $(filter-out -DUSE_FEC=%,$(ue_common_cflags)) -DUSE_FEC=0
 LOCAL_CPPFLAGS := $(ue_common_cppflags)
 LOCAL_LDFLAGS := $(ue_common_ldflags)
 LOCAL_C_INCLUDES := \
@@ -185,11 +198,11 @@
 LOCAL_STATIC_LIBRARIES := \
     update_metadata-protos \
     $(ue_common_static_libraries) \
-    $(ue_libpayload_consumer_exported_static_libraries:-host=) \
+    $(ue_libpayload_consumer_exported_static_libraries) \
     $(ue_update_metadata_protos_exported_static_libraries)
 LOCAL_SHARED_LIBRARIES := \
     $(ue_common_shared_libraries) \
-    $(ue_libpayload_consumer_exported_shared_libraries:-host=) \
+    $(ue_libpayload_consumer_exported_shared_libraries) \
     $(ue_update_metadata_protos_exported_shared_libraries)
 LOCAL_SRC_FILES := $(ue_libpayload_consumer_src_files)
 include $(BUILD_STATIC_LIBRARY)
@@ -202,8 +215,11 @@
     $(ue_update_metadata_protos_exported_static_libraries)
 
 ue_libupdate_engine_boot_control_exported_shared_libraries := \
+    libbootloader_message \
+    libfs_mgr \
     libhwbinder \
     libhidlbase \
+    liblp \
     libutils \
     android.hardware.boot@1.0 \
     $(ue_update_metadata_protos_exported_shared_libraries)
@@ -216,8 +232,7 @@
 LOCAL_CPPFLAGS := $(ue_common_cppflags)
 LOCAL_LDFLAGS := $(ue_common_ldflags)
 LOCAL_C_INCLUDES := \
-    $(ue_common_c_includes) \
-    bootable/recovery
+    $(ue_common_c_includes)
 LOCAL_STATIC_LIBRARIES := \
     $(ue_common_static_libraries) \
     $(ue_libupdate_engine_boot_control_exported_static_libraries)
@@ -225,7 +240,8 @@
     $(ue_common_shared_libraries) \
     $(ue_libupdate_engine_boot_control_exported_shared_libraries)
 LOCAL_SRC_FILES := \
-    boot_control_android.cc
+    boot_control_android.cc \
+    dynamic_partition_control_android.cc
 include $(BUILD_STATIC_LIBRARY)
 
 ifeq ($(local_use_omaha),1)
@@ -239,20 +255,19 @@
 ue_libupdate_engine_exported_static_libraries := \
     libpayload_consumer \
     update_metadata-protos \
-    libbz \
-    libfs_mgr \
-    libbase \
-    liblog \
     $(ue_libpayload_consumer_exported_static_libraries) \
     $(ue_update_metadata_protos_exported_static_libraries) \
     libupdate_engine_boot_control \
     $(ue_libupdate_engine_boot_control_exported_static_libraries)
 ue_libupdate_engine_exported_shared_libraries := \
-    libmetrics \
-    libexpat \
+    libbase \
+    libbootloader_message \
     libbrillo-policy \
     libcurl \
     libcutils \
+    libexpat \
+    liblog \
+    libmetrics \
     libssl \
     $(ue_libpayload_consumer_exported_shared_libraries) \
     $(ue_update_metadata_protos_exported_shared_libraries) \
@@ -275,19 +290,18 @@
 LOCAL_LDFLAGS := $(ue_common_ldflags)
 LOCAL_C_INCLUDES := \
     $(ue_common_c_includes) \
-    $(ue_libupdate_engine_exported_c_includes) \
-    bootable/recovery
+    $(ue_libupdate_engine_exported_c_includes)
 LOCAL_STATIC_LIBRARIES := \
     libpayload_consumer \
     update_metadata-protos \
     $(ue_common_static_libraries) \
-    $(ue_libupdate_engine_exported_static_libraries:-host=) \
-    $(ue_libpayload_consumer_exported_static_libraries:-host=) \
+    $(ue_libupdate_engine_exported_static_libraries) \
+    $(ue_libpayload_consumer_exported_static_libraries) \
     $(ue_update_metadata_protos_exported_static_libraries)
 LOCAL_SHARED_LIBRARIES := \
     $(ue_common_shared_libraries) \
-    $(ue_libupdate_engine_exported_shared_libraries:-host=) \
-    $(ue_libpayload_consumer_exported_shared_libraries:-host=) \
+    $(ue_libupdate_engine_exported_shared_libraries) \
+    $(ue_libpayload_consumer_exported_shared_libraries) \
     $(ue_update_metadata_protos_exported_shared_libraries)
 LOCAL_SRC_FILES := \
     certificate_checker.cc \
@@ -307,9 +321,9 @@
     p2p_manager.cc \
     payload_state.cc \
     power_manager_android.cc \
-    proxy_resolver.cc \
     real_system_state.cc \
     update_attempter.cc \
+    update_boot_flags_action.cc \
     update_manager/android_things_policy.cc \
     update_manager/api_restricted_downloads_policy_impl.cc \
     update_manager/boxed_value.cc \
@@ -326,10 +340,12 @@
     update_manager/real_system_provider.cc \
     update_manager/real_time_provider.cc \
     update_manager/real_updater_provider.cc \
+    update_manager/staging_utils.cc \
     update_manager/state_factory.cc \
     update_manager/update_manager.cc \
-    update_status_utils.cc \
-    utils_android.cc
+    update_manager/update_time_restrictions_policy_impl.cc \
+    update_manager/weekly_time.cc \
+    update_status_utils.cc
 ifeq ($(local_use_binder),1)
 LOCAL_AIDL_INCLUDES += $(LOCAL_PATH)/binder_bindings
 LOCAL_SRC_FILES += \
@@ -356,9 +372,6 @@
 # loop to apply payloads provided by the upper layer via a Binder interface.
 ue_libupdate_engine_android_exported_static_libraries := \
     libpayload_consumer \
-    libfs_mgr \
-    libbase \
-    liblog \
     $(ue_libpayload_consumer_exported_static_libraries) \
     libupdate_engine_boot_control \
     $(ue_libupdate_engine_boot_control_exported_static_libraries)
@@ -366,11 +379,14 @@
     $(ue_libpayload_consumer_exported_shared_libraries) \
     $(ue_libupdate_engine_boot_control_exported_shared_libraries) \
     libandroid_net \
+    libbase \
     libbinder \
     libbinderwrapper \
+    libbootloader_message \
     libbrillo-binder \
-    libcutils \
     libcurl \
+    libcutils \
+    liblog \
     libmetricslogger \
     libssl \
     libutils
@@ -383,18 +399,17 @@
 LOCAL_CPPFLAGS := $(ue_common_cppflags)
 LOCAL_LDFLAGS := $(ue_common_ldflags)
 LOCAL_C_INCLUDES := \
-    $(ue_common_c_includes) \
-    bootable/recovery
+    $(ue_common_c_includes)
 #TODO(deymo): Remove external/cros/system_api/dbus once the strings are moved
 # out of the DBus interface.
 LOCAL_C_INCLUDES += \
     external/cros/system_api/dbus
 LOCAL_STATIC_LIBRARIES := \
     $(ue_common_static_libraries) \
-    $(ue_libupdate_engine_android_exported_static_libraries:-host=)
+    $(ue_libupdate_engine_android_exported_static_libraries)
 LOCAL_SHARED_LIBRARIES += \
     $(ue_common_shared_libraries) \
-    $(ue_libupdate_engine_android_exported_shared_libraries:-host=)
+    $(ue_libupdate_engine_android_exported_shared_libraries)
 LOCAL_AIDL_INCLUDES := $(LOCAL_PATH)/binder_bindings
 LOCAL_SRC_FILES += \
     binder_bindings/android/os/IUpdateEngine.aidl \
@@ -408,10 +423,9 @@
     metrics_reporter_android.cc \
     metrics_utils.cc \
     network_selector_android.cc \
-    proxy_resolver.cc \
     update_attempter_android.cc \
-    update_status_utils.cc \
-    utils_android.cc
+    update_boot_flags_action.cc \
+    update_status_utils.cc
 include $(BUILD_STATIC_LIBRARY)
 
 endif  # local_use_omaha == 1
@@ -442,15 +456,15 @@
     $(ue_libupdate_engine_exported_c_includes)
 LOCAL_STATIC_LIBRARIES += \
     libupdate_engine \
-    $(ue_libupdate_engine_exported_static_libraries:-host=)
+    $(ue_libupdate_engine_exported_static_libraries)
 LOCAL_SHARED_LIBRARIES += \
-    $(ue_libupdate_engine_exported_shared_libraries:-host=)
+    $(ue_libupdate_engine_exported_shared_libraries)
 else  # local_use_omaha == 1
 LOCAL_STATIC_LIBRARIES += \
     libupdate_engine_android \
-    $(ue_libupdate_engine_android_exported_static_libraries:-host=)
+    $(ue_libupdate_engine_android_exported_static_libraries)
 LOCAL_SHARED_LIBRARIES += \
-    $(ue_libupdate_engine_android_exported_shared_libraries:-host=)
+    $(ue_libupdate_engine_android_exported_shared_libraries)
 endif  # local_use_omaha == 1
 
 LOCAL_INIT_RC := update_engine.rc
@@ -458,12 +472,12 @@
 
 # update_engine_sideload (type: executable)
 # ========================================================
-# A static binary equivalent to update_engine daemon that installs an update
-# from a local file directly instead of running in the background.
+# An executable equivalent to the update_engine daemon that installs an update
+# from a local file directly instead of running in the background. Used in the
+# recovery image.
 include $(CLEAR_VARS)
 LOCAL_MODULE := update_engine_sideload
-LOCAL_FORCE_STATIC_EXECUTABLE := true
-LOCAL_MODULE_PATH := $(TARGET_RECOVERY_ROOT_OUT)/sbin
+LOCAL_MODULE_PATH := $(TARGET_RECOVERY_ROOT_OUT)/system/bin
 LOCAL_MODULE_CLASS := EXECUTABLES
 LOCAL_CPP_EXTENSION := .cc
 LOCAL_CFLAGS := \
@@ -472,58 +486,50 @@
 LOCAL_CPPFLAGS := $(ue_common_cppflags)
 LOCAL_LDFLAGS := $(ue_common_ldflags)
 LOCAL_C_INCLUDES := \
-    $(ue_common_c_includes) \
-    bootable/recovery
+    $(ue_common_c_includes)
 #TODO(deymo): Remove external/cros/system_api/dbus once the strings are moved
 # out of the DBus interface.
 LOCAL_C_INCLUDES += \
     external/cros/system_api/dbus
 LOCAL_SRC_FILES := \
-    boot_control_recovery.cc \
     hardware_android.cc \
     metrics_reporter_stub.cc \
     metrics_utils.cc \
     network_selector_stub.cc \
-    proxy_resolver.cc \
     sideload_main.cc \
     update_attempter_android.cc \
-    update_status_utils.cc \
-    utils_android.cc
+    update_boot_flags_action.cc \
+    update_status_utils.cc
+# Use commonly used shared libraries. libprotobuf-cpp-lite.so is filtered out,
+# as installing it separately does not seem worthwhile given its size. Note
+# that we explicitly request their recovery variants, so that the expected files
+# will be used and installed.
+LOCAL_SHARED_LIBRARIES := \
+    libbase.recovery \
+    liblog.recovery \
+    $(filter-out libprotobuf-cpp-lite.recovery libhwbinder.recovery,$(ue_libupdate_engine_boot_control_exported_shared_libraries:=.recovery)) \
+    $(filter-out libprotobuf-cpp-lite.recovery,$(ue_libpayload_consumer_exported_shared_libraries:=.recovery))
 LOCAL_STATIC_LIBRARIES := \
-    libfs_mgr \
-    libbase \
-    liblog \
     libpayload_consumer \
+    libupdate_engine_boot_control \
     update_metadata-protos \
     $(ue_common_static_libraries) \
-    $(ue_libpayload_consumer_exported_static_libraries:-host=) \
+    $(ue_libpayload_consumer_exported_static_libraries) \
     $(ue_update_metadata_protos_exported_static_libraries)
-# We add the static versions of the shared libraries since we are forcing this
-# binary to be a static binary, so we also need to include all the static
-# library dependencies of these static libraries.
+# We add the static versions of the shared libraries that are not installed to
+# the recovery image due to size concerns. We also need to include all the
+# static library dependencies of these static libraries.
 LOCAL_STATIC_LIBRARIES += \
     $(ue_common_shared_libraries) \
-    libbase \
-    liblog \
-    $(ue_libpayload_consumer_exported_shared_libraries:-host=) \
     $(ue_update_metadata_protos_exported_shared_libraries) \
     libevent \
     libmodpb64 \
-    libgtest_prod
+    libgtest_prod \
+    libprotobuf-cpp-lite
 
-ifeq ($(strip $(PRODUCT_STATIC_BOOT_CONTROL_HAL)),)
-# No static boot_control HAL defined, so no sideload support. We use a fake
-# boot_control HAL to allow compiling update_engine_sideload for test purposes.
-ifeq ($(strip $(AB_OTA_UPDATER)),true)
-$(warning No PRODUCT_STATIC_BOOT_CONTROL_HAL configured but AB_OTA_UPDATER is \
-true, no update sideload support.)
-endif  # AB_OTA_UPDATER == true
-LOCAL_SRC_FILES += \
-    boot_control_recovery_stub.cc
-else  # PRODUCT_STATIC_BOOT_CONTROL_HAL != ""
-LOCAL_STATIC_LIBRARIES += \
-    $(PRODUCT_STATIC_BOOT_CONTROL_HAL)
-endif  # PRODUCT_STATIC_BOOT_CONTROL_HAL != ""
+ifneq ($(strip $(PRODUCT_STATIC_BOOT_CONTROL_HAL)),)
+LOCAL_REQUIRED_MODULES += android.hardware.boot@1.0-impl-wrapper.recovery
+endif
 
 include $(BUILD_EXECUTABLE)
 
@@ -611,18 +617,20 @@
 # server-side code. This is used for delta_generator and unittests but not
 # for any client code.
 ue_libpayload_generator_exported_static_libraries := \
+    libavb \
+    libbrotli \
     libbsdiff \
     libdivsufsort \
     libdivsufsort64 \
-    libbrotli \
     liblzma \
     libpayload_consumer \
     libpuffdiff \
-    libz \
+    libverity_tree \
     update_metadata-protos \
     $(ue_libpayload_consumer_exported_static_libraries) \
     $(ue_update_metadata_protos_exported_static_libraries)
 ue_libpayload_generator_exported_shared_libraries := \
+    libbase \
     libext2fs \
     $(ue_libpayload_consumer_exported_shared_libraries) \
     $(ue_update_metadata_protos_exported_shared_libraries)
@@ -632,6 +640,7 @@
     payload_generator/annotated_operation.cc \
     payload_generator/blob_file_writer.cc \
     payload_generator/block_mapping.cc \
+    payload_generator/boot_img_filesystem.cc \
     payload_generator/bzip.cc \
     payload_generator/cycle_breaker.cc \
     payload_generator/deflate_utils.cc \
@@ -646,6 +655,7 @@
     payload_generator/inplace_generator.cc \
     payload_generator/mapfile_filesystem.cc \
     payload_generator/payload_file.cc \
+    payload_generator/payload_generation_config_android.cc \
     payload_generator/payload_generation_config.cc \
     payload_generator/payload_signer.cc \
     payload_generator/raw_filesystem.cc \
@@ -665,6 +675,7 @@
 LOCAL_LDFLAGS := $(ue_common_ldflags)
 LOCAL_C_INCLUDES := $(ue_common_c_includes)
 LOCAL_STATIC_LIBRARIES := \
+    libavb \
     libbsdiff \
     libdivsufsort \
     libdivsufsort64 \
@@ -694,6 +705,7 @@
 LOCAL_LDFLAGS := $(ue_common_ldflags)
 LOCAL_C_INCLUDES := $(ue_common_c_includes)
 LOCAL_STATIC_LIBRARIES := \
+    libavb \
     libbsdiff \
     libdivsufsort \
     libdivsufsort64 \
@@ -701,12 +713,12 @@
     update_metadata-protos \
     liblzma \
     $(ue_common_static_libraries) \
-    $(ue_libpayload_consumer_exported_static_libraries:-host=) \
+    $(ue_libpayload_consumer_exported_static_libraries) \
     $(ue_update_metadata_protos_exported_static_libraries)
 LOCAL_SHARED_LIBRARIES := \
     $(ue_common_shared_libraries) \
-    $(ue_libpayload_generator_exported_shared_libraries:-host=) \
-    $(ue_libpayload_consumer_exported_shared_libraries:-host=) \
+    $(ue_libpayload_generator_exported_shared_libraries) \
+    $(ue_libpayload_consumer_exported_shared_libraries) \
     $(ue_update_metadata_protos_exported_shared_libraries)
 LOCAL_SRC_FILES := $(ue_libpayload_generator_src_files)
 include $(BUILD_STATIC_LIBRARY)
@@ -728,6 +740,7 @@
 LOCAL_LDFLAGS := $(ue_common_ldflags)
 LOCAL_C_INCLUDES := $(ue_common_c_includes)
 LOCAL_STATIC_LIBRARIES := \
+    libavb_host_sysdeps \
     libpayload_consumer \
     libpayload_generator \
     $(ue_common_static_libraries) \
@@ -737,6 +750,7 @@
     $(ue_common_shared_libraries) \
     $(ue_libpayload_consumer_exported_shared_libraries) \
     $(ue_libpayload_generator_exported_shared_libraries)
+LOCAL_SHARED_LIBRARIES := $(filter-out libfec,$(LOCAL_SHARED_LIBRARIES))
 LOCAL_SRC_FILES := $(ue_delta_generator_src_files)
 include $(BUILD_HOST_EXECUTABLE)
 endif  # HOST_OS == linux
@@ -756,12 +770,12 @@
     libpayload_consumer \
     libpayload_generator \
     $(ue_common_static_libraries) \
-    $(ue_libpayload_consumer_exported_static_libraries:-host=) \
-    $(ue_libpayload_generator_exported_static_libraries:-host=)
+    $(ue_libpayload_consumer_exported_static_libraries) \
+    $(ue_libpayload_generator_exported_static_libraries)
 LOCAL_SHARED_LIBRARIES := \
     $(ue_common_shared_libraries) \
-    $(ue_libpayload_consumer_exported_shared_libraries:-host=) \
-    $(ue_libpayload_generator_exported_shared_libraries:-host=)
+    $(ue_libpayload_consumer_exported_shared_libraries) \
+    $(ue_libpayload_generator_exported_shared_libraries)
 LOCAL_SRC_FILES := $(ue_delta_generator_src_files)
 include $(BUILD_EXECUTABLE)
 
@@ -896,10 +910,11 @@
     libgmock \
     libchrome_test_helpers \
     $(ue_common_static_libraries) \
-    $(ue_libpayload_generator_exported_static_libraries:-host=)
+    $(ue_libpayload_generator_exported_static_libraries)
 LOCAL_SHARED_LIBRARIES := \
+    libhidltransport \
     $(ue_common_shared_libraries) \
-    $(ue_libpayload_generator_exported_shared_libraries:-host=)
+    $(ue_libpayload_generator_exported_shared_libraries)
 LOCAL_SRC_FILES := \
     certificate_checker_unittest.cc \
     common/action_pipe_unittest.cc \
@@ -913,6 +928,7 @@
     common/hwid_override_unittest.cc \
     common/mock_http_fetcher.cc \
     common/prefs_unittest.cc \
+    common/proxy_resolver_unittest.cc \
     common/subprocess_unittest.cc \
     common/terminator_unittest.cc \
     common/test_utils.cc \
@@ -928,10 +944,12 @@
     payload_consumer/file_writer_unittest.cc \
     payload_consumer/filesystem_verifier_action_unittest.cc \
     payload_consumer/postinstall_runner_action_unittest.cc \
+    payload_consumer/verity_writer_android_unittest.cc \
     payload_consumer/xz_extent_writer_unittest.cc \
     payload_generator/ab_generator_unittest.cc \
     payload_generator/blob_file_writer_unittest.cc \
     payload_generator/block_mapping_unittest.cc \
+    payload_generator/boot_img_filesystem_unittest.cc \
     payload_generator/cycle_breaker_unittest.cc \
     payload_generator/deflate_utils_unittest.cc \
     payload_generator/delta_diff_utils_unittest.cc \
@@ -944,22 +962,22 @@
     payload_generator/inplace_generator_unittest.cc \
     payload_generator/mapfile_filesystem_unittest.cc \
     payload_generator/payload_file_unittest.cc \
+    payload_generator/payload_generation_config_android_unittest.cc \
     payload_generator/payload_generation_config_unittest.cc \
     payload_generator/payload_signer_unittest.cc \
     payload_generator/squashfs_filesystem_unittest.cc \
     payload_generator/tarjan_unittest.cc \
     payload_generator/topological_sort_unittest.cc \
     payload_generator/zip_unittest.cc \
-    proxy_resolver_unittest.cc \
     testrunner.cc
 ifeq ($(local_use_omaha),1)
 LOCAL_C_INCLUDES += \
     $(ue_libupdate_engine_exported_c_includes)
 LOCAL_STATIC_LIBRARIES += \
     libupdate_engine \
-    $(ue_libupdate_engine_exported_static_libraries:-host=)
+    $(ue_libupdate_engine_exported_static_libraries)
 LOCAL_SHARED_LIBRARIES += \
-    $(ue_libupdate_engine_exported_shared_libraries:-host=)
+    $(ue_libupdate_engine_exported_shared_libraries)
 LOCAL_SRC_FILES += \
     common_service_unittest.cc \
     fake_system_state.cc \
@@ -975,6 +993,7 @@
     payload_state_unittest.cc \
     parcelable_update_engine_status_unittest.cc \
     update_attempter_unittest.cc \
+    update_boot_flags_action_unittest.cc \
     update_manager/android_things_policy_unittest.cc \
     update_manager/boxed_value_unittest.cc \
     update_manager/chromeos_policy.cc \
@@ -991,16 +1010,20 @@
     update_manager/real_system_provider_unittest.cc \
     update_manager/real_time_provider_unittest.cc \
     update_manager/real_updater_provider_unittest.cc \
+    update_manager/staging_utils_unittest.cc \
     update_manager/umtest_utils.cc \
     update_manager/update_manager_unittest.cc \
-    update_manager/variable_unittest.cc
+    update_manager/update_time_restrictions_policy_impl_unittest.cc \
+    update_manager/variable_unittest.cc \
+    update_manager/weekly_time_unittest.cc
 else  # local_use_omaha == 1
 LOCAL_STATIC_LIBRARIES += \
     libupdate_engine_android \
-    $(ue_libupdate_engine_android_exported_static_libraries:-host=)
+    $(ue_libupdate_engine_android_exported_static_libraries)
 LOCAL_SHARED_LIBRARIES += \
-    $(ue_libupdate_engine_android_exported_shared_libraries:-host=)
+    $(ue_libupdate_engine_android_exported_shared_libraries)
 LOCAL_SRC_FILES += \
+    boot_control_android_unittest.cc \
     update_attempter_android_unittest.cc
 endif  # local_use_omaha == 1
 include $(BUILD_NATIVE_TEST)
@@ -1014,7 +1037,6 @@
 LOCAL_MODULE_PATH := $(TARGET_OUT_ETC)/update_engine
 LOCAL_MODULE_STEM := update-payload-key.pub.pem
 LOCAL_SRC_FILES := update_payload_key/brillo-update-payload-key.pub.pem
-LOCAL_BUILT_MODULE_STEM := update_payload_key/brillo-update-payload-key.pub.pem
 include $(BUILD_PREBUILT)
 endif  # PRODUCT_IOT
 
diff --git a/CPPLINT.cfg b/CPPLINT.cfg
index 3dd0f35..f7dde21 100644
--- a/CPPLINT.cfg
+++ b/CPPLINT.cfg
@@ -1,3 +1,4 @@
 # This should be kept in sync with platform2/CPPLINT.cfg
 set noparent
+
 filter=-build/include_order,+build/include_alpha,-build/header_guard
diff --git a/PRESUBMIT.cfg b/PRESUBMIT.cfg
index 3b8b271..f2c7831 100644
--- a/PRESUBMIT.cfg
+++ b/PRESUBMIT.cfg
@@ -3,5 +3,6 @@
 hook1=../../../platform2/common-mk/gyplint.py ${PRESUBMIT_FILES}
 
 [Hook Overrides]
+clang_format_check: true
 cros_license_check: false
 aosp_license_check: true
diff --git a/UpdateEngine.conf b/UpdateEngine.conf
index 58cca09..192e6ab 100644
--- a/UpdateEngine.conf
+++ b/UpdateEngine.conf
@@ -53,6 +53,9 @@
            send_member="SetUpdateOverCellularPermission"/>
     <allow send_destination="org.chromium.UpdateEngine"
            send_interface="org.chromium.UpdateEngineInterface"
+           send_member="SetUpdateOverCellularTarget"/>
+    <allow send_destination="org.chromium.UpdateEngine"
+           send_interface="org.chromium.UpdateEngineInterface"
            send_member="GetUpdateOverCellularPermission"/>
     <allow send_destination="org.chromium.UpdateEngine"
            send_interface="org.chromium.UpdateEngineInterface"
diff --git a/WATCHLISTS b/WATCHLISTS
deleted file mode 100644
index bcce0de..0000000
--- a/WATCHLISTS
+++ /dev/null
@@ -1,14 +0,0 @@
-# See http://dev.chromium.org/developers/contributing-code/watchlists for
-# a description of this file's format.
-# Please keep these keys in alphabetical order.
-
-{
-  'WATCHLIST_DEFINITIONS': {
-    'all': {
-      'filepath': '.',
-    },
-  },
-  'WATCHLISTS': {
-    'all': ['adlr@chromium.org', 'petkov@chromium.org']
-  },
-}
diff --git a/binder_bindings/android/brillo/IUpdateEngine.aidl b/binder_bindings/android/brillo/IUpdateEngine.aidl
index e549a4d..56e1524 100644
--- a/binder_bindings/android/brillo/IUpdateEngine.aidl
+++ b/binder_bindings/android/brillo/IUpdateEngine.aidl
@@ -34,6 +34,8 @@
   void SetP2PUpdatePermission(in boolean enabled);
   boolean GetP2PUpdatePermission();
   void SetUpdateOverCellularPermission(in boolean enabled);
+  void SetUpdateOverCellularTarget(in String target_version,
+                                   in long target_size);
   boolean GetUpdateOverCellularPermission();
   long GetDurationSinceUpdate();
   String GetPrevVersion();
diff --git a/binder_service_brillo.cc b/binder_service_brillo.cc
index 3f01e42..d082add 100644
--- a/binder_service_brillo.cc
+++ b/binder_service_brillo.cc
@@ -153,6 +153,13 @@
       &UpdateEngineService::SetUpdateOverCellularPermission, enabled);
 }
 
+Status BinderUpdateEngineBrilloService::SetUpdateOverCellularTarget(
+    const String16& target_version, int64_t target_size) {
+  return CallCommonHandler(&UpdateEngineService::SetUpdateOverCellularTarget,
+                           NormalString(target_version),
+                           target_size);
+}
+
 Status BinderUpdateEngineBrilloService::GetUpdateOverCellularPermission(
     bool* out_cellular_permission) {
   return CallCommonHandler(
diff --git a/binder_service_brillo.h b/binder_service_brillo.h
index c802fca..d0d0dc9 100644
--- a/binder_service_brillo.h
+++ b/binder_service_brillo.h
@@ -75,6 +75,8 @@
       bool* out_p2p_permission) override;
   android::binder::Status SetUpdateOverCellularPermission(
       bool enabled) override;
+  android::binder::Status SetUpdateOverCellularTarget(
+      const android::String16& target_version, int64_t target_size) override;
   android::binder::Status GetUpdateOverCellularPermission(
       bool* out_cellular_permission) override;
   android::binder::Status GetDurationSinceUpdate(
diff --git a/boot_control_android.cc b/boot_control_android.cc
index 8c1603b..ad819ad 100644
--- a/boot_control_android.cc
+++ b/boot_control_android.cc
@@ -16,24 +16,35 @@
 
 #include "update_engine/boot_control_android.h"
 
+#include <memory>
+#include <utility>
+
 #include <base/bind.h>
-#include <base/files/file_util.h>
 #include <base/logging.h>
-#include <base/strings/string_util.h>
+#include <bootloader_message/bootloader_message.h>
 #include <brillo/message_loops/message_loop.h>
+#include <fs_mgr.h>
 
 #include "update_engine/common/utils.h"
-#include "update_engine/utils_android.h"
+#include "update_engine/dynamic_partition_control_android.h"
 
 using std::string;
 
+using android::dm::DmDeviceState;
+using android::fs_mgr::MetadataBuilder;
+using android::fs_mgr::Partition;
+using android::fs_mgr::UpdatePartitionTable;
+using android::hardware::hidl_string;
 using android::hardware::Return;
 using android::hardware::boot::V1_0::BoolResult;
 using android::hardware::boot::V1_0::CommandResult;
 using android::hardware::boot::V1_0::IBootControl;
-using android::hardware::hidl_string;
+using Slot = chromeos_update_engine::BootControlInterface::Slot;
+using PartitionSizes =
+    chromeos_update_engine::BootControlInterface::PartitionSizes;
 
 namespace {
+
 auto StoreResultCallback(CommandResult* dest) {
   return [dest](const CommandResult& result) { *dest = result; };
 }
@@ -45,7 +56,7 @@
 
 // Factory defined in boot_control.h.
 std::unique_ptr<BootControlInterface> CreateBootControl() {
-  std::unique_ptr<BootControlAndroid> boot_control(new BootControlAndroid());
+  auto boot_control = std::make_unique<BootControlAndroid>();
   if (!boot_control->Init()) {
     return nullptr;
   }
@@ -63,9 +74,15 @@
 
   LOG(INFO) << "Loaded boot control hidl hal.";
 
+  dynamic_control_ = std::make_unique<DynamicPartitionControlAndroid>();
+
   return true;
 }
 
+void BootControlAndroid::Cleanup() {
+  dynamic_control_->Cleanup();
+}
+
 unsigned int BootControlAndroid::GetNumSlots() const {
   return module_->getNumberSlots();
 }
@@ -74,41 +91,9 @@
   return module_->getCurrentSlot();
 }
 
-bool BootControlAndroid::GetPartitionDevice(const string& partition_name,
-                                            Slot slot,
-                                            string* device) const {
-  // We can't use fs_mgr to look up |partition_name| because fstab
-  // doesn't list every slot partition (it uses the slotselect option
-  // to mask the suffix).
-  //
-  // We can however assume that there's an entry for the /misc mount
-  // point and use that to get the device file for the misc
-  // partition. This helps us locate the disk that |partition_name|
-  // resides on. From there we'll assume that a by-name scheme is used
-  // so we can just replace the trailing "misc" by the given
-  // |partition_name| and suffix corresponding to |slot|, e.g.
-  //
-  //   /dev/block/platform/soc.0/7824900.sdhci/by-name/misc ->
-  //   /dev/block/platform/soc.0/7824900.sdhci/by-name/boot_a
-  //
-  // If needed, it's possible to relax the by-name assumption in the
-  // future by trawling /sys/block looking for the appropriate sibling
-  // of misc and then finding an entry in /dev matching the sysfs
-  // entry.
-
-  base::FilePath misc_device;
-  if (!utils::DeviceForMountPoint("/misc", &misc_device))
-    return false;
-
-  if (!utils::IsSymlink(misc_device.value().c_str())) {
-    LOG(ERROR) << "Device file " << misc_device.value() << " for /misc "
-               << "is not a symlink.";
-    return false;
-  }
-
-  string suffix;
+bool BootControlAndroid::GetSuffix(Slot slot, string* suffix) const {
   auto store_suffix_cb = [&suffix](hidl_string cb_suffix) {
-    suffix = cb_suffix.c_str();
+    *suffix = cb_suffix.c_str();
   };
   Return<void> ret = module_->getSuffix(slot, store_suffix_cb);
 
@@ -117,9 +102,56 @@
                << SlotName(slot);
     return false;
   }
+  return true;
+}
 
-  base::FilePath path = misc_device.DirName().Append(partition_name + suffix);
-  if (!base::PathExists(path)) {
+bool BootControlAndroid::GetPartitionDevice(const string& partition_name,
+                                            Slot slot,
+                                            string* device) const {
+  string suffix;
+  if (!GetSuffix(slot, &suffix)) {
+    return false;
+  }
+
+  const string target_partition_name = partition_name + suffix;
+
+  // DeltaPerformer calls InitPartitionMetadata before calling
+  // InstallPlan::LoadPartitionsFromSlots. After InitPartitionMetadata,
+  // the partition must be re-mapped with force_writable == true. Hence,
+  // we only need to check device mapper.
+  if (dynamic_control_->IsDynamicPartitionsEnabled()) {
+    switch (dynamic_control_->GetState(target_partition_name)) {
+      case DmDeviceState::ACTIVE:
+        if (dynamic_control_->GetDmDevicePathByName(target_partition_name,
+                                                    device)) {
+          LOG(INFO) << target_partition_name
+                    << " is mapped on device mapper: " << *device;
+          return true;
+        }
+        LOG(ERROR) << target_partition_name
+                   << " is mapped but path is unknown.";
+        return false;
+
+      case DmDeviceState::INVALID:
+        // Try static partitions.
+        break;
+
+      case DmDeviceState::SUSPENDED:  // fallthrough
+      default:
+        LOG(ERROR) << target_partition_name
+                   << " is mapped on device mapper but state is unknown";
+        return false;
+    }
+  }
+
+  string device_dir_str;
+  if (!dynamic_control_->GetDeviceDir(&device_dir_str)) {
+    return false;
+  }
+
+  base::FilePath path =
+      base::FilePath(device_dir_str).Append(target_partition_name);
+  if (!dynamic_control_->DeviceExists(path.value())) {
     LOG(ERROR) << "Device file " << path.value() << " does not exist.";
     return false;
   }
@@ -191,4 +223,250 @@
          brillo::MessageLoop::kTaskIdNull;
 }
 
+namespace {
+
+// Resize |target_partition_name| in |builder| to the given |size|.
+bool ResizePartition(MetadataBuilder* builder,
+                     const string& target_partition_name,
+                     uint64_t size) {
+  Partition* partition = builder->FindPartition(target_partition_name);
+  if (partition == nullptr) {
+    LOG(ERROR) << "Cannot find " << target_partition_name << " in metadata.";
+    return false;
+  }
+
+  uint64_t old_size = partition->size();
+  const string action = "resize " + target_partition_name + " in super (" +
+                        std::to_string(old_size) + " -> " +
+                        std::to_string(size) + " bytes)";
+  if (!builder->ResizePartition(partition, size)) {
+    LOG(ERROR) << "Cannot " << action << "; see previous log messages.";
+    return false;
+  }
+
+  if (partition->size() != size) {
+    LOG(ERROR) << "Cannot " << action
+               << "; value is misaligned and partition should have been "
+               << partition->size();
+    return false;
+  }
+
+  LOG(INFO) << "Successfully " << action;
+
+  return true;
+}
+
+bool ResizePartitions(DynamicPartitionControlInterface* dynamic_control,
+                      const string& super_device,
+                      Slot target_slot,
+                      const string& target_suffix,
+                      const PartitionSizes& logical_sizes,
+                      MetadataBuilder* builder) {
+  // Delete all extents to ensure that each partition has enough space to
+  // grow.
+  for (const auto& pair : logical_sizes) {
+    const string target_partition_name = pair.first + target_suffix;
+    if (builder->FindPartition(target_partition_name) == nullptr) {
+      // Use constant GUID because it is unused.
+      LOG(INFO) << "Adding partition " << target_partition_name << " to slot "
+                << BootControlInterface::SlotName(target_slot) << " in "
+                << super_device;
+      if (builder->AddPartition(target_partition_name,
+                                LP_PARTITION_ATTR_READONLY) == nullptr) {
+        LOG(ERROR) << "Cannot add partition " << target_partition_name;
+        return false;
+      }
+    }
+    if (!ResizePartition(builder, pair.first + target_suffix, 0 /* size */)) {
+      return false;
+    }
+  }
+
+  for (const auto& pair : logical_sizes) {
+    if (!ResizePartition(builder, pair.first + target_suffix, pair.second)) {
+      LOG(ERROR) << "Not enough space?";
+      return false;
+    }
+  }
+
+  if (!dynamic_control->StoreMetadata(super_device, builder, target_slot)) {
+    return false;
+  }
+  return true;
+}
+
+// Assume upgrading from slot A to B. A partition foo is considered dynamic
+// iff one of the following:
+// 1. foo_a exists as a dynamic partition (so it should continue to be a
+//    dynamic partition)
+// 2. foo_b does not exist as a static partition (in which case we may be
+//    adding a new partition).
+bool IsDynamicPartition(DynamicPartitionControlInterface* dynamic_control,
+                        const base::FilePath& device_dir,
+                        MetadataBuilder* source_metadata,
+                        const string& partition_name,
+                        const string& source_suffix,
+                        const string& target_suffix) {
+  bool dynamic_source_exist =
+      source_metadata->FindPartition(partition_name + source_suffix) != nullptr;
+  bool static_target_exist = dynamic_control->DeviceExists(
+      device_dir.Append(partition_name + target_suffix).value());
+
+  return dynamic_source_exist || !static_target_exist;
+}
+
+bool FilterPartitionSizes(DynamicPartitionControlInterface* dynamic_control,
+                          const base::FilePath& device_dir,
+                          const PartitionSizes& partition_sizes,
+                          MetadataBuilder* source_metadata,
+                          const string& source_suffix,
+                          const string& target_suffix,
+                          PartitionSizes* logical_sizes) {
+  for (const auto& pair : partition_sizes) {
+    if (!IsDynamicPartition(dynamic_control,
+                            device_dir,
+                            source_metadata,
+                            pair.first,
+                            source_suffix,
+                            target_suffix)) {
+      // In the future we can check static partition sizes, but skip for now.
+      LOG(INFO) << pair.first << " is static; assume its size is "
+                << pair.second << " bytes.";
+      continue;
+    }
+
+    logical_sizes->insert(pair);
+  }
+  return true;
+}
+
+// Return false if the partition sizes in metadata slot |target_slot| already
+// match |logical_sizes| (format {vendor: size, ...}); in that case there is no
+// need to resize. Return true if the metadata needs to be resized or rebuilt.
+bool NeedResizePartitions(DynamicPartitionControlInterface* dynamic_control,
+                          const string& super_device,
+                          Slot target_slot,
+                          const string& suffix,
+                          const PartitionSizes& logical_sizes) {
+  auto target_metadata =
+      dynamic_control->LoadMetadataBuilder(super_device, target_slot);
+  if (target_metadata == nullptr) {
+    LOG(INFO) << "Metadata slot " << BootControlInterface::SlotName(target_slot)
+              << " in " << super_device
+              << " is corrupted; attempt to recover from source slot.";
+    return true;
+  }
+
+  for (const auto& pair : logical_sizes) {
+    Partition* partition = target_metadata->FindPartition(pair.first + suffix);
+    if (partition == nullptr) {
+      LOG(INFO) << "Cannot find " << pair.first << suffix << " at slot "
+                << BootControlInterface::SlotName(target_slot) << " in "
+                << super_device << ". Need to resize.";
+      return true;
+    }
+    if (partition->size() != pair.second) {
+      LOG(INFO) << super_device << ":"
+                << BootControlInterface::SlotName(target_slot) << ":"
+                << pair.first << suffix << ": size == " << partition->size()
+                << " but requested " << pair.second << ". Need to resize.";
+      return true;
+    }
+    LOG(INFO) << super_device << ":"
+              << BootControlInterface::SlotName(target_slot) << ":"
+              << pair.first << suffix << ": size == " << partition->size()
+              << " as requested.";
+  }
+  LOG(INFO) << "No need to resize at metadata slot "
+            << BootControlInterface::SlotName(target_slot) << " in "
+            << super_device;
+  return false;
+}
+}  // namespace
+
+bool BootControlAndroid::InitPartitionMetadata(
+    Slot target_slot, const PartitionSizes& partition_sizes) {
+  if (!dynamic_control_->IsDynamicPartitionsEnabled()) {
+    return true;
+  }
+
+  string device_dir_str;
+  if (!dynamic_control_->GetDeviceDir(&device_dir_str)) {
+    return false;
+  }
+  base::FilePath device_dir(device_dir_str);
+  string super_device =
+      device_dir.Append(fs_mgr_get_super_partition_name()).value();
+
+  Slot current_slot = GetCurrentSlot();
+  if (target_slot == current_slot) {
+    LOG(ERROR) << "Cannot call InitPartitionMetadata on current slot.";
+    return false;
+  }
+
+  string current_suffix;
+  if (!GetSuffix(current_slot, &current_suffix)) {
+    return false;
+  }
+
+  string target_suffix;
+  if (!GetSuffix(target_slot, &target_suffix)) {
+    return false;
+  }
+
+  auto builder =
+      dynamic_control_->LoadMetadataBuilder(super_device, current_slot);
+  if (builder == nullptr) {
+    return false;
+  }
+
+  // Read metadata from current slot to determine which partitions are logical
+  // and may be resized. Do not read from target slot because metadata at
+  // target slot may be corrupted.
+  PartitionSizes logical_sizes;
+  if (!FilterPartitionSizes(dynamic_control_.get(),
+                            device_dir,
+                            partition_sizes,
+                            builder.get() /* source metadata */,
+                            current_suffix,
+                            target_suffix,
+                            &logical_sizes)) {
+    return false;
+  }
+
+  // Read metadata from target slot to determine if the sizes are correct. Only
+  // test logical partitions.
+  if (NeedResizePartitions(dynamic_control_.get(),
+                           super_device,
+                           target_slot,
+                           target_suffix,
+                           logical_sizes)) {
+    if (!ResizePartitions(dynamic_control_.get(),
+                          super_device,
+                          target_slot,
+                          target_suffix,
+                          logical_sizes,
+                          builder.get())) {
+      return false;
+    }
+  }
+
+  // Unmap all partitions, and remap partitions if size is non-zero.
+  for (const auto& pair : logical_sizes) {
+    if (!dynamic_control_->UnmapPartitionOnDeviceMapper(
+            pair.first + target_suffix, true /* wait */)) {
+      return false;
+    }
+    if (pair.second == 0) {
+      continue;
+    }
+    string map_path;
+    if (!dynamic_control_->MapPartitionOnDeviceMapper(
+            super_device, pair.first + target_suffix, target_slot, &map_path)) {
+      return false;
+    }
+  }
+  return true;
+}
+
 }  // namespace chromeos_update_engine
diff --git a/boot_control_android.h b/boot_control_android.h
index 1de0e41..24ab4dc 100644
--- a/boot_control_android.h
+++ b/boot_control_android.h
@@ -17,11 +17,16 @@
 #ifndef UPDATE_ENGINE_BOOT_CONTROL_ANDROID_H_
 #define UPDATE_ENGINE_BOOT_CONTROL_ANDROID_H_
 
+#include <map>
+#include <memory>
 #include <string>
 
 #include <android/hardware/boot/1.0/IBootControl.h>
+#include <base/files/file_util.h>
+#include <liblp/builder.h>
 
 #include "update_engine/common/boot_control.h"
+#include "update_engine/dynamic_partition_control_interface.h"
 
 namespace chromeos_update_engine {
 
@@ -46,9 +51,18 @@
   bool MarkSlotUnbootable(BootControlInterface::Slot slot) override;
   bool SetActiveBootSlot(BootControlInterface::Slot slot) override;
   bool MarkBootSuccessfulAsync(base::Callback<void(bool)> callback) override;
+  bool InitPartitionMetadata(Slot slot,
+                             const PartitionSizes& partition_sizes) override;
+  void Cleanup() override;
 
  private:
   ::android::sp<::android::hardware::boot::V1_0::IBootControl> module_;
+  std::unique_ptr<DynamicPartitionControlInterface> dynamic_control_;
+
+  friend class BootControlAndroidTest;
+
+  // Wrapper method of IBootControl::getSuffix().
+  bool GetSuffix(Slot slot, std::string* out) const;
 
   DISALLOW_COPY_AND_ASSIGN(BootControlAndroid);
 };
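
As a reading aid (an editorial sketch, not code from this change): the comment added to GetPartitionDevice() notes that DeltaPerformer calls InitPartitionMetadata before loading partition devices. A hypothetical caller would therefore initialize the target metadata slot first, which resizes the dynamic partitions and maps them through device-mapper, and only then resolve device paths. The helper name, partition names, and sizes below are invented for illustration; only the BootControlAndroid methods come from this patch.

#include <cstdint>
#include <string>

#include "update_engine/boot_control_android.h"

namespace chromeos_update_engine {

// Hypothetical caller, for illustration only.
bool PrepareTargetSlot(BootControlAndroid* boot_control,
                       BootControlInterface::Slot target_slot) {
  // 1. Create/resize the dynamic partitions in the target metadata slot and
  //    map them through device-mapper (sizes are arbitrary example values).
  BootControlInterface::PartitionSizes sizes = {
      {"system", uint64_t{3} << 30},  // 3 GiB
      {"vendor", uint64_t{1} << 30},  // 1 GiB
  };
  if (!boot_control->InitPartitionMetadata(target_slot, sizes))
    return false;

  // 2. Only afterwards look up the block device; for a dynamic partition this
  //    is the device-mapper path, for a static partition the by-name path.
  std::string device;
  return boot_control->GetPartitionDevice("system", target_slot, &device);
}

}  // namespace chromeos_update_engine
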
diff --git a/boot_control_android_unittest.cc b/boot_control_android_unittest.cc
new file mode 100644
index 0000000..3e01be2
--- /dev/null
+++ b/boot_control_android_unittest.cc
@@ -0,0 +1,670 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/boot_control_android.h"
+
+#include <set>
+
+#include <android-base/strings.h>
+#include <fs_mgr.h>
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+
+#include "update_engine/mock_boot_control_hal.h"
+#include "update_engine/mock_dynamic_partition_control.h"
+
+using android::base::Join;
+using android::fs_mgr::MetadataBuilder;
+using android::hardware::Void;
+using testing::_;
+using testing::AnyNumber;
+using testing::Contains;
+using testing::Eq;
+using testing::Invoke;
+using testing::Key;
+using testing::MakeMatcher;
+using testing::Matcher;
+using testing::MatcherInterface;
+using testing::MatchResultListener;
+using testing::NiceMock;
+using testing::Return;
+
+namespace chromeos_update_engine {
+
+constexpr const uint32_t kMaxNumSlots = 2;
+constexpr const char* kSlotSuffixes[kMaxNumSlots] = {"_a", "_b"};
+constexpr const char* kFakeDevicePath = "/fake/dev/path/";
+constexpr const char* kFakeMappedPath = "/fake/mapped/path/";
+constexpr const uint32_t kFakeMetadataSize = 65536;
+
+// A map describing the size of each partition.
+using PartitionSizes = std::map<std::string, uint64_t>;
+
+// C++ standards do not allow uint64_t (aka unsigned long) to be the parameter
+// of user-defined literal operators.
+unsigned long long operator"" _MiB(unsigned long long x) {  // NOLINT
+  return x << 20;
+}
+unsigned long long operator"" _GiB(unsigned long long x) {  // NOLINT
+  return x << 30;
+}
+
+template <typename U, typename V>
+std::ostream& operator<<(std::ostream& os, const std::map<U, V>& param) {
+  os << "{";
+  bool first = true;
+  for (const auto& pair : param) {
+    if (!first)
+      os << ", ";
+    os << pair.first << ":" << pair.second;
+    first = false;
+  }
+  return os << "}";
+}
+
+inline std::string GetDevice(const std::string& name) {
+  return kFakeDevicePath + name;
+}
+inline std::string GetSuperDevice() {
+  return GetDevice(fs_mgr_get_super_partition_name());
+}
+
+struct TestParam {
+  uint32_t source;
+  uint32_t target;
+};
+std::ostream& operator<<(std::ostream& os, const TestParam& param) {
+  return os << "{source: " << param.source << ", target:" << param.target
+            << "}";
+}
+
+std::unique_ptr<MetadataBuilder> NewFakeMetadata(const PartitionSizes& sizes) {
+  auto builder = MetadataBuilder::New(10_GiB, kFakeMetadataSize, kMaxNumSlots);
+  EXPECT_NE(nullptr, builder);
+  if (builder == nullptr)
+    return nullptr;
+  for (const auto& pair : sizes) {
+    auto p = builder->AddPartition(pair.first, 0 /* attr */);
+    EXPECT_TRUE(p && builder->ResizePartition(p, pair.second));
+  }
+  return builder;
+}
+
+class MetadataMatcher : public MatcherInterface<MetadataBuilder*> {
+ public:
+  explicit MetadataMatcher(const PartitionSizes& partition_sizes)
+      : partition_sizes_(partition_sizes) {}
+  bool MatchAndExplain(MetadataBuilder* metadata,
+                       MatchResultListener* listener) const override {
+    bool success = true;
+    for (const auto& pair : partition_sizes_) {
+      auto p = metadata->FindPartition(pair.first);
+      if (p == nullptr) {
+        if (success)
+          *listener << "; ";
+        *listener << "No partition " << pair.first;
+        success = false;
+        continue;
+      }
+      if (p->size() != pair.second) {
+        if (success)
+          *listener << "; ";
+        *listener << "Partition " << pair.first << " has size " << p->size()
+                  << ", expected " << pair.second;
+        success = false;
+      }
+    }
+    return success;
+  }
+
+  void DescribeTo(std::ostream* os) const override {
+    *os << "expect: " << partition_sizes_;
+  }
+
+  void DescribeNegationTo(std::ostream* os) const override {
+    *os << "expect not: " << partition_sizes_;
+  }
+
+ private:
+  PartitionSizes partition_sizes_;
+};
+
+inline Matcher<MetadataBuilder*> MetadataMatches(
+    const PartitionSizes& partition_sizes) {
+  return MakeMatcher(new MetadataMatcher(partition_sizes));
+}
+
+class BootControlAndroidTest : public ::testing::Test {
+ protected:
+  void SetUp() override {
+    // Fake init bootctl_
+    bootctl_.module_ = new NiceMock<MockBootControlHal>();
+    bootctl_.dynamic_control_ =
+        std::make_unique<NiceMock<MockDynamicPartitionControl>>();
+
+    ON_CALL(module(), getNumberSlots()).WillByDefault(Invoke([] {
+      return kMaxNumSlots;
+    }));
+    ON_CALL(module(), getSuffix(_, _))
+        .WillByDefault(Invoke([](auto slot, auto cb) {
+          EXPECT_LE(slot, kMaxNumSlots);
+          cb(slot < kMaxNumSlots ? kSlotSuffixes[slot] : "");
+          return Void();
+        }));
+
+    ON_CALL(dynamicControl(), IsDynamicPartitionsEnabled())
+        .WillByDefault(Return(true));
+    ON_CALL(dynamicControl(), GetDeviceDir(_))
+        .WillByDefault(Invoke([](auto path) {
+          *path = kFakeDevicePath;
+          return true;
+        }));
+  }
+
+  // Return the mocked HAL module.
+  NiceMock<MockBootControlHal>& module() {
+    return static_cast<NiceMock<MockBootControlHal>&>(*bootctl_.module_);
+  }
+
+  // Return the mocked DynamicPartitionControlInterface.
+  NiceMock<MockDynamicPartitionControl>& dynamicControl() {
+    return static_cast<NiceMock<MockDynamicPartitionControl>&>(
+        *bootctl_.dynamic_control_);
+  }
+
+  // Set the fake metadata to return when LoadMetadataBuilder is called on
+  // |slot|.
+  void SetMetadata(uint32_t slot, const PartitionSizes& sizes) {
+    EXPECT_CALL(dynamicControl(), LoadMetadataBuilder(GetSuperDevice(), slot))
+        .WillOnce(
+            Invoke([sizes](auto, auto) { return NewFakeMetadata(sizes); }));
+  }
+
+  // Expect that MapPartitionOnDeviceMapper is called on target() metadata slot
+  // with each partition in |partitions|.
+  void ExpectMap(const std::set<std::string>& partitions) {
+    // Error when MapPartitionOnDeviceMapper is called on unknown arguments.
+    ON_CALL(dynamicControl(), MapPartitionOnDeviceMapper(_, _, _, _))
+        .WillByDefault(Return(false));
+
+    for (const auto& partition : partitions) {
+      EXPECT_CALL(
+          dynamicControl(),
+          MapPartitionOnDeviceMapper(GetSuperDevice(), partition, target(), _))
+          .WillOnce(Invoke([this](auto, auto partition, auto, auto path) {
+            auto it = mapped_devices_.find(partition);
+            if (it != mapped_devices_.end()) {
+              *path = it->second;
+              return true;
+            }
+            mapped_devices_[partition] = *path = kFakeMappedPath + partition;
+            return true;
+          }));
+    }
+  }
+
+  // Expect that UnmapPartitionOnDeviceMapper is called on target() metadata
+  // slot with each partition in |partitions|.
+  void ExpectUnmap(const std::set<std::string>& partitions) {
+    // Error when UnmapPartitionOnDeviceMapper is called on unknown arguments.
+    ON_CALL(dynamicControl(), UnmapPartitionOnDeviceMapper(_, _))
+        .WillByDefault(Return(false));
+
+    for (const auto& partition : partitions) {
+      EXPECT_CALL(dynamicControl(), UnmapPartitionOnDeviceMapper(partition, _))
+          .WillOnce(Invoke([this](auto partition, auto) {
+            mapped_devices_.erase(partition);
+            return true;
+          }));
+    }
+  }
+
+  void ExpectRemap(const std::set<std::string>& partitions) {
+    ExpectUnmap(partitions);
+    ExpectMap(partitions);
+  }
+
+  void ExpectDevicesAreMapped(const std::set<std::string>& partitions) {
+    ASSERT_EQ(partitions.size(), mapped_devices_.size());
+    for (const auto& partition : partitions) {
+      EXPECT_THAT(mapped_devices_, Contains(Key(Eq(partition))))
+          << "Expect that " << partition << " is mapped, but it is not.";
+    }
+  }
+
+  uint32_t source() { return slots_.source; }
+
+  uint32_t target() { return slots_.target; }
+
+  // Return partition names with suffix of source().
+  std::string S(const std::string& name) {
+    return name + std::string(kSlotSuffixes[source()]);
+  }
+
+  // Return partition names with suffix of target().
+  std::string T(const std::string& name) {
+    return name + std::string(kSlotSuffixes[target()]);
+  }
+
+  // Set source and target slots to use before testing.
+  void SetSlots(const TestParam& slots) {
+    slots_ = slots;
+
+    ON_CALL(module(), getCurrentSlot()).WillByDefault(Invoke([this] {
+      return source();
+    }));
+    // Should not store metadata to source slot.
+    EXPECT_CALL(dynamicControl(), StoreMetadata(GetSuperDevice(), _, source()))
+        .Times(0);
+  }
+
+  BootControlAndroid bootctl_;  // BootControlAndroid under test.
+  TestParam slots_;
+  // mapped devices through MapPartitionOnDeviceMapper.
+  std::map<std::string, std::string> mapped_devices_;
+};
+
+class BootControlAndroidTestP
+    : public BootControlAndroidTest,
+      public ::testing::WithParamInterface<TestParam> {
+ public:
+  void SetUp() override {
+    BootControlAndroidTest::SetUp();
+    SetSlots(GetParam());
+  }
+};
+
+// Test no resize if no dynamic partitions at all.
+TEST_P(BootControlAndroidTestP, NoResizeIfNoDynamicPartitions) {
+  SetMetadata(source(), {});
+  SetMetadata(target(), {});
+  // Should not need to resize and store metadata
+  EXPECT_CALL(dynamicControl(), StoreMetadata(GetSuperDevice(), _, target()))
+      .Times(0);
+  EXPECT_CALL(dynamicControl(), DeviceExists(Eq(GetDevice("static_a"))))
+      .Times(AnyNumber())
+      .WillRepeatedly(Return(true));
+  EXPECT_CALL(dynamicControl(), DeviceExists(Eq(GetDevice("static_b"))))
+      .Times(AnyNumber())
+      .WillRepeatedly(Return(true));
+
+  EXPECT_TRUE(bootctl_.InitPartitionMetadata(target(), {{"static", 1_GiB}}));
+  ExpectDevicesAreMapped({});
+}
+
+// Test no resize if update manifest does not contain any dynamic partitions
+TEST_P(BootControlAndroidTestP, NoResizeIfEmptyMetadata) {
+  SetMetadata(source(),
+              {{S("system"), 4_GiB},
+               {S("vendor"), 100_MiB},
+               {T("system"), 3_GiB},
+               {T("vendor"), 150_MiB}});
+  SetMetadata(target(),
+              {{S("system"), 2_GiB},
+               {S("vendor"), 1_GiB},
+               {T("system"), 3_GiB},
+               {T("vendor"), 150_MiB}});
+  // Should not need to resize and store metadata
+  EXPECT_CALL(dynamicControl(), StoreMetadata(GetSuperDevice(), _, target()))
+      .Times(0);
+  EXPECT_CALL(dynamicControl(), DeviceExists(Eq(GetDevice("static_a"))))
+      .Times(AnyNumber())
+      .WillRepeatedly(Return(true));
+  EXPECT_CALL(dynamicControl(), DeviceExists(Eq(GetDevice("static_b"))))
+      .Times(AnyNumber())
+      .WillRepeatedly(Return(true));
+
+  EXPECT_TRUE(bootctl_.InitPartitionMetadata(target(), {{"static", 1_GiB}}));
+  ExpectDevicesAreMapped({});
+}
+
+// Do not resize if manifest size matches size in target metadata. When resuming
+// from an update, do not redo the resize if not needed.
+TEST_P(BootControlAndroidTestP, NoResizeIfSizeMatchWhenResizing) {
+  SetMetadata(source(), {{S("system"), 2_GiB}, {S("vendor"), 1_GiB}});
+  SetMetadata(target(),
+              {{S("system"), 2_GiB},
+               {S("vendor"), 1_GiB},
+               {T("system"), 3_GiB},
+               {T("vendor"), 1_GiB}});
+  // Should not need to resize and store metadata
+  EXPECT_CALL(dynamicControl(), StoreMetadata(GetSuperDevice(), _, target()))
+      .Times(0);
+  ExpectRemap({T("system"), T("vendor")});
+
+  EXPECT_TRUE(bootctl_.InitPartitionMetadata(
+      target(), {{"system", 3_GiB}, {"vendor", 1_GiB}}));
+  ExpectDevicesAreMapped({T("system"), T("vendor")});
+}
+
+// Do not resize if manifest size matches size in target metadata. When resuming
+// from an update, do not redo the resize if not needed.
+TEST_P(BootControlAndroidTestP, NoResizeIfSizeMatchWhenAdding) {
+  SetMetadata(source(), {{S("system"), 2_GiB}, {T("system"), 2_GiB}});
+  SetMetadata(
+      target(),
+      {{S("system"), 2_GiB}, {T("system"), 2_GiB}, {T("vendor"), 1_GiB}});
+  // Should not need to resize and store metadata
+  EXPECT_CALL(dynamicControl(), StoreMetadata(GetSuperDevice(), _, target()))
+      .Times(0);
+  ExpectRemap({T("system"), T("vendor")});
+
+  EXPECT_TRUE(bootctl_.InitPartitionMetadata(
+      target(), {{"system", 2_GiB}, {"vendor", 1_GiB}}));
+  ExpectDevicesAreMapped({T("system"), T("vendor")});
+}
+
+// Do not resize if manifest size matches size in target metadata. When resuming
+// from an update, do not redo the resize if not needed.
+TEST_P(BootControlAndroidTestP, NoResizeIfSizeMatchWhenDeleting) {
+  SetMetadata(source(),
+              {{S("system"), 2_GiB},
+               {S("vendor"), 1_GiB},
+               {T("system"), 2_GiB},
+               {T("vendor"), 1_GiB}});
+  SetMetadata(target(),
+              {{S("system"), 2_GiB},
+               {S("vendor"), 1_GiB},
+               {T("system"), 2_GiB},
+               {T("vendor"), 0}});
+  // Should not need to resize and store metadata
+  EXPECT_CALL(dynamicControl(), StoreMetadata(GetSuperDevice(), _, target()))
+      .Times(0);
+  ExpectUnmap({T("system"), T("vendor")});
+  ExpectMap({T("system")});
+
+  EXPECT_TRUE(bootctl_.InitPartitionMetadata(
+      target(), {{"system", 2_GiB}, {"vendor", 0}}));
+  ExpectDevicesAreMapped({T("system")});
+}
+
+// Test resize case. Grow if target metadata contains a partition with a size
+// less than expected.
+TEST_P(BootControlAndroidTestP, NeedGrowIfSizeNotMatchWhenResizing) {
+  PartitionSizes initial{{S("system"), 2_GiB},
+                         {S("vendor"), 1_GiB},
+                         {T("system"), 2_GiB},
+                         {T("vendor"), 1_GiB}};
+  SetMetadata(source(), initial);
+  SetMetadata(target(), initial);
+  EXPECT_CALL(dynamicControl(),
+              StoreMetadata(GetSuperDevice(),
+                            MetadataMatches({{S("system"), 2_GiB},
+                                             {S("vendor"), 1_GiB},
+                                             {T("system"), 3_GiB},
+                                             {T("vendor"), 1_GiB}}),
+                            target()))
+      .WillOnce(Return(true));
+  ExpectRemap({T("system"), T("vendor")});
+
+  EXPECT_TRUE(bootctl_.InitPartitionMetadata(
+      target(), {{"system", 3_GiB}, {"vendor", 1_GiB}}));
+  ExpectDevicesAreMapped({T("system"), T("vendor")});
+}
+
+// Test resize case. Shrink if target metadata contains a partition with a size
+// greater than expected.
+TEST_P(BootControlAndroidTestP, NeedShrinkIfSizeNotMatchWhenResizing) {
+  PartitionSizes initial{{S("system"), 2_GiB},
+                         {S("vendor"), 1_GiB},
+                         {T("system"), 2_GiB},
+                         {T("vendor"), 1_GiB}};
+  SetMetadata(source(), initial);
+  SetMetadata(target(), initial);
+  EXPECT_CALL(dynamicControl(),
+              StoreMetadata(GetSuperDevice(),
+                            MetadataMatches({{S("system"), 2_GiB},
+                                             {S("vendor"), 1_GiB},
+                                             {T("system"), 2_GiB},
+                                             {T("vendor"), 150_MiB}}),
+                            target()))
+      .WillOnce(Return(true));
+  ExpectRemap({T("system"), T("vendor")});
+
+  EXPECT_TRUE(bootctl_.InitPartitionMetadata(
+      target(), {{"system", 2_GiB}, {"vendor", 150_MiB}}));
+  ExpectDevicesAreMapped({T("system"), T("vendor")});
+}
+
+// Test adding partitions on the first run.
+TEST_P(BootControlAndroidTestP, AddPartitionToEmptyMetadata) {
+  SetMetadata(source(), {});
+  SetMetadata(target(), {});
+  EXPECT_CALL(dynamicControl(),
+              StoreMetadata(
+                  GetSuperDevice(),
+                  MetadataMatches({{T("system"), 2_GiB}, {T("vendor"), 1_GiB}}),
+                  target()))
+      .WillOnce(Return(true));
+  ExpectRemap({T("system"), T("vendor")});
+
+  EXPECT_TRUE(bootctl_.InitPartitionMetadata(
+      target(), {{"system", 2_GiB}, {"vendor", 1_GiB}}));
+  ExpectDevicesAreMapped({T("system"), T("vendor")});
+}
+
+// Test subsequent add case.
+TEST_P(BootControlAndroidTestP, AddAdditionalPartition) {
+  SetMetadata(source(), {{S("system"), 2_GiB}, {T("system"), 2_GiB}});
+  SetMetadata(target(), {{S("system"), 2_GiB}, {T("system"), 2_GiB}});
+  EXPECT_CALL(dynamicControl(),
+              StoreMetadata(GetSuperDevice(),
+                            MetadataMatches({{S("system"), 2_GiB},
+                                             {T("system"), 2_GiB},
+                                             {T("vendor"), 1_GiB}}),
+                            target()))
+      .WillOnce(Return(true));
+  ExpectRemap({T("system"), T("vendor")});
+
+  EXPECT_TRUE(bootctl_.InitPartitionMetadata(
+      target(), {{"system", 2_GiB}, {"vendor", 1_GiB}}));
+  ExpectDevicesAreMapped({T("system"), T("vendor")});
+}
+
+// Test delete one partition.
+TEST_P(BootControlAndroidTestP, DeletePartition) {
+  PartitionSizes initial{{S("system"), 2_GiB},
+                         {S("vendor"), 1_GiB},
+                         {T("system"), 2_GiB},
+                         {T("vendor"), 1_GiB}};
+  SetMetadata(source(), initial);
+  SetMetadata(target(), initial);
+  EXPECT_CALL(dynamicControl(),
+              StoreMetadata(GetSuperDevice(),
+                            MetadataMatches({{S("system"), 2_GiB},
+                                             {S("vendor"), 1_GiB},
+                                             {T("system"), 2_GiB},
+                                             {T("vendor"), 0}}),
+                            target()))
+      .WillOnce(Return(true));
+  ExpectUnmap({T("system"), T("vendor")});
+  ExpectMap({T("system")});
+
+  EXPECT_TRUE(bootctl_.InitPartitionMetadata(
+      target(), {{"system", 2_GiB}, {"vendor", 0}}));
+  ExpectDevicesAreMapped({T("system")});
+}
+
+// Test delete all partitions.
+TEST_P(BootControlAndroidTestP, DeleteAll) {
+  PartitionSizes initial{{S("system"), 2_GiB},
+                         {S("vendor"), 1_GiB},
+                         {T("system"), 2_GiB},
+                         {T("vendor"), 1_GiB}};
+  SetMetadata(source(), initial);
+  SetMetadata(target(), initial);
+  EXPECT_CALL(dynamicControl(),
+              StoreMetadata(GetSuperDevice(),
+                            MetadataMatches({{S("system"), 2_GiB},
+                                             {S("vendor"), 1_GiB},
+                                             {T("system"), 0},
+                                             {T("vendor"), 0}}),
+                            target()))
+      .WillOnce(Return(true));
+  ExpectUnmap({T("system"), T("vendor")});
+  ExpectMap({});
+
+  EXPECT_TRUE(
+      bootctl_.InitPartitionMetadata(target(), {{"system", 0}, {"vendor", 0}}));
+  ExpectDevicesAreMapped({});
+}
+
+// Test the corrupt source metadata case. This shouldn't happen in practice,
+// because the device has already booted normally from the source slot.
+TEST_P(BootControlAndroidTestP, CorruptedSourceMetadata) {
+  EXPECT_CALL(dynamicControl(), LoadMetadataBuilder(GetSuperDevice(), source()))
+      .WillOnce(Invoke([](auto, auto) { return nullptr; }));
+  EXPECT_FALSE(bootctl_.InitPartitionMetadata(target(), {}))
+      << "Should not be able to continue with corrupt source metadata";
+}
+
+// Test corrupt target metadata case. This may happen in practice.
+// BootControlAndroid should copy from source metadata and make necessary
+// modifications on it.
+TEST_P(BootControlAndroidTestP, CorruptedTargetMetadata) {
+  SetMetadata(source(),
+              {{S("system"), 2_GiB},
+               {S("vendor"), 1_GiB},
+               {T("system"), 0},
+               {T("vendor"), 0}});
+  EXPECT_CALL(dynamicControl(), LoadMetadataBuilder(GetSuperDevice(), target()))
+      .WillOnce(Invoke([](auto, auto) { return nullptr; }));
+  EXPECT_CALL(dynamicControl(),
+              StoreMetadata(GetSuperDevice(),
+                            MetadataMatches({{S("system"), 2_GiB},
+                                             {S("vendor"), 1_GiB},
+                                             {T("system"), 3_GiB},
+                                             {T("vendor"), 150_MiB}}),
+                            target()))
+      .WillOnce(Return(true));
+  ExpectRemap({T("system"), T("vendor")});
+  EXPECT_TRUE(bootctl_.InitPartitionMetadata(
+      target(), {{"system", 3_GiB}, {"vendor", 150_MiB}}));
+  ExpectDevicesAreMapped({T("system"), T("vendor")});
+}
+
+// Test that InitPartitionMetadata fails if there is not enough space on the
+// device.
+TEST_P(BootControlAndroidTestP, NotEnoughSpace) {
+  PartitionSizes initial{{S("system"), 3_GiB},
+                         {S("vendor"), 2_GiB},
+                         {T("system"), 0},
+                         {T("vendor"), 0}};
+  SetMetadata(source(), initial);
+  SetMetadata(target(), initial);
+  EXPECT_FALSE(bootctl_.InitPartitionMetadata(
+      target(), {{"system", 3_GiB}, {"vendor", 3_GiB}}))
+      << "Should not be able to fit 11GiB data into 10GiB space";
+}
+
+INSTANTIATE_TEST_CASE_P(ParamTest,
+                        BootControlAndroidTestP,
+                        testing::Values(TestParam{0, 1}, TestParam{1, 0}));
+
+const PartitionSizes update_sizes_0() {
+  return {{"grown_a", 2_GiB},
+          {"shrunk_a", 1_GiB},
+          {"same_a", 100_MiB},
+          {"deleted_a", 150_MiB},
+          {"grown_b", 200_MiB},
+          {"shrunk_b", 0},
+          {"same_b", 0}};
+}
+
+const PartitionSizes update_sizes_1() {
+  return {
+      {"grown_a", 2_GiB},
+      {"shrunk_a", 1_GiB},
+      {"same_a", 100_MiB},
+      {"deleted_a", 150_MiB},
+      {"grown_b", 3_GiB},
+      {"shrunk_b", 150_MiB},
+      {"same_b", 100_MiB},
+      {"added_b", 150_MiB},
+      {"deleted_b", 0},
+  };
+}
+
+const PartitionSizes update_sizes_2() {
+  return {{"grown_a", 4_GiB},
+          {"shrunk_a", 100_MiB},
+          {"same_a", 100_MiB},
+          {"added_a", 0_MiB},
+          {"deleted_a", 64_MiB},
+          {"grown_b", 3_GiB},
+          {"shrunk_b", 150_MiB},
+          {"same_b", 100_MiB},
+          {"added_b", 150_MiB},
+          {"deleted_b", 0}};
+}
+
+// Test case for the first update after the device is manufactured, in which
+// case the "other" slot is likely of size "0" (except system, which is
+// non-zero because of the system_other partition).
+TEST_F(BootControlAndroidTest, SimulatedFirstUpdate) {
+  SetSlots({0, 1});
+
+  SetMetadata(source(), update_sizes_0());
+  SetMetadata(target(), update_sizes_0());
+  EXPECT_CALL(
+      dynamicControl(),
+      StoreMetadata(
+          GetSuperDevice(), MetadataMatches(update_sizes_1()), target()))
+      .WillOnce(Return(true));
+  ExpectUnmap({"grown_b", "shrunk_b", "same_b", "added_b", "deleted_b"});
+  ExpectMap({"grown_b", "shrunk_b", "same_b", "added_b"});
+
+  EXPECT_TRUE(bootctl_.InitPartitionMetadata(target(),
+                                             {{"grown", 3_GiB},
+                                              {"shrunk", 150_MiB},
+                                              {"same", 100_MiB},
+                                              {"added", 150_MiB},
+                                              {"deleted", 0_MiB}}));
+  ExpectDevicesAreMapped({"grown_b", "shrunk_b", "same_b", "added_b"});
+}
+
+// After first update, test for the second update. In the second update, the
+// "added" partition is deleted and "deleted" partition is re-added.
+TEST_F(BootControlAndroidTest, SimulatedSecondUpdate) {
+  SetSlots({1, 0});
+
+  SetMetadata(source(), update_sizes_1());
+  SetMetadata(target(), update_sizes_0());
+
+  EXPECT_CALL(
+      dynamicControl(),
+      StoreMetadata(
+          GetSuperDevice(), MetadataMatches(update_sizes_2()), target()))
+      .WillOnce(Return(true));
+  ExpectUnmap({"grown_a", "shrunk_a", "same_a", "added_a", "deleted_a"});
+  ExpectMap({"grown_a", "shrunk_a", "same_a", "deleted_a"});
+
+  EXPECT_TRUE(bootctl_.InitPartitionMetadata(target(),
+                                             {{"grown", 4_GiB},
+                                              {"shrunk", 100_MiB},
+                                              {"same", 100_MiB},
+                                              {"added", 0_MiB},
+                                              {"deleted", 64_MiB}}));
+  ExpectDevicesAreMapped({"grown_a", "shrunk_a", "same_a", "deleted_a"});
+}
+
+TEST_F(BootControlAndroidTest, ApplyingToCurrentSlot) {
+  SetSlots({1, 1});
+  EXPECT_FALSE(bootctl_.InitPartitionMetadata(target(), {}))
+      << "Should not be able to apply to current slot.";
+}
+
+}  // namespace chromeos_update_engine
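The size literals (100_MiB, 2_GiB) and the S()/T() helpers used throughout the tests above are defined earlier in this unittest file and are not part of this hunk. A minimal sketch of what such helpers could look like, assuming the conventional "_a"/"_b" slot-suffix naming; the actual definitions in the file may differ:

    #include <cstdint>
    #include <string>

    // Hypothetical stand-ins for the helpers the tests above rely on.
    constexpr uint64_t operator"" _MiB(unsigned long long x) { return x << 20; }
    constexpr uint64_t operator"" _GiB(unsigned long long x) { return x << 30; }

    // Append the conventional A/B slot suffix to a logical partition name,
    // e.g. SuffixedName("system", 0) == "system_a".
    std::string SuffixedName(const std::string& name, unsigned int slot) {
      return name + (slot == 0 ? "_a" : "_b");
    }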
diff --git a/boot_control_chromeos.cc b/boot_control_chromeos.cc
index aa94d3c..68ac5bd 100644
--- a/boot_control_chromeos.cc
+++ b/boot_control_chromeos.cc
@@ -16,7 +16,9 @@
 
 #include "update_engine/boot_control_chromeos.h"
 
+#include <memory>
 #include <string>
+#include <utility>
 
 #include <base/bind.h>
 #include <base/files/file_path.h>
@@ -300,4 +302,11 @@
   return -1;
 }
 
+bool BootControlChromeOS::InitPartitionMetadata(
+    Slot slot, const PartitionSizes& partition_sizes) {
+  return true;
+}
+
+void BootControlChromeOS::Cleanup() {}
+
 }  // namespace chromeos_update_engine
diff --git a/boot_control_chromeos.h b/boot_control_chromeos.h
index a1d57fe..d7bab05 100644
--- a/boot_control_chromeos.h
+++ b/boot_control_chromeos.h
@@ -50,6 +50,9 @@
   bool MarkSlotUnbootable(BootControlInterface::Slot slot) override;
   bool SetActiveBootSlot(BootControlInterface::Slot slot) override;
   bool MarkBootSuccessfulAsync(base::Callback<void(bool)> callback) override;
+  bool InitPartitionMetadata(Slot slot,
+                             const PartitionSizes& partition_sizes) override;
+  void Cleanup() override;
 
  private:
   friend class BootControlChromeOSTest;
diff --git a/boot_control_recovery.cc b/boot_control_recovery.cc
deleted file mode 100644
index b74f4aa..0000000
--- a/boot_control_recovery.cc
+++ /dev/null
@@ -1,181 +0,0 @@
-//
-// Copyright (C) 2015 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#include "update_engine/boot_control_recovery.h"
-
-#include <base/bind.h>
-#include <base/files/file_util.h>
-#include <base/logging.h>
-#include <base/strings/string_util.h>
-#include <brillo/message_loops/message_loop.h>
-
-#include "update_engine/common/utils.h"
-#include "update_engine/utils_android.h"
-
-using std::string;
-
-#ifndef _UE_SIDELOAD
-#error "BootControlRecovery should only be used for update_engine_sideload."
-#endif
-
-// When called from update_engine_sideload, we don't attempt to dynamically load
-// the right boot_control HAL, instead we use the only HAL statically linked in
-// via the PRODUCT_STATIC_BOOT_CONTROL_HAL make variable and access the module
-// struct directly.
-extern const hw_module_t HAL_MODULE_INFO_SYM;
-
-namespace chromeos_update_engine {
-
-namespace boot_control {
-
-// Factory defined in boot_control.h.
-std::unique_ptr<BootControlInterface> CreateBootControl() {
-  std::unique_ptr<BootControlRecovery> boot_control(new BootControlRecovery());
-  if (!boot_control->Init()) {
-    return nullptr;
-  }
-  return std::move(boot_control);
-}
-
-}  // namespace boot_control
-
-bool BootControlRecovery::Init() {
-  const hw_module_t* hw_module;
-  int ret;
-
-  // For update_engine_sideload, we simulate the hw_get_module() by accessing it
-  // from the current process directly.
-  hw_module = &HAL_MODULE_INFO_SYM;
-  ret = 0;
-  if (!hw_module ||
-      strcmp(BOOT_CONTROL_HARDWARE_MODULE_ID, hw_module->id) != 0) {
-    ret = -EINVAL;
-  }
-  if (ret != 0) {
-    LOG(ERROR) << "Error loading boot_control HAL implementation.";
-    return false;
-  }
-
-  module_ = reinterpret_cast<boot_control_module_t*>(
-      const_cast<hw_module_t*>(hw_module));
-  module_->init(module_);
-
-  LOG(INFO) << "Loaded boot_control HAL "
-            << "'" << hw_module->name << "' "
-            << "version " << (hw_module->module_api_version >> 8) << "."
-            << (hw_module->module_api_version & 0xff) << " "
-            << "authored by '" << hw_module->author << "'.";
-  return true;
-}
-
-unsigned int BootControlRecovery::GetNumSlots() const {
-  return module_->getNumberSlots(module_);
-}
-
-BootControlInterface::Slot BootControlRecovery::GetCurrentSlot() const {
-  return module_->getCurrentSlot(module_);
-}
-
-bool BootControlRecovery::GetPartitionDevice(const string& partition_name,
-                                             Slot slot,
-                                             string* device) const {
-  // We can't use fs_mgr to look up |partition_name| because fstab
-  // doesn't list every slot partition (it uses the slotselect option
-  // to mask the suffix).
-  //
-  // We can however assume that there's an entry for the /misc mount
-  // point and use that to get the device file for the misc
-  // partition. This helps us locate the disk that |partition_name|
-  // resides on. From there we'll assume that a by-name scheme is used
-  // so we can just replace the trailing "misc" by the given
-  // |partition_name| and suffix corresponding to |slot|, e.g.
-  //
-  //   /dev/block/platform/soc.0/7824900.sdhci/by-name/misc ->
-  //   /dev/block/platform/soc.0/7824900.sdhci/by-name/boot_a
-  //
-  // If needed, it's possible to relax the by-name assumption in the
-  // future by trawling /sys/block looking for the appropriate sibling
-  // of misc and then finding an entry in /dev matching the sysfs
-  // entry.
-
-  base::FilePath misc_device;
-  if (!utils::DeviceForMountPoint("/misc", &misc_device))
-    return false;
-
-  if (!utils::IsSymlink(misc_device.value().c_str())) {
-    LOG(ERROR) << "Device file " << misc_device.value() << " for /misc "
-               << "is not a symlink.";
-    return false;
-  }
-
-  const char* suffix = module_->getSuffix(module_, slot);
-  if (suffix == nullptr) {
-    LOG(ERROR) << "boot_control impl returned no suffix for slot "
-               << SlotName(slot);
-    return false;
-  }
-
-  base::FilePath path = misc_device.DirName().Append(partition_name + suffix);
-  if (!base::PathExists(path)) {
-    LOG(ERROR) << "Device file " << path.value() << " does not exist.";
-    return false;
-  }
-
-  *device = path.value();
-  return true;
-}
-
-bool BootControlRecovery::IsSlotBootable(Slot slot) const {
-  int ret = module_->isSlotBootable(module_, slot);
-  if (ret < 0) {
-    LOG(ERROR) << "Unable to determine if slot " << SlotName(slot)
-               << " is bootable: " << strerror(-ret);
-    return false;
-  }
-  return ret == 1;
-}
-
-bool BootControlRecovery::MarkSlotUnbootable(Slot slot) {
-  int ret = module_->setSlotAsUnbootable(module_, slot);
-  if (ret < 0) {
-    LOG(ERROR) << "Unable to mark slot " << SlotName(slot)
-               << " as bootable: " << strerror(-ret);
-    return false;
-  }
-  return ret == 0;
-}
-
-bool BootControlRecovery::SetActiveBootSlot(Slot slot) {
-  int ret = module_->setActiveBootSlot(module_, slot);
-  if (ret < 0) {
-    LOG(ERROR) << "Unable to set the active slot to slot " << SlotName(slot)
-               << ": " << strerror(-ret);
-  }
-  return ret == 0;
-}
-
-bool BootControlRecovery::MarkBootSuccessfulAsync(
-    base::Callback<void(bool)> callback) {
-  int ret = module_->markBootSuccessful(module_);
-  if (ret < 0) {
-    LOG(ERROR) << "Unable to mark boot successful: " << strerror(-ret);
-  }
-  return brillo::MessageLoop::current()->PostTask(
-             FROM_HERE, base::Bind(callback, ret == 0)) !=
-         brillo::MessageLoop::kTaskIdNull;
-}
-
-}  // namespace chromeos_update_engine
diff --git a/boot_control_recovery.h b/boot_control_recovery.h
deleted file mode 100644
index 3a83caa..0000000
--- a/boot_control_recovery.h
+++ /dev/null
@@ -1,63 +0,0 @@
-//
-// Copyright (C) 2015 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#ifndef UPDATE_ENGINE_BOOT_CONTROL_RECOVERY_H_
-#define UPDATE_ENGINE_BOOT_CONTROL_RECOVERY_H_
-
-#include <string>
-
-#include <hardware/boot_control.h>
-#include <hardware/hardware.h>
-
-#include "update_engine/common/boot_control.h"
-
-namespace chromeos_update_engine {
-
-// The Android recovery implementation of the BootControlInterface. This
-// implementation uses the legacy libhardware's boot_control HAL to access the
-// bootloader by linking against it statically. This should only be used in
-// recovery.
-class BootControlRecovery : public BootControlInterface {
- public:
-  BootControlRecovery() = default;
-  ~BootControlRecovery() = default;
-
-  // Load boot_control HAL implementation using libhardware and
-  // initializes it. Returns false if an error occurred.
-  bool Init();
-
-  // BootControlInterface overrides.
-  unsigned int GetNumSlots() const override;
-  BootControlInterface::Slot GetCurrentSlot() const override;
-  bool GetPartitionDevice(const std::string& partition_name,
-                          BootControlInterface::Slot slot,
-                          std::string* device) const override;
-  bool IsSlotBootable(BootControlInterface::Slot slot) const override;
-  bool MarkSlotUnbootable(BootControlInterface::Slot slot) override;
-  bool SetActiveBootSlot(BootControlInterface::Slot slot) override;
-  bool MarkBootSuccessfulAsync(base::Callback<void(bool)> callback) override;
-
- private:
-  // NOTE: There is no way to release/unload HAL implementations so
-  // this is essentially leaked on object destruction.
-  boot_control_module_t* module_;
-
-  DISALLOW_COPY_AND_ASSIGN(BootControlRecovery);
-};
-
-}  // namespace chromeos_update_engine
-
-#endif  // UPDATE_ENGINE_BOOT_CONTROL_RECOVERY_H_
diff --git a/chrome_browser_proxy_resolver.h b/chrome_browser_proxy_resolver.h
index fcf85b6..10a55fb 100644
--- a/chrome_browser_proxy_resolver.h
+++ b/chrome_browser_proxy_resolver.h
@@ -24,7 +24,7 @@
 
 #include <base/memory/weak_ptr.h>
 
-#include "update_engine/proxy_resolver.h"
+#include "update_engine/common/proxy_resolver.h"
 
 namespace chromeos_update_engine {
 
diff --git a/client_library/include/update_engine/status_update_handler.h b/client_library/include/update_engine/status_update_handler.h
index d5b8cdb..d2fad34 100644
--- a/client_library/include/update_engine/status_update_handler.h
+++ b/client_library/include/update_engine/status_update_handler.h
@@ -14,8 +14,8 @@
 // limitations under the License.
 //
 
-#ifndef UPDATE_ENGINE_CLIENT_LIBRARY_INCLUDE_STATUS_UPDATE_HANDLER_H_
-#define UPDATE_ENGINE_CLIENT_LIBRARY_INCLUDE_STATUS_UPDATE_HANDLER_H_
+#ifndef UPDATE_ENGINE_CLIENT_LIBRARY_INCLUDE_UPDATE_ENGINE_STATUS_UPDATE_HANDLER_H_
+#define UPDATE_ENGINE_CLIENT_LIBRARY_INCLUDE_UPDATE_ENGINE_STATUS_UPDATE_HANDLER_H_
 
 #include <string>
 
@@ -44,4 +44,4 @@
 
 }  // namespace update_engine
 
-#endif  // UPDATE_ENGINE_CLIENT_LIBRARY_INCLUDE_STATUS_UPDATE_HANDLER_H_
+#endif  // UPDATE_ENGINE_CLIENT_LIBRARY_INCLUDE_UPDATE_ENGINE_STATUS_UPDATE_HANDLER_H_
diff --git a/client_library/include/update_engine/update_status.h b/client_library/include/update_engine/update_status.h
index 41fab48..5a3dccf 100644
--- a/client_library/include/update_engine/update_status.h
+++ b/client_library/include/update_engine/update_status.h
@@ -23,17 +23,24 @@
 
 namespace update_engine {
 
+// ATTENTION: When adding a new enum value here, always append at the end and
+// make sure to make proper adjustments in UpdateAttempter::ActionCompleted(). If
+// any enum member is deprecated, the assigned value of other members should
+// not change. See b/62842358.
 enum class UpdateStatus {
   IDLE = 0,
-  CHECKING_FOR_UPDATE,
-  UPDATE_AVAILABLE,
-  DOWNLOADING,
-  VERIFYING,
-  FINALIZING,
-  UPDATED_NEED_REBOOT,
-  REPORTING_ERROR_EVENT,
-  ATTEMPTING_ROLLBACK,
-  DISABLED,
+  CHECKING_FOR_UPDATE = 1,
+  UPDATE_AVAILABLE = 2,
+  DOWNLOADING = 3,
+  VERIFYING = 4,
+  FINALIZING = 5,
+  UPDATED_NEED_REBOOT = 6,
+  REPORTING_ERROR_EVENT = 7,
+  ATTEMPTING_ROLLBACK = 8,
+  DISABLED = 9,
+  // Broadcast this state when an update aborts because user preferences do not
+  // allow updates, e.g. over cellular network.
+  NEED_PERMISSION_TO_UPDATE = 10,
 };
 
 // Enum of bit-wise flags for controlling how updates are attempted.
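The append-only warning above matters because UpdateStatus values cross process and version boundaries as plain integers (IPC broadcasts, persisted metrics), so renumbering existing members silently breaks older readers. A tiny illustration of why; the include path and helper name are assumptions, not part of this change:

    #include <cstdint>
    #include "update_engine/update_status.h"  // assumed include path for the enum above

    // Status values are serialized as raw integers, so each member's assigned
    // value is effectively part of a wire format and must stay stable.
    int32_t SerializeStatusForIpc(update_engine::UpdateStatus status) {
      return static_cast<int32_t>(status);
    }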
diff --git a/common/action_processor.cc b/common/action_processor.cc
index 3549e08..ead99c4 100644
--- a/common/action_processor.cc
+++ b/common/action_processor.cc
@@ -17,6 +17,7 @@
 #include "update_engine/common/action_processor.h"
 
 #include <string>
+#include <utility>
 
 #include <base/logging.h>
 
@@ -24,27 +25,30 @@
 #include "update_engine/common/error_code_utils.h"
 
 using std::string;
+using std::unique_ptr;
 
 namespace chromeos_update_engine {
 
 ActionProcessor::~ActionProcessor() {
   if (IsRunning())
     StopProcessing();
-  for (auto action : actions_)
-    action->SetProcessor(nullptr);
 }
 
-void ActionProcessor::EnqueueAction(AbstractAction* action) {
-  actions_.push_back(action);
+void ActionProcessor::EnqueueAction(unique_ptr<AbstractAction> action) {
   action->SetProcessor(this);
+  actions_.push_back(std::move(action));
+}
+
+bool ActionProcessor::IsRunning() const {
+  return current_action_ != nullptr || suspended_;
 }
 
 void ActionProcessor::StartProcessing() {
   CHECK(!IsRunning());
   if (!actions_.empty()) {
-    current_action_ = actions_.front();
-    LOG(INFO) << "ActionProcessor: starting " << current_action_->Type();
+    current_action_ = std::move(actions_.front());
     actions_.pop_front();
+    LOG(INFO) << "ActionProcessor: starting " << current_action_->Type();
     current_action_->PerformAction();
   }
 }
@@ -53,16 +57,13 @@
   CHECK(IsRunning());
   if (current_action_) {
     current_action_->TerminateProcessing();
-    current_action_->SetProcessor(nullptr);
   }
   LOG(INFO) << "ActionProcessor: aborted "
             << (current_action_ ? current_action_->Type() : "")
             << (suspended_ ? " while suspended" : "");
-  current_action_ = nullptr;
+  current_action_.reset();
   suspended_ = false;
   // Delete all the actions before calling the delegate.
-  for (auto action : actions_)
-    action->SetProcessor(nullptr);
   actions_.clear();
   if (delegate_)
     delegate_->ProcessingStopped(this);
@@ -106,13 +107,12 @@
 
 void ActionProcessor::ActionComplete(AbstractAction* actionptr,
                                      ErrorCode code) {
-  CHECK_EQ(actionptr, current_action_);
+  CHECK_EQ(actionptr, current_action_.get());
   if (delegate_)
     delegate_->ActionCompleted(this, actionptr, code);
   string old_type = current_action_->Type();
   current_action_->ActionCompleted(code);
-  current_action_->SetProcessor(nullptr);
-  current_action_ = nullptr;
+  current_action_.reset();
   LOG(INFO) << "ActionProcessor: finished "
             << (actions_.empty() ? "last action " : "") << old_type
             << (suspended_ ? " while suspended" : "")
@@ -138,7 +138,7 @@
     }
     return;
   }
-  current_action_ = actions_.front();
+  current_action_ = std::move(actions_.front());
   actions_.pop_front();
   LOG(INFO) << "ActionProcessor: starting " << current_action_->Type();
   current_action_->PerformAction();
diff --git a/common/action_processor.h b/common/action_processor.h
index c9c179e..1a67c99 100644
--- a/common/action_processor.h
+++ b/common/action_processor.h
@@ -18,12 +18,16 @@
 #define UPDATE_ENGINE_COMMON_ACTION_PROCESSOR_H_
 
 #include <deque>
+#include <memory>
+#include <vector>
 
 #include <base/macros.h>
 #include <brillo/errors/error.h>
 
 #include "update_engine/common/error_code.h"
 
+#include <gtest/gtest_prod.h>
+
 // The structure of these classes (Action, ActionPipe, ActionProcessor, etc.)
 // is based on the KSAction* classes from the Google Update Engine code at
 // http://code.google.com/p/update-engine/ . The author of this file sends
@@ -69,10 +73,10 @@
 
   // Returns true iff the processing was started but not yet completed nor
   // stopped.
-  bool IsRunning() const { return current_action_ != nullptr || suspended_; }
+  bool IsRunning() const;
 
   // Adds another Action to the end of the queue.
-  virtual void EnqueueAction(AbstractAction* action);
+  virtual void EnqueueAction(std::unique_ptr<AbstractAction> action);
 
   // Sets/gets the current delegate. Set to null to remove a delegate.
   ActionProcessorDelegate* delegate() const { return delegate_; }
@@ -81,14 +85,17 @@
   }
 
   // Returns a pointer to the current Action that's processing.
-  AbstractAction* current_action() const {
-    return current_action_;
-  }
+  AbstractAction* current_action() const { return current_action_.get(); }
 
   // Called by an action to notify processor that it's done. Caller passes self.
+  // But this call deletes the action if no other object has a reference to
+  // it, so in that case the caller should not try to access any of its
+  // member variables after this call.
   void ActionComplete(AbstractAction* actionptr, ErrorCode code);
 
  private:
+  FRIEND_TEST(ActionProcessorTest, ChainActionsTest);
+
   // Continue processing actions (if any) after the last action terminated with
   // the passed error code. If there are no more actions to process, the
   // processing will terminate.
@@ -96,10 +103,10 @@
 
   // Actions that have not yet begun processing, in the order in which
   // they'll be processed.
-  std::deque<AbstractAction*> actions_;
+  std::deque<std::unique_ptr<AbstractAction>> actions_;
 
   // A pointer to the currently processing Action, if any.
-  AbstractAction* current_action_{nullptr};
+  std::unique_ptr<AbstractAction> current_action_;
 
   // The ErrorCode reported by an action that was suspended but finished while
   // being suspended. This error code is stored here to be reported back to the
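With EnqueueAction() now taking std::unique_ptr, the ActionProcessor owns the actions it runs; callers that still need to interact with an action after enqueueing keep a raw, non-owning pointer taken before the move, exactly as the updated unittests below do. A minimal sketch, assuming MyAction is some AbstractAction subclass with a CompleteAction() method (as the test action has):

    #include <memory>
    #include <utility>

    // Hypothetical usage of the unique_ptr-based EnqueueAction() API.
    void EnqueueExample(chromeos_update_engine::ActionProcessor* processor) {
      auto action = std::make_unique<MyAction>();   // MyAction: assumed AbstractAction subclass
      MyAction* action_ptr = action.get();          // non-owning handle for later use
      processor->EnqueueAction(std::move(action));  // ownership moves into the processor
      processor->StartProcessing();
      action_ptr->CompleteAction();  // valid only while the processor keeps the action alive
    }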
diff --git a/common/action_processor_unittest.cc b/common/action_processor_unittest.cc
index 631e42d..eb646ef 100644
--- a/common/action_processor_unittest.cc
+++ b/common/action_processor_unittest.cc
@@ -17,6 +17,7 @@
 #include "update_engine/common/action_processor.h"
 
 #include <string>
+#include <utility>
 
 #include <gtest/gtest.h>
 
@@ -96,7 +97,11 @@
   void SetUp() override {
     action_processor_.set_delegate(&delegate_);
     // Silence Type() calls used for logging.
-    EXPECT_CALL(mock_action_, Type()).Times(testing::AnyNumber());
+    mock_action_.reset(new testing::StrictMock<MockAction>());
+    mock_action_ptr_ = mock_action_.get();
+    action_.reset(new ActionProcessorTestAction());
+    action_ptr_ = action_.get();
+    EXPECT_CALL(*mock_action_, Type()).Times(testing::AnyNumber());
   }
 
   void TearDown() override {
@@ -110,34 +115,35 @@
   MyActionProcessorDelegate delegate_{&action_processor_};
 
   // Common actions used during most tests.
-  testing::StrictMock<MockAction> mock_action_;
-  ActionProcessorTestAction action_;
+  std::unique_ptr<testing::StrictMock<MockAction>> mock_action_;
+  testing::StrictMock<MockAction>* mock_action_ptr_;
+  std::unique_ptr<ActionProcessorTestAction> action_;
+  ActionProcessorTestAction* action_ptr_;
 };
 
 TEST_F(ActionProcessorTest, SimpleTest) {
   EXPECT_FALSE(action_processor_.IsRunning());
-  action_processor_.EnqueueAction(&action_);
+  action_processor_.EnqueueAction(std::move(action_));
   EXPECT_FALSE(action_processor_.IsRunning());
-  EXPECT_FALSE(action_.IsRunning());
+  EXPECT_FALSE(action_ptr_->IsRunning());
   action_processor_.StartProcessing();
   EXPECT_TRUE(action_processor_.IsRunning());
-  EXPECT_TRUE(action_.IsRunning());
-  EXPECT_EQ(action_processor_.current_action(), &action_);
-  action_.CompleteAction();
+  EXPECT_TRUE(action_ptr_->IsRunning());
+  action_ptr_->CompleteAction();
   EXPECT_FALSE(action_processor_.IsRunning());
-  EXPECT_FALSE(action_.IsRunning());
+  EXPECT_EQ(action_processor_.current_action(), nullptr);
 }
 
 TEST_F(ActionProcessorTest, DelegateTest) {
-  action_processor_.EnqueueAction(&action_);
+  action_processor_.EnqueueAction(std::move(action_));
   action_processor_.StartProcessing();
-  action_.CompleteAction();
+  action_ptr_->CompleteAction();
   EXPECT_TRUE(delegate_.processing_done_called_);
   EXPECT_TRUE(delegate_.action_completed_called_);
 }
 
 TEST_F(ActionProcessorTest, StopProcessingTest) {
-  action_processor_.EnqueueAction(&action_);
+  action_processor_.EnqueueAction(std::move(action_));
   action_processor_.StartProcessing();
   action_processor_.StopProcessing();
   EXPECT_TRUE(delegate_.processing_stopped_called_);
@@ -150,54 +156,58 @@
   // This test doesn't use a delegate since it terminates several actions.
   action_processor_.set_delegate(nullptr);
 
-  ActionProcessorTestAction action1, action2;
-  action_processor_.EnqueueAction(&action1);
-  action_processor_.EnqueueAction(&action2);
+  auto action0 = std::make_unique<ActionProcessorTestAction>();
+  auto action1 = std::make_unique<ActionProcessorTestAction>();
+  auto action2 = std::make_unique<ActionProcessorTestAction>();
+  auto action0_ptr = action0.get();
+  auto action1_ptr = action1.get();
+  auto action2_ptr = action2.get();
+  action_processor_.EnqueueAction(std::move(action0));
+  action_processor_.EnqueueAction(std::move(action1));
+  action_processor_.EnqueueAction(std::move(action2));
+
+  EXPECT_EQ(action_processor_.actions_.size(), 3u);
+  EXPECT_EQ(action_processor_.actions_[0].get(), action0_ptr);
+  EXPECT_EQ(action_processor_.actions_[1].get(), action1_ptr);
+  EXPECT_EQ(action_processor_.actions_[2].get(), action2_ptr);
+
   action_processor_.StartProcessing();
-  EXPECT_EQ(&action1, action_processor_.current_action());
+  EXPECT_EQ(action0_ptr, action_processor_.current_action());
   EXPECT_TRUE(action_processor_.IsRunning());
-  action1.CompleteAction();
-  EXPECT_EQ(&action2, action_processor_.current_action());
+  action0_ptr->CompleteAction();
+  EXPECT_EQ(action1_ptr, action_processor_.current_action());
   EXPECT_TRUE(action_processor_.IsRunning());
-  action2.CompleteAction();
+  action1_ptr->CompleteAction();
+  EXPECT_EQ(action2_ptr, action_processor_.current_action());
+  EXPECT_TRUE(action_processor_.actions_.empty());
+  EXPECT_TRUE(action_processor_.IsRunning());
+  action2_ptr->CompleteAction();
   EXPECT_EQ(nullptr, action_processor_.current_action());
+  EXPECT_TRUE(action_processor_.actions_.empty());
   EXPECT_FALSE(action_processor_.IsRunning());
 }
 
-TEST_F(ActionProcessorTest, DtorTest) {
-  ActionProcessorTestAction action1, action2;
-  {
-    ActionProcessor action_processor;
-    action_processor.EnqueueAction(&action1);
-    action_processor.EnqueueAction(&action2);
-    action_processor.StartProcessing();
-  }
-  EXPECT_EQ(nullptr, action1.processor());
-  EXPECT_FALSE(action1.IsRunning());
-  EXPECT_EQ(nullptr, action2.processor());
-  EXPECT_FALSE(action2.IsRunning());
-}
-
 TEST_F(ActionProcessorTest, DefaultDelegateTest) {
-  // Just make sure it doesn't crash
-  action_processor_.EnqueueAction(&action_);
+  // Just make sure it doesn't crash.
+  action_processor_.EnqueueAction(std::move(action_));
   action_processor_.StartProcessing();
-  action_.CompleteAction();
+  action_ptr_->CompleteAction();
 
-  action_processor_.EnqueueAction(&action_);
+  action_.reset(new ActionProcessorTestAction());
+  action_processor_.EnqueueAction(std::move(action_));
   action_processor_.StartProcessing();
   action_processor_.StopProcessing();
 }
 
-// This test suspends and resume the action processor while running one action_.
+// This test suspends and resume the action processor while running one action.
 TEST_F(ActionProcessorTest, SuspendResumeTest) {
-  action_processor_.EnqueueAction(&mock_action_);
+  action_processor_.EnqueueAction(std::move(mock_action_));
 
   testing::InSequence s;
-  EXPECT_CALL(mock_action_, PerformAction());
+  EXPECT_CALL(*mock_action_ptr_, PerformAction());
   action_processor_.StartProcessing();
 
-  EXPECT_CALL(mock_action_, SuspendAction());
+  EXPECT_CALL(*mock_action_ptr_, SuspendAction());
   action_processor_.SuspendProcessing();
   // Suspending the processor twice should not suspend the action twice.
   action_processor_.SuspendProcessing();
@@ -205,32 +215,31 @@
   // IsRunning should return whether there's is an action doing some work, even
   // if it is suspended.
   EXPECT_TRUE(action_processor_.IsRunning());
-  EXPECT_EQ(&mock_action_, action_processor_.current_action());
+  EXPECT_EQ(mock_action_ptr_, action_processor_.current_action());
 
-  EXPECT_CALL(mock_action_, ResumeAction());
+  EXPECT_CALL(*mock_action_ptr_, ResumeAction());
   action_processor_.ResumeProcessing();
 
   // Calling ResumeProcessing twice should not affect the action_.
   action_processor_.ResumeProcessing();
-
-  action_processor_.ActionComplete(&mock_action_, ErrorCode::kSuccess);
+  action_processor_.ActionComplete(mock_action_ptr_, ErrorCode::kSuccess);
 }
 
 // This test suspends an action that presumably doesn't support suspend/resume
 // and it finished before being resumed.
 TEST_F(ActionProcessorTest, ActionCompletedWhileSuspendedTest) {
-  action_processor_.EnqueueAction(&mock_action_);
+  action_processor_.EnqueueAction(std::move(mock_action_));
 
   testing::InSequence s;
-  EXPECT_CALL(mock_action_, PerformAction());
+  EXPECT_CALL(*mock_action_ptr_, PerformAction());
   action_processor_.StartProcessing();
 
-  EXPECT_CALL(mock_action_, SuspendAction());
+  EXPECT_CALL(*mock_action_ptr_, SuspendAction());
   action_processor_.SuspendProcessing();
 
   // Simulate the action completion while suspended. No other call to
   // |mock_action_| is expected at this point.
-  action_processor_.ActionComplete(&mock_action_, ErrorCode::kSuccess);
+  action_processor_.ActionComplete(mock_action_ptr_, ErrorCode::kSuccess);
 
   // The processing should not be done since the ActionProcessor is suspended
   // and the processing is considered to be still running until resumed.
@@ -243,15 +252,15 @@
 }
 
 TEST_F(ActionProcessorTest, StoppedWhileSuspendedTest) {
-  action_processor_.EnqueueAction(&mock_action_);
+  action_processor_.EnqueueAction(std::move(mock_action_));
 
   testing::InSequence s;
-  EXPECT_CALL(mock_action_, PerformAction());
+  EXPECT_CALL(*mock_action_ptr_, PerformAction());
   action_processor_.StartProcessing();
-  EXPECT_CALL(mock_action_, SuspendAction());
+  EXPECT_CALL(*mock_action_ptr_, SuspendAction());
   action_processor_.SuspendProcessing();
 
-  EXPECT_CALL(mock_action_, TerminateProcessing());
+  EXPECT_CALL(*mock_action_ptr_, TerminateProcessing());
   action_processor_.StopProcessing();
   // Stopping the processing should abort the current execution no matter what.
   EXPECT_TRUE(delegate_.processing_stopped_called_);
diff --git a/common/action_unittest.cc b/common/action_unittest.cc
index dcdce17..b2f9ba4 100644
--- a/common/action_unittest.cc
+++ b/common/action_unittest.cc
@@ -16,8 +16,11 @@
 
 #include "update_engine/common/action.h"
 
-#include <gtest/gtest.h>
 #include <string>
+#include <utility>
+
+#include <gtest/gtest.h>
+
 #include "update_engine/common/action_processor.h"
 
 using std::string;
@@ -56,21 +59,19 @@
 // This test creates two simple Actions and sends a message via an ActionPipe
 // from one to the other.
 TEST(ActionTest, SimpleTest) {
-  ActionTestAction action;
-
-  EXPECT_FALSE(action.in_pipe());
-  EXPECT_FALSE(action.out_pipe());
-  EXPECT_FALSE(action.processor());
-  EXPECT_FALSE(action.IsRunning());
+  auto action = std::make_unique<ActionTestAction>();
+  auto action_ptr = action.get();
+  EXPECT_FALSE(action->in_pipe());
+  EXPECT_FALSE(action->out_pipe());
+  EXPECT_FALSE(action->processor());
+  EXPECT_FALSE(action->IsRunning());
 
   ActionProcessor action_processor;
-  action_processor.EnqueueAction(&action);
-  EXPECT_EQ(&action_processor, action.processor());
-
+  action_processor.EnqueueAction(std::move(action));
+  EXPECT_EQ(&action_processor, action_ptr->processor());
   action_processor.StartProcessing();
-  EXPECT_TRUE(action.IsRunning());
-  action.CompleteAction();
-  EXPECT_FALSE(action.IsRunning());
+  EXPECT_TRUE(action_ptr->IsRunning());
+  action_ptr->CompleteAction();
 }
 
 }  // namespace chromeos_update_engine
diff --git a/common/boot_control_interface.h b/common/boot_control_interface.h
index 659b388..776333c 100644
--- a/common/boot_control_interface.h
+++ b/common/boot_control_interface.h
@@ -18,6 +18,7 @@
 #define UPDATE_ENGINE_COMMON_BOOT_CONTROL_INTERFACE_H_
 
 #include <climits>
+#include <map>
 #include <string>
 
 #include <base/callback.h>
@@ -32,6 +33,7 @@
 class BootControlInterface {
  public:
   using Slot = unsigned int;
+  using PartitionSizes = std::map<std::string, uint64_t>;
 
   static const Slot kInvalidSlot = UINT_MAX;
 
@@ -77,6 +79,17 @@
   // of the operation.
   virtual bool MarkBootSuccessfulAsync(base::Callback<void(bool)> callback) = 0;
 
+  // Initialize metadata of underlying partitions for a given |slot|.
+  // Ensure that partitions at the specified |slot| have the given sizes, as
+  // specified by |partition_sizes|. |partition_sizes| has the format:
+  // {"vendor": 524288000, "system": 2097152000, ...}; values must be
+  // aligned to the logical block size of the super partition.
+  virtual bool InitPartitionMetadata(Slot slot,
+                                     const PartitionSizes& partition_sizes) = 0;
+
+  // Do necessary clean-up operations after the whole update.
+  virtual void Cleanup() = 0;
+
   // Return a human-readable slot name used for logging.
   static std::string SlotName(Slot slot) {
     if (slot == kInvalidSlot)
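A sketch of how a caller might build the PartitionSizes map for the new InitPartitionMetadata() hook; the function name, partition names, and byte counts below are illustrative only, and real callers derive them from the payload manifest:

    #include <cstdint>
    #include <map>
    #include <string>

    #include "update_engine/common/boot_control_interface.h"

    // Hypothetical call site; sizes must respect the alignment noted above.
    bool PrepareTargetPartitions(
        chromeos_update_engine::BootControlInterface* boot_control,
        chromeos_update_engine::BootControlInterface::Slot target_slot) {
      chromeos_update_engine::BootControlInterface::PartitionSizes sizes{
          {"system", 2097152000},  // ~2 GB, matching the doc comment's example
          {"vendor", 524288000},   // 500 MiB
      };
      return boot_control->InitPartitionMetadata(target_slot, sizes);
    }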
diff --git a/common/boot_control_stub.cc b/common/boot_control_stub.cc
index 2de0c82..2e326e5 100644
--- a/common/boot_control_stub.cc
+++ b/common/boot_control_stub.cc
@@ -59,4 +59,14 @@
   return false;
 }
 
+bool BootControlStub::InitPartitionMetadata(
+    Slot slot, const PartitionSizes& partition_sizes) {
+  LOG(ERROR) << __FUNCTION__ << " should never be called.";
+  return false;
+}
+
+void BootControlStub::Cleanup() {
+  LOG(ERROR) << __FUNCTION__ << " should never be called.";
+}
+
 }  // namespace chromeos_update_engine
diff --git a/common/boot_control_stub.h b/common/boot_control_stub.h
index 7832adc..65248af 100644
--- a/common/boot_control_stub.h
+++ b/common/boot_control_stub.h
@@ -27,7 +27,7 @@
 // typically used when e.g. an underlying HAL implementation cannot be
 // loaded or doesn't exist.
 //
-// You are gauranteed that the implementation of GetNumSlots() method
+// You are guaranteed that the implementation of GetNumSlots() method
 // always returns 0. This can be used to identify that this
 // implementation is in use.
 class BootControlStub : public BootControlInterface {
@@ -45,6 +45,9 @@
   bool MarkSlotUnbootable(BootControlInterface::Slot slot) override;
   bool SetActiveBootSlot(BootControlInterface::Slot slot) override;
   bool MarkBootSuccessfulAsync(base::Callback<void(bool)> callback) override;
+  bool InitPartitionMetadata(Slot slot,
+                             const PartitionSizes& partition_sizes) override;
+  void Cleanup() override;
 
  private:
   DISALLOW_COPY_AND_ASSIGN(BootControlStub);
diff --git a/common/constants.cc b/common/constants.cc
index 5941c93..4bca105 100644
--- a/common/constants.cc
+++ b/common/constants.cc
@@ -47,6 +47,7 @@
     "metrics-attempt-last-reporting-time";
 const char kPrefsMetricsCheckLastReportingTime[] =
     "metrics-check-last-reporting-time";
+const char kPrefsNoIgnoreBackoff[] = "no-ignore-backoff";
 const char kPrefsNumReboots[] = "num-reboots";
 const char kPrefsNumResponsesSeen[] = "num-responses-seen";
 const char kPrefsOmahaCohort[] = "omaha-cohort";
@@ -60,6 +61,7 @@
 const char kPrefsPostInstallSucceeded[] = "post-install-succeeded";
 const char kPrefsPreviousVersion[] = "previous-version";
 const char kPrefsResumedUpdateFailures[] = "resumed-update-failures";
+const char kPrefsRollbackHappened[] = "rollback-happened";
 const char kPrefsRollbackVersion[] = "rollback-version";
 const char kPrefsChannelOnSlotPrefix[] = "channel-on-slot-";
 const char kPrefsSystemUpdatedMarker[] = "system-updated-marker";
@@ -75,6 +77,10 @@
 const char kPrefsUpdateFirstSeenAt[] = "update-first-seen-at";
 const char kPrefsUpdateOverCellularPermission[] =
     "update-over-cellular-permission";
+const char kPrefsUpdateOverCellularTargetVersion[] =
+    "update-over-cellular-target-version";
+const char kPrefsUpdateOverCellularTargetSize[] =
+    "update-over-cellular-target-size";
 const char kPrefsUpdateServerCertificate[] = "update-server-cert";
 const char kPrefsUpdateStateNextDataLength[] = "update-state-next-data-length";
 const char kPrefsUpdateStateNextDataOffset[] = "update-state-next-data-offset";
@@ -84,9 +90,13 @@
 const char kPrefsUpdateStateSignatureBlob[] = "update-state-signature-blob";
 const char kPrefsUpdateStateSignedSHA256Context[] =
     "update-state-signed-sha-256-context";
+const char kPrefsUpdateBootTimestampStart[] = "update-boot-timestamp-start";
 const char kPrefsUpdateTimestampStart[] = "update-timestamp-start";
 const char kPrefsUrlSwitchCount[] = "url-switch-count";
-const char kPrefsWallClockWaitPeriod[] = "wall-clock-wait-period";
+const char kPrefsVerityWritten[] = "verity-written";
+const char kPrefsWallClockScatteringWaitPeriod[] = "wall-clock-wait-period";
+const char kPrefsWallClockStagingWaitPeriod[] =
+    "wall-clock-staging-wait-period";
 
 // These four fields are generated by scripts/brillo_update_payload.
 const char kPayloadPropertyFileSize[] = "FILE_SIZE";
diff --git a/common/constants.h b/common/constants.h
index 26773cf..1057a65 100644
--- a/common/constants.h
+++ b/common/constants.h
@@ -49,6 +49,7 @@
 extern const char kPrefsManifestSignatureSize[];
 extern const char kPrefsMetricsAttemptLastReportingTime[];
 extern const char kPrefsMetricsCheckLastReportingTime[];
+extern const char kPrefsNoIgnoreBackoff[];
 extern const char kPrefsNumReboots[];
 extern const char kPrefsNumResponsesSeen[];
 extern const char kPrefsOmahaCohort[];
@@ -62,6 +63,7 @@
 extern const char kPrefsPostInstallSucceeded[];
 extern const char kPrefsPreviousVersion[];
 extern const char kPrefsResumedUpdateFailures[];
+extern const char kPrefsRollbackHappened[];
 extern const char kPrefsRollbackVersion[];
 extern const char kPrefsChannelOnSlotPrefix[];
 extern const char kPrefsSystemUpdatedMarker[];
@@ -76,6 +78,8 @@
 extern const char kPrefsUpdateDurationUptime[];
 extern const char kPrefsUpdateFirstSeenAt[];
 extern const char kPrefsUpdateOverCellularPermission[];
+extern const char kPrefsUpdateOverCellularTargetVersion[];
+extern const char kPrefsUpdateOverCellularTargetSize[];
 extern const char kPrefsUpdateServerCertificate[];
 extern const char kPrefsUpdateStateNextDataLength[];
 extern const char kPrefsUpdateStateNextDataOffset[];
@@ -84,9 +88,12 @@
 extern const char kPrefsUpdateStateSHA256Context[];
 extern const char kPrefsUpdateStateSignatureBlob[];
 extern const char kPrefsUpdateStateSignedSHA256Context[];
+extern const char kPrefsUpdateBootTimestampStart[];
 extern const char kPrefsUpdateTimestampStart[];
 extern const char kPrefsUrlSwitchCount[];
-extern const char kPrefsWallClockWaitPeriod[];
+extern const char kPrefsVerityWritten[];
+extern const char kPrefsWallClockScatteringWaitPeriod[];
+extern const char kPrefsWallClockStagingWaitPeriod[];
 
 // Keys used when storing and loading payload properties.
 extern const char kPayloadPropertyFileSize[];
@@ -164,9 +171,10 @@
 //
 // For non-official builds (e.g. typically built on a developer's
 // workstation and served via devserver) bump this since it takes time
-// for the workstation to generate the payload. For p2p, make this
-// relatively low since we want to fail fast.
-const int kDownloadLowSpeedTimeSeconds = 90;
+// for the workstation to generate the payload. For normal operation
+// and p2p, make this relatively low since we want to fail fast in
+// those cases.
+const int kDownloadLowSpeedTimeSeconds = 30;
 const int kDownloadDevModeLowSpeedTimeSeconds = 180;
 const int kDownloadP2PLowSpeedTimeSeconds = 60;
 
diff --git a/common/error_code.h b/common/error_code.h
index 0b08005..252cc42 100644
--- a/common/error_code.h
+++ b/common/error_code.h
@@ -73,13 +73,17 @@
   kFilesystemVerifierError = 47,
   kUserCanceled = 48,
   kNonCriticalUpdateInOOBE = 49,
-  // kOmahaUpdateIgnoredOverCellular = 50,
+  kOmahaUpdateIgnoredOverCellular = 50,
   kPayloadTimestampError = 51,
   kUpdatedButNotActive = 52,
+  kNoUpdate = 53,
+  kRollbackNotPossible = 54,
+  kFirstActiveOmahaPingSentPersistenceError = 55,
+  kVerityCalculationError = 56,
 
   // VERY IMPORTANT! When adding new error codes:
   //
-  // 1) Update tools/metrics/histograms/histograms.xml in Chrome.
+  // 1) Update tools/metrics/histograms/enums.xml in Chrome.
   //
   // 2) Update the assorted switch statements in update_engine which won't
   //    build until this case is added.
@@ -115,7 +119,7 @@
   // modify the implementation of ErrorCode into a properly encapsulated class.
   kDevModeFlag = 1 << 31,
 
-  // Set if resuming an interruped update.
+  // Set if resuming an interrupted update.
   kResumedFlag = 1 << 30,
 
   // Set if using a dev/test image as opposed to an MP-signed image.
diff --git a/common/error_code_utils.cc b/common/error_code_utils.cc
index 313a15f..a1607f5 100644
--- a/common/error_code_utils.cc
+++ b/common/error_code_utils.cc
@@ -144,10 +144,20 @@
       return "ErrorCode::kUserCanceled";
     case ErrorCode::kNonCriticalUpdateInOOBE:
       return "ErrorCode::kNonCriticalUpdateInOOBE";
+    case ErrorCode::kOmahaUpdateIgnoredOverCellular:
+      return "ErrorCode::kOmahaUpdateIgnoredOverCellular";
     case ErrorCode::kPayloadTimestampError:
       return "ErrorCode::kPayloadTimestampError";
     case ErrorCode::kUpdatedButNotActive:
       return "ErrorCode::kUpdatedButNotActive";
+    case ErrorCode::kNoUpdate:
+      return "ErrorCode::kNoUpdate";
+    case ErrorCode::kRollbackNotPossible:
+      return "ErrorCode::kRollbackNotPossible";
+    case ErrorCode::kFirstActiveOmahaPingSentPersistenceError:
+      return "ErrorCode::kFirstActiveOmahaPingSentPersistenceError";
+    case ErrorCode::kVerityCalculationError:
+      return "ErrorCode::kVerityCalculationError";
       // Don't add a default case to let the compiler warn about newly added
       // error codes which should be added here.
   }
diff --git a/common/fake_boot_control.h b/common/fake_boot_control.h
index 3eccc80..e71c83a 100644
--- a/common/fake_boot_control.h
+++ b/common/fake_boot_control.h
@@ -74,6 +74,13 @@
     return true;
   }
 
+  bool InitPartitionMetadata(Slot slot,
+                             const PartitionSizes& partition_sizes) override {
+    return true;
+  }
+
+  void Cleanup() override {}
+
   // Setters
   void SetNumSlots(unsigned int num_slots) {
     num_slots_ = num_slots;
diff --git a/common/fake_hardware.h b/common/fake_hardware.h
index f2b2c9d..55ef32d 100644
--- a/common/fake_hardware.h
+++ b/common/fake_hardware.h
@@ -34,6 +34,24 @@
   // false.
   static const int kPowerwashCountNotSet = -1;
 
+  // Default value for crossystem tpm_kernver.
+  static const int kMinKernelKeyVersion = 3;
+
+  // Default value for crossystem tpm_fwver.
+  static const int kMinFirmwareKeyVersion = 13;
+
+  // Default value for crossystem kernel_max_rollforward. This value is the
+  // default for consumer devices and effectively means "unlimited rollforward
+  // is allowed", which is the same as the behavior prior to implementing
+  // roll forward prevention.
+  static const int kKernelMaxRollforward = 0xfffffffe;
+
+  // Default value for crossystem firmware_max_rollforward. This value is the
+  // default for consumer devices and effectively means "unlimited rollforward
+  // is allowed", which is the same as the behavior prior to implementing
+  // roll forward prevention.
+  static const int kFirmwareMaxRollforward = 0xfffffffe;
+
   FakeHardware() = default;
 
   // HardwareInterface methods.
@@ -59,6 +77,31 @@
 
   std::string GetECVersion() const override { return ec_version_; }
 
+  int GetMinKernelKeyVersion() const override {
+    return min_kernel_key_version_;
+  }
+
+  int GetMinFirmwareKeyVersion() const override {
+    return min_firmware_key_version_;
+  }
+
+  int GetMaxFirmwareKeyRollforward() const override {
+    return firmware_max_rollforward_;
+  }
+
+  bool SetMaxFirmwareKeyRollforward(int firmware_max_rollforward) override {
+    if (GetMaxFirmwareKeyRollforward() == -1)
+      return false;
+
+    firmware_max_rollforward_ = firmware_max_rollforward;
+    return true;
+  }
+
+  bool SetMaxKernelKeyRollforward(int kernel_max_rollforward) override {
+    kernel_max_rollforward_ = kernel_max_rollforward;
+    return true;
+  }
+
   int GetPowerwashCount() const override { return powerwash_count_; }
 
   bool SchedulePowerwash() override {
@@ -87,8 +130,9 @@
     return first_active_omaha_ping_sent_;
   }
 
-  void SetFirstActiveOmahaPingSent() override {
+  bool SetFirstActiveOmahaPingSent() override {
     first_active_omaha_ping_sent_ = true;
+    return true;
   }
 
   // Setters
@@ -131,6 +175,14 @@
     ec_version_ = ec_version;
   }
 
+  void SetMinKernelKeyVersion(int min_kernel_key_version) {
+    min_kernel_key_version_ = min_kernel_key_version;
+  }
+
+  void SetMinFirmwareKeyVersion(int min_firmware_key_version) {
+    min_firmware_key_version_ = min_firmware_key_version;
+  }
+
   void SetPowerwashCount(int powerwash_count) {
     powerwash_count_ = powerwash_count;
   }
@@ -139,16 +191,24 @@
     build_timestamp_ = build_timestamp;
   }
 
+  // Getters to verify state.
+  int GetMaxKernelKeyRollforward() const { return kernel_max_rollforward_; }
+
  private:
   bool is_official_build_{true};
   bool is_normal_boot_mode_{true};
   bool are_dev_features_enabled_{false};
   bool is_oobe_enabled_{true};
   bool is_oobe_complete_{true};
-  base::Time oobe_timestamp_{base::Time::FromTimeT(1169280000)}; // Jan 20, 2007
+  // Jan 20, 2007
+  base::Time oobe_timestamp_{base::Time::FromTimeT(1169280000)};
   std::string hardware_class_{"Fake HWID BLAH-1234"};
   std::string firmware_version_{"Fake Firmware v1.0.1"};
   std::string ec_version_{"Fake EC v1.0a"};
+  int min_kernel_key_version_{kMinKernelKeyVersion};
+  int min_firmware_key_version_{kMinFirmwareKeyVersion};
+  int kernel_max_rollforward_{kKernelMaxRollforward};
+  int firmware_max_rollforward_{kFirmwareMaxRollforward};
   int powerwash_count_{kPowerwashCountNotSet};
   bool powerwash_scheduled_{false};
   int64_t build_timestamp_{0};
diff --git a/common/file_fetcher.cc b/common/file_fetcher.cc
index d0a109b..3836e54 100644
--- a/common/file_fetcher.cc
+++ b/common/file_fetcher.cc
@@ -138,8 +138,9 @@
       delegate_->TransferComplete(this, true);
   } else {
     bytes_copied_ += bytes_read;
-    if (delegate_)
-      delegate_->ReceivedBytes(this, buffer_.data(), bytes_read);
+    if (delegate_ &&
+        !delegate_->ReceivedBytes(this, buffer_.data(), bytes_read))
+      return;
     ScheduleRead();
   }
 }
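The hunk above changes the read loop so that a false return from the delegate's ReceivedBytes() stops the transfer instead of scheduling the next read. A stand-in sketch of that control flow (the type and function names here are made up, not the real HttpFetcherDelegate API):

    #include <cstddef>

    // Stand-in delegate type showing the pattern: returning false from
    // ReceivedBytes() tells the fetcher not to schedule another read.
    struct FetcherDelegateSketch {
      size_t received = 0;
      size_t limit = 4096;
      bool ReceivedBytes(const void* bytes, size_t length) {
        received += length;
        return received < limit;  // false once enough data has been consumed
      }
    };

    void OnReadDoneSketch(FetcherDelegateSketch* delegate, const void* buf, size_t n) {
      if (delegate && !delegate->ReceivedBytes(buf, n))
        return;          // delegate asked to stop; no further read is scheduled
      // ScheduleRead();  // the real fetcher would continue the transfer here
    }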
diff --git a/common/hardware_interface.h b/common/hardware_interface.h
index 94442d1..bbc8660 100644
--- a/common/hardware_interface.h
+++ b/common/hardware_interface.h
@@ -70,6 +70,32 @@
   // running a custom chrome os ec.
   virtual std::string GetECVersion() const = 0;
 
+  // Returns the minimum kernel key version that verified boot on Chrome OS
+  // will allow to boot. This is the value of crossystem tpm_kernver. Returns
+  // -1 on error, or if not running on Chrome OS.
+  virtual int GetMinKernelKeyVersion() const = 0;
+
+  // Returns the minimum firmware key version that verified boot on Chrome OS
+  // will allow to boot. This is the value of crossystem tpm_fwver. Returns
+  // -1 on error, or if not running on Chrome OS.
+  virtual int GetMinFirmwareKeyVersion() const = 0;
+
+  // Returns the maximum firmware key version that verified boot should roll
+  // forward to. This is the value of crossystem firmware_max_rollforward.
+  // Returns -1 on error, if this board does not yet support this value, or
+  // if not running on Chrome OS.
+  virtual int GetMaxFirmwareKeyRollforward() const = 0;
+
+  // Sets the maximum firmware key version that verified boot should roll
+  // forward to. This is the value of crossystem firmware_max_rollforward.
+  // This value is not available on all Chrome OS devices; returns false if
+  // the value cannot be set, or if not running on Chrome OS.
+  virtual bool SetMaxFirmwareKeyRollforward(int firmware_max_rollforward) = 0;
+
+  // Sets the maximum kernel key version that verified boot should roll
+  // forward to. This is the value of crossystem kernel_max_rollforward.
+  // Returns false if the value cannot be set, or if not running on Chrome OS.
+  virtual bool SetMaxKernelKeyRollforward(int kernel_max_rollforward) = 0;
+
   // Returns the powerwash_count from the stateful. If the file is not found
   // or is invalid, returns -1. Brand new machines out of the factory or after
   // recovery don't have this value set.
@@ -100,9 +126,9 @@
   // |SetFirstActiveOmahaPingSent()|.
   virtual bool GetFirstActiveOmahaPingSent() const = 0;
 
-  // Persist the fact that first active ping was sent to omaha. It bails out if
-  // it fails.
-  virtual void SetFirstActiveOmahaPingSent() = 0;
+  // Persists the fact that the first active ping was sent to Omaha. Returns
+  // false if it fails to persist this fact.
+  virtual bool SetFirstActiveOmahaPingSent() = 0;
 };
 
 }  // namespace chromeos_update_engine
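
For orientation, here is a minimal sketch of how a caller might drive the new rollforward interface; the helper name is hypothetical and not part of this change.

// Sketch: pin the kernel rollforward value to the currently trusted kernel
// key version (crossystem tpm_kernver). Hypothetical helper, for illustration.
void LockKernelRollforward(chromeos_update_engine::HardwareInterface* hardware) {
  int kernel_key_version = hardware->GetMinKernelKeyVersion();
  if (kernel_key_version < 0)
    return;  // Error, or not running on Chrome OS.
  if (!hardware->SetMaxKernelKeyRollforward(kernel_key_version))
    LOG(WARNING) << "Unable to set kernel_max_rollforward.";
}
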
diff --git a/common/hash_calculator_unittest.cc b/common/hash_calculator_unittest.cc
index 233237b..79f22ad 100644
--- a/common/hash_calculator_unittest.cc
+++ b/common/hash_calculator_unittest.cc
@@ -26,6 +26,7 @@
 #include <brillo/secure_blob.h>
 #include <gtest/gtest.h>
 
+#include "update_engine/common/test_utils.h"
 #include "update_engine/common/utils.h"
 
 using std::string;
@@ -43,10 +44,7 @@
   0xc8, 0x8b, 0x59, 0xb2, 0xdc, 0x32, 0x7a, 0xa4
 };
 
-class HashCalculatorTest : public ::testing::Test {
- public:
-  HashCalculatorTest() {}
-};
+class HashCalculatorTest : public ::testing::Test {};
 
 TEST_F(HashCalculatorTest, SimpleTest) {
   HashCalculator calc;
@@ -54,7 +52,7 @@
   calc.Finalize();
   brillo::Blob raw_hash(std::begin(kExpectedRawHash),
                         std::end(kExpectedRawHash));
-  EXPECT_TRUE(raw_hash == calc.raw_hash());
+  EXPECT_EQ(raw_hash, calc.raw_hash());
 }
 
 TEST_F(HashCalculatorTest, MultiUpdateTest) {
@@ -64,7 +62,7 @@
   calc.Finalize();
   brillo::Blob raw_hash(std::begin(kExpectedRawHash),
                         std::end(kExpectedRawHash));
-  EXPECT_TRUE(raw_hash == calc.raw_hash());
+  EXPECT_EQ(raw_hash, calc.raw_hash());
 }
 
 TEST_F(HashCalculatorTest, ContextTest) {
@@ -78,7 +76,7 @@
   calc_next.Finalize();
   brillo::Blob raw_hash(std::begin(kExpectedRawHash),
                         std::end(kExpectedRawHash));
-  EXPECT_TRUE(raw_hash == calc_next.raw_hash());
+  EXPECT_EQ(raw_hash, calc_next.raw_hash());
 }
 
 TEST_F(HashCalculatorTest, BigTest) {
@@ -108,25 +106,21 @@
 }
 
 TEST_F(HashCalculatorTest, UpdateFileSimpleTest) {
-  string data_path;
-  ASSERT_TRUE(
-      utils::MakeTempFile("data.XXXXXX", &data_path, nullptr));
-  ScopedPathUnlinker data_path_unlinker(data_path);
-  ASSERT_TRUE(utils::WriteFile(data_path.c_str(), "hi", 2));
+  test_utils::ScopedTempFile data_file("data.XXXXXX");
+  ASSERT_TRUE(test_utils::WriteFileString(data_file.path(), "hi"));
 
-  static const int kLengths[] = { -1, 2, 10 };
-  for (size_t i = 0; i < arraysize(kLengths); i++) {
+  for (const int length : {-1, 2, 10}) {
     HashCalculator calc;
-    EXPECT_EQ(2, calc.UpdateFile(data_path, kLengths[i]));
+    EXPECT_EQ(2, calc.UpdateFile(data_file.path(), length));
     EXPECT_TRUE(calc.Finalize());
     brillo::Blob raw_hash(std::begin(kExpectedRawHash),
                           std::end(kExpectedRawHash));
-    EXPECT_TRUE(raw_hash == calc.raw_hash());
+    EXPECT_EQ(raw_hash, calc.raw_hash());
   }
 
   HashCalculator calc;
-  EXPECT_EQ(0, calc.UpdateFile(data_path, 0));
-  EXPECT_EQ(1, calc.UpdateFile(data_path, 1));
+  EXPECT_EQ(0, calc.UpdateFile(data_file.path(), 0));
+  EXPECT_EQ(1, calc.UpdateFile(data_file.path(), 1));
   EXPECT_TRUE(calc.Finalize());
   // echo -n h | openssl dgst -sha256 -binary | openssl base64
   EXPECT_EQ("qqlAJmTxpB9A67xSyZk+tmrrNmYClY/fqig7ceZNsSM=",
@@ -134,21 +128,16 @@
 }
 
 TEST_F(HashCalculatorTest, RawHashOfFileSimpleTest) {
-  string data_path;
-  ASSERT_TRUE(
-      utils::MakeTempFile("data.XXXXXX", &data_path, nullptr));
-  ScopedPathUnlinker data_path_unlinker(data_path);
-  ASSERT_TRUE(utils::WriteFile(data_path.c_str(), "hi", 2));
+  test_utils::ScopedTempFile data_file("data.XXXXXX");
+  ASSERT_TRUE(test_utils::WriteFileString(data_file.path(), "hi"));
 
-  static const int kLengths[] = { -1, 2, 10 };
-  for (size_t i = 0; i < arraysize(kLengths); i++) {
+  for (const int length : {-1, 2, 10}) {
     brillo::Blob exp_raw_hash(std::begin(kExpectedRawHash),
                               std::end(kExpectedRawHash));
     brillo::Blob raw_hash;
-    EXPECT_EQ(2, HashCalculator::RawHashOfFile(data_path,
-                                               kLengths[i],
-                                               &raw_hash));
-    EXPECT_TRUE(exp_raw_hash == raw_hash);
+    EXPECT_EQ(
+        2, HashCalculator::RawHashOfFile(data_file.path(), length, &raw_hash));
+    EXPECT_EQ(exp_raw_hash, raw_hash);
   }
 }
 
diff --git a/common/http_fetcher.h b/common/http_fetcher.h
index 3f7b2e8..1f5c945 100644
--- a/common/http_fetcher.h
+++ b/common/http_fetcher.h
@@ -18,6 +18,7 @@
 #define UPDATE_ENGINE_COMMON_HTTP_FETCHER_H_
 
 #include <deque>
+#include <memory>
 #include <string>
 #include <vector>
 
@@ -27,7 +28,7 @@
 #include <brillo/message_loops/message_loop.h>
 
 #include "update_engine/common/http_common.h"
-#include "update_engine/proxy_resolver.h"
+#include "update_engine/common/proxy_resolver.h"
 
 // This class is a simple wrapper around an HTTP library (libcurl). We can
 // easily mock out this interface for testing.
@@ -186,8 +187,9 @@
  public:
   virtual ~HttpFetcherDelegate() = default;
 
-  // Called every time bytes are received.
-  virtual void ReceivedBytes(HttpFetcher* fetcher,
+  // Called every time bytes are received. Returns false if this call causes
+  // the transfer to be terminated or completed; otherwise, returns true.
+  virtual bool ReceivedBytes(HttpFetcher* fetcher,
                              const void* bytes,
                              size_t length) = 0;
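
A hedged sketch of the new contract, using a hypothetical delegate: a callback that ends the transfer itself must return false so callers such as FileFetcher and MultiRangeHttpFetcher stop processing.

// Sketch only; assumes the chromeos_update_engine namespace and usual includes.
class SizeCappedDelegate : public HttpFetcherDelegate {
 public:
  explicit SizeCappedDelegate(size_t limit) : limit_(limit) {}

  bool ReceivedBytes(HttpFetcher* fetcher,
                     const void* bytes,
                     size_t length) override {
    received_ += length;
    if (received_ < limit_)
      return true;
    fetcher->TerminateTransfer();  // TransferTerminated() will follow.
    return false;  // We ended the transfer; the caller must not keep reading.
  }

  void TransferComplete(HttpFetcher* fetcher, bool successful) override {}
  void TransferTerminated(HttpFetcher* fetcher) override {}

 private:
  size_t limit_;
  size_t received_{0};
};
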
 
diff --git a/common/http_fetcher_unittest.cc b/common/http_fetcher_unittest.cc
index 867216e..66767fb 100644
--- a/common/http_fetcher_unittest.cc
+++ b/common/http_fetcher_unittest.cc
@@ -44,12 +44,12 @@
 #include "update_engine/common/file_fetcher.h"
 #include "update_engine/common/http_common.h"
 #include "update_engine/common/mock_http_fetcher.h"
+#include "update_engine/common/mock_proxy_resolver.h"
 #include "update_engine/common/multi_range_http_fetcher.h"
+#include "update_engine/common/proxy_resolver.h"
 #include "update_engine/common/test_utils.h"
 #include "update_engine/common/utils.h"
 #include "update_engine/libcurl_http_fetcher.h"
-#include "update_engine/mock_proxy_resolver.h"
-#include "update_engine/proxy_resolver.h"
 
 using brillo::MessageLoop;
 using std::make_pair;
@@ -411,12 +411,13 @@
  public:
   HttpFetcherTestDelegate() = default;
 
-  void ReceivedBytes(HttpFetcher* /* fetcher */,
+  bool ReceivedBytes(HttpFetcher* /* fetcher */,
                      const void* bytes,
                      size_t length) override {
     data.append(reinterpret_cast<const char*>(bytes), length);
     // Update counters
     times_received_bytes_called_++;
+    return true;
   }
 
   void TransferComplete(HttpFetcher* fetcher, bool successful) override {
@@ -559,11 +560,13 @@
 namespace {
 class PausingHttpFetcherTestDelegate : public HttpFetcherDelegate {
  public:
-  void ReceivedBytes(HttpFetcher* fetcher,
-                     const void* /* bytes */, size_t /* length */) override {
+  bool ReceivedBytes(HttpFetcher* fetcher,
+                     const void* /* bytes */,
+                     size_t /* length */) override {
     CHECK(!paused_);
     paused_ = true;
     fetcher->Pause();
+    return true;
   }
   void TransferComplete(HttpFetcher* fetcher, bool successful) override {
     MessageLoop::current()->BreakLoop();
@@ -640,8 +643,11 @@
 namespace {
 class AbortingHttpFetcherTestDelegate : public HttpFetcherDelegate {
  public:
-  void ReceivedBytes(HttpFetcher* fetcher,
-                     const void* bytes, size_t length) override {}
+  bool ReceivedBytes(HttpFetcher* fetcher,
+                     const void* bytes,
+                     size_t length) override {
+    return true;
+  }
   void TransferComplete(HttpFetcher* fetcher, bool successful) override {
     ADD_FAILURE();  // We should never get here
     MessageLoop::current()->BreakLoop();
@@ -735,9 +741,11 @@
 namespace {
 class FlakyHttpFetcherTestDelegate : public HttpFetcherDelegate {
  public:
-  void ReceivedBytes(HttpFetcher* fetcher,
-                     const void* bytes, size_t length) override {
+  bool ReceivedBytes(HttpFetcher* fetcher,
+                     const void* bytes,
+                     size_t length) override {
     data.append(reinterpret_cast<const char*>(bytes), length);
+    return true;
   }
   void TransferComplete(HttpFetcher* fetcher, bool successful) override {
     EXPECT_TRUE(successful);
@@ -799,13 +807,15 @@
     }
   }
 
-  void ReceivedBytes(HttpFetcher* fetcher,
-                     const void* bytes, size_t length) override {
+  bool ReceivedBytes(HttpFetcher* fetcher,
+                     const void* bytes,
+                     size_t length) override {
     if (server_) {
       LOG(INFO) << "Stopping server in ReceivedBytes";
       server_.reset();
       LOG(INFO) << "server stopped";
     }
+    return true;
   }
   void TransferComplete(HttpFetcher* fetcher, bool successful) override {
     EXPECT_FALSE(successful);
@@ -973,9 +983,11 @@
  public:
   explicit RedirectHttpFetcherTestDelegate(bool expected_successful)
       : expected_successful_(expected_successful) {}
-  void ReceivedBytes(HttpFetcher* fetcher,
-                     const void* bytes, size_t length) override {
+  bool ReceivedBytes(HttpFetcher* fetcher,
+                     const void* bytes,
+                     size_t length) override {
     data.append(reinterpret_cast<const char*>(bytes), length);
+    return true;
   }
   void TransferComplete(HttpFetcher* fetcher, bool successful) override {
     EXPECT_EQ(expected_successful_, successful);
@@ -1072,10 +1084,12 @@
   explicit MultiHttpFetcherTestDelegate(int expected_response_code)
       : expected_response_code_(expected_response_code) {}
 
-  void ReceivedBytes(HttpFetcher* fetcher,
-                     const void* bytes, size_t length) override {
+  bool ReceivedBytes(HttpFetcher* fetcher,
+                     const void* bytes,
+                     size_t length) override {
     EXPECT_EQ(fetcher, fetcher_.get());
     data.append(reinterpret_cast<const char*>(bytes), length);
+    return true;
   }
 
   void TransferComplete(HttpFetcher* fetcher, bool successful) override {
@@ -1271,7 +1285,7 @@
   explicit MultiHttpFetcherTerminateTestDelegate(size_t terminate_trigger_bytes)
       : terminate_trigger_bytes_(terminate_trigger_bytes) {}
 
-  void ReceivedBytes(HttpFetcher* fetcher,
+  bool ReceivedBytes(HttpFetcher* fetcher,
                      const void* bytes,
                      size_t length) override {
     LOG(INFO) << "ReceivedBytes, " << length << " bytes.";
@@ -1284,6 +1298,7 @@
                      base::Unretained(fetcher_.get())));
     }
     bytes_downloaded_ += length;
+    return true;
   }
 
   void TransferComplete(HttpFetcher* fetcher, bool successful) override {
@@ -1337,9 +1352,11 @@
 namespace {
 class BlockedTransferTestDelegate : public HttpFetcherDelegate {
  public:
-  void ReceivedBytes(HttpFetcher* fetcher,
-                     const void* bytes, size_t length) override {
+  bool ReceivedBytes(HttpFetcher* fetcher,
+                     const void* bytes,
+                     size_t length) override {
     ADD_FAILURE();
+    return true;
   }
   void TransferComplete(HttpFetcher* fetcher, bool successful) override {
     EXPECT_FALSE(successful);
diff --git a/common/mock_action_processor.h b/common/mock_action_processor.h
index 04275c1..4c62109 100644
--- a/common/mock_action_processor.h
+++ b/common/mock_action_processor.h
@@ -17,6 +17,10 @@
 #ifndef UPDATE_ENGINE_COMMON_MOCK_ACTION_PROCESSOR_H_
 #define UPDATE_ENGINE_COMMON_MOCK_ACTION_PROCESSOR_H_
 
+#include <deque>
+#include <memory>
+#include <utility>
+
 #include <gmock/gmock.h>
 
 #include "update_engine/common/action.h"
@@ -27,6 +31,12 @@
  public:
   MOCK_METHOD0(StartProcessing, void());
   MOCK_METHOD1(EnqueueAction, void(AbstractAction* action));
+
+  // This is a legacy workaround described in:
+  // https://github.com/google/googletest/blob/master/googlemock/docs/CookBook.md#legacy-workarounds-for-move-only-types-legacymoveonly
+  void EnqueueAction(std::unique_ptr<AbstractAction> action) override {
+    EnqueueAction(action.get());
+  }
 };
 
 }  // namespace chromeos_update_engine
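
A short sketch of what this workaround buys in tests; |SomeConcreteAction| is a hypothetical AbstractAction subclass used only for illustration.

MockActionProcessor processor;
EXPECT_CALL(processor, EnqueueAction(testing::_)).Times(1);
// The caller hands over ownership with a unique_ptr; the override above
// unwraps it and forwards the raw pointer to the mocked overload, so the
// expectation written against AbstractAction* is still satisfied.
processor.EnqueueAction(std::make_unique<SomeConcreteAction>());
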
diff --git a/common/mock_hardware.h b/common/mock_hardware.h
index 42fa7ba..f972df2 100644
--- a/common/mock_hardware.h
+++ b/common/mock_hardware.h
@@ -54,6 +54,21 @@
     ON_CALL(*this, GetECVersion())
       .WillByDefault(testing::Invoke(&fake_,
             &FakeHardware::GetECVersion));
+    ON_CALL(*this, GetMinKernelKeyVersion())
+        .WillByDefault(
+            testing::Invoke(&fake_, &FakeHardware::GetMinKernelKeyVersion));
+    ON_CALL(*this, GetMinFirmwareKeyVersion())
+        .WillByDefault(
+            testing::Invoke(&fake_, &FakeHardware::GetMinFirmwareKeyVersion));
+    ON_CALL(*this, GetMaxFirmwareKeyRollforward())
+        .WillByDefault(testing::Invoke(
+            &fake_, &FakeHardware::GetMaxFirmwareKeyRollforward));
+    ON_CALL(*this, SetMaxFirmwareKeyRollforward(testing::_))
+        .WillByDefault(testing::Invoke(
+            &fake_, &FakeHardware::SetMaxFirmwareKeyRollforward));
+    ON_CALL(*this, SetMaxKernelKeyRollforward(testing::_))
+        .WillByDefault(
+            testing::Invoke(&fake_, &FakeHardware::SetMaxKernelKeyRollforward));
     ON_CALL(*this, GetPowerwashCount())
       .WillByDefault(testing::Invoke(&fake_,
             &FakeHardware::GetPowerwashCount));
@@ -81,6 +96,13 @@
   MOCK_CONST_METHOD0(GetHardwareClass, std::string());
   MOCK_CONST_METHOD0(GetFirmwareVersion, std::string());
   MOCK_CONST_METHOD0(GetECVersion, std::string());
+  MOCK_CONST_METHOD0(GetMinKernelKeyVersion, int());
+  MOCK_CONST_METHOD0(GetMinFirmwareKeyVersion, int());
+  MOCK_CONST_METHOD0(GetMaxFirmwareKeyRollforward, int());
+  MOCK_METHOD1(SetMaxFirmwareKeyRollforward,
+               bool(int firmware_max_rollforward));
+  MOCK_METHOD1(SetMaxKernelKeyRollforward,
+               bool(int kernel_max_rollforward));
   MOCK_CONST_METHOD0(GetPowerwashCount, int());
   MOCK_CONST_METHOD1(GetNonVolatileDirectory, bool(base::FilePath*));
   MOCK_CONST_METHOD1(GetPowerwashSafeDirectory, bool(base::FilePath*));
diff --git a/common/mock_http_fetcher.cc b/common/mock_http_fetcher.cc
index f1ae72a..9507c9d 100644
--- a/common/mock_http_fetcher.cc
+++ b/common/mock_http_fetcher.cc
@@ -47,71 +47,49 @@
     SendData(true);
 }
 
-// Returns false on one condition: If timeout_id_ was already set
-// and it needs to be deleted by the caller. If timeout_id_ is null
-// when this function is called, this function will always return true.
-bool MockHttpFetcher::SendData(bool skip_delivery) {
-  if (fail_transfer_) {
+void MockHttpFetcher::SendData(bool skip_delivery) {
+  if (fail_transfer_ || sent_size_ == data_.size()) {
     SignalTransferComplete();
-    return timeout_id_ != MessageLoop::kTaskIdNull;
-  }
-
-  CHECK_LT(sent_size_, data_.size());
-  if (!skip_delivery) {
-    const size_t chunk_size = min(kMockHttpFetcherChunkSize,
-                                  data_.size() - sent_size_);
-    CHECK(delegate_);
-    delegate_->ReceivedBytes(this, &data_[sent_size_], chunk_size);
-    // We may get terminated in the callback.
-    if (sent_size_ == data_.size()) {
-      LOG(INFO) << "Terminated in the ReceivedBytes callback.";
-      return timeout_id_ != MessageLoop::kTaskIdNull;
-    }
-    sent_size_ += chunk_size;
-    CHECK_LE(sent_size_, data_.size());
-    if (sent_size_ == data_.size()) {
-      // We've sent all the data. Notify of success.
-      SignalTransferComplete();
-    }
+    return;
   }
 
   if (paused_) {
-    // If we're paused, we should return true if timeout_id_ is set,
-    // since we need the caller to delete it.
-    return timeout_id_ != MessageLoop::kTaskIdNull;
+    // If we're paused, we should return so no callback is scheduled.
+    return;
   }
 
-  if (timeout_id_ != MessageLoop::kTaskIdNull) {
-    // we still need a timeout if there's more data to send
-    return sent_size_ < data_.size();
-  } else if (sent_size_ < data_.size()) {
-    // we don't have a timeout source and we need one
+  // Set up the timeout callback even if the transfer is about to be
+  // completed, so that |TransferComplete| still gets called.
+  if (timeout_id_ == MessageLoop::kTaskIdNull) {
     timeout_id_ = MessageLoop::current()->PostDelayedTask(
         FROM_HERE,
         base::Bind(&MockHttpFetcher::TimeoutCallback, base::Unretained(this)),
         base::TimeDelta::FromMilliseconds(10));
   }
-  return true;
+
+  if (!skip_delivery) {
+    const size_t chunk_size =
+        min(kMockHttpFetcherChunkSize, data_.size() - sent_size_);
+    sent_size_ += chunk_size;
+    CHECK(delegate_);
+    delegate_->ReceivedBytes(this, &data_[sent_size_ - chunk_size], chunk_size);
+  }
+  // We may get terminated and deleted right after the |ReceivedBytes| call,
+  // so we should not access any class member variable after this call.
 }
 
 void MockHttpFetcher::TimeoutCallback() {
   CHECK(!paused_);
-  if (SendData(false)) {
-    // We need to re-schedule the timeout.
-    timeout_id_ = MessageLoop::current()->PostDelayedTask(
-        FROM_HERE,
-        base::Bind(&MockHttpFetcher::TimeoutCallback, base::Unretained(this)),
-        base::TimeDelta::FromMilliseconds(10));
-  } else {
-    timeout_id_ = MessageLoop::kTaskIdNull;
-  }
+  timeout_id_ = MessageLoop::kTaskIdNull;
+  CHECK_LE(sent_size_, data_.size());
+  // Same here: we should not access any member variable after this call.
+  SendData(false);
 }
 
 // If the transfer is in progress, aborts the transfer early.
 // The transfer cannot be resumed.
 void MockHttpFetcher::TerminateTransfer() {
   LOG(INFO) << "Terminating transfer.";
-  sent_size_ = data_.size();
   // Kill any timeout, it is ok to call with kTaskIdNull.
   MessageLoop::current()->CancelTask(timeout_id_);
   timeout_id_ = MessageLoop::kTaskIdNull;
@@ -140,9 +118,7 @@
 void MockHttpFetcher::Unpause() {
   CHECK(paused_) << "You must pause before unpause.";
   paused_ = false;
-  if (sent_size_ < data_.size()) {
-    SendData(false);
-  }
+  SendData(false);
 }
 
 void MockHttpFetcher::FailTransfer(int http_response_code) {
diff --git a/common/mock_http_fetcher.h b/common/mock_http_fetcher.h
index 367802e..00f4e2b 100644
--- a/common/mock_http_fetcher.h
+++ b/common/mock_http_fetcher.h
@@ -112,13 +112,10 @@
   }
 
  private:
-  // Sends data to the delegate and sets up a timeout callback if needed.
-  // There must be a delegate and there must be data to send. If there is
-  // already a timeout callback, and it should be deleted by the caller,
-  // this will return false; otherwise true is returned.
-  // If skip_delivery is true, no bytes will be delivered, but the callbacks
-  // still be set if needed.
-  bool SendData(bool skip_delivery);
+  // Sends data to the delegate and sets up a timeout callback if needed. There
+  // must be a delegate. If |skip_delivery| is true, no bytes will be delivered,
+  // but the callbacks will still be set if needed.
+  void SendData(bool skip_delivery);
 
   // Callback for when our message loop timeout expires.
   void TimeoutCallback();
diff --git a/mock_proxy_resolver.h b/common/mock_proxy_resolver.h
similarity index 82%
rename from mock_proxy_resolver.h
rename to common/mock_proxy_resolver.h
index bd6d04f..67de68f 100644
--- a/mock_proxy_resolver.h
+++ b/common/mock_proxy_resolver.h
@@ -14,14 +14,14 @@
 // limitations under the License.
 //
 
-#ifndef UPDATE_ENGINE_MOCK_PROXY_RESOLVER_H_
-#define UPDATE_ENGINE_MOCK_PROXY_RESOLVER_H_
+#ifndef UPDATE_ENGINE_COMMON_MOCK_PROXY_RESOLVER_H_
+#define UPDATE_ENGINE_COMMON_MOCK_PROXY_RESOLVER_H_
 
 #include <string>
 
 #include <gmock/gmock.h>
 
-#include "update_engine/proxy_resolver.h"
+#include "update_engine/common/proxy_resolver.h"
 
 namespace chromeos_update_engine {
 
@@ -35,4 +35,4 @@
 
 }  // namespace chromeos_update_engine
 
-#endif  // UPDATE_ENGINE_MOCK_PROXY_RESOLVER_H_
+#endif  // UPDATE_ENGINE_COMMON_MOCK_PROXY_RESOLVER_H_
diff --git a/common/multi_range_http_fetcher.cc b/common/multi_range_http_fetcher.cc
index 1189fde..0a19c6a 100644
--- a/common/multi_range_http_fetcher.cc
+++ b/common/multi_range_http_fetcher.cc
@@ -86,7 +86,7 @@
 }
 
 // State change: Downloading -> Downloading or Pending transfer ended
-void MultiRangeHttpFetcher::ReceivedBytes(HttpFetcher* fetcher,
+bool MultiRangeHttpFetcher::ReceivedBytes(HttpFetcher* fetcher,
                                           const void* bytes,
                                           size_t length) {
   CHECK_LT(current_index_, ranges_.size());
@@ -99,9 +99,9 @@
                          range.length() - bytes_received_this_range_);
   }
   LOG_IF(WARNING, next_size <= 0) << "Asked to write length <= 0";
-  if (delegate_) {
-    delegate_->ReceivedBytes(this, bytes, next_size);
-  }
+  if (delegate_ && !delegate_->ReceivedBytes(this, bytes, next_size))
+    return false;
+
   bytes_received_this_range_ += length;
   if (range.HasLength() && bytes_received_this_range_ >= range.length()) {
     // Terminates the current fetcher. Waits for its TransferTerminated
@@ -109,9 +109,10 @@
     // signalling the delegate that the whole multi-transfer is complete
     // before all fetchers are really done and cleaned up.
     pending_transfer_ended_ = true;
-    LOG(INFO) << "terminating transfer";
+    LOG(INFO) << "Terminating transfer.";
     fetcher->TerminateTransfer();
   }
+  return true;
 }
 
 // State change: Downloading or Pending transfer ended -> Stopped
diff --git a/common/multi_range_http_fetcher.h b/common/multi_range_http_fetcher.h
index 54ddfbc..763c287 100644
--- a/common/multi_range_http_fetcher.h
+++ b/common/multi_range_http_fetcher.h
@@ -146,7 +146,7 @@
 
   // HttpFetcherDelegate overrides.
   // State change: Downloading -> Downloading or Pending transfer ended
-  void ReceivedBytes(HttpFetcher* fetcher,
+  bool ReceivedBytes(HttpFetcher* fetcher,
                      const void* bytes,
                      size_t length) override;
 
diff --git a/proxy_resolver.cc b/common/proxy_resolver.cc
similarity index 97%
rename from proxy_resolver.cc
rename to common/proxy_resolver.cc
index 2ec59db..0591c3e 100644
--- a/proxy_resolver.cc
+++ b/common/proxy_resolver.cc
@@ -14,7 +14,7 @@
 // limitations under the License.
 //
 
-#include "update_engine/proxy_resolver.h"
+#include "update_engine/common/proxy_resolver.h"
 
 #include <base/bind.h>
 #include <base/location.h>
@@ -63,5 +63,4 @@
   callback.Run(proxies);
 }
 
-
 }  // namespace chromeos_update_engine
diff --git a/proxy_resolver.h b/common/proxy_resolver.h
similarity index 95%
rename from proxy_resolver.h
rename to common/proxy_resolver.h
index 19a400f..9bd51fc 100644
--- a/proxy_resolver.h
+++ b/common/proxy_resolver.h
@@ -14,8 +14,8 @@
 // limitations under the License.
 //
 
-#ifndef UPDATE_ENGINE_PROXY_RESOLVER_H_
-#define UPDATE_ENGINE_PROXY_RESOLVER_H_
+#ifndef UPDATE_ENGINE_COMMON_PROXY_RESOLVER_H_
+#define UPDATE_ENGINE_COMMON_PROXY_RESOLVER_H_
 
 #include <deque>
 #include <string>
@@ -95,4 +95,4 @@
 
 }  // namespace chromeos_update_engine
 
-#endif  // UPDATE_ENGINE_PROXY_RESOLVER_H_
+#endif  // UPDATE_ENGINE_COMMON_PROXY_RESOLVER_H_
diff --git a/proxy_resolver_unittest.cc b/common/proxy_resolver_unittest.cc
similarity index 97%
rename from proxy_resolver_unittest.cc
rename to common/proxy_resolver_unittest.cc
index 484aae1..101bf6b 100644
--- a/proxy_resolver_unittest.cc
+++ b/common/proxy_resolver_unittest.cc
@@ -14,7 +14,7 @@
 // limitations under the License.
 //
 
-#include "update_engine/proxy_resolver.h"
+#include "update_engine/common/proxy_resolver.h"
 
 #include <deque>
 #include <string>
diff --git a/common/subprocess.cc b/common/subprocess.cc
index 4e6d352..1715cb0 100644
--- a/common/subprocess.cc
+++ b/common/subprocess.cc
@@ -23,6 +23,7 @@
 
 #include <memory>
 #include <string>
+#include <utility>
 #include <vector>
 
 #include <base/bind.h>
@@ -281,14 +282,22 @@
   return proc_return_code != brillo::Process::kErrorExitStatus;
 }
 
-bool Subprocess::SubprocessInFlight() {
-  for (const auto& pid_record : subprocess_records_) {
-    if (!pid_record.second->callback.is_null())
-      return true;
+void Subprocess::FlushBufferedLogsAtExit() {
+  if (!subprocess_records_.empty()) {
+    LOG(INFO) << "We are exiting, but there are still in flight subprocesses!";
+    for (auto& pid_record : subprocess_records_) {
+      SubprocessRecord* record = pid_record.second.get();
+      // Make sure we read any remaining process output.
+      OnStdoutReady(record);
+      if (!record->stdout.empty()) {
+        LOG(INFO) << "Subprocess(" << pid_record.first << ") output:\n"
+                  << record->stdout;
+      }
+    }
   }
-  return false;
 }
 
 Subprocess* Subprocess::subprocess_singleton_ = nullptr;
 
 }  // namespace chromeos_update_engine
diff --git a/common/subprocess.h b/common/subprocess.h
index b655fb7..209158b 100644
--- a/common/subprocess.h
+++ b/common/subprocess.h
@@ -101,8 +101,10 @@
     return *subprocess_singleton_;
   }
 
-  // Returns true iff there is at least one subprocess we're waiting on.
-  bool SubprocessInFlight();
+  // Tries to log the output of all in-flight subprocesses. It is used right
+  // before update_engine exits, typically when a subprocess caused a system
+  // shutdown.
+  void FlushBufferedLogsAtExit();
 
  private:
   FRIEND_TEST(SubprocessTest, CancelTest);
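
As a sketch of the intended call site (assuming the existing singleton accessor; the function name below is hypothetical):

void OnUnexpectedExit() {
  // Dump whatever output has been captured from subprocesses that are still
  // running, e.g. a postinstall hook that triggered a shutdown.
  chromeos_update_engine::Subprocess::Get().FlushBufferedLogsAtExit();
}
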
diff --git a/common/test_utils.cc b/common/test_utils.cc
index 85f78f9..04f55d0 100644
--- a/common/test_utils.cc
+++ b/common/test_utils.cc
@@ -28,7 +28,6 @@
 #include <sys/stat.h>
 #include <sys/sysmacros.h>
 #include <sys/types.h>
-#include <sys/xattr.h>
 #include <unistd.h>
 
 #include <set>
@@ -112,36 +111,6 @@
   return string(buf.begin(), buf.begin() + r);
 }
 
-bool IsXAttrSupported(const base::FilePath& dir_path) {
-  char *path = strdup(dir_path.Append("xattr_test_XXXXXX").value().c_str());
-
-  int fd = mkstemp(path);
-  if (fd == -1) {
-    PLOG(ERROR) << "Error creating temporary file in " << dir_path.value();
-    free(path);
-    return false;
-  }
-
-  if (unlink(path) != 0) {
-    PLOG(ERROR) << "Error unlinking temporary file " << path;
-    close(fd);
-    free(path);
-    return false;
-  }
-
-  int xattr_res = fsetxattr(fd, "user.xattr-test", "value", strlen("value"), 0);
-  if (xattr_res != 0) {
-    if (errno == ENOTSUP) {
-      // Leave it to call-sites to warn about non-support.
-    } else {
-      PLOG(ERROR) << "Error setting xattr on " << path;
-    }
-  }
-  close(fd);
-  free(path);
-  return xattr_res == 0;
-}
-
 bool WriteFileVector(const string& path, const brillo::Blob& data) {
   return utils::WriteFile(path.c_str(), data.data(), data.size());
 }
diff --git a/common/test_utils.h b/common/test_utils.h
index ddb3d34..ffe6f67 100644
--- a/common/test_utils.h
+++ b/common/test_utils.h
@@ -95,11 +95,6 @@
 // Reads a symlink from disk. Returns empty string on failure.
 std::string Readlink(const std::string& path);
 
-// Checks if xattr is supported in the directory specified by
-// |dir_path| which must be writable. Returns true if the feature is
-// supported, false if not or if an error occurred.
-bool IsXAttrSupported(const base::FilePath& dir_path);
-
 void FillWithData(brillo::Blob* buffer);
 
 // Compare the value of native array for download source parameter.
diff --git a/common/utils.cc b/common/utils.cc
index f651823..1a8fd53 100644
--- a/common/utils.cc
+++ b/common/utils.cc
@@ -61,6 +61,7 @@
 using base::Time;
 using base::TimeDelta;
 using std::min;
+using std::numeric_limits;
 using std::pair;
 using std::string;
 using std::vector;
@@ -291,7 +292,10 @@
   return true;
 }
 
-bool PReadAll(const FileDescriptorPtr& fd, void* buf, size_t count, off_t offset,
+bool PReadAll(const FileDescriptorPtr& fd,
+              void* buf,
+              size_t count,
+              off_t offset,
               ssize_t* out_bytes_read) {
   TEST_AND_RETURN_FALSE_ERRNO(fd->Seek(offset, SEEK_SET) !=
                               static_cast<off_t>(-1));
@@ -774,7 +778,7 @@
   if (size < offsetof(Elf32_Ehdr, e_machine) + sizeof(hdr->e_machine))
     return true;
   uint16_t e_machine;
-  // Fix endianess regardless of the host endianess.
+  // Fix endianness regardless of the host endianness.
   if (ei_data == ELFDATA2LSB)
     e_machine = le16toh(hdr->e_machine);
   else
@@ -1067,6 +1071,48 @@
   return true;
 }
 
+int VersionPrefix(const std::string& version) {
+  if (version.empty()) {
+    return 0;
+  }
+  vector<string> tokens = base::SplitString(
+      version, ".", base::KEEP_WHITESPACE, base::SPLIT_WANT_ALL);
+  int value;
+  if (tokens.empty() || !base::StringToInt(tokens[0], &value))
+    return -1;  // Target version is invalid.
+  return value;
+}
+
+void ParseRollbackKeyVersion(const string& raw_version,
+                             uint16_t* high_version,
+                             uint16_t* low_version) {
+  DCHECK(high_version);
+  DCHECK(low_version);
+  *high_version = numeric_limits<uint16_t>::max();
+  *low_version = numeric_limits<uint16_t>::max();
+
+  vector<string> parts = base::SplitString(
+      raw_version, ".", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
+  if (parts.size() != 2) {
+    // The version string must have exactly one period.
+    return;
+  }
+
+  int high;
+  int low;
+  if (!(base::StringToInt(parts[0], &high) &&
+        base::StringToInt(parts[1], &low))) {
+    // At least one of the two parts could not be parsed as an integer.
+    return;
+  }
+
+  if (high >= 0 && high < numeric_limits<uint16_t>::max() && low >= 0 &&
+      low < numeric_limits<uint16_t>::max()) {
+    *high_version = static_cast<uint16_t>(high);
+    *low_version = static_cast<uint16_t>(low);
+  }
+}
+
 }  // namespace utils
 
 }  // namespace chromeos_update_engine
diff --git a/common/utils.h b/common/utils.h
index e4ffcf8..f7f285b 100644
--- a/common/utils.h
+++ b/common/utils.h
@@ -21,6 +21,7 @@
 #include <unistd.h>
 
 #include <algorithm>
+#include <limits>
 #include <map>
 #include <memory>
 #include <set>
@@ -110,7 +111,7 @@
 // occurs, -1 is returned.
 off_t BlockDevSize(int fd);
 
-// Returns the size of the file at path, or the file desciptor fd. If the file
+// Returns the size of the file at path, or the file descriptor fd. If the file
 // is actually a block device, this function will automatically call
 // BlockDevSize. If the file doesn't exist or some error occurs, -1 is
 // returned.
@@ -228,20 +229,6 @@
   HexDumpArray(vect.data(), vect.size());
 }
 
-template<typename KeyType, typename ValueType>
-bool MapContainsKey(const std::map<KeyType, ValueType>& m, const KeyType& k) {
-  return m.find(k) != m.end();
-}
-template<typename KeyType>
-bool SetContainsKey(const std::set<KeyType>& s, const KeyType& k) {
-  return s.find(k) != s.end();
-}
-
-template<typename T>
-bool VectorContainsValue(const std::vector<T>& vect, const T& value) {
-  return std::find(vect.begin(), vect.end(), value) != vect.end();
-}
-
 template<typename T>
 bool VectorIndexOf(const std::vector<T>& vect, const T& value,
                    typename std::vector<T>::size_type* out_index) {
@@ -331,6 +318,28 @@
 // reboot. Returns whether it succeeded getting the boot_id.
 bool GetBootId(std::string* boot_id);
 
+// Divide |x| by |y| and round up to the nearest integer.
+constexpr uint64_t DivRoundUp(uint64_t x, uint64_t y) {
+  return (x + y - 1) / y;
+}
+
+// Round |x| up to be a multiple of |y|.
+constexpr uint64_t RoundUp(uint64_t x, uint64_t y) {
+  return DivRoundUp(x, y) * y;
+}
+
+// Returns the integer value of the first section of |version|. E.g. for
+// "10575.39." it returns 10575. Returns 0 if |version| is empty, and -1 if
+// the first section of |version| is invalid (e.g. not a number).
+int VersionPrefix(const std::string& version);
+
+// Parses a string in the form high.low, where high and low are 16-bit unsigned
+// integers. If there is more than one dot, or if either of the two parts is
+// not a valid 16-bit unsigned number, then 0xffff is returned for both.
+void ParseRollbackKeyVersion(const std::string& raw_version,
+                             uint16_t* high_version,
+                             uint16_t* low_version);
+
 }  // namespace utils
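
A quick illustration of the new constexpr helpers, with values chosen only for the example:

static_assert(chromeos_update_engine::utils::DivRoundUp(10, 4) == 3,
              "10 items need 3 groups of 4");
static_assert(chromeos_update_engine::utils::RoundUp(10, 4) == 12,
              "10 rounded up to a multiple of 4 is 12");
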
 
 
diff --git a/common/utils_unittest.cc b/common/utils_unittest.cc
index 62f9f6c..3405b68 100644
--- a/common/utils_unittest.cc
+++ b/common/utils_unittest.cc
@@ -22,6 +22,7 @@
 #include <sys/stat.h>
 #include <sys/types.h>
 
+#include <limits>
 #include <string>
 #include <vector>
 
@@ -32,6 +33,7 @@
 
 #include "update_engine/common/test_utils.h"
 
+using std::numeric_limits;
 using std::string;
 using std::vector;
 
@@ -57,13 +59,11 @@
 }
 
 TEST(UtilsTest, WriteFileReadFile) {
-  base::FilePath file;
-  EXPECT_TRUE(base::CreateTemporaryFile(&file));
-  ScopedPathUnlinker unlinker(file.value());
-  EXPECT_TRUE(utils::WriteFile(file.value().c_str(), "hello", 5));
+  test_utils::ScopedTempFile file;
+  EXPECT_TRUE(utils::WriteFile(file.path().c_str(), "hello", 5));
 
   brillo::Blob readback;
-  EXPECT_TRUE(utils::ReadFile(file.value().c_str(), &readback));
+  EXPECT_TRUE(utils::ReadFile(file.path().c_str(), &readback));
   EXPECT_EQ("hello", string(readback.begin(), readback.end()));
 }
 
@@ -73,24 +73,21 @@
 }
 
 TEST(UtilsTest, ReadFileChunk) {
-  base::FilePath file;
-  EXPECT_TRUE(base::CreateTemporaryFile(&file));
-  ScopedPathUnlinker unlinker(file.value());
+  test_utils::ScopedTempFile file;
   brillo::Blob data;
   const size_t kSize = 1024 * 1024;
   for (size_t i = 0; i < kSize; i++) {
     data.push_back(i % 255);
   }
-  EXPECT_TRUE(utils::WriteFile(file.value().c_str(), data.data(), data.size()));
+  EXPECT_TRUE(test_utils::WriteFileVector(file.path(), data));
   brillo::Blob in_data;
-  EXPECT_TRUE(utils::ReadFileChunk(file.value().c_str(), kSize, 10, &in_data));
+  EXPECT_TRUE(utils::ReadFileChunk(file.path().c_str(), kSize, 10, &in_data));
   EXPECT_TRUE(in_data.empty());
-  EXPECT_TRUE(utils::ReadFileChunk(file.value().c_str(), 0, -1, &in_data));
-  EXPECT_TRUE(data == in_data);
+  EXPECT_TRUE(utils::ReadFileChunk(file.path().c_str(), 0, -1, &in_data));
+  EXPECT_EQ(data, in_data);
   in_data.clear();
-  EXPECT_TRUE(utils::ReadFileChunk(file.value().c_str(), 10, 20, &in_data));
-  EXPECT_TRUE(brillo::Blob(data.begin() + 10, data.begin() + 10 + 20) ==
-              in_data);
+  EXPECT_TRUE(utils::ReadFileChunk(file.path().c_str(), 10, 20, &in_data));
+  EXPECT_EQ(brillo::Blob(data.begin() + 10, data.begin() + 10 + 20), in_data);
 }
 
 TEST(UtilsTest, ErrnoNumberAsStringTest) {
@@ -451,6 +448,22 @@
   *ret = true;
 }
 
+static void ExpectParseRollbackKeyVersion(const string& version,
+                                          uint16_t expected_high,
+                                          uint16_t expected_low) {
+  uint16_t actual_high;
+  uint16_t actual_low;
+  utils::ParseRollbackKeyVersion(version, &actual_high, &actual_low);
+  EXPECT_EQ(expected_high, actual_high);
+  EXPECT_EQ(expected_low, actual_low);
+}
+
+static void ExpectInvalidParseRollbackKeyVersion(const string& version) {
+  ExpectParseRollbackKeyVersion(version,
+                                numeric_limits<uint16_t>::max(),
+                                numeric_limits<uint16_t>::max());
+}
+
 TEST(UtilsTest, TestMacros) {
   bool void_test = false;
   VoidMacroTestHelper(&void_test);
@@ -464,20 +477,18 @@
 }
 
 TEST(UtilsTest, RunAsRootUnmountFilesystemBusyFailureTest) {
-  string tmp_image;
-  EXPECT_TRUE(utils::MakeTempFile("img.XXXXXX", &tmp_image, nullptr));
-  ScopedPathUnlinker tmp_image_unlinker(tmp_image);
+  test_utils::ScopedTempFile tmp_image("img.XXXXXX");
 
   EXPECT_TRUE(base::CopyFile(
       test_utils::GetBuildArtifactsPath().Append("gen/disk_ext2_4k.img"),
-      base::FilePath(tmp_image)));
+      base::FilePath(tmp_image.path())));
 
   base::ScopedTempDir mnt_dir;
   EXPECT_TRUE(mnt_dir.CreateUniqueTempDir());
 
   string loop_dev;
   test_utils::ScopedLoopbackDeviceBinder loop_binder(
-      tmp_image, true, &loop_dev);
+      tmp_image.path(), true, &loop_dev);
 
   EXPECT_FALSE(utils::IsMountpoint(mnt_dir.GetPath().value()));
   // This is the actual test part. While we hold a file descriptor open for the
@@ -506,10 +517,45 @@
   EXPECT_TRUE(mnt_dir.CreateUniqueTempDir());
   EXPECT_FALSE(utils::IsMountpoint(mnt_dir.GetPath().value()));
 
-  base::FilePath file;
-  EXPECT_TRUE(base::CreateTemporaryFile(&file));
-  ScopedPathUnlinker unlinker(file.value());
-  EXPECT_FALSE(utils::IsMountpoint(file.value()));
+  test_utils::ScopedTempFile file;
+  EXPECT_FALSE(utils::IsMountpoint(file.path()));
+}
+
+TEST(UtilsTest, VersionPrefix) {
+  EXPECT_EQ(10575, utils::VersionPrefix("10575.39."));
+  EXPECT_EQ(10575, utils::VersionPrefix("10575.39"));
+  EXPECT_EQ(10575, utils::VersionPrefix("10575.x"));
+  EXPECT_EQ(10575, utils::VersionPrefix("10575."));
+  EXPECT_EQ(10575, utils::VersionPrefix("10575"));
+  EXPECT_EQ(0, utils::VersionPrefix(""));
+  EXPECT_EQ(-1, utils::VersionPrefix("x"));
+  EXPECT_EQ(-1, utils::VersionPrefix("1x"));
+  EXPECT_EQ(-1, utils::VersionPrefix("x.1"));
+}
+
+TEST(UtilsTest, ParseDottedVersion) {
+  // Valid case.
+  ExpectParseRollbackKeyVersion("2.3", 2, 3);
+  ExpectParseRollbackKeyVersion("65535.65535", 65535, 65535);
+
+  // Zero is technically allowed but never actually used.
+  ExpectParseRollbackKeyVersion("0.0", 0, 0);
+
+  // Invalid cases.
+  ExpectInvalidParseRollbackKeyVersion("");
+  ExpectInvalidParseRollbackKeyVersion("2");
+  ExpectInvalidParseRollbackKeyVersion("2.");
+  ExpectInvalidParseRollbackKeyVersion(".2");
+  ExpectInvalidParseRollbackKeyVersion("2.2.");
+  ExpectInvalidParseRollbackKeyVersion("2.2.3");
+  ExpectInvalidParseRollbackKeyVersion(".2.2");
+  ExpectInvalidParseRollbackKeyVersion("a.b");
+  ExpectInvalidParseRollbackKeyVersion("1.b");
+  ExpectInvalidParseRollbackKeyVersion("a.2");
+  ExpectInvalidParseRollbackKeyVersion("65536.65536");
+  ExpectInvalidParseRollbackKeyVersion("99999.99999");
+  ExpectInvalidParseRollbackKeyVersion("99999.1");
+  ExpectInvalidParseRollbackKeyVersion("1.99999");
 }
 
 }  // namespace chromeos_update_engine
diff --git a/common_service.cc b/common_service.cc
index 9f3b862..c88d940 100644
--- a/common_service.cc
+++ b/common_service.cc
@@ -16,7 +16,6 @@
 
 #include "update_engine/common_service.h"
 
-#include <set>
 #include <string>
 
 #include <base/bind.h>
@@ -41,7 +40,6 @@
 using base::StringPrintf;
 using brillo::ErrorPtr;
 using brillo::string_utils::ToString;
-using std::set;
 using std::string;
 using update_engine::UpdateAttemptFlags;
 using update_engine::UpdateEngineStatus;
@@ -51,7 +49,7 @@
 namespace {
 // Log and set the error on the passed ErrorPtr.
 void LogAndSetError(ErrorPtr* error,
-                    const tracked_objects::Location& location,
+                    const base::Location& location,
                     const string& reason) {
   brillo::Error::AddTo(error,
                        location,
@@ -258,22 +256,11 @@
 
 bool UpdateEngineService::SetUpdateOverCellularPermission(ErrorPtr* error,
                                                           bool in_allowed) {
-  set<string> allowed_types;
-  const policy::DevicePolicy* device_policy = system_state_->device_policy();
-
-  // The device_policy is loaded in a lazy way before an update check. Load it
-  // now from the libbrillo cache if it wasn't already loaded.
-  if (!device_policy) {
-    UpdateAttempter* update_attempter = system_state_->update_attempter();
-    if (update_attempter) {
-      update_attempter->RefreshDevicePolicy();
-      device_policy = system_state_->device_policy();
-    }
-  }
+  ConnectionManagerInterface* connection_manager =
+      system_state_->connection_manager();
 
   // Check if this setting is allowed by the device policy.
-  if (device_policy &&
-      device_policy->GetAllowedConnectionTypesForUpdate(&allowed_types)) {
+  if (connection_manager->IsAllowedConnectionTypesForUpdateSet()) {
     LogAndSetError(error,
                    FROM_HERE,
                    "Ignoring the update over cellular setting since there's "
@@ -286,7 +273,8 @@
 
   PrefsInterface* prefs = system_state_->prefs();
 
-  if (!prefs->SetBoolean(kPrefsUpdateOverCellularPermission, in_allowed)) {
+  if (!prefs ||
+      !prefs->SetBoolean(kPrefsUpdateOverCellularPermission, in_allowed)) {
     LogAndSetError(error,
                    FROM_HERE,
                    string("Error setting the update over cellular to ") +
@@ -296,24 +284,66 @@
   return true;
 }
 
-bool UpdateEngineService::GetUpdateOverCellularPermission(ErrorPtr* /* error */,
-                                                          bool* out_allowed) {
-  ConnectionManagerInterface* cm = system_state_->connection_manager();
+bool UpdateEngineService::SetUpdateOverCellularTarget(
+    brillo::ErrorPtr* error,
+    const std::string& target_version,
+    int64_t target_size) {
+  ConnectionManagerInterface* connection_manager =
+      system_state_->connection_manager();
 
-  // The device_policy is loaded in a lazy way before an update check and is
-  // used to determine if an update is allowed over cellular. Load the device
-  // policy now from the libbrillo cache if it wasn't already loaded.
-  if (!system_state_->device_policy()) {
-    UpdateAttempter* update_attempter = system_state_->update_attempter();
-    if (update_attempter)
-      update_attempter->RefreshDevicePolicy();
+  // Check if this setting is allowed by the device policy.
+  if (connection_manager->IsAllowedConnectionTypesForUpdateSet()) {
+    LogAndSetError(error,
+                   FROM_HERE,
+                   "Ignoring the update over cellular setting since there's "
+                   "a device policy enforcing this setting.");
+    return false;
   }
 
-  // Return the current setting based on the same logic used while checking for
-  // updates. A log message could be printed as the result of this test.
-  LOG(INFO) << "Checking if updates over cellular networks are allowed:";
-  *out_allowed = cm->IsUpdateAllowedOver(ConnectionType::kCellular,
-                                         ConnectionTethering::kUnknown);
+  // If the policy wasn't loaded yet, then it is still OK to change the local
+  // setting because the policy will be checked again during the update check.
+
+  PrefsInterface* prefs = system_state_->prefs();
+
+  if (!prefs ||
+      !prefs->SetString(kPrefsUpdateOverCellularTargetVersion,
+                        target_version) ||
+      !prefs->SetInt64(kPrefsUpdateOverCellularTargetSize, target_size)) {
+    LogAndSetError(
+        error, FROM_HERE, "Error setting the target for update over cellular.");
+    return false;
+  }
+  return true;
+}
+
+bool UpdateEngineService::GetUpdateOverCellularPermission(ErrorPtr* error,
+                                                          bool* out_allowed) {
+  ConnectionManagerInterface* connection_manager =
+      system_state_->connection_manager();
+
+  if (connection_manager->IsAllowedConnectionTypesForUpdateSet()) {
+    // We have device policy, so ignore the user preferences.
+    *out_allowed = connection_manager->IsUpdateAllowedOver(
+        ConnectionType::kCellular, ConnectionTethering::kUnknown);
+  } else {
+    PrefsInterface* prefs = system_state_->prefs();
+
+    if (!prefs || !prefs->Exists(kPrefsUpdateOverCellularPermission)) {
+      // Update is not allowed as user preference is not set or not available.
+      *out_allowed = false;
+      return true;
+    }
+
+    bool is_allowed;
+
+    if (!prefs->GetBoolean(kPrefsUpdateOverCellularPermission, &is_allowed)) {
+      LogAndSetError(error,
+                     FROM_HERE,
+                     "Error getting the update over cellular preference.");
+      return false;
+    }
+    *out_allowed = is_allowed;
+  }
   return true;
 }
 
@@ -360,7 +390,8 @@
 
 bool UpdateEngineService::GetLastAttemptError(ErrorPtr* /* error */,
                                               int32_t* out_last_attempt_error) {
-  ErrorCode error_code = system_state_->payload_state()->GetAttemptErrorCode();
+  ErrorCode error_code =
+      system_state_->update_attempter()->GetAttemptErrorCode();
   *out_last_attempt_error = static_cast<int>(error_code);
   return true;
 }
diff --git a/common_service.h b/common_service.h
index 544dd93..824ef97 100644
--- a/common_service.h
+++ b/common_service.h
@@ -114,6 +114,12 @@
   bool SetUpdateOverCellularPermission(brillo::ErrorPtr* error,
                                        bool in_allowed);
 
+  // If there's no device policy installed, sets the update over cellular
+  // target. Otherwise, this method returns with an error.
+  bool SetUpdateOverCellularTarget(brillo::ErrorPtr* error,
+                                   const std::string& target_version,
+                                   int64_t target_size);
+
   // Returns the current value of the update over cellular network setting,
   // either forced by the device policy if the device is enrolled or the current
   // user preference otherwise.
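
A hedged sketch of calling the new setter directly; |service| is a hypothetical UpdateEngineService pointer and the version and size values are made up for illustration.

brillo::ErrorPtr error;
if (!service->SetUpdateOverCellularTarget(&error, "10575.39.2",
                                          123456789 /* bytes */)) {
  // Fails when a device policy already governs cellular updates, mirroring
  // SetUpdateOverCellularPermission.
  LOG(WARNING) << "Cellular update target rejected: " << error->GetMessage();
}
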
diff --git a/connection_manager.cc b/connection_manager.cc
index d15faf0..4063f24 100644
--- a/connection_manager.cc
+++ b/connection_manager.cc
@@ -16,6 +16,7 @@
 
 #include "update_engine/connection_manager.h"
 
+#include <memory>
 #include <set>
 #include <string>
 
@@ -30,6 +31,7 @@
 #include "update_engine/connection_utils.h"
 #include "update_engine/shill_proxy.h"
 #include "update_engine/system_state.h"
+#include "update_engine/update_attempter.h"
 
 using org::chromium::flimflam::ManagerProxyInterface;
 using org::chromium::flimflam::ServiceProxyInterface;
@@ -58,16 +60,27 @@
 
     case ConnectionType::kCellular: {
       set<string> allowed_types;
+
       const policy::DevicePolicy* device_policy =
           system_state_->device_policy();
 
-      // A device_policy is loaded in a lazy way right before an update check,
-      // so the device_policy should be already loaded at this point. If it's
-      // not, return a safe value for this setting.
+      // The device_policy is loaded in a lazy way before an update check. Load
+      // it now from the libbrillo cache if it wasn't already loaded.
       if (!device_policy) {
-        LOG(INFO) << "Disabling updates over cellular networks as there's no "
-                     "device policy loaded yet.";
-        return false;
+        UpdateAttempter* update_attempter = system_state_->update_attempter();
+        if (update_attempter) {
+          update_attempter->RefreshDevicePolicy();
+          device_policy = system_state_->device_policy();
+        }
+      }
+
+      if (!device_policy) {
+        // The device policy could not be loaded (possibly due to a guest
+        // account). We do not check the local user setting here; it is checked
+        // by |OmahaRequestAction| during the update check.
+        LOG(INFO) << "Allowing updates over cellular as the device policy "
+                     "could not be loaded.";
+        return true;
       }
 
       if (device_policy->GetAllowedConnectionTypesForUpdate(&allowed_types)) {
@@ -81,31 +94,14 @@
 
         LOG(INFO) << "Allowing updates over cellular per device policy.";
         return true;
-      } else {
-        // There's no update setting in the device policy, using the local user
-        // setting.
-        PrefsInterface* prefs = system_state_->prefs();
-
-        if (!prefs || !prefs->Exists(kPrefsUpdateOverCellularPermission)) {
-          LOG(INFO) << "Disabling updates over cellular connection as there's "
-                       "no device policy setting nor user preference present.";
-          return false;
-        }
-
-        bool stored_value;
-        if (!prefs->GetBoolean(kPrefsUpdateOverCellularPermission,
-                               &stored_value)) {
-          return false;
-        }
-
-        if (!stored_value) {
-          LOG(INFO) << "Disabling updates over cellular connection per user "
-                       "setting.";
-          return false;
-        }
-        LOG(INFO) << "Allowing updates over cellular per user setting.";
-        return true;
       }
+
+      // If there's no update setting in the device policy, we do not check the
+      // local user setting here; it is checked by |OmahaRequestAction| during
+      // the update check.
+      LOG(INFO) << "Allowing updates over cellular as the device policy does "
+                   "not include an update setting.";
+      return true;
     }
 
     default:
@@ -120,6 +116,21 @@
   }
 }
 
+bool ConnectionManager::IsAllowedConnectionTypesForUpdateSet() const {
+  const policy::DevicePolicy* device_policy = system_state_->device_policy();
+  if (!device_policy) {
+    LOG(INFO) << "There's no device policy loaded yet.";
+    return false;
+  }
+
+  set<string> allowed_types;
+  if (!device_policy->GetAllowedConnectionTypesForUpdate(&allowed_types)) {
+    return false;
+  }
+
+  return true;
+}
+
 bool ConnectionManager::GetConnectionProperties(
     ConnectionType* out_type, ConnectionTethering* out_tethering) {
   dbus::ObjectPath default_service_path;
diff --git a/connection_manager.h b/connection_manager.h
index e5a9d49..dc563ef 100644
--- a/connection_manager.h
+++ b/connection_manager.h
@@ -17,6 +17,7 @@
 #ifndef UPDATE_ENGINE_CONNECTION_MANAGER_H_
 #define UPDATE_ENGINE_CONNECTION_MANAGER_H_
 
+#include <memory>
 #include <string>
 
 #include <base/macros.h>
@@ -43,6 +44,7 @@
                                ConnectionTethering* out_tethering) override;
   bool IsUpdateAllowedOver(ConnectionType type,
                            ConnectionTethering tethering) const override;
+  bool IsAllowedConnectionTypesForUpdateSet() const override;
 
  private:
   // Returns (via out_path) the default network path, or empty string if
diff --git a/connection_manager_android.cc b/connection_manager_android.cc
index 2dd824a..6da4cee 100644
--- a/connection_manager_android.cc
+++ b/connection_manager_android.cc
@@ -16,6 +16,8 @@
 
 #include "update_engine/connection_manager_android.h"
 
+#include <memory>
+
 namespace chromeos_update_engine {
 
 namespace connection_manager {
@@ -34,5 +36,8 @@
     ConnectionType type, ConnectionTethering tethering) const {
   return true;
 }
+bool ConnectionManagerAndroid::IsAllowedConnectionTypesForUpdateSet() const {
+  return false;
+}
 
 }  // namespace chromeos_update_engine
diff --git a/connection_manager_android.h b/connection_manager_android.h
index 0cd5e73..006f4ea 100644
--- a/connection_manager_android.h
+++ b/connection_manager_android.h
@@ -34,6 +34,7 @@
                                ConnectionTethering* out_tethering) override;
   bool IsUpdateAllowedOver(ConnectionType type,
                            ConnectionTethering tethering) const override;
+  bool IsAllowedConnectionTypesForUpdateSet() const override;
 
   DISALLOW_COPY_AND_ASSIGN(ConnectionManagerAndroid);
 };
diff --git a/connection_manager_interface.h b/connection_manager_interface.h
index df8eb4b..2faeb80 100644
--- a/connection_manager_interface.h
+++ b/connection_manager_interface.h
@@ -46,6 +46,10 @@
   virtual bool IsUpdateAllowedOver(ConnectionType type,
                                    ConnectionTethering tethering) const = 0;
 
+  // Returns true if the allowed connection types for update are set in the
+  // device policy; otherwise, returns false.
+  virtual bool IsAllowedConnectionTypesForUpdateSet() const = 0;
+
  protected:
   ConnectionManagerInterface() = default;
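
To summarize the intended dispatch, here is a sketch mirroring the common_service.cc change above; the helper name is hypothetical.

bool UpdatesOverCellularAllowed(chromeos_update_engine::SystemState* system_state) {
  auto* connection_manager = system_state->connection_manager();
  if (connection_manager->IsAllowedConnectionTypesForUpdateSet()) {
    // Device policy is authoritative; the user preference is ignored.
    return connection_manager->IsUpdateAllowedOver(
        chromeos_update_engine::ConnectionType::kCellular,
        chromeos_update_engine::ConnectionTethering::kUnknown);
  }
  bool allowed = false;
  auto* prefs = system_state->prefs();
  return prefs &&
         prefs->GetBoolean(
             chromeos_update_engine::kPrefsUpdateOverCellularPermission,
             &allowed) &&
         allowed;
}
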
 
diff --git a/connection_manager_unittest.cc b/connection_manager_unittest.cc
index e26a686..85b8c57 100644
--- a/connection_manager_unittest.cc
+++ b/connection_manager_unittest.cc
@@ -16,8 +16,10 @@
 
 #include "update_engine/connection_manager.h"
 
+#include <memory>
 #include <set>
 #include <string>
+#include <utility>
 
 #include <base/logging.h>
 #include <brillo/any.h>
@@ -276,16 +278,24 @@
                                         ConnectionTethering::kConfirmed));
 }
 
-TEST_F(ConnectionManagerTest, BlockUpdatesOverCellularByDefaultTest) {
-  EXPECT_FALSE(cmut_.IsUpdateAllowedOver(ConnectionType::kCellular,
-                                         ConnectionTethering::kUnknown));
+TEST_F(ConnectionManagerTest, AllowUpdatesOverCellularByDefaultTest) {
+  policy::MockDevicePolicy device_policy;
+  // Set an empty device policy.
+  fake_system_state_.set_device_policy(&device_policy);
+
+  EXPECT_TRUE(cmut_.IsUpdateAllowedOver(ConnectionType::kCellular,
+                                        ConnectionTethering::kUnknown));
 }
 
-TEST_F(ConnectionManagerTest, BlockUpdatesOverTetheredNetworkByDefaultTest) {
-  EXPECT_FALSE(cmut_.IsUpdateAllowedOver(ConnectionType::kWifi,
-                                         ConnectionTethering::kConfirmed));
-  EXPECT_FALSE(cmut_.IsUpdateAllowedOver(ConnectionType::kEthernet,
-                                         ConnectionTethering::kConfirmed));
+TEST_F(ConnectionManagerTest, AllowUpdatesOverTetheredNetworkByDefaultTest) {
+  policy::MockDevicePolicy device_policy;
+  // Set an empty device policy.
+  fake_system_state_.set_device_policy(&device_policy);
+
+  EXPECT_TRUE(cmut_.IsUpdateAllowedOver(ConnectionType::kWifi,
+                                        ConnectionTethering::kConfirmed));
+  EXPECT_TRUE(cmut_.IsUpdateAllowedOver(ConnectionType::kEthernet,
+                                        ConnectionTethering::kConfirmed));
   EXPECT_TRUE(cmut_.IsUpdateAllowedOver(ConnectionType::kWifi,
                                         ConnectionTethering::kSuspected));
 }
@@ -310,62 +320,27 @@
                                          ConnectionTethering::kUnknown));
 }
 
-TEST_F(ConnectionManagerTest, BlockUpdatesOver3GIfErrorInPolicyFetchTest) {
-  policy::MockDevicePolicy allow_3g_policy;
+TEST_F(ConnectionManagerTest, AllowUpdatesOver3GIfPolicyIsNotSet) {
+  policy::MockDevicePolicy device_policy;
 
-  fake_system_state_.set_device_policy(&allow_3g_policy);
-
-  set<string> allowed_set;
-  allowed_set.insert(StringForConnectionType(ConnectionType::kCellular));
+  fake_system_state_.set_device_policy(&device_policy);
 
   // Return false for GetAllowedConnectionTypesForUpdate and see
-  // that updates are still blocked for 3G despite the value being in
-  // the string set above.
-  EXPECT_CALL(allow_3g_policy, GetAllowedConnectionTypesForUpdate(_))
-      .Times(1)
-      .WillOnce(DoAll(SetArgPointee<0>(allowed_set), Return(false)));
-
-  EXPECT_FALSE(cmut_.IsUpdateAllowedOver(ConnectionType::kCellular,
-                                         ConnectionTethering::kUnknown));
-}
-
-TEST_F(ConnectionManagerTest, UseUserPrefForUpdatesOverCellularIfNoPolicyTest) {
-  policy::MockDevicePolicy no_policy;
-  testing::NiceMock<MockPrefs>* prefs = fake_system_state_.mock_prefs();
-
-  fake_system_state_.set_device_policy(&no_policy);
-
-  // No setting enforced by the device policy, user prefs should be used.
-  EXPECT_CALL(no_policy, GetAllowedConnectionTypesForUpdate(_))
-      .Times(3)
-      .WillRepeatedly(Return(false));
-
-  // No user pref: block.
-  EXPECT_CALL(*prefs, Exists(kPrefsUpdateOverCellularPermission))
+  // that updates are allowed because the device policy is not set. Further
+  // checks are left to |OmahaRequestAction|.
+  EXPECT_CALL(device_policy, GetAllowedConnectionTypesForUpdate(_))
       .Times(1)
       .WillOnce(Return(false));
-  EXPECT_FALSE(cmut_.IsUpdateAllowedOver(ConnectionType::kCellular,
-                                         ConnectionTethering::kUnknown));
 
-  // Allow per user pref.
-  EXPECT_CALL(*prefs, Exists(kPrefsUpdateOverCellularPermission))
-      .Times(1)
-      .WillOnce(Return(true));
-  EXPECT_CALL(*prefs, GetBoolean(kPrefsUpdateOverCellularPermission, _))
-      .Times(1)
-      .WillOnce(DoAll(SetArgPointee<1>(true), Return(true)));
   EXPECT_TRUE(cmut_.IsUpdateAllowedOver(ConnectionType::kCellular,
                                         ConnectionTethering::kUnknown));
+}
 
-  // Block per user pref.
-  EXPECT_CALL(*prefs, Exists(kPrefsUpdateOverCellularPermission))
-      .Times(1)
-      .WillOnce(Return(true));
-  EXPECT_CALL(*prefs, GetBoolean(kPrefsUpdateOverCellularPermission, _))
-      .Times(1)
-      .WillOnce(DoAll(SetArgPointee<1>(false), Return(true)));
-  EXPECT_FALSE(cmut_.IsUpdateAllowedOver(ConnectionType::kCellular,
-                                         ConnectionTethering::kUnknown));
+TEST_F(ConnectionManagerTest, AllowUpdatesOverCellularIfPolicyFailsToBeLoaded) {
+  fake_system_state_.set_device_policy(nullptr);
+
+  EXPECT_TRUE(cmut_.IsUpdateAllowedOver(ConnectionType::kCellular,
+                                        ConnectionTethering::kUnknown));
 }
 
 TEST_F(ConnectionManagerTest, StringForConnectionTypeTest) {
diff --git a/daemon_state_android.cc b/daemon_state_android.cc
index 0960b1a..c9c09b8 100644
--- a/daemon_state_android.cc
+++ b/daemon_state_android.cc
@@ -36,7 +36,7 @@
 
   hardware_ = hardware::CreateHardware();
   if (!hardware_) {
-    LOG(ERROR) << "Error intializing the HardwareInterface.";
+    LOG(ERROR) << "Error initializing the HardwareInterface.";
     return false;
   }
 
diff --git a/dbus_bindings/org.chromium.KioskAppService.dbus-xml b/dbus_bindings/org.chromium.KioskAppService.dbus-xml
new file mode 100644
index 0000000..11b888b
--- /dev/null
+++ b/dbus_bindings/org.chromium.KioskAppService.dbus-xml
@@ -0,0 +1,10 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+
+<node name="/org/chromium/KioskAppService"
+      xmlns:tp="http://telepathy.freedesktop.org/wiki/DbusSpec#extensions-v0">
+  <interface name="org.chromium.KioskAppServiceInterface">
+    <method name="GetRequiredPlatformVersion">
+      <arg name="required_platform_version" type="s" direction="out" />
+    </method>
+  </interface>
+</node>
diff --git a/dbus_bindings/org.chromium.LibCrosService.dbus-xml b/dbus_bindings/org.chromium.LibCrosService.dbus-xml
deleted file mode 100644
index 3111c63..0000000
--- a/dbus_bindings/org.chromium.LibCrosService.dbus-xml
+++ /dev/null
@@ -1,10 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" ?>
-
-<node name="/org/chromium/LibCrosService"
-      xmlns:tp="http://telepathy.freedesktop.org/wiki/DbusSpec#extensions-v0">
-  <interface name="org.chromium.LibCrosServiceInterface">
-    <method name="GetKioskAppRequiredPlatformVersion">
-      <arg name="required_platform_version" type="s" direction="out" />
-    </method>
-  </interface>
-</node>
diff --git a/dbus_bindings/org.chromium.UpdateEngineInterface.dbus-xml b/dbus_bindings/org.chromium.UpdateEngineInterface.dbus-xml
index 848f775..a20f33f 100644
--- a/dbus_bindings/org.chromium.UpdateEngineInterface.dbus-xml
+++ b/dbus_bindings/org.chromium.UpdateEngineInterface.dbus-xml
@@ -67,6 +67,10 @@
     <method name="SetUpdateOverCellularPermission">
       <arg type="b" name="allowed" direction="in" />
     </method>
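+    <!-- Records the specific update (version and size) that has been approved
+         for download over a cellular connection. -->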
+    <method name="SetUpdateOverCellularTarget">
+      <arg type="s" name="target_version" direction="in" />
+      <arg type="x" name="target_size" direction="in" />
+    </method>
     <method name="GetUpdateOverCellularPermission">
       <arg type="b" name="allowed" direction="out" />
     </method>
diff --git a/dbus_service.cc b/dbus_service.cc
index 47aeec7..c7bc9f0 100644
--- a/dbus_service.cc
+++ b/dbus_service.cc
@@ -50,14 +50,11 @@
   bool interactive = !(flags & update_engine::kAttemptUpdateFlagNonInteractive);
   bool result;
   return common_->AttemptUpdate(
-             error,
-             in_app_version,
-             in_omaha_url,
-             interactive
-                 ? 0
-                 : update_engine::UpdateAttemptFlags::kFlagNonInteractive,
-             &result) &&
-         result;
+      error,
+      in_app_version,
+      in_omaha_url,
+      interactive ? 0 : update_engine::UpdateAttemptFlags::kFlagNonInteractive,
+      &result);
 }
 
 bool DBusUpdateEngineService::AttemptRollback(ErrorPtr* error,
@@ -133,6 +130,14 @@
   return common_->SetUpdateOverCellularPermission(error, in_allowed);
 }
 
+bool DBusUpdateEngineService::SetUpdateOverCellularTarget(
+    brillo::ErrorPtr* error,
+    const std::string& target_version,
+    int64_t target_size) {
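+  // Thin D-Bus shim: the actual validation (e.g. rejecting the call when a
+  // device policy is installed) is done by the common service behind |common_|.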
+  return common_->SetUpdateOverCellularTarget(
+      error, target_version, target_size);
+}
+
 bool DBusUpdateEngineService::GetUpdateOverCellularPermission(
     ErrorPtr* error, bool* out_allowed) {
   return common_->GetUpdateOverCellularPermission(error, out_allowed);
diff --git a/dbus_service.h b/dbus_service.h
index b754661..e461fa6 100644
--- a/dbus_service.h
+++ b/dbus_service.h
@@ -114,6 +114,12 @@
   bool SetUpdateOverCellularPermission(brillo::ErrorPtr* error,
                                        bool in_allowed) override;
 
+  // If there's no device policy installed, sets the update over cellular
+  // target. Otherwise, this method returns with an error.
+  bool SetUpdateOverCellularTarget(brillo::ErrorPtr* error,
+                                   const std::string& target_version,
+                                   int64_t target_size) override;
+
   // Returns the current value of the update over cellular network setting,
   // either forced by the device policy if the device is enrolled or the current
   // user preference otherwise.
@@ -154,7 +160,7 @@
 class UpdateEngineAdaptor : public org::chromium::UpdateEngineInterfaceAdaptor,
                             public ServiceObserverInterface {
  public:
-  UpdateEngineAdaptor(SystemState* system_state);
+  explicit UpdateEngineAdaptor(SystemState* system_state);
   ~UpdateEngineAdaptor() = default;
 
   // Register the DBus object with the update engine service asynchronously.
diff --git a/dynamic_partition_control_android.cc b/dynamic_partition_control_android.cc
new file mode 100644
index 0000000..27e117c
--- /dev/null
+++ b/dynamic_partition_control_android.cc
@@ -0,0 +1,195 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/dynamic_partition_control_android.h"
+
+#include <memory>
+#include <set>
+#include <string>
+
+#include <android-base/properties.h>
+#include <android-base/strings.h>
+#include <base/files/file_util.h>
+#include <base/logging.h>
+#include <bootloader_message/bootloader_message.h>
+#include <fs_mgr_dm_linear.h>
+
+#include "update_engine/common/boot_control_interface.h"
+#include "update_engine/common/utils.h"
+
+using android::base::GetBoolProperty;
+using android::base::Join;
+using android::dm::DeviceMapper;
+using android::dm::DmDeviceState;
+using android::fs_mgr::CreateLogicalPartition;
+using android::fs_mgr::DestroyLogicalPartition;
+using android::fs_mgr::MetadataBuilder;
+
+namespace chromeos_update_engine {
+
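+// |kUseDynamicPartitions| is the boot-time property that gates the dynamic
+// partition feature; |kMapTimeoutMillis| is the timeout handed to the
+// CreateLogicalPartition()/DestroyLogicalPartition() calls below.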
+constexpr char kUseDynamicPartitions[] = "ro.boot.logical_partitions";
+constexpr uint64_t kMapTimeoutMillis = 1000;
+
+DynamicPartitionControlAndroid::~DynamicPartitionControlAndroid() {
+  CleanupInternal(false /* wait */);
+}
+
+bool DynamicPartitionControlAndroid::IsDynamicPartitionsEnabled() {
+  return GetBoolProperty(kUseDynamicPartitions, false);
+}
+
+bool DynamicPartitionControlAndroid::MapPartitionOnDeviceMapper(
+    const std::string& super_device,
+    const std::string& target_partition_name,
+    uint32_t slot,
+    std::string* path) {
+  if (!CreateLogicalPartition(super_device.c_str(),
+                              slot,
+                              target_partition_name,
+                              true /* force_writable */,
+                              std::chrono::milliseconds(kMapTimeoutMillis),
+                              path)) {
+    LOG(ERROR) << "Cannot map " << target_partition_name << " in "
+               << super_device << " on device mapper.";
+    return false;
+  }
+  LOG(INFO) << "Succesfully mapped " << target_partition_name
+            << " to device mapper; device path at " << *path;
+  mapped_devices_.insert(target_partition_name);
+  return true;
+}
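+
+// Usage sketch (hypothetical caller; the device path and names are
+// illustrative only):
+//   std::string path;
+//   if (control.MapPartitionOnDeviceMapper(
+//           "/dev/block/by-name/super", "system_b", 1 /* slot */, &path)) {
+//     // |path| now refers to the device-mapper block device for system_b.
+//   }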
+
+bool DynamicPartitionControlAndroid::UnmapPartitionOnDeviceMapper(
+    const std::string& target_partition_name, bool wait) {
+  if (DeviceMapper::Instance().GetState(target_partition_name) !=
+      DmDeviceState::INVALID) {
+    if (!DestroyLogicalPartition(
+            target_partition_name,
+            std::chrono::milliseconds(wait ? kMapTimeoutMillis : 0))) {
+      LOG(ERROR) << "Cannot unmap " << target_partition_name
+                 << " from device mapper.";
+      return false;
+    }
+    LOG(INFO) << "Successfully unmapped " << target_partition_name
+              << " from device mapper.";
+  }
+  mapped_devices_.erase(target_partition_name);
+  return true;
+}
+
+void DynamicPartitionControlAndroid::CleanupInternal(bool wait) {
+  // UnmapPartitionOnDeviceMapper removes objects from mapped_devices_, hence
+  // a copy is needed for the loop.
+  std::set<std::string> mapped = mapped_devices_;
+  LOG(INFO) << "Destroying [" << Join(mapped, ", ") << "] from device mapper";
+  for (const auto& partition_name : mapped) {
+    ignore_result(UnmapPartitionOnDeviceMapper(partition_name, wait));
+  }
+}
+
+void DynamicPartitionControlAndroid::Cleanup() {
+  CleanupInternal(true /* wait */);
+}
+
+bool DynamicPartitionControlAndroid::DeviceExists(const std::string& path) {
+  return base::PathExists(base::FilePath(path));
+}
+
+android::dm::DmDeviceState DynamicPartitionControlAndroid::GetState(
+    const std::string& name) {
+  return DeviceMapper::Instance().GetState(name);
+}
+
+bool DynamicPartitionControlAndroid::GetDmDevicePathByName(
+    const std::string& name, std::string* path) {
+  return DeviceMapper::Instance().GetDmDevicePathByName(name, path);
+}
+
+std::unique_ptr<MetadataBuilder>
+DynamicPartitionControlAndroid::LoadMetadataBuilder(
+    const std::string& super_device, uint32_t source_slot) {
+  auto builder = MetadataBuilder::New(super_device, source_slot);
+  if (builder == nullptr) {
+    LOG(WARNING) << "No metadata slot "
+                 << BootControlInterface::SlotName(source_slot) << " in "
+                 << super_device;
+    return nullptr;
+  }
+  LOG(INFO) << "Loaded metadata from slot "
+            << BootControlInterface::SlotName(source_slot) << " in "
+            << super_device;
+  return builder;
+}
+
+bool DynamicPartitionControlAndroid::StoreMetadata(
+    const std::string& super_device,
+    MetadataBuilder* builder,
+    uint32_t target_slot) {
+  auto metadata = builder->Export();
+  if (metadata == nullptr) {
+    LOG(ERROR) << "Cannot export metadata to slot "
+               << BootControlInterface::SlotName(target_slot) << " in "
+               << super_device;
+    return false;
+  }
+
+  if (!UpdatePartitionTable(super_device, *metadata, target_slot)) {
+    LOG(ERROR) << "Cannot write metadata to slot "
+               << BootControlInterface::SlotName(target_slot) << " in "
+               << super_device;
+    return false;
+  }
+
+  LOG(INFO) << "Copied metadata to slot "
+            << BootControlInterface::SlotName(target_slot) << " in "
+            << super_device;
+  return true;
+}
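+
+// Typical round trip (illustrative): LoadMetadataBuilder() for the source
+// slot, let the caller resize or add partitions through the returned
+// MetadataBuilder, then persist the result with StoreMetadata() for the
+// target slot.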
+
+bool DynamicPartitionControlAndroid::GetDeviceDir(std::string* out) {
+  // We can't use fs_mgr to look up a partition by name because fstab
+  // doesn't list every slot partition (it uses the slotselect option
+  // to mask the suffix).
+  //
+  // We can however assume that there's an entry for the /misc mount
+  // point and use that to get the device file for the misc
+  // partition. This helps us locate the disk that the target partition
+  // resides on. From there we'll assume that a by-name scheme is used
+  // so the caller can just replace the trailing "misc" with the desired
+  // partition name and slot suffix, e.g.
+  //
+  //   /dev/block/platform/soc.0/7824900.sdhci/by-name/misc ->
+  //   /dev/block/platform/soc.0/7824900.sdhci/by-name/boot_a
+  //
+  // If needed, it's possible to relax the by-name assumption in the
+  // future by trawling /sys/block looking for the appropriate sibling
+  // of misc and then finding an entry in /dev matching the sysfs
+  // entry.
+
+  std::string err, misc_device = get_bootloader_message_blk_device(&err);
+  if (misc_device.empty()) {
+    LOG(ERROR) << "Unable to get misc block device: " << err;
+    return false;
+  }
+
+  if (!utils::IsSymlink(misc_device.c_str())) {
+    LOG(ERROR) << "Device file " << misc_device << " for /misc "
+               << "is not a symlink.";
+    return false;
+  }
+  *out = base::FilePath(misc_device).DirName().value();
+  return true;
+}
+}  // namespace chromeos_update_engine
diff --git a/dynamic_partition_control_android.h b/dynamic_partition_control_android.h
new file mode 100644
index 0000000..945954d
--- /dev/null
+++ b/dynamic_partition_control_android.h
@@ -0,0 +1,61 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_DYNAMIC_PARTITION_CONTROL_ANDROID_H_
+#define UPDATE_ENGINE_DYNAMIC_PARTITION_CONTROL_ANDROID_H_
+
+#include "update_engine/dynamic_partition_control_interface.h"
+
+#include <memory>
+#include <set>
+#include <string>
+
+namespace chromeos_update_engine {
+
+class DynamicPartitionControlAndroid : public DynamicPartitionControlInterface {
+ public:
+  DynamicPartitionControlAndroid() = default;
+  ~DynamicPartitionControlAndroid();
+  bool IsDynamicPartitionsEnabled() override;
+  bool MapPartitionOnDeviceMapper(const std::string& super_device,
+                                  const std::string& target_partition_name,
+                                  uint32_t slot,
+                                  std::string* path) override;
+  bool UnmapPartitionOnDeviceMapper(const std::string& target_partition_name,
+                                    bool wait) override;
+  void Cleanup() override;
+  bool DeviceExists(const std::string& path) override;
+  android::dm::DmDeviceState GetState(const std::string& name) override;
+  bool GetDmDevicePathByName(const std::string& name,
+                             std::string* path) override;
+  std::unique_ptr<android::fs_mgr::MetadataBuilder> LoadMetadataBuilder(
+      const std::string& super_device, uint32_t source_slot) override;
+  bool StoreMetadata(const std::string& super_device,
+                     android::fs_mgr::MetadataBuilder* builder,
+                     uint32_t target_slot) override;
+  bool GetDeviceDir(std::string* path) override;
+
+ private:
+  std::set<std::string> mapped_devices_;
+
+  void CleanupInternal(bool wait);
+
+  DISALLOW_COPY_AND_ASSIGN(DynamicPartitionControlAndroid);
+};
+
+}  // namespace chromeos_update_engine
+
+#endif  // UPDATE_ENGINE_DYNAMIC_PARTITION_CONTROL_ANDROID_H_
diff --git a/dynamic_partition_control_interface.h b/dynamic_partition_control_interface.h
new file mode 100644
index 0000000..00ed026
--- /dev/null
+++ b/dynamic_partition_control_interface.h
@@ -0,0 +1,90 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_DYNAMIC_PARTITION_CONTROL_INTERFACE_H_
+#define UPDATE_ENGINE_DYNAMIC_PARTITION_CONTROL_INTERFACE_H_
+
+#include <stdint.h>
+
+#include <memory>
+#include <string>
+
+#include <base/files/file_util.h>
+#include <libdm/dm.h>
+#include <liblp/builder.h>
+
+namespace chromeos_update_engine {
+
+class DynamicPartitionControlInterface {
+ public:
+  virtual ~DynamicPartitionControlInterface() = default;
+
+  // Return true iff the dynamic partition feature is enabled on this device.
+  virtual bool IsDynamicPartitionsEnabled() = 0;
+
+  // Map logical partition on device-mapper.
+  // |super_device| is the device path of the physical partition ("super").
+  // |target_partition_name| is the identifier used in metadata; for example,
+  // "vendor_a"
+  // |slot| is the selected slot to mount; for example, 0 for "_a".
+  // Returns true if mapped successfully; if so, |path| is set to the device
+  // path of the mapped logical partition.
+  virtual bool MapPartitionOnDeviceMapper(
+      const std::string& super_device,
+      const std::string& target_partition_name,
+      uint32_t slot,
+      std::string* path) = 0;
+
+  // Unmap logical partition on device mapper. This is the reverse operation
+  // of MapPartitionOnDeviceMapper.
+  // If |wait| is set, wait until the device is unmapped.
+  // Returns true if unmapped successfully.
+  virtual bool UnmapPartitionOnDeviceMapper(
+      const std::string& target_partition_name, bool wait) = 0;
+
+  // Unmap all partitions mapped by MapPartitionOnDeviceMapper() and do any
+  // other necessary cleanups before destroying the object.
+  virtual void Cleanup() = 0;
+
+  // Return true if a static partition exists at device path |path|.
+  virtual bool DeviceExists(const std::string& path) = 0;
+
+  // Returns the current state of the underlying device mapper device
+  // with given name.
+  // One of INVALID, SUSPENDED or ACTIVE.
+  virtual android::dm::DmDeviceState GetState(const std::string& name) = 0;
+
+  // Returns the path to the device mapper device node in '/dev' corresponding
+  // to 'name'. If the device does not exist, false is returned, and the path
+  // parameter is not set.
+  virtual bool GetDmDevicePathByName(const std::string& name,
+                                     std::string* path) = 0;
+
+  // Retrieve metadata from |super_device| at slot |source_slot|.
+  virtual std::unique_ptr<android::fs_mgr::MetadataBuilder> LoadMetadataBuilder(
+      const std::string& super_device, uint32_t source_slot) = 0;
+
+  // Write metadata |builder| to |super_device| at slot |target_slot|.
+  virtual bool StoreMetadata(const std::string& super_device,
+                             android::fs_mgr::MetadataBuilder* builder,
+                             uint32_t target_slot) = 0;
+
+  // Retrieve the directory in which partition block devices are listed by
+  // name (derived from the misc device entry); sets |path| on success.
+  virtual bool GetDeviceDir(std::string* path) = 0;
+};
+
+}  // namespace chromeos_update_engine
+
+#endif  // UPDATE_ENGINE_DYNAMIC_PARTITION_CONTROL_INTERFACE_H_
diff --git a/hardware_android.cc b/hardware_android.cc
index 947b13a..a8a479d 100644
--- a/hardware_android.cc
+++ b/hardware_android.cc
@@ -16,23 +16,16 @@
 
 #include "update_engine/hardware_android.h"
 
-#include <fcntl.h>
-#include <sys/stat.h>
 #include <sys/types.h>
 
-#include <algorithm>
 #include <memory>
 
-#include <bootloader.h>
-
 #include <android-base/properties.h>
 #include <base/files/file_util.h>
-#include <base/strings/stringprintf.h>
+#include <bootloader_message/bootloader_message.h>
 
 #include "update_engine/common/hardware.h"
 #include "update_engine/common/platform_constants.h"
-#include "update_engine/common/utils.h"
-#include "update_engine/utils_android.h"
 
 using android::base::GetBoolProperty;
 using android::base::GetIntProperty;
@@ -43,12 +36,6 @@
 
 namespace {
 
-// The powerwash arguments passed to recovery. Arguments are separated by \n.
-const char kAndroidRecoveryPowerwashCommand[] =
-    "recovery\n"
-    "--wipe_data\n"
-    "--reason=wipe_data_from_ota\n";
-
 // Android properties that identify the hardware and potentially non-updatable
 // parts of the bootloader (such as the bootloader version and the baseband
 // version).
@@ -59,39 +46,6 @@
 const char kPropBootRevision[] = "ro.boot.revision";
 const char kPropBuildDateUTC[] = "ro.build.date.utc";
 
-// Write a recovery command line |message| to the BCB. The arguments to recovery
-// must be separated by '\n'. An empty string will erase the BCB.
-bool WriteBootloaderRecoveryMessage(const string& message) {
-  base::FilePath misc_device;
-  if (!utils::DeviceForMountPoint("/misc", &misc_device))
-    return false;
-
-  // Setup a bootloader_message with just the command and recovery fields set.
-  bootloader_message boot = {};
-  if (!message.empty()) {
-    strncpy(boot.command, "boot-recovery", sizeof(boot.command) - 1);
-    memcpy(boot.recovery,
-           message.data(),
-           std::min(message.size(), sizeof(boot.recovery) - 1));
-  }
-
-  int fd = HANDLE_EINTR(open(misc_device.value().c_str(), O_WRONLY | O_SYNC));
-  if (fd < 0) {
-    PLOG(ERROR) << "Opening misc";
-    return false;
-  }
-  ScopedFdCloser fd_closer(&fd);
-  // We only re-write the first part of the bootloader_message, up to and
-  // including the recovery message.
-  size_t boot_size =
-      offsetof(bootloader_message, recovery) + sizeof(boot.recovery);
-  if (!utils::WriteAll(fd, &boot, boot_size)) {
-    PLOG(ERROR) << "Writing recovery command to misc";
-    return false;
-  }
-  return true;
-}
-
 }  // namespace
 
 namespace hardware {
@@ -167,6 +121,32 @@
   return GetProperty(kPropBootBaseband, "");
 }
 
+int HardwareAndroid::GetMinKernelKeyVersion() const {
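+  // Key-version tracking is a Chrome OS verified-boot concept; Android has no
+  // equivalent, so this and the related accessors below are stubs.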
+  LOG(WARNING) << "STUB: No Kernel key version is available.";
+  return -1;
+}
+
+int HardwareAndroid::GetMinFirmwareKeyVersion() const {
+  LOG(WARNING) << "STUB: No Firmware key version is available.";
+  return -1;
+}
+
+int HardwareAndroid::GetMaxFirmwareKeyRollforward() const {
+  LOG(WARNING) << "STUB: Getting firmware_max_rollforward is not supported.";
+  return -1;
+}
+
+bool HardwareAndroid::SetMaxFirmwareKeyRollforward(
+    int firmware_max_rollforward) {
+  LOG(WARNING) << "STUB: Setting firmware_max_rollforward is not supported.";
+  return false;
+}
+
+bool HardwareAndroid::SetMaxKernelKeyRollforward(int kernel_max_rollforward) {
+  LOG(WARNING) << "STUB: Setting kernel_max_rollforward is not supported.";
+  return false;
+}
+
 int HardwareAndroid::GetPowerwashCount() const {
   LOG(WARNING) << "STUB: Assuming no factory reset was performed.";
   return 0;
@@ -174,11 +154,22 @@
 
 bool HardwareAndroid::SchedulePowerwash() {
   LOG(INFO) << "Scheduling a powerwash to BCB.";
-  return WriteBootloaderRecoveryMessage(kAndroidRecoveryPowerwashCommand);
+  string err;
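+  // update_bootloader_message() writes these options into the bootloader
+  // control block (BCB) on the misc partition, so recovery performs the wipe
+  // on the next boot into recovery.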
+  if (!update_bootloader_message({"--wipe_data", "--reason=wipe_data_from_ota"},
+                                 &err)) {
+    LOG(ERROR) << "Failed to update bootloader message: " << err;
+    return false;
+  }
+  return true;
 }
 
 bool HardwareAndroid::CancelPowerwash() {
-  return WriteBootloaderRecoveryMessage("");
+  string err;
+  if (!clear_bootloader_message(&err)) {
+    LOG(ERROR) << "Failed to clear bootloader message: " << err;
+    return false;
+  }
+  return true;
 }
 
 bool HardwareAndroid::GetNonVolatileDirectory(base::FilePath* path) const {
@@ -205,9 +196,10 @@
   return false;
 }
 
-void HardwareAndroid::SetFirstActiveOmahaPingSent() {
-  LOG(WARNING) << "STUB: Assuming first active omaha is never set.";
-  return;
+bool HardwareAndroid::SetFirstActiveOmahaPingSent() {
+  LOG(WARNING) << "STUB: Assuming first active omaha is set.";
+  // We will set it true, so its failure doesn't cause escalation.
+  return true;
 }
 
 }  // namespace chromeos_update_engine
diff --git a/hardware_android.h b/hardware_android.h
index ca90b62..920b659 100644
--- a/hardware_android.h
+++ b/hardware_android.h
@@ -42,6 +42,11 @@
   std::string GetHardwareClass() const override;
   std::string GetFirmwareVersion() const override;
   std::string GetECVersion() const override;
+  int GetMinKernelKeyVersion() const override;
+  int GetMinFirmwareKeyVersion() const override;
+  int GetMaxFirmwareKeyRollforward() const override;
+  bool SetMaxFirmwareKeyRollforward(int firmware_max_rollforward) override;
+  bool SetMaxKernelKeyRollforward(int kernel_max_rollforward) override;
   int GetPowerwashCount() const override;
   bool SchedulePowerwash() override;
   bool CancelPowerwash() override;
@@ -49,7 +54,7 @@
   bool GetPowerwashSafeDirectory(base::FilePath* path) const override;
   int64_t GetBuildTimestamp() const override;
   bool GetFirstActiveOmahaPingSent() const override;
-  void SetFirstActiveOmahaPingSent() override;
+  bool SetFirstActiveOmahaPingSent() override;
 
  private:
   DISALLOW_COPY_AND_ASSIGN(HardwareAndroid);
diff --git a/hardware_chromeos.cc b/hardware_chromeos.cc
index f2bb28a..3949328 100644
--- a/hardware_chromeos.cc
+++ b/hardware_chromeos.cc
@@ -16,6 +16,8 @@
 
 #include "update_engine/hardware_chromeos.h"
 
+#include <utility>
+
 #include <base/files/file_path.h>
 #include <base/files/file_util.h>
 #include <base/logging.h>
@@ -180,6 +182,34 @@
   return utils::ParseECVersion(input_line);
 }
 
+int HardwareChromeOS::GetMinKernelKeyVersion() const {
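+  // tpm_kernver is a verified-boot (crossystem) property;
+  // VbGetSystemPropertyInt() returns -1 when the property cannot be read.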
+  return VbGetSystemPropertyInt("tpm_kernver");
+}
+
+int HardwareChromeOS::GetMaxFirmwareKeyRollforward() const {
+  return VbGetSystemPropertyInt("firmware_max_rollforward");
+}
+
+bool HardwareChromeOS::SetMaxFirmwareKeyRollforward(
+    int firmware_max_rollforward) {
+  // Not all devices have this field yet, so first try to read it and fail if
+  // there is an error.
+  if (GetMaxFirmwareKeyRollforward() == -1)
+    return false;
+
+  return VbSetSystemPropertyInt("firmware_max_rollforward",
+                                firmware_max_rollforward) == 0;
+}
+
+int HardwareChromeOS::GetMinFirmwareKeyVersion() const {
+  return VbGetSystemPropertyInt("tpm_fwver");
+}
+
+bool HardwareChromeOS::SetMaxKernelKeyRollforward(int kernel_max_rollforward) {
+  return VbSetSystemPropertyInt("kernel_max_rollforward",
+                                kernel_max_rollforward) == 0;
+}
+
 int HardwareChromeOS::GetPowerwashCount() const {
   int powerwash_count;
   base::FilePath marker_path = base::FilePath(kPowerwashSafeDirectory).Append(
@@ -277,7 +307,7 @@
   return static_cast<bool>(active_ping);
 }
 
-void HardwareChromeOS::SetFirstActiveOmahaPingSent() {
+bool HardwareChromeOS::SetFirstActiveOmahaPingSent() {
   int exit_code = 0;
   string output;
   vector<string> vpd_set_cmd = {
@@ -287,7 +317,7 @@
     LOG(ERROR) << "Failed to set vpd key for " << kActivePingKey
                << " with exit code: " << exit_code
                << " with error: " << output;
-    return;
+    return false;
   }
 
   vector<string> vpd_dump_cmd = { "dump_vpd_log", "--force" };
@@ -296,7 +326,9 @@
     LOG(ERROR) << "Failed to cache " << kActivePingKey<< " using dump_vpd_log"
                << " with exit code: " << exit_code
                << " with error: " << output;
+    return false;
   }
+  return true;
 }
 
 }  // namespace chromeos_update_engine
diff --git a/hardware_chromeos.h b/hardware_chromeos.h
index 0cf1214..5c66641 100644
--- a/hardware_chromeos.h
+++ b/hardware_chromeos.h
@@ -17,6 +17,7 @@
 #ifndef UPDATE_ENGINE_HARDWARE_CHROMEOS_H_
 #define UPDATE_ENGINE_HARDWARE_CHROMEOS_H_
 
+#include <memory>
 #include <string>
 #include <vector>
 
@@ -46,6 +47,11 @@
   std::string GetHardwareClass() const override;
   std::string GetFirmwareVersion() const override;
   std::string GetECVersion() const override;
+  int GetMinKernelKeyVersion() const override;
+  int GetMinFirmwareKeyVersion() const override;
+  int GetMaxFirmwareKeyRollforward() const override;
+  bool SetMaxFirmwareKeyRollforward(int firmware_max_rollforward) override;
+  bool SetMaxKernelKeyRollforward(int kernel_max_rollforward) override;
   int GetPowerwashCount() const override;
   bool SchedulePowerwash() override;
   bool CancelPowerwash() override;
@@ -53,7 +59,7 @@
   bool GetPowerwashSafeDirectory(base::FilePath* path) const override;
   int64_t GetBuildTimestamp() const override;
   bool GetFirstActiveOmahaPingSent() const override;
-  void SetFirstActiveOmahaPingSent() override;
+  bool SetFirstActiveOmahaPingSent() override;
 
  private:
   friend class HardwareChromeOSTest;
diff --git a/image_properties_android.cc b/image_properties_android.cc
index 4dc2c02..2d418b3 100644
--- a/image_properties_android.cc
+++ b/image_properties_android.cc
@@ -23,7 +23,7 @@
 #include <android-base/properties.h>
 #include <base/logging.h>
 #include <base/strings/string_util.h>
-#include <bootloader.h>
+#include <bootloader_message/bootloader_message.h>
 #include <brillo/osrelease_reader.h>
 #include <brillo/strings/string_utils.h>
 
@@ -33,7 +33,6 @@
 #include "update_engine/common/prefs_interface.h"
 #include "update_engine/common/utils.h"
 #include "update_engine/system_state.h"
-#include "update_engine/utils_android.h"
 
 using android::base::GetProperty;
 using std::string;
@@ -79,18 +78,23 @@
 
 // Open misc partition for read or write and output the fd in |out_fd|.
 bool OpenMisc(bool write, int* out_fd) {
-  base::FilePath misc_device;
+  string misc_device;
   int flags = write ? O_WRONLY | O_SYNC : O_RDONLY;
   if (root_prefix) {
     // Use a file for unittest and create one if doesn't exist.
-    misc_device = base::FilePath(root_prefix).Append("misc");
+    misc_device = base::FilePath(root_prefix).Append("misc").value();
     if (write)
       flags |= O_CREAT;
-  } else if (!utils::DeviceForMountPoint("/misc", &misc_device)) {
-    return false;
+  } else {
+    string err;
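+    // get_bootloader_message_blk_device() resolves the /misc entry from the
+    // device fstab; it returns an empty string and sets |err| on failure.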
+    misc_device = get_bootloader_message_blk_device(&err);
+    if (misc_device.empty()) {
+      LOG(ERROR) << "Unable to get misc block device: " << err;
+      return false;
+    }
   }
 
-  int fd = HANDLE_EINTR(open(misc_device.value().c_str(), flags, 0600));
+  int fd = HANDLE_EINTR(open(misc_device.c_str(), flags, 0600));
   if (fd < 0) {
     PLOG(ERROR) << "Opening misc failed";
     return false;
diff --git a/init/update-engine.conf b/init/update-engine.conf
index 4c05cf4..d3681db 100644
--- a/init/update-engine.conf
+++ b/init/update-engine.conf
@@ -22,7 +22,10 @@
 # also updating that reference.
 start on starting system-services
 stop on stopping system-services
-respawn
+# The default respawn limit is 10 failures every 5 seconds, but early crashes
+# are hard to catch within that window. Use 10 failures every 20 seconds
+# instead, which covers the default window and more.
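+# Stanza syntax: "respawn limit COUNT INTERVAL", with INTERVAL in seconds.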
+respawn limit 10 20
 
 expect fork
 
diff --git a/libcurl_http_fetcher.cc b/libcurl_http_fetcher.cc
index 87f30ad..50ddeb0 100644
--- a/libcurl_http_fetcher.cc
+++ b/libcurl_http_fetcher.cc
@@ -545,8 +545,8 @@
   }
   bytes_downloaded_ += payload_size;
   in_write_callback_ = true;
-  if (delegate_)
-    delegate_->ReceivedBytes(this, ptr, payload_size);
+  if (delegate_ && !delegate_->ReceivedBytes(this, ptr, payload_size))
+    return payload_size;
   in_write_callback_ = false;
   return payload_size;
 }
diff --git a/main.cc b/main.cc
index 0612c54..67a150e 100644
--- a/main.cc
+++ b/main.cc
@@ -166,7 +166,7 @@
               "Don't daemon()ize; run in foreground.");
 
   chromeos_update_engine::Terminator::Init();
-  brillo::FlagHelper::Init(argc, argv, "Chromium OS Update Engine");
+  brillo::FlagHelper::Init(argc, argv, "A/B Update Engine");
 
   // We have two logging flags "--logtostderr" and "--logtofile"; and the logic
   // to choose the logging destination is:
@@ -179,7 +179,7 @@
   if (!FLAGS_foreground)
     PLOG_IF(FATAL, daemon(0, 0) == 1) << "daemon() failed";
 
-  LOG(INFO) << "Chrome OS Update Engine starting";
+  LOG(INFO) << "A/B Update Engine starting";
 
   // xz-embedded requires to initialize its CRC-32 table once on startup.
   xz_crc32_init();
@@ -194,7 +194,8 @@
   chromeos_update_engine::UpdateEngineDaemon update_engine_daemon;
   int exit_code = update_engine_daemon.Run();
 
-  LOG(INFO) << "Chrome OS Update Engine terminating with exit code "
-            << exit_code;
+  chromeos_update_engine::Subprocess::Get().FlushBufferedLogsAtExit();
+
+  LOG(INFO) << "A/B Update Engine terminating with exit code " << exit_code;
   return exit_code;
 }
diff --git a/metrics_reporter_android.cc b/metrics_reporter_android.cc
index 3cb356f..9165f0d 100644
--- a/metrics_reporter_android.cc
+++ b/metrics_reporter_android.cc
@@ -41,7 +41,7 @@
 constexpr char kMetricsUpdateEngineAttemptResult[] =
     "ota_update_engine_attempt_result";
 constexpr char kMetricsUpdateEngineAttemptDurationInMinutes[] =
-    "ota_update_engine_attempt_duration_boottime_in_minutes";
+    "ota_update_engine_attempt_fixed_duration_boottime_in_minutes";
 constexpr char kMetricsUpdateEngineAttemptDurationUptimeInMinutes[] =
     "ota_update_engine_attempt_duration_monotonic_in_minutes";
 constexpr char kMetricsUpdateEngineAttemptErrorCode[] =
@@ -51,12 +51,12 @@
 constexpr char kMetricsUpdateEngineAttemptPayloadType[] =
     "ota_update_engine_attempt_payload_type";
 constexpr char kMetricsUpdateEngineAttemptCurrentBytesDownloadedMiB[] =
-    "ota_update_engine_attempt_current_bytes_downloaded_mib";
+    "ota_update_engine_attempt_fixed_current_bytes_downloaded_mib";
 
 constexpr char kMetricsUpdateEngineSuccessfulUpdateAttemptCount[] =
     "ota_update_engine_successful_update_attempt_count";
 constexpr char kMetricsUpdateEngineSuccessfulUpdateTotalDurationInMinutes[] =
-    "ota_update_engine_successful_update_total_duration_in_minutes";
+    "ota_update_engine_successful_update_fixed_total_duration_in_minutes";
 constexpr char kMetricsUpdateEngineSuccessfulUpdatePayloadSizeMiB[] =
     "ota_update_engine_successful_update_payload_size_mib";
 constexpr char kMetricsUpdateEngineSuccessfulUpdatePayloadType[] =
@@ -109,7 +109,7 @@
     metrics::DownloadErrorCode /* payload_download_error_code */,
     metrics::ConnectionType /* connection_type */) {
   LogHistogram(metrics::kMetricsUpdateEngineAttemptCurrentBytesDownloadedMiB,
-               payload_bytes_downloaded);
+               payload_bytes_downloaded / kNumBytesInOneMiB);
 }
 
 void MetricsReporterAndroid::ReportSuccessfulUpdateMetrics(
@@ -120,6 +120,7 @@
     int64_t num_bytes_downloaded[kNumDownloadSources],
     int download_overhead_percentage,
     base::TimeDelta total_duration,
+    base::TimeDelta /* total_duration_uptime */,
     int reboot_count,
     int /* url_switch_count */) {
   LogHistogram(metrics::kMetricsUpdateEngineSuccessfulUpdateAttemptCount,
diff --git a/metrics_reporter_android.h b/metrics_reporter_android.h
index ee94e43..8a27ef6 100644
--- a/metrics_reporter_android.h
+++ b/metrics_reporter_android.h
@@ -17,6 +17,8 @@
 #ifndef UPDATE_ENGINE_METRICS_REPORTER_ANDROID_H_
 #define UPDATE_ENGINE_METRICS_REPORTER_ANDROID_H_
 
+#include <string>
+
 #include "update_engine/common/error_code.h"
 #include "update_engine/metrics_constants.h"
 #include "update_engine/metrics_reporter_interface.h"
@@ -33,6 +35,9 @@
 
   void ReportRollbackMetrics(metrics::RollbackResult result) override {}
 
+  void ReportEnterpriseRollbackMetrics(
+      bool success, const std::string& rollback_version) override {}
+
   void ReportDailyMetrics(base::TimeDelta os_age) override {}
 
   void ReportUpdateCheckMetrics(
@@ -67,6 +72,7 @@
       int64_t num_bytes_downloaded[kNumDownloadSources],
       int download_overhead_percentage,
       base::TimeDelta total_duration,
+      base::TimeDelta total_duration_uptime,
       int reboot_count,
       int url_switch_count) override;
 
@@ -79,6 +85,12 @@
 
   void ReportInstallDateProvisioningSource(int source, int max) override {}
 
+  void ReportInternalErrorCode(ErrorCode error_code) override {}
+
+  void ReportKeyVersionMetrics(int kernel_min_version,
+                               int kernel_max_rollforward_version,
+                               bool kernel_max_rollforward_success) override {}
+
  private:
   DISALLOW_COPY_AND_ASSIGN(MetricsReporterAndroid);
 };
diff --git a/metrics_reporter_interface.h b/metrics_reporter_interface.h
index 2c7ce5b..b677aaa 100644
--- a/metrics_reporter_interface.h
+++ b/metrics_reporter_interface.h
@@ -18,6 +18,7 @@
 #define UPDATE_ENGINE_METRICS_REPORTER_INTERFACE_H_
 
 #include <memory>
+#include <string>
 
 #include <base/time/time.h>
 
@@ -43,12 +44,20 @@
 
   virtual void Initialize() = 0;
 
-  // Helper function to report metrics related to rollback. The
+  // Helper function to report metrics related to user-initiated rollback. The
   // following metrics are reported:
   //
   //  |kMetricRollbackResult|
   virtual void ReportRollbackMetrics(metrics::RollbackResult result) = 0;
 
+  // Helper function to report metrics related to enterprise (admin-initiated)
+  // rollback:
+  //
+  //  |kMetricEnterpriseRollbackSuccess|
+  //  |kMetricEnterpriseRollbackFailure|
+  virtual void ReportEnterpriseRollbackMetrics(
+      bool success, const std::string& rollback_version) = 0;
+
   // Helper function to report metrics reported once a day. The
   // following metrics are reported:
   //
@@ -64,6 +73,8 @@
   //  |kMetricCheckDownloadErrorCode|
   //  |kMetricCheckTimeSinceLastCheckMinutes|
   //  |kMetricCheckTimeSinceLastCheckUptimeMinutes|
+  //  |kMetricCheckTargetVersion|
+  //  |kMetricCheckRollbackTargetVersion|
   //
   // The |kMetricCheckResult| metric will only be reported if |result|
   // is not |kUnset|.
@@ -78,6 +89,10 @@
   // |kMetricCheckTimeSinceLastCheckUptimeMinutes| metrics are
   // automatically reported and calculated by maintaining persistent
   // and process-local state variables.
+  //
+  // |kMetricCheckTargetVersion| reports the first section of the target
+  // version if it is set; |kMetricCheckRollbackTargetVersion| reports the
+  // same value, but only if rollback is also allowed by enterprise policy.
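+  // For example, a target version prefix of "10575.39.2" is reported as the
+  // sparse value 10575.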
   virtual void ReportUpdateCheckMetrics(
       SystemState* system_state,
       metrics::CheckResult result,
@@ -150,6 +165,7 @@
   //  |kMetricSuccessfulUpdateDownloadSourcesUsed|
   //  |kMetricSuccessfulUpdateDownloadOverheadPercentage|
   //  |kMetricSuccessfulUpdateTotalDurationMinutes|
+  //  |kMetricSuccessfulUpdateTotalDurationUptimeMinutes|
   //  |kMetricSuccessfulUpdateRebootCount|
   //  |kMetricSuccessfulUpdateUrlSwitchCount|
   //
@@ -164,6 +180,7 @@
       int64_t num_bytes_downloaded[kNumDownloadSources],
       int download_overhead_percentage,
       base::TimeDelta total_duration,
+      base::TimeDelta total_duration_uptime,
       int reboot_count,
       int url_switch_count) = 0;
 
@@ -193,6 +210,22 @@
   //
   // |kMetricInstallDateProvisioningSource|
   virtual void ReportInstallDateProvisioningSource(int source, int max) = 0;
+
+  // Helper function to report an internal error code. The following metrics are
+  // reported:
+  //
+  // |kMetricAttemptInternalErrorCode|
+  virtual void ReportInternalErrorCode(ErrorCode error_code) = 0;
+
+  // Helper function to report metrics related to the verified boot key
+  // versions:
+  //
+  //  |kMetricKernelMinVersion|
+  //  |kMetricKernelMaxRollforwardVersion|
+  //  |kMetricKernelMaxRollforwardSetSuccess|
+  virtual void ReportKeyVersionMetrics(int kernel_min_version,
+                                       int kernel_max_rollforward_version,
+                                       bool kernel_max_rollforward_success) = 0;
 };
 
 }  // namespace chromeos_update_engine
diff --git a/metrics_reporter_omaha.cc b/metrics_reporter_omaha.cc
index 0397b83..f0c6643 100644
--- a/metrics_reporter_omaha.cc
+++ b/metrics_reporter_omaha.cc
@@ -17,9 +17,9 @@
 #include "update_engine/metrics_reporter_omaha.h"
 
 #include <memory>
-#include <string>
 
 #include <base/logging.h>
+#include <base/strings/string_number_conversions.h>
 #include <metrics/metrics_library.h>
 
 #include "update_engine/common/clock_interface.h"
@@ -27,6 +27,7 @@
 #include "update_engine/common/prefs_interface.h"
 #include "update_engine/common/utils.h"
 #include "update_engine/metrics_utils.h"
+#include "update_engine/omaha_request_params.h"
 #include "update_engine/system_state.h"
 
 using std::string;
@@ -43,6 +44,9 @@
     "UpdateEngine.Check.DownloadErrorCode";
 const char kMetricCheckReaction[] = "UpdateEngine.Check.Reaction";
 const char kMetricCheckResult[] = "UpdateEngine.Check.Result";
+const char kMetricCheckTargetVersion[] = "UpdateEngine.Check.TargetVersion";
+const char kMetricCheckRollbackTargetVersion[] =
+    "UpdateEngine.Check.RollbackTargetVersion";
 const char kMetricCheckTimeSinceLastCheckMinutes[] =
     "UpdateEngine.Check.TimeSinceLastCheckMinutes";
 const char kMetricCheckTimeSinceLastCheckUptimeMinutes[] =
@@ -92,6 +96,8 @@
     "UpdateEngine.SuccessfulUpdate.RebootCount";
 const char kMetricSuccessfulUpdateTotalDurationMinutes[] =
     "UpdateEngine.SuccessfulUpdate.TotalDurationMinutes";
+const char kMetricSuccessfulUpdateTotalDurationUptimeMinutes[] =
+    "UpdateEngine.SuccessfulUpdate.TotalDurationUptimeMinutes";
 const char kMetricSuccessfulUpdateUpdatesAbandonedCount[] =
     "UpdateEngine.SuccessfulUpdate.UpdatesAbandonedCount";
 const char kMetricSuccessfulUpdateUrlSwitchCount[] =
@@ -100,12 +106,25 @@
 // UpdateEngine.Rollback.* metric.
 const char kMetricRollbackResult[] = "UpdateEngine.Rollback.Result";
 
+// UpdateEngine.EnterpriseRollback.* metrics.
+const char kMetricEnterpriseRollbackFailure[] =
+    "UpdateEngine.EnterpriseRollback.Failure";
+const char kMetricEnterpriseRollbackSuccess[] =
+    "UpdateEngine.EnterpriseRollback.Success";
+
 // UpdateEngine.CertificateCheck.* metrics.
 const char kMetricCertificateCheckUpdateCheck[] =
     "UpdateEngine.CertificateCheck.UpdateCheck";
 const char kMetricCertificateCheckDownload[] =
     "UpdateEngine.CertificateCheck.Download";
 
+// UpdateEngine.KernelKey.* metrics.
+const char kMetricKernelMinVersion[] = "UpdateEngine.KernelKey.MinVersion";
+const char kMetricKernelMaxRollforwardVersion[] =
+    "UpdateEngine.KernelKey.MaxRollforwardVersion";
+const char kMetricKernelMaxRollforwardSetSuccess[] =
+    "UpdateEngine.KernelKey.MaxRollforwardSetSuccess";
+
 // UpdateEngine.* metrics.
 const char kMetricFailedUpdateCount[] = "UpdateEngine.FailedUpdateCount";
 const char kMetricInstallDateProvisioningSource[] =
@@ -194,6 +213,25 @@
                             30 * 24 * 60,  // max: 30 days
                             50);           // num_buckets
   }
+
+  // First section of target version specified for the update.
+  if (system_state && system_state->request_params()) {
+    string target_version =
+        system_state->request_params()->target_version_prefix();
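+    // utils::VersionPrefix() yields the leading numeric component (e.g.
+    // "10575." becomes 10575) and 0 when no target version is set.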
+    value = utils::VersionPrefix(target_version);
+    if (value != 0) {
+      metric = metrics::kMetricCheckTargetVersion;
+      LOG(INFO) << "Sending " << value << " for metric " << metric
+                << " (sparse)";
+      metrics_lib_->SendSparseToUMA(metric, value);
+      if (system_state->request_params()->rollback_allowed()) {
+        metric = metrics::kMetricCheckRollbackTargetVersion;
+        LOG(INFO) << "Sending " << value << " for metric " << metric
+                  << " (sparse)";
+        metrics_lib_->SendSparseToUMA(metric, value);
+      }
+    }
+  }
 }
 
 void MetricsReporterOmaha::ReportAbnormallyTerminatedUpdateAttemptMetrics() {
@@ -269,12 +307,7 @@
       static_cast<int>(metrics::AttemptResult::kNumConstants));
 
   if (internal_error_code != ErrorCode::kSuccess) {
-    metric = metrics::kMetricAttemptInternalErrorCode;
-    LOG(INFO) << "Uploading " << internal_error_code << " for metric "
-              << metric;
-    metrics_lib_->SendEnumToUMA(metric,
-                                static_cast<int>(internal_error_code),
-                                static_cast<int>(ErrorCode::kUmaReportedMax));
+    ReportInternalErrorCode(internal_error_code);
   }
 
   base::TimeDelta time_since_last;
@@ -363,6 +396,7 @@
     int64_t num_bytes_downloaded[kNumDownloadSources],
     int download_overhead_percentage,
     base::TimeDelta total_duration,
+    base::TimeDelta total_duration_uptime,
     int reboot_count,
     int url_switch_count) {
   string metric = metrics::kMetricSuccessfulUpdatePayloadSizeMiB;
@@ -442,6 +476,15 @@
                           365 * 24 * 60,  // max: 365 days ~= 1 year
                           50);            // num_buckets
 
+  metric = metrics::kMetricSuccessfulUpdateTotalDurationUptimeMinutes;
+  LOG(INFO) << "Uploading " << utils::FormatTimeDelta(total_duration_uptime)
+            << " for metric " << metric;
+  metrics_lib_->SendToUMA(metric,
+                          static_cast<int>(total_duration_uptime.InMinutes()),
+                          0,             // min: 0 min
+                          30 * 24 * 60,  // max: 30 days
+                          50);           // num_buckets
+
   metric = metrics::kMetricSuccessfulUpdateRebootCount;
   LOG(INFO) << "Uploading reboot count of " << reboot_count << " for metric "
             << metric;
@@ -483,6 +526,16 @@
       metric, value, static_cast<int>(metrics::RollbackResult::kNumConstants));
 }
 
+void MetricsReporterOmaha::ReportEnterpriseRollbackMetrics(
+    bool success, const string& rollback_version) {
+  int value = utils::VersionPrefix(rollback_version);
+  string metric = metrics::kMetricEnterpriseRollbackSuccess;
+  if (!success)
+    metric = metrics::kMetricEnterpriseRollbackFailure;
+  LOG(INFO) << "Sending " << value << " for metric " << metric;
+  metrics_lib_->SendSparseToUMA(metric, value);
+}
+
 void MetricsReporterOmaha::ReportCertificateCheckMetrics(
     ServerToCheck server_to_check, CertificateCheckResult result) {
   string metric;
@@ -535,4 +588,33 @@
                               max);
 }
 
+void MetricsReporterOmaha::ReportInternalErrorCode(ErrorCode error_code) {
+  auto metric = metrics::kMetricAttemptInternalErrorCode;
+  LOG(INFO) << "Uploading " << error_code << " for metric " << metric;
+  metrics_lib_->SendEnumToUMA(metric,
+                              static_cast<int>(error_code),
+                              static_cast<int>(ErrorCode::kUmaReportedMax));
+}
+
+void MetricsReporterOmaha::ReportKeyVersionMetrics(
+    int kernel_min_version,
+    int kernel_max_rollforward_version,
+    bool kernel_max_rollforward_success) {
+  int value = kernel_min_version;
+  string metric = metrics::kMetricKernelMinVersion;
+  LOG(INFO) << "Sending " << value << " for metric " << metric;
+  metrics_lib_->SendSparseToUMA(metric, value);
+
+  value = kernel_max_rollforward_version;
+  metric = metrics::kMetricKernelMaxRollforwardVersion;
+  LOG(INFO) << "Sending " << value << " for metric " << metric;
+  metrics_lib_->SendSparseToUMA(metric, value);
+
+  bool bool_value = kernel_max_rollforward_success;
+  metric = metrics::kMetricKernelMaxRollforwardSetSuccess;
+  LOG(INFO) << "Sending " << bool_value << " for metric " << metric
+            << " (bool)";
+  metrics_lib_->SendBoolToUMA(metric, bool_value);
+}
+
 }  // namespace chromeos_update_engine
diff --git a/metrics_reporter_omaha.h b/metrics_reporter_omaha.h
index c19fe86..10aef86 100644
--- a/metrics_reporter_omaha.h
+++ b/metrics_reporter_omaha.h
@@ -18,6 +18,7 @@
 #define UPDATE_ENGINE_METRICS_REPORTER_OMAHA_H_
 
 #include <memory>
+#include <string>
 
 #include <base/time/time.h>
 #include <metrics/metrics_library.h>
@@ -42,6 +43,8 @@
 extern const char kMetricCheckDownloadErrorCode[];
 extern const char kMetricCheckReaction[];
 extern const char kMetricCheckResult[];
+extern const char kMetricCheckTargetVersion[];
+extern const char kMetricCheckRollbackTargetVersion[];
 extern const char kMetricCheckTimeSinceLastCheckMinutes[];
 extern const char kMetricCheckTimeSinceLastCheckUptimeMinutes[];
 
@@ -70,16 +73,26 @@
 extern const char kMetricSuccessfulUpdatePayloadSizeMiB[];
 extern const char kMetricSuccessfulUpdateRebootCount[];
 extern const char kMetricSuccessfulUpdateTotalDurationMinutes[];
+extern const char kMetricSuccessfulUpdateTotalDurationUptimeMinutes[];
 extern const char kMetricSuccessfulUpdateUpdatesAbandonedCount[];
 extern const char kMetricSuccessfulUpdateUrlSwitchCount[];
 
 // UpdateEngine.Rollback.* metric.
 extern const char kMetricRollbackResult[];
 
+// UpdateEngine.EnterpriseRollback.* metrics.
+extern const char kMetricEnterpriseRollbackFailure[];
+extern const char kMetricEnterpriseRollbackSuccess[];
+
 // UpdateEngine.CertificateCheck.* metrics.
 extern const char kMetricCertificateCheckUpdateCheck[];
 extern const char kMetricCertificateCheckDownload[];
 
+// UpdateEngine.KernelKey.* metrics.
+extern const char kMetricKernelMinVersion[];
+extern const char kMetricKernelMaxRollforwardVersion[];
+extern const char kMetricKernelMaxRollforwardSetSuccess[];
+
 // UpdateEngine.* metrics.
 extern const char kMetricFailedUpdateCount[];
 extern const char kMetricInstallDateProvisioningSource[];
@@ -97,6 +110,9 @@
 
   void ReportRollbackMetrics(metrics::RollbackResult result) override;
 
+  void ReportEnterpriseRollbackMetrics(
+      bool success, const std::string& rollback_version) override;
+
   void ReportDailyMetrics(base::TimeDelta os_age) override;
 
   void ReportUpdateCheckMetrics(
@@ -131,6 +147,7 @@
       int64_t num_bytes_downloaded[kNumDownloadSources],
       int download_overhead_percentage,
       base::TimeDelta total_duration,
+      base::TimeDelta total_duration_uptime,
       int reboot_count,
       int url_switch_count) override;
 
@@ -143,6 +160,12 @@
 
   void ReportInstallDateProvisioningSource(int source, int max) override;
 
+  void ReportInternalErrorCode(ErrorCode error_code) override;
+
+  void ReportKeyVersionMetrics(int kernel_min_version,
+                               int kernel_max_rollforward_version,
+                               bool kernel_max_rollforward_success) override;
+
  private:
   friend class MetricsReporterOmahaTest;
 
diff --git a/metrics_reporter_omaha_unittest.cc b/metrics_reporter_omaha_unittest.cc
index 76e33c6..878a323 100644
--- a/metrics_reporter_omaha_unittest.cc
+++ b/metrics_reporter_omaha_unittest.cc
@@ -29,8 +29,9 @@
 #include "update_engine/fake_system_state.h"
 
 using base::TimeDelta;
-using testing::AnyNumber;
 using testing::_;
+using testing::AnyNumber;
+using testing::Return;
 
 namespace chromeos_update_engine {
 class MetricsReporterOmahaTest : public ::testing::Test {
@@ -85,6 +86,14 @@
                               static_cast<int>(error_code)))
       .Times(2);
 
+  // Neither pinned nor rollback.
+  EXPECT_CALL(*mock_metrics_lib_,
+              SendSparseToUMA(metrics::kMetricCheckTargetVersion, _))
+      .Times(0);
+  EXPECT_CALL(*mock_metrics_lib_,
+              SendSparseToUMA(metrics::kMetricCheckRollbackTargetVersion, _))
+      .Times(0);
+
   EXPECT_CALL(
       *mock_metrics_lib_,
       SendToUMA(metrics::kMetricCheckTimeSinceLastCheckMinutes, 1, _, _, _))
@@ -101,6 +110,62 @@
   // Advance the clock by 1 minute and report the same metrics again.
   fake_clock.SetWallclockTime(base::Time::FromInternalValue(61000000));
   fake_clock.SetMonotonicTime(base::Time::FromInternalValue(61000000));
+  reporter_.ReportUpdateCheckMetrics(
+      &fake_system_state, result, reaction, error_code);
+}
+
+TEST_F(MetricsReporterOmahaTest, ReportUpdateCheckMetricsPinned) {
+  FakeSystemState fake_system_state;
+
+  OmahaRequestParams params(&fake_system_state);
+  params.set_target_version_prefix("10575.");
+  params.set_rollback_allowed(false);
+  fake_system_state.set_request_params(&params);
+
+  metrics::CheckResult result = metrics::CheckResult::kUpdateAvailable;
+  metrics::CheckReaction reaction = metrics::CheckReaction::kIgnored;
+  metrics::DownloadErrorCode error_code =
+      metrics::DownloadErrorCode::kHttpStatus200;
+
+  EXPECT_CALL(*mock_metrics_lib_,
+              SendSparseToUMA(metrics::kMetricCheckDownloadErrorCode, _));
+  // Target version set, but not a rollback.
+  EXPECT_CALL(*mock_metrics_lib_,
+              SendSparseToUMA(metrics::kMetricCheckTargetVersion, 10575))
+      .Times(1);
+  EXPECT_CALL(*mock_metrics_lib_,
+              SendSparseToUMA(metrics::kMetricCheckRollbackTargetVersion, _))
+      .Times(0);
+
+  reporter_.ReportUpdateCheckMetrics(
+      &fake_system_state, result, reaction, error_code);
+}
+
+TEST_F(MetricsReporterOmahaTest, ReportUpdateCheckMetricsRollback) {
+  FakeSystemState fake_system_state;
+
+  OmahaRequestParams params(&fake_system_state);
+  params.set_target_version_prefix("10575.");
+  params.set_rollback_allowed(true);
+  fake_system_state.set_request_params(&params);
+
+  metrics::CheckResult result = metrics::CheckResult::kUpdateAvailable;
+  metrics::CheckReaction reaction = metrics::CheckReaction::kIgnored;
+  metrics::DownloadErrorCode error_code =
+      metrics::DownloadErrorCode::kHttpStatus200;
+
+  EXPECT_CALL(*mock_metrics_lib_,
+              SendSparseToUMA(metrics::kMetricCheckDownloadErrorCode, _));
+  // Rollback.
+  EXPECT_CALL(*mock_metrics_lib_,
+              SendSparseToUMA(metrics::kMetricCheckTargetVersion, 10575))
+      .Times(1);
+  EXPECT_CALL(
+      *mock_metrics_lib_,
+      SendSparseToUMA(metrics::kMetricCheckRollbackTargetVersion, 10575))
+      .Times(1);
+
   reporter_.ReportUpdateCheckMetrics(
       &fake_system_state, result, reaction, error_code);
 }
@@ -258,6 +323,7 @@
   num_bytes_downloaded[0] = 200 * kNumBytesInOneMiB;
   int download_overhead_percentage = 20;
   TimeDelta total_duration = TimeDelta::FromMinutes(30);
+  TimeDelta total_duration_uptime = TimeDelta::FromMinutes(20);
   int reboot_count = 2;
   int url_switch_count = 2;
 
@@ -306,6 +372,14 @@
       .Times(1);
   EXPECT_CALL(
       *mock_metrics_lib_,
+      SendToUMA(metrics::kMetricSuccessfulUpdateTotalDurationUptimeMinutes,
+                20,
+                _,
+                _,
+                _))
+      .Times(1);
+  EXPECT_CALL(
+      *mock_metrics_lib_,
       SendToUMA(
           metrics::kMetricSuccessfulUpdateRebootCount, reboot_count, _, _, _))
       .Times(1);
@@ -333,6 +407,7 @@
                                           num_bytes_downloaded,
                                           download_overhead_percentage,
                                           total_duration,
+                                          total_duration_uptime,
                                           reboot_count,
                                           url_switch_count);
 }
@@ -347,6 +422,18 @@
   reporter_.ReportRollbackMetrics(result);
 }
 
+TEST_F(MetricsReporterOmahaTest, ReportEnterpriseRollbackMetrics) {
+  EXPECT_CALL(*mock_metrics_lib_,
+              SendSparseToUMA(metrics::kMetricEnterpriseRollbackSuccess, 10575))
+      .Times(1);
+  EXPECT_CALL(*mock_metrics_lib_,
+              SendSparseToUMA(metrics::kMetricEnterpriseRollbackFailure, 10323))
+      .Times(1);
+
+  reporter_.ReportEnterpriseRollbackMetrics(/*success=*/true, "10575.39.2");
+  reporter_.ReportEnterpriseRollbackMetrics(/*success=*/false, "10323.67.7");
+}
+
 TEST_F(MetricsReporterOmahaTest, ReportCertificateCheckMetrics) {
   ServerToCheck server_to_check = ServerToCheck::kUpdate;
   CertificateCheckResult result = CertificateCheckResult::kValid;
@@ -391,4 +478,26 @@
   reporter_.ReportInstallDateProvisioningSource(source, max);
 }
 
+TEST_F(MetricsReporterOmahaTest, ReportKeyVersionMetrics) {
+  int kernel_min_version = 0x00040002;
+  int kernel_max_rollforward_version = 0xfffffffe;
+  bool kernel_max_rollforward_success = true;
+  EXPECT_CALL(
+      *mock_metrics_lib_,
+      SendSparseToUMA(metrics::kMetricKernelMinVersion, kernel_min_version))
+      .Times(1);
+  EXPECT_CALL(*mock_metrics_lib_,
+              SendSparseToUMA(metrics::kMetricKernelMaxRollforwardVersion,
+                              kernel_max_rollforward_version))
+      .Times(1);
+  EXPECT_CALL(*mock_metrics_lib_,
+              SendBoolToUMA(metrics::kMetricKernelMaxRollforwardSetSuccess,
+                            kernel_max_rollforward_success))
+      .Times(1);
+
+  reporter_.ReportKeyVersionMetrics(kernel_min_version,
+                                    kernel_max_rollforward_version,
+                                    kernel_max_rollforward_success);
+}
+
 }  // namespace chromeos_update_engine
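
The ReportEnterpriseRollbackMetrics expectations above encode a simple
convention: only the leading milestone of the rollback version string is
reported as the sparse UMA sample ("10575.39.2" maps to 10575, "10323.67.7"
to 10323). A minimal standalone sketch of that mapping, with a hypothetical
helper name, assuming nothing beyond the values exercised in the test:

    #include <cassert>
    #include <cstdlib>
    #include <string>

    // Hypothetical helper, not part of this change: report only the leading
    // milestone component of a rollback version string.
    int MilestoneFromVersion(const std::string& version) {
      // atoi stops at the first non-digit, i.e. the first '.'.
      return std::atoi(version.c_str());
    }

    int main() {
      assert(MilestoneFromVersion("10575.39.2") == 10575);
      assert(MilestoneFromVersion("10323.67.7") == 10323);
      return 0;
    }
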
diff --git a/metrics_reporter_stub.h b/metrics_reporter_stub.h
index d0f75ab..87023ee 100644
--- a/metrics_reporter_stub.h
+++ b/metrics_reporter_stub.h
@@ -17,6 +17,8 @@
 #ifndef UPDATE_ENGINE_METRICS_REPORTER_STUB_H_
 #define UPDATE_ENGINE_METRICS_REPORTER_STUB_H_
 
+#include <string>
+
 #include "update_engine/common/error_code.h"
 #include "update_engine/metrics_constants.h"
 #include "update_engine/metrics_reporter_interface.h"
@@ -33,6 +35,9 @@
 
   void ReportRollbackMetrics(metrics::RollbackResult result) override {}
 
+  void ReportEnterpriseRollbackMetrics(
+      bool success, const std::string& rollback_version) override {}
+
   void ReportDailyMetrics(base::TimeDelta os_age) override {}
 
   void ReportUpdateCheckMetrics(
@@ -67,6 +72,7 @@
       int64_t num_bytes_downloaded[kNumDownloadSources],
       int download_overhead_percentage,
       base::TimeDelta total_duration,
+      base::TimeDelta total_duration_uptime,
       int reboot_count,
       int url_switch_count) override {}
 
@@ -79,6 +85,12 @@
 
   void ReportInstallDateProvisioningSource(int source, int max) override {}
 
+  void ReportInternalErrorCode(ErrorCode error_code) override {}
+
+  void ReportKeyVersionMetrics(int kernel_min_version,
+                               int kernel_max_rollforward_version,
+                               bool kernel_max_rollforward_success) override {}
+
  private:
   DISALLOW_COPY_AND_ASSIGN(MetricsReporterStub);
 };
diff --git a/metrics_utils.cc b/metrics_utils.cc
index 46530f0..e1aa744 100644
--- a/metrics_utils.cc
+++ b/metrics_utils.cc
@@ -68,6 +68,7 @@
     case ErrorCode::kDownloadWriteError:
     case ErrorCode::kFilesystemCopierError:
     case ErrorCode::kFilesystemVerifierError:
+    case ErrorCode::kVerityCalculationError:
       return metrics::AttemptResult::kOperationExecutionError;
 
     case ErrorCode::kDownloadMetadataSignatureMismatch:
@@ -83,6 +84,7 @@
 
     case ErrorCode::kNewRootfsVerificationError:
     case ErrorCode::kNewKernelVerificationError:
+    case ErrorCode::kRollbackNotPossible:
       return metrics::AttemptResult::kVerificationFailed;
 
     case ErrorCode::kPostinstallRunnerError:
@@ -114,6 +116,9 @@
     case ErrorCode::kPostinstallPowerwashError:
     case ErrorCode::kUpdateCanceledByChannelChange:
     case ErrorCode::kOmahaRequestXMLHasEntityDecl:
+    case ErrorCode::kOmahaUpdateIgnoredOverCellular:
+    case ErrorCode::kNoUpdate:
+    case ErrorCode::kFirstActiveOmahaPingSentPersistenceError:
       return metrics::AttemptResult::kInternalError;
 
     // Special flags. These can't happen (we mask them out above) but
@@ -214,8 +219,13 @@
     case ErrorCode::kOmahaRequestXMLHasEntityDecl:
     case ErrorCode::kFilesystemVerifierError:
     case ErrorCode::kUserCanceled:
+    case ErrorCode::kOmahaUpdateIgnoredOverCellular:
     case ErrorCode::kPayloadTimestampError:
     case ErrorCode::kUpdatedButNotActive:
+    case ErrorCode::kNoUpdate:
+    case ErrorCode::kRollbackNotPossible:
+    case ErrorCode::kFirstActiveOmahaPingSentPersistenceError:
+    case ErrorCode::kVerityCalculationError:
       break;
 
     // Special flags. These can't happen (we mask them out above) but
@@ -358,10 +368,19 @@
   CHECK(prefs);
   prefs->SetInt64(kPrefsUpdateTimestampStart,
                   update_start_time.ToInternalValue());
-  LOG(INFO) << "Update Timestamp Start = "
+  LOG(INFO) << "Update Monotonic Timestamp Start = "
             << utils::ToString(update_start_time);
 }
 
+void SetUpdateBootTimestampStart(const base::Time& update_start_boot_time,
+                                 PrefsInterface* prefs) {
+  CHECK(prefs);
+  prefs->SetInt64(kPrefsUpdateBootTimestampStart,
+                  update_start_boot_time.ToInternalValue());
+  LOG(INFO) << "Update Boot Timestamp Start = "
+            << utils::ToString(update_start_boot_time);
+}
+
 bool LoadAndReportTimeToReboot(MetricsReporterInterface* metrics_reporter,
                                PrefsInterface* prefs,
                                ClockInterface* clock) {
diff --git a/metrics_utils.h b/metrics_utils.h
index d08cc4a..8f1aad1 100644
--- a/metrics_utils.h
+++ b/metrics_utils.h
@@ -87,10 +87,16 @@
 // Persists the finished time of an update to the |kPrefsSystemUpdatedMarker|.
 void SetSystemUpdatedMarker(ClockInterface* clock, PrefsInterface* prefs);
 
-// Persists the start time of an update to |kPrefsUpdateTimestampStart|.
+// Persists the start monotonic time of an update to
+// |kPrefsUpdateTimestampStart|.
 void SetUpdateTimestampStart(const base::Time& update_start_time,
                              PrefsInterface* prefs);
 
+// Persists the start boot time of an update to
+// |kPrefsUpdateBootTimestampStart|.
+void SetUpdateBootTimestampStart(const base::Time& update_start_boot_time,
+                                 PrefsInterface* prefs);
+
 // Called at program startup if the device booted into a new update.
 // The |time_to_reboot| parameter contains the (monotonic-clock) duration
 // from when the update successfully completed (the value in
diff --git a/mock_boot_control_hal.h b/mock_boot_control_hal.h
new file mode 100644
index 0000000..4e9cb50
--- /dev/null
+++ b/mock_boot_control_hal.h
@@ -0,0 +1,49 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include <android/hardware/boot/1.0/IBootControl.h>
+#include <stdint.h>
+
+#include <gmock/gmock.h>
+
+namespace chromeos_update_engine {
+
+class MockBootControlHal
+    : public ::android::hardware::boot::V1_0::IBootControl {
+ public:
+  MOCK_METHOD0(getNumberSlots, ::android::hardware::Return<uint32_t>());
+  MOCK_METHOD0(getCurrentSlot, ::android::hardware::Return<uint32_t>());
+  MOCK_METHOD1(markBootSuccessful,
+               ::android::hardware::Return<void>(markBootSuccessful_cb));
+  MOCK_METHOD2(setActiveBootSlot,
+               ::android::hardware::Return<void>(uint32_t,
+                                                 setActiveBootSlot_cb));
+  MOCK_METHOD2(setSlotAsUnbootable,
+               ::android::hardware::Return<void>(uint32_t,
+                                                 setSlotAsUnbootable_cb));
+  MOCK_METHOD1(
+      isSlotBootable,
+      ::android::hardware::Return<::android::hardware::boot::V1_0::BoolResult>(
+          uint32_t));
+  MOCK_METHOD1(
+      isSlotMarkedSuccessful,
+      ::android::hardware::Return<::android::hardware::boot::V1_0::BoolResult>(
+          uint32_t));
+  MOCK_METHOD2(getSuffix,
+               ::android::hardware::Return<void>(uint32_t, getSuffix_cb));
+};
+
+}  // namespace chromeos_update_engine
diff --git a/mock_connection_manager.h b/mock_connection_manager.h
index e37460b..2fff68c 100644
--- a/mock_connection_manager.h
+++ b/mock_connection_manager.h
@@ -36,6 +36,7 @@
 
   MOCK_CONST_METHOD2(IsUpdateAllowedOver,
                      bool(ConnectionType type, ConnectionTethering tethering));
+  MOCK_CONST_METHOD0(IsAllowedConnectionTypesForUpdateSet, bool());
 };
 
 }  // namespace chromeos_update_engine
diff --git a/mock_dynamic_partition_control.h b/mock_dynamic_partition_control.h
new file mode 100644
index 0000000..ccbc7cf
--- /dev/null
+++ b/mock_dynamic_partition_control.h
@@ -0,0 +1,49 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include <stdint.h>
+
+#include <memory>
+#include <string>
+
+#include <gmock/gmock.h>
+
+#include "update_engine/dynamic_partition_control_interface.h"
+
+namespace chromeos_update_engine {
+
+class MockDynamicPartitionControl : public DynamicPartitionControlInterface {
+ public:
+  MOCK_METHOD4(
+      MapPartitionOnDeviceMapper,
+      bool(const std::string&, const std::string&, uint32_t, std::string*));
+  MOCK_METHOD2(UnmapPartitionOnDeviceMapper, bool(const std::string&, bool));
+  MOCK_METHOD0(Cleanup, void());
+  MOCK_METHOD1(DeviceExists, bool(const std::string&));
+  MOCK_METHOD1(GetState, ::android::dm::DmDeviceState(const std::string&));
+  MOCK_METHOD2(GetDmDevicePathByName, bool(const std::string&, std::string*));
+  MOCK_METHOD2(LoadMetadataBuilder,
+               std::unique_ptr<::android::fs_mgr::MetadataBuilder>(
+                   const std::string&, uint32_t));
+  MOCK_METHOD3(StoreMetadata,
+               bool(const std::string&,
+                    android::fs_mgr::MetadataBuilder*,
+                    uint32_t));
+  MOCK_METHOD1(GetDeviceDir, bool(std::string*));
+  MOCK_METHOD0(IsDynamicPartitionsEnabled, bool());
+};
+
+}  // namespace chromeos_update_engine
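
MockDynamicPartitionControl above is a plain gmock double for the dynamic
partition interface. A hedged sketch of how a test might wire it up follows;
the test name and the stubbed values are invented for illustration and assume
the interface declares exactly the methods mocked above:

    #include <string>

    #include <gmock/gmock.h>
    #include <gtest/gtest.h>

    #include "update_engine/mock_dynamic_partition_control.h"

    namespace chromeos_update_engine {
    namespace {

    using testing::_;
    using testing::DoAll;
    using testing::Return;
    using testing::SetArgPointee;

    // Illustrative only: stubs two of the mocked methods and verifies the
    // canned answers come back.
    TEST(MockDynamicPartitionControlExampleTest, ReturnsCannedAnswers) {
      MockDynamicPartitionControl mock;
      EXPECT_CALL(mock, IsDynamicPartitionsEnabled()).WillOnce(Return(true));
      EXPECT_CALL(mock, GetDeviceDir(_))
          .WillOnce(DoAll(SetArgPointee<0>(std::string("/fake/device/dir")),
                          Return(true)));

      std::string dir;
      EXPECT_TRUE(mock.IsDynamicPartitionsEnabled());
      EXPECT_TRUE(mock.GetDeviceDir(&dir));
      EXPECT_EQ("/fake/device/dir", dir);
    }

    }  // namespace
    }  // namespace chromeos_update_engine
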
diff --git a/mock_metrics_reporter.h b/mock_metrics_reporter.h
index a0f164b..c678a80 100644
--- a/mock_metrics_reporter.h
+++ b/mock_metrics_reporter.h
@@ -14,8 +14,10 @@
 // limitations under the License.
 //
 
-#ifndef UPDATE_ENGINE_MOCK_METRICS_REPORTER_H
-#define UPDATE_ENGINE_MOCK_METRICS_REPORTER_H
+#ifndef UPDATE_ENGINE_MOCK_METRICS_REPORTER_H_
+#define UPDATE_ENGINE_MOCK_METRICS_REPORTER_H_
+
+#include <string>
 
 #include <gmock/gmock.h>
 
@@ -29,6 +31,9 @@
 
   MOCK_METHOD1(ReportRollbackMetrics, void(metrics::RollbackResult result));
 
+  MOCK_METHOD2(ReportEnterpriseRollbackMetrics,
+               void(bool success, const std::string& rollback_version));
+
   MOCK_METHOD1(ReportDailyMetrics, void(base::TimeDelta os_age));
 
   MOCK_METHOD4(ReportUpdateCheckMetrics,
@@ -56,16 +61,17 @@
 
   MOCK_METHOD0(ReportAbnormallyTerminatedUpdateAttemptMetrics, void());
 
-  MOCK_METHOD9(ReportSuccessfulUpdateMetrics,
-               void(int attempt_count,
-                    int updates_abandoned_count,
-                    PayloadType payload_type,
-                    int64_t payload_size,
-                    int64_t num_bytes_downloaded[kNumDownloadSources],
-                    int download_overhead_percentage,
-                    base::TimeDelta total_duration,
-                    int reboot_count,
-                    int url_switch_count));
+  MOCK_METHOD10(ReportSuccessfulUpdateMetrics,
+                void(int attempt_count,
+                     int updates_abandoned_count,
+                     PayloadType payload_type,
+                     int64_t payload_size,
+                     int64_t num_bytes_downloaded[kNumDownloadSources],
+                     int download_overhead_percentage,
+                     base::TimeDelta total_duration,
+                     base::TimeDelta total_duration_uptime,
+                     int reboot_count,
+                     int url_switch_count));
 
   MOCK_METHOD2(ReportCertificateCheckMetrics,
                void(ServerToCheck server_to_check,
@@ -76,8 +82,15 @@
   MOCK_METHOD1(ReportTimeToReboot, void(int time_to_reboot_minutes));
 
   MOCK_METHOD2(ReportInstallDateProvisioningSource, void(int source, int max));
+
+  MOCK_METHOD1(ReportInternalErrorCode, void(ErrorCode error_code));
+
+  MOCK_METHOD3(ReportKeyVersionMetrics,
+               void(int kernel_min_version,
+                    int kernel_max_rollforward_version,
+                    bool kernel_max_rollforward_success));
 };
 
 }  // namespace chromeos_update_engine
 
-#endif  // UPDATE_ENGINE_MOCK_METRICS_REPORTER_H
+#endif  // UPDATE_ENGINE_MOCK_METRICS_REPORTER_H_
diff --git a/mock_omaha_request_params.h b/mock_omaha_request_params.h
index 6d8d3d8..2fe5e01 100644
--- a/mock_omaha_request_params.h
+++ b/mock_omaha_request_params.h
@@ -50,6 +50,7 @@
   MOCK_METHOD3(SetTargetChannel, bool(const std::string& channel,
                                       bool is_powerwash_allowed,
                                       std::string* error));
+  MOCK_CONST_METHOD0(target_version_prefix, std::string(void));
   MOCK_METHOD0(UpdateDownloadChannel, void(void));
   MOCK_CONST_METHOD0(IsUpdateUrlOfficial, bool(void));
   MOCK_CONST_METHOD0(ShouldPowerwash, bool(void));
diff --git a/mock_payload_state.h b/mock_payload_state.h
index 6dccc64..4ac3ccf 100644
--- a/mock_payload_state.h
+++ b/mock_payload_state.h
@@ -53,6 +53,7 @@
   MOCK_METHOD1(SetScatteringWaitPeriod, void(base::TimeDelta));
   MOCK_METHOD1(SetP2PUrl, void(const std::string&));
   MOCK_METHOD0(NextPayload, bool());
+  MOCK_METHOD1(SetStagingWaitPeriod, void(base::TimeDelta));
 
   // Getters.
   MOCK_METHOD0(GetResponseSignature, std::string());
@@ -68,6 +69,8 @@
   MOCK_METHOD1(GetCurrentBytesDownloaded, uint64_t(DownloadSource source));
   MOCK_METHOD1(GetTotalBytesDownloaded, uint64_t(DownloadSource source));
   MOCK_METHOD0(GetNumReboots, uint32_t());
+  MOCK_METHOD0(GetRollbackHappened, bool());
+  MOCK_METHOD1(SetRollbackHappened, void(bool));
   MOCK_METHOD0(GetRollbackVersion, std::string());
   MOCK_METHOD0(GetP2PNumAttempts, int());
   MOCK_METHOD0(GetP2PFirstAttemptTimestamp, base::Time());
@@ -75,7 +78,7 @@
   MOCK_CONST_METHOD0(GetUsingP2PForSharing, bool());
   MOCK_METHOD0(GetScatteringWaitPeriod, base::TimeDelta());
   MOCK_CONST_METHOD0(GetP2PUrl, std::string());
-  MOCK_CONST_METHOD0(GetAttemptErrorCode, ErrorCode());
+  MOCK_METHOD0(GetStagingWaitPeriod, base::TimeDelta());
 };
 
 }  // namespace chromeos_update_engine
diff --git a/mock_update_attempter.h b/mock_update_attempter.h
index d88b840..6253e3d 100644
--- a/mock_update_attempter.h
+++ b/mock_update_attempter.h
@@ -29,12 +29,14 @@
  public:
   using UpdateAttempter::UpdateAttempter;
 
-  MOCK_METHOD6(Update, void(const std::string& app_version,
-                            const std::string& omaha_url,
-                            const std::string& target_channel,
-                            const std::string& target_version_prefix,
-                            bool obey_proxies,
-                            bool interactive));
+  MOCK_METHOD7(Update,
+               void(const std::string& app_version,
+                    const std::string& omaha_url,
+                    const std::string& target_channel,
+                    const std::string& target_version_prefix,
+                    bool rollback_allowed,
+                    bool obey_proxies,
+                    bool interactive));
 
   MOCK_METHOD1(GetStatus, bool(update_engine::UpdateEngineStatus* out_status));
 
@@ -42,7 +44,7 @@
 
   MOCK_METHOD0(ResetStatus, bool(void));
 
-  MOCK_METHOD0(GetCurrentUpdateAttemptFlags, UpdateAttemptFlags(void));
+  MOCK_CONST_METHOD0(GetCurrentUpdateAttemptFlags, UpdateAttemptFlags(void));
 
   MOCK_METHOD3(CheckForUpdate,
                bool(const std::string& app_version,
@@ -54,8 +56,6 @@
   MOCK_CONST_METHOD0(consecutive_failed_update_checks, unsigned int(void));
 
   MOCK_CONST_METHOD0(server_dictated_poll_interval, unsigned int(void));
-
-  MOCK_METHOD0(IsAnyUpdateSourceAllowed, bool(void));
 };
 
 }  // namespace chromeos_update_engine
diff --git a/omaha_request_action.cc b/omaha_request_action.cc
index d58612c..95dee51 100644
--- a/omaha_request_action.cc
+++ b/omaha_request_action.cc
@@ -18,6 +18,7 @@
 
 #include <inttypes.h>
 
+#include <limits>
 #include <map>
 #include <sstream>
 #include <string>
@@ -35,6 +36,7 @@
 #include <brillo/key_value_store.h>
 #include <expat.h>
 #include <metrics/metrics_library.h>
+#include <policy/libpolicy.h>
 
 #include "update_engine/common/action_pipe.h"
 #include "update_engine/common/constants.h"
@@ -52,7 +54,9 @@
 
 using base::Time;
 using base::TimeDelta;
+using chromeos_update_manager::kRollforwardInfinity;
 using std::map;
+using std::numeric_limits;
 using std::string;
 using std::vector;
 
@@ -76,16 +80,20 @@
 static const char* kTagDisableP2PForDownloading = "DisableP2PForDownloading";
 static const char* kTagDisableP2PForSharing = "DisableP2PForSharing";
 static const char* kTagPublicKeyRsa = "PublicKeyRsa";
+static const char* kTagPowerwash = "Powerwash";
 
 static const char* kOmahaUpdaterVersion = "0.1.0.0";
 
-// X-GoogleUpdate headers.
-static const char* kXGoogleUpdateInteractivity = "X-GoogleUpdate-Interactivity";
-static const char* kXGoogleUpdateAppId = "X-GoogleUpdate-AppId";
-static const char* kXGoogleUpdateUpdater = "X-GoogleUpdate-Updater";
+// X-Goog-Update headers.
+static const char* kXGoogleUpdateInteractivity = "X-Goog-Update-Interactivity";
+static const char* kXGoogleUpdateAppId = "X-Goog-Update-AppId";
+static const char* kXGoogleUpdateUpdater = "X-Goog-Update-Updater";
 
 // updatecheck attributes (without the underscore prefix).
 static const char* kEolAttr = "eol";
+static const char* kRollback = "rollback";
+static const char* kFirmwareVersion = "firmware_version";
+static const char* kKernelVersion = "kernel_version";
 
 namespace {
 
@@ -125,10 +133,17 @@
     if (include_ping)
         app_body = GetPingXml(ping_active_days, ping_roll_call_days);
     if (!ping_only) {
-      app_body += base::StringPrintf(
-          "        <updatecheck targetversionprefix=\"%s\""
-          "></updatecheck>\n",
-          XmlEncodeWithDefault(params->target_version_prefix(), "").c_str());
+      app_body += "        <updatecheck";
+      if (!params->target_version_prefix().empty()) {
+        app_body += base::StringPrintf(
+            " targetversionprefix=\"%s\"",
+            XmlEncodeWithDefault(params->target_version_prefix(), "").c_str());
+        // Rollback requires target_version_prefix set.
+        if (params->rollback_allowed()) {
+          app_body += " rollback_allowed=\"true\"";
+        }
+      }
+      app_body += "></updatecheck>\n";
 
       // If this is the first update check after a reboot following a previous
       // update, generate an event containing the previous version number. If
@@ -179,7 +194,7 @@
                        const string arg_name,
                        const string prefs_key) {
   // There's nothing wrong with not having a given cohort setting, so we check
-  // existance first to avoid the warning log message.
+  // existence first to avoid the warning log message.
   if (!prefs->Exists(prefs_key))
     return "";
   string cohort_value;
@@ -619,12 +634,14 @@
     std::unique_ptr<HttpFetcher> http_fetcher,
     bool ping_only)
     : system_state_(system_state),
+      params_(system_state->request_params()),
       event_(event),
       http_fetcher_(std::move(http_fetcher)),
+      policy_provider_(std::make_unique<policy::PolicyProvider>()),
       ping_only_(ping_only),
       ping_active_days_(0),
       ping_roll_call_days_(0) {
-  params_ = system_state->request_params();
+  policy_provider_->Reload();
 }
 
 OmahaRequestAction::~OmahaRequestAction() {}
@@ -762,7 +779,7 @@
                                     GetInstallDate(system_state_),
                                     system_state_));
 
-  // Set X-GoogleUpdate headers.
+  // Set X-Goog-Update headers.
   http_fetcher_->SetHeader(kXGoogleUpdateInteractivity,
                            params_->interactive() ? "fg" : "bg");
   http_fetcher_->SetHeader(kXGoogleUpdateAppId, params_->GetAppId());
@@ -784,11 +801,12 @@
 
 // We just store the response in the buffer. Once we've received all bytes,
 // we'll look in the buffer and decide what to do.
-void OmahaRequestAction::ReceivedBytes(HttpFetcher *fetcher,
+bool OmahaRequestAction::ReceivedBytes(HttpFetcher* fetcher,
                                        const void* bytes,
                                        size_t length) {
   const uint8_t* byte_ptr = reinterpret_cast<const uint8_t*>(bytes);
   response_buffer_.insert(response_buffer_.end(), byte_ptr, byte_ptr + length);
+  return true;
 }
 
 namespace {
@@ -923,6 +941,20 @@
   return true;
 }
 
+// Parses the two key version strings, kernel_version and firmware_version. If
+// a field is not present or cannot be parsed, the values default to 0xffff.
+void ParseRollbackVersions(OmahaParserData* parser_data,
+                           OmahaResponse* output_object) {
+  utils::ParseRollbackKeyVersion(
+      parser_data->updatecheck_attrs[kFirmwareVersion],
+      &output_object->rollback_key_version.firmware_key,
+      &output_object->rollback_key_version.firmware);
+  utils::ParseRollbackKeyVersion(
+      parser_data->updatecheck_attrs[kKernelVersion],
+      &output_object->rollback_key_version.kernel_key,
+      &output_object->rollback_key_version.kernel);
+}
+
 }  // namespace
 
 bool OmahaRequestAction::ParseResponse(OmahaParserData* parser_data,
@@ -985,6 +1017,14 @@
 
   // Parse the updatecheck attributes.
   PersistEolStatus(parser_data->updatecheck_attrs);
+  // Rollback-related updatecheck attributes.
+  // Defaults to false if attribute is not present.
+  output_object->is_rollback =
+      ParseBool(parser_data->updatecheck_attrs[kRollback]);
+
+  // Parses the rollback versions of the current image. If the fields do not
+  // exist, they default to 0xffff for the four key versions.
+  ParseRollbackVersions(parser_data, output_object);
 
   if (!ParseStatus(parser_data, output_object, completer))
     return false;
@@ -1085,6 +1125,7 @@
 
   output_object->disable_payload_backoff =
       ParseBool(attrs[kTagDisablePayloadBackoff]);
+  output_object->powerwash_required = ParseBool(attrs[kTagPowerwash]);
 
   return true;
 }
@@ -1100,6 +1141,9 @@
 
   PayloadStateInterface* const payload_state = system_state_->payload_state();
 
+  // Set the max kernel key version based on whether rollback is allowed.
+  SetMaxKernelKeyVersionForRollback();
+
   // Events are best effort transactions -- assume they always succeed.
   if (IsEvent()) {
     CHECK(!HasOutputPipe()) << "No output pipe allowed for event requests.";
@@ -1129,13 +1173,13 @@
       reinterpret_cast<const char*>(response_buffer_.data()),
       response_buffer_.size(),
       XML_TRUE);
-  XML_ParserFree(parser);
 
   if (res != XML_STATUS_OK || parser_data.failed) {
     LOG(ERROR) << "Omaha response not valid XML: "
                << XML_ErrorString(XML_GetErrorCode(parser))
                << " at line " << XML_GetCurrentLineNumber(parser)
                << " col " << XML_GetCurrentColumnNumber(parser);
+    XML_ParserFree(parser);
     ErrorCode error_code = ErrorCode::kOmahaRequestXMLParseError;
     if (response_buffer_.empty()) {
       error_code = ErrorCode::kOmahaRequestEmptyResponseError;
@@ -1145,6 +1189,7 @@
     completer.set_code(error_code);
     return;
   }
+  XML_ParserFree(parser);
 
   // Update the last ping day preferences based on the server daystart response
   // even if we didn't send a ping. Omaha always includes the daystart in the
@@ -1159,7 +1204,10 @@
   // their a=-1 in the past and we have to set first_active_omaha_ping_sent for
   // future checks.
   if (!system_state_->hardware()->GetFirstActiveOmahaPingSent()) {
-    system_state_->hardware()->SetFirstActiveOmahaPingSent();
+    if (!system_state_->hardware()->SetFirstActiveOmahaPingSent()) {
+      system_state_->metrics_reporter()->ReportInternalErrorCode(
+          ErrorCode::kFirstActiveOmahaPingSentPersistenceError);
+    }
   }
 
   if (!HasOutputPipe()) {
@@ -1175,9 +1223,11 @@
   output_object.update_exists = true;
   SetOutputObject(output_object);
 
-  if (ShouldIgnoreUpdate(output_object)) {
-    output_object.update_exists = false;
-    completer.set_code(ErrorCode::kOmahaUpdateIgnoredPerPolicy);
+  ErrorCode error = ErrorCode::kSuccess;
+  if (ShouldIgnoreUpdate(&error, output_object)) {
+    // No need to change output_object.update_exists here, since the value
+    // has been output to the pipe.
+    completer.set_code(error);
     return;
   }
 
@@ -1235,7 +1285,8 @@
 
   if (system_state_->hardware()->IsOOBEEnabled() &&
       !system_state_->hardware()->IsOOBEComplete(nullptr) &&
-      output_object.deadline.empty() &&
+      (output_object.deadline.empty() ||
+       payload_state->GetRollbackHappened()) &&
       params_->app_version() != "ForcedUpdate") {
     output_object.update_exists = false;
     LOG(INFO) << "Ignoring non-critical Omaha updates until OOBE is done.";
@@ -1431,13 +1482,18 @@
       system_state_->clock()->GetWallclockTime() - update_first_seen_at;
   TimeDelta max_scatter_period =
       TimeDelta::FromDays(output_object->max_days_to_scatter);
+  int64_t staging_wait_time_in_days = 0;
+  // If staging is on, use its predetermined maximum wait period instead.
+  if (system_state_->prefs()->GetInt64(kPrefsWallClockStagingWaitPeriod,
+                                       &staging_wait_time_in_days) &&
+      staging_wait_time_in_days > 0)
+    max_scatter_period = TimeDelta::FromDays(kMaxWaitTimeStagingInDays);
 
   LOG(INFO) << "Waiting Period = "
             << utils::FormatSecs(params_->waiting_period().InSeconds())
             << ", Time Elapsed = "
             << utils::FormatSecs(elapsed_time.InSeconds())
-            << ", MaxDaysToScatter = "
-            << max_scatter_period.InDays();
+            << ", MaxDaysToScatter = " << max_scatter_period.InDays();
 
   if (!output_object->deadline.empty()) {
     // The deadline is set for all rules which serve a delta update from a
@@ -1637,6 +1693,7 @@
     break;
 
   case ErrorCode::kOmahaUpdateIgnoredPerPolicy:
+  case ErrorCode::kOmahaUpdateIgnoredOverCellular:
     result = metrics::CheckResult::kUpdateAvailable;
     reaction = metrics::CheckReaction::kIgnored;
     break;
@@ -1671,7 +1728,7 @@
 }
 
 bool OmahaRequestAction::ShouldIgnoreUpdate(
-    const OmahaResponse& response) const {
+    ErrorCode* error, const OmahaResponse& response) const {
   // Note: policy decision to not update to a version we rolled back from.
   string rollback_version =
       system_state_->payload_state()->GetRollbackVersion();
@@ -1679,11 +1736,12 @@
     LOG(INFO) << "Detected previous rollback from version " << rollback_version;
     if (rollback_version == response.version) {
       LOG(INFO) << "Received version that we rolled back from. Ignoring.";
+      *error = ErrorCode::kOmahaUpdateIgnoredPerPolicy;
       return true;
     }
   }
 
-  if (!IsUpdateAllowedOverCurrentConnection()) {
+  if (!IsUpdateAllowedOverCurrentConnection(error, response)) {
     LOG(INFO) << "Update is not allowed over current connection.";
     return true;
   }
@@ -1698,7 +1756,62 @@
   return false;
 }
 
-bool OmahaRequestAction::IsUpdateAllowedOverCurrentConnection() const {
+bool OmahaRequestAction::IsUpdateAllowedOverCellularByPrefs(
+    const OmahaResponse& response) const {
+  PrefsInterface* prefs = system_state_->prefs();
+
+  if (!prefs) {
+    LOG(INFO) << "Disabling updates over cellular as the preferences are "
+                 "not available.";
+    return false;
+  }
+
+  bool is_allowed;
+
+  if (prefs->Exists(kPrefsUpdateOverCellularPermission) &&
+      prefs->GetBoolean(kPrefsUpdateOverCellularPermission, &is_allowed) &&
+      is_allowed) {
+    LOG(INFO) << "Allowing updates over cellular as permission preference is "
+                 "set to true.";
+    return true;
+  }
+
+  if (!prefs->Exists(kPrefsUpdateOverCellularTargetVersion) ||
+      !prefs->Exists(kPrefsUpdateOverCellularTargetSize)) {
+    LOG(INFO) << "Disabling updates over cellular as permission preference is "
+                 "set to false or does not exist while target does not exist.";
+    return false;
+  }
+
+  std::string target_version;
+  int64_t target_size;
+
+  if (!prefs->GetString(kPrefsUpdateOverCellularTargetVersion,
+                        &target_version) ||
+      !prefs->GetInt64(kPrefsUpdateOverCellularTargetSize, &target_size)) {
+    LOG(INFO) << "Disabling updates over cellular as the target version or "
+                 "size is not accessible.";
+    return false;
+  }
+
+  uint64_t total_packages_size = 0;
+  for (const auto& package : response.packages) {
+    total_packages_size += package.size;
+  }
+  if (target_version == response.version &&
+      static_cast<uint64_t>(target_size) == total_packages_size) {
+    LOG(INFO) << "Allowing updates over cellular as the target matches the"
+                 "omaha response.";
+    return true;
+  } else {
+    LOG(INFO) << "Disabling updates over cellular as the target does not"
+                 "match the omaha response.";
+    return false;
+  }
+}
+
+bool OmahaRequestAction::IsUpdateAllowedOverCurrentConnection(
+    ErrorCode* error, const OmahaResponse& response) const {
   ConnectionType type;
   ConnectionTethering tethering;
   ConnectionManagerInterface* connection_manager =
@@ -1708,11 +1821,97 @@
               << "Defaulting to allow updates.";
     return true;
   }
+
   bool is_allowed = connection_manager->IsUpdateAllowedOver(type, tethering);
+  bool is_device_policy_set =
+      connection_manager->IsAllowedConnectionTypesForUpdateSet();
+  // Treat a tethered connection as if it were a cellular connection.
+  bool is_over_cellular = type == ConnectionType::kCellular ||
+                          tethering == ConnectionTethering::kConfirmed;
+
+  if (!is_over_cellular) {
+    // There's no need to further check user preferences as we are not on a
+    // cellular connection.
+    if (!is_allowed)
+      *error = ErrorCode::kOmahaUpdateIgnoredPerPolicy;
+  } else if (is_device_policy_set) {
+    // There's no need to further check user preferences as the device policy
+    // is set regarding updates over cellular.
+    if (!is_allowed)
+      *error = ErrorCode::kOmahaUpdateIgnoredPerPolicy;
+  } else {
+    // Device policy is not set, so user preferences determine whether to
+    // allow updates over cellular.
+    is_allowed = IsUpdateAllowedOverCellularByPrefs(response);
+    if (!is_allowed)
+      *error = ErrorCode::kOmahaUpdateIgnoredOverCellular;
+  }
+
   LOG(INFO) << "We are connected via "
             << connection_utils::StringForConnectionType(type)
             << ", Updates allowed: " << (is_allowed ? "Yes" : "No");
   return is_allowed;
 }
 
+bool OmahaRequestAction::IsRollbackEnabled() const {
+  if (policy_provider_->IsConsumerDevice()) {
+    LOG(INFO) << "Rollback is not enabled for consumer devices.";
+    return false;
+  }
+
+  if (!policy_provider_->device_policy_is_loaded()) {
+    LOG(INFO) << "No device policy is loaded. Assuming rollback enabled.";
+    return true;
+  }
+
+  int allowed_milestones;
+  if (!policy_provider_->GetDevicePolicy().GetRollbackAllowedMilestones(
+          &allowed_milestones)) {
+    LOG(INFO) << "RollbackAllowedMilestones policy can't be read. "
+                 "Defaulting to rollback enabled.";
+    return true;
+  }
+
+  LOG(INFO) << "Rollback allows " << allowed_milestones << " milestones.";
+  return allowed_milestones > 0;
+}
+
+void OmahaRequestAction::SetMaxKernelKeyVersionForRollback() const {
+  int max_kernel_rollforward;
+  int min_kernel_version = system_state_->hardware()->GetMinKernelKeyVersion();
+  if (IsRollbackEnabled()) {
+    // If rollback is enabled, set the max kernel key version to the current
+    // kernel key version. This has the effect of freezing kernel key roll
+    // forwards.
+    //
+    // TODO(zentaro): This behavior is temporary, and ensures that no kernel
+    // key roll forward happens until the server side components of rollback
+    // are implemented. Future changes will allow the Omaha server to return
+    // the kernel key version from max_rollback_versions in the past. At that
+    // point the max kernel key version will be set to that value, creating a
+    // sliding window of versions that can be rolled back to.
+    LOG(INFO) << "Rollback is enabled. Setting kernel_max_rollforward to "
+              << min_kernel_version;
+    max_kernel_rollforward = min_kernel_version;
+  } else {
+    // For devices that are not rollback enabled (i.e. consumer devices), the
+    // max kernel key version is set to 0xfffffffe, which is logically
+    // infinity. This maintains the previous behavior in which kernel key
+    // versions roll forward each time they are incremented.
+    LOG(INFO) << "Rollback is disabled. Setting kernel_max_rollforward to "
+              << kRollforwardInfinity;
+    max_kernel_rollforward = kRollforwardInfinity;
+  }
+
+  bool max_rollforward_set =
+      system_state_->hardware()->SetMaxKernelKeyRollforward(
+          max_kernel_rollforward);
+  if (!max_rollforward_set) {
+    LOG(ERROR) << "Failed to set kernel_max_rollforward";
+  }
+  // Report the key version metrics.
+  system_state_->metrics_reporter()->ReportKeyVersionMetrics(
+      min_kernel_version, max_kernel_rollforward, max_rollforward_set);
+}
+
 }  // namespace chromeos_update_engine
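
The new SetMaxKernelKeyVersionForRollback() above reduces to one decision,
spelled out in its comments: when rollback is enabled, pin
kernel_max_rollforward to the current minimum kernel key version (freezing
kernel key rollforward); otherwise use kRollforwardInfinity (0xfffffffe). A
minimal standalone sketch of that choice, with a hypothetical helper name and
the hardware/metrics plumbing omitted:

    #include <cstdint>
    #include <iostream>

    // Matches the sentinel checked by the unit test's static_assert.
    constexpr uint32_t kRollforwardInfinity = 0xfffffffe;

    // Hypothetical helper, not part of this change: choose the max kernel key
    // rollforward value based on whether rollback is enabled.
    uint32_t ChooseMaxKernelRollforward(bool rollback_enabled,
                                        uint32_t min_kernel_version) {
      return rollback_enabled ? min_kernel_version : kRollforwardInfinity;
    }

    int main() {
      // 0x00040002 is the example kernel_min_version from the unit test above.
      std::cout << std::hex << ChooseMaxKernelRollforward(true, 0x00040002)
                << "\n"
                << ChooseMaxKernelRollforward(false, 0x00040002) << "\n";
      return 0;
    }
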
diff --git a/omaha_request_action.h b/omaha_request_action.h
index 924da40..1034c3f 100644
--- a/omaha_request_action.h
+++ b/omaha_request_action.h
@@ -39,6 +39,10 @@
 // The Omaha Request action makes a request to Omaha and can output
 // the response on the output ActionPipe.
 
+namespace policy {
+class PolicyProvider;
+}
+
 namespace chromeos_update_engine {
 
 // Encodes XML entities in a given string. Input must be ASCII-7 valid. If
@@ -123,6 +127,10 @@
   // fallback ones.
   static const int kDefaultMaxFailureCountPerUrl = 10;
 
+  // If staging is enabled, set the maximum wait time to 28 days, since that is
+  // the predetermined wait time for staging.
+  static const int kMaxWaitTimeStagingInDays = 28;
+
   // These are the possible outcome upon checking whether we satisfied
   // the wall-clock-based-wait.
   enum WallClockWaitResult {
@@ -163,8 +171,9 @@
   std::string Type() const override { return StaticType(); }
 
   // Delegate methods (see http_fetcher.h)
-  void ReceivedBytes(HttpFetcher *fetcher,
-                     const void* bytes, size_t length) override;
+  bool ReceivedBytes(HttpFetcher* fetcher,
+                     const void* bytes,
+                     size_t length) override;
 
   void TransferComplete(HttpFetcher *fetcher, bool successful) override;
 
@@ -172,6 +181,8 @@
   bool IsEvent() const { return event_.get() != nullptr; }
 
  private:
+  friend class OmahaRequestActionTest;
+  friend class OmahaRequestActionTestProcessorDelegate;
   FRIEND_TEST(OmahaRequestActionTest, GetInstallDateWhenNoPrefsNorOOBE);
   FRIEND_TEST(OmahaRequestActionTest,
               GetInstallDateWhenOOBECompletedWithInvalidDate);
@@ -292,11 +303,24 @@
   void OnLookupPayloadViaP2PCompleted(const std::string& url);
 
   // Returns true if the current update should be ignored.
-  bool ShouldIgnoreUpdate(const OmahaResponse& response) const;
+  bool ShouldIgnoreUpdate(ErrorCode* error,
+                          const OmahaResponse& response) const;
+
+  // Returns true if updates over cellular are allowed by user preferences.
+  bool IsUpdateAllowedOverCellularByPrefs(const OmahaResponse& response) const;
 
   // Returns true if updates are allowed over the current type of connection.
   // False otherwise.
-  bool IsUpdateAllowedOverCurrentConnection() const;
+  bool IsUpdateAllowedOverCurrentConnection(
+      ErrorCode* error, const OmahaResponse& response) const;
+
+  // Returns true if rollback is enabled. Always returns false for consumer
+  // devices.
+  bool IsRollbackEnabled() const;
+
+  // Sets the appropriate max kernel key version based on whether rollback is
+  // enabled.
+  void SetMaxKernelKeyVersionForRollback() const;
 
   // Global system context.
   SystemState* system_state_;
@@ -310,6 +334,9 @@
   // pointer to the HttpFetcher that does the http work
   std::unique_ptr<HttpFetcher> http_fetcher_;
 
+  // Used for fetching information about the device policy.
+  std::unique_ptr<policy::PolicyProvider> policy_provider_;
+
   // If true, only include the <ping> element in the request.
   bool ping_only_;
 
diff --git a/omaha_request_action_unittest.cc b/omaha_request_action_unittest.cc
index 2f466dd..1e0ad6d 100644
--- a/omaha_request_action_unittest.cc
+++ b/omaha_request_action_unittest.cc
@@ -20,6 +20,7 @@
 
 #include <memory>
 #include <string>
+#include <utility>
 #include <vector>
 
 #include <base/bind.h>
@@ -35,6 +36,8 @@
 #include <brillo/message_loops/message_loop.h>
 #include <brillo/message_loops/message_loop_utils.h>
 #include <gtest/gtest.h>
+#include <policy/libpolicy.h>
+#include <policy/mock_libpolicy.h>
 
 #include "update_engine/common/action_pipe.h"
 #include "update_engine/common/constants.h"
@@ -49,9 +52,11 @@
 #include "update_engine/mock_connection_manager.h"
 #include "update_engine/mock_payload_state.h"
 #include "update_engine/omaha_request_params.h"
+#include "update_engine/update_manager/rollback_prefs.h"
 
 using base::Time;
 using base::TimeDelta;
+using chromeos_update_manager::kRollforwardInfinity;
 using std::string;
 using std::vector;
 using testing::AllOf;
@@ -61,6 +66,7 @@
 using testing::Le;
 using testing::NiceMock;
 using testing::Return;
+using testing::ReturnRef;
 using testing::ReturnPointee;
 using testing::SaveArg;
 using testing::SetArgPointee;
@@ -68,12 +74,26 @@
 
 namespace {
 
+static_assert(kRollforwardInfinity == 0xfffffffe,
+              "Don't change the value of kRollforward infinity unless its "
+              "size has been changed in firmware.");
+
 const char kTestAppId[] = "test-app-id";
 const char kTestAppId2[] = "test-app2-id";
 
 // This is a helper struct to allow unit tests build an update response with the
 // values they care about.
 struct FakeUpdateResponse {
+  string GetRollbackVersionAttributes() const {
+    return (rollback ? " _rollback=\"true\"" : "") +
+           (!rollback_firmware_version.empty()
+                ? " _firmware_version=\"" + rollback_firmware_version + "\""
+                : "") +
+           (!rollback_kernel_version.empty()
+                ? " _kernel_version=\"" + rollback_kernel_version + "\""
+                : "");
+  }
+
   string GetNoUpdateResponse() const {
     string entity_str;
     if (include_entity)
@@ -111,8 +131,8 @@
                       "\" cohortname=\"" + cohortname + "\" "
                 : "") +
            " status=\"ok\">"
-           "<ping status=\"ok\"/><updatecheck status=\"ok\">"
-           "<urls><url codebase=\"" +
+           "<ping status=\"ok\"/><updatecheck status=\"ok\"" +
+           GetRollbackVersionAttributes() + ">" + "<urls><url codebase=\"" +
            codebase +
            "\"/></urls>"
            "<manifest version=\"" +
@@ -143,6 +163,7 @@
            (disable_p2p_for_downloading ? "DisableP2PForDownloading=\"true\" "
                                         : "") +
            (disable_p2p_for_sharing ? "DisableP2PForSharing=\"true\" " : "") +
+           (powerwash ? "Powerwash=\"true\" " : "") +
            "/></actions></manifest></updatecheck></app>" +
            (multi_app
                 ? "<app appid=\"" + app_id2 + "\"" +
@@ -190,6 +211,8 @@
   bool disable_p2p_for_downloading = false;
   bool disable_p2p_for_sharing = false;
 
+  bool powerwash = false;
+
   // Omaha cohorts settings.
   bool include_cohorts = false;
   string cohort = "";
@@ -207,15 +230,88 @@
   bool multi_app_no_update = false;
   // Whether to include more than one package in an app.
   bool multi_package = false;
+
+  // Whether the payload is a rollback.
+  bool rollback = false;
+  // The verified boot firmware key version for the rollback image.
+  string rollback_firmware_version = "";
+  // The verified boot kernel key version for the rollback image.
+  string rollback_kernel_version = "";
 };
 
 }  // namespace
 
 namespace chromeos_update_engine {
 
+class OmahaRequestActionTestProcessorDelegate : public ActionProcessorDelegate {
+ public:
+  OmahaRequestActionTestProcessorDelegate()
+      : expected_code_(ErrorCode::kSuccess),
+        interactive_(false),
+        test_http_fetcher_headers_(false) {}
+  ~OmahaRequestActionTestProcessorDelegate() override = default;
+
+  void ProcessingDone(const ActionProcessor* processor,
+                      ErrorCode code) override {
+    brillo::MessageLoop::current()->BreakLoop();
+  }
+
+  void ActionCompleted(ActionProcessor* processor,
+                       AbstractAction* action,
+                       ErrorCode code) override {
+    // Make sure actions always succeed.
+    if (action->Type() == OmahaRequestAction::StaticType()) {
+      EXPECT_EQ(expected_code_, code);
+      // Check that the headers were set in the fetcher during the action. Note
+      // that we set this request as "interactive".
+      auto fetcher = static_cast<const MockHttpFetcher*>(
+          static_cast<OmahaRequestAction*>(action)->http_fetcher_.get());
+
+      if (test_http_fetcher_headers_) {
+        EXPECT_EQ(interactive_ ? "fg" : "bg",
+                  fetcher->GetHeader("X-Goog-Update-Interactivity"));
+        EXPECT_EQ(kTestAppId, fetcher->GetHeader("X-Goog-Update-AppId"));
+        EXPECT_NE("", fetcher->GetHeader("X-Goog-Update-Updater"));
+      }
+      post_data_ = fetcher->post_data();
+    } else if (action->Type() ==
+               ObjectCollectorAction<OmahaResponse>::StaticType()) {
+      EXPECT_EQ(ErrorCode::kSuccess, code);
+      auto collector_action =
+          static_cast<ObjectCollectorAction<OmahaResponse>*>(action);
+      omaha_response_.reset(new OmahaResponse(collector_action->object()));
+      EXPECT_TRUE(omaha_response_);
+    } else {
+      EXPECT_EQ(ErrorCode::kSuccess, code);
+    }
+  }
+  ErrorCode expected_code_;
+  brillo::Blob post_data_;
+  bool interactive_;
+  bool test_http_fetcher_headers_;
+  std::unique_ptr<OmahaResponse> omaha_response_;
+};
+
 class OmahaRequestActionTest : public ::testing::Test {
  protected:
   void SetUp() override {
+    request_params_.set_os_sp("service_pack");
+    request_params_.set_os_board("x86-generic");
+    request_params_.set_app_id(kTestAppId);
+    request_params_.set_app_version("0.1.0.0");
+    request_params_.set_app_lang("en-US");
+    request_params_.set_current_channel("unittest");
+    request_params_.set_target_channel("unittest");
+    request_params_.set_hwid("OEM MODEL 09235 7471");
+    request_params_.set_fw_version("ChromeOSFirmware.1.0");
+    request_params_.set_ec_version("0X0A1");
+    request_params_.set_delta_okay(true);
+    request_params_.set_interactive(false);
+    request_params_.set_update_url("http://url");
+    request_params_.set_target_version_prefix("");
+    request_params_.set_rollback_allowed(false);
+    request_params_.set_is_powerwash_allowed(false);
+
     fake_system_state_.set_request_params(&request_params_);
     fake_system_state_.set_prefs(&fake_prefs_);
   }
@@ -235,8 +331,22 @@
   // about reporting UpdateEngine.Check.{Result,Reaction,DownloadError}
   // UMA statistics. Use the appropriate ::kUnset value to specify that
   // the given metric should not be reported.
-  bool TestUpdateCheck(OmahaRequestParams* request_params,
-                       const string& http_response,
+  bool TestUpdateCheck(const string& http_response,
+                       int fail_http_response_code,
+                       bool ping_only,
+                       bool is_consumer_device,
+                       int rollback_allowed_milestones,
+                       bool is_policy_loaded,
+                       ErrorCode expected_code,
+                       metrics::CheckResult expected_check_result,
+                       metrics::CheckReaction expected_check_reaction,
+                       metrics::DownloadErrorCode expected_download_error_code,
+                       OmahaResponse* out_response,
+                       brillo::Blob* out_post_data);
+
+  // Overload of TestUpdateCheck that omits the rollback-specific parameters
+  // (|is_consumer_device|, |rollback_allowed_milestones|, |is_policy_loaded|).
+  bool TestUpdateCheck(const string& http_response,
                        int fail_http_response_code,
                        bool ping_only,
                        ErrorCode expected_code,
@@ -246,6 +356,15 @@
                        OmahaResponse* out_response,
                        brillo::Blob* out_post_data);
 
+  void TestRollbackCheck(bool is_consumer_device,
+                         int rollback_allowed_milestones,
+                         bool is_policy_loaded,
+                         OmahaResponse* out_response);
+
+  void TestEvent(OmahaEvent* event,
+                 const string& http_response,
+                 brillo::Blob* out_post_data);
+
   // Runs and checks a ping test. |ping_only| indicates whether it should send
   // only a ping or also an updatecheck.
   void PingTest(bool ping_only);
@@ -269,98 +388,23 @@
 
   FakeSystemState fake_system_state_;
   FakeUpdateResponse fake_update_response_;
-
-  // By default, all tests use these objects unless they replace them in the
-  // fake_system_state_.
-  OmahaRequestParams request_params_ = OmahaRequestParams{
-      &fake_system_state_,
-      constants::kOmahaPlatformName,
-      OmahaRequestParams::kOsVersion,
-      "service_pack",
-      "x86-generic",
-      kTestAppId,
-      "0.1.0.0",
-      "en-US",
-      "unittest",
-      "OEM MODEL 09235 7471",
-      "ChromeOSFirmware.1.0",
-      "0X0A1",
-      false,   // delta okay
-      false,   // interactive
-      "http://url",
-      ""};     // target_version_prefix
+  // Used by all tests.
+  OmahaRequestParams request_params_{&fake_system_state_};
 
   FakePrefs fake_prefs_;
-};
 
-namespace {
-class OmahaRequestActionTestProcessorDelegate : public ActionProcessorDelegate {
- public:
-  OmahaRequestActionTestProcessorDelegate()
-      : expected_code_(ErrorCode::kSuccess) {}
-  ~OmahaRequestActionTestProcessorDelegate() override {
-  }
-  void ProcessingDone(const ActionProcessor* processor,
-                      ErrorCode code) override {
-    brillo::MessageLoop::current()->BreakLoop();
-  }
+  OmahaRequestActionTestProcessorDelegate delegate_;
 
-  void ActionCompleted(ActionProcessor* processor,
-                       AbstractAction* action,
-                       ErrorCode code) override {
-    // make sure actions always succeed
-    if (action->Type() == OmahaRequestAction::StaticType())
-      EXPECT_EQ(expected_code_, code);
-    else
-      EXPECT_EQ(ErrorCode::kSuccess, code);
-  }
-  ErrorCode expected_code_;
-};
-}  // namespace
-
-class OutputObjectCollectorAction;
-
-template<>
-class ActionTraits<OutputObjectCollectorAction> {
- public:
-  // Does not take an object for input
-  typedef OmahaResponse InputObjectType;
-  // On success, puts the output path on output
-  typedef NoneType OutputObjectType;
-};
-
-class OutputObjectCollectorAction : public Action<OutputObjectCollectorAction> {
- public:
-  OutputObjectCollectorAction() : has_input_object_(false) {}
-  void PerformAction() {
-    // copy input object
-    has_input_object_ = HasInputObject();
-    if (has_input_object_)
-      omaha_response_ = GetInputObject();
-    processor_->ActionComplete(this, ErrorCode::kSuccess);
-  }
-  // Should never be called
-  void TerminateProcessing() {
-    CHECK(false);
-  }
-  // Debugging/logging
-  static string StaticType() {
-    return "OutputObjectCollectorAction";
-  }
-  string Type() const { return StaticType(); }
-  using InputObjectType =
-      ActionTraits<OutputObjectCollectorAction>::InputObjectType;
-  using OutputObjectType =
-      ActionTraits<OutputObjectCollectorAction>::OutputObjectType;
-  bool has_input_object_;
-  OmahaResponse omaha_response_;
+  bool test_http_fetcher_headers_{false};
 };
 
 bool OmahaRequestActionTest::TestUpdateCheck(
-    OmahaRequestParams* request_params,
     const string& http_response,
     int fail_http_response_code,
     bool ping_only,
+    bool is_consumer_device,
+    int rollback_allowed_milestones,
+    bool is_policy_loaded,
     ErrorCode expected_code,
     metrics::CheckResult expected_check_result,
     metrics::CheckReaction expected_check_reaction,
@@ -369,28 +413,47 @@
     brillo::Blob* out_post_data) {
   brillo::FakeMessageLoop loop(nullptr);
   loop.SetAsCurrent();
-  MockHttpFetcher* fetcher = new MockHttpFetcher(http_response.data(),
-                                                 http_response.size(),
-                                                 nullptr);
+  auto fetcher = std::make_unique<MockHttpFetcher>(
+      http_response.data(), http_response.size(), nullptr);
   if (fail_http_response_code >= 0) {
     fetcher->FailTransfer(fail_http_response_code);
   }
-  if (request_params)
-    fake_system_state_.set_request_params(request_params);
-  OmahaRequestAction action(&fake_system_state_,
-                            nullptr,
-                            base::WrapUnique(fetcher),
-                            ping_only);
-  OmahaRequestActionTestProcessorDelegate delegate;
-  delegate.expected_code_ = expected_code;
+  // This ensures the tests didn't forget to update fake_system_state_ if they
+  // are not using the default request_params_.
+  EXPECT_EQ(&request_params_, fake_system_state_.request_params());
 
+  auto omaha_request_action = std::make_unique<OmahaRequestAction>(
+      &fake_system_state_, nullptr, std::move(fetcher), ping_only);
+
+  auto mock_policy_provider =
+      std::make_unique<NiceMock<policy::MockPolicyProvider>>();
+  EXPECT_CALL(*mock_policy_provider, IsConsumerDevice())
+      .WillRepeatedly(Return(is_consumer_device));
+
+  EXPECT_CALL(*mock_policy_provider, device_policy_is_loaded())
+      .WillRepeatedly(Return(is_policy_loaded));
+
+  const policy::MockDevicePolicy device_policy;
+  const bool get_allowed_milestone_succeeds = rollback_allowed_milestones >= 0;
+  EXPECT_CALL(device_policy, GetRollbackAllowedMilestones(_))
+      .WillRepeatedly(DoAll(SetArgPointee<0>(rollback_allowed_milestones),
+                            Return(get_allowed_milestone_succeeds)));
+
+  EXPECT_CALL(*mock_policy_provider, GetDevicePolicy())
+      .WillRepeatedly(ReturnRef(device_policy));
+  omaha_request_action->policy_provider_ = std::move(mock_policy_provider);
+
+  delegate_.expected_code_ = expected_code;
+  delegate_.interactive_ = request_params_.interactive();
+  delegate_.test_http_fetcher_headers_ = test_http_fetcher_headers_;
   ActionProcessor processor;
-  processor.set_delegate(&delegate);
-  processor.EnqueueAction(&action);
+  processor.set_delegate(&delegate_);
 
-  OutputObjectCollectorAction collector_action;
-  BondActions(&action, &collector_action);
-  processor.EnqueueAction(&collector_action);
+  auto collector_action =
+      std::make_unique<ObjectCollectorAction<OmahaResponse>>();
+  BondActions(omaha_request_action.get(), collector_action.get());
+  processor.EnqueueAction(std::move(omaha_request_action));
+  processor.EnqueueAction(std::move(collector_action));
 
   EXPECT_CALL(*fake_system_state_.mock_metrics_reporter(),
               ReportUpdateCheckMetrics(_, _, _, _))
@@ -408,35 +471,75 @@
       base::Unretained(&processor)));
   loop.Run();
   EXPECT_FALSE(loop.PendingTasks());
-  if (collector_action.has_input_object_ && out_response)
-    *out_response = collector_action.omaha_response_;
+  if (delegate_.omaha_response_ && out_response)
+    *out_response = *delegate_.omaha_response_;
   if (out_post_data)
-    *out_post_data = fetcher->post_data();
-  return collector_action.has_input_object_;
+    *out_post_data = delegate_.post_data_;
+  return delegate_.omaha_response_ != nullptr;
 }
 
-// Tests Event requests -- they should always succeed. |out_post_data|
-// may be null; if non-null, the post-data received by the mock
-// HttpFetcher is returned.
-void TestEvent(OmahaRequestParams params,
-               OmahaEvent* event,
-               const string& http_response,
-               brillo::Blob* out_post_data) {
+bool OmahaRequestActionTest::TestUpdateCheck(
+    const string& http_response,
+    int fail_http_response_code,
+    bool ping_only,
+    ErrorCode expected_code,
+    metrics::CheckResult expected_check_result,
+    metrics::CheckReaction expected_check_reaction,
+    metrics::DownloadErrorCode expected_download_error_code,
+    OmahaResponse* out_response,
+    brillo::Blob* out_post_data) {
+  return TestUpdateCheck(http_response,
+                         fail_http_response_code,
+                         ping_only,
+                         true,   // is_consumer_device
+                         0,      // rollback_allowed_milestones
+                         false,  // is_policy_loaded
+                         expected_code,
+                         expected_check_result,
+                         expected_check_reaction,
+                         expected_download_error_code,
+                         out_response,
+                         out_post_data);
+}
+
+void OmahaRequestActionTest::TestRollbackCheck(bool is_consumer_device,
+                                               int rollback_allowed_milestones,
+                                               bool is_policy_loaded,
+                                               OmahaResponse* out_response) {
+  fake_update_response_.deadline = "20101020";
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
+                              -1,
+                              false,  // ping_only
+                              is_consumer_device,
+                              rollback_allowed_milestones,
+                              is_policy_loaded,
+                              ErrorCode::kSuccess,
+                              metrics::CheckResult::kUpdateAvailable,
+                              metrics::CheckReaction::kUpdating,
+                              metrics::DownloadErrorCode::kUnset,
+                              out_response,
+                              nullptr));
+  ASSERT_TRUE(out_response->update_exists);
+}
+
+// Tests Event requests -- they should always succeed. |out_post_data| may be
+// null; if non-null, the post-data received by the mock HttpFetcher is
+// returned.
+void OmahaRequestActionTest::TestEvent(OmahaEvent* event,
+                                       const string& http_response,
+                                       brillo::Blob* out_post_data) {
   brillo::FakeMessageLoop loop(nullptr);
   loop.SetAsCurrent();
-  MockHttpFetcher* fetcher = new MockHttpFetcher(http_response.data(),
-                                                 http_response.size(),
-                                                 nullptr);
-  FakeSystemState fake_system_state;
-  fake_system_state.set_request_params(&params);
-  OmahaRequestAction action(&fake_system_state,
-                            event,
-                            base::WrapUnique(fetcher),
-                            false);
-  OmahaRequestActionTestProcessorDelegate delegate;
+
+  auto action = std::make_unique<OmahaRequestAction>(
+      &fake_system_state_,
+      event,
+      std::make_unique<MockHttpFetcher>(
+          http_response.data(), http_response.size(), nullptr),
+      false);
   ActionProcessor processor;
-  processor.set_delegate(&delegate);
-  processor.EnqueueAction(&action);
+  processor.set_delegate(&delegate_);
+  processor.EnqueueAction(std::move(action));
 
   loop.PostTask(base::Bind(
       [](ActionProcessor* processor) { processor->StartProcessing(); },
@@ -445,47 +548,42 @@
   EXPECT_FALSE(loop.PendingTasks());
 
   if (out_post_data)
-    *out_post_data = fetcher->post_data();
+    *out_post_data = delegate_.post_data_;
 }
 
 TEST_F(OmahaRequestActionTest, RejectEntities) {
   OmahaResponse response;
   fake_update_response_.include_entity = true;
-  ASSERT_FALSE(
-      TestUpdateCheck(nullptr,  // request_params
-                      fake_update_response_.GetNoUpdateResponse(),
-                      -1,
-                      false,  // ping_only
-                      ErrorCode::kOmahaRequestXMLHasEntityDecl,
-                      metrics::CheckResult::kParsingError,
-                      metrics::CheckReaction::kUnset,
-                      metrics::DownloadErrorCode::kUnset,
-                      &response,
-                      nullptr));
+  ASSERT_FALSE(TestUpdateCheck(fake_update_response_.GetNoUpdateResponse(),
+                               -1,
+                               false,  // ping_only
+                               ErrorCode::kOmahaRequestXMLHasEntityDecl,
+                               metrics::CheckResult::kParsingError,
+                               metrics::CheckReaction::kUnset,
+                               metrics::DownloadErrorCode::kUnset,
+                               &response,
+                               nullptr));
   EXPECT_FALSE(response.update_exists);
 }
 
 TEST_F(OmahaRequestActionTest, NoUpdateTest) {
   OmahaResponse response;
-  ASSERT_TRUE(
-      TestUpdateCheck(nullptr,  // request_params
-                      fake_update_response_.GetNoUpdateResponse(),
-                      -1,
-                      false,  // ping_only
-                      ErrorCode::kSuccess,
-                      metrics::CheckResult::kNoUpdateAvailable,
-                      metrics::CheckReaction::kUnset,
-                      metrics::DownloadErrorCode::kUnset,
-                      &response,
-                      nullptr));
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetNoUpdateResponse(),
+                              -1,
+                              false,  // ping_only
+                              ErrorCode::kSuccess,
+                              metrics::CheckResult::kNoUpdateAvailable,
+                              metrics::CheckReaction::kUnset,
+                              metrics::DownloadErrorCode::kUnset,
+                              &response,
+                              nullptr));
   EXPECT_FALSE(response.update_exists);
 }
 
 TEST_F(OmahaRequestActionTest, MultiAppNoUpdateTest) {
   OmahaResponse response;
   fake_update_response_.multi_app_no_update = true;
-  ASSERT_TRUE(TestUpdateCheck(nullptr,  // request_params
-                              fake_update_response_.GetNoUpdateResponse(),
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetNoUpdateResponse(),
                               -1,
                               false,  // ping_only
                               ErrorCode::kSuccess,
@@ -500,8 +598,7 @@
 TEST_F(OmahaRequestActionTest, MultiAppNoPartialUpdateTest) {
   OmahaResponse response;
   fake_update_response_.multi_app_no_update = true;
-  ASSERT_TRUE(TestUpdateCheck(nullptr,  // request_params
-                              fake_update_response_.GetUpdateResponse(),
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
                               -1,
                               false,  // ping_only
                               ErrorCode::kSuccess,
@@ -516,7 +613,6 @@
 TEST_F(OmahaRequestActionTest, NoSelfUpdateTest) {
   OmahaResponse response;
   ASSERT_TRUE(TestUpdateCheck(
-      nullptr,  // request_params
       "<response><app><updatecheck status=\"ok\"><manifest><actions><action "
       "event=\"postinstall\" noupdate=\"true\"/></actions>"
       "</manifest></updatecheck></app></response>",
@@ -536,17 +632,15 @@
 TEST_F(OmahaRequestActionTest, ValidUpdateTest) {
   OmahaResponse response;
   fake_update_response_.deadline = "20101020";
-  ASSERT_TRUE(
-      TestUpdateCheck(nullptr,  // request_params
-                      fake_update_response_.GetUpdateResponse(),
-                      -1,
-                      false,  // ping_only
-                      ErrorCode::kSuccess,
-                      metrics::CheckResult::kUpdateAvailable,
-                      metrics::CheckReaction::kUpdating,
-                      metrics::DownloadErrorCode::kUnset,
-                      &response,
-                      nullptr));
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
+                              -1,
+                              false,  // ping_only
+                              ErrorCode::kSuccess,
+                              metrics::CheckResult::kUpdateAvailable,
+                              metrics::CheckReaction::kUpdating,
+                              metrics::DownloadErrorCode::kUnset,
+                              &response,
+                              nullptr));
   EXPECT_TRUE(response.update_exists);
   EXPECT_EQ(fake_update_response_.version, response.version);
   EXPECT_EQ("", response.system_version);
@@ -558,6 +652,7 @@
   EXPECT_EQ(true, response.packages[0].is_delta);
   EXPECT_EQ(fake_update_response_.prompt == "true", response.prompt);
   EXPECT_EQ(fake_update_response_.deadline, response.deadline);
+  EXPECT_FALSE(response.powerwash_required);
   // Omaha cohort attributes are not set in the response, so they should not be
   // persisted.
   EXPECT_FALSE(fake_prefs_.Exists(kPrefsOmahaCohort));
@@ -568,8 +663,7 @@
 TEST_F(OmahaRequestActionTest, MultiPackageUpdateTest) {
   OmahaResponse response;
   fake_update_response_.multi_package = true;
-  ASSERT_TRUE(TestUpdateCheck(nullptr,  // request_params
-                              fake_update_response_.GetUpdateResponse(),
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
                               -1,
                               false,  // ping_only
                               ErrorCode::kSuccess,
@@ -598,8 +692,7 @@
 TEST_F(OmahaRequestActionTest, MultiAppUpdateTest) {
   OmahaResponse response;
   fake_update_response_.multi_app = true;
-  ASSERT_TRUE(TestUpdateCheck(nullptr,  // request_params
-                              fake_update_response_.GetUpdateResponse(),
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
                               -1,
                               false,  // ping_only
                               ErrorCode::kSuccess,
@@ -631,8 +724,7 @@
   // trigger the lining up of the app and system versions
   request_params_.set_system_app_id(fake_update_response_.app_id2);
 
-  ASSERT_TRUE(TestUpdateCheck(nullptr,  // request_params
-                              fake_update_response_.GetUpdateResponse(),
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
                               -1,
                               false,  // ping_only
                               ErrorCode::kSuccess,
@@ -663,8 +755,7 @@
   OmahaResponse response;
   fake_update_response_.multi_app = true;
   fake_update_response_.multi_app_self_update = true;
-  ASSERT_TRUE(TestUpdateCheck(nullptr,  // request_params
-                              fake_update_response_.GetUpdateResponse(),
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
                               -1,
                               false,  // ping_only
                               ErrorCode::kSuccess,
@@ -692,8 +783,7 @@
   OmahaResponse response;
   fake_update_response_.multi_app = true;
   fake_update_response_.multi_package = true;
-  ASSERT_TRUE(TestUpdateCheck(nullptr,  // request_params
-                              fake_update_response_.GetUpdateResponse(),
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
                               -1,
                               false,  // ping_only
                               ErrorCode::kSuccess,
@@ -726,31 +816,52 @@
   EXPECT_EQ(false, response.packages[2].is_delta);
 }
 
-TEST_F(OmahaRequestActionTest, ExtraHeadersSentTest) {
-  const string http_response = "<?xml invalid response";
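+// Verify that the powerwash flag in the Omaha response is reported as
+// powerwash_required in the parsed response.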
+TEST_F(OmahaRequestActionTest, PowerwashTest) {
+  OmahaResponse response;
+  fake_update_response_.powerwash = true;
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
+                              -1,
+                              false,  // ping_only
+                              ErrorCode::kSuccess,
+                              metrics::CheckResult::kUpdateAvailable,
+                              metrics::CheckReaction::kUpdating,
+                              metrics::DownloadErrorCode::kUnset,
+                              &response,
+                              nullptr));
+  EXPECT_TRUE(response.update_exists);
+  EXPECT_TRUE(response.powerwash_required);
+}
+
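+// Verify that the X-GoogleUpdate-* headers are sent on an interactive update
+// check; test_http_fetcher_headers_ presumably makes the test delegate verify
+// them, replacing the inline header checks removed below.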
+TEST_F(OmahaRequestActionTest, ExtraHeadersSentInteractiveTest) {
+  OmahaResponse response;
   request_params_.set_interactive(true);
+  test_http_fetcher_headers_ = true;
+  ASSERT_FALSE(TestUpdateCheck("invalid xml>",
+                               -1,
+                               false,  // ping_only
+                               ErrorCode::kOmahaRequestXMLParseError,
+                               metrics::CheckResult::kParsingError,
+                               metrics::CheckReaction::kUnset,
+                               metrics::DownloadErrorCode::kUnset,
+                               &response,
+                               nullptr));
+  EXPECT_FALSE(response.update_exists);
+}
 
-  brillo::FakeMessageLoop loop(nullptr);
-  loop.SetAsCurrent();
-
-  MockHttpFetcher* fetcher =
-      new MockHttpFetcher(http_response.data(), http_response.size(), nullptr);
-  OmahaRequestAction action(&fake_system_state_, nullptr,
-                            base::WrapUnique(fetcher), false);
-  ActionProcessor processor;
-  processor.EnqueueAction(&action);
-
-  loop.PostTask(base::Bind(
-      [](ActionProcessor* processor) { processor->StartProcessing(); },
-      base::Unretained(&processor)));
-  loop.Run();
-  EXPECT_FALSE(loop.PendingTasks());
-
-  // Check that the headers were set in the fetcher during the action. Note that
-  // we set this request as "interactive".
-  EXPECT_EQ("fg", fetcher->GetHeader("X-GoogleUpdate-Interactivity"));
-  EXPECT_EQ(kTestAppId, fetcher->GetHeader("X-GoogleUpdate-AppId"));
-  EXPECT_NE("", fetcher->GetHeader("X-GoogleUpdate-Updater"));
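+// Same as above, but for a non-interactive (background) update check.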
+TEST_F(OmahaRequestActionTest, ExtraHeadersSentNoInteractiveTest) {
+  OmahaResponse response;
+  request_params_.set_interactive(false);
+  test_http_fetcher_headers_ = true;
+  ASSERT_FALSE(TestUpdateCheck("invalid xml>",
+                               -1,
+                               false,  // ping_only
+                               ErrorCode::kOmahaRequestXMLParseError,
+                               metrics::CheckResult::kParsingError,
+                               metrics::CheckReaction::kUnset,
+                               metrics::DownloadErrorCode::kUnset,
+                               &response,
+                               nullptr));
+  EXPECT_FALSE(response.update_exists);
 }
 
 TEST_F(OmahaRequestActionTest, ValidUpdateBlockedByConnection) {
@@ -768,20 +879,182 @@
   EXPECT_CALL(mock_cm, IsUpdateAllowedOver(ConnectionType::kEthernet, _))
       .WillRepeatedly(Return(false));
 
-  ASSERT_FALSE(
-      TestUpdateCheck(nullptr,  // request_params
-                      fake_update_response_.GetUpdateResponse(),
-                      -1,
-                      false,  // ping_only
-                      ErrorCode::kOmahaUpdateIgnoredPerPolicy,
-                      metrics::CheckResult::kUpdateAvailable,
-                      metrics::CheckReaction::kIgnored,
-                      metrics::DownloadErrorCode::kUnset,
-                      &response,
-                      nullptr));
+  ASSERT_FALSE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
+                               -1,
+                               false,  // ping_only
+                               ErrorCode::kOmahaUpdateIgnoredPerPolicy,
+                               metrics::CheckResult::kUpdateAvailable,
+                               metrics::CheckReaction::kIgnored,
+                               metrics::DownloadErrorCode::kUnset,
+                               &response,
+                               nullptr));
   EXPECT_FALSE(response.update_exists);
 }
 
+TEST_F(OmahaRequestActionTest, ValidUpdateOverCellularAllowedByDevicePolicy) {
+  // Verifies that an update over cellular is allowed when device policy
+  // allows it.
+  OmahaResponse response;
+  MockConnectionManager mock_cm;
+
+  fake_system_state_.set_connection_manager(&mock_cm);
+
+  EXPECT_CALL(mock_cm, GetConnectionProperties(_, _))
+      .WillRepeatedly(DoAll(SetArgPointee<0>(ConnectionType::kCellular),
+                            SetArgPointee<1>(ConnectionTethering::kUnknown),
+                            Return(true)));
+  EXPECT_CALL(mock_cm, IsAllowedConnectionTypesForUpdateSet())
+      .WillRepeatedly(Return(true));
+  EXPECT_CALL(mock_cm, IsUpdateAllowedOver(ConnectionType::kCellular, _))
+      .WillRepeatedly(Return(true));
+
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
+                              -1,
+                              false,  // ping_only
+                              ErrorCode::kSuccess,
+                              metrics::CheckResult::kUpdateAvailable,
+                              metrics::CheckReaction::kUpdating,
+                              metrics::DownloadErrorCode::kUnset,
+                              &response,
+                              nullptr));
+  EXPECT_TRUE(response.update_exists);
+}
+
+TEST_F(OmahaRequestActionTest, ValidUpdateOverCellularBlockedByDevicePolicy) {
+  // Verifies that an update over cellular is blocked when device policy
+  // disallows it.
+  OmahaResponse response;
+  MockConnectionManager mock_cm;
+
+  fake_system_state_.set_connection_manager(&mock_cm);
+
+  EXPECT_CALL(mock_cm, GetConnectionProperties(_, _))
+      .WillRepeatedly(DoAll(SetArgPointee<0>(ConnectionType::kCellular),
+                            SetArgPointee<1>(ConnectionTethering::kUnknown),
+                            Return(true)));
+  EXPECT_CALL(mock_cm, IsAllowedConnectionTypesForUpdateSet())
+      .WillRepeatedly(Return(true));
+  EXPECT_CALL(mock_cm, IsUpdateAllowedOver(ConnectionType::kCellular, _))
+      .WillRepeatedly(Return(false));
+
+  ASSERT_FALSE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
+                               -1,
+                               false,  // ping_only
+                               ErrorCode::kOmahaUpdateIgnoredPerPolicy,
+                               metrics::CheckResult::kUpdateAvailable,
+                               metrics::CheckReaction::kIgnored,
+                               metrics::DownloadErrorCode::kUnset,
+                               &response,
+                               nullptr));
+  EXPECT_FALSE(response.update_exists);
+}
+
+TEST_F(OmahaRequestActionTest,
+       ValidUpdateOverCellularAllowedByUserPermissionTrue) {
+  // Verifies that, when device policy is not set, an update over cellular is
+  // allowed because the user permission for updates over cellular is true.
+  OmahaResponse response;
+  MockConnectionManager mock_cm;
+
+  fake_prefs_.SetBoolean(kPrefsUpdateOverCellularPermission, true);
+  fake_system_state_.set_connection_manager(&mock_cm);
+
+  EXPECT_CALL(mock_cm, GetConnectionProperties(_, _))
+      .WillRepeatedly(DoAll(SetArgPointee<0>(ConnectionType::kCellular),
+                            SetArgPointee<1>(ConnectionTethering::kUnknown),
+                            Return(true)));
+  EXPECT_CALL(mock_cm, IsAllowedConnectionTypesForUpdateSet())
+      .WillRepeatedly(Return(false));
+  EXPECT_CALL(mock_cm, IsUpdateAllowedOver(ConnectionType::kCellular, _))
+      .WillRepeatedly(Return(true));
+
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
+                              -1,
+                              false,  // ping_only
+                              ErrorCode::kSuccess,
+                              metrics::CheckResult::kUpdateAvailable,
+                              metrics::CheckReaction::kUpdating,
+                              metrics::DownloadErrorCode::kUnset,
+                              &response,
+                              nullptr));
+  EXPECT_TRUE(response.update_exists);
+}
+
+TEST_F(OmahaRequestActionTest,
+       ValidUpdateOverCellularBlockedByUpdateTargetNotMatch) {
+  // Verifies that, when device policy is not set and the permission for
+  // updates over cellular is false or missing, an update over cellular is
+  // blocked because the stored update target does not match the omaha
+  // response.
+  OmahaResponse response;
+  MockConnectionManager mock_cm;
+  // A version different from the one in the omaha response.
+  string diff_version = "99.99.99";
+  // A size different from the one in the omaha response.
+  int64_t diff_size = 999;
+
+  fake_prefs_.SetString(kPrefsUpdateOverCellularTargetVersion, diff_version);
+  fake_prefs_.SetInt64(kPrefsUpdateOverCellularTargetSize, diff_size);
+  // Cellular (3G) is the only connection type available in this test.
+  fake_system_state_.set_connection_manager(&mock_cm);
+
+  EXPECT_CALL(mock_cm, GetConnectionProperties(_, _))
+      .WillRepeatedly(DoAll(SetArgPointee<0>(ConnectionType::kCellular),
+                            SetArgPointee<1>(ConnectionTethering::kUnknown),
+                            Return(true)));
+  EXPECT_CALL(mock_cm, IsAllowedConnectionTypesForUpdateSet())
+      .WillRepeatedly(Return(false));
+  EXPECT_CALL(mock_cm, IsUpdateAllowedOver(ConnectionType::kCellular, _))
+      .WillRepeatedly(Return(true));
+
+  ASSERT_FALSE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
+                               -1,
+                               false,  // ping_only
+                               ErrorCode::kOmahaUpdateIgnoredOverCellular,
+                               metrics::CheckResult::kUpdateAvailable,
+                               metrics::CheckReaction::kIgnored,
+                               metrics::DownloadErrorCode::kUnset,
+                               &response,
+                               nullptr));
+  EXPECT_FALSE(response.update_exists);
+}
+
+TEST_F(OmahaRequestActionTest,
+       ValidUpdateOverCellularAllowedByUpdateTargetMatch) {
+  // Verifies that, when device policy is not set and the permission for
+  // updates over cellular is false or missing, an update over cellular is
+  // allowed because the stored update target matches the omaha response.
+  OmahaResponse response;
+  MockConnectionManager mock_cm;
+  // The same version as in the omaha response.
+  string new_version = fake_update_response_.version;
+  // The same size as in the omaha response.
+  int64_t new_size = fake_update_response_.size;
+
+  fake_prefs_.SetString(kPrefsUpdateOverCellularTargetVersion, new_version);
+  fake_prefs_.SetInt64(kPrefsUpdateOverCellularTargetSize, new_size);
+  fake_system_state_.set_connection_manager(&mock_cm);
+
+  EXPECT_CALL(mock_cm, GetConnectionProperties(_, _))
+      .WillRepeatedly(DoAll(SetArgPointee<0>(ConnectionType::kCellular),
+                            SetArgPointee<1>(ConnectionTethering::kUnknown),
+                            Return(true)));
+  EXPECT_CALL(mock_cm, IsAllowedConnectionTypesForUpdateSet())
+      .WillRepeatedly(Return(false));
+  EXPECT_CALL(mock_cm, IsUpdateAllowedOver(ConnectionType::kCellular, _))
+      .WillRepeatedly(Return(true));
+
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
+                              -1,
+                              false,  // ping_only
+                              ErrorCode::kSuccess,
+                              metrics::CheckResult::kUpdateAvailable,
+                              metrics::CheckReaction::kUpdating,
+                              metrics::DownloadErrorCode::kUnset,
+                              &response,
+                              nullptr));
+  EXPECT_TRUE(response.update_exists);
+}
+
 TEST_F(OmahaRequestActionTest, ValidUpdateBlockedByRollback) {
   string rollback_version = "1234.0.0";
   OmahaResponse response;
@@ -793,30 +1066,27 @@
     .WillRepeatedly(Return(rollback_version));
 
   fake_update_response_.version = rollback_version;
-  ASSERT_FALSE(
-      TestUpdateCheck(nullptr,  // request_params
-                      fake_update_response_.GetUpdateResponse(),
-                      -1,
-                      false,  // ping_only
-                      ErrorCode::kOmahaUpdateIgnoredPerPolicy,
-                      metrics::CheckResult::kUpdateAvailable,
-                      metrics::CheckReaction::kIgnored,
-                      metrics::DownloadErrorCode::kUnset,
-                      &response,
-                      nullptr));
+  ASSERT_FALSE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
+                               -1,
+                               false,  // ping_only
+                               ErrorCode::kOmahaUpdateIgnoredPerPolicy,
+                               metrics::CheckResult::kUpdateAvailable,
+                               metrics::CheckReaction::kIgnored,
+                               metrics::DownloadErrorCode::kUnset,
+                               &response,
+                               nullptr));
   EXPECT_FALSE(response.update_exists);
 }
 
-// Verify that update checks called during OOBE will only try to download
-// an update if the response includes a non-empty deadline field.
+// Verify that update checks called during OOBE will not try to download an
+// update if the response doesn't include the deadline field.
 TEST_F(OmahaRequestActionTest, SkipNonCriticalUpdatesBeforeOOBE) {
   OmahaResponse response;
+  fake_system_state_.fake_hardware()->UnsetIsOOBEComplete();
 
   // TODO(senj): set better default value for metrics::checkresult in
   // OmahaRequestAction::ActionCompleted.
-  fake_system_state_.fake_hardware()->UnsetIsOOBEComplete();
-  ASSERT_FALSE(TestUpdateCheck(nullptr,  // request_params
-                               fake_update_response_.GetUpdateResponse(),
+  ASSERT_FALSE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
                                -1,
                                false,  // ping_only
                                ErrorCode::kNonCriticalUpdateInOOBE,
@@ -826,11 +1096,16 @@
                                &response,
                                nullptr));
   EXPECT_FALSE(response.update_exists);
+}
 
-  // The IsOOBEComplete() value is ignored when the OOBE flow is not enabled.
+// Verify that the IsOOBEComplete() value is ignored when the OOBE flow is not
+// enabled.
+TEST_F(OmahaRequestActionTest, SkipNonCriticalUpdatesBeforeOOBEDisabled) {
+  OmahaResponse response;
+  fake_system_state_.fake_hardware()->UnsetIsOOBEComplete();
   fake_system_state_.fake_hardware()->SetIsOOBEEnabled(false);
-  ASSERT_TRUE(TestUpdateCheck(nullptr,  // request_params
-                              fake_update_response_.GetUpdateResponse(),
+
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
                               -1,
                               false,  // ping_only
                               ErrorCode::kSuccess,
@@ -840,122 +1115,131 @@
                               &response,
                               nullptr));
   EXPECT_TRUE(response.update_exists);
-  fake_system_state_.fake_hardware()->SetIsOOBEEnabled(true);
+}
 
-  // The payload is applied when a deadline was set in the response.
+// Verify that update checks called during OOBE will still try to download an
+// update if the response includes the deadline field.
+TEST_F(OmahaRequestActionTest, SkipNonCriticalUpdatesBeforeOOBEDeadlineSet) {
+  OmahaResponse response;
+  fake_system_state_.fake_hardware()->UnsetIsOOBEComplete();
   fake_update_response_.deadline = "20101020";
-  ASSERT_TRUE(
-      TestUpdateCheck(nullptr,  // request_params
-                      fake_update_response_.GetUpdateResponse(),
-                      -1,
-                      false,  // ping_only
-                      ErrorCode::kSuccess,
-                      metrics::CheckResult::kUpdateAvailable,
-                      metrics::CheckReaction::kUpdating,
-                      metrics::DownloadErrorCode::kUnset,
-                      &response,
-                      nullptr));
+
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
+                              -1,
+                              false,  // ping_only
+                              ErrorCode::kSuccess,
+                              metrics::CheckResult::kUpdateAvailable,
+                              metrics::CheckReaction::kUpdating,
+                              metrics::DownloadErrorCode::kUnset,
+                              &response,
+                              nullptr));
   EXPECT_TRUE(response.update_exists);
 }
 
+// Verify that update checks called during OOBE will not try to download an
+// update if a rollback happened, even when the response includes the deadline
+// field.
+TEST_F(OmahaRequestActionTest, SkipNonCriticalUpdatesBeforeOOBERollback) {
+  OmahaResponse response;
+  fake_system_state_.fake_hardware()->UnsetIsOOBEComplete();
+  fake_update_response_.deadline = "20101020";
+  EXPECT_CALL(*(fake_system_state_.mock_payload_state()), GetRollbackHappened())
+      .WillOnce(Return(true));
+
+  ASSERT_FALSE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
+                               -1,
+                               false,  // ping_only
+                               ErrorCode::kNonCriticalUpdateInOOBE,
+                               metrics::CheckResult::kParsingError,
+                               metrics::CheckReaction::kUnset,
+                               metrics::DownloadErrorCode::kUnset,
+                               &response,
+                               nullptr));
+  EXPECT_FALSE(response.update_exists);
+}
+
 TEST_F(OmahaRequestActionTest, WallClockBasedWaitAloneCausesScattering) {
   OmahaResponse response;
-  OmahaRequestParams params = request_params_;
-  params.set_wall_clock_based_wait_enabled(true);
-  params.set_update_check_count_wait_enabled(false);
-  params.set_waiting_period(TimeDelta::FromDays(2));
+  request_params_.set_wall_clock_based_wait_enabled(true);
+  request_params_.set_update_check_count_wait_enabled(false);
+  request_params_.set_waiting_period(TimeDelta::FromDays(2));
 
-  ASSERT_FALSE(
-      TestUpdateCheck(&params,
-                      fake_update_response_.GetUpdateResponse(),
-                      -1,
-                      false,  // ping_only
-                      ErrorCode::kOmahaUpdateDeferredPerPolicy,
-                      metrics::CheckResult::kUpdateAvailable,
-                      metrics::CheckReaction::kDeferring,
-                      metrics::DownloadErrorCode::kUnset,
-                      &response,
-                      nullptr));
+  ASSERT_FALSE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
+                               -1,
+                               false,  // ping_only
+                               ErrorCode::kOmahaUpdateDeferredPerPolicy,
+                               metrics::CheckResult::kUpdateAvailable,
+                               metrics::CheckReaction::kDeferring,
+                               metrics::DownloadErrorCode::kUnset,
+                               &response,
+                               nullptr));
   EXPECT_FALSE(response.update_exists);
 
   // Verify that an interactive check is not deferred.
-  params.set_interactive(true);
-  ASSERT_TRUE(
-      TestUpdateCheck(&params,
-                      fake_update_response_.GetUpdateResponse(),
-                      -1,
-                      false,  // ping_only
-                      ErrorCode::kSuccess,
-                      metrics::CheckResult::kUpdateAvailable,
-                      metrics::CheckReaction::kUpdating,
-                      metrics::DownloadErrorCode::kUnset,
-                      &response,
-                      nullptr));
+  request_params_.set_interactive(true);
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
+                              -1,
+                              false,  // ping_only
+                              ErrorCode::kSuccess,
+                              metrics::CheckResult::kUpdateAvailable,
+                              metrics::CheckReaction::kUpdating,
+                              metrics::DownloadErrorCode::kUnset,
+                              &response,
+                              nullptr));
   EXPECT_TRUE(response.update_exists);
 }
 
 TEST_F(OmahaRequestActionTest, NoWallClockBasedWaitCausesNoScattering) {
   OmahaResponse response;
-  OmahaRequestParams params = request_params_;
-  params.set_wall_clock_based_wait_enabled(false);
-  params.set_waiting_period(TimeDelta::FromDays(2));
+  request_params_.set_wall_clock_based_wait_enabled(false);
+  request_params_.set_waiting_period(TimeDelta::FromDays(2));
+  request_params_.set_update_check_count_wait_enabled(true);
+  request_params_.set_min_update_checks_needed(1);
+  request_params_.set_max_update_checks_allowed(8);
 
-  params.set_update_check_count_wait_enabled(true);
-  params.set_min_update_checks_needed(1);
-  params.set_max_update_checks_allowed(8);
-
-  ASSERT_TRUE(
-      TestUpdateCheck(&params,
-                      fake_update_response_.GetUpdateResponse(),
-                      -1,
-                      false,  // ping_only
-                      ErrorCode::kSuccess,
-                      metrics::CheckResult::kUpdateAvailable,
-                      metrics::CheckReaction::kUpdating,
-                      metrics::DownloadErrorCode::kUnset,
-                      &response,
-                      nullptr));
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
+                              -1,
+                              false,  // ping_only
+                              ErrorCode::kSuccess,
+                              metrics::CheckResult::kUpdateAvailable,
+                              metrics::CheckReaction::kUpdating,
+                              metrics::DownloadErrorCode::kUnset,
+                              &response,
+                              nullptr));
   EXPECT_TRUE(response.update_exists);
 }
 
 TEST_F(OmahaRequestActionTest, ZeroMaxDaysToScatterCausesNoScattering) {
   OmahaResponse response;
-  OmahaRequestParams params = request_params_;
-  params.set_wall_clock_based_wait_enabled(true);
-  params.set_waiting_period(TimeDelta::FromDays(2));
-
-  params.set_update_check_count_wait_enabled(true);
-  params.set_min_update_checks_needed(1);
-  params.set_max_update_checks_allowed(8);
+  request_params_.set_wall_clock_based_wait_enabled(true);
+  request_params_.set_waiting_period(TimeDelta::FromDays(2));
+  request_params_.set_update_check_count_wait_enabled(true);
+  request_params_.set_min_update_checks_needed(1);
+  request_params_.set_max_update_checks_allowed(8);
 
   fake_update_response_.max_days_to_scatter = "0";
-  ASSERT_TRUE(
-      TestUpdateCheck(&params,
-                      fake_update_response_.GetUpdateResponse(),
-                      -1,
-                      false,  // ping_only
-                      ErrorCode::kSuccess,
-                      metrics::CheckResult::kUpdateAvailable,
-                      metrics::CheckReaction::kUpdating,
-                      metrics::DownloadErrorCode::kUnset,
-                      &response,
-                      nullptr));
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
+                              -1,
+                              false,  // ping_only
+                              ErrorCode::kSuccess,
+                              metrics::CheckResult::kUpdateAvailable,
+                              metrics::CheckReaction::kUpdating,
+                              metrics::DownloadErrorCode::kUnset,
+                              &response,
+                              nullptr));
   EXPECT_TRUE(response.update_exists);
 }
 
 
 TEST_F(OmahaRequestActionTest, ZeroUpdateCheckCountCausesNoScattering) {
   OmahaResponse response;
-  OmahaRequestParams params = request_params_;
-  params.set_wall_clock_based_wait_enabled(true);
-  params.set_waiting_period(TimeDelta());
-
-  params.set_update_check_count_wait_enabled(true);
-  params.set_min_update_checks_needed(0);
-  params.set_max_update_checks_allowed(0);
+  request_params_.set_wall_clock_based_wait_enabled(true);
+  request_params_.set_waiting_period(TimeDelta());
+  request_params_.set_update_check_count_wait_enabled(true);
+  request_params_.set_min_update_checks_needed(0);
+  request_params_.set_max_update_checks_allowed(0);
 
   ASSERT_TRUE(TestUpdateCheck(
-                      &params,
                       fake_update_response_.GetUpdateResponse(),
                       -1,
                       false,  // ping_only
@@ -974,16 +1258,13 @@
 
 TEST_F(OmahaRequestActionTest, NonZeroUpdateCheckCountCausesScattering) {
   OmahaResponse response;
-  OmahaRequestParams params = request_params_;
-  params.set_wall_clock_based_wait_enabled(true);
-  params.set_waiting_period(TimeDelta());
-
-  params.set_update_check_count_wait_enabled(true);
-  params.set_min_update_checks_needed(1);
-  params.set_max_update_checks_allowed(8);
+  request_params_.set_wall_clock_based_wait_enabled(true);
+  request_params_.set_waiting_period(TimeDelta());
+  request_params_.set_update_check_count_wait_enabled(true);
+  request_params_.set_min_update_checks_needed(1);
+  request_params_.set_max_update_checks_allowed(8);
 
   ASSERT_FALSE(TestUpdateCheck(
-                      &params,
                       fake_update_response_.GetUpdateResponse(),
                       -1,
                       false,  // ping_only
@@ -1000,35 +1281,30 @@
   EXPECT_FALSE(response.update_exists);
 
   // Verify that an interactive check is not deferred.
-  params.set_interactive(true);
-  ASSERT_TRUE(
-      TestUpdateCheck(&params,
-                      fake_update_response_.GetUpdateResponse(),
-                      -1,
-                      false,  // ping_only
-                      ErrorCode::kSuccess,
-                      metrics::CheckResult::kUpdateAvailable,
-                      metrics::CheckReaction::kUpdating,
-                      metrics::DownloadErrorCode::kUnset,
-                      &response,
-                      nullptr));
+  request_params_.set_interactive(true);
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
+                              -1,
+                              false,  // ping_only
+                              ErrorCode::kSuccess,
+                              metrics::CheckResult::kUpdateAvailable,
+                              metrics::CheckReaction::kUpdating,
+                              metrics::DownloadErrorCode::kUnset,
+                              &response,
+                              nullptr));
   EXPECT_TRUE(response.update_exists);
 }
 
 TEST_F(OmahaRequestActionTest, ExistingUpdateCheckCountCausesScattering) {
   OmahaResponse response;
-  OmahaRequestParams params = request_params_;
-  params.set_wall_clock_based_wait_enabled(true);
-  params.set_waiting_period(TimeDelta());
-
-  params.set_update_check_count_wait_enabled(true);
-  params.set_min_update_checks_needed(1);
-  params.set_max_update_checks_allowed(8);
+  request_params_.set_wall_clock_based_wait_enabled(true);
+  request_params_.set_waiting_period(TimeDelta());
+  request_params_.set_update_check_count_wait_enabled(true);
+  request_params_.set_min_update_checks_needed(1);
+  request_params_.set_max_update_checks_allowed(8);
 
   ASSERT_TRUE(fake_prefs_.SetInt64(kPrefsUpdateCheckCount, 5));
 
   ASSERT_FALSE(TestUpdateCheck(
-                      &params,
                       fake_update_response_.GetUpdateResponse(),
                       -1,
                       false,  // ping_only
@@ -1047,31 +1323,63 @@
   EXPECT_FALSE(response.update_exists);
 
   // Verify that an interactive check is not deferred.
-  params.set_interactive(true);
-  ASSERT_TRUE(
-      TestUpdateCheck(&params,
-                      fake_update_response_.GetUpdateResponse(),
-                      -1,
-                      false,  // ping_only
-                      ErrorCode::kSuccess,
-                      metrics::CheckResult::kUpdateAvailable,
-                      metrics::CheckReaction::kUpdating,
-                      metrics::DownloadErrorCode::kUnset,
-                      &response,
-                      nullptr));
+  request_params_.set_interactive(true);
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
+                              -1,
+                              false,  // ping_only
+                              ErrorCode::kSuccess,
+                              metrics::CheckResult::kUpdateAvailable,
+                              metrics::CheckReaction::kUpdating,
+                              metrics::DownloadErrorCode::kUnset,
+                              &response,
+                              nullptr));
+  EXPECT_TRUE(response.update_exists);
+}
+
+TEST_F(OmahaRequestActionTest, StagingTurnedOnCausesScattering) {
+  // If staging is on, the value for max days to scatter should be ignored, and
+  // staging's scatter value should be used.
+  OmahaResponse response;
+  request_params_.set_wall_clock_based_wait_enabled(true);
+  request_params_.set_waiting_period(TimeDelta::FromDays(6));
+  request_params_.set_update_check_count_wait_enabled(false);
+
+  ASSERT_TRUE(fake_prefs_.SetInt64(kPrefsWallClockStagingWaitPeriod, 6));
+  // This should not prevent scattering due to staging.
+  fake_update_response_.max_days_to_scatter = "0";
+  ASSERT_FALSE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
+                               -1,
+                               false,  // ping_only
+                               ErrorCode::kOmahaUpdateDeferredPerPolicy,
+                               metrics::CheckResult::kUpdateAvailable,
+                               metrics::CheckReaction::kDeferring,
+                               metrics::DownloadErrorCode::kUnset,
+                               &response,
+                               nullptr));
+  EXPECT_FALSE(response.update_exists);
+
+  // Interactive updates should not be affected.
+  request_params_.set_interactive(true);
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
+                              -1,
+                              false,  // ping_only
+                              ErrorCode::kSuccess,
+                              metrics::CheckResult::kUpdateAvailable,
+                              metrics::CheckReaction::kUpdating,
+                              metrics::DownloadErrorCode::kUnset,
+                              &response,
+                              nullptr));
   EXPECT_TRUE(response.update_exists);
 }
 
 TEST_F(OmahaRequestActionTest, CohortsArePersisted) {
   OmahaResponse response;
-  OmahaRequestParams params = request_params_;
   fake_update_response_.include_cohorts = true;
   fake_update_response_.cohort = "s/154454/8479665";
   fake_update_response_.cohorthint = "please-put-me-on-beta";
   fake_update_response_.cohortname = "stable";
 
-  ASSERT_TRUE(TestUpdateCheck(&params,
-                              fake_update_response_.GetUpdateResponse(),
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
                               -1,
                               false,  // ping_only
                               ErrorCode::kSuccess,
@@ -1094,7 +1402,6 @@
 
 TEST_F(OmahaRequestActionTest, CohortsAreUpdated) {
   OmahaResponse response;
-  OmahaRequestParams params = request_params_;
   EXPECT_TRUE(fake_prefs_.SetString(kPrefsOmahaCohort, "old_value"));
   EXPECT_TRUE(fake_prefs_.SetString(kPrefsOmahaCohortHint, "old_hint"));
   EXPECT_TRUE(fake_prefs_.SetString(kPrefsOmahaCohortName, "old_name"));
@@ -1103,8 +1410,7 @@
   fake_update_response_.cohorthint = "please-put-me-on-beta";
   fake_update_response_.cohortname = "";
 
-  ASSERT_TRUE(TestUpdateCheck(&params,
-                              fake_update_response_.GetUpdateResponse(),
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
                               -1,
                               false,  // ping_only
                               ErrorCode::kSuccess,
@@ -1126,11 +1432,9 @@
 
 TEST_F(OmahaRequestActionTest, CohortsAreNotModifiedWhenMissing) {
   OmahaResponse response;
-  OmahaRequestParams params = request_params_;
   EXPECT_TRUE(fake_prefs_.SetString(kPrefsOmahaCohort, "old_value"));
 
-  ASSERT_TRUE(TestUpdateCheck(&params,
-                              fake_update_response_.GetUpdateResponse(),
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
                               -1,
                               false,  // ping_only
                               ErrorCode::kSuccess,
@@ -1150,14 +1454,12 @@
 
 TEST_F(OmahaRequestActionTest, CohortsArePersistedWhenNoUpdate) {
   OmahaResponse response;
-  OmahaRequestParams params = request_params_;
   fake_update_response_.include_cohorts = true;
   fake_update_response_.cohort = "s/154454/8479665";
   fake_update_response_.cohorthint = "please-put-me-on-beta";
   fake_update_response_.cohortname = "stable";
 
-  ASSERT_TRUE(TestUpdateCheck(&params,
-                              fake_update_response_.GetNoUpdateResponse(),
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetNoUpdateResponse(),
                               -1,
                               false,  // ping_only
                               ErrorCode::kSuccess,
@@ -1180,15 +1482,13 @@
 
 TEST_F(OmahaRequestActionTest, MultiAppCohortTest) {
   OmahaResponse response;
-  OmahaRequestParams params = request_params_;
   fake_update_response_.multi_app = true;
   fake_update_response_.include_cohorts = true;
   fake_update_response_.cohort = "s/154454/8479665";
   fake_update_response_.cohorthint = "please-put-me-on-beta";
   fake_update_response_.cohortname = "stable";
 
-  ASSERT_TRUE(TestUpdateCheck(&params,
-                              fake_update_response_.GetUpdateResponse(),
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
                               -1,
                               false,  // ping_only
                               ErrorCode::kSuccess,
@@ -1215,19 +1515,15 @@
   brillo::FakeMessageLoop loop(nullptr);
   loop.SetAsCurrent();
 
-  OmahaRequestParams params = request_params_;
-  fake_system_state_.set_request_params(&params);
-  OmahaRequestAction action(
+  auto action = std::make_unique<OmahaRequestAction>(
       &fake_system_state_,
       nullptr,
-      std::make_unique<MockHttpFetcher>(http_response.data(),
-                                        http_response.size(),
-                                        nullptr),
+      std::make_unique<MockHttpFetcher>(
+          http_response.data(), http_response.size(), nullptr),
       false);
-  OmahaRequestActionTestProcessorDelegate delegate;
   ActionProcessor processor;
-  processor.set_delegate(&delegate);
-  processor.EnqueueAction(&action);
+  processor.set_delegate(&delegate_);
+  processor.EnqueueAction(std::move(action));
 
   loop.PostTask(base::Bind(
       [](ActionProcessor* processor) { processor->StartProcessing(); },
@@ -1239,40 +1535,35 @@
 
 TEST_F(OmahaRequestActionTest, InvalidXmlTest) {
   OmahaResponse response;
-  ASSERT_FALSE(
-      TestUpdateCheck(nullptr,  // request_params
-                      "invalid xml>",
-                      -1,
-                      false,  // ping_only
-                      ErrorCode::kOmahaRequestXMLParseError,
-                      metrics::CheckResult::kParsingError,
-                      metrics::CheckReaction::kUnset,
-                      metrics::DownloadErrorCode::kUnset,
-                      &response,
-                      nullptr));
+  ASSERT_FALSE(TestUpdateCheck("invalid xml>",
+                               -1,
+                               false,  // ping_only
+                               ErrorCode::kOmahaRequestXMLParseError,
+                               metrics::CheckResult::kParsingError,
+                               metrics::CheckReaction::kUnset,
+                               metrics::DownloadErrorCode::kUnset,
+                               &response,
+                               nullptr));
   EXPECT_FALSE(response.update_exists);
 }
 
 TEST_F(OmahaRequestActionTest, EmptyResponseTest) {
   OmahaResponse response;
-  ASSERT_FALSE(
-      TestUpdateCheck(nullptr,  // request_params
-                      "",
-                      -1,
-                      false,  // ping_only
-                      ErrorCode::kOmahaRequestEmptyResponseError,
-                      metrics::CheckResult::kParsingError,
-                      metrics::CheckReaction::kUnset,
-                      metrics::DownloadErrorCode::kUnset,
-                      &response,
-                      nullptr));
+  ASSERT_FALSE(TestUpdateCheck("",
+                               -1,
+                               false,  // ping_only
+                               ErrorCode::kOmahaRequestEmptyResponseError,
+                               metrics::CheckResult::kParsingError,
+                               metrics::CheckReaction::kUnset,
+                               metrics::DownloadErrorCode::kUnset,
+                               &response,
+                               nullptr));
   EXPECT_FALSE(response.update_exists);
 }
 
 TEST_F(OmahaRequestActionTest, MissingStatusTest) {
   OmahaResponse response;
   ASSERT_FALSE(TestUpdateCheck(
-      nullptr,  // request_params
       "<?xml version=\"1.0\" encoding=\"UTF-8\"?><response protocol=\"3.0\">"
       "<daystart elapsed_seconds=\"100\"/>"
       "<app appid=\"foo\" status=\"ok\">"
@@ -1292,7 +1583,6 @@
 TEST_F(OmahaRequestActionTest, InvalidStatusTest) {
   OmahaResponse response;
   ASSERT_FALSE(TestUpdateCheck(
-      nullptr,  // request_params
       "<?xml version=\"1.0\" encoding=\"UTF-8\"?><response protocol=\"3.0\">"
       "<daystart elapsed_seconds=\"100\"/>"
       "<app appid=\"foo\" status=\"ok\">"
@@ -1312,7 +1602,6 @@
 TEST_F(OmahaRequestActionTest, MissingNodesetTest) {
   OmahaResponse response;
   ASSERT_FALSE(TestUpdateCheck(
-      nullptr,  // request_params
       "<?xml version=\"1.0\" encoding=\"UTF-8\"?><response protocol=\"3.0\">"
       "<daystart elapsed_seconds=\"100\"/>"
       "<app appid=\"foo\" status=\"ok\">"
@@ -1353,8 +1642,7 @@
   LOG(INFO) << "Input Response = " << input_response;
 
   OmahaResponse response;
-  ASSERT_TRUE(TestUpdateCheck(nullptr,  // request_params
-                              input_response,
+  ASSERT_TRUE(TestUpdateCheck(input_response,
                               -1,
                               false,  // ping_only
                               ErrorCode::kSuccess,
@@ -1394,17 +1682,16 @@
   loop.SetAsCurrent();
 
   string http_response("doesn't matter");
-  OmahaRequestAction action(
+  auto action = std::make_unique<OmahaRequestAction>(
       &fake_system_state_,
       nullptr,
-      std::make_unique<MockHttpFetcher>(http_response.data(),
-                                        http_response.size(),
-                                        nullptr),
+      std::make_unique<MockHttpFetcher>(
+          http_response.data(), http_response.size(), nullptr),
       false);
   TerminateEarlyTestProcessorDelegate delegate;
   ActionProcessor processor;
   processor.set_delegate(&delegate);
-  processor.EnqueueAction(&action);
+  processor.EnqueueAction(std::move(action));
 
   loop.PostTask(base::Bind(&TerminateTransferTestStarter, &processor));
   loop.Run();
@@ -1436,39 +1723,26 @@
   brillo::Blob post_data;
 
   // Make sure XML Encode is being called on the params
-  OmahaRequestParams params(&fake_system_state_,
-                            constants::kOmahaPlatformName,
-                            OmahaRequestParams::kOsVersion,
-                            "testtheservice_pack>",
-                            "x86 generic<id",
-                            kTestAppId,
-                            "0.1.0.0",
-                            "en-US",
-                            "unittest_track&lt;",
-                            "<OEM MODEL>",
-                            "ChromeOSFirmware.1.0",
-                            "EC100",
-                            false,   // delta okay
-                            false,   // interactive
-                            "http://url",
-                            "");     // target_version_prefix
+  request_params_.set_os_sp("testtheservice_pack>");
+  request_params_.set_os_board("x86 generic<id");
+  request_params_.set_current_channel("unittest_track&lt;");
+  request_params_.set_target_channel("unittest_track&lt;");
+  request_params_.set_hwid("<OEM MODEL>");
   fake_prefs_.SetString(kPrefsOmahaCohort, "evil\nstring");
   fake_prefs_.SetString(kPrefsOmahaCohortHint, "evil&string\\");
   fake_prefs_.SetString(kPrefsOmahaCohortName,
                         base::JoinString(
                             vector<string>(100, "My spoon is too big."), " "));
   OmahaResponse response;
-  ASSERT_FALSE(
-      TestUpdateCheck(&params,
-                      "invalid xml>",
-                      -1,
-                      false,  // ping_only
-                      ErrorCode::kOmahaRequestXMLParseError,
-                      metrics::CheckResult::kParsingError,
-                      metrics::CheckReaction::kUnset,
-                      metrics::DownloadErrorCode::kUnset,
-                      &response,
-                      &post_data));
+  ASSERT_FALSE(TestUpdateCheck("invalid xml>",
+                               -1,
+                               false,  // ping_only
+                               ErrorCode::kOmahaRequestXMLParseError,
+                               metrics::CheckResult::kParsingError,
+                               metrics::CheckReaction::kUnset,
+                               metrics::DownloadErrorCode::kUnset,
+                               &response,
+                               &post_data));
   // convert post_data to string
   string post_str(post_data.begin(), post_data.end());
   EXPECT_NE(string::npos, post_str.find("testtheservice_pack&gt;"));
@@ -1492,17 +1766,15 @@
   fake_update_response_.deadline = "&lt;20110101";
   fake_update_response_.more_info_url = "testthe&lt;url";
   fake_update_response_.codebase = "testthe&amp;codebase/";
-  ASSERT_TRUE(
-      TestUpdateCheck(nullptr,  // request_params
-                      fake_update_response_.GetUpdateResponse(),
-                      -1,
-                      false,  // ping_only
-                      ErrorCode::kSuccess,
-                      metrics::CheckResult::kUpdateAvailable,
-                      metrics::CheckReaction::kUpdating,
-                      metrics::DownloadErrorCode::kUnset,
-                      &response,
-                      nullptr));
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
+                              -1,
+                              false,  // ping_only
+                              ErrorCode::kSuccess,
+                              metrics::CheckResult::kUpdateAvailable,
+                              metrics::CheckReaction::kUpdating,
+                              metrics::DownloadErrorCode::kUnset,
+                              &response,
+                              nullptr));
 
   EXPECT_EQ("testthe<url", response.more_info_url);
   EXPECT_EQ("testthe&codebase/file.signed",
@@ -1514,17 +1786,15 @@
   OmahaResponse response;
   // overflows int32_t:
   fake_update_response_.size = 123123123123123ull;
-  ASSERT_TRUE(
-      TestUpdateCheck(nullptr,  // request_params
-                      fake_update_response_.GetUpdateResponse(),
-                      -1,
-                      false,  // ping_only
-                      ErrorCode::kSuccess,
-                      metrics::CheckResult::kUpdateAvailable,
-                      metrics::CheckReaction::kUpdating,
-                      metrics::DownloadErrorCode::kUnset,
-                      &response,
-                      nullptr));
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
+                              -1,
+                              false,  // ping_only
+                              ErrorCode::kSuccess,
+                              metrics::CheckResult::kUpdateAvailable,
+                              metrics::CheckReaction::kUpdating,
+                              metrics::DownloadErrorCode::kUnset,
+                              &response,
+                              nullptr));
 
   EXPECT_EQ(fake_update_response_.size, response.packages[0].size);
 }
@@ -1539,8 +1809,7 @@
   // An existing but empty previous version means that we didn't reboot to a new
   // update, therefore, no need to update the previous version.
   EXPECT_CALL(prefs, SetString(kPrefsPreviousVersion, _)).Times(0);
-  ASSERT_FALSE(TestUpdateCheck(nullptr,  // request_params
-                               "invalid xml>",
+  ASSERT_FALSE(TestUpdateCheck("invalid xml>",
                                -1,
                                false,  // ping_only
                                ErrorCode::kOmahaRequestXMLParseError,
@@ -1551,9 +1820,9 @@
                                &post_data));
   // convert post_data to string
   string post_str(post_data.begin(), post_data.end());
-  EXPECT_NE(post_str.find(
-      "        <ping active=\"1\" a=\"-1\" r=\"-1\"></ping>\n"
-      "        <updatecheck targetversionprefix=\"\"></updatecheck>\n"),
+  EXPECT_NE(
+      post_str.find("        <ping active=\"1\" a=\"-1\" r=\"-1\"></ping>\n"
+                    "        <updatecheck></updatecheck>\n"),
       string::npos);
   EXPECT_NE(post_str.find("hardware_class=\"OEM MODEL 09235 7471\""),
             string::npos);
@@ -1568,8 +1837,7 @@
 
 TEST_F(OmahaRequestActionTest, FormatSuccessEventOutputTest) {
   brillo::Blob post_data;
-  TestEvent(request_params_,
-            new OmahaEvent(OmahaEvent::kTypeUpdateDownloadStarted),
+  TestEvent(new OmahaEvent(OmahaEvent::kTypeUpdateDownloadStarted),
             "invalid xml>",
             &post_data);
   // convert post_data to string
@@ -1585,8 +1853,7 @@
 
 TEST_F(OmahaRequestActionTest, FormatErrorEventOutputTest) {
   brillo::Blob post_data;
-  TestEvent(request_params_,
-            new OmahaEvent(OmahaEvent::kTypeDownloadComplete,
+  TestEvent(new OmahaEvent(OmahaEvent::kTypeDownloadComplete,
                            OmahaEvent::kResultError,
                            ErrorCode::kError),
             "invalid xml>",
@@ -1605,9 +1872,6 @@
 
 TEST_F(OmahaRequestActionTest, IsEventTest) {
   string http_response("doesn't matter");
-  // Create a copy of the OmahaRequestParams to reuse it later.
-  OmahaRequestParams params = request_params_;
-  fake_system_state_.set_request_params(&params);
   OmahaRequestAction update_check_action(
       &fake_system_state_,
       nullptr,
@@ -1617,8 +1881,6 @@
       false);
   EXPECT_FALSE(update_check_action.IsEvent());
 
-  params = request_params_;
-  fake_system_state_.set_request_params(&params);
   OmahaRequestAction event_action(
       &fake_system_state_,
       new OmahaEvent(OmahaEvent::kTypeUpdateComplete),
@@ -1634,24 +1896,10 @@
     bool delta_okay = i == 1;
     const char* delta_okay_str = delta_okay ? "true" : "false";
     brillo::Blob post_data;
-    OmahaRequestParams params(&fake_system_state_,
-                              constants::kOmahaPlatformName,
-                              OmahaRequestParams::kOsVersion,
-                              "service_pack",
-                              "x86-generic",
-                              kTestAppId,
-                              "0.1.0.0",
-                              "en-US",
-                              "unittest_track",
-                              "OEM MODEL REV 1234",
-                              "ChromeOSFirmware.1.0",
-                              "EC100",
-                              delta_okay,
-                              false,  // interactive
-                              "http://url",
-                              "");    // target_version_prefix
-    ASSERT_FALSE(TestUpdateCheck(&params,
-                                 "invalid xml>",
+
+    request_params_.set_delta_okay(delta_okay);
+
+    ASSERT_FALSE(TestUpdateCheck("invalid xml>",
                                  -1,
                                  false,  // ping_only
                                  ErrorCode::kOmahaRequestXMLParseError,
@@ -1675,24 +1923,10 @@
     const char* interactive_str = interactive ? "ondemandupdate" : "scheduler";
     brillo::Blob post_data;
     FakeSystemState fake_system_state;
-    OmahaRequestParams params(&fake_system_state_,
-                              constants::kOmahaPlatformName,
-                              OmahaRequestParams::kOsVersion,
-                              "service_pack",
-                              "x86-generic",
-                              kTestAppId,
-                              "0.1.0.0",
-                              "en-US",
-                              "unittest_track",
-                              "OEM MODEL REV 1234",
-                              "ChromeOSFirmware.1.0",
-                              "EC100",
-                              true,   // delta_okay
-                              interactive,
-                              "http://url",
-                              "");    // target_version_prefix
-    ASSERT_FALSE(TestUpdateCheck(&params,
-                                 "invalid xml>",
+
+    request_params_.set_interactive(interactive);
+
+    ASSERT_FALSE(TestUpdateCheck("invalid xml>",
                                  -1,
                                  false,  // ping_only
                                  ErrorCode::kOmahaRequestXMLParseError,
@@ -1703,13 +1937,75 @@
                                  &post_data));
     // convert post_data to string
     string post_str(post_data.begin(), post_data.end());
-    EXPECT_NE(post_str.find(base::StringPrintf("installsource=\"%s\"",
-                                               interactive_str)),
+    EXPECT_NE(post_str.find(
+                  base::StringPrintf("installsource=\"%s\"", interactive_str)),
               string::npos)
         << "i = " << i;
   }
 }
 
+TEST_F(OmahaRequestActionTest, FormatTargetVersionPrefixOutputTest) {
+  for (int i = 0; i < 2; i++) {
+    bool target_version_set = i == 1;
+    const char* target_version_prefix = target_version_set ? "10032." : "";
+    brillo::Blob post_data;
+    FakeSystemState fake_system_state;
+
+    request_params_.set_target_version_prefix(target_version_prefix);
+
+    ASSERT_FALSE(TestUpdateCheck("invalid xml>",
+                                 -1,
+                                 false,  // ping_only
+                                 ErrorCode::kOmahaRequestXMLParseError,
+                                 metrics::CheckResult::kParsingError,
+                                 metrics::CheckReaction::kUnset,
+                                 metrics::DownloadErrorCode::kUnset,
+                                 nullptr,
+                                 &post_data));
+    // convert post_data to string
+    string post_str(post_data.begin(), post_data.end());
+    if (target_version_set) {
+      EXPECT_NE(post_str.find("<updatecheck targetversionprefix=\"10032.\">"),
+                string::npos)
+          << "i = " << i;
+    } else {
+      EXPECT_EQ(post_str.find("targetversionprefix"), string::npos)
+          << "i = " << i;
+    }
+  }
+}
+
+TEST_F(OmahaRequestActionTest, FormatRollbackAllowedOutputTest) {
+  for (int i = 0; i < 4; i++) {
+    bool rollback_allowed = i / 2 == 0;
+    bool target_version_set = i % 2 == 0;
+    brillo::Blob post_data;
+    FakeSystemState fake_system_state;
+
+    request_params_.set_target_version_prefix(target_version_set ? "10032."
+                                                                 : "");
+    request_params_.set_rollback_allowed(rollback_allowed);
+
+    ASSERT_FALSE(TestUpdateCheck("invalid xml>",
+                                 -1,
+                                 false,  // ping_only
+                                 ErrorCode::kOmahaRequestXMLParseError,
+                                 metrics::CheckResult::kParsingError,
+                                 metrics::CheckReaction::kUnset,
+                                 metrics::DownloadErrorCode::kUnset,
+                                 nullptr,
+                                 &post_data));
+    // convert post_data to string
+    string post_str(post_data.begin(), post_data.end());
+    if (rollback_allowed && target_version_set) {
+      EXPECT_NE(post_str.find("rollback_allowed=\"true\""), string::npos)
+          << "i = " << i;
+    } else {
+      EXPECT_EQ(post_str.find("rollback_allowed"), string::npos) << "i = " << i;
+    }
+  }
+}
+
 TEST_F(OmahaRequestActionTest, OmahaEventTest) {
   OmahaEvent default_event;
   EXPECT_EQ(OmahaEvent::kTypeUnknown, default_event.type);
@@ -1747,8 +2043,7 @@
   EXPECT_CALL(prefs, GetInt64(kPrefsLastRollCallPingDay, _))
       .WillOnce(DoAll(SetArgPointee<1>(five_days_ago), Return(true)));
   brillo::Blob post_data;
-  ASSERT_TRUE(TestUpdateCheck(nullptr,  // request_params
-                              fake_update_response_.GetNoUpdateResponse(),
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetNoUpdateResponse(),
                               -1,
                               ping_only,
                               ErrorCode::kSuccess,
@@ -1793,17 +2088,15 @@
   EXPECT_CALL(prefs, GetInt64(kPrefsLastRollCallPingDay, _))
       .WillOnce(DoAll(SetArgPointee<1>(now), Return(true)));
   brillo::Blob post_data;
-  ASSERT_TRUE(
-      TestUpdateCheck(nullptr,  // request_params
-                      fake_update_response_.GetNoUpdateResponse(),
-                      -1,
-                      false,  // ping_only
-                      ErrorCode::kSuccess,
-                      metrics::CheckResult::kNoUpdateAvailable,
-                      metrics::CheckReaction::kUnset,
-                      metrics::DownloadErrorCode::kUnset,
-                      nullptr,
-                      &post_data));
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetNoUpdateResponse(),
+                              -1,
+                              false,  // ping_only
+                              ErrorCode::kSuccess,
+                              metrics::CheckResult::kNoUpdateAvailable,
+                              metrics::CheckReaction::kUnset,
+                              metrics::DownloadErrorCode::kUnset,
+                              nullptr,
+                              &post_data));
   string post_str(post_data.begin(), post_data.end());
   EXPECT_NE(post_str.find("<ping active=\"1\" a=\"3\"></ping>"),
             string::npos);
@@ -1825,17 +2118,15 @@
   EXPECT_CALL(prefs, GetInt64(kPrefsLastRollCallPingDay, _))
       .WillOnce(DoAll(SetArgPointee<1>(four_days_ago), Return(true)));
   brillo::Blob post_data;
-  ASSERT_TRUE(
-      TestUpdateCheck(nullptr,  // request_params
-                      fake_update_response_.GetNoUpdateResponse(),
-                      -1,
-                      false,  // ping_only
-                      ErrorCode::kSuccess,
-                      metrics::CheckResult::kNoUpdateAvailable,
-                      metrics::CheckReaction::kUnset,
-                      metrics::DownloadErrorCode::kUnset,
-                      nullptr,
-                      &post_data));
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetNoUpdateResponse(),
+                              -1,
+                              false,  // ping_only
+                              ErrorCode::kSuccess,
+                              metrics::CheckResult::kNoUpdateAvailable,
+                              metrics::CheckReaction::kUnset,
+                              metrics::DownloadErrorCode::kUnset,
+                              nullptr,
+                              &post_data));
   string post_str(post_data.begin(), post_data.end());
   EXPECT_NE(post_str.find("<ping active=\"1\" r=\"4\"></ping>\n"),
             string::npos);
@@ -1862,17 +2153,15 @@
   EXPECT_CALL(prefs, SetInt64(kPrefsLastRollCallPingDay, _))
       .WillOnce(Return(true));
   brillo::Blob post_data;
-  ASSERT_TRUE(
-      TestUpdateCheck(nullptr,  // request_params
-                      fake_update_response_.GetNoUpdateResponse(),
-                      -1,
-                      false,  // ping_only
-                      ErrorCode::kSuccess,
-                      metrics::CheckResult::kNoUpdateAvailable,
-                      metrics::CheckReaction::kUnset,
-                      metrics::DownloadErrorCode::kUnset,
-                      nullptr,
-                      &post_data));
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetNoUpdateResponse(),
+                              -1,
+                              false,  // ping_only
+                              ErrorCode::kSuccess,
+                              metrics::CheckResult::kNoUpdateAvailable,
+                              metrics::CheckReaction::kUnset,
+                              metrics::DownloadErrorCode::kUnset,
+                              nullptr,
+                              &post_data));
   string post_str(post_data.begin(), post_data.end());
   EXPECT_EQ(post_str.find("ping"), string::npos);
 }
@@ -1889,17 +2178,15 @@
   EXPECT_CALL(prefs, SetInt64(kPrefsLastActivePingDay, _)).Times(0);
   EXPECT_CALL(prefs, SetInt64(kPrefsLastRollCallPingDay, _)).Times(0);
   brillo::Blob post_data;
-  EXPECT_TRUE(
-      TestUpdateCheck(nullptr,  // request_params
-                      fake_update_response_.GetNoUpdateResponse(),
-                      -1,
-                      true,  // ping_only
-                      ErrorCode::kSuccess,
-                      metrics::CheckResult::kUnset,
-                      metrics::CheckReaction::kUnset,
-                      metrics::DownloadErrorCode::kUnset,
-                      nullptr,
-                      &post_data));
+  EXPECT_TRUE(TestUpdateCheck(fake_update_response_.GetNoUpdateResponse(),
+                              -1,
+                              true,  // ping_only
+                              ErrorCode::kSuccess,
+                              metrics::CheckResult::kUnset,
+                              metrics::CheckReaction::kUnset,
+                              metrics::DownloadErrorCode::kUnset,
+                              nullptr,
+                              &post_data));
   EXPECT_EQ(0U, post_data.size());
 }
 
@@ -1923,8 +2210,7 @@
       .WillOnce(Return(true));
   brillo::Blob post_data;
   ASSERT_TRUE(
-      TestUpdateCheck(nullptr,  // request_params
-                      "<?xml version=\"1.0\" encoding=\"UTF-8\"?><response "
+      TestUpdateCheck("<?xml version=\"1.0\" encoding=\"UTF-8\"?><response "
                       "protocol=\"3.0\"><daystart elapsed_seconds=\"100\"/>"
                       "<app appid=\"foo\" status=\"ok\"><ping status=\"ok\"/>"
                       "<updatecheck status=\"noupdate\"/></app></response>",
@@ -1960,8 +2246,7 @@
                               AllOf(Ge(midnight), Le(midnight_slack))))
       .WillOnce(Return(true));
   ASSERT_TRUE(
-      TestUpdateCheck(nullptr,  // request_params
-                      "<?xml version=\"1.0\" encoding=\"UTF-8\"?><response "
+      TestUpdateCheck("<?xml version=\"1.0\" encoding=\"UTF-8\"?><response "
                       "protocol=\"3.0\"><daystart elapsed_seconds=\"200\"/>"
                       "<app appid=\"foo\" status=\"ok\"><ping status=\"ok\"/>"
                       "<updatecheck status=\"noupdate\"/></app></response>",
@@ -1983,8 +2268,7 @@
   EXPECT_CALL(prefs, SetInt64(kPrefsLastActivePingDay, _)).Times(0);
   EXPECT_CALL(prefs, SetInt64(kPrefsLastRollCallPingDay, _)).Times(0);
   ASSERT_TRUE(
-      TestUpdateCheck(nullptr,  // request_params
-                      "<?xml version=\"1.0\" encoding=\"UTF-8\"?><response "
+      TestUpdateCheck("<?xml version=\"1.0\" encoding=\"UTF-8\"?><response "
                       "protocol=\"3.0\"><daystart blah=\"200\"/>"
                       "<app appid=\"foo\" status=\"ok\"><ping status=\"ok\"/>"
                       "<updatecheck status=\"noupdate\"/></app></response>",
@@ -2006,8 +2290,7 @@
   EXPECT_CALL(prefs, SetInt64(kPrefsLastActivePingDay, _)).Times(0);
   EXPECT_CALL(prefs, SetInt64(kPrefsLastRollCallPingDay, _)).Times(0);
   ASSERT_TRUE(
-      TestUpdateCheck(nullptr,  // request_params
-                      "<?xml version=\"1.0\" encoding=\"UTF-8\"?><response "
+      TestUpdateCheck("<?xml version=\"1.0\" encoding=\"UTF-8\"?><response "
                       "protocol=\"3.0\"><daystart elapsed_seconds=\"x\"/>"
                       "<app appid=\"foo\" status=\"ok\"><ping status=\"ok\"/>"
                       "<updatecheck status=\"noupdate\"/></app></response>",
@@ -2025,8 +2308,7 @@
   // Test that the "eol" flags is only parsed from the "_eol" attribute and not
   // the "eol" attribute.
   ASSERT_TRUE(
-      TestUpdateCheck(nullptr,  // request_params
-                      "<?xml version=\"1.0\" encoding=\"UTF-8\"?><response "
+      TestUpdateCheck("<?xml version=\"1.0\" encoding=\"UTF-8\"?><response "
                       "protocol=\"3.0\"><app appid=\"foo\" status=\"ok\">"
                       "<ping status=\"ok\"/><updatecheck status=\"noupdate\" "
                       "_eol=\"security-only\" eol=\"eol\" _foo=\"bar\"/>"
@@ -2049,8 +2331,7 @@
 
 TEST_F(OmahaRequestActionTest, NoUniqueIDTest) {
   brillo::Blob post_data;
-  ASSERT_FALSE(TestUpdateCheck(nullptr,  // request_params
-                               "invalid xml>",
+  ASSERT_FALSE(TestUpdateCheck("invalid xml>",
                                -1,
                                false,  // ping_only
                                ErrorCode::kOmahaRequestXMLParseError,
@@ -2069,17 +2350,15 @@
   OmahaResponse response;
   const int http_error_code =
       static_cast<int>(ErrorCode::kOmahaRequestHTTPResponseBase) + 501;
-  ASSERT_FALSE(
-      TestUpdateCheck(nullptr,  // request_params
-                      "",
-                      501,
-                      false,  // ping_only
-                      static_cast<ErrorCode>(http_error_code),
-                      metrics::CheckResult::kDownloadError,
-                      metrics::CheckReaction::kUnset,
-                      static_cast<metrics::DownloadErrorCode>(501),
-                      &response,
-                      nullptr));
+  ASSERT_FALSE(TestUpdateCheck("",
+                               501,
+                               false,  // ping_only
+                               static_cast<ErrorCode>(http_error_code),
+                               metrics::CheckResult::kDownloadError,
+                               metrics::CheckReaction::kUnset,
+                               static_cast<metrics::DownloadErrorCode>(501),
+                               &response,
+                               nullptr));
   EXPECT_FALSE(response.update_exists);
 }
 
@@ -2087,32 +2366,28 @@
   OmahaResponse response;
   const int http_error_code =
       static_cast<int>(ErrorCode::kOmahaRequestHTTPResponseBase) + 999;
-  ASSERT_FALSE(
-      TestUpdateCheck(nullptr,  // request_params
-                      "",
-                      1500,
-                      false,  // ping_only
-                      static_cast<ErrorCode>(http_error_code),
-                      metrics::CheckResult::kDownloadError,
-                      metrics::CheckReaction::kUnset,
-                      metrics::DownloadErrorCode::kHttpStatusOther,
-                      &response,
-                      nullptr));
+  ASSERT_FALSE(TestUpdateCheck("",
+                               1500,
+                               false,  // ping_only
+                               static_cast<ErrorCode>(http_error_code),
+                               metrics::CheckResult::kDownloadError,
+                               metrics::CheckReaction::kUnset,
+                               metrics::DownloadErrorCode::kHttpStatusOther,
+                               &response,
+                               nullptr));
   EXPECT_FALSE(response.update_exists);
 }
 
 TEST_F(OmahaRequestActionTest, TestUpdateFirstSeenAtGetsPersistedFirstTime) {
   OmahaResponse response;
-  OmahaRequestParams params = request_params_;
-  params.set_wall_clock_based_wait_enabled(true);
-  params.set_waiting_period(TimeDelta().FromDays(1));
-  params.set_update_check_count_wait_enabled(false);
+  request_params_.set_wall_clock_based_wait_enabled(true);
+  request_params_.set_waiting_period(TimeDelta().FromDays(1));
+  request_params_.set_update_check_count_wait_enabled(false);
 
   Time arbitrary_date;
   ASSERT_TRUE(Time::FromString("6/4/1989", &arbitrary_date));
   fake_system_state_.fake_clock()->SetWallclockTime(arbitrary_date);
-  ASSERT_FALSE(TestUpdateCheck(&params,
-                               fake_update_response_.GetUpdateResponse(),
+  ASSERT_FALSE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
                                -1,
                                false,  // ping_only
                                ErrorCode::kOmahaUpdateDeferredPerPolicy,
@@ -2128,9 +2403,8 @@
   EXPECT_FALSE(response.update_exists);
 
   // Verify if we are interactive check we don't defer.
-  params.set_interactive(true);
-  ASSERT_TRUE(TestUpdateCheck(&params,
-                              fake_update_response_.GetUpdateResponse(),
+  request_params_.set_interactive(true);
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
                               -1,
                               false,  // ping_only
                               ErrorCode::kSuccess,
@@ -2144,10 +2418,9 @@
 
 TEST_F(OmahaRequestActionTest, TestUpdateFirstSeenAtGetsUsedIfAlreadyPresent) {
   OmahaResponse response;
-  OmahaRequestParams params = request_params_;
-  params.set_wall_clock_based_wait_enabled(true);
-  params.set_waiting_period(TimeDelta().FromDays(1));
-  params.set_update_check_count_wait_enabled(false);
+  request_params_.set_wall_clock_based_wait_enabled(true);
+  request_params_.set_waiting_period(TimeDelta().FromDays(1));
+  request_params_.set_update_check_count_wait_enabled(false);
 
   Time t1, t2;
   ASSERT_TRUE(Time::FromString("1/1/2012", &t1));
@@ -2155,8 +2428,7 @@
   ASSERT_TRUE(
       fake_prefs_.SetInt64(kPrefsUpdateFirstSeenAt, t1.ToInternalValue()));
   fake_system_state_.fake_clock()->SetWallclockTime(t2);
-  ASSERT_TRUE(TestUpdateCheck(&params,
-                              fake_update_response_.GetUpdateResponse(),
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
                               -1,
                               false,  // ping_only
                               ErrorCode::kSuccess,
@@ -2180,17 +2452,16 @@
   ASSERT_TRUE(tempdir.CreateUniqueTempDir());
 
   brillo::Blob post_data;
-  OmahaRequestParams params(&fake_system_state_);
-  params.set_root(tempdir.GetPath().value());
-  params.set_app_id("{22222222-2222-2222-2222-222222222222}");
-  params.set_app_version("1.2.3.4");
-  params.set_product_components("o.bundle=1");
-  params.set_current_channel("canary-channel");
-  EXPECT_TRUE(params.SetTargetChannel("stable-channel", true, nullptr));
-  params.UpdateDownloadChannel();
-  EXPECT_TRUE(params.ShouldPowerwash());
-  ASSERT_FALSE(TestUpdateCheck(&params,
-                               "invalid xml>",
+  request_params_.set_root(tempdir.GetPath().value());
+  request_params_.set_app_id("{22222222-2222-2222-2222-222222222222}");
+  request_params_.set_app_version("1.2.3.4");
+  request_params_.set_product_components("o.bundle=1");
+  request_params_.set_current_channel("canary-channel");
+  EXPECT_TRUE(
+      request_params_.SetTargetChannel("stable-channel", true, nullptr));
+  request_params_.UpdateDownloadChannel();
+  EXPECT_TRUE(request_params_.ShouldPowerwash());
+  ASSERT_FALSE(TestUpdateCheck("invalid xml>",
                                -1,
                                false,  // ping_only
                                ErrorCode::kOmahaRequestXMLParseError,
@@ -2214,17 +2485,16 @@
   ASSERT_TRUE(tempdir.CreateUniqueTempDir());
 
   brillo::Blob post_data;
-  OmahaRequestParams params(&fake_system_state_);
-  params.set_root(tempdir.GetPath().value());
-  params.set_app_id("{11111111-1111-1111-1111-111111111111}");
-  params.set_app_version("5.6.7.8");
-  params.set_product_components("o.bundle=1");
-  params.set_current_channel("stable-channel");
-  EXPECT_TRUE(params.SetTargetChannel("canary-channel", false, nullptr));
-  params.UpdateDownloadChannel();
-  EXPECT_FALSE(params.ShouldPowerwash());
-  ASSERT_FALSE(TestUpdateCheck(&params,
-                               "invalid xml>",
+  request_params_.set_root(tempdir.GetPath().value());
+  request_params_.set_app_id("{11111111-1111-1111-1111-111111111111}");
+  request_params_.set_app_version("5.6.7.8");
+  request_params_.set_product_components("o.bundle=1");
+  request_params_.set_current_channel("stable-channel");
+  EXPECT_TRUE(
+      request_params_.SetTargetChannel("canary-channel", false, nullptr));
+  request_params_.UpdateDownloadChannel();
+  EXPECT_FALSE(request_params_.ShouldPowerwash());
+  ASSERT_FALSE(TestUpdateCheck("invalid xml>",
                                -1,
                                false,  // ping_only
                                ErrorCode::kOmahaRequestXMLParseError,
@@ -2252,17 +2522,15 @@
   fake_system_state_.fake_hardware()->SetPowerwashCount(1);
 
   brillo::Blob post_data;
-  ASSERT_TRUE(
-      TestUpdateCheck(nullptr,  // request_params
-                      fake_update_response_.GetNoUpdateResponse(),
-                      -1,
-                      false,  // ping_only
-                      ErrorCode::kSuccess,
-                      metrics::CheckResult::kNoUpdateAvailable,
-                      metrics::CheckReaction::kUnset,
-                      metrics::DownloadErrorCode::kUnset,
-                      nullptr,
-                      &post_data));
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetNoUpdateResponse(),
+                              -1,
+                              false,  // ping_only
+                              ErrorCode::kSuccess,
+                              metrics::CheckResult::kNoUpdateAvailable,
+                              metrics::CheckReaction::kUnset,
+                              metrics::DownloadErrorCode::kUnset,
+                              nullptr,
+                              &post_data));
   // We shouldn't send a ping in this case since powerwash > 0.
   string post_str(post_data.begin(), post_data.end());
   EXPECT_EQ(string::npos, post_str.find("<ping"));
@@ -2280,17 +2548,15 @@
   fake_system_state_.fake_hardware()->SetFirstActiveOmahaPingSent();
 
   brillo::Blob post_data;
-  ASSERT_TRUE(
-      TestUpdateCheck(nullptr,  // request_params
-                      fake_update_response_.GetNoUpdateResponse(),
-                      -1,
-                      false,  // ping_only
-                      ErrorCode::kSuccess,
-                      metrics::CheckResult::kNoUpdateAvailable,
-                      metrics::CheckReaction::kUnset,
-                      metrics::DownloadErrorCode::kUnset,
-                      nullptr,
-                      &post_data));
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetNoUpdateResponse(),
+                              -1,
+                              false,  // ping_only
+                              ErrorCode::kSuccess,
+                              metrics::CheckResult::kNoUpdateAvailable,
+                              metrics::CheckReaction::kUnset,
+                              metrics::DownloadErrorCode::kUnset,
+                              nullptr,
+                              &post_data));
   // We shouldn't send a ping in this case since
   // first_active_omaha_ping_sent=true
   string post_str(post_data.begin(), post_data.end());
@@ -2303,17 +2569,15 @@
   fake_prefs_.SetString(kPrefsPreviousVersion, "1.2.3.4");
 
   brillo::Blob post_data;
-  ASSERT_TRUE(
-      TestUpdateCheck(nullptr,  // request_params
-                      fake_update_response_.GetNoUpdateResponse(),
-                      -1,
-                      false,  // ping_only
-                      ErrorCode::kSuccess,
-                      metrics::CheckResult::kNoUpdateAvailable,
-                      metrics::CheckReaction::kUnset,
-                      metrics::DownloadErrorCode::kUnset,
-                      nullptr,
-                      &post_data));
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetNoUpdateResponse(),
+                              -1,
+                              false,  // ping_only
+                              ErrorCode::kSuccess,
+                              metrics::CheckResult::kNoUpdateAvailable,
+                              metrics::CheckReaction::kUnset,
+                              metrics::DownloadErrorCode::kUnset,
+                              nullptr,
+                              &post_data));
   string post_str(post_data.begin(), post_data.end());
 
   // An event 54 is included and has the right version.
@@ -2343,7 +2607,6 @@
     bool expected_allow_p2p_for_sharing,
     const string& expected_p2p_url) {
   OmahaResponse response;
-  OmahaRequestParams request_params = request_params_;
   bool actual_allow_p2p_for_downloading = initial_allow_p2p_for_downloading;
   bool actual_allow_p2p_for_sharing = initial_allow_p2p_for_sharing;
   string actual_p2p_url;
@@ -2374,17 +2637,15 @@
   fake_update_response_.disable_p2p_for_downloading =
       omaha_disable_p2p_for_downloading;
   fake_update_response_.disable_p2p_for_sharing = omaha_disable_p2p_for_sharing;
-  ASSERT_TRUE(
-      TestUpdateCheck(&request_params,
-                      fake_update_response_.GetUpdateResponse(),
-                      -1,
-                      false,  // ping_only
-                      ErrorCode::kSuccess,
-                      metrics::CheckResult::kUpdateAvailable,
-                      metrics::CheckReaction::kUpdating,
-                      metrics::DownloadErrorCode::kUnset,
-                      &response,
-                      nullptr));
+  ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
+                              -1,
+                              false,  // ping_only
+                              ErrorCode::kSuccess,
+                              metrics::CheckResult::kUpdateAvailable,
+                              metrics::CheckReaction::kUpdating,
+                              metrics::DownloadErrorCode::kUnset,
+                              &response,
+                              nullptr));
   EXPECT_TRUE(response.update_exists);
 
   EXPECT_EQ(omaha_disable_p2p_for_downloading,
@@ -2479,17 +2740,15 @@
 bool OmahaRequestActionTest::InstallDateParseHelper(const string &elapsed_days,
                                                     OmahaResponse *response) {
   fake_update_response_.elapsed_days = elapsed_days;
-  return
-      TestUpdateCheck(nullptr,  // request_params
-                      fake_update_response_.GetUpdateResponse(),
-                      -1,
-                      false,  // ping_only
-                      ErrorCode::kSuccess,
-                      metrics::CheckResult::kUpdateAvailable,
-                      metrics::CheckReaction::kUpdating,
-                      metrics::DownloadErrorCode::kUnset,
-                      response,
-                      nullptr);
+  return TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
+                         -1,
+                         false,  // ping_only
+                         ErrorCode::kSuccess,
+                         metrics::CheckResult::kUpdateAvailable,
+                         metrics::CheckReaction::kUpdating,
+                         metrics::DownloadErrorCode::kUnset,
+                         response,
+                         nullptr);
 }
 
 TEST_F(OmahaRequestActionTest, ParseInstallDateFromResponse) {
@@ -2591,4 +2850,162 @@
   EXPECT_EQ(prefs_days, 28);
 }
 
+// Verifies that a device that has no device policy and is not a consumer
+// device sets the max kernel key rollforward to the current key version,
+// i.e. the same behavior as if rollback were enabled.
+TEST_F(OmahaRequestActionTest, NoPolicyEnterpriseDevicesSetMaxRollback) {
+  FakeHardware* fake_hw = fake_system_state_.fake_hardware();
+
+  // Set up and verify some initial default values for the kernel TPM
+  // values that control verified boot and rollback.
+  const int min_kernel_version = 4;
+  fake_hw->SetMinKernelKeyVersion(min_kernel_version);
+  fake_hw->SetMaxKernelKeyRollforward(kRollforwardInfinity);
+  EXPECT_EQ(min_kernel_version, fake_hw->GetMinKernelKeyVersion());
+  EXPECT_EQ(kRollforwardInfinity, fake_hw->GetMaxKernelKeyRollforward());
+
+  EXPECT_CALL(
+      *fake_system_state_.mock_metrics_reporter(),
+      ReportKeyVersionMetrics(min_kernel_version, min_kernel_version, true))
+      .Times(1);
+
+  OmahaResponse response;
+  TestRollbackCheck(false /* is_consumer_device */,
+                    3 /* rollback_allowed_milestones */,
+                    false /* is_policy_loaded */,
+                    &response);
+
+  // Verify kernel_max_rollforward was set to the current minimum kernel key
+  // version. This has the effect of freezing roll forwards indefinitely,
+  // holding the rollback window open until a future change can move it
+  // forward relative to the configured window.
+  EXPECT_EQ(min_kernel_version, fake_hw->GetMinKernelKeyVersion());
+  EXPECT_EQ(min_kernel_version, fake_hw->GetMaxKernelKeyRollforward());
+}
+
+// Verifies that a consumer device with no device policy sets the max kernel
+// key rollforward to logical infinity, i.e. the same behavior as when
+// rollback is disabled.
+TEST_F(OmahaRequestActionTest, NoPolicyConsumerDevicesSetMaxRollback) {
+  FakeHardware* fake_hw = fake_system_state_.fake_hardware();
+
+  // Set up and verify some initial default values for the kernel TPM
+  // values that control verified boot and rollback.
+  const int min_kernel_version = 3;
+  fake_hw->SetMinKernelKeyVersion(min_kernel_version);
+  fake_hw->SetMaxKernelKeyRollforward(kRollforwardInfinity);
+  EXPECT_EQ(min_kernel_version, fake_hw->GetMinKernelKeyVersion());
+  EXPECT_EQ(kRollforwardInfinity, fake_hw->GetMaxKernelKeyRollforward());
+
+  EXPECT_CALL(
+      *fake_system_state_.mock_metrics_reporter(),
+      ReportKeyVersionMetrics(min_kernel_version, kRollforwardInfinity, true))
+      .Times(1);
+
+  OmahaResponse response;
+  TestRollbackCheck(true /* is_consumer_device */,
+                    3 /* rollback_allowed_milestones */,
+                    false /* is_policy_loaded */,
+                    &response);
+
+  // Verify that with rollback disabled, kernel_max_rollforward was set to
+  // logical infinity. This is the expected behavior for consumer devices
+  // and matches the existing behavior prior to the rollback features.
+  EXPECT_EQ(min_kernel_version, fake_hw->GetMinKernelKeyVersion());
+  EXPECT_EQ(kRollforwardInfinity, fake_hw->GetMaxKernelKeyRollforward());
+}
+
+// Verifies that a device with rollback enabled sets kernel_max_rollforward
+// in the TPM to prevent roll forward.
+TEST_F(OmahaRequestActionTest, RollbackEnabledDevicesSetMaxRollback) {
+  FakeHardware* fake_hw = fake_system_state_.fake_hardware();
+
+  // Set up and verify some initial default values for the kernel TPM
+  // values that control verified boot and rollback.
+  const int allowed_milestones = 4;
+  const int min_kernel_version = 3;
+  fake_hw->SetMinKernelKeyVersion(min_kernel_version);
+  fake_hw->SetMaxKernelKeyRollforward(kRollforwardInfinity);
+  EXPECT_EQ(min_kernel_version, fake_hw->GetMinKernelKeyVersion());
+  EXPECT_EQ(kRollforwardInfinity, fake_hw->GetMaxKernelKeyRollforward());
+
+  EXPECT_CALL(
+      *fake_system_state_.mock_metrics_reporter(),
+      ReportKeyVersionMetrics(min_kernel_version, min_kernel_version, true))
+      .Times(1);
+
+  OmahaResponse response;
+  TestRollbackCheck(false /* is_consumer_device */,
+                    allowed_milestones,
+                    true /* is_policy_loaded */,
+                    &response);
+
+  // Verify that with rollback enabled, kernel_max_rollforward was set to the
+  // current minimum kernel key version. This has the effect of freezing roll
+  // forwards indefinitely, holding the rollback window open until a future
+  // change can move it forward relative to the configured window.
+  EXPECT_EQ(min_kernel_version, fake_hw->GetMinKernelKeyVersion());
+  EXPECT_EQ(min_kernel_version, fake_hw->GetMaxKernelKeyRollforward());
+}
+
+// Verifies that a device with rollback disabled sets kernel_max_rollforward
+// in the TPM to logical infinity, to allow roll forward.
+TEST_F(OmahaRequestActionTest, RollbackDisabledDevicesSetMaxRollback) {
+  FakeHardware* fake_hw = fake_system_state_.fake_hardware();
+
+  // Set up and verify some initial default values for the kernel TPM
+  // values that control verified boot and rollback.
+  const int allowed_milestones = 0;
+  const int min_kernel_version = 3;
+  fake_hw->SetMinKernelKeyVersion(min_kernel_version);
+  fake_hw->SetMaxKernelKeyRollforward(kRollforwardInfinity);
+  EXPECT_EQ(min_kernel_version, fake_hw->GetMinKernelKeyVersion());
+  EXPECT_EQ(kRollforwardInfinity, fake_hw->GetMaxKernelKeyRollforward());
+
+  EXPECT_CALL(
+      *fake_system_state_.mock_metrics_reporter(),
+      ReportKeyVersionMetrics(min_kernel_version, kRollforwardInfinity, true))
+      .Times(1);
+
+  OmahaResponse response;
+  TestRollbackCheck(false /* is_consumer_device */,
+                    allowed_milestones,
+                    true /* is_policy_loaded */,
+                    &response);
+
+  // Verify that with rollback disabled, kernel_max_rollforward was set to
+  // logical infinity.
+  EXPECT_EQ(min_kernel_version, fake_hw->GetMinKernelKeyVersion());
+  EXPECT_EQ(kRollforwardInfinity, fake_hw->GetMaxKernelKeyRollforward());
+}
+
+TEST_F(OmahaRequestActionTest, RollbackResponseParsedNoEntries) {
+  OmahaResponse response;
+  fake_update_response_.rollback = true;
+  TestRollbackCheck(false /* is_consumer_device */,
+                    4 /* rollback_allowed_milestones */,
+                    true /* is_policy_loaded */,
+                    &response);
+  EXPECT_TRUE(response.is_rollback);
+}
+
+TEST_F(OmahaRequestActionTest, RollbackResponseValidVersionsParsed) {
+  OmahaResponse response;
+  fake_update_response_.rollback_firmware_version = "1.2";
+  fake_update_response_.rollback_kernel_version = "3.4";
+  fake_update_response_.rollback = true;
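+  // "1.2" and "3.4" are expected to parse into (key version, version) pairs,
+  // i.e. firmware (1, 2) and kernel (3, 4), as asserted below.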
+  TestRollbackCheck(false /* is_consumer_device */,
+                    4 /* rollback_allowed_milestones */,
+                    true /* is_policy_loaded */,
+                    &response);
+  EXPECT_TRUE(response.is_rollback);
+  EXPECT_EQ(1, response.rollback_key_version.firmware_key);
+  EXPECT_EQ(2, response.rollback_key_version.firmware);
+  EXPECT_EQ(3, response.rollback_key_version.kernel_key);
+  EXPECT_EQ(4, response.rollback_key_version.kernel);
+}
+
 }  // namespace chromeos_update_engine
diff --git a/omaha_request_params.cc b/omaha_request_params.cc
index 9e78a93..186ea61 100644
--- a/omaha_request_params.cc
+++ b/omaha_request_params.cc
@@ -77,10 +77,14 @@
   LOG(INFO) << "Running from channel " << image_props_.current_channel;
 
   os_platform_ = constants::kOmahaPlatformName;
-  if (!image_props_.system_version.empty())
+  if (!image_props_.system_version.empty()) {
+    if (in_app_version == "ForcedUpdate") {
+      image_props_.system_version = in_app_version;
+    }
     os_version_ = image_props_.system_version;
-  else
+  } else {
     os_version_ = OmahaRequestParams::kOsVersion;
+  }
   if (!in_app_version.empty())
     image_props_.version = in_app_version;
 
@@ -127,16 +131,10 @@
 }
 
 bool OmahaRequestParams::CollectECFWVersions() const {
-  return base::StartsWith(hwid_, string("SAMS ALEX"),
-                          base::CompareCase::SENSITIVE) ||
-         base::StartsWith(hwid_, string("BUTTERFLY"),
-                          base::CompareCase::SENSITIVE) ||
-         base::StartsWith(hwid_, string("LUMPY"),
-                          base::CompareCase::SENSITIVE) ||
-         base::StartsWith(hwid_, string("PARROT"),
-                          base::CompareCase::SENSITIVE) ||
-         base::StartsWith(hwid_, string("SPRING"),
-                          base::CompareCase::SENSITIVE) ||
+  return base::StartsWith(
+             hwid_, string("PARROT"), base::CompareCase::SENSITIVE) ||
+         base::StartsWith(
+             hwid_, string("SPRING"), base::CompareCase::SENSITIVE) ||
          base::StartsWith(hwid_, string("SNOW"), base::CompareCase::SENSITIVE);
 }
 
diff --git a/omaha_request_params.h b/omaha_request_params.h
index 60619f9..c8e26b5 100644
--- a/omaha_request_params.h
+++ b/omaha_request_params.h
@@ -49,52 +49,12 @@
         os_version_(kOsVersion),
         delta_okay_(true),
         interactive_(false),
+        rollback_allowed_(false),
         wall_clock_based_wait_enabled_(false),
         update_check_count_wait_enabled_(false),
         min_update_checks_needed_(kDefaultMinUpdateChecks),
         max_update_checks_allowed_(kDefaultMaxUpdateChecks) {}
 
-  OmahaRequestParams(SystemState* system_state,
-                     const std::string& in_os_platform,
-                     const std::string& in_os_version,
-                     const std::string& in_os_sp,
-                     const std::string& in_os_board,
-                     const std::string& in_app_id,
-                     const std::string& in_app_version,
-                     const std::string& in_app_lang,
-                     const std::string& in_target_channel,
-                     const std::string& in_hwid,
-                     const std::string& in_fw_version,
-                     const std::string& in_ec_version,
-                     bool in_delta_okay,
-                     bool in_interactive,
-                     const std::string& in_update_url,
-                     const std::string& in_target_version_prefix)
-      : system_state_(system_state),
-        os_platform_(in_os_platform),
-        os_version_(in_os_version),
-        os_sp_(in_os_sp),
-        app_lang_(in_app_lang),
-        hwid_(in_hwid),
-        fw_version_(in_fw_version),
-        ec_version_(in_ec_version),
-        delta_okay_(in_delta_okay),
-        interactive_(in_interactive),
-        update_url_(in_update_url),
-        target_version_prefix_(in_target_version_prefix),
-        wall_clock_based_wait_enabled_(false),
-        update_check_count_wait_enabled_(false),
-        min_update_checks_needed_(kDefaultMinUpdateChecks),
-        max_update_checks_allowed_(kDefaultMaxUpdateChecks) {
-    image_props_.board = in_os_board;
-    image_props_.product_id = in_app_id;
-    image_props_.canary_product_id = in_app_id;
-    image_props_.version = in_app_version;
-    image_props_.current_channel = in_target_channel;
-    mutable_image_props_.target_channel = in_target_channel;
-    mutable_image_props_.is_powerwash_allowed = false;
-  }
-
   virtual ~OmahaRequestParams();
 
   // Setters and getters for the various properties.
@@ -164,6 +124,12 @@
     return target_version_prefix_;
   }
 
+  inline void set_rollback_allowed(bool rollback_allowed) {
+    rollback_allowed_ = rollback_allowed;
+  }
+
+  inline bool rollback_allowed() const { return rollback_allowed_; }
+
   inline void set_wall_clock_based_wait_enabled(bool enabled) {
     wall_clock_based_wait_enabled_ = enabled;
   }
@@ -204,7 +170,6 @@
 
   // Suggested defaults
   static const char kOsVersion[];
-  static const char kIsPowerwashAllowedKey[];
   static const int64_t kDefaultMinUpdateChecks = 0;
   static const int64_t kDefaultMaxUpdateChecks = 8;
 
@@ -249,6 +214,21 @@
   void set_target_channel(const std::string& channel) {
     mutable_image_props_.target_channel = channel;
   }
+  void set_os_sp(const std::string& os_sp) { os_sp_ = os_sp; }
+  void set_os_board(const std::string& os_board) {
+    image_props_.board = os_board;
+  }
+  void set_app_lang(const std::string& app_lang) { app_lang_ = app_lang; }
+  void set_hwid(const std::string& hwid) { hwid_ = hwid; }
+  void set_fw_version(const std::string& fw_version) {
+    fw_version_ = fw_version;
+  }
+  void set_ec_version(const std::string& ec_version) {
+    ec_version_ = ec_version;
+  }
+  void set_is_powerwash_allowed(bool powerwash_allowed) {
+    mutable_image_props_.is_powerwash_allowed = powerwash_allowed;
+  }
 
  private:
   FRIEND_TEST(OmahaRequestParamsTest, ChannelIndexTest);
@@ -279,15 +259,6 @@
   // Compares hwid to a set of whitelisted prefixes.
   bool CollectECFWVersions() const;
 
-  // These are individual helper methods to initialize the said properties from
-  // the LSB value.
-  void SetTargetChannelFromLsbValue();
-  void SetCurrentChannelFromLsbValue();
-  void SetIsPowerwashAllowedFromLsbValue();
-
-  // Initializes the required properties from the LSB value.
-  void InitFromLsbValue();
-
   // Gets the machine type (e.g. "i686").
   std::string GetMachineType() const;
 
@@ -337,14 +308,17 @@
   // to be pinned to. It's empty otherwise.
   std::string target_version_prefix_;
 
-  // True if scattering is enabled, in which case waiting_period_ specifies the
-  // amount of absolute time that we've to wait for before sending a request to
-  // Omaha.
+  // Whether the client is accepting rollback images too.
+  bool rollback_allowed_;
+
+  // True if scattering or staging is enabled, in which case waiting_period_
+  // specifies the amount of absolute time we have to wait before sending a
+  // request to Omaha.
   bool wall_clock_based_wait_enabled_;
   base::TimeDelta waiting_period_;
 
-  // True if scattering is enabled to denote the number of update checks
-  // we've to skip before we can send a request to Omaha. The min and max
+  // True if scattering or staging is enabled; denotes the number of update
+  // checks we have to skip before we can send a request to Omaha. The min and max
   // values establish the bounds for a random number to be chosen within that
   // range to enable such a wait.
   bool update_check_count_wait_enabled_;
@@ -354,9 +328,7 @@
   // When reading files, prepend root_ to the paths. Useful for testing.
   std::string root_;
 
-  // TODO(jaysri): Uncomment this after fixing unit tests, as part of
-  // chromium-os:39752
-  // DISALLOW_COPY_AND_ASSIGN(OmahaRequestParams);
+  DISALLOW_COPY_AND_ASSIGN(OmahaRequestParams);
 };
 
 }  // namespace chromeos_update_engine
diff --git a/omaha_request_params_unittest.cc b/omaha_request_params_unittest.cc
index ce77f31..7332431 100644
--- a/omaha_request_params_unittest.cc
+++ b/omaha_request_params_unittest.cc
@@ -44,9 +44,6 @@
   void SetUp() override {
     // Create a uniquely named test directory.
     ASSERT_TRUE(tempdir_.CreateUniqueTempDir());
-    // Create a fresh copy of the params for each test, so there's no
-    // unintended reuse of state across tests.
-    params_ = OmahaRequestParams(&fake_system_state_);
     params_.set_root(tempdir_.GetPath().value());
     SetLockDown(false);
     fake_system_state_.set_prefs(&fake_prefs_);
@@ -57,8 +54,8 @@
     fake_system_state_.fake_hardware()->SetIsNormalBootMode(locked_down);
   }
 
-  OmahaRequestParams params_;
   FakeSystemState fake_system_state_;
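+  // |params_| is declared after |fake_system_state_| so that the pointer
+  // passed to its constructor refers to an already-constructed object.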
+  OmahaRequestParams params_{&fake_system_state_};
   FakePrefs fake_prefs_;
 
   base::ScopedTempDir tempdir_;
@@ -259,9 +256,6 @@
 
   params_.hwid_ = string("SNOW 12345");
   EXPECT_TRUE(params_.CollectECFWVersions());
-
-  params_.hwid_ = string("SAMS ALEX 12345");
-  EXPECT_TRUE(params_.CollectECFWVersions());
 }
 
 }  // namespace chromeos_update_engine
diff --git a/omaha_response.h b/omaha_response.h
index b973eb5..0ac09df 100644
--- a/omaha_response.h
+++ b/omaha_response.h
@@ -21,6 +21,7 @@
 #include <sys/stat.h>
 #include <sys/types.h>
 
+#include <limits>
 #include <string>
 #include <vector>
 
@@ -72,6 +73,9 @@
   // True if the Omaha rule instructs us to disable p2p for sharing.
   bool disable_p2p_for_sharing = false;
 
+  // True if the Omaha rule instructs us to powerwash.
+  bool powerwash_required = false;
+
   // If not blank, a base-64 encoded representation of the PEM-encoded
   // public key in the response.
   std::string public_key_rsa;
@@ -80,6 +84,24 @@
   // PST, according to the Omaha Server's clock and timezone (PST8PDT,
   // aka "Pacific Time".)
   int install_date_days = -1;
+
+  // True if the returned image is a rollback for the device.
+  bool is_rollback = false;
+
+  struct RollbackKeyVersion {
+    // Kernel key version. 0xffff if the value is unknown.
+    uint16_t kernel_key = std::numeric_limits<uint16_t>::max();
+    // Kernel version. 0xffff if the value is unknown.
+    uint16_t kernel = std::numeric_limits<uint16_t>::max();
+    // Firmware key version. 0xffff if the value is unknown.
+    uint16_t firmware_key = std::numeric_limits<uint16_t>::max();
+    // Firmware version. 0xffff if the value is unknown.
+    uint16_t firmware = std::numeric_limits<uint16_t>::max();
+  };
+
+  // Key versions of the returned rollback image. Values are 0xffff if the
+  // image is not a rollback or the fields were not present.
+  RollbackKeyVersion rollback_key_version;
 };
 static_assert(sizeof(off_t) == 8, "off_t not 64 bit");
 
diff --git a/omaha_response_handler_action.cc b/omaha_response_handler_action.cc
index 2d6105a..c1fe854 100644
--- a/omaha_response_handler_action.cc
+++ b/omaha_response_handler_action.cc
@@ -16,11 +16,11 @@
 
 #include "update_engine/omaha_response_handler_action.h"
 
+#include <limits>
 #include <string>
 
 #include <base/logging.h>
 #include <base/strings/string_number_conversions.h>
-#include <base/strings/string_util.h>
 #include <policy/device_policy.h>
 
 #include "update_engine/common/constants.h"
@@ -36,29 +36,23 @@
 
 using chromeos_update_manager::Policy;
 using chromeos_update_manager::UpdateManager;
+using std::numeric_limits;
 using std::string;
 
 namespace chromeos_update_engine {
 
 OmahaResponseHandlerAction::OmahaResponseHandlerAction(
     SystemState* system_state)
-    : OmahaResponseHandlerAction(system_state,
-                                 constants::kOmahaResponseDeadlineFile) {}
-
-OmahaResponseHandlerAction::OmahaResponseHandlerAction(
-    SystemState* system_state, const string& deadline_file)
     : system_state_(system_state),
-      got_no_update_response_(false),
-      key_path_(constants::kUpdatePayloadPublicKeyPath),
-      deadline_file_(deadline_file) {}
+      deadline_file_(constants::kOmahaResponseDeadlineFile) {}
 
 void OmahaResponseHandlerAction::PerformAction() {
   CHECK(HasInputObject());
   ScopedActionCompleter completer(processor_, this);
   const OmahaResponse& response = GetInputObject();
   if (!response.update_exists) {
-    got_no_update_response_ = true;
     LOG(INFO) << "There are no updates. Aborting.";
+    completer.set_code(ErrorCode::kNoUpdate);
     return;
   }
 
@@ -139,7 +133,39 @@
   system_state_->prefs()->SetString(current_channel_key,
                                     params->download_channel());
 
-  if (params->ShouldPowerwash())
+  // Check whether the device is able to boot the returned rollback image.
+  if (response.is_rollback) {
+    if (!params->rollback_allowed()) {
+      LOG(ERROR) << "Received rollback image but rollback is not allowed.";
+      completer.set_code(ErrorCode::kOmahaResponseInvalid);
+      return;
+    }
+    auto min_kernel_key_version = static_cast<uint32_t>(
+        system_state_->hardware()->GetMinKernelKeyVersion());
+    auto min_firmware_key_version = static_cast<uint32_t>(
+        system_state_->hardware()->GetMinFirmwareKeyVersion());
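+    // Combine the key version (high 16 bits) and rollback version (low 16
+    // bits) into the same 32-bit layout as the minimum versions read above.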
+    uint32_t kernel_key_version =
+        static_cast<uint32_t>(response.rollback_key_version.kernel_key) << 16 |
+        static_cast<uint32_t>(response.rollback_key_version.kernel);
+    uint32_t firmware_key_version =
+        static_cast<uint32_t>(response.rollback_key_version.firmware_key)
+            << 16 |
+        static_cast<uint32_t>(response.rollback_key_version.firmware);
+
+    // Don't attempt a rollback if the versions are incompatible or the
+    // target image does not specify the version information.
+    if (kernel_key_version == numeric_limits<uint32_t>::max() ||
+        firmware_key_version == numeric_limits<uint32_t>::max() ||
+        kernel_key_version < min_kernel_key_version ||
+        firmware_key_version < min_firmware_key_version) {
+      LOG(ERROR) << "Device won't be able to boot up the rollback image.";
+      completer.set_code(ErrorCode::kRollbackNotPossible);
+      return;
+    }
+    install_plan_.is_rollback = true;
+  }
+
+  if (response.powerwash_required || params->ShouldPowerwash())
     install_plan_.powerwash_required = true;
 
   TEST_AND_RETURN(HasOutputPipe());
@@ -156,9 +182,16 @@
   // method and UpdateStatus signal. A potential issue is that update_engine may
   // be unresponsive during an update download.
   if (!deadline_file_.empty()) {
-    utils::WriteFile(deadline_file_.c_str(),
-                     response.deadline.data(),
-                     response.deadline.size());
+    if (payload_state->GetRollbackHappened()) {
+      // Don't do forced update if rollback has happened since the last update
+      // check where policy was present.
+      LOG(INFO) << "Not forcing update because a rollback happened.";
+      utils::WriteFile(deadline_file_.c_str(), nullptr, 0);
+    } else {
+      utils::WriteFile(deadline_file_.c_str(),
+                       response.deadline.data(),
+                       response.deadline.size());
+    }
     chmod(deadline_file_.c_str(), S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
   }
 
@@ -202,37 +235,8 @@
     }
   }
 
-  // If we're using p2p, |install_plan_.download_url| may contain a
-  // HTTP URL even if |response.payload_urls| contain only HTTPS URLs.
-  if (!base::StartsWith(install_plan_.download_url, "https://",
-                        base::CompareCase::INSENSITIVE_ASCII)) {
-    LOG(INFO) << "Mandating hash checks since download_url is not HTTPS.";
-    return true;
-  }
-
-  // TODO(jaysri): VALIDATION: For official builds, we currently waive hash
-  // checks for HTTPS until we have rolled out at least once and are confident
-  // nothing breaks. chromium-os:37082 tracks turning this on for HTTPS
-  // eventually.
-
-  // Even if there's a single non-HTTPS URL, make the hash checks as
-  // mandatory because we could be downloading the payload from any URL later
-  // on. It's really hard to do book-keeping based on each byte being
-  // downloaded to see whether we only used HTTPS throughout.
-  for (const auto& package : response.packages) {
-    for (const string& payload_url : package.payload_urls) {
-      if (!base::StartsWith(
-              payload_url, "https://", base::CompareCase::INSENSITIVE_ASCII)) {
-        LOG(INFO) << "Mandating payload hash checks since Omaha response "
-                  << "contains non-HTTPS URL(s)";
-        return true;
-      }
-    }
-  }
-
-  LOG(INFO) << "Waiving payload hash checks since Omaha response "
-            << "only has HTTPS URL(s)";
-  return false;
+  LOG(INFO) << "Mandating hash checks for official URL on official build.";
+  return true;
 }
 
 }  // namespace chromeos_update_engine
diff --git a/omaha_response_handler_action.h b/omaha_response_handler_action.h
index 2974841..344fc1d 100644
--- a/omaha_response_handler_action.h
+++ b/omaha_response_handler_action.h
@@ -54,13 +54,11 @@
   // never be called
   void TerminateProcessing() override { CHECK(false); }
 
-  bool GotNoUpdateResponse() const { return got_no_update_response_; }
   const InstallPlan& install_plan() const { return install_plan_; }
 
   // Debugging/logging
   static std::string StaticType() { return "OmahaResponseHandlerAction"; }
   std::string Type() const override { return StaticType(); }
-  void set_key_path(const std::string& path) { key_path_ = path; }
 
  private:
   // Returns true if payload hash checks are mandatory based on the state
@@ -73,22 +71,18 @@
   // The install plan, if we have an update.
   InstallPlan install_plan_;
 
-  // True only if we got a response and the response said no updates
-  bool got_no_update_response_;
-
-  // Public key path to use for payload verification.
-  std::string key_path_;
-
   // File used for communication deadline to Chrome.
-  const std::string deadline_file_;
-
-  // Special ctor + friend declarations for testing purposes.
-  OmahaResponseHandlerAction(SystemState* system_state,
-                             const std::string& deadline_file);
+  std::string deadline_file_;
 
   friend class OmahaResponseHandlerActionTest;
-
+  friend class OmahaResponseHandlerActionProcessorDelegate;
   FRIEND_TEST(UpdateAttempterTest, CreatePendingErrorEventResumedTest);
+  FRIEND_TEST(UpdateAttempterTest, RollbackMetricsNotRollbackFailure);
+  FRIEND_TEST(UpdateAttempterTest, RollbackMetricsNotRollbackSuccess);
+  FRIEND_TEST(UpdateAttempterTest, RollbackMetricsRollbackFailure);
+  FRIEND_TEST(UpdateAttempterTest, RollbackMetricsRollbackSuccess);
+  FRIEND_TEST(UpdateAttempterTest, SetRollbackHappenedNotRollback);
+  FRIEND_TEST(UpdateAttempterTest, SetRollbackHappenedRollback);
   FRIEND_TEST(UpdateAttempterTest, UpdateDeferredByPolicyTest);
 
   DISALLOW_COPY_AND_ASSIGN(OmahaResponseHandlerAction);
diff --git a/omaha_response_handler_action_unittest.cc b/omaha_response_handler_action_unittest.cc
index 9e2cdd1..e4d05f4 100644
--- a/omaha_response_handler_action_unittest.cc
+++ b/omaha_response_handler_action_unittest.cc
@@ -18,6 +18,7 @@
 
 #include <memory>
 #include <string>
+#include <utility>
 
 #include <base/files/file_util.h>
 #include <base/files/scoped_temp_dir.h>
@@ -46,38 +47,6 @@
 
 namespace chromeos_update_engine {
 
-class OmahaResponseHandlerActionTest : public ::testing::Test {
- protected:
-  void SetUp() override {
-    FakeBootControl* fake_boot_control = fake_system_state_.fake_boot_control();
-    fake_boot_control->SetPartitionDevice(
-        kLegacyPartitionNameKernel, 0, "/dev/sdz2");
-    fake_boot_control->SetPartitionDevice(
-        kLegacyPartitionNameRoot, 0, "/dev/sdz3");
-    fake_boot_control->SetPartitionDevice(
-        kLegacyPartitionNameKernel, 1, "/dev/sdz4");
-    fake_boot_control->SetPartitionDevice(
-        kLegacyPartitionNameRoot, 1, "/dev/sdz5");
-  }
-
-  // Return true iff the OmahaResponseHandlerAction succeeded.
-  // If out is non-null, it's set w/ the response from the action.
-  bool DoTest(const OmahaResponse& in,
-              const string& deadline_file,
-              InstallPlan* out);
-
-  // Pointer to the Action, valid after |DoTest|, released when the test is
-  // finished.
-  std::unique_ptr<OmahaResponseHandlerAction> action_;
-  // Captures the action's result code, for tests that need to directly verify
-  // it in non-success cases.
-  ErrorCode action_result_code_;
-
-  FakeSystemState fake_system_state_;
-  // "Hash+"
-  const brillo::Blob expected_hash_ = {0x48, 0x61, 0x73, 0x68, 0x2b};
-};
-
 class OmahaResponseHandlerActionProcessorDelegate
     : public ActionProcessorDelegate {
  public:
@@ -87,12 +56,56 @@
                        AbstractAction* action,
                        ErrorCode code) {
     if (action->Type() == OmahaResponseHandlerAction::StaticType()) {
+      auto response_handler_action =
+          static_cast<OmahaResponseHandlerAction*>(action);
       code_ = code;
       code_set_ = true;
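+      // Copy the install plan here; the processor owns the action and will
+      // destroy it once processing finishes.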
+      response_handler_action_install_plan_.reset(
+          new InstallPlan(response_handler_action->install_plan_));
+    } else if (action->Type() ==
+               ObjectCollectorAction<InstallPlan>::StaticType()) {
+      auto collector_action =
+          static_cast<ObjectCollectorAction<InstallPlan>*>(action);
+      collector_action_install_plan_.reset(
+          new InstallPlan(collector_action->object()));
     }
   }
   ErrorCode code_;
   bool code_set_;
+  std::unique_ptr<InstallPlan> collector_action_install_plan_;
+  std::unique_ptr<InstallPlan> response_handler_action_install_plan_;
+};
+
+class OmahaResponseHandlerActionTest : public ::testing::Test {
+ protected:
+  void SetUp() override {
+    FakeBootControl* fake_boot_control = fake_system_state_.fake_boot_control();
+    fake_boot_control->SetPartitionDevice(
+        kPartitionNameKernel, 0, "/dev/sdz2");
+    fake_boot_control->SetPartitionDevice(
+        kPartitionNameRoot, 0, "/dev/sdz3");
+    fake_boot_control->SetPartitionDevice(
+        kPartitionNameKernel, 1, "/dev/sdz4");
+    fake_boot_control->SetPartitionDevice(
+        kPartitionNameRoot, 1, "/dev/sdz5");
+  }
+
+  // Return true iff the OmahaResponseHandlerAction succeeded.
+  // If out is non-null, it's set w/ the response from the action.
+  bool DoTest(const OmahaResponse& in,
+              const string& deadline_file,
+              InstallPlan* out);
+
+  // Delegate passed to the ActionProcessor.
+  OmahaResponseHandlerActionProcessorDelegate delegate_;
+
+  // Captures the action's result code, for tests that need to directly verify
+  // it in non-success cases.
+  ErrorCode action_result_code_;
+
+  FakeSystemState fake_system_state_;
+  // "Hash+"
+  const brillo::Blob expected_hash_ = {0x48, 0x61, 0x73, 0x68, 0x2b};
 };
 
 namespace {
@@ -115,11 +128,10 @@
   brillo::FakeMessageLoop loop(nullptr);
   loop.SetAsCurrent();
   ActionProcessor processor;
-  OmahaResponseHandlerActionProcessorDelegate delegate;
-  processor.set_delegate(&delegate);
+  processor.set_delegate(&delegate_);
 
-  ObjectFeederAction<OmahaResponse> feeder_action;
-  feeder_action.set_obj(in);
+  auto feeder_action = std::make_unique<ObjectFeederAction<OmahaResponse>>();
+  feeder_action->set_obj(in);
   if (in.update_exists && in.version != kBadVersion) {
     string expected_hash;
     for (const auto& package : in.packages)
@@ -138,32 +150,34 @@
   EXPECT_CALL(*(fake_system_state_.mock_payload_state()), GetCurrentUrl())
       .WillRepeatedly(Return(current_url));
 
-  action_.reset(new OmahaResponseHandlerAction(
-      &fake_system_state_,
-      (test_deadline_file.empty() ? constants::kOmahaResponseDeadlineFile
-                                  : test_deadline_file)));
-  BondActions(&feeder_action, action_.get());
-  ObjectCollectorAction<InstallPlan> collector_action;
-  BondActions(action_.get(), &collector_action);
-  processor.EnqueueAction(&feeder_action);
-  processor.EnqueueAction(action_.get());
-  processor.EnqueueAction(&collector_action);
+  auto response_handler_action =
+      std::make_unique<OmahaResponseHandlerAction>(&fake_system_state_);
+  if (!test_deadline_file.empty())
+    response_handler_action->deadline_file_ = test_deadline_file;
+
+  auto collector_action =
+      std::make_unique<ObjectCollectorAction<InstallPlan>>();
+
+  BondActions(feeder_action.get(), response_handler_action.get());
+  BondActions(response_handler_action.get(), collector_action.get());
+  processor.EnqueueAction(std::move(feeder_action));
+  processor.EnqueueAction(std::move(response_handler_action));
+  processor.EnqueueAction(std::move(collector_action));
   processor.StartProcessing();
   EXPECT_TRUE(!processor.IsRunning())
       << "Update test to handle non-async actions";
-  if (out)
-    *out = collector_action.object();
-  EXPECT_TRUE(delegate.code_set_);
-  action_result_code_ = delegate.code_;
-  return delegate.code_ == ErrorCode::kSuccess;
+
+  if (out && delegate_.collector_action_install_plan_)
+    *out = *delegate_.collector_action_install_plan_;
+
+  EXPECT_TRUE(delegate_.code_set_);
+  action_result_code_ = delegate_.code_;
+  return delegate_.code_ == ErrorCode::kSuccess;
 }
 
 TEST_F(OmahaResponseHandlerActionTest, SimpleTest) {
-  string test_deadline_file;
-  CHECK(utils::MakeTempFile("omaha_response_handler_action_unittest-XXXXXX",
-                            &test_deadline_file,
-                            nullptr));
-  ScopedPathUnlinker deadline_unlinker(test_deadline_file);
+  test_utils::ScopedTempFile test_deadline_file(
+      "omaha_response_handler_action_unittest-XXXXXX");
   {
     OmahaResponse in;
     in.update_exists = true;
@@ -176,15 +190,15 @@
     in.prompt = false;
     in.deadline = "20101020";
     InstallPlan install_plan;
-    EXPECT_TRUE(DoTest(in, test_deadline_file, &install_plan));
+    EXPECT_TRUE(DoTest(in, test_deadline_file.path(), &install_plan));
     EXPECT_EQ(in.packages[0].payload_urls[0], install_plan.download_url);
     EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash);
     EXPECT_EQ(1U, install_plan.target_slot);
     string deadline;
-    EXPECT_TRUE(utils::ReadFile(test_deadline_file, &deadline));
+    EXPECT_TRUE(utils::ReadFile(test_deadline_file.path(), &deadline));
     EXPECT_EQ("20101020", deadline);
     struct stat deadline_stat;
-    EXPECT_EQ(0, stat(test_deadline_file.c_str(), &deadline_stat));
+    EXPECT_EQ(0, stat(test_deadline_file.path().c_str(), &deadline_stat));
     EXPECT_EQ(
         static_cast<mode_t>(S_IFREG | S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH),
         deadline_stat.st_mode);
@@ -203,12 +217,12 @@
     InstallPlan install_plan;
     // Set the other slot as current.
     fake_system_state_.fake_boot_control()->SetCurrentSlot(1);
-    EXPECT_TRUE(DoTest(in, test_deadline_file, &install_plan));
+    EXPECT_TRUE(DoTest(in, test_deadline_file.path(), &install_plan));
     EXPECT_EQ(in.packages[0].payload_urls[0], install_plan.download_url);
     EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash);
     EXPECT_EQ(0U, install_plan.target_slot);
     string deadline;
-    EXPECT_TRUE(utils::ReadFile(test_deadline_file, &deadline) &&
+    EXPECT_TRUE(utils::ReadFile(test_deadline_file.path(), &deadline) &&
                 deadline.empty());
     EXPECT_EQ(in.version, install_plan.version);
   }
@@ -223,12 +237,40 @@
     in.deadline = "some-deadline";
     InstallPlan install_plan;
     fake_system_state_.fake_boot_control()->SetCurrentSlot(0);
-    EXPECT_TRUE(DoTest(in, test_deadline_file, &install_plan));
+    // Because rollback happened, the deadline shouldn't be written into the
+    // file.
+    EXPECT_CALL(*(fake_system_state_.mock_payload_state()),
+                GetRollbackHappened())
+        .WillOnce(Return(true));
+    EXPECT_TRUE(DoTest(in, test_deadline_file.path(), &install_plan));
     EXPECT_EQ(in.packages[0].payload_urls[0], install_plan.download_url);
     EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash);
     EXPECT_EQ(1U, install_plan.target_slot);
     string deadline;
-    EXPECT_TRUE(utils::ReadFile(test_deadline_file, &deadline));
+    EXPECT_TRUE(utils::ReadFile(test_deadline_file.path(), &deadline));
+    EXPECT_TRUE(deadline.empty());
+    EXPECT_EQ(in.version, install_plan.version);
+  }
+  {
+    OmahaResponse in;
+    in.update_exists = true;
+    in.version = "a.b.c.d";
+    in.packages.push_back(
+        {.payload_urls = {kLongName}, .size = 12, .hash = kPayloadHashHex});
+    in.more_info_url = "http://more/info";
+    in.prompt = true;
+    in.deadline = "some-deadline";
+    InstallPlan install_plan;
+    fake_system_state_.fake_boot_control()->SetCurrentSlot(0);
+    EXPECT_CALL(*(fake_system_state_.mock_payload_state()),
+                GetRollbackHappened())
+        .WillOnce(Return(false));
+    EXPECT_TRUE(DoTest(in, test_deadline_file.path(), &install_plan));
+    EXPECT_EQ(in.packages[0].payload_urls[0], install_plan.download_url);
+    EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash);
+    EXPECT_EQ(1U, install_plan.target_slot);
+    string deadline;
+    EXPECT_TRUE(utils::ReadFile(test_deadline_file.path(), &deadline));
     EXPECT_EQ("some-deadline", deadline);
     EXPECT_EQ(in.version, install_plan.version);
   }
@@ -344,7 +386,7 @@
   EXPECT_TRUE(DoTest(in, "", &install_plan));
   EXPECT_EQ(in.packages[0].payload_urls[0], install_plan.download_url);
   EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash);
-  EXPECT_FALSE(install_plan.hash_checks_mandatory);
+  EXPECT_TRUE(install_plan.hash_checks_mandatory);
   EXPECT_EQ(in.version, install_plan.version);
 }
 
@@ -467,6 +509,109 @@
   EXPECT_TRUE(install_plan.hash_checks_mandatory);
 }
 
+TEST_F(OmahaResponseHandlerActionTest, RollbackTest) {
+  OmahaResponse in;
+  in.update_exists = true;
+  in.packages.push_back({.payload_urls = {"https://RollbackTest"},
+                         .size = 1,
+                         .hash = kPayloadHashHex});
+  in.is_rollback = true;
+  in.rollback_key_version.kernel_key = 1;
+  in.rollback_key_version.kernel = 2;
+  in.rollback_key_version.firmware_key = 3;
+  in.rollback_key_version.firmware = 4;
+
+  fake_system_state_.fake_hardware()->SetMinKernelKeyVersion(0x00010002);
+  fake_system_state_.fake_hardware()->SetMinFirmwareKeyVersion(0x00030004);
+
+  OmahaRequestParams params(&fake_system_state_);
+  params.set_rollback_allowed(true);
+
+  fake_system_state_.set_request_params(&params);
+  InstallPlan install_plan;
+  EXPECT_TRUE(DoTest(in, "", &install_plan));
+  EXPECT_TRUE(install_plan.is_rollback);
+}
+
+TEST_F(OmahaResponseHandlerActionTest, RollbackKernelVersionErrorTest) {
+  OmahaResponse in;
+  in.update_exists = true;
+  in.packages.push_back({.payload_urls = {"https://RollbackTest"},
+                         .size = 1,
+                         .hash = kPayloadHashHex});
+  in.is_rollback = true;
+  in.rollback_key_version.kernel_key = 1;
+  in.rollback_key_version.kernel = 1;  // This is lower than the minimum.
+  in.rollback_key_version.firmware_key = 3;
+  in.rollback_key_version.firmware = 4;
+
+  fake_system_state_.fake_hardware()->SetMinKernelKeyVersion(0x00010002);
+  fake_system_state_.fake_hardware()->SetMinFirmwareKeyVersion(0x00030004);
+
+  OmahaRequestParams params(&fake_system_state_);
+  params.set_rollback_allowed(true);
+
+  fake_system_state_.set_request_params(&params);
+  InstallPlan install_plan;
+  EXPECT_FALSE(DoTest(in, "", &install_plan));
+}
+
+TEST_F(OmahaResponseHandlerActionTest, RollbackFirmwareVersionErrorTest) {
+  OmahaResponse in;
+  in.update_exists = true;
+  in.packages.push_back({.payload_urls = {"https://RollbackTest"},
+                         .size = 1,
+                         .hash = kPayloadHashHex});
+  in.is_rollback = true;
+  in.rollback_key_version.kernel_key = 1;
+  in.rollback_key_version.kernel = 2;
+  in.rollback_key_version.firmware_key = 3;
+  in.rollback_key_version.firmware = 3;  // This is lower than the minimum.
+
+  fake_system_state_.fake_hardware()->SetMinKernelKeyVersion(0x00010002);
+  fake_system_state_.fake_hardware()->SetMinFirmwareKeyVersion(0x00030004);
+
+  OmahaRequestParams params(&fake_system_state_);
+  params.set_rollback_allowed(true);
+
+  fake_system_state_.set_request_params(&params);
+  InstallPlan install_plan;
+  EXPECT_FALSE(DoTest(in, "", &install_plan));
+}
+
+TEST_F(OmahaResponseHandlerActionTest, RollbackNotRollbackTest) {
+  OmahaResponse in;
+  in.update_exists = true;
+  in.packages.push_back({.payload_urls = {"https://RollbackTest"},
+                         .size = 1,
+                         .hash = kPayloadHashHex});
+  in.is_rollback = false;
+
+  OmahaRequestParams params(&fake_system_state_);
+  params.set_rollback_allowed(true);
+
+  fake_system_state_.set_request_params(&params);
+  InstallPlan install_plan;
+  EXPECT_TRUE(DoTest(in, "", &install_plan));
+  EXPECT_FALSE(install_plan.is_rollback);
+}
+
+TEST_F(OmahaResponseHandlerActionTest, RollbackNotAllowedTest) {
+  OmahaResponse in;
+  in.update_exists = true;
+  in.packages.push_back({.payload_urls = {"https://RollbackTest"},
+                         .size = 1,
+                         .hash = kPayloadHashHex});
+  in.is_rollback = true;
+
+  OmahaRequestParams params(&fake_system_state_);
+  params.set_rollback_allowed(false);
+
+  fake_system_state_.set_request_params(&params);
+  InstallPlan install_plan;
+  EXPECT_FALSE(DoTest(in, "", &install_plan));
+}
+
 TEST_F(OmahaResponseHandlerActionTest, SystemVersionTest) {
   OmahaResponse in;
   in.update_exists = true;
@@ -514,9 +659,8 @@
   EXPECT_EQ(ErrorCode::kOmahaUpdateDeferredPerPolicy, action_result_code_);
   // Verify that DoTest() didn't set the output install plan.
   EXPECT_EQ("", install_plan.version);
-  // Copy the underlying InstallPlan from the Action (like a real Delegate).
-  install_plan = action_->install_plan();
   // Now verify the InstallPlan that was generated.
+  install_plan = *delegate_.response_handler_action_install_plan_;
   EXPECT_EQ(in.packages[0].payload_urls[0], install_plan.download_url);
   EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash);
   EXPECT_EQ(1U, install_plan.target_slot);
diff --git a/p2p_manager_unittest.cc b/p2p_manager_unittest.cc
index 5ffb358..02fd17b 100644
--- a/p2p_manager_unittest.cc
+++ b/p2p_manager_unittest.cc
@@ -183,7 +183,7 @@
 }
 
 // Check that we keep files with the .$EXT.p2p extension not older
-// than some specificed age (5 days, in this test).
+// than some specific age (5 days, in this test).
 TEST_F(P2PManagerTest, HousekeepingAgeLimit) {
   // We set the cutoff time to be 1 billion seconds (01:46:40 UTC on 9
   // September 2001 - arbitrary number, but constant to avoid test
@@ -340,11 +340,6 @@
 
 // Check that sharing a *new* file works.
 TEST_F(P2PManagerTest, ShareFile) {
-  if (!test_utils::IsXAttrSupported(base::FilePath("/tmp"))) {
-    LOG(WARNING) << "Skipping test because /tmp does not support xattr. "
-                 << "Please update your system to support this feature.";
-    return;
-  }
   const int kP2PTestFileSize = 1000 * 1000;  // 1 MB
 
   EXPECT_TRUE(manager_->FileShare("foo", kP2PTestFileSize));
@@ -362,11 +357,6 @@
 
 // Check that making a shared file visible, does what is expected.
 TEST_F(P2PManagerTest, MakeFileVisible) {
-  if (!test_utils::IsXAttrSupported(base::FilePath("/tmp"))) {
-    LOG(WARNING) << "Skipping test because /tmp does not support xattr. "
-                 << "Please update your system to support this feature.";
-    return;
-  }
   const int kP2PTestFileSize = 1000 * 1000;  // 1 MB
 
   // First, check that it's not visible.
@@ -388,12 +378,6 @@
 
 // Check that we return the right values for existing files in P2P_DIR.
 TEST_F(P2PManagerTest, ExistingFiles) {
-  if (!test_utils::IsXAttrSupported(base::FilePath("/tmp"))) {
-    LOG(WARNING) << "Skipping test because /tmp does not support xattr. "
-                 << "Please update your system to support this feature.";
-    return;
-  }
-
   bool visible;
 
   // Check that errors are returned if the file does not exist
diff --git a/parcelable_update_engine_status.h b/parcelable_update_engine_status.h
index 82006e4..3feac76 100644
--- a/parcelable_update_engine_status.h
+++ b/parcelable_update_engine_status.h
@@ -14,8 +14,8 @@
 // limitations under the License.
 //
 
-#ifndef UPDATE_ENGINE_UPDATE_ENGINE_STATUS_H_
-#define UPDATE_ENGINE_UPDATE_ENGINE_STATUS_H_
+#ifndef UPDATE_ENGINE_PARCELABLE_UPDATE_ENGINE_STATUS_H_
+#define UPDATE_ENGINE_PARCELABLE_UPDATE_ENGINE_STATUS_H_
 
 #include <binder/Parcelable.h>
 #include <utils/String16.h>
@@ -60,4 +60,4 @@
 }  // namespace brillo
 }  // namespace android
 
-#endif  // UPDATE_ENGINE_UPDATE_ENGINE_STATUS_H_
+#endif  // UPDATE_ENGINE_PARCELABLE_UPDATE_ENGINE_STATUS_H_
diff --git a/payload_consumer/bzip_extent_writer.cc b/payload_consumer/bzip_extent_writer.cc
index 39d9d67..7828589 100644
--- a/payload_consumer/bzip_extent_writer.cc
+++ b/payload_consumer/bzip_extent_writer.cc
@@ -24,6 +24,10 @@
 const brillo::Blob::size_type kOutputBufferLength = 16 * 1024;
 }
 
+BzipExtentWriter::~BzipExtentWriter() {
+  TEST_AND_RETURN(BZ2_bzDecompressEnd(&stream_) == BZ_OK);
+}
+
 bool BzipExtentWriter::Init(FileDescriptorPtr fd,
                             const RepeatedPtrField<Extent>& extents,
                             uint32_t block_size) {
@@ -84,7 +88,6 @@
 
 bool BzipExtentWriter::EndImpl() {
   TEST_AND_RETURN_FALSE(input_buffer_.empty());
-  TEST_AND_RETURN_FALSE(BZ2_bzDecompressEnd(&stream_) == BZ_OK);
   return next_->End();
 }
 
diff --git a/payload_consumer/bzip_extent_writer.h b/payload_consumer/bzip_extent_writer.h
index 86b346a..710727f 100644
--- a/payload_consumer/bzip_extent_writer.h
+++ b/payload_consumer/bzip_extent_writer.h
@@ -38,7 +38,7 @@
       : next_(std::move(next)) {
     memset(&stream_, 0, sizeof(stream_));
   }
-  ~BzipExtentWriter() override = default;
+  ~BzipExtentWriter() override;
 
   bool Init(FileDescriptorPtr fd,
             const google::protobuf::RepeatedPtrField<Extent>& extents,
diff --git a/payload_consumer/bzip_extent_writer_unittest.cc b/payload_consumer/bzip_extent_writer_unittest.cc
index bf050ef..4426876 100644
--- a/payload_consumer/bzip_extent_writer_unittest.cc
+++ b/payload_consumer/bzip_extent_writer_unittest.cc
@@ -100,8 +100,7 @@
   for (size_t i = 0; i < decompressed_data.size(); ++i)
     decompressed_data[i] = static_cast<uint8_t>("ABC\n"[i % 4]);
 
-  vector<Extent> extents = {
-      ExtentForRange(0, (kDecompressedLength + kBlockSize - 1) / kBlockSize)};
+  vector<Extent> extents = {ExtentForBytes(kBlockSize, 0, kDecompressedLength)};
 
   BzipExtentWriter bzip_writer(std::make_unique<DirectExtentWriter>());
   EXPECT_TRUE(
diff --git a/payload_consumer/cached_file_descriptor_unittest.cc b/payload_consumer/cached_file_descriptor_unittest.cc
index 6a6302a..d2965fc 100644
--- a/payload_consumer/cached_file_descriptor_unittest.cc
+++ b/payload_consumer/cached_file_descriptor_unittest.cc
@@ -159,7 +159,7 @@
   off64_t seek = 10;
   brillo::Blob blob_in(kFileSize, 0);
   std::fill_n(&blob_in[seek], kCacheSize, value_);
-  // We are writing exactly one cache size; Then it should be commited.
+  // We are writing exactly one cache size; Then it should be committed.
   EXPECT_EQ(cfd_->Seek(seek, SEEK_SET), seek);
   Write(&blob_in[seek], kCacheSize);
 
@@ -174,7 +174,7 @@
   EXPECT_EQ(cfd_->Seek(seek, SEEK_SET), seek);
   brillo::Blob blob_in(kFileSize, 0);
   std::fill_n(&blob_in[seek], less_than_cache_size, value_);
-  // We are writing less than one cache size; then it should not be commited.
+  // We are writing less than one cache size; then it should not be committed.
   Write(&blob_in[seek], less_than_cache_size);
 
   // Revert the changes in |blob_in|.
@@ -190,7 +190,7 @@
   EXPECT_EQ(cfd_->Seek(seek, SEEK_SET), seek);
   brillo::Blob blob_in(kFileSize, 0);
   std::fill_n(&blob_in[seek], less_than_cache_size, value_);
-  // We are writing less than  one cache size; then it should not be commited.
+  // We are writing less than  one cache size; then it should not be committed.
   Write(&blob_in[seek], less_than_cache_size);
 
   // Then we seek, it should've written the cache after seek.
diff --git a/payload_consumer/delta_performer.cc b/payload_consumer/delta_performer.cc
index a619d1d..f7323f9 100644
--- a/payload_consumer/delta_performer.cc
+++ b/payload_consumer/delta_performer.cc
@@ -21,6 +21,7 @@
 
 #include <algorithm>
 #include <cstring>
+#include <map>
 #include <memory>
 #include <string>
 #include <utility>
@@ -48,11 +49,14 @@
 #include "update_engine/payload_consumer/download_action.h"
 #include "update_engine/payload_consumer/extent_reader.h"
 #include "update_engine/payload_consumer/extent_writer.h"
+#if USE_FEC
+#include "update_engine/payload_consumer/fec_file_descriptor.h"
+#endif  // USE_FEC
 #include "update_engine/payload_consumer/file_descriptor_utils.h"
 #include "update_engine/payload_consumer/mount_history.h"
 #if USE_MTD
 #include "update_engine/payload_consumer/mtd_file_descriptor.h"
-#endif
+#endif  // USE_MTD
 #include "update_engine/payload_consumer/payload_constants.h"
 #include "update_engine/payload_consumer/payload_verifier.h"
 #include "update_engine/payload_consumer/xz_extent_writer.h"
@@ -63,10 +67,6 @@
 using std::vector;
 
 namespace chromeos_update_engine {
-
-const uint64_t DeltaPerformer::kSupportedMajorPayloadVersion = 2;
-const uint32_t DeltaPerformer::kSupportedMinorPayloadVersion = 5;
-
 const unsigned DeltaPerformer::kProgressLogMaxChunks = 10;
 const unsigned DeltaPerformer::kProgressLogTimeoutSeconds = 30;
 const unsigned DeltaPerformer::kProgressDownloadWeight = 50;
@@ -327,6 +327,14 @@
       err = 1;
   }
   source_fd_.reset();
+  if (source_ecc_fd_ && !source_ecc_fd_->Close()) {
+    err = errno;
+    PLOG(ERROR) << "Error closing ECC source partition";
+    if (!err)
+      err = 1;
+  }
+  source_ecc_fd_.reset();
+  source_ecc_open_failure_ = false;
   source_path_.clear();
 
   if (target_fd_ && !target_fd_->Close()) {
@@ -368,11 +376,11 @@
   int err;
 
   int flags = O_RDWR;
-  if (!is_interactive_)
+  if (!interactive_)
     flags |= O_DSYNC;
 
   LOG(INFO) << "Opening " << target_path_ << " partition with"
-            << (is_interactive_ ? "out" : "") << " O_DSYNC";
+            << (interactive_ ? "out" : "") << " O_DSYNC";
 
   target_fd_ = OpenFile(target_path_.c_str(), flags, true, &err);
   if (!target_fd_) {
@@ -393,6 +401,46 @@
   return true;
 }
 
+bool DeltaPerformer::OpenCurrentECCPartition() {
+  if (source_ecc_fd_)
+    return true;
+
+  if (source_ecc_open_failure_)
+    return false;
+
+  if (current_partition_ >= partitions_.size())
+    return false;
+
+  // No support for ECC in minor version 1 or full payloads.
+  if (payload_->type == InstallPayloadType::kFull ||
+      GetMinorVersion() == kInPlaceMinorPayloadVersion)
+    return false;
+
+#if USE_FEC
+  const PartitionUpdate& partition = partitions_[current_partition_];
+  size_t num_previous_partitions =
+      install_plan_->partitions.size() - partitions_.size();
+  const InstallPlan::Partition& install_part =
+      install_plan_->partitions[num_previous_partitions + current_partition_];
+  string path = install_part.source_path;
+  FileDescriptorPtr fd(new FecFileDescriptor());
+  if (!fd->Open(path.c_str(), O_RDONLY, 0)) {
+    PLOG(ERROR) << "Unable to open ECC source partition "
+                << partition.partition_name() << " on slot "
+                << BootControlInterface::SlotName(install_plan_->source_slot)
+                << ", file " << path;
+    source_ecc_open_failure_ = true;
+    return false;
+  }
+  source_ecc_fd_ = fd;
+#else
+  // No support for ECC compiled.
+  source_ecc_open_failure_ = true;
+#endif  // USE_FEC
+
+  return !source_ecc_open_failure_;
+}
+
 namespace {
 
 void LogPartitionInfoHash(const PartitionInfo& info, const string& tag) {
@@ -417,11 +465,10 @@
 uint32_t DeltaPerformer::GetMinorVersion() const {
   if (manifest_.has_minor_version()) {
     return manifest_.minor_version();
-  } else {
-    return payload_->type == InstallPayloadType::kDelta
-               ? kSupportedMinorPayloadVersion
-               : kFullPayloadMinorVersion;
   }
+  return payload_->type == InstallPayloadType::kDelta
+             ? kMaxSupportedMinorPayloadVersion
+             : kFullPayloadMinorVersion;
 }
 
 bool DeltaPerformer::IsHeaderParsed() const {
@@ -433,8 +480,8 @@
   *error = ErrorCode::kSuccess;
 
   if (!IsHeaderParsed()) {
-    MetadataParseResult result = payload_metadata_.ParsePayloadHeader(
-        payload, supported_major_version_, error);
+    MetadataParseResult result =
+        payload_metadata_.ParsePayloadHeader(payload, error);
     if (result != MetadataParseResult::kSuccess)
       return result;
 
@@ -561,6 +608,8 @@
     // Clear the download buffer.
     DiscardBuffer(false, metadata_size_);
 
+    block_size_ = manifest_.block_size();
+
     // This populates |partitions_| and the |install_plan.partitions| with the
     // list of partitions from the manifest.
     if (!ParseManifestPartitions(error))
@@ -591,9 +640,11 @@
       return false;
     }
 
-    if (!OpenCurrentPartition()) {
-      *error = ErrorCode::kInstallDeviceOpenError;
-      return false;
+    if (next_operation_num_ < acc_num_operations_[current_partition_]) {
+      if (!OpenCurrentPartition()) {
+        *error = ErrorCode::kInstallDeviceOpenError;
+        return false;
+      }
     }
 
     if (next_operation_num_ > 0)
@@ -610,9 +661,12 @@
 
     // We know there are more operations to perform because we didn't reach the
     // |num_total_operations_| limit yet.
-    while (next_operation_num_ >= acc_num_operations_[current_partition_]) {
+    if (next_operation_num_ >= acc_num_operations_[current_partition_]) {
       CloseCurrentPartition();
-      current_partition_++;
+      // Skip until there are operations for current_partition_.
+      while (next_operation_num_ >= acc_num_operations_[current_partition_]) {
+        current_partition_++;
+      }
       if (!OpenCurrentPartition()) {
         *error = ErrorCode::kInstallDeviceOpenError;
         return false;
@@ -754,7 +808,7 @@
   } else if (major_payload_version_ == kChromeOSMajorPayloadVersion) {
     LOG(INFO) << "Converting update information from old format.";
     PartitionUpdate root_part;
-    root_part.set_partition_name(kLegacyPartitionNameRoot);
+    root_part.set_partition_name(kPartitionNameRoot);
 #ifdef __ANDROID__
     LOG(WARNING) << "Legacy payload major version provided to an Android "
                     "build. Assuming no post-install. Please use major version "
@@ -776,7 +830,7 @@
     partitions_.push_back(std::move(root_part));
 
     PartitionUpdate kern_part;
-    kern_part.set_partition_name(kLegacyPartitionNameKernel);
+    kern_part.set_partition_name(kPartitionNameKernel);
     kern_part.set_run_postinstall(false);
     if (manifest_.has_old_kernel_info()) {
       *kern_part.mutable_old_partition_info() = manifest_.old_kernel_info();
@@ -822,9 +876,62 @@
     install_part.target_size = info.size();
     install_part.target_hash.assign(info.hash().begin(), info.hash().end());
 
+    install_part.block_size = block_size_;
+    if (partition.has_hash_tree_extent()) {
+      Extent extent = partition.hash_tree_data_extent();
+      install_part.hash_tree_data_offset = extent.start_block() * block_size_;
+      install_part.hash_tree_data_size = extent.num_blocks() * block_size_;
+      extent = partition.hash_tree_extent();
+      install_part.hash_tree_offset = extent.start_block() * block_size_;
+      install_part.hash_tree_size = extent.num_blocks() * block_size_;
+      uint64_t hash_tree_data_end =
+          install_part.hash_tree_data_offset + install_part.hash_tree_data_size;
+      if (install_part.hash_tree_offset < hash_tree_data_end) {
+        LOG(ERROR) << "Invalid hash tree extents, hash tree data ends at "
+                   << hash_tree_data_end << ", but hash tree starts at "
+                   << install_part.hash_tree_offset;
+        *error = ErrorCode::kDownloadNewPartitionInfoError;
+        return false;
+      }
+      install_part.hash_tree_algorithm = partition.hash_tree_algorithm();
+      install_part.hash_tree_salt.assign(partition.hash_tree_salt().begin(),
+                                         partition.hash_tree_salt().end());
+    }
+    if (partition.has_fec_extent()) {
+      Extent extent = partition.fec_data_extent();
+      install_part.fec_data_offset = extent.start_block() * block_size_;
+      install_part.fec_data_size = extent.num_blocks() * block_size_;
+      extent = partition.fec_extent();
+      install_part.fec_offset = extent.start_block() * block_size_;
+      install_part.fec_size = extent.num_blocks() * block_size_;
+      uint64_t fec_data_end =
+          install_part.fec_data_offset + install_part.fec_data_size;
+      if (install_part.fec_offset < fec_data_end) {
+        LOG(ERROR) << "Invalid fec extents, fec data ends at " << fec_data_end
+                   << ", but fec starts at " << install_part.fec_offset;
+        *error = ErrorCode::kDownloadNewPartitionInfoError;
+        return false;
+      }
+      install_part.fec_roots = partition.fec_roots();
+    }
+
     install_plan_->partitions.push_back(install_part);
   }
 
+  if (install_plan_->target_slot != BootControlInterface::kInvalidSlot) {
+    BootControlInterface::PartitionSizes partition_sizes;
+    for (const InstallPlan::Partition& partition : install_plan_->partitions) {
+      partition_sizes.emplace(partition.name, partition.target_size);
+    }
+    if (!boot_control_->InitPartitionMetadata(install_plan_->target_slot,
+                                              partition_sizes)) {
+      LOG(ERROR) << "Unable to initialize partition metadata for slot "
+                 << BootControlInterface::SlotName(install_plan_->target_slot);
+      *error = ErrorCode::kInstallDeviceOpenError;
+      return false;
+    }
+  }
+
   if (!install_plan_->LoadPartitionsFromSlots(boot_control_)) {
     LOG(ERROR) << "Unable to determine all the partition devices.";
     *error = ErrorCode::kInstallDeviceOpenError;
@@ -1025,20 +1132,121 @@
   if (operation.has_dst_length())
     TEST_AND_RETURN_FALSE(operation.dst_length() % block_size_ == 0);
 
-  brillo::Blob source_hash;
-  TEST_AND_RETURN_FALSE(fd_utils::CopyAndHashExtents(source_fd_,
-                                                     operation.src_extents(),
-                                                     target_fd_,
-                                                     operation.dst_extents(),
-                                                     block_size_,
-                                                     &source_hash));
-
   if (operation.has_src_sha256_hash()) {
+    brillo::Blob source_hash;
+    brillo::Blob expected_source_hash(operation.src_sha256_hash().begin(),
+                                      operation.src_sha256_hash().end());
+
+    // We fall back to using the error corrected device if the hash of the raw
+    // device doesn't match or there was an error reading the source partition.
+    // Note that this code will also fall back if writing the target partition
+    // fails.
+    bool read_ok = fd_utils::CopyAndHashExtents(source_fd_,
+                                                operation.src_extents(),
+                                                target_fd_,
+                                                operation.dst_extents(),
+                                                block_size_,
+                                                &source_hash);
+    if (read_ok && expected_source_hash == source_hash)
+      return true;
+
+    if (!OpenCurrentECCPartition()) {
+      // The following function call will return false since the source hash
+      // mismatches, but we still want to call it so it prints the appropriate
+      // log message.
+      return ValidateSourceHash(source_hash, operation, source_fd_, error);
+    }
+
+    LOG(WARNING) << "Source hash from RAW device mismatched: found "
+                 << base::HexEncode(source_hash.data(), source_hash.size())
+                 << ", expected "
+                 << base::HexEncode(expected_source_hash.data(),
+                                    expected_source_hash.size());
+
+    TEST_AND_RETURN_FALSE(fd_utils::CopyAndHashExtents(source_ecc_fd_,
+                                                       operation.src_extents(),
+                                                       target_fd_,
+                                                       operation.dst_extents(),
+                                                       block_size_,
+                                                       &source_hash));
     TEST_AND_RETURN_FALSE(
-        ValidateSourceHash(source_hash, operation, source_fd_, error));
+        ValidateSourceHash(source_hash, operation, source_ecc_fd_, error));
+    // At this point reading from the error corrected device worked, but
+    // reading from the raw device failed, so this is considered a recovered
+    // failure.
+    source_ecc_recovered_failures_++;
+  } else {
+    // When the operation doesn't include a source hash, we attempt the error
+    // corrected device first since we can't verify the block in the raw device
+    // at this point, but we fall back to the raw device since the error
+    // corrected device can be shorter or not available.
+    if (OpenCurrentECCPartition() &&
+        fd_utils::CopyAndHashExtents(source_ecc_fd_,
+                                     operation.src_extents(),
+                                     target_fd_,
+                                     operation.dst_extents(),
+                                     block_size_,
+                                     nullptr)) {
+      return true;
+    }
+    TEST_AND_RETURN_FALSE(fd_utils::CopyAndHashExtents(source_fd_,
+                                                       operation.src_extents(),
+                                                       target_fd_,
+                                                       operation.dst_extents(),
+                                                       block_size_,
+                                                       nullptr));
+  }
+  return true;
+}
+
+FileDescriptorPtr DeltaPerformer::ChooseSourceFD(
+    const InstallOperation& operation, ErrorCode* error) {
+  if (!operation.has_src_sha256_hash()) {
+    // When the operation doesn't include a source hash, we attempt the error
+    // corrected device first since we can't verify the block in the raw device
+    // at this point, but we first need to make sure all extents are readable
+    // since the error corrected device can be shorter or not available.
+    if (OpenCurrentECCPartition() &&
+        fd_utils::ReadAndHashExtents(
+            source_ecc_fd_, operation.src_extents(), block_size_, nullptr)) {
+      return source_ecc_fd_;
+    }
+    return source_fd_;
   }
 
-  return true;
+  brillo::Blob source_hash;
+  brillo::Blob expected_source_hash(operation.src_sha256_hash().begin(),
+                                    operation.src_sha256_hash().end());
+  if (fd_utils::ReadAndHashExtents(
+          source_fd_, operation.src_extents(), block_size_, &source_hash) &&
+      source_hash == expected_source_hash) {
+    return source_fd_;
+  }
+  // We fall back to using the error corrected device if the hash of the raw
+  // device doesn't match or there was an error reading the source partition.
+  if (!OpenCurrentECCPartition()) {
+    // The following function call will return false since the source hash
+    // mismatches, but we still want to call it so it prints the appropriate
+    // log message.
+    ValidateSourceHash(source_hash, operation, source_fd_, error);
+    return nullptr;
+  }
+  LOG(WARNING) << "Source hash from RAW device mismatched: found "
+               << base::HexEncode(source_hash.data(), source_hash.size())
+               << ", expected "
+               << base::HexEncode(expected_source_hash.data(),
+                                  expected_source_hash.size());
+
+  if (fd_utils::ReadAndHashExtents(
+          source_ecc_fd_, operation.src_extents(), block_size_, &source_hash) &&
+      ValidateSourceHash(source_hash, operation, source_ecc_fd_, error)) {
+    // At this point reading from the error corrected device worked, but
+    // reading from the raw device failed, so this is considered a recovered
+    // failure.
+    source_ecc_recovered_failures_++;
+    return source_ecc_fd_;
+  }
+  return nullptr;
 }
 
 bool DeltaPerformer::ExtentsToBsdiffPositionsString(
@@ -1183,17 +1391,12 @@
   if (operation.has_dst_length())
     TEST_AND_RETURN_FALSE(operation.dst_length() % block_size_ == 0);
 
-  if (operation.has_src_sha256_hash()) {
-    brillo::Blob source_hash;
-    TEST_AND_RETURN_FALSE(fd_utils::ReadAndHashExtents(
-        source_fd_, operation.src_extents(), block_size_, &source_hash));
-    TEST_AND_RETURN_FALSE(
-        ValidateSourceHash(source_hash, operation, source_fd_, error));
-  }
+  FileDescriptorPtr source_fd = ChooseSourceFD(operation, error);
+  TEST_AND_RETURN_FALSE(source_fd != nullptr);
 
   auto reader = std::make_unique<DirectExtentReader>();
   TEST_AND_RETURN_FALSE(
-      reader->Init(source_fd_, operation.src_extents(), block_size_));
+      reader->Init(source_fd, operation.src_extents(), block_size_));
   auto src_file = std::make_unique<BsdiffExtentFile>(
       std::move(reader),
       utils::BlocksInExtents(operation.src_extents()) * block_size_);
@@ -1300,17 +1503,12 @@
   TEST_AND_RETURN_FALSE(buffer_offset_ == operation.data_offset());
   TEST_AND_RETURN_FALSE(buffer_.size() >= operation.data_length());
 
-  if (operation.has_src_sha256_hash()) {
-    brillo::Blob source_hash;
-    TEST_AND_RETURN_FALSE(fd_utils::ReadAndHashExtents(
-        source_fd_, operation.src_extents(), block_size_, &source_hash));
-    TEST_AND_RETURN_FALSE(
-        ValidateSourceHash(source_hash, operation, source_fd_, error));
-  }
+  FileDescriptorPtr source_fd = ChooseSourceFD(operation, error);
+  TEST_AND_RETURN_FALSE(source_fd != nullptr);
 
   auto reader = std::make_unique<DirectExtentReader>();
   TEST_AND_RETURN_FALSE(
-      reader->Init(source_fd_, operation.src_extents(), block_size_));
+      reader->Init(source_fd, operation.src_extents(), block_size_));
   puffin::UniqueStreamPtr src_stream(new PuffinExtentStream(
       std::move(reader),
       utils::BlocksInExtents(operation.src_extents()) * block_size_));
@@ -1425,11 +1623,13 @@
       return ErrorCode::kUnsupportedMinorPayloadVersion;
     }
   } else {
-    if (manifest_.minor_version() != supported_minor_version_) {
+    if (manifest_.minor_version() < kMinSupportedMinorPayloadVersion ||
+        manifest_.minor_version() > kMaxSupportedMinorPayloadVersion) {
       LOG(ERROR) << "Manifest contains minor version "
                  << manifest_.minor_version()
-                 << " not the supported "
-                 << supported_minor_version_;
+                 << " not in the range of supported minor versions ["
+                 << kMinSupportedMinorPayloadVersion << ", "
+                 << kMaxSupportedMinorPayloadVersion << "].";
       return ErrorCode::kUnsupportedMinorPayloadVersion;
     }
   }
@@ -1584,16 +1784,6 @@
   }
 
   LOG(INFO) << "Payload hash matches value in payload.";
-
-  // At this point, we are guaranteed to have downloaded a full payload, i.e
-  // the one whose size matches the size mentioned in Omaha response. If any
-  // errors happen after this, it's likely a problem with the payload itself or
-  // the state of the system and not a problem with the URL or network.  So,
-  // indicate that to the download delegate so that AU can backoff
-  // appropriately.
-  if (download_delegate_)
-    download_delegate_->DownloadComplete();
-
   return ErrorCode::kSuccess;
 }
 
@@ -1670,6 +1860,7 @@
     prefs->SetInt64(kPrefsManifestSignatureSize, -1);
     prefs->SetInt64(kPrefsResumedUpdateFailures, 0);
     prefs->Delete(kPrefsPostInstallSucceeded);
+    prefs->Delete(kPrefsVerityWritten);
   }
   return true;
 }
@@ -1711,7 +1902,6 @@
 
 bool DeltaPerformer::PrimeUpdateState() {
   CHECK(manifest_valid_);
-  block_size_ = manifest_.block_size();
 
   int64_t next_operation = kUpdateStateOperationInvalid;
   if (!prefs_->GetInt64(kPrefsUpdateStateNextOperation, &next_operation) ||
diff --git a/payload_consumer/delta_performer.h b/payload_consumer/delta_performer.h
index ac9ca80..38d2c43 100644
--- a/payload_consumer/delta_performer.h
+++ b/payload_consumer/delta_performer.h
@@ -48,9 +48,6 @@
 
 class DeltaPerformer : public FileWriter {
  public:
-  static const uint64_t kSupportedMajorPayloadVersion;
-  static const uint32_t kSupportedMinorPayloadVersion;
-
   // Defines the granularity of progress logging in terms of how many "completed
   // chunks" we want to report at the most.
   static const unsigned kProgressLogMaxChunks;
@@ -70,14 +67,14 @@
                  DownloadActionDelegate* download_delegate,
                  InstallPlan* install_plan,
                  InstallPlan::Payload* payload,
-                 bool is_interactive)
+                 bool interactive)
       : prefs_(prefs),
         boot_control_(boot_control),
         hardware_(hardware),
         download_delegate_(download_delegate),
         install_plan_(install_plan),
         payload_(payload),
-        is_interactive_(is_interactive) {}
+        interactive_(interactive) {}
 
   // FileWriter's Write implementation where caller doesn't care about
   // error codes.
@@ -99,6 +96,10 @@
   // work. Returns whether the required file descriptors were successfully open.
   bool OpenCurrentPartition();
 
+  // Attempt to open the error-corrected device for the current partition.
+  // Returns whether the operation succeeded.
+  bool OpenCurrentECCPartition();
+
   // Closes the current partition file descriptors if open. Returns 0 on success
   // or -errno on error.
   int CloseCurrentPartition();
@@ -174,6 +175,7 @@
   friend class DeltaPerformerIntegrationTest;
   FRIEND_TEST(DeltaPerformerTest, BrilloMetadataSignatureSizeTest);
   FRIEND_TEST(DeltaPerformerTest, BrilloParsePayloadMetadataTest);
+  FRIEND_TEST(DeltaPerformerTest, ChooseSourceFDTest);
   FRIEND_TEST(DeltaPerformerTest, UsePublicKeyFromResponse);
 
   // Parse and move the update instructions of all partitions into our local
@@ -228,6 +230,13 @@
   bool PerformPuffDiffOperation(const InstallOperation& operation,
                                 ErrorCode* error);
 
+  // For a given operation, choose the source fd to be used (raw device or error
+  // correction device) based on the source operation hash.
+  // Returns nullptr if the source hash mismatch cannot be corrected, and sets
+  // the |error| accordingly.
+  FileDescriptorPtr ChooseSourceFD(const InstallOperation& operation,
+                                   ErrorCode* error);
+
   // Extracts the payload signature message from the blob on the |operation| if
   // the offset matches the one specified by the manifest. Returns whether the
   // signature was extracted.
@@ -283,6 +292,22 @@
   // partition when using a delta payload.
   FileDescriptorPtr source_fd_{nullptr};
 
+  // File descriptor of the error corrected source partition. Only set while
+  // updating partition using a delta payload for a partition where error
+  // correction is available. The size of the error corrected device is smaller
+  // than the underlying raw device, since it doesn't include the error
+  // correction blocks.
+  FileDescriptorPtr source_ecc_fd_{nullptr};
+
+  // The total number of operations that failed source hash verification but
+  // passed after falling back to the error-corrected |source_ecc_fd_| device.
+  uint64_t source_ecc_recovered_failures_{0};
+
+  // Whether opening the current partition as an error-corrected device failed.
+  // Used to avoid re-opening the same source partition if it is not actually
+  // error corrected.
+  bool source_ecc_open_failure_{false};
+
   // File descriptor of the target partition. Only set while performing the
   // operations of a given partition.
   FileDescriptorPtr target_fd_{nullptr};
@@ -363,7 +388,7 @@
   unsigned last_progress_chunk_{0};
 
   // If |true|, the update is user initiated (vs. periodic update checks).
-  bool is_interactive_{false};
+  bool interactive_{false};
 
   // The timeout after which we should force emitting a progress log (constant),
   // and the actual point in time for the next forced log to be emitted.
@@ -371,12 +396,6 @@
       base::TimeDelta::FromSeconds(kProgressLogTimeoutSeconds)};
   base::Time forced_progress_log_time_;
 
-  // The payload major payload version supported by DeltaPerformer.
-  uint64_t supported_major_version_{kSupportedMajorPayloadVersion};
-
-  // The delta minor payload version supported by DeltaPerformer.
-  uint32_t supported_minor_version_{kSupportedMinorPayloadVersion};
-
   DISALLOW_COPY_AND_ASSIGN(DeltaPerformer);
 };
 
diff --git a/payload_consumer/delta_performer_integration_test.cc b/payload_consumer/delta_performer_integration_test.cc
index 3572a6d..ba5fa18 100644
--- a/payload_consumer/delta_performer_integration_test.cc
+++ b/payload_consumer/delta_performer_integration_test.cc
@@ -39,6 +39,7 @@
 #include "update_engine/common/utils.h"
 #include "update_engine/payload_consumer/mock_download_action.h"
 #include "update_engine/payload_consumer/payload_constants.h"
+#include "update_engine/payload_consumer/payload_metadata.h"
 #include "update_engine/payload_consumer/payload_verifier.h"
 #include "update_engine/payload_generator/delta_diff_generator.h"
 #include "update_engine/payload_generator/payload_signer.h"
@@ -116,13 +117,7 @@
 
 }  // namespace
 
-class DeltaPerformerIntegrationTest : public ::testing::Test {
- public:
-  static void SetSupportedVersion(DeltaPerformer* performer,
-                                  uint64_t minor_version) {
-    performer->supported_minor_version_ = minor_version;
-  }
-};
+class DeltaPerformerIntegrationTest : public ::testing::Test {};
 
 static void CompareFilesByBlock(const string& a_file, const string& b_file,
                                 size_t image_size) {
@@ -240,9 +235,7 @@
     RSA_free(rsa);
   }
   int signature_size = GetSignatureSize(private_key_path);
-  string hash_file;
-  ASSERT_TRUE(utils::MakeTempFile("hash.XXXXXX", &hash_file, nullptr));
-  ScopedPathUnlinker hash_unlinker(hash_file);
+  test_utils::ScopedTempFile hash_file("hash.XXXXXX");
   string signature_size_string;
   if (signature_test == kSignatureGeneratedShellRotateCl1 ||
       signature_test == kSignatureGeneratedShellRotateCl2)
@@ -257,36 +250,33 @@
                 delta_generator_path.c_str(),
                 payload_path.c_str(),
                 signature_size_string.c_str(),
-                hash_file.c_str())));
+                hash_file.path().c_str())));
 
   // Sign the hash
   brillo::Blob hash, signature;
-  ASSERT_TRUE(utils::ReadFile(hash_file, &hash));
+  ASSERT_TRUE(utils::ReadFile(hash_file.path(), &hash));
   ASSERT_TRUE(PayloadSigner::SignHash(hash, private_key_path, &signature));
 
-  string sig_file;
-  ASSERT_TRUE(utils::MakeTempFile("signature.XXXXXX", &sig_file, nullptr));
-  ScopedPathUnlinker sig_unlinker(sig_file);
-  ASSERT_TRUE(test_utils::WriteFileVector(sig_file, signature));
+  test_utils::ScopedTempFile sig_file("signature.XXXXXX");
+  ASSERT_TRUE(test_utils::WriteFileVector(sig_file.path(), signature));
+  string sig_files = sig_file.path();
 
-  string sig_file2;
-  ASSERT_TRUE(utils::MakeTempFile("signature.XXXXXX", &sig_file2, nullptr));
-  ScopedPathUnlinker sig2_unlinker(sig_file2);
+  test_utils::ScopedTempFile sig_file2("signature.XXXXXX");
   if (signature_test == kSignatureGeneratedShellRotateCl1 ||
       signature_test == kSignatureGeneratedShellRotateCl2) {
     ASSERT_TRUE(PayloadSigner::SignHash(
         hash, GetBuildArtifactsPath(kUnittestPrivateKey2Path), &signature));
-    ASSERT_TRUE(test_utils::WriteFileVector(sig_file2, signature));
+    ASSERT_TRUE(test_utils::WriteFileVector(sig_file2.path(), signature));
     // Append second sig file to first path
-    sig_file += ":" + sig_file2;
+    sig_files += ":" + sig_file2.path();
   }
 
   ASSERT_EQ(0,
             System(base::StringPrintf(
-                "%s -in_file=%s -signature_file=%s -out_file=%s",
+                "%s -in_file=%s -payload_signature_file=%s -out_file=%s",
                 delta_generator_path.c_str(),
                 payload_path.c_str(),
-                sig_file.c_str(),
+                sig_files.c_str(),
                 payload_path.c_str())));
   int verify_result = System(base::StringPrintf(
       "%s -in_file=%s -public_key=%s -public_key_version=%d",
@@ -510,8 +500,8 @@
     payload_config.version.major = kChromeOSMajorPayloadVersion;
     payload_config.version.minor = minor_version;
     if (!full_rootfs) {
-      payload_config.source.partitions.emplace_back(kLegacyPartitionNameRoot);
-      payload_config.source.partitions.emplace_back(kLegacyPartitionNameKernel);
+      payload_config.source.partitions.emplace_back(kPartitionNameRoot);
+      payload_config.source.partitions.emplace_back(kPartitionNameKernel);
       payload_config.source.partitions.front().path = state->a_img;
       if (!full_kernel)
         payload_config.source.partitions.back().path = state->old_kernel;
@@ -524,9 +514,9 @@
         // Use 1 MiB chunk size for the full unittests.
         payload_config.hard_chunk_size = 1024 * 1024;
     }
-    payload_config.target.partitions.emplace_back(kLegacyPartitionNameRoot);
+    payload_config.target.partitions.emplace_back(kPartitionNameRoot);
     payload_config.target.partitions.back().path = state->b_img;
-    payload_config.target.partitions.emplace_back(kLegacyPartitionNameKernel);
+    payload_config.target.partitions.emplace_back(kPartitionNameKernel);
     payload_config.target.partitions.back().path = state->new_kernel;
     payload_config.target.image_info = new_image_info;
     EXPECT_TRUE(payload_config.target.LoadImageSize());
@@ -592,16 +582,14 @@
                            uint32_t minor_version) {
   // Check the metadata.
   {
-    DeltaArchiveManifest manifest;
-    EXPECT_TRUE(PayloadSigner::LoadPayloadMetadata(state->delta_path,
-                                                   nullptr,
-                                                   &manifest,
-                                                   nullptr,
-                                                   &state->metadata_size,
-                                                   nullptr));
-    LOG(INFO) << "Metadata size: " << state->metadata_size;
     EXPECT_TRUE(utils::ReadFile(state->delta_path, &state->delta));
+    PayloadMetadata payload_metadata;
+    EXPECT_TRUE(payload_metadata.ParsePayloadHeader(state->delta));
+    state->metadata_size = payload_metadata.GetMetadataSize();
+    LOG(INFO) << "Metadata size: " << state->metadata_size;
 
+    DeltaArchiveManifest manifest;
+    EXPECT_TRUE(payload_metadata.GetManifest(state->delta, &manifest));
     if (signature_test == kSignatureNone) {
       EXPECT_FALSE(manifest.has_signatures_offset());
       EXPECT_FALSE(manifest.has_signatures_size());
@@ -728,10 +716,10 @@
   install_plan->target_slot = 1;
 
   InstallPlan::Partition root_part;
-  root_part.name = kLegacyPartitionNameRoot;
+  root_part.name = kPartitionNameRoot;
 
   InstallPlan::Partition kernel_part;
-  kernel_part.name = kLegacyPartitionNameKernel;
+  kernel_part.name = kPartitionNameKernel;
 
   LOG(INFO) << "Setting payload metadata size in Omaha  = "
             << state->metadata_size;
@@ -748,11 +736,10 @@
                                   &state->mock_delegate_,
                                   install_plan,
                                   &install_plan->payloads[0],
-                                  false /* is_interactive */);
+                                  false /* interactive */);
   string public_key_path = GetBuildArtifactsPath(kUnittestPublicKeyPath);
   EXPECT_TRUE(utils::FileExists(public_key_path.c_str()));
   (*performer)->set_public_key_path(public_key_path);
-  DeltaPerformerIntegrationTest::SetSupportedVersion(*performer, minor_version);
 
   EXPECT_EQ(static_cast<off_t>(state->image_size),
             HashCalculator::RawHashOfFile(
@@ -778,13 +765,13 @@
   }
 
   state->fake_boot_control_.SetPartitionDevice(
-      kLegacyPartitionNameRoot, install_plan->source_slot, state->a_img);
+      kPartitionNameRoot, install_plan->source_slot, state->a_img);
   state->fake_boot_control_.SetPartitionDevice(
-      kLegacyPartitionNameKernel, install_plan->source_slot, state->old_kernel);
+      kPartitionNameKernel, install_plan->source_slot, state->old_kernel);
   state->fake_boot_control_.SetPartitionDevice(
-      kLegacyPartitionNameRoot, install_plan->target_slot, target_root);
+      kPartitionNameRoot, install_plan->target_slot, target_root);
   state->fake_boot_control_.SetPartitionDevice(
-      kLegacyPartitionNameKernel, install_plan->target_slot, target_kernel);
+      kPartitionNameKernel, install_plan->target_slot, target_kernel);
 
   ErrorCode expected_error, actual_error;
   bool continue_writing;
@@ -852,9 +839,6 @@
     return;
   }
 
-  int expected_times = (expected_result == ErrorCode::kSuccess) ? 1 : 0;
-  EXPECT_CALL(state->mock_delegate_, DownloadComplete()).Times(expected_times);
-
   LOG(INFO) << "Verifying payload for expected result " << expected_result;
   brillo::Blob expected_hash;
   HashCalculator::RawHashOfData(state->delta, &expected_hash);
@@ -889,8 +873,8 @@
 
   const auto& partitions = state->install_plan.partitions;
   EXPECT_EQ(2U, partitions.size());
-  EXPECT_EQ(kLegacyPartitionNameRoot, partitions[0].name);
-  EXPECT_EQ(kLegacyPartitionNameKernel, partitions[1].name);
+  EXPECT_EQ(kPartitionNameRoot, partitions[0].name);
+  EXPECT_EQ(kPartitionNameKernel, partitions[1].name);
 
   EXPECT_EQ(kDefaultKernelSize, partitions[1].target_size);
   brillo::Blob expected_new_kernel_hash;
@@ -972,12 +956,14 @@
                    false, kInPlaceMinorPayloadVersion);
 }
 
-TEST(DeltaPerformerIntegrationTest, RunAsRootSmallImageSignaturePlaceholderTest) {
+TEST(DeltaPerformerIntegrationTest,
+     RunAsRootSmallImageSignaturePlaceholderTest) {
   DoSmallImageTest(false, false, false, -1, kSignatureGeneratedPlaceholder,
                    false, kInPlaceMinorPayloadVersion);
 }
 
-TEST(DeltaPerformerIntegrationTest, RunAsRootSmallImageSignaturePlaceholderMismatchTest) {
+TEST(DeltaPerformerIntegrationTest,
+     RunAsRootSmallImageSignaturePlaceholderMismatchTest) {
   DeltaState state;
   GenerateDeltaFile(false, false, false, -1,
                     kSignatureGeneratedPlaceholderMismatch, &state,
@@ -1019,17 +1005,20 @@
                    false, kInPlaceMinorPayloadVersion);
 }
 
-TEST(DeltaPerformerIntegrationTest, RunAsRootSmallImageSignGeneratedShellBadKeyTest) {
+TEST(DeltaPerformerIntegrationTest,
+     RunAsRootSmallImageSignGeneratedShellBadKeyTest) {
   DoSmallImageTest(false, false, false, -1, kSignatureGeneratedShellBadKey,
                    false, kInPlaceMinorPayloadVersion);
 }
 
-TEST(DeltaPerformerIntegrationTest, RunAsRootSmallImageSignGeneratedShellRotateCl1Test) {
+TEST(DeltaPerformerIntegrationTest,
+     RunAsRootSmallImageSignGeneratedShellRotateCl1Test) {
   DoSmallImageTest(false, false, false, -1, kSignatureGeneratedShellRotateCl1,
                    false, kInPlaceMinorPayloadVersion);
 }
 
-TEST(DeltaPerformerIntegrationTest, RunAsRootSmallImageSignGeneratedShellRotateCl2Test) {
+TEST(DeltaPerformerIntegrationTest,
+     RunAsRootSmallImageSignGeneratedShellRotateCl2Test) {
   DoSmallImageTest(false, false, false, -1, kSignatureGeneratedShellRotateCl2,
                    false, kInPlaceMinorPayloadVersion);
 }
@@ -1039,7 +1028,8 @@
                    false, kSourceMinorPayloadVersion);
 }
 
-TEST(DeltaPerformerIntegrationTest, RunAsRootMandatoryOperationHashMismatchTest) {
+TEST(DeltaPerformerIntegrationTest,
+     RunAsRootMandatoryOperationHashMismatchTest) {
   DoOperationHashMismatchTest(kInvalidOperationData, true);
 }
 
diff --git a/payload_consumer/delta_performer_unittest.cc b/payload_consumer/delta_performer_unittest.cc
index 88df98a..b0520e7 100644
--- a/payload_consumer/delta_performer_unittest.cc
+++ b/payload_consumer/delta_performer_unittest.cc
@@ -18,7 +18,9 @@
 
 #include <endian.h>
 #include <inttypes.h>
+#include <time.h>
 
+#include <memory>
 #include <string>
 #include <vector>
 
@@ -38,6 +40,7 @@
 #include "update_engine/common/fake_prefs.h"
 #include "update_engine/common/test_utils.h"
 #include "update_engine/common/utils.h"
+#include "update_engine/payload_consumer/fake_file_descriptor.h"
 #include "update_engine/payload_consumer/mock_download_action.h"
 #include "update_engine/payload_consumer/payload_constants.h"
 #include "update_engine/payload_generator/bzip.h"
@@ -170,9 +173,11 @@
   brillo::Blob GeneratePayload(const brillo::Blob& blob_data,
                                const vector<AnnotatedOperation>& aops,
                                bool sign_payload) {
-    return GeneratePayload(blob_data, aops, sign_payload,
-                           DeltaPerformer::kSupportedMajorPayloadVersion,
-                           DeltaPerformer::kSupportedMinorPayloadVersion);
+    return GeneratePayload(blob_data,
+                           aops,
+                           sign_payload,
+                           kMaxSupportedMajorPayloadVersion,
+                           kMaxSupportedMinorPayloadVersion);
   }
 
   brillo::Blob GeneratePayload(const brillo::Blob& blob_data,
@@ -180,12 +185,8 @@
                                bool sign_payload,
                                uint64_t major_version,
                                uint32_t minor_version) {
-    string blob_path;
-    EXPECT_TRUE(utils::MakeTempFile("Blob-XXXXXX", &blob_path, nullptr));
-    ScopedPathUnlinker blob_unlinker(blob_path);
-    EXPECT_TRUE(utils::WriteFile(blob_path.c_str(),
-                                 blob_data.data(),
-                                 blob_data.size()));
+    test_utils::ScopedTempFile blob_file("Blob-XXXXXX");
+    EXPECT_TRUE(test_utils::WriteFileVector(blob_file.path(), blob_data));
 
     PayloadGenerationConfig config;
     config.version.major = major_version;
@@ -194,38 +195,56 @@
     PayloadFile payload;
     EXPECT_TRUE(payload.Init(config));
 
-    PartitionConfig old_part(kLegacyPartitionNameRoot);
+    PartitionConfig old_part(kPartitionNameRoot);
     if (minor_version != kFullPayloadMinorVersion) {
       // When generating a delta payload we need to include the old partition
       // information to mark it as a delta payload.
       old_part.path = "/dev/null";
       old_part.size = 0;
     }
-    PartitionConfig new_part(kLegacyPartitionNameRoot);
+    PartitionConfig new_part(kPartitionNameRoot);
     new_part.path = "/dev/zero";
     new_part.size = 1234;
 
     payload.AddPartition(old_part, new_part, aops);
 
     // We include a kernel partition without operations.
-    old_part.name = kLegacyPartitionNameKernel;
-    new_part.name = kLegacyPartitionNameKernel;
+    old_part.name = kPartitionNameKernel;
+    new_part.name = kPartitionNameKernel;
     new_part.size = 0;
     payload.AddPartition(old_part, new_part, {});
 
-    string payload_path;
-    EXPECT_TRUE(utils::MakeTempFile("Payload-XXXXXX", &payload_path, nullptr));
-    ScopedPathUnlinker payload_unlinker(payload_path);
+    test_utils::ScopedTempFile payload_file("Payload-XXXXXX");
     string private_key =
         sign_payload ? GetBuildArtifactsPath(kUnittestPrivateKeyPath) : "";
-    EXPECT_TRUE(payload.WritePayload(
-        payload_path, blob_path, private_key, &payload_.metadata_size));
+    EXPECT_TRUE(payload.WritePayload(payload_file.path(),
+                                     blob_file.path(),
+                                     private_key,
+                                     &payload_.metadata_size));
 
     brillo::Blob payload_data;
-    EXPECT_TRUE(utils::ReadFile(payload_path, &payload_data));
+    EXPECT_TRUE(utils::ReadFile(payload_file.path(), &payload_data));
     return payload_data;
   }
 
+  brillo::Blob GenerateSourceCopyPayload(const brillo::Blob& copied_data,
+                                         bool add_hash) {
+    PayloadGenerationConfig config;
+    const uint64_t kDefaultBlockSize = config.block_size;
+    EXPECT_EQ(0U, copied_data.size() % kDefaultBlockSize);
+    uint64_t num_blocks = copied_data.size() / kDefaultBlockSize;
+    AnnotatedOperation aop;
+    *(aop.op.add_src_extents()) = ExtentForRange(0, num_blocks);
+    *(aop.op.add_dst_extents()) = ExtentForRange(0, num_blocks);
+    aop.op.set_type(InstallOperation::SOURCE_COPY);
+    brillo::Blob src_hash;
+    EXPECT_TRUE(HashCalculator::RawHashOfData(copied_data, &src_hash));
+    if (add_hash)
+      aop.op.set_src_sha256_hash(src_hash.data(), src_hash.size());
+
+    return GeneratePayload(brillo::Blob(), {aop}, false);
+  }
+
   // Apply |payload_data| on partition specified in |source_path|.
   // Expect result of performer_.Write() to be |expect_success|.
   // Returns the result of the payload application.
@@ -245,29 +264,26 @@
                                   const string& source_path,
                                   const brillo::Blob& target_data,
                                   bool expect_success) {
-    string new_part;
-    EXPECT_TRUE(utils::MakeTempFile("Partition-XXXXXX", &new_part, nullptr));
-    ScopedPathUnlinker partition_unlinker(new_part);
-    EXPECT_TRUE(utils::WriteFile(new_part.c_str(), target_data.data(),
-                                 target_data.size()));
+    test_utils::ScopedTempFile new_part("Partition-XXXXXX");
+    EXPECT_TRUE(test_utils::WriteFileVector(new_part.path(), target_data));
 
     // We installed the operations only in the rootfs partition, but the
     // delta performer needs to access all the partitions.
     fake_boot_control_.SetPartitionDevice(
-        kLegacyPartitionNameRoot, install_plan_.target_slot, new_part);
+        kPartitionNameRoot, install_plan_.target_slot, new_part.path());
     fake_boot_control_.SetPartitionDevice(
-        kLegacyPartitionNameRoot, install_plan_.source_slot, source_path);
+        kPartitionNameRoot, install_plan_.source_slot, source_path);
     fake_boot_control_.SetPartitionDevice(
-        kLegacyPartitionNameKernel, install_plan_.target_slot, "/dev/null");
+        kPartitionNameKernel, install_plan_.target_slot, "/dev/null");
     fake_boot_control_.SetPartitionDevice(
-        kLegacyPartitionNameKernel, install_plan_.source_slot, "/dev/null");
+        kPartitionNameKernel, install_plan_.source_slot, "/dev/null");
 
     EXPECT_EQ(expect_success,
               performer_.Write(payload_data.data(), payload_data.size()));
     EXPECT_EQ(0, performer_.Close());
 
     brillo::Blob partition_data;
-    EXPECT_TRUE(utils::ReadFile(new_part, &partition_data));
+    EXPECT_TRUE(utils::ReadFile(new_part.path(), &partition_data));
     return partition_data;
   }
 
@@ -375,22 +391,37 @@
     EXPECT_EQ(payload_.metadata_size, performer_.metadata_size_);
   }
 
-  void SetSupportedMajorVersion(uint64_t major_version) {
-    performer_.supported_major_version_ = major_version;
+  // Helper function to pretend that the ECC file descriptor was already opened.
+  // Returns a pointer to the created file descriptor.
+  FakeFileDescriptor* SetFakeECCFile(size_t size) {
+    EXPECT_FALSE(performer_.source_ecc_fd_) << "source_ecc_fd_ already open.";
+    FakeFileDescriptor* ret = new FakeFileDescriptor();
+    fake_ecc_fd_.reset(ret);
+    // Call Open() to simulate that it was already opened.
+    ret->Open("", 0);
+    ret->SetFileSize(size);
+    performer_.source_ecc_fd_ = fake_ecc_fd_;
+    return ret;
   }
+
+  uint64_t GetSourceEccRecoveredFailures() const {
+    return performer_.source_ecc_recovered_failures_;
+  }
+
   FakePrefs prefs_;
   InstallPlan install_plan_;
   InstallPlan::Payload payload_;
   FakeBootControl fake_boot_control_;
   FakeHardware fake_hardware_;
   MockDownloadActionDelegate mock_delegate_;
+  FileDescriptorPtr fake_ecc_fd_;
   DeltaPerformer performer_{&prefs_,
                             &fake_boot_control_,
                             &fake_hardware_,
                             &mock_delegate_,
                             &install_plan_,
                             &payload_,
-                            false /* is_interactive*/};
+                            false /* interactive*/};
 };
 
 TEST_F(DeltaPerformerTest, FullPayloadWriteTest) {
@@ -530,15 +561,10 @@
 
   brillo::Blob payload_data = GeneratePayload(brillo::Blob(), {aop}, false);
 
-  string source_path;
-  EXPECT_TRUE(utils::MakeTempFile("Source-XXXXXX",
-                                  &source_path, nullptr));
-  ScopedPathUnlinker path_unlinker(source_path);
-  EXPECT_TRUE(utils::WriteFile(source_path.c_str(),
-                               expected_data.data(),
-                               expected_data.size()));
+  test_utils::ScopedTempFile source("Source-XXXXXX");
+  EXPECT_TRUE(test_utils::WriteFileVector(source.path(), expected_data));
 
-  EXPECT_EQ(expected_data, ApplyPayload(payload_data, source_path, true));
+  EXPECT_EQ(expected_data, ApplyPayload(payload_data, source.path(), true));
 }
 
 TEST_F(DeltaPerformerTest, PuffdiffOperationTest) {
@@ -558,13 +584,11 @@
 
   brillo::Blob payload_data = GeneratePayload(puffdiff_payload, {aop}, false);
 
-  string source_path;
-  EXPECT_TRUE(utils::MakeTempFile("Source-XXXXXX", &source_path, nullptr));
-  ScopedPathUnlinker path_unlinker(source_path);
-  EXPECT_TRUE(utils::WriteFile(source_path.c_str(), src.data(), src.size()));
+  test_utils::ScopedTempFile source("Source-XXXXXX");
+  EXPECT_TRUE(test_utils::WriteFileVector(source.path(), src));
 
   brillo::Blob dst(std::begin(dst_deflates), std::end(dst_deflates));
-  EXPECT_EQ(dst, ApplyPayload(payload_data, source_path, true));
+  EXPECT_EQ(dst, ApplyPayload(payload_data, source.path(), true));
 }
 
 TEST_F(DeltaPerformerTest, SourceHashMismatchTest) {
@@ -583,13 +607,89 @@
 
   brillo::Blob payload_data = GeneratePayload(brillo::Blob(), {aop}, false);
 
-  string source_path;
-  EXPECT_TRUE(utils::MakeTempFile("Source-XXXXXX", &source_path, nullptr));
-  ScopedPathUnlinker path_unlinker(source_path);
-  EXPECT_TRUE(utils::WriteFile(source_path.c_str(), actual_data.data(),
-                               actual_data.size()));
+  test_utils::ScopedTempFile source("Source-XXXXXX");
+  EXPECT_TRUE(test_utils::WriteFileVector(source.path(), actual_data));
 
-  EXPECT_EQ(actual_data, ApplyPayload(payload_data, source_path, false));
+  EXPECT_EQ(actual_data, ApplyPayload(payload_data, source.path(), false));
+}
+
+// Test that the error-corrected file descriptor is used to read the partition
+// when the source partition data doesn't match the operation hash.
+TEST_F(DeltaPerformerTest, ErrorCorrectionSourceCopyFallbackTest) {
+  constexpr size_t kCopyOperationSize = 4 * 4096;
+  test_utils::ScopedTempFile source("Source-XXXXXX");
+  // Write invalid data to the source image, which doesn't match the expected
+  // hash.
+  brillo::Blob invalid_data(kCopyOperationSize, 0x55);
+  EXPECT_TRUE(test_utils::WriteFileVector(source.path(), invalid_data));
+
+  // Set up the fec file descriptor as the fake stream, which matches
+  // |expected_data|.
+  FakeFileDescriptor* fake_fec = SetFakeECCFile(kCopyOperationSize);
+  brillo::Blob expected_data = FakeFileDescriptorData(kCopyOperationSize);
+
+  brillo::Blob payload_data = GenerateSourceCopyPayload(expected_data, true);
+  EXPECT_EQ(expected_data, ApplyPayload(payload_data, source.path(), true));
+  // Verify that the fake_fec was actually used.
+  EXPECT_EQ(1U, fake_fec->GetReadOps().size());
+  EXPECT_EQ(1U, GetSourceEccRecoveredFailures());
+}
+
+// Test that the error-corrected file descriptor is attempted when no hash is
+// available for SOURCE_COPY, but that it falls back to the regular file
+// descriptor when the error-corrected one is too small.
+TEST_F(DeltaPerformerTest, ErrorCorrectionSourceCopyWhenNoHashFallbackTest) {
+  constexpr size_t kCopyOperationSize = 4 * 4096;
+  test_utils::ScopedTempFile source("Source-XXXXXX");
+  // Set up the source path with the expected data.
+  brillo::Blob expected_data = FakeFileDescriptorData(kCopyOperationSize);
+  EXPECT_TRUE(test_utils::WriteFileVector(source.path(), expected_data));
+
+  // Set up the fec file descriptor as the fake stream, with less data than
+  // expected.
+  FakeFileDescriptor* fake_fec = SetFakeECCFile(kCopyOperationSize / 2);
+
+  // The payload operation doesn't include an operation hash.
+  brillo::Blob payload_data = GenerateSourceCopyPayload(expected_data, false);
+  EXPECT_EQ(expected_data, ApplyPayload(payload_data, source.path(), true));
+  // Verify that the fake_fec was attempted. Since the file descriptor is
+  // shorter, it may take more than one read to detect that it reached the
+  // EOF.
+  EXPECT_LE(1U, fake_fec->GetReadOps().size());
+  // This fallback doesn't count as an error-corrected operation since the
+  // operation hash was not available.
+  EXPECT_EQ(0U, GetSourceEccRecoveredFailures());
+}
+
+TEST_F(DeltaPerformerTest, ChooseSourceFDTest) {
+  constexpr size_t kSourceSize = 4 * 4096;
+  test_utils::ScopedTempFile source("Source-XXXXXX");
+  // Write invalid data to the source image, which doesn't match the expected
+  // hash.
+  brillo::Blob invalid_data(kSourceSize, 0x55);
+  EXPECT_TRUE(test_utils::WriteFileVector(source.path(), invalid_data));
+
+  performer_.source_fd_ = std::make_shared<EintrSafeFileDescriptor>();
+  performer_.source_fd_->Open(source.path().c_str(), O_RDONLY);
+  performer_.block_size_ = 4096;
+
+  // Set up the fec file descriptor as the fake stream, which matches
+  // |expected_data|.
+  FakeFileDescriptor* fake_fec = SetFakeECCFile(kSourceSize);
+  brillo::Blob expected_data = FakeFileDescriptorData(kSourceSize);
+
+  InstallOperation op;
+  *(op.add_src_extents()) = ExtentForRange(0, kSourceSize / 4096);
+  brillo::Blob src_hash;
+  EXPECT_TRUE(HashCalculator::RawHashOfData(expected_data, &src_hash));
+  op.set_src_sha256_hash(src_hash.data(), src_hash.size());
+
+  ErrorCode error = ErrorCode::kSuccess;
+  EXPECT_EQ(performer_.source_ecc_fd_, performer_.ChooseSourceFD(op, &error));
+  EXPECT_EQ(ErrorCode::kSuccess, error);
+  // Verify that the fake_fec was actually used.
+  EXPECT_EQ(1U, fake_fec->GetReadOps().size());
+  EXPECT_EQ(1U, GetSourceEccRecoveredFailures());
 }
 
 TEST_F(DeltaPerformerTest, ExtentsToByteStringTest) {
@@ -632,7 +732,22 @@
   manifest.mutable_old_rootfs_info();
   manifest.mutable_new_kernel_info();
   manifest.mutable_new_rootfs_info();
-  manifest.set_minor_version(DeltaPerformer::kSupportedMinorPayloadVersion);
+  manifest.set_minor_version(kMaxSupportedMinorPayloadVersion);
+
+  RunManifestValidation(manifest,
+                        kChromeOSMajorPayloadVersion,
+                        InstallPayloadType::kDelta,
+                        ErrorCode::kSuccess);
+}
+
+TEST_F(DeltaPerformerTest, ValidateManifestDeltaMinGoodTest) {
+  // The Manifest we are validating.
+  DeltaArchiveManifest manifest;
+  manifest.mutable_old_kernel_info();
+  manifest.mutable_old_rootfs_info();
+  manifest.mutable_new_kernel_info();
+  manifest.mutable_new_rootfs_info();
+  manifest.set_minor_version(kMinSupportedMinorPayloadVersion);
 
   RunManifestValidation(manifest,
                         kChromeOSMajorPayloadVersion,
@@ -645,7 +760,7 @@
   DeltaArchiveManifest manifest;
 
   RunManifestValidation(manifest,
-                        DeltaPerformer::kSupportedMajorPayloadVersion,
+                        kMaxSupportedMajorPayloadVersion,
                         InstallPayloadType::kFull,
                         ErrorCode::kSuccess);
 }
@@ -658,7 +773,7 @@
   manifest.mutable_old_rootfs_info();
 
   RunManifestValidation(manifest,
-                        DeltaPerformer::kSupportedMajorPayloadVersion,
+                        kMaxSupportedMajorPayloadVersion,
                         InstallPayloadType::kDelta,
                         ErrorCode::kUnsupportedMinorPayloadVersion);
 }
@@ -669,7 +784,7 @@
   manifest.mutable_old_kernel_info();
   manifest.mutable_new_kernel_info();
   manifest.mutable_new_rootfs_info();
-  manifest.set_minor_version(DeltaPerformer::kSupportedMinorPayloadVersion);
+  manifest.set_minor_version(kMaxSupportedMinorPayloadVersion);
 
   RunManifestValidation(manifest,
                         kChromeOSMajorPayloadVersion,
@@ -683,7 +798,7 @@
   manifest.mutable_old_rootfs_info();
   manifest.mutable_new_kernel_info();
   manifest.mutable_new_rootfs_info();
-  manifest.set_minor_version(DeltaPerformer::kSupportedMinorPayloadVersion);
+  manifest.set_minor_version(kMaxSupportedMinorPayloadVersion);
 
   RunManifestValidation(manifest,
                         kChromeOSMajorPayloadVersion,
@@ -697,7 +812,7 @@
   PartitionUpdate* partition = manifest.add_partitions();
   partition->mutable_old_partition_info();
   partition->mutable_new_partition_info();
-  manifest.set_minor_version(DeltaPerformer::kSupportedMinorPayloadVersion);
+  manifest.set_minor_version(kMaxSupportedMinorPayloadVersion);
 
   RunManifestValidation(manifest,
                         kBrilloMajorPayloadVersion,
@@ -710,13 +825,12 @@
   DeltaArchiveManifest manifest;
 
   // Generate a bad version number.
-  manifest.set_minor_version(DeltaPerformer::kSupportedMinorPayloadVersion +
-                             10000);
+  manifest.set_minor_version(kMaxSupportedMinorPayloadVersion + 10000);
   // Mark the manifest as a delta payload by setting old_rootfs_info.
   manifest.mutable_old_rootfs_info();
 
   RunManifestValidation(manifest,
-                        DeltaPerformer::kSupportedMajorPayloadVersion,
+                        kMaxSupportedMajorPayloadVersion,
                         InstallPayloadType::kDelta,
                         ErrorCode::kUnsupportedMinorPayloadVersion);
 }
@@ -730,22 +844,23 @@
   fake_hardware_.SetBuildTimestamp(2);
 
   RunManifestValidation(manifest,
-                        DeltaPerformer::kSupportedMajorPayloadVersion,
+                        kMaxSupportedMajorPayloadVersion,
                         InstallPayloadType::kFull,
                         ErrorCode::kPayloadTimestampError);
 }
 
 TEST_F(DeltaPerformerTest, BrilloMetadataSignatureSizeTest) {
+  unsigned int seed = time(nullptr);
   EXPECT_TRUE(performer_.Write(kDeltaMagic, sizeof(kDeltaMagic)));
 
   uint64_t major_version = htobe64(kBrilloMajorPayloadVersion);
   EXPECT_TRUE(performer_.Write(&major_version, 8));
 
-  uint64_t manifest_size = 222;
+  uint64_t manifest_size = rand_r(&seed) % 256;
   uint64_t manifest_size_be = htobe64(manifest_size);
   EXPECT_TRUE(performer_.Write(&manifest_size_be, 8));
 
-  uint32_t metadata_signature_size = 111;
+  uint32_t metadata_signature_size = rand_r(&seed) % 256;
   uint32_t metadata_signature_size_be = htobe32(metadata_signature_size);
   EXPECT_TRUE(performer_.Write(&metadata_signature_size_be, 4));
 
@@ -847,8 +962,10 @@
   // Non-official build, non-existing public-key, key in response -> true
   fake_hardware_.SetIsOfficialBuild(false);
   performer_.public_key_path_ = non_existing_file;
-  // result of 'echo "Test" | base64'
-  install_plan_.public_key_rsa = "VGVzdAo=";
+  // This is the result of 'echo "Test" | base64' and is not meant to be a
+  // valid public key, but it is valid base-64.
+  constexpr char kBase64TestKey[] = "VGVzdAo=";
+  install_plan_.public_key_rsa = kBase64TestKey;
   EXPECT_TRUE(performer_.GetPublicKeyFromResponse(&key_path));
   EXPECT_FALSE(key_path.empty());
   EXPECT_EQ(unlink(key_path.value().c_str()), 0);
@@ -859,8 +976,7 @@
   // Non-official build, existing public-key, key in response -> false
   fake_hardware_.SetIsOfficialBuild(false);
   performer_.public_key_path_ = existing_file;
-  // result of 'echo "Test" | base64'
-  install_plan_.public_key_rsa = "VGVzdAo=";
+  install_plan_.public_key_rsa = kBase64TestKey;
   EXPECT_FALSE(performer_.GetPublicKeyFromResponse(&key_path));
   // Same with official build -> false
   fake_hardware_.SetIsOfficialBuild(true);
@@ -894,18 +1010,18 @@
 
 TEST_F(DeltaPerformerTest, ConfVersionsMatch) {
   // Test that the versions in update_engine.conf that is installed to the
-  // image match the supported delta versions in the update engine.
+  // image match the maximum supported delta versions in the update engine.
   uint32_t minor_version;
   brillo::KeyValueStore store;
   EXPECT_TRUE(store.Load(GetBuildArtifactsPath().Append("update_engine.conf")));
   EXPECT_TRUE(utils::GetMinorVersion(store, &minor_version));
-  EXPECT_EQ(DeltaPerformer::kSupportedMinorPayloadVersion, minor_version);
+  EXPECT_EQ(kMaxSupportedMinorPayloadVersion, minor_version);
 
   string major_version_str;
   uint64_t major_version;
   EXPECT_TRUE(store.GetString("PAYLOAD_MAJOR_VERSION", &major_version_str));
   EXPECT_TRUE(base::StringToUint64(major_version_str, &major_version));
-  EXPECT_EQ(DeltaPerformer::kSupportedMajorPayloadVersion, major_version);
+  EXPECT_EQ(kMaxSupportedMajorPayloadVersion, major_version);
 }
 
 }  // namespace chromeos_update_engine
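The ECC tests above exercise how DeltaPerformer falls back from the regular
source descriptor to the error-corrected one. Below is a minimal sketch of that
selection logic, assuming only the fixture members visible in this change
(source_fd_, source_ecc_fd_, block_size_, source_ecc_recovered_failures_) and
the fd_utils::ReadAndHashExtents helper; it is an illustration, not the actual
implementation in delta_performer.cc.

// Sketch only: prefer the regular descriptor when its data matches the
// operation hash, otherwise retry through the error-corrected descriptor.
FileDescriptorPtr ChooseSourceFDSketch(const InstallOperation& op,
                                       ErrorCode* error) {
  brillo::Blob op_hash(op.src_sha256_hash().begin(),
                       op.src_sha256_hash().end());
  brillo::Blob hash;
  if (fd_utils::ReadAndHashExtents(
          source_fd_, op.src_extents(), block_size_, &hash) &&
      hash == op_hash) {
    return source_fd_;  // Regular source data verified fine.
  }
  // Mismatch: try the error-corrected descriptor, if it could be opened.
  if (source_ecc_fd_ &&
      fd_utils::ReadAndHashExtents(
          source_ecc_fd_, op.src_extents(), block_size_, &hash) &&
      hash == op_hash) {
    source_ecc_recovered_failures_++;
    return source_ecc_fd_;  // Corruption recovered through libfec.
  }
  // Neither source verified; the real implementation reports a hash mismatch
  // error here. The no-hash case (see the WhenNoHash test above) is omitted
  // from this sketch.
  return source_fd_;
}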
diff --git a/payload_consumer/download_action.cc b/payload_consumer/download_action.cc
index f1b6e33..cfa8846 100644
--- a/payload_consumer/download_action.cc
+++ b/payload_consumer/download_action.cc
@@ -44,19 +44,18 @@
                                HardwareInterface* hardware,
                                SystemState* system_state,
                                HttpFetcher* http_fetcher,
-                               bool is_interactive)
+                               bool interactive)
     : prefs_(prefs),
       boot_control_(boot_control),
       hardware_(hardware),
       system_state_(system_state),
       http_fetcher_(new MultiRangeHttpFetcher(http_fetcher)),
-      is_interactive_(is_interactive),
+      interactive_(interactive),
       writer_(nullptr),
       code_(ErrorCode::kSuccess),
       delegate_(nullptr),
       p2p_sharing_fd_(-1),
       p2p_visible_(true) {
-  base::StatisticsRecorder::Initialize();
 }
 
 DownloadAction::~DownloadAction() {}
@@ -251,7 +250,7 @@
                                               delegate_,
                                               &install_plan_,
                                               payload_,
-                                              is_interactive_));
+                                              interactive_));
     writer_ = delta_performer_.get();
   }
   if (system_state_ != nullptr) {
@@ -318,7 +317,7 @@
   bytes_received_ = offset;
 }
 
-void DownloadAction::ReceivedBytes(HttpFetcher* fetcher,
+bool DownloadAction::ReceivedBytes(HttpFetcher* fetcher,
                                    const void* bytes,
                                    size_t length) {
   // Note that bytes_received_ is the current offset.
@@ -345,7 +344,7 @@
     // the TransferTerminated callback. Otherwise, this and the HTTP fetcher
     // objects may get destroyed before all callbacks are complete.
     TerminateProcessing();
-    return;
+    return false;
   }
 
   // Call p2p_manager_->FileMakeVisible() when we've successfully
@@ -356,6 +355,7 @@
     system_state_->p2p_manager()->FileMakeVisible(p2p_file_id_);
     p2p_visible_ = true;
   }
+  return true;
 }
 
 void DownloadAction::TransferComplete(HttpFetcher* fetcher, bool successful) {
@@ -387,8 +387,13 @@
         StartDownloading();
         return;
       }
+
+      // All payloads have been applied and verified.
+      if (delegate_)
+        delegate_->DownloadComplete();
+
       // Log UpdateEngine.DownloadAction.* histograms to help diagnose
-      // long-blocking oeprations.
+      // long-blocking operations.
       std::string histogram_output;
       base::StatisticsRecorder::WriteGraph(
           "UpdateEngine.DownloadAction.", &histogram_output);
diff --git a/payload_consumer/download_action.h b/payload_consumer/download_action.h
index 81d7333..028a99a 100644
--- a/payload_consumer/download_action.h
+++ b/payload_consumer/download_action.h
@@ -79,7 +79,7 @@
                  HardwareInterface* hardware,
                  SystemState* system_state,
                  HttpFetcher* http_fetcher,
-                 bool is_interactive);
+                 bool interactive);
   ~DownloadAction() override;
 
   // InstallPlanAction overrides.
@@ -97,8 +97,9 @@
   int GetHTTPResponseCode() { return http_fetcher_->http_response_code(); }
 
   // HttpFetcherDelegate methods (see http_fetcher.h)
-  void ReceivedBytes(HttpFetcher* fetcher,
-                     const void* bytes, size_t length) override;
+  bool ReceivedBytes(HttpFetcher* fetcher,
+                     const void* bytes,
+                     size_t length) override;
   void SeekToOffset(off_t offset) override;
   void TransferComplete(HttpFetcher* fetcher, bool successful) override;
   void TransferTerminated(HttpFetcher* fetcher) override;
@@ -158,7 +159,7 @@
   // If |true|, the update is user initiated (vs. periodic update checks). Hence
   // the |delta_performer_| can decide not to use O_DSYNC flag for faster
   // update.
-  bool is_interactive_;
+  bool interactive_;
 
   // The FileWriter that downloaded data should be written to. It will
   // either point to *decompressing_file_writer_ or *delta_performer_.
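ReceivedBytes() now returns bool so a fetcher can stop delivering data once the
delegate has aborted. A sketch of a fetcher-side loop honoring that return
value follows; DeliverChunks and its arguments are illustrative only, not part
of this change.

void DeliverChunks(HttpFetcherDelegate* delegate,
                   HttpFetcher* fetcher,
                   const std::vector<brillo::Blob>& chunks) {
  for (const brillo::Blob& chunk : chunks) {
    if (!delegate->ReceivedBytes(fetcher, chunk.data(), chunk.size())) {
      // The delegate declined further data (e.g. DownloadAction hit a write
      // error and called TerminateProcessing()), so stop feeding bytes.
      return;
    }
  }
}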
diff --git a/payload_consumer/download_action_unittest.cc b/payload_consumer/download_action_unittest.cc
index 7ec7e0e..84673c8 100644
--- a/payload_consumer/download_action_unittest.cc
+++ b/payload_consumer/download_action_unittest.cc
@@ -64,9 +64,8 @@
 
 class DownloadActionTestProcessorDelegate : public ActionProcessorDelegate {
  public:
-  explicit DownloadActionTestProcessorDelegate(ErrorCode expected_code)
-      : processing_done_called_(false),
-        expected_code_(expected_code) {}
+  DownloadActionTestProcessorDelegate()
+      : processing_done_called_(false), expected_code_(ErrorCode::kSuccess) {}
   ~DownloadActionTestProcessorDelegate() override {
     EXPECT_TRUE(processing_done_called_);
   }
@@ -90,6 +89,7 @@
     const string type = action->Type();
     if (type == DownloadAction::StaticType()) {
       EXPECT_EQ(expected_code_, code);
+      p2p_file_id_ = static_cast<DownloadAction*>(action)->p2p_file_id();
     } else {
       EXPECT_EQ(ErrorCode::kSuccess, code);
     }
@@ -99,6 +99,7 @@
   brillo::Blob expected_data_;
   bool processing_done_called_;
   ErrorCode expected_code_;
+  string p2p_file_id_;
 };
 
 class TestDirectFileWriter : public DirectFileWriter {
@@ -154,40 +155,42 @@
       install_plan.source_slot, true);
   fake_system_state.fake_boot_control()->SetSlotBootable(
       install_plan.target_slot, true);
-  ObjectFeederAction<InstallPlan> feeder_action;
-  feeder_action.set_obj(install_plan);
+  auto feeder_action = std::make_unique<ObjectFeederAction<InstallPlan>>();
+  feeder_action->set_obj(install_plan);
   MockPrefs prefs;
   MockHttpFetcher* http_fetcher = new MockHttpFetcher(data.data(),
                                                       data.size(),
                                                       nullptr);
   // takes ownership of passed in HttpFetcher
-  DownloadAction download_action(&prefs,
-                                 fake_system_state.boot_control(),
-                                 fake_system_state.hardware(),
-                                 &fake_system_state,
-                                 http_fetcher,
-                                 false /* is_interactive */);
-  download_action.SetTestFileWriter(&writer);
-  BondActions(&feeder_action, &download_action);
+  auto download_action =
+      std::make_unique<DownloadAction>(&prefs,
+                                       fake_system_state.boot_control(),
+                                       fake_system_state.hardware(),
+                                       &fake_system_state,
+                                       http_fetcher,
+                                       false /* interactive */);
+  download_action->SetTestFileWriter(&writer);
+  BondActions(feeder_action.get(), download_action.get());
   MockDownloadActionDelegate download_delegate;
   if (use_download_delegate) {
     InSequence s;
-    download_action.set_delegate(&download_delegate);
+    download_action->set_delegate(&download_delegate);
     if (data.size() > kMockHttpFetcherChunkSize)
       EXPECT_CALL(download_delegate,
                   BytesReceived(_, kMockHttpFetcherChunkSize, _));
     EXPECT_CALL(download_delegate, BytesReceived(_, _, _)).Times(AtLeast(1));
+    EXPECT_CALL(download_delegate, DownloadComplete())
+        .Times(fail_write == 0 ? 1 : 0);
   }
-  ErrorCode expected_code = ErrorCode::kSuccess;
-  if (fail_write > 0)
-    expected_code = ErrorCode::kDownloadWriteError;
-  DownloadActionTestProcessorDelegate delegate(expected_code);
+  DownloadActionTestProcessorDelegate delegate;
+  delegate.expected_code_ =
+      (fail_write > 0) ? ErrorCode::kDownloadWriteError : ErrorCode::kSuccess;
   delegate.expected_data_ = brillo::Blob(data.begin() + 1, data.end());
   delegate.path_ = output_temp_file.path();
   ActionProcessor processor;
   processor.set_delegate(&delegate);
-  processor.EnqueueAction(&feeder_action);
-  processor.EnqueueAction(&download_action);
+  processor.EnqueueAction(std::move(feeder_action));
+  processor.EnqueueAction(std::move(download_action));
 
   loop.PostTask(FROM_HERE,
                 base::Bind(&StartProcessorInRunLoop, &processor, http_fetcher));
@@ -269,24 +272,25 @@
         {.size = size, .type = InstallPayloadType::kFull});
     total_expected_download_size += size;
   }
-  ObjectFeederAction<InstallPlan> feeder_action;
-  feeder_action.set_obj(install_plan);
+  auto feeder_action = std::make_unique<ObjectFeederAction<InstallPlan>>();
+  feeder_action->set_obj(install_plan);
   MockPrefs prefs;
   MockHttpFetcher* http_fetcher = new MockHttpFetcher(
       payload_datas[0].data(), payload_datas[0].size(), nullptr);
   // takes ownership of passed in HttpFetcher
-  DownloadAction download_action(&prefs,
-                                 fake_system_state.boot_control(),
-                                 fake_system_state.hardware(),
-                                 &fake_system_state,
-                                 http_fetcher,
-                                 false /* is_interactive */);
-  download_action.SetTestFileWriter(&mock_file_writer);
-  BondActions(&feeder_action, &download_action);
+  auto download_action =
+      std::make_unique<DownloadAction>(&prefs,
+                                       fake_system_state.boot_control(),
+                                       fake_system_state.hardware(),
+                                       &fake_system_state,
+                                       http_fetcher,
+                                       false /* interactive */);
+  download_action->SetTestFileWriter(&mock_file_writer);
+  BondActions(feeder_action.get(), download_action.get());
   MockDownloadActionDelegate download_delegate;
   {
     InSequence s;
-    download_action.set_delegate(&download_delegate);
+    download_action->set_delegate(&download_delegate);
     // these are hand-computed based on the payloads specified above
     EXPECT_CALL(download_delegate,
                 BytesReceived(kMockHttpFetcherChunkSize,
@@ -318,8 +322,8 @@
                               total_expected_download_size));
   }
   ActionProcessor processor;
-  processor.EnqueueAction(&feeder_action);
-  processor.EnqueueAction(&download_action);
+  processor.EnqueueAction(std::move(feeder_action));
+  processor.EnqueueAction(std::move(download_action));
 
   loop.PostTask(
       FROM_HERE,
@@ -358,31 +362,31 @@
     EXPECT_EQ(0, writer.Open(temp_file.path().c_str(), O_WRONLY | O_CREAT, 0));
 
     // takes ownership of passed in HttpFetcher
-    ObjectFeederAction<InstallPlan> feeder_action;
+    auto feeder_action = std::make_unique<ObjectFeederAction<InstallPlan>>();
     InstallPlan install_plan;
     install_plan.payloads.resize(1);
-    feeder_action.set_obj(install_plan);
+    feeder_action->set_obj(install_plan);
     FakeSystemState fake_system_state_;
     MockPrefs prefs;
-    DownloadAction download_action(
+    auto download_action = std::make_unique<DownloadAction>(
         &prefs,
         fake_system_state_.boot_control(),
         fake_system_state_.hardware(),
         &fake_system_state_,
         new MockHttpFetcher(data.data(), data.size(), nullptr),
-        false /* is_interactive */);
-    download_action.SetTestFileWriter(&writer);
+        false /* interactive */);
+    download_action->SetTestFileWriter(&writer);
     MockDownloadActionDelegate download_delegate;
     if (use_download_delegate) {
-      download_action.set_delegate(&download_delegate);
+      download_action->set_delegate(&download_delegate);
       EXPECT_CALL(download_delegate, BytesReceived(_, _, _)).Times(0);
     }
     TerminateEarlyTestProcessorDelegate delegate;
     ActionProcessor processor;
     processor.set_delegate(&delegate);
-    processor.EnqueueAction(&feeder_action);
-    processor.EnqueueAction(&download_action);
-    BondActions(&feeder_action, &download_action);
+    BondActions(feeder_action.get(), download_action.get());
+    processor.EnqueueAction(std::move(feeder_action));
+    processor.EnqueueAction(std::move(download_action));
 
     loop.PostTask(FROM_HERE,
                   base::Bind(&TerminateEarlyTestStarter, &processor));
@@ -420,22 +424,21 @@
 // This is a simple Action class for testing.
 class DownloadActionTestAction : public Action<DownloadActionTestAction> {
  public:
-  DownloadActionTestAction() : did_run_(false) {}
+  DownloadActionTestAction() = default;
   typedef InstallPlan InputObjectType;
   typedef InstallPlan OutputObjectType;
   ActionPipe<InstallPlan>* in_pipe() { return in_pipe_.get(); }
   ActionPipe<InstallPlan>* out_pipe() { return out_pipe_.get(); }
   ActionProcessor* processor() { return processor_; }
   void PerformAction() {
-    did_run_ = true;
     ASSERT_TRUE(HasInputObject());
     EXPECT_TRUE(expected_input_object_ == GetInputObject());
     ASSERT_TRUE(processor());
     processor()->ActionComplete(this, ErrorCode::kSuccess);
   }
-  string Type() const { return "DownloadActionTestAction"; }
+  static std::string StaticType() { return "DownloadActionTestAction"; }
+  string Type() const { return StaticType(); }
   InstallPlan expected_input_object_;
-  bool did_run_;
 };
 
 namespace {
@@ -444,9 +447,19 @@
 // only by the test PassObjectOutTest.
 class PassObjectOutTestProcessorDelegate : public ActionProcessorDelegate {
  public:
-  void ProcessingDone(const ActionProcessor* processor, ErrorCode code) {
+  void ProcessingDone(const ActionProcessor* processor,
+                      ErrorCode code) override {
     brillo::MessageLoop::current()->BreakLoop();
   }
+  void ActionCompleted(ActionProcessor* processor,
+                       AbstractAction* action,
+                       ErrorCode code) override {
+    if (action->Type() == DownloadActionTestAction::StaticType()) {
+      did_test_action_run_ = true;
+    }
+  }
+
+  bool did_test_action_run_ = false;
 };
 
 }  // namespace
@@ -463,29 +476,30 @@
   install_plan.payloads.push_back({.size = 1});
   EXPECT_TRUE(
       HashCalculator::RawHashOfData({'x'}, &install_plan.payloads[0].hash));
-  ObjectFeederAction<InstallPlan> feeder_action;
-  feeder_action.set_obj(install_plan);
+  auto feeder_action = std::make_unique<ObjectFeederAction<InstallPlan>>();
+  feeder_action->set_obj(install_plan);
   MockPrefs prefs;
   FakeSystemState fake_system_state_;
-  DownloadAction download_action(&prefs,
-                                 fake_system_state_.boot_control(),
-                                 fake_system_state_.hardware(),
-                                 &fake_system_state_,
-                                 new MockHttpFetcher("x", 1, nullptr),
-                                 false /* is_interactive */);
-  download_action.SetTestFileWriter(&writer);
+  auto download_action =
+      std::make_unique<DownloadAction>(&prefs,
+                                       fake_system_state_.boot_control(),
+                                       fake_system_state_.hardware(),
+                                       &fake_system_state_,
+                                       new MockHttpFetcher("x", 1, nullptr),
+                                       false /* interactive */);
+  download_action->SetTestFileWriter(&writer);
 
-  DownloadActionTestAction test_action;
-  test_action.expected_input_object_ = install_plan;
-  BondActions(&feeder_action, &download_action);
-  BondActions(&download_action, &test_action);
+  auto test_action = std::make_unique<DownloadActionTestAction>();
+  test_action->expected_input_object_ = install_plan;
+  BondActions(feeder_action.get(), download_action.get());
+  BondActions(download_action.get(), test_action.get());
 
   ActionProcessor processor;
   PassObjectOutTestProcessorDelegate delegate;
   processor.set_delegate(&delegate);
-  processor.EnqueueAction(&feeder_action);
-  processor.EnqueueAction(&download_action);
-  processor.EnqueueAction(&test_action);
+  processor.EnqueueAction(std::move(feeder_action));
+  processor.EnqueueAction(std::move(download_action));
+  processor.EnqueueAction(std::move(test_action));
 
   loop.PostTask(
       FROM_HERE,
@@ -495,7 +509,7 @@
   loop.Run();
   EXPECT_FALSE(loop.PendingTasks());
 
-  EXPECT_EQ(true, test_action.did_run_);
+  EXPECT_EQ(true, delegate.did_test_action_run_);
 }
 
 // Test fixture for P2P tests.
@@ -550,43 +564,44 @@
     install_plan.payloads.push_back(
         {.size = data_.length(),
          .hash = {'1', '2', '3', '4', 'h', 'a', 's', 'h'}});
-    ObjectFeederAction<InstallPlan> feeder_action;
-    feeder_action.set_obj(install_plan);
+    auto feeder_action = std::make_unique<ObjectFeederAction<InstallPlan>>();
+    feeder_action->set_obj(install_plan);
     MockPrefs prefs;
-    http_fetcher_ = new MockHttpFetcher(data_.c_str(),
-                                        data_.length(),
-                                        nullptr);
     // Note that DownloadAction takes ownership of the passed in HttpFetcher.
-    download_action_.reset(new DownloadAction(&prefs,
-                                              fake_system_state_.boot_control(),
-                                              fake_system_state_.hardware(),
-                                              &fake_system_state_,
-                                              http_fetcher_,
-                                              false /* is_interactive */));
-    download_action_->SetTestFileWriter(&writer);
-    BondActions(&feeder_action, download_action_.get());
-    DownloadActionTestProcessorDelegate delegate(ErrorCode::kSuccess);
-    delegate.expected_data_ = brillo::Blob(data_.begin() + start_at_offset_,
-                                           data_.end());
-    delegate.path_ = output_temp_file.path();
-    processor_.set_delegate(&delegate);
-    processor_.EnqueueAction(&feeder_action);
-    processor_.EnqueueAction(download_action_.get());
+    auto download_action = std::make_unique<DownloadAction>(
+        &prefs,
+        fake_system_state_.boot_control(),
+        fake_system_state_.hardware(),
+        &fake_system_state_,
+        new MockHttpFetcher(data_.c_str(), data_.length(), nullptr),
+        false /* interactive */);
+    auto http_fetcher = download_action->http_fetcher();
+    download_action->SetTestFileWriter(&writer);
+    BondActions(feeder_action.get(), download_action.get());
+    delegate_.expected_data_ =
+        brillo::Blob(data_.begin() + start_at_offset_, data_.end());
+    delegate_.path_ = output_temp_file.path();
+    processor_.set_delegate(&delegate_);
+    processor_.EnqueueAction(std::move(feeder_action));
+    processor_.EnqueueAction(std::move(download_action));
 
-    loop_.PostTask(FROM_HERE, base::Bind(
-        &P2PDownloadActionTest::StartProcessorInRunLoopForP2P,
-        base::Unretained(this)));
+    loop_.PostTask(
+        FROM_HERE,
+        base::Bind(
+            [](P2PDownloadActionTest* action_test, HttpFetcher* http_fetcher) {
+              action_test->processor_.StartProcessing();
+              http_fetcher->SetOffset(action_test->start_at_offset_);
+            },
+            base::Unretained(this),
+            base::Unretained(http_fetcher)));
     loop_.Run();
   }
 
   // Mainloop used to make StartDownload() synchronous.
   brillo::FakeMessageLoop loop_{nullptr};
 
-  // The DownloadAction instance under test.
-  unique_ptr<DownloadAction> download_action_;
-
-  // The HttpFetcher used in the test.
-  MockHttpFetcher* http_fetcher_;
+  // Delegate that is passed to the ActionProcessor.
+  DownloadActionTestProcessorDelegate delegate_;
 
   // The P2PManager used in the test.
   unique_ptr<P2PManager> p2p_manager_;
@@ -601,12 +616,6 @@
   string data_;
 
  private:
-  // Callback used in StartDownload() method.
-  void StartProcessorInRunLoopForP2P() {
-    processor_.StartProcessing();
-    download_action_->http_fetcher()->SetOffset(start_at_offset_);
-  }
-
   // The requested starting offset passed to SetupDownload().
   off_t start_at_offset_;
 
@@ -614,17 +623,11 @@
 };
 
 TEST_F(P2PDownloadActionTest, IsWrittenTo) {
-  if (!test_utils::IsXAttrSupported(FilePath("/tmp"))) {
-    LOG(WARNING) << "Skipping test because /tmp does not support xattr. "
-                 << "Please update your system to support this feature.";
-    return;
-  }
-
   SetupDownload(0);     // starting_offset
   StartDownload(true);  // use_p2p_to_share
 
   // Check the p2p file and its content matches what was sent.
-  string file_id = download_action_->p2p_file_id();
+  string file_id = delegate_.p2p_file_id_;
   EXPECT_NE("", file_id);
   EXPECT_EQ(static_cast<int>(data_.length()),
             p2p_manager_->FileGetSize(file_id));
@@ -637,28 +640,16 @@
 }
 
 TEST_F(P2PDownloadActionTest, DeleteIfHoleExists) {
-  if (!test_utils::IsXAttrSupported(FilePath("/tmp"))) {
-    LOG(WARNING) << "Skipping test because /tmp does not support xattr. "
-                 << "Please update your system to support this feature.";
-    return;
-  }
-
   SetupDownload(1000);  // starting_offset
   StartDownload(true);  // use_p2p_to_share
 
   // DownloadAction should convey that the file is not being shared.
   // and that we don't have any p2p files.
-  EXPECT_EQ(download_action_->p2p_file_id(), "");
+  EXPECT_EQ(delegate_.p2p_file_id_, "");
   EXPECT_EQ(p2p_manager_->CountSharedFiles(), 0);
 }
 
 TEST_F(P2PDownloadActionTest, CanAppend) {
-  if (!test_utils::IsXAttrSupported(FilePath("/tmp"))) {
-    LOG(WARNING) << "Skipping test because /tmp does not support xattr. "
-                 << "Please update your system to support this feature.";
-    return;
-  }
-
   SetupDownload(1000);  // starting_offset
 
   // Prepare the file with existing data before starting to write to
@@ -676,7 +667,7 @@
 
   // DownloadAction should convey the same file_id and the file should
   // have the expected size.
-  EXPECT_EQ(download_action_->p2p_file_id(), file_id);
+  EXPECT_EQ(delegate_.p2p_file_id_, file_id);
   EXPECT_EQ(static_cast<ssize_t>(data_.length()),
             p2p_manager_->FileGetSize(file_id));
   EXPECT_EQ(static_cast<ssize_t>(data_.length()),
@@ -691,12 +682,6 @@
 }
 
 TEST_F(P2PDownloadActionTest, DeletePartialP2PFileIfResumingWithoutP2P) {
-  if (!test_utils::IsXAttrSupported(FilePath("/tmp"))) {
-    LOG(WARNING) << "Skipping test because /tmp does not support xattr. "
-                 << "Please update your system to support this feature.";
-    return;
-  }
-
   SetupDownload(1000);  // starting_offset
 
   // Prepare the file with all existing data before starting to write
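The test rewrites above all follow the same ownership change: actions are now
heap-allocated and handed to the ActionProcessor, which owns them for the
duration of the run. A condensed sketch of the pattern, with illustrative local
names:

auto feeder = std::make_unique<ObjectFeederAction<InstallPlan>>();
auto download = std::make_unique<DownloadAction>(&prefs,
                                                 boot_control,
                                                 hardware,
                                                 &system_state,
                                                 http_fetcher,  // owned by the action
                                                 false /* interactive */);
BondActions(feeder.get(), download.get());     // wire pipes before moving
ActionProcessor processor;
processor.EnqueueAction(std::move(feeder));    // processor takes ownership
processor.EnqueueAction(std::move(download));  // raw pointers remain valid only
                                               // while the processor holds them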
diff --git a/payload_consumer/fake_file_descriptor.cc b/payload_consumer/fake_file_descriptor.cc
index d54856b..63af181 100644
--- a/payload_consumer/fake_file_descriptor.cc
+++ b/payload_consumer/fake_file_descriptor.cc
@@ -73,4 +73,12 @@
   return offset_;
 }
 
+brillo::Blob FakeFileDescriptorData(size_t size) {
+  brillo::Blob ret(size);
+  FakeFileDescriptor fd;
+  fd.SetFileSize(size);
+  fd.Read(ret.data(), size);
+  return ret;
+}
+
 }  // namespace chromeos_update_engine
diff --git a/payload_consumer/fake_file_descriptor.h b/payload_consumer/fake_file_descriptor.h
index f17820b..c9fea7d 100644
--- a/payload_consumer/fake_file_descriptor.h
+++ b/payload_consumer/fake_file_descriptor.h
@@ -22,6 +22,8 @@
 #include <utility>
 #include <vector>
 
+#include <brillo/secure_blob.h>
+
 #include "update_engine/payload_consumer/file_descriptor.h"
 
 namespace chromeos_update_engine {
@@ -121,6 +123,9 @@
   DISALLOW_COPY_AND_ASSIGN(FakeFileDescriptor);
 };
 
+// Returns a blob with the first |size| bytes of a FakeFileDescriptor stream.
+brillo::Blob FakeFileDescriptorData(size_t size);
+
 }  // namespace chromeos_update_engine
 
 #endif  // UPDATE_ENGINE_PAYLOAD_CONSUMER_FAKE_FILE_DESCRIPTOR_H_
diff --git a/payload_consumer/fec_file_descriptor.cc b/payload_consumer/fec_file_descriptor.cc
new file mode 100644
index 0000000..de22cf3
--- /dev/null
+++ b/payload_consumer/fec_file_descriptor.cc
@@ -0,0 +1,78 @@
+//
+// Copyright (C) 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_consumer/fec_file_descriptor.h"
+
+namespace chromeos_update_engine {
+
+bool FecFileDescriptor::Open(const char* path, int flags) {
+  return Open(path, flags, 0600);
+}
+
+bool FecFileDescriptor::Open(const char* path, int flags, mode_t mode) {
+  if (!fh_.open(path, flags, mode))
+    return false;
+
+  if (!fh_.has_ecc()) {
+    LOG(ERROR) << "No ECC data in the passed file";
+    fh_.close();
+    return false;
+  }
+
+  fec_status status;
+  if (!fh_.get_status(status)) {
+    LOG(ERROR) << "Couldn't load ECC status";
+    fh_.close();
+    return false;
+  }
+
+  dev_size_ = status.data_size;
+  return true;
+}
+
+ssize_t FecFileDescriptor::Read(void* buf, size_t count) {
+  return fh_.read(buf, count);
+}
+
+ssize_t FecFileDescriptor::Write(const void* buf, size_t count) {
+  errno = EROFS;
+  return -1;
+}
+
+off64_t FecFileDescriptor::Seek(off64_t offset, int whence) {
+  if (fh_.seek(offset, whence)) {
+    return offset;
+  }
+  return -1;
+}
+
+uint64_t FecFileDescriptor::BlockDevSize() {
+  return dev_size_;
+}
+
+bool FecFileDescriptor::BlkIoctl(int request,
+                                 uint64_t start,
+                                 uint64_t length,
+                                 int* result) {
+  // No IOCTL pass-through in this mode.
+  return false;
+}
+
+bool FecFileDescriptor::Close() {
+  return fh_.close();
+}
+
+}  // namespace chromeos_update_engine
diff --git a/payload_consumer/fec_file_descriptor.h b/payload_consumer/fec_file_descriptor.h
new file mode 100644
index 0000000..e7f2e40
--- /dev/null
+++ b/payload_consumer/fec_file_descriptor.h
@@ -0,0 +1,65 @@
+//
+// Copyright (C) 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_PAYLOAD_CONSUMER_FEC_FILE_DESCRIPTOR_H_
+#define UPDATE_ENGINE_PAYLOAD_CONSUMER_FEC_FILE_DESCRIPTOR_H_
+
+#include <fec/io.h>
+
+#include "update_engine/payload_consumer/file_descriptor.h"
+
+// A FileDescriptor implementation with error correction based on the "libfec"
+// library. The libfec library on the running system can parse the error
+// correction blocks stored in partitions that have verity and error correction
+// enabled. This information is present in the raw block device, but not
+// available through the dm-verity block device.
+
+namespace chromeos_update_engine {
+
+// An error-corrected file based on FEC.
+class FecFileDescriptor : public FileDescriptor {
+ public:
+  FecFileDescriptor() = default;
+  ~FecFileDescriptor() = default;
+
+  // Interface methods.
+  bool Open(const char* path, int flags, mode_t mode) override;
+  bool Open(const char* path, int flags) override;
+  ssize_t Read(void* buf, size_t count) override;
+  ssize_t Write(const void* buf, size_t count) override;
+  off64_t Seek(off64_t offset, int whence) override;
+  uint64_t BlockDevSize() override;
+  bool BlkIoctl(int request,
+                uint64_t start,
+                uint64_t length,
+                int* result) override;
+  bool Flush() override { return true; }
+  bool Close() override;
+  bool IsSettingErrno() override { return true; }
+  bool IsOpen() override {
+    // The bool operator on the fec::io class tells whether the internal
+    // handle is open.
+    return static_cast<bool>(fh_);
+  }
+
+ protected:
+  fec::io fh_;
+  uint64_t dev_size_{0};
+};
+
+}  // namespace chromeos_update_engine
+
+#endif  // UPDATE_ENGINE_PAYLOAD_CONSUMER_FEC_FILE_DESCRIPTOR_H_
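A small caller-side sketch of the new descriptor, using only the interface
declared above; the partition path is a made-up example and error handling is
reduced to logging.

FileDescriptorPtr fd(new FecFileDescriptor());
if (!fd->Open("/dev/block/by-name/system_a", O_RDONLY)) {
  // Either the partition has no ECC data or its fec status could not be read.
  LOG(WARNING) << "No usable ECC data; skipping error-corrected reads.";
} else {
  brillo::Blob block(4096);
  fd->Seek(0, SEEK_SET);
  // Reads go through libfec, so correctable corruption in the underlying
  // block device is transparently repaired.
  ssize_t bytes = fd->Read(block.data(), block.size());
  LOG(INFO) << "Read " << bytes << " error-corrected bytes; data size: "
            << fd->BlockDevSize();
  fd->Close();
}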
diff --git a/payload_consumer/file_descriptor_utils.cc b/payload_consumer/file_descriptor_utils.cc
index b1902de..ebfb977 100644
--- a/payload_consumer/file_descriptor_utils.cc
+++ b/payload_consumer/file_descriptor_utils.cc
@@ -96,10 +96,7 @@
                         const RepeatedPtrField<Extent>& extents,
                         uint64_t block_size,
                         brillo::Blob* hash_out) {
-  TEST_AND_RETURN_FALSE(hash_out != nullptr);
-  TEST_AND_RETURN_FALSE(
-      CommonHashExtents(source, extents, nullptr, block_size, hash_out));
-  return true;
+  return CommonHashExtents(source, extents, nullptr, block_size, hash_out);
 }
 
 }  // namespace fd_utils
diff --git a/payload_consumer/file_descriptor_utils.h b/payload_consumer/file_descriptor_utils.h
index 397c35e..68fb001 100644
--- a/payload_consumer/file_descriptor_utils.h
+++ b/payload_consumer/file_descriptor_utils.h
@@ -42,7 +42,7 @@
     uint64_t block_size,
     brillo::Blob* hash_out);
 
-// Reads blocks from |source| and caculates the hash. The blocks to read are
+// Reads blocks from |source| and calculates the hash. The blocks to read are
 // specified by |extents|. Stores the hash in |hash_out| if it is not null. The
 // block sizes are passed as |block_size|. In case of error reading, it returns
 // false and the value pointed by |hash_out| is undefined.
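With the relaxed contract (|hash_out| may now be null, matching the unittest
change below), the helper can be used either to hash extents or merely to check
that they are readable. A sketch with placeholder descriptor, extents, and
block size:

brillo::Blob hash;
// Hash the extents.
bool hashed = fd_utils::ReadAndHashExtents(source_fd, extents, 4096, &hash);
// Only verify that the extents can be read; passing nullptr no longer fails.
bool readable =
    fd_utils::ReadAndHashExtents(source_fd, extents, 4096, nullptr);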
diff --git a/payload_consumer/file_descriptor_utils_unittest.cc b/payload_consumer/file_descriptor_utils_unittest.cc
index 79d2184..48e610f 100644
--- a/payload_consumer/file_descriptor_utils_unittest.cc
+++ b/payload_consumer/file_descriptor_utils_unittest.cc
@@ -175,10 +175,10 @@
   EXPECT_FALSE(fd_utils::ReadAndHashExtents(source_, extents, 4, &hash_out));
 }
 
-// Test that if hash_out is null, then it should fail.
+// Test that if hash_out is null, it still works.
 TEST_F(FileDescriptorUtilsTest, ReadAndHashExtentsWithoutHashingTest) {
   auto extents = CreateExtentList({{0, 5}});
-  EXPECT_FALSE(fd_utils::ReadAndHashExtents(source_, extents, 4, nullptr));
+  EXPECT_TRUE(fd_utils::ReadAndHashExtents(source_, extents, 4, nullptr));
 }
 
 // Tests that it can calculate the hash properly.
diff --git a/payload_consumer/file_writer_unittest.cc b/payload_consumer/file_writer_unittest.cc
index 92837c8..05df307 100644
--- a/payload_consumer/file_writer_unittest.cc
+++ b/payload_consumer/file_writer_unittest.cc
@@ -36,19 +36,17 @@
 
 TEST(FileWriterTest, SimpleTest) {
   // Create a uniquely named file for testing.
-  string path;
-  ASSERT_TRUE(utils::MakeTempFile("FileWriterTest-XXXXXX", &path, nullptr));
-  ScopedPathUnlinker path_unlinker(path);
-
+  test_utils::ScopedTempFile file("FileWriterTest-XXXXXX");
   DirectFileWriter file_writer;
-  EXPECT_EQ(0, file_writer.Open(path.c_str(),
-                                O_CREAT | O_LARGEFILE | O_TRUNC | O_WRONLY,
-                                0644));
+  EXPECT_EQ(0,
+            file_writer.Open(file.path().c_str(),
+                             O_CREAT | O_LARGEFILE | O_TRUNC | O_WRONLY,
+                             0644));
   EXPECT_TRUE(file_writer.Write("test", 4));
   brillo::Blob actual_data;
-  EXPECT_TRUE(utils::ReadFile(path, &actual_data));
+  EXPECT_TRUE(utils::ReadFile(file.path(), &actual_data));
 
-  EXPECT_FALSE(memcmp("test", actual_data.data(), actual_data.size()));
+  EXPECT_EQ("test", string(actual_data.begin(), actual_data.end()));
   EXPECT_EQ(0, file_writer.Close());
 }
 
@@ -61,14 +59,12 @@
 
 TEST(FileWriterTest, WriteErrorTest) {
   // Create a uniquely named file for testing.
-  string path;
-  ASSERT_TRUE(utils::MakeTempFile("FileWriterTest-XXXXXX", &path, nullptr));
-  ScopedPathUnlinker path_unlinker(path);
-
+  test_utils::ScopedTempFile file("FileWriterTest-XXXXXX");
   DirectFileWriter file_writer;
-  EXPECT_EQ(0, file_writer.Open(path.c_str(),
-                                O_CREAT | O_LARGEFILE | O_TRUNC | O_RDONLY,
-                                0644));
+  EXPECT_EQ(0,
+            file_writer.Open(file.path().c_str(),
+                             O_CREAT | O_LARGEFILE | O_TRUNC | O_RDONLY,
+                             0644));
   EXPECT_FALSE(file_writer.Write("x", 1));
   EXPECT_EQ(0, file_writer.Close());
 }
diff --git a/payload_consumer/filesystem_verifier_action.cc b/payload_consumer/filesystem_verifier_action.cc
index 5edde9e..c9cb5af 100644
--- a/payload_consumer/filesystem_verifier_action.cc
+++ b/payload_consumer/filesystem_verifier_action.cc
@@ -29,10 +29,7 @@
 #include <brillo/data_encoding.h>
 #include <brillo/streams/file_stream.h>
 
-#include "update_engine/common/boot_control_interface.h"
 #include "update_engine/common/utils.h"
-#include "update_engine/payload_consumer/delta_performer.h"
-#include "update_engine/payload_consumer/payload_constants.h"
 
 using brillo::data_encoding::Base64Encode;
 using std::string;
@@ -70,10 +67,6 @@
   Cleanup(ErrorCode::kSuccess);  // error code is ignored if canceled_ is true.
 }
 
-bool FilesystemVerifierAction::IsCleanupPending() const {
-  return src_stream_ != nullptr;
-}
-
 void FilesystemVerifierAction::Cleanup(ErrorCode code) {
   src_stream_.reset();
   // This memory is not used anymore.
@@ -91,24 +84,38 @@
     Cleanup(ErrorCode::kSuccess);
     return;
   }
-  InstallPlan::Partition& partition =
+  const InstallPlan::Partition& partition =
       install_plan_.partitions[partition_index_];
 
   string part_path;
   switch (verifier_step_) {
     case VerifierStep::kVerifySourceHash:
       part_path = partition.source_path;
-      remaining_size_ = partition.source_size;
+      partition_size_ = partition.source_size;
       break;
     case VerifierStep::kVerifyTargetHash:
       part_path = partition.target_path;
-      remaining_size_ = partition.target_size;
+      partition_size_ = partition.target_size;
       break;
   }
+
+  if (part_path.empty()) {
+    if (partition_size_ == 0) {
+      LOG(INFO) << "Skip hashing partition " << partition_index_ << " ("
+                << partition.name << ") because size is 0.";
+      partition_index_++;
+      StartPartitionHashing();
+      return;
+    }
+    LOG(ERROR) << "Cannot hash partition " << partition_index_ << " ("
+               << partition.name
+               << ") because its device path cannot be determined.";
+    Cleanup(ErrorCode::kFilesystemVerifierError);
+    return;
+  }
+
   LOG(INFO) << "Hashing partition " << partition_index_ << " ("
             << partition.name << ") on device " << part_path;
-  if (part_path.empty())
-    return Cleanup(ErrorCode::kFilesystemVerifierError);
 
   brillo::ErrorPtr error;
   src_stream_ = brillo::FileStream::Open(
@@ -119,33 +126,55 @@
 
   if (!src_stream_) {
     LOG(ERROR) << "Unable to open " << part_path << " for reading";
-    return Cleanup(ErrorCode::kFilesystemVerifierError);
+    Cleanup(ErrorCode::kFilesystemVerifierError);
+    return;
   }
 
   buffer_.resize(kReadFileBufferSize);
-  read_done_ = false;
-  hasher_.reset(new HashCalculator());
+  hasher_ = std::make_unique<HashCalculator>();
+
+  offset_ = 0;
+  if (verifier_step_ == VerifierStep::kVerifyTargetHash &&
+      install_plan_.write_verity) {
+    if (!verity_writer_->Init(partition)) {
+      Cleanup(ErrorCode::kVerityCalculationError);
+      return;
+    }
+  }
 
   // Start the first read.
   ScheduleRead();
 }
 
 void FilesystemVerifierAction::ScheduleRead() {
-  size_t bytes_to_read = std::min(static_cast<int64_t>(buffer_.size()),
-                                  remaining_size_);
+  const InstallPlan::Partition& partition =
+      install_plan_.partitions[partition_index_];
+
+  // We can only start reading anything past |hash_tree_offset| after we have
+  // already read all the data blocks that the hash tree covers. The same
+  // applies to FEC.
+  uint64_t read_end = partition_size_;
+  if (partition.hash_tree_size != 0 &&
+      offset_ < partition.hash_tree_data_offset + partition.hash_tree_data_size)
+    read_end = std::min(read_end, partition.hash_tree_offset);
+  if (partition.fec_size != 0 &&
+      offset_ < partition.fec_data_offset + partition.fec_data_size)
+    read_end = std::min(read_end, partition.fec_offset);
+  size_t bytes_to_read =
+      std::min(static_cast<uint64_t>(buffer_.size()), read_end - offset_);
   if (!bytes_to_read) {
-    OnReadDoneCallback(0);
+    FinishPartitionHashing();
     return;
   }
 
   bool read_async_ok = src_stream_->ReadAsync(
-    buffer_.data(),
-    bytes_to_read,
-    base::Bind(&FilesystemVerifierAction::OnReadDoneCallback,
-               base::Unretained(this)),
-    base::Bind(&FilesystemVerifierAction::OnReadErrorCallback,
-               base::Unretained(this)),
-    nullptr);
+      buffer_.data(),
+      bytes_to_read,
+      base::Bind(&FilesystemVerifierAction::OnReadDoneCallback,
+                 base::Unretained(this)),
+      base::Bind(&FilesystemVerifierAction::OnReadErrorCallback,
+                 base::Unretained(this)),
+      nullptr);
 
   if (!read_async_ok) {
     LOG(ERROR) << "Unable to schedule an asynchronous read from the stream.";
@@ -154,31 +183,40 @@
 }
 
 void FilesystemVerifierAction::OnReadDoneCallback(size_t bytes_read) {
+  if (cancelled_) {
+    Cleanup(ErrorCode::kError);
+    return;
+  }
+
   if (bytes_read == 0) {
-    read_done_ = true;
-  } else {
-    remaining_size_ -= bytes_read;
-    CHECK(!read_done_);
-    if (!hasher_->Update(buffer_.data(), bytes_read)) {
-      LOG(ERROR) << "Unable to update the hash.";
-      Cleanup(ErrorCode::kError);
+    LOG(ERROR) << "Failed to read the remaining " << partition_size_ - offset_
+               << " bytes from partition "
+               << install_plan_.partitions[partition_index_].name;
+    Cleanup(ErrorCode::kFilesystemVerifierError);
+    return;
+  }
+
+  if (!hasher_->Update(buffer_.data(), bytes_read)) {
+    LOG(ERROR) << "Unable to update the hash.";
+    Cleanup(ErrorCode::kError);
+    return;
+  }
+
+  if (verifier_step_ == VerifierStep::kVerifyTargetHash &&
+      install_plan_.write_verity) {
+    if (!verity_writer_->Update(offset_, buffer_.data(), bytes_read)) {
+      Cleanup(ErrorCode::kVerityCalculationError);
       return;
     }
   }
 
-  // We either terminate the current partition or have more data to read.
-  if (cancelled_)
-    return Cleanup(ErrorCode::kError);
+  offset_ += bytes_read;
 
-  if (read_done_ || remaining_size_ == 0) {
-    if (remaining_size_ != 0) {
-      LOG(ERROR) << "Failed to read the remaining " << remaining_size_
-                 << " bytes from partition "
-                 << install_plan_.partitions[partition_index_].name;
-      return Cleanup(ErrorCode::kFilesystemVerifierError);
-    }
-    return FinishPartitionHashing();
+  if (offset_ == partition_size_) {
+    FinishPartitionHashing();
+    return;
   }
+
   ScheduleRead();
 }
 
@@ -192,7 +230,8 @@
 void FilesystemVerifierAction::FinishPartitionHashing() {
   if (!hasher_->Finalize()) {
     LOG(ERROR) << "Unable to finalize the hash.";
-    return Cleanup(ErrorCode::kError);
+    Cleanup(ErrorCode::kError);
+    return;
   }
   InstallPlan::Partition& partition =
       install_plan_.partitions[partition_index_];
@@ -206,7 +245,8 @@
                    << "' partition verification failed.";
         if (partition.source_hash.empty()) {
           // No need to verify source if it is a full payload.
-          return Cleanup(ErrorCode::kNewRootfsVerificationError);
+          Cleanup(ErrorCode::kNewRootfsVerificationError);
+          return;
         }
         // If we have not verified source partition yet, now that the target
         // partition does not match, and it's not a full payload, we need to
@@ -242,7 +282,8 @@
                      "-binary | openssl base64";
         LOG(INFO) << "To get the checksum of partitions in a bin file, "
                   << "run: .../src/scripts/sha256_partitions.sh .../file.bin";
-        return Cleanup(ErrorCode::kDownloadStateInitializationError);
+        Cleanup(ErrorCode::kDownloadStateInitializationError);
+        return;
       }
       // The action will skip kVerifySourceHash step if target partition hash
       // matches, if we are in this step, it means target hash does not match,
@@ -250,7 +291,8 @@
       // code to reflect the error in target partition.
       // We only need to verify the source partition which the target hash does
       // not match, the rest of the partitions don't matter.
-      return Cleanup(ErrorCode::kNewRootfsVerificationError);
+      Cleanup(ErrorCode::kNewRootfsVerificationError);
+      return;
   }
   // Start hashing the next partition, if any.
   hasher_.reset();
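
For reference, a minimal standalone sketch of the read-window clamping that the new ScheduleRead() above performs; the names mirror the InstallPlan::Partition fields and the helper itself is illustrative only, not part of the patch. The verifier must not read into the hash tree (or FEC) region until every data byte that region covers has been hashed, since those regions are only written out by the verity writer while the data blocks stream through.

#include <algorithm>
#include <cstdint>

#include "update_engine/payload_consumer/install_plan.h"

namespace chromeos_update_engine {

// Illustrative helper: the exclusive upper bound the verifier may read up to
// from |offset|, given the verity layout of |p| (all values in bytes).
uint64_t ComputeReadEnd(const InstallPlan::Partition& p,
                        uint64_t offset,
                        uint64_t partition_size) {
  uint64_t read_end = partition_size;
  // Hold off on the hash tree until all of its covered data has been read.
  if (p.hash_tree_size != 0 &&
      offset < p.hash_tree_data_offset + p.hash_tree_data_size)
    read_end = std::min(read_end, p.hash_tree_offset);
  // The same rule applies to the FEC region.
  if (p.fec_size != 0 && offset < p.fec_data_offset + p.fec_data_size)
    read_end = std::min(read_end, p.fec_offset);
  return read_end;
}

}  // namespace chromeos_update_engine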
diff --git a/payload_consumer/filesystem_verifier_action.h b/payload_consumer/filesystem_verifier_action.h
index 616f7b7..83d6668 100644
--- a/payload_consumer/filesystem_verifier_action.h
+++ b/payload_consumer/filesystem_verifier_action.h
@@ -20,6 +20,7 @@
 #include <sys/stat.h>
 #include <sys/types.h>
 
+#include <memory>
 #include <string>
 #include <vector>
 
@@ -28,6 +29,7 @@
 #include "update_engine/common/action.h"
 #include "update_engine/common/hash_calculator.h"
 #include "update_engine/payload_consumer/install_plan.h"
+#include "update_engine/payload_consumer/verity_writer_interface.h"
 
 // This action will hash all the partitions of the target slot involved in the
 // update. The hashes are then verified against the ones in the InstallPlan.
@@ -49,22 +51,19 @@
 
 class FilesystemVerifierAction : public InstallPlanAction {
  public:
-  FilesystemVerifierAction() = default;
+  FilesystemVerifierAction()
+      : verity_writer_(verity_writer::CreateVerityWriter()) {}
+  ~FilesystemVerifierAction() override = default;
 
   void PerformAction() override;
   void TerminateProcessing() override;
 
-  // Used for testing. Return true if Cleanup() has not yet been called due
-  // to a callback upon the completion or cancellation of the verifier action.
-  // A test should wait until IsCleanupPending() returns false before
-  // terminating the main loop.
-  bool IsCleanupPending() const;
-
   // Debugging/logging
   static std::string StaticType() { return "FilesystemVerifierAction"; }
   std::string Type() const override { return StaticType(); }
 
  private:
+  friend class FilesystemVerifierActionTestDelegate;
   // Starts the hashing of the current partition. If there aren't any partitions
   // remaining to be hashed, it finishes the action.
   void StartPartitionHashing();
@@ -99,7 +98,6 @@
   // Buffer for storing data we read.
   brillo::Blob buffer_;
 
-  bool read_done_{false};  // true if reached EOF on the input stream.
   bool cancelled_{false};  // true if the action has been cancelled.
 
   // The install plan we're passed in via the input pipe.
@@ -108,10 +106,18 @@
   // Calculates the hash of the data.
   std::unique_ptr<HashCalculator> hasher_;
 
-  // Reads and hashes this many bytes from the head of the input stream. This
-  // field is initialized from the corresponding InstallPlan::Partition size,
-  // when the partition starts to be hashed.
-  int64_t remaining_size_{0};
+  // Writes verity data for the current partition.
+  std::unique_ptr<VerityWriterInterface> verity_writer_;
+
+  // Reads and hashes this many bytes from the head of the input stream. When
+  // the partition starts to be hashed, this field is initialized from the
+  // corresponding InstallPlan::Partition size, which is the total size
+  // update_engine is expected to write and may be smaller than the size of
+  // the partition in the GPT.
+  uint64_t partition_size_{0};
+
+  // The byte offset that we are reading in the current partition.
+  uint64_t offset_{0};
 
   DISALLOW_COPY_AND_ASSIGN(FilesystemVerifierAction);
 };
diff --git a/payload_consumer/filesystem_verifier_action_unittest.cc b/payload_consumer/filesystem_verifier_action_unittest.cc
index b4f7f7f..7fa61c0 100644
--- a/payload_consumer/filesystem_verifier_action_unittest.cc
+++ b/payload_consumer/filesystem_verifier_action_unittest.cc
@@ -16,30 +16,23 @@
 
 #include "update_engine/payload_consumer/filesystem_verifier_action.h"
 
-#include <fcntl.h>
-
-#include <set>
+#include <memory>
 #include <string>
-#include <vector>
+#include <utility>
 
 #include <base/bind.h>
 #include <base/posix/eintr_wrapper.h>
-#include <base/strings/string_util.h>
-#include <base/strings/stringprintf.h>
 #include <brillo/message_loops/fake_message_loop.h>
 #include <brillo/message_loops/message_loop_utils.h>
-#include <gmock/gmock.h>
+#include <brillo/secure_blob.h>
 #include <gtest/gtest.h>
 
 #include "update_engine/common/hash_calculator.h"
 #include "update_engine/common/test_utils.h"
 #include "update_engine/common/utils.h"
-#include "update_engine/payload_consumer/payload_constants.h"
 
 using brillo::MessageLoop;
-using std::set;
 using std::string;
-using std::vector;
 
 namespace chromeos_update_engine {
 
@@ -56,32 +49,22 @@
   // Returns true iff test has completed successfully.
   bool DoTest(bool terminate_early, bool hash_fail);
 
+  void BuildActions(const InstallPlan& install_plan);
+
   brillo::FakeMessageLoop loop_{nullptr};
+  ActionProcessor processor_;
 };
 
 class FilesystemVerifierActionTestDelegate : public ActionProcessorDelegate {
  public:
-  explicit FilesystemVerifierActionTestDelegate(
-      FilesystemVerifierAction* action)
-      : action_(action), ran_(false), code_(ErrorCode::kError) {}
-  void ExitMainLoop() {
-    // We need to wait for the Action to call Cleanup.
-    if (action_->IsCleanupPending()) {
-      LOG(INFO) << "Waiting for Cleanup() to be called.";
-      MessageLoop::current()->PostDelayedTask(
-          FROM_HERE,
-          base::Bind(&FilesystemVerifierActionTestDelegate::ExitMainLoop,
-                     base::Unretained(this)),
-          base::TimeDelta::FromMilliseconds(100));
-    } else {
-      MessageLoop::current()->BreakLoop();
-    }
-  }
+  FilesystemVerifierActionTestDelegate()
+      : ran_(false), code_(ErrorCode::kError) {}
+
   void ProcessingDone(const ActionProcessor* processor, ErrorCode code) {
-    ExitMainLoop();
+    MessageLoop::current()->BreakLoop();
   }
   void ProcessingStopped(const ActionProcessor* processor) {
-    ExitMainLoop();
+    MessageLoop::current()->BreakLoop();
   }
   void ActionCompleted(ActionProcessor* processor,
                        AbstractAction* action,
@@ -89,36 +72,27 @@
     if (action->Type() == FilesystemVerifierAction::StaticType()) {
       ran_ = true;
       code_ = code;
+      EXPECT_FALSE(static_cast<FilesystemVerifierAction*>(action)->src_stream_);
+    } else if (action->Type() ==
+               ObjectCollectorAction<InstallPlan>::StaticType()) {
+      auto collector_action =
+          static_cast<ObjectCollectorAction<InstallPlan>*>(action);
+      install_plan_.reset(new InstallPlan(collector_action->object()));
     }
   }
   bool ran() const { return ran_; }
   ErrorCode code() const { return code_; }
 
+  std::unique_ptr<InstallPlan> install_plan_;
+
  private:
-  FilesystemVerifierAction* action_;
   bool ran_;
   ErrorCode code_;
 };
 
-void StartProcessorInRunLoop(ActionProcessor* processor,
-                             FilesystemVerifierAction* filesystem_copier_action,
-                             bool terminate_early) {
-  processor->StartProcessing();
-  if (terminate_early) {
-    EXPECT_NE(nullptr, filesystem_copier_action);
-    processor->StopProcessing();
-  }
-}
-
 bool FilesystemVerifierActionTest::DoTest(bool terminate_early,
                                           bool hash_fail) {
-  string a_loop_file;
-
-  if (!(utils::MakeTempFile("a_loop_file.XXXXXX", &a_loop_file, nullptr))) {
-    ADD_FAILURE();
-    return false;
-  }
-  ScopedPathUnlinker a_loop_file_unlinker(a_loop_file);
+  test_utils::ScopedTempFile a_loop_file("a_loop_file.XXXXXX");
 
   // Make random data for a.
   const size_t kLoopFileSize = 10 * 1024 * 1024 + 512;
@@ -126,7 +100,7 @@
   test_utils::FillWithData(&a_loop_data);
 
   // Write data to disk
-  if (!(test_utils::WriteFileVector(a_loop_file, a_loop_data))) {
+  if (!(test_utils::WriteFileVector(a_loop_file.path(), a_loop_data))) {
     ADD_FAILURE();
     return false;
   }
@@ -134,13 +108,13 @@
   // Attach loop devices to the files
   string a_dev;
   test_utils::ScopedLoopbackDeviceBinder a_dev_releaser(
-      a_loop_file, false, &a_dev);
+      a_loop_file.path(), false, &a_dev);
   if (!(a_dev_releaser.is_bound())) {
     ADD_FAILURE();
     return false;
   }
 
-  LOG(INFO) << "verifying: "  << a_loop_file << " (" << a_dev << ")";
+  LOG(INFO) << "verifying: " << a_loop_file.path() << " (" << a_dev << ")";
 
   bool success = true;
 
@@ -164,27 +138,21 @@
   }
   install_plan.partitions = {part};
 
-  ActionProcessor processor;
+  BuildActions(install_plan);
 
-  ObjectFeederAction<InstallPlan> feeder_action;
-  FilesystemVerifierAction copier_action;
-  ObjectCollectorAction<InstallPlan> collector_action;
+  FilesystemVerifierActionTestDelegate delegate;
+  processor_.set_delegate(&delegate);
 
-  BondActions(&feeder_action, &copier_action);
-  BondActions(&copier_action, &collector_action);
-
-  FilesystemVerifierActionTestDelegate delegate(&copier_action);
-  processor.set_delegate(&delegate);
-  processor.EnqueueAction(&feeder_action);
-  processor.EnqueueAction(&copier_action);
-  processor.EnqueueAction(&collector_action);
-
-  feeder_action.set_obj(install_plan);
-
-  loop_.PostTask(FROM_HERE, base::Bind(&StartProcessorInRunLoop,
-                                       &processor,
-                                       &copier_action,
-                                       terminate_early));
+  loop_.PostTask(FROM_HERE,
+                 base::Bind(
+                     [](ActionProcessor* processor, bool terminate_early) {
+                       processor->StartProcessing();
+                       if (terminate_early) {
+                         processor->StopProcessing();
+                       }
+                     },
+                     base::Unretained(&processor_),
+                     terminate_early));
   loop_.Run();
 
   if (!terminate_early) {
@@ -213,12 +181,29 @@
   EXPECT_TRUE(is_a_file_reading_eq);
   success = success && is_a_file_reading_eq;
 
-  bool is_install_plan_eq = (collector_action.object() == install_plan);
+  bool is_install_plan_eq = (*delegate.install_plan_ == install_plan);
   EXPECT_TRUE(is_install_plan_eq);
   success = success && is_install_plan_eq;
   return success;
 }
 
+void FilesystemVerifierActionTest::BuildActions(
+    const InstallPlan& install_plan) {
+  auto feeder_action = std::make_unique<ObjectFeederAction<InstallPlan>>();
+  auto verifier_action = std::make_unique<FilesystemVerifierAction>();
+  auto collector_action =
+      std::make_unique<ObjectCollectorAction<InstallPlan>>();
+
+  feeder_action->set_obj(install_plan);
+
+  BondActions(feeder_action.get(), verifier_action.get());
+  BondActions(verifier_action.get(), collector_action.get());
+
+  processor_.EnqueueAction(std::move(feeder_action));
+  processor_.EnqueueAction(std::move(verifier_action));
+  processor_.EnqueueAction(std::move(collector_action));
+}
+
 class FilesystemVerifierActionTest2Delegate : public ActionProcessorDelegate {
  public:
   void ActionCompleted(ActionProcessor* processor,
@@ -234,31 +219,25 @@
 };
 
 TEST_F(FilesystemVerifierActionTest, MissingInputObjectTest) {
-  ActionProcessor processor;
+  auto copier_action = std::make_unique<FilesystemVerifierAction>();
+  auto collector_action =
+      std::make_unique<ObjectCollectorAction<InstallPlan>>();
+
+  BondActions(copier_action.get(), collector_action.get());
+
+  processor_.EnqueueAction(std::move(copier_action));
+  processor_.EnqueueAction(std::move(collector_action));
+
   FilesystemVerifierActionTest2Delegate delegate;
+  processor_.set_delegate(&delegate);
 
-  processor.set_delegate(&delegate);
-
-  FilesystemVerifierAction copier_action;
-  ObjectCollectorAction<InstallPlan> collector_action;
-
-  BondActions(&copier_action, &collector_action);
-
-  processor.EnqueueAction(&copier_action);
-  processor.EnqueueAction(&collector_action);
-  processor.StartProcessing();
-  EXPECT_FALSE(processor.IsRunning());
+  processor_.StartProcessing();
+  EXPECT_FALSE(processor_.IsRunning());
   EXPECT_TRUE(delegate.ran_);
   EXPECT_EQ(ErrorCode::kError, delegate.code_);
 }
 
 TEST_F(FilesystemVerifierActionTest, NonExistentDriveTest) {
-  ActionProcessor processor;
-  FilesystemVerifierActionTest2Delegate delegate;
-
-  processor.set_delegate(&delegate);
-
-  ObjectFeederAction<InstallPlan> feeder_action;
   InstallPlan install_plan;
   InstallPlan::Partition part;
   part.name = "nope";
@@ -266,19 +245,15 @@
   part.target_path = "/no/such/file";
   install_plan.partitions = {part};
 
-  feeder_action.set_obj(install_plan);
-  FilesystemVerifierAction verifier_action;
-  ObjectCollectorAction<InstallPlan> collector_action;
+  BuildActions(install_plan);
 
-  BondActions(&verifier_action, &collector_action);
+  FilesystemVerifierActionTest2Delegate delegate;
+  processor_.set_delegate(&delegate);
 
-  processor.EnqueueAction(&feeder_action);
-  processor.EnqueueAction(&verifier_action);
-  processor.EnqueueAction(&collector_action);
-  processor.StartProcessing();
-  EXPECT_FALSE(processor.IsRunning());
+  processor_.StartProcessing();
+  EXPECT_FALSE(processor_.IsRunning());
   EXPECT_TRUE(delegate.ran_);
-  EXPECT_EQ(ErrorCode::kError, delegate.code_);
+  EXPECT_EQ(ErrorCode::kFilesystemVerifierError, delegate.code_);
 }
 
 TEST_F(FilesystemVerifierActionTest, RunAsRootVerifyHashTest) {
@@ -298,4 +273,112 @@
   while (loop_.RunOnce(false)) {}
 }
 
+#ifdef __ANDROID__
+TEST_F(FilesystemVerifierActionTest, WriteVerityTest) {
+  test_utils::ScopedTempFile part_file("part_file.XXXXXX");
+  constexpr size_t filesystem_size = 200 * 4096;
+  constexpr size_t part_size = 256 * 4096;
+  brillo::Blob part_data(filesystem_size, 0x1);
+  part_data.resize(part_size);
+  ASSERT_TRUE(test_utils::WriteFileVector(part_file.path(), part_data));
+  string target_path;
+  test_utils::ScopedLoopbackDeviceBinder target_device(
+      part_file.path(), true, &target_path);
+
+  InstallPlan install_plan;
+  InstallPlan::Partition part;
+  part.name = "part";
+  part.target_path = target_path;
+  part.target_size = part_size;
+  part.block_size = 4096;
+  part.hash_tree_algorithm = "sha1";
+  part.hash_tree_data_offset = 0;
+  part.hash_tree_data_size = filesystem_size;
+  part.hash_tree_offset = filesystem_size;
+  part.hash_tree_size = 3 * 4096;
+  part.fec_data_offset = 0;
+  part.fec_data_size = filesystem_size + part.hash_tree_size;
+  part.fec_offset = part.fec_data_size;
+  part.fec_size = 2 * 4096;
+  part.fec_roots = 2;
+  // for i in {1..$((200 * 4096))}; do echo -n -e '\x1' >> part; done
+  // avbtool add_hashtree_footer --image part --partition_size $((256 * 4096))
+  //     --partition_name part --do_not_append_vbmeta_image
+  //     --output_vbmeta_image vbmeta
+  // truncate -s $((256 * 4096)) part
+  // sha256sum part | xxd -r -p | hexdump -v -e '/1 "0x%02x, "'
+  part.target_hash = {0x28, 0xd4, 0x96, 0x75, 0x4c, 0xf5, 0x8a, 0x3e,
+                      0x31, 0x85, 0x08, 0x92, 0x85, 0x62, 0xf0, 0x37,
+                      0xbc, 0x8d, 0x7e, 0xa4, 0xcb, 0x24, 0x18, 0x7b,
+                      0xf3, 0xeb, 0xb5, 0x8d, 0x6f, 0xc8, 0xd8, 0x1a};
+  // avbtool info_image --image vbmeta | grep Salt | cut -d':' -f 2 |
+  //     xxd -r -p | hexdump -v -e '/1 "0x%02x, "'
+  part.hash_tree_salt = {0x9e, 0xcb, 0xf8, 0xd5, 0x0b, 0xb4, 0x43,
+                         0x0a, 0x7a, 0x10, 0xad, 0x96, 0xd7, 0x15,
+                         0x70, 0xba, 0xed, 0x27, 0xe2, 0xae};
+  install_plan.partitions = {part};
+
+  BuildActions(install_plan);
+
+  FilesystemVerifierActionTestDelegate delegate;
+  processor_.set_delegate(&delegate);
+
+  loop_.PostTask(
+      FROM_HERE,
+      base::Bind(
+          [](ActionProcessor* processor) { processor->StartProcessing(); },
+          base::Unretained(&processor_)));
+  loop_.Run();
+
+  EXPECT_FALSE(processor_.IsRunning());
+  EXPECT_TRUE(delegate.ran());
+  EXPECT_EQ(ErrorCode::kSuccess, delegate.code());
+}
+#endif  // __ANDROID__
+
+TEST_F(FilesystemVerifierActionTest, SkipWriteVerityTest) {
+  test_utils::ScopedTempFile part_file("part_file.XXXXXX");
+  constexpr size_t filesystem_size = 200 * 4096;
+  constexpr size_t part_size = 256 * 4096;
+  brillo::Blob part_data(part_size);
+  test_utils::FillWithData(&part_data);
+  ASSERT_TRUE(test_utils::WriteFileVector(part_file.path(), part_data));
+  string target_path;
+  test_utils::ScopedLoopbackDeviceBinder target_device(
+      part_file.path(), true, &target_path);
+
+  InstallPlan install_plan;
+  install_plan.write_verity = false;
+  InstallPlan::Partition part;
+  part.name = "part";
+  part.target_path = target_path;
+  part.target_size = part_size;
+  part.block_size = 4096;
+  part.hash_tree_data_offset = 0;
+  part.hash_tree_data_size = filesystem_size;
+  part.hash_tree_offset = filesystem_size;
+  part.hash_tree_size = 3 * 4096;
+  part.fec_data_offset = 0;
+  part.fec_data_size = filesystem_size + part.hash_tree_size;
+  part.fec_offset = part.fec_data_size;
+  part.fec_size = 2 * 4096;
+  EXPECT_TRUE(HashCalculator::RawHashOfData(part_data, &part.target_hash));
+  install_plan.partitions = {part};
+
+  BuildActions(install_plan);
+
+  FilesystemVerifierActionTestDelegate delegate;
+  processor_.set_delegate(&delegate);
+
+  loop_.PostTask(
+      FROM_HERE,
+      base::Bind(
+          [](ActionProcessor* processor) { processor->StartProcessing(); },
+          base::Unretained(&processor_)));
+  loop_.Run();
+
+  EXPECT_FALSE(processor_.IsRunning());
+  EXPECT_TRUE(delegate.ran());
+  EXPECT_EQ(ErrorCode::kSuccess, delegate.code());
+}
 }  // namespace chromeos_update_engine
diff --git a/payload_consumer/install_plan.cc b/payload_consumer/install_plan.cc
index 45112d6..5f2697b 100644
--- a/payload_consumer/install_plan.cc
+++ b/payload_consumer/install_plan.cc
@@ -90,7 +90,9 @@
             << ", powerwash_required: " << utils::ToString(powerwash_required)
             << ", switch_slot_on_reboot: "
             << utils::ToString(switch_slot_on_reboot)
-            << ", run_post_install: " << utils::ToString(run_post_install);
+            << ", run_post_install: " << utils::ToString(run_post_install)
+            << ", is_rollback: " << utils::ToString(is_rollback)
+            << ", write_verity: " << utils::ToString(write_verity);
 }
 
 bool InstallPlan::LoadPartitionsFromSlots(BootControlInterface* boot_control) {
@@ -103,7 +105,8 @@
       partition.source_path.clear();
     }
 
-    if (target_slot != BootControlInterface::kInvalidSlot) {
+    if (target_slot != BootControlInterface::kInvalidSlot &&
+        partition.target_size > 0) {
       result = boot_control->GetPartitionDevice(
           partition.name, target_slot, &partition.target_path) && result;
     } else {
diff --git a/payload_consumer/install_plan.h b/payload_consumer/install_plan.h
index 5cdfbc1..f56f63c 100644
--- a/payload_consumer/install_plan.h
+++ b/payload_consumer/install_plan.h
@@ -101,6 +101,7 @@
     std::string target_path;
     uint64_t target_size{0};
     brillo::Blob target_hash;
+    uint32_t block_size{0};
 
     // Whether we should run the postinstall script from this partition and the
     // postinstall parameters.
@@ -108,6 +109,21 @@
     std::string postinstall_path;
     std::string filesystem_type;
     bool postinstall_optional{false};
+
+    // Verity hash tree and FEC config. See update_metadata.proto for details.
+    // All offsets and sizes are in bytes.
+    uint64_t hash_tree_data_offset{0};
+    uint64_t hash_tree_data_size{0};
+    uint64_t hash_tree_offset{0};
+    uint64_t hash_tree_size{0};
+    std::string hash_tree_algorithm;
+    brillo::Blob hash_tree_salt;
+
+    uint64_t fec_data_offset{0};
+    uint64_t fec_data_size{0};
+    uint64_t fec_offset{0};
+    uint64_t fec_size{0};
+    uint32_t fec_roots{0};
   };
   std::vector<Partition> partitions;
 
@@ -127,6 +143,13 @@
   // False otherwise.
   bool run_post_install{true};
 
+  // True if this update is a rollback.
+  bool is_rollback{false};
+
+  // True if the update should write verity.
+  // False otherwise.
+  bool write_verity{true};
+
   // If not blank, a base-64 encoded representation of the PEM-encoded
   // public key in the response.
   std::string public_key_rsa;
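
To make the relationship between the new verity fields concrete, the assignments below reproduce the layout exercised by WriteVerityTest earlier in this patch (4 KiB blocks; all offsets and sizes in bytes). They only illustrate the conventions the fields follow; real values come from the payload metadata.

constexpr uint64_t kBlock = 4096;
InstallPlan::Partition part;
part.block_size = kBlock;
// Filesystem data covered by the hash tree: [0, 200 * kBlock).
part.hash_tree_data_offset = 0;
part.hash_tree_data_size = 200 * kBlock;
// The hash tree itself sits right after that data: [200 * kBlock, 203 * kBlock).
part.hash_tree_offset = 200 * kBlock;
part.hash_tree_size = 3 * kBlock;
// FEC covers both the data and the hash tree: [0, 203 * kBlock).
part.fec_data_offset = 0;
part.fec_data_size = 203 * kBlock;
// FEC parity follows; the rest of the 256 * kBlock partition is untouched.
part.fec_offset = 203 * kBlock;
part.fec_size = 2 * kBlock;
part.fec_roots = 2;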
diff --git a/payload_consumer/payload_constants.cc b/payload_consumer/payload_constants.cc
index e679316..213d798 100644
--- a/payload_consumer/payload_constants.cc
+++ b/payload_consumer/payload_constants.cc
@@ -21,17 +21,24 @@
 const uint64_t kChromeOSMajorPayloadVersion = 1;
 const uint64_t kBrilloMajorPayloadVersion = 2;
 
+const uint32_t kMinSupportedMinorPayloadVersion = 1;
+const uint32_t kMaxSupportedMinorPayloadVersion = 6;
+
 const uint32_t kFullPayloadMinorVersion = 0;
 const uint32_t kInPlaceMinorPayloadVersion = 1;
 const uint32_t kSourceMinorPayloadVersion = 2;
 const uint32_t kOpSrcHashMinorPayloadVersion = 3;
 const uint32_t kBrotliBsdiffMinorPayloadVersion = 4;
 const uint32_t kPuffdiffMinorPayloadVersion = 5;
+const uint32_t kVerityMinorPayloadVersion = 6;
+
+const uint64_t kMinSupportedMajorPayloadVersion = 1;
+const uint64_t kMaxSupportedMajorPayloadVersion = 2;
 
 const uint64_t kMaxPayloadHeaderSize = 24;
 
-const char kLegacyPartitionNameKernel[] = "boot";
-const char kLegacyPartitionNameRoot[] = "system";
+const char kPartitionNameKernel[] = "kernel";
+const char kPartitionNameRoot[] = "root";
 
 const char kDeltaMagic[4] = {'C', 'r', 'A', 'U'};
 
diff --git a/payload_consumer/payload_constants.h b/payload_consumer/payload_constants.h
index ac3e882..7f76898 100644
--- a/payload_consumer/payload_constants.h
+++ b/payload_consumer/payload_constants.h
@@ -31,6 +31,10 @@
 // The major version used by Brillo.
 extern const uint64_t kBrilloMajorPayloadVersion;
 
+// The minimum and maximum supported major version.
+extern const uint64_t kMinSupportedMajorPayloadVersion;
+extern const uint64_t kMaxSupportedMajorPayloadVersion;
+
 // The minor version used for all full payloads.
 extern const uint32_t kFullPayloadMinorVersion;
 
@@ -49,14 +53,21 @@
 // The minor version that allows PUFFDIFF operation.
 extern const uint32_t kPuffdiffMinorPayloadVersion;
 
+// The minor version that allows Verity hash tree and FEC generation.
+extern const uint32_t kVerityMinorPayloadVersion;
+
+// The minimum and maximum supported minor version.
+extern const uint32_t kMinSupportedMinorPayloadVersion;
+extern const uint32_t kMaxSupportedMinorPayloadVersion;
+
 // The maximum size of the payload header (anything before the protobuf).
 extern const uint64_t kMaxPayloadHeaderSize;
 
 // The kernel and rootfs partition names used by the BootControlInterface when
 // handling update payloads with a major version 1. The names of the updated
 // partitions are included in the payload itself for major version 2.
-extern const char kLegacyPartitionNameKernel[];
-extern const char kLegacyPartitionNameRoot[];
+extern const char kPartitionNameKernel[];
+extern const char kPartitionNameRoot[];
 
 extern const char kBspatchPath[];
 extern const char kDeltaMagic[4];
diff --git a/payload_consumer/payload_metadata.cc b/payload_consumer/payload_metadata.cc
index fe2df0a..02ec8b1 100644
--- a/payload_consumer/payload_metadata.cc
+++ b/payload_consumer/payload_metadata.cc
@@ -60,9 +60,7 @@
 }
 
 MetadataParseResult PayloadMetadata::ParsePayloadHeader(
-    const brillo::Blob& payload,
-    uint64_t supported_major_version,
-    ErrorCode* error) {
+    const brillo::Blob& payload, ErrorCode* error) {
   uint64_t manifest_offset;
   // Ensure we have data to cover the major payload version.
   if (payload.size() < kDeltaManifestSizeOffset)
@@ -84,8 +82,8 @@
   // Switch big endian to host.
   major_payload_version_ = be64toh(major_payload_version_);
 
-  if (major_payload_version_ != supported_major_version &&
-      major_payload_version_ != kChromeOSMajorPayloadVersion) {
+  if (major_payload_version_ < kMinSupportedMajorPayloadVersion ||
+      major_payload_version_ > kMaxSupportedMajorPayloadVersion) {
     LOG(ERROR) << "Bad payload format -- unsupported payload version: "
                << major_payload_version_;
     *error = ErrorCode::kUnsupportedMajorPayloadVersion;
@@ -128,6 +126,11 @@
   return MetadataParseResult::kSuccess;
 }
 
+bool PayloadMetadata::ParsePayloadHeader(const brillo::Blob& payload) {
+  ErrorCode error;
+  return ParsePayloadHeader(payload, &error) == MetadataParseResult::kSuccess;
+}
+
 bool PayloadMetadata::GetManifest(const brillo::Blob& payload,
                                   DeltaArchiveManifest* out_manifest) const {
   uint64_t manifest_offset;
diff --git a/payload_consumer/payload_metadata.h b/payload_consumer/payload_metadata.h
index e00b5c1..8748f6f 100644
--- a/payload_consumer/payload_metadata.h
+++ b/payload_consumer/payload_metadata.h
@@ -54,8 +54,9 @@
   // metadata. Returns kMetadataParseError if the metadata can't be parsed given
   // the payload.
   MetadataParseResult ParsePayloadHeader(const brillo::Blob& payload,
-                                         uint64_t supported_major_version,
                                          ErrorCode* error);
+  // Simpler version of the above; returns true on success.
+  bool ParsePayloadHeader(const brillo::Blob& payload);
 
   // Given the |payload|, verifies that the signed hash of its metadata matches
   // |metadata_signature| (if present) or the metadata signature in payload
diff --git a/payload_consumer/postinstall_runner_action.cc b/payload_consumer/postinstall_runner_action.cc
index cedecda..83d910f 100644
--- a/payload_consumer/postinstall_runner_action.cc
+++ b/payload_consumer/postinstall_runner_action.cc
@@ -57,7 +57,8 @@
   CHECK(HasInputObject());
   install_plan_ = GetInputObject();
 
-  if (install_plan_.powerwash_required) {
+  // Currently we're always powerwashing when rolling back.
+  if (install_plan_.powerwash_required || install_plan_.is_rollback) {
     if (hardware_->SchedulePowerwash()) {
       powerwash_scheduled_ = true;
     } else {
@@ -264,7 +265,7 @@
 void PostinstallRunnerAction::ReportProgress(double frac) {
   if (!delegate_)
     return;
-  if (current_partition_ >= partition_weight_.size()) {
+  if (current_partition_ >= partition_weight_.size() || total_weight_ == 0) {
     delegate_->ProgressUpdate(1.);
     return;
   }
diff --git a/payload_consumer/postinstall_runner_action_unittest.cc b/payload_consumer/postinstall_runner_action_unittest.cc
index f15171b..8381472 100644
--- a/payload_consumer/postinstall_runner_action_unittest.cc
+++ b/payload_consumer/postinstall_runner_action_unittest.cc
@@ -22,6 +22,7 @@
 
 #include <memory>
 #include <string>
+#include <utility>
 
 #include <base/bind.h>
 #include <base/files/file_util.h>
@@ -38,6 +39,7 @@
 #include "update_engine/common/fake_hardware.h"
 #include "update_engine/common/test_utils.h"
 #include "update_engine/common/utils.h"
+#include "update_engine/mock_payload_state.h"
 
 using brillo::MessageLoop;
 using chromeos_update_engine::test_utils::ScopedLoopbackDeviceBinder;
@@ -94,9 +96,10 @@
   // Setup an action processor and run the PostinstallRunnerAction with a single
   // partition |device_path|, running the |postinstall_program| command from
   // there.
-  void RunPosinstallAction(const string& device_path,
-                           const string& postinstall_program,
-                           bool powerwash_required);
+  void RunPostinstallAction(const string& device_path,
+                            const string& postinstall_program,
+                            bool powerwash_required,
+                            bool is_rollback);
 
  public:
   void ResumeRunningAction() {
@@ -162,13 +165,14 @@
   ActionProcessor* processor_{nullptr};
 };
 
-void PostinstallRunnerActionTest::RunPosinstallAction(
+void PostinstallRunnerActionTest::RunPostinstallAction(
     const string& device_path,
     const string& postinstall_program,
-    bool powerwash_required) {
+    bool powerwash_required,
+    bool is_rollback) {
   ActionProcessor processor;
   processor_ = &processor;
-  ObjectFeederAction<InstallPlan> feeder_action;
+  auto feeder_action = std::make_unique<ObjectFeederAction<InstallPlan>>();
   InstallPlan::Partition part;
   part.name = "part";
   part.target_path = device_path;
@@ -178,16 +182,19 @@
   install_plan.partitions = {part};
   install_plan.download_url = "http://127.0.0.1:8080/update";
   install_plan.powerwash_required = powerwash_required;
-  feeder_action.set_obj(install_plan);
-  PostinstallRunnerAction runner_action(&fake_boot_control_, &fake_hardware_);
-  postinstall_action_ = &runner_action;
-  runner_action.set_delegate(setup_action_delegate_);
-  BondActions(&feeder_action, &runner_action);
-  ObjectCollectorAction<InstallPlan> collector_action;
-  BondActions(&runner_action, &collector_action);
-  processor.EnqueueAction(&feeder_action);
-  processor.EnqueueAction(&runner_action);
-  processor.EnqueueAction(&collector_action);
+  install_plan.is_rollback = is_rollback;
+  feeder_action->set_obj(install_plan);
+  auto runner_action = std::make_unique<PostinstallRunnerAction>(
+      &fake_boot_control_, &fake_hardware_);
+  postinstall_action_ = runner_action.get();
+  runner_action->set_delegate(setup_action_delegate_);
+  BondActions(feeder_action.get(), runner_action.get());
+  auto collector_action =
+      std::make_unique<ObjectCollectorAction<InstallPlan>>();
+  BondActions(runner_action.get(), collector_action.get());
+  processor.EnqueueAction(std::move(feeder_action));
+  processor.EnqueueAction(std::move(runner_action));
+  processor.EnqueueAction(std::move(collector_action));
   processor.set_delegate(&processor_delegate_);
 
   loop_.PostTask(
@@ -240,7 +247,8 @@
 // /postinst command which only exits 0.
 TEST_F(PostinstallRunnerActionTest, RunAsRootSimpleTest) {
   ScopedLoopbackDeviceBinder loop(postinstall_image_, false, nullptr);
-  RunPosinstallAction(loop.dev(), kPostinstallDefaultScript, false);
+
+  RunPostinstallAction(loop.dev(), kPostinstallDefaultScript, false, false);
   EXPECT_EQ(ErrorCode::kSuccess, processor_delegate_.code_);
   EXPECT_TRUE(processor_delegate_.processing_done_called_);
 
@@ -250,14 +258,31 @@
 
 TEST_F(PostinstallRunnerActionTest, RunAsRootRunSymlinkFileTest) {
   ScopedLoopbackDeviceBinder loop(postinstall_image_, false, nullptr);
-  RunPosinstallAction(loop.dev(), "bin/postinst_link", false);
+  RunPostinstallAction(loop.dev(), "bin/postinst_link", false, false);
   EXPECT_EQ(ErrorCode::kSuccess, processor_delegate_.code_);
 }
 
 TEST_F(PostinstallRunnerActionTest, RunAsRootPowerwashRequiredTest) {
   ScopedLoopbackDeviceBinder loop(postinstall_image_, false, nullptr);
   // Run a simple postinstall program but requiring a powerwash.
-  RunPosinstallAction(loop.dev(), "bin/postinst_example", true);
+  RunPostinstallAction(loop.dev(),
+                       "bin/postinst_example",
+                       /*powerwash_required=*/true,
+                       false);
+  EXPECT_EQ(ErrorCode::kSuccess, processor_delegate_.code_);
+
+  // Check that powerwash was scheduled.
+  EXPECT_TRUE(fake_hardware_.IsPowerwashScheduled());
+}
+
+TEST_F(PostinstallRunnerActionTest, RunAsRootRollbackTest) {
+  ScopedLoopbackDeviceBinder loop(postinstall_image_, false, nullptr);
+
+  // Run a simple postinstall program as part of a rollback.
+  RunPostinstallAction(loop.dev(),
+                       "bin/postinst_example",
+                       false,
+                       /*is_rollback=*/true);
   EXPECT_EQ(ErrorCode::kSuccess, processor_delegate_.code_);
 
   // Check that powerwash was scheduled.
@@ -267,7 +292,7 @@
 // Runs postinstall from a partition file that doesn't mount, so it should
 // fail.
 TEST_F(PostinstallRunnerActionTest, RunAsRootCantMountTest) {
-  RunPosinstallAction("/dev/null", kPostinstallDefaultScript, false);
+  RunPostinstallAction("/dev/null", kPostinstallDefaultScript, false, false);
   EXPECT_EQ(ErrorCode::kPostinstallRunnerError, processor_delegate_.code_);
 
   // In case of failure, Postinstall should not signal a powerwash even if it
@@ -279,7 +304,7 @@
 // fail.
 TEST_F(PostinstallRunnerActionTest, RunAsRootErrScriptTest) {
   ScopedLoopbackDeviceBinder loop(postinstall_image_, false, nullptr);
-  RunPosinstallAction(loop.dev(), "bin/postinst_fail1", false);
+  RunPostinstallAction(loop.dev(), "bin/postinst_fail1", false, false);
   EXPECT_EQ(ErrorCode::kPostinstallRunnerError, processor_delegate_.code_);
 }
 
@@ -287,7 +312,7 @@
 // UMA with a different error code. Test those cases are properly detected.
 TEST_F(PostinstallRunnerActionTest, RunAsRootFirmwareBErrScriptTest) {
   ScopedLoopbackDeviceBinder loop(postinstall_image_, false, nullptr);
-  RunPosinstallAction(loop.dev(), "bin/postinst_fail3", false);
+  RunPostinstallAction(loop.dev(), "bin/postinst_fail3", false, false);
   EXPECT_EQ(ErrorCode::kPostinstallBootedFromFirmwareB,
             processor_delegate_.code_);
 }
@@ -295,7 +320,7 @@
 // Check that you can't specify an absolute path.
 TEST_F(PostinstallRunnerActionTest, RunAsRootAbsolutePathNotAllowedTest) {
   ScopedLoopbackDeviceBinder loop(postinstall_image_, false, nullptr);
-  RunPosinstallAction(loop.dev(), "/etc/../bin/sh", false);
+  RunPostinstallAction(loop.dev(), "/etc/../bin/sh", false, false);
   EXPECT_EQ(ErrorCode::kPostinstallRunnerError, processor_delegate_.code_);
 }
 
@@ -304,7 +329,7 @@
 // SElinux labels are only set on Android.
 TEST_F(PostinstallRunnerActionTest, RunAsRootCheckFileContextsTest) {
   ScopedLoopbackDeviceBinder loop(postinstall_image_, false, nullptr);
-  RunPosinstallAction(loop.dev(), "bin/self_check_context", false);
+  RunPostinstallAction(loop.dev(), "bin/self_check_context", false, false);
   EXPECT_EQ(ErrorCode::kSuccess, processor_delegate_.code_);
 }
 #endif  // __ANDROID__
@@ -317,7 +342,7 @@
   loop_.PostTask(FROM_HERE,
                  base::Bind(&PostinstallRunnerActionTest::SuspendRunningAction,
                             base::Unretained(this)));
-  RunPosinstallAction(loop.dev(), "bin/postinst_suspend", false);
+  RunPostinstallAction(loop.dev(), "bin/postinst_suspend", false, false);
   // postinst_suspend returns 0 only if it was suspended at some point.
   EXPECT_EQ(ErrorCode::kSuccess, processor_delegate_.code_);
   EXPECT_TRUE(processor_delegate_.processing_done_called_);
@@ -329,7 +354,7 @@
 
   // Wait for the action to start and then cancel it.
   CancelWhenStarted();
-  RunPosinstallAction(loop.dev(), "bin/postinst_suspend", false);
+  RunPostinstallAction(loop.dev(), "bin/postinst_suspend", false, false);
   // When canceling the action, the action never finished and therefore we had
   // a ProcessingStopped call instead.
   EXPECT_FALSE(processor_delegate_.code_set_);
@@ -352,7 +377,7 @@
 
   ScopedLoopbackDeviceBinder loop(postinstall_image_, false, nullptr);
   setup_action_delegate_ = &mock_delegate_;
-  RunPosinstallAction(loop.dev(), "bin/postinst_progress", false);
+  RunPostinstallAction(loop.dev(), "bin/postinst_progress", false, false);
   EXPECT_EQ(ErrorCode::kSuccess, processor_delegate_.code_);
 }
 
diff --git a/payload_consumer/verity_writer_android.cc b/payload_consumer/verity_writer_android.cc
new file mode 100644
index 0000000..06d1489
--- /dev/null
+++ b/payload_consumer/verity_writer_android.cc
@@ -0,0 +1,192 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_consumer/verity_writer_android.h"
+
+#include <fcntl.h>
+
+#include <algorithm>
+#include <memory>
+
+#include <base/logging.h>
+#include <base/posix/eintr_wrapper.h>
+#include <fec/ecc.h>
+extern "C" {
+#include <fec.h>
+}
+
+#include "update_engine/common/utils.h"
+
+namespace chromeos_update_engine {
+
+namespace verity_writer {
+std::unique_ptr<VerityWriterInterface> CreateVerityWriter() {
+  return std::make_unique<VerityWriterAndroid>();
+}
+}  // namespace verity_writer
+
+bool VerityWriterAndroid::Init(const InstallPlan::Partition& partition) {
+  partition_ = &partition;
+
+  if (partition_->hash_tree_size != 0) {
+    auto hash_function =
+        HashTreeBuilder::HashFunction(partition_->hash_tree_algorithm);
+    if (hash_function == nullptr) {
+      LOG(ERROR) << "Verity hash algorithm not supported: "
+                 << partition_->hash_tree_algorithm;
+      return false;
+    }
+    hash_tree_builder_ = std::make_unique<HashTreeBuilder>(
+        partition_->block_size, hash_function);
+    TEST_AND_RETURN_FALSE(hash_tree_builder_->Initialize(
+        partition_->hash_tree_data_size, partition_->hash_tree_salt));
+    if (hash_tree_builder_->CalculateSize(partition_->hash_tree_data_size) !=
+        partition_->hash_tree_size) {
+      LOG(ERROR) << "Verity hash tree size does not match, stored: "
+                 << partition_->hash_tree_size << ", calculated: "
+                 << hash_tree_builder_->CalculateSize(
+                        partition_->hash_tree_data_size);
+      return false;
+    }
+  }
+  return true;
+}
+
+bool VerityWriterAndroid::Update(uint64_t offset,
+                                 const uint8_t* buffer,
+                                 size_t size) {
+  if (partition_->hash_tree_size != 0) {
+    uint64_t hash_tree_data_end =
+        partition_->hash_tree_data_offset + partition_->hash_tree_data_size;
+    uint64_t start_offset = std::max(offset, partition_->hash_tree_data_offset);
+    uint64_t end_offset = std::min(offset + size, hash_tree_data_end);
+    if (start_offset < end_offset) {
+      TEST_AND_RETURN_FALSE(hash_tree_builder_->Update(
+          buffer + start_offset - offset, end_offset - start_offset));
+
+      if (end_offset == hash_tree_data_end) {
+        // All hash tree data blocks have been hashed; write the hash tree to disk.
+        int fd = HANDLE_EINTR(open(partition_->target_path.c_str(), O_WRONLY));
+        if (fd < 0) {
+          PLOG(ERROR) << "Failed to open " << partition_->target_path
+                      << " to write hash tree.";
+          return false;
+        }
+        ScopedFdCloser fd_closer(&fd);
+
+        LOG(INFO) << "Writing verity hash tree to " << partition_->target_path;
+        TEST_AND_RETURN_FALSE(hash_tree_builder_->BuildHashTree());
+        TEST_AND_RETURN_FALSE(hash_tree_builder_->WriteHashTreeToFd(
+            fd, partition_->hash_tree_offset));
+        hash_tree_builder_.reset();
+      }
+    }
+  }
+  if (partition_->fec_size != 0) {
+    uint64_t fec_data_end =
+        partition_->fec_data_offset + partition_->fec_data_size;
+    if (offset < fec_data_end && offset + size >= fec_data_end) {
+      LOG(INFO) << "Writing verity FEC to " << partition_->target_path;
+      TEST_AND_RETURN_FALSE(EncodeFEC(partition_->target_path,
+                                      partition_->fec_data_offset,
+                                      partition_->fec_data_size,
+                                      partition_->fec_offset,
+                                      partition_->fec_size,
+                                      partition_->fec_roots,
+                                      partition_->block_size,
+                                      false /* verify_mode */));
+    }
+  }
+  return true;
+}
+
+bool VerityWriterAndroid::EncodeFEC(const std::string& path,
+                                    uint64_t data_offset,
+                                    uint64_t data_size,
+                                    uint64_t fec_offset,
+                                    uint64_t fec_size,
+                                    uint32_t fec_roots,
+                                    uint32_t block_size,
+                                    bool verify_mode) {
+  TEST_AND_RETURN_FALSE(data_size % block_size == 0);
+  TEST_AND_RETURN_FALSE(fec_roots >= 0 && fec_roots < FEC_RSM);
+  // This is the N in RS(M, N), which is the number of bytes for each rs block.
+  size_t rs_n = FEC_RSM - fec_roots;
+  uint64_t rounds = utils::DivRoundUp(data_size / block_size, rs_n);
+  TEST_AND_RETURN_FALSE(rounds * fec_roots * block_size == fec_size);
+
+  std::unique_ptr<void, decltype(&free_rs_char)> rs_char(
+      init_rs_char(FEC_PARAMS(fec_roots)), &free_rs_char);
+  TEST_AND_RETURN_FALSE(rs_char != nullptr);
+
+  int fd = HANDLE_EINTR(open(path.c_str(), verify_mode ? O_RDONLY : O_RDWR));
+  if (fd < 0) {
+    PLOG(ERROR) << "Failed to open " << path << " to write FEC.";
+    return false;
+  }
+  ScopedFdCloser fd_closer(&fd);
+
+  for (size_t i = 0; i < rounds; i++) {
+    // Encodes |block_size| rs blocks each round so that we can read one block
+    // at a time instead of one byte, which improves random read performance.
+    // This uses about 1 MiB of memory for a 4K block size.
+    brillo::Blob rs_blocks(block_size * rs_n);
+    for (size_t j = 0; j < rs_n; j++) {
+      brillo::Blob buffer(block_size, 0);
+      uint64_t offset =
+          fec_ecc_interleave(i * rs_n * block_size + j, rs_n, rounds);
+      // Don't read past |data_size|; treat the missing bytes as 0.
+      if (offset < data_size) {
+        ssize_t bytes_read = 0;
+        TEST_AND_RETURN_FALSE(utils::PReadAll(fd,
+                                              buffer.data(),
+                                              buffer.size(),
+                                              data_offset + offset,
+                                              &bytes_read));
+        TEST_AND_RETURN_FALSE(bytes_read ==
+                              static_cast<ssize_t>(buffer.size()));
+      }
+      for (size_t k = 0; k < buffer.size(); k++) {
+        rs_blocks[k * rs_n + j] = buffer[k];
+      }
+    }
+    brillo::Blob fec(block_size * fec_roots);
+    for (size_t j = 0; j < block_size; j++) {
+      // Encode [j * rs_n : (j + 1) * rs_n) in |rs_blocks| and write the
+      // |fec_roots| parity bytes to offset |j * fec_roots| in |fec|.
+      encode_rs_char(rs_char.get(),
+                     rs_blocks.data() + j * rs_n,
+                     fec.data() + j * fec_roots);
+    }
+
+    if (verify_mode) {
+      brillo::Blob fec_read(fec.size());
+      ssize_t bytes_read = 0;
+      TEST_AND_RETURN_FALSE(utils::PReadAll(
+          fd, fec_read.data(), fec_read.size(), fec_offset, &bytes_read));
+      TEST_AND_RETURN_FALSE(bytes_read ==
+                            static_cast<ssize_t>(fec_read.size()));
+      TEST_AND_RETURN_FALSE(fec == fec_read);
+    } else {
+      TEST_AND_RETURN_FALSE(
+          utils::PWriteAll(fd, fec.data(), fec.size(), fec_offset));
+    }
+    fec_offset += fec.size();
+  }
+
+  return true;
+}
+}  // namespace chromeos_update_engine
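
As a quick sanity check of the sizing invariant EncodeFEC() enforces above (rounds * fec_roots * block_size == fec_size), here is the arithmetic for the WriteVerityTest numbers, assuming FEC_RSM is 255 (Reed-Solomon codewords over GF(2^8)); this is a worked example only, not part of the patch.

#include <cstdint>

constexpr uint64_t kBlockSize = 4096;
constexpr uint32_t kFecRoots = 2;
constexpr uint32_t kFecRsm = 255;                            // FEC_RSM (assumed)
constexpr uint64_t kFecDataSize = (200 + 3) * kBlockSize;    // filesystem + hash tree

constexpr uint64_t kRsN = kFecRsm - kFecRoots;               // 253 data bytes per RS block
constexpr uint64_t kDataBlocks = kFecDataSize / kBlockSize;  // 203
constexpr uint64_t kRounds = (kDataBlocks + kRsN - 1) / kRsN;  // DivRoundUp -> 1
static_assert(kRounds * kFecRoots * kBlockSize == 2 * kBlockSize,
              "fec_size must equal rounds * fec_roots * block_size");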
diff --git a/payload_consumer/verity_writer_android.h b/payload_consumer/verity_writer_android.h
new file mode 100644
index 0000000..05a5856
--- /dev/null
+++ b/payload_consumer/verity_writer_android.h
@@ -0,0 +1,62 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_PAYLOAD_CONSUMER_VERITY_WRITER_ANDROID_H_
+#define UPDATE_ENGINE_PAYLOAD_CONSUMER_VERITY_WRITER_ANDROID_H_
+
+#include <memory>
+#include <string>
+
+#include <verity/hash_tree_builder.h>
+
+#include "update_engine/payload_consumer/verity_writer_interface.h"
+
+namespace chromeos_update_engine {
+
+class VerityWriterAndroid : public VerityWriterInterface {
+ public:
+  VerityWriterAndroid() = default;
+  ~VerityWriterAndroid() override = default;
+
+  bool Init(const InstallPlan::Partition& partition) override;
+  bool Update(uint64_t offset, const uint8_t* buffer, size_t size) override;
+
+  // Read [data_offset : data_offset + data_size) from |path| and encode FEC
+  // data. If |verify_mode| is set, compare the encoded FEC with the one in
+  // |path|; otherwise write the encoded FEC to |path|. We can't encode as we
+  // go in each Update() like the hash tree, because the data of every rs
+  // block is spread across the entire |data_size|; unless we can cache all
+  // the data in memory, we have to re-read it from disk.
+  static bool EncodeFEC(const std::string& path,
+                        uint64_t data_offset,
+                        uint64_t data_size,
+                        uint64_t fec_offset,
+                        uint64_t fec_size,
+                        uint32_t fec_roots,
+                        uint32_t block_size,
+                        bool verify_mode);
+
+ private:
+  const InstallPlan::Partition* partition_ = nullptr;
+
+  std::unique_ptr<HashTreeBuilder> hash_tree_builder_;
+
+  DISALLOW_COPY_AND_ASSIGN(VerityWriterAndroid);
+};
+
+}  // namespace chromeos_update_engine
+
+#endif  // UPDATE_ENGINE_PAYLOAD_CONSUMER_VERITY_WRITER_ANDROID_H_
diff --git a/payload_consumer/verity_writer_android_unittest.cc b/payload_consumer/verity_writer_android_unittest.cc
new file mode 100644
index 0000000..f943ce8
--- /dev/null
+++ b/payload_consumer/verity_writer_android_unittest.cc
@@ -0,0 +1,120 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_consumer/verity_writer_android.h"
+
+#include <brillo/secure_blob.h>
+#include <gtest/gtest.h>
+
+#include "update_engine/common/test_utils.h"
+#include "update_engine/common/utils.h"
+
+namespace chromeos_update_engine {
+
+class VerityWriterAndroidTest : public ::testing::Test {
+ protected:
+  void SetUp() override {
+    partition_.target_path = temp_file_.path();
+    partition_.block_size = 4096;
+    partition_.hash_tree_data_offset = 0;
+    partition_.hash_tree_data_size = 4096;
+    partition_.hash_tree_offset = 4096;
+    partition_.hash_tree_size = 4096;
+    partition_.hash_tree_algorithm = "sha1";
+    partition_.fec_roots = 2;
+  }
+
+  VerityWriterAndroid verity_writer_;
+  InstallPlan::Partition partition_;
+  test_utils::ScopedTempFile temp_file_;
+};
+
+TEST_F(VerityWriterAndroidTest, SimpleTest) {
+  brillo::Blob part_data(8192);
+  test_utils::WriteFileVector(partition_.target_path, part_data);
+  ASSERT_TRUE(verity_writer_.Init(partition_));
+  EXPECT_TRUE(verity_writer_.Update(0, part_data.data(), 4096));
+  EXPECT_TRUE(verity_writer_.Update(4096, part_data.data() + 4096, 4096));
+  brillo::Blob actual_part;
+  utils::ReadFile(partition_.target_path, &actual_part);
+  // dd if=/dev/zero bs=4096 count=1 2>/dev/null | sha1sum | xxd -r -p |
+  //     hexdump -v -e '/1 "0x%02x, "'
+  brillo::Blob hash = {0x1c, 0xea, 0xf7, 0x3d, 0xf4, 0x0e, 0x53,
+                       0x1d, 0xf3, 0xbf, 0xb2, 0x6b, 0x4f, 0xb7,
+                       0xcd, 0x95, 0xfb, 0x7b, 0xff, 0x1d};
+  memcpy(part_data.data() + 4096, hash.data(), hash.size());
+  EXPECT_EQ(part_data, actual_part);
+}
+
+TEST_F(VerityWriterAndroidTest, NoOpTest) {
+  partition_.hash_tree_data_size = 0;
+  partition_.hash_tree_size = 0;
+  brillo::Blob part_data(4096);
+  ASSERT_TRUE(verity_writer_.Init(partition_));
+  EXPECT_TRUE(verity_writer_.Update(0, part_data.data(), part_data.size()));
+  EXPECT_TRUE(verity_writer_.Update(4096, part_data.data(), part_data.size()));
+  EXPECT_TRUE(verity_writer_.Update(8192, part_data.data(), part_data.size()));
+}
+
+TEST_F(VerityWriterAndroidTest, InvalidHashAlgorithmTest) {
+  partition_.hash_tree_algorithm = "sha123";
+  EXPECT_FALSE(verity_writer_.Init(partition_));
+}
+
+TEST_F(VerityWriterAndroidTest, WrongHashTreeSizeTest) {
+  partition_.hash_tree_size = 8192;
+  EXPECT_FALSE(verity_writer_.Init(partition_));
+}
+
+TEST_F(VerityWriterAndroidTest, SHA256Test) {
+  partition_.hash_tree_algorithm = "sha256";
+  brillo::Blob part_data(8192);
+  test_utils::WriteFileVector(partition_.target_path, part_data);
+  ASSERT_TRUE(verity_writer_.Init(partition_));
+  EXPECT_TRUE(verity_writer_.Update(0, part_data.data(), 4096));
+  EXPECT_TRUE(verity_writer_.Update(4096, part_data.data() + 4096, 4096));
+  brillo::Blob actual_part;
+  utils::ReadFile(partition_.target_path, &actual_part);
+  // dd if=/dev/zero bs=4096 count=1 2>/dev/null | sha256sum | xxd -r -p |
+  //     hexdump -v -e '/1 "0x%02x, "'
+  brillo::Blob hash = {0xad, 0x7f, 0xac, 0xb2, 0x58, 0x6f, 0xc6, 0xe9,
+                       0x66, 0xc0, 0x04, 0xd7, 0xd1, 0xd1, 0x6b, 0x02,
+                       0x4f, 0x58, 0x05, 0xff, 0x7c, 0xb4, 0x7c, 0x7a,
+                       0x85, 0xda, 0xbd, 0x8b, 0x48, 0x89, 0x2c, 0xa7};
+  memcpy(part_data.data() + 4096, hash.data(), hash.size());
+  EXPECT_EQ(part_data, actual_part);
+}
+
+TEST_F(VerityWriterAndroidTest, FECTest) {
+  partition_.fec_data_offset = 0;
+  partition_.fec_data_size = 4096;
+  partition_.fec_offset = 4096;
+  partition_.fec_size = 2 * 4096;
+  brillo::Blob part_data(3 * 4096, 0x1);
+  test_utils::WriteFileVector(partition_.target_path, part_data);
+  ASSERT_TRUE(verity_writer_.Init(partition_));
+  EXPECT_TRUE(verity_writer_.Update(0, part_data.data(), part_data.size()));
+  brillo::Blob actual_part;
+  utils::ReadFile(partition_.target_path, &actual_part);
+  // Write FEC data.
+  for (size_t i = 4096; i < part_data.size(); i += 2) {
+    part_data[i] = 0x8e;
+    part_data[i + 1] = 0x8f;
+  }
+  EXPECT_EQ(part_data, actual_part);
+}
+
+}  // namespace chromeos_update_engine
diff --git a/payload_consumer/verity_writer_interface.h b/payload_consumer/verity_writer_interface.h
new file mode 100644
index 0000000..a3ecef3
--- /dev/null
+++ b/payload_consumer/verity_writer_interface.h
@@ -0,0 +1,53 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_PAYLOAD_CONSUMER_VERITY_WRITER_INTERFACE_H_
+#define UPDATE_ENGINE_PAYLOAD_CONSUMER_VERITY_WRITER_INTERFACE_H_
+
+#include <cstdint>
+#include <memory>
+
+#include <base/macros.h>
+
+#include "update_engine/payload_consumer/install_plan.h"
+
+namespace chromeos_update_engine {
+
+class VerityWriterInterface {
+ public:
+  virtual ~VerityWriterInterface() = default;
+
+  virtual bool Init(const InstallPlan::Partition& partition) = 0;
+  // Update partition data at [offset : offset + size) stored in |buffer|.
+  // Data not in |hash_tree_data_extent| or |fec_data_extent| is ignored.
+  // Will write verity data to the target partition once all the necessary
+  // blocks have been passed in.
+  virtual bool Update(uint64_t offset, const uint8_t* buffer, size_t size) = 0;
+
+ protected:
+  VerityWriterInterface() = default;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(VerityWriterInterface);
+};
+
+namespace verity_writer {
+std::unique_ptr<VerityWriterInterface> CreateVerityWriter();
+}
+
+}  // namespace chromeos_update_engine
+
+#endif  // UPDATE_ENGINE_PAYLOAD_CONSUMER_VERITY_WRITER_INTERFACE_H_
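
A minimal usage sketch for the interface, following the pattern of VerityWriterAndroidTest::SimpleTest earlier in this patch. |partition| is assumed to be a fully configured InstallPlan::Partition whose target_path already holds the partition data, |data| is that same image in memory, and error handling is trimmed.

#include <algorithm>
#include <memory>

#include "update_engine/payload_consumer/verity_writer_interface.h"

namespace chromeos_update_engine {

bool WriteVerity(const InstallPlan::Partition& partition,
                 const brillo::Blob& data) {
  std::unique_ptr<VerityWriterInterface> writer =
      verity_writer::CreateVerityWriter();
  if (!writer->Init(partition))
    return false;  // e.g. unsupported hash algorithm or a size mismatch.
  // Feed the image in ascending offset order; the writer flushes the hash
  // tree (and FEC) to partition.target_path once their covered data has
  // been seen.
  for (uint64_t offset = 0; offset < data.size();
       offset += partition.block_size) {
    size_t chunk =
        std::min<uint64_t>(partition.block_size, data.size() - offset);
    if (!writer->Update(offset, data.data() + offset, chunk))
      return false;
  }
  return true;
}

}  // namespace chromeos_update_engine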
diff --git a/payload_consumer/verity_writer_stub.cc b/payload_consumer/verity_writer_stub.cc
new file mode 100644
index 0000000..a0e2467
--- /dev/null
+++ b/payload_consumer/verity_writer_stub.cc
@@ -0,0 +1,39 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_consumer/verity_writer_stub.h"
+
+#include <memory>
+
+namespace chromeos_update_engine {
+
+namespace verity_writer {
+std::unique_ptr<VerityWriterInterface> CreateVerityWriter() {
+  return std::make_unique<VerityWriterStub>();
+}
+}  // namespace verity_writer
+
+bool VerityWriterStub::Init(const InstallPlan::Partition& partition) {
+  return partition.hash_tree_size == 0 && partition.fec_size == 0;
+}
+
+bool VerityWriterStub::Update(uint64_t offset,
+                              const uint8_t* buffer,
+                              size_t size) {
+  return true;
+}
+
+}  // namespace chromeos_update_engine
diff --git a/payload_consumer/verity_writer_stub.h b/payload_consumer/verity_writer_stub.h
new file mode 100644
index 0000000..ea5e574
--- /dev/null
+++ b/payload_consumer/verity_writer_stub.h
@@ -0,0 +1,38 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_PAYLOAD_CONSUMER_VERITY_WRITER_STUB_H_
+#define UPDATE_ENGINE_PAYLOAD_CONSUMER_VERITY_WRITER_STUB_H_
+
+#include "update_engine/payload_consumer/verity_writer_interface.h"
+
+namespace chromeos_update_engine {
+
+class VerityWriterStub : public VerityWriterInterface {
+ public:
+  VerityWriterStub() = default;
+  ~VerityWriterStub() override = default;
+
+  bool Init(const InstallPlan::Partition& partition) override;
+  bool Update(uint64_t offset, const uint8_t* buffer, size_t size) override;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(VerityWriterStub);
+};
+
+}  // namespace chromeos_update_engine
+
+#endif  // UPDATE_ENGINE_PAYLOAD_CONSUMER_VERITY_WRITER_STUB_H_
diff --git a/payload_generator/ab_generator.cc b/payload_generator/ab_generator.cc
index 089dfd9..f24f6c3 100644
--- a/payload_generator/ab_generator.cc
+++ b/payload_generator/ab_generator.cc
@@ -57,8 +57,6 @@
                                                        blob_file));
   LOG(INFO) << "done reading " << new_part.name;
 
-  TEST_AND_RETURN_FALSE(
-      FragmentOperations(config.version, aops, new_part.path, blob_file));
   SortOperationsByDestination(aops);
 
   // Use the soft_chunk_size when merging operations to prevent merging all
@@ -69,8 +67,10 @@
     merge_chunk_blocks = hard_chunk_blocks;
   }
 
+  LOG(INFO) << "Merging " << aops->size() << " operations.";
   TEST_AND_RETURN_FALSE(MergeOperations(
       aops, config.version, merge_chunk_blocks, new_part.path, blob_file));
+  LOG(INFO) << aops->size() << " operations after merge.";
 
   if (config.version.minor >= kOpSrcHashMinorPayloadVersion)
     TEST_AND_RETURN_FALSE(AddSourceHash(aops, old_part.path));
diff --git a/payload_generator/ab_generator_unittest.cc b/payload_generator/ab_generator_unittest.cc
index 25609c7..d083d8a 100644
--- a/payload_generator/ab_generator_unittest.cc
+++ b/payload_generator/ab_generator_unittest.cc
@@ -58,10 +58,6 @@
   const size_t part_num_blocks = 7;
 
   // Create the target partition data.
-  string part_path;
-  EXPECT_TRUE(utils::MakeTempFile(
-      "SplitReplaceOrReplaceBzTest_part.XXXXXX", &part_path, nullptr));
-  ScopedPathUnlinker part_path_unlinker(part_path);
   const size_t part_size = part_num_blocks * kBlockSize;
   brillo::Blob part_data;
   if (compressible) {
@@ -74,7 +70,9 @@
       part_data.push_back(dis(gen));
   }
   ASSERT_EQ(part_size, part_data.size());
-  ASSERT_TRUE(utils::WriteFile(part_path.c_str(), part_data.data(), part_size));
+  test_utils::ScopedTempFile part_file(
+      "SplitReplaceOrReplaceBzTest_part.XXXXXX");
+  ASSERT_TRUE(test_utils::WriteFileVector(part_file.path(), part_data));
 
   // Create original operation and blob data.
   const size_t op_ex1_offset = op_ex1_start_block * kBlockSize;
@@ -109,15 +107,12 @@
   aop.name = "SplitTestOp";
 
   // Create the data file.
-  string data_path;
-  EXPECT_TRUE(utils::MakeTempFile(
-      "SplitReplaceOrReplaceBzTest_data.XXXXXX", &data_path, nullptr));
-  ScopedPathUnlinker data_path_unlinker(data_path);
-  int data_fd = open(data_path.c_str(), O_RDWR, 000);
+  test_utils::ScopedTempFile data_file(
+      "SplitReplaceOrReplaceBzTest_data.XXXXXX");
+  EXPECT_TRUE(test_utils::WriteFileVector(data_file.path(), op_blob));
+  int data_fd = open(data_file.path().c_str(), O_RDWR, 000);
   EXPECT_GE(data_fd, 0);
   ScopedFdCloser data_fd_closer(&data_fd);
-  EXPECT_TRUE(utils::WriteFile(data_path.c_str(), op_blob.data(),
-                               op_blob.size()));
   off_t data_file_size = op_blob.size();
   BlobFileWriter blob_file(data_fd, &data_file_size);
 
@@ -126,7 +121,7 @@
   PayloadVersion version(kChromeOSMajorPayloadVersion,
                          kSourceMinorPayloadVersion);
   ASSERT_TRUE(ABGenerator::SplitAReplaceOp(
-      version, aop, part_path, &result_ops, &blob_file));
+      version, aop, part_file.path(), &result_ops, &blob_file));
 
   // Check the result.
   InstallOperation_Type expected_type =
@@ -212,10 +207,6 @@
   const size_t part_num_blocks = total_op_num_blocks + 2;
 
   // Create the target partition data.
-  string part_path;
-  EXPECT_TRUE(utils::MakeTempFile(
-      "MergeReplaceOrReplaceBzTest_part.XXXXXX", &part_path, nullptr));
-  ScopedPathUnlinker part_path_unlinker(part_path);
   const size_t part_size = part_num_blocks * kBlockSize;
   brillo::Blob part_data;
   if (compressible) {
@@ -228,7 +219,9 @@
       part_data.push_back(dis(gen));
   }
   ASSERT_EQ(part_size, part_data.size());
-  ASSERT_TRUE(utils::WriteFile(part_path.c_str(), part_data.data(), part_size));
+  test_utils::ScopedTempFile part_file(
+      "MergeReplaceOrReplaceBzTest_part.XXXXXX");
+  ASSERT_TRUE(test_utils::WriteFileVector(part_file.path(), part_data));
 
   // Create original operations and blob data.
   vector<AnnotatedOperation> aops;
@@ -277,23 +270,20 @@
   aops.push_back(second_aop);
 
   // Create the data file.
-  string data_path;
-  EXPECT_TRUE(utils::MakeTempFile(
-      "MergeReplaceOrReplaceBzTest_data.XXXXXX", &data_path, nullptr));
-  ScopedPathUnlinker data_path_unlinker(data_path);
-  int data_fd = open(data_path.c_str(), O_RDWR, 000);
+  test_utils::ScopedTempFile data_file(
+      "MergeReplaceOrReplaceBzTest_data.XXXXXX");
+  EXPECT_TRUE(test_utils::WriteFileVector(data_file.path(), blob_data));
+  int data_fd = open(data_file.path().c_str(), O_RDWR, 000);
   EXPECT_GE(data_fd, 0);
   ScopedFdCloser data_fd_closer(&data_fd);
-  EXPECT_TRUE(utils::WriteFile(data_path.c_str(), blob_data.data(),
-                               blob_data.size()));
   off_t data_file_size = blob_data.size();
   BlobFileWriter blob_file(data_fd, &data_file_size);
 
   // Merge the operations.
   PayloadVersion version(kChromeOSMajorPayloadVersion,
                          kSourceMinorPayloadVersion);
-  EXPECT_TRUE(
-      ABGenerator::MergeOperations(&aops, version, 5, part_path, &blob_file));
+  EXPECT_TRUE(ABGenerator::MergeOperations(
+      &aops, version, 5, part_file.path(), &blob_file));
 
   // Check the result.
   InstallOperation_Type expected_op_type =
@@ -570,16 +560,12 @@
   second_aop.op = second_op;
   aops.push_back(second_aop);
 
-  string src_part_path;
-  EXPECT_TRUE(utils::MakeTempFile("AddSourceHashTest_src_part.XXXXXX",
-                                  &src_part_path, nullptr));
-  ScopedPathUnlinker src_part_path_unlinker(src_part_path);
+  test_utils::ScopedTempFile src_part_file("AddSourceHashTest_src_part.XXXXXX");
   brillo::Blob src_data(kBlockSize);
   test_utils::FillWithData(&src_data);
-  ASSERT_TRUE(utils::WriteFile(src_part_path.c_str(), src_data.data(),
-                               src_data.size()));
+  ASSERT_TRUE(test_utils::WriteFileVector(src_part_file.path(), src_data));
 
-  EXPECT_TRUE(ABGenerator::AddSourceHash(&aops, src_part_path));
+  EXPECT_TRUE(ABGenerator::AddSourceHash(&aops, src_part_file.path()));
 
   EXPECT_TRUE(aops[0].op.has_src_sha256_hash());
   EXPECT_FALSE(aops[1].op.has_src_sha256_hash());
diff --git a/payload_generator/block_mapping_unittest.cc b/payload_generator/block_mapping_unittest.cc
index 4d09710..e1870ec 100644
--- a/payload_generator/block_mapping_unittest.cc
+++ b/payload_generator/block_mapping_unittest.cc
@@ -39,23 +39,9 @@
 
 class BlockMappingTest : public ::testing::Test {
  protected:
-  void SetUp() override {
-    EXPECT_TRUE(utils::MakeTempFile("BlockMappingTest_old.XXXXXX",
-                                    &old_part_path_,
-                                    nullptr));
-    EXPECT_TRUE(utils::MakeTempFile("BlockMappingTest_new.XXXXXX",
-                                    &new_part_path_,
-                                    nullptr));
-
-    old_part_unlinker_.reset(new ScopedPathUnlinker(old_part_path_));
-    new_part_unlinker_.reset(new ScopedPathUnlinker(new_part_path_));
-  }
-
   // Old new partition files used in testing.
-  string old_part_path_;
-  string new_part_path_;
-  std::unique_ptr<ScopedPathUnlinker> old_part_unlinker_;
-  std::unique_ptr<ScopedPathUnlinker> new_part_unlinker_;
+  test_utils::ScopedTempFile old_part_{"BlockMappingTest_old.XXXXXX"};
+  test_utils::ScopedTempFile new_part_{"BlockMappingTest_new.XXXXXX"};
 
   size_t block_size_{1024};
   BlockMapping bm_{block_size_};  // BlockMapping under test.
@@ -72,8 +58,8 @@
 }
 
 TEST_F(BlockMappingTest, BlocksAreNotKeptInMemory) {
-  test_utils::WriteFileString(old_part_path_, string(block_size_, 'a'));
-  int old_fd = HANDLE_EINTR(open(old_part_path_.c_str(), O_RDONLY));
+  test_utils::WriteFileString(old_part_.path(), string(block_size_, 'a'));
+  int old_fd = HANDLE_EINTR(open(old_part_.path().c_str(), O_RDONLY));
   ScopedFdCloser old_fd_closer(&old_fd);
 
   EXPECT_EQ(0, bm_.AddDiskBlock(old_fd, 0));
@@ -107,18 +93,18 @@
   string old_contents(10 * block_size_, '\0');
   for (size_t i = 0; i < old_contents.size(); ++i)
     old_contents[i] = 4 + i / block_size_;
-  test_utils::WriteFileString(old_part_path_, old_contents);
+  test_utils::WriteFileString(old_part_.path(), old_contents);
 
   // A string including the block with all zeros and overlapping some of the
   // other blocks in old_contents.
   string new_contents(6 * block_size_, '\0');
   for (size_t i = 0; i < new_contents.size(); ++i)
     new_contents[i] = i / block_size_;
-  test_utils::WriteFileString(new_part_path_, new_contents);
+  test_utils::WriteFileString(new_part_.path(), new_contents);
 
   vector<BlockMapping::BlockId> old_ids, new_ids;
-  EXPECT_TRUE(MapPartitionBlocks(old_part_path_,
-                                 new_part_path_,
+  EXPECT_TRUE(MapPartitionBlocks(old_part_.path(),
+                                 new_part_.path(),
                                  old_contents.size(),
                                  new_contents.size(),
                                  block_size_,
diff --git a/payload_generator/boot_img_filesystem.cc b/payload_generator/boot_img_filesystem.cc
new file mode 100644
index 0000000..90be966
--- /dev/null
+++ b/payload_generator/boot_img_filesystem.cc
@@ -0,0 +1,110 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_generator/boot_img_filesystem.h"
+
+#include <base/logging.h>
+#include <brillo/secure_blob.h>
+#include <puffin/utils.h>
+
+#include "update_engine/common/utils.h"
+#include "update_engine/payload_generator/delta_diff_generator.h"
+#include "update_engine/payload_generator/extent_ranges.h"
+
+using std::string;
+using std::unique_ptr;
+using std::vector;
+
+namespace chromeos_update_engine {
+
+unique_ptr<BootImgFilesystem> BootImgFilesystem::CreateFromFile(
+    const string& filename) {
+  if (filename.empty())
+    return nullptr;
+
+  brillo::Blob header;
+  if (!utils::ReadFileChunk(filename, 0, sizeof(boot_img_hdr), &header) ||
+      header.size() != sizeof(boot_img_hdr) ||
+      memcmp(header.data(), BOOT_MAGIC, BOOT_MAGIC_SIZE) != 0) {
+    return nullptr;
+  }
+
+  unique_ptr<BootImgFilesystem> result(new BootImgFilesystem());
+  result->filename_ = filename;
+  memcpy(&result->hdr_, header.data(), header.size());
+  return result;
+}
+
+size_t BootImgFilesystem::GetBlockSize() const {
+  // Page size may not be 4K, but we currently only support 4K block size.
+  return kBlockSize;
+}
+
+size_t BootImgFilesystem::GetBlockCount() const {
+  return utils::DivRoundUp(utils::FileSize(filename_), kBlockSize);
+}
+
+FilesystemInterface::File BootImgFilesystem::GetFile(const string& name,
+                                                     uint64_t offset,
+                                                     uint64_t size) const {
+  File file;
+  file.name = name;
+  file.extents = {ExtentForBytes(kBlockSize, offset, size)};
+
+  brillo::Blob data;
+  if (utils::ReadFileChunk(filename_, offset, size, &data)) {
+    constexpr size_t kGZipHeaderSize = 10;
+    // Check GZip header magic.
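+    // A gzip stream starts with the magic bytes 0x1F 0x8B and has a fixed
+    // 10-byte member header.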
+    if (data.size() > kGZipHeaderSize && data[0] == 0x1F && data[1] == 0x8B) {
+      if (!puffin::LocateDeflatesInGzip(data, &file.deflates)) {
+        // Use the deflates that were found even if LocateDeflatesInGzip()
+        // fails; any deflates it returns should be correct, and a later
+        // failure should not stop us from using them. A common case is
+        // trailing data after the gzip stream: the function tries to parse
+        // it as another gzip and fails, but the deflates from the first
+        // gzip are still wanted.
+        LOG(WARNING) << "Error occurred parsing gzip " << name << " at offset "
+                     << offset << " of " << filename_ << ", found "
+                     << file.deflates.size() << " deflates.";
+      }
+      for (auto& deflate : file.deflates) {
+        deflate.offset += offset * 8;
+      }
+    }
+  }
+  return file;
+}
+
+bool BootImgFilesystem::GetFiles(vector<File>* files) const {
+  files->clear();
+  const uint64_t file_size = utils::FileSize(filename_);
+  // The first page is the header.
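+  // The image is laid out in page-aligned sections: a one-page header,
+  // then the kernel, then the ramdisk, each padded to |page_size|.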
+  uint64_t offset = hdr_.page_size;
+  if (hdr_.kernel_size > 0 && offset + hdr_.kernel_size <= file_size) {
+    files->emplace_back(GetFile("<kernel>", offset, hdr_.kernel_size));
+  }
+  offset += utils::RoundUp(hdr_.kernel_size, hdr_.page_size);
+  if (hdr_.ramdisk_size > 0 && offset + hdr_.ramdisk_size <= file_size) {
+    files->emplace_back(GetFile("<ramdisk>", offset, hdr_.ramdisk_size));
+  }
+  return true;
+}
+
+bool BootImgFilesystem::LoadSettings(brillo::KeyValueStore* store) const {
+  return false;
+}
+
+}  // namespace chromeos_update_engine
diff --git a/payload_generator/boot_img_filesystem.h b/payload_generator/boot_img_filesystem.h
new file mode 100644
index 0000000..87725d4
--- /dev/null
+++ b/payload_generator/boot_img_filesystem.h
@@ -0,0 +1,78 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_PAYLOAD_GENERATOR_BOOT_IMG_FILESYSTEM_H_
+#define UPDATE_ENGINE_PAYLOAD_GENERATOR_BOOT_IMG_FILESYSTEM_H_
+
+#include "update_engine/payload_generator/filesystem_interface.h"
+
+#include <memory>
+#include <string>
+#include <vector>
+
+namespace chromeos_update_engine {
+
+class BootImgFilesystem : public FilesystemInterface {
+ public:
+  // Creates a BootImgFilesystem from an Android boot.img file.
+  static std::unique_ptr<BootImgFilesystem> CreateFromFile(
+      const std::string& filename);
+  ~BootImgFilesystem() override = default;
+
+  // FilesystemInterface overrides.
+  size_t GetBlockSize() const override;
+  size_t GetBlockCount() const override;
+
+  // GetFiles() returns one FilesystemInterface::File for the kernel and one
+  // for the ramdisk.
+  bool GetFiles(std::vector<File>* files) const override;
+
+  bool LoadSettings(brillo::KeyValueStore* store) const override;
+
+ private:
+  friend class BootImgFilesystemTest;
+
+  BootImgFilesystem() = default;
+
+  File GetFile(const std::string& name, uint64_t offset, uint64_t size) const;
+
+  // The boot.img file path.
+  std::string filename_;
+
+// https://android.googlesource.com/platform/system/core/+/master/mkbootimg/include/bootimg/bootimg.h
+#define BOOT_MAGIC "ANDROID!"
+#define BOOT_MAGIC_SIZE 8
+  struct boot_img_hdr {
+    // Must be BOOT_MAGIC.
+    uint8_t magic[BOOT_MAGIC_SIZE];
+    uint32_t kernel_size;  /* size in bytes */
+    uint32_t kernel_addr;  /* physical load addr */
+    uint32_t ramdisk_size; /* size in bytes */
+    uint32_t ramdisk_addr; /* physical load addr */
+    uint32_t second_size;  /* size in bytes */
+    uint32_t second_addr;  /* physical load addr */
+    uint32_t tags_addr;    /* physical addr for kernel tags */
+    uint32_t page_size;    /* flash page size we assume */
+  } __attribute__((packed));
+  // The boot image header.
+  boot_img_hdr hdr_;
+
+  DISALLOW_COPY_AND_ASSIGN(BootImgFilesystem);
+};
+
+}  // namespace chromeos_update_engine
+
+#endif  // UPDATE_ENGINE_PAYLOAD_GENERATOR_BOOT_IMG_FILESYSTEM_H_
diff --git a/payload_generator/boot_img_filesystem_unittest.cc b/payload_generator/boot_img_filesystem_unittest.cc
new file mode 100644
index 0000000..b1e0d99
--- /dev/null
+++ b/payload_generator/boot_img_filesystem_unittest.cc
@@ -0,0 +1,117 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_generator/boot_img_filesystem.h"
+
+#include <vector>
+
+#include <brillo/secure_blob.h>
+#include <gtest/gtest.h>
+
+#include "update_engine/common/test_utils.h"
+#include "update_engine/common/utils.h"
+
+namespace chromeos_update_engine {
+
+using std::unique_ptr;
+using std::vector;
+
+class BootImgFilesystemTest : public ::testing::Test {
+ protected:
+  brillo::Blob GetBootImg(const brillo::Blob& kernel,
+                          const brillo::Blob& ramdisk) {
+    brillo::Blob boot_img(16 * 1024);
+    BootImgFilesystem::boot_img_hdr hdr;
+    memcpy(hdr.magic, BOOT_MAGIC, BOOT_MAGIC_SIZE);
+    hdr.kernel_size = kernel.size();
+    hdr.ramdisk_size = ramdisk.size();
+    hdr.page_size = 4096;
+    size_t offset = 0;
+    memcpy(boot_img.data() + offset, &hdr, sizeof(hdr));
+    offset += utils::RoundUp(sizeof(hdr), hdr.page_size);
+    memcpy(boot_img.data() + offset, kernel.data(), kernel.size());
+    offset += utils::RoundUp(kernel.size(), hdr.page_size);
+    memcpy(boot_img.data() + offset, ramdisk.data(), ramdisk.size());
+    return boot_img;
+  }
+
+  test_utils::ScopedTempFile boot_file_;
+};
+
+TEST_F(BootImgFilesystemTest, SimpleTest) {
+  test_utils::WriteFileVector(
+      boot_file_.path(),
+      GetBootImg(brillo::Blob(1234, 'k'), brillo::Blob(5678, 'r')));
+  unique_ptr<BootImgFilesystem> fs =
+      BootImgFilesystem::CreateFromFile(boot_file_.path());
+  EXPECT_NE(nullptr, fs);
+
+  vector<FilesystemInterface::File> files;
+  EXPECT_TRUE(fs->GetFiles(&files));
+  ASSERT_EQ(2u, files.size());
+
+  EXPECT_EQ("<kernel>", files[0].name);
+  EXPECT_EQ(1u, files[0].extents.size());
+  EXPECT_EQ(1u, files[0].extents[0].start_block());
+  EXPECT_EQ(1u, files[0].extents[0].num_blocks());
+  EXPECT_TRUE(files[0].deflates.empty());
+
+  EXPECT_EQ("<ramdisk>", files[1].name);
+  EXPECT_EQ(1u, files[1].extents.size());
+  EXPECT_EQ(2u, files[1].extents[0].start_block());
+  EXPECT_EQ(2u, files[1].extents[0].num_blocks());
+  EXPECT_TRUE(files[1].deflates.empty());
+}
+
+TEST_F(BootImgFilesystemTest, BadImageTest) {
+  brillo::Blob boot_img = GetBootImg({}, {});
+  boot_img[7] = '?';
+  test_utils::WriteFileVector(boot_file_.path(), boot_img);
+  unique_ptr<BootImgFilesystem> fs =
+      BootImgFilesystem::CreateFromFile(boot_file_.path());
+  EXPECT_EQ(nullptr, fs);
+}
+
+TEST_F(BootImgFilesystemTest, GZipRamdiskTest) {
+  // echo ramdisk | gzip | hexdump -v -e '/1 "0x%02x, "'
+  const brillo::Blob ramdisk = {0x1f, 0x8b, 0x08, 0x00, 0x3a, 0x83, 0x35,
+                                0x5b, 0x00, 0x03, 0x2b, 0x4a, 0xcc, 0x4d,
+                                0xc9, 0x2c, 0xce, 0xe6, 0x02, 0x00, 0x2e,
+                                0xf6, 0x0b, 0x08, 0x08, 0x00, 0x00, 0x00};
+  test_utils::WriteFileVector(boot_file_.path(),
+                              GetBootImg(brillo::Blob(5678, 'k'), ramdisk));
+  unique_ptr<BootImgFilesystem> fs =
+      BootImgFilesystem::CreateFromFile(boot_file_.path());
+  EXPECT_NE(nullptr, fs);
+
+  vector<FilesystemInterface::File> files;
+  EXPECT_TRUE(fs->GetFiles(&files));
+  ASSERT_EQ(2u, files.size());
+
+  EXPECT_EQ("<kernel>", files[0].name);
+  EXPECT_EQ(1u, files[0].extents.size());
+  EXPECT_EQ(1u, files[0].extents[0].start_block());
+  EXPECT_EQ(2u, files[0].extents[0].num_blocks());
+  EXPECT_TRUE(files[0].deflates.empty());
+
+  EXPECT_EQ("<ramdisk>", files[1].name);
+  EXPECT_EQ(1u, files[1].extents.size());
+  EXPECT_EQ(3u, files[1].extents[0].start_block());
+  EXPECT_EQ(1u, files[1].extents[0].num_blocks());
+  EXPECT_EQ(1u, files[1].deflates.size());
+}
+
+}  // namespace chromeos_update_engine
diff --git a/payload_generator/cycle_breaker.cc b/payload_generator/cycle_breaker.cc
index 52a6f60..a8a04ab 100644
--- a/payload_generator/cycle_breaker.cc
+++ b/payload_generator/cycle_breaker.cc
@@ -18,14 +18,15 @@
 
 #include <inttypes.h>
 
+#include <limits>
 #include <set>
 #include <string>
 #include <utility>
 
+#include <base/stl_util.h>
 #include <base/strings/string_util.h>
 #include <base/strings/stringprintf.h>
 
-#include "update_engine/common/utils.h"
 #include "update_engine/payload_generator/graph_utils.h"
 #include "update_engine/payload_generator/tarjan.h"
 
@@ -83,7 +84,7 @@
            jt != component_indexes.end(); ++jt) {
         // If there's a link from *it -> *jt in the graph,
         // add a subgraph_ edge
-        if (utils::MapContainsKey(subgraph_[*it].out_edges, *jt))
+        if (base::ContainsKey(subgraph_[*it].out_edges, *jt))
           subgraph_[*it].subgraph_edges.insert(*jt);
       }
     }
@@ -146,7 +147,7 @@
   for (vector<Vertex::Index>::const_iterator it = ++stack_.begin(),
            e = stack_.end(); it != e; ++it) {
     Edge edge = make_pair(*(it - 1), *it);
-    if (utils::SetContainsKey(cut_edges_, edge)) {
+    if (base::ContainsKey(cut_edges_, edge)) {
       return true;
     }
   }
diff --git a/payload_generator/cycle_breaker_unittest.cc b/payload_generator/cycle_breaker_unittest.cc
index e92bc30..7554dbb 100644
--- a/payload_generator/cycle_breaker_unittest.cc
+++ b/payload_generator/cycle_breaker_unittest.cc
@@ -22,9 +22,9 @@
 #include <vector>
 
 #include <base/logging.h>
+#include <base/stl_util.h>
 #include <gtest/gtest.h>
 
-#include "update_engine/common/utils.h"
 #include "update_engine/payload_generator/graph_types.h"
 
 using std::make_pair;
@@ -83,14 +83,14 @@
   // C->D->E
   // G->H
 
-  EXPECT_TRUE(utils::SetContainsKey(broken_edges, make_pair(n_a, n_e)) ||
-              utils::SetContainsKey(broken_edges, make_pair(n_e, n_b)) ||
-              utils::SetContainsKey(broken_edges, make_pair(n_b, n_a)));
-  EXPECT_TRUE(utils::SetContainsKey(broken_edges, make_pair(n_c, n_d)) ||
-              utils::SetContainsKey(broken_edges, make_pair(n_d, n_e)) ||
-              utils::SetContainsKey(broken_edges, make_pair(n_e, n_c)));
-  EXPECT_TRUE(utils::SetContainsKey(broken_edges, make_pair(n_g, n_h)) ||
-              utils::SetContainsKey(broken_edges, make_pair(n_h, n_g)));
+  EXPECT_TRUE(base::ContainsKey(broken_edges, make_pair(n_a, n_e)) ||
+              base::ContainsKey(broken_edges, make_pair(n_e, n_b)) ||
+              base::ContainsKey(broken_edges, make_pair(n_b, n_a)));
+  EXPECT_TRUE(base::ContainsKey(broken_edges, make_pair(n_c, n_d)) ||
+              base::ContainsKey(broken_edges, make_pair(n_d, n_e)) ||
+              base::ContainsKey(broken_edges, make_pair(n_e, n_c)));
+  EXPECT_TRUE(base::ContainsKey(broken_edges, make_pair(n_g, n_h)) ||
+              base::ContainsKey(broken_edges, make_pair(n_h, n_g)));
   EXPECT_EQ(3U, broken_edges.size());
 }
 
@@ -217,11 +217,11 @@
   breaker.BreakCycles(graph, &broken_edges);
 
   // These are required to be broken:
-  EXPECT_TRUE(utils::SetContainsKey(broken_edges, make_pair(n_b, n_a)));
-  EXPECT_TRUE(utils::SetContainsKey(broken_edges, make_pair(n_b, n_c)));
-  EXPECT_TRUE(utils::SetContainsKey(broken_edges, make_pair(n_d, n_e)));
-  EXPECT_TRUE(utils::SetContainsKey(broken_edges, make_pair(n_f, n_g)));
-  EXPECT_TRUE(utils::SetContainsKey(broken_edges, make_pair(n_h, n_i)));
+  EXPECT_TRUE(base::ContainsKey(broken_edges, make_pair(n_b, n_a)));
+  EXPECT_TRUE(base::ContainsKey(broken_edges, make_pair(n_b, n_c)));
+  EXPECT_TRUE(base::ContainsKey(broken_edges, make_pair(n_d, n_e)));
+  EXPECT_TRUE(base::ContainsKey(broken_edges, make_pair(n_f, n_g)));
+  EXPECT_TRUE(base::ContainsKey(broken_edges, make_pair(n_h, n_i)));
 }
 
 TEST(CycleBreakerTest, UnblockGraphTest) {
@@ -248,8 +248,8 @@
   breaker.BreakCycles(graph, &broken_edges);
 
   // These are required to be broken:
-  EXPECT_TRUE(utils::SetContainsKey(broken_edges, make_pair(n_a, n_b)));
-  EXPECT_TRUE(utils::SetContainsKey(broken_edges, make_pair(n_a, n_c)));
+  EXPECT_TRUE(base::ContainsKey(broken_edges, make_pair(n_a, n_b)));
+  EXPECT_TRUE(base::ContainsKey(broken_edges, make_pair(n_a, n_c)));
 }
 
 TEST(CycleBreakerTest, SkipOpsTest) {
diff --git a/payload_generator/deflate_utils.cc b/payload_generator/deflate_utils.cc
index e331142..2719048 100644
--- a/payload_generator/deflate_utils.cc
+++ b/payload_generator/deflate_utils.cc
@@ -287,12 +287,16 @@
     }
 
     // Search for deflates if the file is in zip format.
+    // .zvoice files may eventually move out of rootfs. If that happens, remove
+    // ".zvoice" (crbug.com/782918).
+    const string zip_file_extensions[] = {".apk", ".zip", ".jar", ".zvoice"};
     bool is_zip =
-        base::EndsWith(
-            file.name, ".apk", base::CompareCase::INSENSITIVE_ASCII) ||
-        base::EndsWith(
-            file.name, ".zip", base::CompareCase::INSENSITIVE_ASCII) ||
-        base::EndsWith(file.name, ".jar", base::CompareCase::INSENSITIVE_ASCII);
+        any_of(zip_file_extensions,
+               std::end(zip_file_extensions),
+               [&file](const string& ext) {
+                 return base::EndsWith(
+                     file.name, ext, base::CompareCase::INSENSITIVE_ASCII);
+               });
 
     if (is_zip && extract_deflates) {
       brillo::Blob data;
diff --git a/payload_generator/delta_diff_utils.cc b/payload_generator/delta_diff_utils.cc
index 877e13f..41de623 100644
--- a/payload_generator/delta_diff_utils.cc
+++ b/payload_generator/delta_diff_utils.cc
@@ -29,6 +29,8 @@
 #include <unistd.h>
 
 #include <algorithm>
+#include <functional>
+#include <list>
 #include <map>
 #include <memory>
 #include <utility>
@@ -38,14 +40,17 @@
 #include <base/strings/string_util.h>
 #include <base/strings/stringprintf.h>
 #include <base/threading/simple_thread.h>
+#include <base/time/time.h>
 #include <brillo/data_encoding.h>
 #include <bsdiff/bsdiff.h>
 #include <bsdiff/patch_writer_factory.h>
+#include <puffin/utils.h>
 
 #include "update_engine/common/hash_calculator.h"
 #include "update_engine/common/subprocess.h"
 #include "update_engine/common/utils.h"
 #include "update_engine/payload_consumer/payload_constants.h"
+#include "update_engine/payload_generator/ab_generator.h"
 #include "update_engine/payload_generator/block_mapping.h"
 #include "update_engine/payload_generator/bzip.h"
 #include "update_engine/payload_generator/deflate_utils.h"
@@ -55,6 +60,7 @@
 #include "update_engine/payload_generator/squashfs_filesystem.h"
 #include "update_engine/payload_generator/xz.h"
 
+using std::list;
 using std::map;
 using std::string;
 using std::vector;
@@ -195,13 +201,16 @@
         version_(version),
         old_extents_(old_extents),
         new_extents_(new_extents),
+        new_extents_blocks_(utils::BlocksInExtents(new_extents)),
         old_deflates_(old_deflates),
         new_deflates_(new_deflates),
         name_(name),
         chunk_blocks_(chunk_blocks),
         blob_file_(blob_file) {}
 
-  FileDeltaProcessor(FileDeltaProcessor&& processor) = default;
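+  // Returns true if this processor covers more new blocks than |other|, so
+  // that processors can be sorted to start the largest files first.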
+  bool operator>(const FileDeltaProcessor& other) const {
+    return new_extents_blocks_ > other.new_extents_blocks_;
+  }
 
   ~FileDeltaProcessor() override = default;
 
@@ -211,34 +220,35 @@
   void Run() override;
 
   // Merge each file processor's ops list to aops.
-  void MergeOperation(vector<AnnotatedOperation>* aops);
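+  // Returns false if delta generation for this file failed in Run().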
+  bool MergeOperation(vector<AnnotatedOperation>* aops);
 
  private:
-  const string& old_part_;
-  const string& new_part_;
+  const string& old_part_;  // NOLINT(runtime/member_string_references)
+  const string& new_part_;  // NOLINT(runtime/member_string_references)
   const PayloadVersion& version_;
 
   // The block ranges of the old/new file within the src/tgt image
   const vector<Extent> old_extents_;
   const vector<Extent> new_extents_;
+  const size_t new_extents_blocks_;
   const vector<puffin::BitExtent> old_deflates_;
   const vector<puffin::BitExtent> new_deflates_;
   const string name_;
   // Block limit of one aop.
-  ssize_t chunk_blocks_;
+  const ssize_t chunk_blocks_;
   BlobFileWriter* blob_file_;
 
   // The list of ops to reach the new file from the old file.
   vector<AnnotatedOperation> file_aops_;
 
+  bool failed_ = false;
+
   DISALLOW_COPY_AND_ASSIGN(FileDeltaProcessor);
 };
 
 void FileDeltaProcessor::Run() {
   TEST_AND_RETURN(blob_file_ != nullptr);
-
-  LOG(INFO) << "Encoding file " << name_ << " ("
-            << utils::BlocksInExtents(new_extents_) << " blocks)";
+  base::Time start = base::Time::Now();
 
   if (!DeltaReadFile(&file_aops_,
                      old_part_,
@@ -252,13 +262,31 @@
                      version_,
                      blob_file_)) {
     LOG(ERROR) << "Failed to generate delta for " << name_ << " ("
-               << utils::BlocksInExtents(new_extents_) << " blocks)";
+               << new_extents_blocks_ << " blocks)";
+    failed_ = true;
+    return;
   }
+
+  if (!version_.InplaceUpdate()) {
+    if (!ABGenerator::FragmentOperations(
+            version_, &file_aops_, new_part_, blob_file_)) {
+      LOG(ERROR) << "Failed to fragment operations for " << name_;
+      failed_ = true;
+      return;
+    }
+  }
+
+  LOG(INFO) << "Encoded file " << name_ << " (" << new_extents_blocks_
+            << " blocks) in " << (base::Time::Now() - start).InSecondsF()
+            << " seconds.";
 }
 
-void FileDeltaProcessor::MergeOperation(vector<AnnotatedOperation>* aops) {
+bool FileDeltaProcessor::MergeOperation(vector<AnnotatedOperation>* aops) {
+  if (failed_)
+    return false;
   aops->reserve(aops->size() + file_aops_.size());
   std::move(file_aops_.begin(), file_aops_.end(), std::back_inserter(*aops));
+  return true;
 }
 
 bool DeltaReadPartition(vector<AnnotatedOperation>* aops,
@@ -271,6 +299,18 @@
   ExtentRanges old_visited_blocks;
   ExtentRanges new_visited_blocks;
 
+  // If verity is enabled, mark those blocks as visited to skip generating
+  // operations for them.
+  if (version.minor >= kVerityMinorPayloadVersion &&
+      !new_part.verity.IsEmpty()) {
+    LOG(INFO) << "Skipping verity hash tree blocks: "
+              << ExtentsToString({new_part.verity.hash_tree_extent});
+    new_visited_blocks.AddExtent(new_part.verity.hash_tree_extent);
+    LOG(INFO) << "Skipping verity FEC blocks: "
+              << ExtentsToString({new_part.verity.fec_extent});
+    new_visited_blocks.AddExtent(new_part.verity.fec_extent);
+  }
+
   TEST_AND_RETURN_FALSE(DeltaMovedAndZeroBlocks(
       aops,
       old_part.path,
@@ -298,7 +338,7 @@
   TEST_AND_RETURN_FALSE(deflate_utils::PreprocessParitionFiles(
       new_part, &new_files, puffdiff_allowed));
 
-  vector<FileDeltaProcessor> file_delta_processors;
+  list<FileDeltaProcessor> file_delta_processors;
 
   // The processing is very straightforward here, we generate operations for
   // every file (and pseudo-file such as the metadata) in the new filesystem
@@ -343,8 +383,45 @@
                                        hard_chunk_blocks,
                                        blob_file);
   }
+  // Process all the blocks not included in any file. All the unused blocks
+  // in the old partition are provided as available data.
+  vector<Extent> new_unvisited = {
+      ExtentForRange(0, new_part.size / kBlockSize)};
+  new_unvisited = FilterExtentRanges(new_unvisited, new_visited_blocks);
+  if (!new_unvisited.empty()) {
+    vector<Extent> old_unvisited;
+    if (old_part.fs_interface) {
+      old_unvisited.push_back(ExtentForRange(0, old_part.size / kBlockSize));
+      old_unvisited = FilterExtentRanges(old_unvisited, old_visited_blocks);
+    }
+
+    LOG(INFO) << "Scanning " << utils::BlocksInExtents(new_unvisited)
+              << " unwritten blocks using chunk size of " << soft_chunk_blocks
+              << " blocks.";
+    // We use the soft_chunk_blocks limit for the <non-file-data> as we don't
+    // really know the structure of this data and we should not expect it to
+    // have redundancy between partitions.
+    file_delta_processors.emplace_back(
+        old_part.path,
+        new_part.path,
+        version,
+        std::move(old_unvisited),
+        std::move(new_unvisited),
+        vector<puffin::BitExtent>{},  // old_deflates,
+        vector<puffin::BitExtent>{},  // new_deflates
+        "<non-file-data>",            // operation name
+        soft_chunk_blocks,
+        blob_file);
+  }
 
   size_t max_threads = GetMaxThreads();
+
+  // Sort the files in descending order by the number of new blocks to make
+  // sure we start the largest ones first.
+  if (file_delta_processors.size() > max_threads) {
+    file_delta_processors.sort(std::greater<FileDeltaProcessor>());
+  }
+
   base::DelegateSimpleThreadPool thread_pool("incremental-update-generator",
                                              max_threads);
   thread_pool.Start();
@@ -354,41 +431,9 @@
   thread_pool.JoinAll();
 
   for (auto& processor : file_delta_processors) {
-    processor.MergeOperation(aops);
+    TEST_AND_RETURN_FALSE(processor.MergeOperation(aops));
   }
 
-  // Process all the blocks not included in any file. We provided all the unused
-  // blocks in the old partition as available data.
-  vector<Extent> new_unvisited = {
-      ExtentForRange(0, new_part.size / kBlockSize)};
-  new_unvisited = FilterExtentRanges(new_unvisited, new_visited_blocks);
-  if (new_unvisited.empty())
-    return true;
-
-  vector<Extent> old_unvisited;
-  if (old_part.fs_interface) {
-    old_unvisited.push_back(ExtentForRange(0, old_part.size / kBlockSize));
-    old_unvisited = FilterExtentRanges(old_unvisited, old_visited_blocks);
-  }
-
-  LOG(INFO) << "Scanning " << utils::BlocksInExtents(new_unvisited)
-            << " unwritten blocks using chunk size of " << soft_chunk_blocks
-            << " blocks.";
-  // We use the soft_chunk_blocks limit for the <non-file-data> as we don't
-  // really know the structure of this data and we should not expect it to have
-  // redundancy between partitions.
-  TEST_AND_RETURN_FALSE(DeltaReadFile(aops,
-                                      old_part.path,
-                                      new_part.path,
-                                      old_unvisited,
-                                      new_unvisited,
-                                      {},                 // old_deflates,
-                                      {},                 // new_deflates
-                                      "<non-file-data>",  // operation name
-                                      soft_chunk_blocks,
-                                      version,
-                                      blob_file));
-
   return true;
 }
 
@@ -478,30 +523,44 @@
       old_blocks_map_it->second.pop_back();
   }
 
+  if (chunk_blocks == -1)
+    chunk_blocks = new_num_blocks;
+
   // Produce operations for the zero blocks split per output extent.
-  // TODO(deymo): Produce ZERO operations instead of calling DeltaReadFile().
   size_t num_ops = aops->size();
   new_visited_blocks->AddExtents(new_zeros);
   for (const Extent& extent : new_zeros) {
-    TEST_AND_RETURN_FALSE(DeltaReadFile(aops,
-                                        "",
-                                        new_part,
-                                        vector<Extent>(),        // old_extents
-                                        vector<Extent>{extent},  // new_extents
-                                        {},                      // old_deflates
-                                        {},                      // new_deflates
-                                        "<zeros>",
-                                        chunk_blocks,
-                                        version,
-                                        blob_file));
+    if (version.OperationAllowed(InstallOperation::ZERO)) {
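+      // Emit one ZERO operation per |chunk_blocks| destination blocks so no
+      // single operation covers an unbounded number of blocks.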
+      for (uint64_t offset = 0; offset < extent.num_blocks();
+           offset += chunk_blocks) {
+        uint64_t num_blocks =
+            std::min(static_cast<uint64_t>(extent.num_blocks()) - offset,
+                     static_cast<uint64_t>(chunk_blocks));
+        InstallOperation operation;
+        operation.set_type(InstallOperation::ZERO);
+        *(operation.add_dst_extents()) =
+            ExtentForRange(extent.start_block() + offset, num_blocks);
+        aops->push_back({.name = "<zeros>", .op = operation});
+      }
+    } else {
+      TEST_AND_RETURN_FALSE(DeltaReadFile(aops,
+                                          "",
+                                          new_part,
+                                          {},        // old_extents
+                                          {extent},  // new_extents
+                                          {},        // old_deflates
+                                          {},        // new_deflates
+                                          "<zeros>",
+                                          chunk_blocks,
+                                          version,
+                                          blob_file));
+    }
   }
   LOG(INFO) << "Produced " << (aops->size() - num_ops) << " operations for "
             << utils::BlocksInExtents(new_zeros) << " zeroed blocks";
 
   // Produce MOVE/SOURCE_COPY operations for the moved blocks.
   num_ops = aops->size();
-  if (chunk_blocks == -1)
-    chunk_blocks = new_num_blocks;
   uint64_t used_blocks = 0;
   old_visited_blocks->AddExtents(old_identical_blocks);
   new_visited_blocks->AddExtents(new_identical_blocks);
@@ -785,25 +844,8 @@
         TEST_AND_RETURN_FALSE(deflate_utils::FindAndCompactDeflates(
             dst_extents, new_deflates, &dst_deflates));
 
-        // Remove equal deflates. TODO(*): We can do a N*N check using
-        // hashing. It will not reduce the payload size, but it will speeds up
-        // the puffing on the client device.
-        auto src = src_deflates.begin();
-        auto dst = dst_deflates.begin();
-        for (; src != src_deflates.end() && dst != dst_deflates.end();) {
-          auto src_in_bytes = deflate_utils::ExpandToByteExtent(*src);
-          auto dst_in_bytes = deflate_utils::ExpandToByteExtent(*dst);
-          if (src_in_bytes.length == dst_in_bytes.length &&
-              !memcmp(old_data.data() + src_in_bytes.offset,
-                      new_data.data() + dst_in_bytes.offset,
-                      src_in_bytes.length)) {
-            src = src_deflates.erase(src);
-            dst = dst_deflates.erase(dst);
-          } else {
-            src++;
-            dst++;
-          }
-        }
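+        // Drop deflates that are byte-for-byte identical in the old and new
+        // data; diffing them does not shrink the payload and puffing them
+        // only costs time on the client.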
+        puffin::RemoveEqualBitExtents(
+            old_data, new_data, &src_deflates, &dst_deflates);
 
         // Only Puffdiff if both files have at least one deflate left.
         if (!src_deflates.empty() && !dst_deflates.empty()) {
diff --git a/payload_generator/delta_diff_utils_unittest.cc b/payload_generator/delta_diff_utils_unittest.cc
index a83cea2..e32dde2 100644
--- a/payload_generator/delta_diff_utils_unittest.cc
+++ b/payload_generator/delta_diff_utils_unittest.cc
@@ -161,6 +161,31 @@
   ExtentRanges new_visited_blocks_;
 };
 
+TEST_F(DeltaDiffUtilsTest, SkipVerityExtentsTest) {
+  new_part_.verity.hash_tree_extent = ExtentForRange(20, 30);
+  new_part_.verity.fec_extent = ExtentForRange(40, 50);
+
+  BlobFileWriter blob_file(blob_fd_, &blob_size_);
+  EXPECT_TRUE(diff_utils::DeltaReadPartition(
+      &aops_,
+      old_part_,
+      new_part_,
+      -1,
+      -1,
+      PayloadVersion(kMaxSupportedMajorPayloadVersion,
+                     kVerityMinorPayloadVersion),
+      &blob_file));
+  for (const auto& aop : aops_) {
+    new_visited_blocks_.AddRepeatedExtents(aop.op.dst_extents());
+  }
+  for (const auto& extent : new_visited_blocks_.extent_set()) {
+    EXPECT_FALSE(ExtentRanges::ExtentsOverlap(
+        extent, new_part_.verity.hash_tree_extent));
+    EXPECT_FALSE(
+        ExtentRanges::ExtentsOverlap(extent, new_part_.verity.fec_extent));
+  }
+}
+
 TEST_F(DeltaDiffUtilsTest, MoveSmallTest) {
   brillo::Blob data_blob(block_size_);
   test_utils::FillWithData(&data_blob);
diff --git a/payload_generator/extent_ranges.cc b/payload_generator/extent_ranges.cc
index c1d3d63..41e8f76 100644
--- a/payload_generator/extent_ranges.cc
+++ b/payload_generator/extent_ranges.cc
@@ -227,6 +227,14 @@
   return ret;
 }
 
+Extent ExtentForBytes(uint64_t block_size,
+                      uint64_t start_bytes,
+                      uint64_t size_bytes) {
+  uint64_t start_block = start_bytes / block_size;
+  uint64_t end_block = utils::DivRoundUp(start_bytes + size_bytes, block_size);
+  return ExtentForRange(start_block, end_block - start_block);
+}
+
 vector<Extent> ExtentRanges::GetExtentsForBlockCount(
     uint64_t count) const {
   vector<Extent> out;
diff --git a/payload_generator/extent_ranges.h b/payload_generator/extent_ranges.h
index 198c834..02cf8fc 100644
--- a/payload_generator/extent_ranges.h
+++ b/payload_generator/extent_ranges.h
@@ -41,6 +41,9 @@
 };
 
 Extent ExtentForRange(uint64_t start_block, uint64_t num_blocks);
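+// Returns the smallest block-aligned extent covering the byte range
+// [start_bytes, start_bytes + size_bytes). For example, with a 4096-byte
+// block size, bytes [6000, 16000) map to blocks [1, 4), i.e.
+// ExtentForRange(1, 3).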
+Extent ExtentForBytes(uint64_t block_size,
+                      uint64_t start_bytes,
+                      uint64_t size_bytes);
 
 class ExtentRanges {
  public:
diff --git a/payload_generator/extent_ranges_unittest.cc b/payload_generator/extent_ranges_unittest.cc
index 3705bac..d9dd467 100644
--- a/payload_generator/extent_ranges_unittest.cc
+++ b/payload_generator/extent_ranges_unittest.cc
@@ -304,7 +304,7 @@
 }
 
 TEST(ExtentRangesTest, FilterExtentRangesMultipleRanges) {
-  // Two overlaping extents, with three ranges to remove.
+  // Two overlapping extents, with three ranges to remove.
   vector<Extent> extents {
       ExtentForRange(10, 100),
       ExtentForRange(30, 100) };
diff --git a/payload_generator/filesystem_interface.h b/payload_generator/filesystem_interface.h
index b1506e4..08dfd19 100644
--- a/payload_generator/filesystem_interface.h
+++ b/payload_generator/filesystem_interface.h
@@ -19,9 +19,9 @@
 
 // This class is used to abstract a filesystem and iterate the blocks
 // associated with the files and filesystem structures.
-// For the purposes of the update payload generation, a filesystem is a formated
-// partition composed by fixed-size blocks, since that's the interface used in
-// the update payload.
+// For the purposes of update payload generation, a filesystem is a formatted
+// partition composed of fixed-size blocks, since that's the interface used
+// in the update payload.
 
 #include <sys/stat.h>
 #include <sys/types.h>
diff --git a/payload_generator/full_update_generator.cc b/payload_generator/full_update_generator.cc
index 482a789..98bb0f3 100644
--- a/payload_generator/full_update_generator.cc
+++ b/payload_generator/full_update_generator.cc
@@ -152,7 +152,7 @@
   // We potentially have all the ChunkProcessors in memory but only
   // |max_threads| will actually hold a block in memory while we process.
   size_t partition_blocks = new_part.size / config.block_size;
-  size_t num_chunks = (partition_blocks + chunk_blocks - 1) / chunk_blocks;
+  size_t num_chunks = utils::DivRoundUp(partition_blocks, chunk_blocks);
   aops->resize(num_chunks);
   vector<ChunkProcessor> chunk_processors;
   chunk_processors.reserve(num_chunks);
diff --git a/payload_generator/full_update_generator_unittest.cc b/payload_generator/full_update_generator_unittest.cc
index 6da4d10..e398125 100644
--- a/payload_generator/full_update_generator_unittest.cc
+++ b/payload_generator/full_update_generator_unittest.cc
@@ -40,15 +40,11 @@
     config_.hard_chunk_size = 128 * 1024;
     config_.block_size = 4096;
 
-    EXPECT_TRUE(utils::MakeTempFile("FullUpdateTest_partition.XXXXXX",
-                                    &new_part_conf.path,
-                                    nullptr));
-    EXPECT_TRUE(utils::MakeTempFile("FullUpdateTest_blobs.XXXXXX",
-                                    &out_blobs_path_,
-                                    &out_blobs_fd_));
+    new_part_conf.path = part_file_.path();
+    EXPECT_TRUE(utils::MakeTempFile(
+        "FullUpdateTest_blobs.XXXXXX", &out_blobs_path_, &out_blobs_fd_));
 
     blob_file_.reset(new BlobFileWriter(out_blobs_fd_, &out_blobs_length_));
-    part_path_unlinker_.reset(new ScopedPathUnlinker(new_part_conf.path));
     out_blobs_unlinker_.reset(new ScopedPathUnlinker(out_blobs_path_));
   }
 
@@ -62,9 +58,9 @@
   int out_blobs_fd_{-1};
   off_t out_blobs_length_{0};
   ScopedFdCloser out_blobs_fd_closer_{&out_blobs_fd_};
+  test_utils::ScopedTempFile part_file_{"FullUpdateTest_partition.XXXXXX"};
 
   std::unique_ptr<BlobFileWriter> blob_file_;
-  std::unique_ptr<ScopedPathUnlinker> part_path_unlinker_;
   std::unique_ptr<ScopedPathUnlinker> out_blobs_unlinker_;
 
   // FullUpdateGenerator under test.
diff --git a/payload_generator/generate_delta_main.cc b/payload_generator/generate_delta_main.cc
index 2729bc4..8240518 100644
--- a/payload_generator/generate_delta_main.cc
+++ b/payload_generator/generate_delta_main.cc
@@ -14,28 +14,27 @@
 // limitations under the License.
 //
 
-#include <errno.h>
-#include <fcntl.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-#include <unistd.h>
-#include <xz.h>
-
 #include <string>
 #include <vector>
 
+#include <base/files/file_path.h>
+#include <base/files/file_util.h>
 #include <base/logging.h>
 #include <base/strings/string_number_conversions.h>
 #include <base/strings/string_split.h>
 #include <brillo/flag_helper.h>
 #include <brillo/key_value_store.h>
+#include <brillo/message_loops/base_message_loop.h>
+#include <xz.h>
 
 #include "update_engine/common/fake_boot_control.h"
 #include "update_engine/common/fake_hardware.h"
+#include "update_engine/common/file_fetcher.h"
 #include "update_engine/common/prefs.h"
 #include "update_engine/common/terminator.h"
 #include "update_engine/common/utils.h"
-#include "update_engine/payload_consumer/delta_performer.h"
+#include "update_engine/payload_consumer/download_action.h"
+#include "update_engine/payload_consumer/filesystem_verifier_action.h"
 #include "update_engine/payload_consumer/payload_constants.h"
 #include "update_engine/payload_generator/delta_diff_generator.h"
 #include "update_engine/payload_generator/payload_generation_config.h"
@@ -150,13 +149,16 @@
   LOG_IF(FATAL, out_file.empty())
       << "Must pass --out_file to sign payload.";
   LOG_IF(FATAL, payload_signature_file.empty())
-      << "Must pass --signature_file to sign payload.";
-  vector<brillo::Blob> signatures, metadata_signatures;
-  SignatureFileFlagToBlobs(payload_signature_file, &signatures);
+      << "Must pass --payload_signature_file to sign payload.";
+  vector<brillo::Blob> payload_signatures, metadata_signatures;
+  SignatureFileFlagToBlobs(payload_signature_file, &payload_signatures);
   SignatureFileFlagToBlobs(metadata_signature_file, &metadata_signatures);
   uint64_t final_metadata_size;
-  CHECK(PayloadSigner::AddSignatureToPayload(in_file, signatures,
-      metadata_signatures, out_file, &final_metadata_size));
+  CHECK(PayloadSigner::AddSignatureToPayload(in_file,
+                                             payload_signatures,
+                                             metadata_signatures,
+                                             out_file,
+                                             &final_metadata_size));
   LOG(INFO) << "Done signing payload. Final metadata size = "
             << final_metadata_size;
   if (!out_metadata_size_file.empty()) {
@@ -182,8 +184,20 @@
   return 0;
 }
 
-// TODO(deymo): This function is likely broken for deltas minor version 2 or
-// newer. Move this function to a new file and make the delta_performer
+class ApplyPayloadProcessorDelegate : public ActionProcessorDelegate {
+ public:
+  void ProcessingDone(const ActionProcessor* processor,
+                      ErrorCode code) override {
+    brillo::MessageLoop::current()->BreakLoop();
+    code_ = code;
+  }
+  void ProcessingStopped(const ActionProcessor* processor) override {
+    brillo::MessageLoop::current()->BreakLoop();
+  }
+  ErrorCode code_{ErrorCode::kError};  // Defaults to an error unless set.
+};
+
+// TODO(deymo): Move this function to a new file and make the delta_performer
 // integration tests use this instead.
 bool ApplyPayload(const string& payload_file,
                   // Simply reuses the payload config used for payload
@@ -200,6 +214,14 @@
   install_plan.target_slot = 1;
   payload.type =
       config.is_delta ? InstallPayloadType::kDelta : InstallPayloadType::kFull;
+  payload.size = utils::FileSize(payload_file);
+  // TODO(senj): This hash is only correct for unsigned payloads; add support
+  // for signed payloads using PayloadSigner.
+  HashCalculator::RawHashOfFile(payload_file, payload.size, &payload.hash);
+  install_plan.payloads = {payload};
+  install_plan.download_url =
+      "file://" +
+      base::MakeAbsoluteFilePath(base::FilePath(payload_file)).value();
 
   for (size_t i = 0; i < config.target.partitions.size(); i++) {
     const string& part_name = config.target.partitions[i].name;
@@ -217,31 +239,34 @@
     }
 
     LOG(INFO) << "Install partition:"
-              << " source: " << source_path << " target: " << target_path;
+              << " source: " << source_path << "\ttarget: " << target_path;
   }
 
-  DeltaPerformer performer(&prefs,
-                           &fake_boot_control,
-                           &fake_hardware,
-                           nullptr,
-                           &install_plan,
-                           &payload,
-                           true);  // is_interactive
-
-  brillo::Blob buf(1024 * 1024);
-  int fd = open(payload_file.c_str(), O_RDONLY, 0);
-  CHECK_GE(fd, 0);
-  ScopedFdCloser fd_closer(&fd);
   xz_crc32_init();
-  for (off_t offset = 0;; offset += buf.size()) {
-    ssize_t bytes_read;
-    CHECK(utils::PReadAll(fd, buf.data(), buf.size(), offset, &bytes_read));
-    if (bytes_read == 0)
-      break;
-    TEST_AND_RETURN_FALSE(performer.Write(buf.data(), bytes_read));
-  }
-  CHECK_EQ(performer.Close(), 0);
-  DeltaPerformer::ResetUpdateProgress(&prefs, false);
+  brillo::BaseMessageLoop loop;
+  loop.SetAsCurrent();
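+  // Chain InstallPlanAction -> DownloadAction -> FilesystemVerifierAction so
+  // that applying the payload goes through the same pipeline as a real
+  // update, then run the message loop until the processor completes.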
+  auto install_plan_action = std::make_unique<InstallPlanAction>(install_plan);
+  auto download_action =
+      std::make_unique<DownloadAction>(&prefs,
+                                       &fake_boot_control,
+                                       &fake_hardware,
+                                       nullptr,
+                                       new FileFetcher(),
+                                       true /* interactive */);
+  auto filesystem_verifier_action =
+      std::make_unique<FilesystemVerifierAction>();
+
+  BondActions(install_plan_action.get(), download_action.get());
+  BondActions(download_action.get(), filesystem_verifier_action.get());
+  ActionProcessor processor;
+  ApplyPayloadProcessorDelegate delegate;
+  processor.set_delegate(&delegate);
+  processor.EnqueueAction(std::move(install_plan_action));
+  processor.EnqueueAction(std::move(download_action));
+  processor.EnqueueAction(std::move(filesystem_verifier_action));
+  processor.StartProcessing();
+  loop.Run();
+  CHECK_EQ(delegate.code_, ErrorCode::kSuccess);
   LOG(INFO) << "Completed applying " << (config.is_delta ? "delta" : "full")
             << " payload.";
   return true;
@@ -287,8 +312,8 @@
                 "Path to the .map files associated with the partition files "
                 "in the new partition, similar to the -old_mapfiles flag.");
   DEFINE_string(partition_names,
-                string(kLegacyPartitionNameRoot) + ":" +
-                kLegacyPartitionNameKernel,
+                string(kPartitionNameRoot) + ":" +
+                kPartitionNameKernel,
                 "Names of the partitions. To pass multiple names, use a single "
                 "argument with a colon between names, e.g. "
                 "name:name2:name3:last_name . Name can not be empty, and it "
@@ -311,7 +336,8 @@
                 "You may pass in multiple sizes by colon separating them. E.g. "
                 "2048:2048:4096 will assume 3 signatures, the first two with "
                 "2048 size and the last 4096.");
-  DEFINE_string(signature_file, "",
+  DEFINE_string(payload_signature_file,
+                "",
                 "Raw signature file to sign payload with. To pass multiple "
                 "signatures, use a single argument with a colon between paths, "
                 "e.g. /path/to/sig:/path/to/next:/path/to/last_sig . Each "
@@ -406,9 +432,12 @@
                             FLAGS_out_metadata_hash_file, FLAGS_in_file);
     return 0;
   }
-  if (!FLAGS_signature_file.empty()) {
-    SignPayload(FLAGS_in_file, FLAGS_out_file, FLAGS_signature_file,
-                FLAGS_metadata_signature_file, FLAGS_out_metadata_size_file);
+  if (!FLAGS_payload_signature_file.empty()) {
+    SignPayload(FLAGS_in_file,
+                FLAGS_out_file,
+                FLAGS_payload_signature_file,
+                FLAGS_metadata_signature_file,
+                FLAGS_out_metadata_size_file);
     return 0;
   }
   if (!FLAGS_public_key.empty()) {
@@ -444,8 +473,8 @@
     LOG_IF(FATAL, partition_names.size() != 2)
         << "To support more than 2 partitions, please use the "
         << "--new_partitions flag and major version 2.";
-    LOG_IF(FATAL, partition_names[0] != kLegacyPartitionNameRoot ||
-                  partition_names[1] != kLegacyPartitionNameKernel)
+    LOG_IF(FATAL, partition_names[0] != kPartitionNameRoot ||
+                  partition_names[1] != kPartitionNameKernel)
         << "To support non-default partition name, please use the "
         << "--new_partitions flag and major version 2.";
   }
@@ -582,6 +611,9 @@
 
   payload_config.max_timestamp = FLAGS_max_timestamp;
 
+  if (payload_config.version.minor >= kVerityMinorPayloadVersion)
+    CHECK(payload_config.target.LoadVerityConfig());
+
   LOG(INFO) << "Generating " << (payload_config.is_delta ? "delta" : "full")
             << " update";
 
diff --git a/payload_generator/inplace_generator.cc b/payload_generator/inplace_generator.cc
index b858c2b..62608e5 100644
--- a/payload_generator/inplace_generator.cc
+++ b/payload_generator/inplace_generator.cc
@@ -23,6 +23,8 @@
 #include <utility>
 #include <vector>
 
+#include <base/stl_util.h>
+
 #include "update_engine/common/utils.h"
 #include "update_engine/payload_consumer/payload_constants.h"
 #include "update_engine/payload_generator/cycle_breaker.h"
@@ -341,7 +343,7 @@
   vector<Vertex::Index> new_op_indexes;
   new_op_indexes.reserve(op_indexes->size());
   for (Vertex::Index vertex_index : *op_indexes) {
-    if (utils::SetContainsKey(deleted_nodes, vertex_index))
+    if (base::ContainsKey(deleted_nodes, vertex_index))
       continue;
     new_op_indexes.push_back(vertex_index);
   }
@@ -800,7 +802,7 @@
                                config.hard_chunk_size / config.block_size);
   size_t soft_chunk_blocks = config.soft_chunk_size / config.block_size;
   uint64_t partition_size = new_part.size;
-  if (new_part.name == kLegacyPartitionNameRoot)
+  if (new_part.name == kPartitionNameRoot)
     partition_size = config.rootfs_partition_size;
 
   LOG(INFO) << "Delta compressing " << new_part.name << " partition...";
diff --git a/payload_generator/payload_file.cc b/payload_generator/payload_file.cc
index f48d2a2..0ffd3e2 100644
--- a/payload_generator/payload_file.cc
+++ b/payload_generator/payload_file.cc
@@ -20,6 +20,7 @@
 
 #include <algorithm>
 #include <map>
+#include <utility>
 
 #include <base/strings/stringprintf.h>
 
@@ -82,8 +83,8 @@
                                const vector<AnnotatedOperation>& aops) {
   // Check partitions order for Chrome OS
   if (major_version_ == kChromeOSMajorPayloadVersion) {
-    const vector<const char*> part_order = { kLegacyPartitionNameRoot,
-                                             kLegacyPartitionNameKernel };
+    const vector<const char*> part_order = { kPartitionNameRoot,
+                                             kPartitionNameKernel };
     TEST_AND_RETURN_FALSE(part_vec_.size() < part_order.size());
     TEST_AND_RETURN_FALSE(new_conf.name == part_order[part_vec_.size()]);
   }
@@ -91,6 +92,7 @@
   part.name = new_conf.name;
   part.aops = aops;
   part.postinstall = new_conf.postinstall;
+  part.verity = new_conf.verity;
   // Initialize the PartitionInfo objects if present.
   if (!old_conf.path.empty())
     TEST_AND_RETURN_FALSE(diff_utils::InitializePartitionInfo(old_conf,
@@ -144,6 +146,22 @@
           partition->set_filesystem_type(part.postinstall.filesystem_type);
         partition->set_postinstall_optional(part.postinstall.optional);
       }
+      if (!part.verity.IsEmpty()) {
+        if (part.verity.hash_tree_extent.num_blocks() != 0) {
+          *partition->mutable_hash_tree_data_extent() =
+              part.verity.hash_tree_data_extent;
+          *partition->mutable_hash_tree_extent() = part.verity.hash_tree_extent;
+          partition->set_hash_tree_algorithm(part.verity.hash_tree_algorithm);
+          if (!part.verity.hash_tree_salt.empty())
+            partition->set_hash_tree_salt(part.verity.hash_tree_salt.data(),
+                                          part.verity.hash_tree_salt.size());
+        }
+        if (part.verity.fec_extent.num_blocks() != 0) {
+          *partition->mutable_fec_data_extent() = part.verity.fec_data_extent;
+          *partition->mutable_fec_extent() = part.verity.fec_extent;
+          partition->set_fec_roots(part.verity.fec_roots);
+        }
+      }
       for (const AnnotatedOperation& aop : part.aops) {
         *partition->add_operations() = aop.op;
       }
@@ -153,7 +171,7 @@
         *(partition->mutable_new_partition_info()) = part.new_info;
     } else {
       // major_version_ == kChromeOSMajorPayloadVersion
-      if (part.name == kLegacyPartitionNameKernel) {
+      if (part.name == kPartitionNameKernel) {
         for (const AnnotatedOperation& aop : part.aops)
           *manifest_.add_kernel_install_operations() = aop.op;
         if (part.old_info.has_size() || part.old_info.has_hash())
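
For reference, a minimal sketch of how a consumer could read the new verity fields back out of a PartitionUpdate message. The accessors are inferred from the setters used above (update_metadata.pb.h, base/logging.h assumed); this snippet is illustrative only and is not part of the change:

    void LogVerityInfo(const PartitionUpdate& partition) {
      // Hash tree placement and parameters, if the payload carries them.
      if (partition.has_hash_tree_extent()) {
        LOG(INFO) << "hash tree (" << partition.hash_tree_algorithm() << "): "
                  << partition.hash_tree_data_extent().num_blocks()
                  << " data blocks -> "
                  << partition.hash_tree_extent().num_blocks() << " tree blocks";
      }
      // FEC placement and the number of roots used to encode it.
      if (partition.has_fec_extent()) {
        LOG(INFO) << "fec (roots=" << partition.fec_roots() << "): "
                  << partition.fec_data_extent().num_blocks() << " data blocks -> "
                  << partition.fec_extent().num_blocks() << " fec blocks";
      }
    }
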
diff --git a/payload_generator/payload_file.h b/payload_generator/payload_file.h
index 7cc792a..9dc80a7 100644
--- a/payload_generator/payload_file.h
+++ b/payload_generator/payload_file.h
@@ -95,6 +95,7 @@
     PartitionInfo new_info;
 
     PostInstallConfig postinstall;
+    VerityConfig verity;
   };
 
   std::vector<Partition> part_vec_;
diff --git a/payload_generator/payload_file_unittest.cc b/payload_generator/payload_file_unittest.cc
index e8e7e14..45faebb9 100644
--- a/payload_generator/payload_file_unittest.cc
+++ b/payload_generator/payload_file_unittest.cc
@@ -36,23 +36,16 @@
 };
 
 TEST_F(PayloadFileTest, ReorderBlobsTest) {
-  string orig_blobs;
-  EXPECT_TRUE(utils::MakeTempFile("ReorderBlobsTest.orig.XXXXXX", &orig_blobs,
-                                  nullptr));
-  ScopedPathUnlinker orig_blobs_unlinker(orig_blobs);
+  test_utils::ScopedTempFile orig_blobs("ReorderBlobsTest.orig.XXXXXX");
 
   // The operations have three blobs and one gap (the whitespace):
   // Rootfs operation 1: [8, 3] bcd
   // Rootfs operation 2: [7, 1] a
   // Kernel operation 1: [0, 6] kernel
   string orig_data = "kernel abcd";
-  EXPECT_TRUE(
-      utils::WriteFile(orig_blobs.c_str(), orig_data.data(), orig_data.size()));
+  EXPECT_TRUE(test_utils::WriteFileString(orig_blobs.path(), orig_data));
 
-  string new_blobs;
-  EXPECT_TRUE(
-      utils::MakeTempFile("ReorderBlobsTest.new.XXXXXX", &new_blobs, nullptr));
-  ScopedPathUnlinker new_blobs_unlinker(new_blobs);
+  test_utils::ScopedTempFile new_blobs("ReorderBlobsTest.new.XXXXXX");
 
   payload_.part_vec_.resize(2);
 
@@ -71,12 +64,12 @@
   aop.op.set_data_length(6);
   payload_.part_vec_[1].aops = {aop};
 
-  EXPECT_TRUE(payload_.ReorderDataBlobs(orig_blobs, new_blobs));
+  EXPECT_TRUE(payload_.ReorderDataBlobs(orig_blobs.path(), new_blobs.path()));
 
   const vector<AnnotatedOperation>& part0_aops = payload_.part_vec_[0].aops;
   const vector<AnnotatedOperation>& part1_aops = payload_.part_vec_[1].aops;
   string new_data;
-  EXPECT_TRUE(utils::ReadFile(new_blobs, &new_data));
+  EXPECT_TRUE(utils::ReadFile(new_blobs.path(), &new_data));
   // Kernel blobs should appear at the end.
   EXPECT_EQ("bcdakernel", new_data);
 
diff --git a/payload_generator/payload_generation_config.cc b/payload_generator/payload_generation_config.cc
index 15d4ab5..c49fdb5 100644
--- a/payload_generator/payload_generation_config.cc
+++ b/payload_generator/payload_generation_config.cc
@@ -20,6 +20,7 @@
 
 #include "update_engine/common/utils.h"
 #include "update_engine/payload_consumer/delta_performer.h"
+#include "update_engine/payload_generator/boot_img_filesystem.h"
 #include "update_engine/payload_generator/delta_diff_generator.h"
 #include "update_engine/payload_generator/delta_diff_utils.h"
 #include "update_engine/payload_generator/ext2_filesystem.h"
@@ -32,6 +33,13 @@
   return !run && path.empty() && filesystem_type.empty() && !optional;
 }
 
+bool VerityConfig::IsEmpty() const {
+  return hash_tree_data_extent.num_blocks() == 0 &&
+         hash_tree_extent.num_blocks() == 0 && hash_tree_algorithm.empty() &&
+         hash_tree_salt.empty() && fec_data_extent.num_blocks() == 0 &&
+         fec_extent.num_blocks() == 0 && fec_roots == 0;
+}
+
 bool PartitionConfig::ValidateExists() const {
   TEST_AND_RETURN_FALSE(!path.empty());
   TEST_AND_RETURN_FALSE(utils::FileExists(path.c_str()));
@@ -64,6 +72,12 @@
     }
   }
 
+  fs_interface = BootImgFilesystem::CreateFromFile(path);
+  if (fs_interface) {
+    TEST_AND_RETURN_FALSE(fs_interface->GetBlockSize() == kBlockSize);
+    return true;
+  }
+
   // Fall back to a RAW filesystem.
   TEST_AND_RETURN_FALSE(size % kBlockSize == 0);
   fs_interface = RawFilesystem::Create(
@@ -129,7 +143,8 @@
                         minor == kSourceMinorPayloadVersion ||
                         minor == kOpSrcHashMinorPayloadVersion ||
                         minor == kBrotliBsdiffMinorPayloadVersion ||
-                        minor == kPuffdiffMinorPayloadVersion);
+                        minor == kPuffdiffMinorPayloadVersion ||
+                        minor == kVerityMinorPayloadVersion);
   return true;
 }
 
@@ -192,8 +207,9 @@
         TEST_AND_RETURN_FALSE(part.ValidateExists());
         TEST_AND_RETURN_FALSE(part.size % block_size == 0);
       }
-      // Source partition should not have postinstall.
+      // Source partition should not have postinstall or verity config.
       TEST_AND_RETURN_FALSE(part.postinstall.IsEmpty());
+      TEST_AND_RETURN_FALSE(part.verity.IsEmpty());
     }
 
     // If new_image_info is present, old_image_info must be present.
@@ -209,10 +225,12 @@
     TEST_AND_RETURN_FALSE(part.ValidateExists());
     TEST_AND_RETURN_FALSE(part.size % block_size == 0);
     if (version.minor == kInPlaceMinorPayloadVersion &&
-        part.name == kLegacyPartitionNameRoot)
+        part.name == kPartitionNameRoot)
       TEST_AND_RETURN_FALSE(rootfs_partition_size >= part.size);
     if (version.major == kChromeOSMajorPayloadVersion)
       TEST_AND_RETURN_FALSE(part.postinstall.IsEmpty());
+    if (version.minor < kVerityMinorPayloadVersion)
+      TEST_AND_RETURN_FALSE(part.verity.IsEmpty());
   }
 
   TEST_AND_RETURN_FALSE(hard_chunk_size == -1 ||
diff --git a/payload_generator/payload_generation_config.h b/payload_generator/payload_generation_config.h
index c553d29..38e7b10 100644
--- a/payload_generator/payload_generation_config.h
+++ b/payload_generator/payload_generation_config.h
@@ -24,6 +24,7 @@
 #include <vector>
 
 #include <brillo/key_value_store.h>
+#include <brillo/secure_blob.h>
 
 #include "update_engine/payload_consumer/payload_constants.h"
 #include "update_engine/payload_generator/filesystem_interface.h"
@@ -51,6 +52,34 @@
   bool optional = false;
 };
 
+// Data will be written to the payload and used for hash tree and FEC generation
+// at device update time.
+struct VerityConfig {
+  // Whether the verity config is empty.
+  bool IsEmpty() const;
+
+  // The extent for data covered by verity hash tree.
+  Extent hash_tree_data_extent;
+
+  // The extent to store verity hash tree.
+  Extent hash_tree_extent;
+
+  // The hash algorithm used in verity hash tree.
+  std::string hash_tree_algorithm;
+
+  // The salt used for verity hash tree.
+  brillo::Blob hash_tree_salt;
+
+  // The extent for data covered by FEC.
+  Extent fec_data_extent;
+
+  // The extent to store FEC.
+  Extent fec_extent;
+
+  // The number of FEC roots.
+  uint32_t fec_roots = 0;
+};
+
 struct PartitionConfig {
   explicit PartitionConfig(std::string name) : name(name) {}
 
@@ -86,6 +115,7 @@
   std::string name;
 
   PostInstallConfig postinstall;
+  VerityConfig verity;
 };
 
 // The ImageConfig struct describes a pair of binaries kernel and rootfs and the
@@ -104,6 +134,9 @@
   // Load postinstall config from a key value store.
   bool LoadPostInstallConfig(const brillo::KeyValueStore& store);
 
+  // Load verity config by parsing the partition images.
+  bool LoadVerityConfig();
+
   // Returns whether the |image_info| field is empty.
   bool ImageInfoIsEmpty() const;
 
@@ -154,7 +187,7 @@
   // if is_full is false, so we are requested a delta payload.
   ImageConfig source;
 
-  // Wheter the requested payload is a delta payload.
+  // Whether the requested payload is a delta payload.
   bool is_delta = false;
 
   // The major/minor version of the payload.
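
The extents above are plain block extents computed from byte offsets in the partition image. A minimal sketch of that conversion, mirroring the ExtentForBytes() helper this change moves out of squashfs_filesystem.cc into the shared extent helpers, with the numbers from the avbtool layout used in the new unit test (4096-byte blocks; illustrative, not part of the patch):

    // Rounds a byte range outward to whole blocks and returns it as an Extent.
    Extent ExtentForBytes(uint64_t block_size,
                          uint64_t start_bytes,
                          uint64_t size_bytes) {
      uint64_t start_block = start_bytes / block_size;
      uint64_t end_block =
          (start_bytes + size_bytes + block_size - 1) / block_size;
      return ExtentForRange(start_block, end_block - start_block);
    }

    // hash_tree_data_extent = ExtentForBytes(4096, 0, 2 * 4096)        -> blocks [0, 2)
    // hash_tree_extent      = ExtentForBytes(4096, 2 * 4096, 4096)     -> blocks [2, 3)
    // fec_data_extent       = ExtentForBytes(4096, 0, 3 * 4096)        -> blocks [0, 3)
    // fec_extent            = ExtentForBytes(4096, 3 * 4096, 2 * 4096) -> blocks [3, 5)
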
diff --git a/payload_generator/payload_generation_config_android.cc b/payload_generator/payload_generation_config_android.cc
new file mode 100644
index 0000000..e1dee58
--- /dev/null
+++ b/payload_generator/payload_generation_config_android.cc
@@ -0,0 +1,135 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_generator/payload_generation_config.h"
+
+#include <base/logging.h>
+#include <brillo/secure_blob.h>
+#include <libavb/libavb.h>
+#include <verity/hash_tree_builder.h>
+
+#include "update_engine/common/utils.h"
+#include "update_engine/payload_consumer/verity_writer_android.h"
+#include "update_engine/payload_generator/extent_ranges.h"
+
+namespace chromeos_update_engine {
+
+namespace {
+bool AvbDescriptorCallback(const AvbDescriptor* descriptor, void* user_data) {
+  PartitionConfig* part = static_cast<PartitionConfig*>(user_data);
+  AvbDescriptor desc;
+  TEST_AND_RETURN_FALSE(
+      avb_descriptor_validate_and_byteswap(descriptor, &desc));
+  if (desc.tag != AVB_DESCRIPTOR_TAG_HASHTREE)
+    return true;
+
+  AvbHashtreeDescriptor hashtree;
+  TEST_AND_RETURN_FALSE(avb_hashtree_descriptor_validate_and_byteswap(
+      reinterpret_cast<const AvbHashtreeDescriptor*>(descriptor), &hashtree));
+  // We only support version 1 right now; a new payload minor version will be
+  // needed to support newer dm-verity versions.
+  TEST_AND_RETURN_FALSE(hashtree.dm_verity_version == 1);
+  part->verity.hash_tree_algorithm =
+      reinterpret_cast<const char*>(hashtree.hash_algorithm);
+
+  const uint8_t* salt = reinterpret_cast<const uint8_t*>(descriptor) +
+                        sizeof(AvbHashtreeDescriptor) +
+                        hashtree.partition_name_len;
+  part->verity.hash_tree_salt.assign(salt, salt + hashtree.salt_len);
+
+  TEST_AND_RETURN_FALSE(hashtree.data_block_size ==
+                        part->fs_interface->GetBlockSize());
+  part->verity.hash_tree_data_extent =
+      ExtentForBytes(hashtree.data_block_size, 0, hashtree.image_size);
+
+  TEST_AND_RETURN_FALSE(hashtree.hash_block_size ==
+                        part->fs_interface->GetBlockSize());
+
+  // Generate hash tree based on the descriptor and verify that it matches
+  // the hash tree stored in the image.
+  auto hash_function =
+      HashTreeBuilder::HashFunction(part->verity.hash_tree_algorithm);
+  TEST_AND_RETURN_FALSE(hash_function != nullptr);
+  HashTreeBuilder hash_tree_builder(hashtree.data_block_size, hash_function);
+  TEST_AND_RETURN_FALSE(hash_tree_builder.Initialize(
+      hashtree.image_size, part->verity.hash_tree_salt));
+  TEST_AND_RETURN_FALSE(hash_tree_builder.CalculateSize(hashtree.image_size) ==
+                        hashtree.tree_size);
+
+  brillo::Blob buffer;
+  for (uint64_t offset = 0; offset < hashtree.image_size;) {
+    constexpr uint64_t kBufferSize = 1024 * 1024;
+    size_t bytes_to_read = std::min(kBufferSize, hashtree.image_size - offset);
+    TEST_AND_RETURN_FALSE(
+        utils::ReadFileChunk(part->path, offset, bytes_to_read, &buffer));
+    TEST_AND_RETURN_FALSE(
+        hash_tree_builder.Update(buffer.data(), buffer.size()));
+    offset += buffer.size();
+    buffer.clear();
+  }
+  TEST_AND_RETURN_FALSE(hash_tree_builder.BuildHashTree());
+  TEST_AND_RETURN_FALSE(utils::ReadFileChunk(
+      part->path, hashtree.tree_offset, hashtree.tree_size, &buffer));
+  TEST_AND_RETURN_FALSE(hash_tree_builder.CheckHashTree(buffer));
+
+  part->verity.hash_tree_extent = ExtentForBytes(
+      hashtree.hash_block_size, hashtree.tree_offset, hashtree.tree_size);
+
+  TEST_AND_RETURN_FALSE(VerityWriterAndroid::EncodeFEC(part->path,
+                                                       0 /* data_offset */,
+                                                       hashtree.fec_offset,
+                                                       hashtree.fec_offset,
+                                                       hashtree.fec_size,
+                                                       hashtree.fec_num_roots,
+                                                       hashtree.data_block_size,
+                                                       true /* verify_mode */));
+
+  part->verity.fec_data_extent =
+      ExtentForBytes(hashtree.data_block_size, 0, hashtree.fec_offset);
+  part->verity.fec_extent = ExtentForBytes(
+      hashtree.data_block_size, hashtree.fec_offset, hashtree.fec_size);
+  part->verity.fec_roots = hashtree.fec_num_roots;
+  return true;
+}
+}  // namespace
+
+bool ImageConfig::LoadVerityConfig() {
+  for (PartitionConfig& part : partitions) {
+    if (part.size < sizeof(AvbFooter))
+      continue;
+    uint64_t footer_offset = part.size - sizeof(AvbFooter);
+    brillo::Blob buffer;
+    TEST_AND_RETURN_FALSE(utils::ReadFileChunk(
+        part.path, footer_offset, sizeof(AvbFooter), &buffer));
+    if (memcmp(buffer.data(), AVB_FOOTER_MAGIC, AVB_FOOTER_MAGIC_LEN) != 0)
+      continue;
+    LOG(INFO) << "Parsing verity config from AVB footer for " << part.name;
+    AvbFooter footer;
+    TEST_AND_RETURN_FALSE(avb_footer_validate_and_byteswap(
+        reinterpret_cast<const AvbFooter*>(buffer.data()), &footer));
+    buffer.clear();
+
+    TEST_AND_RETURN_FALSE(footer.vbmeta_offset + sizeof(AvbVBMetaImageHeader) <=
+                          part.size);
+    TEST_AND_RETURN_FALSE(utils::ReadFileChunk(
+        part.path, footer.vbmeta_offset, footer.vbmeta_size, &buffer));
+    TEST_AND_RETURN_FALSE(avb_descriptor_foreach(
+        buffer.data(), buffer.size(), AvbDescriptorCallback, &part));
+  }
+  return true;
+}
+
+}  // namespace chromeos_update_engine
diff --git a/payload_generator/payload_generation_config_android_unittest.cc b/payload_generator/payload_generation_config_android_unittest.cc
new file mode 100644
index 0000000..53378c2
--- /dev/null
+++ b/payload_generator/payload_generation_config_android_unittest.cc
@@ -0,0 +1,197 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_generator/payload_generation_config.h"
+
+#include <brillo/secure_blob.h>
+#include <gtest/gtest.h>
+
+#include "update_engine/common/test_utils.h"
+#include "update_engine/payload_generator/extent_ranges.h"
+#include "update_engine/payload_generator/extent_utils.h"
+
+namespace chromeos_update_engine {
+
+namespace {
+// dd if=/dev/zero of=part bs=4096 count=2
+// avbtool add_hashtree_footer --image part --partition_size $((24 * 4096))
+//     --partition_name system
+constexpr uint64_t kImageSize = 24 * 4096;
+
+// hexdump -s $((2 * 4096)) -n 64 -v -e '/1 "0x%02x, "' part
+constexpr uint64_t kHashTreeOffset = 2 * 4096;
+const uint8_t kHashTree[] = {
+    0x62, 0x4b, 0x5a, 0x4d, 0xa2, 0x97, 0xa0, 0xc8, 0x08, 0x03, 0xa6,
+    0x95, 0x4c, 0x4c, 0x7a, 0x2d, 0xac, 0x50, 0xde, 0x74, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x62,
+    0x4b, 0x5a, 0x4d, 0xa2, 0x97, 0xa0, 0xc8, 0x08, 0x03, 0xa6, 0x95,
+    0x4c, 0x4c, 0x7a, 0x2d, 0xac, 0x50, 0xde, 0x74, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+
+// hexdump -s $((3 * 4096)) -n 128 -v -e '/1 "0x%02x, "' part
+constexpr uint64_t kFECOffset = 3 * 4096;
+const uint8_t kFEC[] = {
+    0xec, 0x8e, 0x93, 0xd8, 0xf9, 0xa3, 0xd6, 0x9b, 0xa4, 0x06, 0x5f, 0xc8,
+    0x6c, 0xcc, 0x4f, 0x87, 0x07, 0x0f, 0xac, 0xaf, 0x29, 0x8f, 0x97, 0x02,
+    0xb2, 0xfe, 0xb2, 0xfe, 0xe5, 0x9f, 0xf2, 0xdf, 0xe6, 0x4a, 0x36, 0x66,
+    0x04, 0xda, 0xa7, 0xd3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0xec, 0x8e, 0x93, 0xd8, 0xf9, 0xa3, 0xd6, 0x9b,
+    0xa4, 0x06, 0x5f, 0xc8, 0x6c, 0xcc, 0x4f, 0x87, 0x07, 0x0f, 0xac, 0xaf,
+    0x29, 0x8f, 0x97, 0x02, 0xb2, 0xfe, 0xb2, 0xfe, 0xe5, 0x9f, 0xf2, 0xdf,
+    0xe6, 0x4a, 0x36, 0x66, 0x04, 0xda, 0xa7, 0xd3, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+
+// hexdump -s $((5 * 4096)) -n 512 -v -e '/1 "0x%02x, "' part
+constexpr uint64_t kVBMetaImageOffset = 5 * 4096;
+const uint8_t kVBMetaImage[] = {
+    0x41, 0x56, 0x42, 0x30, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe8,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0xe8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0xe8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x61, 0x76, 0x62, 0x74,
+    0x6f, 0x6f, 0x6c, 0x20, 0x31, 0x2e, 0x31, 0x2e, 0x30, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd8, 0x00, 0x00, 0x00, 0x01,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00,
+    0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x02,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x20, 0x00, 0x73, 0x68, 0x61, 0x31, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x14,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x1f, 0xab,
+    0x7a, 0x6b, 0xf6, 0xb1, 0x3a, 0x1f, 0xdb, 0x34, 0xa3, 0xfc, 0xc8, 0x73,
+    0x0b, 0x23, 0x61, 0xb3, 0x04, 0xe2, 0x4f, 0x6c, 0xd0, 0x1e, 0x39, 0x9d,
+    0xaa, 0x73, 0x35, 0x53, 0xa7, 0x74, 0x1f, 0x81, 0xd0, 0xa6, 0xa9, 0x5f,
+    0x19, 0x9f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+
+// hexdump -s $((24 * 4096 - 64)) -n 64 -v -e '/1 "0x%02x, "' part
+constexpr uint64_t kAVBFooterOffset = 24 * 4096 - 64;
+const uint8_t kAVBFooter[] = {
+    0x41, 0x56, 0x42, 0x66, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+
+// avbtool info_image --image part | grep Salt | cut -d':' -f 2 | xxd -r -p |
+//     hexdump -v -e '/1 "0x%02x, "'
+const uint8_t kHashTreeSalt[] = {0x1f, 0xab, 0x7a, 0x6b, 0xf6, 0xb1, 0x3a,
+                                 0x1f, 0xdb, 0x34, 0xa3, 0xfc, 0xc8, 0x73,
+                                 0x0b, 0x23, 0x61, 0xb3, 0x04, 0xe2};
+
+brillo::Blob GetAVBPartition() {
+  brillo::Blob part(kImageSize);
+  memcpy(part.data() + kHashTreeOffset, kHashTree, sizeof(kHashTree));
+  memcpy(part.data() + kFECOffset, kFEC, sizeof(kFEC));
+  memcpy(part.data() + kVBMetaImageOffset, kVBMetaImage, sizeof(kVBMetaImage));
+  memcpy(part.data() + kAVBFooterOffset, kAVBFooter, sizeof(kAVBFooter));
+  return part;
+}
+
+}  // namespace
+
+class PayloadGenerationConfigAndroidTest : public ::testing::Test {
+ protected:
+  void SetUp() override {
+    image_config_.partitions.emplace_back("system");
+    image_config_.partitions[0].path = temp_file_.path();
+  }
+
+  ImageConfig image_config_;
+  test_utils::ScopedTempFile temp_file_{
+      "PayloadGenerationConfigAndroidTest.XXXXXX"};
+};
+
+TEST_F(PayloadGenerationConfigAndroidTest, LoadVerityConfigSimpleTest) {
+  brillo::Blob part = GetAVBPartition();
+  test_utils::WriteFileVector(temp_file_.path(), part);
+  EXPECT_TRUE(image_config_.LoadImageSize());
+  EXPECT_TRUE(image_config_.partitions[0].OpenFilesystem());
+  EXPECT_TRUE(image_config_.LoadVerityConfig());
+  const VerityConfig& verity = image_config_.partitions[0].verity;
+  EXPECT_FALSE(verity.IsEmpty());
+  EXPECT_EQ(ExtentForRange(0, 2), verity.hash_tree_data_extent);
+  EXPECT_EQ(ExtentForRange(2, 1), verity.hash_tree_extent);
+  EXPECT_EQ("sha1", verity.hash_tree_algorithm);
+  brillo::Blob salt(kHashTreeSalt, std::end(kHashTreeSalt));
+  EXPECT_EQ(salt, verity.hash_tree_salt);
+  EXPECT_EQ(ExtentForRange(0, 3), verity.fec_data_extent);
+  EXPECT_EQ(ExtentForRange(3, 2), verity.fec_extent);
+  EXPECT_EQ(2u, verity.fec_roots);
+}
+
+TEST_F(PayloadGenerationConfigAndroidTest,
+       LoadVerityConfigInvalidHashTreeTest) {
+  brillo::Blob part = GetAVBPartition();
+  part[kHashTreeOffset] ^= 1;  // flip one bit
+  test_utils::WriteFileVector(temp_file_.path(), part);
+  EXPECT_TRUE(image_config_.LoadImageSize());
+  EXPECT_TRUE(image_config_.partitions[0].OpenFilesystem());
+  EXPECT_FALSE(image_config_.LoadVerityConfig());
+}
+
+TEST_F(PayloadGenerationConfigAndroidTest, LoadVerityConfigInvalidFECTest) {
+  brillo::Blob part = GetAVBPartition();
+  part[kFECOffset] ^= 1;  // flip one bit
+  test_utils::WriteFileVector(temp_file_.path(), part);
+  EXPECT_TRUE(image_config_.LoadImageSize());
+  EXPECT_TRUE(image_config_.partitions[0].OpenFilesystem());
+  EXPECT_FALSE(image_config_.LoadVerityConfig());
+}
+
+TEST_F(PayloadGenerationConfigAndroidTest, LoadVerityConfigEmptyImageTest) {
+  brillo::Blob part(kImageSize);
+  test_utils::WriteFileVector(temp_file_.path(), part);
+  EXPECT_TRUE(image_config_.LoadImageSize());
+  EXPECT_TRUE(image_config_.LoadVerityConfig());
+  EXPECT_TRUE(image_config_.partitions[0].verity.IsEmpty());
+}
+
+TEST_F(PayloadGenerationConfigAndroidTest, LoadVerityConfigTinyImageTest) {
+  test_utils::WriteFileString(temp_file_.path(), "tiny");
+  EXPECT_TRUE(image_config_.LoadImageSize());
+  EXPECT_TRUE(image_config_.LoadVerityConfig());
+  EXPECT_TRUE(image_config_.partitions[0].verity.IsEmpty());
+}
+
+}  // namespace chromeos_update_engine
diff --git a/boot_control_recovery_stub.cc b/payload_generator/payload_generation_config_chromeos.cc
similarity index 68%
rename from boot_control_recovery_stub.cc
rename to payload_generator/payload_generation_config_chromeos.cc
index 129c5d0..bb05aff 100644
--- a/boot_control_recovery_stub.cc
+++ b/payload_generator/payload_generation_config_chromeos.cc
@@ -1,5 +1,5 @@
 //
-// Copyright (C) 2016 The Android Open Source Project
+// Copyright (C) 2018 The Android Open Source Project
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -14,8 +14,12 @@
 // limitations under the License.
 //
 
-#include <hardware/hardware.h>
+#include "update_engine/payload_generator/payload_generation_config.h"
 
-hw_module_t HAL_MODULE_INFO_SYM = {
-  .id = "stub",
-};
+namespace chromeos_update_engine {
+
+bool ImageConfig::LoadVerityConfig() {
+  return true;
+}
+
+}  // namespace chromeos_update_engine
diff --git a/payload_generator/payload_signer.cc b/payload_generator/payload_signer.cc
index 0b47dd4..2c386fa 100644
--- a/payload_generator/payload_signer.cc
+++ b/payload_generator/payload_signer.cc
@@ -25,8 +25,6 @@
 #include <base/strings/string_split.h>
 #include <base/strings/string_util.h>
 #include <brillo/data_encoding.h>
-#include <brillo/streams/file_stream.h>
-#include <brillo/streams/stream.h>
 #include <openssl/err.h>
 #include <openssl/pem.h>
 
@@ -35,6 +33,7 @@
 #include "update_engine/common/utils.h"
 #include "update_engine/payload_consumer/delta_performer.h"
 #include "update_engine/payload_consumer/payload_constants.h"
+#include "update_engine/payload_consumer/payload_metadata.h"
 #include "update_engine/payload_consumer/payload_verifier.h"
 #include "update_engine/payload_generator/delta_diff_generator.h"
 #include "update_engine/payload_generator/payload_file.h"
@@ -93,21 +92,14 @@
   uint64_t manifest_offset = 20;
   const int kProtobufSizeOffset = 12;
 
-  DeltaArchiveManifest manifest;
-  uint64_t metadata_size, major_version;
-  uint32_t metadata_signature_size;
-  TEST_AND_RETURN_FALSE(
-      PayloadSigner::LoadPayloadMetadata(payload_path,
-                                         nullptr,
-                                         &manifest,
-                                         &major_version,
-                                         &metadata_size,
-                                         &metadata_signature_size));
-
   brillo::Blob payload;
   TEST_AND_RETURN_FALSE(utils::ReadFile(payload_path, &payload));
-
-  if (major_version == kBrilloMajorPayloadVersion) {
+  PayloadMetadata payload_metadata;
+  TEST_AND_RETURN_FALSE(payload_metadata.ParsePayloadHeader(payload));
+  uint64_t metadata_size = payload_metadata.GetMetadataSize();
+  uint32_t metadata_signature_size =
+      payload_metadata.GetMetadataSignatureSize();
+  if (payload_metadata.GetMajorVersion() == kBrilloMajorPayloadVersion) {
     // Write metadata signature size in header.
     uint32_t metadata_signature_size_be =
         htobe32(metadata_signature_blob.size());
@@ -124,6 +116,9 @@
     LOG(INFO) << "Metadata signature size: " << metadata_signature_size;
   }
 
+  DeltaArchiveManifest manifest;
+  TEST_AND_RETURN_FALSE(payload_metadata.GetManifest(payload, &manifest));
+
   // Is there already a signature op in place?
   if (manifest.has_signatures_size()) {
     // The signature op is tied to the size of the signature blob, but not its
@@ -143,7 +138,7 @@
     PayloadSigner::AddSignatureToManifest(
         payload.size() - metadata_size - metadata_signature_size,
         signature_blob.size(),
-        major_version == kChromeOSMajorPayloadVersion,
+        payload_metadata.GetMajorVersion() == kChromeOSMajorPayloadVersion,
         &manifest);
 
     // Updates the payload to include the new manifest.
@@ -231,95 +226,26 @@
     Extent* dummy_extent = dummy_op->add_dst_extents();
     // Tell the dummy op to write this data to a big sparse hole
     dummy_extent->set_start_block(kSparseHole);
-    dummy_extent->set_num_blocks((signature_blob_length + kBlockSize - 1) /
-                                 kBlockSize);
+    dummy_extent->set_num_blocks(
+        utils::DivRoundUp(signature_blob_length, kBlockSize));
   }
 }
 
-bool PayloadSigner::LoadPayloadMetadata(const string& payload_path,
-                                        brillo::Blob* out_payload_metadata,
-                                        DeltaArchiveManifest* out_manifest,
-                                        uint64_t* out_major_version,
-                                        uint64_t* out_metadata_size,
-                                        uint32_t* out_metadata_signature_size) {
-  brillo::StreamPtr payload_file =
-      brillo::FileStream::Open(base::FilePath(payload_path),
-                               brillo::Stream::AccessMode::READ,
-                               brillo::FileStream::Disposition::OPEN_EXISTING,
-                               nullptr);
-  TEST_AND_RETURN_FALSE(payload_file);
-  brillo::Blob payload_metadata;
-
-  payload_metadata.resize(kMaxPayloadHeaderSize);
-  TEST_AND_RETURN_FALSE(payload_file->ReadAllBlocking(
-      payload_metadata.data(), payload_metadata.size(), nullptr));
-
-  const uint8_t* read_pointer = payload_metadata.data();
-  TEST_AND_RETURN_FALSE(
-      memcmp(read_pointer, kDeltaMagic, sizeof(kDeltaMagic)) == 0);
-  read_pointer += sizeof(kDeltaMagic);
-
-  uint64_t major_version;
-  memcpy(&major_version, read_pointer, sizeof(major_version));
-  read_pointer += sizeof(major_version);
-  major_version = be64toh(major_version);
-  TEST_AND_RETURN_FALSE(major_version == kChromeOSMajorPayloadVersion ||
-                        major_version == kBrilloMajorPayloadVersion);
-  if (out_major_version)
-    *out_major_version = major_version;
-
-  uint64_t manifest_size = 0;
-  memcpy(&manifest_size, read_pointer, sizeof(manifest_size));
-  read_pointer += sizeof(manifest_size);
-  manifest_size = be64toh(manifest_size);
-
-  uint32_t metadata_signature_size = 0;
-  if (major_version == kBrilloMajorPayloadVersion) {
-    memcpy(&metadata_signature_size, read_pointer,
-           sizeof(metadata_signature_size));
-    read_pointer += sizeof(metadata_signature_size);
-    metadata_signature_size = be32toh(metadata_signature_size);
-  }
-  if (out_metadata_signature_size)
-    *out_metadata_signature_size = metadata_signature_size;
-
-  uint64_t header_size = read_pointer - payload_metadata.data();
-  uint64_t metadata_size = header_size + manifest_size;
-  if (out_metadata_size)
-    *out_metadata_size = metadata_size;
-
-  size_t bytes_read = payload_metadata.size();
-  payload_metadata.resize(metadata_size);
-  TEST_AND_RETURN_FALSE(
-      payload_file->ReadAllBlocking(payload_metadata.data() + bytes_read,
-                                    payload_metadata.size() - bytes_read,
-                                    nullptr));
-  if (out_manifest) {
-    TEST_AND_RETURN_FALSE(out_manifest->ParseFromArray(
-        payload_metadata.data() + header_size, manifest_size));
-  }
-  if (out_payload_metadata)
-    *out_payload_metadata = std::move(payload_metadata);
-  return true;
-}
-
 bool PayloadSigner::VerifySignedPayload(const string& payload_path,
                                         const string& public_key_path) {
-  DeltaArchiveManifest manifest;
-  uint64_t metadata_size;
-  uint32_t metadata_signature_size;
-  TEST_AND_RETURN_FALSE(LoadPayloadMetadata(payload_path,
-                                            nullptr,
-                                            &manifest,
-                                            nullptr,
-                                            &metadata_size,
-                                            &metadata_signature_size));
   brillo::Blob payload;
   TEST_AND_RETURN_FALSE(utils::ReadFile(payload_path, &payload));
+  PayloadMetadata payload_metadata;
+  TEST_AND_RETURN_FALSE(payload_metadata.ParsePayloadHeader(payload));
+  DeltaArchiveManifest manifest;
+  TEST_AND_RETURN_FALSE(payload_metadata.GetManifest(payload, &manifest));
   TEST_AND_RETURN_FALSE(manifest.has_signatures_offset() &&
                         manifest.has_signatures_size());
-  uint64_t signatures_offset = metadata_size + metadata_signature_size +
-                               manifest.signatures_offset();
+  uint64_t metadata_size = payload_metadata.GetMetadataSize();
+  uint32_t metadata_signature_size =
+      payload_metadata.GetMetadataSignatureSize();
+  uint64_t signatures_offset =
+      metadata_size + metadata_signature_size + manifest.signatures_offset();
   CHECK_EQ(payload.size(), signatures_offset + manifest.signatures_size());
   brillo::Blob payload_hash, metadata_hash;
   TEST_AND_RETURN_FALSE(CalculateHashFromPayload(payload,
@@ -521,20 +447,15 @@
 
 bool PayloadSigner::ExtractPayloadProperties(
     const string& payload_path, brillo::KeyValueStore* properties) {
-  DeltaArchiveManifest manifest;
-  brillo::Blob payload_metadata;
-  uint64_t major_version, metadata_size;
-  uint32_t metadata_signature_size;
-  uint64_t file_size = utils::FileSize(payload_path);
-
+  brillo::Blob payload;
   TEST_AND_RETURN_FALSE(
-      PayloadSigner::LoadPayloadMetadata(payload_path,
-                                         &payload_metadata,
-                                         &manifest,
-                                         &major_version,
-                                         &metadata_size,
-                                         &metadata_signature_size));
+      utils::ReadFileChunk(payload_path, 0, kMaxPayloadHeaderSize, &payload));
 
+  PayloadMetadata payload_metadata;
+  TEST_AND_RETURN_FALSE(payload_metadata.ParsePayloadHeader(payload));
+  uint64_t metadata_size = payload_metadata.GetMetadataSize();
+
+  uint64_t file_size = utils::FileSize(payload_path);
   properties->SetString(kPayloadPropertyFileSize, std::to_string(file_size));
   properties->SetString(kPayloadPropertyMetadataSize,
                         std::to_string(metadata_size));
@@ -543,8 +464,10 @@
   TEST_AND_RETURN_FALSE(
       HashCalculator::RawHashOfFile(payload_path, file_size, &file_hash) ==
       static_cast<off_t>(file_size));
-  TEST_AND_RETURN_FALSE(HashCalculator::RawHashOfBytes(
-      payload_metadata.data(), payload_metadata.size(), &metadata_hash));
+
+  TEST_AND_RETURN_FALSE(HashCalculator::RawHashOfFile(
+                            payload_path, metadata_size, &metadata_hash) ==
+                        static_cast<off_t>(metadata_size));
 
   properties->SetString(kPayloadPropertyFileHash,
                         brillo::data_encoding::Base64Encode(file_hash));
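
The call sites above now share the same PayloadMetadata pattern in place of the removed LoadPayloadMetadata(). A condensed sketch of that flow, with the TEST_AND_RETURN_FALSE error handling elided for brevity:

    brillo::Blob payload;
    utils::ReadFile(payload_path, &payload);  // or ReadFileChunk() for the header only

    PayloadMetadata payload_metadata;
    payload_metadata.ParsePayloadHeader(payload);  // magic, major version, sizes

    uint64_t metadata_size = payload_metadata.GetMetadataSize();
    uint32_t metadata_signature_size = payload_metadata.GetMetadataSignatureSize();

    DeltaArchiveManifest manifest;
    payload_metadata.GetManifest(payload, &manifest);  // parses the manifest protobuf
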
diff --git a/payload_generator/payload_signer.h b/payload_generator/payload_signer.h
index 00e32fa..38c673c 100644
--- a/payload_generator/payload_signer.h
+++ b/payload_generator/payload_signer.h
@@ -33,20 +33,6 @@
 
 class PayloadSigner {
  public:
-  // Reads the payload metadata from the given |payload_path| into the
-  // |out_payload_metadata| vector if not null. It also parses the manifest
-  // protobuf in the payload and returns it in |out_manifest| if not null, along
-  // with the major version of the payload in |out_major_version| if not null,
-  // the size of the entire metadata in |out_metadata_size| and the size of
-  // metadata signature in |out_metadata_signature_size| if not null. Returns
-  // whether a valid payload metadata was found and parsed.
-  static bool LoadPayloadMetadata(const std::string& payload_path,
-                                  brillo::Blob* out_payload_metadata,
-                                  DeltaArchiveManifest* out_manifest,
-                                  uint64_t* out_major_version,
-                                  uint64_t* out_metadata_size,
-                                  uint32_t* out_metadata_signature_size);
-
   // Returns true if the payload in |payload_path| is signed and its hash can be
   // verified using the public key in |public_key_path| with the signature
   // of a given version in the signature blob. Returns false otherwise.
diff --git a/payload_generator/payload_signer_unittest.cc b/payload_generator/payload_signer_unittest.cc
index 62b6e7a..967e026 100644
--- a/payload_generator/payload_signer_unittest.cc
+++ b/payload_generator/payload_signer_unittest.cc
@@ -124,44 +124,9 @@
     PayloadVerifier::PadRSA2048SHA256Hash(&padded_hash_data_);
   }
 
-  void DoWriteAndLoadPayloadTest(const PayloadGenerationConfig& config) {
-    PayloadFile payload;
-    payload.Init(config);
-    string payload_path;
-    EXPECT_TRUE(utils::MakeTempFile("payload.XXXXXX", &payload_path, nullptr));
-    ScopedPathUnlinker payload_path_unlinker(payload_path);
-    uint64_t metadata_size;
-    EXPECT_TRUE(
-        payload.WritePayload(payload_path, "/dev/null", "", &metadata_size));
-    brillo::Blob payload_metadata_blob;
-    DeltaArchiveManifest manifest;
-    uint64_t load_metadata_size, load_major_version;
-    EXPECT_TRUE(PayloadSigner::LoadPayloadMetadata(payload_path,
-                                                   &payload_metadata_blob,
-                                                   &manifest,
-                                                   &load_major_version,
-                                                   &load_metadata_size,
-                                                   nullptr));
-    EXPECT_EQ(metadata_size, payload_metadata_blob.size());
-    EXPECT_EQ(config.version.major, load_major_version);
-    EXPECT_EQ(metadata_size, load_metadata_size);
-  }
-
   brillo::Blob padded_hash_data_{std::begin(kDataHash), std::end(kDataHash)};
 };
 
-TEST_F(PayloadSignerTest, LoadPayloadV1Test) {
-  PayloadGenerationConfig config;
-  config.version.major = kChromeOSMajorPayloadVersion;
-  DoWriteAndLoadPayloadTest(config);
-}
-
-TEST_F(PayloadSignerTest, LoadPayloadV2Test) {
-  PayloadGenerationConfig config;
-  config.version.major = kBrilloMajorPayloadVersion;
-  DoWriteAndLoadPayloadTest(config);
-}
-
 TEST_F(PayloadSignerTest, SignSimpleTextTest) {
   brillo::Blob signature_blob;
   SignSampleData(&signature_blob,
@@ -215,50 +180,46 @@
 }
 
 TEST_F(PayloadSignerTest, SkipMetadataSignatureTest) {
-  string payload_path;
-  EXPECT_TRUE(utils::MakeTempFile("payload.XXXXXX", &payload_path, nullptr));
-  ScopedPathUnlinker payload_path_unlinker(payload_path);
-
+  test_utils::ScopedTempFile payload_file("payload.XXXXXX");
   PayloadGenerationConfig config;
   config.version.major = kBrilloMajorPayloadVersion;
   PayloadFile payload;
   EXPECT_TRUE(payload.Init(config));
   uint64_t metadata_size;
-  EXPECT_TRUE(
-      payload.WritePayload(payload_path, "/dev/null", "", &metadata_size));
+  EXPECT_TRUE(payload.WritePayload(
+      payload_file.path(), "/dev/null", "", &metadata_size));
   const vector<int> sizes = {256};
   brillo::Blob unsigned_payload_hash, unsigned_metadata_hash;
-  EXPECT_TRUE(PayloadSigner::HashPayloadForSigning(
-      payload_path, sizes, &unsigned_payload_hash, &unsigned_metadata_hash));
+  EXPECT_TRUE(PayloadSigner::HashPayloadForSigning(payload_file.path(),
+                                                   sizes,
+                                                   &unsigned_payload_hash,
+                                                   &unsigned_metadata_hash));
   EXPECT_TRUE(
-      payload.WritePayload(payload_path,
+      payload.WritePayload(payload_file.path(),
                            "/dev/null",
                            GetBuildArtifactsPath(kUnittestPrivateKeyPath),
                            &metadata_size));
   brillo::Blob signed_payload_hash, signed_metadata_hash;
   EXPECT_TRUE(PayloadSigner::HashPayloadForSigning(
-      payload_path, sizes, &signed_payload_hash, &signed_metadata_hash));
+      payload_file.path(), sizes, &signed_payload_hash, &signed_metadata_hash));
   EXPECT_EQ(unsigned_payload_hash, signed_payload_hash);
   EXPECT_EQ(unsigned_metadata_hash, signed_metadata_hash);
 }
 
 TEST_F(PayloadSignerTest, VerifySignedPayloadTest) {
-  string payload_path;
-  EXPECT_TRUE(utils::MakeTempFile("payload.XXXXXX", &payload_path, nullptr));
-  ScopedPathUnlinker payload_path_unlinker(payload_path);
-
+  test_utils::ScopedTempFile payload_file("payload.XXXXXX");
   PayloadGenerationConfig config;
   config.version.major = kBrilloMajorPayloadVersion;
   PayloadFile payload;
   EXPECT_TRUE(payload.Init(config));
   uint64_t metadata_size;
   EXPECT_TRUE(
-      payload.WritePayload(payload_path,
+      payload.WritePayload(payload_file.path(),
                            "/dev/null",
                            GetBuildArtifactsPath(kUnittestPrivateKeyPath),
                            &metadata_size));
   EXPECT_TRUE(PayloadSigner::VerifySignedPayload(
-      payload_path, GetBuildArtifactsPath(kUnittestPublicKeyPath)));
+      payload_file.path(), GetBuildArtifactsPath(kUnittestPublicKeyPath)));
 }
 
 }  // namespace chromeos_update_engine
diff --git a/payload_generator/squashfs_filesystem.cc b/payload_generator/squashfs_filesystem.cc
index c98ad12..6c892f5 100644
--- a/payload_generator/squashfs_filesystem.cc
+++ b/payload_generator/squashfs_filesystem.cc
@@ -44,14 +44,6 @@
 
 namespace {
 
-Extent ExtentForBytes(uint64_t block_size,
-                      uint64_t start_bytes,
-                      uint64_t size_bytes) {
-  uint64_t start_block = start_bytes / block_size;
-  uint64_t end_block = (start_bytes + size_bytes + block_size - 1) / block_size;
-  return ExtentForRange(start_block, end_block - start_block);
-}
-
 // The size of the squashfs super block.
 constexpr size_t kSquashfsSuperBlockSize = 96;
 constexpr uint64_t kSquashfsCompressedBit = 1 << 24;
@@ -192,8 +184,7 @@
   for (const auto& file : files_) {
     file_extents.AddExtents(file.extents);
   }
-  vector<Extent> full = {
-      ExtentForRange(0, (size_ + kBlockSize - 1) / kBlockSize)};
+  vector<Extent> full = {ExtentForBytes(kBlockSize, 0, size_)};
   auto metadata_extents = FilterExtentRanges(full, file_extents);
   // For now there should be at most two extents. One for superblock and one for
   // metadata at the end. Just create appropriate files with <metadata-i> name.
diff --git a/payload_generator/tarjan.cc b/payload_generator/tarjan.cc
index 98e29f9..d99ae12 100644
--- a/payload_generator/tarjan.cc
+++ b/payload_generator/tarjan.cc
@@ -19,8 +19,7 @@
 #include <vector>
 
 #include <base/logging.h>
-
-#include "update_engine/common/utils.h"
+#include <base/stl_util.h>
 
 using std::min;
 using std::vector;
@@ -59,7 +58,7 @@
       Tarjan(vertex_next, graph);
       (*graph)[vertex].lowlink = min((*graph)[vertex].lowlink,
                                      (*graph)[vertex_next].lowlink);
-    } else if (utils::VectorContainsValue(stack_, vertex_next)) {
+    } else if (base::ContainsValue(stack_, vertex_next)) {
       (*graph)[vertex].lowlink = min((*graph)[vertex].lowlink,
                                      (*graph)[vertex_next].index);
     }
@@ -73,7 +72,7 @@
       component.push_back(other_vertex);
     } while (other_vertex != vertex && !stack_.empty());
 
-    if (utils::VectorContainsValue(component, required_vertex_)) {
+    if (base::ContainsValue(component, required_vertex_)) {
       components_.resize(components_.size() + 1);
       component.swap(components_.back());
     }
diff --git a/payload_generator/tarjan_unittest.cc b/payload_generator/tarjan_unittest.cc
index c29cbdc..b271227 100644
--- a/payload_generator/tarjan_unittest.cc
+++ b/payload_generator/tarjan_unittest.cc
@@ -20,9 +20,9 @@
 #include <utility>
 
 #include <base/logging.h>
+#include <base/stl_util.h>
 #include <gtest/gtest.h>
 
-#include "update_engine/common/utils.h"
 #include "update_engine/payload_generator/graph_types.h"
 
 using std::make_pair;
@@ -66,11 +66,11 @@
     tarjan.Execute(i, &graph, &vertex_indexes);
 
     EXPECT_EQ(5U, vertex_indexes.size());
-    EXPECT_TRUE(utils::VectorContainsValue(vertex_indexes, n_a));
-    EXPECT_TRUE(utils::VectorContainsValue(vertex_indexes, n_b));
-    EXPECT_TRUE(utils::VectorContainsValue(vertex_indexes, n_c));
-    EXPECT_TRUE(utils::VectorContainsValue(vertex_indexes, n_d));
-    EXPECT_TRUE(utils::VectorContainsValue(vertex_indexes, n_e));
+    EXPECT_TRUE(base::ContainsValue(vertex_indexes, n_a));
+    EXPECT_TRUE(base::ContainsValue(vertex_indexes, n_b));
+    EXPECT_TRUE(base::ContainsValue(vertex_indexes, n_c));
+    EXPECT_TRUE(base::ContainsValue(vertex_indexes, n_d));
+    EXPECT_TRUE(base::ContainsValue(vertex_indexes, n_e));
   }
 
   {
@@ -78,7 +78,7 @@
     tarjan.Execute(n_f, &graph, &vertex_indexes);
 
     EXPECT_EQ(1U, vertex_indexes.size());
-    EXPECT_TRUE(utils::VectorContainsValue(vertex_indexes, n_f));
+    EXPECT_TRUE(base::ContainsValue(vertex_indexes, n_f));
   }
 
   for (Vertex::Index i = n_g; i <= n_h; i++) {
@@ -86,8 +86,8 @@
     tarjan.Execute(i, &graph, &vertex_indexes);
 
     EXPECT_EQ(2U, vertex_indexes.size());
-    EXPECT_TRUE(utils::VectorContainsValue(vertex_indexes, n_g));
-    EXPECT_TRUE(utils::VectorContainsValue(vertex_indexes, n_h));
+    EXPECT_TRUE(base::ContainsValue(vertex_indexes, n_g));
+    EXPECT_TRUE(base::ContainsValue(vertex_indexes, n_h));
   }
 }
 
diff --git a/payload_generator/xz_android.cc b/payload_generator/xz_android.cc
index f3b836d..41c55f7 100644
--- a/payload_generator/xz_android.cc
+++ b/payload_generator/xz_android.cc
@@ -16,12 +16,14 @@
 
 #include "update_engine/payload_generator/xz.h"
 
-#include <7zCrc.h>
-#include <Xz.h>
-#include <XzEnc.h>
+#include <elf.h>
+#include <endian.h>
 
 #include <algorithm>
 
+#include <7zCrc.h>
+#include <Xz.h>
+#include <XzEnc.h>
 #include <base/logging.h>
 
 namespace {
@@ -34,9 +36,8 @@
     Read = &BlobReaderStream::ReadStatic;
   }
 
-  static SRes ReadStatic(void* p, void* buf, size_t* size) {
-    auto* self =
-        static_cast<BlobReaderStream*>(reinterpret_cast<ISeqInStream*>(p));
+  static SRes ReadStatic(const ISeqInStream* p, void* buf, size_t* size) {
+    auto* self = static_cast<BlobReaderStream*>(const_cast<ISeqInStream*>(p));
     *size = std::min(*size, self->data_.size() - self->pos_);
     memcpy(buf, self->data_.data() + self->pos_, *size);
     self->pos_ += *size;
@@ -55,9 +56,10 @@
     Write = &BlobWriterStream::WriteStatic;
   }
 
-  static size_t WriteStatic(void* p, const void* buf, size_t size) {
-    auto* self =
-        static_cast<BlobWriterStream*>(reinterpret_cast<ISeqOutStream*>(p));
+  static size_t WriteStatic(const ISeqOutStream* p,
+                            const void* buf,
+                            size_t size) {
+    auto* self = static_cast<const BlobWriterStream*>(p);
     const uint8_t* buffer = reinterpret_cast<const uint8_t*>(buf);
     self->data_->reserve(self->data_->size() + size);
     self->data_->insert(self->data_->end(), buffer, buffer + size);
@@ -67,6 +69,37 @@
   brillo::Blob* data_;
 };
 
+// Returns the filter id to be used to compress |data|. Only the BCJ filters
+// for x86 and ARM ELF files are supported; returns 0 otherwise.
+int GetFilterID(const brillo::Blob& data) {
+  if (data.size() < sizeof(Elf32_Ehdr) ||
+      memcmp(data.data(), ELFMAG, SELFMAG) != 0)
+    return 0;
+
+  const Elf32_Ehdr* header = reinterpret_cast<const Elf32_Ehdr*>(data.data());
+
+  // Only little-endian is supported.
+  if (header->e_ident[EI_DATA] != ELFDATA2LSB)
+    return 0;
+
+  switch (le16toh(header->e_machine)) {
+    case EM_386:
+    case EM_X86_64:
+      return XZ_ID_X86;
+    case EM_ARM:
+      // Both ARM and ARM Thumb instructions could be found in the same ARM ELF
+      // file. We choose to use the ARM Thumb filter here because testing shows
+      // that it usually works better than the ARM filter.
+      return XZ_ID_ARMT;
+#ifdef EM_AARCH64
+    case EM_AARCH64:
+      // Neither the ARM nor the ARM Thumb filter works well with AArch64.
+      return 0;
+#endif
+  }
+  return 0;
+}
+
 }  // namespace
 
 namespace chromeos_update_engine {
@@ -97,7 +130,6 @@
 
   // LZMA2 compression properties.
   CLzma2EncProps lzma2Props;
-  props.lzma2Props = &lzma2Props;
   Lzma2EncProps_Init(&lzma2Props);
   // LZMA compression "level 6" requires 9 MB of RAM to decompress in the worst
   // case.
@@ -106,6 +138,9 @@
   // The input size data is used to reduce the dictionary size if possible.
   lzma2Props.lzmaProps.reduceSize = in.size();
   Lzma2EncProps_Normalize(&lzma2Props);
+  props.lzma2Props = lzma2Props;
+
+  props.filterProps.id = GetFilterID(in);
 
   BlobWriterStream out_writer(out);
   BlobReaderStream in_reader(in);
diff --git a/payload_generator/xz_chromeos.cc b/payload_generator/xz_chromeos.cc
index a8cda4e..2ff9458 100644
--- a/payload_generator/xz_chromeos.cc
+++ b/payload_generator/xz_chromeos.cc
@@ -16,13 +16,39 @@
 
 #include "update_engine/payload_generator/xz.h"
 
+#include <base/logging.h>
+#include <lzma.h>
+
 namespace chromeos_update_engine {
 
 void XzCompressInit() {}
 
 bool XzCompress(const brillo::Blob& in, brillo::Blob* out) {
-  // No Xz compressor implementation in Chrome OS delta_generator builds.
-  return false;
+  out->clear();
+  if (in.empty())
+    return true;
+
+  // Resize the output buffer to get enough memory for writing the compressed
+  // data.
+  out->resize(lzma_stream_buffer_bound(in.size()));
+
+  const uint32_t kLzmaPreset = 6;
+  size_t out_pos = 0;
+  int rc = lzma_easy_buffer_encode(kLzmaPreset,
+                                   LZMA_CHECK_NONE,  // We do not need CRC.
+                                   nullptr,
+                                   in.data(),
+                                   in.size(),
+                                   out->data(),
+                                   &out_pos,
+                                   out->size());
+  if (rc != LZMA_OK) {
+    LOG(ERROR) << "Failed to compress data to LZMA stream with return code: "
+               << rc;
+    return false;
+  }
+  out->resize(out_pos);
+  return true;
 }
 
 }  // namespace chromeos_update_engine
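
A standalone round-trip sketch of the liblzma one-shot buffer API used above, decoding with lzma_stream_buffer_decode() to confirm the output is a valid xz stream. Illustrative only; the preset and check mirror the constants in XzCompress(), and the test string is arbitrary:

    #include <lzma.h>

    #include <cstring>
    #include <string>
    #include <vector>

    int main() {
      const std::string in = "hello hello hello hello";
      std::vector<uint8_t> out(lzma_stream_buffer_bound(in.size()));
      size_t out_pos = 0;
      // Compress with preset 6 and no integrity check, as in XzCompress().
      if (lzma_easy_buffer_encode(6, LZMA_CHECK_NONE, nullptr,
                                  reinterpret_cast<const uint8_t*>(in.data()),
                                  in.size(), out.data(), &out_pos,
                                  out.size()) != LZMA_OK) {
        return 1;
      }
      out.resize(out_pos);

      // Decompress and verify we get the original bytes back.
      std::vector<uint8_t> decoded(in.size());
      uint64_t memlimit = 1 << 26;  // 64 MiB is plenty for preset 6.
      size_t in_pos = 0, decoded_pos = 0;
      if (lzma_stream_buffer_decode(&memlimit, 0, nullptr, out.data(), &in_pos,
                                    out.size(), decoded.data(), &decoded_pos,
                                    decoded.size()) != LZMA_OK) {
        return 1;
      }
      return (decoded_pos == in.size() &&
              memcmp(decoded.data(), in.data(), in.size()) == 0) ? 0 : 1;
    }
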
diff --git a/payload_generator/zip_unittest.cc b/payload_generator/zip_unittest.cc
index c750eb7..29f16d3 100644
--- a/payload_generator/zip_unittest.cc
+++ b/payload_generator/zip_unittest.cc
@@ -115,12 +115,7 @@
   }
 };
 
-#ifdef __ANDROID__
 typedef ::testing::Types<BzipTest, XzTest> ZipTestTypes;
-#else
-// Chrome OS implementation of Xz compressor just returns false.
-typedef ::testing::Types<BzipTest> ZipTestTypes;
-#endif  // __ANDROID__
 
 TYPED_TEST_CASE(ZipTest, ZipTestTypes);
 
@@ -140,7 +135,7 @@
   brillo::Blob decompressed;
   EXPECT_TRUE(this->ZipDecompress(out, &decompressed));
   EXPECT_EQ(in.size(), decompressed.size());
-  EXPECT_TRUE(!memcmp(in.data(), decompressed.data(), in.size()));
+  EXPECT_EQ(0, memcmp(in.data(), decompressed.data(), in.size()));
 }
 
 TYPED_TEST(ZipTest, PoorCompressionTest) {
@@ -170,4 +165,18 @@
   EXPECT_EQ(0U, out.size());
 }
 
+TYPED_TEST(ZipTest, CompressELFTest) {
+  string path = test_utils::GetBuildArtifactsPath("delta_generator");
+  brillo::Blob in;
+  utils::ReadFile(path, &in);
+  brillo::Blob out;
+  EXPECT_TRUE(this->ZipCompress(in, &out));
+  EXPECT_LT(out.size(), in.size());
+  EXPECT_GT(out.size(), 0U);
+  brillo::Blob decompressed;
+  EXPECT_TRUE(this->ZipDecompress(out, &decompressed));
+  EXPECT_EQ(in.size(), decompressed.size());
+  EXPECT_EQ(0, memcmp(in.data(), decompressed.data(), in.size()));
+}
+
 }  // namespace chromeos_update_engine
diff --git a/payload_state.cc b/payload_state.cc
index 4992606..36f120a 100644
--- a/payload_state.cc
+++ b/payload_state.cc
@@ -64,9 +64,9 @@
       url_index_(0),
       url_failure_count_(0),
       url_switch_count_(0),
+      rollback_happened_(false),
       attempt_num_bytes_downloaded_(0),
       attempt_connection_type_(metrics::ConnectionType::kUnknown),
-      attempt_error_code_(ErrorCode::kSuccess),
       attempt_type_(AttemptType::kUpdate) {
   for (int i = 0; i <= kNumDownloadSources; i++)
     total_bytes_downloaded_[i] = current_bytes_downloaded_[i] = 0;
@@ -94,6 +94,7 @@
   }
   LoadNumReboots();
   LoadNumResponsesSeen();
+  LoadRollbackHappened();
   LoadRollbackVersion();
   LoadP2PFirstAttemptTimestamp();
   LoadP2PNumAttempts();
@@ -242,7 +243,6 @@
           metrics::RollbackResult::kSuccess);
       break;
   }
-  attempt_error_code_ = ErrorCode::kSuccess;
 
   // Reset the number of responses seen since it counts from the last
   // successful update, e.g. now.
@@ -256,7 +256,6 @@
   ErrorCode base_error = utils::GetBaseErrorCode(error);
   LOG(INFO) << "Updating payload state for error code: " << base_error
             << " (" << utils::ErrorCodeToString(base_error) << ")";
-  attempt_error_code_ = base_error;
 
   if (candidate_urls_.size() == 0) {
     // This means we got this error even before we got a valid Omaha response
@@ -306,6 +305,7 @@
     case ErrorCode::kUnsupportedMajorPayloadVersion:
     case ErrorCode::kUnsupportedMinorPayloadVersion:
     case ErrorCode::kPayloadTimestampError:
+    case ErrorCode::kVerityCalculationError:
       IncrementUrlIndex();
       break;
 
@@ -358,7 +358,11 @@
     case ErrorCode::kOmahaRequestXMLHasEntityDecl:
     case ErrorCode::kFilesystemVerifierError:
     case ErrorCode::kUserCanceled:
+    case ErrorCode::kOmahaUpdateIgnoredOverCellular:
     case ErrorCode::kUpdatedButNotActive:
+    case ErrorCode::kNoUpdate:
+    case ErrorCode::kRollbackNotPossible:
+    case ErrorCode::kFirstActiveOmahaPingSentPersistenceError:
       LOG(INFO) << "Not incrementing URL index or failure count for this error";
       break;
 
@@ -408,9 +412,11 @@
     }
   }
 
-  if (!system_state_->hardware()->IsOfficialBuild()) {
+  if (!system_state_->hardware()->IsOfficialBuild() &&
+      !prefs_->Exists(kPrefsNoIgnoreBackoff)) {
     // Backoffs are needed only for official builds. We do not want any delays
-    // or update failures due to backoffs during testing or development.
+    // or update failures due to backoffs during testing or development,
+    // unless |kPrefsNoIgnoreBackoff| is manually set.
     LOG(INFO) << "No backoffs for test/dev images. "
               << "Can proceed with the download";
     return false;
@@ -732,6 +738,7 @@
   SetNumReboots(0);
 
   TimeDelta duration = GetUpdateDuration();
+  TimeDelta duration_uptime = GetUpdateDurationUptime();
 
   prefs_->Delete(kPrefsUpdateTimestampStart);
   prefs_->Delete(kPrefsUpdateDurationUptime);
@@ -752,6 +759,7 @@
       total_bytes_by_source,
       download_overhead_percentage,
       duration,
+      duration_uptime,
       reboot_count,
       url_switch_count);
 }
@@ -787,6 +795,7 @@
   SetP2PNumAttempts(0);
   SetP2PFirstAttemptTimestamp(Time());  // Set to null time
   SetScatteringWaitPeriod(TimeDelta());
+  SetStagingWaitPeriod(TimeDelta());
 }
 
 void PayloadState::ResetRollbackVersion() {
@@ -909,7 +918,7 @@
 
 void PayloadState::LoadScatteringWaitPeriod() {
   SetScatteringWaitPeriod(TimeDelta::FromSeconds(
-      GetPersistedValue(kPrefsWallClockWaitPeriod, prefs_)));
+      GetPersistedValue(kPrefsWallClockScatteringWaitPeriod, prefs_)));
 }
 
 void PayloadState::SetScatteringWaitPeriod(TimeDelta wait_period) {
@@ -918,10 +927,27 @@
   LOG(INFO) << "Scattering Wait Period (seconds) = "
             << scattering_wait_period_.InSeconds();
   if (scattering_wait_period_.InSeconds() > 0) {
-    prefs_->SetInt64(kPrefsWallClockWaitPeriod,
+    prefs_->SetInt64(kPrefsWallClockScatteringWaitPeriod,
                      scattering_wait_period_.InSeconds());
   } else {
-    prefs_->Delete(kPrefsWallClockWaitPeriod);
+    prefs_->Delete(kPrefsWallClockScatteringWaitPeriod);
+  }
+}
+
+void PayloadState::LoadStagingWaitPeriod() {
+  SetStagingWaitPeriod(TimeDelta::FromSeconds(
+      GetPersistedValue(kPrefsWallClockStagingWaitPeriod, prefs_)));
+}
+
+void PayloadState::SetStagingWaitPeriod(TimeDelta wait_period) {
+  CHECK(prefs_);
+  staging_wait_period_ = wait_period;
+  LOG(INFO) << "Staging Wait Period (days) =" << staging_wait_period_.InDays();
+  if (staging_wait_period_.InSeconds() > 0) {
+    prefs_->SetInt64(kPrefsWallClockStagingWaitPeriod,
+                     staging_wait_period_.InSeconds());
+  } else {
+    prefs_->Delete(kPrefsWallClockStagingWaitPeriod);
   }
 }
 
@@ -1070,6 +1096,25 @@
   SetNumReboots(GetPersistedValue(kPrefsNumReboots, prefs_));
 }
 
+void PayloadState::LoadRollbackHappened() {
+  CHECK(powerwash_safe_prefs_);
+  bool rollback_happened = false;
+  powerwash_safe_prefs_->GetBoolean(kPrefsRollbackHappened, &rollback_happened);
+  SetRollbackHappened(rollback_happened);
+}
+
+void PayloadState::SetRollbackHappened(bool rollback_happened) {
+  CHECK(powerwash_safe_prefs_);
+  LOG(INFO) << "Setting rollback-happened to " << rollback_happened << ".";
+  rollback_happened_ = rollback_happened;
+  if (rollback_happened) {
+    powerwash_safe_prefs_->SetBoolean(kPrefsRollbackHappened,
+                                      rollback_happened);
+  } else {
+    powerwash_safe_prefs_->Delete(kPrefsRollbackHappened);
+  }
+}
+
 void PayloadState::LoadRollbackVersion() {
   CHECK(powerwash_safe_prefs_);
   string rollback_version;
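
SetRollbackHappened persists the flag only when it is true and deletes the pref otherwise, so a missing key reads back as false via LoadRollbackHappened. A rough Python sketch of that store-or-delete pattern (the dict-backed Prefs class is hypothetical, for illustration only):

    class Prefs(object):
        """Toy stand-in for the powerwash-safe prefs store."""

        def __init__(self):
            self._values = {}

        def set_boolean(self, key, value):
            self._values[key] = value

        def delete(self, key):
            self._values.pop(key, None)

        def get_boolean(self, key, default=False):
            return self._values.get(key, default)

    def set_rollback_happened(prefs, happened):
        # Persist only the "true" state; absence of the key means false.
        if happened:
            prefs.set_boolean('rollback-happened', True)
        else:
            prefs.delete('rollback-happened')

    def load_rollback_happened(prefs):
        return prefs.get_boolean('rollback-happened', False)

Deleting the key instead of writing false keeps the common case (no rollback) from leaving state behind on disk.
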
diff --git a/payload_state.h b/payload_state.h
index 24e9900..cb6454f 100644
--- a/payload_state.h
+++ b/payload_state.h
@@ -80,7 +80,8 @@
   }
 
   inline std::string GetCurrentUrl() override {
-    return candidate_urls_.size() && candidate_urls_[payload_index_].size()
+    return (payload_index_ < candidate_urls_.size() &&
+            url_index_ < candidate_urls_[payload_index_].size())
                ? candidate_urls_[payload_index_][url_index_]
                : "";
   }
@@ -119,6 +120,10 @@
 
   void UpdateEngineStarted() override;
 
+  inline bool GetRollbackHappened() override { return rollback_happened_; }
+
+  void SetRollbackHappened(bool rollback_happened) override;
+
   inline std::string GetRollbackVersion() override {
     return rollback_version_;
   }
@@ -142,6 +147,8 @@
 
   void SetScatteringWaitPeriod(base::TimeDelta wait_period) override;
 
+  void SetStagingWaitPeriod(base::TimeDelta wait_period) override;
+
   void SetP2PUrl(const std::string& url) override {
     p2p_url_ = url;
   }
@@ -150,10 +157,6 @@
     return p2p_url_;
   }
 
-  inline ErrorCode GetAttemptErrorCode() const override {
-    return attempt_error_code_;
-  }
-
   bool NextPayload() override;
 
  private:
@@ -166,6 +169,7 @@
   FRIEND_TEST(PayloadStateTest, RebootAfterUpdateFailedMetric);
   FRIEND_TEST(PayloadStateTest, RebootAfterUpdateSucceed);
   FRIEND_TEST(PayloadStateTest, RebootAfterCanceledUpdate);
+  FRIEND_TEST(PayloadStateTest, RollbackHappened);
   FRIEND_TEST(PayloadStateTest, RollbackVersion);
   FRIEND_TEST(PayloadStateTest, UpdateSuccessWithWipedPrefs);
 
@@ -341,7 +345,7 @@
 
   // Loads the number of bytes that have been currently downloaded through the
   // previous attempts from the persisted state for the given source. It's
-  // reset to 0 everytime we begin a full update and is continued from previous
+  // reset to 0 every time we begin a full update and is continued from previous
   // attempt if we're resuming the update.
   void LoadCurrentBytesDownloaded(DownloadSource source);
 
@@ -353,7 +357,7 @@
 
   // Loads the total number of bytes that have been downloaded (since the last
   // successful update) from the persisted state for the given source. It's
-  // reset to 0 everytime we successfully apply an update and counts the bytes
+  // reset to 0 every time we successfully apply an update and counts the bytes
   // downloaded for both successful and failed attempts since then.
   void LoadTotalBytesDownloaded(DownloadSource source);
 
@@ -363,6 +367,10 @@
                                uint64_t total_bytes_downloaded,
                                bool log);
 
+  // Loads whether rollback has happened on this device since the last update
+  // check where policy was available. This info is preserved over powerwash.
+  void LoadRollbackHappened();
+
   // Loads the blacklisted version from our prefs file.
   void LoadRollbackVersion();
 
@@ -374,9 +382,10 @@
   void ResetRollbackVersion();
 
   inline uint32_t GetUrlIndex() {
-    return url_index_ ? std::min(candidate_urls_[payload_index_].size() - 1,
-                                 url_index_)
-                      : 0;
+    return (url_index_ != 0 && payload_index_ < candidate_urls_.size())
+               ? std::min(candidate_urls_[payload_index_].size() - 1,
+                          url_index_)
+               : 0;
   }
 
   // Computes the list of candidate URLs from the total list of payload URLs in
@@ -401,8 +410,6 @@
   // increments num_reboots.
   void UpdateNumReboots();
 
-
-
   // Loads the |kPrefsP2PFirstAttemptTimestamp| state variable from disk
   // into |p2p_first_attempt_timestamp_|.
   void LoadP2PFirstAttemptTimestamp();
@@ -419,6 +426,9 @@
   // Loads the persisted scattering wallclock-based wait period.
   void LoadScatteringWaitPeriod();
 
+  // Loads the persisted staging wallclock-based wait period.
+  void LoadStagingWaitPeriod();
+
   // Get the total size of all payloads.
   int64_t GetPayloadSize();
 
@@ -489,7 +499,7 @@
   int32_t url_switch_count_;
 
   // The current download source based on the current URL. This value is
-  // not persisted as it can be recomputed everytime we update the URL.
+  // not persisted as it can be recomputed every time we update the URL.
   // We're storing this so as not to recompute this on every few bytes of
   // data we read from the socket.
   DownloadSource current_download_source_;
@@ -550,6 +560,11 @@
   // allowed as per device policy.
   std::vector<std::vector<std::string>> candidate_urls_;
 
+  // Whether a rollback has happened since the last time device policy was
+  // available during an update check. While this is set, forced updates are
+  // blocked to avoid update-rollback loops.
+  bool rollback_happened_;
+
   // This stores a blacklisted version set as part of rollback. When we rollback
   // we store the version of the os from which we are rolling back from in order
   // to guarantee that we do not re-update to it on the next au attempt after
@@ -568,15 +583,15 @@
   // The connection type when the attempt started.
   metrics::ConnectionType attempt_connection_type_;
 
-  // The attempt error code when the attempt finished.
-  ErrorCode attempt_error_code_;
-
   // Whether we're currently rolling back.
   AttemptType attempt_type_;
 
   // The current scattering wallclock-based wait period.
   base::TimeDelta scattering_wait_period_;
 
+  // The current staging wallclock-based wait period.
+  base::TimeDelta staging_wait_period_;
+
   DISALLOW_COPY_AND_ASSIGN(PayloadState);
 };
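
GetCurrentUrl and GetUrlIndex now validate both payload_index_ and url_index_ against candidate_urls_ before indexing into it. The same guard in a small Python sketch (the function and argument names only mirror the members, for illustration):

    def get_current_url(candidate_urls, payload_index, url_index):
        # Return '' instead of indexing out of range when either index is stale.
        if (payload_index < len(candidate_urls) and
                url_index < len(candidate_urls[payload_index])):
            return candidate_urls[payload_index][url_index]
        return ''

    # A stale payload_index no longer raises; it simply yields no URL.
    assert get_current_url([['http://a', 'http://b']], 0, 1) == 'http://b'
    assert get_current_url([['http://a', 'http://b']], 2, 0) == ''
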
 
diff --git a/payload_state_interface.h b/payload_state_interface.h
index 4aa25e3..d384a0e 100644
--- a/payload_state_interface.h
+++ b/payload_state_interface.h
@@ -155,6 +155,16 @@
   // Called at update_engine startup to do various house-keeping.
   virtual void UpdateEngineStarted() = 0;
 
+  // Returns whether a rollback happened since the last update check with policy
+  // present.
+  virtual bool GetRollbackHappened() = 0;
+
+  // Sets whether rollback has happened on this device since the last update
+  // check where policy was available. This info is preserved over powerwash.
+  // This prevents forced updates happening on a rolled back device before
+  // device policy is available.
+  virtual void SetRollbackHappened(bool rollback_happened) = 0;
+
   // Returns the version from before a rollback if our last update was a
   // rollback.
   virtual std::string GetRollbackVersion() = 0;
@@ -192,10 +202,12 @@
   // Sets/gets the P2P download URL, if one is to be used.
   virtual void SetP2PUrl(const std::string& url) = 0;
   virtual std::string GetP2PUrl() const = 0;
-  virtual ErrorCode GetAttemptErrorCode() const = 0;
 
   // Switch to next payload.
   virtual bool NextPayload() = 0;
+
+  // Sets and persists the staging wallclock-based wait period.
+  virtual void SetStagingWaitPeriod(base::TimeDelta wait_period) = 0;
 };
 
 }  // namespace chromeos_update_engine
diff --git a/payload_state_unittest.cc b/payload_state_unittest.cc
index f1c3835..637891b 100644
--- a/payload_state_unittest.cc
+++ b/payload_state_unittest.cc
@@ -639,7 +639,7 @@
   PayloadState payload_state;
   FakeSystemState fake_system_state;
   OmahaRequestParams params(&fake_system_state);
-  params.Init("", "", true);  // is_interactive = True.
+  params.Init("", "", true);  // interactive = True.
   fake_system_state.set_request_params(&params);
 
   EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
@@ -662,7 +662,7 @@
   PayloadState payload_state;
   FakeSystemState fake_system_state;
   OmahaRequestParams params(&fake_system_state);
-  params.Init("", "", false);  // is_interactive = False.
+  params.Init("", "", false);  // interactive = False.
   fake_system_state.set_request_params(&params);
 
   EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
@@ -871,7 +871,7 @@
 
   EXPECT_CALL(*fake_system_state.mock_metrics_reporter(),
               ReportSuccessfulUpdateMetrics(
-                  1, _, kPayloadTypeFull, _, _, 314, _, _, 3));
+                  1, _, kPayloadTypeFull, _, _, 314, _, _, _, 3));
 
   payload_state.UpdateSucceeded();
 
@@ -920,6 +920,7 @@
                   _,
                   _,
                   _,
+                  _,
                   _))
       .Times(1);
 
@@ -984,6 +985,37 @@
   EXPECT_EQ(0U, payload_state.GetNumReboots());
 }
 
+TEST(PayloadStateTest, RollbackHappened) {
+  FakeSystemState fake_system_state;
+  PayloadState payload_state;
+
+  NiceMock<MockPrefs>* mock_powerwash_safe_prefs =
+      fake_system_state.mock_powerwash_safe_prefs();
+  EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
+
+  // Verify pre-conditions are good.
+  EXPECT_FALSE(payload_state.GetRollbackHappened());
+
+  // Set to true.
+  EXPECT_CALL(*mock_powerwash_safe_prefs,
+              SetBoolean(kPrefsRollbackHappened, true));
+  payload_state.SetRollbackHappened(true);
+  EXPECT_TRUE(payload_state.GetRollbackHappened());
+
+  // Set to false.
+  EXPECT_CALL(*mock_powerwash_safe_prefs, Delete(kPrefsRollbackHappened));
+  payload_state.SetRollbackHappened(false);
+  EXPECT_FALSE(payload_state.GetRollbackHappened());
+
+  // Let's verify we can reload it correctly.
+  EXPECT_CALL(*mock_powerwash_safe_prefs, GetBoolean(kPrefsRollbackHappened, _))
+      .WillOnce(DoAll(SetArgPointee<1>(true), Return(true)));
+  EXPECT_CALL(*mock_powerwash_safe_prefs,
+              SetBoolean(kPrefsRollbackHappened, true));
+  payload_state.LoadRollbackHappened();
+  EXPECT_TRUE(payload_state.GetRollbackHappened());
+}
+
 TEST(PayloadStateTest, RollbackVersion) {
   FakeSystemState fake_system_state;
   PayloadState payload_state;
@@ -1305,9 +1337,9 @@
 
   // Simulate a successful download and update.
   payload_state.DownloadComplete();
-  EXPECT_CALL(
-      *fake_system_state.mock_metrics_reporter(),
-      ReportSuccessfulUpdateMetrics(_, _, kPayloadTypeDelta, _, _, _, _, _, _));
+  EXPECT_CALL(*fake_system_state.mock_metrics_reporter(),
+              ReportSuccessfulUpdateMetrics(
+                  _, _, kPayloadTypeDelta, _, _, _, _, _, _, _));
   payload_state.UpdateSucceeded();
 
   // Mock the request to a request where the delta was disabled but Omaha sends
@@ -1321,9 +1353,9 @@
 
   payload_state.DownloadComplete();
 
-  EXPECT_CALL(
-      *fake_system_state.mock_metrics_reporter(),
-      ReportSuccessfulUpdateMetrics(_, _, kPayloadTypeDelta, _, _, _, _, _, _));
+  EXPECT_CALL(*fake_system_state.mock_metrics_reporter(),
+              ReportSuccessfulUpdateMetrics(
+                  _, _, kPayloadTypeDelta, _, _, _, _, _, _, _));
   payload_state.UpdateSucceeded();
 }
 
@@ -1346,7 +1378,7 @@
 
   EXPECT_CALL(*fake_system_state.mock_metrics_reporter(),
               ReportSuccessfulUpdateMetrics(
-                  _, _, kPayloadTypeForcedFull, _, _, _, _, _, _));
+                  _, _, kPayloadTypeForcedFull, _, _, _, _, _, _, _));
   payload_state.UpdateSucceeded();
 }
 
@@ -1368,9 +1400,9 @@
   // Simulate a successful download and update.
   payload_state.DownloadComplete();
 
-  EXPECT_CALL(
-      *fake_system_state.mock_metrics_reporter(),
-      ReportSuccessfulUpdateMetrics(_, _, kPayloadTypeFull, _, _, _, _, _, _));
+  EXPECT_CALL(*fake_system_state.mock_metrics_reporter(),
+              ReportSuccessfulUpdateMetrics(
+                  _, _, kPayloadTypeFull, _, _, _, _, _, _, _));
   payload_state.UpdateSucceeded();
 }
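
ReportSuccessfulUpdateMetrics gains a duration_uptime argument, so every gmock expectation on it needs one more _ matcher. The same arity coupling shown with Python's unittest.mock (assumes Python 3's unittest.mock; the argument values are made up):

    from unittest import mock

    reporter = mock.Mock()
    # The caller now passes both duration and duration_uptime (10 arguments).
    reporter.ReportSuccessfulUpdateMetrics(1, 'stable', 'FULL', [], 0,
                                           314, 25, 2, 1, 3)
    # Positions we do not care about are wildcarded with mock.ANY, mirroring
    # gmock's '_' matcher; the extra slot is the new duration_uptime.
    reporter.ReportSuccessfulUpdateMetrics.assert_called_once_with(
        1, mock.ANY, 'FULL', mock.ANY, mock.ANY, 314,
        mock.ANY, mock.ANY, mock.ANY, 3)
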
 
diff --git a/power_manager_chromeos.h b/power_manager_chromeos.h
index ad49889..eeb14d8 100644
--- a/power_manager_chromeos.h
+++ b/power_manager_chromeos.h
@@ -14,8 +14,8 @@
 // limitations under the License.
 //
 
-#ifndef UPDATE_ENGINE_POWER_MANAGER_H_
-#define UPDATE_ENGINE_POWER_MANAGER_H_
+#ifndef UPDATE_ENGINE_POWER_MANAGER_CHROMEOS_H_
+#define UPDATE_ENGINE_POWER_MANAGER_CHROMEOS_H_
 
 #include <base/macros.h>
 #include <power_manager/dbus-proxies.h>
@@ -41,4 +41,4 @@
 
 }  // namespace chromeos_update_engine
 
-#endif  // UPDATE_ENGINE_POWER_MANAGER_H_
+#endif  // UPDATE_ENGINE_POWER_MANAGER_CHROMEOS_H_
diff --git a/real_system_state.cc b/real_system_state.cc
index 8e7ad51..9dab3a1 100644
--- a/real_system_state.cc
+++ b/real_system_state.cc
@@ -18,6 +18,7 @@
 
 #include <memory>
 #include <string>
+#include <utility>
 
 #include <base/bind.h>
 #include <base/files/file_util.h>
@@ -34,6 +35,7 @@
 #include "update_engine/common/hardware.h"
 #include "update_engine/common/utils.h"
 #include "update_engine/metrics_reporter_omaha.h"
+#include "update_engine/update_boot_flags_action.h"
 #if USE_DBUS
 #include "update_engine/dbus_connection.h"
 #endif  // USE_DBUS
@@ -62,13 +64,13 @@
 
   hardware_ = hardware::CreateHardware();
   if (!hardware_) {
-    LOG(ERROR) << "Error intializing the HardwareInterface.";
+    LOG(ERROR) << "Error initializing the HardwareInterface.";
     return false;
   }
 
 #if USE_CHROME_KIOSK_APP
-  libcros_proxy_.reset(new org::chromium::LibCrosServiceInterfaceProxy(
-      DBusConnection::Get()->GetDBus(), chromeos::kLibCrosServiceName));
+  kiosk_app_proxy_.reset(new org::chromium::KioskAppServiceInterfaceProxy(
+      DBusConnection::Get()->GetDBus(), chromeos::kKioskAppServiceName));
 #endif  // USE_CHROME_KIOSK_APP
 
   LOG_IF(INFO, !hardware_->IsNormalBootMode()) << "Booted in dev mode.";
@@ -76,13 +78,13 @@
 
   connection_manager_ = connection_manager::CreateConnectionManager(this);
   if (!connection_manager_) {
-    LOG(ERROR) << "Error intializing the ConnectionManagerInterface.";
+    LOG(ERROR) << "Error initializing the ConnectionManagerInterface.";
     return false;
   }
 
   power_manager_ = power_manager::CreatePowerManager();
   if (!power_manager_) {
-    LOG(ERROR) << "Error intializing the PowerManagerInterface.";
+    LOG(ERROR) << "Error initializing the PowerManagerInterface.";
     return false;
   }
 
@@ -150,7 +152,7 @@
   chromeos_update_manager::State* um_state =
       chromeos_update_manager::DefaultStateFactory(&policy_provider_,
 #if USE_CHROME_KIOSK_APP
-                                                   libcros_proxy_.get(),
+                                                   kiosk_app_proxy_.get(),
 #else
                                                    nullptr,
 #endif  // USE_CHROME_KIOSK_APP
@@ -183,11 +185,14 @@
   // Initiate update checks.
   update_attempter_->ScheduleUpdates();
 
+  auto update_boot_flags_action =
+      std::make_unique<UpdateBootFlagsAction>(boot_control_.get());
+  processor_.EnqueueAction(std::move(update_boot_flags_action));
   // Update boot flags after 45 seconds.
   MessageLoop::current()->PostDelayedTask(
       FROM_HERE,
-      base::Bind(&UpdateAttempter::UpdateBootFlags,
-                 base::Unretained(update_attempter_.get())),
+      base::Bind(&ActionProcessor::StartProcessing,
+                 base::Unretained(&processor_)),
       base::TimeDelta::FromSeconds(45));
 
   // Broadcast the update engine status on startup to ensure consistent system
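
The boot-flag update is no longer invoked directly on UpdateAttempter; it is enqueued as an UpdateBootFlagsAction and the ActionProcessor is kicked off by a delayed task 45 seconds after startup. A minimal Python sketch of that deferred-start idea (threading.Timer stands in for the message loop; the class and function names are illustrative only):

    import threading

    class ActionProcessor(object):
        def __init__(self):
            self._actions = []

        def enqueue_action(self, action):
            self._actions.append(action)

        def start_processing(self):
            for action in self._actions:
                action()

    def update_boot_flags_action():
        print('marking the current slot as successfully booted')

    processor = ActionProcessor()
    processor.enqueue_action(update_boot_flags_action)
    # Defer the queued work, as the real code does with
    # MessageLoop::PostDelayedTask and a 45 second delay.
    threading.Timer(45.0, processor.start_processing).start()
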
diff --git a/real_system_state.h b/real_system_state.h
index 49f7c31..5239160 100644
--- a/real_system_state.h
+++ b/real_system_state.h
@@ -25,7 +25,7 @@
 #include <policy/device_policy.h>
 
 #if USE_CHROME_KIOSK_APP
-#include <libcros/dbus-proxies.h>
+#include <kiosk-app/dbus-proxies.h>
 #endif  // USE_CHROME_KIOSK_APP
 
 #include "update_engine/certificate_checker.h"
@@ -129,7 +129,8 @@
  private:
   // Real DBus proxies using the DBus connection.
 #if USE_CHROME_KIOSK_APP
-  std::unique_ptr<org::chromium::LibCrosServiceInterfaceProxy> libcros_proxy_;
+  std::unique_ptr<org::chromium::KioskAppServiceInterfaceProxy>
+      kiosk_app_proxy_;
 #endif  // USE_CHROME_KIOSK_APP
 
   // Interface for the power manager.
@@ -184,6 +185,8 @@
   // rebooted. Important for tracking whether you are running instance of the
   // update engine on first boot or due to a crash/restart.
   bool system_rebooted_{false};
+
+  ActionProcessor processor_;
 };
 
 }  // namespace chromeos_update_engine
diff --git a/scripts/blockdiff.py b/scripts/blockdiff.py
index 1dc60a6..5793def 100755
--- a/scripts/blockdiff.py
+++ b/scripts/blockdiff.py
@@ -1,14 +1,26 @@
 #!/usr/bin/python2
 #
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
+# Copyright (C) 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
 
 """Block diff utility."""
 
 from __future__ import print_function
 
-import optparse
+# pylint: disable=import-error
+import argparse
 import sys
 
 
@@ -71,28 +83,25 @@
 
 def main(argv):
   # Parse command-line arguments.
-  parser = optparse.OptionParser(
-      usage='Usage: %prog FILE1 FILE2',
-      description='Compare FILE1 and FILE2 by blocks.')
+  parser = argparse.ArgumentParser(
+      description='Compare FILE1 and FILE2 by blocks.',
+      formatter_class=argparse.ArgumentDefaultsHelpFormatter)
 
-  parser.add_option('-b', '--block-size', metavar='NUM', type=int, default=4096,
-                    help='the block size to use (default: %default)')
-  parser.add_option('-m', '--max-length', metavar='NUM', type=int, default=-1,
-                    help='maximum number of bytes to compared')
+  parser.add_argument('-b', '--block-size', metavar='NUM', type=int,
+                      default=4096, help='the block size to use')
+  parser.add_argument('-m', '--max-length', metavar='NUM', type=int, default=-1,
+                      help='maximum number of bytes to compare')
+  parser.add_argument('file1', metavar='FILE1')
+  parser.add_argument('file2', metavar='FILE2')
 
-  opts, args = parser.parse_args(argv[1:])
-
-  try:
-    name1, name2 = args
-  except ValueError:
-    parser.error('unexpected number of arguments')
+  args = parser.parse_args(argv[1:])
 
   # Perform the block diff.
   try:
-    with open(name1) as file1:
-      with open(name2) as file2:
-        diff_list = BlockDiff(opts.block_size, file1, file2, name1, name2,
-                              opts.max_length)
+    with open(args.file1) as file1:
+      with open(args.file2) as file2:
+        diff_list = BlockDiff(args.block_size, file1, file2,
+                              args.file1, args.file2, args.max_length)
   except BlockDiffError as e:
     print('Error: %s' % e, file=sys.stderr)
     return 2
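
The script moves from optparse to argparse, so FILE1/FILE2 become declared positionals instead of hand-validated leftover arguments. A condensed, runnable sketch of the resulting interface (the block-comparison body is trimmed to the essentials and is not the script itself):

    import argparse
    import sys

    def main(argv):
        parser = argparse.ArgumentParser(
            description='Compare FILE1 and FILE2 by blocks.',
            formatter_class=argparse.ArgumentDefaultsHelpFormatter)
        parser.add_argument('-b', '--block-size', metavar='NUM', type=int,
                            default=4096, help='the block size to use')
        parser.add_argument('file1', metavar='FILE1')
        parser.add_argument('file2', metavar='FILE2')
        args = parser.parse_args(argv)

        # Compare the two files block by block, reporting differing indices.
        with open(args.file1, 'rb') as f1, open(args.file2, 'rb') as f2:
            block = 0
            while True:
                b1, b2 = f1.read(args.block_size), f2.read(args.block_size)
                if not b1 and not b2:
                    break
                if b1 != b2:
                    print('block %d differs' % block)
                block += 1
        return 0

    if __name__ == '__main__':
        sys.exit(main(sys.argv[1:]))

With argparse, a missing or extra positional is rejected by the parser itself, which is what the removed try/except ValueError block used to do by hand.
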
diff --git a/scripts/brillo_update_payload b/scripts/brillo_update_payload
index 65c63f5..47ac830 100755
--- a/scripts/brillo_update_payload
+++ b/scripts/brillo_update_payload
@@ -25,6 +25,7 @@
 #  sign        generate a signed payload
 #  properties  generate a properties file from a payload
 #  verify      verify a payload by recreating a target image.
+#  check       verify a payload using paycheck (static testing)
 #
 #  Generate command arguments:
 #  --payload             generated unsigned payload output file
@@ -67,6 +68,9 @@
 #  --payload             payload input file
 #  --source_image        verify payload to the specified source image.
 #  --target_image        the target image to verify upon.
+#
+#  Check command arguments:
+#     Symmetrical with the verify command.
 
 
 # Exit codes:
@@ -82,7 +86,7 @@
 }
 
 # Loads shflags. We first look at the default install location; then look for
-# crosutils (chroot); finally check our own directory (au-generator zipfile).
+# crosutils (chroot); finally check our own directory.
 load_shflags() {
   local my_dir="$(dirname "$(readlink -f "$0")")"
   local path
@@ -102,7 +106,9 @@
 for signing."
 HELP_SIGN="sign: Insert the signatures into the unsigned payload."
 HELP_PROPERTIES="properties: Extract payload properties to a file."
-HELP_VERIFY="verify: Verify a (signed) update payload."
+HELP_VERIFY="verify: Verify a (signed) update payload using delta_generator."
+HELP_CHECK="check: Check a (signed) update payload using paycheck (static \
+testing)."
 
 usage() {
   echo "Supported commands:"
@@ -112,13 +118,14 @@
   echo "${HELP_SIGN}"
   echo "${HELP_PROPERTIES}"
   echo "${HELP_VERIFY}"
+  echo "${HELP_CHECK}"
   echo
   echo "Use: \"$0 <command> --help\" for more options."
 }
 
 # Check that a command is specified.
 if [[ $# -lt 1 ]]; then
-  echo "Please specify a command [generate|hash|sign|properties]"
+  echo "Please specify a command [generate|hash|sign|properties|verify|check]"
   exit 1
 fi
 
@@ -147,6 +154,10 @@
     FLAGS_HELP="${HELP_VERIFY}"
     ;;
 
+  check)
+    FLAGS_HELP="${HELP_CHECK}"
+    ;;
+
   *)
     echo "Unrecognized command: \"${COMMAND}\"" >&2
     usage >&2
@@ -202,7 +213,7 @@
     "Path to output the extracted property files. If '-' is passed stdout will \
 be used."
 fi
-if [[ "${COMMAND}" == "verify" ]]; then
+if [[ "${COMMAND}" == "verify" || "${COMMAND}" == "check" ]]; then
   DEFINE_string payload "" \
     "Path to the input payload file."
   DEFINE_string target_image "" \
@@ -268,14 +279,12 @@
 
 # truncate_file <file_path> <file_size>
 #
-# Truncate the given |file_path| to |file_size| using perl.
+# Truncate the given |file_path| to |file_size| using python.
 # The truncate binary might not be available.
 truncate_file() {
   local file_path="$1"
   local file_size="$2"
-  perl -e "open(FILE, \"+<\", \$ARGV[0]); \
-           truncate(FILE, ${file_size}); \
-           close(FILE);" "${file_path}"
+  python -c "open(\"${file_path}\", 'a').truncate(${file_size})"
 }
 
 # Create a temporary file in the work_dir with an optional pattern name.
@@ -366,24 +375,79 @@
   # updater supports a newer major version.
   FORCE_MAJOR_VERSION="1"
 
-  # When generating legacy Chrome OS images, we need to use "boot" and "system"
-  # for the partition names to be compatible with updating Brillo devices with
-  # Chrome OS images.
-  eval ${partitions_array}[boot]=\""${kernel}"\"
-  eval ${partitions_array}[system]=\""${root}"\"
+  eval ${partitions_array}[kernel]=\""${kernel}"\"
+  eval ${partitions_array}[root]=\""${root}"\"
 
   if [[ -n "${partitions_order}" ]]; then
-    eval "${partitions_order}=( \"system\" \"boot\" )"
+    eval "${partitions_order}=( \"root\" \"kernel\" )"
   fi
 
   local part varname
-  for part in boot system; do
+  for part in kernel root; do
     varname="${partitions_array}[${part}]"
     printf "md5sum of %s: " "${varname}"
     md5sum "${!varname}"
   done
 }
 
+# extract_partition_brillo <target_files.zip> <partitions_array> <partition>
+#     <part_file> <part_map_file>
+#
+# Extract the <partition> from target_files zip file into <part_file> and its
+# map file into <part_map_file>.
+extract_partition_brillo() {
+  local image="$1"
+  local partitions_array="$2"
+  local part="$3"
+  local part_file="$4"
+  local part_map_file="$5"
+
+  # For each partition, we in turn look for its image file under IMAGES/ and
+  # RADIO/ in the given target_files zip file.
+  local path path_in_zip
+  for path in IMAGES RADIO; do
+    if unzip -l "${image}" "${path}/${part}.img" >/dev/null; then
+      path_in_zip="${path}"
+      break
+    fi
+  done
+  [[ -n "${path_in_zip}" ]] || die "Failed to find ${part}.img"
+  unzip -p "${image}" "${path_in_zip}/${part}.img" >"${part_file}"
+
+  # If the partition is stored as an Android sparse image file, we need to
+  # convert it to a raw image for the update.
+  local magic=$(head --bytes=4 "${part_file}" | hexdump -e '1/1 "%.2x"')
+  if [[ "${magic}" == "3aff26ed" ]]; then
+    local temp_sparse=$(create_tempfile "${part}.sparse.XXXXXX")
+    echo "Converting Android sparse image ${part}.img to RAW."
+    mv "${part_file}" "${temp_sparse}"
+    simg2img "${temp_sparse}" "${part_file}"
+    rm -f "${temp_sparse}"
+  fi
+
+  # Extract the .map file (if one is available).
+  unzip -p "${image}" "${path_in_zip}/${part}.map" >"${part_map_file}" \
+    2>/dev/null || true
+
+  # delta_generator only supports images multiple of 4 KiB. For target images
+  # we pad the data with zeros if needed, but for source images we truncate
+  # down the data since the last block of the old image could be padded on
+  # disk with unknown data.
+  local filesize=$(stat -c%s "${part_file}")
+  if [[ $(( filesize % 4096 )) -ne 0 ]]; then
+    if [[ "${partitions_array}" == "SRC_PARTITIONS" ]]; then
+      echo "Rounding DOWN partition ${part}.img to a multiple of 4 KiB."
+      : $(( filesize = filesize & -4096 ))
+    else
+      echo "Rounding UP partition ${part}.img to a multiple of 4 KiB."
+      : $(( filesize = (filesize + 4095) & -4096 ))
+    fi
+    truncate_file "${part_file}" "${filesize}"
+  fi
+
+  echo "Extracted ${partitions_array}[${part}]: ${filesize} bytes"
+}
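
The sparse-image check above keys off the first four bytes of the extracted file: Android sparse images start with the magic 0xED26FF3A stored little-endian, which hexdump prints as "3aff26ed". A quick Python equivalent of that detection (the path and the commented simg2img call are placeholders):

    def is_android_sparse_image(path):
        # The sparse header begins with magic 0xED26FF3A in little-endian
        # byte order, i.e. the bytes 3a ff 26 ed on disk.
        with open(path, 'rb') as f:
            return f.read(4) == b'\x3a\xff\x26\xed'

    # Example: convert to a raw image only when the magic matches, e.g.
    #   subprocess.check_call(['simg2img', 'part.sparse.img', 'part.img'])
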
+
 # extract_image_brillo <target_files.zip> <partitions_array> [partitions_order]
 #
 # Extract the A/B updated partitions from a Brillo target_files zip file into
@@ -409,7 +473,7 @@
   else
     warn "No ab_partitions.txt found. Using default."
   fi
-  echo "List of A/B partitions: ${partitions[@]}"
+  echo "List of A/B partitions for ${partitions_array}: ${partitions[@]}"
 
   if [[ -n "${partitions_order}" ]]; then
     eval "${partitions_order}=(${partitions[@]})"
@@ -450,59 +514,59 @@
     fi
   fi
 
-  local part part_file temp_raw filesize
+  local part
   for part in "${partitions[@]}"; do
-    part_file=$(create_tempfile "${part}.img.XXXXXX")
-    CLEANUP_FILES+=("${part_file}")
-    unzip -p "${image}" "IMAGES/${part}.img" >"${part_file}"
-
-    # If the partition is stored as an Android sparse image file, we need to
-    # convert them to a raw image for the update.
-    local magic=$(head --bytes=4 "${part_file}" | hexdump -e '1/1 "%.2x"')
-    if [[ "${magic}" == "3aff26ed" ]]; then
-      temp_raw=$(create_tempfile "${part}.raw.XXXXXX")
-      CLEANUP_FILES+=("${temp_raw}")
-      echo "Converting Android sparse image ${part}.img to RAW."
-      simg2img "${part_file}" "${temp_raw}"
-      # At this point, we can drop the contents of the old part_file file, but
-      # we can't delete the file because it will be deleted in cleanup.
-      true >"${part_file}"
-      part_file="${temp_raw}"
-    fi
-
-    # Extract the .map file (if one is available).
-    part_map_file=$(create_tempfile "${part}.map.XXXXXX")
-    CLEANUP_FILES+=("${part_map_file}")
-    unzip -p "${image}" "IMAGES/${part}.map" >"${part_map_file}" || \
-      part_map_file=""
-
-    # delta_generator only supports images multiple of 4 KiB. For target images
-    # we pad the data with zeros if needed, but for source images we truncate
-    # down the data since the last block of the old image could be padded on
-    # disk with unknown data.
-    filesize=$(stat -c%s "${part_file}")
-    if [[ $(( filesize % 4096 )) -ne 0 ]]; then
-      if [[ "${partitions_array}" == "SRC_PARTITIONS" ]]; then
-        echo "Rounding DOWN partition ${part}.img to a multiple of 4 KiB."
-        : $(( filesize = filesize & -4096 ))
-        if [[ ${filesize} == 0 ]]; then
-          echo "Source partition ${part}.img is empty after rounding down," \
-            "skipping."
-          continue
-        fi
-      else
-        echo "Rounding UP partition ${part}.img to a multiple of 4 KiB."
-        : $(( filesize = (filesize + 4095) & -4096 ))
-      fi
-      truncate_file "${part_file}" "${filesize}"
-    fi
-
+    local part_file=$(create_tempfile "${part}.img.XXXXXX")
+    local part_map_file=$(create_tempfile "${part}.map.XXXXXX")
+    CLEANUP_FILES+=("${part_file}" "${part_map_file}")
+    # Extract partitions in background.
+    extract_partition_brillo "${image}" "${partitions_array}" "${part}" \
+        "${part_file}" "${part_map_file}" &
     eval "${partitions_array}[\"${part}\"]=\"${part_file}\""
     eval "${partitions_array}_MAP[\"${part}\"]=\"${part_map_file}\""
-    echo "Extracted ${partitions_array}[${part}]: ${filesize} bytes"
   done
 }
 
+# cleanup_partition_array <partitions_array>
+#
+# Remove all empty files in <partitions_array>.
+cleanup_partition_array() {
+  local partitions_array="$1"
+  # Have to use eval to iterate over associative array keys with variable array
+  # names; we should change it to use nameref once bash 4.3 is available
+  # everywhere.
+  for part in $(eval "echo \${!${partitions_array}[@]}"); do
+    local path="${partitions_array}[$part]"
+    if [[ ! -s "${!path}" ]]; then
+      eval "unset ${partitions_array}[${part}]"
+    fi
+  done
+}
+
+extract_payload_images() {
+  local payload_type=$1
+  echo "Extracting images for ${payload_type} update."
+
+  if [[ "${payload_type}" == "delta" ]]; then
+    extract_image "${FLAGS_source_image}" SRC_PARTITIONS
+  fi
+  extract_image "${FLAGS_target_image}" DST_PARTITIONS PARTITIONS_ORDER
+  # Wait for all subprocesses.
+  wait
+  cleanup_partition_array SRC_PARTITIONS
+  cleanup_partition_array SRC_PARTITIONS_MAP
+  cleanup_partition_array DST_PARTITIONS
+  cleanup_partition_array DST_PARTITIONS_MAP
+}
+
+get_payload_type() {
+  if [[ -z "${FLAGS_source_image}" ]]; then
+    echo "full"
+  else
+    echo "delta"
+  fi
+}
+
 validate_generate() {
   [[ -n "${FLAGS_payload}" ]] ||
     die "You must specify an output filename with --payload FILENAME"
@@ -512,21 +576,12 @@
 }
 
 cmd_generate() {
-  local payload_type="delta"
-  if [[ -z "${FLAGS_source_image}" ]]; then
-    payload_type="full"
-  fi
-
-  echo "Extracting images for ${payload_type} update."
-
-  extract_image "${FLAGS_target_image}" DST_PARTITIONS PARTITIONS_ORDER
-  if [[ "${payload_type}" == "delta" ]]; then
-    extract_image "${FLAGS_source_image}" SRC_PARTITIONS
-  fi
+  local payload_type=$(get_payload_type)
+  extract_payload_images ${payload_type}
 
   echo "Generating ${payload_type} update."
   # Common payload args:
-  GENERATOR_ARGS=( -out_file="${FLAGS_payload}" )
+  GENERATOR_ARGS=( --out_file="${FLAGS_payload}" )
 
   local part old_partitions="" new_partitions="" partition_names=""
   local old_mapfiles="" new_mapfiles=""
@@ -547,16 +602,16 @@
 
   # Target image args:
   GENERATOR_ARGS+=(
-    -partition_names="${partition_names}"
-    -new_partitions="${new_partitions}"
-    -new_mapfiles="${new_mapfiles}"
+    --partition_names="${partition_names}"
+    --new_partitions="${new_partitions}"
+    --new_mapfiles="${new_mapfiles}"
   )
 
   if [[ "${payload_type}" == "delta" ]]; then
     # Source image args:
     GENERATOR_ARGS+=(
-      -old_partitions="${old_partitions}"
-      -old_mapfiles="${old_mapfiles}"
+      --old_partitions="${old_partitions}"
+      --old_mapfiles="${old_mapfiles}"
     )
     if [[ -n "${FORCE_MINOR_VERSION}" ]]; then
       GENERATOR_ARGS+=( --minor_version="${FORCE_MINOR_VERSION}" )
@@ -604,10 +659,10 @@
 
 cmd_hash() {
   "${GENERATOR}" \
-      -in_file="${FLAGS_unsigned_payload}" \
-      -signature_size="${FLAGS_signature_size}" \
-      -out_hash_file="${FLAGS_payload_hash_file}" \
-      -out_metadata_hash_file="${FLAGS_metadata_hash_file}"
+      --in_file="${FLAGS_unsigned_payload}" \
+      --signature_size="${FLAGS_signature_size}" \
+      --out_hash_file="${FLAGS_payload_hash_file}" \
+      --out_metadata_hash_file="${FLAGS_metadata_hash_file}"
 
   echo "Done generating hash."
 }
@@ -634,11 +689,11 @@
 
 cmd_sign() {
   GENERATOR_ARGS=(
-    -in_file="${FLAGS_unsigned_payload}"
-    -signature_size="${FLAGS_signature_size}"
-    -signature_file="${FLAGS_payload_signature_file}"
-    -metadata_signature_file="${FLAGS_metadata_signature_file}"
-    -out_file="${FLAGS_payload}"
+    --in_file="${FLAGS_unsigned_payload}"
+    --signature_size="${FLAGS_signature_size}"
+    --payload_signature_file="${FLAGS_payload_signature_file}"
+    --metadata_signature_file="${FLAGS_metadata_signature_file}"
+    --out_file="${FLAGS_payload}"
   )
 
   if [[ -n "${FLAGS_metadata_size_file}" ]]; then
@@ -659,11 +714,11 @@
 
 cmd_properties() {
   "${GENERATOR}" \
-      -in_file="${FLAGS_payload}" \
-      -properties_file="${FLAGS_properties_file}"
+      --in_file="${FLAGS_payload}" \
+      --properties_file="${FLAGS_properties_file}"
 }
 
-validate_verify() {
+validate_verify_and_check() {
   [[ -n "${FLAGS_payload}" ]] ||
     die "Error: you must specify an input filename with --payload FILENAME"
 
@@ -672,17 +727,8 @@
 }
 
 cmd_verify() {
-  local payload_type="delta"
-  if [[ -z "${FLAGS_source_image}" ]]; then
-    payload_type="full"
-  fi
-
-  echo "Extracting images for ${payload_type} update."
-
-  if [[ "${payload_type}" == "delta" ]]; then
-    extract_image "${FLAGS_source_image}" SRC_PARTITIONS
-  fi
-  extract_image "${FLAGS_target_image}" DST_PARTITIONS PARTITIONS_ORDER
+  local payload_type=$(get_payload_type)
+  extract_payload_images ${payload_type}
 
   declare -A TMP_PARTITIONS
   for part in "${PARTITIONS_ORDER[@]}"; do
@@ -697,7 +743,7 @@
 
   echo "Verifying ${payload_type} update."
   # Common payload args:
-  GENERATOR_ARGS=( -in_file="${FLAGS_payload}" )
+  GENERATOR_ARGS=( --in_file="${FLAGS_payload}" )
 
   local part old_partitions="" new_partitions="" partition_names=""
   for part in "${PARTITIONS_ORDER[@]}"; do
@@ -713,14 +759,14 @@
 
   # Target image args:
   GENERATOR_ARGS+=(
-    -partition_names="${partition_names}"
-    -new_partitions="${new_partitions}"
+    --partition_names="${partition_names}"
+    --new_partitions="${new_partitions}"
   )
 
   if [[ "${payload_type}" == "delta" ]]; then
     # Source image args:
     GENERATOR_ARGS+=(
-      -old_partitions="${old_partitions}"
+      --old_partitions="${old_partitions}"
     )
   fi
 
@@ -730,24 +776,54 @@
 
   echo "Running delta_generator to verify ${payload_type} payload with args: \
 ${GENERATOR_ARGS[@]}"
-  "${GENERATOR}" "${GENERATOR_ARGS[@]}"
+  "${GENERATOR}" "${GENERATOR_ARGS[@]}" || true
 
-  if [[ $? -eq 0 ]]; then
-    echo "Done applying ${payload_type} update."
-    echo "Checking the newly generated partitions against the target partitions"
-    for part in "${PARTITIONS_ORDER[@]}"; do
-      cmp "${TMP_PARTITIONS[${part}]}" "${DST_PARTITIONS[${part}]}"
-      local not_str=""
-      if [[ $? -ne 0 ]]; then
-        not_str="in"
-      fi
-      echo "The new partition (${part}) is ${not_str}valid."
-    done
-  else
-    echo "Failed to apply ${payload_type} update."
+  echo "Done applying ${payload_type} update."
+  echo "Checking the newly generated partitions against the target partitions"
+  local need_pause=false
+  for part in "${PARTITIONS_ORDER[@]}"; do
+    local not_str=""
+    if ! cmp "${TMP_PARTITIONS[${part}]}" "${DST_PARTITIONS[${part}]}"; then
+      not_str="in"
+      need_pause=true
+    fi
+    echo "The new partition (${part}) is ${not_str}valid."
+  done
+  # All images will be cleaned up when the script exits; pause here to give a
+  # chance to inspect the images.
+  if [[ "$need_pause" == true ]]; then
+    read -n1 -r -s -p "Paused to investigate invalid partitions, \
+press any key to exit."
   fi
 }
 
+cmd_check() {
+  local payload_type=$(get_payload_type)
+  extract_payload_images ${payload_type}
+
+  local part dst_partitions="" src_partitions=""
+  for part in "${PARTITIONS_ORDER[@]}"; do
+    if [[ -n "${dst_partitions}" ]]; then
+      dst_partitions+=" "
+      src_partitions+=" "
+    fi
+    dst_partitions+="${DST_PARTITIONS[${part}]}"
+    src_partitions+="${SRC_PARTITIONS[${part}]:-}"
+  done
+
+  # Common payload args:
+  PAYCHECK_ARGS=( "${FLAGS_payload}" --type ${payload_type} \
+    --part_names ${PARTITIONS_ORDER[@]} \
+    --dst_part_paths ${dst_partitions} )
+
+  if [[ ! -z "${SRC_PARTITIONS[@]}" ]]; then
+    PAYCHECK_ARGS+=( --src_part_paths ${src_partitions} )
+  fi
+
+  echo "Checking ${payload_type} update."
+  check_update_payload ${PAYCHECK_ARGS[@]} --check
+}
+
 # Sanity check that the real generator exists:
 GENERATOR="$(which delta_generator || true)"
 [[ -x "${GENERATOR}" ]] || die "can't find delta_generator"
@@ -765,7 +841,10 @@
   properties) validate_properties
               cmd_properties
               ;;
-  verify) validate_verify
+  verify) validate_verify_and_check
           cmd_verify
           ;;
+  check) validate_verify_and_check
+         cmd_check
+         ;;
 esac
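
Both the old and the new extraction code align partition sizes to 4 KiB with bit masks: size & -4096 clears the low 12 bits (round down), and (size + 4095) & -4096 rounds up to the next multiple. A tiny Python check of that arithmetic:

    BLOCK = 4096

    def round_down(size):
        return size & -BLOCK

    def round_up(size):
        return (size + BLOCK - 1) & -BLOCK

    assert round_down(10000) == 8192
    assert round_up(10000) == 12288
    assert round_down(8192) == round_up(8192) == 8192

Sources are rounded down because the tail of the old image may be padded with unknown data on disk; targets are rounded up (zero padded) so delta_generator always sees whole 4 KiB blocks.
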
diff --git a/scripts/paycheck.py b/scripts/paycheck.py
index 8df1bf0..9d61778 100755
--- a/scripts/paycheck.py
+++ b/scripts/paycheck.py
@@ -1,16 +1,33 @@
 #!/usr/bin/python2
 #
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
+# Copyright (C) 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
 
 """Command-line tool for checking and applying Chrome OS update payloads."""
 
 from __future__ import print_function
 
-import optparse
+# pylint: disable=import-error
+import argparse
+import filecmp
 import os
 import sys
+import tempfile
+
+from update_payload import common
+from update_payload import error
 
 lib_dir = os.path.join(os.path.dirname(__file__), 'lib')
 if os.path.exists(lib_dir) and os.path.isdir(lib_dir):
@@ -29,17 +46,12 @@
     argv: command-line arguments to parse (excluding the program name)
 
   Returns:
-    A tuple (opts, payload, extra_args), where `opts' are the options
-    returned by the parser, `payload' is the name of the payload file
-    (mandatory argument) and `extra_args' are any additional command-line
-    arguments.
+    Returns the arguments returned by the argument parser.
   """
-  parser = optparse.OptionParser(
-      usage=('Usage: %prog [OPTION...] PAYLOAD [DST_KERN DST_ROOT '
-             '[SRC_KERN SRC_ROOT]]'),
-      description=('Applies a Chrome OS update PAYLOAD to SRC_KERN and '
-                   'SRC_ROOT emitting DST_KERN and DST_ROOT, respectively. '
-                   'SRC_KERN and SRC_ROOT are only needed for delta payloads. '
+  parser = argparse.ArgumentParser(
+      description=('Applies a Chrome OS update PAYLOAD to src_kern and '
+                   'src_root emitting dst_kern and dst_root, respectively. '
+                   'src_kern and src_root are only needed for delta payloads. '
                    'When no partitions are provided, verifies the payload '
                    'integrity.'),
       epilog=('Note: a payload may verify correctly but fail to apply, and '
@@ -47,186 +59,241 @@
               'vs dynamic correctness. A payload that both verifies and '
               'applies correctly should be safe for use by the Chrome OS '
               'Update Engine. Use --check to verify a payload prior to '
-              'applying it.'))
+              'applying it.'),
+      formatter_class=argparse.RawDescriptionHelpFormatter
+  )
 
-  check_opts = optparse.OptionGroup(parser, 'Checking payload integrity')
-  check_opts.add_option('-c', '--check', action='store_true', default=False,
-                        help=('force payload integrity check (e.g. before '
-                              'applying)'))
-  check_opts.add_option('-D', '--describe', action='store_true', default=False,
-                        help='Print a friendly description of the payload.')
-  check_opts.add_option('-r', '--report', metavar='FILE',
-                        help="dump payload report (`-' for stdout)")
-  check_opts.add_option('-t', '--type', metavar='TYPE', dest='assert_type',
-                        help=("assert that payload is either `%s' or `%s'" %
-                              (_TYPE_FULL, _TYPE_DELTA)))
-  check_opts.add_option('-z', '--block-size', metavar='NUM', default=0,
-                        type='int',
-                        help='assert a non-default (4096) payload block size')
-  check_opts.add_option('-u', '--allow-unhashed', action='store_true',
-                        default=False, help='allow unhashed operations')
-  check_opts.add_option('-d', '--disabled_tests', metavar='TESTLIST',
-                        default=(),
-                        help=('comma-separated list of tests to disable; '
-                              'available values: ' +
-                              ', '.join(update_payload.CHECKS_TO_DISABLE)))
-  check_opts.add_option('-k', '--key', metavar='FILE',
-                        help=('Override standard key used for signature '
-                              'validation'))
-  check_opts.add_option('-m', '--meta-sig', metavar='FILE',
-                        help='verify metadata against its signature')
-  check_opts.add_option('-p', '--root-part-size', metavar='NUM',
-                        default=0, type='int',
-                        help=('override rootfs partition size auto-inference'))
-  check_opts.add_option('-P', '--kern-part-size', metavar='NUM',
-                        default=0, type='int',
-                        help=('override kernel partition size auto-inference'))
-  parser.add_option_group(check_opts)
+  check_args = parser.add_argument_group('Checking payload integrity')
+  check_args.add_argument('-c', '--check', action='store_true', default=False,
+                          help=('force payload integrity check (e.g. before '
+                                'applying)'))
+  check_args.add_argument('-D', '--describe', action='store_true',
+                          default=False,
+                          help='Print a friendly description of the payload.')
+  check_args.add_argument('-r', '--report', metavar='FILE',
+                          help="dump payload report (`-' for stdout)")
+  check_args.add_argument('-t', '--type', dest='assert_type',
+                          help='assert the payload type',
+                          choices=[_TYPE_FULL, _TYPE_DELTA])
+  check_args.add_argument('-z', '--block-size', metavar='NUM', default=0,
+                          type=int,
+                          help='assert a non-default (4096) payload block size')
+  check_args.add_argument('-u', '--allow-unhashed', action='store_true',
+                          default=False, help='allow unhashed operations')
+  check_args.add_argument('-d', '--disabled_tests', default=(), metavar='',
+                          help=('space-separated list of tests to disable. '
+                                'Allowed options include: ' +
+                                ', '.join(update_payload.CHECKS_TO_DISABLE)),
+                          choices=update_payload.CHECKS_TO_DISABLE)
+  check_args.add_argument('-k', '--key', metavar='FILE',
+                          help=('override standard key used for signature '
+                                'validation'))
+  check_args.add_argument('-m', '--meta-sig', metavar='FILE',
+                          help='verify metadata against its signature')
+  check_args.add_argument('-s', '--metadata-size', metavar='NUM', default=0,
+                          help='the metadata size to verify against the one '
+                          'in the payload')
+  # TODO(tbrindus): deprecated in favour of --part_sizes
+  check_args.add_argument('-p', '--root-part-size', metavar='NUM',
+                          default=0, type=int,
+                          help='override rootfs partition size auto-inference')
+  check_args.add_argument('-P', '--kern-part-size', metavar='NUM',
+                          default=0, type=int,
+                          help='override kernel partition size auto-inference')
+  check_args.add_argument('--part_sizes', metavar='NUM', nargs='+', type=int,
+                          help='override partition size auto-inference')
 
-  trace_opts = optparse.OptionGroup(parser, 'Applying payload')
-  trace_opts.add_option('-x', '--extract-bsdiff', action='store_true',
-                        default=False,
-                        help=('use temp input/output files with BSDIFF '
-                              'operations (not in-place)'))
-  trace_opts.add_option('--bspatch-path', metavar='FILE',
-                        help=('use the specified bspatch binary'))
-  trace_opts.add_option('--puffpatch-path', metavar='FILE',
-                        help=('use the specified puffpatch binary'))
-  parser.add_option_group(trace_opts)
+  apply_args = parser.add_argument_group('Applying payload')
+  # TODO(ahassani): Extend extract-bsdiff to puffdiff too.
+  apply_args.add_argument('-x', '--extract-bsdiff', action='store_true',
+                          default=False,
+                          help=('use temp input/output files with BSDIFF '
+                                'operations (not in-place)'))
+  apply_args.add_argument('--bspatch-path', metavar='FILE',
+                          help='use the specified bspatch binary')
+  apply_args.add_argument('--puffpatch-path', metavar='FILE',
+                          help='use the specified puffpatch binary')
+  # TODO(tbrindus): deprecated in favour of --dst_part_paths
+  apply_args.add_argument('--dst_kern', metavar='FILE',
+                          help='destination kernel partition file')
+  apply_args.add_argument('--dst_root', metavar='FILE',
+                          help='destination root partition file')
+  # TODO(tbrindus): deprecated in favour of --src_part_paths
+  apply_args.add_argument('--src_kern', metavar='FILE',
+                          help='source kernel partition file')
+  apply_args.add_argument('--src_root', metavar='FILE',
+                          help='source root partition file')
+  # TODO(tbrindus): deprecated in favour of --out_dst_part_paths
+  apply_args.add_argument('--out_dst_kern', metavar='FILE',
+                          help='created destination kernel partition file')
+  apply_args.add_argument('--out_dst_root', metavar='FILE',
+                          help='created destination root partition file')
 
-  trace_opts = optparse.OptionGroup(parser, 'Block tracing')
-  trace_opts.add_option('-b', '--root-block', metavar='BLOCK', type='int',
-                        help='trace the origin for a rootfs block')
-  trace_opts.add_option('-B', '--kern-block', metavar='BLOCK', type='int',
-                        help='trace the origin for a kernel block')
-  trace_opts.add_option('-s', '--skip', metavar='NUM', default='0', type='int',
-                        help='skip first NUM occurrences of traced block')
-  parser.add_option_group(trace_opts)
+  apply_args.add_argument('--src_part_paths', metavar='FILE', nargs='+',
+                          help='source partition files')
+  apply_args.add_argument('--dst_part_paths', metavar='FILE', nargs='+',
+                          help='destination partition files')
+  apply_args.add_argument('--out_dst_part_paths', metavar='FILE', nargs='+',
+                          help='created destination partition files')
+
+  parser.add_argument('payload', metavar='PAYLOAD', help='the payload file')
+  parser.add_argument('--part_names', metavar='NAME', nargs='+',
+                      help='names of partitions')
 
   # Parse command-line arguments.
-  opts, args = parser.parse_args(argv)
+  args = parser.parse_args(argv)
 
-  # Validate a value given to --type, if any.
-  if opts.assert_type not in (None, _TYPE_FULL, _TYPE_DELTA):
-    parser.error('invalid argument to --type: %s' % opts.assert_type)
+  # TODO(tbrindus): temporary workaround to keep old-style flags from breaking
+  # without having to handle both types in our code. Remove after flag usage is
+  # removed from calling scripts.
+  args.part_names = args.part_names or [common.KERNEL, common.ROOTFS]
+  args.part_sizes = args.part_sizes or [args.kern_part_size,
+                                        args.root_part_size]
+  args.src_part_paths = args.src_part_paths or [args.src_kern, args.src_root]
+  args.dst_part_paths = args.dst_part_paths or [args.dst_kern, args.dst_root]
+  args.out_dst_part_paths = args.out_dst_part_paths or [args.out_dst_kern,
+                                                        args.out_dst_root]
 
-  # Convert and validate --disabled_tests value list, if provided.
-  if opts.disabled_tests:
-    opts.disabled_tests = opts.disabled_tests.split(',')
-    for test in opts.disabled_tests:
-      if test not in update_payload.CHECKS_TO_DISABLE:
-        parser.error('invalid argument to --disabled_tests: %s' % test)
-
-  # Ensure consistent use of block tracing options.
-  do_block_trace = not (opts.root_block is None and opts.kern_block is None)
-  if opts.skip and not do_block_trace:
-    parser.error('--skip must be used with either --root-block or --kern-block')
+  # Make sure we don't have new dependencies on old flags by deleting them from
+  # the namespace here.
+  for old in ['kern_part_size', 'root_part_size', 'src_kern', 'src_root',
+              'dst_kern', 'dst_root', 'out_dst_kern', 'out_dst_root']:
+    delattr(args, old)
 
   # There are several options that imply --check.
-  opts.check = (opts.check or opts.report or opts.assert_type or
-                opts.block_size or opts.allow_unhashed or
-                opts.disabled_tests or opts.meta_sig or opts.key or
-                opts.root_part_size or opts.kern_part_size)
+  args.check = (args.check or args.report or args.assert_type or
+                args.block_size or args.allow_unhashed or
+                args.disabled_tests or args.meta_sig or args.key or
+                any(args.part_sizes) or args.metadata_size)
 
-  # Check number of arguments, enforce payload type accordingly.
-  if len(args) == 3:
-    if opts.assert_type == _TYPE_DELTA:
-      parser.error('%s payload requires source partition arguments' %
-                   _TYPE_DELTA)
-    opts.assert_type = _TYPE_FULL
-  elif len(args) == 5:
-    if opts.assert_type == _TYPE_FULL:
-      parser.error('%s payload does not accept source partition arguments' %
-                   _TYPE_FULL)
-    opts.assert_type = _TYPE_DELTA
-  elif len(args) == 1:
-    # Not applying payload; if block tracing not requested either, do an
-    # integrity check.
-    if not do_block_trace:
-      opts.check = True
-    if opts.extract_bsdiff:
-      parser.error('--extract-bsdiff can only be used when applying payloads')
-    if opts.bspatch_path:
-      parser.error('--bspatch-path can only be used when applying payloads')
-    if opts.puffpatch_path:
-      parser.error('--puffpatch-path can only be used when applying payloads')
+  for arg in ['part_sizes', 'src_part_paths', 'dst_part_paths',
+              'out_dst_part_paths']:
+    if len(args.part_names) != len(getattr(args, arg, [])):
+      parser.error('partitions in --%s do not match --part_names' % arg)
+
+  if all(args.dst_part_paths) or all(args.out_dst_part_paths):
+    if all(args.src_part_paths):
+      if args.assert_type == _TYPE_FULL:
+        parser.error('%s payload does not accept source partition arguments'
+                     % _TYPE_FULL)
+      else:
+        args.assert_type = _TYPE_DELTA
+    else:
+      if args.assert_type == _TYPE_DELTA:
+        parser.error('%s payload requires source partition arguments'
+                     % _TYPE_DELTA)
+      else:
+        args.assert_type = _TYPE_FULL
   else:
-    parser.error('unexpected number of arguments')
+    # Not applying payload.
+    if args.extract_bsdiff:
+      parser.error('--extract-bsdiff can only be used when applying payloads')
+    if args.bspatch_path:
+      parser.error('--bspatch-path can only be used when applying payloads')
+    if args.puffpatch_path:
+      parser.error('--puffpatch-path can only be used when applying payloads')
 
   # By default, look for a metadata-signature file with a name based on the name
   # of the payload we are checking. We only do it if check was triggered.
-  if opts.check and not opts.meta_sig:
-    default_meta_sig = args[0] + '.metadata-signature'
+  if args.check and not args.meta_sig:
+    default_meta_sig = args.payload + '.metadata-signature'
     if os.path.isfile(default_meta_sig):
-      opts.meta_sig = default_meta_sig
-      print('Using default metadata signature', opts.meta_sig, file=sys.stderr)
+      args.meta_sig = default_meta_sig
+      print('Using default metadata signature', args.meta_sig, file=sys.stderr)
 
-  return opts, args[0], args[1:]
+  return args
 
 
 def main(argv):
   # Parse and validate arguments.
-  options, payload_file_name, extra_args = ParseArguments(argv[1:])
+  args = ParseArguments(argv[1:])
 
-  with open(payload_file_name) as payload_file:
+  with open(args.payload) as payload_file:
     payload = update_payload.Payload(payload_file)
     try:
       # Initialize payload.
       payload.Init()
 
-      if options.describe:
+      if args.describe:
         payload.Describe()
 
       # Perform payload integrity checks.
-      if options.check:
+      if args.check:
         report_file = None
         do_close_report_file = False
         metadata_sig_file = None
         try:
-          if options.report:
-            if options.report == '-':
+          if args.report:
+            if args.report == '-':
               report_file = sys.stdout
             else:
-              report_file = open(options.report, 'w')
+              report_file = open(args.report, 'w')
               do_close_report_file = True
 
-          metadata_sig_file = options.meta_sig and open(options.meta_sig)
+          part_sizes = dict(zip(args.part_names, args.part_sizes))
+          metadata_sig_file = args.meta_sig and open(args.meta_sig)
           payload.Check(
-              pubkey_file_name=options.key,
+              pubkey_file_name=args.key,
               metadata_sig_file=metadata_sig_file,
+              metadata_size=int(args.metadata_size),
               report_out_file=report_file,
-              assert_type=options.assert_type,
-              block_size=int(options.block_size),
-              rootfs_part_size=options.root_part_size,
-              kernel_part_size=options.kern_part_size,
-              allow_unhashed=options.allow_unhashed,
-              disabled_tests=options.disabled_tests)
+              assert_type=args.assert_type,
+              block_size=int(args.block_size),
+              part_sizes=part_sizes,
+              allow_unhashed=args.allow_unhashed,
+              disabled_tests=args.disabled_tests)
         finally:
           if metadata_sig_file:
             metadata_sig_file.close()
           if do_close_report_file:
             report_file.close()
 
-      # Trace blocks.
-      if options.root_block is not None:
-        payload.TraceBlock(options.root_block, options.skip, sys.stdout, False)
-      if options.kern_block is not None:
-        payload.TraceBlock(options.kern_block, options.skip, sys.stdout, True)
-
       # Apply payload.
-      if extra_args:
-        dargs = {'bsdiff_in_place': not options.extract_bsdiff}
-        if options.bspatch_path:
-          dargs['bspatch_path'] = options.bspatch_path
-        if options.puffpatch_path:
-          dargs['puffpatch_path'] = options.puffpatch_path
-        if options.assert_type == _TYPE_DELTA:
-          dargs['old_kernel_part'] = extra_args[2]
-          dargs['old_rootfs_part'] = extra_args[3]
+      if all(args.dst_part_paths) or all(args.out_dst_part_paths):
+        dargs = {'bsdiff_in_place': not args.extract_bsdiff}
+        if args.bspatch_path:
+          dargs['bspatch_path'] = args.bspatch_path
+        if args.puffpatch_path:
+          dargs['puffpatch_path'] = args.puffpatch_path
+        if args.assert_type == _TYPE_DELTA:
+          dargs['old_parts'] = dict(zip(args.part_names, args.src_part_paths))
 
-        payload.Apply(extra_args[0], extra_args[1], **dargs)
+        out_dst_parts = {}
+        file_handles = []
+        if all(args.out_dst_part_paths):
+          for name, path in zip(args.part_names, args.out_dst_part_paths):
+            handle = open(path, 'w+')
+            file_handles.append(handle)
+            out_dst_parts[name] = handle.name
+        else:
+          for name in args.part_names:
+            handle = tempfile.NamedTemporaryFile()
+            file_handles.append(handle)
+            out_dst_parts[name] = handle.name
 
-    except update_payload.PayloadError, e:
+        payload.Apply(out_dst_parts, **dargs)
+
+        # If destination partitions are not given, this just becomes an apply
+        # operation with no comparison against the target partitions.
+        if all(args.dst_part_paths):
+          # Prior to comparing, pad the new target partitions with the unused
+          # space past the filesystem boundary so they match the size of the
+          # given partitions. truncate() here grows the file to that size.
+          for part_name, out_dst_part, dst_part in zip(args.part_names,
+                                                       file_handles,
+                                                       args.dst_part_paths):
+            out_dst_part.truncate(os.path.getsize(dst_part))
+
+            # Compare resulting partitions with the ones from the target image.
+            if not filecmp.cmp(out_dst_part.name, dst_part):
+              raise error.PayloadError(
+                  'Resulting %s partition corrupted.' % part_name)
+
+        # Close the output files. If args.out_dst_* was not given, then these
+        # files are created as temp files and will be deleted upon close().
+        for handle in file_handles:
+          handle.close()
+    except error.PayloadError, e:
       sys.stderr.write('Error: %s\n' % e)
       return 1
 
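The rewritten paycheck.py interface above replaces the old positional
kernel/rootfs arguments with parallel per-partition lists: every entry in
--part_sizes, --src_part_paths, --dst_part_paths and --out_dst_part_paths must
line up with a name in --part_names, and the script zips them into name-keyed
dictionaries before calling payload.Check() and payload.Apply(). A minimal
sketch of that pairing (not from the patch itself; the partition sizes and
file names below are made up):

from __future__ import print_function

part_names = ['kernel', 'root']                           # --part_names
part_sizes = [16 * 1024 * 1024, 512 * 1024 * 1024]        # --part_sizes
out_dst_part_paths = ['out_kernel.img', 'out_root.img']   # --out_dst_part_paths

# The same zip()-based pairing paycheck.py performs after argument parsing.
part_sizes_by_name = dict(zip(part_names, part_sizes))
out_dst_by_name = dict(zip(part_names, out_dst_part_paths))

print(part_sizes_by_name['root'])   # 536870912
print(out_dst_by_name['kernel'])    # out_kernel.img
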
diff --git a/scripts/payload_info.py b/scripts/payload_info.py
new file mode 100755
index 0000000..09a7cf7
--- /dev/null
+++ b/scripts/payload_info.py
@@ -0,0 +1,249 @@
+#!/usr/bin/env python2
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""payload_info: Show information about an update payload."""
+
+from __future__ import print_function
+
+import argparse
+import itertools
+import sys
+import textwrap
+
+import update_payload
+
+MAJOR_PAYLOAD_VERSION_CHROMEOS = 1
+MAJOR_PAYLOAD_VERSION_BRILLO = 2
+
+def DisplayValue(key, value):
+  """Print out a key, value pair with values left-aligned."""
+  if value is not None:
+    print('%-*s %s' % (28, key + ':', value))
+  else:
+    raise ValueError('Cannot display an empty value.')
+
+
+def DisplayHexData(data, indent=0):
+  """Print out binary data as a hex values."""
+  for off in range(0, len(data), 16):
+    chunk = data[off:off + 16]
+    print(' ' * indent +
+          ' '.join('%.2x' % ord(c) for c in chunk) +
+          '   ' * (16 - len(chunk)) +
+          ' | ' +
+          ''.join(c if 32 <= ord(c) < 127 else '.' for c in chunk))
+
+
+class PayloadCommand(object):
+  """Show basic information about an update payload.
+
+  This command parses an update payload and displays information from
+  its header and manifest.
+  """
+
+  def __init__(self, options):
+    self.options = options
+    self.payload = None
+
+  def _DisplayHeader(self):
+    """Show information from the payload header."""
+    header = self.payload.header
+    DisplayValue('Payload version', header.version)
+    DisplayValue('Manifest length', header.manifest_len)
+
+  def _DisplayManifest(self):
+    """Show information from the payload manifest."""
+    manifest = self.payload.manifest
+    if self.payload.header.version == MAJOR_PAYLOAD_VERSION_BRILLO:
+      DisplayValue('Number of partitions', len(manifest.partitions))
+      for partition in manifest.partitions:
+        DisplayValue('  Number of "%s" ops' % partition.partition_name,
+                     len(partition.operations))
+    else:
+      DisplayValue('Number of operations', len(manifest.install_operations))
+      DisplayValue('Number of kernel ops',
+                   len(manifest.kernel_install_operations))
+    DisplayValue('Block size', manifest.block_size)
+    DisplayValue('Minor version', manifest.minor_version)
+
+  def _DisplaySignatures(self):
+    """Show information about the signatures from the manifest."""
+    header = self.payload.header
+    if header.metadata_signature_len:
+      offset = header.size + header.manifest_len
+      DisplayValue('Metadata signatures blob',
+                   'file_offset=%d (%d bytes)' %
+                   (offset, header.metadata_signature_len))
+      # pylint: disable=invalid-unary-operand-type
+      signatures_blob = self.payload.ReadDataBlob(
+          -header.metadata_signature_len,
+          header.metadata_signature_len)
+      self._DisplaySignaturesBlob('Metadata', signatures_blob)
+    else:
+      print('No metadata signatures stored in the payload')
+
+    manifest = self.payload.manifest
+    if manifest.HasField('signatures_offset'):
+      signature_msg = 'blob_offset=%d' % manifest.signatures_offset
+      if manifest.signatures_size:
+        signature_msg += ' (%d bytes)' % manifest.signatures_size
+      DisplayValue('Payload signatures blob', signature_msg)
+      signatures_blob = self.payload.ReadDataBlob(manifest.signatures_offset,
+                                                  manifest.signatures_size)
+      self._DisplaySignaturesBlob('Payload', signatures_blob)
+    else:
+      print('No payload signatures stored in the payload')
+
+  @staticmethod
+  def _DisplaySignaturesBlob(signature_name, signatures_blob):
+    """Show information about the signatures blob."""
+    signatures = update_payload.update_metadata_pb2.Signatures()
+    signatures.ParseFromString(signatures_blob)
+    print('%s signatures: (%d entries)' %
+          (signature_name, len(signatures.signatures)))
+    for signature in signatures.signatures:
+      print('  version=%s, hex_data: (%d bytes)' %
+            (signature.version if signature.HasField('version') else None,
+             len(signature.data)))
+      DisplayHexData(signature.data, indent=4)
+
+
+  def _DisplayOps(self, name, operations):
+    """Show information about the install operations from the manifest.
+
+    The list shown includes operation type, data offset, data length, source
+    extents, source length, destination extents, and destination length.
+
+    Args:
+      name: The name you want displayed above the operation table.
+      operations: The install_operations object that you want to display
+                  information about.
+    """
+    def _DisplayExtents(extents, name):
+      """Show information about extents."""
+      num_blocks = sum([ext.num_blocks for ext in extents])
+      ext_str = ' '.join(
+          '(%s,%s)' % (ext.start_block, ext.num_blocks) for ext in extents)
+      # Make extent list wrap around at 80 chars.
+      ext_str = '\n      '.join(textwrap.wrap(ext_str, 74))
+      extent_plural = 's' if len(extents) > 1 else ''
+      block_plural = 's' if num_blocks > 1 else ''
+      print('    %s: %d extent%s (%d block%s)' %
+            (name, len(extents), extent_plural, num_blocks, block_plural))
+      print('      %s' % ext_str)
+
+    op_dict = update_payload.common.OpType.NAMES
+    print('%s:' % name)
+    for op, op_count in itertools.izip(operations, itertools.count()):
+      print('  %d: %s' % (op_count, op_dict[op.type]))
+      if op.HasField('data_offset'):
+        print('    Data offset: %s' % op.data_offset)
+      if op.HasField('data_length'):
+        print('    Data length: %s' % op.data_length)
+      if op.src_extents:
+        _DisplayExtents(op.src_extents, 'Source')
+      if op.dst_extents:
+        _DisplayExtents(op.dst_extents, 'Destination')
+
+  def _GetStats(self, manifest):
+    """Returns various statistics about a payload file.
+
+    Returns a dictionary containing the number of blocks read during payload
+    application, the number of blocks written, and the number of seeks done
+    when writing during operation application.
+    """
+    read_blocks = 0
+    written_blocks = 0
+    num_write_seeks = 0
+    if self.payload.header.version == MAJOR_PAYLOAD_VERSION_BRILLO:
+      partitions_operations = [part.operations for part in manifest.partitions]
+    else:
+      partitions_operations = [manifest.install_operations,
+                               manifest.kernel_install_operations]
+    for operations in partitions_operations:
+      last_ext = None
+      for curr_op in operations:
+        read_blocks += sum([ext.num_blocks for ext in curr_op.src_extents])
+        written_blocks += sum([ext.num_blocks for ext in curr_op.dst_extents])
+        for curr_ext in curr_op.dst_extents:
+          # See if the extent is contiguous with the last extent seen.
+          if last_ext and (curr_ext.start_block !=
+                           last_ext.start_block + last_ext.num_blocks):
+            num_write_seeks += 1
+          last_ext = curr_ext
+
+    if manifest.minor_version == 1:
+      # Rootfs and kernel are written during the filesystem copy in version 1.
+      written_blocks += manifest.old_rootfs_info.size / manifest.block_size
+      written_blocks += manifest.old_kernel_info.size / manifest.block_size
+    # Old and new rootfs and kernel are read once during verification
+    read_blocks += manifest.old_rootfs_info.size / manifest.block_size
+    read_blocks += manifest.old_kernel_info.size / manifest.block_size
+    read_blocks += manifest.new_rootfs_info.size / manifest.block_size
+    read_blocks += manifest.new_kernel_info.size / manifest.block_size
+    stats = {'read_blocks': read_blocks,
+             'written_blocks': written_blocks,
+             'num_write_seeks': num_write_seeks}
+    return stats
+
+  def _DisplayStats(self, manifest):
+    stats = self._GetStats(manifest)
+    DisplayValue('Blocks read', stats['read_blocks'])
+    DisplayValue('Blocks written', stats['written_blocks'])
+    DisplayValue('Seeks when writing', stats['num_write_seeks'])
+
+  def Run(self):
+    """Parse the update payload and display information from it."""
+    self.payload = update_payload.Payload(self.options.payload_file)
+    self.payload.Init()
+    self._DisplayHeader()
+    self._DisplayManifest()
+    if self.options.signatures:
+      self._DisplaySignatures()
+    if self.options.stats:
+      self._DisplayStats(self.payload.manifest)
+    if self.options.list_ops:
+      print()
+      if self.payload.header.version == MAJOR_PAYLOAD_VERSION_BRILLO:
+        for partition in self.payload.manifest.partitions:
+          self._DisplayOps('%s install operations' % partition.partition_name,
+                           partition.operations)
+      else:
+        self._DisplayOps('Install operations',
+                         self.payload.manifest.install_operations)
+        self._DisplayOps('Kernel install operations',
+                         self.payload.manifest.kernel_install_operations)
+
+
+def main():
+  parser = argparse.ArgumentParser(
+      description='Show information about an update payload.')
+  parser.add_argument('payload_file', type=file,
+                      help='The update payload file.')
+  parser.add_argument('--list_ops', default=False, action='store_true',
+                      help='List the install operations and their extents.')
+  parser.add_argument('--stats', default=False, action='store_true',
+                      help='Show information about overall input/output.')
+  parser.add_argument('--signatures', default=False, action='store_true',
+                      help='Show signatures stored in the payload.')
+  args = parser.parse_args()
+
+  PayloadCommand(args).Run()
+
+if __name__ == '__main__':
+  sys.exit(main())
diff --git a/scripts/payload_info_unittest.py b/scripts/payload_info_unittest.py
new file mode 100755
index 0000000..a4ee9d5
--- /dev/null
+++ b/scripts/payload_info_unittest.py
@@ -0,0 +1,357 @@
+#!/usr/bin/python2
+#
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""Unit testing payload_info.py."""
+
+from __future__ import print_function
+
+import StringIO
+import collections
+import mock
+import sys
+import unittest
+
+import payload_info
+import update_payload
+
+from contextlib import contextmanager
+
+from update_payload import update_metadata_pb2
+
+class FakePayloadError(Exception):
+  """A generic error when using the FakePayload."""
+
+class FakeOption(object):
+  """Fake options object for testing."""
+
+  def __init__(self, **kwargs):
+    self.list_ops = False
+    self.stats = False
+    self.signatures = False
+    for key, val in kwargs.iteritems():
+      setattr(self, key, val)
+    if not hasattr(self, 'payload_file'):
+      self.payload_file = None
+
+class FakeOp(object):
+  """Fake manifest operation for testing."""
+
+  def __init__(self, src_extents, dst_extents, op_type, **kwargs):
+    self.src_extents = src_extents
+    self.dst_extents = dst_extents
+    self.type = op_type
+    for key, val in kwargs.iteritems():
+      setattr(self, key, val)
+
+  def HasField(self, field):
+    return hasattr(self, field)
+
+class FakePartition(object):
+  """Fake PartitionUpdate field for testing."""
+
+  def __init__(self, partition_name, operations):
+    self.partition_name = partition_name
+    self.operations = operations
+
+class FakeManifest(object):
+  """Fake manifest for testing."""
+
+  def __init__(self, major_version):
+    FakeExtent = collections.namedtuple('FakeExtent',
+                                        ['start_block', 'num_blocks'])
+    self.install_operations = [FakeOp([],
+                                      [FakeExtent(1, 1), FakeExtent(2, 2)],
+                                      update_payload.common.OpType.REPLACE_BZ,
+                                      dst_length=3*4096,
+                                      data_offset=1,
+                                      data_length=1)]
+    self.kernel_install_operations = [FakeOp(
+        [FakeExtent(1, 1)],
+        [FakeExtent(x, x) for x in xrange(20)],
+        update_payload.common.OpType.SOURCE_COPY,
+        src_length=4096)]
+    if major_version == payload_info.MAJOR_PAYLOAD_VERSION_BRILLO:
+      self.partitions = [FakePartition('root', self.install_operations),
+                         FakePartition('kernel',
+                                       self.kernel_install_operations)]
+      self.install_operations = self.kernel_install_operations = []
+    self.block_size = 4096
+    self.minor_version = 4
+    FakePartInfo = collections.namedtuple('FakePartInfo', ['size'])
+    self.old_rootfs_info = FakePartInfo(1 * 4096)
+    self.old_kernel_info = FakePartInfo(2 * 4096)
+    self.new_rootfs_info = FakePartInfo(3 * 4096)
+    self.new_kernel_info = FakePartInfo(4 * 4096)
+    self.signatures_offset = None
+    self.signatures_size = None
+
+  def HasField(self, field_name):
+    """Fake HasField method based on the python members."""
+    return hasattr(self, field_name) and getattr(self, field_name) is not None
+
+class FakeHeader(object):
+  """Fake payload header for testing."""
+
+  def __init__(self, version, manifest_len, metadata_signature_len):
+    self.version = version
+    self.manifest_len = manifest_len
+    self.metadata_signature_len = metadata_signature_len
+
+  @property
+  def size(self):
+    return (20 if self.version == payload_info.MAJOR_PAYLOAD_VERSION_CHROMEOS
+            else 24)
+
+class FakePayload(object):
+  """Fake payload for testing."""
+
+  def __init__(self, major_version):
+    self._header = FakeHeader(major_version, 222, 0)
+    self.header = None
+    self._manifest = FakeManifest(major_version)
+    self.manifest = None
+
+    self._blobs = {}
+    self._payload_signatures = update_metadata_pb2.Signatures()
+    self._metadata_signatures = update_metadata_pb2.Signatures()
+
+  def Init(self):
+    """Fake Init that sets header and manifest.
+
+    Failing to call Init() will not make header and manifest available to the
+    test.
+    """
+    self.header = self._header
+    self.manifest = self._manifest
+
+  def ReadDataBlob(self, offset, length):
+    """Return the blob that should be present at the offset location"""
+    if offset not in self._blobs:
+      raise FakePayloadError('Requested blob at unknown offset %d' % offset)
+    blob = self._blobs[offset]
+    if len(blob) != length:
+      raise FakePayloadError('Read blob with the wrong length (expect: %d, '
+                             'actual: %d)' % (len(blob), length))
+    return blob
+
+  @staticmethod
+  def _AddSignatureToProto(proto, **kwargs):
+    """Add a new Signature element to the passed proto."""
+    new_signature = proto.signatures.add()
+    for key, val in kwargs.iteritems():
+      setattr(new_signature, key, val)
+
+  def AddPayloadSignature(self, **kwargs):
+    self._AddSignatureToProto(self._payload_signatures, **kwargs)
+    blob = self._payload_signatures.SerializeToString()
+    self._manifest.signatures_offset = 1234
+    self._manifest.signatures_size = len(blob)
+    self._blobs[self._manifest.signatures_offset] = blob
+
+  def AddMetadataSignature(self, **kwargs):
+    self._AddSignatureToProto(self._metadata_signatures, **kwargs)
+    if self._header.metadata_signature_len:
+      del self._blobs[-self._header.metadata_signature_len]
+    blob = self._metadata_signatures.SerializeToString()
+    self._header.metadata_signature_len = len(blob)
+    self._blobs[-len(blob)] = blob
+
+class PayloadCommandTest(unittest.TestCase):
+  """Test class for our PayloadCommand class."""
+
+  @contextmanager
+  def OutputCapturer(self):
+    """A tool for capturing the sys.stdout"""
+    stdout = sys.stdout
+    try:
+      sys.stdout = StringIO.StringIO()
+      yield sys.stdout
+    finally:
+      sys.stdout = stdout
+
+  def TestCommand(self, payload_cmd, payload, expected_out):
+    """A tool for testing a payload command.
+
+    It tests that a payload command which runs with a given payload produces a
+    correct output.
+    """
+    with mock.patch.object(update_payload, 'Payload', return_value=payload), \
+         self.OutputCapturer() as output:
+      payload_cmd.Run()
+    self.assertEquals(output.getvalue(), expected_out)
+
+  def testDisplayValue(self):
+    """Verify that DisplayValue prints what we expect."""
+    with self.OutputCapturer() as output:
+      payload_info.DisplayValue('key', 'value')
+    self.assertEquals(output.getvalue(), 'key:                         value\n')
+
+  def testRun(self):
+    """Verify that Run parses and displays the payload like we expect."""
+    payload_cmd = payload_info.PayloadCommand(FakeOption(action='show'))
+    payload = FakePayload(payload_info.MAJOR_PAYLOAD_VERSION_CHROMEOS)
+    expected_out = """Payload version:             1
+Manifest length:             222
+Number of operations:        1
+Number of kernel ops:        1
+Block size:                  4096
+Minor version:               4
+"""
+    self.TestCommand(payload_cmd, payload, expected_out)
+
+  def testListOpsOnVersion1(self):
+    """Verify that the --list_ops option gives the correct output."""
+    payload_cmd = payload_info.PayloadCommand(
+        FakeOption(list_ops=True, action='show'))
+    payload = FakePayload(payload_info.MAJOR_PAYLOAD_VERSION_CHROMEOS)
+    expected_out = """Payload version:             1
+Manifest length:             222
+Number of operations:        1
+Number of kernel ops:        1
+Block size:                  4096
+Minor version:               4
+
+Install operations:
+  0: REPLACE_BZ
+    Data offset: 1
+    Data length: 1
+    Destination: 2 extents (3 blocks)
+      (1,1) (2,2)
+Kernel install operations:
+  0: SOURCE_COPY
+    Source: 1 extent (1 block)
+      (1,1)
+    Destination: 20 extents (190 blocks)
+      (0,0) (1,1) (2,2) (3,3) (4,4) (5,5) (6,6) (7,7) (8,8) (9,9) (10,10)
+      (11,11) (12,12) (13,13) (14,14) (15,15) (16,16) (17,17) (18,18) (19,19)
+"""
+    self.TestCommand(payload_cmd, payload, expected_out)
+
+  def testListOpsOnVersion2(self):
+    """Verify that the --list_ops option gives the correct output."""
+    payload_cmd = payload_info.PayloadCommand(
+        FakeOption(list_ops=True, action='show'))
+    payload = FakePayload(payload_info.MAJOR_PAYLOAD_VERSION_BRILLO)
+    expected_out = """Payload version:             2
+Manifest length:             222
+Number of partitions:        2
+  Number of "root" ops:      1
+  Number of "kernel" ops:    1
+Block size:                  4096
+Minor version:               4
+
+root install operations:
+  0: REPLACE_BZ
+    Data offset: 1
+    Data length: 1
+    Destination: 2 extents (3 blocks)
+      (1,1) (2,2)
+kernel install operations:
+  0: SOURCE_COPY
+    Source: 1 extent (1 block)
+      (1,1)
+    Destination: 20 extents (190 blocks)
+      (0,0) (1,1) (2,2) (3,3) (4,4) (5,5) (6,6) (7,7) (8,8) (9,9) (10,10)
+      (11,11) (12,12) (13,13) (14,14) (15,15) (16,16) (17,17) (18,18) (19,19)
+"""
+    self.TestCommand(payload_cmd, payload, expected_out)
+
+  def testStatsOnVersion1(self):
+    """Verify that the --stats option works correctly."""
+    payload_cmd = payload_info.PayloadCommand(
+        FakeOption(stats=True, action='show'))
+    payload = FakePayload(payload_info.MAJOR_PAYLOAD_VERSION_CHROMEOS)
+    expected_out = """Payload version:             1
+Manifest length:             222
+Number of operations:        1
+Number of kernel ops:        1
+Block size:                  4096
+Minor version:               4
+Blocks read:                 11
+Blocks written:              193
+Seeks when writing:          18
+"""
+    self.TestCommand(payload_cmd, payload, expected_out)
+
+  def testStatsOnVersion2(self):
+    """Verify that the --stats option works correctly on version 2."""
+    payload_cmd = payload_info.PayloadCommand(
+        FakeOption(stats=True, action='show'))
+    payload = FakePayload(payload_info.MAJOR_PAYLOAD_VERSION_BRILLO)
+    expected_out = """Payload version:             2
+Manifest length:             222
+Number of partitions:        2
+  Number of "root" ops:      1
+  Number of "kernel" ops:    1
+Block size:                  4096
+Minor version:               4
+Blocks read:                 11
+Blocks written:              193
+Seeks when writing:          18
+"""
+    self.TestCommand(payload_cmd, payload, expected_out)
+
+  def testEmptySignatures(self):
+    """Verify that the --signatures option works with unsigned payloads."""
+    payload_cmd = payload_info.PayloadCommand(
+        FakeOption(action='show', signatures=True))
+    payload = FakePayload(payload_info.MAJOR_PAYLOAD_VERSION_CHROMEOS)
+    expected_out = """Payload version:             1
+Manifest length:             222
+Number of operations:        1
+Number of kernel ops:        1
+Block size:                  4096
+Minor version:               4
+No metadata signatures stored in the payload
+No payload signatures stored in the payload
+"""
+    self.TestCommand(payload_cmd, payload, expected_out)
+
+  def testSignatures(self):
+    """Verify that the --signatures option shows the present signatures."""
+    payload_cmd = payload_info.PayloadCommand(
+        FakeOption(action='show', signatures=True))
+    payload = FakePayload(payload_info.MAJOR_PAYLOAD_VERSION_BRILLO)
+    payload.AddPayloadSignature(version=1,
+                                data='12345678abcdefgh\x00\x01\x02\x03')
+    payload.AddPayloadSignature(data='I am a signature so access is yes.')
+    payload.AddMetadataSignature(data='\x00\x0a\x0c')
+    expected_out = """Payload version:             2
+Manifest length:             222
+Number of partitions:        2
+  Number of "root" ops:      1
+  Number of "kernel" ops:    1
+Block size:                  4096
+Minor version:               4
+Metadata signatures blob:    file_offset=246 (7 bytes)
+Metadata signatures: (1 entries)
+  version=None, hex_data: (3 bytes)
+    00 0a 0c                                        | ...
+Payload signatures blob:     blob_offset=1234 (64 bytes)
+Payload signatures: (2 entries)
+  version=1, hex_data: (20 bytes)
+    31 32 33 34 35 36 37 38 61 62 63 64 65 66 67 68 | 12345678abcdefgh
+    00 01 02 03                                     | ....
+  version=None, hex_data: (34 bytes)
+    49 20 61 6d 20 61 20 73 69 67 6e 61 74 75 72 65 | I am a signature
+    20 73 6f 20 61 63 63 65 73 73 20 69 73 20 79 65 |  so access is ye
+    73 2e                                           | s.
+"""
+    self.TestCommand(payload_cmd, payload, expected_out)
+
+if __name__ == '__main__':
+  unittest.main()
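
Two testing patterns in payload_info_unittest.py above are worth isolating: a
context manager that temporarily swaps sys.stdout for an in-memory StringIO
buffer, and mock.patch.object() to make update_payload.Payload return a fake.
A self-contained sketch of just the stdout capture, independent of the test
file and written in the same Python 2 style as the scripts:

from __future__ import print_function

import StringIO
import sys
from contextlib import contextmanager

@contextmanager
def captured_stdout():
  """Redirect sys.stdout to a StringIO buffer for the duration of the block."""
  saved = sys.stdout
  try:
    sys.stdout = StringIO.StringIO()
    yield sys.stdout
  finally:
    sys.stdout = saved

with captured_stdout() as out:
  print('hello')
assert out.getvalue() == 'hello\n'
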
diff --git a/scripts/run_unittests b/scripts/run_unittests
index c8e713d..0d301ba 100755
--- a/scripts/run_unittests
+++ b/scripts/run_unittests
@@ -25,4 +25,6 @@
   python -m update_payload."${filename%.*}"
 done
 
+./payload_info_unittest.py
+
 exit 0
diff --git a/scripts/test_paycheck.sh b/scripts/test_paycheck.sh
index c395db4..239b984 100755
--- a/scripts/test_paycheck.sh
+++ b/scripts/test_paycheck.sh
@@ -1,8 +1,19 @@
 #!/bin/bash
 #
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
+# Copyright (C) 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
 
 # A test script for paycheck.py and the update_payload.py library.
 #
@@ -21,9 +32,6 @@
 #   payload type. Another artifact is a human-readable payload report, which
 #   is output to stdout to be inspected by the user.
 #
-# - It performs a random block trace on the delta payload (both kernel and
-#   rootfs blocks), dumping the traces to stdout for the user to inspect.
-#
 # - It applies old_full_payload to yield old kernel (old_kern.part) and rootfs
 #   (old_root.part) partitions.
 #
@@ -37,11 +45,9 @@
 #   ensure that they are binary identical.
 #
 # If all steps have completed successfully we know with high certainty that
-# paycheck.py (and hence update_payload.py) correctly parses both full and
-# delta payloads, and applies them to yield the expected result. We also know
-# that tracing works, to the extent it does not crash. Manual inspection of
-# payload reports and block traces will improve this our confidence and are
-# strongly encouraged. Finally, each paycheck.py execution is timed.
+# paycheck.py (and hence update_payload.py) correctly parses both full and delta
+# payloads, and applies them to yield the expected result. Finally, each
+# paycheck.py execution is timed.
 
 
 # Stop on errors, unset variables.
@@ -55,6 +61,7 @@
 NEW_DELTA_ROOT_PART=new_delta_root.part
 NEW_FULL_KERN_PART=new_full_kern.part
 NEW_FULL_ROOT_PART=new_full_root.part
+CROS_PARTS="kernel root"
 
 
 log() {
@@ -80,35 +87,30 @@
   time ${paycheck} -t ${payload_type} ${payload_file}
 }
 
-trace_kern_block() {
-  payload_file=$1
-  block=$2
-  time ${paycheck} -B ${block} ${payload_file}
-}
-
-trace_root_block() {
-  payload_file=$1
-  block=$2
-  time ${paycheck} -b ${block} ${payload_file}
-}
-
 apply_full_payload() {
   payload_file=$1
-  dst_kern_part="$2/$3"
-  dst_root_part="$2/$4"
+  out_dst_kern_part="$2/$3"
+  out_dst_root_part="$2/$4"
 
-  time ${paycheck} ${payload_file} ${dst_kern_part} ${dst_root_part}
+  time ${paycheck} ${payload_file} \
+    --part_names ${CROS_PARTS} \
+    --out_dst_part_paths ${out_dst_kern_part} ${out_dst_root_part}
 }
 
 apply_delta_payload() {
   payload_file=$1
-  dst_kern_part="$2/$3"
-  dst_root_part="$2/$4"
-  src_kern_part="$2/$5"
-  src_root_part="$2/$6"
+  out_dst_kern_part="$2/$3"
+  out_dst_root_part="$2/$4"
+  dst_kern_part="$2/$5"
+  dst_root_part="$2/$6"
+  src_kern_part="$2/$7"
+  src_root_part="$2/$8"
 
-  time ${paycheck} ${payload_file} ${dst_kern_part} ${dst_root_part} \
-    ${src_kern_part} ${src_root_part}
+  time ${paycheck} ${payload_file} \
+    --part_names ${CROS_PARTS} \
+    --out_dst_part_paths ${out_dst_kern_part} ${out_dst_root_part} \
+    --dst_part_paths ${dst_kern_part} ${dst_root_part} \
+    --src_part_paths ${src_kern_part} ${src_root_part}
 }
 
 main() {
@@ -135,15 +137,6 @@
   check_payload "${delta_payload}" delta
   log "Done"
 
-  # Trace a random block between 0-1024 on all payloads.
-  block=$((RANDOM * 1024 / 32767))
-  log "Tracing a random block (${block}) in full/delta payloads..."
-  trace_kern_block "${new_full_payload}" ${block}
-  trace_root_block "${new_full_payload}" ${block}
-  trace_kern_block "${delta_payload}" ${block}
-  trace_root_block "${delta_payload}" ${block}
-  log "Done"
-
   # Apply full/delta payloads and verify results are identical.
   tmpdir="$(mktemp -d --tmpdir test_paycheck.XXXXXXXX)"
   log "Initiating application of payloads at $tmpdir"
@@ -153,16 +146,17 @@
     "${OLD_ROOT_PART}"
   log "Done"
 
-  log "Applying delta payload to old partitions..."
-  apply_delta_payload "${delta_payload}" "${tmpdir}" "${NEW_DELTA_KERN_PART}" \
-    "${NEW_DELTA_ROOT_PART}" "${OLD_KERN_PART}" "${OLD_ROOT_PART}"
-  log "Done"
-
   log "Applying new full payload..."
   apply_full_payload "${new_full_payload}" "${tmpdir}" "${NEW_FULL_KERN_PART}" \
     "${NEW_FULL_ROOT_PART}"
   log "Done"
 
+  log "Applying delta payload to old partitions..."
+  apply_delta_payload "${delta_payload}" "${tmpdir}" "${NEW_DELTA_KERN_PART}" \
+    "${NEW_DELTA_ROOT_PART}" "${NEW_FULL_KERN_PART}" \
+    "${NEW_FULL_ROOT_PART}" "${OLD_KERN_PART}" "${OLD_ROOT_PART}"
+  log "Done"
+
   log "Comparing results of delta and new full updates..."
   diff "${tmpdir}/${NEW_FULL_KERN_PART}" "${tmpdir}/${NEW_DELTA_KERN_PART}"
   diff "${tmpdir}/${NEW_FULL_ROOT_PART}" "${tmpdir}/${NEW_DELTA_ROOT_PART}"
diff --git a/scripts/update_device.py b/scripts/update_device.py
index 64cfbe3..5c19b89 100755
--- a/scripts/update_device.py
+++ b/scripts/update_device.py
@@ -89,7 +89,9 @@
 
     otazip = zipfile.ZipFile(otafilename, 'r')
     payload_info = otazip.getinfo(self.OTA_PAYLOAD_BIN)
-    self.offset = payload_info.header_offset + len(payload_info.FileHeader())
+    self.offset = payload_info.header_offset
+    self.offset += zipfile.sizeFileHeader
+    self.offset += len(payload_info.extra) + len(payload_info.filename)
     self.size = payload_info.file_size
     self.properties = otazip.read(self.OTA_PAYLOAD_PROPERTIES_TXT)
 
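The update_device.py fix above computes where payload.bin starts inside the OTA
zip by hand: a zip entry's data is preceded by a fixed-size local file header
(zipfile.sizeFileHeader, 30 bytes), followed by the stored file name and extra
field, so the data begins at header_offset plus those three lengths. A hedged
sketch of the same computation as a standalone helper; the archive path and
entry name below are placeholders:

import zipfile

def entry_data_offset(zip_path, entry_name):
  """Return the byte offset of an entry's raw data inside the archive."""
  with zipfile.ZipFile(zip_path, 'r') as z:
    info = z.getinfo(entry_name)
    return (info.header_offset + zipfile.sizeFileHeader +
            len(info.filename) + len(info.extra))

# offset = entry_data_offset('ota.zip', 'payload.bin')
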
diff --git a/scripts/update_payload/__init__.py b/scripts/update_payload/__init__.py
index e4a5588..8ee95e2 100644
--- a/scripts/update_payload/__init__.py
+++ b/scripts/update_payload/__init__.py
@@ -1,6 +1,18 @@
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
+#
+# Copyright (C) 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
 
 """Library for processing, verifying and applying Chrome OS update payloads."""
 
diff --git a/scripts/update_payload/applier.py b/scripts/update_payload/applier.py
index 6db7664..c63e156 100644
--- a/scripts/update_payload/applier.py
+++ b/scripts/update_payload/applier.py
@@ -1,6 +1,18 @@
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
+#
+# Copyright (C) 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
 
 """Applying a Chrome OS update payload.
 
@@ -18,6 +30,20 @@
 import bz2
 import hashlib
 import itertools
+# The lzma module is not available everywhere, so ignore a failed import when
+# it is not going to be used. For example, 'cros flash' uses devserver code
+# which eventually loads this file, but the lzma library is not included on
+# the client test devices and does not need to be, since 'cros flash' does not
+# use lzma anyway. Python 3.x includes lzma in the standard library; for
+# backward compatibility with Python 2.7, the backports-lzma package is
+# needed.
+try:
+  import lzma
+except ImportError:
+  try:
+    from backports import lzma
+  except ImportError:
+    pass
 import os
 import shutil
 import subprocess
@@ -216,7 +242,7 @@
     self.truncate_to_expected_size = truncate_to_expected_size
 
   def _ApplyReplaceOperation(self, op, op_name, out_data, part_file, part_size):
-    """Applies a REPLACE{,_BZ} operation.
+    """Applies a REPLACE{,_BZ,_XZ} operation.
 
     Args:
       op: the operation object
@@ -235,6 +261,10 @@
     if op.type == common.OpType.REPLACE_BZ:
       out_data = bz2.decompress(out_data)
       data_length = len(out_data)
+    elif op.type == common.OpType.REPLACE_XZ:
+      # pylint: disable=no-member
+      out_data = lzma.decompress(out_data)
+      data_length = len(out_data)
 
     # Write data to blocks specified in dst extents.
     data_start = 0
@@ -508,7 +538,8 @@
       # Read data blob.
       data = self.payload.ReadDataBlob(op.data_offset, op.data_length)
 
-      if op.type in (common.OpType.REPLACE, common.OpType.REPLACE_BZ):
+      if op.type in (common.OpType.REPLACE, common.OpType.REPLACE_BZ,
+                     common.OpType.REPLACE_XZ):
         self._ApplyReplaceOperation(op, op_name, data, new_part_file, part_size)
       elif op.type == common.OpType.MOVE:
         self._ApplyMoveOperation(op, op_name, new_part_file)
@@ -591,46 +622,64 @@
       _VerifySha256(new_part_file, new_part_info.hash,
                     'new ' + part_name, length=new_part_info.size)
 
-  def Run(self, new_kernel_part, new_rootfs_part, old_kernel_part=None,
-          old_rootfs_part=None):
+  def Run(self, new_parts, old_parts=None):
     """Applier entry point, invoking all update operations.
 
     Args:
-      new_kernel_part: name of dest kernel partition file
-      new_rootfs_part: name of dest rootfs partition file
-      old_kernel_part: name of source kernel partition file (optional)
-      old_rootfs_part: name of source rootfs partition file (optional)
+      new_parts: map of partition name to dest partition file
+      old_parts: map of partition name to source partition file (optional)
 
     Raises:
       PayloadError if payload application failed.
     """
+    if old_parts is None:
+      old_parts = {}
+
     self.payload.ResetFile()
 
-    # Make sure the arguments are sane and match the payload.
-    if not (new_kernel_part and new_rootfs_part):
-      raise PayloadError('missing dst {kernel,rootfs} partitions')
+    new_part_info = {}
+    old_part_info = {}
+    install_operations = []
 
-    if not (old_kernel_part or old_rootfs_part):
-      if not self.payload.IsFull():
-        raise PayloadError('trying to apply a non-full update without src '
-                           '{kernel,rootfs} partitions')
-    elif old_kernel_part and old_rootfs_part:
-      if not self.payload.IsDelta():
-        raise PayloadError('trying to apply a non-delta update onto src '
-                           '{kernel,rootfs} partitions')
+    manifest = self.payload.manifest
+    if self.payload.header.version == 1:
+      for real_name, proto_name in common.CROS_PARTITIONS:
+        new_part_info[real_name] = getattr(manifest, 'new_%s_info' % proto_name)
+        old_part_info[real_name] = getattr(manifest, 'old_%s_info' % proto_name)
+
+      install_operations.append((common.ROOTFS, manifest.install_operations))
+      install_operations.append((common.KERNEL,
+                                 manifest.kernel_install_operations))
+    else:
+      for part in manifest.partitions:
+        name = part.partition_name
+        new_part_info[name] = part.new_partition_info
+        old_part_info[name] = part.old_partition_info
+        install_operations.append((name, part.operations))
+
+    part_names = set(new_part_info.keys())  # Equivalently, old_part_info.keys()
+
+    # Make sure the arguments are sane and match the payload.
+    new_part_names = set(new_parts.keys())
+    if new_part_names != part_names:
+      raise PayloadError('missing dst partition(s) %s' %
+                         ', '.join(part_names - new_part_names))
+
+    old_part_names = set(old_parts.keys())
+    if part_names - old_part_names:
+      if self.payload.IsDelta():
+        raise PayloadError('trying to apply a delta update without src '
+                           'partition(s) %s' %
+                           ', '.join(part_names - old_part_names))
+    elif old_part_names == part_names:
+      if self.payload.IsFull():
+        raise PayloadError('trying to apply a full update onto src partitions')
     else:
       raise PayloadError('not all src partitions provided')
 
-    # Apply update to rootfs.
-    self._ApplyToPartition(
-        self.payload.manifest.install_operations, 'rootfs',
-        'install_operations', new_rootfs_part,
-        self.payload.manifest.new_rootfs_info, old_rootfs_part,
-        self.payload.manifest.old_rootfs_info)
+    for name, operations in install_operations:
+      # Apply update to partition.
+      self._ApplyToPartition(
+          operations, name, '%s_install_operations' % name, new_parts[name],
+          new_part_info[name], old_parts.get(name, None), old_part_info[name])
 
-    # Apply update to kernel update.
-    self._ApplyToPartition(
-        self.payload.manifest.kernel_install_operations, 'kernel',
-        'kernel_install_operations', new_kernel_part,
-        self.payload.manifest.new_kernel_info, old_kernel_part,
-        self.payload.manifest.old_kernel_info)
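
The applier.py rewrite above generalizes Run() from fixed kernel/rootfs
arguments to name-keyed dictionaries, matching the per-partition list carried
by major-version-2 payloads. A hedged sketch of the new calling convention via
the Payload wrapper, as paycheck.py now uses it; the payload and partition
file names are placeholders:

import update_payload

new_parts = {'kernel': 'new_kernel.img', 'root': 'new_root.img'}
old_parts = {'kernel': 'old_kernel.img', 'root': 'old_root.img'}  # delta only

with open('payload.bin') as payload_file:
  payload = update_payload.Payload(payload_file)
  payload.Init()
  # For a full payload, omit old_parts; for a delta payload, it must name a
  # source image for every partition in the payload.
  payload.Apply(new_parts, old_parts=old_parts)
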
diff --git a/scripts/update_payload/block_tracer.py b/scripts/update_payload/block_tracer.py
deleted file mode 100644
index 5caf7e3..0000000
--- a/scripts/update_payload/block_tracer.py
+++ /dev/null
@@ -1,113 +0,0 @@
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""Tracing block data source through a Chrome OS update payload.
-
-This module is used internally by the main Payload class for tracing block
-content through an update payload. This is a useful feature in debugging
-payload applying functionality in this package. The interface for invoking the
-tracer is as follows:
-
-  tracer = PayloadBlockTracer(payload)
-  tracer.Run(...)
-
-"""
-
-from __future__ import print_function
-
-from update_payload import common
-
-
-#
-# Payload block tracing.
-#
-class PayloadBlockTracer(object):
-  """Tracing the origin of block data through update instructions.
-
-  This is a short-lived object whose purpose is to isolate the logic used for
-  tracing the origin of destination partition blocks.
-
-  """
-
-  def __init__(self, payload):
-    assert payload.is_init, 'uninitialized update payload'
-    self.payload = payload
-
-  @staticmethod
-  def _TraceBlock(block, skip, trace_out_file, operations, base_name):
-    """Trace the origin of a given block through a sequence of operations.
-
-    This method tries to map the given dest block to the corresponding source
-    block from which its content originates in the course of an update. It
-    further tries to trace transitive origins through MOVE operations. It is
-    rather efficient, doing the actual tracing by means of a single reverse
-    sweep through the operation sequence. It dumps a log of operations and
-    source blocks responsible for the data in the given dest block to the
-    provided output file.
-
-    Args:
-      block: the block number to trace
-      skip: number of initial transitive origins to ignore
-      trace_out_file: a file object to dump the trace to
-      operations: the sequence of operations
-      base_name: name of the operation sequence
-    """
-    # Traverse operations backwards.
-    for op, op_name in common.OperationIter(operations, base_name,
-                                            reverse=True):
-      total_block_offset = 0
-      found = False
-
-      # Is the traced block mentioned in the dest extents?
-      for dst_ex, dst_ex_name in common.ExtentIter(op.dst_extents,
-                                                   op_name + '.dst_extents'):
-        if (block >= dst_ex.start_block
-            and block < dst_ex.start_block + dst_ex.num_blocks):
-          if skip:
-            skip -= 1
-          else:
-            total_block_offset += block - dst_ex.start_block
-            trace_out_file.write(
-                '%d: %s: found %s (total block offset: %d)\n' %
-                (block, dst_ex_name, common.FormatExtent(dst_ex),
-                 total_block_offset))
-            found = True
-            break
-
-        total_block_offset += dst_ex.num_blocks
-
-      if found:
-        # Don't trace further, unless it's a MOVE.
-        if op.type != common.OpType.MOVE:
-          break
-
-        # For MOVE, find corresponding source block and keep tracing.
-        for src_ex, src_ex_name in common.ExtentIter(op.src_extents,
-                                                     op_name + '.src_extents'):
-          if total_block_offset < src_ex.num_blocks:
-            block = src_ex.start_block + total_block_offset
-            trace_out_file.write(
-                '%s:  mapped to %s (%d)\n' %
-                (src_ex_name, common.FormatExtent(src_ex), block))
-            break
-
-          total_block_offset -= src_ex.num_blocks
-
-  def Run(self, block, skip, trace_out_file, is_kernel):
-    """Block tracer entry point, invoking the actual search.
-
-    Args:
-      block: the block number whose origin to trace
-      skip: the number of first origin mappings to skip
-      trace_out_file: file object to dump the trace to
-      is_kernel: trace through kernel (True) or rootfs (False) operations
-    """
-    if is_kernel:
-      operations = self.payload.manifest.kernel_install_operations
-      base_name = 'kernel_install_operations'
-    else:
-      operations = self.payload.manifest.install_operations
-      base_name = 'install_operations'
-
-    self._TraceBlock(block, skip, trace_out_file, operations, base_name)
diff --git a/scripts/update_payload/checker.py b/scripts/update_payload/checker.py
index 21d99a0..746d4be 100644
--- a/scripts/update_payload/checker.py
+++ b/scripts/update_payload/checker.py
@@ -1,6 +1,18 @@
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
+#
+# Copyright (C) 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
 
 """Verifying the integrity of a Chrome OS update payload.
 
@@ -16,6 +28,7 @@
 
 import array
 import base64
+import collections
 import hashlib
 import itertools
 import os
@@ -318,11 +331,12 @@
     # Reset state; these will be assigned when the manifest is checked.
     self.sigs_offset = 0
     self.sigs_size = 0
-    self.old_rootfs_fs_size = 0
-    self.old_kernel_fs_size = 0
-    self.new_rootfs_fs_size = 0
-    self.new_kernel_fs_size = 0
+    self.old_part_info = {}
+    self.new_part_info = {}
+    self.new_fs_sizes = collections.defaultdict(int)
+    self.old_fs_sizes = collections.defaultdict(int)
     self.minor_version = None
+    self.major_version = None
 
   @staticmethod
   def _CheckElem(msg, name, report, is_mandatory, is_submsg, convert=str,
@@ -352,22 +366,56 @@
     Raises:
       error.PayloadError if a mandatory element is missing.
     """
+    element_result = collections.namedtuple('element_result', ['msg', 'report'])
+
     if not msg.HasField(name):
       if is_mandatory:
         raise error.PayloadError('%smissing mandatory %s %r.' %
                                  (msg_name + ' ' if msg_name else '',
                                   'sub-message' if is_submsg else 'field',
                                   name))
-      return None, None
+      return element_result(None, None)
 
     value = getattr(msg, name)
     if is_submsg:
-      return value, report and report.AddSubReport(name)
+      return element_result(value, report and report.AddSubReport(name))
     else:
       if report:
         report.AddField(name, convert(value), linebreak=linebreak,
                         indent=indent)
-      return value, None
+      return element_result(value, None)
+
+  @staticmethod
+  def _CheckRepeatedElemNotPresent(msg, field_name, msg_name):
+    """Checks that a repeated element is not specified in the message.
+
+    Args:
+      msg: The message containing the element.
+      field_name: The name of the element.
+      msg_name: The name of the message object (for error reporting).
+
+    Raises:
+      error.PayloadError if the repeated element is present or non-empty.
+    """
+    if getattr(msg, field_name, None):
+      raise error.PayloadError('%sfield %r not empty.' %
+                               (msg_name + ' ' if msg_name else '', field_name))
+
+  @staticmethod
+  def _CheckElemNotPresent(msg, field_name, msg_name):
+    """Checks that an element is not specified in the message.
+
+    Args:
+      msg: The message containing the element.
+      field_name: The name of the element.
+      msg_name: The name of the message object (for error reporting).
+
+    Raises:
+      error.PayloadError if the element is present.
+    """
+    if msg.HasField(field_name):
+      raise error.PayloadError('%sfield %r exists.' %
+                               (msg_name + ' ' if msg_name else '', field_name))
 
   @staticmethod
   def _CheckMandatoryField(msg, field_name, report, msg_name, convert=str,
@@ -417,6 +465,22 @@
                                 ' in ' + obj_name if obj_name else ''))
 
   @staticmethod
+  def _CheckPresentIffMany(vals, name, obj_name):
+    """Checks that a set of vals and names imply every other element.
+
+    Args:
+      vals: The set of values to be compared.
+      name: The name of the objects holding the corresponding value.
+      obj_name: Name of the object containing these values.
+
+    Raises:
+      error.PayloadError if assertion does not hold.
+    """
+    if any(vals) and not all(vals):
+      raise error.PayloadError('%r is not present in all values%s.' %
+                               (name, ' in ' + obj_name if obj_name else ''))
+
+  @staticmethod
   def _Run(cmd, send_data=None):
     """Runs a subprocess, returns its output.
 
@@ -528,13 +592,12 @@
       raise error.PayloadError('Unsupported minor version: %d' %
                                self.minor_version)
 
-  def _CheckManifest(self, report, rootfs_part_size=0, kernel_part_size=0):
+  def _CheckManifest(self, report, part_sizes=None):
     """Checks the payload manifest.
 
     Args:
       report: A report object to add to.
-      rootfs_part_size: Size of the rootfs partition in bytes.
-      kernel_part_size: Size of the kernel partition in bytes.
+      part_sizes: Map of partition label to partition size in bytes.
 
     Returns:
       A tuple consisting of the partition block size used during the update
@@ -543,6 +606,9 @@
     Raises:
       error.PayloadError if any of the checks fail.
     """
+    self.major_version = self.payload.header.version
+
+    part_sizes = collections.defaultdict(int, part_sizes)
     manifest = self.payload.manifest
     report.AddSection('manifest')
 
@@ -561,39 +627,57 @@
     self._CheckPresentIff(self.sigs_offset, self.sigs_size,
                           'signatures_offset', 'signatures_size', 'manifest')
 
-    # Check: old_kernel_info <==> old_rootfs_info.
-    oki_msg, oki_report = self._CheckOptionalSubMsg(manifest,
-                                                    'old_kernel_info', report)
-    ori_msg, ori_report = self._CheckOptionalSubMsg(manifest,
-                                                    'old_rootfs_info', report)
-    self._CheckPresentIff(oki_msg, ori_msg, 'old_kernel_info',
-                          'old_rootfs_info', 'manifest')
-    if oki_msg:  # equivalently, ori_msg
+    if self.major_version == 1:
+      for real_name, proto_name in common.CROS_PARTITIONS:
+        self.old_part_info[real_name] = self._CheckOptionalSubMsg(
+            manifest, 'old_%s_info' % proto_name, report)
+        self.new_part_info[real_name] = self._CheckMandatorySubMsg(
+            manifest, 'new_%s_info' % proto_name, report, 'manifest')
+
+      # Check: old_kernel_info <==> old_rootfs_info.
+      self._CheckPresentIff(self.old_part_info[common.KERNEL].msg,
+                            self.old_part_info[common.ROOTFS].msg,
+                            'old_kernel_info', 'old_rootfs_info', 'manifest')
+    else:
+      for part in manifest.partitions:
+        name = part.partition_name
+        self.old_part_info[name] = self._CheckOptionalSubMsg(
+            part, 'old_partition_info', report)
+        self.new_part_info[name] = self._CheckMandatorySubMsg(
+            part, 'new_partition_info', report, 'manifest.partitions')
+
+      # Check: Old-style partition infos should not be specified.
+      for _, part in common.CROS_PARTITIONS:
+        self._CheckElemNotPresent(manifest, 'old_%s_info' % part, 'manifest')
+        self._CheckElemNotPresent(manifest, 'new_%s_info' % part, 'manifest')
+
+      # Check: If old_partition_info is specified anywhere, it must be
+      # specified everywhere.
+      old_part_msgs = [part.msg for part in self.old_part_info.values() if part]
+      self._CheckPresentIffMany(old_part_msgs, 'old_partition_info',
+                                'manifest.partitions')
+
+    is_delta = any(part and part.msg for part in self.old_part_info.values())
+    if is_delta:
       # Assert/mark delta payload.
       if self.payload_type == _TYPE_FULL:
         raise error.PayloadError(
             'Apparent full payload contains old_{kernel,rootfs}_info.')
       self.payload_type = _TYPE_DELTA
 
-      # Check: {size, hash} present in old_{kernel,rootfs}_info.
-      self.old_kernel_fs_size = self._CheckMandatoryField(
-          oki_msg, 'size', oki_report, 'old_kernel_info')
-      self._CheckMandatoryField(oki_msg, 'hash', oki_report, 'old_kernel_info',
-                                convert=common.FormatSha256)
-      self.old_rootfs_fs_size = self._CheckMandatoryField(
-          ori_msg, 'size', ori_report, 'old_rootfs_info')
-      self._CheckMandatoryField(ori_msg, 'hash', ori_report, 'old_rootfs_info',
-                                convert=common.FormatSha256)
+      for part, (msg, part_report) in self.old_part_info.iteritems():
+        # Check: {size, hash} present in old_{kernel,rootfs}_info.
+        field = 'old_%s_info' % part
+        self.old_fs_sizes[part] = self._CheckMandatoryField(msg, 'size',
+                                                            part_report, field)
+        self._CheckMandatoryField(msg, 'hash', part_report, field,
+                                  convert=common.FormatSha256)
 
-      # Check: old_{kernel,rootfs} size must fit in respective partition.
-      if kernel_part_size and self.old_kernel_fs_size > kernel_part_size:
-        raise error.PayloadError(
-            'Old kernel content (%d) exceed partition size (%d).' %
-            (self.old_kernel_fs_size, kernel_part_size))
-      if rootfs_part_size and self.old_rootfs_fs_size > rootfs_part_size:
-        raise error.PayloadError(
-            'Old rootfs content (%d) exceed partition size (%d).' %
-            (self.old_rootfs_fs_size, rootfs_part_size))
+        # Check: old_{kernel,rootfs} size must fit in respective partition.
+        if self.old_fs_sizes[part] > part_sizes[part] > 0:
+          raise error.PayloadError(
+              'Old %s content (%d) exceeds partition size (%d).' %
+              (part, self.old_fs_sizes[part], part_sizes[part]))
     else:
       # Assert/mark full payload.
       if self.payload_type == _TYPE_DELTA:
@@ -601,31 +685,19 @@
             'Apparent delta payload missing old_{kernel,rootfs}_info.')
       self.payload_type = _TYPE_FULL
 
-    # Check: new_kernel_info present; contains {size, hash}.
-    nki_msg, nki_report = self._CheckMandatorySubMsg(
-        manifest, 'new_kernel_info', report, 'manifest')
-    self.new_kernel_fs_size = self._CheckMandatoryField(
-        nki_msg, 'size', nki_report, 'new_kernel_info')
-    self._CheckMandatoryField(nki_msg, 'hash', nki_report, 'new_kernel_info',
-                              convert=common.FormatSha256)
+    # Check: new_{kernel,rootfs}_info present; contains {size, hash}.
+    for part, (msg, part_report) in self.new_part_info.iteritems():
+      field = 'new_%s_info' % part
+      self.new_fs_sizes[part] = self._CheckMandatoryField(msg, 'size',
+                                                          part_report, field)
+      self._CheckMandatoryField(msg, 'hash', part_report, field,
+                                convert=common.FormatSha256)
 
-    # Check: new_rootfs_info present; contains {size, hash}.
-    nri_msg, nri_report = self._CheckMandatorySubMsg(
-        manifest, 'new_rootfs_info', report, 'manifest')
-    self.new_rootfs_fs_size = self._CheckMandatoryField(
-        nri_msg, 'size', nri_report, 'new_rootfs_info')
-    self._CheckMandatoryField(nri_msg, 'hash', nri_report, 'new_rootfs_info',
-                              convert=common.FormatSha256)
-
-    # Check: new_{kernel,rootfs} size must fit in respective partition.
-    if kernel_part_size and self.new_kernel_fs_size > kernel_part_size:
-      raise error.PayloadError(
-          'New kernel content (%d) exceed partition size (%d).' %
-          (self.new_kernel_fs_size, kernel_part_size))
-    if rootfs_part_size and self.new_rootfs_fs_size > rootfs_part_size:
-      raise error.PayloadError(
-          'New rootfs content (%d) exceed partition size (%d).' %
-          (self.new_rootfs_fs_size, rootfs_part_size))
+      # Check: new_{kernel,rootfs} size must fit in respective partition.
+      if self.new_fs_sizes[part] > part_sizes[part] > 0:
+        raise error.PayloadError(
+            'New %s content (%d) exceeds partition size (%d).' %
+            (part, self.new_fs_sizes[part], part_sizes[part]))
 
     # Check: minor_version makes sense for the payload type. This check should
     # run after the payload type has been set.
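
A brief aside on the size-check idiom used in the hunk above (illustration only; the dictionary contents below are made up): part_sizes is re-wrapped as a defaultdict(int), so unknown partition labels resolve to 0, and the chained comparison fs_size > part_sizes[part] > 0 only fires when a positive limit is actually known.

    import collections

    # Hypothetical sizes, purely to illustrate the check above.
    part_sizes = collections.defaultdict(int, {'root': 100})

    fs_size = 150
    if fs_size > part_sizes['root'] > 0:  # 150 > 100 > 0 -> the checker raises
      print 'root content (%d) exceeds partition size (%d)' % (
          fs_size, part_sizes['root'])

    # A label with no configured size resolves to 0, so the comparison is
    # False and no limit is enforced for it.
    assert not (fs_size > part_sizes['kernel'] > 0)
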
@@ -702,7 +774,7 @@
     return total_num_blocks
 
   def _CheckReplaceOperation(self, op, data_length, total_dst_blocks, op_name):
-    """Specific checks for REPLACE/REPLACE_BZ operations.
+    """Specific checks for REPLACE/REPLACE_BZ/REPLACE_XZ operations.
 
     Args:
       op: The operation object from the manifest.
@@ -726,7 +798,7 @@
                                            self.block_size,
                                            op_name + '.data_length', 'dst')
     else:
-      # Check: data_length must be smaller than the alotted dst blocks.
+      # Check: data_length must be smaller than the allotted dst blocks.
       if data_length >= total_dst_blocks * self.block_size:
         raise error.PayloadError(
             '%s: data_length (%d) must be less than allotted dst block '
@@ -851,7 +923,7 @@
     if data_length is None:
       raise error.PayloadError('%s: missing data_{offset,length}.' % op_name)
 
-    # Check: data_length is strictly smaller than the alotted dst blocks.
+    # Check: data_length is strictly smaller than the allotted dst blocks.
     if data_length >= total_dst_blocks * self.block_size:
       raise error.PayloadError(
           '%s: data_length (%d) must be smaller than allotted dst space '
@@ -997,6 +1069,9 @@
     # Type-specific checks.
     if op.type in (common.OpType.REPLACE, common.OpType.REPLACE_BZ):
       self._CheckReplaceOperation(op, data_length, total_dst_blocks, op_name)
+    elif op.type == common.OpType.REPLACE_XZ and (self.minor_version >= 3 or
+                                                  self.major_version >= 2):
+      self._CheckReplaceOperation(op, data_length, total_dst_blocks, op_name)
     elif op.type == common.OpType.MOVE and self.minor_version == 1:
       self._CheckMoveOperation(op, data_offset, total_src_blocks,
                                total_dst_blocks, op_name)
@@ -1071,6 +1146,7 @@
     op_counts = {
         common.OpType.REPLACE: 0,
         common.OpType.REPLACE_BZ: 0,
+        common.OpType.REPLACE_XZ: 0,
         common.OpType.MOVE: 0,
         common.OpType.ZERO: 0,
         common.OpType.BSDIFF: 0,
@@ -1083,6 +1159,7 @@
     op_blob_totals = {
         common.OpType.REPLACE: 0,
         common.OpType.REPLACE_BZ: 0,
+        common.OpType.REPLACE_XZ: 0,
         # MOVE operations don't have blobs.
         common.OpType.BSDIFF: 0,
         # SOURCE_COPY operations don't have blobs.
@@ -1210,17 +1287,16 @@
         raise error.PayloadError('Unknown signature version (%d).' %
                                  sig.version)
 
-  def Run(self, pubkey_file_name=None, metadata_sig_file=None,
-          rootfs_part_size=0, kernel_part_size=0, report_out_file=None):
+  def Run(self, pubkey_file_name=None, metadata_sig_file=None, metadata_size=0,
+          part_sizes=None, report_out_file=None):
     """Checker entry point, invoking all checks.
 
     Args:
       pubkey_file_name: Public key used for signature verification.
       metadata_sig_file: Metadata signature, if verification is desired.
-      rootfs_part_size: The size of rootfs partitions in bytes (default: infer
-                        based on payload type and version).
-      kernel_part_size: The size of kernel partitions in bytes (default: use
-                        reported filesystem size).
+      metadata_size: Metadata size, if verification is desired.
+      part_sizes: Mapping of partition label to size in bytes (default: infer
+        based on payload type and version or filesystem).
       report_out_file: File object to dump the report to.
 
     Raises:
@@ -1237,6 +1313,12 @@
     self.payload.ResetFile()
 
     try:
+      # Check metadata_size (if provided).
+      if metadata_size and self.payload.data_offset != metadata_size:
+        raise error.PayloadError('Invalid payload metadata size in payload (%d) '
+                                 'vs given (%d).' % (self.payload.data_offset,
+                                                   metadata_size))
+
       # Check metadata signature (if provided).
       if metadata_sig_file:
         metadata_sig = base64.b64decode(metadata_sig_file.read())
@@ -1247,52 +1329,64 @@
       # Part 1: Check the file header.
       report.AddSection('header')
       # Check: Payload version is valid.
-      if self.payload.header.version != 1:
+      if self.payload.header.version not in (1, 2):
         raise error.PayloadError('Unknown payload version (%d).' %
                                  self.payload.header.version)
       report.AddField('version', self.payload.header.version)
       report.AddField('manifest len', self.payload.header.manifest_len)
 
       # Part 2: Check the manifest.
-      self._CheckManifest(report, rootfs_part_size, kernel_part_size)
+      self._CheckManifest(report, part_sizes)
       assert self.payload_type, 'payload type should be known by now'
 
-      # Infer the usable partition size when validating rootfs operations:
-      # - If rootfs partition size was provided, use that.
-      # - Otherwise, if this is an older delta (minor version < 2), stick with
-      #   a known constant size. This is necessary because older deltas may
-      #   exceed the filesystem size when moving data blocks around.
-      # - Otherwise, use the encoded filesystem size.
-      new_rootfs_usable_size = self.new_rootfs_fs_size
-      old_rootfs_usable_size = self.old_rootfs_fs_size
-      if rootfs_part_size:
-        new_rootfs_usable_size = rootfs_part_size
-        old_rootfs_usable_size = rootfs_part_size
-      elif self.payload_type == _TYPE_DELTA and self.minor_version in (None, 1):
-        new_rootfs_usable_size = _OLD_DELTA_USABLE_PART_SIZE
-        old_rootfs_usable_size = _OLD_DELTA_USABLE_PART_SIZE
+      manifest = self.payload.manifest
 
-      # Part 3: Examine rootfs operations.
-      # TODO(garnold)(chromium:243559) only default to the filesystem size if
-      # no explicit size provided *and* the partition size is not embedded in
-      # the payload; see issue for more details.
-      report.AddSection('rootfs operations')
-      total_blob_size = self._CheckOperations(
-          self.payload.manifest.install_operations, report,
-          'install_operations', self.old_rootfs_fs_size,
-          self.new_rootfs_fs_size, old_rootfs_usable_size,
-          new_rootfs_usable_size, 0, False)
+      # Part 3: Examine partition operations.
+      install_operations = []
+      if self.major_version == 1:
+        # The 'partitions' field must never exist in major version 1 payloads.
+        self._CheckRepeatedElemNotPresent(manifest, 'partitions', 'manifest')
 
-      # Part 4: Examine kernel operations.
-      # TODO(garnold)(chromium:243559) as above.
-      report.AddSection('kernel operations')
-      total_blob_size += self._CheckOperations(
-          self.payload.manifest.kernel_install_operations, report,
-          'kernel_install_operations', self.old_kernel_fs_size,
-          self.new_kernel_fs_size,
-          kernel_part_size if kernel_part_size else self.old_kernel_fs_size,
-          kernel_part_size if kernel_part_size else self.new_kernel_fs_size,
-          total_blob_size, True)
+        install_operations.append((common.ROOTFS, manifest.install_operations))
+        install_operations.append((common.KERNEL,
+                                   manifest.kernel_install_operations))
+
+      else:
+        self._CheckRepeatedElemNotPresent(manifest, 'install_operations',
+                                          'manifest')
+        self._CheckRepeatedElemNotPresent(manifest, 'kernel_install_operations',
+                                          'manifest')
+
+        for update in manifest.partitions:
+          install_operations.append((update.partition_name, update.operations))
+
+      total_blob_size = 0
+      for part, operations in install_operations:
+        report.AddSection('%s operations' % part)
+
+        new_fs_usable_size = self.new_fs_sizes[part]
+        old_fs_usable_size = self.old_fs_sizes[part]
+
+        if part_sizes.get(part, None):
+          new_fs_usable_size = old_fs_usable_size = part_sizes[part]
+        # Infer the usable partition size when validating rootfs operations:
+        # - If rootfs partition size was provided, use that.
+        # - Otherwise, if this is an older delta (minor version < 2), stick with
+        #   a known constant size. This is necessary because older deltas may
+        #   exceed the filesystem size when moving data blocks around.
+        # - Otherwise, use the encoded filesystem size.
+        elif self.payload_type == _TYPE_DELTA and part == common.ROOTFS and \
+            self.minor_version in (None, 1):
+          new_fs_usable_size = old_fs_usable_size = _OLD_DELTA_USABLE_PART_SIZE
+
+        # TODO(garnold)(chromium:243559) only default to the filesystem size if
+        # no explicit size provided *and* the partition size is not embedded in
+        # the payload; see issue for more details.
+        total_blob_size += self._CheckOperations(
+            operations, report, '%s_install_operations' % part,
+            self.old_fs_sizes[part], self.new_fs_sizes[part],
+            old_fs_usable_size, new_fs_usable_size, total_blob_size,
+            self.major_version == 1 and part == common.KERNEL)
 
       # Check: Operations data reach the end of the payload file.
       used_payload_size = self.payload.data_offset + total_blob_size
@@ -1301,11 +1395,11 @@
             'Used payload size (%d) different from actual file size (%d).' %
             (used_payload_size, payload_file_size))
 
-      # Part 5: Handle payload signatures message.
+      # Part 4: Handle payload signatures message.
       if self.check_payload_sig and self.sigs_size:
         self._CheckSignatures(report, pubkey_file_name)
 
-      # Part 6: Summary.
+      # Part 5: Summary.
       report.AddSection('summary')
       report.AddField('update type', self.payload_type)
 
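For readers following the interface change above, here is a minimal sketch of driving the reworked checker entry point after this patch. The payload path, key path, metadata size, and partition sizes are hypothetical placeholders; only the keyword arguments and the common.ROOTFS / common.KERNEL labels come from the diff.

    import sys

    from update_payload import checker
    from update_payload import common
    from update_payload import payload

    # Hypothetical file names and sizes; keyword names are from this change.
    with open('payload.bin') as payload_file:
      p = payload.Payload(payload_file)
      p.Init()
      checker.PayloadChecker(p).Run(
          pubkey_file_name='payload-key.pub',        # assumed key path
          metadata_size=246,                         # checked against data_offset
          part_sizes={common.ROOTFS: 2 * 1024**3,    # assumed 2 GiB rootfs
                      common.KERNEL: 64 * 1024**2},  # assumed 64 MiB kernel
          report_out_file=sys.stdout)
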
diff --git a/scripts/update_payload/checker_unittest.py b/scripts/update_payload/checker_unittest.py
index a21c2ba..98bf612 100755
--- a/scripts/update_payload/checker_unittest.py
+++ b/scripts/update_payload/checker_unittest.py
@@ -1,8 +1,19 @@
 #!/usr/bin/python2
 #
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
+# Copyright (C) 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
 
 """Unit testing checker.py."""
 
@@ -474,13 +485,16 @@
                    fail_bad_nki or fail_bad_nri or fail_old_kernel_fs_size or
                    fail_old_rootfs_fs_size or fail_new_kernel_fs_size or
                    fail_new_rootfs_fs_size)
+    part_sizes = {
+        common.ROOTFS: rootfs_part_size,
+        common.KERNEL: kernel_part_size
+    }
+
     if should_fail:
       self.assertRaises(PayloadError, payload_checker._CheckManifest, report,
-                        rootfs_part_size, kernel_part_size)
+                        part_sizes)
     else:
-      self.assertIsNone(payload_checker._CheckManifest(report,
-                                                       rootfs_part_size,
-                                                       kernel_part_size))
+      self.assertIsNone(payload_checker._CheckManifest(report, part_sizes))
 
   def testCheckLength(self):
     """Tests _CheckLength()."""
@@ -620,6 +634,41 @@
         PayloadError, payload_checker._CheckReplaceOperation,
         op, data_length, (data_length + block_size - 1) / block_size, 'foo')
 
+  def testCheckReplaceXzOperation(self):
+    """Tests _CheckReplaceOperation() where op.type == REPLACE_XZ."""
+    payload_checker = checker.PayloadChecker(self.MockPayload())
+    block_size = payload_checker.block_size
+    data_length = block_size * 3
+
+    op = self.mox.CreateMock(
+        update_metadata_pb2.InstallOperation)
+    op.type = common.OpType.REPLACE_XZ
+
+    # Pass.
+    op.src_extents = []
+    self.assertIsNone(
+        payload_checker._CheckReplaceOperation(
+            op, data_length, (data_length + block_size - 1) / block_size + 5,
+            'foo'))
+
+    # Fail, src extents found.
+    op.src_extents = ['bar']
+    self.assertRaises(
+        PayloadError, payload_checker._CheckReplaceOperation,
+        op, data_length, (data_length + block_size - 1) / block_size + 5, 'foo')
+
+    # Fail, missing data.
+    op.src_extents = []
+    self.assertRaises(
+        PayloadError, payload_checker._CheckReplaceOperation,
+        op, None, (data_length + block_size - 1) / block_size, 'foo')
+
+    # Fail, too few blocks to justify XZ.
+    op.src_extents = []
+    self.assertRaises(
+        PayloadError, payload_checker._CheckReplaceOperation,
+        op, data_length, (data_length + block_size - 1) / block_size, 'foo')
+
   def testCheckMoveOperation_Pass(self):
     """Tests _CheckMoveOperation(); pass case."""
     payload_checker = checker.PayloadChecker(self.MockPayload())
@@ -792,8 +841,8 @@
     """Parametric testing of _CheckOperation().
 
     Args:
-      op_type_name: 'REPLACE', 'REPLACE_BZ', 'MOVE', 'BSDIFF', 'SOURCE_COPY',
-        'SOURCE_BSDIFF', BROTLI_BSDIFF or 'PUFFDIFF'.
+      op_type_name: 'REPLACE', 'REPLACE_BZ', 'REPLACE_XZ', 'MOVE', 'BSDIFF',
+        'SOURCE_COPY', 'SOURCE_BSDIFF', 'BROTLI_BSDIFF' or 'PUFFDIFF'.
       is_last: Whether we're testing the last operation in a sequence.
       allow_signature: Whether we're testing a signature-capable operation.
       allow_unhashed: Whether we're allowing to not hash the data.
@@ -842,12 +891,16 @@
                           self.NewExtentList((1, 16)))
         total_src_blocks = 16
 
+    # TODO(tbrindus): add major version 2 tests.
+    payload_checker.major_version = 1
     if op_type in (common.OpType.REPLACE, common.OpType.REPLACE_BZ):
       payload_checker.minor_version = 0
     elif op_type in (common.OpType.MOVE, common.OpType.BSDIFF):
       payload_checker.minor_version = 2 if fail_bad_minor_version else 1
     elif op_type in (common.OpType.SOURCE_COPY, common.OpType.SOURCE_BSDIFF):
       payload_checker.minor_version = 1 if fail_bad_minor_version else 2
+    if op_type == common.OpType.REPLACE_XZ:
+      payload_checker.minor_version = 2 if fail_bad_minor_version else 3
     elif op_type in (common.OpType.ZERO, common.OpType.DISCARD,
                      common.OpType.BROTLI_BSDIFF):
       payload_checker.minor_version = 3 if fail_bad_minor_version else 4
@@ -1037,7 +1090,10 @@
     report = checker._PayloadReport()
 
     # We have to check the manifest first in order to set signature attributes.
-    payload_checker._CheckManifest(report, rootfs_part_size, kernel_part_size)
+    payload_checker._CheckManifest(report, {
+        common.ROOTFS: rootfs_part_size,
+        common.KERNEL: kernel_part_size
+    })
 
     should_fail = (fail_empty_sigs_blob or fail_missing_pseudo_op or
                    fail_mismatched_pseudo_op or fail_sig_missing_fields or
@@ -1079,8 +1135,8 @@
 
   def DoRunTest(self, rootfs_part_size_provided, kernel_part_size_provided,
                 fail_wrong_payload_type, fail_invalid_block_size,
-                fail_mismatched_block_size, fail_excess_data,
-                fail_rootfs_part_size_exceeded,
+                fail_mismatched_metadata_size, fail_mismatched_block_size,
+                fail_excess_data, fail_rootfs_part_size_exceeded,
                 fail_kernel_part_size_exceeded):
     """Tests Run()."""
     # Generate a test payload. For this test, we generate a full update that
@@ -1130,6 +1186,11 @@
     else:
       use_block_size = block_size
 
+    # For the unittests, 246 is the metadata size generated for the test payload.
+    metadata_size = 246
+    if fail_mismatched_metadata_size:
+      metadata_size += 1
+
     kwargs = {
         'payload_gen_dargs': {
             'privkey_file_name': test_utils._PRIVKEY_FILE_NAME,
@@ -1146,11 +1207,15 @@
       payload_checker = _GetPayloadChecker(payload_gen.WriteToFileWithData,
                                            **kwargs)
 
-      kwargs = {'pubkey_file_name': test_utils._PUBKEY_FILE_NAME,
-                'rootfs_part_size': rootfs_part_size,
-                'kernel_part_size': kernel_part_size}
+      kwargs = {
+          'pubkey_file_name': test_utils._PUBKEY_FILE_NAME,
+          'metadata_size': metadata_size,
+          'part_sizes': {
+              common.KERNEL: kernel_part_size,
+              common.ROOTFS: rootfs_part_size}}
+
       should_fail = (fail_wrong_payload_type or fail_mismatched_block_size or
-                     fail_excess_data or
+                     fail_mismatched_metadata_size or fail_excess_data or
                      fail_rootfs_part_size_exceeded or
                      fail_kernel_part_size_exceeded)
       if should_fail:
@@ -1170,10 +1235,13 @@
   """Returns True iff the combination of arguments represents a valid test."""
   op_type = _OpTypeByName(op_type_name)
 
-  # REPLACE/REPLACE_BZ operations don't read data from src partition. They are
-  # compatible with all valid minor versions, so we don't need to check that.
-  if (op_type in (common.OpType.REPLACE, common.OpType.REPLACE_BZ) and (
-      fail_src_extents or fail_src_length or fail_bad_minor_version)):
+  # REPLACE/REPLACE_BZ/REPLACE_XZ operations don't read data from src
+  # partition. They are compatible with all valid minor versions, so we don't
+  # need to check that.
+  if (op_type in (common.OpType.REPLACE, common.OpType.REPLACE_BZ,
+                  common.OpType.REPLACE_XZ) and (fail_src_extents or
+                                                 fail_src_length or
+                                                 fail_bad_minor_version)):
     return False
 
   # MOVE and SOURCE_COPY operations don't carry data.
@@ -1259,8 +1327,8 @@
 
   # Add all _CheckOperation() test cases.
   AddParametricTests('CheckOperation',
-                     {'op_type_name': ('REPLACE', 'REPLACE_BZ', 'MOVE',
-                                       'BSDIFF', 'SOURCE_COPY',
+                     {'op_type_name': ('REPLACE', 'REPLACE_BZ', 'REPLACE_XZ',
+                                       'MOVE', 'BSDIFF', 'SOURCE_COPY',
                                        'SOURCE_BSDIFF', 'PUFFDIFF',
                                        'BROTLI_BSDIFF'),
                       'is_last': (True, False),
@@ -1302,6 +1370,7 @@
                       'kernel_part_size_provided': (True, False),
                       'fail_wrong_payload_type': (True, False),
                       'fail_invalid_block_size': (True, False),
+                      'fail_mismatched_metadata_size': (True, False),
                       'fail_mismatched_block_size': (True, False),
                       'fail_excess_data': (True, False),
                       'fail_rootfs_part_size_exceeded': (True, False),
diff --git a/scripts/update_payload/common.py b/scripts/update_payload/common.py
index ac05ccd..9061a75 100644
--- a/scripts/update_payload/common.py
+++ b/scripts/update_payload/common.py
@@ -1,6 +1,18 @@
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
+#
+# Copyright (C) 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
 
 """Utilities for update payload processing."""
 
@@ -30,6 +42,11 @@
 BROTLI_BSDIFF_MINOR_PAYLOAD_VERSION = 4
 PUFFDIFF_MINOR_PAYLOAD_VERSION = 5
 
+KERNEL = 'kernel'
+ROOTFS = 'root'
+# Tuple of (name in system, name in protobuf).
+CROS_PARTITIONS = ((KERNEL, KERNEL), (ROOTFS, 'rootfs'))
+
 #
 # Payload operation types.
 #
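
As a small sketch of the new constants above: CROS_PARTITIONS pairs the on-disk partition label with the legacy protobuf name, and the checker derives the old-style manifest field names from the latter for major version 1 payloads. The loop below is illustration only.

    from update_payload import common

    # Print the (name in system, name in protobuf) pairs and the legacy
    # manifest field names derived from them.
    for real_name, proto_name in common.CROS_PARTITIONS:
      print '%-6s -> old_%s_info / new_%s_info' % (real_name,
                                                   proto_name, proto_name)
    # kernel -> old_kernel_info / new_kernel_info
    # root   -> old_rootfs_info / new_rootfs_info
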
diff --git a/scripts/update_payload/error.py b/scripts/update_payload/error.py
index 8b9cadd..6f95433 100644
--- a/scripts/update_payload/error.py
+++ b/scripts/update_payload/error.py
@@ -1,6 +1,18 @@
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
+#
+# Copyright (C) 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
 
 """Payload handling errors."""
 
diff --git a/scripts/update_payload/format_utils.py b/scripts/update_payload/format_utils.py
index 2c3775c..6248ba9 100644
--- a/scripts/update_payload/format_utils.py
+++ b/scripts/update_payload/format_utils.py
@@ -1,6 +1,18 @@
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
+#
+# Copyright (C) 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
 
 """Various formatting functions."""
 
diff --git a/scripts/update_payload/format_utils_unittest.py b/scripts/update_payload/format_utils_unittest.py
index 7153f9e..42ea621 100755
--- a/scripts/update_payload/format_utils_unittest.py
+++ b/scripts/update_payload/format_utils_unittest.py
@@ -1,8 +1,19 @@
 #!/usr/bin/python2
 #
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
+# Copyright (C) 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
 
 """Unit tests for format_utils.py."""
 
diff --git a/scripts/update_payload/histogram.py b/scripts/update_payload/histogram.py
index f72db61..1ac2ab5 100644
--- a/scripts/update_payload/histogram.py
+++ b/scripts/update_payload/histogram.py
@@ -1,6 +1,18 @@
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
+#
+# Copyright (C) 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
 
 """Histogram generation tools."""
 
diff --git a/scripts/update_payload/histogram_unittest.py b/scripts/update_payload/histogram_unittest.py
index 643bb32..e757dd0 100755
--- a/scripts/update_payload/histogram_unittest.py
+++ b/scripts/update_payload/histogram_unittest.py
@@ -1,8 +1,19 @@
 #!/usr/bin/python2
 #
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
+# Copyright (C) 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
 
 """Unit tests for histogram.py."""
 
diff --git a/scripts/update_payload/payload.py b/scripts/update_payload/payload.py
index 8d9a20e..2a0cb58 100644
--- a/scripts/update_payload/payload.py
+++ b/scripts/update_payload/payload.py
@@ -1,6 +1,18 @@
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
+#
+# Copyright (C) 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
 
 """Tools for reading, verifying and applying Chrome OS update payloads."""
 
@@ -10,7 +22,6 @@
 import struct
 
 from update_payload import applier
-from update_payload import block_tracer
 from update_payload import checker
 from update_payload import common
 from update_payload import update_metadata_pb2
@@ -262,19 +273,19 @@
     return not self.IsDelta()
 
   def Check(self, pubkey_file_name=None, metadata_sig_file=None,
-            report_out_file=None, assert_type=None, block_size=0,
-            rootfs_part_size=0, kernel_part_size=0, allow_unhashed=False,
+            metadata_size=0, report_out_file=None, assert_type=None,
+            block_size=0, part_sizes=None, allow_unhashed=False,
             disabled_tests=()):
     """Checks the payload integrity.
 
     Args:
       pubkey_file_name: public key used for signature verification
       metadata_sig_file: metadata signature, if verification is desired
+      metadata_size: metadata size, if verification is desired
       report_out_file: file object to dump the report to
       assert_type: assert that payload is either 'full' or 'delta'
       block_size: expected filesystem / payload block size
-      rootfs_part_size: the size of (physical) rootfs partitions in bytes
-      kernel_part_size: the size of (physical) kernel partitions in bytes
+      part_sizes: map of partition label to (physical) size in bytes
       allow_unhashed: allow unhashed operation blobs
       disabled_tests: list of tests to disable
 
@@ -289,20 +300,18 @@
         allow_unhashed=allow_unhashed, disabled_tests=disabled_tests)
     helper.Run(pubkey_file_name=pubkey_file_name,
                metadata_sig_file=metadata_sig_file,
-               rootfs_part_size=rootfs_part_size,
-               kernel_part_size=kernel_part_size,
+               metadata_size=metadata_size,
+               part_sizes=part_sizes,
                report_out_file=report_out_file)
 
-  def Apply(self, new_kernel_part, new_rootfs_part, old_kernel_part=None,
-            old_rootfs_part=None, bsdiff_in_place=True, bspatch_path=None,
-            puffpatch_path=None, truncate_to_expected_size=True):
+  def Apply(self, new_parts, old_parts=None, bsdiff_in_place=True,
+            bspatch_path=None, puffpatch_path=None,
+            truncate_to_expected_size=True):
     """Applies the update payload.
 
     Args:
-      new_kernel_part: name of dest kernel partition file
-      new_rootfs_part: name of dest rootfs partition file
-      old_kernel_part: name of source kernel partition file (optional)
-      old_rootfs_part: name of source rootfs partition file (optional)
+      new_parts: map of partition name to dest partition file
+      old_parts: map of partition name to partition file (optional)
       bsdiff_in_place: whether to perform BSDIFF operations in-place (optional)
       bspatch_path: path to the bspatch binary (optional)
       puffpatch_path: path to the puffpatch binary (optional)
@@ -320,26 +329,4 @@
         self, bsdiff_in_place=bsdiff_in_place, bspatch_path=bspatch_path,
         puffpatch_path=puffpatch_path,
         truncate_to_expected_size=truncate_to_expected_size)
-    helper.Run(new_kernel_part, new_rootfs_part,
-               old_kernel_part=old_kernel_part,
-               old_rootfs_part=old_rootfs_part)
-
-  def TraceBlock(self, block, skip, trace_out_file, is_kernel):
-    """Traces the origin(s) of a given dest partition block.
-
-    The tracing tries to find origins transitively, when possible (it currently
-    only works for move operations, where the mapping of src/dst is
-    one-to-one). It will dump a list of operations and source blocks
-    responsible for the data in the given dest block.
-
-    Args:
-      block: the block number whose origin to trace
-      skip: the number of first origin mappings to skip
-      trace_out_file: file object to dump the trace to
-      is_kernel: trace through kernel (True) or rootfs (False) operations
-    """
-    self._AssertInit()
-
-    # Create a short-lived payload block tracer object and run it.
-    helper = block_tracer.PayloadBlockTracer(self)
-    helper.Run(block, skip, trace_out_file, is_kernel)
+    helper.Run(new_parts, old_parts=old_parts)
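
To illustrate the map-based Apply() interface introduced above, here is a minimal sketch. The image file names are hypothetical; the partition labels must match the names carried by the payload ('root' and 'kernel' for Chrome OS payloads, per common.py).

    from update_payload import payload

    # Hypothetical source/destination image files keyed by partition label.
    with open('payload.bin') as payload_file:
      p = payload.Payload(payload_file)
      p.Init()
      p.Apply(new_parts={'root': 'new_root.img', 'kernel': 'new_kernel.img'},
              old_parts={'root': 'old_root.img', 'kernel': 'old_kernel.img'})
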
diff --git a/scripts/update_payload/test_utils.py b/scripts/update_payload/test_utils.py
index 38712fb..1e2259d 100644
--- a/scripts/update_payload/test_utils.py
+++ b/scripts/update_payload/test_utils.py
@@ -1,6 +1,18 @@
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
+#
+# Copyright (C) 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
 
 """Utilities for unit testing."""
 
@@ -276,7 +288,7 @@
 
     Args:
       is_kernel: whether this is a kernel (True) or rootfs (False) operation
-      op_type: one of REPLACE, REPLACE_BZ, MOVE or BSDIFF
+      op_type: one of REPLACE, REPLACE_BZ, REPLACE_XZ, MOVE or BSDIFF
       src_extents: list of (start, length) pairs indicating src block ranges
       src_length: size of the src data in bytes (needed for BSDIFF)
       dst_extents: list of (start, length) pairs indicating dst block ranges
diff --git a/tar_bunzip2.gypi b/tar_bunzip2.gypi
index 8c6614a..4d1be28 100644
--- a/tar_bunzip2.gypi
+++ b/tar_bunzip2.gypi
@@ -21,9 +21,6 @@
     {
       'rule_name': 'tar-bunzip2',
       'extension': 'bz2',
-      'inputs': [
-        '<(RULE_INPUT_PATH)',
-      ],
       'outputs': [
         # The .flag file is used to mark the timestamp of the file extraction
         # and re-run this action if a new .bz2 file is generated.
diff --git a/test_http_server.cc b/test_http_server.cc
index 93aa11c..cf15672 100644
--- a/test_http_server.cc
+++ b/test_http_server.cc
@@ -496,7 +496,7 @@
     CHECK_EQ(terms.size(), num_terms);
   }
 
-  inline string Get(const off_t index) const {
+  inline const string& Get(const off_t index) const {
     return terms[index];
   }
   inline const char *GetCStr(const off_t index) const {
diff --git a/update_attempter.cc b/update_attempter.cc
index 9cef154..63d8a61 100644
--- a/update_attempter.cc
+++ b/update_attempter.cc
@@ -20,7 +20,6 @@
 
 #include <algorithm>
 #include <memory>
-#include <set>
 #include <string>
 #include <utility>
 #include <vector>
@@ -59,7 +58,9 @@
 #include "update_engine/payload_state_interface.h"
 #include "update_engine/power_manager_interface.h"
 #include "update_engine/system_state.h"
+#include "update_engine/update_boot_flags_action.h"
 #include "update_engine/update_manager/policy.h"
+#include "update_engine/update_manager/policy_utils.h"
 #include "update_engine/update_manager/update_manager.h"
 #include "update_engine/update_status_utils.h"
 
@@ -72,8 +73,8 @@
 using chromeos_update_manager::EvalStatus;
 using chromeos_update_manager::Policy;
 using chromeos_update_manager::UpdateCheckParams;
-using std::set;
-using std::shared_ptr;
+using chromeos_update_manager::CalculateStagingCase;
+using chromeos_update_manager::StagingCase;
 using std::string;
 using std::vector;
 using update_engine::UpdateAttemptFlags;
@@ -101,8 +102,7 @@
 // to |action| (e.g., ErrorCode::kFilesystemVerifierError). If |code| is
 // not ErrorCode::kError, or the action is not matched, returns |code|
 // unchanged.
-ErrorCode GetErrorCodeForAction(AbstractAction* action,
-                                     ErrorCode code) {
+ErrorCode GetErrorCodeForAction(AbstractAction* action, ErrorCode code) {
   if (code != ErrorCode::kError)
     return code;
 
@@ -238,6 +238,7 @@
                              const string& omaha_url,
                              const string& target_channel,
                              const string& target_version_prefix,
+                             bool rollback_allowed,
                              bool obey_proxies,
                              bool interactive) {
   // This is normally called frequently enough so it's appropriate to use as a
@@ -246,10 +247,6 @@
   // timeout event.
   CheckAndReportDailyMetrics();
 
-  // Notify of the new update attempt, clearing prior interactive requests.
-  if (forced_update_pending_callback_.get())
-    forced_update_pending_callback_->Run(false, false);
-
   fake_update_success_ = false;
   if (status_ == UpdateStatus::UPDATED_NEED_REBOOT) {
     // Although we have applied an update, we still want to ping Omaha
@@ -276,6 +273,7 @@
                              omaha_url,
                              target_channel,
                              target_version_prefix,
+                             rollback_allowed,
                              obey_proxies,
                              interactive)) {
     return;
@@ -290,10 +288,7 @@
   // checks in the case where a response is not received.
   UpdateLastCheckedTime();
 
-  // Just in case we didn't update boot flags yet, make sure they're updated
-  // before any update processing starts.
-  start_action_processor_ = true;
-  UpdateBootFlags();
+  ScheduleProcessingStart();
 }
 
 void UpdateAttempter::RefreshDevicePolicy() {
@@ -351,6 +346,7 @@
                                             const string& omaha_url,
                                             const string& target_channel,
                                             const string& target_version_prefix,
+                                            bool rollback_allowed,
                                             bool obey_proxies,
                                             bool interactive) {
   http_response_code_ = 0;
@@ -359,10 +355,21 @@
   // Refresh the policy before computing all the update parameters.
   RefreshDevicePolicy();
 
+  // Check whether we need to clear the rollback-happened preference after
+  // policy is available again.
+  UpdateRollbackHappened();
+
   // Update the target version prefix.
   omaha_request_params_->set_target_version_prefix(target_version_prefix);
 
-  CalculateScatteringParams(interactive);
+  // Set whether rollback is allowed.
+  omaha_request_params_->set_rollback_allowed(rollback_allowed);
+
+  CalculateStagingParams(interactive);
+  // If staging_wait_time_ wasn't set, staging is off; use scattering instead.
+  if (staging_wait_time_.InSeconds() == 0) {
+    CalculateScatteringParams(interactive);
+  }
 
   CalculateP2PParams(interactive);
   if (payload_state->GetUsingP2PForDownloading() ||
@@ -407,6 +414,8 @@
 
   LOG(INFO) << "target_version_prefix = "
             << omaha_request_params_->target_version_prefix()
+            << ", rollback_allowed = "
+            << omaha_request_params_->rollback_allowed()
             << ", scatter_factor_in_seconds = "
             << utils::FormatSecs(scatter_factor_.InSeconds());
 
@@ -482,7 +491,8 @@
     if (omaha_request_params_->waiting_period().InSeconds() == 0) {
       // First case. Check if we have a suitable value to set for
       // the waiting period.
-      if (prefs_->GetInt64(kPrefsWallClockWaitPeriod, &wait_period_in_secs) &&
+      if (prefs_->GetInt64(kPrefsWallClockScatteringWaitPeriod,
+                           &wait_period_in_secs) &&
           wait_period_in_secs > 0 &&
           wait_period_in_secs <= scatter_factor_.InSeconds()) {
         // This means:
@@ -542,7 +552,7 @@
     omaha_request_params_->set_wall_clock_based_wait_enabled(false);
     omaha_request_params_->set_update_check_count_wait_enabled(false);
     omaha_request_params_->set_waiting_period(TimeDelta::FromSeconds(0));
-    prefs_->Delete(kPrefsWallClockWaitPeriod);
+    prefs_->Delete(kPrefsWallClockScatteringWaitPeriod);
     prefs_->Delete(kPrefsUpdateCheckCount);
     // Don't delete the UpdateFirstSeenAt file as we don't want manual checks
     // that result in no-updates (e.g. due to server side throttling) to
@@ -566,15 +576,43 @@
       omaha_request_params_->waiting_period());
 }
 
-void UpdateAttempter::BuildPostInstallActions(
-    InstallPlanAction* previous_action) {
-  shared_ptr<PostinstallRunnerAction> postinstall_runner_action(
-      new PostinstallRunnerAction(system_state_->boot_control(),
-                                  system_state_->hardware()));
-  postinstall_runner_action->set_delegate(this);
-  actions_.push_back(shared_ptr<AbstractAction>(postinstall_runner_action));
-  BondActions(previous_action,
-              postinstall_runner_action.get());
+void UpdateAttempter::CalculateStagingParams(bool interactive) {
+  bool oobe_complete = system_state_->hardware()->IsOOBEEnabled() &&
+                       system_state_->hardware()->IsOOBEComplete(nullptr);
+  auto device_policy = system_state_->device_policy();
+  StagingCase staging_case = StagingCase::kOff;
+  if (device_policy && !interactive && oobe_complete) {
+    staging_wait_time_ = omaha_request_params_->waiting_period();
+    staging_case = CalculateStagingCase(
+        device_policy, prefs_, &staging_wait_time_, &staging_schedule_);
+  }
+  switch (staging_case) {
+    case StagingCase::kOff:
+      // Staging is off, get rid of persisted value.
+      prefs_->Delete(kPrefsWallClockStagingWaitPeriod);
+      // Set |staging_wait_time_| to its default value so scattering can still
+      // be turned on.
+      staging_wait_time_ = TimeDelta();
+      break;
+    // Let the cases fall through since they just add, and never remove, steps
+    // to turning staging on.
+    case StagingCase::kNoSavedValue:
+      prefs_->SetInt64(kPrefsWallClockStagingWaitPeriod,
+                       staging_wait_time_.InDays());
+    case StagingCase::kSetStagingFromPref:
+      omaha_request_params_->set_waiting_period(staging_wait_time_);
+    case StagingCase::kNoAction:
+      // Staging is on; enable wallclock-based wait so that its values get used.
+      omaha_request_params_->set_wall_clock_based_wait_enabled(true);
+      // Use UpdateCheckCount if possible to prevent devices updating all at
+      // once.
+      omaha_request_params_->set_update_check_count_wait_enabled(
+          DecrementUpdateCheckCount());
+      // Scattering should not be turned on if staging is on; delete the
+      // existing scattering configuration.
+      prefs_->Delete(kPrefsWallClockScatteringWaitPeriod);
+      scatter_factor_ = TimeDelta();
+  }
 }
 
 void UpdateAttempter::BuildUpdateActions(bool interactive) {
@@ -582,82 +620,75 @@
   processor_->set_delegate(this);
 
   // Actions:
-  std::unique_ptr<LibcurlHttpFetcher> update_check_fetcher(
-      new LibcurlHttpFetcher(GetProxyResolver(), system_state_->hardware()));
+  auto update_check_fetcher = std::make_unique<LibcurlHttpFetcher>(
+      GetProxyResolver(), system_state_->hardware());
   update_check_fetcher->set_server_to_check(ServerToCheck::kUpdate);
   // Try harder to connect to the network, esp when not interactive.
   // See comment in libcurl_http_fetcher.cc.
   update_check_fetcher->set_no_network_max_retries(interactive ? 1 : 3);
-  shared_ptr<OmahaRequestAction> update_check_action(
-      new OmahaRequestAction(system_state_,
-                             nullptr,
-                             std::move(update_check_fetcher),
-                             false));
-  shared_ptr<OmahaResponseHandlerAction> response_handler_action(
-      new OmahaResponseHandlerAction(system_state_));
-
-  shared_ptr<OmahaRequestAction> download_started_action(new OmahaRequestAction(
+  auto update_check_action = std::make_unique<OmahaRequestAction>(
+      system_state_, nullptr, std::move(update_check_fetcher), false);
+  auto response_handler_action =
+      std::make_unique<OmahaResponseHandlerAction>(system_state_);
+  auto update_boot_flags_action =
+      std::make_unique<UpdateBootFlagsAction>(system_state_->boot_control());
+  auto download_started_action = std::make_unique<OmahaRequestAction>(
       system_state_,
       new OmahaEvent(OmahaEvent::kTypeUpdateDownloadStarted),
       std::make_unique<LibcurlHttpFetcher>(GetProxyResolver(),
                                            system_state_->hardware()),
-      false));
+      false);
 
   LibcurlHttpFetcher* download_fetcher =
       new LibcurlHttpFetcher(GetProxyResolver(), system_state_->hardware());
   download_fetcher->set_server_to_check(ServerToCheck::kDownload);
   if (interactive)
     download_fetcher->set_max_retry_count(kDownloadMaxRetryCountInteractive);
-  shared_ptr<DownloadAction> download_action(
-      new DownloadAction(prefs_,
-                         system_state_->boot_control(),
-                         system_state_->hardware(),
-                         system_state_,
-                         download_fetcher,  // passes ownership
-                         interactive));
-  shared_ptr<OmahaRequestAction> download_finished_action(
-      new OmahaRequestAction(
-          system_state_,
-          new OmahaEvent(OmahaEvent::kTypeUpdateDownloadFinished),
-          std::make_unique<LibcurlHttpFetcher>(GetProxyResolver(),
-                                               system_state_->hardware()),
-          false));
-  shared_ptr<FilesystemVerifierAction> filesystem_verifier_action(
-      new FilesystemVerifierAction());
-  shared_ptr<OmahaRequestAction> update_complete_action(
-      new OmahaRequestAction(system_state_,
-                             new OmahaEvent(OmahaEvent::kTypeUpdateComplete),
-                             std::make_unique<LibcurlHttpFetcher>(
-                                 GetProxyResolver(), system_state_->hardware()),
-                             false));
-
+  auto download_action =
+      std::make_unique<DownloadAction>(prefs_,
+                                       system_state_->boot_control(),
+                                       system_state_->hardware(),
+                                       system_state_,
+                                       download_fetcher,  // passes ownership
+                                       interactive);
   download_action->set_delegate(this);
-  response_handler_action_ = response_handler_action;
-  download_action_ = download_action;
 
-  actions_.push_back(shared_ptr<AbstractAction>(update_check_action));
-  actions_.push_back(shared_ptr<AbstractAction>(response_handler_action));
-  actions_.push_back(shared_ptr<AbstractAction>(download_started_action));
-  actions_.push_back(shared_ptr<AbstractAction>(download_action));
-  actions_.push_back(shared_ptr<AbstractAction>(download_finished_action));
-  actions_.push_back(shared_ptr<AbstractAction>(filesystem_verifier_action));
+  auto download_finished_action = std::make_unique<OmahaRequestAction>(
+      system_state_,
+      new OmahaEvent(OmahaEvent::kTypeUpdateDownloadFinished),
+      std::make_unique<LibcurlHttpFetcher>(GetProxyResolver(),
+                                           system_state_->hardware()),
+      false);
+  auto filesystem_verifier_action =
+      std::make_unique<FilesystemVerifierAction>();
+  auto update_complete_action = std::make_unique<OmahaRequestAction>(
+      system_state_,
+      new OmahaEvent(OmahaEvent::kTypeUpdateComplete),
+      std::make_unique<LibcurlHttpFetcher>(GetProxyResolver(),
+                                           system_state_->hardware()),
+      false);
+
+  auto postinstall_runner_action = std::make_unique<PostinstallRunnerAction>(
+      system_state_->boot_control(), system_state_->hardware());
+  postinstall_runner_action->set_delegate(this);
 
   // Bond them together. We have to use the leaf-types when calling
   // BondActions().
-  BondActions(update_check_action.get(),
-              response_handler_action.get());
-  BondActions(response_handler_action.get(),
-              download_action.get());
-  BondActions(download_action.get(),
-              filesystem_verifier_action.get());
-  BuildPostInstallActions(filesystem_verifier_action.get());
+  BondActions(update_check_action.get(), response_handler_action.get());
+  BondActions(response_handler_action.get(), download_action.get());
+  BondActions(download_action.get(), filesystem_verifier_action.get());
+  BondActions(filesystem_verifier_action.get(),
+              postinstall_runner_action.get());
 
-  actions_.push_back(shared_ptr<AbstractAction>(update_complete_action));
-
-  // Enqueue the actions
-  for (const shared_ptr<AbstractAction>& action : actions_) {
-    processor_->EnqueueAction(action.get());
-  }
+  processor_->EnqueueAction(std::move(update_check_action));
+  processor_->EnqueueAction(std::move(response_handler_action));
+  processor_->EnqueueAction(std::move(update_boot_flags_action));
+  processor_->EnqueueAction(std::move(download_started_action));
+  processor_->EnqueueAction(std::move(download_action));
+  processor_->EnqueueAction(std::move(download_finished_action));
+  processor_->EnqueueAction(std::move(filesystem_verifier_action));
+  processor_->EnqueueAction(std::move(postinstall_runner_action));
+  processor_->EnqueueAction(std::move(update_complete_action));
 }
 
 bool UpdateAttempter::Rollback(bool powerwash) {
@@ -688,39 +719,32 @@
   }
 
   LOG(INFO) << "Setting rollback options.";
-  InstallPlan install_plan;
-
-  install_plan.target_slot = GetRollbackSlot();
-  install_plan.source_slot = system_state_->boot_control()->GetCurrentSlot();
+  install_plan_.reset(new InstallPlan());
+  install_plan_->target_slot = GetRollbackSlot();
+  install_plan_->source_slot = system_state_->boot_control()->GetCurrentSlot();
 
   TEST_AND_RETURN_FALSE(
-      install_plan.LoadPartitionsFromSlots(system_state_->boot_control()));
-  install_plan.powerwash_required = powerwash;
+      install_plan_->LoadPartitionsFromSlots(system_state_->boot_control()));
+  install_plan_->powerwash_required = powerwash;
 
   LOG(INFO) << "Using this install plan:";
-  install_plan.Dump();
+  install_plan_->Dump();
 
-  shared_ptr<InstallPlanAction> install_plan_action(
-      new InstallPlanAction(install_plan));
-  actions_.push_back(shared_ptr<AbstractAction>(install_plan_action));
-
-  BuildPostInstallActions(install_plan_action.get());
-
-  // Enqueue the actions
-  for (const shared_ptr<AbstractAction>& action : actions_) {
-    processor_->EnqueueAction(action.get());
-  }
+  auto install_plan_action =
+      std::make_unique<InstallPlanAction>(*install_plan_);
+  auto postinstall_runner_action = std::make_unique<PostinstallRunnerAction>(
+      system_state_->boot_control(), system_state_->hardware());
+  postinstall_runner_action->set_delegate(this);
+  BondActions(install_plan_action.get(), postinstall_runner_action.get());
+  processor_->EnqueueAction(std::move(install_plan_action));
+  processor_->EnqueueAction(std::move(postinstall_runner_action));
 
   // Update the payload state for Rollback.
   system_state_->payload_state()->Rollback();
 
   SetStatusAndNotify(UpdateStatus::ATTEMPTING_ROLLBACK);
 
-  // Just in case we didn't update boot flags yet, make sure they're updated
-  // before any update processing starts. This also schedules the start of the
-  // actions we just posted.
-  start_action_processor_ = true;
-  UpdateBootFlags();
+  ScheduleProcessingStart();
   return true;
 }
 
@@ -813,11 +837,13 @@
 }
 
 bool UpdateAttempter::RebootIfNeeded() {
+#ifdef __ANDROID__
   if (status_ != UpdateStatus::UPDATED_NEED_REBOOT) {
     LOG(INFO) << "Reboot requested, but status is "
               << UpdateStatusToString(status_) << ", so not rebooting.";
     return false;
   }
+#endif  // __ANDROID__
 
   if (system_state_->power_manager()->RequestReboot())
     return true;
@@ -863,11 +889,10 @@
       return;
     }
 
-    LOG(INFO) << "Running "
-              << (params.is_interactive ? "interactive" : "periodic")
+    LOG(INFO) << "Running " << (params.interactive ? "interactive" : "periodic")
               << " update.";
 
-    if (!params.is_interactive) {
+    if (!params.interactive) {
       // Cache the update attempt flags that will be used by this update attempt
       // so that they can't be changed mid-way through.
       current_update_attempt_flags_ = update_attempt_flags_;
@@ -876,8 +901,13 @@
     LOG(INFO) << "Update attempt flags in use = 0x" << std::hex
               << current_update_attempt_flags_;
 
-    Update(forced_app_version_, forced_omaha_url_, params.target_channel,
-           params.target_version_prefix, false, params.is_interactive);
+    Update(forced_app_version_,
+           forced_omaha_url_,
+           params.target_channel,
+           params.target_version_prefix,
+           params.rollback_allowed,
+           /*obey_proxies=*/false,
+           params.interactive);
     // Always clear the forced app_version and omaha_url after an update attempt
     // so the next update uses the defaults.
     forced_app_version_.clear();
@@ -900,11 +930,23 @@
   last_checked_time_ = system_state_->clock()->GetWallclockTime().ToTimeT();
 }
 
+void UpdateAttempter::UpdateRollbackHappened() {
+  DCHECK(system_state_);
+  DCHECK(system_state_->payload_state());
+  DCHECK(policy_provider_);
+  if (system_state_->payload_state()->GetRollbackHappened() &&
+      (policy_provider_->device_policy_is_loaded() ||
+       policy_provider_->IsConsumerDevice())) {
+    // Rollback happened, but we already went through OOBE and policy is
+    // present or it's a consumer device.
+    system_state_->payload_state()->SetRollbackHappened(false);
+  }
+}
+
 // Delegate methods:
 void UpdateAttempter::ProcessingDone(const ActionProcessor* processor,
                                      ErrorCode code) {
   LOG(INFO) << "Processing Done.";
-  actions_.clear();
 
   // Reset cpu shares back to normal.
   cpu_limiter_.StopLimiter();
@@ -912,6 +954,10 @@
   // reset the state that's only valid for a single update pass
   current_update_attempt_flags_ = UpdateAttemptFlags::kNone;
 
+  if (forced_update_pending_callback_.get())
+    // Clear prior interactive requests once the processor is done.
+    forced_update_pending_callback_->Run(false, false);
+
   if (status_ == UpdateStatus::REPORTING_ERROR_EVENT) {
     LOG(INFO) << "Error event sent.";
 
@@ -926,6 +972,8 @@
         "so requesting reboot from user.";
   }
 
+  attempt_error_code_ = utils::GetBaseErrorCode(code);
+
   if (code == ErrorCode::kSuccess) {
     WriteUpdateCompletedMarker();
     prefs_->SetInt64(kPrefsDeltaUpdateFailures, 0);
@@ -946,26 +994,32 @@
     // way.
     prefs_->Delete(kPrefsUpdateCheckCount);
     system_state_->payload_state()->SetScatteringWaitPeriod(TimeDelta());
+    system_state_->payload_state()->SetStagingWaitPeriod(TimeDelta());
     prefs_->Delete(kPrefsUpdateFirstSeenAt);
 
     SetStatusAndNotify(UpdateStatus::UPDATED_NEED_REBOOT);
     ScheduleUpdates();
     LOG(INFO) << "Update successfully applied, waiting to reboot.";
 
-    // This pointer is null during rollback operations, and the stats
-    // don't make much sense then anyway.
-    if (response_handler_action_) {
-      const InstallPlan& install_plan =
-          response_handler_action_->install_plan();
-
+    // |install_plan_| is null during rollback operations, and the stats don't
+    // make much sense then anyway.
+    if (install_plan_) {
       // Generate an unique payload identifier.
       string target_version_uid;
-      for (const auto& payload : install_plan.payloads) {
+      for (const auto& payload : install_plan_->payloads) {
         target_version_uid +=
             brillo::data_encoding::Base64Encode(payload.hash) + ":" +
             payload.metadata_signature + ":";
       }
 
+      // If we just downloaded a rollback image, we should preserve this fact
+      // over the following powerwash.
+      if (install_plan_->is_rollback) {
+        system_state_->payload_state()->SetRollbackHappened(true);
+        system_state_->metrics_reporter()->ReportEnterpriseRollbackMetrics(
+            /*success=*/true, install_plan_->version);
+      }
+
       // Expect to reboot into the new version to send the proper metric during
       // next boot.
       system_state_->payload_state()->ExpectRebootInNewVersion(
@@ -974,8 +1028,7 @@
       // If we just finished a rollback, then we expect to have no Omaha
       // response. Otherwise, it's an error.
       if (system_state_->payload_state()->GetRollbackVersion().empty()) {
-        LOG(ERROR) << "Can't send metrics because expected "
-            "response_handler_action_ missing.";
+        LOG(ERROR) << "Can't send metrics because there was no Omaha response";
       }
     }
     return;
@@ -993,9 +1046,11 @@
   // Reset cpu shares back to normal.
   cpu_limiter_.StopLimiter();
   download_progress_ = 0.0;
+  if (forced_update_pending_callback_.get())
+    // Clear prior interactive requests once the processor is done.
+    forced_update_pending_callback_->Run(false, false);
   SetStatusAndNotify(UpdateStatus::IDLE);
   ScheduleUpdates();
-  actions_.clear();
   error_event_.reset(nullptr);
 }
 
@@ -1028,9 +1083,23 @@
         consecutive_failed_update_checks_ = 0;
       }
 
+      const OmahaResponse& omaha_response =
+          omaha_request_action->GetOutputObject();
       // Store the server-dictated poll interval, if any.
       server_dictated_poll_interval_ =
-          std::max(0, omaha_request_action->GetOutputObject().poll_interval);
+          std::max(0, omaha_response.poll_interval);
+
+      // This update was ignored by the Omaha request action because updates
+      // over a cellular connection are not allowed. We need to ask for the
+      // user's permission to update.
+      if (code == ErrorCode::kOmahaUpdateIgnoredOverCellular) {
+        new_version_ = omaha_response.version;
+        new_payload_size_ = 0;
+        for (const auto& package : omaha_response.packages) {
+          new_payload_size_ += package.size;
+        }
+        SetStatusAndNotify(UpdateStatus::NEED_PERMISSION_TO_UPDATE);
+      }
     }
   } else if (type == OmahaResponseHandlerAction::StaticType()) {
     // Depending on the returned error code, note that an update is available.
@@ -1041,13 +1110,15 @@
       // callback is invoked. This avoids notifying the user that a download
       // has started in cases when the server and the client are unable to
       // initiate the download.
-      CHECK(action == response_handler_action_.get());
-      auto plan = response_handler_action_->install_plan();
+      auto omaha_response_handler_action =
+          static_cast<OmahaResponseHandlerAction*>(action);
+      install_plan_.reset(
+          new InstallPlan(omaha_response_handler_action->install_plan()));
       UpdateLastCheckedTime();
-      new_version_ = plan.version;
-      new_system_version_ = plan.system_version;
+      new_version_ = install_plan_->version;
+      new_system_version_ = install_plan_->system_version;
       new_payload_size_ = 0;
-      for (const auto& payload : plan.payloads)
+      for (const auto& payload : install_plan_->payloads)
         new_payload_size_ += payload.size;
       cpu_limiter_.StartLimiter();
       SetStatusAndNotify(UpdateStatus::UPDATE_AVAILABLE);
@@ -1058,12 +1129,28 @@
     // If the current state is at or past the download phase, count the failure
     // in case a switch to full update becomes necessary. Ignore network
     // transfer timeouts and failures.
-    if (status_ >= UpdateStatus::DOWNLOADING &&
-        code != ErrorCode::kDownloadTransferError) {
-      MarkDeltaUpdateFailure();
+    if (code != ErrorCode::kDownloadTransferError) {
+      switch (status_) {
+        case UpdateStatus::IDLE:
+        case UpdateStatus::CHECKING_FOR_UPDATE:
+        case UpdateStatus::UPDATE_AVAILABLE:
+        case UpdateStatus::NEED_PERMISSION_TO_UPDATE:
+          break;
+        case UpdateStatus::DOWNLOADING:
+        case UpdateStatus::VERIFYING:
+        case UpdateStatus::FINALIZING:
+        case UpdateStatus::UPDATED_NEED_REBOOT:
+        case UpdateStatus::REPORTING_ERROR_EVENT:
+        case UpdateStatus::ATTEMPTING_ROLLBACK:
+        case UpdateStatus::DISABLED:
+          MarkDeltaUpdateFailure();
+          break;
+      }
     }
-    // On failure, schedule an error event to be sent to Omaha.
-    CreatePendingErrorEvent(action, code);
+    if (code != ErrorCode::kNoUpdate) {
+      // On failure, schedule an error event to be sent to Omaha.
+      CreatePendingErrorEvent(action, code);
+    }
     return;
   }
   // Find out which action completed (successfully).
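
Note on the hunk above: replacing the status_ >= UpdateStatus::DOWNLOADING comparison with a switch that lists every UpdateStatus value makes the delta-failure accounting exhaustive, so a newly added status must be handled explicitly before the code compiles cleanly with switch warnings enabled. A minimal stand-alone sketch of the same pattern (the enum here is an abbreviated stand-in, not the real UpdateStatus):

    #include <iostream>

    // Abbreviated stand-in for update_engine's UpdateStatus.
    enum class Status { kIdle, kCheckingForUpdate, kDownloading, kFinalizing };

    // No default case: with -Wswitch (or -Werror=switch) the compiler flags
    // this function as soon as a new Status value is added but not listed.
    bool CountsAsDeltaFailure(Status status) {
      switch (status) {
        case Status::kIdle:
        case Status::kCheckingForUpdate:
          return false;
        case Status::kDownloading:
        case Status::kFinalizing:
          return true;
      }
      return false;  // unreachable; keeps non-warning toolchains happy
    }

    int main() {
      std::cout << std::boolalpha
                << CountsAsDeltaFailure(Status::kDownloading) << "\n";
    }
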
@@ -1176,38 +1263,6 @@
   return true;
 }
 
-void UpdateAttempter::UpdateBootFlags() {
-  if (update_boot_flags_running_) {
-    LOG(INFO) << "Update boot flags running, nothing to do.";
-    return;
-  }
-  if (updated_boot_flags_) {
-    LOG(INFO) << "Already updated boot flags. Skipping.";
-    if (start_action_processor_) {
-      ScheduleProcessingStart();
-    }
-    return;
-  }
-  // This is purely best effort. Failures should be logged by Subprocess. Run
-  // the script asynchronously to avoid blocking the event loop regardless of
-  // the script runtime.
-  update_boot_flags_running_ = true;
-  LOG(INFO) << "Marking booted slot as good.";
-  if (!system_state_->boot_control()->MarkBootSuccessfulAsync(Bind(
-          &UpdateAttempter::CompleteUpdateBootFlags, base::Unretained(this)))) {
-    LOG(ERROR) << "Failed to mark current boot as successful.";
-    CompleteUpdateBootFlags(false);
-  }
-}
-
-void UpdateAttempter::CompleteUpdateBootFlags(bool successful) {
-  update_boot_flags_running_ = false;
-  updated_boot_flags_ = true;
-  if (start_action_processor_) {
-    ScheduleProcessingStart();
-  }
-}
-
 void UpdateAttempter::BroadcastStatus() {
   UpdateEngineStatus broadcast_status;
   // Use common method for generating the current status.
@@ -1225,15 +1280,13 @@
   if (!system_state_->hardware()->IsNormalBootMode())
     flags |= static_cast<uint32_t>(ErrorCode::kDevModeFlag);
 
-  if (response_handler_action_.get() &&
-      response_handler_action_->install_plan().is_resume)
+  if (install_plan_ && install_plan_->is_resume)
     flags |= static_cast<uint32_t>(ErrorCode::kResumedFlag);
 
   if (!system_state_->hardware()->IsOfficialBuild())
     flags |= static_cast<uint32_t>(ErrorCode::kTestImageFlag);
 
-  if (omaha_request_params_->update_url() !=
-      constants::kOmahaDefaultProductionURL) {
+  if (!omaha_request_params_->IsUpdateUrlOfficial()) {
     flags |= static_cast<uint32_t>(ErrorCode::kTestOmahaUrlFlag);
   }
 
@@ -1263,22 +1316,12 @@
 
 void UpdateAttempter::CreatePendingErrorEvent(AbstractAction* action,
                                               ErrorCode code) {
-  if (error_event_.get()) {
+  if (error_event_.get() || status_ == UpdateStatus::REPORTING_ERROR_EVENT) {
     // This shouldn't really happen.
     LOG(WARNING) << "There's already an existing pending error event.";
     return;
   }
 
-  // For now assume that a generic Omaha response action failure means that
-  // there's no update so don't send an event. Also, double check that the
-  // failure has not occurred while sending an error event -- in which case
-  // don't schedule another. This shouldn't really happen but just in case...
-  if ((action->Type() == OmahaResponseHandlerAction::StaticType() &&
-       code == ErrorCode::kError) ||
-      status_ == UpdateStatus::REPORTING_ERROR_EVENT) {
-    return;
-  }
-
   // Classify the code to generate the appropriate result so that
   // the Borgmon charts show up the results correctly.
   // Do this before calling GetErrorCodeForAction which could potentially
@@ -1314,16 +1357,21 @@
   LOG(ERROR) << "Update failed.";
   system_state_->payload_state()->UpdateFailed(error_event_->error_code);
 
+  // Send metrics if it was a rollback.
+  if (install_plan_ && install_plan_->is_rollback) {
+    system_state_->metrics_reporter()->ReportEnterpriseRollbackMetrics(
+        /*success=*/false, install_plan_->version);
+  }
+
   // Send it to Omaha.
   LOG(INFO) << "Reporting the error event";
-  shared_ptr<OmahaRequestAction> error_event_action(
-      new OmahaRequestAction(system_state_,
-                             error_event_.release(),  // Pass ownership.
-                             std::make_unique<LibcurlHttpFetcher>(
-                                 GetProxyResolver(), system_state_->hardware()),
-                             false));
-  actions_.push_back(shared_ptr<AbstractAction>(error_event_action));
-  processor_->EnqueueAction(error_event_action.get());
+  auto error_event_action = std::make_unique<OmahaRequestAction>(
+      system_state_,
+      error_event_.release(),  // Pass ownership.
+      std::make_unique<LibcurlHttpFetcher>(GetProxyResolver(),
+                                           system_state_->hardware()),
+      false);
+  processor_->EnqueueAction(std::move(error_event_action));
   SetStatusAndNotify(UpdateStatus::REPORTING_ERROR_EVENT);
   processor_->StartProcessing();
   return true;
@@ -1331,7 +1379,6 @@
 
 void UpdateAttempter::ScheduleProcessingStart() {
   LOG(INFO) << "Scheduling an action processor start.";
-  start_action_processor_ = false;
   MessageLoop::current()->PostTask(
       FROM_HERE,
       Bind([](ActionProcessor* processor) { processor->StartProcessing(); },
@@ -1361,15 +1408,14 @@
 
 void UpdateAttempter::PingOmaha() {
   if (!processor_->IsRunning()) {
-    shared_ptr<OmahaRequestAction> ping_action(new OmahaRequestAction(
+    auto ping_action = std::make_unique<OmahaRequestAction>(
         system_state_,
         nullptr,
         std::make_unique<LibcurlHttpFetcher>(GetProxyResolver(),
                                              system_state_->hardware()),
-        true));
-    actions_.push_back(shared_ptr<OmahaRequestAction>(ping_action));
+        true);
     processor_->set_delegate(nullptr);
-    processor_->EnqueueAction(ping_action.get());
+    processor_->EnqueueAction(std::move(ping_action));
     // Call StartProcessing() synchronously here to avoid any race conditions
     // caused by multiple outstanding ping Omaha requests.  If we call
     // StartProcessing() asynchronously, the device can be suspended before we
@@ -1427,7 +1473,7 @@
 
     // Write out the new value of update_check_count_value.
     if (prefs_->SetInt64(kPrefsUpdateCheckCount, update_check_count_value)) {
-      // We successfully wrote out te new value, so enable the
+      // We successfully wrote out the new value, so enable the
       // update check based wait.
       LOG(INFO) << "New update check count = " << update_check_count_value;
       return true;
@@ -1537,7 +1583,7 @@
           waiting_for_scheduled_check_);
 }
 
-bool UpdateAttempter::IsAnyUpdateSourceAllowed() {
+bool UpdateAttempter::IsAnyUpdateSourceAllowed() const {
   // We allow updates from any source if either of these are true:
   //  * The device is running an unofficial (dev/test) image.
   //  * The debugd dev features are accessible (i.e. in devmode with no owner).
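
The update_attempter.cc changes above drop the shared_ptr actions_ vector and hand each action to the ActionProcessor as a std::unique_ptr. A minimal sketch of that ownership handoff, using stand-in Processor and Action types rather than the real update_engine classes:

    #include <iostream>
    #include <memory>
    #include <queue>

    // Stand-in for AbstractAction: the processor owns actions outright.
    class Action {
     public:
      virtual ~Action() = default;
      virtual void Perform() = 0;
    };

    class PingAction : public Action {
     public:
      void Perform() override { std::cout << "ping\n"; }
    };

    // Stand-in for ActionProcessor: taking ownership via unique_ptr removes
    // the need for a parallel container that keeps the actions alive.
    class Processor {
     public:
      void EnqueueAction(std::unique_ptr<Action> action) {
        queue_.push(std::move(action));
      }
      void StartProcessing() {
        while (!queue_.empty()) {
          queue_.front()->Perform();
          queue_.pop();
        }
      }

     private:
      std::queue<std::unique_ptr<Action>> queue_;
    };

    int main() {
      Processor processor;
      auto ping = std::make_unique<PingAction>();
      // A non-owning handle taken before the move stays valid for delegate
      // wiring, because the processor owns the action in this sketch.
      PingAction* observer = ping.get();
      processor.EnqueueAction(std::move(ping));
      (void)observer;
      processor.StartProcessing();
    }
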
diff --git a/update_attempter.h b/update_attempter.h
index 76e93a2..d0beff6 100644
--- a/update_attempter.h
+++ b/update_attempter.h
@@ -36,14 +36,15 @@
 #include "update_engine/client_library/include/update_engine/update_status.h"
 #include "update_engine/common/action_processor.h"
 #include "update_engine/common/cpu_limiter.h"
+#include "update_engine/common/proxy_resolver.h"
 #include "update_engine/omaha_request_params.h"
 #include "update_engine/omaha_response_handler_action.h"
 #include "update_engine/payload_consumer/download_action.h"
 #include "update_engine/payload_consumer/postinstall_runner_action.h"
-#include "update_engine/proxy_resolver.h"
 #include "update_engine/service_observer_interface.h"
 #include "update_engine/system_state.h"
 #include "update_engine/update_manager/policy.h"
+#include "update_engine/update_manager/staging_utils.h"
 #include "update_engine/update_manager/update_manager.h"
 
 namespace policy {
@@ -52,8 +53,6 @@
 
 namespace chromeos_update_engine {
 
-class UpdateEngineAdaptor;
-
 class UpdateAttempter : public ActionProcessorDelegate,
                         public DownloadActionDelegate,
                         public CertificateChecker::Observer,
@@ -83,6 +82,7 @@
                       const std::string& omaha_url,
                       const std::string& target_channel,
                       const std::string& target_version_prefix,
+                      bool rollback_allowed,
                       bool obey_proxies,
                       bool interactive);
 
@@ -107,16 +107,6 @@
   // Returns the current status in the out param. Returns true on success.
   virtual bool GetStatus(update_engine::UpdateEngineStatus* out_status);
 
-  // Runs chromeos-setgoodkernel, whose responsibility it is to mark the
-  // currently booted partition has high priority/permanent/etc. The execution
-  // is asynchronous. On completion, the action processor may be started
-  // depending on the |start_action_processor_| field. Note that every update
-  // attempt goes through this method.
-  void UpdateBootFlags();
-
-  // Called when the boot flags have been updated.
-  void CompleteUpdateBootFlags(bool success);
-
   UpdateStatus status() const { return status_; }
 
   int http_response_code() const { return http_response_code_; }
@@ -132,7 +122,7 @@
   // Returns the update attempt flags that are in place for the current update
   // attempt.  These are cached at the start of an update attempt so that they
   // remain constant throughout the process.
-  virtual UpdateAttemptFlags GetCurrentUpdateAttemptFlags() {
+  virtual UpdateAttemptFlags GetCurrentUpdateAttemptFlags() const {
     return current_update_attempt_flags_;
   }
 
@@ -160,7 +150,7 @@
   BootControlInterface::Slot GetRollbackSlot() const;
 
   // Initiates a reboot if the current state is
-  // UPDATED_NEED_REBOOT. Returns true on sucess, false otherwise.
+  // UPDATED_NEED_REBOOT. Returns true on success, false otherwise.
   bool RebootIfNeeded();
 
   // DownloadActionDelegate methods:
@@ -177,9 +167,7 @@
   // Broadcasts the current status to all observers.
   void BroadcastStatus();
 
-  // Returns the special flags to be added to ErrorCode values based on the
-  // parameters used in the current update attempt.
-  uint32_t GetErrorCodeFlags();
+  ErrorCode GetAttemptErrorCode() const { return attempt_error_code_; }
 
   // Called at update_engine startup to do various house-keeping.
   void UpdateEngineStarted();
@@ -196,9 +184,9 @@
   virtual bool GetBootTimeAtUpdate(base::Time *out_boot_time);
 
   // Returns a version OS version that was being used before the last reboot,
-  // and if that reboot happended to be into an update (current version).
+  // and if that reboot happened to be into an update (current version).
   // This will return an empty string otherwise.
-  std::string const& GetPrevVersion() const { return prev_version_; }
+  const std::string& GetPrevVersion() const { return prev_version_; }
 
   // Returns the number of consecutive failed update checks.
   virtual unsigned int consecutive_failed_update_checks() const {
@@ -218,8 +206,7 @@
   // Note that only one callback can be set, so effectively at most one client
   // can be notified.
   virtual void set_forced_update_pending_callback(
-      base::Callback<void(bool, bool)>*  // NOLINT(readability/function)
-      callback) {
+      base::Callback<void(bool, bool)>* callback) {
     forced_update_pending_callback_.reset(callback);
   }
 
@@ -227,7 +214,7 @@
   // we want to restrict updates to known safe sources, but under certain
   // conditions it's useful to allow updating from anywhere (e.g. to allow
   // 'cros flash' to function properly).
-  virtual bool IsAnyUpdateSourceAllowed();
+  bool IsAnyUpdateSourceAllowed() const;
 
   // Add and remove a service observer.
   void AddObserver(ServiceObserverInterface* observer) {
@@ -245,9 +232,6 @@
   void ClearObservers() { service_observers_.clear(); }
 
  private:
-  // Update server URL for automated lab test.
-  static const char* const kTestUpdateUrl;
-
   // Friend declarations for testing purposes.
   friend class UpdateAttempterUnderTest;
   friend class UpdateAttempterTest;
@@ -264,13 +248,25 @@
   FRIEND_TEST(UpdateAttempterTest, MarkDeltaUpdateFailureTest);
   FRIEND_TEST(UpdateAttempterTest, PingOmahaTest);
   FRIEND_TEST(UpdateAttempterTest, ReportDailyMetrics);
+  FRIEND_TEST(UpdateAttempterTest, RollbackNotAllowed);
+  FRIEND_TEST(UpdateAttempterTest, RollbackAllowed);
+  FRIEND_TEST(UpdateAttempterTest, RollbackAllowedSetAndReset);
+  FRIEND_TEST(UpdateAttempterTest, RollbackMetricsNotRollbackFailure);
+  FRIEND_TEST(UpdateAttempterTest, RollbackMetricsNotRollbackSuccess);
+  FRIEND_TEST(UpdateAttempterTest, RollbackMetricsRollbackFailure);
+  FRIEND_TEST(UpdateAttempterTest, RollbackMetricsRollbackSuccess);
   FRIEND_TEST(UpdateAttempterTest, ScheduleErrorEventActionNoEventTest);
   FRIEND_TEST(UpdateAttempterTest, ScheduleErrorEventActionTest);
+  FRIEND_TEST(UpdateAttempterTest, SetRollbackHappenedNotRollback);
+  FRIEND_TEST(UpdateAttempterTest, SetRollbackHappenedRollback);
   FRIEND_TEST(UpdateAttempterTest, TargetVersionPrefixSetAndReset);
   FRIEND_TEST(UpdateAttempterTest, UpdateAttemptFlagsCachedAtUpdateStart);
   FRIEND_TEST(UpdateAttempterTest, UpdateDeferredByPolicyTest);
   FRIEND_TEST(UpdateAttempterTest, UpdateIsNotRunningWhenUpdateAvailable);
-  FRIEND_TEST(UpdateAttempterTest, UpdateTest);
+
+  // Returns the special flags to be added to ErrorCode values based on the
+  // parameters used in the current update attempt.
+  uint32_t GetErrorCodeFlags();
 
   // CertificateChecker::Observer method.
   // Report metrics about the certificate being checked.
@@ -313,12 +309,10 @@
 
   ProxyResolver* GetProxyResolver() {
 #if USE_CHROME_NETWORK_PROXY
-    return obeying_proxies_ ?
-        reinterpret_cast<ProxyResolver*>(&chrome_proxy_resolver_) :
-        reinterpret_cast<ProxyResolver*>(&direct_proxy_resolver_);
-#else
-    return &direct_proxy_resolver_;
+    if (obeying_proxies_)
+      return &chrome_proxy_resolver_;
 #endif  // USE_CHROME_NETWORK_PROXY
+    return &direct_proxy_resolver_;
   }
 
   // Sends a ping to Omaha.
@@ -330,11 +324,12 @@
 
   // Helper method of Update() to calculate the update-related parameters
   // from various sources and set the appropriate state. Please refer to
-  // Update() method for the meaning of the parametes.
+  // Update() method for the meaning of the parameters.
   bool CalculateUpdateParams(const std::string& app_version,
                              const std::string& omaha_url,
                              const std::string& target_channel,
                              const std::string& target_version_prefix,
+                             bool rollback_allowed,
                              bool obey_proxies,
                              bool interactive);
 
@@ -342,19 +337,13 @@
   // which type of scattering is enabled, etc.) and also updates/deletes
   // the corresponding prefs file used in scattering. Should be called
   // only after the device policy has been loaded and set in the system_state_.
-  void CalculateScatteringParams(bool is_interactive);
+  void CalculateScatteringParams(bool interactive);
 
   // Sets a random value for the waiting period to wait for before downloading
   // an update, if one available. This value will be upperbounded by the
   // scatter factor value specified from policy.
   void GenerateNewWaitingPeriod();
 
-  // Helper method of Update() and Rollback() to construct the sequence of
-  // actions to be performed for the postinstall.
-  // |previous_action| is the previous action to get
-  // bonded with the install_plan that gets passed to postinstall.
-  void BuildPostInstallActions(InstallPlanAction* previous_action);
-
   // Helper method of Update() to construct the sequence of actions to
   // be performed for an update check. Please refer to
   // Update() method for the meaning of the parameters.
@@ -396,15 +385,20 @@
   // Updates the time an update was last attempted to the current time.
   void UpdateLastCheckedTime();
 
+  // Checks whether we need to clear the rollback-happened preference after
+  // policy is available again.
+  void UpdateRollbackHappened();
+
   // Returns whether an update is currently running or scheduled.
   bool IsUpdateRunningOrScheduled();
 
+  void CalculateStagingParams(bool interactive);
+
   // Last status notification timestamp used for throttling. Use monotonic
   // TimeTicks to ensure that notifications are sent even if the system clock is
   // set back in the middle of an update.
   base::TimeTicks last_notify_time_;
 
-  std::vector<std::shared_ptr<AbstractAction>> actions_;
   std::unique_ptr<ActionProcessor> processor_;
 
   // External state of the system outside the update_engine process
@@ -417,11 +411,8 @@
   // The list of services observing changes in the updater.
   std::set<ServiceObserverInterface*> service_observers_;
 
-  // Pointer to the OmahaResponseHandlerAction in the actions_ vector.
-  std::shared_ptr<OmahaResponseHandlerAction> response_handler_action_;
-
-  // Pointer to the DownloadAction in the actions_ vector.
-  std::shared_ptr<DownloadAction> download_action_;
+  // The install plan.
+  std::unique_ptr<InstallPlan> install_plan_;
 
   // Pointer to the preferences store interface. This is just a cached
   // copy of system_state->prefs() because it's used in many methods and
@@ -437,6 +428,9 @@
   // HTTP server response code from the last HTTP request action.
   int http_response_code_ = 0;
 
+  // The attempt error code when the update attempt finished.
+  ErrorCode attempt_error_code_ = ErrorCode::kSuccess;
+
   // CPU limiter during the update.
   CPULimiter cpu_limiter_;
 
@@ -470,20 +464,6 @@
   ChromeBrowserProxyResolver chrome_proxy_resolver_;
 #endif  // USE_CHROME_NETWORK_PROXY
 
-  // Originally, both of these flags are false. Once UpdateBootFlags is called,
-  // |update_boot_flags_running_| is set to true. As soon as UpdateBootFlags
-  // completes its asynchronous run, |update_boot_flags_running_| is reset to
-  // false and |updated_boot_flags_| is set to true. From that point on there
-  // will be no more changes to these flags.
-  //
-  // True if UpdateBootFlags has completed.
-  bool updated_boot_flags_ = false;
-  // True if UpdateBootFlags is running.
-  bool update_boot_flags_running_ = false;
-
-  // True if the action processor needs to be started by the boot flag updater.
-  bool start_action_processor_ = false;
-
   // Used for fetching information about the device policy.
   std::unique_ptr<policy::PolicyProvider> policy_provider_;
 
@@ -514,6 +494,10 @@
   std::string forced_app_version_;
   std::string forced_omaha_url_;
 
+  // If this is not TimeDelta(), staging is turned on.
+  base::TimeDelta staging_wait_time_;
+  chromeos_update_manager::StagingSchedule staging_schedule_;
+
   DISALLOW_COPY_AND_ASSIGN(UpdateAttempter);
 };
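
The reworked GetProxyResolver() above relies on the implicit derived-to-base conversion instead of reinterpret_cast and keeps only the feature-gated branch inside the #if block. A small self-contained sketch of that pattern with simplified stand-in resolver types:

    #include <iostream>

    // Simplified stand-ins for the resolver interface and implementations.
    class ProxyResolver {
     public:
      virtual ~ProxyResolver() = default;
      virtual const char* Name() const = 0;
    };

    class DirectProxyResolver : public ProxyResolver {
     public:
      const char* Name() const override { return "direct"; }
    };

    class ChromeBrowserProxyResolver : public ProxyResolver {
     public:
      const char* Name() const override { return "chrome"; }
    };

    #define USE_CHROME_NETWORK_PROXY 1

    class Attempter {
     public:
      explicit Attempter(bool obeying_proxies)
          : obeying_proxies_(obeying_proxies) {}

      ProxyResolver* GetProxyResolver() {
    #if USE_CHROME_NETWORK_PROXY
        // Derived-to-base conversion is implicit; no casts are required.
        if (obeying_proxies_)
          return &chrome_proxy_resolver_;
    #endif
        return &direct_proxy_resolver_;
      }

     private:
      bool obeying_proxies_;
      DirectProxyResolver direct_proxy_resolver_;
    #if USE_CHROME_NETWORK_PROXY
      ChromeBrowserProxyResolver chrome_proxy_resolver_;
    #endif
    };

    int main() {
      Attempter a(/*obeying_proxies=*/true);
      std::cout << a.GetProxyResolver()->Name() << "\n";  // prints "chrome"
    }
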
 
diff --git a/update_attempter_android.cc b/update_attempter_android.cc
index 04ccb18..f0de4cb 100644
--- a/update_attempter_android.cc
+++ b/update_attempter_android.cc
@@ -47,6 +47,7 @@
 #include "update_engine/payload_consumer/payload_constants.h"
 #include "update_engine/payload_consumer/payload_metadata.h"
 #include "update_engine/payload_consumer/postinstall_runner_action.h"
+#include "update_engine/update_boot_flags_action.h"
 #include "update_engine/update_status_utils.h"
 
 #ifndef _UE_SIDELOAD
@@ -79,7 +80,7 @@
 
 // Log and set the error on the passed ErrorPtr.
 bool LogAndSetError(brillo::ErrorPtr* error,
-                    const tracked_objects::Location& location,
+                    const base::Location& location,
                     const string& reason) {
   brillo::Error::AddTo(error, location, kErrorDomain, kGenericError, reason);
   LOG(ERROR) << "Replying with failure: " << location.ToString() << ": "
@@ -138,7 +139,7 @@
     return LogAndSetError(
         error, FROM_HERE, "An update already applied, waiting for reboot");
   }
-  if (ongoing_update_) {
+  if (processor_->IsRunning()) {
     return LogAndSetError(
         error, FROM_HERE, "Already processing an update, cancel it first.");
   }
@@ -219,13 +220,24 @@
   // c) RUN_POST_INSTALL is set to 0.
   if (install_plan_.is_resume && prefs_->Exists(kPrefsPostInstallSucceeded)) {
     bool post_install_succeeded = false;
-    prefs_->GetBoolean(kPrefsPostInstallSucceeded, &post_install_succeeded);
-    if (post_install_succeeded) {
+    if (prefs_->GetBoolean(kPrefsPostInstallSucceeded,
+                           &post_install_succeeded) &&
+        post_install_succeeded) {
       install_plan_.run_post_install =
           GetHeaderAsBool(headers[kPayloadPropertyRunPostInstall], true);
     }
   }
 
+  // Skip writing verity if we're resuming and verity has already been written.
+  install_plan_.write_verity = true;
+  if (install_plan_.is_resume && prefs_->Exists(kPrefsVerityWritten)) {
+    bool verity_written = false;
+    if (prefs_->GetBoolean(kPrefsVerityWritten, &verity_written) &&
+        verity_written) {
+      install_plan_.write_verity = false;
+    }
+  }
+
   NetworkId network_id = kDefaultNetworkId;
   if (!headers[kPayloadPropertyNetworkId].empty()) {
     if (!base::StringToUint64(headers[kPayloadPropertyNetworkId],
@@ -246,43 +258,53 @@
   LOG(INFO) << "Using this install plan:";
   install_plan_.Dump();
 
-  BuildUpdateActions(payload_url);
+  HttpFetcher* fetcher = nullptr;
+  if (FileFetcher::SupportedUrl(payload_url)) {
+    DLOG(INFO) << "Using FileFetcher for file URL.";
+    fetcher = new FileFetcher();
+  } else {
+#ifdef _UE_SIDELOAD
+    LOG(FATAL) << "Unsupported sideload URI: " << payload_url;
+#else
+    LibcurlHttpFetcher* libcurl_fetcher =
+        new LibcurlHttpFetcher(&proxy_resolver_, hardware_);
+    libcurl_fetcher->set_server_to_check(ServerToCheck::kDownload);
+    fetcher = libcurl_fetcher;
+#endif  // _UE_SIDELOAD
+  }
   // Setup extra headers.
-  HttpFetcher* fetcher = download_action_->http_fetcher();
   if (!headers[kPayloadPropertyAuthorization].empty())
     fetcher->SetHeader("Authorization", headers[kPayloadPropertyAuthorization]);
   if (!headers[kPayloadPropertyUserAgent].empty())
     fetcher->SetHeader("User-Agent", headers[kPayloadPropertyUserAgent]);
 
-  SetStatusAndNotify(UpdateStatus::UPDATE_AVAILABLE);
-  ongoing_update_ = true;
+  BuildUpdateActions(fetcher);
 
-  // Just in case we didn't update boot flags yet, make sure they're updated
-  // before any update processing starts. This will start the update process.
-  UpdateBootFlags();
+  SetStatusAndNotify(UpdateStatus::UPDATE_AVAILABLE);
 
   UpdatePrefsOnUpdateStart(install_plan_.is_resume);
   // TODO(xunchang) report the metrics for unresumable updates
 
+  ScheduleProcessingStart();
   return true;
 }
 
 bool UpdateAttempterAndroid::SuspendUpdate(brillo::ErrorPtr* error) {
-  if (!ongoing_update_)
+  if (!processor_->IsRunning())
     return LogAndSetError(error, FROM_HERE, "No ongoing update to suspend.");
   processor_->SuspendProcessing();
   return true;
 }
 
 bool UpdateAttempterAndroid::ResumeUpdate(brillo::ErrorPtr* error) {
-  if (!ongoing_update_)
+  if (!processor_->IsRunning())
     return LogAndSetError(error, FROM_HERE, "No ongoing update to resume.");
   processor_->ResumeProcessing();
   return true;
 }
 
 bool UpdateAttempterAndroid::CancelUpdate(brillo::ErrorPtr* error) {
-  if (!ongoing_update_)
+  if (!processor_->IsRunning())
     return LogAndSetError(error, FROM_HERE, "No ongoing update to cancel.");
   processor_->StopProcessing();
   return true;
@@ -349,8 +371,7 @@
   }
   ErrorCode errorcode;
   PayloadMetadata payload_metadata;
-  if (payload_metadata.ParsePayloadHeader(
-          metadata, kBrilloMajorPayloadVersion, &errorcode) !=
+  if (payload_metadata.ParsePayloadHeader(metadata, &errorcode) !=
       MetadataParseResult::kSuccess) {
     return LogAndSetError(error,
                           FROM_HERE,
@@ -485,6 +506,8 @@
   }
   if (type == DownloadAction::StaticType()) {
     SetStatusAndNotify(UpdateStatus::FINALIZING);
+  } else if (type == FilesystemVerifierAction::StaticType()) {
+    prefs_->SetBoolean(kPrefsVerityWritten, true);
   }
 }
 
@@ -534,27 +557,6 @@
   }
 }
 
-void UpdateAttempterAndroid::UpdateBootFlags() {
-  if (updated_boot_flags_) {
-    LOG(INFO) << "Already updated boot flags. Skipping.";
-    CompleteUpdateBootFlags(true);
-    return;
-  }
-  // This is purely best effort.
-  LOG(INFO) << "Marking booted slot as good.";
-  if (!boot_control_->MarkBootSuccessfulAsync(
-          Bind(&UpdateAttempterAndroid::CompleteUpdateBootFlags,
-               base::Unretained(this)))) {
-    LOG(ERROR) << "Failed to mark current boot as successful.";
-    CompleteUpdateBootFlags(false);
-  }
-}
-
-void UpdateAttempterAndroid::CompleteUpdateBootFlags(bool successful) {
-  updated_boot_flags_ = true;
-  ScheduleProcessingStart();
-}
-
 void UpdateAttempterAndroid::ScheduleProcessingStart() {
   LOG(INFO) << "Scheduling an action processor start.";
   brillo::MessageLoop::current()->PostTask(
@@ -569,13 +571,13 @@
     return;
   }
 
+  boot_control_->Cleanup();
+
   download_progress_ = 0;
-  actions_.clear();
   UpdateStatus new_status =
       (error_code == ErrorCode::kSuccess ? UpdateStatus::UPDATED_NEED_REBOOT
                                          : UpdateStatus::IDLE);
   SetStatusAndNotify(new_status);
-  ongoing_update_ = false;
 
   // The network id is only applicable to one download attempt and once it's
   // done the network id should not be re-used anymore.
@@ -609,51 +611,29 @@
   last_notify_time_ = TimeTicks::Now();
 }
 
-void UpdateAttempterAndroid::BuildUpdateActions(const string& url) {
+void UpdateAttempterAndroid::BuildUpdateActions(HttpFetcher* fetcher) {
   CHECK(!processor_->IsRunning());
   processor_->set_delegate(this);
 
   // Actions:
-  shared_ptr<InstallPlanAction> install_plan_action(
-      new InstallPlanAction(install_plan_));
-
-  HttpFetcher* download_fetcher = nullptr;
-  if (FileFetcher::SupportedUrl(url)) {
-    DLOG(INFO) << "Using FileFetcher for file URL.";
-    download_fetcher = new FileFetcher();
-  } else {
-#ifdef _UE_SIDELOAD
-    LOG(FATAL) << "Unsupported sideload URI: " << url;
-#else
-    LibcurlHttpFetcher* libcurl_fetcher =
-        new LibcurlHttpFetcher(&proxy_resolver_, hardware_);
-    libcurl_fetcher->set_server_to_check(ServerToCheck::kDownload);
-    download_fetcher = libcurl_fetcher;
-#endif  // _UE_SIDELOAD
-  }
-  shared_ptr<DownloadAction> download_action(
-      new DownloadAction(prefs_,
-                         boot_control_,
-                         hardware_,
-                         nullptr,           // system_state, not used.
-                         download_fetcher,  // passes ownership
-                         true /* is_interactive */));
-  shared_ptr<FilesystemVerifierAction> filesystem_verifier_action(
-      new FilesystemVerifierAction());
-
-  shared_ptr<PostinstallRunnerAction> postinstall_runner_action(
-      new PostinstallRunnerAction(boot_control_, hardware_));
-
+  auto update_boot_flags_action =
+      std::make_unique<UpdateBootFlagsAction>(boot_control_);
+  auto install_plan_action = std::make_unique<InstallPlanAction>(install_plan_);
+  auto download_action =
+      std::make_unique<DownloadAction>(prefs_,
+                                       boot_control_,
+                                       hardware_,
+                                       nullptr,  // system_state, not used.
+                                       fetcher,  // passes ownership
+                                       true /* interactive */);
   download_action->set_delegate(this);
   download_action->set_base_offset(base_offset_);
-  download_action_ = download_action;
+  auto filesystem_verifier_action =
+      std::make_unique<FilesystemVerifierAction>();
+  auto postinstall_runner_action =
+      std::make_unique<PostinstallRunnerAction>(boot_control_, hardware_);
   postinstall_runner_action->set_delegate(this);
 
-  actions_.push_back(shared_ptr<AbstractAction>(install_plan_action));
-  actions_.push_back(shared_ptr<AbstractAction>(download_action));
-  actions_.push_back(shared_ptr<AbstractAction>(filesystem_verifier_action));
-  actions_.push_back(shared_ptr<AbstractAction>(postinstall_runner_action));
-
   // Bond them together. We have to use the leaf-types when calling
   // BondActions().
   BondActions(install_plan_action.get(), download_action.get());
@@ -661,9 +641,11 @@
   BondActions(filesystem_verifier_action.get(),
               postinstall_runner_action.get());
 
-  // Enqueue the actions.
-  for (const shared_ptr<AbstractAction>& action : actions_)
-    processor_->EnqueueAction(action.get());
+  processor_->EnqueueAction(std::move(update_boot_flags_action));
+  processor_->EnqueueAction(std::move(install_plan_action));
+  processor_->EnqueueAction(std::move(download_action));
+  processor_->EnqueueAction(std::move(filesystem_verifier_action));
+  processor_->EnqueueAction(std::move(postinstall_runner_action));
 }
 
 bool UpdateAttempterAndroid::WriteUpdateCompletedMarker() {
@@ -702,10 +684,12 @@
 
   metrics::AttemptResult attempt_result =
       metrics_utils::GetAttemptResult(error_code);
-  Time attempt_start_time = Time::FromInternalValue(
+  Time boot_time_start = Time::FromInternalValue(
+      metrics_utils::GetPersistedValue(kPrefsUpdateBootTimestampStart, prefs_));
+  Time monotonic_time_start = Time::FromInternalValue(
       metrics_utils::GetPersistedValue(kPrefsUpdateTimestampStart, prefs_));
-  TimeDelta duration = clock_->GetBootTime() - attempt_start_time;
-  TimeDelta duration_uptime = clock_->GetMonotonicTime() - attempt_start_time;
+  TimeDelta duration = clock_->GetBootTime() - boot_time_start;
+  TimeDelta duration_uptime = clock_->GetMonotonicTime() - monotonic_time_start;
 
   metrics_reporter_->ReportUpdateAttemptMetrics(
       nullptr,  // system_state
@@ -755,6 +739,7 @@
         num_bytes_downloaded,
         download_overhead_percentage,
         duration,
+        duration_uptime,
         static_cast<int>(reboot_count),
         0);  // url_switch_count
   }
@@ -823,8 +808,8 @@
         metrics_utils::GetPersistedValue(kPrefsPayloadAttemptNumber, prefs_);
     metrics_utils::SetPayloadAttemptNumber(attempt_number + 1, prefs_);
   }
-  Time update_start_time = clock_->GetMonotonicTime();
-  metrics_utils::SetUpdateTimestampStart(update_start_time, prefs_);
+  metrics_utils::SetUpdateTimestampStart(clock_->GetMonotonicTime(), prefs_);
+  metrics_utils::SetUpdateBootTimestampStart(clock_->GetBootTime(), prefs_);
 }
 
 void UpdateAttempterAndroid::ClearMetricsPrefs() {
@@ -834,6 +819,7 @@
   prefs_->Delete(kPrefsPayloadAttemptNumber);
   prefs_->Delete(kPrefsSystemUpdatedMarker);
   prefs_->Delete(kPrefsUpdateTimestampStart);
+  prefs_->Delete(kPrefsUpdateBootTimestampStart);
 }
 
 }  // namespace chromeos_update_engine
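
The metrics changes in update_attempter_android.cc persist two start timestamps so that the total duration is computed against the boot clock while duration_uptime is computed against the monotonic clock. The following is only an illustration of keeping the two start points separate; std::chrono clocks stand in here for update_engine's ClockInterface (GetBootTime()/GetMonotonicTime()):

    #include <chrono>
    #include <iostream>
    #include <thread>

    // Illustrative stand-ins: steady_clock plays the role of the uptime clock
    // and system_clock plays the role of the clock that keeps advancing across
    // suspend. The real code reads the two values back from separate prefs.
    struct AttemptTimestamps {
      std::chrono::steady_clock::time_point monotonic_start;
      std::chrono::system_clock::time_point wall_start;
    };

    AttemptTimestamps RecordUpdateStart() {
      // Persisted separately, like kPrefsUpdateTimestampStart and
      // kPrefsUpdateBootTimestampStart in the patch above.
      return {std::chrono::steady_clock::now(), std::chrono::system_clock::now()};
    }

    int main() {
      AttemptTimestamps start = RecordUpdateStart();
      std::this_thread::sleep_for(std::chrono::milliseconds(100));

      auto duration_uptime =
          std::chrono::steady_clock::now() - start.monotonic_start;
      auto duration_total = std::chrono::system_clock::now() - start.wall_start;

      using std::chrono::duration_cast;
      using std::chrono::milliseconds;
      std::cout << "uptime ms: "
                << duration_cast<milliseconds>(duration_uptime).count()
                << ", total ms: "
                << duration_cast<milliseconds>(duration_total).count() << "\n";
    }
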
diff --git a/update_attempter_android.h b/update_attempter_android.h
index f00692e..0438c16 100644
--- a/update_attempter_android.h
+++ b/update_attempter_android.h
@@ -93,14 +93,6 @@
  private:
   friend class UpdateAttempterAndroidTest;
 
-  // Asynchronously marks the current slot as successful if needed. If already
-  // marked as good, CompleteUpdateBootFlags() is called starting the action
-  // processor.
-  void UpdateBootFlags();
-
-  // Called when the boot flags have been updated.
-  void CompleteUpdateBootFlags(bool success);
-
   // Schedules an event loop callback to start the action processor. This is
   // scheduled asynchronously to unblock the event loop.
   void ScheduleProcessingStart();
@@ -114,8 +106,9 @@
   void SetStatusAndNotify(UpdateStatus status);
 
   // Helper method to construct the sequence of actions to be performed for
-  // applying an update from the given |url|.
-  void BuildUpdateActions(const std::string& url);
+  // applying an update using the given HttpFetcher. Ownership of |fetcher| is
+  // passed to this function.
+  void BuildUpdateActions(HttpFetcher* fetcher);
 
   // Writes to the processing completed marker. Does nothing if
   // |update_completed_marker_| is empty.
@@ -129,7 +122,10 @@
   // payload_id.
   // |KprefsNumReboots|: number of reboots when applying the current update.
   // |kPrefsSystemUpdatedMarker|: end timestamp of the last successful update.
-  // |kPrefsUpdateTimestampStart|: start timestamp of the current update.
+  // |kPrefsUpdateTimestampStart|: start timestamp in monotonic time of the
+  // current update.
+  // |kPrefsUpdateBootTimestampStart|: start timestamp in boot time of
+  // the current update.
   // |kPrefsCurrentBytesDownloaded|: number of bytes downloaded for the current
   // payload_id.
   // |kPrefsTotalBytesDownloaded|: number of bytes downloaded in total since
@@ -150,13 +146,14 @@
   void UpdatePrefsAndReportUpdateMetricsOnReboot();
 
   // Prefs to update:
-  //   |kPrefsPayloadAttemptNumber|, |kPrefsUpdateTimestampStart|
+  //   |kPrefsPayloadAttemptNumber|, |kPrefsUpdateTimestampStart|,
+  //   |kPrefsUpdateBootTimestampStart|
   void UpdatePrefsOnUpdateStart(bool is_resume);
 
   // Prefs to delete:
   //   |kPrefsNumReboots|, |kPrefsPayloadAttemptNumber|,
   //   |kPrefsSystemUpdatedMarker|, |kPrefsUpdateTimestampStart|,
-  //   |kPrefsCurrentBytesDownloaded|
+  //   |kPrefsUpdateBootTimestampStart|, |kPrefsCurrentBytesDownloaded|
   void ClearMetricsPrefs();
 
   DaemonStateInterface* daemon_state_;
@@ -171,19 +168,9 @@
   // set back in the middle of an update.
   base::TimeTicks last_notify_time_;
 
-  // The list of actions and action processor that runs them asynchronously.
-  // Only used when |ongoing_update_| is true.
-  std::vector<std::shared_ptr<AbstractAction>> actions_;
+  // The processor for running Actions.
   std::unique_ptr<ActionProcessor> processor_;
 
-  // Pointer to the DownloadAction in the actions_ vector.
-  std::shared_ptr<DownloadAction> download_action_;
-
-  // Whether there is an ongoing update. This implies that an update was started
-  // but not finished yet. This value will be true even if the update was
-  // suspended.
-  bool ongoing_update_{false};
-
   // The InstallPlan used during the ongoing update.
   InstallPlan install_plan_;
 
@@ -200,10 +187,6 @@
   // Helper class to select the network to use during the update.
   std::unique_ptr<NetworkSelectorInterface> network_selector_;
 
-  // Whether we have marked the current slot as good. This step is required
-  // before applying an update to the other slot.
-  bool updated_boot_flags_ = false;
-
   std::unique_ptr<ClockInterface> clock_;
 
   std::unique_ptr<MetricsReporterInterface> metrics_reporter_;
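
BuildUpdateActions() now receives the HttpFetcher built by the caller and, per the header comment, takes ownership of it. A minimal sketch of that sink-parameter contract with simplified stand-in types (the real DownloadAction constructor takes more arguments):

    #include <iostream>
    #include <memory>
    #include <string>

    // Simplified stand-ins; the real HttpFetcher/DownloadAction live in
    // update_engine and have richer interfaces.
    class HttpFetcher {
     public:
      virtual ~HttpFetcher() = default;
      virtual std::string Describe() const = 0;
    };

    class FileFetcher : public HttpFetcher {
     public:
      std::string Describe() const override { return "file fetcher"; }
    };

    // The action takes ownership of the raw pointer it is given, mirroring the
    // "passes ownership" comment on the DownloadAction fetcher argument.
    class DownloadAction {
     public:
      explicit DownloadAction(HttpFetcher* fetcher) : fetcher_(fetcher) {}
      void Run() {
        std::cout << "downloading via " << fetcher_->Describe() << "\n";
      }

     private:
      std::unique_ptr<HttpFetcher> fetcher_;  // owned
    };

    // Mirrors the new BuildUpdateActions(HttpFetcher*) contract: the caller
    // constructs the right fetcher for the URL and gives it up here.
    void BuildUpdateActions(HttpFetcher* fetcher) {
      DownloadAction download_action(fetcher);  // ownership transferred
      download_action.Run();
    }

    int main() {
      BuildUpdateActions(new FileFetcher());  // sink parameter; no delete here
    }
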
diff --git a/update_attempter_android_unittest.cc b/update_attempter_android_unittest.cc
index 94452df..2593d44 100644
--- a/update_attempter_android_unittest.cc
+++ b/update_attempter_android_unittest.cc
@@ -120,13 +120,14 @@
   prefs_.SetInt64(kPrefsNumReboots, 3);
   prefs_.SetInt64(kPrefsPayloadAttemptNumber, 2);
   prefs_.SetString(kPrefsPreviousVersion, "56789");
+  prefs_.SetInt64(kPrefsUpdateBootTimestampStart, 10000);
   prefs_.SetInt64(kPrefsUpdateTimestampStart, 12345);
 
   Time boot_time = Time::FromInternalValue(22345);
   Time up_time = Time::FromInternalValue(21345);
   clock_->SetBootTime(boot_time);
   clock_->SetMonotonicTime(up_time);
-  TimeDelta duration = boot_time - Time::FromInternalValue(12345);
+  TimeDelta duration = boot_time - Time::FromInternalValue(10000);
   TimeDelta duration_uptime = up_time - Time::FromInternalValue(12345);
   EXPECT_CALL(
       *metrics_reporter_,
@@ -140,7 +141,8 @@
                                  ErrorCode::kSuccess))
       .Times(1);
   EXPECT_CALL(*metrics_reporter_,
-              ReportSuccessfulUpdateMetrics(2, 0, _, _, _, _, duration, 3, _))
+              ReportSuccessfulUpdateMetrics(
+                  2, 0, _, _, _, _, duration, duration_uptime, 3, _))
       .Times(1);
 
   SetUpdateStatus(UpdateStatus::UPDATE_AVAILABLE);
@@ -181,10 +183,11 @@
                   125,
                   _,
                   _,
+                  _,
                   _))
       .Times(1);
 
-  // The first update fails after receving 50 bytes in total.
+  // The first update fails after receiving 50 bytes in total.
   update_attempter_android_.BytesReceived(30, 50, 200);
   update_attempter_android_.ProcessingDone(nullptr, ErrorCode::kError);
   EXPECT_EQ(
diff --git a/update_attempter_unittest.cc b/update_attempter_unittest.cc
index 240e4ec..fb9f7bc 100644
--- a/update_attempter_unittest.cc
+++ b/update_attempter_unittest.cc
@@ -28,6 +28,7 @@
 #include <gtest/gtest.h>
 #include <policy/libpolicy.h>
 #include <policy/mock_device_policy.h>
+#include <policy/mock_libpolicy.h>
 
 #include "update_engine/common/fake_clock.h"
 #include "update_engine/common/fake_prefs.h"
@@ -47,11 +48,14 @@
 #include "update_engine/payload_consumer/install_plan.h"
 #include "update_engine/payload_consumer/payload_constants.h"
 #include "update_engine/payload_consumer/postinstall_runner_action.h"
+#include "update_engine/update_boot_flags_action.h"
 
 using base::Time;
 using base::TimeDelta;
 using chromeos_update_manager::EvalStatus;
+using chromeos_update_manager::StagingSchedule;
 using chromeos_update_manager::UpdateCheckParams;
+using policy::DevicePolicy;
 using std::string;
 using std::unique_ptr;
 using testing::_;
@@ -60,9 +64,11 @@
 using testing::InSequence;
 using testing::Ne;
 using testing::NiceMock;
+using testing::Pointee;
 using testing::Property;
 using testing::Return;
 using testing::ReturnPointee;
+using testing::ReturnRef;
 using testing::SaveArg;
 using testing::SetArgPointee;
 using update_engine::UpdateAttemptFlags;
@@ -71,6 +77,8 @@
 
 namespace chromeos_update_engine {
 
+const char kRollbackVersion[] = "10575.39.2";
+
 // Test a subclass rather than the main class directly so that we can mock out
 // methods within the class. There're explicit unit tests for the mocked out
 // methods.
@@ -165,6 +173,15 @@
   void P2PEnabledInteractiveStart();
   void P2PEnabledStartingFailsStart();
   void P2PEnabledHousekeepingFailsStart();
+  void ResetRollbackHappenedStart(bool is_consumer,
+                                  bool is_policy_available,
+                                  bool expected_reset);
+  // Staging related callbacks.
+  void SetUpStagingTest(const StagingSchedule& schedule, FakePrefs* prefs);
+  void CheckStagingOff();
+  void StagingSetsPrefsAndTurnsOffScatteringStart();
+  void StagingOffIfInteractiveStart();
+  void StagingOffIfOobeStart();
 
   bool actual_using_p2p_for_downloading() {
     return actual_using_p2p_for_downloading_;
@@ -204,7 +221,7 @@
                         nullptr,
                         nullptr,
                         fetcher.release(),
-                        false /* is_interactive */);
+                        false /* interactive */);
   EXPECT_CALL(*prefs_, GetInt64(kPrefsDeltaUpdateFailures, _)).Times(0);
   attempter_.ActionCompleted(nullptr, &action, ErrorCode::kSuccess);
   EXPECT_EQ(UpdateStatus::FINALIZING, attempter_.status());
@@ -418,8 +435,8 @@
 
 TEST_F(UpdateAttempterTest, ScheduleErrorEventActionTest) {
   EXPECT_CALL(*processor_,
-              EnqueueAction(Property(&AbstractAction::Type,
-                                     OmahaRequestAction::StaticType())));
+              EnqueueAction(Pointee(Property(
+                  &AbstractAction::Type, OmahaRequestAction::StaticType()))));
   EXPECT_CALL(*processor_, StartProcessing());
   ErrorCode err = ErrorCode::kError;
   EXPECT_CALL(*fake_system_state_.mock_payload_state(), UpdateFailed(err));
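
The test changes wrap the old Property(...) matchers in Pointee(...) because EnqueueAction now receives a std::unique_ptr, so the matcher has to look through the pointer at the pointed-to action. A self-contained gMock illustration with stand-in types, assuming a gMock version that supports move-only mock arguments and a gtest_main link:

    #include <memory>
    #include <string>

    #include <gmock/gmock.h>
    #include <gtest/gtest.h>

    using testing::Pointee;
    using testing::Property;

    // Minimal stand-ins for AbstractAction and the processor mock.
    class Action {
     public:
      virtual ~Action() = default;
      virtual std::string Type() const = 0;
    };

    class OmahaRequestLikeAction : public Action {
     public:
      std::string Type() const override { return "OmahaRequestAction"; }
    };

    class MockProcessor {
     public:
      MOCK_METHOD(void, EnqueueAction, (std::unique_ptr<Action>));
    };

    TEST(EnqueueMatcherExample, MatchesThroughUniquePtr) {
      MockProcessor processor;
      // Pointee() dereferences the unique_ptr so Property() can inspect the
      // pointed-to action, matching the pattern used in the updated tests.
      EXPECT_CALL(processor,
                  EnqueueAction(Pointee(
                      Property(&Action::Type, "OmahaRequestAction"))));
      processor.EnqueueAction(std::make_unique<OmahaRequestLikeAction>());
    }
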
@@ -433,15 +450,15 @@
 namespace {
 // Actions that will be built as part of an update check.
 const string kUpdateActionTypes[] = {  // NOLINT(runtime/string)
-  OmahaRequestAction::StaticType(),
-  OmahaResponseHandlerAction::StaticType(),
-  OmahaRequestAction::StaticType(),
-  DownloadAction::StaticType(),
-  OmahaRequestAction::StaticType(),
-  FilesystemVerifierAction::StaticType(),
-  PostinstallRunnerAction::StaticType(),
-  OmahaRequestAction::StaticType()
-};
+    OmahaRequestAction::StaticType(),
+    OmahaResponseHandlerAction::StaticType(),
+    UpdateBootFlagsAction::StaticType(),
+    OmahaRequestAction::StaticType(),
+    DownloadAction::StaticType(),
+    OmahaRequestAction::StaticType(),
+    FilesystemVerifierAction::StaticType(),
+    PostinstallRunnerAction::StaticType(),
+    OmahaRequestAction::StaticType()};
 
 // Actions that will be built as part of a user-initiated rollback.
 const string kRollbackActionTypes[] = {  // NOLINT(runtime/string)
@@ -449,6 +466,9 @@
   PostinstallRunnerAction::StaticType(),
 };
 
+const StagingSchedule kValidStagingSchedule = {
+    {4, 10}, {10, 40}, {19, 70}, {26, 100}};
+
 }  // namespace
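
kValidStagingSchedule above is a list of cumulative (days, percent) milestones. As an illustration only (this is not the real staging_utils/CalculateStagingParams logic), one way such a schedule can be mapped to a per-device wait is to draw a random percentile and take the first milestone that covers it:

    #include <iostream>
    #include <random>
    #include <utility>
    #include <vector>

    // Hypothetical helper, for illustration only: maps a staging schedule of
    // cumulative (days, percent) milestones to a wait in days for one device.
    using StagingSchedule = std::vector<std::pair<int, int>>;  // {days, percent}

    int PickStagingWaitDays(const StagingSchedule& schedule, std::mt19937& rng) {
      std::uniform_int_distribution<int> dist(1, 100);
      const int roll = dist(rng);
      for (const auto& milestone : schedule) {
        // The first milestone whose cumulative percentage covers the roll
        // determines how many days this device waits before updating.
        if (roll <= milestone.second)
          return milestone.first;
      }
      return schedule.empty() ? 0 : schedule.back().first;
    }

    int main() {
      const StagingSchedule schedule = {{4, 10}, {10, 40}, {19, 70}, {26, 100}};
      std::mt19937 rng(std::random_device{}());
      std::cout << "wait days: " << PickStagingWaitDays(schedule, rng) << "\n";
    }
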
 
 void UpdateAttempterTest::UpdateTestStart() {
@@ -466,13 +486,13 @@
     InSequence s;
     for (size_t i = 0; i < arraysize(kUpdateActionTypes); ++i) {
       EXPECT_CALL(*processor_,
-                  EnqueueAction(Property(&AbstractAction::Type,
-                                         kUpdateActionTypes[i])));
+                  EnqueueAction(Pointee(
+                      Property(&AbstractAction::Type, kUpdateActionTypes[i]))));
     }
     EXPECT_CALL(*processor_, StartProcessing());
   }
 
-  attempter_.Update("", "", "", "", false, false);
+  attempter_.Update("", "", "", "", false, false, false);
   loop_.PostTask(FROM_HERE,
                  base::Bind(&UpdateAttempterTest::UpdateTestVerify,
                             base::Unretained(this)));
@@ -481,17 +501,6 @@
 void UpdateAttempterTest::UpdateTestVerify() {
   EXPECT_EQ(0, attempter_.http_response_code());
   EXPECT_EQ(&attempter_, processor_->delegate());
-  EXPECT_EQ(arraysize(kUpdateActionTypes), attempter_.actions_.size());
-  for (size_t i = 0; i < arraysize(kUpdateActionTypes); ++i) {
-    EXPECT_EQ(kUpdateActionTypes[i], attempter_.actions_[i]->Type());
-  }
-  EXPECT_EQ(attempter_.response_handler_action_.get(),
-            attempter_.actions_[1].get());
-  AbstractAction* action_3 = attempter_.actions_[3].get();
-  ASSERT_NE(nullptr, action_3);
-  ASSERT_EQ(DownloadAction::StaticType(), action_3->Type());
-  DownloadAction* download_action = static_cast<DownloadAction*>(action_3);
-  EXPECT_EQ(&attempter_, download_action->delegate());
   EXPECT_EQ(UpdateStatus::CHECKING_FOR_UPDATE, attempter_.status());
   loop_.BreakLoop();
 }
@@ -537,8 +546,8 @@
     InSequence s;
     for (size_t i = 0; i < arraysize(kRollbackActionTypes); ++i) {
       EXPECT_CALL(*processor_,
-                  EnqueueAction(Property(&AbstractAction::Type,
-                                         kRollbackActionTypes[i])));
+                  EnqueueAction(Pointee(Property(&AbstractAction::Type,
+                                                 kRollbackActionTypes[i]))));
     }
     EXPECT_CALL(*processor_, StartProcessing());
 
@@ -555,19 +564,9 @@
 void UpdateAttempterTest::RollbackTestVerify() {
   // Verifies the actions that were enqueued.
   EXPECT_EQ(&attempter_, processor_->delegate());
-  EXPECT_EQ(arraysize(kRollbackActionTypes), attempter_.actions_.size());
-  for (size_t i = 0; i < arraysize(kRollbackActionTypes); ++i) {
-    EXPECT_EQ(kRollbackActionTypes[i], attempter_.actions_[i]->Type());
-  }
   EXPECT_EQ(UpdateStatus::ATTEMPTING_ROLLBACK, attempter_.status());
-  AbstractAction* action_0 = attempter_.actions_[0].get();
-  ASSERT_NE(nullptr, action_0);
-  ASSERT_EQ(InstallPlanAction::StaticType(), action_0->Type());
-  InstallPlanAction* install_plan_action =
-      static_cast<InstallPlanAction*>(action_0);
-  InstallPlan* install_plan = install_plan_action->install_plan();
-  EXPECT_EQ(0U, install_plan->partitions.size());
-  EXPECT_EQ(install_plan->powerwash_required, true);
+  EXPECT_EQ(0U, attempter_.install_plan_->partitions.size());
+  EXPECT_EQ(attempter_.install_plan_->powerwash_required, true);
   loop_.BreakLoop();
 }
 
@@ -602,8 +601,8 @@
 
 void UpdateAttempterTest::PingOmahaTestStart() {
   EXPECT_CALL(*processor_,
-              EnqueueAction(Property(&AbstractAction::Type,
-                                     OmahaRequestAction::StaticType())));
+              EnqueueAction(Pointee(Property(
+                  &AbstractAction::Type, OmahaRequestAction::StaticType()))));
   EXPECT_CALL(*processor_, StartProcessing());
   attempter_.PingOmaha();
   ScheduleQuitMainLoop();
@@ -637,10 +636,8 @@
 }
 
 TEST_F(UpdateAttempterTest, CreatePendingErrorEventResumedTest) {
-  OmahaResponseHandlerAction *response_action =
-      new OmahaResponseHandlerAction(&fake_system_state_);
-  response_action->install_plan_.is_resume = true;
-  attempter_.response_handler_action_.reset(response_action);
+  attempter_.install_plan_.reset(new InstallPlan);
+  attempter_.install_plan_->is_resume = true;
   MockAction action;
   const ErrorCode kCode = ErrorCode::kInstallDeviceOpenError;
   attempter_.CreatePendingErrorEvent(&action, kCode);
@@ -694,7 +691,7 @@
   fake_system_state_.set_p2p_manager(&mock_p2p_manager);
   mock_p2p_manager.fake().SetP2PEnabled(false);
   EXPECT_CALL(mock_p2p_manager, PerformHousekeeping()).Times(0);
-  attempter_.Update("", "", "", "", false, false);
+  attempter_.Update("", "", "", "", false, false, false);
   EXPECT_FALSE(actual_using_p2p_for_downloading_);
   EXPECT_FALSE(actual_using_p2p_for_sharing());
   ScheduleQuitMainLoop();
@@ -716,7 +713,7 @@
   mock_p2p_manager.fake().SetEnsureP2PRunningResult(false);
   mock_p2p_manager.fake().SetPerformHousekeepingResult(false);
   EXPECT_CALL(mock_p2p_manager, PerformHousekeeping()).Times(0);
-  attempter_.Update("", "", "", "", false, false);
+  attempter_.Update("", "", "", "", false, false, false);
   EXPECT_FALSE(actual_using_p2p_for_downloading());
   EXPECT_FALSE(actual_using_p2p_for_sharing());
   ScheduleQuitMainLoop();
@@ -739,7 +736,7 @@
   mock_p2p_manager.fake().SetEnsureP2PRunningResult(true);
   mock_p2p_manager.fake().SetPerformHousekeepingResult(false);
   EXPECT_CALL(mock_p2p_manager, PerformHousekeeping());
-  attempter_.Update("", "", "", "", false, false);
+  attempter_.Update("", "", "", "", false, false, false);
   EXPECT_FALSE(actual_using_p2p_for_downloading());
   EXPECT_FALSE(actual_using_p2p_for_sharing());
   ScheduleQuitMainLoop();
@@ -761,7 +758,7 @@
   mock_p2p_manager.fake().SetEnsureP2PRunningResult(true);
   mock_p2p_manager.fake().SetPerformHousekeepingResult(true);
   EXPECT_CALL(mock_p2p_manager, PerformHousekeeping());
-  attempter_.Update("", "", "", "", false, false);
+  attempter_.Update("", "", "", "", false, false, false);
   EXPECT_TRUE(actual_using_p2p_for_downloading());
   EXPECT_TRUE(actual_using_p2p_for_sharing());
   ScheduleQuitMainLoop();
@@ -784,7 +781,13 @@
   mock_p2p_manager.fake().SetEnsureP2PRunningResult(true);
   mock_p2p_manager.fake().SetPerformHousekeepingResult(true);
   EXPECT_CALL(mock_p2p_manager, PerformHousekeeping());
-  attempter_.Update("", "", "", "", false, true /* interactive */);
+  attempter_.Update("",
+                    "",
+                    "",
+                    "",
+                    false,
+                    false,
+                    /*interactive=*/true);
   EXPECT_FALSE(actual_using_p2p_for_downloading());
   EXPECT_TRUE(actual_using_p2p_for_sharing());
   ScheduleQuitMainLoop();
@@ -815,7 +818,7 @@
   attempter_.policy_provider_.reset(
       new policy::PolicyProvider(std::move(device_policy)));
 
-  attempter_.Update("", "", "", "", false, false);
+  attempter_.Update("", "", "", "", false, false, false);
   EXPECT_EQ(scatter_factor_in_seconds, attempter_.scatter_factor_.InSeconds());
 
   ScheduleQuitMainLoop();
@@ -854,7 +857,7 @@
   attempter_.policy_provider_.reset(
       new policy::PolicyProvider(std::move(device_policy)));
 
-  attempter_.Update("", "", "", "", false, false);
+  attempter_.Update("", "", "", "", false, false, false);
   EXPECT_EQ(scatter_factor_in_seconds, attempter_.scatter_factor_.InSeconds());
 
   // Make sure the file still exists.
@@ -870,7 +873,7 @@
   // However, if the count is already 0, it's not decremented. Test that.
   initial_value = 0;
   EXPECT_TRUE(fake_prefs.SetInt64(kPrefsUpdateCheckCount, initial_value));
-  attempter_.Update("", "", "", "", false, false);
+  attempter_.Update("", "", "", "", false, false, false);
   EXPECT_TRUE(fake_prefs.Exists(kPrefsUpdateCheckCount));
   EXPECT_TRUE(fake_prefs.GetInt64(kPrefsUpdateCheckCount, &new_value));
   EXPECT_EQ(initial_value, new_value);
@@ -895,7 +898,8 @@
   fake_system_state_.fake_hardware()->SetIsOOBEComplete(Time::UnixEpoch());
   fake_system_state_.set_prefs(&fake_prefs);
 
-  EXPECT_TRUE(fake_prefs.SetInt64(kPrefsWallClockWaitPeriod, initial_value));
+  EXPECT_TRUE(
+      fake_prefs.SetInt64(kPrefsWallClockScatteringWaitPeriod, initial_value));
   EXPECT_TRUE(fake_prefs.SetInt64(kPrefsUpdateCheckCount, initial_value));
 
   // make sure scatter_factor is non-zero as scattering is disabled
@@ -915,14 +919,20 @@
       new policy::PolicyProvider(std::move(device_policy)));
 
   // Trigger an interactive check so we can test that scattering is disabled.
-  attempter_.Update("", "", "", "", false, true);
+  attempter_.Update("",
+                    "",
+                    "",
+                    "",
+                    false,
+                    false,
+                    /*interactive=*/true);
   EXPECT_EQ(scatter_factor_in_seconds, attempter_.scatter_factor_.InSeconds());
 
   // Make sure scattering is disabled for manual (i.e. user initiated) update
   // checks and all artifacts are removed.
   EXPECT_FALSE(
       attempter_.omaha_request_params_->wall_clock_based_wait_enabled());
-  EXPECT_FALSE(fake_prefs.Exists(kPrefsWallClockWaitPeriod));
+  EXPECT_FALSE(fake_prefs.Exists(kPrefsWallClockScatteringWaitPeriod));
   EXPECT_EQ(0, attempter_.omaha_request_params_->waiting_period().InSeconds());
   EXPECT_FALSE(
       attempter_.omaha_request_params_->update_check_count_wait_enabled());
@@ -931,6 +941,125 @@
   ScheduleQuitMainLoop();
 }
 
+void UpdateAttempterTest::SetUpStagingTest(const StagingSchedule& schedule,
+                                           FakePrefs* prefs) {
+  attempter_.prefs_ = prefs;
+  fake_system_state_.set_prefs(prefs);
+
+  int64_t initial_value = 8;
+  EXPECT_TRUE(
+      prefs->SetInt64(kPrefsWallClockScatteringWaitPeriod, initial_value));
+  EXPECT_TRUE(prefs->SetInt64(kPrefsUpdateCheckCount, initial_value));
+  attempter_.scatter_factor_ = TimeDelta::FromSeconds(20);
+
+  auto device_policy = std::make_unique<policy::MockDevicePolicy>();
+  EXPECT_CALL(*device_policy, LoadPolicy()).WillRepeatedly(Return(true));
+  fake_system_state_.set_device_policy(device_policy.get());
+  EXPECT_CALL(*device_policy, GetDeviceUpdateStagingSchedule(_))
+      .WillRepeatedly(DoAll(SetArgPointee<0>(schedule), Return(true)));
+
+  attempter_.policy_provider_.reset(
+      new policy::PolicyProvider(std::move(device_policy)));
+}
+
+TEST_F(UpdateAttempterTest, StagingSetsPrefsAndTurnsOffScattering) {
+  loop_.PostTask(
+      FROM_HERE,
+      base::Bind(
+          &UpdateAttempterTest::StagingSetsPrefsAndTurnsOffScatteringStart,
+          base::Unretained(this)));
+  loop_.Run();
+}
+
+void UpdateAttempterTest::StagingSetsPrefsAndTurnsOffScatteringStart() {
+  // Tests that staging sets its prefs properly and turns off scattering.
+  fake_system_state_.fake_hardware()->SetIsOOBEComplete(Time::UnixEpoch());
+  FakePrefs fake_prefs;
+  SetUpStagingTest(kValidStagingSchedule, &fake_prefs);
+
+  attempter_.Update("", "", "", "", false, false, false);
+  // Check that prefs have the correct values.
+  int64_t update_count;
+  EXPECT_TRUE(fake_prefs.GetInt64(kPrefsUpdateCheckCount, &update_count));
+  int64_t waiting_time_days;
+  EXPECT_TRUE(fake_prefs.GetInt64(kPrefsWallClockStagingWaitPeriod,
+                                  &waiting_time_days));
+  EXPECT_GT(waiting_time_days, 0);
+  // Update count should have been decremented.
+  EXPECT_EQ(7, update_count);
+  // Check that Omaha parameters were updated correctly.
+  EXPECT_TRUE(
+      attempter_.omaha_request_params_->update_check_count_wait_enabled());
+  EXPECT_TRUE(
+      attempter_.omaha_request_params_->wall_clock_based_wait_enabled());
+  EXPECT_EQ(waiting_time_days,
+            attempter_.omaha_request_params_->waiting_period().InDays());
+  // Check class variables.
+  EXPECT_EQ(waiting_time_days, attempter_.staging_wait_time_.InDays());
+  EXPECT_EQ(kValidStagingSchedule, attempter_.staging_schedule_);
+  // Check that scattering is turned off
+  EXPECT_EQ(0, attempter_.scatter_factor_.InSeconds());
+  EXPECT_FALSE(fake_prefs.Exists(kPrefsWallClockScatteringWaitPeriod));
+
+  ScheduleQuitMainLoop();
+}
+
+void UpdateAttempterTest::CheckStagingOff() {
+  // Check that all prefs were removed.
+  EXPECT_FALSE(attempter_.prefs_->Exists(kPrefsUpdateCheckCount));
+  EXPECT_FALSE(attempter_.prefs_->Exists(kPrefsWallClockScatteringWaitPeriod));
+  EXPECT_FALSE(attempter_.prefs_->Exists(kPrefsWallClockStagingWaitPeriod));
+  // Check that the Omaha parameters have the correct value.
+  EXPECT_EQ(0, attempter_.omaha_request_params_->waiting_period().InDays());
+  EXPECT_EQ(attempter_.omaha_request_params_->waiting_period(),
+            attempter_.staging_wait_time_);
+  EXPECT_FALSE(
+      attempter_.omaha_request_params_->update_check_count_wait_enabled());
+  EXPECT_FALSE(
+      attempter_.omaha_request_params_->wall_clock_based_wait_enabled());
+  // Check that scattering is turned off too.
+  EXPECT_EQ(0, attempter_.scatter_factor_.InSeconds());
+}
+
+TEST_F(UpdateAttempterTest, StagingOffIfInteractive) {
+  loop_.PostTask(FROM_HERE,
+                 base::Bind(&UpdateAttempterTest::StagingOffIfInteractiveStart,
+                            base::Unretained(this)));
+  loop_.Run();
+}
+
+void UpdateAttempterTest::StagingOffIfInteractiveStart() {
+  // Tests that staging is turned off when an interactive update is requested.
+  fake_system_state_.fake_hardware()->SetIsOOBEComplete(Time::UnixEpoch());
+  FakePrefs fake_prefs;
+  SetUpStagingTest(kValidStagingSchedule, &fake_prefs);
+
+  attempter_.Update("", "", "", "", false, false, /* interactive = */ true);
+  CheckStagingOff();
+
+  ScheduleQuitMainLoop();
+}
+
+TEST_F(UpdateAttempterTest, StagingOffIfOobe) {
+  loop_.PostTask(FROM_HERE,
+                 base::Bind(&UpdateAttempterTest::StagingOffIfOobeStart,
+                            base::Unretained(this)));
+  loop_.Run();
+}
+
+void UpdateAttempterTest::StagingOffIfOobeStart() {
+  // Tests that staging is turned off if OOBE hasn't been completed.
+  fake_system_state_.fake_hardware()->SetIsOOBEEnabled(true);
+  fake_system_state_.fake_hardware()->UnsetIsOOBEComplete();
+  FakePrefs fake_prefs;
+  SetUpStagingTest(kValidStagingSchedule, &fake_prefs);
+
+  attempter_.Update("", "", "", "", false, false, /* interactive = */ true);
+  CheckStagingOff();
+
+  ScheduleQuitMainLoop();
+}
+
 // Checks that we only report daily metrics at most every 24 hours.
 TEST_F(UpdateAttempterTest, ReportDailyMetrics) {
   FakeClock fake_clock;
@@ -1044,37 +1173,56 @@
 }
 
 TEST_F(UpdateAttempterTest, TargetVersionPrefixSetAndReset) {
-  attempter_.CalculateUpdateParams("", "", "", "1234", false, false);
+  attempter_.CalculateUpdateParams("", "", "", "1234", false, false, false);
   EXPECT_EQ("1234",
             fake_system_state_.request_params()->target_version_prefix());
 
-  attempter_.CalculateUpdateParams("", "", "", "", false, false);
+  attempter_.CalculateUpdateParams("", "", "", "", false, false, false);
   EXPECT_TRUE(
       fake_system_state_.request_params()->target_version_prefix().empty());
 }
 
+TEST_F(UpdateAttempterTest, RollbackAllowedSetAndReset) {
+  attempter_.CalculateUpdateParams("",
+                                   "",
+                                   "",
+                                   "1234",
+                                   /*rollback_allowed=*/true,
+                                   false,
+                                   false);
+  EXPECT_TRUE(fake_system_state_.request_params()->rollback_allowed());
+
+  attempter_.CalculateUpdateParams("",
+                                   "",
+                                   "",
+                                   "1234",
+                                   /*rollback_allowed=*/false,
+                                   false,
+                                   false);
+  EXPECT_FALSE(fake_system_state_.request_params()->rollback_allowed());
+}
+
 TEST_F(UpdateAttempterTest, UpdateDeferredByPolicyTest) {
   // Construct an OmahaResponseHandlerAction that has processed an InstallPlan,
   // but the update is being deferred by the Policy.
-  OmahaResponseHandlerAction* response_action =
-      new OmahaResponseHandlerAction(&fake_system_state_);
-  response_action->install_plan_.version = "a.b.c.d";
-  response_action->install_plan_.system_version = "b.c.d.e";
-  response_action->install_plan_.payloads.push_back(
+  OmahaResponseHandlerAction response_action(&fake_system_state_);
+  response_action.install_plan_.version = "a.b.c.d";
+  response_action.install_plan_.system_version = "b.c.d.e";
+  response_action.install_plan_.payloads.push_back(
       {.size = 1234ULL, .type = InstallPayloadType::kFull});
-  attempter_.response_handler_action_.reset(response_action);
   // Inform the UpdateAttempter that the OmahaResponseHandlerAction has
   // completed, with the deferred-update error code.
   attempter_.ActionCompleted(
-      nullptr, response_action, ErrorCode::kOmahaUpdateDeferredPerPolicy);
+      nullptr, &response_action, ErrorCode::kOmahaUpdateDeferredPerPolicy);
   {
     UpdateEngineStatus status;
     attempter_.GetStatus(&status);
     EXPECT_EQ(UpdateStatus::UPDATE_AVAILABLE, status.status);
-    EXPECT_EQ(response_action->install_plan_.version, status.new_version);
-    EXPECT_EQ(response_action->install_plan_.system_version,
+    EXPECT_TRUE(attempter_.install_plan_);
+    EXPECT_EQ(attempter_.install_plan_->version, status.new_version);
+    EXPECT_EQ(attempter_.install_plan_->system_version,
               status.new_system_version);
-    EXPECT_EQ(response_action->install_plan_.payloads[0].size,
+    EXPECT_EQ(attempter_.install_plan_->payloads[0].size,
               status.new_size_bytes);
   }
   // An "error" event should have been created to tell Omaha that the update is
@@ -1093,10 +1241,10 @@
     UpdateEngineStatus status;
     attempter_.GetStatus(&status);
     EXPECT_EQ(UpdateStatus::REPORTING_ERROR_EVENT, status.status);
-    EXPECT_EQ(response_action->install_plan_.version, status.new_version);
-    EXPECT_EQ(response_action->install_plan_.system_version,
+    EXPECT_EQ(response_action.install_plan_.version, status.new_version);
+    EXPECT_EQ(response_action.install_plan_.system_version,
               status.new_system_version);
-    EXPECT_EQ(response_action->install_plan_.payloads[0].size,
+    EXPECT_EQ(response_action.install_plan_.payloads[0].size,
               status.new_size_bytes);
   }
 }
@@ -1118,6 +1266,20 @@
             attempter_.GetCurrentUpdateAttemptFlags());
 }
 
+TEST_F(UpdateAttempterTest, RollbackNotAllowed) {
+  UpdateCheckParams params = {.updates_enabled = true,
+                              .rollback_allowed = false};
+  attempter_.OnUpdateScheduled(EvalStatus::kSucceeded, params);
+  EXPECT_FALSE(fake_system_state_.request_params()->rollback_allowed());
+}
+
+TEST_F(UpdateAttempterTest, RollbackAllowed) {
+  UpdateCheckParams params = {.updates_enabled = true,
+                              .rollback_allowed = true};
+  attempter_.OnUpdateScheduled(EvalStatus::kSucceeded, params);
+  EXPECT_TRUE(fake_system_state_.request_params()->rollback_allowed());
+}
+
 TEST_F(UpdateAttempterTest, InteractiveUpdateUsesPassedRestrictions) {
   attempter_.SetUpdateAttemptFlags(UpdateAttemptFlags::kFlagRestrictDownload);
 
@@ -1139,4 +1301,124 @@
             attempter_.GetCurrentUpdateAttemptFlags());
 }
 
+void UpdateAttempterTest::ResetRollbackHappenedStart(bool is_consumer,
+                                                     bool is_policy_loaded,
+                                                     bool expected_reset) {
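+  // A rollback is reported to have happened before this update attempt.
+  // Whether the rollback-happened flag is reset depends on the device being a
+  // consumer device or having a device policy loaded.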
+  EXPECT_CALL(*fake_system_state_.mock_payload_state(), GetRollbackHappened())
+      .WillRepeatedly(Return(true));
+  auto mock_policy_provider =
+      std::make_unique<NiceMock<policy::MockPolicyProvider>>();
+  EXPECT_CALL(*mock_policy_provider, IsConsumerDevice())
+      .WillRepeatedly(Return(is_consumer));
+  EXPECT_CALL(*mock_policy_provider, device_policy_is_loaded())
+      .WillRepeatedly(Return(is_policy_loaded));
+  const policy::MockDevicePolicy device_policy;
+  EXPECT_CALL(*mock_policy_provider, GetDevicePolicy())
+      .WillRepeatedly(ReturnRef(device_policy));
+  EXPECT_CALL(*fake_system_state_.mock_payload_state(),
+              SetRollbackHappened(false))
+      .Times(expected_reset ? 1 : 0);
+  attempter_.policy_provider_ = std::move(mock_policy_provider);
+  attempter_.Update("", "", "", "", false, false, false);
+  ScheduleQuitMainLoop();
+}
+
+TEST_F(UpdateAttempterTest, ResetRollbackHappenedOobe) {
+  loop_.PostTask(FROM_HERE,
+                 base::Bind(&UpdateAttempterTest::ResetRollbackHappenedStart,
+                            base::Unretained(this),
+                            /*is_consumer=*/false,
+                            /*is_policy_loaded=*/false,
+                            /*expected_reset=*/false));
+  loop_.Run();
+}
+
+TEST_F(UpdateAttempterTest, ResetRollbackHappenedConsumer) {
+  loop_.PostTask(FROM_HERE,
+                 base::Bind(&UpdateAttempterTest::ResetRollbackHappenedStart,
+                            base::Unretained(this),
+                            /*is_consumer=*/true,
+                            /*is_policy_loaded=*/false,
+                            /*expected_reset=*/true));
+  loop_.Run();
+}
+
+TEST_F(UpdateAttempterTest, ResetRollbackHappenedEnterprise) {
+  loop_.PostTask(FROM_HERE,
+                 base::Bind(&UpdateAttempterTest::ResetRollbackHappenedStart,
+                            base::Unretained(this),
+                            /*is_consumer=*/false,
+                            /*is_policy_loaded=*/true,
+                            /*expected_reset=*/true));
+  loop_.Run();
+}
+
+TEST_F(UpdateAttempterTest, SetRollbackHappenedRollback) {
+  attempter_.install_plan_.reset(new InstallPlan);
+  attempter_.install_plan_->is_rollback = true;
+
+  EXPECT_CALL(*fake_system_state_.mock_payload_state(),
+              SetRollbackHappened(true))
+      .Times(1);
+  attempter_.ProcessingDone(nullptr, ErrorCode::kSuccess);
+}
+
+TEST_F(UpdateAttempterTest, SetRollbackHappenedNotRollback) {
+  attempter_.install_plan_.reset(new InstallPlan);
+  attempter_.install_plan_->is_rollback = false;
+
+  EXPECT_CALL(*fake_system_state_.mock_payload_state(),
+              SetRollbackHappened(true))
+      .Times(0);
+  attempter_.ProcessingDone(nullptr, ErrorCode::kSuccess);
+}
+
+TEST_F(UpdateAttempterTest, RollbackMetricsRollbackSuccess) {
+  attempter_.install_plan_.reset(new InstallPlan);
+  attempter_.install_plan_->is_rollback = true;
+  attempter_.install_plan_->version = kRollbackVersion;
+
+  EXPECT_CALL(*fake_system_state_.mock_metrics_reporter(),
+              ReportEnterpriseRollbackMetrics(true, kRollbackVersion))
+      .Times(1);
+  attempter_.ProcessingDone(nullptr, ErrorCode::kSuccess);
+}
+
+TEST_F(UpdateAttempterTest, RollbackMetricsNotRollbackSuccess) {
+  attempter_.install_plan_.reset(new InstallPlan);
+  attempter_.install_plan_->is_rollback = false;
+  attempter_.install_plan_->version = kRollbackVersion;
+
+  EXPECT_CALL(*fake_system_state_.mock_metrics_reporter(),
+              ReportEnterpriseRollbackMetrics(_, _))
+      .Times(0);
+  attempter_.ProcessingDone(nullptr, ErrorCode::kSuccess);
+}
+
+TEST_F(UpdateAttempterTest, RollbackMetricsRollbackFailure) {
+  attempter_.install_plan_.reset(new InstallPlan);
+  attempter_.install_plan_->is_rollback = true;
+  attempter_.install_plan_->version = kRollbackVersion;
+
+  EXPECT_CALL(*fake_system_state_.mock_metrics_reporter(),
+              ReportEnterpriseRollbackMetrics(false, kRollbackVersion))
+      .Times(1);
+  MockAction action;
+  attempter_.CreatePendingErrorEvent(&action, ErrorCode::kRollbackNotPossible);
+  attempter_.ProcessingDone(nullptr, ErrorCode::kRollbackNotPossible);
+}
+
+TEST_F(UpdateAttempterTest, RollbackMetricsNotRollbackFailure) {
+  attempter_.install_plan_.reset(new InstallPlan);
+  attempter_.install_plan_->is_rollback = false;
+  attempter_.install_plan_->version = kRollbackVersion;
+
+  EXPECT_CALL(*fake_system_state_.mock_metrics_reporter(),
+              ReportEnterpriseRollbackMetrics(_, _))
+      .Times(0);
+  MockAction action;
+  attempter_.CreatePendingErrorEvent(&action, ErrorCode::kRollbackNotPossible);
+  attempter_.ProcessingDone(nullptr, ErrorCode::kRollbackNotPossible);
+}
+
 }  // namespace chromeos_update_engine
diff --git a/update_boot_flags_action.cc b/update_boot_flags_action.cc
new file mode 100644
index 0000000..97ef7f2
--- /dev/null
+++ b/update_boot_flags_action.cc
@@ -0,0 +1,68 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/update_boot_flags_action.h"
+
+#include <base/bind.h>
+#include <base/logging.h>
+
+#include "update_engine/common/boot_control.h"
+
+namespace chromeos_update_engine {
+
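+// These flags are static so that the boot-flag update is attempted at most
+// once per process, no matter how many UpdateBootFlagsAction instances are
+// enqueued.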
+bool UpdateBootFlagsAction::updated_boot_flags_ = false;
+bool UpdateBootFlagsAction::is_running_ = false;
+
+void UpdateBootFlagsAction::PerformAction() {
+  if (is_running_) {
+    LOG(INFO) << "Update boot flags running, nothing to do.";
+    processor_->ActionComplete(this, ErrorCode::kSuccess);
+    return;
+  }
+  if (updated_boot_flags_) {
+    LOG(INFO) << "Already updated boot flags. Skipping.";
+    processor_->ActionComplete(this, ErrorCode::kSuccess);
+    return;
+  }
+
+  // This is purely best effort. Failures should be logged by Subprocess. Run
+  // the operation asynchronously to avoid blocking the event loop regardless
+  // of how long it takes.
+  is_running_ = true;
+  LOG(INFO) << "Marking booted slot as good.";
+  if (!boot_control_->MarkBootSuccessfulAsync(
+          base::Bind(&UpdateBootFlagsAction::CompleteUpdateBootFlags,
+                     base::Unretained(this)))) {
+    CompleteUpdateBootFlags(false);
+  }
+}
+
+void UpdateBootFlagsAction::CompleteUpdateBootFlags(bool successful) {
+  is_running_ = false;
+  if (!successful) {
+    // We ignore the failure for now because if updating the boot flags is
+    // flaky or has a bug in a specific release, then blocking the update can
+    // cause devices to stay behind even though we could have updated the
+    // system and fixed the issue regardless of this failure.
+    //
+    // TODO(ahassani): Add new error code metric for kUpdateBootFlagsFailed.
+    LOG(ERROR) << "Updating boot flags failed, but ignoring its failure.";
+  }
+  updated_boot_flags_ = true;
+  processor_->ActionComplete(this, ErrorCode::kSuccess);
+}
+
+}  // namespace chromeos_update_engine
diff --git a/update_boot_flags_action.h b/update_boot_flags_action.h
new file mode 100644
index 0000000..afa2c3f
--- /dev/null
+++ b/update_boot_flags_action.h
@@ -0,0 +1,59 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include <string>
+
+#include "update_engine/common/action.h"
+#include "update_engine/common/boot_control_interface.h"
+
+#include <gtest/gtest_prod.h>
+
+namespace chromeos_update_engine {
+
+class UpdateBootFlagsAction : public AbstractAction {
+ public:
+  explicit UpdateBootFlagsAction(BootControlInterface* boot_control)
+      : boot_control_(boot_control) {}
+
+  void PerformAction() override;
+
+  static std::string StaticType() { return "UpdateBootFlagsAction"; }
+  std::string Type() const override { return StaticType(); }
+
+  void CompleteUpdateBootFlags(bool successful);
+
+ private:
+  FRIEND_TEST(UpdateBootFlagsActionTest, SimpleTest);
+  FRIEND_TEST(UpdateBootFlagsActionTest, DoubleActionTest);
+
+  // Initially, both of these flags are false. Once the boot-flag update is
+  // started, |is_running_| is set to true. As soon as the asynchronous update
+  // completes, |is_running_| is reset to false and |updated_boot_flags_| is
+  // set to true. From that point on there will be no more changes to these
+  // flags.
+  //
+  // True if the boot flags have been updated.
+  static bool updated_boot_flags_;
+  // True if we are still updating the boot flags.
+  static bool is_running_;
+
+  // Used for setting the boot flag.
+  BootControlInterface* boot_control_;
+
+  DISALLOW_COPY_AND_ASSIGN(UpdateBootFlagsAction);
+};
+
+}  // namespace chromeos_update_engine
diff --git a/update_boot_flags_action_unittest.cc b/update_boot_flags_action_unittest.cc
new file mode 100644
index 0000000..1b2bfa5
--- /dev/null
+++ b/update_boot_flags_action_unittest.cc
@@ -0,0 +1,69 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/update_boot_flags_action.h"
+
+#include <memory>
+#include <utility>
+
+#include <base/bind.h>
+#include <gtest/gtest.h>
+
+#include "update_engine/fake_system_state.h"
+
+namespace chromeos_update_engine {
+
+class UpdateBootFlagsActionTest : public ::testing::Test {
+ public:
+  FakeSystemState fake_system_state_;
+};
+
+TEST_F(UpdateBootFlagsActionTest, SimpleTest) {
+  auto boot_control = fake_system_state_.fake_boot_control();
+  auto action = std::make_unique<UpdateBootFlagsAction>(boot_control);
+  ActionProcessor processor;
+  processor.EnqueueAction(std::move(action));
+
+  EXPECT_FALSE(UpdateBootFlagsAction::updated_boot_flags_);
+  EXPECT_FALSE(UpdateBootFlagsAction::is_running_);
+  processor.StartProcessing();
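+  // The test expects the asynchronous completion callback to have run by the
+  // time StartProcessing() returns, leaving |updated_boot_flags_| set and
+  // |is_running_| cleared.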
+  EXPECT_TRUE(UpdateBootFlagsAction::updated_boot_flags_);
+  EXPECT_FALSE(UpdateBootFlagsAction::is_running_);
+}
+
+TEST_F(UpdateBootFlagsActionTest, DoubleActionTest) {
+  // Reset the static flags.
+  UpdateBootFlagsAction::updated_boot_flags_ = false;
+  UpdateBootFlagsAction::is_running_ = false;
+
+  auto boot_control = fake_system_state_.fake_boot_control();
+  auto action1 = std::make_unique<UpdateBootFlagsAction>(boot_control);
+  auto action2 = std::make_unique<UpdateBootFlagsAction>(boot_control);
+  ActionProcessor processor1, processor2;
+  processor1.EnqueueAction(std::move(action1));
+  processor2.EnqueueAction(std::move(action2));
+
+  EXPECT_FALSE(UpdateBootFlagsAction::updated_boot_flags_);
+  EXPECT_FALSE(UpdateBootFlagsAction::is_running_);
+  processor1.StartProcessing();
+  EXPECT_TRUE(UpdateBootFlagsAction::updated_boot_flags_);
+  EXPECT_FALSE(UpdateBootFlagsAction::is_running_);
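+  // The second action should be a no-op: |updated_boot_flags_| is static and
+  // already set, so it completes immediately without marking the slot again.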
+  processor2.StartProcessing();
+  EXPECT_TRUE(UpdateBootFlagsAction::updated_boot_flags_);
+  EXPECT_FALSE(UpdateBootFlagsAction::is_running_);
+}
+
+}  // namespace chromeos_update_engine
diff --git a/update_engine.conf b/update_engine.conf
index 3358411..af213ad 100644
--- a/update_engine.conf
+++ b/update_engine.conf
@@ -1,2 +1,2 @@
 PAYLOAD_MAJOR_VERSION=2
-PAYLOAD_MINOR_VERSION=5
+PAYLOAD_MINOR_VERSION=6
diff --git a/update_engine.gyp b/update_engine.gyp
index 1ff4d7f..358f64a 100644
--- a/update_engine.gyp
+++ b/update_engine.gyp
@@ -50,6 +50,7 @@
       '_POSIX_C_SOURCE=199309L',
       'USE_BINDER=<(USE_binder)',
       'USE_DBUS=<(USE_dbus)',
+      'USE_FEC=0',
       'USE_HWID_OVERRIDE=<(USE_hwid_override)',
       'USE_CHROME_KIOSK_APP=<(USE_chrome_kiosk_app)',
       'USE_CHROME_NETWORK_PROXY=<(USE_chrome_network_proxy)',
@@ -103,16 +104,16 @@
       'includes': ['../../../platform2/common-mk/generate-dbus-adaptors.gypi'],
     },
     {
-      'target_name': 'update_engine-dbus-libcros-client',
+      'target_name': 'update_engine-dbus-kiosk-app-client',
       'type': 'none',
       'actions': [{
-        'action_name': 'update_engine-dbus-libcros-client-action',
+        'action_name': 'update_engine-dbus-kiosk-app-client-action',
         'variables': {
-          'mock_output_file': 'include/libcros/dbus-proxy-mocks.h',
-          'proxy_output_file': 'include/libcros/dbus-proxies.h',
+          'mock_output_file': 'include/kiosk-app/dbus-proxy-mocks.h',
+          'proxy_output_file': 'include/kiosk-app/dbus-proxies.h',
         },
         'sources': [
-          'dbus_bindings/org.chromium.LibCrosService.dbus-xml',
+          'dbus_bindings/org.chromium.KioskAppService.dbus-xml',
         ],
         'includes': ['../../../platform2/common-mk/generate-dbus-proxies.gypi'],
       }],
@@ -124,7 +125,7 @@
       'dependencies': [
         'update_metadata-protos',
       ],
-      #TODO(deymo): Remove unused dependencies once we stop including files
+      # TODO(deymo): Remove unused dependencies once we stop including files
       # from the root directory.
       'variables': {
         'exported_deps': [
@@ -167,6 +168,7 @@
         'common/multi_range_http_fetcher.cc',
         'common/platform_constants_chromeos.cc',
         'common/prefs.cc',
+        'common/proxy_resolver.cc',
         'common/subprocess.cc',
         'common/terminator.cc',
         'common/utils.cc',
@@ -186,6 +188,7 @@
         'payload_consumer/payload_metadata.cc',
         'payload_consumer/payload_verifier.cc',
         'payload_consumer/postinstall_runner_action.cc',
+        'payload_consumer/verity_writer_stub.cc',
         'payload_consumer/xz_extent_writer.cc',
       ],
       'conditions': [
@@ -268,10 +271,10 @@
         'p2p_manager.cc',
         'payload_state.cc',
         'power_manager_chromeos.cc',
-        'proxy_resolver.cc',
         'real_system_state.cc',
         'shill_proxy.cc',
         'update_attempter.cc',
+        'update_boot_flags_action.cc',
         'update_manager/boxed_value.cc',
         'update_manager/chromeos_policy.cc',
         'update_manager/default_policy.cc',
@@ -291,8 +294,11 @@
         'update_manager/real_system_provider.cc',
         'update_manager/real_time_provider.cc',
         'update_manager/real_updater_provider.cc',
+        'update_manager/staging_utils.cc',
         'update_manager/state_factory.cc',
         'update_manager/update_manager.cc',
+        'update_manager/update_time_restrictions_policy_impl.cc',
+        'update_manager/weekly_time.cc',
         'update_status_utils.cc',
       ],
       'conditions': [
@@ -303,7 +309,7 @@
         }],
         ['USE_chrome_kiosk_app == 1', {
           'dependencies': [
-            'update_engine-dbus-libcros-client',
+            'update_engine-dbus-kiosk-app-client',
           ],
         }],
       ],
@@ -364,6 +370,7 @@
         'exported_deps': [
           'ext2fs',
           'libpuffdiff',
+          'liblzma',
         ],
         'deps': ['<@(exported_deps)'],
       },
@@ -389,6 +396,7 @@
         'payload_generator/annotated_operation.cc',
         'payload_generator/blob_file_writer.cc',
         'payload_generator/block_mapping.cc',
+        'payload_generator/boot_img_filesystem.cc',
         'payload_generator/bzip.cc',
         'payload_generator/cycle_breaker.cc',
         'payload_generator/deflate_utils.cc',
@@ -403,6 +411,7 @@
         'payload_generator/inplace_generator.cc',
         'payload_generator/mapfile_filesystem.cc',
         'payload_generator/payload_file.cc',
+        'payload_generator/payload_generation_config_chromeos.cc',
         'payload_generator/payload_generation_config.cc',
         'payload_generator/payload_signer.cc',
         'payload_generator/raw_filesystem.cc',
@@ -509,6 +518,7 @@
             'common/hwid_override_unittest.cc',
             'common/mock_http_fetcher.cc',
             'common/prefs_unittest.cc',
+            'common/proxy_resolver_unittest.cc',
             'common/subprocess_unittest.cc',
             'common/terminator_unittest.cc',
             'common/test_utils.cc',
@@ -542,6 +552,7 @@
             'payload_generator/ab_generator_unittest.cc',
             'payload_generator/blob_file_writer_unittest.cc',
             'payload_generator/block_mapping_unittest.cc',
+            'payload_generator/boot_img_filesystem_unittest.cc',
             'payload_generator/cycle_breaker_unittest.cc',
             'payload_generator/deflate_utils_unittest.cc',
             'payload_generator/delta_diff_utils_unittest.cc',
@@ -561,9 +572,9 @@
             'payload_generator/topological_sort_unittest.cc',
             'payload_generator/zip_unittest.cc',
             'payload_state_unittest.cc',
-            'proxy_resolver_unittest.cc',
             'testrunner.cc',
             'update_attempter_unittest.cc',
+            'update_boot_flags_action_unittest.cc',
             'update_manager/boxed_value_unittest.cc',
             'update_manager/chromeos_policy_unittest.cc',
             'update_manager/evaluation_context_unittest.cc',
@@ -575,9 +586,12 @@
             'update_manager/real_system_provider_unittest.cc',
             'update_manager/real_time_provider_unittest.cc',
             'update_manager/real_updater_provider_unittest.cc',
+            'update_manager/staging_utils_unittest.cc',
             'update_manager/umtest_utils.cc',
             'update_manager/update_manager_unittest.cc',
+            'update_manager/update_time_restrictions_policy_impl_unittest.cc',
             'update_manager/variable_unittest.cc',
+            'update_manager/weekly_time_unittest.cc',
           ],
         },
       ],
diff --git a/update_engine_client.cc b/update_engine_client.cc
index bb19632..b7096c5 100644
--- a/update_engine_client.cc
+++ b/update_engine_client.cc
@@ -297,7 +297,7 @@
 
   // Boilerplate init commands.
   base::CommandLine::Init(argc_, argv_);
-  brillo::FlagHelper::Init(argc_, argv_, "Chromium OS Update Engine Client");
+  brillo::FlagHelper::Init(argc_, argv_, "A/B Update Engine Client");
 
   // Ensure there are no positional arguments.
   const vector<string> positional_args =
@@ -396,7 +396,7 @@
     string rollback_partition;
 
     if (!client_->GetRollbackPartition(&rollback_partition)) {
-      LOG(ERROR) << "Error while querying rollback partition availabilty.";
+      LOG(ERROR) << "Error while querying rollback partition availability.";
       return 1;
     }
 
@@ -458,8 +458,8 @@
       LOG(INFO) << "Target Channel (pending update): " << target_channel;
   }
 
-  bool do_update_request = FLAGS_check_for_update | FLAGS_update |
-                           !FLAGS_app_version.empty() |
+  bool do_update_request = FLAGS_check_for_update || FLAGS_update ||
+                           !FLAGS_app_version.empty() ||
                            !FLAGS_omaha_url.empty();
   if (FLAGS_update) FLAGS_follow = true;
 
diff --git a/update_engine_client_android.cc b/update_engine_client_android.cc
index 267f6e9..82a9f84 100644
--- a/update_engine_client_android.cc
+++ b/update_engine_client_android.cc
@@ -124,6 +124,14 @@
                 "A list of key-value pairs, one element of the list per line. "
                 "Used when --update is passed.");
 
+  DEFINE_bool(verify,
+              false,
+              "Given payload metadata, verify if the payload is applicable.");
+  DEFINE_string(metadata,
+                "/data/ota_package/metadata",
+                "The path to the update payload metadata. "
+                "Used when --verify is passed.");
+
   DEFINE_bool(suspend, false, "Suspend an ongoing update and exit.");
   DEFINE_bool(resume, false, "Resume a suspended update.");
   DEFINE_bool(cancel, false, "Cancel the ongoing update and exit.");
@@ -182,6 +190,15 @@
     return ExitWhenIdle(service_->resetStatus());
   }
 
+  if (FLAGS_verify) {
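+    // Ask update_engine whether the payload described by the metadata file
+    // (--metadata) is applicable to this device, then report the result.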
+    bool applicable = false;
+    Status status = service_->verifyPayloadApplicable(
+        android::String16{FLAGS_metadata.data(), FLAGS_metadata.size()},
+        &applicable);
+    LOG(INFO) << "Payload is " << (applicable ? "" : "not ") << "applicable.";
+    return ExitWhenIdle(status);
+  }
+
   if (FLAGS_follow) {
     // Register a callback object with the service.
     callback_ = new UECallback(this);
diff --git a/update_manager/android_things_policy.cc b/update_manager/android_things_policy.cc
index 5fbda46..4afcf12 100644
--- a/update_manager/android_things_policy.cc
+++ b/update_manager/android_things_policy.cc
@@ -53,7 +53,9 @@
   result->updates_enabled = true;
   result->target_channel.clear();
   result->target_version_prefix.clear();
-  result->is_interactive = false;
+  result->rollback_allowed = false;
+  result->rollback_allowed_milestones = -1;
+  result->interactive = false;
 
   // Build a list of policies to consult.  Note that each policy may modify the
   // result structure, even if it signals kContinue.
@@ -68,12 +70,12 @@
       // A/B updates
       &enough_slots_ab_updates_policy,
 
-      // Unofficial builds should not perform periodic update checks.
-      &only_update_official_builds_policy,
-
       // Check to see if an interactive update was requested.
       &interactive_update_policy,
 
+      // Unofficial builds should not perform periodic update checks.
+      &only_update_official_builds_policy,
+
       // Ensure that periodic update checks are timed properly.
       &next_update_check_time_policy,
   };
diff --git a/update_manager/android_things_policy_unittest.cc b/update_manager/android_things_policy_unittest.cc
index 8a50bc2..6961efc 100644
--- a/update_manager/android_things_policy_unittest.cc
+++ b/update_manager/android_things_policy_unittest.cc
@@ -97,7 +97,7 @@
   ExpectPolicyStatus(
       EvalStatus::kSucceeded, &Policy::UpdateCheckAllowed, &result);
   EXPECT_TRUE(result.updates_enabled);
-  EXPECT_FALSE(result.is_interactive);
+  EXPECT_FALSE(result.interactive);
 }
 
 TEST_F(UmAndroidThingsPolicyTest,
@@ -140,7 +140,7 @@
   ExpectPolicyStatus(
       EvalStatus::kSucceeded, &Policy::UpdateCheckAllowed, &result);
   EXPECT_TRUE(result.updates_enabled);
-  EXPECT_TRUE(result.is_interactive);
+  EXPECT_TRUE(result.interactive);
 }
 
 TEST_F(UmAndroidThingsPolicyTest,
@@ -156,7 +156,7 @@
   ExpectPolicyStatus(
       EvalStatus::kSucceeded, &Policy::UpdateCheckAllowed, &result);
   EXPECT_TRUE(result.updates_enabled);
-  EXPECT_FALSE(result.is_interactive);
+  EXPECT_FALSE(result.interactive);
 }
 
 TEST_F(UmAndroidThingsPolicyTest, UpdateCanBeAppliedOk) {
diff --git a/update_manager/boxed_value.cc b/update_manager/boxed_value.cc
index a437c02..8ec3375 100644
--- a/update_manager/boxed_value.cc
+++ b/update_manager/boxed_value.cc
@@ -26,8 +26,10 @@
 
 #include "update_engine/common/utils.h"
 #include "update_engine/connection_utils.h"
+#include "update_engine/update_manager/rollback_prefs.h"
 #include "update_engine/update_manager/shill_provider.h"
 #include "update_engine/update_manager/updater_provider.h"
+#include "update_engine/update_manager/weekly_time.h"
 
 using chromeos_update_engine::ConnectionTethering;
 using chromeos_update_engine::ConnectionType;
@@ -49,26 +51,26 @@
 template<>
 string BoxedValue::ValuePrinter<int>(const void* value) {
   const int* val = reinterpret_cast<const int*>(value);
-  return base::IntToString(*val);
+  return base::NumberToString(*val);
 }
 
 template<>
 string BoxedValue::ValuePrinter<unsigned int>(const void* value) {
   const unsigned int* val = reinterpret_cast<const unsigned int*>(value);
-  return base::UintToString(*val);
+  return base::NumberToString(*val);
 }
 
 template<>
 string BoxedValue::ValuePrinter<int64_t>(const void* value) {
   const int64_t* val = reinterpret_cast<const int64_t*>(value);
-  return base::Int64ToString(*val);
+  return base::NumberToString(*val);
 }
 
 template<>
 string BoxedValue::ValuePrinter<uint64_t>(const void* value) {
   const uint64_t* val =
     reinterpret_cast<const uint64_t*>(value);
-  return base::Uint64ToString(static_cast<uint64_t>(*val));
+  return base::NumberToString(static_cast<uint64_t>(*val));
 }
 
 template<>
@@ -80,7 +82,7 @@
 template<>
 string BoxedValue::ValuePrinter<double>(const void* value) {
   const double* val = reinterpret_cast<const double*>(value);
-  return base::DoubleToString(*val);
+  return base::NumberToString(*val);
 }
 
 template<>
@@ -133,6 +135,25 @@
   return "Unknown";
 }
 
+template <>
+string BoxedValue::ValuePrinter<RollbackToTargetVersion>(const void* value) {
+  const RollbackToTargetVersion* val =
+      reinterpret_cast<const RollbackToTargetVersion*>(value);
+  switch (*val) {
+    case RollbackToTargetVersion::kUnspecified:
+      return "Unspecified";
+    case RollbackToTargetVersion::kDisabled:
+      return "Disabled";
+    case RollbackToTargetVersion::kRollbackWithFullPowerwash:
+      return "Rollback with full powerwash";
+    case RollbackToTargetVersion::kMaxValue:
+      NOTREACHED();
+      return "Max value";
+  }
+  NOTREACHED();
+  return "Unknown";
+}
+
 template<>
 string BoxedValue::ValuePrinter<Stage>(const void* value) {
   const Stage* val = reinterpret_cast<const Stage*>(value);
@@ -191,4 +212,23 @@
   return retval;
 }
 
+template <>
+string BoxedValue::ValuePrinter<WeeklyTimeInterval>(const void* value) {
+  const WeeklyTimeInterval* val =
+      reinterpret_cast<const WeeklyTimeInterval*>(value);
+  return val->ToString();
+}
+
+template <>
+string BoxedValue::ValuePrinter<WeeklyTimeIntervalVector>(const void* value) {
+  const WeeklyTimeIntervalVector* val =
+      reinterpret_cast<const WeeklyTimeIntervalVector*>(value);
+
+  string retval = "Disallowed intervals:\n";
+  for (const auto& interval : *val) {
+    retval += interval.ToString() + "\n";
+  }
+  return retval;
+}
+
 }  // namespace chromeos_update_manager
diff --git a/update_manager/boxed_value.h b/update_manager/boxed_value.h
index 5f41835..c40215e 100644
--- a/update_manager/boxed_value.h
+++ b/update_manager/boxed_value.h
@@ -70,8 +70,9 @@
   // move constructor explicitly preventing it from accidental references,
   // like in:
   //   BoxedValue new_box(std::move(other_box));
-  BoxedValue(BoxedValue&& other)  // NOLINT(build/c++11)
-      : value_(other.value_), deleter_(other.deleter_),
+  BoxedValue(BoxedValue&& other) noexcept
+      : value_(other.value_),
+        deleter_(other.deleter_),
         printer_(other.printer_) {
     other.value_ = nullptr;
     other.deleter_ = nullptr;
diff --git a/update_manager/boxed_value_unittest.cc b/update_manager/boxed_value_unittest.cc
index 4aeaec8..3fa0f1a 100644
--- a/update_manager/boxed_value_unittest.cc
+++ b/update_manager/boxed_value_unittest.cc
@@ -26,9 +26,11 @@
 #include <base/strings/stringprintf.h>
 #include <base/time/time.h>
 
+#include "update_engine/update_manager/rollback_prefs.h"
 #include "update_engine/update_manager/shill_provider.h"
 #include "update_engine/update_manager/umtest_utils.h"
 #include "update_engine/update_manager/updater_provider.h"
+#include "update_engine/update_manager/weekly_time.h"
 
 using base::Time;
 using base::TimeDelta;
@@ -192,6 +194,21 @@
             .ToString());
 }
 
+TEST(UmBoxedValueTest, RollbackToTargetVersionToString) {
+  EXPECT_EQ("Unspecified",
+            BoxedValue(new RollbackToTargetVersion(
+                           RollbackToTargetVersion::kUnspecified))
+                .ToString());
+  EXPECT_EQ("Disabled",
+            BoxedValue(
+                new RollbackToTargetVersion(RollbackToTargetVersion::kDisabled))
+                .ToString());
+  EXPECT_EQ("Rollback with full powerwash",
+            BoxedValue(new RollbackToTargetVersion(
+                           RollbackToTargetVersion::kRollbackWithFullPowerwash))
+                .ToString());
+}
+
 TEST(UmBoxedValueTest, SetConnectionTypeToString) {
   set<ConnectionType>* set1 = new set<ConnectionType>;
   set1->insert(ConnectionType::kWimax);
@@ -242,4 +259,34 @@
                 .ToString());
 }
 
+TEST(UmBoxedValueTest, WeeklyTimeIntervalToString) {
+  EXPECT_EQ("Start: day_of_week=2 time=100\nEnd: day_of_week=4 time=200",
+            BoxedValue(new WeeklyTimeInterval(
+                           WeeklyTime(2, TimeDelta::FromMinutes(100)),
+                           WeeklyTime(4, TimeDelta::FromMinutes(200))))
+                .ToString());
+  EXPECT_EQ("Start: day_of_week=1 time=10\nEnd: day_of_week=1 time=20",
+            BoxedValue(new WeeklyTimeInterval(
+                           WeeklyTime(1, TimeDelta::FromMinutes(10)),
+                           WeeklyTime(1, TimeDelta::FromMinutes(20))))
+                .ToString());
+}
+
+TEST(UmBoxedValueTest, WeeklyTimeIntervalVectorToString) {
+  WeeklyTimeIntervalVector intervals;
+  intervals.emplace_back(WeeklyTime(5, TimeDelta::FromMinutes(10)),
+                         WeeklyTime(1, TimeDelta::FromMinutes(30)));
+  EXPECT_EQ(
+      "Disallowed intervals:\nStart: day_of_week=5 time=10\nEnd: "
+      "day_of_week=1 time=30\n",
+      BoxedValue(new WeeklyTimeIntervalVector(intervals)).ToString());
+  intervals.emplace_back(WeeklyTime(1, TimeDelta::FromMinutes(5)),
+                         WeeklyTime(6, TimeDelta::FromMinutes(1000)));
+  EXPECT_EQ(
+      "Disallowed intervals:\nStart: day_of_week=5 time=10\nEnd: "
+      "day_of_week=1 time=30\nStart: day_of_week=1 time=5\nEnd: day_of_week=6 "
+      "time=1000\n",
+      BoxedValue(new WeeklyTimeIntervalVector(intervals)).ToString());
+}
+
 }  // namespace chromeos_update_manager
diff --git a/update_manager/chromeos_policy.cc b/update_manager/chromeos_policy.cc
index 916acd3..587ac67 100644
--- a/update_manager/chromeos_policy.cc
+++ b/update_manager/chromeos_policy.cc
@@ -36,6 +36,7 @@
 #include "update_engine/update_manager/out_of_box_experience_policy_impl.h"
 #include "update_engine/update_manager/policy_utils.h"
 #include "update_engine/update_manager/shill_provider.h"
+#include "update_engine/update_manager/update_time_restrictions_policy_impl.h"
 
 using base::Time;
 using base::TimeDelta;
@@ -86,6 +87,7 @@
     case ErrorCode::kUnsupportedMajorPayloadVersion:
     case ErrorCode::kUnsupportedMinorPayloadVersion:
     case ErrorCode::kPayloadTimestampError:
+    case ErrorCode::kVerityCalculationError:
       LOG(INFO) << "Advancing download URL due to error "
                 << chromeos_update_engine::utils::ErrorCodeToString(err_code)
                 << " (" << static_cast<int>(err_code) << ")";
@@ -142,7 +144,11 @@
     case ErrorCode::kOmahaRequestXMLHasEntityDecl:
     case ErrorCode::kFilesystemVerifierError:
     case ErrorCode::kUserCanceled:
+    case ErrorCode::kOmahaUpdateIgnoredOverCellular:
     case ErrorCode::kUpdatedButNotActive:
+    case ErrorCode::kNoUpdate:
+    case ErrorCode::kRollbackNotPossible:
+    case ErrorCode::kFirstActiveOmahaPingSentPersistenceError:
       LOG(INFO) << "Not changing URL index or failure count due to error "
                 << chromeos_update_engine::utils::ErrorCodeToString(err_code)
                 << " (" << static_cast<int>(err_code) << ")";
@@ -199,7 +205,9 @@
   result->updates_enabled = true;
   result->target_channel.clear();
   result->target_version_prefix.clear();
-  result->is_interactive = false;
+  result->rollback_allowed = false;
+  result->rollback_allowed_milestones = -1;
+  result->interactive = false;
 
   EnoughSlotsAbUpdatesPolicyImpl enough_slots_ab_updates_policy;
   EnterpriseDevicePolicyImpl enterprise_device_policy;
@@ -255,8 +263,33 @@
                                               std::string* error,
                                               ErrorCode* result,
                                               InstallPlan* install_plan) const {
-  *result = ErrorCode::kSuccess;
-  return EvalStatus::kSucceeded;
+  UpdateTimeRestrictionsPolicyImpl update_time_restrictions_policy;
+  InteractiveUpdatePolicyImpl interactive_update_policy;
+
+  vector<Policy const*> policies_to_consult = {
+      // Check to see if an interactive update has been requested.
+      &interactive_update_policy,
+
+      // Do not apply or download an update if we are inside one of the
+      // restricted times.
+      &update_time_restrictions_policy,
+  };
+
+  EvalStatus status = ConsultPolicies(policies_to_consult,
+                                      &Policy::UpdateCanBeApplied,
+                                      ec,
+                                      state,
+                                      error,
+                                      result,
+                                      install_plan);
+  if (EvalStatus::kContinue != status) {
+    return status;
+  } else {
+    // The update can proceed.
+    LOG(INFO) << "Allowing update to be applied.";
+    *result = ErrorCode::kSuccess;
+    return EvalStatus::kSucceeded;
+  }
 }
 
 EvalStatus ChromeOSPolicy::UpdateCanStart(
@@ -324,7 +357,7 @@
     bool is_scattering_applicable = false;
     result->scatter_wait_period = kZeroInterval;
     result->scatter_check_threshold = 0;
-    if (!update_state.is_interactive) {
+    if (!update_state.interactive) {
       const bool* is_oobe_enabled_p = ec->GetValue(
           state->config_provider()->var_is_oobe_enabled());
       if (is_oobe_enabled_p && !(*is_oobe_enabled_p)) {
@@ -372,7 +405,7 @@
     // interactive, and other limits haven't been reached.
     if (update_state.p2p_downloading_disabled) {
       LOG(INFO) << "Blocked P2P downloading because it is disabled by Omaha.";
-    } else if (update_state.is_interactive) {
+    } else if (update_state.interactive) {
       LOG(INFO) << "Blocked P2P downloading because update is interactive.";
     } else if (update_state.p2p_num_attempts >= kMaxP2PAttempts) {
       LOG(INFO) << "Blocked P2P downloading as it was attempted too many "
@@ -572,7 +605,7 @@
   bool may_backoff = false;
   if (update_state.is_backoff_disabled) {
     LOG(INFO) << "Backoff disabled by Omaha.";
-  } else if (update_state.is_interactive) {
+  } else if (update_state.interactive) {
     LOG(INFO) << "No backoff for interactive updates.";
   } else if (update_state.is_delta_payload) {
     LOG(INFO) << "No backoff for delta payloads.";
diff --git a/update_manager/chromeos_policy.h b/update_manager/chromeos_policy.h
index 67c0d15..d4ce4a6 100644
--- a/update_manager/chromeos_policy.h
+++ b/update_manager/chromeos_policy.h
@@ -114,6 +114,8 @@
               UpdateCanStartAllowedP2PDownloadingBlockedDueToNumAttempts);
   FRIEND_TEST(UmChromeOSPolicyTest,
               UpdateCanStartAllowedP2PDownloadingBlockedDueToAttemptsPeriod);
+  FRIEND_TEST(UmChromeOSPolicyTest,
+              UpdateCheckAllowedNextUpdateCheckOutsideDisallowedInterval);
 
   // Auxiliary constant (zero by default).
   const base::TimeDelta kZeroInterval;
diff --git a/update_manager/chromeos_policy_unittest.cc b/update_manager/chromeos_policy_unittest.cc
index df29e8c..96f3d79 100644
--- a/update_manager/chromeos_policy_unittest.cc
+++ b/update_manager/chromeos_policy_unittest.cc
@@ -21,12 +21,14 @@
 
 #include "update_engine/update_manager/next_update_check_policy_impl.h"
 #include "update_engine/update_manager/policy_test_utils.h"
+#include "update_engine/update_manager/weekly_time.h"
 
 using base::Time;
 using base::TimeDelta;
 using chromeos_update_engine::ConnectionTethering;
 using chromeos_update_engine::ConnectionType;
 using chromeos_update_engine::ErrorCode;
+using chromeos_update_engine::InstallPlan;
 using std::set;
 using std::string;
 
@@ -83,6 +85,9 @@
         new bool(false));
     fake_state_.device_policy_provider()->var_release_channel_delegated()->
         reset(new bool(true));
+    fake_state_.device_policy_provider()
+        ->var_disallowed_time_intervals()
+        ->reset(new WeeklyTimeIntervalVector());
   }
 
   // Configures the policy to return a desired value from UpdateCheckAllowed by
@@ -105,6 +110,67 @@
       curr_time -= TimeDelta::FromSeconds(1);
     fake_clock_.SetWallclockTime(curr_time);
   }
+
+  // Sets the policies required for a kiosk app to control Chrome OS version:
+  // - AllowKioskAppControlChromeVersion = True
+  // - UpdateDisabled = True
+  // In the kiosk app manifest:
+  // - RequiredPlatformVersion = 1234.
+  void SetKioskAppControlsChromeOsVersion() {
+    fake_state_.device_policy_provider()
+        ->var_allow_kiosk_app_control_chrome_version()
+        ->reset(new bool(true));
+    fake_state_.device_policy_provider()->var_update_disabled()->reset(
+        new bool(true));
+    fake_state_.system_provider()->var_kiosk_required_platform_version()->reset(
+        new string("1234."));
+  }
+
+  // Sets up a test with the value of RollbackToTargetVersion policy (and
+  // whether it's set), and returns the value of
+  // UpdateCheckParams.rollback_allowed.
+  bool TestRollbackAllowed(bool set_policy,
+                           RollbackToTargetVersion rollback_to_target_version) {
+    // Update check is allowed, response includes attributes for use in the
+    // request.
+    SetUpdateCheckAllowed(true);
+
+    if (set_policy) {
+      // Override RollbackToTargetVersion device policy attribute.
+      fake_state_.device_policy_provider()
+          ->var_rollback_to_target_version()
+          ->reset(new RollbackToTargetVersion(rollback_to_target_version));
+    }
+
+    UpdateCheckParams result;
+    ExpectPolicyStatus(
+        EvalStatus::kSucceeded, &Policy::UpdateCheckAllowed, &result);
+    return result.rollback_allowed;
+  }
+
+  // Sets up a test with the given intervals and the current fake wallclock
+  // time.
+  void TestDisallowedTimeIntervals(const WeeklyTimeIntervalVector& intervals,
+                                   const ErrorCode& expected_error_code,
+                                   bool kiosk) {
+    SetUpDefaultTimeProvider();
+    if (kiosk)
+      fake_state_.device_policy_provider()
+          ->var_auto_launched_kiosk_app_id()
+          ->reset(new string("myapp"));
+    fake_state_.device_policy_provider()
+        ->var_disallowed_time_intervals()
+        ->reset(new WeeklyTimeIntervalVector(intervals));
+
+    // Check that the result of UpdateCanBeApplied matches
+    // |expected_error_code|.
+    ErrorCode result;
+    InstallPlan install_plan;
+    ExpectPolicyStatus(EvalStatus::kSucceeded,
+                       &Policy::UpdateCanBeApplied,
+                       &result,
+                       &install_plan);
+    EXPECT_EQ(result, expected_error_code);
+  }
 };
 
 TEST_F(UmChromeOSPolicyTest, UpdateCheckAllowedWaitsForTheTimeout) {
@@ -140,7 +206,7 @@
   ExpectPolicyStatus(EvalStatus::kSucceeded,
                      &Policy::UpdateCheckAllowed, &result);
   EXPECT_TRUE(result.updates_enabled);
-  EXPECT_FALSE(result.is_interactive);
+  EXPECT_FALSE(result.interactive);
 }
 
 TEST_F(UmChromeOSPolicyTest, UpdateCheckAllowedWaitsForOOBE) {
@@ -178,7 +244,7 @@
   ExpectPolicyStatus(EvalStatus::kSucceeded,
                      &Policy::UpdateCheckAllowed, &result);
   EXPECT_TRUE(result.updates_enabled);
-  EXPECT_FALSE(result.is_interactive);
+  EXPECT_FALSE(result.interactive);
 }
 
 TEST_F(UmChromeOSPolicyTest, UpdateCheckAllowedWithAttributes) {
@@ -189,8 +255,11 @@
   // Override specific device policy attributes.
   fake_state_.device_policy_provider()->var_target_version_prefix()->
       reset(new string("1.2"));
-  fake_state_.device_policy_provider()->var_release_channel_delegated()->
-      reset(new bool(false));
+  fake_state_.device_policy_provider()
+      ->var_rollback_allowed_milestones()
+      ->reset(new int(5));
+  fake_state_.device_policy_provider()->var_release_channel_delegated()->reset(
+      new bool(false));
   fake_state_.device_policy_provider()->var_release_channel()->
       reset(new string("foo-channel"));
 
@@ -199,8 +268,55 @@
                      &Policy::UpdateCheckAllowed, &result);
   EXPECT_TRUE(result.updates_enabled);
   EXPECT_EQ("1.2", result.target_version_prefix);
+  EXPECT_EQ(5, result.rollback_allowed_milestones);
   EXPECT_EQ("foo-channel", result.target_channel);
-  EXPECT_FALSE(result.is_interactive);
+  EXPECT_FALSE(result.interactive);
+}
+
+TEST_F(UmChromeOSPolicyTest, UpdateCheckAllowedRollbackAllowed) {
+  EXPECT_TRUE(TestRollbackAllowed(
+      true, RollbackToTargetVersion::kRollbackWithFullPowerwash));
+}
+
+TEST_F(UmChromeOSPolicyTest, UpdateCheckAllowedRollbackDisabled) {
+  EXPECT_FALSE(TestRollbackAllowed(true, RollbackToTargetVersion::kDisabled));
+}
+
+TEST_F(UmChromeOSPolicyTest, UpdateCheckAllowedRollbackUnspecified) {
+  EXPECT_FALSE(
+      TestRollbackAllowed(true, RollbackToTargetVersion::kUnspecified));
+}
+
+TEST_F(UmChromeOSPolicyTest, UpdateCheckAllowedRollbackNotSet) {
+  EXPECT_FALSE(
+      TestRollbackAllowed(false, RollbackToTargetVersion::kUnspecified));
+}
+
+TEST_F(UmChromeOSPolicyTest, UpdateCheckAllowedKioskRollbackAllowed) {
+  SetKioskAppControlsChromeOsVersion();
+
+  EXPECT_TRUE(TestRollbackAllowed(
+      true, RollbackToTargetVersion::kRollbackWithFullPowerwash));
+}
+
+TEST_F(UmChromeOSPolicyTest, UpdateCheckAllowedKioskRollbackDisabled) {
+  SetKioskAppControlsChromeOsVersion();
+
+  EXPECT_FALSE(TestRollbackAllowed(true, RollbackToTargetVersion::kDisabled));
+}
+
+TEST_F(UmChromeOSPolicyTest, UpdateCheckAllowedKioskRollbackUnspecified) {
+  SetKioskAppControlsChromeOsVersion();
+
+  EXPECT_FALSE(
+      TestRollbackAllowed(true, RollbackToTargetVersion::kUnspecified));
+}
+
+TEST_F(UmChromeOSPolicyTest, UpdateCheckAllowedKioskRollbackNotSet) {
+  SetKioskAppControlsChromeOsVersion();
+
+  EXPECT_FALSE(
+      TestRollbackAllowed(false, RollbackToTargetVersion::kUnspecified));
 }
 
 TEST_F(UmChromeOSPolicyTest,
@@ -256,7 +372,7 @@
   ExpectPolicyStatus(EvalStatus::kSucceeded,
                      &Policy::UpdateCheckAllowed, &result);
   EXPECT_TRUE(result.updates_enabled);
-  EXPECT_TRUE(result.is_interactive);
+  EXPECT_TRUE(result.interactive);
 }
 
 TEST_F(UmChromeOSPolicyTest, UpdateCheckAllowedForcedUpdateRequestedPeriodic) {
@@ -271,29 +387,21 @@
   ExpectPolicyStatus(EvalStatus::kSucceeded,
                      &Policy::UpdateCheckAllowed, &result);
   EXPECT_TRUE(result.updates_enabled);
-  EXPECT_FALSE(result.is_interactive);
+  EXPECT_FALSE(result.interactive);
 }
 
 TEST_F(UmChromeOSPolicyTest, UpdateCheckAllowedKioskPin) {
   // Update check is allowed.
   SetUpdateCheckAllowed(true);
 
-  // A typical setup for kiosk pin policy: AU disabled, allow kiosk to pin
-  // and there is a kiosk required platform version.
-  fake_state_.device_policy_provider()->var_update_disabled()->reset(
-      new bool(true));
-  fake_state_.device_policy_provider()
-      ->var_allow_kiosk_app_control_chrome_version()
-      ->reset(new bool(true));
-  fake_state_.system_provider()->var_kiosk_required_platform_version()->reset(
-      new string("1234.0.0"));
+  SetKioskAppControlsChromeOsVersion();
 
   UpdateCheckParams result;
   ExpectPolicyStatus(EvalStatus::kSucceeded,
                      &Policy::UpdateCheckAllowed, &result);
   EXPECT_TRUE(result.updates_enabled);
-  EXPECT_EQ("1234.0.0", result.target_version_prefix);
-  EXPECT_FALSE(result.is_interactive);
+  EXPECT_EQ("1234.", result.target_version_prefix);
+  EXPECT_FALSE(result.interactive);
 }
 
 TEST_F(UmChromeOSPolicyTest, UpdateCheckAllowedDisabledWhenNoKioskPin) {
@@ -333,7 +441,7 @@
                      &Policy::UpdateCheckAllowed, &result);
   EXPECT_TRUE(result.updates_enabled);
   EXPECT_TRUE(result.target_version_prefix.empty());
-  EXPECT_FALSE(result.is_interactive);
+  EXPECT_FALSE(result.interactive);
 }
 
 TEST_F(UmChromeOSPolicyTest,
@@ -561,7 +669,7 @@
   update_state.download_errors.emplace_back(
       0, ErrorCode::kDownloadTransferError,
       curr_time - TimeDelta::FromSeconds(2));
-  update_state.is_interactive = true;
+  update_state.interactive = true;
 
   // Check that UpdateCanStart returns false and a new backoff expiry is
   // generated.
@@ -798,7 +906,7 @@
       new TimeDelta(TimeDelta::FromSeconds(1)));
 
   UpdateState update_state = GetDefaultUpdateState(TimeDelta::FromSeconds(1));
-  update_state.is_interactive = true;
+  update_state.interactive = true;
   update_state.scatter_check_threshold = 0;
   update_state.scatter_check_threshold_min = 2;
   update_state.scatter_check_threshold_max = 5;
@@ -828,7 +936,7 @@
   fake_state_.system_provider()->var_is_oobe_complete()->reset(new bool(false));
 
   UpdateState update_state = GetDefaultUpdateState(TimeDelta::FromSeconds(1));
-  update_state.is_interactive = true;
+  update_state.interactive = true;
   update_state.scatter_check_threshold = 0;
   update_state.scatter_check_threshold_min = 2;
   update_state.scatter_check_threshold_max = 5;
@@ -1496,4 +1604,48 @@
                      &result, false);
 }
 
+TEST_F(UmChromeOSPolicyTest,
+       UpdateCanBeAppliedForcedUpdatesDisablesTimeRestrictions) {
+  Time curr_time = fake_clock_.GetWallclockTime();
+  fake_state_.updater_provider()->var_forced_update_requested()->reset(
+      new UpdateRequestStatus(UpdateRequestStatus::kInteractive));
+  // Should return kAskMeAgainLater when updates are not forced.
+  TestDisallowedTimeIntervals(
+      {WeeklyTimeInterval(
+          WeeklyTime::FromTime(curr_time),
+          WeeklyTime::FromTime(curr_time + TimeDelta::FromMinutes(1)))},
+      ErrorCode::kSuccess,
+      /* kiosk = */ true);
+}
+
+TEST_F(UmChromeOSPolicyTest, UpdateCanBeAppliedFailsInDisallowedTime) {
+  Time curr_time = fake_clock_.GetWallclockTime();
+  TestDisallowedTimeIntervals(
+      {WeeklyTimeInterval(
+          WeeklyTime::FromTime(curr_time),
+          WeeklyTime::FromTime(curr_time + TimeDelta::FromMinutes(1)))},
+      ErrorCode::kOmahaUpdateDeferredPerPolicy,
+      /* kiosk = */ true);
+}
+
+TEST_F(UmChromeOSPolicyTest, UpdateCanBeAppliedOutsideDisallowedTime) {
+  Time curr_time = fake_clock_.GetWallclockTime();
+  TestDisallowedTimeIntervals(
+      {WeeklyTimeInterval(
+          WeeklyTime::FromTime(curr_time - TimeDelta::FromHours(3)),
+          WeeklyTime::FromTime(curr_time))},
+      ErrorCode::kSuccess,
+      /* kiosk = */ true);
+}
+
+TEST_F(UmChromeOSPolicyTest, UpdateCanBeAppliedPassesOnNonKiosk) {
+  Time curr_time = fake_clock_.GetWallclockTime();
+  TestDisallowedTimeIntervals(
+      {WeeklyTimeInterval(
+          WeeklyTime::FromTime(curr_time),
+          WeeklyTime::FromTime(curr_time + TimeDelta::FromMinutes(1)))},
+      ErrorCode::kSuccess,
+      /* kiosk = */ false);
+}
+
 }  // namespace chromeos_update_manager
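
The four tests above exercise the new disallowed-time-interval handling: inside a disallowed weekly interval the update is deferred, the restriction only applies to auto-launched kiosk devices, and a forced (interactive) update bypasses it. As a rough, self-contained illustration of that decision (not the real WeeklyTime/WeeklyTimeInterval classes; MinutesIntoWeek, Interval and IsUpdateDeferred are made-up names for this sketch):

    #include <cassert>
    #include <vector>

    using MinutesIntoWeek = int;  // 0 .. 7*24*60 - 1.

    struct Interval {
      MinutesIntoWeek start;
      MinutesIntoWeek end;  // Exclusive; may wrap around the week boundary.
    };

    // Returns true if |now| falls inside any disallowed interval, i.e. the
    // update should be deferred, unless the update was forced.
    bool IsUpdateDeferred(const std::vector<Interval>& disallowed,
                          MinutesIntoWeek now,
                          bool update_forced) {
      if (update_forced)
        return false;  // Forced updates ignore time restrictions.
      for (const Interval& i : disallowed) {
        bool inside = (i.start <= i.end) ? (now >= i.start && now < i.end)
                                         : (now >= i.start || now < i.end);
        if (inside)
          return true;
      }
      return false;
    }

    int main() {
      // A one-minute window starting "now", as in the tests above.
      std::vector<Interval> disallowed = {{1000, 1001}};
      assert(IsUpdateDeferred(disallowed, 1000, /*update_forced=*/false));
      assert(!IsUpdateDeferred(disallowed, 1000, /*update_forced=*/true));
      assert(!IsUpdateDeferred(disallowed, 999, /*update_forced=*/false));
      return 0;
    }
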
diff --git a/update_manager/default_policy.cc b/update_manager/default_policy.cc
index 5da1520..5509abc 100644
--- a/update_manager/default_policy.cc
+++ b/update_manager/default_policy.cc
@@ -40,7 +40,9 @@
   result->updates_enabled = true;
   result->target_channel.clear();
   result->target_version_prefix.clear();
-  result->is_interactive = false;
+  result->rollback_allowed = false;
+  result->rollback_allowed_milestones = -1;  // No version rolls should happen.
+  result->interactive = false;
 
   // Ensure that the minimum interval is set. If there's no clock, this defaults
   // to always allowing the update.
diff --git a/update_manager/device_policy_provider.h b/update_manager/device_policy_provider.h
index 3537d13..80dcfa2 100644
--- a/update_manager/device_policy_provider.h
+++ b/update_manager/device_policy_provider.h
@@ -24,8 +24,10 @@
 #include <policy/libpolicy.h>
 
 #include "update_engine/update_manager/provider.h"
+#include "update_engine/update_manager/rollback_prefs.h"
 #include "update_engine/update_manager/shill_provider.h"
 #include "update_engine/update_manager/variable.h"
+#include "update_engine/update_manager/weekly_time.h"
 
 namespace chromeos_update_manager {
 
@@ -46,6 +48,15 @@
 
   virtual Variable<std::string>* var_target_version_prefix() = 0;
 
+  // Variable returning what should happen if the target_version_prefix is
+  // earlier than the current Chrome OS version.
+  virtual Variable<RollbackToTargetVersion>*
+      var_rollback_to_target_version() = 0;
+
+  // Variable returning the number of Chrome milestones for which rollback
+  // should be possible. Rollback protection is postponed by this many versions.
+  virtual Variable<int>* var_rollback_allowed_milestones() = 0;
+
   // Returns a non-negative scatter interval used for updates.
   virtual Variable<base::TimeDelta>* var_scatter_factor() = 0;
 
@@ -65,6 +76,15 @@
 
   virtual Variable<bool>* var_allow_kiosk_app_control_chrome_version() = 0;
 
+  // Variable that contains the ID of the app to be launched in kiosk mode. If
+  // the device is not in kiosk mode, this should be empty.
+  virtual Variable<std::string>* var_auto_launched_kiosk_app_id() = 0;
+
+  // Variable that contains the time intervals during the week for which update
+  // checks are disallowed.
+  virtual Variable<WeeklyTimeIntervalVector>*
+  var_disallowed_time_intervals() = 0;
+
  protected:
   DevicePolicyProvider() {}
 
diff --git a/update_manager/enterprise_device_policy_impl.cc b/update_manager/enterprise_device_policy_impl.cc
index 94518a1..6f14b1f 100644
--- a/update_manager/enterprise_device_policy_impl.cc
+++ b/update_manager/enterprise_device_policy_impl.cc
@@ -55,6 +55,7 @@
       }
     }
 
+    // By default, result->rollback_allowed is false.
     if (kiosk_app_control_chrome_version) {
       // Get the required platform version from Chrome.
       const string* kiosk_required_platform_version_p =
@@ -66,11 +67,10 @@
       }
 
       result->target_version_prefix = *kiosk_required_platform_version_p;
-      LOG(INFO) << "Allow kiosk app to control Chrome version policy is set,"
-                << ", target version is "
-                << (kiosk_required_platform_version_p
-                        ? *kiosk_required_platform_version_p
-                        : std::string("latest"));
+      LOG(INFO) << "Allow kiosk app to control Chrome version policy is set, "
+                << "target version is " << result->target_version_prefix;
+      // TODO(hunyadym): Add support for allowing rollback using the manifest
+      // (if policy doesn't specify otherwise).
     } else {
       // Determine whether a target version prefix is dictated by policy.
       const string* target_version_prefix_p =
@@ -79,6 +79,36 @@
         result->target_version_prefix = *target_version_prefix_p;
     }
 
+    // Device policy always overrides the rollback setting coming from the
+    // kiosk app manifest.
+    const RollbackToTargetVersion* rollback_to_target_version_p =
+        ec->GetValue(dp_provider->var_rollback_to_target_version());
+    if (rollback_to_target_version_p) {
+      switch (*rollback_to_target_version_p) {
+        case RollbackToTargetVersion::kUnspecified:
+          // Keep the default, or the value specified by the kiosk app.
+          break;
+        case RollbackToTargetVersion::kDisabled:
+          LOG(INFO) << "Policy disables rollbacks.";
+          result->rollback_allowed = false;
+          break;
+        case RollbackToTargetVersion::kRollbackWithFullPowerwash:
+          LOG(INFO) << "Policy allows rollbacks.";
+          result->rollback_allowed = true;
+          break;
+        case RollbackToTargetVersion::kMaxValue:
+          NOTREACHED();
+          // Don't add a default case to let the compiler warn about newly
+          // added enum values which should be added here.
+      }
+    }
+
+    // Determine the number of allowed milestones for rollback.
+    const int* rollback_allowed_milestones_p =
+        ec->GetValue(dp_provider->var_rollback_allowed_milestones());
+    if (rollback_allowed_milestones_p)
+      result->rollback_allowed_milestones = *rollback_allowed_milestones_p;
+
     // Determine whether a target channel is dictated by policy.
     const bool* release_channel_delegated_p =
         ec->GetValue(dp_provider->var_release_channel_delegated());
diff --git a/update_manager/evaluation_context.h b/update_manager/evaluation_context.h
index df5816a..0bdbaec 100644
--- a/update_manager/evaluation_context.h
+++ b/update_manager/evaluation_context.h
@@ -114,7 +114,7 @@
   // there's no cached variable, this method returns false.
   //
   // Right before the passed closure is called the EvaluationContext is
-  // reseted, removing all the non-const cached values.
+  // reset, removing all the non-const cached values.
   bool RunOnValueChangeOrTimeout(base::Closure callback);
 
   // Returns a textual representation of the evaluation context,
diff --git a/update_manager/evaluation_context_unittest.cc b/update_manager/evaluation_context_unittest.cc
index 1e61db7..6a8475b 100644
--- a/update_manager/evaluation_context_unittest.cc
+++ b/update_manager/evaluation_context_unittest.cc
@@ -20,6 +20,7 @@
 #include <string>
 
 #include <base/bind.h>
+#include <base/bind_helpers.h>
 #include <brillo/message_loops/fake_message_loop.h>
 #include <brillo/message_loops/message_loop_utils.h>
 #include <gtest/gtest.h>
@@ -48,8 +49,6 @@
 
 namespace {
 
-void DoNothing() {}
-
 // Sets the value of the passed pointer to true.
 void SetTrue(bool* value) {
   *value = true;
@@ -207,7 +206,7 @@
   fake_const_var_.reset(new string("Hello world!"));
   EXPECT_EQ(*eval_ctx_->GetValue(&fake_const_var_), "Hello world!");
 
-  EXPECT_FALSE(eval_ctx_->RunOnValueChangeOrTimeout(Bind(&DoNothing)));
+  EXPECT_FALSE(eval_ctx_->RunOnValueChangeOrTimeout(base::DoNothing()));
 }
 
 // Test that reevaluation occurs when an async variable it depends on changes.
@@ -277,11 +276,11 @@
   EXPECT_TRUE(value);
 
   // Ensure that we cannot reschedule an evaluation.
-  EXPECT_FALSE(eval_ctx_->RunOnValueChangeOrTimeout(Bind(&DoNothing)));
+  EXPECT_FALSE(eval_ctx_->RunOnValueChangeOrTimeout(base::DoNothing()));
 
   // Ensure that we can reschedule an evaluation after resetting expiration.
   eval_ctx_->ResetExpiration();
-  EXPECT_TRUE(eval_ctx_->RunOnValueChangeOrTimeout(Bind(&DoNothing)));
+  EXPECT_TRUE(eval_ctx_->RunOnValueChangeOrTimeout(base::DoNothing()));
 }
 
 // Test that we clear the events when destroying the EvaluationContext.
@@ -327,7 +326,7 @@
   fake_poll_var_.reset(new string("Polled value"));
   eval_ctx_->GetValue(&fake_async_var_);
   eval_ctx_->GetValue(&fake_poll_var_);
-  EXPECT_TRUE(eval_ctx_->RunOnValueChangeOrTimeout(Bind(&DoNothing)));
+  EXPECT_TRUE(eval_ctx_->RunOnValueChangeOrTimeout(base::DoNothing()));
   // TearDown() checks for leaked observers on this async_variable, which means
   // that our object is still alive after removing its reference.
 }
@@ -420,7 +419,7 @@
 
   // The "false" from IsWallclockTimeGreaterThan means that's not that timestamp
   // yet, so this should schedule a callback for when that happens.
-  EXPECT_TRUE(eval_ctx_->RunOnValueChangeOrTimeout(Bind(&DoNothing)));
+  EXPECT_TRUE(eval_ctx_->RunOnValueChangeOrTimeout(base::DoNothing()));
 }
 
 TEST_F(UmEvaluationContextTest,
@@ -430,7 +429,7 @@
 
   // The "false" from IsMonotonicTimeGreaterThan means that's not that timestamp
   // yet, so this should schedule a callback for when that happens.
-  EXPECT_TRUE(eval_ctx_->RunOnValueChangeOrTimeout(Bind(&DoNothing)));
+  EXPECT_TRUE(eval_ctx_->RunOnValueChangeOrTimeout(base::DoNothing()));
 }
 
 TEST_F(UmEvaluationContextTest,
@@ -443,7 +442,7 @@
       fake_clock_.GetWallclockTime() - TimeDelta::FromSeconds(1)));
 
   // Callback should not be scheduled.
-  EXPECT_FALSE(eval_ctx_->RunOnValueChangeOrTimeout(Bind(&DoNothing)));
+  EXPECT_FALSE(eval_ctx_->RunOnValueChangeOrTimeout(base::DoNothing()));
 }
 
 TEST_F(UmEvaluationContextTest,
@@ -456,7 +455,7 @@
       fake_clock_.GetMonotonicTime() - TimeDelta::FromSeconds(1)));
 
   // Callback should not be scheduled.
-  EXPECT_FALSE(eval_ctx_->RunOnValueChangeOrTimeout(Bind(&DoNothing)));
+  EXPECT_FALSE(eval_ctx_->RunOnValueChangeOrTimeout(base::DoNothing()));
 }
 
 TEST_F(UmEvaluationContextTest, DumpContext) {
diff --git a/update_manager/fake_device_policy_provider.h b/update_manager/fake_device_policy_provider.h
index 9e4f5b7..d70e0c3 100644
--- a/update_manager/fake_device_policy_provider.h
+++ b/update_manager/fake_device_policy_provider.h
@@ -50,6 +50,15 @@
     return &var_target_version_prefix_;
   }
 
+  FakeVariable<RollbackToTargetVersion>* var_rollback_to_target_version()
+      override {
+    return &var_rollback_to_target_version_;
+  }
+
+  FakeVariable<int>* var_rollback_allowed_milestones() override {
+    return &var_rollback_allowed_milestones_;
+  }
+
   FakeVariable<base::TimeDelta>* var_scatter_factor() override {
     return &var_scatter_factor_;
   }
@@ -75,6 +84,15 @@
     return &var_allow_kiosk_app_control_chrome_version_;
   }
 
+  FakeVariable<std::string>* var_auto_launched_kiosk_app_id() override {
+    return &var_auto_launched_kiosk_app_id_;
+  }
+
+  FakeVariable<WeeklyTimeIntervalVector>* var_disallowed_time_intervals()
+      override {
+    return &var_disallowed_time_intervals_;
+  }
+
  private:
   FakeVariable<bool> var_device_policy_is_loaded_{
       "policy_is_loaded", kVariableModePoll};
@@ -86,6 +104,10 @@
       "update_disabled", kVariableModePoll};
   FakeVariable<std::string> var_target_version_prefix_{
       "target_version_prefix", kVariableModePoll};
+  FakeVariable<RollbackToTargetVersion> var_rollback_to_target_version_{
+      "rollback_to_target_version", kVariableModePoll};
+  FakeVariable<int> var_rollback_allowed_milestones_{
+      "rollback_allowed_milestones", kVariableModePoll};
   FakeVariable<base::TimeDelta> var_scatter_factor_{
       "scatter_factor", kVariableModePoll};
   FakeVariable<std::set<chromeos_update_engine::ConnectionType>>
@@ -97,6 +119,10 @@
   FakeVariable<bool> var_au_p2p_enabled_{"au_p2p_enabled", kVariableModePoll};
   FakeVariable<bool> var_allow_kiosk_app_control_chrome_version_{
       "allow_kiosk_app_control_chrome_version", kVariableModePoll};
+  FakeVariable<std::string> var_auto_launched_kiosk_app_id_{
+      "auto_launched_kiosk_app_id", kVariableModePoll};
+  FakeVariable<WeeklyTimeIntervalVector> var_disallowed_time_intervals_{
+      "disallowed_time_intervals", kVariableModePoll};
 
   DISALLOW_COPY_AND_ASSIGN(FakeDevicePolicyProvider);
 };
diff --git a/update_manager/fake_time_provider.h b/update_manager/fake_time_provider.h
index 2aea2e7..bd370d2 100644
--- a/update_manager/fake_time_provider.h
+++ b/update_manager/fake_time_provider.h
@@ -29,10 +29,12 @@
 
   FakeVariable<base::Time>* var_curr_date() override { return &var_curr_date_; }
   FakeVariable<int>* var_curr_hour() override { return &var_curr_hour_; }
+  FakeVariable<int>* var_curr_minute() override { return &var_curr_minute_; }
 
  private:
   FakeVariable<base::Time> var_curr_date_{"curr_date", kVariableModePoll};
   FakeVariable<int> var_curr_hour_{"curr_hour", kVariableModePoll};
+  FakeVariable<int> var_curr_minute_{"curr_minute", kVariableModePoll};
 
   DISALLOW_COPY_AND_ASSIGN(FakeTimeProvider);
 };
diff --git a/update_manager/interactive_update_policy_impl.cc b/update_manager/interactive_update_policy_impl.cc
index df7f17b..872dc5d 100644
--- a/update_manager/interactive_update_policy_impl.cc
+++ b/update_manager/interactive_update_policy_impl.cc
@@ -16,6 +16,9 @@
 
 #include "update_engine/update_manager/interactive_update_policy_impl.h"
 
+using chromeos_update_engine::ErrorCode;
+using chromeos_update_engine::InstallPlan;
+
 namespace chromeos_update_manager {
 
 // Check to see if an interactive update was requested.
@@ -24,21 +27,51 @@
     State* state,
     std::string* error,
     UpdateCheckParams* result) const {
-  UpdaterProvider* const updater_provider = state->updater_provider();
+  bool interactive;
+  if (CheckInteractiveUpdateRequested(
+          ec, state->updater_provider(), &interactive)) {
+    result->interactive = interactive;
+    LOG(INFO) << "Forced update signaled ("
+              << (interactive ? "interactive" : "periodic")
+              << "), allowing update check.";
+    return EvalStatus::kSucceeded;
+  }
+  return EvalStatus::kContinue;
+}
 
+EvalStatus InteractiveUpdatePolicyImpl::UpdateCanBeApplied(
+    EvaluationContext* ec,
+    State* state,
+    std::string* error,
+    ErrorCode* result,
+    InstallPlan* install_plan) const {
+  bool interactive;
+  if (CheckInteractiveUpdateRequested(
+          ec, state->updater_provider(), &interactive)) {
+    LOG(INFO) << "Forced update signaled ("
+              << (interactive ? "interactive" : "periodic")
+              << "), allowing update to be applied.";
+    *result = ErrorCode::kSuccess;
+    return EvalStatus::kSucceeded;
+  }
+  return EvalStatus::kContinue;
+}
+
+bool InteractiveUpdatePolicyImpl::CheckInteractiveUpdateRequested(
+    EvaluationContext* ec,
+    UpdaterProvider* const updater_provider,
+    bool* interactive_out) const {
   // First, check to see if an interactive update was requested.
   const UpdateRequestStatus* forced_update_requested_p =
       ec->GetValue(updater_provider->var_forced_update_requested());
   if (forced_update_requested_p != nullptr &&
       *forced_update_requested_p != UpdateRequestStatus::kNone) {
-    result->is_interactive =
-        (*forced_update_requested_p == UpdateRequestStatus::kInteractive);
-    LOG(INFO) << "Forced update signaled ("
-              << (result->is_interactive ? "interactive" : "periodic")
-              << "), allowing update check.";
-    return EvalStatus::kSucceeded;
+    if (interactive_out)
+      *interactive_out =
+          (*forced_update_requested_p == UpdateRequestStatus::kInteractive);
+    return true;
   }
-  return EvalStatus::kContinue;
+  return false;
 }
 
 }  // namespace chromeos_update_manager
diff --git a/update_manager/interactive_update_policy_impl.h b/update_manager/interactive_update_policy_impl.h
index a431456..3690cfb 100644
--- a/update_manager/interactive_update_policy_impl.h
+++ b/update_manager/interactive_update_policy_impl.h
@@ -19,6 +19,8 @@
 
 #include <string>
 
+#include "update_engine/common/error_code.h"
+#include "update_engine/payload_consumer/install_plan.h"
 #include "update_engine/update_manager/policy_utils.h"
 
 namespace chromeos_update_manager {
@@ -35,15 +37,30 @@
                                 std::string* error,
                                 UpdateCheckParams* result) const override;
 
+  EvalStatus UpdateCanBeApplied(
+      EvaluationContext* ec,
+      State* state,
+      std::string* error,
+      chromeos_update_engine::ErrorCode* result,
+      chromeos_update_engine::InstallPlan* install_plan) const override;
+
  protected:
   std::string PolicyName() const override {
     return "InteractiveUpdatePolicyImpl";
   }
 
  private:
+  // Checks whether a forced update was requested. If there is a forced update,
+  // returns true and sets |interactive_out| to true if the forced update is
+  // interactive, and to false otherwise. If there is no forced update, returns
+  // false and does not modify |interactive_out|.
+  bool CheckInteractiveUpdateRequested(EvaluationContext* ec,
+                                       UpdaterProvider* const updater_provider,
+                                       bool* interactive_out) const;
+
   DISALLOW_COPY_AND_ASSIGN(InteractiveUpdatePolicyImpl);
 };
 
 }  // namespace chromeos_update_manager
 
-#endif  // UPDATE_ENGINE_UPDATE_MANAGER_OFFICIAL_BUILD_CHECK_POLICY_IMPL_H_
+#endif  // UPDATE_ENGINE_UPDATE_MANAGER_INTERACTIVE_UPDATE_POLICY_IMPL_H_
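
InteractiveUpdatePolicyImpl now short-circuits both UpdateCheckAllowed and UpdateCanBeApplied through the shared CheckInteractiveUpdateRequested helper, returning kSucceeded when a forced update is pending and kContinue otherwise so the next policy in the chain can decide. A minimal sketch of that chain semantics, with names and enum values abbreviated for illustration (this is not the real ChromeOSPolicy wiring):

    #include <functional>
    #include <iostream>
    #include <vector>

    // Enum values abbreviated for the sketch.
    enum class EvalStatus { kSucceeded, kContinue };

    struct Decision {
      bool updates_enabled = false;
      bool interactive = false;
    };

    EvalStatus EvaluateChain(
        const std::vector<std::function<EvalStatus(Decision*)>>& sub_policies,
        Decision* result) {
      for (const auto& policy : sub_policies) {
        EvalStatus status = policy(result);
        if (status != EvalStatus::kContinue)
          return status;  // The first sub-policy with an opinion wins.
      }
      result->updates_enabled = true;  // Fallback decision.
      return EvalStatus::kSucceeded;
    }

    int main() {
      bool forced_update_pending = true;
      // Sub-policy 1: analogous to InteractiveUpdatePolicyImpl.
      auto interactive_policy = [&](Decision* d) {
        if (!forced_update_pending)
          return EvalStatus::kContinue;
        d->updates_enabled = true;
        d->interactive = true;
        return EvalStatus::kSucceeded;
      };
      // Sub-policy 2: would apply further checks; never reached when forced.
      auto enterprise_policy = [](Decision*) { return EvalStatus::kContinue; };

      Decision decision;
      EvaluateChain({interactive_policy, enterprise_policy}, &decision);
      std::cout << "interactive=" << decision.interactive << '\n';  // Prints 1.
      return 0;
    }
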
diff --git a/update_manager/policy.h b/update_manager/policy.h
index b60c4da..ee163b3 100644
--- a/update_manager/policy.h
+++ b/update_manager/policy.h
@@ -24,6 +24,7 @@
 #include "update_engine/common/error_code.h"
 #include "update_engine/payload_consumer/install_plan.h"
 #include "update_engine/update_manager/evaluation_context.h"
+#include "update_engine/update_manager/rollback_prefs.h"
 #include "update_engine/update_manager/state.h"
 
 namespace chromeos_update_manager {
@@ -47,11 +48,18 @@
   //
   // A target version prefix, if imposed by policy; otherwise, an empty string.
   std::string target_version_prefix;
+  // Specifies whether rollback images are allowed by device policy.
+  bool rollback_allowed;
+  // Specifies the number of Chrome milestones for which rollback should be
+  // allowed, counting from the stable version at any given time. The value is
+  // -1 if unspecified (e.g. no device policy is available yet); in that case
+  // no version roll-forward should happen.
+  int rollback_allowed_milestones;
   // A target channel, if so imposed by policy; otherwise, an empty string.
   std::string target_channel;
 
   // Whether the allowed update is interactive (user-initiated) or periodic.
-  bool is_interactive;
+  bool interactive;
 };
 
 // Input arguments to UpdateCanStart.
@@ -64,7 +72,7 @@
   //
   // Whether the current update check is an interactive one. The caller should
   // feed the value returned by the preceding call to UpdateCheckAllowed().
-  bool is_interactive;
+  bool interactive;
   // Whether it is a delta payload.
   bool is_delta_payload;
   // Wallclock time when payload was first (consecutively) offered by Omaha.
@@ -118,7 +126,7 @@
 
   // Information pertaining to update scattering.
   //
-  // The currently knwon (persisted) scattering wallclock-based wait period and
+  // The currently known (persisted) scattering wallclock-based wait period and
   // update check threshold; zero if none.
   base::TimeDelta scatter_wait_period;
   int scatter_check_threshold;
diff --git a/update_manager/policy_test_utils.cc b/update_manager/policy_test_utils.cc
index fbfcb82..5491e00 100644
--- a/update_manager/policy_test_utils.cc
+++ b/update_manager/policy_test_utils.cc
@@ -48,6 +48,17 @@
   fake_clock_.SetWallclockTime(Time::FromInternalValue(12345678901234L));
 }
 
+void UmPolicyTestBase::SetUpDefaultTimeProvider() {
+  Time current_time = fake_clock_.GetWallclockTime();
+  base::Time::Exploded exploded;
+  current_time.LocalExplode(&exploded);
+  fake_state_.time_provider()->var_curr_hour()->reset(new int(exploded.hour));
+  fake_state_.time_provider()->var_curr_minute()->reset(
+      new int(exploded.minute));
+  fake_state_.time_provider()->var_curr_date()->reset(
+      new Time(current_time.LocalMidnight()));
+}
+
 void UmPolicyTestBase::SetUpDefaultState() {
   fake_state_.updater_provider()->var_updater_started_time()->reset(
       new Time(fake_clock_.GetWallclockTime()));
@@ -73,7 +84,7 @@
   // This is a non-interactive check returning a delta payload, seen for the
   // first time (|first_seen_period| ago). Clearly, there were no failed
   // attempts so far.
-  update_state.is_interactive = false;
+  update_state.interactive = false;
   update_state.is_delta_payload = false;
   update_state.first_seen = first_seen_time;
   update_state.num_checks = 1;
diff --git a/update_manager/policy_test_utils.h b/update_manager/policy_test_utils.h
index 5b93f7b..eb5758f 100644
--- a/update_manager/policy_test_utils.h
+++ b/update_manager/policy_test_utils.h
@@ -42,6 +42,9 @@
   // Sets the clock to fixed values.
   virtual void SetUpDefaultClock();
 
+  // Sets the fake time provider to the time given by the fake clock.
+  virtual void SetUpDefaultTimeProvider();
+
   // Sets up the default state in fake_state_.  override to add Policy-specific
   // items, but only after calling this class's implementation.
   virtual void SetUpDefaultState();
diff --git a/update_manager/real_device_policy_provider.cc b/update_manager/real_device_policy_provider.cc
index d9880c3..e0872bb 100644
--- a/update_manager/real_device_policy_provider.cc
+++ b/update_manager/real_device_policy_provider.cc
@@ -18,6 +18,8 @@
 
 #include <stdint.h>
 
+#include <vector>
+
 #include <base/location.h>
 #include <base/logging.h>
 #include <base/time/time.h>
@@ -33,6 +35,7 @@
 using policy::DevicePolicy;
 using std::set;
 using std::string;
+using std::vector;
 
 namespace {
 
@@ -126,6 +129,23 @@
   }
 }
 
+bool RealDevicePolicyProvider::ConvertRollbackToTargetVersion(
+    RollbackToTargetVersion* rollback_to_target_version) const {
+  int rollback_to_target_version_int;
+  if (!policy_provider_->GetDevicePolicy().GetRollbackToTargetVersion(
+          &rollback_to_target_version_int)) {
+    return false;
+  }
+  if (rollback_to_target_version_int < 0 ||
+      rollback_to_target_version_int >=
+          static_cast<int>(RollbackToTargetVersion::kMaxValue)) {
+    return false;
+  }
+  *rollback_to_target_version =
+      static_cast<RollbackToTargetVersion>(rollback_to_target_version_int);
+  return true;
+}
+
 bool RealDevicePolicyProvider::ConvertAllowedConnectionTypesForUpdate(
       set<ConnectionType>* allowed_types) const {
   set<string> allowed_types_str;
@@ -162,6 +182,23 @@
   return true;
 }
 
+bool RealDevicePolicyProvider::ConvertDisallowedTimeIntervals(
+    WeeklyTimeIntervalVector* disallowed_intervals_out) const {
+  vector<DevicePolicy::WeeklyTimeInterval> parsed_intervals;
+  if (!policy_provider_->GetDevicePolicy().GetDisallowedTimeIntervals(
+          &parsed_intervals)) {
+    return false;
+  }
+
+  disallowed_intervals_out->clear();
+  for (const auto& interval : parsed_intervals) {
+    disallowed_intervals_out->emplace_back(
+        WeeklyTime(interval.start_day_of_week, interval.start_time),
+        WeeklyTime(interval.end_day_of_week, interval.end_time));
+  }
+  return true;
+}
+
 void RealDevicePolicyProvider::RefreshDevicePolicy() {
   if (!policy_provider_->Reload()) {
     LOG(INFO) << "No device policies/settings present.";
@@ -176,6 +213,14 @@
   UpdateVariable(&var_update_disabled_, &DevicePolicy::GetUpdateDisabled);
   UpdateVariable(&var_target_version_prefix_,
                  &DevicePolicy::GetTargetVersionPrefix);
+  UpdateVariable(&var_rollback_to_target_version_,
+                 &RealDevicePolicyProvider::ConvertRollbackToTargetVersion);
+  UpdateVariable(&var_rollback_allowed_milestones_,
+                 &DevicePolicy::GetRollbackAllowedMilestones);
+  if (policy_provider_->IsConsumerDevice()) {
+    // For consumer devices (which won't ever have policy), set value to 0.
+    var_rollback_allowed_milestones_.SetValue(0);
+  }
   UpdateVariable(&var_scatter_factor_,
                  &RealDevicePolicyProvider::ConvertScatterFactor);
   UpdateVariable(
@@ -187,6 +232,10 @@
   UpdateVariable(&var_au_p2p_enabled_, &DevicePolicy::GetAuP2PEnabled);
   UpdateVariable(&var_allow_kiosk_app_control_chrome_version_,
                  &DevicePolicy::GetAllowKioskAppControlChromeVersion);
+  UpdateVariable(&var_auto_launched_kiosk_app_id_,
+                 &DevicePolicy::GetAutoLaunchedKioskAppId);
+  UpdateVariable(&var_disallowed_time_intervals_,
+                 &RealDevicePolicyProvider::ConvertDisallowedTimeIntervals);
 }
 
 }  // namespace chromeos_update_manager
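
RefreshDevicePolicy() above relies on UpdateVariable() being overloaded for plain DevicePolicy getters and for the provider's own Convert* wrappers, both passed as pointers to member functions. A compilable sketch of that pattern under assumed, simplified types (CachedVariable and PolicyProviderSketch are stand-ins for illustration, not update_engine classes):

    #include <optional>
    #include <string>

    struct DevicePolicy {
      bool GetTargetVersionPrefix(std::string* out) const {
        *out = "1234.";
        return true;
      }
    };

    template <typename T>
    class CachedVariable {
     public:
      void SetValue(const T& v) { value_ = v; }
      void UnsetValue() { value_.reset(); }
      const std::optional<T>& value() const { return value_; }

     private:
      std::optional<T> value_;
    };

    class PolicyProviderSketch {
     public:
      explicit PolicyProviderSketch(const DevicePolicy* policy)
          : policy_(policy) {}

      void Refresh() {
        // Getter lives on DevicePolicy.
        UpdateVariable(&target_version_prefix_,
                       &DevicePolicy::GetTargetVersionPrefix);
        // Getter is a conversion wrapper on the provider itself.
        UpdateVariable(&rollback_allowed_milestones_,
                       &PolicyProviderSketch::ConvertRollbackAllowedMilestones);
      }

      CachedVariable<std::string> target_version_prefix_;
      CachedVariable<int> rollback_allowed_milestones_;

     private:
      template <typename T>
      void UpdateVariable(CachedVariable<T>* var,
                          bool (DevicePolicy::*getter)(T*) const) {
        T value;
        if ((policy_->*getter)(&value))
          var->SetValue(value);
        else
          var->UnsetValue();
      }

      template <typename T>
      void UpdateVariable(CachedVariable<T>* var,
                          bool (PolicyProviderSketch::*getter)(T*) const) {
        T value;
        if ((this->*getter)(&value))
          var->SetValue(value);
        else
          var->UnsetValue();
      }

      bool ConvertRollbackAllowedMilestones(int* out) const {
        *out = 4;  // Pretend a raw policy value was validated and converted.
        return true;
      }

      const DevicePolicy* policy_;
    };

    int main() {
      DevicePolicy policy;
      PolicyProviderSketch provider(&policy);
      provider.Refresh();
      // Both cached variables are now set: "1234." and 4.
      return provider.rollback_allowed_milestones_.value().value_or(0) == 4 ? 0
                                                                            : 1;
    }
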
diff --git a/update_manager/real_device_policy_provider.h b/update_manager/real_device_policy_provider.h
index 5b5ee58..d999d81 100644
--- a/update_manager/real_device_policy_provider.h
+++ b/update_manager/real_device_policy_provider.h
@@ -20,6 +20,7 @@
 #include <memory>
 #include <set>
 #include <string>
+#include <utility>
 
 #include <brillo/message_loops/message_loop.h>
 #include <gtest/gtest_prod.h>  // for FRIEND_TEST
@@ -71,6 +72,14 @@
     return &var_target_version_prefix_;
   }
 
+  Variable<RollbackToTargetVersion>* var_rollback_to_target_version() override {
+    return &var_rollback_to_target_version_;
+  }
+
+  Variable<int>* var_rollback_allowed_milestones() override {
+    return &var_rollback_allowed_milestones_;
+  }
+
   Variable<base::TimeDelta>* var_scatter_factor() override {
     return &var_scatter_factor_;
   }
@@ -96,6 +105,14 @@
     return &var_allow_kiosk_app_control_chrome_version_;
   }
 
+  Variable<std::string>* var_auto_launched_kiosk_app_id() override {
+    return &var_auto_launched_kiosk_app_id_;
+  }
+
+  Variable<WeeklyTimeIntervalVector>* var_disallowed_time_intervals() override {
+    return &var_disallowed_time_intervals_;
+  }
+
  private:
   FRIEND_TEST(UmRealDevicePolicyProviderTest, RefreshScheduledTest);
   FRIEND_TEST(UmRealDevicePolicyProviderTest, NonExistentDevicePolicyReloaded);
@@ -130,6 +147,11 @@
       AsyncCopyVariable<T>* var,
       bool (RealDevicePolicyProvider::*getter_method)(T*) const);
 
+  // Wrapper for DevicePolicy::GetRollbackToTargetVersion() that converts the
+  // result to RollbackToTargetVersion.
+  bool ConvertRollbackToTargetVersion(
+      RollbackToTargetVersion* rollback_to_target_version) const;
+
   // Wrapper for DevicePolicy::GetScatterFactorInSeconds() that converts the
   // result to a base::TimeDelta. It returns the same value as
   // GetScatterFactorInSeconds().
@@ -140,6 +162,12 @@
   bool ConvertAllowedConnectionTypesForUpdate(
       std::set<chromeos_update_engine::ConnectionType>* allowed_types) const;
 
+  // Wrapper for DevicePolicy::GetUpdateTimeRestrictions() that converts
+  // the DevicePolicy::WeeklyTimeInterval structs to WeeklyTimeInterval objects,
+  // which offer more functionality.
+  bool ConvertDisallowedTimeIntervals(
+      WeeklyTimeIntervalVector* disallowed_intervals_out) const;
+
   // Used for fetching information about the device policy.
   policy::PolicyProvider* policy_provider_;
 
@@ -164,6 +192,10 @@
   AsyncCopyVariable<bool> var_update_disabled_{"update_disabled"};
   AsyncCopyVariable<std::string> var_target_version_prefix_{
       "target_version_prefix"};
+  AsyncCopyVariable<RollbackToTargetVersion> var_rollback_to_target_version_{
+      "rollback_to_target_version"};
+  AsyncCopyVariable<int> var_rollback_allowed_milestones_{
+      "rollback_allowed_milestones"};
   AsyncCopyVariable<base::TimeDelta> var_scatter_factor_{"scatter_factor"};
   AsyncCopyVariable<std::set<chromeos_update_engine::ConnectionType>>
       var_allowed_connection_types_for_update_{
@@ -173,6 +205,10 @@
   AsyncCopyVariable<bool> var_au_p2p_enabled_{"au_p2p_enabled"};
   AsyncCopyVariable<bool> var_allow_kiosk_app_control_chrome_version_{
       "allow_kiosk_app_control_chrome_version"};
+  AsyncCopyVariable<WeeklyTimeIntervalVector> var_disallowed_time_intervals_{
+      "update_time_restrictions"};
+  AsyncCopyVariable<std::string> var_auto_launched_kiosk_app_id_{
+      "auto_launched_kiosk_app_id"};
 
   DISALLOW_COPY_AND_ASSIGN(RealDevicePolicyProvider);
 };
diff --git a/update_manager/real_device_policy_provider_unittest.cc b/update_manager/real_device_policy_provider_unittest.cc
index 167cbd9..32e273d 100644
--- a/update_manager/real_device_policy_provider_unittest.cc
+++ b/update_manager/real_device_policy_provider_unittest.cc
@@ -17,6 +17,7 @@
 #include "update_engine/update_manager/real_device_policy_provider.h"
 
 #include <memory>
+#include <vector>
 
 #include <base/memory/ptr_util.h>
 #include <brillo/message_loops/fake_message_loop.h>
@@ -40,12 +41,14 @@
 using base::TimeDelta;
 using brillo::MessageLoop;
 using chromeos_update_engine::ConnectionType;
+using policy::DevicePolicy;
 #if USE_DBUS
 using chromeos_update_engine::dbus_test_utils::MockSignalHandler;
 #endif  // USE_DBUS
 using std::set;
 using std::string;
 using std::unique_ptr;
+using std::vector;
 using testing::DoAll;
 using testing::Mock;
 using testing::Return;
@@ -178,6 +181,10 @@
   UmTestUtils::ExpectVariableNotSet(provider_->var_release_channel_delegated());
   UmTestUtils::ExpectVariableNotSet(provider_->var_update_disabled());
   UmTestUtils::ExpectVariableNotSet(provider_->var_target_version_prefix());
+  UmTestUtils::ExpectVariableNotSet(
+      provider_->var_rollback_to_target_version());
+  UmTestUtils::ExpectVariableNotSet(
+      provider_->var_rollback_allowed_milestones());
   UmTestUtils::ExpectVariableNotSet(provider_->var_scatter_factor());
   UmTestUtils::ExpectVariableNotSet(
       provider_->var_allowed_connection_types_for_update());
@@ -186,6 +193,9 @@
   UmTestUtils::ExpectVariableNotSet(provider_->var_au_p2p_enabled());
   UmTestUtils::ExpectVariableNotSet(
       provider_->var_allow_kiosk_app_control_chrome_version());
+  UmTestUtils::ExpectVariableNotSet(
+      provider_->var_auto_launched_kiosk_app_id());
+  UmTestUtils::ExpectVariableNotSet(provider_->var_disallowed_time_intervals());
 }
 
 TEST_F(UmRealDevicePolicyProviderTest, ValuesUpdated) {
@@ -203,6 +213,8 @@
       .WillOnce(Return(false));
   EXPECT_CALL(mock_device_policy_, GetAllowKioskAppControlChromeVersion(_))
       .WillOnce(DoAll(SetArgPointee<0>(true), Return(true)));
+  EXPECT_CALL(mock_device_policy_, GetAutoLaunchedKioskAppId(_))
+      .WillOnce(DoAll(SetArgPointee<0>(string("myapp")), Return(true)));
 
   provider_->RefreshDevicePolicy();
 
@@ -216,6 +228,63 @@
       provider_->var_allowed_connection_types_for_update());
   UmTestUtils::ExpectVariableHasValue(
       true, provider_->var_allow_kiosk_app_control_chrome_version());
+  UmTestUtils::ExpectVariableHasValue(
+      string("myapp"), provider_->var_auto_launched_kiosk_app_id());
+}
+
+TEST_F(UmRealDevicePolicyProviderTest, RollbackToTargetVersionConverted) {
+  SetUpExistentDevicePolicy();
+  EXPECT_CALL(mock_device_policy_, GetRollbackToTargetVersion(_))
+#if USE_DBUS
+      .Times(2)
+#else
+      .Times(1)
+#endif  // USE_DBUS
+      .WillRepeatedly(DoAll(SetArgPointee<0>(2), Return(true)));
+  EXPECT_TRUE(provider_->Init());
+  loop_.RunOnce(false);
+
+  UmTestUtils::ExpectVariableHasValue(
+      RollbackToTargetVersion::kRollbackWithFullPowerwash,
+      provider_->var_rollback_to_target_version());
+}
+
+TEST_F(UmRealDevicePolicyProviderTest, RollbackAllowedMilestonesOobe) {
+  SetUpNonExistentDevicePolicy();
+  EXPECT_CALL(mock_device_policy_, GetRollbackAllowedMilestones(_)).Times(0);
+  ON_CALL(mock_policy_provider_, IsConsumerDevice())
+      .WillByDefault(Return(false));
+  EXPECT_TRUE(provider_->Init());
+  loop_.RunOnce(false);
+
+  UmTestUtils::ExpectVariableNotSet(
+      provider_->var_rollback_allowed_milestones());
+}
+
+TEST_F(UmRealDevicePolicyProviderTest, RollbackAllowedMilestonesConsumer) {
+  SetUpNonExistentDevicePolicy();
+  EXPECT_CALL(mock_device_policy_, GetRollbackAllowedMilestones(_)).Times(0);
+  ON_CALL(mock_policy_provider_, IsConsumerDevice())
+      .WillByDefault(Return(true));
+  EXPECT_TRUE(provider_->Init());
+  loop_.RunOnce(false);
+
+  UmTestUtils::ExpectVariableHasValue(
+      0, provider_->var_rollback_allowed_milestones());
+}
+
+TEST_F(UmRealDevicePolicyProviderTest,
+       RollbackAllowedMilestonesEnterprisePolicySet) {
+  SetUpExistentDevicePolicy();
+  ON_CALL(mock_device_policy_, GetRollbackAllowedMilestones(_))
+      .WillByDefault(DoAll(SetArgPointee<0>(2), Return(true)));
+  ON_CALL(mock_policy_provider_, IsConsumerDevice())
+      .WillByDefault(Return(false));
+  EXPECT_TRUE(provider_->Init());
+  loop_.RunOnce(false);
+
+  UmTestUtils::ExpectVariableHasValue(
+      2, provider_->var_rollback_allowed_milestones());
 }
 
 TEST_F(UmRealDevicePolicyProviderTest, ScatterFactorConverted) {
@@ -268,4 +337,25 @@
       provider_->var_allowed_connection_types_for_update());
 }
 
+TEST_F(UmRealDevicePolicyProviderTest, DisallowedIntervalsConverted) {
+  SetUpExistentDevicePolicy();
+
+  vector<DevicePolicy::WeeklyTimeInterval> intervals = {
+      {5, TimeDelta::FromHours(5), 6, TimeDelta::FromHours(8)},
+      {1, TimeDelta::FromHours(1), 3, TimeDelta::FromHours(10)}};
+
+  EXPECT_CALL(mock_device_policy_, GetDisallowedTimeIntervals(_))
+      .WillRepeatedly(DoAll(SetArgPointee<0>(intervals), Return(true)));
+  EXPECT_TRUE(provider_->Init());
+  loop_.RunOnce(false);
+
+  UmTestUtils::ExpectVariableHasValue(
+      WeeklyTimeIntervalVector{
+          WeeklyTimeInterval(WeeklyTime(5, TimeDelta::FromHours(5)),
+                             WeeklyTime(6, TimeDelta::FromHours(8))),
+          WeeklyTimeInterval(WeeklyTime(1, TimeDelta::FromHours(1)),
+                             WeeklyTime(3, TimeDelta::FromHours(10)))},
+      provider_->var_disallowed_time_intervals());
+}
+
 }  // namespace chromeos_update_manager
diff --git a/update_manager/real_system_provider.cc b/update_manager/real_system_provider.cc
index fdf7e86..53e9ab3 100644
--- a/update_manager/real_system_provider.cc
+++ b/update_manager/real_system_provider.cc
@@ -21,7 +21,7 @@
 #include <base/logging.h>
 #include <base/time/time.h>
 #if USE_CHROME_KIOSK_APP
-#include <libcros/dbus-proxies.h>
+#include <kiosk-app/dbus-proxies.h>
 #endif  // USE_CHROME_KIOSK_APP
 
 #include "update_engine/common/utils.h"
@@ -126,8 +126,8 @@
     string* required_platform_version) {
 #if USE_CHROME_KIOSK_APP
   brillo::ErrorPtr error;
-  if (!libcros_proxy_->GetKioskAppRequiredPlatformVersion(
-          required_platform_version, &error)) {
+  if (!kiosk_app_proxy_->GetRequiredPlatformVersion(required_platform_version,
+                                                    &error)) {
     LOG(WARNING) << "Failed to get kiosk required platform version";
     required_platform_version->clear();
     return false;
diff --git a/update_manager/real_system_provider.h b/update_manager/real_system_provider.h
index 80a8615..9d71d0d 100644
--- a/update_manager/real_system_provider.h
+++ b/update_manager/real_system_provider.h
@@ -26,7 +26,7 @@
 
 namespace org {
 namespace chromium {
-class LibCrosServiceInterfaceProxyInterface;
+class KioskAppServiceInterfaceProxyInterface;
 }  // namespace chromium
 }  // namespace org
 
@@ -38,11 +38,12 @@
   RealSystemProvider(
       chromeos_update_engine::HardwareInterface* hardware,
       chromeos_update_engine::BootControlInterface* boot_control,
-      org::chromium::LibCrosServiceInterfaceProxyInterface* libcros_proxy)
+      org::chromium::KioskAppServiceInterfaceProxyInterface* kiosk_app_proxy)
       : hardware_(hardware),
 #if USE_CHROME_KIOSK_APP
         boot_control_(boot_control),
-        libcros_proxy_(libcros_proxy) {}
+        kiosk_app_proxy_(kiosk_app_proxy) {
+  }
 #else
         boot_control_(boot_control) {}
 #endif  // USE_CHROME_KIOSK_APP
@@ -83,7 +84,7 @@
   chromeos_update_engine::HardwareInterface* const hardware_;
   chromeos_update_engine::BootControlInterface* const boot_control_;
 #if USE_CHROME_KIOSK_APP
-  org::chromium::LibCrosServiceInterfaceProxyInterface* const libcros_proxy_;
+  org::chromium::KioskAppServiceInterfaceProxyInterface* const kiosk_app_proxy_;
 #endif  // USE_CHROME_KIOSK_APP
 
   DISALLOW_COPY_AND_ASSIGN(RealSystemProvider);
diff --git a/update_manager/real_system_provider_unittest.cc b/update_manager/real_system_provider_unittest.cc
index 103a35f..4e4da67 100644
--- a/update_manager/real_system_provider_unittest.cc
+++ b/update_manager/real_system_provider_unittest.cc
@@ -26,10 +26,10 @@
 #include "update_engine/common/fake_hardware.h"
 #include "update_engine/update_manager/umtest_utils.h"
 #if USE_CHROME_KIOSK_APP
-#include "libcros/dbus-proxies.h"
-#include "libcros/dbus-proxy-mocks.h"
+#include "kiosk-app/dbus-proxies.h"
+#include "kiosk-app/dbus-proxy-mocks.h"
 
-using org::chromium::LibCrosServiceInterfaceProxyMock;
+using org::chromium::KioskAppServiceInterfaceProxyMock;
 #endif  // USE_CHROME_KIOSK_APP
 using std::unique_ptr;
 using testing::_;
@@ -49,14 +49,13 @@
  protected:
   void SetUp() override {
 #if USE_CHROME_KIOSK_APP
-    libcros_proxy_mock_.reset(new LibCrosServiceInterfaceProxyMock());
-    ON_CALL(*libcros_proxy_mock_,
-            GetKioskAppRequiredPlatformVersion(_, _, _))
+    kiosk_app_proxy_mock_.reset(new KioskAppServiceInterfaceProxyMock());
+    ON_CALL(*kiosk_app_proxy_mock_, GetRequiredPlatformVersion(_, _, _))
         .WillByDefault(
             DoAll(SetArgPointee<0>(kRequiredPlatformVersion), Return(true)));
 
     provider_.reset(new RealSystemProvider(
-        &fake_hardware_, &fake_boot_control_, libcros_proxy_mock_.get()));
+        &fake_hardware_, &fake_boot_control_, kiosk_app_proxy_mock_.get()));
 #else
     provider_.reset(
         new RealSystemProvider(&fake_hardware_, &fake_boot_control_, nullptr));
@@ -69,7 +68,7 @@
   unique_ptr<RealSystemProvider> provider_;
 
 #if USE_CHROME_KIOSK_APP
-  unique_ptr<LibCrosServiceInterfaceProxyMock> libcros_proxy_mock_;
+  unique_ptr<KioskAppServiceInterfaceProxyMock> kiosk_app_proxy_mock_;
 #endif  // USE_CHROME_KIOSK_APP
 };
 
@@ -98,8 +97,7 @@
 }
 
 TEST_F(UmRealSystemProviderTest, KioskRequiredPlatformVersionFailure) {
-  EXPECT_CALL(*libcros_proxy_mock_,
-              GetKioskAppRequiredPlatformVersion(_, _, _))
+  EXPECT_CALL(*kiosk_app_proxy_mock_, GetRequiredPlatformVersion(_, _, _))
       .WillOnce(Return(false));
 
   UmTestUtils::ExpectVariableNotSet(
@@ -108,15 +106,13 @@
 
 TEST_F(UmRealSystemProviderTest,
        KioskRequiredPlatformVersionRecoveryFromFailure) {
-  EXPECT_CALL(*libcros_proxy_mock_,
-              GetKioskAppRequiredPlatformVersion(_, _, _))
+  EXPECT_CALL(*kiosk_app_proxy_mock_, GetRequiredPlatformVersion(_, _, _))
       .WillOnce(Return(false));
   UmTestUtils::ExpectVariableNotSet(
       provider_->var_kiosk_required_platform_version());
-  testing::Mock::VerifyAndClearExpectations(libcros_proxy_mock_.get());
+  testing::Mock::VerifyAndClearExpectations(kiosk_app_proxy_mock_.get());
 
-  EXPECT_CALL(*libcros_proxy_mock_,
-              GetKioskAppRequiredPlatformVersion(_, _, _))
+  EXPECT_CALL(*kiosk_app_proxy_mock_, GetRequiredPlatformVersion(_, _, _))
       .WillOnce(
           DoAll(SetArgPointee<0>(kRequiredPlatformVersion), Return(true)));
   UmTestUtils::ExpectVariableHasValue(
diff --git a/update_manager/real_time_provider.cc b/update_manager/real_time_provider.cc
index db26816..baa8ae3 100644
--- a/update_manager/real_time_provider.cc
+++ b/update_manager/real_time_provider.cc
@@ -77,9 +77,28 @@
   DISALLOW_COPY_AND_ASSIGN(CurrHourVariable);
 };
 
+class CurrMinuteVariable : public Variable<int> {
+ public:
+  CurrMinuteVariable(const string& name, ClockInterface* clock)
+      : Variable<int>(name, TimeDelta::FromSeconds(15)), clock_(clock) {}
+
+ protected:
+  virtual const int* GetValue(TimeDelta /* timeout */, string* /* errmsg */) {
+    Time::Exploded exploded;
+    clock_->GetWallclockTime().LocalExplode(&exploded);
+    return new int(exploded.minute);
+  }
+
+ private:
+  ClockInterface* clock_;
+
+  DISALLOW_COPY_AND_ASSIGN(CurrMinuteVariable);
+};
+
 bool RealTimeProvider::Init() {
   var_curr_date_.reset(new CurrDateVariable("curr_date", clock_));
   var_curr_hour_.reset(new CurrHourVariable("curr_hour", clock_));
+  var_curr_minute_.reset(new CurrMinuteVariable("curr_minute", clock_));
   return true;
 }
 
diff --git a/update_manager/real_time_provider.h b/update_manager/real_time_provider.h
index e7cae94..989cefb 100644
--- a/update_manager/real_time_provider.h
+++ b/update_manager/real_time_provider.h
@@ -43,12 +43,15 @@
     return var_curr_hour_.get();
   }
 
+  Variable<int>* var_curr_minute() override { return var_curr_minute_.get(); }
+
  private:
   // A clock abstraction (fakeable).
   chromeos_update_engine::ClockInterface* const clock_;
 
   std::unique_ptr<Variable<base::Time>> var_curr_date_;
   std::unique_ptr<Variable<int>> var_curr_hour_;
+  std::unique_ptr<Variable<int>> var_curr_minute_;
 
   DISALLOW_COPY_AND_ASSIGN(RealTimeProvider);
 };
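
CurrMinuteVariable polls the wall clock every 15 seconds and exposes the exploded local minute, mirroring the existing curr_hour variable. For readers without the libchrome headers, the equivalent of base::Time::LocalExplode() written against the C++ standard library looks roughly like this (illustration only):

    #include <ctime>
    #include <iostream>

    int main() {
      std::time_t now = std::time(nullptr);
      std::tm* local = std::localtime(&now);  // Explode into local-time fields.
      std::cout << "curr_hour=" << local->tm_hour
                << " curr_minute=" << local->tm_min << '\n';
      return 0;
    }
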
diff --git a/update_manager/real_time_provider_unittest.cc b/update_manager/real_time_provider_unittest.cc
index f8db30b..ce2a718 100644
--- a/update_manager/real_time_provider_unittest.cc
+++ b/update_manager/real_time_provider_unittest.cc
@@ -84,4 +84,13 @@
                                       provider_->var_curr_hour());
 }
 
+TEST_F(UmRealTimeProviderTest, CurrMinuteValid) {
+  const Time now = CurrTime();
+  Time::Exploded expected;
+  now.LocalExplode(&expected);
+  fake_clock_.SetWallclockTime(now);
+  UmTestUtils::ExpectVariableHasValue(expected.minute,
+                                      provider_->var_curr_minute());
+}
+
 }  // namespace chromeos_update_manager
diff --git a/update_manager/real_updater_provider.cc b/update_manager/real_updater_provider.cc
index 050bd42..094e79c 100644
--- a/update_manager/real_updater_provider.cc
+++ b/update_manager/real_updater_provider.cc
@@ -401,11 +401,11 @@
     return new UpdateRequestStatus(update_request_status_);
   }
 
-  void Reset(bool forced_update_requested, bool is_interactive) {
+  void Reset(bool forced_update_requested, bool interactive) {
     UpdateRequestStatus new_value = UpdateRequestStatus::kNone;
     if (forced_update_requested)
-      new_value = (is_interactive ? UpdateRequestStatus::kInteractive :
-                   UpdateRequestStatus::kPeriodic);
+      new_value = (interactive ? UpdateRequestStatus::kInteractive
+                               : UpdateRequestStatus::kPeriodic);
     if (update_request_status_ != new_value) {
       update_request_status_ = new_value;
       NotifyValueChanged();
diff --git a/update_manager/rollback_prefs.h b/update_manager/rollback_prefs.h
new file mode 100644
index 0000000..1783eb0
--- /dev/null
+++ b/update_manager/rollback_prefs.h
@@ -0,0 +1,39 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_UPDATE_MANAGER_ROLLBACK_PREFS_H_
+#define UPDATE_ENGINE_UPDATE_MANAGER_ROLLBACK_PREFS_H_
+
+namespace chromeos_update_manager {
+
+// Value used to represent that kernel key versions can always roll-forward.
+// This is the maximum value of a kernel key version.
+constexpr int kRollforwardInfinity = 0xfffffffe;
+
+// Whether the device should roll back to the target version, and if yes, which
+// type of rollback should it do. Matches chrome_device_policy.proto's
+// AutoUpdateSettingsProto::RollbackToTargetVersion.
+enum class RollbackToTargetVersion {
+  kUnspecified = 0,
+  kDisabled = 1,
+  kRollbackWithFullPowerwash = 2,
+  // This value must be the last entry.
+  kMaxValue = 3
+};
+
+}  // namespace chromeos_update_manager
+
+#endif  // UPDATE_ENGINE_UPDATE_MANAGER_ROLLBACK_PREFS_H_
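
The kMaxValue sentinel serves two purposes: raw integers coming from policy can be range-checked before being cast to the enum (as ConvertRollbackToTargetVersion does), and switches over the enum can omit a default case so the compiler's -Wswitch warning flags any newly added value. A small standalone sketch (FromPolicyInt and RollbackAllowed are illustrative helpers, not part of update_engine):

    #include <optional>

    enum class RollbackToTargetVersion {
      kUnspecified = 0,
      kDisabled = 1,
      kRollbackWithFullPowerwash = 2,
      kMaxValue = 3
    };

    // Reject out-of-range raw values coming from policy before casting.
    std::optional<RollbackToTargetVersion> FromPolicyInt(int raw) {
      if (raw < 0 ||
          raw >= static_cast<int>(RollbackToTargetVersion::kMaxValue))
        return std::nullopt;
      return static_cast<RollbackToTargetVersion>(raw);
    }

    bool RollbackAllowed(RollbackToTargetVersion value, bool current_default) {
      // No default case: the compiler's -Wswitch warning points out any newly
      // added enumerator that is not handled here.
      switch (value) {
        case RollbackToTargetVersion::kUnspecified:
          return current_default;
        case RollbackToTargetVersion::kDisabled:
          return false;
        case RollbackToTargetVersion::kRollbackWithFullPowerwash:
          return true;
        case RollbackToTargetVersion::kMaxValue:
          return current_default;  // Not a real setting; treat as unspecified.
      }
      return current_default;
    }

    int main() {
      auto value = FromPolicyInt(2);
      return (value && RollbackAllowed(*value, /*current_default=*/false)) ? 0
                                                                           : 1;
    }
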
diff --git a/update_manager/staging_utils.cc b/update_manager/staging_utils.cc
new file mode 100644
index 0000000..4835ab2
--- /dev/null
+++ b/update_manager/staging_utils.cc
@@ -0,0 +1,142 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/update_manager/staging_utils.h"
+
+#include <utility>
+#include <vector>
+
+#include <base/logging.h>
+#include <base/rand_util.h>
+#include <base/time/time.h>
+#include <policy/device_policy.h>
+
+#include "update_engine/common/constants.h"
+#include "update_engine/common/hardware_interface.h"
+#include "update_engine/common/prefs_interface.h"
+#include "update_engine/system_state.h"
+
+using base::TimeDelta;
+using chromeos_update_engine::kPrefsWallClockStagingWaitPeriod;
+using chromeos_update_engine::PrefsInterface;
+using chromeos_update_engine::SystemState;
+using policy::DevicePolicy;
+
+namespace chromeos_update_manager {
+
+int GetStagingSchedule(const DevicePolicy* device_policy,
+                       StagingSchedule* staging_schedule_out) {
+  StagingSchedule staging_schedule;
+  if (!device_policy->GetDeviceUpdateStagingSchedule(&staging_schedule) ||
+      staging_schedule.empty()) {
+    return 0;
+  }
+
+  // Last percentage of the schedule should be 100.
+  if (staging_schedule.back().percentage != 100) {
+    LOG(ERROR) << "Last percentage of the schedule is not 100, it's: "
+               << staging_schedule.back().percentage;
+    return 0;
+  }
+
+  int previous_days = 0;
+  int previous_percentage = -1;
+  // Ensure that the schedule has a monotonically increasing set of percentages
+  // and that days are also monotonically increasing.
+  for (const auto& staging_pair : staging_schedule) {
+    int days = staging_pair.days;
+    if (previous_days >= days) {
+      LOG(ERROR) << "Days in staging schedule are not monotonically "
+                 << "increasing. Previous value: " << previous_days
+                 << " Current value: " << days;
+      return 0;
+    }
+    previous_days = days;
+    int percentage = staging_pair.percentage;
+    if (previous_percentage >= percentage) {
+      LOG(ERROR) << "Percentages in staging schedule are not monotonically "
+                 << "increasing. Previous value: " << previous_percentage
+                 << " Current value: " << percentage;
+      return 0;
+    }
+    previous_percentage = percentage;
+  }
+  // Modify staging schedule only if the schedule in the device policy is valid.
+  if (staging_schedule_out)
+    *staging_schedule_out = std::move(staging_schedule);
+
+  return previous_days;
+}
+
+int CalculateWaitTimeInDaysFromSchedule(
+    const StagingSchedule& staging_schedule) {
+  int prev_days = 0;
+  int percentage_position = base::RandInt(1, 100);
+  for (const auto& staging_pair : staging_schedule) {
+    int days = staging_pair.days;
+    if (percentage_position <= staging_pair.percentage) {
+      // Scatter between the start of the range and the end.
+      return prev_days + base::RandInt(1, days - prev_days);
+    }
+    prev_days = days;
+  }
+  // Something went wrong.
+  NOTREACHED();
+  return 0;
+}
+
+StagingCase CalculateStagingCase(const DevicePolicy* device_policy,
+                                 PrefsInterface* prefs,
+                                 TimeDelta* staging_wait_time,
+                                 StagingSchedule* staging_schedule) {
+  // Check that the schedule in the device policy is correct.
+  StagingSchedule new_staging_schedule;
+  int max_days = GetStagingSchedule(device_policy, &new_staging_schedule);
+  if (max_days == 0)
+    return StagingCase::kOff;
+
+  // Calculate the new wait time.
+  TimeDelta new_staging_wait_time = TimeDelta::FromDays(
+      CalculateWaitTimeInDaysFromSchedule(new_staging_schedule));
+  DCHECK_GT(new_staging_wait_time.InSeconds(), 0);
+  if (staging_wait_time->InSeconds() > 0) {
+    // If there haven't been any changes to the schedule and there is a value
+    // set, don't change the waiting time.
+    if (new_staging_schedule == *staging_schedule) {
+      return StagingCase::kNoAction;
+    }
+    // Otherwise, update the schedule and wait time.
+    *staging_wait_time = new_staging_wait_time;
+    *staging_schedule = std::move(new_staging_schedule);
+    return StagingCase::kNoSavedValue;
+  }
+  // Getting here means the schedule changed; update the old schedule.
+  *staging_schedule = std::move(new_staging_schedule);
+
+  int64_t wait_period_in_days;
+  // Use the persisted wait period if it is valid, that is, smaller than the
+  // maximum number of staging days in the schedule.
+  if (prefs->GetInt64(kPrefsWallClockStagingWaitPeriod, &wait_period_in_days) &&
+      wait_period_in_days > 0 && wait_period_in_days <= max_days) {
+    *staging_wait_time = TimeDelta::FromDays(wait_period_in_days);
+    return StagingCase::kSetStagingFromPref;
+  }
+
+  *staging_wait_time = new_staging_wait_time;
+  return StagingCase::kNoSavedValue;
+}
+
+}  // namespace chromeos_update_manager
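
CalculateWaitTimeInDaysFromSchedule() above assigns the device to a staging bucket by drawing a uniform percentile in 1..100, finding the first schedule entry whose percentage covers it, and then scattering uniformly across the days of that bucket. A worked, standalone example with the random draws made explicit so the arithmetic can be checked by hand (DayPercentagePair is stubbed; the function name is illustrative):

    #include <cassert>
    #include <vector>

    struct DayPercentagePair {
      int days;
      int percentage;
    };

    // Same walk as the real function, but the 1..100 draw and the scatter
    // within the bucket are parameters instead of base::RandInt() calls.
    int WaitDaysForPercentile(const std::vector<DayPercentagePair>& schedule,
                              int percentile,
                              int scatter_within_bucket) {
      int prev_days = 0;
      for (const auto& pair : schedule) {
        if (percentile <= pair.percentage) {
          // The real code scatters uniformly in (prev_days, pair.days].
          return prev_days + scatter_within_bucket;
        }
        prev_days = pair.days;
      }
      return 0;  // Unreachable for a valid schedule ending at 100%.
    }

    int main() {
      // The schedule used by the unit tests: 0% by day 2, 50% by day 7,
      // 80% by day 9, 100% by day 14.
      std::vector<DayPercentagePair> schedule = {
          {2, 0}, {7, 50}, {9, 80}, {14, 100}};
      // A device that draws percentile 60 lands in the (7, 9] bucket, so it
      // waits 8 or 9 days depending on the second scatter draw.
      assert(WaitDaysForPercentile(schedule, 60, 1) == 8);
      assert(WaitDaysForPercentile(schedule, 60, 2) == 9);
      // A draw of 100 always lands in the last bucket, waiting at most 14 days.
      assert(WaitDaysForPercentile(schedule, 100, 5) == 14);
      return 0;
    }
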
diff --git a/update_manager/staging_utils.h b/update_manager/staging_utils.h
new file mode 100644
index 0000000..e91bfeb
--- /dev/null
+++ b/update_manager/staging_utils.h
@@ -0,0 +1,71 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_UPDATE_MANAGER_STAGING_UTILS_H_
+#define UPDATE_ENGINE_UPDATE_MANAGER_STAGING_UTILS_H_
+
+#include <utility>
+#include <vector>
+
+#include <base/time/time.h>
+#include <policy/device_policy.h>
+
+#include "update_engine/common/prefs_interface.h"
+
+namespace chromeos_update_manager {
+
+using StagingSchedule = std::vector<policy::DevicePolicy::DayPercentagePair>;
+
+// Possible cases that staging might run into based on the inputs.
+enum class StagingCase {
+  // Staging is off, remove the persisted value.
+  kOff,
+  // Staging is enabled, but there is no valid persisted or saved value, or the
+  // schedule has changed.
+  kNoSavedValue,
+  // Staging is enabled, and there is a valid persisted value.
+  kSetStagingFromPref,
+  // Staging is enabled, and there have been no changes to the schedule.
+  kNoAction
+};
+
+// Calculate the bucket in which the device belongs based on a given staging
+// schedule. |staging_schedule| is assumed to have already been validated.
+int CalculateWaitTimeInDaysFromSchedule(
+    const StagingSchedule& staging_schedule);
+
+// Verifies that |device_policy| contains a valid staging schedule. If
+// |device_policy| contains a valid staging schedule, move it into
+// |staging_schedule_out| and return the total number of days spanned by the
+// schedule. Otherwise, don't modify |staging_schedule_out| and return 0 (which
+// is an invalid value for the length of a schedule).
+int GetStagingSchedule(const policy::DevicePolicy* device_policy,
+                       StagingSchedule* staging_schedule_out);
+
+// Uses the given arguments to check whether staging is on, and whether the
+// state should be updated with a new waiting time or not. |staging_wait_time|
+// should contain the old value of the wait time; it will be replaced with the
+// newly calculated wait time if staging is on. |staging_schedule| should
+// contain the previous staging schedule; if a new schedule is found, its value
+// will be replaced with the new one.
+StagingCase CalculateStagingCase(const policy::DevicePolicy* device_policy,
+                                 chromeos_update_engine::PrefsInterface* prefs,
+                                 base::TimeDelta* staging_wait_time,
+                                 StagingSchedule* staging_schedule);
+
+}  // namespace chromeos_update_manager
+
+#endif  // UPDATE_ENGINE_UPDATE_MANAGER_STAGING_UTILS_H_
diff --git a/update_manager/staging_utils_unittest.cc b/update_manager/staging_utils_unittest.cc
new file mode 100644
index 0000000..8d75acd
--- /dev/null
+++ b/update_manager/staging_utils_unittest.cc
@@ -0,0 +1,175 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/update_manager/staging_utils.h"
+
+#include <memory>
+#include <utility>
+
+#include <base/time/time.h>
+#include <gtest/gtest.h>
+#include <policy/mock_device_policy.h>
+
+#include "update_engine/common/constants.h"
+#include "update_engine/common/fake_prefs.h"
+
+using base::TimeDelta;
+using chromeos_update_engine::FakePrefs;
+using chromeos_update_engine::kPrefsWallClockStagingWaitPeriod;
+using testing::_;
+using testing::DoAll;
+using testing::Return;
+using testing::SetArgPointee;
+
+namespace chromeos_update_manager {
+
+constexpr TimeDelta kDay = TimeDelta::FromDays(1);
+constexpr int kMaxDays = 28;
+constexpr int kValidDaySum = 14;
+const StagingSchedule valid_schedule = {{2, 0}, {7, 50}, {9, 80}, {14, 100}};
+
+class StagingUtilsScheduleTest : public testing::Test {
+ protected:
+  void SetUp() override {
+    test_wait_time_ = TimeDelta();
+    test_staging_schedule_ = StagingSchedule();
+  }
+
+  void SetStagingSchedule(const StagingSchedule& staging_schedule) {
+    EXPECT_CALL(device_policy_, GetDeviceUpdateStagingSchedule(_))
+        .WillRepeatedly(
+            DoAll(SetArgPointee<0>(staging_schedule), Return(true)));
+  }
+
+  void SetPersistedStagingVal(int64_t wait_time) {
+    EXPECT_TRUE(
+        fake_prefs_.SetInt64(kPrefsWallClockStagingWaitPeriod, wait_time));
+  }
+
+  void TestStagingCase(const StagingCase& expected) {
+    EXPECT_EQ(expected,
+              CalculateStagingCase(&device_policy_,
+                                   &fake_prefs_,
+                                   &test_wait_time_,
+                                   &test_staging_schedule_));
+  }
+
+  void ExpectNoChanges() {
+    EXPECT_EQ(TimeDelta(), test_wait_time_);
+    EXPECT_EQ(StagingSchedule(), test_staging_schedule_);
+  }
+
+  policy::MockDevicePolicy device_policy_;
+  TimeDelta test_wait_time_;
+  StagingSchedule test_staging_schedule_;
+  FakePrefs fake_prefs_;
+};
+
+// The last percentage in the schedule should be 100; otherwise it is rejected.
+TEST_F(StagingUtilsScheduleTest, GetStagingScheduleInvalidLastElem) {
+  SetStagingSchedule(StagingSchedule{{2, 10}, {4, 20}, {5, 40}});
+  EXPECT_EQ(0, GetStagingSchedule(&device_policy_, &test_staging_schedule_));
+  ExpectNoChanges();
+}
+
+// Percentage should be monotonically increasing.
+TEST_F(StagingUtilsScheduleTest, GetStagingScheduleNonMonotonic) {
+  SetStagingSchedule(StagingSchedule{{2, 10}, {6, 20}, {11, 20}, {12, 100}});
+  EXPECT_EQ(0, GetStagingSchedule(&device_policy_, &test_staging_schedule_));
+  ExpectNoChanges();
+}
+
+// The days should be monotonically increasing.
+TEST_F(StagingUtilsScheduleTest, GetStagingScheduleOverMaxDays) {
+  SetStagingSchedule(StagingSchedule{{2, 10}, {4, 20}, {15, 30}, {10, 100}});
+  EXPECT_EQ(0, GetStagingSchedule(&device_policy_, &test_staging_schedule_));
+  ExpectNoChanges();
+}
+
+TEST_F(StagingUtilsScheduleTest, GetStagingScheduleValid) {
+  SetStagingSchedule(valid_schedule);
+  EXPECT_EQ(kValidDaySum,
+            GetStagingSchedule(&device_policy_, &test_staging_schedule_));
+  EXPECT_EQ(test_staging_schedule_, valid_schedule);
+}
+
+TEST_F(StagingUtilsScheduleTest, StagingOffNoSchedule) {
+  // If the function returns false, the schedule shouldn't get used.
+  EXPECT_CALL(device_policy_, GetDeviceUpdateStagingSchedule(_))
+      .WillRepeatedly(DoAll(SetArgPointee<0>(valid_schedule), Return(false)));
+  TestStagingCase(StagingCase::kOff);
+  ExpectNoChanges();
+}
+
+TEST_F(StagingUtilsScheduleTest, StagingOffEmptySchedule) {
+  SetStagingSchedule(StagingSchedule());
+  TestStagingCase(StagingCase::kOff);
+  ExpectNoChanges();
+}
+
+TEST_F(StagingUtilsScheduleTest, StagingOffInvalidSchedule) {
+  // Any invalid schedule should return |StagingCase::kOff|.
+  SetStagingSchedule(StagingSchedule{{3, 30}, {6, 40}});
+  TestStagingCase(StagingCase::kOff);
+  ExpectNoChanges();
+}
+
+TEST_F(StagingUtilsScheduleTest, StagingOnNoAction) {
+  test_wait_time_ = kDay;
+  // Same as valid schedule, just using std::pair types.
+  StagingSchedule valid_schedule_pairs = {{2, 0}, {7, 50}, {9, 80}, {14, 100}};
+  test_staging_schedule_ = valid_schedule_pairs;
+  SetStagingSchedule(valid_schedule);
+  TestStagingCase(StagingCase::kNoAction);
+  // Vars should not be changed.
+  EXPECT_EQ(kDay, test_wait_time_);
+  EXPECT_EQ(test_staging_schedule_, valid_schedule_pairs);
+}
+
+TEST_F(StagingUtilsScheduleTest, StagingNoSavedValueChangePolicy) {
+  test_wait_time_ = kDay;
+  SetStagingSchedule(valid_schedule);
+  TestStagingCase(StagingCase::kNoSavedValue);
+  // Vars should change, since a wait time of less than 2 days is not possible
+  // given valid_schedule's first entry.
+  EXPECT_NE(kDay, test_wait_time_);
+  EXPECT_EQ(test_staging_schedule_, valid_schedule);
+  EXPECT_LE(test_wait_time_, kDay * kMaxDays);
+}
+
+// Tests the case where there was a reboot and there is no persisted value.
+TEST_F(StagingUtilsScheduleTest, StagingNoSavedValueNoPersisted) {
+  SetStagingSchedule(valid_schedule);
+  TestStagingCase(StagingCase::kNoSavedValue);
+  // Vars should change since there are no preset values and there is a new
+  // staging schedule.
+  EXPECT_NE(TimeDelta(), test_wait_time_);
+  EXPECT_EQ(test_staging_schedule_, valid_schedule);
+  EXPECT_LE(test_wait_time_, kDay * kMaxDays);
+}
+
+// If a pref is set and its value is less than the schedule's maximum day
+// count, use that pref.
+TEST_F(StagingUtilsScheduleTest, StagingSetFromPref) {
+  SetStagingSchedule(valid_schedule);
+  SetPersistedStagingVal(5);
+  TestStagingCase(StagingCase::kSetStagingFromPref);
+  // Vars should change.
+  EXPECT_EQ(kDay * 5, test_wait_time_);
+  EXPECT_EQ(test_staging_schedule_, valid_schedule);
+}
+
+}  // namespace chromeos_update_manager
diff --git a/update_manager/state_factory.cc b/update_manager/state_factory.cc
index 208ed51..7293692 100644
--- a/update_manager/state_factory.cc
+++ b/update_manager/state_factory.cc
@@ -46,7 +46,7 @@
 
 State* DefaultStateFactory(
     policy::PolicyProvider* policy_provider,
-    org::chromium::LibCrosServiceInterfaceProxyInterface* libcros_proxy,
+    org::chromium::KioskAppServiceInterfaceProxyInterface* kiosk_app_proxy,
     chromeos_update_engine::SystemState* system_state) {
   chromeos_update_engine::ClockInterface* const clock = system_state->clock();
   unique_ptr<RealConfigProvider> config_provider(
@@ -70,7 +70,7 @@
 #endif  // USE_SHILL
   unique_ptr<RealRandomProvider> random_provider(new RealRandomProvider());
   unique_ptr<RealSystemProvider> system_provider(new RealSystemProvider(
-      system_state->hardware(), system_state->boot_control(), libcros_proxy));
+      system_state->hardware(), system_state->boot_control(), kiosk_app_proxy));
 
   unique_ptr<RealTimeProvider> time_provider(new RealTimeProvider(clock));
   unique_ptr<RealUpdaterProvider> updater_provider(
diff --git a/update_manager/state_factory.h b/update_manager/state_factory.h
index 689684a..1c1c1d9 100644
--- a/update_manager/state_factory.h
+++ b/update_manager/state_factory.h
@@ -22,7 +22,7 @@
 
 namespace org {
 namespace chromium {
-class LibCrosServiceInterfaceProxyInterface;
+class KioskAppServiceInterfaceProxyInterface;
 }  // namespace chromium
 }  // namespace org
 
@@ -35,7 +35,7 @@
 // to initialize.
 State* DefaultStateFactory(
     policy::PolicyProvider* policy_provider,
-    org::chromium::LibCrosServiceInterfaceProxyInterface* libcros_proxy,
+    org::chromium::KioskAppServiceInterfaceProxyInterface* kiosk_app_proxy,
     chromeos_update_engine::SystemState* system_state);
 
 }  // namespace chromeos_update_manager
diff --git a/update_manager/time_provider.h b/update_manager/time_provider.h
index 663ec2c..94f4a8f 100644
--- a/update_manager/time_provider.h
+++ b/update_manager/time_provider.h
@@ -36,6 +36,9 @@
   // consistent with base::Time.
   virtual Variable<int>* var_curr_hour() = 0;
 
+  // Returns the current minute (0 to 59) in local time.
+  virtual Variable<int>* var_curr_minute() = 0;
+
  protected:
   TimeProvider() {}
 
diff --git a/update_manager/update_manager_unittest.cc b/update_manager/update_manager_unittest.cc
index c2766ea..125a60c 100644
--- a/update_manager/update_manager_unittest.cc
+++ b/update_manager/update_manager_unittest.cc
@@ -189,7 +189,7 @@
 
 TEST_F(UmUpdateManagerTest, PolicyRequestCallUpdateCanStart) {
   UpdateState update_state = UpdateState();
-  update_state.is_interactive = true;
+  update_state.interactive = true;
   update_state.is_delta_payload = false;
   update_state.first_seen = FixedTime();
   update_state.num_checks = 1;
diff --git a/update_manager/update_time_restrictions_policy_impl.cc b/update_manager/update_time_restrictions_policy_impl.cc
new file mode 100644
index 0000000..f9b83de
--- /dev/null
+++ b/update_manager/update_time_restrictions_policy_impl.cc
@@ -0,0 +1,74 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "update_engine/update_manager/update_time_restrictions_policy_impl.h"
+
+#include <memory>
+
+#include <base/time/time.h>
+
+#include "update_engine/update_manager/device_policy_provider.h"
+#include "update_engine/update_manager/system_provider.h"
+#include "update_engine/update_manager/weekly_time.h"
+
+using base::Time;
+using base::TimeDelta;
+
+using chromeos_update_engine::ErrorCode;
+using chromeos_update_engine::InstallPlan;
+
+namespace chromeos_update_manager {
+
+EvalStatus UpdateTimeRestrictionsPolicyImpl::UpdateCanBeApplied(
+    EvaluationContext* ec,
+    State* state,
+    std::string* error,
+    ErrorCode* result,
+    InstallPlan* install_plan) const {
+  DevicePolicyProvider* const dp_provider = state->device_policy_provider();
+  TimeProvider* const time_provider = state->time_provider();
+
+  // If kiosk mode is not enabled, don't restrict updates.
+  if (!ec->GetValue(dp_provider->var_auto_launched_kiosk_app_id()))
+    return EvalStatus::kContinue;
+
+  const Time* curr_date = ec->GetValue(time_provider->var_curr_date());
+  const int* curr_hour = ec->GetValue(time_provider->var_curr_hour());
+  const int* curr_minute = ec->GetValue(time_provider->var_curr_minute());
+  if (!curr_date || !curr_hour || !curr_minute) {
+    LOG(WARNING) << "Unable to access local time.";
+    return EvalStatus::kContinue;
+  }
+
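+  // Fold the current local date, hour and minute into a WeeklyTime so it can
+  // be compared against the policy's disallowed intervals below.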
+  WeeklyTime now = WeeklyTime::FromTime(*curr_date);
+  now.AddTime(TimeDelta::FromHours(*curr_hour) +
+              TimeDelta::FromMinutes(*curr_minute));
+
+  const WeeklyTimeIntervalVector* intervals =
+      ec->GetValue(dp_provider->var_disallowed_time_intervals());
+  if (!intervals) {
+    return EvalStatus::kContinue;
+  }
+  for (const auto& interval : *intervals) {
+    if (interval.InRange(now)) {
+      *result = ErrorCode::kOmahaUpdateDeferredPerPolicy;
+      return EvalStatus::kSucceeded;
+    }
+  }
+
+  return EvalStatus::kContinue;
+}
+
+}  // namespace chromeos_update_manager
diff --git a/update_manager/update_time_restrictions_policy_impl.h b/update_manager/update_time_restrictions_policy_impl.h
new file mode 100644
index 0000000..11cbceb
--- /dev/null
+++ b/update_manager/update_time_restrictions_policy_impl.h
@@ -0,0 +1,61 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef UPDATE_ENGINE_UPDATE_MANAGER_UPDATE_TIME_RESTRICTIONS_POLICY_IMPL_H_
+#define UPDATE_ENGINE_UPDATE_MANAGER_UPDATE_TIME_RESTRICTIONS_POLICY_IMPL_H_
+
+#include <string>
+
+#include <base/time/time.h>
+
+#include "update_engine/common/error_code.h"
+#include "update_engine/payload_consumer/install_plan.h"
+#include "update_engine/update_manager/policy_utils.h"
+
+namespace chromeos_update_manager {
+
+// Policy that allows administrators to set time intervals during which
+// automatic updates cannot be applied. This implementation checks whether the
+// current time falls within any of those intervals; if it does, applying the
+// update is deferred by this policy.
+class UpdateTimeRestrictionsPolicyImpl : public PolicyImplBase {
+ public:
+  UpdateTimeRestrictionsPolicyImpl() = default;
+  ~UpdateTimeRestrictionsPolicyImpl() override = default;
+
+  // When the current time is inside one of the intervals, returns kSucceeded
+  // and sets |result| to kOmahaUpdateDeferredPerPolicy. If the current time
+  // is not inside any interval, returns kContinue. In case of errors (e.g.
+  // the intervals or the current time cannot be accessed), returns kContinue.
+  EvalStatus UpdateCanBeApplied(
+      EvaluationContext* ec,
+      State* state,
+      std::string* error,
+      chromeos_update_engine::ErrorCode* result,
+      chromeos_update_engine::InstallPlan* install_plan) const override;
+
+ protected:
+  std::string PolicyName() const override {
+    return "UpdateTimeRestrictionsPolicyImpl";
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(UpdateTimeRestrictionsPolicyImpl);
+};
+
+}  // namespace chromeos_update_manager
+
+#endif  // UPDATE_ENGINE_UPDATE_MANAGER_UPDATE_TIME_RESTRICTIONS_POLICY_IMPL_H_
diff --git a/update_manager/update_time_restrictions_policy_impl_unittest.cc b/update_manager/update_time_restrictions_policy_impl_unittest.cc
new file mode 100644
index 0000000..74e7f3c
--- /dev/null
+++ b/update_manager/update_time_restrictions_policy_impl_unittest.cc
@@ -0,0 +1,120 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/update_manager/update_time_restrictions_policy_impl.h"
+
+#include <memory>
+
+#include <base/time/time.h>
+
+#include "update_engine/update_manager/policy_test_utils.h"
+#include "update_engine/update_manager/weekly_time.h"
+
+using base::Time;
+using base::TimeDelta;
+using chromeos_update_engine::ErrorCode;
+using chromeos_update_engine::InstallPlan;
+using std::string;
+
+namespace chromeos_update_manager {
+
+constexpr TimeDelta kHour = TimeDelta::FromHours(1);
+constexpr TimeDelta kMinute = TimeDelta::FromMinutes(1);
+
+const WeeklyTimeIntervalVector kTestIntervals{
+    // Monday 10:15 AM to Monday 3:30 PM.
+    WeeklyTimeInterval(WeeklyTime(1, kHour * 10 + kMinute * 15),
+                       WeeklyTime(1, kHour * 15 + kMinute * 30)),
+    // Wednesday 8:30 PM to Thursday 8:40 AM.
+    WeeklyTimeInterval(WeeklyTime(3, kHour * 20 + kMinute * 30),
+                       WeeklyTime(4, kHour * 8 + kMinute * 40)),
+};
+
+class UmUpdateTimeRestrictionsPolicyImplTest : public UmPolicyTestBase {
+ protected:
+  UmUpdateTimeRestrictionsPolicyImplTest() {
+    policy_ = std::make_unique<UpdateTimeRestrictionsPolicyImpl>();
+  }
+
+  void TestPolicy(const Time::Exploded& exploded,
+                  const WeeklyTimeIntervalVector& test_intervals,
+                  const EvalStatus& expected_value,
+                  bool kiosk) {
+    if (kiosk)
+      fake_state_.device_policy_provider()
+          ->var_auto_launched_kiosk_app_id()
+          ->reset(new string("myapp"));
+
+    Time time;
+    EXPECT_TRUE(Time::FromLocalExploded(exploded, &time));
+    fake_clock_.SetWallclockTime(time);
+    SetUpDefaultTimeProvider();
+    fake_state_.device_policy_provider()
+        ->var_disallowed_time_intervals()
+        ->reset(new WeeklyTimeIntervalVector(test_intervals));
+    ErrorCode result;
+    InstallPlan install_plan;
+    ExpectPolicyStatus(
+        expected_value, &Policy::UpdateCanBeApplied, &result, &install_plan);
+    if (expected_value == EvalStatus::kSucceeded)
+      EXPECT_EQ(result, ErrorCode::kOmahaUpdateDeferredPerPolicy);
+  }
+};
+
+// If there are no intervals, then the check should always return kContinue.
+TEST_F(UmUpdateTimeRestrictionsPolicyImplTest, NoIntervalsSetTest) {
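+  // Time::Exploded fields are {year, month, day_of_week, day_of_month, hour,
+  // minute, second, millisecond}; this is Monday, July 9th 2018 12:30 PM.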
+  Time::Exploded random_time{2018, 7, 1, 9, 12, 30, 0, 0};
+  TestPolicy(random_time,
+             WeeklyTimeIntervalVector(),
+             EvalStatus::kContinue,
+             /* kiosk = */ true);
+}
+
+// Verify that every interval in the list is checked.
+TEST_F(UmUpdateTimeRestrictionsPolicyImplTest, TimeInRange) {
+  // Monday, July 9th 2018 12:30 PM.
+  Time::Exploded first_interval_time{2018, 7, 1, 9, 12, 30, 0, 0};
+  TestPolicy(first_interval_time,
+             kTestIntervals,
+             EvalStatus::kSucceeded,
+             /* kiosk = */ true);
+
+  // Check second interval.
+  // Thursday, July 12th 2018 4:30 AM.
+  Time::Exploded second_interval_time{2018, 7, 4, 12, 4, 30, 0, 0};
+  TestPolicy(second_interval_time,
+             kTestIntervals,
+             EvalStatus::kSucceeded,
+             /* kiosk = */ true);
+}
+
+TEST_F(UmUpdateTimeRestrictionsPolicyImplTest, TimeOutOfRange) {
+  // Monday, July 9th 2018 6:30 PM.
+  Time::Exploded out_of_range_time{2018, 7, 1, 9, 18, 30, 0, 0};
+  TestPolicy(out_of_range_time,
+             kTestIntervals,
+             EvalStatus::kContinue,
+             /* kiosk = */ true);
+}
+
+TEST_F(UmUpdateTimeRestrictionsPolicyImplTest, NoKioskDisablesPolicy) {
+  Time::Exploded in_range_time{2018, 7, 1, 9, 12, 30, 0, 0};
+  TestPolicy(in_range_time,
+             kTestIntervals,
+             EvalStatus::kContinue,
+             /* kiosk = */ false);
+}
+}  // namespace chromeos_update_manager
diff --git a/update_manager/weekly_time.cc b/update_manager/weekly_time.cc
new file mode 100644
index 0000000..e478f9f
--- /dev/null
+++ b/update_manager/weekly_time.cc
@@ -0,0 +1,75 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "update_engine/update_manager/weekly_time.h"
+
+#include <base/strings/string_number_conversions.h>
+#include <base/strings/stringprintf.h>
+#include <base/time/time.h>
+
+using base::Time;
+using base::TimeDelta;
+using std::string;
+
+namespace {
+const int kDaysInWeek = 7;
+}
+
+namespace chromeos_update_manager {
+
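+// Durations are measured going forward around the cyclic week: if |other| is
+// earlier in the week than this WeeklyTime, a full week is added so the result
+// is never negative (e.g. Friday 12:00 -> Monday 12:00 is 3 days).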
+TimeDelta WeeklyTime::GetDurationTo(const WeeklyTime& other) const {
+  if (other.TimeFromStartOfWeek() < TimeFromStartOfWeek()) {
+    return other.TimeFromStartOfWeek() +
+           (TimeDelta::FromDays(kDaysInWeek) - TimeFromStartOfWeek());
+  }
+  return other.TimeFromStartOfWeek() - TimeFromStartOfWeek();
+}
+
+TimeDelta WeeklyTime::TimeFromStartOfWeek() const {
+  return TimeDelta::FromDays(day_of_week_) + time_;
+}
+
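+// |days_over| counts how many whole days the updated time spills past the
+// current day; the day of the week then advances with wraparound, e.g.
+// Thursday 23:00 + 2 hours becomes Friday 01:00 and Sunday 20:00 + 3 days
+// becomes Wednesday 20:00 (see the unit tests).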
+void WeeklyTime::AddTime(const TimeDelta& offset) {
+  time_ += offset;
+  int days_over = time_.InDays();
+  time_ -= TimeDelta::FromDays(days_over);
+  day_of_week_ = (day_of_week_ + days_over - 1) % kDaysInWeek + 1;
+}
+
+// static
+WeeklyTime WeeklyTime::FromTime(const Time& time) {
+  Time::Exploded exploded;
+  time.LocalExplode(&exploded);
+  return WeeklyTime(exploded.day_of_week,
+                    TimeDelta::FromHours(exploded.hour) +
+                        TimeDelta::FromMinutes(exploded.minute));
+}
+
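+// |start_| is inclusive and |end_| is exclusive: |time| is inside the interval
+// when, walking forward around the week from |time|, |end_| is reached no
+// later than |start_|. This also handles intervals that wrap past the end of
+// the week.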
+bool WeeklyTimeInterval::InRange(const WeeklyTime& time) const {
+  return time == start_ ||
+         (time.GetDurationTo(start_) >= time.GetDurationTo(end_) &&
+          time != end_);
+}
+
+string WeeklyTimeInterval::ToString() const {
+  return base::StringPrintf(
+      "Start: day_of_week=%d time=%d\nEnd: day_of_week=%d time=%d",
+      start_.day_of_week(),
+      start_.time().InMinutes(),
+      end_.day_of_week(),
+      end_.time().InMinutes());
+}
+
+}  // namespace chromeos_update_manager
diff --git a/update_manager/weekly_time.h b/update_manager/weekly_time.h
new file mode 100644
index 0000000..9e3a039
--- /dev/null
+++ b/update_manager/weekly_time.h
@@ -0,0 +1,97 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef UPDATE_ENGINE_UPDATE_MANAGER_WEEKLY_TIME_H_
+#define UPDATE_ENGINE_UPDATE_MANAGER_WEEKLY_TIME_H_
+
+#include <string>
+#include <vector>
+
+#include <base/time/time.h>
+
+namespace chromeos_update_manager {
+
+// Represents a day of the week and the time since it started.
+class WeeklyTime {
+ public:
+  // Day of week (Sunday = 0 and so on) and time since start of the day (12 AM).
+  WeeklyTime(const int& day_of_week, const base::TimeDelta& time)
+      : day_of_week_(day_of_week), time_(time) {}
+
+  // Create a weekly time from a time object.
+  static WeeklyTime FromTime(const base::Time& time);
+
+  bool operator==(const WeeklyTime& other) const {
+    return time_ == other.time() && day_of_week_ == other.day_of_week();
+  }
+
+  bool operator!=(const WeeklyTime& other) const { return !(*this == other); }
+
+  // Returns the duration from this WeeklyTime to |other|. |other| is always
+  // considered to be after this WeeklyTime, e.g. calling this function on
+  // [Friday 12:00, Monday 12:00] returns 3 days.
+  base::TimeDelta GetDurationTo(const WeeklyTime& other) const;
+
+  // Gets the weekly time represented as a time delta.
+  base::TimeDelta TimeFromStartOfWeek() const;
+
+  // Adds the given |offset| to the time with proper wraparound (e.g. Sunday + 1
+  // day = Monday).
+  void AddTime(const base::TimeDelta& offset);
+
+  int day_of_week() const { return day_of_week_; }
+
+  base::TimeDelta time() const { return time_; }
+
+ private:
+  int day_of_week_;
+  base::TimeDelta time_;
+};
+
+// Represents an interval of time during a week, delimited by two WeeklyTime
+// objects. The interval can span at most 7 days. |end| is always considered to
+// be after |start|; this is possible since times of the week are cyclic. For
+// example, the interval [Thursday 12:00, Monday 12:00) spans the time between
+// Thursday and Monday.
+class WeeklyTimeInterval {
+ public:
+  WeeklyTimeInterval(const WeeklyTime& start, const WeeklyTime& end)
+      : start_(start), end_(end) {}
+
+  // Determines if |time| is in this interval.
+  bool InRange(const WeeklyTime& time) const;
+
+  WeeklyTime start() const { return start_; }
+
+  WeeklyTime end() const { return end_; }
+
+  bool operator==(const WeeklyTimeInterval& other) const {
+    return start_ == other.start() && end_ == other.end();
+  }
+
+  // Converts the interval to a string. Used for the BoxedValue ToString
+  // function.
+  std::string ToString() const;
+
+ private:
+  WeeklyTime start_;
+  WeeklyTime end_;
+};
+
+using WeeklyTimeIntervalVector = std::vector<WeeklyTimeInterval>;
+
+}  // namespace chromeos_update_manager
+
+#endif  // UPDATE_ENGINE_UPDATE_MANAGER_WEEKLY_TIME_H_
diff --git a/update_manager/weekly_time_unittest.cc b/update_manager/weekly_time_unittest.cc
new file mode 100644
index 0000000..52c5425
--- /dev/null
+++ b/update_manager/weekly_time_unittest.cc
@@ -0,0 +1,212 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "update_engine/update_manager/weekly_time.h"
+
+#include <tuple>
+
+#include <base/time/time.h>
+#include <gtest/gtest.h>
+
+using base::TimeDelta;
+using std::tuple;
+
+namespace chromeos_update_manager {
+
+namespace {
+
+enum {
+  kSunday = 0,
+  kMonday,
+  kTuesday,
+  kWednesday,
+  kThursday,
+  kFriday,
+  kSaturday
+};
+
+}  // namespace
+
+class WeeklyTimeDurationTest
+    : public testing::TestWithParam<tuple<int /* start_day_of_week */,
+                                          TimeDelta /* start_time */,
+                                          int /* end_day_of_week */,
+                                          TimeDelta /* end_time */,
+                                          TimeDelta /* expected result */>> {
+ protected:
+  int start_day_of_week() { return std::get<0>(GetParam()); }
+  TimeDelta start_time() { return std::get<1>(GetParam()); }
+  int end_day_of_week() { return std::get<2>(GetParam()); }
+  TimeDelta end_time() { return std::get<3>(GetParam()); }
+  TimeDelta result() { return std::get<4>(GetParam()); }
+};
+
+TEST_P(WeeklyTimeDurationTest, GetDurationTo) {
+  WeeklyTime start = WeeklyTime(start_day_of_week(), start_time());
+  WeeklyTime end = WeeklyTime(end_day_of_week(), end_time());
+
+  EXPECT_EQ(result(), start.GetDurationTo(end));
+}
+
+INSTANTIATE_TEST_CASE_P(
+    SameMinutes,
+    WeeklyTimeDurationTest,
+    testing::Values(std::make_tuple(kThursday,
+                                    TimeDelta::FromMinutes(30),
+                                    kSaturday,
+                                    TimeDelta::FromMinutes(30),
+                                    TimeDelta::FromDays(2))));
+
+INSTANTIATE_TEST_CASE_P(
+    DifferentMinutes,
+    WeeklyTimeDurationTest,
+    testing::Values(std::make_tuple(kMonday,
+                                    TimeDelta::FromMinutes(10),
+                                    kWednesday,
+                                    TimeDelta::FromMinutes(30),
+                                    TimeDelta::FromDays(2) +
+                                        TimeDelta::FromMinutes(20))));
+
+INSTANTIATE_TEST_CASE_P(
+    EndLessThanStartSameMinutes,
+    WeeklyTimeDurationTest,
+    testing::Values(std::make_tuple(kSaturday,
+                                    TimeDelta::FromMinutes(100),
+                                    kTuesday,
+                                    TimeDelta::FromMinutes(100),
+                                    TimeDelta::FromDays(3))));
+
+INSTANTIATE_TEST_CASE_P(
+    EndLessThanStartDifferentMinutes,
+    WeeklyTimeDurationTest,
+    testing::Values(std::make_tuple(kSaturday,
+                                    TimeDelta::FromMinutes(150),
+                                    kMonday,
+                                    TimeDelta::FromMinutes(10),
+                                    TimeDelta::FromDays(2) -
+                                        TimeDelta::FromMinutes(140))));
+
+class WeeklyTimeOffsetTest
+    : public testing::TestWithParam<tuple<int /* day_of_week */,
+                                          TimeDelta /* time */,
+                                          TimeDelta /* offset */,
+                                          WeeklyTime /* expected result */>> {
+ protected:
+  int day_of_week() { return std::get<0>(GetParam()); }
+  TimeDelta time() { return std::get<1>(GetParam()); }
+  TimeDelta offset() { return std::get<2>(GetParam()); }
+  WeeklyTime result() { return std::get<3>(GetParam()); }
+};
+
+TEST_P(WeeklyTimeOffsetTest, WeekTimeAddTime) {
+  WeeklyTime test_time = WeeklyTime(day_of_week(), time());
+  test_time.AddTime(offset());
+
+  EXPECT_EQ(result(), test_time);
+}
+
+INSTANTIATE_TEST_CASE_P(
+    SameDayTest,
+    WeeklyTimeOffsetTest,
+    testing::Values(std::make_tuple(kTuesday,
+                                    TimeDelta::FromMinutes(200),
+                                    TimeDelta::FromMinutes(400),
+                                    WeeklyTime(kTuesday,
+                                               TimeDelta::FromMinutes(600)))));
+
+INSTANTIATE_TEST_CASE_P(DayChangeTest,
+                        WeeklyTimeOffsetTest,
+                        testing::Values(std::make_tuple(
+                            kThursday,
+                            TimeDelta::FromHours(23),
+                            TimeDelta::FromHours(2),
+                            WeeklyTime(kFriday, TimeDelta::FromHours(1)))));
+
+INSTANTIATE_TEST_CASE_P(DayChangeTestOver7,
+                        WeeklyTimeOffsetTest,
+                        testing::Values(std::make_tuple(
+                            kSunday,
+                            TimeDelta::FromHours(20),
+                            TimeDelta::FromDays(3),
+                            WeeklyTime(kWednesday, TimeDelta::FromHours(20)))));
+
+class WeeklyTimeIntervalRangeTest
+    : public testing::TestWithParam<tuple<int /* test_day_of_week */,
+                                          int /* test_time */,
+                                          bool /* in regular interval */,
+                                          bool /* in short interval */,
+                                          bool /* in wraparound interval */>> {
+ protected:
+  int day_of_week() { return std::get<0>(GetParam()); }
+  int minutes() { return std::get<1>(GetParam()); }
+  bool regular_result() { return std::get<2>(GetParam()); }
+  bool short_result() { return std::get<3>(GetParam()); }
+  bool wraparound_result() { return std::get<4>(GetParam()); }
+};
+
+TEST_P(WeeklyTimeIntervalRangeTest, InRange) {
+  WeeklyTime test =
+      WeeklyTime(day_of_week(), TimeDelta::FromMinutes(minutes()));
+  WeeklyTimeInterval interval_regular =
+      WeeklyTimeInterval(WeeklyTime(kMonday, TimeDelta::FromMinutes(10)),
+                         WeeklyTime(kWednesday, TimeDelta::FromMinutes(30)));
+  WeeklyTimeInterval interval_short =
+      WeeklyTimeInterval(WeeklyTime(kThursday, TimeDelta::FromMinutes(10)),
+                         WeeklyTime(kThursday, TimeDelta::FromMinutes(11)));
+
+  WeeklyTimeInterval interval_wraparound =
+      WeeklyTimeInterval(WeeklyTime(kFriday, TimeDelta::FromMinutes(10)),
+                         WeeklyTime(kTuesday, TimeDelta::FromMinutes(30)));
+
+  EXPECT_EQ(regular_result(), interval_regular.InRange(test));
+  EXPECT_EQ(short_result(), interval_short.InRange(test));
+  EXPECT_EQ(wraparound_result(), interval_wraparound.InRange(test));
+}
+
+// Test the left side of the range being inclusive.
+INSTANTIATE_TEST_CASE_P(
+    InclusiveSuccessLeft,
+    WeeklyTimeIntervalRangeTest,
+    testing::Values(std::make_tuple(kThursday, 10, false, true, false)));
+
+// Test the right side of the range being exclusive.
+INSTANTIATE_TEST_CASE_P(
+    ExclusiveSuccessRight,
+    WeeklyTimeIntervalRangeTest,
+    testing::Values(std::make_tuple(kThursday, 11, false, false, false)));
+
+// Test falling out of the interval by a small amount.
+INSTANTIATE_TEST_CASE_P(
+    FailOutsideRangeSmall,
+    WeeklyTimeIntervalRangeTest,
+    testing::Values(std::make_tuple(kThursday, 12, false, false, false)));
+
+// These test cases check that intervals wrap around properly.
+INSTANTIATE_TEST_CASE_P(
+    WraparoundOutside,
+    WeeklyTimeIntervalRangeTest,
+    testing::Values(std::make_tuple(kWednesday, 10, true, false, false)));
+
+INSTANTIATE_TEST_CASE_P(
+    WraparoundInsideRight,
+    WeeklyTimeIntervalRangeTest,
+    testing::Values(std::make_tuple(kSaturday, 10, false, false, true)));
+
+INSTANTIATE_TEST_CASE_P(
+    WraparoundInsideLeft,
+    WeeklyTimeIntervalRangeTest,
+    testing::Values(std::make_tuple(kMonday, 0, false, false, true)));
+
+}  // namespace chromeos_update_manager
diff --git a/update_metadata.proto b/update_metadata.proto
index 99b7422..f90ec3c 100644
--- a/update_metadata.proto
+++ b/update_metadata.proto
@@ -82,7 +82,7 @@
 //   the new partition.
 //
 // The operations allowed in the payload (supported by the client) depend on the
-// major and minor version. See InstallOperation.Type bellow for details.
+// major and minor version. See InstallOperation.Type below for details.
 
 syntax = "proto2";
 
@@ -248,6 +248,29 @@
   // Whether a failure in the postinstall step for this partition should be
   // ignored.
   optional bool postinstall_optional = 9;
+
+  // On minor version 6 or newer, these fields are supported:
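+  // These extents describe the verity hash tree and its forward error
+  // correction (FEC) data for the new partition.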
+
+  // The extent for data covered by verity hash tree.
+  optional Extent hash_tree_data_extent = 10;
+
+  // The extent to store verity hash tree.
+  optional Extent hash_tree_extent = 11;
+
+  // The hash algorithm used in verity hash tree.
+  optional string hash_tree_algorithm = 12;
+
+  // The salt used for verity hash tree.
+  optional bytes hash_tree_salt = 13;
+
+  // The extent for data covered by FEC.
+  optional Extent fec_data_extent = 14;
+
+  // The extent to store FEC.
+  optional Extent fec_extent = 15;
+
+  // The number of FEC roots.
+  optional uint32 fec_roots = 16 [default = 2];
 }
 
 message DeltaArchiveManifest {
diff --git a/update_status_utils.cc b/update_status_utils.cc
index ff039b8..5de3381 100644
--- a/update_status_utils.cc
+++ b/update_status_utils.cc
@@ -30,6 +30,8 @@
       return update_engine::kUpdateStatusCheckingForUpdate;
     case UpdateStatus::UPDATE_AVAILABLE:
       return update_engine::kUpdateStatusUpdateAvailable;
+    case UpdateStatus::NEED_PERMISSION_TO_UPDATE:
+      return update_engine::kUpdateStatusNeedPermissionToUpdate;
     case UpdateStatus::DOWNLOADING:
       return update_engine::kUpdateStatusDownloading;
     case UpdateStatus::VERIFYING:
@@ -61,6 +63,9 @@
   } else if (s == update_engine::kUpdateStatusUpdateAvailable) {
     *status = UpdateStatus::UPDATE_AVAILABLE;
     return true;
+  } else if (s == update_engine::kUpdateStatusNeedPermissionToUpdate) {
+    *status = UpdateStatus::NEED_PERMISSION_TO_UPDATE;
+    return true;
   } else if (s == update_engine::kUpdateStatusDownloading) {
     *status = UpdateStatus::DOWNLOADING;
     return true;
diff --git a/utils_android.cc b/utils_android.cc
deleted file mode 100644
index 393e65a..0000000
--- a/utils_android.cc
+++ /dev/null
@@ -1,65 +0,0 @@
-//
-// Copyright (C) 2016 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#include "update_engine/utils_android.h"
-
-#include <fs_mgr.h>
-
-using std::string;
-
-namespace chromeos_update_engine {
-
-namespace {
-
-// Open the appropriate fstab file and fallback to /fstab.device if
-// that's what's being used.
-static struct fstab* OpenFSTab() {
-  struct fstab* fstab = fs_mgr_read_fstab_default();
-  if (fstab != nullptr)
-    return fstab;
-
-  fstab = fs_mgr_read_fstab("/fstab.device");
-  return fstab;
-}
-
-}  // namespace
-
-namespace utils {
-
-bool DeviceForMountPoint(const string& mount_point, base::FilePath* device) {
-  struct fstab* fstab;
-  struct fstab_rec* record;
-
-  fstab = OpenFSTab();
-  if (fstab == nullptr) {
-    LOG(ERROR) << "Error opening fstab file.";
-    return false;
-  }
-  record = fs_mgr_get_entry_for_mount_point(fstab, mount_point.c_str());
-  if (record == nullptr) {
-    LOG(ERROR) << "Error finding " << mount_point << " entry in fstab file.";
-    fs_mgr_free_fstab(fstab);
-    return false;
-  }
-
-  *device = base::FilePath(record->blk_device);
-  fs_mgr_free_fstab(fstab);
-  return true;
-}
-
-}  // namespace utils
-
-}  // namespace chromeos_update_engine
diff --git a/utils_android.h b/utils_android.h
deleted file mode 100644
index 18dd8ab..0000000
--- a/utils_android.h
+++ /dev/null
@@ -1,37 +0,0 @@
-//
-// Copyright (C) 2016 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#ifndef UPDATE_ENGINE_UTILS_ANDROID_H_
-#define UPDATE_ENGINE_UTILS_ANDROID_H_
-
-#include <string>
-
-#include <base/files/file_util.h>
-
-namespace chromeos_update_engine {
-
-namespace utils {
-
-// Find the block device that should be mounted in the |mount_point| path and
-// store it in |device|. Returns whether a device was found on the fstab.
-bool DeviceForMountPoint(const std::string& mount_point,
-                         base::FilePath* device);
-
-}  // namespace utils
-
-}  // namespace chromeos_update_engine
-
-#endif  // UPDATE_ENGINE_UTILS_ANDROID_H_