Error out if SetProcessNetwork fails.
am: 772470ab38

Change-Id: Ia9ca6033370eef9017948d5701ee1b1ab7d48537
diff --git a/Android.mk b/Android.mk
index a3a7017..37a754d 100644
--- a/Android.mk
+++ b/Android.mk
@@ -22,18 +22,18 @@
 local_use_binder := $(if $(BRILLO_USE_BINDER),$(BRILLO_USE_BINDER),1)
 local_use_hwid_override := \
     $(if $(BRILLO_USE_HWID_OVERRIDE),$(BRILLO_USE_HWID_OVERRIDE),0)
-# "libcros" gates the LibCrosService exposed by the Chrome OS' chrome browser to
-# the system layer.
-local_use_libcros := $(if $(BRILLO_USE_LIBCROS),$(BRILLO_USE_LIBCROS),0)
 local_use_mtd := $(if $(BRILLO_USE_MTD),$(BRILLO_USE_MTD),0)
+local_use_chrome_network_proxy := 0
+local_use_chrome_kiosk_app := 0
 
 # IoT devices use Omaha for updates.
 local_use_omaha := $(if $(filter true,$(PRODUCT_IOT)),1,0)
 
 ue_common_cflags := \
     -DUSE_BINDER=$(local_use_binder) \
+    -DUSE_CHROME_NETWORK_PROXY=$(local_use_chrome_network_proxy) \
+    -DUSE_CHROME_KIOSK_APP=$(local_use_chrome_kiosk_app) \
     -DUSE_HWID_OVERRIDE=$(local_use_hwid_override) \
-    -DUSE_LIBCROS=$(local_use_libcros) \
     -DUSE_MTD=$(local_use_mtd) \
     -DUSE_OMAHA=$(local_use_omaha) \
     -D_FILE_OFFSET_BITS=64 \
@@ -129,6 +129,7 @@
     payload_consumer/download_action.cc \
     payload_consumer/extent_writer.cc \
     payload_consumer/file_descriptor.cc \
+    payload_consumer/file_descriptor_utils.cc \
     payload_consumer/file_writer.cc \
     payload_consumer/filesystem_verifier_action.cc \
     payload_consumer/install_plan.cc \
@@ -143,7 +144,6 @@
 LOCAL_MODULE := libpayload_consumer
 LOCAL_MODULE_CLASS := STATIC_LIBRARIES
 LOCAL_CPP_EXTENSION := .cc
-LOCAL_CLANG := true
 LOCAL_CFLAGS := $(ue_common_cflags)
 LOCAL_CPPFLAGS := $(ue_common_cppflags)
 LOCAL_LDFLAGS := $(ue_common_ldflags)
@@ -167,7 +167,6 @@
 LOCAL_MODULE := libpayload_consumer
 LOCAL_MODULE_CLASS := STATIC_LIBRARIES
 LOCAL_CPP_EXTENSION := .cc
-LOCAL_CLANG := true
 LOCAL_CFLAGS := $(ue_common_cflags)
 LOCAL_CPPFLAGS := $(ue_common_cppflags)
 LOCAL_LDFLAGS := $(ue_common_ldflags)
@@ -203,7 +202,6 @@
 LOCAL_MODULE := libupdate_engine_boot_control
 LOCAL_MODULE_CLASS := STATIC_LIBRARIES
 LOCAL_CPP_EXTENSION := .cc
-LOCAL_CLANG := true
 LOCAL_CFLAGS := $(ue_common_cflags)
 LOCAL_CPPFLAGS := $(ue_common_cppflags)
 LOCAL_LDFLAGS := $(ue_common_ldflags)
@@ -261,7 +259,6 @@
 LOCAL_MODULE := libupdate_engine
 LOCAL_MODULE_CLASS := STATIC_LIBRARIES
 LOCAL_CPP_EXTENSION := .cc
-LOCAL_CLANG := true
 LOCAL_EXPORT_C_INCLUDE_DIRS := $(ue_libupdate_engine_exported_c_includes)
 LOCAL_CFLAGS := $(ue_common_cflags)
 LOCAL_CPPFLAGS := $(ue_common_cppflags)
@@ -326,10 +323,10 @@
     binder_service_brillo.cc \
     parcelable_update_engine_status.cc
 endif  # local_use_binder == 1
-ifeq ($(local_use_libcros),1)
+ifeq ($(local_use_chrome_network_proxy),1)
 LOCAL_SRC_FILES += \
     chrome_browser_proxy_resolver.cc
-endif  # local_use_libcros == 1
+endif  # local_use_chrome_network_proxy == 1
 include $(BUILD_STATIC_LIBRARY)
 
 else  # local_use_omaha == 1
@@ -366,7 +363,6 @@
 LOCAL_MODULE := libupdate_engine_android
 LOCAL_MODULE_CLASS := STATIC_LIBRARIES
 LOCAL_CPP_EXTENSION := .cc
-LOCAL_CLANG := true
 LOCAL_CFLAGS := $(ue_common_cflags)
 LOCAL_CPPFLAGS := $(ue_common_cppflags)
 LOCAL_LDFLAGS := $(ue_common_ldflags)
@@ -411,7 +407,6 @@
 LOCAL_REQUIRED_MODULES := \
     cacerts_google
 LOCAL_CPP_EXTENSION := .cc
-LOCAL_CLANG := true
 LOCAL_CFLAGS := $(ue_common_cflags)
 LOCAL_CPPFLAGS := $(ue_common_cppflags)
 LOCAL_LDFLAGS := $(ue_common_ldflags)
@@ -453,7 +448,6 @@
 LOCAL_MODULE_PATH := $(TARGET_RECOVERY_ROOT_OUT)/sbin
 LOCAL_MODULE_CLASS := EXECUTABLES
 LOCAL_CPP_EXTENSION := .cc
-LOCAL_CLANG := true
 LOCAL_CFLAGS := \
     $(ue_common_cflags) \
     -D_UE_SIDELOAD
@@ -522,7 +516,6 @@
     -Werror \
     -Wno-unused-parameter \
     -DUSE_BINDER=$(local_use_binder)
-LOCAL_CLANG := true
 LOCAL_CPP_EXTENSION := .cc
 # TODO(deymo): Remove "external/cros/system_api/dbus" when dbus is not used.
 LOCAL_C_INCLUDES := \
@@ -560,7 +553,6 @@
 LOCAL_MODULE := update_engine_client
 LOCAL_MODULE_CLASS := EXECUTABLES
 LOCAL_CPP_EXTENSION := .cc
-LOCAL_CLANG := true
 LOCAL_CFLAGS := $(ue_common_cflags)
 LOCAL_CPPFLAGS := $(ue_common_cppflags)
 LOCAL_LDFLAGS := $(ue_common_ldflags)
@@ -599,9 +591,12 @@
 # server-side code. This is used for delta_generator and unittests but not
 # for any client code.
 ue_libpayload_generator_exported_static_libraries := \
+    libbsdiff \
+    libdivsufsort \
+    libdivsufsort64 \
     libpayload_consumer \
-    update_metadata-protos \
     liblzma \
+    update_metadata-protos \
     $(ue_libpayload_consumer_exported_static_libraries) \
     $(ue_update_metadata_protos_exported_static_libraries)
 ue_libpayload_generator_exported_shared_libraries := \
@@ -640,15 +635,17 @@
 LOCAL_MODULE := libpayload_generator
 LOCAL_MODULE_CLASS := STATIC_LIBRARIES
 LOCAL_CPP_EXTENSION := .cc
-LOCAL_CLANG := true
 LOCAL_CFLAGS := $(ue_common_cflags)
 LOCAL_CPPFLAGS := $(ue_common_cppflags)
 LOCAL_LDFLAGS := $(ue_common_ldflags)
 LOCAL_C_INCLUDES := $(ue_common_c_includes)
 LOCAL_STATIC_LIBRARIES := \
+    libbsdiff \
+    libdivsufsort \
+    libdivsufsort64 \
     libpayload_consumer \
-    update_metadata-protos \
     liblzma \
+    update_metadata-protos \
     $(ue_common_static_libraries) \
     $(ue_libpayload_consumer_exported_static_libraries) \
     $(ue_update_metadata_protos_exported_static_libraries)
@@ -666,12 +663,14 @@
 LOCAL_MODULE := libpayload_generator
 LOCAL_MODULE_CLASS := STATIC_LIBRARIES
 LOCAL_CPP_EXTENSION := .cc
-LOCAL_CLANG := true
 LOCAL_CFLAGS := $(ue_common_cflags)
 LOCAL_CPPFLAGS := $(ue_common_cppflags)
 LOCAL_LDFLAGS := $(ue_common_ldflags)
 LOCAL_C_INCLUDES := $(ue_common_c_includes)
 LOCAL_STATIC_LIBRARIES := \
+    libbsdiff \
+    libdivsufsort \
+    libdivsufsort64 \
     libpayload_consumer \
     update_metadata-protos \
     liblzma \
@@ -696,12 +695,8 @@
 # Build for the host.
 include $(CLEAR_VARS)
 LOCAL_MODULE := delta_generator
-LOCAL_REQUIRED_MODULES := \
-    bsdiff \
-    imgdiff
 LOCAL_MODULE_CLASS := EXECUTABLES
 LOCAL_CPP_EXTENSION := .cc
-LOCAL_CLANG := true
 LOCAL_CFLAGS := $(ue_common_cflags)
 LOCAL_CPPFLAGS := $(ue_common_cppflags)
 LOCAL_LDFLAGS := $(ue_common_ldflags)
@@ -727,7 +722,6 @@
 LOCAL_MODULE_STEM := delta_generator
 LOCAL_MODULE_CLASS := EXECUTABLES
 LOCAL_CPP_EXTENSION := .cc
-LOCAL_CLANG := true
 LOCAL_CFLAGS := $(ue_common_cflags)
 LOCAL_CPPFLAGS := $(ue_common_cppflags)
 LOCAL_LDFLAGS := $(ue_common_ldflags)
@@ -829,7 +823,6 @@
 LOCAL_MODULE_PATH := $(TARGET_OUT_DATA_NATIVE_TESTS)/update_engine_unittests
 LOCAL_MODULE_CLASS := EXECUTABLES
 LOCAL_CPP_EXTENSION := .cc
-LOCAL_CLANG := true
 LOCAL_CFLAGS := $(ue_common_cflags)
 LOCAL_CPPFLAGS := $(ue_common_cppflags)
 LOCAL_LDFLAGS := $(ue_common_ldflags)
@@ -841,28 +834,6 @@
     test_http_server.cc
 include $(BUILD_EXECUTABLE)
 
-# bsdiff (type: executable)
-# ========================================================
-# We need bsdiff in the update_engine_unittests directory, so we build it here.
-include $(CLEAR_VARS)
-LOCAL_MODULE := ue_unittest_bsdiff
-LOCAL_MODULE_PATH := $(TARGET_OUT_DATA_NATIVE_TESTS)/update_engine_unittests
-LOCAL_MODULE_STEM := bsdiff
-LOCAL_CPP_EXTENSION := .cc
-LOCAL_SRC_FILES := ../../external/bsdiff/bsdiff_main.cc
-LOCAL_CFLAGS := \
-    -D_FILE_OFFSET_BITS=64 \
-    -Wall \
-    -Werror \
-    -Wextra \
-    -Wno-unused-parameter
-LOCAL_STATIC_LIBRARIES := \
-    libbsdiff \
-    libbz \
-    libdivsufsort64 \
-    libdivsufsort
-include $(BUILD_EXECUTABLE)
-
 # test_subprocess (type: executable)
 # ========================================================
 # Test helper subprocess program.
@@ -871,7 +842,6 @@
 LOCAL_MODULE_PATH := $(TARGET_OUT_DATA_NATIVE_TESTS)/update_engine_unittests
 LOCAL_MODULE_CLASS := EXECUTABLES
 LOCAL_CPP_EXTENSION := .cc
-LOCAL_CLANG := true
 LOCAL_CFLAGS := $(ue_common_cflags)
 LOCAL_CPPFLAGS := $(ue_common_cppflags)
 LOCAL_LDFLAGS := $(ue_common_ldflags)
@@ -889,7 +859,6 @@
 LOCAL_REQUIRED_MODULES := \
     test_http_server \
     test_subprocess \
-    ue_unittest_bsdiff \
     ue_unittest_delta_generator \
     ue_unittest_disk_ext2_1k.img \
     ue_unittest_disk_ext2_4k.img \
@@ -902,7 +871,6 @@
     ue_unittest_update_engine.conf \
     zlib_fingerprint
 LOCAL_CPP_EXTENSION := .cc
-LOCAL_CLANG := true
 LOCAL_CFLAGS := $(ue_common_cflags)
 LOCAL_CPPFLAGS := $(ue_common_cppflags)
 LOCAL_LDFLAGS := $(ue_common_ldflags)
@@ -940,6 +908,8 @@
     payload_consumer/delta_performer_integration_test.cc \
     payload_consumer/delta_performer_unittest.cc \
     payload_consumer/extent_writer_unittest.cc \
+    payload_consumer/fake_file_descriptor.cc \
+    payload_consumer/file_descriptor_utils_unittest.cc \
     payload_consumer/file_writer_unittest.cc \
     payload_consumer/filesystem_verifier_action_unittest.cc \
     payload_consumer/postinstall_runner_action_unittest.cc \
@@ -1006,10 +976,10 @@
 LOCAL_SHARED_LIBRARIES += \
     $(ue_libupdate_engine_android_exported_shared_libraries:-host=)
 endif  # local_use_omaha == 1
-ifeq ($(local_use_libcros),1)
+ifeq ($(local_use_chrome_network_proxy),1)
 LOCAL_SRC_FILES += \
     chrome_browser_proxy_resolver_unittest.cc
-endif  # local_use_libcros == 1
+endif  # local_use_chrome_network_proxy == 1
 include $(BUILD_NATIVE_TEST)
 
 # Update payload signing public key.
diff --git a/common/utils.cc b/common/utils.cc
index f528660..f651823 100644
--- a/common/utils.cc
+++ b/common/utils.cc
@@ -57,7 +57,6 @@
 #include "update_engine/common/prefs_interface.h"
 #include "update_engine/common/subprocess.h"
 #include "update_engine/payload_consumer/file_descriptor.h"
-#include "update_engine/payload_consumer/payload_constants.h"
 
 using base::Time;
 using base::TimeDelta;
@@ -1035,19 +1034,6 @@
   return false;
 }
 
-bool IsZlibCompatible(const string& fingerprint) {
-  if (fingerprint.size() != sizeof(kCompatibleZlibFingerprint[0]) - 1) {
-    LOG(ERROR) << "Invalid fingerprint: " << fingerprint;
-    return false;
-  }
-  for (auto& f : kCompatibleZlibFingerprint) {
-    if (base::CompareCaseInsensitiveASCII(fingerprint, f) == 0) {
-      return true;
-    }
-  }
-  return false;
-}
-
 bool ReadExtents(const string& path, const vector<Extent>& extents,
                  brillo::Blob* out_data, ssize_t out_data_size,
                  size_t block_size) {
diff --git a/common/utils.h b/common/utils.h
index eaf2640..2117836 100644
--- a/common/utils.h
+++ b/common/utils.h
@@ -308,9 +308,6 @@
 bool GetMinorVersion(const brillo::KeyValueStore& store,
                      uint32_t* minor_version);
 
-// Returns whether zlib |fingerprint| is compatible with zlib we are using.
-bool IsZlibCompatible(const std::string& fingerprint);
-
 // This function reads the specified data in |extents| into |out_data|. The
 // extents are read from the file at |path|. |out_data_size| is the size of
 // |out_data|. Returns false if the number of bytes to read given in
diff --git a/hardware_android.cc b/hardware_android.cc
index 91c3fbe..4c5473f 100644
--- a/hardware_android.cc
+++ b/hardware_android.cc
@@ -71,8 +71,7 @@
            std::min(message.size(), sizeof(boot.recovery) - 1));
   }
 
-  int fd =
-      HANDLE_EINTR(open(misc_device.value().c_str(), O_WRONLY | O_SYNC, 0600));
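+  // The mode argument is only used with O_CREAT, so it is dropped here.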
+  int fd = HANDLE_EINTR(open(misc_device.value().c_str(), O_WRONLY | O_SYNC));
   if (fd < 0) {
     PLOG(ERROR) << "Opening misc";
     return false;
diff --git a/libcurl_http_fetcher.cc b/libcurl_http_fetcher.cc
index eac6ea0..87f30ad 100644
--- a/libcurl_http_fetcher.cc
+++ b/libcurl_http_fetcher.cc
@@ -272,7 +272,7 @@
     } else if (base::StartsWith(
                    url_, "https://", base::CompareCase::INSENSITIVE_ASCII)) {
       SetCurlOptionsForHttps();
-#if !defined(__CHROMEOS__) && !defined(__BRILLO__)
+#if !USE_OMAHA
     } else if (base::StartsWith(
                    url_, "file://", base::CompareCase::INSENSITIVE_ASCII)) {
       SetCurlOptionsForFile();
diff --git a/payload_consumer/delta_performer.cc b/payload_consumer/delta_performer.cc
index 12d3654..d6ac16c 100644
--- a/payload_consumer/delta_performer.cc
+++ b/payload_consumer/delta_performer.cc
@@ -44,6 +44,7 @@
 #include "update_engine/payload_consumer/bzip_extent_writer.h"
 #include "update_engine/payload_consumer/download_action.h"
 #include "update_engine/payload_consumer/extent_writer.h"
+#include "update_engine/payload_consumer/file_descriptor_utils.h"
 #if USE_MTD
 #include "update_engine/payload_consumer/mtd_file_descriptor.h"
 #endif
@@ -587,7 +588,6 @@
 // and stores an action exit code in |error|.
 bool DeltaPerformer::Write(const void* bytes, size_t count, ErrorCode *error) {
   *error = ErrorCode::kSuccess;
-
   const char* c_bytes = reinterpret_cast<const char*>(bytes);
 
   // Update the total byte downloaded count and the progress logs.
@@ -740,8 +740,8 @@
       case InstallOperation::SOURCE_BSDIFF:
         op_result = PerformSourceBsdiffOperation(op, error);
         break;
-      case InstallOperation::IMGDIFF:
-        // TODO(deymo): Replace with PUFFIN operation.
+      case InstallOperation::PUFFDIFF:
+        // TODO(ahassani): Later add PerformPuffdiffOperation(op, error);
         op_result = false;
         break;
       default:
@@ -1035,25 +1035,6 @@
 
 namespace {
 
-// Takes |extents| and fills an empty vector |blocks| with a block index for
-// each block in |extents|. For example, [(3, 2), (8, 1)] would give [3, 4, 8].
-void ExtentsToBlocks(const RepeatedPtrField<Extent>& extents,
-                     vector<uint64_t>* blocks) {
-  for (const Extent& ext : extents) {
-    for (uint64_t j = 0; j < ext.num_blocks(); j++)
-      blocks->push_back(ext.start_block() + j);
-  }
-}
-
-// Takes |extents| and returns the number of blocks in those extents.
-uint64_t GetBlockCount(const RepeatedPtrField<Extent>& extents) {
-  uint64_t sum = 0;
-  for (const Extent& ext : extents) {
-    sum += ext.num_blocks();
-  }
-  return sum;
-}
-
 // Compare |calculated_hash| with source hash in |operation|, return false and
 // dump hash and set |error| if don't match.
 bool ValidateSourceHash(const brillo::Blob& calculated_hash,
@@ -1099,57 +1080,18 @@
   if (operation.has_dst_length())
     TEST_AND_RETURN_FALSE(operation.dst_length() % block_size_ == 0);
 
-  uint64_t blocks_to_read = GetBlockCount(operation.src_extents());
-  uint64_t blocks_to_write = GetBlockCount(operation.dst_extents());
-  TEST_AND_RETURN_FALSE(blocks_to_write ==  blocks_to_read);
-
-  // Create vectors of all the individual src/dst blocks.
-  vector<uint64_t> src_blocks;
-  vector<uint64_t> dst_blocks;
-  ExtentsToBlocks(operation.src_extents(), &src_blocks);
-  ExtentsToBlocks(operation.dst_extents(), &dst_blocks);
-  DCHECK_EQ(src_blocks.size(), blocks_to_read);
-  DCHECK_EQ(src_blocks.size(), dst_blocks.size());
-
-  brillo::Blob buf(block_size_);
-  ssize_t bytes_read = 0;
-  HashCalculator source_hasher;
-  // Read/write one block at a time.
-  for (uint64_t i = 0; i < blocks_to_read; i++) {
-    ssize_t bytes_read_this_iteration = 0;
-    uint64_t src_block = src_blocks[i];
-    uint64_t dst_block = dst_blocks[i];
-
-    // Read in bytes.
-    TEST_AND_RETURN_FALSE(
-        utils::PReadAll(source_fd_,
-                        buf.data(),
-                        block_size_,
-                        src_block * block_size_,
-                        &bytes_read_this_iteration));
-
-    // Write bytes out.
-    TEST_AND_RETURN_FALSE(
-        utils::PWriteAll(target_fd_,
-                         buf.data(),
-                         block_size_,
-                         dst_block * block_size_));
-
-    bytes_read += bytes_read_this_iteration;
-    TEST_AND_RETURN_FALSE(bytes_read_this_iteration ==
-                          static_cast<ssize_t>(block_size_));
-
-    if (operation.has_src_sha256_hash())
-      TEST_AND_RETURN_FALSE(source_hasher.Update(buf.data(), buf.size()));
-  }
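+  // Copy all the source extents to the target extents in a single pass,
+  // hashing the source data as it is read.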
+  brillo::Blob source_hash;
+  TEST_AND_RETURN_FALSE(fd_utils::CopyAndHashExtents(source_fd_,
+                                                     operation.src_extents(),
+                                                     target_fd_,
+                                                     operation.dst_extents(),
+                                                     block_size_,
+                                                     &source_hash));
 
   if (operation.has_src_sha256_hash()) {
-    TEST_AND_RETURN_FALSE(source_hasher.Finalize());
-    TEST_AND_RETURN_FALSE(
-        ValidateSourceHash(source_hasher.raw_hash(), operation, error));
+    TEST_AND_RETURN_FALSE(ValidateSourceHash(source_hash, operation, error));
   }
 
-  DCHECK_EQ(bytes_read, static_cast<ssize_t>(blocks_to_read * block_size_));
   return true;
 }
 
diff --git a/payload_consumer/delta_performer_unittest.cc b/payload_consumer/delta_performer_unittest.cc
index 1e06e1a..6f3d89e 100644
--- a/payload_consumer/delta_performer_unittest.cc
+++ b/payload_consumer/delta_performer_unittest.cc
@@ -818,20 +818,4 @@
   EXPECT_EQ(DeltaPerformer::kSupportedMajorPayloadVersion, major_version);
 }
 
-// Test that we recognize our own zlib compressor implementation as supported.
-// All other equivalent implementations should be added to
-// kCompatibleZlibFingerprint.
-TEST_F(DeltaPerformerTest, ZlibFingerprintMatch) {
-  string fingerprint;
-#ifdef __ANDROID__
-  const std::string kZlibFingerprintPath =
-      test_utils::GetBuildArtifactsPath("zlib_fingerprint");
-#else
-  const std::string kZlibFingerprintPath = "/etc/zlib_fingerprint";
-#endif  // __ANDROID__
-  EXPECT_TRUE(base::ReadFileToString(base::FilePath(kZlibFingerprintPath),
-                                     &fingerprint));
-  EXPECT_TRUE(utils::IsZlibCompatible(fingerprint));
-}
-
 }  // namespace chromeos_update_engine
diff --git a/payload_consumer/fake_file_descriptor.cc b/payload_consumer/fake_file_descriptor.cc
new file mode 100644
index 0000000..09bd2c9
--- /dev/null
+++ b/payload_consumer/fake_file_descriptor.cc
@@ -0,0 +1,76 @@
+//
+// Copyright (C) 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_consumer/fake_file_descriptor.h"
+
+namespace chromeos_update_engine {
+
+ssize_t FakeFileDescriptor::Read(void* buf, size_t count) {
+  // Record the read operation so it can later be inspected.
+  read_ops_.emplace_back(offset_, count);
+
+  // Check for the EOF condition first to avoid reporting it as a failure.
+  if (offset_ >= static_cast<uint64_t>(size_) || !count)
+    return 0;
+  // Find the first offset, greater than or equal to the current position,
+  // where a failure will occur; this marks the end of the readable chunk.
+  uint64_t first_failure = size_;
+  for (const auto& failure : failure_ranges_) {
+    // A failure range that includes the current offset results in an
+    // immediate failure to read any bytes.
+    if (failure.first <= offset_ && offset_ < failure.first + failure.second) {
+      errno = EIO;
+      return -1;
+    }
+    if (failure.first > offset_)
+      first_failure = std::min(first_failure, failure.first);
+  }
+  count = std::min(static_cast<uint64_t>(count), first_failure - offset_);
+  static const char kHexChars[] = "0123456789ABCDEF";
+  for (size_t i = 0; i < count; ++i) {
+    // Render the 16-bit number (offset_ / 4) as four big-endian hex digits
+    // and pick the digit at position (offset_ % 4).
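+    // For example, at offset_ == 9 the number is 2, rendered as "0002", so
+    // the digit at position 1, '0', is produced.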
+    uint16_t current_num = offset_ / 4;
+    uint8_t current_digit = (current_num >> (4 * (3 - offset_ % 4))) & 0x0f;
+
+    static_cast<uint8_t*>(buf)[i] = kHexChars[current_digit];
+    offset_++;
+  }
+
+  return count;
+}
+
+off64_t FakeFileDescriptor::Seek(off64_t offset, int whence) {
+  switch (whence) {
+    case SEEK_SET:
+      offset_ = offset;
+      break;
+    case SEEK_CUR:
+      offset_ += offset;
+      break;
+    case SEEK_END:
+      if (offset > size_)
+        offset_ = 0;
+      else
+        offset_ = size_ - offset;
+      break;
+    default:
+      errno = EINVAL;
+      return -1;
+  }
+  return offset_;
+}
+
+}  // namespace chromeos_update_engine
diff --git a/payload_consumer/fake_file_descriptor.h b/payload_consumer/fake_file_descriptor.h
new file mode 100644
index 0000000..ad49373
--- /dev/null
+++ b/payload_consumer/fake_file_descriptor.h
@@ -0,0 +1,122 @@
+//
+// Copyright (C) 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_PAYLOAD_CONSUMER_FAKE_FILE_DESCRIPTOR_H_
+#define UPDATE_ENGINE_PAYLOAD_CONSUMER_FAKE_FILE_DESCRIPTOR_H_
+
+#include <algorithm>
+#include <limits>
+#include <utility>
+#include <vector>
+
+#include "update_engine/payload_consumer/file_descriptor.h"
+
+namespace chromeos_update_engine {
+
+// A fake file descriptor with configurable errors. The file descriptor always
+// reads a fixed sequence of bytes: the concatenation of the numbers 0, 1, 2,
+// ..., each encoded in 4 bytes as the big-endian hexadecimal representation
+// of a 16-bit number. For example, the stream starts with the ASCII characters
+// 0000000100020003..., which correspond to the numbers 0, 1, 2 and 3.
+class FakeFileDescriptor : public FileDescriptor {
+ public:
+  FakeFileDescriptor() = default;
+  ~FakeFileDescriptor() = default;
+
+  // FileDescriptor override methods.
+  bool Open(const char* path, int flags, mode_t mode) override {
+    if (open_)
+      return false;
+    open_ = true;
+    return true;
+  }
+
+  bool Open(const char* path, int flags) override {
+    return Open(path, flags, 0);
+  }
+
+  ssize_t Read(void* buf, size_t count) override;
+
+  ssize_t Write(const void* buf, size_t count) override {
+    // Read-only block device.
+    errno = EROFS;
+    return -1;
+  }
+
+  off64_t Seek(off64_t offset, int whence) override;
+
+  uint64_t BlockDevSize() override { return size_; }
+
+  bool BlkIoctl(int request,
+                uint64_t start,
+                uint64_t length,
+                int* result) override {
+    return false;
+  }
+
+  bool Close() override {
+    if (!open_)
+      return false;
+    open_ = false;
+    return true;
+  }
+
+  bool IsSettingErrno() override { return true; }
+
+  bool IsOpen() override { return open_; }
+
+  // Fake class configuration methods.
+
+  // Set the size of the file.
+  void SetFileSize(uint64_t size) { size_ = size; }
+
+  // Marks the |length| bytes starting at |offset| bytes into the file as a
+  // failure range. Reads starting within this range will always fail.
+  void AddFailureRange(uint64_t offset, uint64_t length) {
+    if (!length)
+      return;
+    failure_ranges_.emplace_back(offset, length);
+  }
+
+  // Returns the list of byte ranges requested with Read(), as (offset,
+  // length) pairs, regardless of the Read() return value.
+  std::vector<std::pair<uint64_t, uint64_t>> GetReadOps() const {
+    return read_ops_;
+  }
+
+ private:
+  // Whether the fake file is open.
+  bool open_{false};
+
+  // The current file pointer offset into the fake file.
+  uint64_t offset_{0};
+
+  // The size of the file. Reads at or beyond |size_| return 0 (EOF).
+  off64_t size_{std::numeric_limits<off64_t>::max()};
+
+  // The list of ranges represented as (start, length) in bytes where reads will
+  // always fail.
+  std::vector<std::pair<uint64_t, uint64_t>> failure_ranges_;
+
+  // List of reads performed as (offset, length) of the read request.
+  std::vector<std::pair<uint64_t, uint64_t>> read_ops_;
+
+  DISALLOW_COPY_AND_ASSIGN(FakeFileDescriptor);
+};
+
+}  // namespace chromeos_update_engine
+
+#endif  // UPDATE_ENGINE_PAYLOAD_CONSUMER_FAKE_FILE_DESCRIPTOR_H_
diff --git a/payload_consumer/file_descriptor.cc b/payload_consumer/file_descriptor.cc
index 8a23dea..ebe4428 100644
--- a/payload_consumer/file_descriptor.cc
+++ b/payload_consumer/file_descriptor.cc
@@ -127,12 +127,8 @@
   CHECK_GE(fd_, 0);
   if (IGNORE_EINTR(close(fd_)))
     return false;
-  Reset();
-  return true;
-}
-
-void EintrSafeFileDescriptor::Reset() {
   fd_ = -1;
+  return true;
 }
 
 }  // namespace chromeos_update_engine
diff --git a/payload_consumer/file_descriptor.h b/payload_consumer/file_descriptor.h
index 7bb2974..c8a5b15 100644
--- a/payload_consumer/file_descriptor.h
+++ b/payload_consumer/file_descriptor.h
@@ -39,12 +39,6 @@
 // * Write() returns the number of bytes written: this appears to be more useful
 //   for clients, who may wish to retry or otherwise do something useful with
 //   the remaining data that was not written.
-//
-// * Provides a Reset() method, which will force to abandon a currently open
-//   file descriptor and allow opening another file, without necessarily
-//   properly closing the old one. This may be useful in cases where a "closer"
-//   class does not care whether Close() was successful, but may need to reuse
-//   the same file descriptor again.
 
 namespace chromeos_update_engine {
 
@@ -98,10 +92,6 @@
   // errno accordingly.
   virtual bool Close() = 0;
 
-  // Resets the file descriptor, abandoning a currently open file and returning
-  // the descriptor to the closed state.
-  virtual void Reset() = 0;
-
   // Indicates whether or not an implementation sets meaningful errno.
   virtual bool IsSettingErrno() = 0;
 
@@ -129,7 +119,6 @@
                 uint64_t length,
                 int* result) override;
   bool Close() override;
-  void Reset() override;
   bool IsSettingErrno() override {
     return true;
   }
diff --git a/payload_consumer/file_descriptor_utils.cc b/payload_consumer/file_descriptor_utils.cc
new file mode 100644
index 0000000..f7f61a5
--- /dev/null
+++ b/payload_consumer/file_descriptor_utils.cc
@@ -0,0 +1,115 @@
+//
+// Copyright (C) 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_consumer/file_descriptor_utils.h"
+
+#include <algorithm>
+
+#include <base/logging.h>
+
+#include "update_engine/common/hash_calculator.h"
+#include "update_engine/common/utils.h"
+#include "update_engine/payload_consumer/extent_writer.h"
+
+using google::protobuf::RepeatedPtrField;
+using std::min;
+
+namespace chromeos_update_engine {
+
+namespace {
+
+// Size of the buffer used to copy blocks.
+const int kMaxCopyBufferSize = 1024 * 1024;
+
+// Return the total number of blocks in the passed |extents| list.
+uint64_t GetBlockCount(const RepeatedPtrField<Extent>& extents) {
+  uint64_t sum = 0;
+  for (const Extent& ext : extents) {
+    sum += ext.num_blocks();
+  }
+  return sum;
+}
+
+}  // namespace
+
+namespace fd_utils {
+
+bool CopyAndHashExtents(FileDescriptorPtr source,
+                        const RepeatedPtrField<Extent>& src_extents,
+                        FileDescriptorPtr target,
+                        const RepeatedPtrField<Extent>& tgt_extents,
+                        uint32_t block_size,
+                        brillo::Blob* hash_out) {
+  HashCalculator source_hasher;
+
+  uint64_t buffer_blocks = kMaxCopyBufferSize / block_size;
+  // Ensure we copy at least one block at a time.
+  if (buffer_blocks < 1)
+    buffer_blocks = 1;
+
+  uint64_t total_blocks = GetBlockCount(src_extents);
+  TEST_AND_RETURN_FALSE(total_blocks == GetBlockCount(tgt_extents));
+
+  brillo::Blob buf(buffer_blocks * block_size);
+
+  DirectExtentWriter writer;
+  std::vector<Extent> vec_tgt_extents;
+  vec_tgt_extents.reserve(tgt_extents.size());
+  for (const auto& ext : tgt_extents) {
+    vec_tgt_extents.push_back(ext);
+  }
+  TEST_AND_RETURN_FALSE(writer.Init(target, vec_tgt_extents, block_size));
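+  // The writer lays the data out sequentially over the target extents, so the
+  // source data can simply be streamed into it in read order.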
+
+  for (const Extent& src_ext : src_extents) {
+    for (uint64_t src_ext_block = 0; src_ext_block < src_ext.num_blocks();
+         src_ext_block += buffer_blocks) {
+      uint64_t iteration_blocks =
+          min(buffer_blocks,
+              static_cast<uint64_t>(src_ext.num_blocks() - src_ext_block));
+      uint64_t src_start_block = src_ext.start_block() + src_ext_block;
+
+      ssize_t bytes_read_this_iteration;
+      TEST_AND_RETURN_FALSE(utils::PReadAll(source,
+                                            buf.data(),
+                                            iteration_blocks * block_size,
+                                            src_start_block * block_size,
+                                            &bytes_read_this_iteration));
+
+      TEST_AND_RETURN_FALSE(
+          bytes_read_this_iteration ==
+          static_cast<ssize_t>(iteration_blocks * block_size));
+
+      TEST_AND_RETURN_FALSE(
+          writer.Write(buf.data(), iteration_blocks * block_size));
+
+      if (hash_out) {
+        TEST_AND_RETURN_FALSE(
+            source_hasher.Update(buf.data(), iteration_blocks * block_size));
+      }
+    }
+  }
+  TEST_AND_RETURN_FALSE(writer.End());
+
+  if (hash_out) {
+    TEST_AND_RETURN_FALSE(source_hasher.Finalize());
+    *hash_out = source_hasher.raw_hash();
+  }
+  return true;
+}
+
+}  // namespace fd_utils
+
+}  // namespace chromeos_update_engine
diff --git a/payload_consumer/file_descriptor_utils.h b/payload_consumer/file_descriptor_utils.h
new file mode 100644
index 0000000..b73defb
--- /dev/null
+++ b/payload_consumer/file_descriptor_utils.h
@@ -0,0 +1,50 @@
+//
+// Copyright (C) 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_PAYLOAD_CONSUMER_FILE_DESCRIPTOR_UTILS_H_
+#define UPDATE_ENGINE_PAYLOAD_CONSUMER_FILE_DESCRIPTOR_UTILS_H_
+
+#include <vector>
+
+#include <brillo/secure_blob.h>
+
+#include "update_engine/payload_consumer/file_descriptor.h"
+#include "update_engine/update_metadata.pb.h"
+
+namespace chromeos_update_engine {
+namespace fd_utils {
+
+// Copies blocks from the |source| file to the |target| file and hashes the
+// contents. The blocks to copy are specified by the |src_extents| and
+// |tgt_extents| lists of Extents, which must cover the same number of blocks.
+// Stores the hash of the copied blocks in the Blob pointed to by |hash_out| if
+// it is not null. The block size is passed as |block_size|. On a read or write
+// error, returns false and the value pointed to by |hash_out| is undefined.
+// The |source| and |target| files must be different, or otherwise
+// |src_extents| and |tgt_extents| must not overlap.
+bool CopyAndHashExtents(
+    FileDescriptorPtr source,
+    const google::protobuf::RepeatedPtrField<Extent>& src_extents,
+    FileDescriptorPtr target,
+    const google::protobuf::RepeatedPtrField<Extent>& tgt_extents,
+    uint32_t block_size,
+    brillo::Blob* hash_out);
+
+}  // namespace fd_utils
+}  // namespace chromeos_update_engine
+
+#endif  // UPDATE_ENGINE_PAYLOAD_CONSUMER_FILE_DESCRIPTOR_UTILS_H_
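
For reference, a minimal sketch of how the new helper can be driven (illustrative only, not part of this change; it reuses the FakeFileDescriptor and ExtentForRange helpers exercised by the unit tests below, and the function name CopyFirstBlocks is hypothetical):

#include "update_engine/payload_consumer/fake_file_descriptor.h"
#include "update_engine/payload_consumer/file_descriptor_utils.h"
#include "update_engine/payload_generator/extent_ranges.h"

namespace chromeos_update_engine {

// Copies the first four 4-byte blocks of the fake source stream ("0000",
// "0001", "0002", "0003") into |target| and stores the hash of the data read.
bool CopyFirstBlocks(const FileDescriptorPtr& target, brillo::Blob* hash) {
  FileDescriptorPtr source(new FakeFileDescriptor());
  google::protobuf::RepeatedPtrField<Extent> src_extents, tgt_extents;
  *src_extents.Add() = ExtentForRange(0, 4);  // Source blocks 0-3.
  *tgt_extents.Add() = ExtentForRange(0, 4);  // Target blocks 0-3.
  return fd_utils::CopyAndHashExtents(
      source, src_extents, target, tgt_extents, 4 /* block_size */, hash);
}

}  // namespace chromeos_update_engine
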
diff --git a/payload_consumer/file_descriptor_utils_unittest.cc b/payload_consumer/file_descriptor_utils_unittest.cc
new file mode 100644
index 0000000..9910239
--- /dev/null
+++ b/payload_consumer/file_descriptor_utils_unittest.cc
@@ -0,0 +1,167 @@
+//
+// Copyright (C) 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_consumer/file_descriptor_utils.h"
+
+#include <string>
+#include <utility>
+#include <vector>
+
+#include <gtest/gtest.h>
+
+#include <brillo/data_encoding.h>
+
+#include "update_engine/common/hash_calculator.h"
+#include "update_engine/common/test_utils.h"
+#include "update_engine/common/utils.h"
+#include "update_engine/payload_consumer/fake_file_descriptor.h"
+#include "update_engine/payload_consumer/file_descriptor.h"
+#include "update_engine/payload_generator/extent_ranges.h"
+
+namespace chromeos_update_engine {
+
+namespace {
+
+::google::protobuf::RepeatedPtrField<Extent> CreateExtentList(
+    const std::vector<std::pair<uint64_t, uint64_t>>& lst) {
+  ::google::protobuf::RepeatedPtrField<Extent> result;
+  for (const auto& item : lst) {
+    *result.Add() = ExtentForRange(item.first, item.second);
+  }
+  return result;
+}
+
+}  // namespace
+
+class FileDescriptorUtilsTest : public ::testing::Test {
+ protected:
+  void SetUp() override {
+    EXPECT_TRUE(utils::MakeTempFile("fd_tgt.XXXXXX", &tgt_path_, nullptr));
+    EXPECT_TRUE(target_->Open(tgt_path_.c_str(), O_RDWR));
+  }
+
+  // Check that the |target_| file contains |expected_contents|.
+  void ExpectTarget(const std::string& expected_contents) {
+    std::string target_contents;
+    EXPECT_TRUE(utils::ReadFile(tgt_path_, &target_contents));
+    EXPECT_EQ(expected_contents.size(), target_contents.size());
+    if (target_contents != expected_contents) {
+      ADD_FAILURE() << "Contents don't match.";
+      LOG(INFO) << "Expected contents:";
+      utils::HexDumpString(expected_contents);
+      LOG(INFO) << "Actual contents:";
+      utils::HexDumpString(target_contents);
+    }
+  }
+
+  // Path to the target temporary file.
+  std::string tgt_path_;
+
+  // Source and target file descriptors used for testing the tools.
+  FakeFileDescriptor* fake_source_{new FakeFileDescriptor()};
+  FileDescriptorPtr source_{fake_source_};
+  FileDescriptorPtr target_{new EintrSafeFileDescriptor()};
+};
+
+// Source and target extents should have the same number of blocks.
+TEST_F(FileDescriptorUtilsTest, CopyAndHashExtentsMismatchBlocksTest) {
+  auto src_extents = CreateExtentList({{1, 4}});
+  auto tgt_extents = CreateExtentList({{0, 5}});
+
+  EXPECT_FALSE(fd_utils::CopyAndHashExtents(
+      source_, src_extents, target_, tgt_extents, 4, nullptr));
+}
+
+// Failing to read from the source should fail the copy.
+TEST_F(FileDescriptorUtilsTest, CopyAndHashExtentsReadFailureTest) {
+  auto extents = CreateExtentList({{0, 5}});
+  fake_source_->AddFailureRange(10, 5);
+
+  EXPECT_FALSE(fd_utils::CopyAndHashExtents(
+      source_, extents, target_, extents, 4, nullptr));
+}
+
+// Failing to write to the target should fail the copy.
+TEST_F(FileDescriptorUtilsTest, CopyAndHashExtentsWriteFailureTest) {
+  auto src_extents = CreateExtentList({{0, 2}});
+  auto tgt_extents = CreateExtentList({{5, 2}});
+  fake_source_->AddFailureRange(5 * 4, 10);
+
+  // Note that we pass |source_| as the target as well, which should fail to
+  // write.
+  EXPECT_FALSE(fd_utils::CopyAndHashExtents(
+      source_, src_extents, source_, tgt_extents, 4, nullptr));
+}
+
+// Test that we can copy extents without hashing them, by passing nullptr as
+// hash_out.
+TEST_F(FileDescriptorUtilsTest, CopyAndHashExtentsWithoutHashingTest) {
+  auto extents = CreateExtentList({{0, 5}});
+
+  EXPECT_TRUE(fd_utils::CopyAndHashExtents(
+      source_, extents, target_, extents, 4, nullptr));
+  ExpectTarget("00000001000200030004");
+}
+
+// CopyAndHashExtents() can take a different number of extents in the source
+// and target files, as long as the total number of blocks is the same. Test
+// that it handles this properly.
+TEST_F(FileDescriptorUtilsTest, CopyAndHashExtentsManyToOneTest) {
+  brillo::Blob hash_out;
+  // Reorder the input as 1 4 2 3 0.
+  auto src_extents = CreateExtentList({{1, 1}, {4, 1}, {2, 2}, {0, 1}});
+  auto tgt_extents = CreateExtentList({{0, 5}});
+
+  EXPECT_TRUE(fd_utils::CopyAndHashExtents(
+      source_, src_extents, target_, tgt_extents, 4, &hash_out));
+  const char kExpectedResult[] = "00010004000200030000";
+  ExpectTarget(kExpectedResult);
+
+  brillo::Blob expected_hash;
+  EXPECT_TRUE(HashCalculator::RawHashOfBytes(
+      kExpectedResult, strlen(kExpectedResult), &expected_hash));
+  EXPECT_EQ(expected_hash, hash_out);
+}
+
+TEST_F(FileDescriptorUtilsTest, CopyAndHashExtentsManyToManyTest) {
+  brillo::Blob hash_out;
+  auto src_extents = CreateExtentList({{1, 1}, {4, 1}, {2, 2}, {0, 1}});
+  auto tgt_extents = CreateExtentList({{2, 3}, {0, 2}});
+
+  EXPECT_TRUE(fd_utils::CopyAndHashExtents(
+      source_, src_extents, target_, tgt_extents, 4, &hash_out));
+  // The reads always match the source extent list of blocks (up to the
+  // internal buffer size).
+  std::vector<std::pair<uint64_t, uint64_t>> kExpectedOps = {
+      {4, 4}, {16, 4}, {8, 8}, {0, 4}};
+  EXPECT_EQ(kExpectedOps, fake_source_->GetReadOps());
+
+  // The output here is as in the previous test but the first 3 4-byte blocks
+  // are at the end of the stream. The expected hash is as in the previous
+  // example anyway since the hash doesn't depend on the order of the target
+  // blocks.
+  const char kExpectedResult[] = "00030000000100040002";
+  ExpectTarget(kExpectedResult);
+
+  // The data in the order that the reader processes (and hashes) it.
+  const char kExpectedOrderedData[] = "00010004000200030000";
+  brillo::Blob expected_hash;
+  EXPECT_TRUE(HashCalculator::RawHashOfBytes(
+      kExpectedOrderedData, strlen(kExpectedOrderedData), &expected_hash));
+  EXPECT_EQ(expected_hash, hash_out);
+}
+
+}  // namespace chromeos_update_engine
diff --git a/payload_consumer/payload_constants.cc b/payload_consumer/payload_constants.cc
index de0fd74..7d396b6 100644
--- a/payload_consumer/payload_constants.cc
+++ b/payload_consumer/payload_constants.cc
@@ -25,24 +25,13 @@
 const uint32_t kInPlaceMinorPayloadVersion = 1;
 const uint32_t kSourceMinorPayloadVersion = 2;
 const uint32_t kOpSrcHashMinorPayloadVersion = 3;
-const uint32_t kImgdiffMinorPayloadVersion = 4;
+const uint32_t kPuffdiffMinorPayloadVersion = 4;
 
 const char kLegacyPartitionNameKernel[] = "boot";
 const char kLegacyPartitionNameRoot[] = "system";
 
 const char kDeltaMagic[4] = {'C', 'r', 'A', 'U'};
 
-// The zlib in Android and Chrome OS are currently compatible with each other,
-// so they are sharing the same array, but if in the future they are no longer
-// compatible with each other, we coule make the same change on the other one to
-// make them compatible again or use ifdef here.
-const char kCompatibleZlibFingerprint[][65] = {
-    "ea973605ccbbdb24f59f449c5f65861a1a9bc7a4353377aaaa06cb3e0f1cfbd7",
-    "3747fa404cceb00a5ec3606fc779510aaa784d5864ab1d5c28b9e267c40aad5c",
-    // zlib 1.2.11
-    "61514794a2985bee78135fd67a2f1fd18e56f3c3e410fbc4552a0e05a701e47a",
-};
-
 const char* InstallOperationTypeName(InstallOperation_Type op_type) {
   switch (op_type) {
     case InstallOperation::BSDIFF:
@@ -63,8 +52,8 @@
       return "DISCARD";
     case InstallOperation::REPLACE_XZ:
       return "REPLACE_XZ";
-    case InstallOperation::IMGDIFF:
-      return "IMGDIFF";
+    case InstallOperation::PUFFDIFF:
+      return "PUFFDIFF";
   }
   return "<unknown_op>";
 }
diff --git a/payload_consumer/payload_constants.h b/payload_consumer/payload_constants.h
index 7509ed2..1e2e810 100644
--- a/payload_consumer/payload_constants.h
+++ b/payload_consumer/payload_constants.h
@@ -43,9 +43,8 @@
 // The minor version that allows per-operation source hash.
 extern const uint32_t kOpSrcHashMinorPayloadVersion;
 
-// The minor version that allows IMGDIFF operation.
-extern const uint32_t kImgdiffMinorPayloadVersion;
-
+// The minor version that allows PUFFDIFF operation.
+extern const uint32_t kPuffdiffMinorPayloadVersion;
 
 // The kernel and rootfs partition names used by the BootControlInterface when
 // handling update payloads with a major version 1. The names of the updated
@@ -56,15 +55,6 @@
 extern const char kBspatchPath[];
 extern const char kDeltaMagic[4];
 
-// The list of compatible SHA256 hashes of zlib source code.
-// This is used to check if the source image have a compatible zlib (produce
-// same compressed result given the same input).
-// When a new fingerprint is found, please examine the changes in zlib source
-// carefully and determine if it's still compatible with previous version, if
-// yes then add the new fingerprint to this array, otherwise remove all previous
-// fingerprints in the array first, and only include the new fingerprint.
-extern const char kCompatibleZlibFingerprint[3][65];
-
 // A block number denoting a hole on a sparse file. Used on Extents to refer to
 // section of blocks not present on disk on a sparse file.
 const uint64_t kSparseHole = std::numeric_limits<uint64_t>::max();
diff --git a/payload_generator/delta_diff_utils.cc b/payload_generator/delta_diff_utils.cc
index e928912..1664960 100644
--- a/payload_generator/delta_diff_utils.cc
+++ b/payload_generator/delta_diff_utils.cc
@@ -32,6 +32,7 @@
 #include <base/strings/stringprintf.h>
 #include <base/threading/simple_thread.h>
 #include <brillo/data_encoding.h>
+#include <bsdiff/bsdiff.h>
 
 #include "update_engine/common/hash_calculator.h"
 #include "update_engine/common/subprocess.h"
@@ -50,9 +51,6 @@
 namespace chromeos_update_engine {
 namespace {
 
-const char* const kBsdiffPath = "bsdiff";
-const char* const kImgdiffPath = "imgdiff";
-
 // The maximum destination size allowed for bsdiff. In general, bsdiff should
 // work for arbitrary big files, but the payload generation and payload
 // application requires a significant amount of RAM. We put a hard-limit of
@@ -60,10 +58,10 @@
 // Chrome binary in ASan builders.
 const uint64_t kMaxBsdiffDestinationSize = 200 * 1024 * 1024;  // bytes
 
-// The maximum destination size allowed for imgdiff. In general, imgdiff should
-// work for arbitrary big files, but the payload application is quite memory
-// intensive, so we limit these operations to 50 MiB.
-const uint64_t kMaxImgdiffDestinationSize = 50 * 1024 * 1024;  // bytes
+// The maximum destination size allowed for puffdiff. In general, puffdiff
+// should work for arbitrarily big files, but the payload application is quite
+// memory intensive, so we limit these operations to 50 MiB.
+const uint64_t kMaxPuffdiffDestinationSize = 50 * 1024 * 1024;  // bytes
 
 // Process a range of blocks from |range_start| to |range_end| in the extent at
 // position |*idx_p| of |extents|. If |do_remove| is true, this range will be
@@ -160,15 +158,6 @@
   return removed_bytes;
 }
 
-// Returns true if the given blob |data| contains gzip header magic.
-bool ContainsGZip(const brillo::Blob& data) {
-  const uint8_t kGZipMagic[] = {0x1f, 0x8b, 0x08, 0x00};
-  return std::search(data.begin(),
-                     data.end(),
-                     std::begin(kGZipMagic),
-                     std::end(kGZipMagic)) != data.end();
-}
-
 }  // namespace
 
 namespace diff_utils {
@@ -657,7 +646,7 @@
   uint64_t blocks_to_read = BlocksInExtents(old_extents);
   uint64_t blocks_to_write = BlocksInExtents(new_extents);
 
-  // Disable bsdiff and imgdiff when the data is too big.
+  // Disable bsdiff and puffdiff when the data is too big.
   bool bsdiff_allowed =
       version.OperationAllowed(InstallOperation::SOURCE_BSDIFF) ||
       version.OperationAllowed(InstallOperation::BSDIFF);
@@ -668,12 +657,12 @@
     bsdiff_allowed = false;
   }
 
-  bool imgdiff_allowed = version.OperationAllowed(InstallOperation::IMGDIFF);
-  if (imgdiff_allowed &&
-      blocks_to_read * kBlockSize > kMaxImgdiffDestinationSize) {
-    LOG(INFO) << "imgdiff blacklisted, data too big: "
+  bool puffdiff_allowed = version.OperationAllowed(InstallOperation::PUFFDIFF);
+  if (puffdiff_allowed &&
+      blocks_to_read * kBlockSize > kMaxPuffdiffDestinationSize) {
+    LOG(INFO) << "puffdiff blacklisted, data too big: "
               << blocks_to_read * kBlockSize << " bytes";
-    imgdiff_allowed = false;
+    puffdiff_allowed = false;
   }
 
   // Make copies of the extents so we can modify them.
@@ -711,24 +700,21 @@
                              ? InstallOperation::SOURCE_COPY
                              : InstallOperation::MOVE);
       data_blob = brillo::Blob();
-    } else if (bsdiff_allowed || imgdiff_allowed) {
-      // If the source file is considered bsdiff safe (no bsdiff bugs
-      // triggered), see if BSDIFF encoding is smaller.
-      base::FilePath old_chunk;
-      TEST_AND_RETURN_FALSE(base::CreateTemporaryFile(&old_chunk));
-      ScopedPathUnlinker old_unlinker(old_chunk.value());
-      TEST_AND_RETURN_FALSE(utils::WriteFile(
-          old_chunk.value().c_str(), old_data.data(), old_data.size()));
-      base::FilePath new_chunk;
-      TEST_AND_RETURN_FALSE(base::CreateTemporaryFile(&new_chunk));
-      ScopedPathUnlinker new_unlinker(new_chunk.value());
-      TEST_AND_RETURN_FALSE(utils::WriteFile(
-          new_chunk.value().c_str(), new_data.data(), new_data.size()));
-
+    } else {
       if (bsdiff_allowed) {
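+        // bsdiff now runs in-process via libbsdiff; the patch is written to a
+        // temporary file and read back into |bsdiff_delta| below.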
+        base::FilePath patch;
+        TEST_AND_RETURN_FALSE(base::CreateTemporaryFile(&patch));
+        ScopedPathUnlinker unlinker(patch.value());
+
         brillo::Blob bsdiff_delta;
-        TEST_AND_RETURN_FALSE(DiffFiles(
-            kBsdiffPath, old_chunk.value(), new_chunk.value(), &bsdiff_delta));
+        TEST_AND_RETURN_FALSE(0 == bsdiff::bsdiff(old_data.data(),
+                                                  old_data.size(),
+                                                  new_data.data(),
+                                                  new_data.size(),
+                                                  patch.value().c_str(),
+                                                  nullptr));
+
+        TEST_AND_RETURN_FALSE(utils::ReadFile(patch.value(), &bsdiff_delta));
         CHECK_GT(bsdiff_delta.size(), static_cast<brillo::Blob::size_type>(0));
         if (bsdiff_delta.size() < data_blob.size()) {
           operation.set_type(
@@ -738,25 +724,9 @@
           data_blob = std::move(bsdiff_delta);
         }
       }
-      if (imgdiff_allowed && ContainsGZip(old_data) && ContainsGZip(new_data)) {
-        brillo::Blob imgdiff_delta;
-        // Imgdiff might fail in some cases, only use the result if it succeed,
-        // otherwise print the extents to analyze.
-        if (DiffFiles(kImgdiffPath,
-                      old_chunk.value(),
-                      new_chunk.value(),
-                      &imgdiff_delta) &&
-            imgdiff_delta.size() > 0) {
-          if (imgdiff_delta.size() < data_blob.size()) {
-            operation.set_type(InstallOperation::IMGDIFF);
-            data_blob = std::move(imgdiff_delta);
-          }
-        } else {
-          LOG(ERROR) << "Imgdiff failed with source extents: "
-                     << ExtentsToString(src_extents)
-                     << ", destination extents: "
-                     << ExtentsToString(dst_extents);
-        }
+      if (puffdiff_allowed) {
+        LOG(ERROR) << "puffdiff is not supported yet!";
+        return false;
       }
     }
   }
@@ -783,37 +753,6 @@
 
   *out_data = std::move(data_blob);
   *out_op = operation;
-
-  return true;
-}
-
-// Runs the bsdiff or imgdiff tool in |diff_path| on two files and returns the
-// resulting delta in |out|. Returns true on success.
-bool DiffFiles(const string& diff_path,
-               const string& old_file,
-               const string& new_file,
-               brillo::Blob* out) {
-  const string kPatchFile = "delta.patchXXXXXX";
-  string patch_file_path;
-
-  TEST_AND_RETURN_FALSE(
-      utils::MakeTempFile(kPatchFile, &patch_file_path, nullptr));
-
-  vector<string> cmd;
-  cmd.push_back(diff_path);
-  cmd.push_back(old_file);
-  cmd.push_back(new_file);
-  cmd.push_back(patch_file_path);
-
-  int rc = 1;
-  string stdout;
-  TEST_AND_RETURN_FALSE(Subprocess::SynchronousExec(cmd, &rc, &stdout));
-  if (rc != 0) {
-    LOG(ERROR) << diff_path << " returned " << rc << std::endl << stdout;
-    return false;
-  }
-  TEST_AND_RETURN_FALSE(utils::ReadFile(patch_file_path, out));
-  unlink(patch_file_path.c_str());
   return true;
 }
 
diff --git a/payload_generator/delta_diff_utils.h b/payload_generator/delta_diff_utils.h
index c9fef17..2d49459 100644
--- a/payload_generator/delta_diff_utils.h
+++ b/payload_generator/delta_diff_utils.h
@@ -93,7 +93,7 @@
 // fills in |out_op|. If there's no change in old and new files, it creates a
 // MOVE or SOURCE_COPY operation. If there is a change, the smallest of the
 // operations allowed in the given |version| (REPLACE, REPLACE_BZ, BSDIFF,
-// SOURCE_BSDIFF or IMGDIFF) wins.
+// SOURCE_BSDIFF, or PUFFDIFF) wins.
 // |new_extents| must not be empty. Returns true on success.
 bool ReadExtentsToDiff(const std::string& old_part,
                        const std::string& new_part,
@@ -103,13 +103,6 @@
                        brillo::Blob* out_data,
                        InstallOperation* out_op);
 
-// Runs the bsdiff or imgdiff tool in |diff_path| on two files and returns the
-// resulting delta in |out|. Returns true on success.
-bool DiffFiles(const std::string& diff_path,
-               const std::string& old_file,
-               const std::string& new_file,
-               brillo::Blob* out);
-
 // Generates the best allowed full operation to produce |new_data|. The allowed
 // operations are based on |payload_version|. The operation blob will be stored
 // in |out_blob| and the resulting operation type in |out_type|. Returns whether
diff --git a/payload_generator/delta_diff_utils_unittest.cc b/payload_generator/delta_diff_utils_unittest.cc
index 232eab7..bb83942 100644
--- a/payload_generator/delta_diff_utils_unittest.cc
+++ b/payload_generator/delta_diff_utils_unittest.cc
@@ -131,7 +131,6 @@
                                   uint32_t minor_version) {
     BlobFileWriter blob_file(blob_fd_, &blob_size_);
     PayloadVersion version(kChromeOSMajorPayloadVersion, minor_version);
-    version.imgdiff_allowed = true;  // Assume no fingerprint mismatch.
     return diff_utils::DeltaMovedAndZeroBlocks(&aops_,
                                                old_part_.path,
                                                new_part_.path,
diff --git a/payload_generator/generate_delta_main.cc b/payload_generator/generate_delta_main.cc
index 3bd9ee6..698de09 100644
--- a/payload_generator/generate_delta_main.cc
+++ b/payload_generator/generate_delta_main.cc
@@ -19,6 +19,7 @@
 #include <sys/stat.h>
 #include <sys/types.h>
 #include <unistd.h>
+#include <xz.h>
 
 #include <string>
 #include <vector>
@@ -29,13 +30,14 @@
 #include <brillo/flag_helper.h>
 #include <brillo/key_value_store.h>
 
+#include "update_engine/common/fake_boot_control.h"
+#include "update_engine/common/fake_hardware.h"
 #include "update_engine/common/prefs.h"
 #include "update_engine/common/terminator.h"
 #include "update_engine/common/utils.h"
 #include "update_engine/payload_consumer/delta_performer.h"
 #include "update_engine/payload_consumer/payload_constants.h"
 #include "update_engine/payload_generator/delta_diff_generator.h"
-#include "update_engine/payload_generator/delta_diff_utils.h"
 #include "update_engine/payload_generator/payload_generation_config.h"
 #include "update_engine/payload_generator/payload_signer.h"
 #include "update_engine/payload_generator/xz.h"
@@ -165,73 +167,83 @@
   }
 }
 
-void VerifySignedPayload(const string& in_file,
-                         const string& public_key) {
+int VerifySignedPayload(const string& in_file, const string& public_key) {
   LOG(INFO) << "Verifying signed payload.";
   LOG_IF(FATAL, in_file.empty())
       << "Must pass --in_file to verify signed payload.";
   LOG_IF(FATAL, public_key.empty())
       << "Must pass --public_key to verify signed payload.";
-  CHECK(PayloadSigner::VerifySignedPayload(in_file, public_key));
+  if (!PayloadSigner::VerifySignedPayload(in_file, public_key)) {
+    LOG(INFO) << "VerifySignedPayload failed";
+    return 1;
+  }
+
   LOG(INFO) << "Done verifying signed payload.";
+  return 0;
 }
 
 // TODO(deymo): This function is likely broken for deltas minor version 2 or
 // newer. Move this function to a new file and make the delta_performer
 // integration tests use this instead.
-void ApplyDelta(const string& in_file,
-                const string& old_kernel,
-                const string& old_rootfs,
-                const string& prefs_dir) {
+bool ApplyPayload(const string& payload_file,
+                  // Simply reuses the payload config used for payload
+                  // generation.
+                  const PayloadGenerationConfig& config) {
   LOG(INFO) << "Applying delta.";
-  LOG_IF(FATAL, old_rootfs.empty())
-      << "Must pass --old_image to apply delta.";
-  Prefs prefs;
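+  // Fake boot control and hardware objects let DeltaPerformer run against
+  // plain image files; the partition paths are wired to slots below.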
+  FakeBootControl fake_boot_control;
+  FakeHardware fake_hardware;
+  MemoryPrefs prefs;
   InstallPlan install_plan;
-  LOG(INFO) << "Setting up preferences under: " << prefs_dir;
-  LOG_IF(ERROR, !prefs.Init(base::FilePath(prefs_dir)))
-      << "Failed to initialize preferences.";
-  // Get original checksums
-  LOG(INFO) << "Calculating original checksums";
-  ImageConfig old_image;
-  old_image.partitions.emplace_back(kLegacyPartitionNameRoot);
-  old_image.partitions.back().path = old_rootfs;
-  old_image.partitions.emplace_back(kLegacyPartitionNameKernel);
-  old_image.partitions.back().path = old_kernel;
-  CHECK(old_image.LoadImageSize());
-  for (const auto& old_part : old_image.partitions) {
-    PartitionInfo part_info;
-    CHECK(diff_utils::InitializePartitionInfo(old_part, &part_info));
-    InstallPlan::Partition part;
-    part.name = old_part.name;
-    part.source_hash.assign(part_info.hash().begin(),
-                            part_info.hash().end());
-    part.source_path = old_part.path;
-    // Apply the delta in-place to the old_part.
-    part.target_path = old_part.path;
-    install_plan.partitions.push_back(part);
+  InstallPlan::Payload payload;
+  install_plan.source_slot =
+      config.is_delta ? 0 : BootControlInterface::kInvalidSlot;
+  install_plan.target_slot = 1;
+  payload.type =
+      config.is_delta ? InstallPayloadType::kDelta : InstallPayloadType::kFull;
+
+  for (size_t i = 0; i < config.target.partitions.size(); i++) {
+    const string& part_name = config.target.partitions[i].name;
+    const string& target_path = config.target.partitions[i].path;
+    fake_boot_control.SetPartitionDevice(
+        part_name, install_plan.target_slot, target_path);
+
+    string source_path;
+    if (config.is_delta) {
+      TEST_AND_RETURN_FALSE(config.target.partitions.size() ==
+                            config.source.partitions.size());
+      source_path = config.source.partitions[i].path;
+      fake_boot_control.SetPartitionDevice(
+          part_name, install_plan.source_slot, source_path);
+    }
+
+    LOG(INFO) << "Install partition:"
+              << " source: " << source_path << " target: " << target_path;
   }
-  install_plan.payloads.resize(1);
+
   DeltaPerformer performer(&prefs,
-                           nullptr,
-                           nullptr,
+                           &fake_boot_control,
+                           &fake_hardware,
                            nullptr,
                            &install_plan,
-                           &install_plan.payloads[0]);
+                           &payload);
+
   brillo::Blob buf(1024 * 1024);
-  int fd = open(in_file.c_str(), O_RDONLY, 0);
+  int fd = open(payload_file.c_str(), O_RDONLY, 0);
   CHECK_GE(fd, 0);
   ScopedFdCloser fd_closer(&fd);
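+  // Initialize the CRC32 table used by the embedded xz decoder before the
+  // performer processes any xz-compressed operations.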
+  xz_crc32_init();
   for (off_t offset = 0;; offset += buf.size()) {
     ssize_t bytes_read;
     CHECK(utils::PReadAll(fd, buf.data(), buf.size(), offset, &bytes_read));
     if (bytes_read == 0)
       break;
-    CHECK_EQ(performer.Write(buf.data(), bytes_read), bytes_read);
+    TEST_AND_RETURN_FALSE(performer.Write(buf.data(), bytes_read));
   }
   CHECK_EQ(performer.Close(), 0);
   DeltaPerformer::ResetUpdateProgress(&prefs, false);
-  LOG(INFO) << "Done applying delta.";
+  LOG(INFO) << "Completed applying " << (config.is_delta ? "delta" : "full")
+            << " payload.";
+  return true;
 }
 
 int ExtractProperties(const string& payload_path, const string& props_file) {
@@ -293,8 +305,6 @@
   DEFINE_string(public_key, "", "Path to public key in .pem format");
   DEFINE_int32(public_key_version, -1,
                "DEPRECATED. Key-check version # of client");
-  DEFINE_string(prefs_dir, "/tmp/update_engine_prefs",
-                "Preferences directory, used with apply_delta");
   DEFINE_string(signature_size, "",
                 "Raw signature size used for hash calculation. "
                 "You may pass in multiple sizes by colon separating them. E.g. "
@@ -324,9 +334,6 @@
   DEFINE_string(properties_file, "",
                 "If passed, dumps the payload properties of the payload passed "
                 "in --in_file and exits.");
-  DEFINE_string(zlib_fingerprint, "",
-                "The fingerprint of zlib in the source image in hash string "
-                "format, used to check imgdiff compatibility.");
 
   DEFINE_string(old_channel, "",
                 "The channel for the old image. 'dev-channel', 'npo-channel', "
@@ -402,17 +409,11 @@
   if (!FLAGS_public_key.empty()) {
     LOG_IF(WARNING, FLAGS_public_key_version != -1)
         << "--public_key_version is deprecated and ignored.";
-    VerifySignedPayload(FLAGS_in_file, FLAGS_public_key);
-    return 0;
+    return VerifySignedPayload(FLAGS_in_file, FLAGS_public_key);
   }
   if (!FLAGS_properties_file.empty()) {
     return ExtractProperties(FLAGS_in_file, FLAGS_properties_file) ? 0 : 1;
   }
-  if (!FLAGS_in_file.empty()) {
-    ApplyDelta(FLAGS_in_file, FLAGS_old_kernel, FLAGS_old_image,
-               FLAGS_prefs_dir);
-    return 0;
-  }
 
   // A payload generation was requested. Convert the flags to a
   // PayloadGenerationConfig.
@@ -495,6 +496,10 @@
     }
   }
 
+  if (!FLAGS_in_file.empty()) {
+    return ApplyPayload(FLAGS_in_file, payload_config) ? 0 : 1;
+  }
+
   if (!FLAGS_new_postinstall_config_file.empty()) {
     LOG_IF(FATAL, FLAGS_major_version == kChromeOSMajorPayloadVersion)
         << "Postinstall config is only allowed in major version 2 or newer.";
@@ -570,19 +575,8 @@
     LOG(INFO) << "Using provided minor_version=" << FLAGS_minor_version;
   }
 
-  if (!FLAGS_zlib_fingerprint.empty()) {
-    if (utils::IsZlibCompatible(FLAGS_zlib_fingerprint)) {
-      payload_config.version.imgdiff_allowed = true;
-    } else {
-      LOG(INFO) << "IMGDIFF operation disabled due to fingerprint mismatch.";
-    }
-  }
-
-  if (payload_config.is_delta) {
-    LOG(INFO) << "Generating delta update";
-  } else {
-    LOG(INFO) << "Generating full update";
-  }
+  LOG(INFO) << "Generating " << (payload_config.is_delta ? "delta" : "full")
+            << " update";
 
   // From this point, all the options have been parsed.
   if (!payload_config.Validate()) {
diff --git a/payload_generator/payload_generation_config.cc b/payload_generator/payload_generation_config.cc
index e85d693..e2fec21 100644
--- a/payload_generator/payload_generation_config.cc
+++ b/payload_generator/payload_generation_config.cc
@@ -128,7 +128,7 @@
                         minor == kInPlaceMinorPayloadVersion ||
                         minor == kSourceMinorPayloadVersion ||
                         minor == kOpSrcHashMinorPayloadVersion ||
-                        minor == kImgdiffMinorPayloadVersion);
+                        minor == kPuffdiffMinorPayloadVersion);
   return true;
 }
 
@@ -151,7 +151,7 @@
       // The implementation of these operations had a bug in earlier versions
       // that prevents them from being used in any payload. We will enable
       // them for delta payloads for now.
-      return minor >= kImgdiffMinorPayloadVersion;
+      return minor >= kPuffdiffMinorPayloadVersion;
 
     // Delta operations:
     case InstallOperation::MOVE:
@@ -165,8 +165,8 @@
     case InstallOperation::SOURCE_BSDIFF:
       return minor >= kSourceMinorPayloadVersion;
 
-    case InstallOperation::IMGDIFF:
-      return minor >= kImgdiffMinorPayloadVersion && imgdiff_allowed;
+    case InstallOperation::PUFFDIFF:
+      return minor >= kPuffdiffMinorPayloadVersion;
   }
   return false;
 }
diff --git a/payload_generator/payload_generation_config.h b/payload_generator/payload_generation_config.h
index 8617d14..ca6fb04 100644
--- a/payload_generator/payload_generation_config.h
+++ b/payload_generator/payload_generation_config.h
@@ -137,10 +137,6 @@
 
   // The minor version of the payload.
   uint32_t minor;
-
-  // Wheter the IMGDIFF operation is allowed based on the available compressor
-  // in the delta_generator and the one supported by the target.
-  bool imgdiff_allowed = false;
 };
 
 // The PayloadGenerationConfig struct encapsulates all the configuration to
diff --git a/pylintrc b/pylintrc
index f83f8c6..f264479 100644
--- a/pylintrc
+++ b/pylintrc
@@ -263,7 +263,7 @@
 bad-functions=map,filter,input,apply,reduce
 
 # Good variable names which should always be accepted, separated by a comma
-good-names=i,j,k,ex,x,_
+good-names=i,j,k,ex,x,_,main
 
 # Bad variable names which should always be refused, separated by a comma
 bad-names=foo,bar,baz,toto,tutu,tata
diff --git a/real_system_state.cc b/real_system_state.cc
index 5cbf723..d99e658 100644
--- a/real_system_state.cc
+++ b/real_system_state.cc
@@ -24,9 +24,9 @@
 #include <base/time/time.h>
 #include <brillo/make_unique_ptr.h>
 #include <brillo/message_loops/message_loop.h>
-#if USE_LIBCROS
+#if USE_CHROME_KIOSK_APP || USE_CHROME_NETWORK_PROXY
 #include <chromeos/dbus/service_constants.h>
-#endif  // USE_LIBCROS
+#endif  // USE_CHROME_KIOSK_APP || USE_CHROME_NETWORK_PROXY
 
 #include "update_engine/common/boot_control.h"
 #include "update_engine/common/boot_control_stub.h"
@@ -65,14 +65,16 @@
     return false;
   }
 
-#if USE_LIBCROS
+#if USE_CHROME_KIOSK_APP
   libcros_proxy_.reset(new org::chromium::LibCrosServiceInterfaceProxy(
       DBusConnection::Get()->GetDBus(), chromeos::kLibCrosServiceName));
+#endif  // USE_CHROME_KIOSK_APP
+#if USE_CHROME_NETWORK_PROXY
   network_proxy_service_proxy_.reset(
       new org::chromium::NetworkProxyServiceInterfaceProxy(
           DBusConnection::Get()->GetDBus(),
           chromeos::kNetworkProxyServiceName));
-#endif  // USE_LIBCROS
+#endif  // USE_CHROME_NETWORK_PROXY
 
   LOG_IF(INFO, !hardware_->IsNormalBootMode()) << "Booted in dev mode.";
   LOG_IF(INFO, !hardware_->IsOfficialBuild()) << "Booted non-official build.";
@@ -143,27 +145,28 @@
       new CertificateChecker(prefs_.get(), &openssl_wrapper_));
   certificate_checker_->Init();
 
-#if USE_LIBCROS
-  org::chromium::NetworkProxyServiceInterfaceProxyInterface* net_proxy =
-      network_proxy_service_proxy_.get();
-  org::chromium::LibCrosServiceInterfaceProxyInterface* libcros_proxy =
-      libcros_proxy_.get();
+  update_attempter_.reset(
+      new UpdateAttempter(this,
+                          certificate_checker_.get(),
+#if USE_CHROME_NETWORK_PROXY
+                          network_proxy_service_proxy_.get()));
 #else
-  org::chromium::NetworkProxyServiceInterfaceProxyInterface* net_proxy =
-      nullptr;
-  org::chromium::LibCrosServiceInterfaceProxyInterface* libcros_proxy =
-      nullptr;
-#endif  // USE_LIBCROS
+                          nullptr));
+#endif  // USE_CHROME_NETWORK_PROXY
 
   // Initialize the UpdateAttempter before the UpdateManager.
-  update_attempter_.reset(new UpdateAttempter(this, certificate_checker_.get(),
-                                              net_proxy));
   update_attempter_->Init();
 
   // Initialize the Update Manager using the default state factory.
   chromeos_update_manager::State* um_state =
-      chromeos_update_manager::DefaultStateFactory(
-          &policy_provider_, libcros_proxy, this);
+      chromeos_update_manager::DefaultStateFactory(&policy_provider_,
+#if USE_CHROME_KIOSK_APP
+                                                   libcros_proxy_.get(),
+#else
+                                                   nullptr,
+#endif  // USE_CHROME_KIOSK_APP
+                                                   this);
+
   if (!um_state) {
     LOG(ERROR) << "Failed to initialize the Update Manager.";
     return false;
diff --git a/real_system_state.h b/real_system_state.h
index 64964cd..6aee0af 100644
--- a/real_system_state.h
+++ b/real_system_state.h
@@ -25,10 +25,12 @@
 #include <metrics/metrics_library.h>
 #include <policy/device_policy.h>
 
-#if USE_LIBCROS
+#if USE_CHROME_KIOSK_APP
 #include <libcros/dbus-proxies.h>
+#endif  // USE_CHROME_KIOSK_APP
+#if USE_CHROME_NETWORK_PROXY
 #include <network_proxy/dbus-proxies.h>
-#endif  // USE_LIBCROS
+#endif  // USE_CHROME_NETWORK_PROXY
 
 #include "update_engine/certificate_checker.h"
 #include "update_engine/common/boot_control_interface.h"
@@ -127,12 +129,14 @@
   inline bool system_rebooted() override { return system_rebooted_; }
 
  private:
-#if USE_LIBCROS
   // Real DBus proxies using the DBus connection.
+#if USE_CHROME_KIOSK_APP
   std::unique_ptr<org::chromium::LibCrosServiceInterfaceProxy> libcros_proxy_;
+#endif  // USE_CHROME_KIOSK_APP
+#if USE_CHROME_NETWORK_PROXY
   std::unique_ptr<org::chromium::NetworkProxyServiceInterfaceProxy>
       network_proxy_service_proxy_;
-#endif  // USE_LIBCROS
+#endif  // USE_CHROME_NETWORK_PROXY
 
   // Interface for the power manager.
   std::unique_ptr<PowerManagerInterface> power_manager_;
diff --git a/scripts/brillo_update_payload b/scripts/brillo_update_payload
index e62ba94..868e723 100755
--- a/scripts/brillo_update_payload
+++ b/scripts/brillo_update_payload
@@ -1,8 +1,20 @@
 #!/bin/bash
 
-# Copyright 2015 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
+#
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
 
 # Script to generate a Brillo update for use by the update engine.
 #
@@ -12,14 +24,15 @@
 #  hash        generate a payload or metadata hash
 #  sign        generate a signed payload
 #  properties  generate a properties file from a payload
+#  verify      verify a payload by applying it to recreate the target image
 #
 #  Generate command arguments:
 #  --payload             generated unsigned payload output file
 #  --source_image        if defined, generate a delta payload from the specified
 #                        image to the target_image
 #  --target_image        the target image that should be sent to clients
-#  --metadata_size_file  if defined, generate a file containing the size of the payload
-#                        metadata in bytes to the specified file
+#  --metadata_size_file  if defined, generate a file containing the size of the
+#                        payload metadata in bytes to the specified file
 #
 #  Hash command arguments:
 #  --unsigned_payload    the input unsigned payload to generate the hash from
@@ -50,6 +63,10 @@
 #  --payload                 the input signed or unsigned payload
 #  --properties_file         the output path where to write the properties, or
 #                            '-' for stdout.
+#  Verify command arguments:
+#  --payload             the input payload file to verify
+#  --source_image        if defined, apply the payload as a delta on top of
+#                        this source image
+#  --target_image        the target image to compare the applied payload
+#                        against
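+#
+#  Example usage (file names are hypothetical):
+#    brillo_update_payload verify \
+#      --payload payload.bin \
+#      --source_image source-image.zip \
+#      --target_image target-image.zip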
 
 
 # Exit codes:
@@ -85,6 +102,7 @@
 for signing."
 HELP_SIGN="sign: Insert the signatures into the unsigned payload."
 HELP_PROPERTIES="properties: Extract payload properties to a file."
+HELP_VERIFY="verify: Verify a (signed) update payload."
 
 usage() {
   echo "Supported commands:"
@@ -93,6 +111,7 @@
   echo "${HELP_HASH}"
   echo "${HELP_SIGN}"
   echo "${HELP_PROPERTIES}"
+  echo "${HELP_VERIFY}"
   echo
   echo "Use: \"$0 <command> --help\" for more options."
 }
@@ -123,6 +142,11 @@
   properties)
     FLAGS_HELP="${HELP_PROPERTIES}"
     ;;
+
+  verify)
+    FLAGS_HELP="${HELP_VERIFY}"
+    ;;
+
   *)
     echo "Unrecognized command: \"${COMMAND}\"" >&2
     usage >&2
@@ -174,6 +198,15 @@
     "Path to output the extracted property files. If '-' is passed stdout will \
 be used."
 fi
+if [[ "${COMMAND}" == "verify" ]]; then
+  DEFINE_string payload "" \
+    "Path to the input payload file."
+  DEFINE_string target_image "" \
+    "Path to the target image to compare the applied payload against."
+  DEFINE_string source_image "" \
+    "Optional: Path to a source image. If specified, the payload is applied \
+as a delta on top of this image."
+fi
 
 DEFINE_string work_dir "${TMPDIR:-/tmp}" "Where to dump temporary files."
 
@@ -210,9 +243,6 @@
 # Path to the postinstall config file in target image if exists.
 POSTINSTALL_CONFIG_FILE=""
 
-# The fingerprint of zlib in the source image.
-ZLIB_FINGERPRINT=""
-
 # read_option_int <file.txt> <option_key> [default_value]
 #
 # Reads the unsigned integer value associated with |option_key| in a key=value
@@ -332,11 +362,6 @@
   # updater supports a newer major version.
   FORCE_MAJOR_VERSION="1"
 
-  if [[ "${partitions_array}" == "SRC_PARTITIONS" ]]; then
-    # Copy from zlib_fingerprint in source image to stdout.
-    ZLIB_FINGERPRINT=$(e2cp "${root}":/etc/zlib_fingerprint -)
-  fi
-
   # When generating legacy Chrome OS images, we need to use "boot" and "system"
   # for the partition names to be compatible with updating Brillo devices with
   # Chrome OS images.
@@ -410,10 +435,6 @@
 Disabling deltas for this source version."
       exit ${EX_UNSUPPORTED_DELTA}
     fi
-
-    if [[ "${FORCE_MINOR_VERSION}" -ge 4 ]]; then
-      ZLIB_FINGERPRINT=$(unzip -p "${image}" "META/zlib_fingerprint.txt")
-    fi
   else
     # Target image
     local postinstall_config=$(create_tempfile "postinstall_config.XXXXXX")
@@ -535,9 +556,6 @@
     if [[ -n "${FORCE_MINOR_VERSION}" ]]; then
       GENERATOR_ARGS+=( --minor_version="${FORCE_MINOR_VERSION}" )
     fi
-    if [[ -n "${ZLIB_FINGERPRINT}" ]]; then
-      GENERATOR_ARGS+=( --zlib_fingerprint="${ZLIB_FINGERPRINT}" )
-    fi
   fi
 
   if [[ -n "${FORCE_MAJOR_VERSION}" ]]; then
@@ -636,6 +654,91 @@
       -properties_file="${FLAGS_properties_file}"
 }
 
+validate_verify() {
+  [[ -n "${FLAGS_payload}" ]] ||
+    die "Error: you must specify an input filename with --payload FILENAME"
+
+  [[ -n "${FLAGS_target_image}" ]] ||
+    die "Error: you must specify a target image with --target_image FILENAME"
+}
+
+cmd_verify() {
+  local payload_type="delta"
+  if [[ -z "${FLAGS_source_image}" ]]; then
+    payload_type="full"
+  fi
+
+  echo "Extracting images for ${payload_type} update."
+
+  if [[ "${payload_type}" == "delta" ]]; then
+    extract_image "${FLAGS_source_image}" SRC_PARTITIONS
+  fi
+  extract_image "${FLAGS_target_image}" DST_PARTITIONS PARTITIONS_ORDER
+
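+  # Create a zero-filled temporary file per target partition, sized to match
+  # the real target partition; delta_generator writes the applied payload into
+  # these files.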
+  declare -A TMP_PARTITIONS
+  for part in "${PARTITIONS_ORDER[@]}"; do
+    local tmp_part=$(create_tempfile "tmp_part.bin.XXXXXX")
+    echo "Creating temporary target partition ${tmp_part} for ${part}"
+    CLEANUP_FILES+=("${tmp_part}")
+    TMP_PARTITIONS[${part}]=${tmp_part}
+    local FILESIZE=$(stat -c%s "${DST_PARTITIONS[${part}]}")
+    echo "Truncating ${TMP_PARTITIONS[${part}]} to ${FILESIZE}"
+    truncate_file "${TMP_PARTITIONS[${part}]}" "${FILESIZE}"
+  done
+
+  echo "Verifying ${payload_type} update."
+  # Common payload args:
+  GENERATOR_ARGS=( -in_file="${FLAGS_payload}" )
+
+  local part old_partitions="" new_partitions="" partition_names=""
+  for part in "${PARTITIONS_ORDER[@]}"; do
+    if [[ -n "${partition_names}" ]]; then
+      partition_names+=":"
+      new_partitions+=":"
+      old_partitions+=":"
+    fi
+    partition_names+="${part}"
+    new_partitions+="${TMP_PARTITIONS[${part}]}"
+    old_partitions+="${SRC_PARTITIONS[${part}]:-}"
+  done
+
+  # Target image args:
+  GENERATOR_ARGS+=(
+    -partition_names="${partition_names}"
+    -new_partitions="${new_partitions}"
+  )
+
+  if [[ "${payload_type}" == "delta" ]]; then
+    # Source image args:
+    GENERATOR_ARGS+=(
+      -old_partitions="${old_partitions}"
+    )
+  fi
+
+  if [[ -n "${FORCE_MAJOR_VERSION}" ]]; then
+    GENERATOR_ARGS+=( --major_version="${FORCE_MAJOR_VERSION}" )
+  fi
+
+  echo "Running delta_generator to verify ${payload_type} payload with args: \
+${GENERATOR_ARGS[@]}"
+  "${GENERATOR}" "${GENERATOR_ARGS[@]}"
+
+  if [[ $? -eq 0 ]]; then
+    echo "Done applying ${payload_type} update."
+    echo "Checking the newly generated partitions against the target partitions"
+    for part in "${PARTITIONS_ORDER[@]}"; do
+      local not_str=""
+      if ! cmp "${TMP_PARTITIONS[${part}]}" "${DST_PARTITIONS[${part}]}"; then
+        not_str="in"
+      fi
+      echo "The new partition (${part}) is ${not_str}valid."
+    done
+  else
+    echo "Failed to apply ${payload_type} update."
+  fi
+}
+
 # Sanity check that the real generator exists:
 GENERATOR="$(which delta_generator || true)"
 [[ -x "${GENERATOR}" ]] || die "can't find delta_generator"
@@ -653,4 +756,7 @@
   properties) validate_properties
               cmd_properties
               ;;
+  verify) validate_verify
+          cmd_verify
+          ;;
 esac
diff --git a/scripts/update_device.py b/scripts/update_device.py
index 75c58a7..a5be0a5 100755
--- a/scripts/update_device.py
+++ b/scripts/update_device.py
@@ -27,10 +27,17 @@
 import threading
 import zipfile
 
+import update_payload.payload
+
 
 # The path used to store the OTA package when applying the package from a file.
 OTA_PACKAGE_PATH = '/data/ota_package'
 
+# The path to the payload public key on the device.
+PAYLOAD_KEY_PATH = '/etc/update_engine/update-payload-key.pub.pem'
+
+# The port on the device that update_engine should connect to.
+DEVICE_PORT = 1234
 
 def CopyFileObjLength(fsrc, fdst, buffer_size=128 * 1024, copy_length=None):
   """Copy from a file object to another.
@@ -93,7 +100,7 @@
   """
 
   @staticmethod
-  def _ParseRange(range_str, file_size):
+  def _parse_range(range_str, file_size):
     """Parse an HTTP range string.
 
     Args:
@@ -141,11 +148,11 @@
       self.send_response(200)
 
     stat = os.fstat(f.fileno())
-    start_range, end_range = self._ParseRange(self.headers.get('range'),
-                                              stat.st_size)
+    start_range, end_range = self._parse_range(self.headers.get('range'),
+                                               stat.st_size)
     logging.info('Serving request for %s from %s [%d, %d) length: %d',
-             self.path, self.serving_payload, start_range, end_range,
-             end_range - start_range)
+                 self.path, self.serving_payload, start_range, end_range,
+                 end_range - start_range)
 
     self.send_header('Accept-Ranges', 'bytes')
     self.send_header('Content-Range',
@@ -161,6 +168,57 @@
     CopyFileObjLength(f, self.wfile, copy_length=end_range - start_range)
 
 
+  def do_POST(self):  # pylint: disable=invalid-name
+    """Reply with the omaha response xml."""
+    if self.path != '/update':
+      self.send_error(404, 'Unknown request')
+      return
+
+    if not self.serving_payload:
+      self.send_error(500, 'No serving payload set')
+      return
+
+    try:
+      f = open(self.serving_payload, 'rb')
+    except IOError:
+      self.send_error(404, 'File not found')
+      return
+
+    self.send_response(200)
+    self.send_header("Content-type", "text/xml")
+    self.end_headers()
+
+    stat = os.fstat(f.fileno())
+    sha256sum = subprocess.check_output(['sha256sum', self.serving_payload])
+    payload_hash = sha256sum.split()[0]
+    payload = update_payload.Payload(f)
+    payload.Init()
+
+    xml = '''
+        <?xml version="1.0" encoding="UTF-8"?>
+        <response protocol="3.0">
+          <app appid="appid">
+            <updatecheck status="ok">
+              <urls>
+                <url codebase="http://127.0.0.1:%d/"/>
+              </urls>
+              <manifest version="0.0.0.1">
+                <actions>
+                  <action event="install" run="payload"/>
+                  <action event="postinstall" MetadataSize="%d"/>
+                </actions>
+                <packages>
+                  <package hash_sha256="%s" name="payload" size="%d"/>
+                </packages>
+              </manifest>
+            </updatecheck>
+          </app>
+        </response>
+    ''' % (DEVICE_PORT, payload.metadata_size, payload_hash, stat.st_size)
+    self.wfile.write(xml.strip())
+    return
+
+
 class ServerThread(threading.Thread):
   """A thread for serving HTTP requests."""
 
@@ -203,6 +261,12 @@
           '--size=%d' % ota.size, '--headers="%s"' % headers]
 
 
+def OmahaUpdateCommand(omaha_url):
+  """Return the command to run to start the update in a device using Omaha."""
+  return ['update_engine_client', '--update', '--follow',
+          '--omaha_url=%s' % omaha_url]
+
+
 class AdbHost(object):
   """Represents a device connected via ADB."""
 
@@ -235,6 +299,22 @@
     p.wait()
     return p.returncode
 
+  def adb_output(self, command):
+    """Run an ADB command like "adb push" and return the output.
+
+    Args:
+      command: list of strings containing command and arguments to run
+
+    Returns:
+      the program's output as a string.
+
+    Raises:
+      subprocess.CalledProcessError on command exit != 0.
+    """
+    command = self._command_prefix + command
+    logging.info('Running: %s', ' '.join(str(x) for x in command))
+    return subprocess.check_output(command, universal_newlines=True)
+
 
 def main():
   parser = argparse.ArgumentParser(description='Android A/B OTA helper.')
@@ -248,6 +328,8 @@
                       help='The specific device to use.')
   parser.add_argument('--no-verbose', action='store_true',
                       help='Less verbose output')
+  parser.add_argument('--public-key', type=str, default='',
+                      help='Override the public key used to verify payload.')
   args = parser.parse_args()
   logging.basicConfig(
       level=logging.WARNING if args.no_verbose else logging.INFO)
@@ -262,6 +344,9 @@
   # List of commands to perform the update.
   cmds = []
 
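+  # Probe update_engine_client on the device; if its --help output mentions
+  # "omaha", drive the update through the Omaha-style /update endpoint instead
+  # of passing the payload URL directly.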
+  help_cmd = ['shell', 'su', '0', 'update_engine_client', '--help']
+  use_omaha = 'omaha' in dut.adb_output(help_cmd)
+
   if args.file:
     # Update via pushing a file to /data.
     device_ota_file = os.path.join(OTA_PACKAGE_PATH, 'debug.zip')
@@ -273,16 +358,32 @@
   else:
     # Update via sending the payload over the network with an "adb reverse"
     # command.
-    device_port = 1234
-    payload_url = 'http://127.0.0.1:%d/payload' % device_port
+    payload_url = 'http://127.0.0.1:%d/payload' % DEVICE_PORT
     server_thread = StartServer(args.otafile)
     cmds.append(
-        ['reverse', 'tcp:%d' % device_port, 'tcp:%d' % server_thread.port])
-    finalize_cmds.append(['reverse', '--remove', 'tcp:%d' % device_port])
+        ['reverse', 'tcp:%d' % DEVICE_PORT, 'tcp:%d' % server_thread.port])
+    finalize_cmds.append(['reverse', '--remove', 'tcp:%d' % DEVICE_PORT])
+
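+  # If a replacement public key was given, temporarily shadow the on-device key
+  # directory with a tmpfs mount, push the key there, and undo the mount once
+  # the update finishes.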
+  if args.public_key:
+    payload_key_dir = os.path.dirname(PAYLOAD_KEY_PATH)
+    cmds.append(
+        ['shell', 'su', '0', 'mount', '-t', 'tmpfs', 'tmpfs', payload_key_dir])
+    # Allow adb push to payload_key_dir
+    cmds.append(['shell', 'su', '0', 'chcon', 'u:object_r:shell_data_file:s0',
+                 payload_key_dir])
+    cmds.append(['push', args.public_key, PAYLOAD_KEY_PATH])
+    # Allow update_engine to read it.
+    cmds.append(['shell', 'su', '0', 'chcon', '-R', 'u:object_r:system_file:s0',
+                 payload_key_dir])
+    finalize_cmds.append(['shell', 'su', '0', 'umount', payload_key_dir])
 
   try:
     # The main update command using the configured payload_url.
-    update_cmd = AndroidUpdateCommand(args.otafile, payload_url)
+    if use_omaha:
+      update_cmd = \
+          OmahaUpdateCommand('http://127.0.0.1:%d/update' % DEVICE_PORT)
+    else:
+      update_cmd = AndroidUpdateCommand(args.otafile, payload_url)
     cmds.append(['shell', 'su', '0'] + update_cmd)
 
     for cmd in cmds:
diff --git a/scripts/update_payload/applier.py b/scripts/update_payload/applier.py
index e3708c7..fa419bd 100644
--- a/scripts/update_payload/applier.py
+++ b/scripts/update_payload/applier.py
@@ -195,14 +195,14 @@
   """
 
   def __init__(self, payload, bsdiff_in_place=True, bspatch_path=None,
-               imgpatch_path=None, truncate_to_expected_size=True):
+               puffpatch_path=None, truncate_to_expected_size=True):
     """Initialize the applier.
 
     Args:
       payload: the payload object to check
       bsdiff_in_place: whether to perform BSDIFF operation in-place (optional)
       bspatch_path: path to the bspatch binary (optional)
-      imgpatch_path: path to the imgpatch binary (optional)
+      puffpatch_path: path to the puffpatch binary (optional)
       truncate_to_expected_size: whether to truncate the resulting partitions
                                  to their expected sizes, as specified in the
                                  payload (optional)
@@ -213,7 +213,7 @@
     self.minor_version = payload.manifest.minor_version
     self.bsdiff_in_place = bsdiff_in_place
     self.bspatch_path = bspatch_path or 'bspatch'
-    self.imgpatch_path = imgpatch_path or 'imgpatch'
+    self.puffpatch_path = puffpatch_path or 'imgpatch'
     self.truncate_to_expected_size = truncate_to_expected_size
 
   def _ApplyReplaceOperation(self, op, op_name, out_data, part_file, part_size):
@@ -347,7 +347,7 @@
 
   def _ApplyDiffOperation(self, op, op_name, patch_data, old_part_file,
                           new_part_file):
-    """Applies a SOURCE_BSDIFF or IMGDIFF operation.
+    """Applies a SOURCE_BSDIFF or PUFFDIFF operation.
 
     Args:
       op: the operation object
@@ -373,7 +373,7 @@
 
     if (hasattr(new_part_file, 'fileno') and
         ((not old_part_file) or hasattr(old_part_file, 'fileno')) and
-        op.type != common.OpType.IMGDIFF):
+        op.type != common.OpType.PUFFDIFF):
       # Construct input and output extents argument for bspatch.
       in_extents_arg, _, _ = _ExtentsToBspatchArg(
           op.src_extents, block_size, '%s.src_extents' % op_name,
@@ -411,8 +411,8 @@
       # Invoke bspatch.
       patch_cmd = [self.bspatch_path, in_file_name, out_file_name,
                    patch_file_name]
-      if op.type == common.OpType.IMGDIFF:
-        patch_cmd[0] = self.imgpatch_path
+      if op.type == common.OpType.PUFFDIFF:
+        patch_cmd[0] = self.puffpatch_path
       subprocess.check_call(patch_cmd)
 
       # Read output.
@@ -468,7 +468,7 @@
       elif op.type == common.OpType.SOURCE_COPY:
         self._ApplySourceCopyOperation(op, op_name, old_part_file,
                                        new_part_file)
-      elif op.type in (common.OpType.SOURCE_BSDIFF, common.OpType.IMGDIFF):
+      elif op.type in (common.OpType.SOURCE_BSDIFF, common.OpType.PUFFDIFF):
         self._ApplyDiffOperation(op, op_name, data, old_part_file,
                                  new_part_file)
       else:
@@ -504,7 +504,7 @@
         shutil.copyfile(old_part_file_name, new_part_file_name)
       elif (self.minor_version == common.SOURCE_MINOR_PAYLOAD_VERSION or
             self.minor_version == common.OPSRCHASH_MINOR_PAYLOAD_VERSION or
-            self.minor_version == common.IMGDIFF_MINOR_PAYLOAD_VERSION):
+            self.minor_version == common.PUFFDIFF_MINOR_PAYLOAD_VERSION):
         # In minor version >= 2, we don't want to copy the partitions, so
         # instead just make the new partition file.
         open(new_part_file_name, 'w').close()
diff --git a/scripts/update_payload/checker.py b/scripts/update_payload/checker.py
index e13ea13..3144395 100644
--- a/scripts/update_payload/checker.py
+++ b/scripts/update_payload/checker.py
@@ -816,7 +816,7 @@
       raise error.PayloadError('%s: excess dst blocks.' % op_name)
 
   def _CheckAnyDiffOperation(self, data_length, total_dst_blocks, op_name):
-    """Specific checks for BSDIFF, SOURCE_BSDIFF and IMGDIFF operations.
+    """Specific checks for BSDIFF, SOURCE_BSDIFF and PUFFDIFF operations.
 
     Args:
       data_length: The length of the data blob associated with the operation.
@@ -981,7 +981,7 @@
     elif op.type == common.OpType.SOURCE_BSDIFF and self.minor_version >= 2:
       self._CheckAnyDiffOperation(data_length, total_dst_blocks, op_name)
       self._CheckAnySourceOperation(op, total_src_blocks, op_name)
-    elif op.type == common.OpType.IMGDIFF and self.minor_version >= 4:
+    elif op.type == common.OpType.PUFFDIFF and self.minor_version >= 4:
       self._CheckAnyDiffOperation(data_length, total_dst_blocks, op_name)
       self._CheckAnySourceOperation(op, total_src_blocks, op_name)
     else:
@@ -1041,7 +1041,7 @@
         common.OpType.BSDIFF: 0,
         common.OpType.SOURCE_COPY: 0,
         common.OpType.SOURCE_BSDIFF: 0,
-        common.OpType.IMGDIFF: 0,
+        common.OpType.PUFFDIFF: 0,
     }
     # Total blob sizes for each operation type.
     op_blob_totals = {
@@ -1051,7 +1051,7 @@
         common.OpType.BSDIFF: 0,
         # SOURCE_COPY operations don't have blobs.
         common.OpType.SOURCE_BSDIFF: 0,
-        common.OpType.IMGDIFF: 0,
+        common.OpType.PUFFDIFF: 0,
     }
     # Counts of hashed vs unhashed operations.
     blob_hash_counts = {
diff --git a/scripts/update_payload/checker_unittest.py b/scripts/update_payload/checker_unittest.py
index 56b1a30..a0a5056 100755
--- a/scripts/update_payload/checker_unittest.py
+++ b/scripts/update_payload/checker_unittest.py
@@ -38,7 +38,7 @@
       'ZERO': common.OpType.ZERO,
       'DISCARD': common.OpType.DISCARD,
       'REPLACE_XZ': common.OpType.REPLACE_XZ,
-      'IMGDIFF': common.OpType.IMGDIFF,
+      'PUFFDIFF': common.OpType.PUFFDIFF,
   }
   return op_name_to_type[op_name]
 
diff --git a/scripts/update_payload/common.py b/scripts/update_payload/common.py
index 678fc5d..bab8a4f 100644
--- a/scripts/update_payload/common.py
+++ b/scripts/update_payload/common.py
@@ -27,7 +27,7 @@
 INPLACE_MINOR_PAYLOAD_VERSION = 1
 SOURCE_MINOR_PAYLOAD_VERSION = 2
 OPSRCHASH_MINOR_PAYLOAD_VERSION = 3
-IMGDIFF_MINOR_PAYLOAD_VERSION = 4
+PUFFDIFF_MINOR_PAYLOAD_VERSION = 4
 
 #
 # Payload operation types.
@@ -45,9 +45,9 @@
   ZERO = _CLASS.ZERO
   DISCARD = _CLASS.DISCARD
   REPLACE_XZ = _CLASS.REPLACE_XZ
-  IMGDIFF = _CLASS.IMGDIFF
+  PUFFDIFF = _CLASS.PUFFDIFF
   ALL = (REPLACE, REPLACE_BZ, MOVE, BSDIFF, SOURCE_COPY, SOURCE_BSDIFF, ZERO,
-         DISCARD, REPLACE_XZ, IMGDIFF)
+         DISCARD, REPLACE_XZ, PUFFDIFF)
   NAMES = {
       REPLACE: 'REPLACE',
       REPLACE_BZ: 'REPLACE_BZ',
@@ -58,7 +58,7 @@
       ZERO: 'ZERO',
       DISCARD: 'DISCARD',
       REPLACE_XZ: 'REPLACE_XZ',
-      IMGDIFF: 'IMGDIFF',
+      PUFFDIFF: 'PUFFDIFF',
   }
 
   def __init__(self):
diff --git a/scripts/update_payload/update_metadata_pb2.py b/scripts/update_payload/update_metadata_pb2.py
index 46c475e..8cf87e3 100644
--- a/scripts/update_payload/update_metadata_pb2.py
+++ b/scripts/update_payload/update_metadata_pb2.py
@@ -13,7 +13,7 @@
 DESCRIPTOR = _descriptor.FileDescriptor(
   name='update_metadata.proto',
   package='chromeos_update_engine',
-  serialized_pb='\n\x15update_metadata.proto\x12\x16\x63hromeos_update_engine\"1\n\x06\x45xtent\x12\x13\n\x0bstart_block\x18\x01 \x01(\x04\x12\x12\n\nnum_blocks\x18\x02 \x01(\x04\"z\n\nSignatures\x12@\n\nsignatures\x18\x01 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x1a*\n\tSignature\x12\x0f\n\x07version\x18\x01 \x01(\r\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\"+\n\rPartitionInfo\x12\x0c\n\x04size\x18\x01 \x01(\x04\x12\x0c\n\x04hash\x18\x02 \x01(\x0c\"w\n\tImageInfo\x12\r\n\x05\x62oard\x18\x01 \x01(\t\x12\x0b\n\x03key\x18\x02 \x01(\t\x12\x0f\n\x07\x63hannel\x18\x03 \x01(\t\x12\x0f\n\x07version\x18\x04 \x01(\t\x12\x15\n\rbuild_channel\x18\x05 \x01(\t\x12\x15\n\rbuild_version\x18\x06 \x01(\t\"\xd2\x03\n\x10InstallOperation\x12;\n\x04type\x18\x01 \x02(\x0e\x32-.chromeos_update_engine.InstallOperation.Type\x12\x13\n\x0b\x64\x61ta_offset\x18\x02 \x01(\r\x12\x13\n\x0b\x64\x61ta_length\x18\x03 \x01(\r\x12\x33\n\x0bsrc_extents\x18\x04 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\nsrc_length\x18\x05 \x01(\x04\x12\x33\n\x0b\x64st_extents\x18\x06 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\ndst_length\x18\x07 \x01(\x04\x12\x18\n\x10\x64\x61ta_sha256_hash\x18\x08 \x01(\x0c\x12\x17\n\x0fsrc_sha256_hash\x18\t \x01(\x0c\"\x91\x01\n\x04Type\x12\x0b\n\x07REPLACE\x10\x00\x12\x0e\n\nREPLACE_BZ\x10\x01\x12\x08\n\x04MOVE\x10\x02\x12\n\n\x06\x42SDIFF\x10\x03\x12\x0f\n\x0bSOURCE_COPY\x10\x04\x12\x11\n\rSOURCE_BSDIFF\x10\x05\x12\x08\n\x04ZERO\x10\x06\x12\x0b\n\x07\x44ISCARD\x10\x07\x12\x0e\n\nREPLACE_XZ\x10\x08\x12\x0b\n\x07IMGDIFF\x10\t\"\x88\x03\n\x0fPartitionUpdate\x12\x16\n\x0epartition_name\x18\x01 \x02(\t\x12\x17\n\x0frun_postinstall\x18\x02 \x01(\x08\x12\x18\n\x10postinstall_path\x18\x03 \x01(\t\x12\x17\n\x0f\x66ilesystem_type\x18\x04 \x01(\t\x12M\n\x17new_partition_signature\x18\x05 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x12\x41\n\x12old_partition_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12\x41\n\x12new_partition_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12<\n\noperations\x18\x08 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\"\xc4\x05\n\x14\x44\x65ltaArchiveManifest\x12\x44\n\x12install_operations\x18\x01 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12K\n\x19kernel_install_operations\x18\x02 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12\x18\n\nblock_size\x18\x03 \x01(\r:\x04\x34\x30\x39\x36\x12\x19\n\x11signatures_offset\x18\x04 \x01(\x04\x12\x17\n\x0fsignatures_size\x18\x05 \x01(\x04\x12>\n\x0fold_kernel_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12>\n\x0fnew_kernel_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12>\n\x0fold_rootfs_info\x18\x08 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12>\n\x0fnew_rootfs_info\x18\t \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12\x39\n\x0eold_image_info\x18\n \x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x39\n\x0enew_image_info\x18\x0b \x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x18\n\rminor_version\x18\x0c \x01(\r:\x01\x30\x12;\n\npartitions\x18\r \x03(\x0b\x32\'.chromeos_update_engine.PartitionUpdateB\x02H\x03')
+  serialized_pb='\n\x15update_metadata.proto\x12\x16\x63hromeos_update_engine\"1\n\x06\x45xtent\x12\x13\n\x0bstart_block\x18\x01 \x01(\x04\x12\x12\n\nnum_blocks\x18\x02 \x01(\x04\"z\n\nSignatures\x12@\n\nsignatures\x18\x01 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x1a*\n\tSignature\x12\x0f\n\x07version\x18\x01 \x01(\r\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\"+\n\rPartitionInfo\x12\x0c\n\x04size\x18\x01 \x01(\x04\x12\x0c\n\x04hash\x18\x02 \x01(\x0c\"w\n\tImageInfo\x12\r\n\x05\x62oard\x18\x01 \x01(\t\x12\x0b\n\x03key\x18\x02 \x01(\t\x12\x0f\n\x07\x63hannel\x18\x03 \x01(\t\x12\x0f\n\x07version\x18\x04 \x01(\t\x12\x15\n\rbuild_channel\x18\x05 \x01(\t\x12\x15\n\rbuild_version\x18\x06 \x01(\t\"\xd3\x03\n\x10InstallOperation\x12;\n\x04type\x18\x01 \x02(\x0e\x32-.chromeos_update_engine.InstallOperation.Type\x12\x13\n\x0b\x64\x61ta_offset\x18\x02 \x01(\r\x12\x13\n\x0b\x64\x61ta_length\x18\x03 \x01(\r\x12\x33\n\x0bsrc_extents\x18\x04 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\nsrc_length\x18\x05 \x01(\x04\x12\x33\n\x0b\x64st_extents\x18\x06 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\ndst_length\x18\x07 \x01(\x04\x12\x18\n\x10\x64\x61ta_sha256_hash\x18\x08 \x01(\x0c\x12\x17\n\x0fsrc_sha256_hash\x18\t \x01(\x0c\"\x92\x01\n\x04Type\x12\x0b\n\x07REPLACE\x10\x00\x12\x0e\n\nREPLACE_BZ\x10\x01\x12\x08\n\x04MOVE\x10\x02\x12\n\n\x06\x42SDIFF\x10\x03\x12\x0f\n\x0bSOURCE_COPY\x10\x04\x12\x11\n\rSOURCE_BSDIFF\x10\x05\x12\x08\n\x04ZERO\x10\x06\x12\x0b\n\x07\x44ISCARD\x10\x07\x12\x0e\n\nREPLACE_XZ\x10\x08\x12\x0c\n\x08PUFFDIFF\x10\t\"\xa6\x03\n\x0fPartitionUpdate\x12\x16\n\x0epartition_name\x18\x01 \x02(\t\x12\x17\n\x0frun_postinstall\x18\x02 \x01(\x08\x12\x18\n\x10postinstall_path\x18\x03 \x01(\t\x12\x17\n\x0f\x66ilesystem_type\x18\x04 \x01(\t\x12M\n\x17new_partition_signature\x18\x05 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x12\x41\n\x12old_partition_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12\x41\n\x12new_partition_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12<\n\noperations\x18\x08 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12\x1c\n\x14postinstall_optional\x18\t \x01(\x08\"\xc4\x05\n\x14\x44\x65ltaArchiveManifest\x12\x44\n\x12install_operations\x18\x01 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12K\n\x19kernel_install_operations\x18\x02 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12\x18\n\nblock_size\x18\x03 \x01(\r:\x04\x34\x30\x39\x36\x12\x19\n\x11signatures_offset\x18\x04 \x01(\x04\x12\x17\n\x0fsignatures_size\x18\x05 \x01(\x04\x12>\n\x0fold_kernel_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12>\n\x0fnew_kernel_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12>\n\x0fold_rootfs_info\x18\x08 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12>\n\x0fnew_rootfs_info\x18\t \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12\x39\n\x0eold_image_info\x18\n \x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x39\n\x0enew_image_info\x18\x0b \x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x18\n\rminor_version\x18\x0c \x01(\r:\x01\x30\x12;\n\npartitions\x18\r \x03(\x0b\x32\'.chromeos_update_engine.PartitionUpdateB\x02H\x03')
 
 
 
@@ -60,14 +60,14 @@
       options=None,
       type=None),
     _descriptor.EnumValueDescriptor(
-      name='IMGDIFF', index=9, number=9,
+      name='PUFFDIFF', index=9, number=9,
       options=None,
       type=None),
   ],
   containing_type=None,
   options=None,
   serialized_start=712,
-  serialized_end=857,
+  serialized_end=858,
 )
 
 
@@ -347,7 +347,7 @@
   is_extendable=False,
   extension_ranges=[],
   serialized_start=391,
-  serialized_end=857,
+  serialized_end=858,
 )
 
 
@@ -414,6 +414,13 @@
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
       options=None),
+    _descriptor.FieldDescriptor(
+      name='postinstall_optional', full_name='chromeos_update_engine.PartitionUpdate.postinstall_optional', index=8,
+      number=9, type=8, cpp_type=7, label=1,
+      has_default_value=False, default_value=False,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
   ],
   extensions=[
   ],
@@ -423,8 +430,8 @@
   options=None,
   is_extendable=False,
   extension_ranges=[],
-  serialized_start=860,
-  serialized_end=1252,
+  serialized_start=861,
+  serialized_end=1283,
 )
 
 
@@ -535,8 +542,8 @@
   options=None,
   is_extendable=False,
   extension_ranges=[],
-  serialized_start=1255,
-  serialized_end=1963,
+  serialized_start=1286,
+  serialized_end=1994,
 )
 
 _SIGNATURES_SIGNATURE.containing_type = _SIGNATURES;
diff --git a/update_attempter.cc b/update_attempter.cc
index ff3b046..9cf9368 100644
--- a/update_attempter.cc
+++ b/update_attempter.cc
@@ -126,12 +126,12 @@
         network_proxy_service_proxy)
     : processor_(new ActionProcessor()),
       system_state_(system_state),
-#if USE_LIBCROS
+#if USE_CHROME_NETWORK_PROXY
       cert_checker_(cert_checker),
       chrome_proxy_resolver_(network_proxy_service_proxy) {
 #else
       cert_checker_(cert_checker) {
-#endif  // USE_LIBCROS
+#endif  // USE_CHROME_NETWORK_PROXY
 }
 
 UpdateAttempter::~UpdateAttempter() {
diff --git a/update_attempter.h b/update_attempter.h
index 7780357..b4e2f60 100644
--- a/update_attempter.h
+++ b/update_attempter.h
@@ -29,9 +29,9 @@
 #include <base/time/time.h>
 #include <gtest/gtest_prod.h>  // for FRIEND_TEST
 
-#if USE_LIBCROS
+#if USE_CHROME_NETWORK_PROXY
 #include "update_engine/chrome_browser_proxy_resolver.h"
-#endif  // USE_LIBCROS
+#endif  // USE_CHROME_NETWORK_PROXY
 #include "update_engine/certificate_checker.h"
 #include "update_engine/client_library/include/update_engine/update_status.h"
 #include "update_engine/common/action_processor.h"
@@ -304,13 +304,13 @@
   void MarkDeltaUpdateFailure();
 
   ProxyResolver* GetProxyResolver() {
-#if USE_LIBCROS
+#if USE_CHROME_NETWORK_PROXY
     return obeying_proxies_ ?
         reinterpret_cast<ProxyResolver*>(&chrome_proxy_resolver_) :
         reinterpret_cast<ProxyResolver*>(&direct_proxy_resolver_);
 #else
     return &direct_proxy_resolver_;
-#endif  // USE_LIBCROS
+#endif  // USE_CHROME_NETWORK_PROXY
   }
 
   // Sends a ping to Omaha.
@@ -452,9 +452,9 @@
 
   // Our two proxy resolvers
   DirectProxyResolver direct_proxy_resolver_;
-#if USE_LIBCROS
+#if USE_CHROME_NETWORK_PROXY
   ChromeBrowserProxyResolver chrome_proxy_resolver_;
-#endif  // USE_LIBCROS
+#endif  // USE_CHROME_NETWORK_PROXY
 
   // Originally, both of these flags are false. Once UpdateBootFlags is called,
   // |update_boot_flags_running_| is set to true. As soon as UpdateBootFlags
diff --git a/update_attempter_unittest.cc b/update_attempter_unittest.cc
index 4928477..4ebc85c 100644
--- a/update_attempter_unittest.cc
+++ b/update_attempter_unittest.cc
@@ -31,10 +31,10 @@
 #include <policy/libpolicy.h>
 #include <policy/mock_device_policy.h>
 
-#if USE_LIBCROS
+#if USE_CHROME_NETWORK_PROXY
 #include "network_proxy/dbus-proxies.h"
 #include "network_proxy/dbus-proxy-mocks.h"
-#endif // USE_LIBCROS
+#endif  // USE_CHROME_NETWORK_PROXY
 #include "update_engine/common/fake_clock.h"
 #include "update_engine/common/fake_prefs.h"
 #include "update_engine/common/mock_action.h"
@@ -187,13 +187,13 @@
   brillo::BaseMessageLoop loop_{&base_loop_};
 
   FakeSystemState fake_system_state_;
-#if USE_LIBCROS
+#if USE_CHROME_NETWORK_PROXY
   NetworkProxyServiceInterfaceProxyMock network_proxy_service_proxy_mock_;
   UpdateAttempterUnderTest attempter_{&fake_system_state_,
                                       &network_proxy_service_proxy_mock_};
 #else
   UpdateAttempterUnderTest attempter_{&fake_system_state_, nullptr};
-#endif  // USE_LIBCROS
+#endif  // USE_CHROME_NETWORK_PROXY
   OpenSSLWrapper openssl_wrapper_;
   CertificateChecker certificate_checker_;
 
diff --git a/update_engine.gyp b/update_engine.gyp
index 6b7e5f4..467000e 100644
--- a/update_engine.gyp
+++ b/update_engine.gyp
@@ -14,6 +14,10 @@
 # limitations under the License.
 #
 {
+  'variables': {
+    'USE_chrome_network_proxy': '1',
+    'USE_chrome_kiosk_app': '1',
+  },
   'target_defaults': {
     'variables': {
       'deps': [
@@ -47,7 +51,8 @@
       'USE_BINDER=<(USE_binder)',
       'USE_DBUS=<(USE_dbus)',
       'USE_HWID_OVERRIDE=<(USE_hwid_override)',
-      'USE_LIBCROS=<(USE_libcros)',
+      'USE_CHROME_KIOSK_APP=<(USE_chrome_kiosk_app)',
+      'USE_CHROME_NETWORK_PROXY=<(USE_chrome_network_proxy)',
       'USE_MTD=<(USE_mtd)',
       'USE_OMAHA=1',
       'USE_SHILL=1',
@@ -98,32 +103,34 @@
       'includes': ['../../../platform2/common-mk/generate-dbus-adaptors.gypi'],
     },
     {
-      'target_name': 'update_engine-other-dbus-proxies',
+      'target_name': 'update_engine-dbus-libcros-client',
       'type': 'none',
-      'actions': [
-        {
-          'action_name': 'update_engine-dbus-libcros-client',
-          'variables': {
-            'mock_output_file': 'include/libcros/dbus-proxy-mocks.h',
-            'proxy_output_file': 'include/libcros/dbus-proxies.h'
-          },
-          'sources': [
-            'dbus_bindings/org.chromium.LibCrosService.dbus-xml',
-          ],
-          'includes': ['../../../platform2/common-mk/generate-dbus-proxies.gypi'],
+      'actions': [{
+        'action_name': 'update_engine-dbus-libcros-client-action',
+        'variables': {
+          'mock_output_file': 'include/libcros/dbus-proxy-mocks.h',
+          'proxy_output_file': 'include/libcros/dbus-proxies.h'
         },
-        {
-          'action_name': 'update_engine-dbus-network_proxy-client',
-          'variables': {
-            'mock_output_file': 'include/network_proxy/dbus-proxy-mocks.h',
-            'proxy_output_file': 'include/network_proxy/dbus-proxies.h'
-          },
-          'sources': [
-            'dbus_bindings/org.chromium.NetworkProxyService.dbus-xml',
-          ],
-          'includes': ['../../../platform2/common-mk/generate-dbus-proxies.gypi'],
+        'sources': [
+          'dbus_bindings/org.chromium.LibCrosService.dbus-xml',
+        ],
+        'includes': ['../../../platform2/common-mk/generate-dbus-proxies.gypi'],
+      }],
+    },
+    {
+      'target_name': 'update_engine-dbus-chrome_network_proxy-client',
+      'type': 'none',
+      'actions': [{
+        'action_name': 'update_engine-dbus-chrome_network_proxy-client-action',
+        'variables': {
+          'mock_output_file': 'include/network_proxy/dbus-proxy-mocks.h',
+          'proxy_output_file': 'include/network_proxy/dbus-proxies.h'
         },
-      ],
+        'sources': [
+          'dbus_bindings/org.chromium.NetworkProxyService.dbus-xml',
+        ],
+        'includes': ['../../../platform2/common-mk/generate-dbus-proxies.gypi'],
+      }],
     },
     # The payload application component and common dependencies.
     {
@@ -182,6 +189,7 @@
         'payload_consumer/download_action.cc',
         'payload_consumer/extent_writer.cc',
         'payload_consumer/file_descriptor.cc',
+        'payload_consumer/file_descriptor_utils.cc',
         'payload_consumer/file_writer.cc',
         'payload_consumer/filesystem_verifier_action.cc',
         'payload_consumer/install_plan.cc',
@@ -212,7 +220,6 @@
         'libpayload_consumer',
         'update_metadata-protos',
         'update_engine-dbus-adaptor',
-        'update_engine-other-dbus-proxies',
       ],
       'variables': {
         'exported_deps': [
@@ -292,14 +299,19 @@
         'update_status_utils.cc',
       ],
       'conditions': [
-        ['USE_libcros == 1', {
+        ['USE_chrome_network_proxy == 1', {
           'dependencies': [
-            'update_engine-other-dbus-proxies',
+            'update_engine-dbus-chrome_network_proxy-client',
           ],
           'sources': [
             'chrome_browser_proxy_resolver.cc',
           ],
         }],
+        ['USE_chrome_kiosk_app == 1', {
+          'dependencies': [
+            'update_engine-dbus-libcros-client',
+          ],
+        }],
       ],
     },
     # update_engine daemon.
@@ -373,6 +385,9 @@
             '<@(exported_deps)',
           ],
         },
+        'libraries': [
+          '-lbsdiff',
+        ],
       },
       'sources': [
         'payload_generator/ab_generator.cc',
@@ -519,6 +534,8 @@
             'payload_consumer/delta_performer_unittest.cc',
             'payload_consumer/download_action_unittest.cc',
             'payload_consumer/extent_writer_unittest.cc',
+            'payload_consumer/fake_file_descriptor.cc',
+            'payload_consumer/file_descriptor_utils_unittest.cc',
             'payload_consumer/file_writer_unittest.cc',
             'payload_consumer/filesystem_verifier_action_unittest.cc',
             'payload_consumer/postinstall_runner_action_unittest.cc',
@@ -563,11 +580,19 @@
             'testrunner.cc',
           ],
           'conditions': [
-            ['USE_libcros == 1', {
+            ['USE_chrome_network_proxy == 1', {
+              'dependencies': [
+                'update_engine-dbus-chrome_network_proxy-client',
+              ],
               'sources': [
                 'chrome_browser_proxy_resolver_unittest.cc',
               ],
             }],
+            ['USE_chrome_kiosk_app == 1', {
+              'dependencies': [
+                'update_engine-dbus-libcros-client',
+              ],
+            }],
           ],
         },
       ],
diff --git a/update_manager/real_system_provider.cc b/update_manager/real_system_provider.cc
index 9b968ca..fdf7e86 100644
--- a/update_manager/real_system_provider.cc
+++ b/update_manager/real_system_provider.cc
@@ -20,9 +20,9 @@
 #include <base/callback.h>
 #include <base/logging.h>
 #include <base/time/time.h>
-#if USE_LIBCROS
+#if USE_CHROME_KIOSK_APP
 #include <libcros/dbus-proxies.h>
-#endif
+#endif  // USE_CHROME_KIOSK_APP
 
 #include "update_engine/common/utils.h"
 #include "update_engine/update_manager/generic_variables.h"
@@ -124,7 +124,7 @@
 
 bool RealSystemProvider::GetKioskAppRequiredPlatformVersion(
     string* required_platform_version) {
-#if USE_LIBCROS
+#if USE_CHROME_KIOSK_APP
   brillo::ErrorPtr error;
   if (!libcros_proxy_->GetKioskAppRequiredPlatformVersion(
           required_platform_version, &error)) {
@@ -132,7 +132,7 @@
     required_platform_version->clear();
     return false;
   }
-#endif
+#endif  // USE_CHROME_KIOSK_APP
 
   return true;
 }
diff --git a/update_manager/real_system_provider.h b/update_manager/real_system_provider.h
index a62e1ae..80a8615 100644
--- a/update_manager/real_system_provider.h
+++ b/update_manager/real_system_provider.h
@@ -40,8 +40,12 @@
       chromeos_update_engine::BootControlInterface* boot_control,
       org::chromium::LibCrosServiceInterfaceProxyInterface* libcros_proxy)
       : hardware_(hardware),
+#if USE_CHROME_KIOSK_APP
         boot_control_(boot_control),
         libcros_proxy_(libcros_proxy) {}
+#else
+        boot_control_(boot_control) {}
+#endif  // USE_CHROME_KIOSK_APP
 
   // Initializes the provider and returns whether it succeeded.
   bool Init();
@@ -78,8 +82,9 @@
 
   chromeos_update_engine::HardwareInterface* const hardware_;
   chromeos_update_engine::BootControlInterface* const boot_control_;
-  org::chromium::LibCrosServiceInterfaceProxyInterface* const libcros_proxy_
-      ALLOW_UNUSED_TYPE;
+#if USE_CHROME_KIOSK_APP
+  org::chromium::LibCrosServiceInterfaceProxyInterface* const libcros_proxy_;
+#endif  // USE_CHROME_KIOSK_APP
 
   DISALLOW_COPY_AND_ASSIGN(RealSystemProvider);
 };
diff --git a/update_manager/real_system_provider_unittest.cc b/update_manager/real_system_provider_unittest.cc
index 821a6cc..33838e5 100644
--- a/update_manager/real_system_provider_unittest.cc
+++ b/update_manager/real_system_provider_unittest.cc
@@ -26,30 +26,30 @@
 #include "update_engine/common/fake_boot_control.h"
 #include "update_engine/common/fake_hardware.h"
 #include "update_engine/update_manager/umtest_utils.h"
-#if USE_LIBCROS
+#if USE_CHROME_KIOSK_APP
 #include "libcros/dbus-proxies.h"
 #include "libcros/dbus-proxy-mocks.h"
 
 using org::chromium::LibCrosServiceInterfaceProxyMock;
-#endif  // USE_LIBCROS
+#endif  // USE_CHROME_KIOSK_APP
 using std::unique_ptr;
 using testing::_;
 using testing::DoAll;
 using testing::Return;
 using testing::SetArgPointee;
 
-#if USE_LIBCROS
+#if USE_CHROME_KIOSK_APP
 namespace {
 const char kRequiredPlatformVersion[] ="1234.0.0";
 }  // namespace
-#endif  // USE_LIBCROS
+#endif  // USE_CHROME_KIOSK_APP
 
 namespace chromeos_update_manager {
 
 class UmRealSystemProviderTest : public ::testing::Test {
  protected:
   void SetUp() override {
-#if USE_LIBCROS
+#if USE_CHROME_KIOSK_APP
     libcros_proxy_mock_.reset(new LibCrosServiceInterfaceProxyMock());
     ON_CALL(*libcros_proxy_mock_,
             GetKioskAppRequiredPlatformVersion(_, _, _))
@@ -61,7 +61,7 @@
 #else
     provider_.reset(
         new RealSystemProvider(&fake_hardware_, &fake_boot_control_, nullptr));
-#endif  // USE_LIBCROS
+#endif  // USE_CHROME_KIOSK_APP
     EXPECT_TRUE(provider_->Init());
   }
 
@@ -69,9 +69,9 @@
   chromeos_update_engine::FakeBootControl fake_boot_control_;
   unique_ptr<RealSystemProvider> provider_;
 
-#if USE_LIBCROS
+#if USE_CHROME_KIOSK_APP
   unique_ptr<LibCrosServiceInterfaceProxyMock> libcros_proxy_mock_;
-#endif  // USE_LIBCROS
+#endif  // USE_CHROME_KIOSK_APP
 };
 
 TEST_F(UmRealSystemProviderTest, InitTest) {
@@ -91,7 +91,7 @@
   UmTestUtils::ExpectVariableHasValue(false, provider_->var_is_oobe_complete());
 }
 
-#if USE_LIBCROS
+#if USE_CHROME_KIOSK_APP
 TEST_F(UmRealSystemProviderTest, KioskRequiredPlatformVersion) {
   UmTestUtils::ExpectVariableHasValue(
       std::string(kRequiredPlatformVersion),
@@ -129,6 +129,6 @@
   UmTestUtils::ExpectVariableHasValue(
       std::string(), provider_->var_kiosk_required_platform_version());
 }
-#endif
+#endif  // USE_CHROME_KIOSK_APP
 
 }  // namespace chromeos_update_manager
diff --git a/update_manager/state_factory.cc b/update_manager/state_factory.cc
index 70fc80b..a79b676 100644
--- a/update_manager/state_factory.cc
+++ b/update_manager/state_factory.cc
@@ -73,6 +73,7 @@
   unique_ptr<RealRandomProvider> random_provider(new RealRandomProvider());
   unique_ptr<RealSystemProvider> system_provider(new RealSystemProvider(
       system_state->hardware(), system_state->boot_control(), libcros_proxy));
+
   unique_ptr<RealTimeProvider> time_provider(new RealTimeProvider(clock));
   unique_ptr<RealUpdaterProvider> updater_provider(
       new RealUpdaterProvider(system_state));
diff --git a/update_metadata.proto b/update_metadata.proto
index 454c736..4fe5fd3 100644
--- a/update_metadata.proto
+++ b/update_metadata.proto
@@ -164,7 +164,7 @@
     REPLACE_XZ = 8; // Replace destination extents w/ attached xz data.
 
     // On minor version 4 or newer, these operations are supported:
-    IMGDIFF = 9; // The data is in imgdiff format.
+    PUFFDIFF = 9;  // The data is in puffdiff format.
   }
   required Type type = 1;
   // The offset into the delta file (after the protobuf)