Empty merge of Android 24Q2 Release (ab/11526283) to aosp-main-future

Bug: 337098550
Merged-In: I1e3144c7888db070fd5f73d1607e9e458f2932a2
Change-Id: Ia77202431d6971b9c417c3874cdc5d0809f92c37
diff --git a/Android.bp b/Android.bp
index 62afe09..4e5f6f5 100644
--- a/Android.bp
+++ b/Android.bp
@@ -422,6 +422,16 @@
 }
 
 cc_library_static {
+    name: "libupdate_engine_boot_control_nostats",
+    cflags: ["-DUE_DISABLE_STATS"],
+    defaults: [
+        "libupdate_engine_boot_control_defaults",
+        "libupdate_engine_boot_control_exports",
+        "libpayload_consumer_exports",
+    ],
+}
+
+cc_library_static {
     name: "libupdate_engine_boot_control_proto-full",
     defaults: [
         "libupdate_engine_boot_control_defaults",
@@ -564,6 +574,43 @@
     init_rc: ["update_engine.rc"],
 }
 
+// update_engine_nostats (type: executable)
+// ========================================================
+// update_engine daemon version without the stats integration.
+cc_binary {
+    name: "update_engine_nostats",
+    defaults: [
+        "ue_defaults",
+        "libupdate_engine_android_exports",
+    ],
+
+    static_libs: [
+        "libupdate_engine_android",
+        "libgflags",
+        "libupdate_engine_boot_control_nostats",
+    ],
+    required: [
+        "cacerts",
+        "otacerts",
+    ],
+
+    exclude_static_libs: [
+        "libstatslog_ue",
+        "libupdate_engine_boot_control"
+    ],
+
+    exclude_shared_libs: [
+        "libstatssocket",
+    ],
+
+    cflags: ["-DUE_DISABLE_STATS"],
+    srcs: [
+        "main.cc",
+        "common/metrics_reporter_stub.cc",
+    ],
+    init_rc: ["update_engine_nostats.rc"],
+}
+
 // update_engine_sideload (type: executable)
 // ========================================================
 // A binary executable equivalent to update_engine daemon that installs an update
@@ -579,7 +626,10 @@
     ],
     recovery: true,
 
-    cflags: ["-D_UE_SIDELOAD"],
+    cflags: [
+        "-D_UE_SIDELOAD",
+        "-DUE_DISABLE_STATS",
+    ],
     header_libs: ["libgtest_prod_headers"],
 
     srcs: [
diff --git a/aosp/cleanup_previous_update_action.cc b/aosp/cleanup_previous_update_action.cc
index 3b54f80..9c0843c 100644
--- a/aosp/cleanup_previous_update_action.cc
+++ b/aosp/cleanup_previous_update_action.cc
@@ -26,7 +26,7 @@
 #include <base/bind.h>
 #include <libsnapshot/snapshot.h>
 
-#ifndef __ANDROID_RECOVERY__
+#if !defined(__ANDROID_RECOVERY__) && !defined(UE_DISABLE_STATS)
 #include <statslog_ue.h>
 #endif
 
@@ -502,6 +502,8 @@
 
 #ifdef __ANDROID_RECOVERY__
   LOG(INFO) << "Skip reporting merge stats in recovery.";
+#elif defined(UE_DISABLE_STATS)
+  LOG(INFO) << "Skip reporting merge stats because metrics are disabled.";
 #else
   const auto& report = result->report();
 
diff --git a/aosp/cow_converter.cc b/aosp/cow_converter.cc
index 3e8e5fd..32aa12f 100644
--- a/aosp/cow_converter.cc
+++ b/aosp/cow_converter.cc
@@ -95,6 +95,7 @@
                                   manifest.block_size(),
                                   cow_writer.get(),
                                   partition.new_partition_info().size(),
+                                  partition.old_partition_info().size(),
                                   false));
   TEST_AND_RETURN_FALSE(cow_writer->Finalize());
   return true;
diff --git a/common/utils.cc b/common/utils.cc
index f0c045f..c2c72d4 100644
--- a/common/utils.cc
+++ b/common/utils.cc
@@ -86,6 +86,7 @@
 // The path to the kernel's boot_id.
 const char kBootIdPath[] = "/proc/sys/kernel/random/boot_id";
 
+}  // namespace
 // If |path| is absolute, or explicit relative to the current working directory,
 // leaves it as is. Otherwise, uses the system's temp directory, as defined by
 // base::GetTempDir() and prepends it to |path|. On success stores the full
@@ -110,8 +111,6 @@
   return true;
 }
 
-}  // namespace
-
 namespace utils {
 
 bool WriteFile(const char* path, const void* data, size_t data_len) {
@@ -413,6 +412,9 @@
 }
 
 bool DeleteDirectory(const char* dirname) {
+  if (!std::filesystem::exists(dirname)) {
+    return true;
+  }
   const std::string tmpdir = std::string(dirname) + "_deleted";
   std::filesystem::remove_all(tmpdir);
   if (rename(dirname, tmpdir.c_str()) != 0) {
@@ -621,7 +623,8 @@
    */
   rc = ioctl(fd, BLKROGET, &read_only_flag);
   if (rc != 0) {
-    PLOG(ERROR) << "Failed to read back block device read-only value:" << device;
+    PLOG(ERROR) << "Failed to read back block device read-only value:"
+                << device;
     return false;
   }
   if (read_only_flag == expected_flag) {
@@ -629,20 +632,20 @@
   }
 
   std::array<char, PATH_MAX> device_name;
-  char *pdevice = realpath(device.c_str(), device_name.data());
+  char* pdevice = realpath(device.c_str(), device_name.data());
   TEST_AND_RETURN_FALSE_ERRNO(pdevice);
 
   std::string real_path(pdevice);
   std::size_t offset = real_path.find_last_of('/');
-  if (offset == std::string::npos){
+  if (offset == std::string::npos) {
     LOG(ERROR) << "Could not find partition name from " << real_path;
     return false;
   }
   const std::string partition_name = real_path.substr(offset + 1);
 
   std::string force_ro_file = "/sys/block/" + partition_name + "/force_ro";
-  android::base::unique_fd fd_force_ro {
-    HANDLE_EINTR(open(force_ro_file.c_str(), O_WRONLY | O_CLOEXEC))};
+  android::base::unique_fd fd_force_ro{
+      HANDLE_EINTR(open(force_ro_file.c_str(), O_WRONLY | O_CLOEXEC))};
   TEST_AND_RETURN_FALSE_ERRNO(fd_force_ro >= 0);
 
   rc = write(fd_force_ro, expected_flag ? "1" : "0", 1);
@@ -651,12 +654,13 @@
   // Read back again
   rc = ioctl(fd, BLKROGET, &read_only_flag);
   if (rc != 0) {
-    PLOG(ERROR) << "Failed to read back block device read-only value:" << device;
+    PLOG(ERROR) << "Failed to read back block device read-only value:"
+                << device;
     return false;
   }
   if (read_only_flag != expected_flag) {
     LOG(ERROR) << "After modifying force_ro, marking block device " << device
-                << " as read_only=" << expected_flag;
+               << " as read_only=" << expected_flag;
     return false;
   }
   return true;
diff --git a/common/utils.h b/common/utils.h
index 6bb89f1..ae07b07 100644
--- a/common/utils.h
+++ b/common/utils.h
@@ -559,6 +559,8 @@
 [[nodiscard]] std::string_view ToStringView(const void* data,
                                             size_t size) noexcept;
 
+bool GetTempName(const std::string& path, base::FilePath* template_path);
+
 }  // namespace chromeos_update_engine
 
 #define TEST_AND_RETURN_FALSE_ERRNO(_x)                             \
diff --git a/main.cc b/main.cc
index 103e1a1..ac2f5bc 100644
--- a/main.cc
+++ b/main.cc
@@ -14,6 +14,7 @@
 // limitations under the License.
 //
 
+#include <stdlib.h>
 #include <sys/stat.h>
 #include <sys/types.h>
 #include <xz.h>
@@ -27,6 +28,7 @@
 #include "update_engine/common/logging.h"
 #include "update_engine/common/subprocess.h"
 #include "update_engine/common/terminator.h"
+#include "update_engine/common/utils.h"
 
 using std::string;
 DEFINE_bool(logtofile, false, "Write logs to a file in log_dir.");
@@ -48,6 +50,14 @@
   bool log_to_system = FLAGS_logtostderr;
   bool log_to_file = FLAGS_logtofile || !FLAGS_logtostderr;
   chromeos_update_engine::SetupLogging(log_to_system, log_to_file);
+  base::FilePath tmpdir;
+  if (chromeos_update_engine::GetTempName("", &tmpdir)) {
+    LOG(INFO) << "Using temp dir " << tmpdir;
+    setenv("TMPDIR", tmpdir.value().c_str(), true);
+  } else {
+    PLOG(ERROR) << "Failed to create temporary directory, puffdiff will run "
+                   "w/o on disk cache, updates might take longer.";
+  }
   if (!FLAGS_foreground)
     PLOG_IF(FATAL, daemon(0, 0) == 1) << "daemon() failed";
 
diff --git a/payload_consumer/block_extent_writer.h b/payload_consumer/block_extent_writer.h
index eeae36d..516c24b 100644
--- a/payload_consumer/block_extent_writer.h
+++ b/payload_consumer/block_extent_writer.h
@@ -49,7 +49,7 @@
   bool NextExtent();
   [[nodiscard]] size_t ConsumeWithBuffer(const uint8_t* const bytes,
                                          const size_t count);
-  // It's a non-owning pointer, because PartitionWriter owns the CowWruter. This
+  // It's a non-owning pointer, because PartitionWriter owns the CowWriter. This
   // allows us to use a single instance of CowWriter for all operations applied
   // to the same partition.
   google::protobuf::RepeatedPtrField<Extent> extents_;
diff --git a/payload_consumer/delta_performer.cc b/payload_consumer/delta_performer.cc
index fe581b6..519ec71 100644
--- a/payload_consumer/delta_performer.cc
+++ b/payload_consumer/delta_performer.cc
@@ -503,7 +503,9 @@
     if (!CanPerformInstallOperation(op))
       return true;
     if (!ProcessOperation(&op, error)) {
-      LOG(ERROR) << "unable to process operation: " << *error;
+      LOG(ERROR) << "unable to process operation: "
+                 << InstallOperationTypeName(op.type())
+                 << " Error: " << utils::ErrorCodeToString(*error);
       return false;
     }
 
diff --git a/payload_generator/cow_size_estimator.cc b/payload_generator/cow_size_estimator.cc
index bb12113..2a97b06 100644
--- a/payload_generator/cow_size_estimator.cc
+++ b/payload_generator/cow_size_estimator.cc
@@ -17,7 +17,6 @@
 #include "update_engine/payload_generator/cow_size_estimator.h"
 
 #include <algorithm>
-#include <functional>
 #include <string>
 #include <utility>
 #include <vector>
@@ -26,6 +25,9 @@
 #include <libsnapshot/cow_writer.h>
 #include <libsnapshot/cow_format.h>
 
+#include "update_engine/payload_consumer/block_extent_writer.h"
+#include "update_engine/payload_consumer/snapshot_extent_writer.h"
+#include "update_engine/payload_consumer/xor_extent_writer.h"
 #include "update_engine/common/utils.h"
 #include "update_engine/payload_consumer/vabc_partition_writer.h"
 #include "update_engine/payload_generator/extent_ranges.h"
@@ -35,6 +37,17 @@
 namespace chromeos_update_engine {
 using android::snapshot::CreateCowEstimator;
 using android::snapshot::ICowWriter;
+// Compute the XOR map: dst extent -> corresponding merge operation.
+static ExtentMap<const CowMergeOperation*, ExtentLess> ComputeXorMap(
+    const google::protobuf::RepeatedPtrField<CowMergeOperation>& merge_ops) {
+  ExtentMap<const CowMergeOperation*, ExtentLess> xor_map;
+  for (const auto& merge_op : merge_ops) {
+    if (merge_op.type() == CowMergeOperation::COW_XOR) {
+      xor_map.AddExtent(merge_op.dst_extent(), &merge_op);
+    }
+  }
+  return xor_map;
+}
 
 bool CowDryRun(
     FileDescriptorPtr source_fd,
@@ -44,78 +57,113 @@
         merge_operations,
     const size_t block_size,
     android::snapshot::ICowWriter* cow_writer,
-    const size_t partition_size,
+    const size_t new_partition_size,
+    const size_t old_partition_size,
     const bool xor_enabled) {
   CHECK_NE(target_fd, nullptr);
   CHECK(target_fd->IsOpen());
   VABCPartitionWriter::WriteMergeSequence(merge_operations, cow_writer);
   ExtentRanges visited;
-  for (const auto& op : merge_operations) {
-    if (op.type() == CowMergeOperation::COW_COPY) {
-      visited.AddExtent(op.dst_extent());
-      cow_writer->AddCopy(op.dst_extent().start_block(),
-                          op.src_extent().start_block(),
-                          op.dst_extent().num_blocks());
-    } else if (op.type() == CowMergeOperation::COW_XOR && xor_enabled) {
-      CHECK_NE(source_fd, nullptr) << "Source fd is required to enable XOR ops";
-      CHECK(source_fd->IsOpen());
-      visited.AddExtent(op.dst_extent());
-      // dst block count is used, because
-      // src block count is probably(if src_offset > 0) 1 block
-      // larger than dst extent. Using it might lead to intreseting out of bound
-      // disk reads.
-      std::vector<unsigned char> old_data(op.dst_extent().num_blocks() *
-                                          block_size);
-      ssize_t bytes_read = 0;
-      if (!utils::PReadAll(
-              source_fd,
-              old_data.data(),
-              old_data.size(),
-              op.src_extent().start_block() * block_size + op.src_offset(),
-              &bytes_read)) {
-        PLOG(ERROR) << "Failed to read source data at " << op.src_extent();
-        return false;
-      }
-      std::vector<unsigned char> new_data(op.dst_extent().num_blocks() *
-                                          block_size);
-      if (!utils::PReadAll(target_fd,
-                           new_data.data(),
-                           new_data.size(),
-                           op.dst_extent().start_block() * block_size,
-                           &bytes_read)) {
-        PLOG(ERROR) << "Failed to read target data at " << op.dst_extent();
-        return false;
-      }
-      CHECK_GT(old_data.size(), 0UL);
-      CHECK_GT(new_data.size(), 0UL);
-      std::transform(new_data.begin(),
-                     new_data.end(),
-                     old_data.begin(),
-                     new_data.begin(),
-                     std::bit_xor<unsigned char>{});
-      CHECK(cow_writer->AddXorBlocks(op.dst_extent().start_block(),
-                                     new_data.data(),
-                                     new_data.size(),
-                                     op.src_extent().start_block(),
-                                     op.src_offset()));
+  SnapshotExtentWriter extent_writer(cow_writer);
+  ExtentMap<const CowMergeOperation*, ExtentLess> xor_map =
+      ComputeXorMap(merge_operations);
+  ExtentRanges copy_blocks;
+  for (const auto& cow_op : merge_operations) {
+    if (cow_op.type() != CowMergeOperation::COW_COPY) {
+      continue;
     }
-    // The value of label doesn't really matter, we just want to write some
-    // labels to simulate bahvior of update_engine. As update_engine writes
-    // labels every once a while when installing OTA, it's important that we do
-    // the same to get accurate size estimation.
-    cow_writer->AddLabel(0);
+    copy_blocks.AddExtent(cow_op.dst_extent());
   }
   for (const auto& op : operations) {
-    cow_writer->AddLabel(0);
-    if (op.type() == InstallOperation::ZERO) {
-      for (const auto& ext : op.dst_extents()) {
-        visited.AddExtent(ext);
-        cow_writer->AddZeroBlocks(ext.start_block(), ext.num_blocks());
+    switch (op.type()) {
+      case InstallOperation::SOURCE_BSDIFF:
+      case InstallOperation::BROTLI_BSDIFF:
+      case InstallOperation::PUFFDIFF:
+      case InstallOperation::ZUCCHINI:
+      case InstallOperation::LZ4DIFF_PUFFDIFF:
+      case InstallOperation::LZ4DIFF_BSDIFF: {
+        if (xor_enabled) {
+          std::unique_ptr<XORExtentWriter> writer =
+              std::make_unique<XORExtentWriter>(
+                  op, source_fd, cow_writer, xor_map, old_partition_size);
+          TEST_AND_RETURN_FALSE(writer->Init(op.dst_extents(), block_size));
+          for (const auto& ext : op.dst_extents()) {
+            visited.AddExtent(ext);
+            ssize_t bytes_read = 0;
+            std::vector<unsigned char> new_data(ext.num_blocks() * block_size);
+            if (!utils::PReadAll(target_fd,
+                                 new_data.data(),
+                                 new_data.size(),
+                                 ext.start_block() * block_size,
+                                 &bytes_read)) {
+              PLOG(ERROR) << "Failed to read target data at " << ext;
+              return false;
+            }
+            if (!writer->Write(new_data.data(),
+                               ext.num_blocks() * block_size)) {
+              LOG(ERROR) << "Failed to write XOR operation for extent: "
+                         << ext.start_block();
+              return false;
+            }
+          }
+          cow_writer->AddLabel(0);
+          break;
+        }
+        [[fallthrough]];
       }
+      case InstallOperation::REPLACE:
+      case InstallOperation::REPLACE_BZ:
+      case InstallOperation::REPLACE_XZ: {
+        TEST_AND_RETURN_FALSE(extent_writer.Init(op.dst_extents(), block_size));
+        for (const auto& ext : op.dst_extents()) {
+          visited.AddExtent(ext);
+          std::vector<unsigned char> data(ext.num_blocks() * block_size);
+          ssize_t bytes_read = 0;
+          if (!utils::PReadAll(target_fd,
+                               data.data(),
+                               data.size(),
+                               ext.start_block() * block_size,
+                               &bytes_read)) {
+            PLOG(ERROR) << "Failed to read new block data at " << ext;
+            return false;
+          }
+          if (!extent_writer.Write(data.data(), data.size())) {
+            LOG(ERROR) << "Failed to write REPLACE op for extent: "
+                       << ext.start_block();
+            return false;
+          }
+        }
+        cow_writer->AddLabel(0);
+        break;
+      }
+      case InstallOperation::ZERO:
+      case InstallOperation::DISCARD: {
+        for (const auto& ext : op.dst_extents()) {
+          visited.AddExtent(ext);
+          cow_writer->AddZeroBlocks(ext.start_block(), ext.num_blocks());
+        }
+        cow_writer->AddLabel(0);
+        break;
+      }
+      case InstallOperation::SOURCE_COPY: {
+        for (const auto& ext : op.dst_extents()) {
+          visited.AddExtent(ext);
+        }
+        if (!VABCPartitionWriter::ProcessSourceCopyOperation(
+                op, block_size, copy_blocks, source_fd, cow_writer, true)) {
+          LOG(ERROR) << "Failed to process source copy operation: " << op.type()
+                     << "\nsource extents: " << op.src_extents()
+                     << "\ndestination extents: " << op.dst_extents();
+          return false;
+        }
+        break;
+      }
+      default:
+        LOG(ERROR) << "unknown op: " << op.type();
     }
   }
-  cow_writer->AddLabel(0);
-  const size_t last_block = partition_size / block_size;
+
+  const size_t last_block = new_partition_size / block_size;
   const auto unvisited_extents =
       FilterExtentRanges({ExtentForRange(0, last_block)}, visited);
   for (const auto& ext : unvisited_extents) {
@@ -129,11 +177,23 @@
       PLOG(ERROR) << "Failed to read new block data at " << ext;
       return false;
     }
-    cow_writer->AddRawBlocks(ext.start_block(), data.data(), data.size());
+    auto to_write = data.size();
+    // FEC data written on device is chunked to 1 MB; mirror that here.
+    while (to_write) {
+      auto curr_write = std::min(block_size, to_write);
+      cow_writer->AddRawBlocks(
+          ext.start_block() + ((data.size() - to_write) / block_size),
+          data.data() + (data.size() - to_write),
+          curr_write);
+      to_write -= curr_write;
+    }
+    CHECK_EQ(to_write, 0ULL);
     cow_writer->AddLabel(0);
   }
 
-  return cow_writer->Finalize();
+  TEST_AND_RETURN_FALSE(cow_writer->Finalize());
+
+  return true;
 }
 
 android::snapshot::CowSizeInfo EstimateCowSizeInfo(
@@ -144,18 +204,16 @@
         merge_operations,
     const size_t block_size,
     std::string compression,
-    const size_t partition_size,
+    const size_t new_partition_size,
+    const size_t old_partition_size,
     const bool xor_enabled,
     uint32_t cow_version,
     uint64_t compression_factor) {
   android::snapshot::CowOptions options{
       .block_size = static_cast<uint32_t>(block_size),
       .compression = std::move(compression),
-      .max_blocks = (partition_size / block_size),
+      .max_blocks = (new_partition_size / block_size),
       .compression_factor = compression_factor};
-  // b/322279333 use 4096 as estimation until we have an updated estimation
-  // algorithm
-  options.compression_factor = block_size;
   auto cow_writer = CreateCowEstimator(cow_version, options);
   CHECK_NE(cow_writer, nullptr) << "Could not create cow estimator";
   CHECK(CowDryRun(source_fd,
@@ -164,7 +222,8 @@
                   merge_operations,
                   block_size,
                   cow_writer.get(),
-                  partition_size,
+                  new_partition_size,
+                  old_partition_size,
                   xor_enabled));
   return cow_writer->GetCowSizeInfo();
 }
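
The raw-block path near the end of CowDryRun above fills each unvisited extent in block_size-sized slices rather than with a single AddRawBlocks call. A standalone sketch of that chunking arithmetic, with a hypothetical block size and extent (illustrative only, not part of this change):

    // Sketch: flush a multi-block buffer one block_size slice at a time,
    // advancing the destination block by the number of blocks already written.
    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    int main() {
      constexpr size_t kBlockSize = 4096;         // hypothetical block size
      const size_t start_block = 100;             // hypothetical extent start
      std::vector<uint8_t> data(3 * kBlockSize);  // three blocks of new data

      size_t to_write = data.size();
      while (to_write) {
        const size_t src_offset = data.size() - to_write;
        const size_t curr_write = std::min(kBlockSize, to_write);
        const size_t block = start_block + src_offset / kBlockSize;
        // In CowDryRun this slice goes to
        // cow_writer->AddRawBlocks(block, data.data() + src_offset, curr_write).
        std::printf("write %zu bytes at block %zu (src offset %zu)\n",
                    curr_write, block, src_offset);
        to_write -= curr_write;
      }
      return 0;
    }
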
diff --git a/payload_generator/cow_size_estimator.h b/payload_generator/cow_size_estimator.h
index 060da43..9af3938 100644
--- a/payload_generator/cow_size_estimator.h
+++ b/payload_generator/cow_size_estimator.h
@@ -36,7 +36,8 @@
         merge_operations,
     const size_t block_size,
     std::string compression,
-    const size_t partition_size,
+    const size_t new_partition_size,
+    const size_t old_partition_size,
     bool xor_enabled,
     uint32_t cow_version,
     uint64_t compression_factor);
@@ -50,7 +51,8 @@
         merge_operations,
     size_t block_size,
     android::snapshot::ICowWriter* cow_writer,
-    size_t partition_size,
+    const size_t new_partition_size,
+    const size_t old_partition_size,
     bool xor_enabled);
 
 }  // namespace chromeos_update_engine
diff --git a/payload_generator/deflate_utils.cc b/payload_generator/deflate_utils.cc
index cc2e4d6..d196799 100644
--- a/payload_generator/deflate_utils.cc
+++ b/payload_generator/deflate_utils.cc
@@ -27,6 +27,7 @@
 #include "update_engine/common/utils.h"
 #include "update_engine/payload_generator/delta_diff_generator.h"
 #include "update_engine/payload_generator/extent_ranges.h"
+#include "update_engine/payload_generator/extent_utils.h"
 #include "update_engine/payload_generator/squashfs_filesystem.h"
 #include "update_engine/update_metadata.pb.h"
 
@@ -39,6 +40,12 @@
 namespace deflate_utils {
 namespace {
 
+constexpr std::ostream& operator<<(std::ostream& out,
+                                   const puffin::BitExtent& ext) {
+  out << "BitExtent(" << ext.offset << "," << ext.length << ")";
+  return out;
+}
+
 // The minimum size for a squashfs image to be processed.
 const uint64_t kMinimumSquashfsImageSize = 1 * 1024 * 1024;  // bytes
 
@@ -254,7 +261,8 @@
 
   // All given |in_deflates| items should've been inside one of the extents in
   // |extents|.
-  TEST_AND_RETURN_FALSE(in_deflates.size() == out_deflates->size());
+  TEST_EQ(in_deflates.size(), out_deflates->size());
+  Dedup(out_deflates);
 
   // Make sure all outgoing deflates are ordered and non-overlapping.
   auto result = std::adjacent_find(out_deflates->begin(),
@@ -262,7 +270,11 @@
                                    [](const BitExtent& a, const BitExtent& b) {
                                      return (a.offset + a.length) > b.offset;
                                    });
-  TEST_AND_RETURN_FALSE(result == out_deflates->end());
+  if (result != out_deflates->end()) {
+    LOG(ERROR) << "out_deflate is overlapped " << (*result) << ", "
+               << *(++result);
+    return false;
+  }
   return true;
 }
 
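
The stricter check above walks the sorted outgoing deflates with std::adjacent_find and now logs the first offending pair instead of just returning false. A standalone sketch of that overlap check, using a hypothetical Span stand-in for puffin::BitExtent (illustrative only, not part of this change):

    // Sketch: adjacent_find returns the first element whose range extends past
    // the start of its successor, i.e. the first overlapping pair.
    #include <algorithm>
    #include <cstddef>
    #include <cstdio>
    #include <iterator>
    #include <vector>

    struct Span {  // stand-in for puffin::BitExtent {offset, length}
      size_t offset;
      size_t length;
    };

    int main() {
      std::vector<Span> spans{{0, 8}, {8, 4}, {10, 2}};  // {8,4} runs into {10,2}
      auto it = std::adjacent_find(spans.begin(), spans.end(),
                                   [](const Span& a, const Span& b) {
                                     return a.offset + a.length > b.offset;
                                   });
      if (it != spans.end()) {
        std::printf("overlap between offsets %zu and %zu\n", it->offset,
                    std::next(it)->offset);
      }
      return 0;
    }
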
diff --git a/payload_generator/delta_diff_generator.cc b/payload_generator/delta_diff_generator.cc
index fb967fd..4abff92 100644
--- a/payload_generator/delta_diff_generator.cc
+++ b/payload_generator/delta_diff_generator.cc
@@ -128,11 +128,8 @@
       *operations.Add() = aop.op;
     }
 
-    FileDescriptorPtr source_fd = nullptr;
-    if (config_.enable_vabc_xor) {
-      source_fd = std::make_shared<EintrSafeFileDescriptor>();
-      source_fd->Open(old_part_.path.c_str(), O_RDONLY);
-    }
+    FileDescriptorPtr source_fd = std::make_shared<EintrSafeFileDescriptor>();
+    source_fd->Open(old_part_.path.c_str(), O_RDONLY);
 
     *cow_info_ = EstimateCowSizeInfo(
         std::move(source_fd),
@@ -142,10 +139,16 @@
         config_.block_size,
         config_.target.dynamic_partition_metadata->vabc_compression_param(),
         new_part_.size,
+        old_part_.size,
         config_.enable_vabc_xor,
         config_.target.dynamic_partition_metadata->cow_version(),
         config_.target.dynamic_partition_metadata->compression_factor());
 
+    // add a 1% overhead to our estimation
+    cow_info_->cow_size = cow_info_->cow_size * 1.01;
+    if (config_.target.dynamic_partition_metadata->cow_version() >= 3) {
+      cow_info_->op_count_max = std::max(int(cow_info_->op_count_max), 25);
+    }
     // ops buffer size == 0 for v2 version of cow format
     LOG(INFO) << "Estimated COW size for partition: " << new_part_.name << " "
               << cow_info_->cow_size
diff --git a/payload_generator/delta_diff_utils.h b/payload_generator/delta_diff_utils.h
index bfbcdf7..c40e267 100644
--- a/payload_generator/delta_diff_utils.h
+++ b/payload_generator/delta_diff_utils.h
@@ -185,12 +185,14 @@
     // Find all deflate positions inside the given extents and then put all
     // deflates together because we have already read all the extents into
     // one buffer.
+    Dedup(&old_deflates_);
+    Dedup(&new_deflates_);
     vector<puffin::BitExtent> src_deflates;
-    TEST_AND_RETURN(deflate_utils::FindAndCompactDeflates(
+    CHECK(deflate_utils::FindAndCompactDeflates(
         src_extents_, old_deflates_, &src_deflates));
 
     vector<puffin::BitExtent> dst_deflates;
-    TEST_AND_RETURN(deflate_utils::FindAndCompactDeflates(
+    CHECK(deflate_utils::FindAndCompactDeflates(
         dst_extents_, new_deflates_, &dst_deflates));
     puffin::RemoveEqualBitExtents(
         old_data_, new_data_, &src_deflates, &dst_deflates);
diff --git a/payload_generator/delta_diff_utils_unittest.cc b/payload_generator/delta_diff_utils_unittest.cc
index b698339..53bbeaa 100644
--- a/payload_generator/delta_diff_utils_unittest.cc
+++ b/payload_generator/delta_diff_utils_unittest.cc
@@ -26,8 +26,10 @@
 #include <base/strings/stringprintf.h>
 #include <bsdiff/patch_writer.h>
 #include <gtest/gtest.h>
+#include <puffin/common.h>
 
-#include "payload_generator/filesystem_interface.h"
+#include "update_engine/payload_generator/deflate_utils.h"
+#include "update_engine/payload_generator/filesystem_interface.h"
 #include "update_engine/common/test_utils.h"
 #include "update_engine/common/utils.h"
 #include "update_engine/payload_generator/delta_diff_generator.h"
@@ -845,4 +847,19 @@
   ASSERT_EQ(aop.xor_ops[3].dst_extent().start_block(), 702UL);
 }
 
+TEST_F(DeltaDiffUtilsTest, FindAndCompactDeflates) {
+  std::vector<puffin::BitExtent> bit_extents{{114122 * 8 * kBlockSize, 1024},
+                                             {114122 * 8 * kBlockSize, 1024}};
+
+  std::vector<Extent> extents = {ExtentForRange(114122, 295),
+                                 ExtentForRange(114418, 16654),
+                                 ExtentForRange(131102, 1),
+                                 ExtentForRange(131104, 307),
+                                 ExtentForRange(131414, 4143),
+                                 ExtentForRange(135559, 8528)};
+  std::vector<puffin::BitExtent> out_deflates;
+  ASSERT_TRUE(deflate_utils::FindAndCompactDeflates(
+      extents, bit_extents, &out_deflates));
+}
+
 }  // namespace chromeos_update_engine
diff --git a/payload_generator/extent_utils.h b/payload_generator/extent_utils.h
index 52b6d1e..08636a1 100644
--- a/payload_generator/extent_utils.h
+++ b/payload_generator/extent_utils.h
@@ -174,6 +174,13 @@
              big.start_block() + big.num_blocks();
 }
 
+template <typename T>
+constexpr void Dedup(T* container) {
+  std::sort(container->begin(), container->end());
+  container->erase(std::unique(container->begin(), container->end()),
+                   container->end());
+}
+
 }  // namespace chromeos_update_engine
 
 #endif  // UPDATE_ENGINE_PAYLOAD_GENERATOR_EXTENT_UTILS_H_
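
The Dedup() helper added above is the standard sort-then-unique-then-erase idiom; in this change it is applied to the deflate vectors in delta_diff_utils.h before FindAndCompactDeflates, which requires strictly ordered, non-overlapping deflates. A minimal standalone sketch of its behavior (constexpr omitted, illustrative only, not part of this change):

    // Sketch: sort the container, then erase the tail that std::unique leaves
    // behind, so exactly one copy of each value remains.
    #include <algorithm>
    #include <cassert>
    #include <vector>

    template <typename T>
    void Dedup(T* container) {
      std::sort(container->begin(), container->end());
      container->erase(std::unique(container->begin(), container->end()),
                       container->end());
    }

    int main() {
      std::vector<int> v{3, 1, 3, 2, 1};
      Dedup(&v);
      assert((v == std::vector<int>{1, 2, 3}));  // duplicates removed, sorted
      return 0;
    }
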
diff --git a/payload_generator/payload_generation_config.cc b/payload_generator/payload_generation_config.cc
index 810d0de..470d622 100644
--- a/payload_generator/payload_generation_config.cc
+++ b/payload_generator/payload_generation_config.cc
@@ -214,7 +214,7 @@
     metadata->set_vabc_compression_param(compression_method);
     std::string cow_version;
     if (!store.GetString("virtual_ab_cow_version", &cow_version)) {
-      metadata->set_cow_version(android::snapshot::kCowVersionManifest);
+      metadata->set_cow_version(2);
     } else {
       uint32_t cow_version_num{};
       android::base::ParseUint(cow_version, &cow_version_num);
diff --git a/payload_generator/xz_android.cc b/payload_generator/xz_android.cc
index 9d157c4..97e2c32 100644
--- a/payload_generator/xz_android.cc
+++ b/payload_generator/xz_android.cc
@@ -16,9 +16,6 @@
 
 #include "update_engine/payload_generator/xz.h"
 
-#include <elf.h>
-#include <endian.h>
-
 #include <algorithm>
 
 #include <7zCrc.h>
@@ -68,37 +65,6 @@
   brillo::Blob* data_;
 };
 
-// Returns the filter id to be used to compress |data|.
-// Only BCJ filter for x86 and ARM ELF file are supported, returns 0 otherwise.
-int GetFilterID(const brillo::Blob& data) {
-  if (data.size() < sizeof(Elf32_Ehdr) ||
-      memcmp(data.data(), ELFMAG, SELFMAG) != 0)
-    return 0;
-
-  const Elf32_Ehdr* header = reinterpret_cast<const Elf32_Ehdr*>(data.data());
-
-  // Only little-endian is supported.
-  if (header->e_ident[EI_DATA] != ELFDATA2LSB)
-    return 0;
-
-  switch (le16toh(header->e_machine)) {
-    case EM_386:
-    case EM_X86_64:
-      return XZ_ID_X86;
-    case EM_ARM:
-      // Both ARM and ARM Thumb instructions could be found in the same ARM ELF
-      // file. We choose to use the ARM Thumb filter here because testing shows
-      // that it usually works better than the ARM filter.
-      return XZ_ID_ARMT;
-#ifdef EM_AARCH64
-    case EM_AARCH64:
-      // Neither the ARM nor the ARM Thumb filter works well with AArch64.
-      return 0;
-#endif
-  }
-  return 0;
-}
-
 }  // namespace
 
 namespace chromeos_update_engine {
@@ -139,7 +105,8 @@
   Lzma2EncProps_Normalize(&lzma2Props);
   props.lzma2Props = lzma2Props;
 
-  props.filterProps.id = GetFilterID(in);
+  // We do not use xz's BCJ filters (http://b/329112384).
+  props.filterProps.id = 0;
 
   BlobWriterStream out_writer(out);
   BlobReaderStream in_reader(in);
diff --git a/update_engine.rc b/update_engine.rc
index bc6447b..45f05af 100644
--- a/update_engine.rc
+++ b/update_engine.rc
@@ -1,3 +1,4 @@
+# LINT.IfChange
 service update_engine /system/bin/update_engine --logtostderr --logtofile --foreground
     class late_start
     user root
@@ -7,3 +8,4 @@
 
 on property:ro.boot.slot_suffix=*
     enable update_engine
+# LINT.ThenChange(update_engine_nostats.rc)
diff --git a/update_engine_nostats.rc b/update_engine_nostats.rc
new file mode 100644
index 0000000..512f0eb
--- /dev/null
+++ b/update_engine_nostats.rc
@@ -0,0 +1,11 @@
+# LINT.IfChange
+service update_engine /system/bin/update_engine_nostats --logtostderr --logtofile --foreground
+    class late_start
+    user root
+    group root system wakelock inet cache media_rw
+    task_profiles OtaProfiles
+    disabled
+
+on property:ro.boot.slot_suffix=*
+    enable update_engine
+# LINT.ThenChange(update_engine.rc)