[automerger skipped] Make update_engine use /data/misc/update_engine/cache as tmpdir am: 210c68fbeb -s ours

am skip reason: Merged-In Ibbbe8019c276fe0fa3f9ea197815ed6daac912fc with SHA-1 fab31ad0dc is already in history

Original change: https://googleplex-android-review.googlesource.com/c/platform/system/update_engine/+/26734791

Change-Id: Ic6bc630112320ecaa3eca7b30848eaac0c6730ea
Signed-off-by: Automerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com>
diff --git a/Android.bp b/Android.bp
index 62afe09..4e5f6f5 100644
--- a/Android.bp
+++ b/Android.bp
@@ -422,6 +422,16 @@
 }
 
 cc_library_static {
+    name: "libupdate_engine_boot_control_nostats",
+    cflags: ["-DUE_DISABLE_STATS"],
+    defaults: [
+        "libupdate_engine_boot_control_defaults",
+        "libupdate_engine_boot_control_exports",
+        "libpayload_consumer_exports",
+    ],
+}
+
+cc_library_static {
     name: "libupdate_engine_boot_control_proto-full",
     defaults: [
         "libupdate_engine_boot_control_defaults",
@@ -564,6 +574,43 @@
     init_rc: ["update_engine.rc"],
 }
 
+// update_engine_nostats (type: executable)
+// ========================================================
+// update_engine daemon version without the stats integration.
+cc_binary {
+    name: "update_engine_nostats",
+    defaults: [
+        "ue_defaults",
+        "libupdate_engine_android_exports",
+    ],
+
+    static_libs: [
+        "libupdate_engine_android",
+        "libgflags",
+        "libupdate_engine_boot_control_nostats",
+    ],
+    required: [
+        "cacerts",
+        "otacerts",
+    ],
+
+    exclude_static_libs: [
+        "libstatslog_ue",
+        "libupdate_engine_boot_control"
+    ],
+
+    exclude_shared_libs: [
+        "libstatssocket",
+    ],
+
+    cflags: ["-DUE_DISABLE_STATS"],
+    srcs: [
+        "main.cc",
+        "common/metrics_reporter_stub.cc",
+    ],
+    init_rc: ["update_engine_nostats.rc"],
+}
+
 // update_engine_sideload (type: executable)
 // ========================================================
 // A binary executable equivalent to update_engine daemon that installs an update
@@ -579,7 +626,10 @@
     ],
     recovery: true,
 
-    cflags: ["-D_UE_SIDELOAD"],
+    cflags: [
+        "-D_UE_SIDELOAD",
+        "-DUE_DISABLE_STATS",
+    ],
     header_libs: ["libgtest_prod_headers"],
 
     srcs: [
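
For context, the update_engine_nostats binary above compiles common/metrics_reporter_stub.cc in place of the statsd-backed reporter and drops libstatslog_ue/libstatssocket. A minimal sketch of what such a no-op reporter can look like; the interface and method names below are illustrative assumptions, not the actual update_engine API:

    // Hypothetical no-op metrics reporter for a stats-free build.
    // MetricsReporter and ReportUpdateAttempt are illustrative names only.
    #include <string>

    class MetricsReporter {
     public:
      virtual ~MetricsReporter() = default;
      virtual void ReportUpdateAttempt(int attempt_number,
                                       const std::string& result) = 0;
    };

    class MetricsReporterStub : public MetricsReporter {
     public:
      // Every report is a no-op, so no statsd socket dependency is needed.
      void ReportUpdateAttempt(int /*attempt_number*/,
                               const std::string& /*result*/) override {}
    };
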
diff --git a/aosp/cleanup_previous_update_action.cc b/aosp/cleanup_previous_update_action.cc
index 3b54f80..9c0843c 100644
--- a/aosp/cleanup_previous_update_action.cc
+++ b/aosp/cleanup_previous_update_action.cc
@@ -26,7 +26,7 @@
 #include <base/bind.h>
 #include <libsnapshot/snapshot.h>
 
-#ifndef __ANDROID_RECOVERY__
+#if !defined(__ANDROID_RECOVERY__) && !defined(UE_DISABLE_STATS)
 #include <statslog_ue.h>
 #endif
 
@@ -502,6 +502,8 @@
 
 #ifdef __ANDROID_RECOVERY__
   LOG(INFO) << "Skip reporting merge stats in recovery.";
+#elif defined(UE_DISABLE_STATS)
+  LOG(INFO) << "Skip reporting merge stats because metrics are disabled.";
 #else
   const auto& report = result->report();
 
diff --git a/payload_consumer/delta_performer.cc b/payload_consumer/delta_performer.cc
index fe581b6..519ec71 100644
--- a/payload_consumer/delta_performer.cc
+++ b/payload_consumer/delta_performer.cc
@@ -503,7 +503,9 @@
     if (!CanPerformInstallOperation(op))
       return true;
     if (!ProcessOperation(&op, error)) {
-      LOG(ERROR) << "unable to process operation: " << *error;
+      LOG(ERROR) << "unable to process operation: "
+                 << InstallOperationTypeName(op.type())
+                 << " Error: " << utils::ErrorCodeToString(*error);
       return false;
     }
 
diff --git a/payload_generator/cow_size_estimator.cc b/payload_generator/cow_size_estimator.cc
index bb12113..4be7ea2 100644
--- a/payload_generator/cow_size_estimator.cc
+++ b/payload_generator/cow_size_estimator.cc
@@ -17,7 +17,6 @@
 #include "update_engine/payload_generator/cow_size_estimator.h"
 
 #include <algorithm>
-#include <functional>
 #include <string>
 #include <utility>
 #include <vector>
@@ -26,6 +25,9 @@
 #include <libsnapshot/cow_writer.h>
 #include <libsnapshot/cow_format.h>
 
+#include "update_engine/payload_consumer/block_extent_writer.h"
+#include "update_engine/payload_consumer/snapshot_extent_writer.h"
+#include "update_engine/payload_consumer/xor_extent_writer.h"
 #include "update_engine/common/utils.h"
 #include "update_engine/payload_consumer/vabc_partition_writer.h"
 #include "update_engine/payload_generator/extent_ranges.h"
@@ -35,6 +37,17 @@
 namespace chromeos_update_engine {
 using android::snapshot::CreateCowEstimator;
 using android::snapshot::ICowWriter;
+// Compute the XOR map: a map from dst extent to its corresponding merge op.
+static ExtentMap<const CowMergeOperation*, ExtentLess> ComputeXorMap(
+    const google::protobuf::RepeatedPtrField<CowMergeOperation>& merge_ops) {
+  ExtentMap<const CowMergeOperation*, ExtentLess> xor_map;
+  for (const auto& merge_op : merge_ops) {
+    if (merge_op.type() == CowMergeOperation::COW_XOR) {
+      xor_map.AddExtent(merge_op.dst_extent(), &merge_op);
+    }
+  }
+  return xor_map;
+}
 
 bool CowDryRun(
     FileDescriptorPtr source_fd,
@@ -50,71 +63,96 @@
   CHECK(target_fd->IsOpen());
   VABCPartitionWriter::WriteMergeSequence(merge_operations, cow_writer);
   ExtentRanges visited;
-  for (const auto& op : merge_operations) {
-    if (op.type() == CowMergeOperation::COW_COPY) {
-      visited.AddExtent(op.dst_extent());
-      cow_writer->AddCopy(op.dst_extent().start_block(),
-                          op.src_extent().start_block(),
-                          op.dst_extent().num_blocks());
-    } else if (op.type() == CowMergeOperation::COW_XOR && xor_enabled) {
-      CHECK_NE(source_fd, nullptr) << "Source fd is required to enable XOR ops";
-      CHECK(source_fd->IsOpen());
-      visited.AddExtent(op.dst_extent());
-      // dst block count is used, because
-      // src block count is probably(if src_offset > 0) 1 block
-      // larger than dst extent. Using it might lead to intreseting out of bound
-      // disk reads.
-      std::vector<unsigned char> old_data(op.dst_extent().num_blocks() *
-                                          block_size);
-      ssize_t bytes_read = 0;
-      if (!utils::PReadAll(
-              source_fd,
-              old_data.data(),
-              old_data.size(),
-              op.src_extent().start_block() * block_size + op.src_offset(),
-              &bytes_read)) {
-        PLOG(ERROR) << "Failed to read source data at " << op.src_extent();
-        return false;
-      }
-      std::vector<unsigned char> new_data(op.dst_extent().num_blocks() *
-                                          block_size);
-      if (!utils::PReadAll(target_fd,
-                           new_data.data(),
-                           new_data.size(),
-                           op.dst_extent().start_block() * block_size,
-                           &bytes_read)) {
-        PLOG(ERROR) << "Failed to read target data at " << op.dst_extent();
-        return false;
-      }
-      CHECK_GT(old_data.size(), 0UL);
-      CHECK_GT(new_data.size(), 0UL);
-      std::transform(new_data.begin(),
-                     new_data.end(),
-                     old_data.begin(),
-                     new_data.begin(),
-                     std::bit_xor<unsigned char>{});
-      CHECK(cow_writer->AddXorBlocks(op.dst_extent().start_block(),
-                                     new_data.data(),
-                                     new_data.size(),
-                                     op.src_extent().start_block(),
-                                     op.src_offset()));
+  SnapshotExtentWriter extent_writer(cow_writer);
+  ExtentMap<const CowMergeOperation*, ExtentLess> xor_map =
+      ComputeXorMap(merge_operations);
+  ExtentRanges copy_blocks;
+  for (const auto& cow_op : merge_operations) {
+    if (cow_op.type() != CowMergeOperation::COW_COPY) {
+      continue;
     }
-    // The value of label doesn't really matter, we just want to write some
-    // labels to simulate bahvior of update_engine. As update_engine writes
-    // labels every once a while when installing OTA, it's important that we do
-    // the same to get accurate size estimation.
-    cow_writer->AddLabel(0);
+    copy_blocks.AddExtent(cow_op.dst_extent());
   }
   for (const auto& op : operations) {
-    cow_writer->AddLabel(0);
-    if (op.type() == InstallOperation::ZERO) {
-      for (const auto& ext : op.dst_extents()) {
-        visited.AddExtent(ext);
-        cow_writer->AddZeroBlocks(ext.start_block(), ext.num_blocks());
+    switch (op.type()) {
+      case InstallOperation::SOURCE_BSDIFF:
+      case InstallOperation::BROTLI_BSDIFF:
+      case InstallOperation::PUFFDIFF:
+      case InstallOperation::ZUCCHINI:
+      case InstallOperation::LZ4DIFF_PUFFDIFF:
+      case InstallOperation::LZ4DIFF_BSDIFF: {
+        if (xor_enabled) {
+          std::unique_ptr<XORExtentWriter> writer =
+              std::make_unique<XORExtentWriter>(
+                  op, source_fd, cow_writer, xor_map, partition_size);
+          TEST_AND_RETURN_FALSE(writer->Init(op.dst_extents(), block_size));
+          for (const auto& ext : op.dst_extents()) {
+            visited.AddExtent(ext);
+            ssize_t bytes_read = 0;
+            std::vector<unsigned char> new_data(ext.num_blocks() * block_size);
+            if (!utils::PReadAll(target_fd,
+                                 new_data.data(),
+                                 new_data.size(),
+                                 ext.start_block() * block_size,
+                                 &bytes_read)) {
+              PLOG(ERROR) << "Failed to read target data at " << ext;
+              return false;
+            }
+            writer->Write(new_data.data(), ext.num_blocks() * block_size);
+          }
+          cow_writer->AddLabel(0);
+          break;
+        }
+        [[fallthrough]];
       }
+      case InstallOperation::REPLACE:
+      case InstallOperation::REPLACE_BZ:
+      case InstallOperation::REPLACE_XZ: {
+        TEST_AND_RETURN_FALSE(extent_writer.Init(op.dst_extents(), block_size));
+        for (const auto& ext : op.dst_extents()) {
+          visited.AddExtent(ext);
+          std::vector<unsigned char> data(ext.num_blocks() * block_size);
+          ssize_t bytes_read = 0;
+          if (!utils::PReadAll(target_fd,
+                               data.data(),
+                               data.size(),
+                               ext.start_block() * block_size,
+                               &bytes_read)) {
+            PLOG(ERROR) << "Failed to read new block data at " << ext;
+            return false;
+          }
+          extent_writer.Write(data.data(), data.size());
+        }
+        cow_writer->AddLabel(0);
+        break;
+      }
+      case InstallOperation::ZERO:
+      case InstallOperation::DISCARD: {
+        for (const auto& ext : op.dst_extents()) {
+          visited.AddExtent(ext);
+          cow_writer->AddZeroBlocks(ext.start_block(), ext.num_blocks());
+        }
+        cow_writer->AddLabel(0);
+        break;
+      }
+      case InstallOperation::SOURCE_COPY: {
+        for (const auto& ext : op.dst_extents()) {
+          visited.AddExtent(ext);
+        }
+        if (!VABCPartitionWriter::ProcessSourceCopyOperation(
+                op, block_size, copy_blocks, source_fd, cow_writer, true)) {
+          LOG(ERROR) << "Failed to process source copy operation: " << op.type()
+                     << "\nsource extents: " << op.src_extents()
+                     << "\ndestination extents: " << op.dst_extents();
+          return false;
+        }
+        break;
+      }
+      default:
+        LOG(ERROR) << "unknown op: " << op.type();
     }
   }
-  cow_writer->AddLabel(0);
+
   const size_t last_block = partition_size / block_size;
   const auto unvisited_extents =
       FilterExtentRanges({ExtentForRange(0, last_block)}, visited);
@@ -129,11 +167,23 @@
       PLOG(ERROR) << "Failed to read new block data at " << ext;
       return false;
     }
-    cow_writer->AddRawBlocks(ext.start_block(), data.data(), data.size());
+    auto to_write = data.size();
+    // FEC data written on device is chunked to 1 MiB; we want to mirror that here.
+    while (to_write) {
+      auto curr_write = std::min(block_size, to_write);
+      cow_writer->AddRawBlocks(
+          ext.start_block() + ((data.size() - to_write) / block_size),
+          data.data() + (data.size() - to_write),
+          curr_write);
+      to_write -= curr_write;
+    }
+    CHECK_EQ(to_write, 0ULL);
     cow_writer->AddLabel(0);
   }
 
-  return cow_writer->Finalize();
+  TEST_AND_RETURN_FALSE(cow_writer->Finalize());
+
+  return true;
 }
 
 android::snapshot::CowSizeInfo EstimateCowSizeInfo(
@@ -153,9 +203,6 @@
       .compression = std::move(compression),
       .max_blocks = (partition_size / block_size),
       .compression_factor = compression_factor};
-  // b/322279333 use 4096 as estimation until we have an updated estimation
-  // algorithm
-  options.compression_factor = block_size;
   auto cow_writer = CreateCowEstimator(cow_version, options);
   CHECK_NE(cow_writer, nullptr) << "Could not create cow estimator";
   CHECK(CowDryRun(source_fd,
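
One subtle part of the new dry run is the chunked AddRawBlocks loop for unvisited extents in CowDryRun: the starting block of each chunk is derived from how many bytes have already been written. The arithmetic can be checked in isolation; this sketch uses a fake writer, and kBlockSize/start_block are made-up values:

    // Standalone check of the chunking arithmetic from CowDryRun: walk a
    // buffer in block-sized chunks and derive each chunk's start block from
    // the bytes already consumed. FakeCowWriter is a stand-in type.
    #include <algorithm>
    #include <cassert>
    #include <cstddef>
    #include <utility>
    #include <vector>

    struct FakeCowWriter {
      std::vector<std::pair<size_t, size_t>> calls;  // (start_block, bytes)
      void AddRawBlocks(size_t start_block, const unsigned char*, size_t n) {
        calls.emplace_back(start_block, n);
      }
    };

    int main() {
      constexpr size_t kBlockSize = 4096;
      const size_t start_block = 10;                    // extent start
      std::vector<unsigned char> data(3 * kBlockSize);  // 3-block extent
      FakeCowWriter writer;

      size_t to_write = data.size();
      while (to_write) {
        const size_t curr_write = std::min(kBlockSize, to_write);
        writer.AddRawBlocks(
            start_block + ((data.size() - to_write) / kBlockSize),
            data.data() + (data.size() - to_write),
            curr_write);
        to_write -= curr_write;
      }
      assert(writer.calls.size() == 3);
      assert(writer.calls[2].first == start_block + 2);  // last chunk at block 12
      return 0;
    }
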
diff --git a/payload_generator/deflate_utils.cc b/payload_generator/deflate_utils.cc
index cc2e4d6..d196799 100644
--- a/payload_generator/deflate_utils.cc
+++ b/payload_generator/deflate_utils.cc
@@ -27,6 +27,7 @@
 #include "update_engine/common/utils.h"
 #include "update_engine/payload_generator/delta_diff_generator.h"
 #include "update_engine/payload_generator/extent_ranges.h"
+#include "update_engine/payload_generator/extent_utils.h"
 #include "update_engine/payload_generator/squashfs_filesystem.h"
 #include "update_engine/update_metadata.pb.h"
 
@@ -39,6 +40,12 @@
 namespace deflate_utils {
 namespace {
 
+constexpr std::ostream& operator<<(std::ostream& out,
+                                   const puffin::BitExtent& ext) {
+  out << "BitExtent(" << ext.offset << "," << ext.length << ")";
+  return out;
+}
+
 // The minimum size for a squashfs image to be processed.
 const uint64_t kMinimumSquashfsImageSize = 1 * 1024 * 1024;  // bytes
 
@@ -254,7 +261,8 @@
 
   // All given |in_deflates| items should've been inside one of the extents in
   // |extents|.
-  TEST_AND_RETURN_FALSE(in_deflates.size() == out_deflates->size());
+  TEST_EQ(in_deflates.size(), out_deflates->size());
+  Dedup(out_deflates);
 
   // Make sure all outgoing deflates are ordered and non-overlapping.
   auto result = std::adjacent_find(out_deflates->begin(),
@@ -262,7 +270,11 @@
                                    [](const BitExtent& a, const BitExtent& b) {
                                      return (a.offset + a.length) > b.offset;
                                    });
-  TEST_AND_RETURN_FALSE(result == out_deflates->end());
+  if (result != out_deflates->end()) {
+    LOG(ERROR) << "Overlapping deflates in |out_deflates|: " << (*result)
+               << ", " << *(++result);
+    return false;
+  }
   return true;
 }
 
diff --git a/payload_generator/delta_diff_generator.cc b/payload_generator/delta_diff_generator.cc
index fb967fd..ed9bf4e 100644
--- a/payload_generator/delta_diff_generator.cc
+++ b/payload_generator/delta_diff_generator.cc
@@ -128,11 +128,8 @@
       *operations.Add() = aop.op;
     }
 
-    FileDescriptorPtr source_fd = nullptr;
-    if (config_.enable_vabc_xor) {
-      source_fd = std::make_shared<EintrSafeFileDescriptor>();
-      source_fd->Open(old_part_.path.c_str(), O_RDONLY);
-    }
+    FileDescriptorPtr source_fd = std::make_shared<EintrSafeFileDescriptor>();
+    source_fd->Open(old_part_.path.c_str(), O_RDONLY);
 
     *cow_info_ = EstimateCowSizeInfo(
         std::move(source_fd),
@@ -146,6 +143,10 @@
         config_.target.dynamic_partition_metadata->cow_version(),
         config_.target.dynamic_partition_metadata->compression_factor());
 
+    // Add a 1% overhead (with a floor of 25 extra ops) to our estimation.
+    cow_info_->cow_size += cow_info_->cow_size * 0.01;
+    cow_info_->op_count_max +=
+        std::max(int(cow_info_->op_count_max * 0.01), 25);
     // ops buffer size == 0 for v2 version of cow format
     LOG(INFO) << "Estimated COW size for partition: " << new_part_.name << " "
               << cow_info_->cow_size
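
The padding applied above is small but easy to get wrong, so here is the intended arithmetic in isolation: grow the size estimate by 1% and the op-count estimate by at least 25 ops. A minimal sketch with made-up input values:

    // Sketch of the estimation padding: grow the estimated COW size by 1%
    // and the op count by max(1%, 25 ops). Input values are illustrative.
    #include <algorithm>
    #include <cstdint>
    #include <iostream>

    int main() {
      uint64_t cow_size = 100'000'000;  // bytes, from the estimator
      uint64_t op_count_max = 1'000;

      cow_size += cow_size * 0.01;                                  // +1%
      op_count_max += std::max<uint64_t>(op_count_max * 0.01, 25);  // >= 25 extra

      std::cout << cow_size << " " << op_count_max << "\n";  // 101000000 1025
      return 0;
    }
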
diff --git a/payload_generator/delta_diff_utils.h b/payload_generator/delta_diff_utils.h
index bfbcdf7..c40e267 100644
--- a/payload_generator/delta_diff_utils.h
+++ b/payload_generator/delta_diff_utils.h
@@ -185,12 +185,14 @@
     // Find all deflate positions inside the given extents and then put all
     // deflates together because we have already read all the extents into
     // one buffer.
+    Dedup(&old_deflates_);
+    Dedup(&new_deflates_);
     vector<puffin::BitExtent> src_deflates;
-    TEST_AND_RETURN(deflate_utils::FindAndCompactDeflates(
+    CHECK(deflate_utils::FindAndCompactDeflates(
         src_extents_, old_deflates_, &src_deflates));
 
     vector<puffin::BitExtent> dst_deflates;
-    TEST_AND_RETURN(deflate_utils::FindAndCompactDeflates(
+    CHECK(deflate_utils::FindAndCompactDeflates(
         dst_extents_, new_deflates_, &dst_deflates));
     puffin::RemoveEqualBitExtents(
         old_data_, new_data_, &src_deflates, &dst_deflates);
diff --git a/payload_generator/delta_diff_utils_unittest.cc b/payload_generator/delta_diff_utils_unittest.cc
index b698339..53bbeaa 100644
--- a/payload_generator/delta_diff_utils_unittest.cc
+++ b/payload_generator/delta_diff_utils_unittest.cc
@@ -26,8 +26,10 @@
 #include <base/strings/stringprintf.h>
 #include <bsdiff/patch_writer.h>
 #include <gtest/gtest.h>
+#include <puffin/common.h>
 
-#include "payload_generator/filesystem_interface.h"
+#include "update_engine/payload_generator/deflate_utils.h"
+#include "update_engine/payload_generator/filesystem_interface.h"
 #include "update_engine/common/test_utils.h"
 #include "update_engine/common/utils.h"
 #include "update_engine/payload_generator/delta_diff_generator.h"
@@ -845,4 +847,19 @@
   ASSERT_EQ(aop.xor_ops[3].dst_extent().start_block(), 702UL);
 }
 
+TEST_F(DeltaDiffUtilsTest, FindAndCompactDeflates) {
+  std::vector<puffin::BitExtent> bit_extents{{114122 * 8 * kBlockSize, 1024},
+                                             {114122 * 8 * kBlockSize, 1024}};
+
+  std::vector<Extent> extents = {ExtentForRange(114122, 295),
+                                 ExtentForRange(114418, 16654),
+                                 ExtentForRange(131102, 1),
+                                 ExtentForRange(131104, 307),
+                                 ExtentForRange(131414, 4143),
+                                 ExtentForRange(135559, 8528)};
+  std::vector<puffin::BitExtent> out_deflates;
+  ASSERT_TRUE(deflate_utils::FindAndCompactDeflates(
+      extents, bit_extents, &out_deflates));
+}
+
 }  // namespace chromeos_update_engine
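
The test above addresses deflates in bits: a deflate starting at the beginning of block 114122 has bit offset 114122 * kBlockSize * 8. A short sketch of that conversion (kBlockSize assumed to be 4096 here):

    // Block index -> bit offset conversion used when building BitExtents.
    #include <cstdint>
    #include <iostream>

    int main() {
      constexpr uint64_t kBlockSize = 4096;  // bytes per block (assumed)
      constexpr uint64_t block = 114122;
      constexpr uint64_t bit_offset = block * kBlockSize * 8;  // bytes -> bits
      std::cout << bit_offset << "\n";  // 3739549696
      return 0;
    }
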
diff --git a/payload_generator/extent_utils.h b/payload_generator/extent_utils.h
index 52b6d1e..08636a1 100644
--- a/payload_generator/extent_utils.h
+++ b/payload_generator/extent_utils.h
@@ -174,6 +174,13 @@
              big.start_block() + big.num_blocks();
 }
 
+template <typename T>
+constexpr void Dedup(T* container) {
+  std::sort(container->begin(), container->end());
+  container->erase(std::unique(container->begin(), container->end()),
+                   container->end());
+}
+
 }  // namespace chromeos_update_engine
 
 #endif  // UPDATE_ENGINE_PAYLOAD_GENERATOR_EXTENT_UTILS_H_
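
The Dedup helper added here is the usual sort/unique/erase idiom; it assumes the element type is ordered and equality-comparable. A quick usage sketch on a plain vector:

    // Usage sketch for the Dedup() helper: sort + unique + erase collapses
    // duplicate elements in place.
    #include <algorithm>
    #include <cassert>
    #include <vector>

    template <typename T>
    void Dedup(T* container) {
      std::sort(container->begin(), container->end());
      container->erase(std::unique(container->begin(), container->end()),
                       container->end());
    }

    int main() {
      std::vector<int> v = {3, 1, 3, 2, 1};
      Dedup(&v);
      assert((v == std::vector<int>{1, 2, 3}));
      return 0;
    }
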
diff --git a/update_engine.rc b/update_engine.rc
index bc6447b..45f05af 100644
--- a/update_engine.rc
+++ b/update_engine.rc
@@ -1,3 +1,4 @@
+# LINT.IfChange
 service update_engine /system/bin/update_engine --logtostderr --logtofile --foreground
     class late_start
     user root
@@ -7,3 +8,4 @@
 
 on property:ro.boot.slot_suffix=*
     enable update_engine
+# LINT.ThenChange(update_engine_nostats.rc)
diff --git a/update_engine_nostats.rc b/update_engine_nostats.rc
new file mode 100644
index 0000000..512f0eb
--- /dev/null
+++ b/update_engine_nostats.rc
@@ -0,0 +1,11 @@
+# LINT.IfChange
+service update_engine /system/bin/update_engine_nostats --logtostderr --logtofile --foreground
+    class late_start
+    user root
+    group root system wakelock inet cache media_rw
+    task_profiles OtaProfiles
+    disabled
+
+on property:ro.boot.slot_suffix=*
+    enable update_engine
+# LINT.ThenChange(update_engine.rc)