update_engine: factor out source_copy method

The estimator and vabc_partition_writer use the same logic for
processing SOURCE_COPY ops, so factor this common code out into a
static helper, VABCPartitionWriter::ProcessSourceCopyOperation(),
that both callers can share.
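
For reference, a sketch of the intended call pattern from the estimator
side. This is illustrative only: the estimator's loop, its variable
names, and its size-counting ICowWriter are not part of this change;
only the helper's signature comes from this CL.

    // Hypothetical estimator-side usage of the new helper. Everything
    // except VABCPartitionWriter::ProcessSourceCopyOperation() and the
    // InstallOperation proto accessors is illustrative.
    for (const InstallOperation& op : partition_update.operations()) {
      if (op.type() != InstallOperation::SOURCE_COPY) {
        continue;
      }
      // |cow_writer| would be an ICowWriter that only accounts for the
      // bytes it is asked to write instead of producing a real COW.
      TEST_AND_RETURN_FALSE(VABCPartitionWriter::ProcessSourceCopyOperation(
          op,
          block_size,
          copy_blocks,
          source_fd,
          cow_writer,
          xor_enabled /* sequence_op_supported */));
    }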

Bug: 322279333
Test: th
Change-Id: I0ef7b8ad50c9b987bd13cf323e97f426d60f62b5
diff --git a/payload_consumer/vabc_partition_writer.cc b/payload_consumer/vabc_partition_writer.cc
index 97de934..c879375 100644
--- a/payload_consumer/vabc_partition_writer.cc
+++ b/payload_consumer/vabc_partition_writer.cc
@@ -97,6 +97,78 @@
             << copy_blocks_.blocks() << " copy blocks";
 }
 
+bool VABCPartitionWriter::ProcessSourceCopyOperation(
+    const InstallOperation& operation,
+    const size_t block_size,
+    const ExtentRanges& copy_blocks,
+    const FileDescriptorPtr& source_fd,
+    android::snapshot::ICowWriter* cow_writer,
+    bool sequence_op_supported) {
+  // COPY ops are already handled during Init(), no need to do actual work, but
+  // we still want to verify that all blocks contain expected data.
+  TEST_AND_RETURN_FALSE(source_fd != nullptr);
+  std::vector<CowOperation> converted;
+
+  const auto& src_extents = operation.src_extents();
+  const auto& dst_extents = operation.dst_extents();
+  BlockIterator it1{src_extents};
+  BlockIterator it2{dst_extents};
+  const bool userSnapshots = android::base::GetBoolProperty(
+      "ro.virtual_ab.userspace.snapshots.enabled", false);
+  // For devices not supporting XOR, sequence op is not supported, so all COPY
+  // operations are written up front in strict merge order.
+  while (!it1.is_end() && !it2.is_end()) {
+    const auto src_block = *it1;
+    const auto dst_block = *it2;
+    ++it1;
+    ++it2;
+    if (src_block == dst_block) {
+      continue;
+    }
+    if (copy_blocks.ContainsBlock(dst_block)) {
+      if (sequence_op_supported) {
+        push_back(&converted, {CowOperation::CowCopy, src_block, dst_block, 1});
+      }
+    } else {
+      push_back(&converted,
+                {CowOperation::CowReplace, src_block, dst_block, 1});
+    }
+  }
+  std::vector<uint8_t> buffer;
+  for (const auto& cow_op : converted) {
+    if (cow_op.op == CowOperation::CowCopy) {
+      if (userSnapshots) {
+        cow_writer->AddCopy(
+            cow_op.dst_block, cow_op.src_block, cow_op.block_count);
+      } else {
+        // Add blocks in reverse order, because snapuserd specifically
+        // prefers this ordering. Since we already eliminated all
+        // self-overlapping SOURCE_COPY during delta generation, this
+        // should be safe to do.
+        for (size_t i = cow_op.block_count; i > 0; i--) {
+          TEST_AND_RETURN_FALSE(cow_writer->AddCopy(cow_op.dst_block + i - 1,
+                                                    cow_op.src_block + i - 1));
+        }
+      }
+      continue;
+    }
+    buffer.resize(block_size * cow_op.block_count);
+    ssize_t bytes_read = 0;
+    TEST_AND_RETURN_FALSE(utils::ReadAll(source_fd,
+                                         buffer.data(),
+                                         block_size * cow_op.block_count,
+                                         cow_op.src_block * block_size,
+                                         &bytes_read));
+    if (bytes_read <= 0 || static_cast<size_t>(bytes_read) != buffer.size()) {
+      LOG(ERROR) << "source_fd->Read failed: " << bytes_read
+                 << "\ncow op: " << cow_op.op;
+      return false;
+    }
+    TEST_AND_RETURN_FALSE(cow_writer->AddRawBlocks(
+        cow_op.dst_block, buffer.data(), buffer.size()));
+  }
+  return true;
+}
+
 bool VABCPartitionWriter::DoesDeviceSupportsXor() {
   return dynamic_control_->GetVirtualAbCompressionXorFeatureFlag().IsEnabled();
 }
@@ -272,70 +344,14 @@
 
 [[nodiscard]] bool VABCPartitionWriter::PerformSourceCopyOperation(
     const InstallOperation& operation, ErrorCode* error) {
-  // COPY ops are already handled during Init(), no need to do actual work, but
-  // we still want to verify that all blocks contain expected data.
   auto source_fd = verified_source_fd_.ChooseSourceFD(operation, error);
-  TEST_AND_RETURN_FALSE(source_fd != nullptr);
-  std::vector<CowOperation> converted;
 
-  const auto& src_extents = operation.src_extents();
-  const auto& dst_extents = operation.dst_extents();
-  BlockIterator it1{src_extents};
-  BlockIterator it2{dst_extents};
-  const bool userSnapshots = android::base::GetBoolProperty(
-      "ro.virtual_ab.userspace.snapshots.enabled", false);
-  // For devices not supporting XOR, sequence op is not supported, so all COPY
-  // operations are written up front in strict merge order.
-  const auto sequence_op_supported = DoesDeviceSupportsXor();
-  while (!it1.is_end() && !it2.is_end()) {
-    const auto src_block = *it1;
-    const auto dst_block = *it2;
-    ++it1;
-    ++it2;
-    if (src_block == dst_block) {
-      continue;
-    }
-    if (copy_blocks_.ContainsBlock(dst_block)) {
-      if (sequence_op_supported) {
-        push_back(&converted, {CowOperation::CowCopy, src_block, dst_block, 1});
-      }
-    } else {
-      push_back(&converted,
-                {CowOperation::CowReplace, src_block, dst_block, 1});
-    }
-  }
-  std::vector<uint8_t> buffer;
-  for (const auto& cow_op : converted) {
-    if (cow_op.op == CowOperation::CowCopy) {
-      if (userSnapshots) {
-        cow_writer_->AddCopy(
-            cow_op.dst_block, cow_op.src_block, cow_op.block_count);
-      } else {
-        // Add blocks in reverse order, because snapused specifically prefers
-        // this ordering. Since we already eliminated all self-overlapping
-        // SOURCE_COPY during delta generation, this should be safe to do.
-        for (size_t i = cow_op.block_count; i > 0; i--) {
-          TEST_AND_RETURN_FALSE(cow_writer_->AddCopy(cow_op.dst_block + i - 1,
-                                                     cow_op.src_block + i - 1));
-        }
-      }
-      continue;
-    }
-    buffer.resize(block_size_ * cow_op.block_count);
-    ssize_t bytes_read = 0;
-    TEST_AND_RETURN_FALSE(utils::ReadAll(source_fd,
-                                         buffer.data(),
-                                         block_size_ * cow_op.block_count,
-                                         cow_op.src_block * block_size_,
-                                         &bytes_read));
-    if (bytes_read <= 0 || static_cast<size_t>(bytes_read) != buffer.size()) {
-      LOG(ERROR) << "source_fd->Read failed: " << bytes_read;
-      return false;
-    }
-    TEST_AND_RETURN_FALSE(cow_writer_->AddRawBlocks(
-        cow_op.dst_block, buffer.data(), buffer.size()));
-  }
-  return true;
+  return ProcessSourceCopyOperation(operation,
+                                    block_size_,
+                                    copy_blocks_,
+                                    source_fd,
+                                    cow_writer_.get(),
+                                    DoesDeviceSupportsXor());
 }
 
 bool VABCPartitionWriter::PerformReplaceOperation(const InstallOperation& op,
diff --git a/payload_consumer/vabc_partition_writer.h b/payload_consumer/vabc_partition_writer.h
index 977fbe5..bd3db79 100644
--- a/payload_consumer/vabc_partition_writer.h
+++ b/payload_consumer/vabc_partition_writer.h
@@ -34,6 +34,14 @@
 namespace chromeos_update_engine {
 class VABCPartitionWriter final : public PartitionWriterInterface {
  public:
+  static bool ProcessSourceCopyOperation(
+      const InstallOperation& operation,
+      const size_t block_size,
+      const ExtentRanges& copy_blocks,
+      const FileDescriptorPtr& source_fd,
+      android::snapshot::ICowWriter* cow_writer,
+      bool sequence_op_supported);
+
   VABCPartitionWriter(const PartitionUpdate& partition_update,
                       const InstallPlan::Partition& install_part,
                       DynamicPartitionControlInterface* dynamic_control,