libsnapshot: CowWriter - AddCopy API change

AddCopy - When copy blocks are contiguous, pass the number of
contiguous blocks ('num_blocks') to the libsnapshot_cow library in a single call.

This is required for batching the I/O requests.

Bug: 254188450
Test: Incremental OTA on Pixel (Tested legacy VAB as well)
Signed-off-by: Akilesh Kailash <akailash@google.com>
Change-Id: I5f04d88f720e86f5fcee1c775999c94226119270
diff --git a/payload_consumer/snapshot_extent_writer_unittest.cc b/payload_consumer/snapshot_extent_writer_unittest.cc
index 0c96c3e..d43d3a1 100644
--- a/payload_consumer/snapshot_extent_writer_unittest.cc
+++ b/payload_consumer/snapshot_extent_writer_unittest.cc
@@ -43,9 +43,14 @@
   using ICowWriter::ICowWriter;
   ~FakeCowWriter() = default;
 
-  bool EmitCopy(uint64_t new_block, uint64_t old_block) override {
-    operations_[new_block] = {.type = CowOp::COW_COPY,
-                              .source_block = static_cast<size_t>(old_block)};
+  bool EmitCopy(uint64_t new_block,
+                uint64_t old_block,
+                uint64_t num_blocks) override {
+    for (size_t i = 0; i < num_blocks; i++) {
+      operations_[new_block + i] = {
+          .type = CowOp::COW_COPY,
+          .source_block = static_cast<size_t>(old_block + i)};
+    }
     return true;
   }
   bool EmitRawBlocks(uint64_t new_block_start,
diff --git a/payload_consumer/vabc_partition_writer.cc b/payload_consumer/vabc_partition_writer.cc
index 8ae0b51..c3b2e41 100644
--- a/payload_consumer/vabc_partition_writer.cc
+++ b/payload_consumer/vabc_partition_writer.cc
@@ -229,19 +229,30 @@
   for (const auto& cow_op : converted) {
     std::vector<uint8_t> buffer;
     switch (cow_op.op) {
-      case CowOperation::CowCopy:
+      case CowOperation::CowCopy: {
         if (cow_op.src_block == cow_op.dst_block) {
           continue;
         }
-        // Add blocks in reverse order, because snapused specifically prefers
-        // this ordering. Since we already eliminated all self-overlapping
-        // SOURCE_COPY during delta generation, this should be safe to do.
-        for (size_t i = cow_op.block_count; i > 0; i--) {
-          TEST_AND_RETURN_FALSE(cow_writer->AddCopy(cow_op.dst_block + i - 1,
-                                                    cow_op.src_block + i - 1));
+
+        const bool userSnapshots = android::base::GetBoolProperty(
+            "ro.virtual_ab.userspace.snapshots.enabled", false);
+
+        if (userSnapshots) {
+          TEST_AND_RETURN_FALSE(cow_op.block_count != 0);
+          TEST_AND_RETURN_FALSE(cow_writer->AddCopy(
+              cow_op.dst_block, cow_op.src_block, cow_op.block_count));
+        } else {
+          // Add blocks in reverse order, because snapused specifically prefers
+          // this ordering. Since we already eliminated all self-overlapping
+          // SOURCE_COPY during delta generation, this should be safe to do.
+          for (size_t i = cow_op.block_count; i > 0; i--) {
+            TEST_AND_RETURN_FALSE(cow_writer->AddCopy(
+                cow_op.dst_block + i - 1, cow_op.src_block + i - 1));
+          }
         }
         break;
-      case CowOperation::CowReplace:
+      }
+      case CowOperation::CowReplace: {
         buffer.resize(block_size * cow_op.block_count);
         ssize_t bytes_read = 0;
         TEST_AND_RETURN_FALSE(utils::ReadAll(source_fd,
@@ -257,6 +268,7 @@
         TEST_AND_RETURN_FALSE(cow_writer->AddRawBlocks(
             cow_op.dst_block, buffer.data(), buffer.size()));
         break;
+      }
     }
   }
 
diff --git a/payload_consumer/vabc_partition_writer_unittest.cc b/payload_consumer/vabc_partition_writer_unittest.cc
index 20aa75f..49362ca 100644
--- a/payload_consumer/vabc_partition_writer_unittest.cc
+++ b/payload_consumer/vabc_partition_writer_unittest.cc
@@ -102,7 +102,7 @@
         EXPECT_CALL(*cow_writer, EmitSequenceData(_, _))
             .With(Args<1, 0>(ElementsAreArray(expected_merge_sequence)))
             .WillOnce(Return(true));
-        ON_CALL(*cow_writer, EmitCopy(_, _)).WillByDefault(Return(true));
+        ON_CALL(*cow_writer, EmitCopy(_, _, _)).WillByDefault(Return(true));
         ON_CALL(*cow_writer, EmitLabel(_)).WillByDefault(Return(true));
         return cow_writer;
       }));
@@ -127,7 +127,7 @@
             EXPECT_CALL(*cow_writer, EmitSequenceData(_, _))
                 .With(Args<1, 0>(ElementsAreArray(expected_merge_sequence)))
                 .WillOnce(Return(true));
-            ON_CALL(*cow_writer, EmitCopy(_, _)).WillByDefault(Return(true));
+            ON_CALL(*cow_writer, EmitCopy(_, _, _)).WillByDefault(Return(true));
             ON_CALL(*cow_writer, EmitLabel(_)).WillByDefault(Return(true));
             return cow_writer;
           }));
@@ -150,17 +150,17 @@
                 std::make_unique<android::snapshot::MockSnapshotWriter>(
                     android::snapshot::CowOptions{});
             Sequence s;
-            ON_CALL(*cow_writer, EmitCopy(_, _)).WillByDefault(Return(true));
+            ON_CALL(*cow_writer, EmitCopy(_, _, _)).WillByDefault(Return(true));
             ON_CALL(*cow_writer, EmitLabel(_)).WillByDefault(Return(true));
             ON_CALL(*cow_writer, Initialize()).WillByDefault(Return(true));
             EXPECT_CALL(*cow_writer, Initialize()).InSequence(s);
-            EXPECT_CALL(*cow_writer, EmitCopy(10, 5)).InSequence(s);
-            EXPECT_CALL(*cow_writer, EmitCopy(15, 10)).InSequence(s);
+            EXPECT_CALL(*cow_writer, EmitCopy(10, 5, 1)).InSequence(s);
+            EXPECT_CALL(*cow_writer, EmitCopy(15, 10, 1)).InSequence(s);
             // libsnapshot want blocks in reverser order, so 21 goes before 20
-            EXPECT_CALL(*cow_writer, EmitCopy(21, 16)).InSequence(s);
-            EXPECT_CALL(*cow_writer, EmitCopy(20, 15)).InSequence(s);
+            EXPECT_CALL(*cow_writer, EmitCopy(21, 16, 1)).InSequence(s);
+            EXPECT_CALL(*cow_writer, EmitCopy(20, 15, 1)).InSequence(s);
 
-            EXPECT_CALL(*cow_writer, EmitCopy(25, 20)).InSequence(s);
+            EXPECT_CALL(*cow_writer, EmitCopy(25, 20, 1)).InSequence(s);
             return cow_writer;
           }));
   ASSERT_TRUE(writer_.Init(&install_plan_, true, 0));
@@ -224,7 +224,7 @@
               .WillOnce(Return(true));
         }
         EXPECT_CALL(*cow_writer, Initialize()).Times(1);
-        EXPECT_CALL(*cow_writer, EmitCopy(_, _)).Times(0);
+        EXPECT_CALL(*cow_writer, EmitCopy(_, _, _)).Times(0);
         EXPECT_CALL(*cow_writer, EmitRawBlocks(_, _, _)).WillOnce(Return(true));
         EXPECT_CALL(*cow_writer, EmitXorBlocks(10, _, kBlockSize * 2, 5, 0))
             .WillOnce(Return(true));
diff --git a/payload_generator/cow_size_estimator.cc b/payload_generator/cow_size_estimator.cc
index 5326d13..0c918e7 100644
--- a/payload_generator/cow_size_estimator.cc
+++ b/payload_generator/cow_size_estimator.cc
@@ -52,10 +52,9 @@
   for (const auto& op : merge_operations) {
     if (op.type() == CowMergeOperation::COW_COPY) {
       visited.AddExtent(op.dst_extent());
-      for (size_t i = 0; i < op.dst_extent().num_blocks(); i++) {
-        cow_writer->AddCopy(op.dst_extent().start_block() + i,
-                            op.src_extent().start_block() + i);
-      }
+      cow_writer->AddCopy(op.dst_extent().start_block(),
+                          op.src_extent().start_block(),
+                          op.dst_extent().num_blocks());
     } else if (op.type() == CowMergeOperation::COW_XOR && xor_enabled) {
       CHECK_NE(source_fd, nullptr) << "Source fd is required to enable XOR ops";
       CHECK(source_fd->IsOpen());