Move all COPY op write processing to later in pipeline
In the initial version of VABC, all COPY op processing is done up front
in the VABCPartitionWriter::Init() call. This makes for bad UX because
update_engine can't respond to pause/cancel requests while processing
COPY ops, nor can it update the progress bar in the UI.
In aosp/2266600, portions of COPY op processing were moved to later in
the install pipeline, which spreads the computation out over time. This
CL moves more of that logic out of the VABCPartitionWriter::Init() call.
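
As an illustration of the new control flow, here is a minimal,
self-contained sketch (FakeCowWriter, InitSketch and
PerformSourceCopySketch are made-up stand-ins, not the real
update_engine or libsnapshot classes): when snapuserd supports XOR and
merge sequences, Init() only writes the merge sequence and each COPY is
emitted later from the per-operation path; otherwise every COPY is
still written up front as before.

  // Illustrative sketch only; names and types do not match the real code.
  #include <cstdint>
  #include <iostream>
  #include <vector>

  struct CopyOp {
    uint64_t dst_block;
    uint64_t src_block;
    uint64_t num_blocks;
  };

  struct FakeCowWriter {
    bool AddSequenceData(const std::vector<CopyOp>& ops) {
      std::cout << "merge sequence with " << ops.size() << " ops\n";
      return true;
    }
    bool AddCopy(uint64_t dst, uint64_t src, uint64_t n = 1) {
      std::cout << "COPY dst=" << dst << " src=" << src << " n=" << n << "\n";
      return true;
    }
  };

  // With merge-sequence support, Init() only writes the cheap merge
  // sequence; the heavy COPY writes happen later, one install op at a
  // time, so pause/cancel and progress reporting stay responsive.
  bool InitSketch(FakeCowWriter& cow, const std::vector<CopyOp>& merge_ops,
                  bool supports_merge_sequence) {
    if (supports_merge_sequence) {
      return cow.AddSequenceData(merge_ops);
    }
    // Legacy path: every COPY is written before the first install op runs.
    for (const auto& op : merge_ops) {
      for (uint64_t i = op.num_blocks; i > 0; i--) {
        if (!cow.AddCopy(op.dst_block + i - 1, op.src_block + i - 1))
          return false;
      }
    }
    return true;
  }

  // Deferred path, invoked per SOURCE_COPY operation during the install.
  bool PerformSourceCopySketch(FakeCowWriter& cow, const CopyOp& op) {
    return cow.AddCopy(op.dst_block, op.src_block, op.num_blocks);
  }

  int main() {
    FakeCowWriter cow;
    std::vector<CopyOp> ops{{10, 5, 1}, {15, 10, 1}, {20, 15, 2}};
    if (!InitSketch(cow, ops, /*supports_merge_sequence=*/true)) return 1;
    for (const auto& op : ops) PerformSourceCopySketch(cow, op);
    return 0;
  }

Spreading the AddCopy calls across the per-operation path is what lets
update_engine check for pause/cancel and report progress between them.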
Test: th
Bug: 274539246
Change-Id: I29d8e5486d88310a724311bfcb1cddfb8f6c9795
diff --git a/payload_consumer/vabc_partition_writer.cc b/payload_consumer/vabc_partition_writer.cc
index df9c0a6..2016af7 100644
--- a/payload_consumer/vabc_partition_writer.cc
+++ b/payload_consumer/vabc_partition_writer.cc
@@ -97,6 +97,40 @@
}
}
+bool VABCPartitionWriter::DoesDeviceSupportsXor() {
+ return dynamic_control_->GetVirtualAbCompressionXorFeatureFlag().IsEnabled();
+}
+
+bool VABCPartitionWriter::WriteAllCopyOps() {
+ const bool userSnapshots = android::base::GetBoolProperty(
+ "ro.virtual_ab.userspace.snapshots.enabled", false);
+ for (const auto& cow_op : partition_update_.merge_operations()) {
+ if (cow_op.type() != CowMergeOperation::COW_COPY) {
+ continue;
+ }
+ if (cow_op.dst_extent() == cow_op.src_extent()) {
+ continue;
+ }
+ if (userSnapshots) {
+ TEST_AND_RETURN_FALSE(cow_op.src_extent().num_blocks() != 0);
+ TEST_AND_RETURN_FALSE(
+ cow_writer_->AddCopy(cow_op.dst_extent().start_block(),
+ cow_op.src_extent().start_block(),
+ cow_op.src_extent().num_blocks()));
+ } else {
+ // Add blocks in reverse order, because snapuserd specifically prefers
+ // this ordering. Since we already eliminated all self-overlapping
+ // SOURCE_COPY during delta generation, this should be safe to do.
+ for (size_t i = cow_op.src_extent().num_blocks(); i > 0; i--) {
+ TEST_AND_RETURN_FALSE(
+ cow_writer_->AddCopy(cow_op.dst_extent().start_block() + i - 1,
+ cow_op.src_extent().start_block() + i - 1));
+ }
+ }
+ }
+ return true;
+}
+
bool VABCPartitionWriter::Init(const InstallPlan* install_plan,
bool source_may_exist,
size_t next_op_index) {
@@ -144,35 +178,21 @@
if (IsXorEnabled()) {
LOG(INFO) << "VABC XOR enabled for partition "
<< partition_update_.partition_name();
+ }
+ // When a merge sequence is present in the COW, snapuserd will merge blocks
+ // in the order specified by the merge sequence op. Hence we have the
+ // freedom to write COPY operations out of order. Delay processing of COPY
+ // ops so that update_engine can be more responsive with progress updates.
+ if (DoesDeviceSupportsXor()) {
+ LOG(INFO) << "Snapuserd supports XOR and merge sequence, writing merge "
+ "sequence and delay writing COPY operations";
TEST_AND_RETURN_FALSE(WriteMergeSequence(
partition_update_.merge_operations(), cow_writer_.get()));
- }
- const bool userSnapshots = android::base::GetBoolProperty(
- "ro.virtual_ab.userspace.snapshots.enabled", false);
-
- for (const auto& cow_op : partition_update_.merge_operations()) {
- if (cow_op.type() != CowMergeOperation::COW_COPY) {
- continue;
- }
- if (cow_op.dst_extent() == cow_op.src_extent()) {
- continue;
- }
- if (userSnapshots) {
- TEST_AND_RETURN_FALSE(cow_op.src_extent().num_blocks() != 0);
- TEST_AND_RETURN_FALSE(
- cow_writer_->AddCopy(cow_op.dst_extent().start_block(),
- cow_op.src_extent().start_block(),
- cow_op.src_extent().num_blocks()));
- } else {
- // Add blocks in reverse order, because snapused specifically prefers
- // this ordering. Since we already eliminated all self-overlapping
- // SOURCE_COPY during delta generation, this should be safe to do.
- for (size_t i = cow_op.src_extent().num_blocks(); i > 0; i--) {
- TEST_AND_RETURN_FALSE(
- cow_writer_->AddCopy(cow_op.dst_extent().start_block() + i - 1,
- cow_op.src_extent().start_block() + i - 1));
- }
- }
+ } else {
+ LOG(INFO) << "Snapuserd does not support merge sequence, writing all "
+ "COPY operations up front, this may take few "
+ "minutes.";
+ TEST_AND_RETURN_FALSE(WriteAllCopyOps());
}
cow_writer_->AddLabel(0);
}
@@ -253,12 +273,19 @@
// we still want to verify that all blocks contain expected data.
auto source_fd = verified_source_fd_.ChooseSourceFD(operation, error);
TEST_AND_RETURN_FALSE(source_fd != nullptr);
+ // Devices that don't support XOR also don't support the sequence op, so all
+ // COPY operations were already written up front, in strict merge order.
+ if (!DoesDeviceSupportsXor()) {
+ return true;
+ }
std::vector<CowOperation> converted;
const auto& src_extents = operation.src_extents();
const auto& dst_extents = operation.dst_extents();
BlockIterator it1{src_extents};
BlockIterator it2{dst_extents};
+ const bool userSnapshots = android::base::GetBoolProperty(
+ "ro.virtual_ab.userspace.snapshots.enabled", false);
while (!it1.is_end() && !it2.is_end()) {
const auto src_block = *it1;
const auto dst_block = *it2;
@@ -267,13 +294,30 @@
if (src_block == dst_block) {
continue;
}
- if (!copy_blocks_.ContainsBlock(dst_block)) {
+ if (copy_blocks_.ContainsBlock(dst_block)) {
+ push_back(&converted, {CowOperation::CowCopy, src_block, dst_block, 1});
+ } else {
push_back(&converted,
{CowOperation::CowReplace, src_block, dst_block, 1});
}
}
std::vector<uint8_t> buffer;
for (const auto& cow_op : converted) {
+ if (cow_op.op == CowOperation::CowCopy) {
+ if (userSnapshots) {
+ TEST_AND_RETURN_FALSE(cow_writer_->AddCopy(
+ cow_op.dst_block, cow_op.src_block, cow_op.block_count));
+ } else {
+ // Add blocks in reverse order, because snapuserd specifically prefers
+ // this ordering. Since we already eliminated all self-overlapping
+ // SOURCE_COPY during delta generation, this should be safe to do.
+ for (size_t i = cow_op.block_count; i > 0; i--) {
+ TEST_AND_RETURN_FALSE(cow_writer_->AddCopy(cow_op.dst_block + i - 1,
+ cow_op.src_block + i - 1));
+ }
+ }
+ continue;
+ }
buffer.resize(block_size_ * cow_op.block_count);
ssize_t bytes_read = 0;
TEST_AND_RETURN_FALSE(utils::ReadAll(source_fd,
diff --git a/payload_consumer/vabc_partition_writer.h b/payload_consumer/vabc_partition_writer.h
index 8393fe5..889f376 100644
--- a/payload_consumer/vabc_partition_writer.h
+++ b/payload_consumer/vabc_partition_writer.h
@@ -70,7 +70,9 @@
android::snapshot::ICowWriter* cow_writer);
private:
+ [[nodiscard]] bool DoesDeviceSupportsXor();
bool IsXorEnabled() const noexcept { return xor_map_.size() > 0; }
+ [[nodiscard]] bool WriteAllCopyOps();
std::unique_ptr<android::snapshot::ISnapshotWriter> cow_writer_;
[[nodiscard]] std::unique_ptr<ExtentWriter> CreateBaseExtentWriter();
diff --git a/payload_consumer/vabc_partition_writer_unittest.cc b/payload_consumer/vabc_partition_writer_unittest.cc
index 0bfa23b..b053bbf 100644
--- a/payload_consumer/vabc_partition_writer_unittest.cc
+++ b/payload_consumer/vabc_partition_writer_unittest.cc
@@ -24,11 +24,13 @@
#include <libsnapshot/cow_writer.h>
#include <libsnapshot/mock_snapshot_writer.h>
+#include "update_engine/common/error_code.h"
#include "update_engine/common/hash_calculator.h"
#include "update_engine/common/mock_dynamic_partition_control.h"
#include "update_engine/common/utils.h"
#include "update_engine/payload_consumer/vabc_partition_writer.h"
#include "update_engine/payload_generator/delta_diff_generator.h"
+#include "update_engine/payload_generator/extent_ranges.h"
#include "update_engine/update_metadata.pb.h"
namespace chromeos_update_engine {
@@ -55,6 +57,7 @@
}
protected:
+ void EmitBlockTest(bool xor_enabled);
CowMergeOperation* AddMergeOp(PartitionUpdate* partition,
std::array<size_t, 2> src_extent,
std::array<size_t, 2> dst_extent,
@@ -136,7 +139,19 @@
ASSERT_TRUE(writer_.Init(&install_plan_, true, 0));
}
-TEST_F(VABCPartitionWriterTest, EmitBlockTest) {
+TEST_F(VABCPartitionWriterTest, EmitBlockTestXor) {
+ return EmitBlockTest(true);
+}
+
+TEST_F(VABCPartitionWriterTest, EmitBlockTestNoXor) {
+ return EmitBlockTest(false);
+}
+
+void VABCPartitionWriterTest::EmitBlockTest(bool xor_enabled) {
+ if (xor_enabled) {
+ ON_CALL(dynamic_control_, GetVirtualAbCompressionXorFeatureFlag())
+ .WillByDefault(Return(FeatureFlag(FeatureFlag::Value::LAUNCH)));
+ }
AddMergeOp(&partition_update_, {5, 1}, {10, 1}, CowMergeOperation::COW_COPY);
AddMergeOp(&partition_update_, {10, 1}, {15, 1}, CowMergeOperation::COW_COPY);
AddMergeOp(&partition_update_, {15, 2}, {20, 2}, CowMergeOperation::COW_COPY);
@@ -144,25 +159,54 @@
VABCPartitionWriter writer_{
partition_update_, install_part_, &dynamic_control_, kBlockSize};
EXPECT_CALL(dynamic_control_, OpenCowWriter(fake_part_name, _, false))
- .WillOnce(Invoke(
- [](const std::string&, const std::optional<std::string>&, bool) {
- auto cow_writer =
- std::make_unique<android::snapshot::MockSnapshotWriter>(
- android::snapshot::CowOptions{});
- Sequence s;
- ON_CALL(*cow_writer, EmitCopy(_, _, _)).WillByDefault(Return(true));
- ON_CALL(*cow_writer, EmitLabel(_)).WillByDefault(Return(true));
- ON_CALL(*cow_writer, Initialize()).WillByDefault(Return(true));
- EXPECT_CALL(*cow_writer, Initialize()).InSequence(s);
- EXPECT_CALL(*cow_writer, EmitCopy(10, 5, 1)).InSequence(s);
- EXPECT_CALL(*cow_writer, EmitCopy(15, 10, 1)).InSequence(s);
- // libsnapshot want blocks in reverser order, so 21 goes before 20
- EXPECT_CALL(*cow_writer, EmitCopy(20, 15, 2)).InSequence(s);
+ .WillOnce(Invoke([xor_enabled](const std::string&,
+ const std::optional<std::string>&,
+ bool) {
+ auto cow_writer =
+ std::make_unique<android::snapshot::MockSnapshotWriter>(
+ android::snapshot::CowOptions{});
+ ON_CALL(*cow_writer, EmitCopy(_, _, _)).WillByDefault(Return(true));
+ ON_CALL(*cow_writer, EmitLabel(_)).WillByDefault(Return(true));
+ ON_CALL(*cow_writer, Initialize()).WillByDefault(Return(true));
+ EXPECT_CALL(*cow_writer, Initialize());
+ if (xor_enabled) {
+ EXPECT_CALL(*cow_writer, EmitSequenceData(_, _))
+ .WillOnce(Return(true));
+ EXPECT_CALL(*cow_writer, EmitCopy(10, 5, 1));
+ EXPECT_CALL(*cow_writer, EmitCopy(15, 10, 1));
+ // libsnapshot wants blocks in reverse order, so block 21 goes before 20
+ EXPECT_CALL(*cow_writer, EmitCopy(20, 15, 2));
- EXPECT_CALL(*cow_writer, EmitCopy(25, 20, 1)).InSequence(s);
- return cow_writer;
- }));
+ EXPECT_CALL(*cow_writer, EmitCopy(25, 20, 1));
+ EXPECT_CALL(*cow_writer, Finalize());
+ } else {
+ Sequence s;
+ EXPECT_CALL(*cow_writer, EmitCopy(10, 5, 1)).InSequence(s);
+ EXPECT_CALL(*cow_writer, EmitCopy(15, 10, 1)).InSequence(s);
+ // libsnapshot wants blocks in reverse order, so block 21 goes before 20
+ EXPECT_CALL(*cow_writer, EmitCopy(20, 15, 2)).InSequence(s);
+
+ EXPECT_CALL(*cow_writer, EmitCopy(25, 20, 1)).InSequence(s);
+ }
+ return cow_writer;
+ }));
ASSERT_TRUE(writer_.Init(&install_plan_, true, 0));
+ if (!xor_enabled) {
+ return;
+ }
+ InstallOperation install_op;
+ install_op.set_type(InstallOperation::SOURCE_COPY);
+ *install_op.add_src_extents() = ExtentForRange(5, 1);
+ *install_op.add_src_extents() = ExtentForRange(10, 1);
+ *install_op.add_src_extents() = ExtentForRange(15, 2);
+ *install_op.add_src_extents() = ExtentForRange(20, 1);
+
+ *install_op.add_dst_extents() = ExtentForRange(10, 1);
+ *install_op.add_dst_extents() = ExtentForRange(15, 1);
+ *install_op.add_dst_extents() = ExtentForRange(20, 2);
+ *install_op.add_dst_extents() = ExtentForRange(25, 1);
+ ErrorCode error{};
+ ASSERT_TRUE(writer_.PerformSourceCopyOperation(install_op, &error));
}
std::string GetNoopBSDIFF(size_t data_size) {