Merge changes from topic "CowXorOp"
* changes:
libsnapshot: Don't PrepMergeOps on resume
snapuserd: Add support for Xor ops in snapuserd
snapuserd: Rename Read Ahead Iterator
snapuserd: Add XorSink
libsnapshot: Clone worker readers from snapuserd
libsnapshot: Add support for Xor ops in Cow Format
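
Before the per-file changes, the shape of the new op is worth stating once: a Xor op stores, as its payload in the COW file, the XOR of the new block against a window of the base image that begins at an arbitrary byte offset (so the window may straddle two base blocks). Replaying the op means reading that window and folding the stored delta back in. A minimal illustrative sketch of that reconstruction, assuming 4 KiB blocks (not a libsnapshot API):

    #include <cstddef>
    #include <cstdint>

    constexpr size_t kBlockSize = 4096;

    // cow_delta: the (decompressed) payload stored for one Xor op.
    // old_data:  kBlockSize bytes read from the base image at the op's byte offset.
    // out:       receives the reconstructed new block.
    void ReplayXorBlock(const uint8_t* cow_delta, const uint8_t* old_data, uint8_t* out) {
        for (size_t i = 0; i < kBlockSize; i++) {
            out[i] = cow_delta[i] ^ old_data[i];
        }
    }
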
diff --git a/fs_mgr/libsnapshot/cow_api_test.cpp b/fs_mgr/libsnapshot/cow_api_test.cpp
index ecfdefe..6066309 100644
--- a/fs_mgr/libsnapshot/cow_api_test.cpp
+++ b/fs_mgr/libsnapshot/cow_api_test.cpp
@@ -140,6 +140,85 @@
ASSERT_TRUE(iter->Done());
}
+TEST_F(CowTest, ReadWriteXor) {
+ CowOptions options;
+ options.cluster_ops = 0;
+ CowWriter writer(options);
+
+ ASSERT_TRUE(writer.Initialize(cow_->fd));
+
+ std::string data = "This is some data, believe it";
+ data.resize(options.block_size, '\0');
+
+ ASSERT_TRUE(writer.AddCopy(10, 20));
+ ASSERT_TRUE(writer.AddXorBlocks(50, data.data(), data.size(), 24, 10));
+ ASSERT_TRUE(writer.AddZeroBlocks(51, 2));
+ ASSERT_TRUE(writer.Finalize());
+
+ ASSERT_EQ(lseek(cow_->fd, 0, SEEK_SET), 0);
+
+ CowReader reader;
+ CowHeader header;
+ CowFooter footer;
+ ASSERT_TRUE(reader.Parse(cow_->fd));
+ ASSERT_TRUE(reader.GetHeader(&header));
+ ASSERT_TRUE(reader.GetFooter(&footer));
+ ASSERT_EQ(header.magic, kCowMagicNumber);
+ ASSERT_EQ(header.major_version, kCowVersionMajor);
+ ASSERT_EQ(header.minor_version, kCowVersionMinor);
+ ASSERT_EQ(header.block_size, options.block_size);
+ ASSERT_EQ(footer.op.num_ops, 4);
+
+ auto iter = reader.GetOpIter();
+ ASSERT_NE(iter, nullptr);
+ ASSERT_FALSE(iter->Done());
+ auto op = &iter->Get();
+
+ ASSERT_EQ(op->type, kCowCopyOp);
+ ASSERT_EQ(op->compression, kCowCompressNone);
+ ASSERT_EQ(op->data_length, 0);
+ ASSERT_EQ(op->new_block, 10);
+ ASSERT_EQ(op->source, 20);
+
+ StringSink sink;
+
+ iter->Next();
+ ASSERT_FALSE(iter->Done());
+ op = &iter->Get();
+
+ ASSERT_EQ(op->type, kCowXorOp);
+ ASSERT_EQ(op->compression, kCowCompressNone);
+ ASSERT_EQ(op->data_length, 4096);
+ ASSERT_EQ(op->new_block, 50);
+ ASSERT_EQ(op->source, 98314); // 4096 * 24 + 10
+ ASSERT_TRUE(reader.ReadData(*op, &sink));
+ ASSERT_EQ(sink.stream(), data);
+
+ iter->Next();
+ ASSERT_FALSE(iter->Done());
+ op = &iter->Get();
+
+    // Note: the AddZeroBlocks() call covers two blocks, so it emits two zero ops.
+ ASSERT_EQ(op->type, kCowZeroOp);
+ ASSERT_EQ(op->compression, kCowCompressNone);
+ ASSERT_EQ(op->data_length, 0);
+ ASSERT_EQ(op->new_block, 51);
+ ASSERT_EQ(op->source, 0);
+
+ iter->Next();
+ ASSERT_FALSE(iter->Done());
+ op = &iter->Get();
+
+ ASSERT_EQ(op->type, kCowZeroOp);
+ ASSERT_EQ(op->compression, kCowCompressNone);
+ ASSERT_EQ(op->data_length, 0);
+ ASSERT_EQ(op->new_block, 52);
+ ASSERT_EQ(op->source, 0);
+
+ iter->Next();
+ ASSERT_TRUE(iter->Done());
+}
+
TEST_F(CowTest, CompressGz) {
CowOptions options;
options.cluster_ops = 0;
@@ -1034,6 +1113,54 @@
ASSERT_FALSE(reader.Parse(cow_->fd));
}
+TEST_F(CowTest, ResumeSeqOp) {
+ CowOptions options;
+ auto writer = std::make_unique<CowWriter>(options);
+ const int seq_len = 10;
+ uint32_t sequence[seq_len];
+ for (int i = 0; i < seq_len; i++) {
+ sequence[i] = i + 1;
+ }
+
+ ASSERT_TRUE(writer->Initialize(cow_->fd));
+
+ ASSERT_TRUE(writer->AddSequenceData(seq_len, sequence));
+ ASSERT_TRUE(writer->AddZeroBlocks(1, seq_len / 2));
+ ASSERT_TRUE(writer->AddLabel(1));
+ ASSERT_TRUE(writer->AddZeroBlocks(1 + seq_len / 2, 1));
+
+ ASSERT_EQ(lseek(cow_->fd, 0, SEEK_SET), 0);
+ auto reader = std::make_unique<CowReader>();
+ ASSERT_TRUE(reader->Parse(cow_->fd, 1));
+ auto itr = reader->GetRevMergeOpIter();
+ ASSERT_TRUE(itr->Done());
+
+ writer = std::make_unique<CowWriter>(options);
+ ASSERT_TRUE(writer->InitializeAppend(cow_->fd, 1));
+ ASSERT_TRUE(writer->AddZeroBlocks(1 + seq_len / 2, seq_len / 2));
+ ASSERT_TRUE(writer->Finalize());
+
+ ASSERT_EQ(lseek(cow_->fd, 0, SEEK_SET), 0);
+
+ reader = std::make_unique<CowReader>();
+ ASSERT_TRUE(reader->Parse(cow_->fd));
+
+ auto iter = reader->GetRevMergeOpIter();
+
+ uint64_t expected_block = 10;
+ while (!iter->Done() && expected_block > 0) {
+ ASSERT_FALSE(iter->Done());
+ const auto& op = iter->Get();
+
+ ASSERT_EQ(op.new_block, expected_block);
+
+ iter->Next();
+ expected_block--;
+ }
+ ASSERT_EQ(expected_block, 0);
+ ASSERT_TRUE(iter->Done());
+}
+
TEST_F(CowTest, RevMergeOpItrTest) {
CowOptions options;
options.cluster_ops = 5;
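
In the ReadWriteXor test above, the expected source value follows from how the writer encodes the base location of a Xor op as a byte address: old_block * block_size + offset. Checking the test's constants (sketch only):

    #include <cassert>
    #include <cstdint>

    int main() {
        const uint64_t block_size = 4096;  // default CowOptions block size
        const uint64_t old_block = 24;     // AddXorBlocks() old_block argument
        const uint64_t offset = 10;        // AddXorBlocks() offset argument
        assert(old_block * block_size + offset == 98314);  // matches ASSERT_EQ(op->source, 98314)
        return 0;
    }
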
diff --git a/fs_mgr/libsnapshot/cow_format.cpp b/fs_mgr/libsnapshot/cow_format.cpp
index 3085f80..8e6bec7 100644
--- a/fs_mgr/libsnapshot/cow_format.cpp
+++ b/fs_mgr/libsnapshot/cow_format.cpp
@@ -37,6 +37,8 @@
os << "kCowLabelOp, ";
else if (op.type == kCowClusterOp)
os << "kCowClusterOp ";
+ else if (op.type == kCowXorOp)
+ os << "kCowXorOp ";
else if (op.type == kCowSequenceOp)
os << "kCowSequenceOp ";
else if (op.type == kCowFooterOp)
@@ -61,7 +63,7 @@
int64_t GetNextOpOffset(const CowOperation& op, uint32_t cluster_ops) {
if (op.type == kCowClusterOp) {
return op.source;
- } else if (op.type == kCowReplaceOp && cluster_ops == 0) {
+ } else if ((op.type == kCowReplaceOp || op.type == kCowXorOp) && cluster_ops == 0) {
return op.data_length;
} else {
return 0;
@@ -93,6 +95,7 @@
bool IsOrderedOp(const CowOperation& op) {
switch (op.type) {
case kCowCopyOp:
+ case kCowXorOp:
return true;
default:
return false;
diff --git a/fs_mgr/libsnapshot/cow_reader.cpp b/fs_mgr/libsnapshot/cow_reader.cpp
index ace6f59..773d978 100644
--- a/fs_mgr/libsnapshot/cow_reader.cpp
+++ b/fs_mgr/libsnapshot/cow_reader.cpp
@@ -34,7 +34,11 @@
namespace android {
namespace snapshot {
-CowReader::CowReader() : fd_(-1), header_(), fd_size_(0) {}
+CowReader::CowReader()
+ : fd_(-1),
+ header_(),
+ fd_size_(0),
+ merge_op_blocks_(std::make_shared<std::vector<uint32_t>>()) {}
static void SHA256(const void*, size_t, uint8_t[]) {
#if 0
@@ -45,6 +49,23 @@
#endif
}
+std::unique_ptr<CowReader> CowReader::CloneCowReader() {
+ auto cow = std::make_unique<CowReader>();
+ cow->owned_fd_.reset();
+ cow->header_ = header_;
+ cow->footer_ = footer_;
+ cow->fd_size_ = fd_size_;
+ cow->last_label_ = last_label_;
+ cow->ops_ = ops_;
+ cow->merge_op_blocks_ = merge_op_blocks_;
+ cow->block_map_ = block_map_;
+ cow->num_total_data_ops_ = num_total_data_ops_;
+ cow->num_ordered_ops_to_merge_ = num_ordered_ops_to_merge_;
+ cow->has_seq_ops_ = has_seq_ops_;
+ cow->data_loc_ = data_loc_;
+ return cow;
+}
+
bool CowReader::InitForMerge(android::base::unique_fd&& fd) {
owned_fd_ = std::move(fd);
fd_ = owned_fd_.get();
@@ -133,11 +154,14 @@
if (!ParseOps(label)) {
return false;
}
+ // If we're resuming a write, we're not ready to merge
+ if (label.has_value()) return true;
return PrepMergeOps();
}
bool CowReader::ParseOps(std::optional<uint64_t> label) {
uint64_t pos;
+ auto data_loc = std::make_shared<std::unordered_map<uint64_t, uint64_t>>();
// Skip the scratch space
if (header_.major_version >= 2 && (header_.buffer_size > 0)) {
@@ -157,6 +181,13 @@
// Reading a v1 version of COW which doesn't have buffer_size.
header_.buffer_size = 0;
}
+ uint64_t data_pos = 0;
+
+ if (header_.cluster_ops) {
+ data_pos = pos + header_.cluster_ops * sizeof(CowOperation);
+ } else {
+ data_pos = pos + sizeof(CowOperation);
+ }
auto ops_buffer = std::make_shared<std::vector<CowOperation>>();
uint64_t current_op_num = 0;
@@ -177,7 +208,11 @@
while (current_op_num < ops_buffer->size()) {
auto& current_op = ops_buffer->data()[current_op_num];
current_op_num++;
+ if (current_op.type == kCowXorOp) {
+ data_loc->insert({current_op.new_block, data_pos});
+ }
pos += sizeof(CowOperation) + GetNextOpOffset(current_op, header_.cluster_ops);
+ data_pos += current_op.data_length + GetNextDataOffset(current_op, header_.cluster_ops);
if (current_op.type == kCowClusterOp) {
break;
@@ -268,6 +303,7 @@
ops_ = ops_buffer;
ops_->shrink_to_fit();
+ data_loc_ = data_loc;
return true;
}
@@ -606,7 +642,13 @@
return false;
}
- CowDataStream stream(this, op.source, op.data_length);
+ uint64_t offset;
+ if (op.type == kCowXorOp) {
+ offset = data_loc_->at(op.new_block);
+ } else {
+ offset = op.source;
+ }
+ CowDataStream stream(this, offset, op.data_length);
decompressor->set_stream(&stream);
decompressor->set_sink(sink);
return decompressor->Decompress(header_.block_size);
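
Because a Xor op reuses CowOperation::source for the base-image byte address, the reader can no longer locate the op's payload through source. ParseOps therefore keeps a running data position and records it per Xor op in data_loc_, which ReadData consults; CloneCowReader then shares the parsed structures (ops_, merge_op_blocks_, block_map_, data_loc_) so worker threads do not reparse the COW. A simplified sketch of the payload bookkeeping (assumed types, not the full parser):

    #include <cstdint>
    #include <unordered_map>
    #include <vector>

    struct Op { uint8_t type; uint64_t new_block; uint16_t data_length; };
    constexpr uint8_t kXorOp = 6;  // kCowXorOp from cow_format.h

    // first_data_pos: file offset where payload data begins, after the op/cluster area.
    std::unordered_map<uint64_t, uint64_t> MapXorPayloads(const std::vector<Op>& ops,
                                                          uint64_t first_data_pos) {
        std::unordered_map<uint64_t, uint64_t> data_loc;
        uint64_t data_pos = first_data_pos;
        for (const auto& op : ops) {
            if (op.type == kXorOp) data_loc[op.new_block] = data_pos;
            data_pos += op.data_length;  // payloads are laid out in op order
        }
        return data_loc;
    }
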
diff --git a/fs_mgr/libsnapshot/cow_writer.cpp b/fs_mgr/libsnapshot/cow_writer.cpp
index ef30e32..5ce1d3b 100644
--- a/fs_mgr/libsnapshot/cow_writer.cpp
+++ b/fs_mgr/libsnapshot/cow_writer.cpp
@@ -58,10 +58,24 @@
return EmitRawBlocks(new_block_start, data, size);
}
-bool ICowWriter::AddXorBlocks(uint32_t /*new_block_start*/, const void* /*data*/, size_t /*size*/,
- uint32_t /*old_block*/, uint16_t /*offset*/) {
- LOG(ERROR) << "AddXorBlocks not yet implemented";
- return false;
+bool ICowWriter::AddXorBlocks(uint32_t new_block_start, const void* data, size_t size,
+ uint32_t old_block, uint16_t offset) {
+ if (size % options_.block_size != 0) {
+        LOG(ERROR) << "AddXorBlocks: size " << size << " is not a multiple of "
+ << options_.block_size;
+ return false;
+ }
+
+ uint64_t num_blocks = size / options_.block_size;
+ uint64_t last_block = new_block_start + num_blocks - 1;
+ if (!ValidateNewBlock(last_block)) {
+ return false;
+ }
+    if (offset >= options_.block_size) {
+        LOG(ERROR) << "AddXorBlocks: offset " << offset << " is not less than "
+                   << options_.block_size;
+        return false;
+    }
+ return EmitXorBlocks(new_block_start, data, size, old_block, offset);
}
bool ICowWriter::AddZeroBlocks(uint64_t new_block_start, uint64_t num_blocks) {
@@ -278,13 +292,27 @@
}
bool CowWriter::EmitRawBlocks(uint64_t new_block_start, const void* data, size_t size) {
+ return EmitBlocks(new_block_start, data, size, 0, 0, kCowReplaceOp);
+}
+
+bool CowWriter::EmitXorBlocks(uint32_t new_block_start, const void* data, size_t size,
+ uint32_t old_block, uint16_t offset) {
+ return EmitBlocks(new_block_start, data, size, old_block, offset, kCowXorOp);
+}
+
+bool CowWriter::EmitBlocks(uint64_t new_block_start, const void* data, size_t size,
+ uint64_t old_block, uint16_t offset, uint8_t type) {
const uint8_t* iter = reinterpret_cast<const uint8_t*>(data);
CHECK(!merge_in_progress_);
for (size_t i = 0; i < size / header_.block_size; i++) {
CowOperation op = {};
- op.type = kCowReplaceOp;
op.new_block = new_block_start + i;
- op.source = next_data_pos_;
+ op.type = type;
+ if (type == kCowXorOp) {
+ op.source = (old_block + i) * header_.block_size + offset;
+ } else {
+ op.source = next_data_pos_;
+ }
if (compression_) {
auto data = Compress(iter, header_.block_size);
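
EmitBlocks is now the shared path for Replace and Xor data. The only difference is the meaning of source: for a Replace op it is the payload's position inside the COW file, while for a Xor op it is the byte address in the base image, advancing one block per emitted op. A sketch of that encoding, where i is the block index within a single AddXorBlocks() call:

    #include <cstdint>

    // Byte location in the base image that block i of this request is XORed against.
    uint64_t XorSourceForBlock(uint64_t old_block, uint16_t offset, uint64_t block_size,
                               uint64_t i) {
        return (old_block + i) * block_size + offset;
    }
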
diff --git a/fs_mgr/libsnapshot/include/libsnapshot/cow_format.h b/fs_mgr/libsnapshot/include/libsnapshot/cow_format.h
index 464046b..c15682a 100644
--- a/fs_mgr/libsnapshot/include/libsnapshot/cow_format.h
+++ b/fs_mgr/libsnapshot/include/libsnapshot/cow_format.h
@@ -138,6 +138,8 @@
// For Label operations, this is the value of the applied label.
//
// For Cluster operations, this is the length of the following data region
+ //
+ // For Xor operations, this is the byte location in the source image.
uint64_t source;
} __attribute__((packed));
@@ -148,6 +150,7 @@
static constexpr uint8_t kCowZeroOp = 3;
static constexpr uint8_t kCowLabelOp = 4;
static constexpr uint8_t kCowClusterOp = 5;
+static constexpr uint8_t kCowXorOp = 6;
static constexpr uint8_t kCowSequenceOp = 7;
static constexpr uint8_t kCowFooterOp = -1;
diff --git a/fs_mgr/libsnapshot/include/libsnapshot/cow_reader.h b/fs_mgr/libsnapshot/include/libsnapshot/cow_reader.h
index 6c3059c..0786e82 100644
--- a/fs_mgr/libsnapshot/include/libsnapshot/cow_reader.h
+++ b/fs_mgr/libsnapshot/include/libsnapshot/cow_reader.h
@@ -136,6 +136,9 @@
void CloseCowFd() { owned_fd_ = {}; }
+    // Creates a clone of the current CowReader without the file descriptors
+ std::unique_ptr<CowReader> CloneCowReader();
+
private:
bool ParseOps(std::optional<uint64_t> label);
bool PrepMergeOps();
@@ -153,6 +156,7 @@
uint64_t num_total_data_ops_;
uint64_t num_ordered_ops_to_merge_;
bool has_seq_ops_;
+ std::shared_ptr<std::unordered_map<uint64_t, uint64_t>> data_loc_;
};
} // namespace snapshot
diff --git a/fs_mgr/libsnapshot/include/libsnapshot/cow_writer.h b/fs_mgr/libsnapshot/include/libsnapshot/cow_writer.h
index 4a807fb..e17b5c6 100644
--- a/fs_mgr/libsnapshot/include/libsnapshot/cow_writer.h
+++ b/fs_mgr/libsnapshot/include/libsnapshot/cow_writer.h
@@ -86,6 +86,8 @@
protected:
virtual bool EmitCopy(uint64_t new_block, uint64_t old_block) = 0;
virtual bool EmitRawBlocks(uint64_t new_block_start, const void* data, size_t size) = 0;
+ virtual bool EmitXorBlocks(uint32_t new_block_start, const void* data, size_t size,
+ uint32_t old_block, uint16_t offset) = 0;
virtual bool EmitZeroBlocks(uint64_t new_block_start, uint64_t num_blocks) = 0;
virtual bool EmitLabel(uint64_t label) = 0;
virtual bool EmitSequenceData(size_t num_ops, const uint32_t* data) = 0;
@@ -122,6 +124,8 @@
protected:
virtual bool EmitCopy(uint64_t new_block, uint64_t old_block) override;
virtual bool EmitRawBlocks(uint64_t new_block_start, const void* data, size_t size) override;
+ virtual bool EmitXorBlocks(uint32_t new_block_start, const void* data, size_t size,
+ uint32_t old_block, uint16_t offset) override;
virtual bool EmitZeroBlocks(uint64_t new_block_start, uint64_t num_blocks) override;
virtual bool EmitLabel(uint64_t label) override;
virtual bool EmitSequenceData(size_t num_ops, const uint32_t* data) override;
@@ -129,6 +133,8 @@
private:
bool EmitCluster();
bool EmitClusterIfNeeded();
+ bool EmitBlocks(uint64_t new_block_start, const void* data, size_t size, uint64_t old_block,
+ uint16_t offset, uint8_t type);
void SetupHeaders();
bool ParseOptions();
bool OpenForWrite();
diff --git a/fs_mgr/libsnapshot/include/libsnapshot/snapshot_writer.h b/fs_mgr/libsnapshot/include/libsnapshot/snapshot_writer.h
index c00dafa..b09e1ae 100644
--- a/fs_mgr/libsnapshot/include/libsnapshot/snapshot_writer.h
+++ b/fs_mgr/libsnapshot/include/libsnapshot/snapshot_writer.h
@@ -74,6 +74,8 @@
protected:
bool EmitCopy(uint64_t new_block, uint64_t old_block) override;
bool EmitRawBlocks(uint64_t new_block_start, const void* data, size_t size) override;
+ bool EmitXorBlocks(uint32_t new_block_start, const void* data, size_t size, uint32_t old_block,
+ uint16_t offset) override;
bool EmitZeroBlocks(uint64_t new_block_start, uint64_t num_blocks) override;
bool EmitLabel(uint64_t label) override;
bool EmitSequenceData(size_t num_ops, const uint32_t* data) override;
@@ -102,6 +104,8 @@
protected:
bool EmitRawBlocks(uint64_t new_block_start, const void* data, size_t size) override;
bool EmitZeroBlocks(uint64_t new_block_start, uint64_t num_blocks) override;
+ bool EmitXorBlocks(uint32_t new_block_start, const void* data, size_t size, uint32_t old_block,
+ uint16_t offset) override;
bool EmitCopy(uint64_t new_block, uint64_t old_block) override;
bool EmitLabel(uint64_t label) override;
bool EmitSequenceData(size_t num_ops, const uint32_t* data) override;
diff --git a/fs_mgr/libsnapshot/snapshot_reader.cpp b/fs_mgr/libsnapshot/snapshot_reader.cpp
index 5ee8e25..6546c2a 100644
--- a/fs_mgr/libsnapshot/snapshot_reader.cpp
+++ b/fs_mgr/libsnapshot/snapshot_reader.cpp
@@ -221,7 +221,7 @@
private:
size_t ignore_start_;
- char discard_[4096];
+ char discard_[BLOCK_SZ];
};
ssize_t CompressedSnapshotReader::ReadBlock(uint64_t chunk, IByteSink* sink, size_t start_offset,
@@ -277,6 +277,29 @@
errno = EIO;
return -1;
}
+ } else if (op->type == kCowXorOp) {
+ borrowed_fd fd = GetSourceFd();
+ if (fd < 0) {
+ // GetSourceFd sets errno.
+ return -1;
+ }
+
+ off64_t offset = op->source + start_offset;
+ char data[BLOCK_SZ];
+ if (!android::base::ReadFullyAtOffset(fd, &data, bytes_to_read, offset)) {
+ PLOG(ERROR) << "read " << *source_device_;
+ // ReadFullyAtOffset sets errno.
+ return -1;
+ }
+ PartialSink partial_sink(buffer, bytes_to_read, start_offset);
+ if (!cow_->ReadData(*op, &partial_sink)) {
+ LOG(ERROR) << "CompressedSnapshotReader failed to read xor op";
+ errno = EIO;
+ return -1;
+ }
+ for (size_t i = 0; i < bytes_to_read; i++) {
+ ((char*)buffer)[i] ^= data[i];
+ }
} else {
LOG(ERROR) << "CompressedSnapshotReader unknown op type: " << uint32_t(op->type);
errno = EINVAL;
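
In the CompressedSnapshotReader path above, a partial block read works like a full one: the base image is read starting at op->source + start_offset, the stored delta for the same span is decompressed into the caller's buffer via PartialSink, and the two are folded together byte by byte. A trivial sketch of that final step:

    #include <cstddef>

    // dst already holds the decompressed COW delta; base holds the matching old-image bytes.
    void XorInPlace(char* dst, const char* base, size_t n) {
        for (size_t i = 0; i < n; i++) {
            dst[i] ^= base[i];
        }
    }
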
diff --git a/fs_mgr/libsnapshot/snapshot_reader_test.cpp b/fs_mgr/libsnapshot/snapshot_reader_test.cpp
index 9373059..078f16e 100644
--- a/fs_mgr/libsnapshot/snapshot_reader_test.cpp
+++ b/fs_mgr/libsnapshot/snapshot_reader_test.cpp
@@ -63,7 +63,9 @@
void WriteCow(ISnapshotWriter* writer) {
std::string new_block = MakeNewBlockString();
+ std::string xor_block = MakeXorBlockString();
+ ASSERT_TRUE(writer->AddXorBlocks(1, xor_block.data(), xor_block.size(), 0, kBlockSize / 2));
ASSERT_TRUE(writer->AddCopy(3, 0));
ASSERT_TRUE(writer->AddRawBlocks(5, new_block.data(), new_block.size()));
ASSERT_TRUE(writer->AddZeroBlocks(7, 2));
@@ -75,7 +77,7 @@
ASSERT_NE(reader, nullptr);
// Test that unchanged blocks are not modified.
- std::unordered_set<size_t> changed_blocks = {3, 5, 7, 8};
+ std::unordered_set<size_t> changed_blocks = {1, 3, 5, 7, 8};
for (size_t i = 0; i < kBlockCount; i++) {
if (changed_blocks.count(i)) {
continue;
@@ -88,6 +90,17 @@
}
// Test that we can read back our modified blocks.
+ std::string data(kBlockSize, 0);
+ std::string offsetblock = base_blocks_[0].substr(kBlockSize / 2, kBlockSize / 2) +
+ base_blocks_[1].substr(0, kBlockSize / 2);
+ ASSERT_EQ(offsetblock.size(), kBlockSize);
+ ASSERT_EQ(reader->Seek(1 * kBlockSize, SEEK_SET), 1 * kBlockSize);
+ ASSERT_EQ(reader->Read(data.data(), data.size()), kBlockSize);
+ for (int i = 0; i < 100; i++) {
+ data[i] = (char)~(data[i]);
+ }
+ ASSERT_EQ(data, offsetblock);
+
std::string block(kBlockSize, 0);
ASSERT_EQ(reader->Seek(3 * kBlockSize, SEEK_SET), 3 * kBlockSize);
ASSERT_EQ(reader->Read(block.data(), block.size()), kBlockSize);
@@ -141,6 +154,12 @@
return new_block;
}
+ std::string MakeXorBlockString() {
+ std::string data(100, -1);
+ data.resize(kBlockSize, 0);
+ return data;
+ }
+
std::unique_ptr<TemporaryFile> base_;
std::unique_ptr<TemporaryFile> cow_;
std::vector<std::string> base_blocks_;
diff --git a/fs_mgr/libsnapshot/snapshot_writer.cpp b/fs_mgr/libsnapshot/snapshot_writer.cpp
index 34b3e87..3eda08e 100644
--- a/fs_mgr/libsnapshot/snapshot_writer.cpp
+++ b/fs_mgr/libsnapshot/snapshot_writer.cpp
@@ -106,6 +106,11 @@
return cow_->AddRawBlocks(new_block_start, data, size);
}
+bool CompressedSnapshotWriter::EmitXorBlocks(uint32_t new_block_start, const void* data,
+ size_t size, uint32_t old_block, uint16_t offset) {
+ return cow_->AddXorBlocks(new_block_start, data, size, old_block, offset);
+}
+
bool CompressedSnapshotWriter::EmitZeroBlocks(uint64_t new_block_start, uint64_t num_blocks) {
return cow_->AddZeroBlocks(new_block_start, num_blocks);
}
@@ -157,6 +162,11 @@
return true;
}
+bool OnlineKernelSnapshotWriter::EmitXorBlocks(uint32_t, const void*, size_t, uint32_t, uint16_t) {
+ LOG(ERROR) << "EmitXorBlocks not implemented.";
+ return false;
+}
+
bool OnlineKernelSnapshotWriter::EmitZeroBlocks(uint64_t new_block_start, uint64_t num_blocks) {
std::string zeroes(options_.block_size, 0);
for (uint64_t i = 0; i < num_blocks; i++) {
diff --git a/fs_mgr/libsnapshot/snapuserd/cow_snapuserd_test.cpp b/fs_mgr/libsnapshot/snapuserd/cow_snapuserd_test.cpp
index a718328..f4aef44 100644
--- a/fs_mgr/libsnapshot/snapuserd/cow_snapuserd_test.cpp
+++ b/fs_mgr/libsnapshot/snapuserd/cow_snapuserd_test.cpp
@@ -259,7 +259,7 @@
void CowSnapuserdTest::CreateBaseDevice() {
unique_fd rnd_fd;
- total_base_size_ = (size_ * 4);
+ total_base_size_ = (size_ * 5);
base_fd_ = CreateTempFile("base_device", total_base_size_);
ASSERT_GE(base_fd_, 0);
@@ -304,6 +304,11 @@
offset += size_;
ASSERT_EQ(ReadFullyAtOffset(snapshot_fd, snapuserd_buffer.get(), size_, offset), true);
ASSERT_EQ(memcmp(snapuserd_buffer.get(), (char*)orig_buffer_.get() + (size_ * 3), size_), 0);
+
+ // XOR
+ offset += size_;
+ ASSERT_EQ(ReadFullyAtOffset(snapshot_fd, snapuserd_buffer.get(), size_, offset), true);
+ ASSERT_EQ(memcmp(snapuserd_buffer.get(), (char*)orig_buffer_.get() + (size_ * 4), size_), 0);
}
void CowSnapuserdTest::CreateCowDeviceWithCopyOverlap_2() {
@@ -428,9 +433,10 @@
ASSERT_TRUE(writer.Initialize(cow_system_->fd));
size_t num_blocks = size_ / options.block_size;
- size_t blk_end_copy = num_blocks * 2;
+ size_t blk_end_copy = num_blocks * 3;
size_t source_blk = num_blocks - 1;
size_t blk_src_copy = blk_end_copy - 1;
+ uint16_t xor_offset = 5;
size_t x = num_blocks;
while (1) {
@@ -443,6 +449,11 @@
blk_src_copy -= 1;
}
+ for (size_t i = num_blocks; i > 0; i--) {
+ ASSERT_TRUE(writer.AddXorBlocks(num_blocks + i - 1,
+ &random_buffer_1_.get()[options.block_size * (i - 1)],
+ options.block_size, 2 * num_blocks + i - 1, xor_offset));
+ }
// Flush operations
ASSERT_TRUE(writer.Finalize());
// Construct the buffer required for validation
@@ -451,7 +462,11 @@
ASSERT_EQ(android::base::ReadFullyAtOffset(base_fd_, orig_buffer_.get(), total_base_size_, 0),
true);
// Merged Buffer
- memmove(orig_buffer_.get(), (char*)orig_buffer_.get() + size_, size_);
+ memmove(orig_buffer_.get(), (char*)orig_buffer_.get() + 2 * size_, size_);
+ memmove(orig_buffer_.get() + size_, (char*)orig_buffer_.get() + 2 * size_ + xor_offset, size_);
+ for (int i = 0; i < size_; i++) {
+ orig_buffer_.get()[size_ + i] ^= random_buffer_1_.get()[i];
+ }
}
void CowSnapuserdTest::CreateCowDeviceOrderedOps() {
@@ -473,6 +488,7 @@
offset += 1_MiB;
}
+ memset(random_buffer_1_.get(), 0, size_);
CowOptions options;
options.compression = "gz";
@@ -483,7 +499,8 @@
size_t num_blocks = size_ / options.block_size;
size_t x = num_blocks;
size_t source_blk = 0;
- size_t blk_src_copy = num_blocks;
+ size_t blk_src_copy = 2 * num_blocks;
+ uint16_t xor_offset = 5;
while (1) {
ASSERT_TRUE(writer.AddCopy(source_blk, blk_src_copy));
@@ -496,6 +513,8 @@
blk_src_copy += 1;
}
+ ASSERT_TRUE(writer.AddXorBlocks(num_blocks, random_buffer_1_.get(), size_, 2 * num_blocks,
+ xor_offset));
// Flush operations
ASSERT_TRUE(writer.Finalize());
// Construct the buffer required for validation
@@ -504,7 +523,11 @@
ASSERT_EQ(android::base::ReadFullyAtOffset(base_fd_, orig_buffer_.get(), total_base_size_, 0),
true);
// Merged Buffer
- memmove(orig_buffer_.get(), (char*)orig_buffer_.get() + size_, size_);
+ memmove(orig_buffer_.get(), (char*)orig_buffer_.get() + 2 * size_, size_);
+ memmove(orig_buffer_.get() + size_, (char*)orig_buffer_.get() + 2 * size_ + xor_offset, size_);
+ for (int i = 0; i < size_; i++) {
+ orig_buffer_.get()[size_ + i] ^= random_buffer_1_.get()[i];
+ }
}
void CowSnapuserdTest::CreateCowDevice() {
@@ -538,6 +561,17 @@
size_t source_blk = num_blocks - 1;
size_t blk_src_copy = blk_end_copy - 1;
+ uint32_t sequence[num_blocks * 2];
+ // Sequence for Copy ops
+ for (int i = 0; i < num_blocks; i++) {
+ sequence[i] = num_blocks - 1 - i;
+ }
+ // Sequence for Xor ops
+ for (int i = 0; i < num_blocks; i++) {
+ sequence[num_blocks + i] = 5 * num_blocks - 1 - i;
+ }
+ ASSERT_TRUE(writer.AddSequenceData(2 * num_blocks, sequence));
+
size_t x = num_blocks;
while (1) {
ASSERT_TRUE(writer.AddCopy(source_blk, blk_src_copy));
@@ -563,6 +597,11 @@
ASSERT_TRUE(writer.AddRawBlocks(blk_random2_replace_start, random_buffer_1_.get(), size_));
+ size_t blk_xor_start = blk_random2_replace_start + num_blocks;
+ size_t xor_offset = BLOCK_SZ / 2;
+ ASSERT_TRUE(writer.AddXorBlocks(blk_xor_start, random_buffer_1_.get(), size_, num_blocks,
+ xor_offset));
+
// Flush operations
ASSERT_TRUE(writer.Finalize());
// Construct the buffer required for validation
@@ -572,6 +611,13 @@
memcpy((char*)orig_buffer_.get() + size_, random_buffer_1_.get(), size_);
memcpy((char*)orig_buffer_.get() + (size_ * 2), (void*)zero_buffer.c_str(), size_);
memcpy((char*)orig_buffer_.get() + (size_ * 3), random_buffer_1_.get(), size_);
+ ASSERT_EQ(android::base::ReadFullyAtOffset(base_fd_, &orig_buffer_.get()[size_ * 4], size_,
+ size_ + xor_offset),
+ true);
+ for (int i = 0; i < size_; i++) {
+ orig_buffer_.get()[(size_ * 4) + i] =
+ (uint8_t)(orig_buffer_.get()[(size_ * 4) + i] ^ random_buffer_1_.get()[i]);
+ }
}
void CowSnapuserdTest::InitCowDevice() {
@@ -1039,6 +1085,35 @@
}
}
+TEST(Snapuserd_Test, xor_buffer) {
+ std::string data = "Test String";
+ std::string jumbled = {0x0C, 0x2A, 0x21, 0x54, 0x73, 0x27, 0x06, 0x1B, 0x07, 0x09, 0x46};
+ std::string result = "XOR String!";
+
+ BufferSink sink;
+ XorSink xor_sink;
+ sink.Initialize(sizeof(struct dm_user_header) + 10);
+ int buffsize = 5;
+ xor_sink.Initialize(&sink, buffsize);
+
+ void* buff = sink.GetPayloadBuffer(data.length());
+ memcpy(buff, data.data(), data.length());
+
+ size_t actual;
+ size_t count = 0;
+ while (count < data.length()) {
+ void* xor_buff = xor_sink.GetBuffer(10, &actual);
+ ASSERT_EQ(actual, buffsize);
+ ASSERT_NE(xor_buff, nullptr);
+ memcpy(xor_buff, jumbled.data() + count, buffsize);
+ xor_sink.ReturnData(xor_buff, actual);
+ count += actual;
+ }
+
+ std::string answer = reinterpret_cast<char*>(sink.GetPayloadBufPtr());
+ ASSERT_EQ(answer, result);
+}
+
TEST(Snapuserd_Test, Snapshot_Metadata) {
CowSnapuserdMetadataTest harness;
harness.Setup();
diff --git a/fs_mgr/libsnapshot/snapuserd/snapuserd.cpp b/fs_mgr/libsnapshot/snapuserd/snapuserd.cpp
index 31d0221..0aacab2 100644
--- a/fs_mgr/libsnapshot/snapuserd/snapuserd.cpp
+++ b/fs_mgr/libsnapshot/snapuserd/snapuserd.cpp
@@ -53,6 +53,10 @@
return true;
}
+std::unique_ptr<CowReader> Snapuserd::CloneReaderForWorker() {
+ return reader_->CloneCowReader();
+}
+
bool Snapuserd::CommitMerge(int num_merge_ops) {
struct CowHeader* ch = reinterpret_cast<struct CowHeader*>(mapped_addr_);
ch->num_merge_ops += num_merge_ops;
@@ -334,7 +338,7 @@
CowHeader header;
CowOptions options;
bool metadata_found = false;
- int replace_ops = 0, zero_ops = 0, copy_ops = 0;
+ int replace_ops = 0, zero_ops = 0, copy_ops = 0, xor_ops = 0;
SNAP_LOG(DEBUG) << "ReadMetadata: Parsing cow file";
@@ -439,12 +443,12 @@
std::vector<const CowOperation*> vec;
std::set<uint64_t> dest_blocks;
std::set<uint64_t> source_blocks;
- size_t pending_copy_ops = exceptions_per_area_ - num_ops;
- uint64_t total_copy_ops = reader_->get_num_ordered_ops_to_merge();
+ size_t pending_ordered_ops = exceptions_per_area_ - num_ops;
+ uint64_t total_ordered_ops = reader_->get_num_ordered_ops_to_merge();
SNAP_LOG(DEBUG) << " Processing copy-ops at Area: " << vec_.size()
<< " Number of replace/zero ops completed in this area: " << num_ops
- << " Pending copy ops for this area: " << pending_copy_ops;
+                        << " Pending ordered ops for this area: " << pending_ordered_ops;
while (!cowop_rm_iter->Done()) {
do {
@@ -497,24 +501,34 @@
// the merge of operations are done based on the ops present
// in the file.
//===========================================================
+ uint64_t block_source = cow_op->source;
+ uint64_t block_offset = 0;
+ if (cow_op->type == kCowXorOp) {
+ block_source /= BLOCK_SZ;
+ block_offset = cow_op->source % BLOCK_SZ;
+ }
if (prev_id.has_value()) {
- if (dest_blocks.count(cow_op->new_block) || source_blocks.count(cow_op->source)) {
+ if (dest_blocks.count(cow_op->new_block) || source_blocks.count(block_source) ||
+ (block_offset > 0 && source_blocks.count(block_source + 1))) {
break;
}
}
metadata_found = true;
- pending_copy_ops -= 1;
+ pending_ordered_ops -= 1;
vec.push_back(cow_op);
- dest_blocks.insert(cow_op->source);
+ dest_blocks.insert(block_source);
+ if (block_offset > 0) {
+ dest_blocks.insert(block_source + 1);
+ }
source_blocks.insert(cow_op->new_block);
prev_id = cow_op->new_block;
cowop_rm_iter->Next();
- } while (!cowop_rm_iter->Done() && pending_copy_ops);
+ } while (!cowop_rm_iter->Done() && pending_ordered_ops);
data_chunk_id = GetNextAllocatableChunkId(data_chunk_id);
- SNAP_LOG(DEBUG) << "Batch Merge copy-ops of size: " << vec.size()
+ SNAP_LOG(DEBUG) << "Batch Merge copy-ops/xor-ops of size: " << vec.size()
<< " Area: " << vec_.size() << " Area offset: " << offset
- << " Pending-copy-ops in this area: " << pending_copy_ops;
+ << " Pending-ordered-ops in this area: " << pending_ordered_ops;
for (size_t i = 0; i < vec.size(); i++) {
struct disk_exception* de =
@@ -528,13 +542,18 @@
chunk_vec_.push_back(std::make_pair(ChunkToSector(data_chunk_id), cow_op));
offset += sizeof(struct disk_exception);
num_ops += 1;
- copy_ops++;
+ if (cow_op->type == kCowCopyOp) {
+ copy_ops++;
+        } else {  // cow_op->type == kCowXorOp
+ xor_ops++;
+ }
+
if (read_ahead_feature_) {
read_ahead_ops_.push_back(cow_op);
}
SNAP_LOG(DEBUG) << num_ops << ":"
- << " Copy-op: "
+ << " Ordered-op: "
<< " Old-chunk: " << de->old_chunk << " New-chunk: " << de->new_chunk;
if (num_ops == exceptions_per_area_) {
@@ -554,22 +573,22 @@
SNAP_LOG(DEBUG) << "ReadMetadata() completed; Number of Areas: " << vec_.size();
}
- if (!(pending_copy_ops == 0)) {
- SNAP_LOG(ERROR)
- << "Invalid pending_copy_ops: expected: 0 found: " << pending_copy_ops;
+ if (!(pending_ordered_ops == 0)) {
+ SNAP_LOG(ERROR) << "Invalid pending_ordered_ops: expected: 0 found: "
+ << pending_ordered_ops;
return false;
}
- pending_copy_ops = exceptions_per_area_;
+ pending_ordered_ops = exceptions_per_area_;
}
data_chunk_id = GetNextAllocatableChunkId(data_chunk_id);
- total_copy_ops -= 1;
+ total_ordered_ops -= 1;
/*
* Split the number of ops based on the size of read-ahead buffer
* region. We need to ensure that kernel doesn't issue IO on blocks
* which are not read by the read-ahead thread.
*/
- if (read_ahead_feature_ && (total_copy_ops % num_ra_ops_per_iter == 0)) {
+ if (read_ahead_feature_ && (total_ordered_ops % num_ra_ops_per_iter == 0)) {
data_chunk_id = GetNextAllocatableChunkId(data_chunk_id);
}
}
@@ -598,8 +617,8 @@
SNAP_LOG(INFO) << "ReadMetadata completed. Final-chunk-id: " << data_chunk_id
<< " Num Sector: " << ChunkToSector(data_chunk_id)
<< " Replace-ops: " << replace_ops << " Zero-ops: " << zero_ops
- << " Copy-ops: " << copy_ops << " Areas: " << vec_.size()
- << " Num-ops-merged: " << header.num_merge_ops
+ << " Copy-ops: " << copy_ops << " Xor-ops: " << xor_ops
+ << " Areas: " << vec_.size() << " Num-ops-merged: " << header.num_merge_ops
<< " Total-data-ops: " << reader_->get_num_total_data_ops();
// Total number of sectors required for creating dm-user device
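
The overlap and merge-ordering checks above hinge on one detail: a Copy op's source names a base block directly, while a Xor op's source is a byte address, which touches a second base block whenever the intra-block offset is non-zero. A sketch of which base blocks an ordered op depends on (assumed helper, mirroring the checks above; op type values as in cow_format.h):

    #include <cstdint>
    #include <vector>

    constexpr uint64_t kBlockSz = 4096;
    constexpr uint8_t kCowCopyOp = 1;
    constexpr uint8_t kCowXorOp = 6;

    std::vector<uint64_t> BaseBlocksTouched(uint8_t type, uint64_t source) {
        if (type == kCowCopyOp) return {source};  // Copy ops store a block number
        uint64_t block = source / kBlockSz;       // Xor ops store a byte address
        uint64_t offset = source % kBlockSz;
        std::vector<uint64_t> blocks = {block};
        if (offset > 0) blocks.push_back(block + 1);  // the window spills into the next block
        return blocks;
    }
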
diff --git a/fs_mgr/libsnapshot/snapuserd/snapuserd.h b/fs_mgr/libsnapshot/snapuserd/snapuserd.h
index 95d2f77..b223e89 100644
--- a/fs_mgr/libsnapshot/snapuserd/snapuserd.h
+++ b/fs_mgr/libsnapshot/snapuserd/snapuserd.h
@@ -107,6 +107,20 @@
size_t buffer_size_;
};
+class XorSink : public IByteSink {
+ public:
+ void Initialize(BufferSink* sink, size_t size);
+ void Reset();
+ void* GetBuffer(size_t requested, size_t* actual) override;
+ bool ReturnData(void* buffer, size_t len) override;
+
+ private:
+ BufferSink* bufsink_;
+ std::unique_ptr<uint8_t[]> buffer_;
+ size_t buffer_size_;
+ size_t returned_;
+};
+
class Snapuserd;
class ReadAheadThread {
@@ -116,10 +130,10 @@
bool RunThread();
private:
- void InitializeIter();
- bool IterDone();
- void IterNext();
- const CowOperation* GetIterOp();
+ void InitializeRAIter();
+ bool RAIterDone();
+ void RAIterNext();
+ const CowOperation* GetRAOpIter();
void InitializeBuffer();
bool InitializeFds();
@@ -129,7 +143,7 @@
}
bool ReadAheadIOStart();
- void PrepareReadAhead(uint64_t* source_block, int* pending_ops, std::vector<uint64_t>& blocks);
+ void PrepareReadAhead(uint64_t* source_offset, int* pending_ops, std::vector<uint64_t>& blocks);
bool ReconstructDataFromCow();
void CheckOverlap(const CowOperation* cow_op);
@@ -187,7 +201,9 @@
// Processing COW operations
bool ProcessCowOp(const CowOperation* cow_op);
bool ProcessReplaceOp(const CowOperation* cow_op);
+ // Handles Copy and Xor
bool ProcessCopyOp(const CowOperation* cow_op);
+ bool ProcessXorOp(const CowOperation* cow_op);
bool ProcessZeroOp();
bool ReadFromBaseDevice(const CowOperation* cow_op);
@@ -206,6 +222,7 @@
std::unique_ptr<CowReader> reader_;
BufferSink bufsink_;
+ XorSink xorsink_;
std::string cow_device_;
std::string backing_store_device_;
@@ -244,6 +261,7 @@
void* GetExceptionBuffer(size_t i) { return vec_[i].get(); }
bool InitializeWorkers();
+ std::unique_ptr<CowReader> CloneReaderForWorker();
std::shared_ptr<Snapuserd> GetSharedPtr() { return shared_from_this(); }
std::vector<std::pair<sector_t, const CowOperation*>>& GetChunkVec() { return chunk_vec_; }
diff --git a/fs_mgr/libsnapshot/snapuserd/snapuserd_readahead.cpp b/fs_mgr/libsnapshot/snapuserd/snapuserd_readahead.cpp
index 6fc26a6..b868eed 100644
--- a/fs_mgr/libsnapshot/snapuserd/snapuserd_readahead.cpp
+++ b/fs_mgr/libsnapshot/snapuserd/snapuserd_readahead.cpp
@@ -172,24 +172,37 @@
}
void ReadAheadThread::CheckOverlap(const CowOperation* cow_op) {
- if (dest_blocks_.count(cow_op->new_block) || source_blocks_.count(cow_op->source)) {
+ uint64_t source_block = cow_op->source;
+ uint64_t source_offset = 0;
+ if (cow_op->type == kCowXorOp) {
+ source_block /= BLOCK_SZ;
+ source_offset = cow_op->source % BLOCK_SZ;
+ }
+ if (dest_blocks_.count(cow_op->new_block) || source_blocks_.count(source_block) ||
+ (source_offset > 0 && source_blocks_.count(source_block + 1))) {
overlap_ = true;
}
- dest_blocks_.insert(cow_op->source);
+ dest_blocks_.insert(source_block);
+ if (source_offset > 0) {
+ dest_blocks_.insert(source_block + 1);
+ }
source_blocks_.insert(cow_op->new_block);
}
-void ReadAheadThread::PrepareReadAhead(uint64_t* source_block, int* pending_ops,
+void ReadAheadThread::PrepareReadAhead(uint64_t* source_offset, int* pending_ops,
std::vector<uint64_t>& blocks) {
int num_ops = *pending_ops;
int nr_consecutive = 0;
- if (!IterDone() && num_ops) {
- // Get the first block
- const CowOperation* cow_op = GetIterOp();
- *source_block = cow_op->source;
- IterNext();
+ if (!RAIterDone() && num_ops) {
+ // Get the first block with offset
+ const CowOperation* cow_op = GetRAOpIter();
+ *source_offset = cow_op->source;
+ if (cow_op->type == kCowCopyOp) {
+ *source_offset *= BLOCK_SZ;
+ }
+ RAIterNext();
num_ops -= 1;
nr_consecutive = 1;
blocks.push_back(cow_op->new_block);
@@ -201,15 +214,19 @@
/*
* Find number of consecutive blocks working backwards.
*/
- while (!IterDone() && num_ops) {
- const CowOperation* op = GetIterOp();
- if (op->source != (*source_block - nr_consecutive)) {
+ while (!RAIterDone() && num_ops) {
+ const CowOperation* op = GetRAOpIter();
+ uint64_t next_offset = op->source;
+ if (cow_op->type == kCowCopyOp) {
+ next_offset *= BLOCK_SZ;
+ }
+ if (next_offset != (*source_offset - nr_consecutive * BLOCK_SZ)) {
break;
}
nr_consecutive += 1;
num_ops -= 1;
blocks.push_back(op->new_block);
- IterNext();
+ RAIterNext();
if (!overlap_) {
CheckOverlap(op);
@@ -247,12 +264,12 @@
// We are done re-constructing the mapping; however, we need to make sure
// all the COW operations to-be merged are present in the re-constructed
// mapping.
- while (!IterDone()) {
- const CowOperation* op = GetIterOp();
+ while (!RAIterDone()) {
+ const CowOperation* op = GetRAOpIter();
if (read_ahead_buffer_map.find(op->new_block) != read_ahead_buffer_map.end()) {
num_ops -= 1;
snapuserd_->SetFinalBlockMerged(op->new_block);
- IterNext();
+ RAIterNext();
} else {
// Verify that we have covered all the ops which were re-constructed
// from COW device - These are the ops which are being
@@ -312,10 +329,10 @@
source_blocks_.clear();
while (true) {
- uint64_t source_block;
+ uint64_t source_offset;
int linear_blocks;
- PrepareReadAhead(&source_block, &num_ops, blocks);
+ PrepareReadAhead(&source_offset, &num_ops, blocks);
linear_blocks = blocks.size();
if (linear_blocks == 0) {
// No more blocks to read
@@ -324,7 +341,7 @@
}
// Get the first block in the consecutive set of blocks
- source_block = source_block + 1 - linear_blocks;
+ source_offset = source_offset - (linear_blocks - 1) * BLOCK_SZ;
size_t io_size = (linear_blocks * BLOCK_SZ);
num_ops -= linear_blocks;
total_blocks_merged += linear_blocks;
@@ -358,10 +375,12 @@
// Read from the base device consecutive set of blocks in one shot
if (!android::base::ReadFullyAtOffset(backing_store_fd_,
(char*)read_ahead_buffer_ + buffer_offset, io_size,
- source_block * BLOCK_SZ)) {
- SNAP_PLOG(ERROR) << "Copy-op failed. Read from backing store: " << backing_store_device_
- << "at block :" << source_block << " buffer_offset : " << buffer_offset
- << " io_size : " << io_size << " buf-addr : " << read_ahead_buffer_;
+ source_offset)) {
+ SNAP_PLOG(ERROR) << "Ordered-op failed. Read from backing store: "
+ << backing_store_device_ << "at block :" << source_offset / BLOCK_SZ
+ << " offset :" << source_offset % BLOCK_SZ
+ << " buffer_offset : " << buffer_offset << " io_size : " << io_size
+ << " buf-addr : " << read_ahead_buffer_;
snapuserd_->ReadAheadIOFailed();
return false;
@@ -394,10 +413,10 @@
return false;
}
- InitializeIter();
+ InitializeRAIter();
InitializeBuffer();
- while (!IterDone()) {
+ while (!RAIterDone()) {
if (!ReadAheadIOStart()) {
return false;
}
@@ -433,21 +452,21 @@
return true;
}
-void ReadAheadThread::InitializeIter() {
+void ReadAheadThread::InitializeRAIter() {
std::vector<const CowOperation*>& read_ahead_ops = snapuserd_->GetReadAheadOpsVec();
read_ahead_iter_ = read_ahead_ops.rbegin();
}
-bool ReadAheadThread::IterDone() {
+bool ReadAheadThread::RAIterDone() {
std::vector<const CowOperation*>& read_ahead_ops = snapuserd_->GetReadAheadOpsVec();
return read_ahead_iter_ == read_ahead_ops.rend();
}
-void ReadAheadThread::IterNext() {
+void ReadAheadThread::RAIterNext() {
read_ahead_iter_++;
}
-const CowOperation* ReadAheadThread::GetIterOp() {
+const CowOperation* ReadAheadThread::GetRAOpIter() {
return *read_ahead_iter_;
}
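
The read-ahead path now works in byte offsets rather than block numbers, since only Copy ops store a block number while Xor ops already store a byte address; PrepareReadAhead scales Copy sources by BLOCK_SZ and then looks for runs that step back exactly one block at a time. A small sketch of that normalization (assumed helpers, not the real functions):

    #include <cstdint>

    constexpr uint64_t kBlockSz = 4096;

    // is_copy: true for a Copy op (source is a block number), false for a Xor op
    // (source is already a byte address in the base image).
    uint64_t SourceByteOffset(bool is_copy, uint64_t source) {
        return is_copy ? source * kBlockSz : source;
    }

    // Two ordered ops extend a consecutive read-ahead run when the next one sits
    // exactly one block earlier in the base image.
    bool IsConsecutive(uint64_t prev_offset, uint64_t next_offset) {
        return next_offset == prev_offset - kBlockSz;
    }
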
diff --git a/fs_mgr/libsnapshot/snapuserd/snapuserd_worker.cpp b/fs_mgr/libsnapshot/snapuserd/snapuserd_worker.cpp
index 13d45fe..cdf9fe7 100644
--- a/fs_mgr/libsnapshot/snapuserd/snapuserd_worker.cpp
+++ b/fs_mgr/libsnapshot/snapuserd/snapuserd_worker.cpp
@@ -71,6 +71,39 @@
return msg->payload.buf;
}
+void XorSink::Initialize(BufferSink* sink, size_t size) {
+ bufsink_ = sink;
+ buffer_size_ = size;
+ returned_ = 0;
+ buffer_ = std::make_unique<uint8_t[]>(size);
+}
+
+void XorSink::Reset() {
+ returned_ = 0;
+}
+
+void* XorSink::GetBuffer(size_t requested, size_t* actual) {
+ if (requested > buffer_size_) {
+ *actual = buffer_size_;
+ } else {
+ *actual = requested;
+ }
+ return buffer_.get();
+}
+
+bool XorSink::ReturnData(void* buffer, size_t len) {
+ uint8_t* xor_data = reinterpret_cast<uint8_t*>(buffer);
+ uint8_t* buff = reinterpret_cast<uint8_t*>(bufsink_->GetPayloadBuffer(len + returned_));
+ if (buff == nullptr) {
+ return false;
+ }
+ for (size_t i = 0; i < len; i++) {
+ buff[returned_ + i] ^= xor_data[i];
+ }
+ returned_ += len;
+ return true;
+}
+
WorkerThread::WorkerThread(const std::string& cow_device, const std::string& backing_device,
const std::string& control_device, const std::string& misc_name,
std::shared_ptr<Snapuserd> snapuserd) {
@@ -105,11 +138,11 @@
}
bool WorkerThread::InitReader() {
- reader_ = std::make_unique<CowReader>();
+ reader_ = snapuserd_->CloneReaderForWorker();
+
if (!reader_->InitForMerge(std::move(cow_fd_))) {
return false;
}
-
return true;
}
@@ -150,10 +183,19 @@
}
SNAP_LOG(DEBUG) << " ReadFromBaseDevice...: new-block: " << cow_op->new_block
<< " Source: " << cow_op->source;
- if (!android::base::ReadFullyAtOffset(backing_store_fd_, buffer, BLOCK_SZ,
- cow_op->source * BLOCK_SZ)) {
- SNAP_PLOG(ERROR) << "Copy-op failed. Read from backing store: " << backing_store_device_
- << "at block :" << cow_op->source;
+ uint64_t offset = cow_op->source;
+ if (cow_op->type == kCowCopyOp) {
+ offset *= BLOCK_SZ;
+ }
+ if (!android::base::ReadFullyAtOffset(backing_store_fd_, buffer, BLOCK_SZ, offset)) {
+        std::string op;
+        if (cow_op->type == kCowCopyOp) {
+            op = "Copy-op";
+        } else {
+            op = "Xor-op";
+        }
+ SNAP_PLOG(ERROR) << op << " failed. Read from backing store: " << backing_store_device_
+ << "at block :" << offset / BLOCK_SZ << " offset:" << offset % BLOCK_SZ;
return false;
}
@@ -188,6 +230,23 @@
return true;
}
+bool WorkerThread::ProcessXorOp(const CowOperation* cow_op) {
+ if (!GetReadAheadPopulatedBuffer(cow_op)) {
+ SNAP_LOG(DEBUG) << " GetReadAheadPopulatedBuffer failed..."
+ << " new_block: " << cow_op->new_block;
+ if (!ReadFromBaseDevice(cow_op)) {
+ return false;
+ }
+ }
+ xorsink_.Reset();
+ if (!reader_->ReadData(*cow_op, &xorsink_)) {
+ SNAP_LOG(ERROR) << "ProcessXorOp failed for block " << cow_op->new_block;
+ return false;
+ }
+
+ return true;
+}
+
bool WorkerThread::ProcessZeroOp() {
// Zero out the entire block
void* buffer = bufsink_.GetPayloadBuffer(BLOCK_SZ);
@@ -219,6 +278,10 @@
return ProcessCopyOp(cow_op);
}
+ case kCowXorOp: {
+ return ProcessXorOp(cow_op);
+ }
+
default: {
SNAP_LOG(ERROR) << "Unknown operation-type found: " << cow_op->type;
}
@@ -470,10 +533,10 @@
}
int WorkerThread::GetNumberOfMergedOps(void* merged_buffer, void* unmerged_buffer, loff_t offset,
- int unmerged_exceptions, bool* copy_op, bool* commit) {
+ int unmerged_exceptions, bool* ordered_op, bool* commit) {
int merged_ops_cur_iter = 0;
std::unordered_map<uint64_t, void*>& read_ahead_buffer_map = snapuserd_->GetReadAheadMap();
- *copy_op = false;
+ *ordered_op = false;
std::vector<std::pair<sector_t, const CowOperation*>>& chunk_vec = snapuserd_->GetChunkVec();
// Find the operations which are merged in this cycle.
@@ -511,9 +574,9 @@
}
const CowOperation* cow_op = it->second;
- if (snapuserd_->IsReadAheadFeaturePresent() && cow_op->type == kCowCopyOp) {
- *copy_op = true;
- // Every single copy operation has to come from read-ahead
+ if (snapuserd_->IsReadAheadFeaturePresent() && IsOrderedOp(*cow_op)) {
+ *ordered_op = true;
+ // Every single ordered operation has to come from read-ahead
// cache.
if (read_ahead_buffer_map.find(cow_op->new_block) == read_ahead_buffer_map.end()) {
SNAP_LOG(ERROR)
@@ -557,7 +620,7 @@
bool WorkerThread::ProcessMergeComplete(chunk_t chunk, void* buffer) {
uint32_t stride = exceptions_per_area_ + 1;
const std::vector<std::unique_ptr<uint8_t[]>>& vec = snapuserd_->GetMetadataVec();
- bool copy_op = false;
+ bool ordered_op = false;
bool commit = false;
// ChunkID to vector index
@@ -582,7 +645,7 @@
}
int merged_ops_cur_iter = GetNumberOfMergedOps(buffer, vec[divresult.quot].get(), offset,
-                                                   unmerged_exceptions, &copy_op, &commit);
+ unmerged_exceptions, &ordered_op, &commit);
// There should be at least one operation merged in this cycle
if (!(merged_ops_cur_iter > 0)) {
@@ -590,7 +653,7 @@
return false;
}
- if (copy_op) {
+ if (ordered_op) {
if (commit) {
// Push the flushing logic to read-ahead thread so that merge thread
// can make forward progress. Sync will happen in the background
@@ -819,6 +882,7 @@
bool WorkerThread::RunThread() {
InitializeBufsink();
+ xorsink_.Initialize(&bufsink_, BLOCK_SZ);
if (!InitializeFds()) {
return false;
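
XorSink ties the pieces together in snapuserd: the payload buffer already holds the old data (from the read-ahead cache or the base device), CowReader::ReadData() streams the decompressed Xor delta through XorSink, and each ReturnData() call folds it into that buffer in place. A stand-alone sketch of the same flow, reusing the byte values from the xor_buffer test above (not the real class):

    #include <algorithm>
    #include <cstddef>
    #include <string>

    int main() {
        std::string dest = "Test String";  // old data already sitting in the buffer sink
        const char delta[] = {0x0C, 0x2A, 0x21, 0x54, 0x73, 0x27, 0x06, 0x1B, 0x07, 0x09, 0x46};
        size_t returned = 0;
        while (returned < dest.size()) {
            // 5-byte staging buffer, as in xor_sink.Initialize(&sink, buffsize) with buffsize = 5.
            size_t chunk = std::min<size_t>(5, dest.size() - returned);
            for (size_t i = 0; i < chunk; i++) {
                dest[returned + i] ^= delta[returned + i];
            }
            returned += chunk;
        }
        return dest == "XOR String!" ? 0 : 1;  // same expected result as the test
    }
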