Move partition writer to a separate file

Move the PartitionWriter class, the OpenFile and DiscardPartitionTail
helpers, and the BsdiffExtentFile/PuffinExtentStream adapters out of
delta_performer.{h,cc} into payload_consumer/partition_writer.{h,cc}.

Test: treehugger && serve an OTA update
Change-Id: I803692110841ce6d2207555ac7a682e9f989363d
diff --git a/Android.bp b/Android.bp
index acd3633..8e9ec17 100644
--- a/Android.bp
+++ b/Android.bp
@@ -178,6 +178,7 @@
"payload_consumer/payload_constants.cc",
"payload_consumer/payload_metadata.cc",
"payload_consumer/payload_verifier.cc",
+ "payload_consumer/partition_writer.cc",
"payload_consumer/postinstall_runner_action.cc",
"payload_consumer/verity_writer_android.cc",
"payload_consumer/xz_extent_writer.cc",
diff --git a/payload_consumer/delta_performer.cc b/payload_consumer/delta_performer.cc
index b49139e..87fc4cf 100644
--- a/payload_consumer/delta_performer.cc
+++ b/payload_consumer/delta_performer.cc
@@ -54,6 +54,7 @@
#include "update_engine/payload_consumer/extent_reader.h"
#include "update_engine/payload_consumer/extent_writer.h"
#include "update_engine/payload_consumer/partition_update_generator_interface.h"
+#include "update_engine/payload_consumer/partition_writer.h"
#if USE_FEC
#include "update_engine/payload_consumer/fec_file_descriptor.h"
#endif // USE_FEC
@@ -79,65 +80,6 @@
const int kUpdateStateOperationInvalid = -1;
const int kMaxResumedUpdateFailures = 10;
-const uint64_t kCacheSize = 1024 * 1024; // 1MB
-
-// Opens path for read/write. On success returns an open FileDescriptor
-// and sets *err to 0. On failure, sets *err to errno and returns nullptr.
-FileDescriptorPtr OpenFile(const char* path,
- int mode,
- bool cache_writes,
- int* err) {
- // Try to mark the block device read-only based on the mode. Ignore any
- // failure since this won't work when passing regular files.
- bool read_only = (mode & O_ACCMODE) == O_RDONLY;
- utils::SetBlockDeviceReadOnly(path, read_only);
-
- FileDescriptorPtr fd(new EintrSafeFileDescriptor());
- if (cache_writes && !read_only) {
- fd = FileDescriptorPtr(new CachedFileDescriptor(fd, kCacheSize));
- LOG(INFO) << "Caching writes.";
- }
- if (!fd->Open(path, mode, 000)) {
- *err = errno;
- PLOG(ERROR) << "Unable to open file " << path;
- return nullptr;
- }
- *err = 0;
- return fd;
-}
-
-// Discard the tail of the block device referenced by |fd|, from the offset
-// |data_size| until the end of the block device. Returns whether the data was
-// discarded.
-bool DiscardPartitionTail(const FileDescriptorPtr& fd, uint64_t data_size) {
- uint64_t part_size = fd->BlockDevSize();
- if (!part_size || part_size <= data_size)
- return false;
-
- struct blkioctl_request {
- int number;
- const char* name;
- };
- const vector<blkioctl_request> blkioctl_requests = {
- {BLKDISCARD, "BLKDISCARD"},
- {BLKSECDISCARD, "BLKSECDISCARD"},
-#ifdef BLKZEROOUT
- {BLKZEROOUT, "BLKZEROOUT"},
-#endif
- };
- for (const auto& req : blkioctl_requests) {
- int error = 0;
- if (fd->BlkIoctl(req.number, data_size, part_size - data_size, &error) &&
- error == 0) {
- return true;
- }
- LOG(WARNING) << "Error discarding the last "
- << (part_size - data_size) / 1024 << " KiB using ioctl("
- << req.name << ")";
- }
- return false;
-}
-
} // namespace
// Computes the ratio of |part| and |total|, scaled to |norm|, using integer
@@ -290,37 +232,6 @@
return err;
}
-int PartitionWriter::Close() {
- int err = 0;
- if (source_fd_ && !source_fd_->Close()) {
- err = errno;
- PLOG(ERROR) << "Error closing source partition";
- if (!err)
- err = 1;
- }
- source_fd_.reset();
- source_path_.clear();
-
- if (target_fd_ && !target_fd_->Close()) {
- err = errno;
- PLOG(ERROR) << "Error closing target partition";
- if (!err)
- err = 1;
- }
- target_fd_.reset();
- target_path_.clear();
-
- if (source_ecc_fd_ && !source_ecc_fd_->Close()) {
- err = errno;
- PLOG(ERROR) << "Error closing ECC source partition";
- if (!err)
- err = 1;
- }
- source_ecc_fd_.reset();
- source_ecc_open_failure_ = false;
- return -err;
-}
-
bool DeltaPerformer::OpenCurrentPartition() {
if (current_partition_ >= partitions_.size())
return false;
@@ -344,86 +255,6 @@
return partition_writer_->Init(install_plan_, source_may_exist);
}
-bool PartitionWriter::Init(const InstallPlan* install_plan,
- bool source_may_exist) {
- const PartitionUpdate& partition = partition_update_;
- uint32_t source_slot = install_plan->source_slot;
- uint32_t target_slot = install_plan->target_slot;
-
- // We shouldn't open the source partition in certain cases, e.g. some dynamic
- // partitions in delta payload, partitions included in the full payload for
- // partial updates. Use the source size as the indicator.
- if (source_may_exist && install_part_.source_size > 0) {
- source_path_ = install_part_.source_path;
- int err;
- source_fd_ = OpenFile(source_path_.c_str(), O_RDONLY, false, &err);
- if (!source_fd_) {
- LOG(ERROR) << "Unable to open source partition "
- << partition.partition_name() << " on slot "
- << BootControlInterface::SlotName(source_slot) << ", file "
- << source_path_;
- return false;
- }
- }
-
- target_path_ = install_part_.target_path;
- int err;
-
- int flags = O_RDWR;
- if (!interactive_)
- flags |= O_DSYNC;
-
- LOG(INFO) << "Opening " << target_path_ << " partition with"
- << (interactive_ ? "out" : "") << " O_DSYNC";
-
- target_fd_ = OpenFile(target_path_.c_str(), flags, true, &err);
- if (!target_fd_) {
- LOG(ERROR) << "Unable to open target partition "
- << partition.partition_name() << " on slot "
- << BootControlInterface::SlotName(target_slot) << ", file "
- << target_path_;
- return false;
- }
-
- LOG(INFO) << "Applying " << partition.operations().size()
- << " operations to partition \"" << partition.partition_name()
- << "\"";
-
- // Discard the end of the partition, but ignore failures.
- DiscardPartitionTail(target_fd_, install_part_.target_size);
-
- return true;
-}
-
-bool PartitionWriter::OpenCurrentECCPartition() {
- // No support for ECC for full payloads.
- // Full Paylods should not have any operation that requires ECCPartition.
- if (source_ecc_fd_)
- return true;
-
- if (source_ecc_open_failure_)
- return false;
-
-#if USE_FEC
- const PartitionUpdate& partition = partition_update_;
- const InstallPlan::Partition& install_part = install_part_;
- std::string path = install_part.source_path;
- FileDescriptorPtr fd(new FecFileDescriptor());
- if (!fd->Open(path.c_str(), O_RDONLY, 0)) {
- PLOG(ERROR) << "Unable to open ECC source partition "
- << partition.partition_name() << ", file " << path;
- source_ecc_open_failure_ = true;
- return false;
- }
- source_ecc_fd_ = fd;
-#else
- // No support for ECC compiled.
- source_ecc_open_failure_ = true;
-#endif // USE_FEC
-
- return !source_ecc_open_failure_;
-}
-
namespace {
void LogPartitionInfoHash(const PartitionInfo& info, const string& tag) {
@@ -1024,25 +855,6 @@
return true;
}
-bool PartitionWriter::PerformReplaceOperation(const InstallOperation& operation,
- const void* data,
- size_t count) {
- // Setup the ExtentWriter stack based on the operation type.
- std::unique_ptr<ExtentWriter> writer = std::make_unique<DirectExtentWriter>();
-
- if (operation.type() == InstallOperation::REPLACE_BZ) {
- writer.reset(new BzipExtentWriter(std::move(writer)));
- } else if (operation.type() == InstallOperation::REPLACE_XZ) {
- writer.reset(new XzExtentWriter(std::move(writer)));
- }
-
- TEST_AND_RETURN_FALSE(
- writer->Init(target_fd_, operation.dst_extents(), block_size_));
- TEST_AND_RETURN_FALSE(writer->Write(data, operation.data_length()));
-
- return target_fd_->Flush();
-}
-
bool DeltaPerformer::PerformZeroOrDiscardOperation(
const InstallOperation& operation) {
CHECK(operation.type() == InstallOperation::DISCARD ||
@@ -1051,42 +863,10 @@
// These operations have no blob.
TEST_AND_RETURN_FALSE(!operation.has_data_offset());
TEST_AND_RETURN_FALSE(!operation.has_data_length());
+
return partition_writer_->PerformZeroOrDiscardOperation(operation);
}
-bool PartitionWriter::PerformZeroOrDiscardOperation(
- const InstallOperation& operation) {
-#ifdef BLKZEROOUT
- bool attempt_ioctl = true;
- int request =
- (operation.type() == InstallOperation::ZERO ? BLKZEROOUT : BLKDISCARD);
-#else // !defined(BLKZEROOUT)
- bool attempt_ioctl = false;
- int request = 0;
-#endif // !defined(BLKZEROOUT)
-
- brillo::Blob zeros;
- for (const Extent& extent : operation.dst_extents()) {
- const uint64_t start = extent.start_block() * block_size_;
- const uint64_t length = extent.num_blocks() * block_size_;
- if (attempt_ioctl) {
- int result = 0;
- if (target_fd_->BlkIoctl(request, start, length, &result) && result == 0)
- continue;
- attempt_ioctl = false;
- }
- // In case of failure, we fall back to writing 0 to the selected region.
- zeros.resize(16 * block_size_);
- for (uint64_t offset = 0; offset < length; offset += zeros.size()) {
- uint64_t chunk_length =
- min(length - offset, static_cast<uint64_t>(zeros.size()));
- TEST_AND_RETURN_FALSE(utils::PWriteAll(
- target_fd_, zeros.data(), chunk_length, start + offset));
- }
- }
- return target_fd_->Flush();
-}
-
bool PartitionWriter::ValidateSourceHash(const brillo::Blob& calculated_hash,
const InstallOperation& operation,
const FileDescriptorPtr source_fd,
@@ -1134,172 +914,6 @@
return partition_writer_->PerformSourceCopyOperation(operation, error);
}
-bool PartitionWriter::PerformSourceCopyOperation(
- const InstallOperation& operation, ErrorCode* error) {
- TEST_AND_RETURN_FALSE(source_fd_ != nullptr);
-
- // The device may optimize the SOURCE_COPY operation.
- // Being this a device-specific optimization let DynamicPartitionController
- // decide it the operation should be skipped.
- const PartitionUpdate& partition = partition_update_;
- const auto& partition_control = dynamic_control_;
-
- InstallOperation buf;
- bool should_optimize = partition_control->OptimizeOperation(
- partition.partition_name(), operation, &buf);
- const InstallOperation& optimized = should_optimize ? buf : operation;
-
- if (operation.has_src_sha256_hash()) {
- bool read_ok;
- brillo::Blob source_hash;
- brillo::Blob expected_source_hash(operation.src_sha256_hash().begin(),
- operation.src_sha256_hash().end());
-
- // We fall back to use the error corrected device if the hash of the raw
- // device doesn't match or there was an error reading the source partition.
- // Note that this code will also fall back if writing the target partition
- // fails.
- if (should_optimize) {
- // Hash operation.src_extents(), then copy optimized.src_extents to
- // optimized.dst_extents.
- read_ok =
- fd_utils::ReadAndHashExtents(
- source_fd_, operation.src_extents(), block_size_, &source_hash) &&
- fd_utils::CopyAndHashExtents(source_fd_,
- optimized.src_extents(),
- target_fd_,
- optimized.dst_extents(),
- block_size_,
- nullptr /* skip hashing */);
- } else {
- read_ok = fd_utils::CopyAndHashExtents(source_fd_,
- operation.src_extents(),
- target_fd_,
- operation.dst_extents(),
- block_size_,
- &source_hash);
- }
- if (read_ok && expected_source_hash == source_hash)
- return true;
- LOG(WARNING) << "Source hash from RAW device mismatched, attempting to "
- "correct using ECC";
- if (!OpenCurrentECCPartition()) {
- // The following function call will return false since the source hash
- // mismatches, but we still want to call it so it prints the appropriate
- // log message.
- return ValidateSourceHash(source_hash, operation, source_fd_, error);
- }
-
- LOG(WARNING) << "Source hash from RAW device mismatched: found "
- << base::HexEncode(source_hash.data(), source_hash.size())
- << ", expected "
- << base::HexEncode(expected_source_hash.data(),
- expected_source_hash.size());
- if (should_optimize) {
- TEST_AND_RETURN_FALSE(fd_utils::ReadAndHashExtents(
- source_ecc_fd_, operation.src_extents(), block_size_, &source_hash));
- TEST_AND_RETURN_FALSE(
- fd_utils::CopyAndHashExtents(source_ecc_fd_,
- optimized.src_extents(),
- target_fd_,
- optimized.dst_extents(),
- block_size_,
- nullptr /* skip hashing */));
- } else {
- TEST_AND_RETURN_FALSE(
- fd_utils::CopyAndHashExtents(source_ecc_fd_,
- operation.src_extents(),
- target_fd_,
- operation.dst_extents(),
- block_size_,
- &source_hash));
- }
- TEST_AND_RETURN_FALSE(
- ValidateSourceHash(source_hash, operation, source_ecc_fd_, error));
- // At this point reading from the error corrected device worked, but
- // reading from the raw device failed, so this is considered a recovered
- // failure.
- source_ecc_recovered_failures_++;
- } else {
- // When the operation doesn't include a source hash, we attempt the error
- // corrected device first since we can't verify the block in the raw device
- // at this point, but we fall back to the raw device since the error
- // corrected device can be shorter or not available.
-
- if (OpenCurrentECCPartition() &&
- fd_utils::CopyAndHashExtents(source_ecc_fd_,
- optimized.src_extents(),
- target_fd_,
- optimized.dst_extents(),
- block_size_,
- nullptr)) {
- return true;
- }
- TEST_AND_RETURN_FALSE(fd_utils::CopyAndHashExtents(source_fd_,
- optimized.src_extents(),
- target_fd_,
- optimized.dst_extents(),
- block_size_,
- nullptr));
- }
- return target_fd_->Flush();
-}
-
-FileDescriptorPtr PartitionWriter::ChooseSourceFD(
- const InstallOperation& operation, ErrorCode* error) {
- if (source_fd_ == nullptr) {
- LOG(ERROR) << "ChooseSourceFD fail: source_fd_ == nullptr";
- return nullptr;
- }
-
- if (!operation.has_src_sha256_hash()) {
- // When the operation doesn't include a source hash, we attempt the error
- // corrected device first since we can't verify the block in the raw device
- // at this point, but we first need to make sure all extents are readable
- // since the error corrected device can be shorter or not available.
- if (OpenCurrentECCPartition() &&
- fd_utils::ReadAndHashExtents(
- source_ecc_fd_, operation.src_extents(), block_size_, nullptr)) {
- return source_ecc_fd_;
- }
- return source_fd_;
- }
-
- brillo::Blob source_hash;
- brillo::Blob expected_source_hash(operation.src_sha256_hash().begin(),
- operation.src_sha256_hash().end());
- if (fd_utils::ReadAndHashExtents(
- source_fd_, operation.src_extents(), block_size_, &source_hash) &&
- source_hash == expected_source_hash) {
- return source_fd_;
- }
- // We fall back to use the error corrected device if the hash of the raw
- // device doesn't match or there was an error reading the source partition.
- if (!OpenCurrentECCPartition()) {
- // The following function call will return false since the source hash
- // mismatches, but we still want to call it so it prints the appropriate
- // log message.
- ValidateSourceHash(source_hash, operation, source_fd_, error);
- return nullptr;
- }
- LOG(WARNING) << "Source hash from RAW device mismatched: found "
- << base::HexEncode(source_hash.data(), source_hash.size())
- << ", expected "
- << base::HexEncode(expected_source_hash.data(),
- expected_source_hash.size());
-
- if (fd_utils::ReadAndHashExtents(
- source_ecc_fd_, operation.src_extents(), block_size_, &source_hash) &&
- ValidateSourceHash(source_hash, operation, source_ecc_fd_, error)) {
- // At this point reading from the error corrected device worked, but
- // reading from the raw device failed, so this is considered a recovered
- // failure.
- source_ecc_recovered_failures_++;
- return source_ecc_fd_;
- }
- return nullptr;
-}
-
bool DeltaPerformer::ExtentsToBsdiffPositionsString(
const RepeatedPtrField<Extent>& extents,
uint64_t block_size,
@@ -1322,69 +936,6 @@
return true;
}
-namespace {
-
-class BsdiffExtentFile : public bsdiff::FileInterface {
- public:
- BsdiffExtentFile(std::unique_ptr<ExtentReader> reader, size_t size)
- : BsdiffExtentFile(std::move(reader), nullptr, size) {}
- BsdiffExtentFile(std::unique_ptr<ExtentWriter> writer, size_t size)
- : BsdiffExtentFile(nullptr, std::move(writer), size) {}
-
- ~BsdiffExtentFile() override = default;
-
- bool Read(void* buf, size_t count, size_t* bytes_read) override {
- TEST_AND_RETURN_FALSE(reader_->Read(buf, count));
- *bytes_read = count;
- offset_ += count;
- return true;
- }
-
- bool Write(const void* buf, size_t count, size_t* bytes_written) override {
- TEST_AND_RETURN_FALSE(writer_->Write(buf, count));
- *bytes_written = count;
- offset_ += count;
- return true;
- }
-
- bool Seek(off_t pos) override {
- if (reader_ != nullptr) {
- TEST_AND_RETURN_FALSE(reader_->Seek(pos));
- offset_ = pos;
- } else {
- // For writes technically there should be no change of position, or it
- // should be equivalent of current offset.
- TEST_AND_RETURN_FALSE(offset_ == static_cast<uint64_t>(pos));
- }
- return true;
- }
-
- bool Close() override { return true; }
-
- bool GetSize(uint64_t* size) override {
- *size = size_;
- return true;
- }
-
- private:
- BsdiffExtentFile(std::unique_ptr<ExtentReader> reader,
- std::unique_ptr<ExtentWriter> writer,
- size_t size)
- : reader_(std::move(reader)),
- writer_(std::move(writer)),
- size_(size),
- offset_(0) {}
-
- std::unique_ptr<ExtentReader> reader_;
- std::unique_ptr<ExtentWriter> writer_;
- uint64_t size_;
- uint64_t offset_;
-
- DISALLOW_COPY_AND_ASSIGN(BsdiffExtentFile);
-};
-
-} // namespace
-
bool DeltaPerformer::PerformSourceBsdiffOperation(
const InstallOperation& operation, ErrorCode* error) {
// Since we delete data off the beginning of the buffer as we use it,
@@ -1402,110 +953,6 @@
return true;
}
-bool PartitionWriter::PerformSourceBsdiffOperation(
- const InstallOperation& operation,
- ErrorCode* error,
- const void* data,
- size_t count) {
- FileDescriptorPtr source_fd = ChooseSourceFD(operation, error);
- TEST_AND_RETURN_FALSE(source_fd != nullptr);
-
- auto reader = std::make_unique<DirectExtentReader>();
- TEST_AND_RETURN_FALSE(
- reader->Init(source_fd, operation.src_extents(), block_size_));
- auto src_file = std::make_unique<BsdiffExtentFile>(
- std::move(reader),
- utils::BlocksInExtents(operation.src_extents()) * block_size_);
-
- auto writer = std::make_unique<DirectExtentWriter>();
- TEST_AND_RETURN_FALSE(
- writer->Init(target_fd_, operation.dst_extents(), block_size_));
- auto dst_file = std::make_unique<BsdiffExtentFile>(
- std::move(writer),
- utils::BlocksInExtents(operation.dst_extents()) * block_size_);
-
- TEST_AND_RETURN_FALSE(bsdiff::bspatch(std::move(src_file),
- std::move(dst_file),
- static_cast<const uint8_t*>(data),
- count) == 0);
- return target_fd_->Flush();
-}
-
-namespace {
-
-// A class to be passed to |puffpatch| for reading from |source_fd_| and writing
-// into |target_fd_|.
-class PuffinExtentStream : public puffin::StreamInterface {
- public:
- // Constructor for creating a stream for reading from an |ExtentReader|.
- PuffinExtentStream(std::unique_ptr<ExtentReader> reader, uint64_t size)
- : PuffinExtentStream(std::move(reader), nullptr, size) {}
-
- // Constructor for creating a stream for writing to an |ExtentWriter|.
- PuffinExtentStream(std::unique_ptr<ExtentWriter> writer, uint64_t size)
- : PuffinExtentStream(nullptr, std::move(writer), size) {}
-
- ~PuffinExtentStream() override = default;
-
- bool GetSize(uint64_t* size) const override {
- *size = size_;
- return true;
- }
-
- bool GetOffset(uint64_t* offset) const override {
- *offset = offset_;
- return true;
- }
-
- bool Seek(uint64_t offset) override {
- if (is_read_) {
- TEST_AND_RETURN_FALSE(reader_->Seek(offset));
- offset_ = offset;
- } else {
- // For writes technically there should be no change of position, or it
- // should equivalent of current offset.
- TEST_AND_RETURN_FALSE(offset_ == offset);
- }
- return true;
- }
-
- bool Read(void* buffer, size_t count) override {
- TEST_AND_RETURN_FALSE(is_read_);
- TEST_AND_RETURN_FALSE(reader_->Read(buffer, count));
- offset_ += count;
- return true;
- }
-
- bool Write(const void* buffer, size_t count) override {
- TEST_AND_RETURN_FALSE(!is_read_);
- TEST_AND_RETURN_FALSE(writer_->Write(buffer, count));
- offset_ += count;
- return true;
- }
-
- bool Close() override { return true; }
-
- private:
- PuffinExtentStream(std::unique_ptr<ExtentReader> reader,
- std::unique_ptr<ExtentWriter> writer,
- uint64_t size)
- : reader_(std::move(reader)),
- writer_(std::move(writer)),
- size_(size),
- offset_(0),
- is_read_(reader_ ? true : false) {}
-
- std::unique_ptr<ExtentReader> reader_;
- std::unique_ptr<ExtentWriter> writer_;
- uint64_t size_;
- uint64_t offset_;
- bool is_read_;
-
- DISALLOW_COPY_AND_ASSIGN(PuffinExtentStream);
-};
-
-} // namespace
-
bool DeltaPerformer::PerformPuffDiffOperation(const InstallOperation& operation,
ErrorCode* error) {
// Since we delete data off the beginning of the buffer as we use it,
@@ -1518,37 +965,6 @@
return true;
}
-bool PartitionWriter::PerformPuffDiffOperation(
- const InstallOperation& operation,
- ErrorCode* error,
- const void* data,
- size_t count) {
- FileDescriptorPtr source_fd = ChooseSourceFD(operation, error);
- TEST_AND_RETURN_FALSE(source_fd != nullptr);
-
- auto reader = std::make_unique<DirectExtentReader>();
- TEST_AND_RETURN_FALSE(
- reader->Init(source_fd, operation.src_extents(), block_size_));
- puffin::UniqueStreamPtr src_stream(new PuffinExtentStream(
- std::move(reader),
- utils::BlocksInExtents(operation.src_extents()) * block_size_));
-
- auto writer = std::make_unique<DirectExtentWriter>();
- TEST_AND_RETURN_FALSE(
- writer->Init(target_fd_, operation.dst_extents(), block_size_));
- puffin::UniqueStreamPtr dst_stream(new PuffinExtentStream(
- std::move(writer),
- utils::BlocksInExtents(operation.dst_extents()) * block_size_));
-
- const size_t kMaxCacheSize = 5 * 1024 * 1024; // Total 5MB cache.
- TEST_AND_RETURN_FALSE(puffin::PuffPatch(std::move(src_stream),
- std::move(dst_stream),
- static_cast<const uint8_t*>(data),
- count,
- kMaxCacheSize));
- return target_fd_->Flush();
-}
-
bool DeltaPerformer::ExtractSignatureMessage() {
TEST_AND_RETURN_FALSE(signatures_message_data_.empty());
TEST_AND_RETURN_FALSE(buffer_offset_ == manifest_.signatures_offset());
@@ -1557,11 +973,11 @@
buffer_.begin(), buffer_.begin() + manifest_.signatures_size());
// Save the signature blob because if the update is interrupted after the
- // download phase we don't go through this path anymore. Some alternatives to
- // consider:
+ // download phase we don't go through this path anymore. Some alternatives
+ // to consider:
//
- // 1. On resume, re-download the signature blob from the server and re-verify
- // it.
+ // 1. On resume, re-download the signature blob from the server and
+ // re-verify it.
//
// 2. Verify the signature as soon as it's received and don't checkpoint the
// blob and the signed sha-256 context.
@@ -1584,8 +1000,8 @@
return utils::ReadFile(public_key_path_, out_public_key);
}
- // If this is an official build then we are not allowed to use public key from
- // Omaha response.
+ // If this is an official build then we are not allowed to use public key
+ // from Omaha response.
if (!hardware_->IsOfficialBuild() && !install_plan_->public_key_rsa.empty()) {
LOG(INFO) << "Verifying using public key from Omaha response.";
return brillo::data_encoding::Base64Decode(install_plan_->public_key_rsa,
@@ -1704,9 +1120,9 @@
// Check version field for a given PartitionUpdate object. If an error
// is encountered, set |error_code| accordingly. If downgrade is detected,
- // |downgrade_detected| is set. Return true if the program should continue to
- // check the next partition or not, or false if it should exit early due to
- // errors.
+ // |downgrade_detected| is set. Return true if the program should continue
+ // to check the next partition or not, or false if it should exit early due
+ // to errors.
auto&& timestamp_valid = [this](const PartitionUpdate& partition,
bool allow_empty_version,
bool* downgrade_detected) -> ErrorCode {
@@ -1784,10 +1200,11 @@
const InstallOperation& operation) {
if (!operation.data_sha256_hash().size()) {
if (!operation.data_length()) {
- // Operations that do not have any data blob won't have any operation hash
- // either. So, these operations are always considered validated since the
- // metadata that contains all the non-data-blob portions of the operation
- // has already been validated. This is true for both HTTP and HTTPS cases.
+ // Operations that do not have any data blob won't have any operation
+ // hash either. So, these operations are always considered validated
+ // since the metadata that contains all the non-data-blob portions of
+ // the operation has already been validated. This is true for both HTTP
+ // and HTTPS cases.
return ErrorCode::kSuccess;
}
@@ -1926,8 +1343,8 @@
return false;
int64_t resumed_update_failures;
- // Note that storing this value is optional, but if it is there it should not
- // be more than the limit.
+ // Note that storing this value is optional, but if it is there it should
+ // not be more than the limit.
if (prefs->GetInt64(kPrefsResumedUpdateFailures, &resumed_update_failures) &&
resumed_update_failures > kMaxResumedUpdateFailures)
return false;
@@ -2086,20 +1503,4 @@
return true;
}
-PartitionWriter::PartitionWriter(
- const PartitionUpdate& partition_update,
- const InstallPlan::Partition& install_part,
- DynamicPartitionControlInterface* dynamic_control,
- size_t block_size,
- bool is_interactive)
- : partition_update_(partition_update),
- install_part_(install_part),
- dynamic_control_(dynamic_control),
- interactive_(is_interactive),
- block_size_(block_size) {}
-
-PartitionWriter::~PartitionWriter() {
- Close();
-}
-
} // namespace chromeos_update_engine
diff --git a/payload_consumer/delta_performer.h b/payload_consumer/delta_performer.h
index bee7fde..d44f6c2 100644
--- a/payload_consumer/delta_performer.h
+++ b/payload_consumer/delta_performer.h
@@ -35,6 +35,7 @@
#include "update_engine/payload_consumer/file_descriptor.h"
#include "update_engine/payload_consumer/file_writer.h"
#include "update_engine/payload_consumer/install_plan.h"
+#include "update_engine/payload_consumer/partition_writer.h"
#include "update_engine/payload_consumer/payload_metadata.h"
#include "update_engine/payload_consumer/payload_verifier.h"
#include "update_engine/update_metadata.pb.h"
@@ -46,9 +47,6 @@
class HardwareInterface;
class PrefsInterface;
-// At the bottom of this file.
-class PartitionWriter;
-
// This class performs the actions in a delta update synchronously. The delta
// update itself should be passed in in chunks as it is received.
class DeltaPerformer : public FileWriter {
@@ -412,88 +410,6 @@
DISALLOW_COPY_AND_ASSIGN(DeltaPerformer);
};
-class PartitionWriter {
- public:
- PartitionWriter(const PartitionUpdate& partition_update,
- const InstallPlan::Partition& install_part,
- DynamicPartitionControlInterface* dynamic_control,
- size_t block_size,
- bool is_interactive);
- ~PartitionWriter();
- // Compare |calculated_hash| with source hash in |operation|, return false and
- // dump hash and set |error| if don't match.
- // |source_fd| is the file descriptor of the source partition.
- static bool ValidateSourceHash(const brillo::Blob& calculated_hash,
- const InstallOperation& operation,
- const FileDescriptorPtr source_fd,
- ErrorCode* error);
-
- // Perform necessary initialization work before InstallOperation can be
- // applied to this partition
- [[nodiscard]] bool Init(const InstallPlan* install_plan,
- bool source_may_exist);
-
- int Close();
-
- // These perform a specific type of operation and return true on success.
- // |error| will be set if source hash mismatch, otherwise |error| might not be
- // set even if it fails.
- [[nodiscard]] bool PerformReplaceOperation(const InstallOperation& operation,
- const void* data,
- size_t count);
- [[nodiscard]] bool PerformZeroOrDiscardOperation(
- const InstallOperation& operation);
-
- [[nodiscard]] bool PerformSourceCopyOperation(
- const InstallOperation& operation, ErrorCode* error);
- [[nodiscard]] bool PerformSourceBsdiffOperation(
- const InstallOperation& operation,
- ErrorCode* error,
- const void* data,
- size_t count);
- [[nodiscard]] bool PerformPuffDiffOperation(const InstallOperation& operation,
- ErrorCode* error,
- const void* data,
- size_t count);
-
- private:
- friend class PartitionWriterTest;
- FRIEND_TEST(PartitionWriterTest, ChooseSourceFDTest);
-
- bool OpenCurrentECCPartition();
- // For a given operation, choose the source fd to be used (raw device or error
- // correction device) based on the source operation hash.
- // Returns nullptr if the source hash mismatch cannot be corrected, and set
- // the |error| accordingly.
- FileDescriptorPtr ChooseSourceFD(const InstallOperation& operation,
- ErrorCode* error);
-
- const PartitionUpdate& partition_update_;
- const InstallPlan::Partition& install_part_;
- DynamicPartitionControlInterface* dynamic_control_;
- std::string source_path_;
- std::string target_path_;
- FileDescriptorPtr source_fd_;
- FileDescriptorPtr target_fd_;
- const bool interactive_;
- const size_t block_size_;
- // File descriptor of the error corrected source partition. Only set while
- // updating partition using a delta payload for a partition where error
- // correction is available. The size of the error corrected device is smaller
- // than the underlying raw device, since it doesn't include the error
- // correction blocks.
- FileDescriptorPtr source_ecc_fd_{nullptr};
-
- // The total number of operations that failed source hash verification but
- // passed after falling back to the error-corrected |source_ecc_fd_| device.
- uint64_t source_ecc_recovered_failures_{0};
-
- // Whether opening the current partition as an error-corrected device failed.
- // Used to avoid re-opening the same source partition if it is not actually
- // error corrected.
- bool source_ecc_open_failure_{false};
-};
-
} // namespace chromeos_update_engine
#endif // UPDATE_ENGINE_PAYLOAD_CONSUMER_DELTA_PERFORMER_H_
diff --git a/payload_consumer/partition_writer.cc b/payload_consumer/partition_writer.cc
new file mode 100644
index 0000000..d47ebee
--- /dev/null
+++ b/payload_consumer/partition_writer.cc
@@ -0,0 +1,644 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "update_engine/payload_consumer/partition_writer.h"
+
+#include <fcntl.h>
+#include <linux/fs.h>
+
+#include <algorithm>
+#include <initializer_list>
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include <base/strings/string_number_conversions.h>
+#include <bsdiff/bspatch.h>
+#include <bsdiff/file_interface.h>
+#include <puffin/puffpatch.h>
+#include <puffin/stream.h>
+
+#include "update_engine/common/terminator.h"
+#include "update_engine/common/utils.h"
+#include "update_engine/payload_consumer/bzip_extent_writer.h"
+#include "update_engine/payload_consumer/cached_file_descriptor.h"
+#include "update_engine/payload_consumer/extent_reader.h"
+#include "update_engine/payload_consumer/extent_writer.h"
+#include "update_engine/payload_consumer/fec_file_descriptor.h"
+#include "update_engine/payload_consumer/file_descriptor_utils.h"
+#include "update_engine/payload_consumer/install_plan.h"
+#include "update_engine/payload_consumer/mount_history.h"
+#include "update_engine/payload_consumer/payload_constants.h"
+#include "update_engine/payload_consumer/xz_extent_writer.h"
+
+namespace chromeos_update_engine {
+
+namespace {
+constexpr uint64_t kCacheSize = 1024 * 1024; // 1MB
+
+// Discard the tail of the block device referenced by |fd|, from the offset
+// |data_size| until the end of the block device. Returns whether the data was
+// discarded.
+bool DiscardPartitionTail(const FileDescriptorPtr& fd, uint64_t data_size) {
+ uint64_t part_size = fd->BlockDevSize();
+ if (!part_size || part_size <= data_size)
+ return false;
+
+ struct blkioctl_request {
+ int number;
+ const char* name;
+ };
+ const std::initializer_list<blkioctl_request> blkioctl_requests = {
+ {BLKDISCARD, "BLKDISCARD"},
+ {BLKSECDISCARD, "BLKSECDISCARD"},
+#ifdef BLKZEROOUT
+ {BLKZEROOUT, "BLKZEROOUT"},
+#endif
+ };
+ for (const auto& req : blkioctl_requests) {
+ int error = 0;
+ if (fd->BlkIoctl(req.number, data_size, part_size - data_size, &error) &&
+ error == 0) {
+ return true;
+ }
+ LOG(WARNING) << "Error discarding the last "
+ << (part_size - data_size) / 1024 << " KiB using ioctl("
+ << req.name << ")";
+ }
+ return false;
+}
+
+} // namespace
+
+// Opens path for read/write. On success returns an open FileDescriptor
+// and sets *err to 0. On failure, sets *err to errno and returns nullptr.
+FileDescriptorPtr OpenFile(const char* path,
+ int mode,
+ bool cache_writes,
+ int* err) {
+ // Try to mark the block device read-only based on the mode. Ignore any
+ // failure since this won't work when passing regular files.
+ bool read_only = (mode & O_ACCMODE) == O_RDONLY;
+ utils::SetBlockDeviceReadOnly(path, read_only);
+
+ FileDescriptorPtr fd(new EintrSafeFileDescriptor());
+ if (cache_writes && !read_only) {
+ fd = FileDescriptorPtr(new CachedFileDescriptor(fd, kCacheSize));
+ LOG(INFO) << "Caching writes.";
+ }
+ if (!fd->Open(path, mode, 000)) {
+ *err = errno;
+ PLOG(ERROR) << "Unable to open file " << path;
+ return nullptr;
+ }
+ *err = 0;
+ return fd;
+}
+
+class BsdiffExtentFile : public bsdiff::FileInterface {
+ public:
+ BsdiffExtentFile(std::unique_ptr<ExtentReader> reader, size_t size)
+ : BsdiffExtentFile(std::move(reader), nullptr, size) {}
+ BsdiffExtentFile(std::unique_ptr<ExtentWriter> writer, size_t size)
+ : BsdiffExtentFile(nullptr, std::move(writer), size) {}
+
+ ~BsdiffExtentFile() override = default;
+
+ bool Read(void* buf, size_t count, size_t* bytes_read) override {
+ TEST_AND_RETURN_FALSE(reader_->Read(buf, count));
+ *bytes_read = count;
+ offset_ += count;
+ return true;
+ }
+
+ bool Write(const void* buf, size_t count, size_t* bytes_written) override {
+ TEST_AND_RETURN_FALSE(writer_->Write(buf, count));
+ *bytes_written = count;
+ offset_ += count;
+ return true;
+ }
+
+ bool Seek(off_t pos) override {
+ if (reader_ != nullptr) {
+ TEST_AND_RETURN_FALSE(reader_->Seek(pos));
+ offset_ = pos;
+ } else {
+ // For writes there should technically be no change of position, or it
+ // should be equivalent to the current offset.
+ TEST_AND_RETURN_FALSE(offset_ == static_cast<uint64_t>(pos));
+ }
+ return true;
+ }
+
+ bool Close() override { return true; }
+
+ bool GetSize(uint64_t* size) override {
+ *size = size_;
+ return true;
+ }
+
+ private:
+ BsdiffExtentFile(std::unique_ptr<ExtentReader> reader,
+ std::unique_ptr<ExtentWriter> writer,
+ size_t size)
+ : reader_(std::move(reader)),
+ writer_(std::move(writer)),
+ size_(size),
+ offset_(0) {}
+
+ std::unique_ptr<ExtentReader> reader_;
+ std::unique_ptr<ExtentWriter> writer_;
+ uint64_t size_;
+ uint64_t offset_;
+
+ DISALLOW_COPY_AND_ASSIGN(BsdiffExtentFile);
+};
+
+// A class to be passed to |puffpatch| for reading from |source_fd_| and writing
+// into |target_fd_|.
+class PuffinExtentStream : public puffin::StreamInterface {
+ public:
+ // Constructor for creating a stream for reading from an |ExtentReader|.
+ PuffinExtentStream(std::unique_ptr<ExtentReader> reader, uint64_t size)
+ : PuffinExtentStream(std::move(reader), nullptr, size) {}
+
+ // Constructor for creating a stream for writing to an |ExtentWriter|.
+ PuffinExtentStream(std::unique_ptr<ExtentWriter> writer, uint64_t size)
+ : PuffinExtentStream(nullptr, std::move(writer), size) {}
+
+ ~PuffinExtentStream() override = default;
+
+ bool GetSize(uint64_t* size) const override {
+ *size = size_;
+ return true;
+ }
+
+ bool GetOffset(uint64_t* offset) const override {
+ *offset = offset_;
+ return true;
+ }
+
+ bool Seek(uint64_t offset) override {
+ if (is_read_) {
+ TEST_AND_RETURN_FALSE(reader_->Seek(offset));
+ offset_ = offset;
+ } else {
+ // For writes there should technically be no change of position, or it
+ // should be equivalent to the current offset.
+ TEST_AND_RETURN_FALSE(offset_ == offset);
+ }
+ return true;
+ }
+
+ bool Read(void* buffer, size_t count) override {
+ TEST_AND_RETURN_FALSE(is_read_);
+ TEST_AND_RETURN_FALSE(reader_->Read(buffer, count));
+ offset_ += count;
+ return true;
+ }
+
+ bool Write(const void* buffer, size_t count) override {
+ TEST_AND_RETURN_FALSE(!is_read_);
+ TEST_AND_RETURN_FALSE(writer_->Write(buffer, count));
+ offset_ += count;
+ return true;
+ }
+
+ bool Close() override { return true; }
+
+ private:
+ PuffinExtentStream(std::unique_ptr<ExtentReader> reader,
+ std::unique_ptr<ExtentWriter> writer,
+ uint64_t size)
+ : reader_(std::move(reader)),
+ writer_(std::move(writer)),
+ size_(size),
+ offset_(0),
+ is_read_(reader_ ? true : false) {}
+
+ std::unique_ptr<ExtentReader> reader_;
+ std::unique_ptr<ExtentWriter> writer_;
+ uint64_t size_;
+ uint64_t offset_;
+ bool is_read_;
+
+ DISALLOW_COPY_AND_ASSIGN(PuffinExtentStream);
+};
+
+PartitionWriter::PartitionWriter(
+ const PartitionUpdate& partition_update,
+ const InstallPlan::Partition& install_part,
+ DynamicPartitionControlInterface* dynamic_control,
+ size_t block_size,
+ bool is_interactive)
+ : partition_update_(partition_update),
+ install_part_(install_part),
+ dynamic_control_(dynamic_control),
+ interactive_(is_interactive),
+ block_size_(block_size) {}
+
+PartitionWriter::~PartitionWriter() {
+ Close();
+}
+
+bool PartitionWriter::Init(const InstallPlan* install_plan,
+ bool source_may_exist) {
+ const PartitionUpdate& partition = partition_update_;
+ uint32_t source_slot = install_plan->source_slot;
+ uint32_t target_slot = install_plan->target_slot;
+
+ // We shouldn't open the source partition in certain cases, e.g. some dynamic
+ // partitions in delta payload, partitions included in the full payload for
+ // partial updates. Use the source size as the indicator.
+ if (source_may_exist && install_part_.source_size > 0) {
+ source_path_ = install_part_.source_path;
+ int err;
+ source_fd_ = OpenFile(source_path_.c_str(), O_RDONLY, false, &err);
+ if (!source_fd_) {
+ LOG(ERROR) << "Unable to open source partition "
+ << partition.partition_name() << " on slot "
+ << BootControlInterface::SlotName(source_slot) << ", file "
+ << source_path_;
+ return false;
+ }
+ }
+
+ target_path_ = install_part_.target_path;
+ int err;
+
+ int flags = O_RDWR;
+ if (!interactive_)
+ flags |= O_DSYNC;
+
+ LOG(INFO) << "Opening " << target_path_ << " partition with"
+ << (interactive_ ? "out" : "") << " O_DSYNC";
+
+ target_fd_ = OpenFile(target_path_.c_str(), flags, true, &err);
+ if (!target_fd_) {
+ LOG(ERROR) << "Unable to open target partition "
+ << partition.partition_name() << " on slot "
+ << BootControlInterface::SlotName(target_slot) << ", file "
+ << target_path_;
+ return false;
+ }
+
+ LOG(INFO) << "Applying " << partition.operations().size()
+ << " operations to partition \"" << partition.partition_name()
+ << "\"";
+
+ // Discard the end of the partition, but ignore failures.
+ DiscardPartitionTail(target_fd_, install_part_.target_size);
+
+ return true;
+}
+
+bool PartitionWriter::PerformReplaceOperation(const InstallOperation& operation,
+ const void* data,
+ size_t count) {
+ // Setup the ExtentWriter stack based on the operation type.
+ std::unique_ptr<ExtentWriter> writer = std::make_unique<DirectExtentWriter>();
+
+ if (operation.type() == InstallOperation::REPLACE_BZ) {
+ writer.reset(new BzipExtentWriter(std::move(writer)));
+ } else if (operation.type() == InstallOperation::REPLACE_XZ) {
+ writer.reset(new XzExtentWriter(std::move(writer)));
+ }
+
+ TEST_AND_RETURN_FALSE(
+ writer->Init(target_fd_, operation.dst_extents(), block_size_));
+ TEST_AND_RETURN_FALSE(writer->Write(data, operation.data_length()));
+
+ return target_fd_->Flush();
+}
+
+bool PartitionWriter::PerformZeroOrDiscardOperation(
+ const InstallOperation& operation) {
+#ifdef BLKZEROOUT
+ bool attempt_ioctl = true;
+ int request =
+ (operation.type() == InstallOperation::ZERO ? BLKZEROOUT : BLKDISCARD);
+#else // !defined(BLKZEROOUT)
+ bool attempt_ioctl = false;
+ int request = 0;
+#endif // !defined(BLKZEROOUT)
+
+ brillo::Blob zeros;
+ for (const Extent& extent : operation.dst_extents()) {
+ const uint64_t start = extent.start_block() * block_size_;
+ const uint64_t length = extent.num_blocks() * block_size_;
+ if (attempt_ioctl) {
+ int result = 0;
+ if (target_fd_->BlkIoctl(request, start, length, &result) && result == 0)
+ continue;
+ attempt_ioctl = false;
+ }
+ // In case of failure, we fall back to writing 0 to the selected region.
+ zeros.resize(16 * block_size_);
+ for (uint64_t offset = 0; offset < length; offset += zeros.size()) {
+ uint64_t chunk_length =
+ std::min(length - offset, static_cast<uint64_t>(zeros.size()));
+ TEST_AND_RETURN_FALSE(utils::PWriteAll(
+ target_fd_, zeros.data(), chunk_length, start + offset));
+ }
+ }
+ return target_fd_->Flush();
+}
+
+bool PartitionWriter::PerformSourceCopyOperation(
+ const InstallOperation& operation, ErrorCode* error) {
+ TEST_AND_RETURN_FALSE(source_fd_ != nullptr);
+
+ // The device may optimize the SOURCE_COPY operation.
+ // Since this is a device-specific optimization, let the
+ // DynamicPartitionControlInterface decide whether the operation should be
+ // skipped.
+ const PartitionUpdate& partition = partition_update_;
+ const auto& partition_control = dynamic_control_;
+
+ InstallOperation buf;
+ bool should_optimize = partition_control->OptimizeOperation(
+ partition.partition_name(), operation, &buf);
+ const InstallOperation& optimized = should_optimize ? buf : operation;
+
+ if (operation.has_src_sha256_hash()) {
+ bool read_ok;
+ brillo::Blob source_hash;
+ brillo::Blob expected_source_hash(operation.src_sha256_hash().begin(),
+ operation.src_sha256_hash().end());
+
+ // We fall back to use the error corrected device if the hash of the raw
+ // device doesn't match or there was an error reading the source partition.
+ // Note that this code will also fall back if writing the target partition
+ // fails.
+ if (should_optimize) {
+ // Hash operation.src_extents(), then copy optimized.src_extents to
+ // optimized.dst_extents.
+ read_ok =
+ fd_utils::ReadAndHashExtents(
+ source_fd_, operation.src_extents(), block_size_, &source_hash) &&
+ fd_utils::CopyAndHashExtents(source_fd_,
+ optimized.src_extents(),
+ target_fd_,
+ optimized.dst_extents(),
+ block_size_,
+ nullptr /* skip hashing */);
+ } else {
+ read_ok = fd_utils::CopyAndHashExtents(source_fd_,
+ operation.src_extents(),
+ target_fd_,
+ operation.dst_extents(),
+ block_size_,
+ &source_hash);
+ }
+ if (read_ok && expected_source_hash == source_hash)
+ return true;
+ LOG(WARNING) << "Source hash from RAW device mismatched, attempting to "
+ "correct using ECC";
+ if (!OpenCurrentECCPartition()) {
+ // The following function call will return false since the source hash
+ // mismatches, but we still want to call it so it prints the appropriate
+ // log message.
+ return ValidateSourceHash(source_hash, operation, source_fd_, error);
+ }
+
+ LOG(WARNING) << "Source hash from RAW device mismatched: found "
+ << base::HexEncode(source_hash.data(), source_hash.size())
+ << ", expected "
+ << base::HexEncode(expected_source_hash.data(),
+ expected_source_hash.size());
+ if (should_optimize) {
+ TEST_AND_RETURN_FALSE(fd_utils::ReadAndHashExtents(
+ source_ecc_fd_, operation.src_extents(), block_size_, &source_hash));
+ TEST_AND_RETURN_FALSE(
+ fd_utils::CopyAndHashExtents(source_ecc_fd_,
+ optimized.src_extents(),
+ target_fd_,
+ optimized.dst_extents(),
+ block_size_,
+ nullptr /* skip hashing */));
+ } else {
+ TEST_AND_RETURN_FALSE(
+ fd_utils::CopyAndHashExtents(source_ecc_fd_,
+ operation.src_extents(),
+ target_fd_,
+ operation.dst_extents(),
+ block_size_,
+ &source_hash));
+ }
+ TEST_AND_RETURN_FALSE(
+ ValidateSourceHash(source_hash, operation, source_ecc_fd_, error));
+ // At this point reading from the error corrected device worked, but
+ // reading from the raw device failed, so this is considered a recovered
+ // failure.
+ source_ecc_recovered_failures_++;
+ } else {
+ // When the operation doesn't include a source hash, we attempt the error
+ // corrected device first since we can't verify the block in the raw device
+ // at this point, but we fall back to the raw device since the error
+ // corrected device can be shorter or not available.
+
+ if (OpenCurrentECCPartition() &&
+ fd_utils::CopyAndHashExtents(source_ecc_fd_,
+ optimized.src_extents(),
+ target_fd_,
+ optimized.dst_extents(),
+ block_size_,
+ nullptr)) {
+ return true;
+ }
+ TEST_AND_RETURN_FALSE(fd_utils::CopyAndHashExtents(source_fd_,
+ optimized.src_extents(),
+ target_fd_,
+ optimized.dst_extents(),
+ block_size_,
+ nullptr));
+ }
+ return target_fd_->Flush();
+}
+
+bool PartitionWriter::PerformSourceBsdiffOperation(
+ const InstallOperation& operation,
+ ErrorCode* error,
+ const void* data,
+ size_t count) {
+ FileDescriptorPtr source_fd = ChooseSourceFD(operation, error);
+ TEST_AND_RETURN_FALSE(source_fd != nullptr);
+
+ auto reader = std::make_unique<DirectExtentReader>();
+ TEST_AND_RETURN_FALSE(
+ reader->Init(source_fd, operation.src_extents(), block_size_));
+ auto src_file = std::make_unique<BsdiffExtentFile>(
+ std::move(reader),
+ utils::BlocksInExtents(operation.src_extents()) * block_size_);
+
+ auto writer = std::make_unique<DirectExtentWriter>();
+ TEST_AND_RETURN_FALSE(
+ writer->Init(target_fd_, operation.dst_extents(), block_size_));
+ auto dst_file = std::make_unique<BsdiffExtentFile>(
+ std::move(writer),
+ utils::BlocksInExtents(operation.dst_extents()) * block_size_);
+
+ TEST_AND_RETURN_FALSE(bsdiff::bspatch(std::move(src_file),
+ std::move(dst_file),
+ reinterpret_cast<const uint8_t*>(data),
+ count) == 0);
+ return target_fd_->Flush();
+}
+
+bool PartitionWriter::PerformPuffDiffOperation(
+ const InstallOperation& operation,
+ ErrorCode* error,
+ const void* data,
+ size_t count) {
+ FileDescriptorPtr source_fd = ChooseSourceFD(operation, error);
+ TEST_AND_RETURN_FALSE(source_fd != nullptr);
+
+ auto reader = std::make_unique<DirectExtentReader>();
+ TEST_AND_RETURN_FALSE(
+ reader->Init(source_fd, operation.src_extents(), block_size_));
+ puffin::UniqueStreamPtr src_stream(new PuffinExtentStream(
+ std::move(reader),
+ utils::BlocksInExtents(operation.src_extents()) * block_size_));
+
+ auto writer = std::make_unique<DirectExtentWriter>();
+ TEST_AND_RETURN_FALSE(
+ writer->Init(target_fd_, operation.dst_extents(), block_size_));
+ puffin::UniqueStreamPtr dst_stream(new PuffinExtentStream(
+ std::move(writer),
+ utils::BlocksInExtents(operation.dst_extents()) * block_size_));
+
+ constexpr size_t kMaxCacheSize = 5 * 1024 * 1024; // Total 5MB cache.
+ TEST_AND_RETURN_FALSE(
+ puffin::PuffPatch(std::move(src_stream),
+ std::move(dst_stream),
+ reinterpret_cast<const uint8_t*>(data),
+ count,
+ kMaxCacheSize));
+ return target_fd_->Flush();
+}
+
+FileDescriptorPtr PartitionWriter::ChooseSourceFD(
+ const InstallOperation& operation, ErrorCode* error) {
+ if (source_fd_ == nullptr) {
+ LOG(ERROR) << "ChooseSourceFD fail: source_fd_ == nullptr";
+ return nullptr;
+ }
+
+ if (!operation.has_src_sha256_hash()) {
+ // When the operation doesn't include a source hash, we attempt the error
+ // corrected device first since we can't verify the block in the raw device
+ // at this point, but we first need to make sure all extents are readable
+ // since the error corrected device can be shorter or not available.
+ if (OpenCurrentECCPartition() &&
+ fd_utils::ReadAndHashExtents(
+ source_ecc_fd_, operation.src_extents(), block_size_, nullptr)) {
+ return source_ecc_fd_;
+ }
+ return source_fd_;
+ }
+
+ brillo::Blob source_hash;
+ brillo::Blob expected_source_hash(operation.src_sha256_hash().begin(),
+ operation.src_sha256_hash().end());
+ if (fd_utils::ReadAndHashExtents(
+ source_fd_, operation.src_extents(), block_size_, &source_hash) &&
+ source_hash == expected_source_hash) {
+ return source_fd_;
+ }
+ // We fall back to use the error corrected device if the hash of the raw
+ // device doesn't match or there was an error reading the source partition.
+ if (!OpenCurrentECCPartition()) {
+ // The following function call will return false since the source hash
+ // mismatches, but we still want to call it so it prints the appropriate
+ // log message.
+ ValidateSourceHash(source_hash, operation, source_fd_, error);
+ return nullptr;
+ }
+ LOG(WARNING) << "Source hash from RAW device mismatched: found "
+ << base::HexEncode(source_hash.data(), source_hash.size())
+ << ", expected "
+ << base::HexEncode(expected_source_hash.data(),
+ expected_source_hash.size());
+
+ if (fd_utils::ReadAndHashExtents(
+ source_ecc_fd_, operation.src_extents(), block_size_, &source_hash) &&
+ ValidateSourceHash(source_hash, operation, source_ecc_fd_, error)) {
+ // At this point reading from the error corrected device worked, but
+ // reading from the raw device failed, so this is considered a recovered
+ // failure.
+ source_ecc_recovered_failures_++;
+ return source_ecc_fd_;
+ }
+ return nullptr;
+}
+
+bool PartitionWriter::OpenCurrentECCPartition() {
+ // No support for ECC for full payloads.
+ // Full payloads should not have any operation that requires an ECC partition.
+ if (source_ecc_fd_)
+ return true;
+
+ if (source_ecc_open_failure_)
+ return false;
+
+#if USE_FEC
+ const PartitionUpdate& partition = partition_update_;
+ const InstallPlan::Partition& install_part = install_part_;
+ std::string path = install_part.source_path;
+ FileDescriptorPtr fd(new FecFileDescriptor());
+ if (!fd->Open(path.c_str(), O_RDONLY, 0)) {
+ PLOG(ERROR) << "Unable to open ECC source partition "
+ << partition.partition_name() << ", file " << path;
+ source_ecc_open_failure_ = true;
+ return false;
+ }
+ source_ecc_fd_ = fd;
+#else
+ // No support for ECC compiled.
+ source_ecc_open_failure_ = true;
+#endif // USE_FEC
+
+ return !source_ecc_open_failure_;
+}
+
+int PartitionWriter::Close() {
+ int err = 0;
+ if (source_fd_ && !source_fd_->Close()) {
+ err = errno;
+ PLOG(ERROR) << "Error closing source partition";
+ if (!err)
+ err = 1;
+ }
+ source_fd_.reset();
+ source_path_.clear();
+
+ if (target_fd_ && !target_fd_->Close()) {
+ err = errno;
+ PLOG(ERROR) << "Error closing target partition";
+ if (!err)
+ err = 1;
+ }
+ target_fd_.reset();
+ target_path_.clear();
+
+ if (source_ecc_fd_ && !source_ecc_fd_->Close()) {
+ err = errno;
+ PLOG(ERROR) << "Error closing ECC source partition";
+ if (!err)
+ err = 1;
+ }
+ source_ecc_fd_.reset();
+ source_ecc_open_failure_ = false;
+ return -err;
+}
+
+} // namespace chromeos_update_engine
diff --git a/payload_consumer/partition_writer.h b/payload_consumer/partition_writer.h
new file mode 100644
index 0000000..624a411
--- /dev/null
+++ b/payload_consumer/partition_writer.h
@@ -0,0 +1,113 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_PARTITION_WRITER_H_
+#define UPDATE_ENGINE_PARTITION_WRITER_H_
+
+#include <cstdint>
+#include <string>
+
+#include <brillo/secure_blob.h>
+#include <gtest/gtest_prod.h>
+
+#include "update_engine/common/dynamic_partition_control_interface.h"
+#include "update_engine/payload_consumer/file_descriptor.h"
+#include "update_engine/payload_consumer/install_plan.h"
+#include "update_engine/update_metadata.pb.h"
+namespace chromeos_update_engine {
+class PartitionWriter {
+ public:
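+ // |partition_update|, |install_part| and |dynamic_control| are not owned by
+ // this object and must outlive it.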
+ PartitionWriter(const PartitionUpdate& partition_update,
+ const InstallPlan::Partition& install_part,
+ DynamicPartitionControlInterface* dynamic_control,
+ size_t block_size,
+ bool is_interactive);
+ ~PartitionWriter();
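+ // Compare |calculated_hash| with the source hash in |operation|; if they
+ // don't match, dump the hashes, set |error| and return false.
+ // |source_fd| is the file descriptor of the source partition.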
+ static bool ValidateSourceHash(const brillo::Blob& calculated_hash,
+ const InstallOperation& operation,
+ const FileDescriptorPtr source_fd,
+ ErrorCode* error);
+
+ // Perform necessary initialization work before InstallOperation can be
+ // applied to this partition
+ [[nodiscard]] bool Init(const InstallPlan* install_plan,
+ bool source_may_exist);
+
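+ // Closes the source, target and ECC source file descriptors. Returns 0 on
+ // success, or a negative error value if closing any of them fails.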
+ int Close();
+
+ // These perform a specific type of operation and return true on success.
+ // |error| will be set on a source hash mismatch; otherwise |error| might not
+ // be set even if the operation fails.
+ [[nodiscard]] bool PerformReplaceOperation(const InstallOperation& operation,
+ const void* data,
+ size_t count);
+ [[nodiscard]] bool PerformZeroOrDiscardOperation(
+ const InstallOperation& operation);
+
+ [[nodiscard]] bool PerformSourceCopyOperation(
+ const InstallOperation& operation, ErrorCode* error);
+ [[nodiscard]] bool PerformSourceBsdiffOperation(
+ const InstallOperation& operation,
+ ErrorCode* error,
+ const void* data,
+ size_t count);
+ [[nodiscard]] bool PerformPuffDiffOperation(const InstallOperation& operation,
+ ErrorCode* error,
+ const void* data,
+ size_t count);
+
+ private:
+ friend class PartitionWriterTest;
+ FRIEND_TEST(PartitionWriterTest, ChooseSourceFDTest);
+
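+ // Opens the error-corrected (FEC) device for the source partition, if
+ // available, and caches it in |source_ecc_fd_|. Returns false if the device
+ // cannot be opened or ECC support is not compiled in.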
+ bool OpenCurrentECCPartition();
+ // For a given operation, choose the source fd to be used (raw device or error
+ // correction device) based on the source operation hash.
+ // Returns nullptr if the source hash mismatch cannot be corrected, and sets
+ // |error| accordingly.
+ FileDescriptorPtr ChooseSourceFD(const InstallOperation& operation,
+ ErrorCode* error);
+
+ const PartitionUpdate& partition_update_;
+ const InstallPlan::Partition& install_part_;
+ DynamicPartitionControlInterface* dynamic_control_;
+ // Path to source partition
+ std::string source_path_;
+ // Path to target partition
+ std::string target_path_;
+ FileDescriptorPtr source_fd_;
+ FileDescriptorPtr target_fd_;
+ const bool interactive_;
+ const size_t block_size_;
+ // File descriptor of the error corrected source partition. Only set while
+ // updating partition using a delta payload for a partition where error
+ // correction is available. The size of the error corrected device is smaller
+ // than the underlying raw device, since it doesn't include the error
+ // correction blocks.
+ FileDescriptorPtr source_ecc_fd_{nullptr};
+
+ // The total number of operations that failed source hash verification but
+ // passed after falling back to the error-corrected |source_ecc_fd_| device.
+ uint64_t source_ecc_recovered_failures_{0};
+
+ // Whether opening the current partition as an error-corrected device failed.
+ // Used to avoid re-opening the same source partition if it is not actually
+ // error corrected.
+ bool source_ecc_open_failure_{false};
+};
+
+} // namespace chromeos_update_engine
+
+#endif // UPDATE_ENGINE_PARTITION_WRITER_H_