update_engine: Merge remote-tracking branch 'cros/upstream' into cros/master
Since libchrome in AOSP is ahead of CrOS, I had to add BASE_VER guards in a
few places to support older libchrome versions.
file_fetcher.cc is now needed in delta_generator.
A few unittests need to be run as root.
BUG=chromium:916593
TEST=unittest
TEST=cros_generate_update_payload
TEST=cros flash
CQ-DEPEND=CL:1399261
Change-Id: If3497549e88e559f8ecc38f414259b9c774f4a44
diff --git a/payload_consumer/bzip_extent_writer.cc b/payload_consumer/bzip_extent_writer.cc
index 7828589..8926047 100644
--- a/payload_consumer/bzip_extent_writer.cc
+++ b/payload_consumer/bzip_extent_writer.cc
@@ -26,6 +26,7 @@
BzipExtentWriter::~BzipExtentWriter() {
TEST_AND_RETURN(BZ2_bzDecompressEnd(&stream_) == BZ_OK);
+ TEST_AND_RETURN(input_buffer_.empty());
}
bool BzipExtentWriter::Init(FileDescriptorPtr fd,
@@ -86,9 +87,4 @@
return true;
}
-bool BzipExtentWriter::EndImpl() {
- TEST_AND_RETURN_FALSE(input_buffer_.empty());
- return next_->End();
-}
-
} // namespace chromeos_update_engine
diff --git a/payload_consumer/bzip_extent_writer.h b/payload_consumer/bzip_extent_writer.h
index 710727f..023db75 100644
--- a/payload_consumer/bzip_extent_writer.h
+++ b/payload_consumer/bzip_extent_writer.h
@@ -44,7 +44,6 @@
const google::protobuf::RepeatedPtrField<Extent>& extents,
uint32_t block_size) override;
bool Write(const void* bytes, size_t count) override;
- bool EndImpl() override;
private:
std::unique_ptr<ExtentWriter> next_; // The underlying ExtentWriter.
diff --git a/payload_consumer/bzip_extent_writer_unittest.cc b/payload_consumer/bzip_extent_writer_unittest.cc
index bf050ef..c121e11 100644
--- a/payload_consumer/bzip_extent_writer_unittest.cc
+++ b/payload_consumer/bzip_extent_writer_unittest.cc
@@ -49,8 +49,6 @@
void TearDown() override {
fd_->Close();
}
- void WriteAlignedExtents(size_t chunk_size, size_t first_chunk_size);
- void TestZeroPad(bool aligned_size);
FileDescriptorPtr fd_;
test_utils::ScopedTempFile temp_file_{"BzipExtentWriterTest-file.XXXXXX"};
@@ -72,7 +70,6 @@
EXPECT_TRUE(
bzip_writer.Init(fd_, {extents.begin(), extents.end()}, kBlockSize));
EXPECT_TRUE(bzip_writer.Write(test, sizeof(test)));
- EXPECT_TRUE(bzip_writer.End());
brillo::Blob buf;
EXPECT_TRUE(utils::ReadFile(temp_file_.path(), &buf));
@@ -100,8 +97,7 @@
for (size_t i = 0; i < decompressed_data.size(); ++i)
decompressed_data[i] = static_cast<uint8_t>("ABC\n"[i % 4]);
- vector<Extent> extents = {
- ExtentForRange(0, (kDecompressedLength + kBlockSize - 1) / kBlockSize)};
+ vector<Extent> extents = {ExtentForBytes(kBlockSize, 0, kDecompressedLength)};
BzipExtentWriter bzip_writer(std::make_unique<DirectExtentWriter>());
EXPECT_TRUE(
@@ -113,7 +109,6 @@
size_t this_chunk_size = min(kChunkSize, compressed_data.size() - i);
EXPECT_TRUE(bzip_writer.Write(&compressed_data[i], this_chunk_size));
}
- EXPECT_TRUE(bzip_writer.End());
// Check that the const input has not been clobbered.
test_utils::ExpectVectorsEq(original_compressed_data, compressed_data);
diff --git a/payload_consumer/delta_performer.cc b/payload_consumer/delta_performer.cc
index 50b95a0..7dcb5f7 100644
--- a/payload_consumer/delta_performer.cc
+++ b/payload_consumer/delta_performer.cc
@@ -21,6 +21,7 @@
#include <algorithm>
#include <cstring>
+#include <map>
#include <memory>
#include <string>
#include <utility>
@@ -608,6 +609,8 @@
// Clear the download buffer.
DiscardBuffer(false, metadata_size_);
+ block_size_ = manifest_.block_size();
+
// This populates |partitions_| and the |install_plan.partitions| with the
// list of partitions from the manifest.
if (!ParseManifestPartitions(error))
@@ -638,9 +641,11 @@
return false;
}
- if (!OpenCurrentPartition()) {
- *error = ErrorCode::kInstallDeviceOpenError;
- return false;
+ if (next_operation_num_ < acc_num_operations_[current_partition_]) {
+ if (!OpenCurrentPartition()) {
+ *error = ErrorCode::kInstallDeviceOpenError;
+ return false;
+ }
}
if (next_operation_num_ > 0)
@@ -657,9 +662,12 @@
// We know there are more operations to perform because we didn't reach the
// |num_total_operations_| limit yet.
- while (next_operation_num_ >= acc_num_operations_[current_partition_]) {
+ if (next_operation_num_ >= acc_num_operations_[current_partition_]) {
CloseCurrentPartition();
- current_partition_++;
+ // Skip until there are operations for current_partition_.
+ while (next_operation_num_ >= acc_num_operations_[current_partition_]) {
+ current_partition_++;
+ }
if (!OpenCurrentPartition()) {
*error = ErrorCode::kInstallDeviceOpenError;
return false;
@@ -869,9 +877,55 @@
install_part.target_size = info.size();
install_part.target_hash.assign(info.hash().begin(), info.hash().end());
+ install_part.block_size = block_size_;
+ if (partition.has_hash_tree_extent()) {
+ Extent extent = partition.hash_tree_data_extent();
+ install_part.hash_tree_data_offset = extent.start_block() * block_size_;
+ install_part.hash_tree_data_size = extent.num_blocks() * block_size_;
+ extent = partition.hash_tree_extent();
+ install_part.hash_tree_offset = extent.start_block() * block_size_;
+ install_part.hash_tree_size = extent.num_blocks() * block_size_;
+ uint64_t hash_tree_data_end =
+ install_part.hash_tree_data_offset + install_part.hash_tree_data_size;
+ if (install_part.hash_tree_offset < hash_tree_data_end) {
+ LOG(ERROR) << "Invalid hash tree extents, hash tree data ends at "
+ << hash_tree_data_end << ", but hash tree starts at "
+ << install_part.hash_tree_offset;
+ *error = ErrorCode::kDownloadNewPartitionInfoError;
+ return false;
+ }
+ install_part.hash_tree_algorithm = partition.hash_tree_algorithm();
+ install_part.hash_tree_salt.assign(partition.hash_tree_salt().begin(),
+ partition.hash_tree_salt().end());
+ }
+ if (partition.has_fec_extent()) {
+ Extent extent = partition.fec_data_extent();
+ install_part.fec_data_offset = extent.start_block() * block_size_;
+ install_part.fec_data_size = extent.num_blocks() * block_size_;
+ extent = partition.fec_extent();
+ install_part.fec_offset = extent.start_block() * block_size_;
+ install_part.fec_size = extent.num_blocks() * block_size_;
+ uint64_t fec_data_end =
+ install_part.fec_data_offset + install_part.fec_data_size;
+ if (install_part.fec_offset < fec_data_end) {
+ LOG(ERROR) << "Invalid fec extents, fec data ends at " << fec_data_end
+ << ", but fec starts at " << install_part.fec_offset;
+ *error = ErrorCode::kDownloadNewPartitionInfoError;
+ return false;
+ }
+ install_part.fec_roots = partition.fec_roots();
+ }
+
install_plan_->partitions.push_back(install_part);
}
+ if (install_plan_->target_slot != BootControlInterface::kInvalidSlot) {
+ if (!InitPartitionMetadata()) {
+ *error = ErrorCode::kInstallDeviceOpenError;
+ return false;
+ }
+ }
+
if (!install_plan_->LoadPartitionsFromSlots(boot_control_)) {
LOG(ERROR) << "Unable to determine all the partition devices.";
*error = ErrorCode::kInstallDeviceOpenError;
@@ -881,6 +935,49 @@
return true;
}
+bool DeltaPerformer::InitPartitionMetadata() {
+ BootControlInterface::PartitionMetadata partition_metadata;
+ if (manifest_.has_dynamic_partition_metadata()) {
+ std::map<string, uint64_t> partition_sizes;
+ for (const auto& partition : install_plan_->partitions) {
+ partition_sizes.emplace(partition.name, partition.target_size);
+ }
+ for (const auto& group : manifest_.dynamic_partition_metadata().groups()) {
+ BootControlInterface::PartitionMetadata::Group e;
+ e.name = group.name();
+ e.size = group.size();
+ for (const auto& partition_name : group.partition_names()) {
+ auto it = partition_sizes.find(partition_name);
+ if (it == partition_sizes.end()) {
+ // TODO(tbao): Support auto-filling partition info for framework-only
+ // OTA.
+ LOG(ERROR) << "dynamic_partition_metadata contains partition "
+ << partition_name
+ << " but it is not part of the manifest. "
+ << "This is not supported.";
+ return false;
+ }
+ e.partitions.push_back({partition_name, it->second});
+ }
+ partition_metadata.groups.push_back(std::move(e));
+ }
+ }
+
+ bool metadata_updated = false;
+ prefs_->GetBoolean(kPrefsDynamicPartitionMetadataUpdated, &metadata_updated);
+ if (!boot_control_->InitPartitionMetadata(
+ install_plan_->target_slot, partition_metadata, !metadata_updated)) {
+ LOG(ERROR) << "Unable to initialize partition metadata for slot "
+ << BootControlInterface::SlotName(install_plan_->target_slot);
+ return false;
+ }
+ TEST_AND_RETURN_FALSE(
+ prefs_->SetBoolean(kPrefsDynamicPartitionMetadataUpdated, true));
+ LOG(INFO) << "InitPartitionMetadata done.";
+
+ return true;
+}
+
bool DeltaPerformer::CanPerformInstallOperation(
const chromeos_update_engine::InstallOperation& operation) {
// If we don't have a data blob we can apply it right away.
@@ -917,8 +1014,7 @@
}
// Setup the ExtentWriter stack based on the operation type.
- std::unique_ptr<ExtentWriter> writer = std::make_unique<ZeroPadExtentWriter>(
- std::make_unique<DirectExtentWriter>());
+ std::unique_ptr<ExtentWriter> writer = std::make_unique<DirectExtentWriter>();
if (operation.type() == InstallOperation::REPLACE_BZ) {
writer.reset(new BzipExtentWriter(std::move(writer)));
@@ -929,7 +1025,6 @@
TEST_AND_RETURN_FALSE(
writer->Init(target_fd_, operation.dst_extents(), block_size_));
TEST_AND_RETURN_FALSE(writer->Write(buffer_.data(), operation.data_length()));
- TEST_AND_RETURN_FALSE(writer->End());
// Update buffer
DiscardBuffer(true, buffer_.size());
@@ -1289,12 +1384,7 @@
return true;
}
- bool Close() override {
- if (writer_ != nullptr) {
- TEST_AND_RETURN_FALSE(writer_->End());
- }
- return true;
- }
+ bool Close() override { return true; }
bool GetSize(uint64_t* size) override {
*size = size_;
@@ -1408,12 +1498,7 @@
return true;
}
- bool Close() override {
- if (!is_read_) {
- TEST_AND_RETURN_FALSE(writer_->End());
- }
- return true;
- }
+ bool Close() override { return true; }
private:
PuffinExtentStream(std::unique_ptr<ExtentReader> reader,
@@ -1588,6 +1673,24 @@
}
}
+ if (manifest_.max_timestamp() < hardware_->GetBuildTimestamp()) {
+ LOG(ERROR) << "The current OS build timestamp ("
+ << hardware_->GetBuildTimestamp()
+ << ") is newer than the maximum timestamp in the manifest ("
+ << manifest_.max_timestamp() << ")";
+ return ErrorCode::kPayloadTimestampError;
+ }
+
+ if (major_payload_version_ == kChromeOSMajorPayloadVersion) {
+ if (manifest_.has_dynamic_partition_metadata()) {
+ LOG(ERROR)
+ << "Should not contain dynamic_partition_metadata for major version "
+ << kChromeOSMajorPayloadVersion
+ << ". Please use major version 2 or above.";
+ return ErrorCode::kPayloadMismatchedType;
+ }
+ }
+
// TODO(garnold) we should be adding more and more manifest checks, such as
// partition boundaries etc (see chromium-os:37661).
@@ -1792,6 +1895,8 @@
prefs->SetInt64(kPrefsManifestSignatureSize, -1);
prefs->SetInt64(kPrefsResumedUpdateFailures, 0);
prefs->Delete(kPrefsPostInstallSucceeded);
+ prefs->Delete(kPrefsVerityWritten);
+ prefs->Delete(kPrefsDynamicPartitionMetadataUpdated);
}
return true;
}
@@ -1840,7 +1945,6 @@
bool DeltaPerformer::PrimeUpdateState() {
CHECK(manifest_valid_);
- block_size_ = manifest_.block_size();
int64_t next_operation = kUpdateStateOperationInvalid;
if (!prefs_->GetInt64(kPrefsUpdateStateNextOperation, &next_operation) ||
diff --git a/payload_consumer/delta_performer.h b/payload_consumer/delta_performer.h
index e3d429b..8597a37 100644
--- a/payload_consumer/delta_performer.h
+++ b/payload_consumer/delta_performer.h
@@ -272,6 +272,10 @@
// it up.
bool GetPublicKeyFromResponse(base::FilePath *out_tmp_key);
+ // After install_plan_ is filled with partition names and sizes, initialize
+ // metadata of partitions and map necessary devices before opening devices.
+ bool InitPartitionMetadata();
+
// Update Engine preference store.
PrefsInterface* prefs_;
diff --git a/payload_consumer/delta_performer_integration_test.cc b/payload_consumer/delta_performer_integration_test.cc
index 78647a5..0912764 100644
--- a/payload_consumer/delta_performer_integration_test.cc
+++ b/payload_consumer/delta_performer_integration_test.cc
@@ -39,6 +39,7 @@
#include "update_engine/common/utils.h"
#include "update_engine/payload_consumer/mock_download_action.h"
#include "update_engine/payload_consumer/payload_constants.h"
+#include "update_engine/payload_consumer/payload_metadata.h"
#include "update_engine/payload_consumer/payload_verifier.h"
#include "update_engine/payload_generator/delta_diff_generator.h"
#include "update_engine/payload_generator/payload_signer.h"
@@ -234,9 +235,7 @@
RSA_free(rsa);
}
int signature_size = GetSignatureSize(private_key_path);
- string hash_file;
- ASSERT_TRUE(utils::MakeTempFile("hash.XXXXXX", &hash_file, nullptr));
- ScopedPathUnlinker hash_unlinker(hash_file);
+ test_utils::ScopedTempFile hash_file("hash.XXXXXX");
string signature_size_string;
if (signature_test == kSignatureGeneratedShellRotateCl1 ||
signature_test == kSignatureGeneratedShellRotateCl2)
@@ -251,28 +250,25 @@
delta_generator_path.c_str(),
payload_path.c_str(),
signature_size_string.c_str(),
- hash_file.c_str())));
+ hash_file.path().c_str())));
// Sign the hash
brillo::Blob hash, signature;
- ASSERT_TRUE(utils::ReadFile(hash_file, &hash));
+ ASSERT_TRUE(utils::ReadFile(hash_file.path(), &hash));
ASSERT_TRUE(PayloadSigner::SignHash(hash, private_key_path, &signature));
- string sig_file;
- ASSERT_TRUE(utils::MakeTempFile("signature.XXXXXX", &sig_file, nullptr));
- ScopedPathUnlinker sig_unlinker(sig_file);
- ASSERT_TRUE(test_utils::WriteFileVector(sig_file, signature));
+ test_utils::ScopedTempFile sig_file("signature.XXXXXX");
+ ASSERT_TRUE(test_utils::WriteFileVector(sig_file.path(), signature));
+ string sig_files = sig_file.path();
- string sig_file2;
- ASSERT_TRUE(utils::MakeTempFile("signature.XXXXXX", &sig_file2, nullptr));
- ScopedPathUnlinker sig2_unlinker(sig_file2);
+ test_utils::ScopedTempFile sig_file2("signature.XXXXXX");
if (signature_test == kSignatureGeneratedShellRotateCl1 ||
signature_test == kSignatureGeneratedShellRotateCl2) {
ASSERT_TRUE(PayloadSigner::SignHash(
hash, GetBuildArtifactsPath(kUnittestPrivateKey2Path), &signature));
- ASSERT_TRUE(test_utils::WriteFileVector(sig_file2, signature));
+ ASSERT_TRUE(test_utils::WriteFileVector(sig_file2.path(), signature));
// Append second sig file to first path
- sig_file += ":" + sig_file2;
+ sig_files += ":" + sig_file2.path();
}
ASSERT_EQ(0,
@@ -280,7 +276,7 @@
"%s -in_file=%s -payload_signature_file=%s -out_file=%s",
delta_generator_path.c_str(),
payload_path.c_str(),
- sig_file.c_str(),
+ sig_files.c_str(),
payload_path.c_str())));
int verify_result = System(base::StringPrintf(
"%s -in_file=%s -public_key=%s -public_key_version=%d",
@@ -586,16 +582,14 @@
uint32_t minor_version) {
// Check the metadata.
{
- DeltaArchiveManifest manifest;
- EXPECT_TRUE(PayloadSigner::LoadPayloadMetadata(state->delta_path,
- nullptr,
- &manifest,
- nullptr,
- &state->metadata_size,
- nullptr));
- LOG(INFO) << "Metadata size: " << state->metadata_size;
EXPECT_TRUE(utils::ReadFile(state->delta_path, &state->delta));
+ PayloadMetadata payload_metadata;
+ EXPECT_TRUE(payload_metadata.ParsePayloadHeader(state->delta));
+ state->metadata_size = payload_metadata.GetMetadataSize();
+ LOG(INFO) << "Metadata size: " << state->metadata_size;
+ DeltaArchiveManifest manifest;
+ EXPECT_TRUE(payload_metadata.GetManifest(state->delta, &manifest));
if (signature_test == kSignatureNone) {
EXPECT_FALSE(manifest.has_signatures_offset());
EXPECT_FALSE(manifest.has_signatures_size());
@@ -703,6 +697,8 @@
.WillRepeatedly(Return(true));
EXPECT_CALL(prefs, SetString(kPrefsUpdateStateSignedSHA256Context, _))
.WillRepeatedly(Return(true));
+ EXPECT_CALL(prefs, SetBoolean(kPrefsDynamicPartitionMetadataUpdated, _))
+ .WillRepeatedly(Return(true));
if (op_hash_test == kValidOperationData && signature_test != kSignatureNone) {
EXPECT_CALL(prefs, SetString(kPrefsUpdateStateSignatureBlob, _))
.WillOnce(Return(true));
diff --git a/payload_consumer/delta_performer_unittest.cc b/payload_consumer/delta_performer_unittest.cc
index 21f22d6..3cddee4 100644
--- a/payload_consumer/delta_performer_unittest.cc
+++ b/payload_consumer/delta_performer_unittest.cc
@@ -81,13 +81,16 @@
};
// Compressed data without checksum, generated with:
-// echo -n a | xz -9 --check=none | hexdump -v -e '" " 12/1 "0x%02x, " "\n"'
+// echo -n "a$(head -c 4095 /dev/zero)" | xz -9 --check=none |
+// hexdump -v -e '" " 12/1 "0x%02x, " "\n"'
const uint8_t kXzCompressedData[] = {
0xfd, 0x37, 0x7a, 0x58, 0x5a, 0x00, 0x00, 0x00, 0xff, 0x12, 0xd9, 0x41,
0x02, 0x00, 0x21, 0x01, 0x1c, 0x00, 0x00, 0x00, 0x10, 0xcf, 0x58, 0xcc,
- 0x01, 0x00, 0x00, 0x61, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x11, 0x01,
- 0xad, 0xa6, 0x58, 0x04, 0x06, 0x72, 0x9e, 0x7a, 0x01, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x59, 0x5a,
+ 0xe0, 0x0f, 0xff, 0x00, 0x1b, 0x5d, 0x00, 0x30, 0x80, 0x33, 0xff, 0xdf,
+ 0xff, 0x51, 0xd6, 0xaf, 0x90, 0x1c, 0x1b, 0x4c, 0xaa, 0x3d, 0x7b, 0x28,
+ 0xe4, 0x7a, 0x74, 0xbc, 0xe5, 0xa7, 0x33, 0x4e, 0xcf, 0x00, 0x00, 0x00,
+ 0x00, 0x01, 0x2f, 0x80, 0x20, 0x00, 0x00, 0x00, 0x92, 0x7c, 0x7b, 0x24,
+ 0xa8, 0x00, 0x0a, 0xfc, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x59, 0x5a,
};
const uint8_t src_deflates[] = {
@@ -185,12 +188,8 @@
bool sign_payload,
uint64_t major_version,
uint32_t minor_version) {
- string blob_path;
- EXPECT_TRUE(utils::MakeTempFile("Blob-XXXXXX", &blob_path, nullptr));
- ScopedPathUnlinker blob_unlinker(blob_path);
- EXPECT_TRUE(utils::WriteFile(blob_path.c_str(),
- blob_data.data(),
- blob_data.size()));
+ test_utils::ScopedTempFile blob_file("Blob-XXXXXX");
+ EXPECT_TRUE(test_utils::WriteFileVector(blob_file.path(), blob_data));
PayloadGenerationConfig config;
config.version.major = major_version;
@@ -218,16 +217,16 @@
new_part.size = 0;
payload.AddPartition(old_part, new_part, {});
- string payload_path;
- EXPECT_TRUE(utils::MakeTempFile("Payload-XXXXXX", &payload_path, nullptr));
- ScopedPathUnlinker payload_unlinker(payload_path);
+ test_utils::ScopedTempFile payload_file("Payload-XXXXXX");
string private_key =
sign_payload ? GetBuildArtifactsPath(kUnittestPrivateKeyPath) : "";
- EXPECT_TRUE(payload.WritePayload(
- payload_path, blob_path, private_key, &payload_.metadata_size));
+ EXPECT_TRUE(payload.WritePayload(payload_file.path(),
+ blob_file.path(),
+ private_key,
+ &payload_.metadata_size));
brillo::Blob payload_data;
- EXPECT_TRUE(utils::ReadFile(payload_path, &payload_data));
+ EXPECT_TRUE(utils::ReadFile(payload_file.path(), &payload_data));
return payload_data;
}
@@ -268,16 +267,13 @@
const string& source_path,
const brillo::Blob& target_data,
bool expect_success) {
- string new_part;
- EXPECT_TRUE(utils::MakeTempFile("Partition-XXXXXX", &new_part, nullptr));
- ScopedPathUnlinker partition_unlinker(new_part);
- EXPECT_TRUE(utils::WriteFile(new_part.c_str(), target_data.data(),
- target_data.size()));
+ test_utils::ScopedTempFile new_part("Partition-XXXXXX");
+ EXPECT_TRUE(test_utils::WriteFileVector(new_part.path(), target_data));
// We installed the operations only in the rootfs partition, but the
// delta performer needs to access all the partitions.
fake_boot_control_.SetPartitionDevice(
- kPartitionNameRoot, install_plan_.target_slot, new_part);
+ kPartitionNameRoot, install_plan_.target_slot, new_part.path());
fake_boot_control_.SetPartitionDevice(
kPartitionNameRoot, install_plan_.source_slot, source_path);
fake_boot_control_.SetPartitionDevice(
@@ -290,7 +286,7 @@
EXPECT_EQ(0, performer_.Close());
brillo::Blob partition_data;
- EXPECT_TRUE(utils::ReadFile(new_part, &partition_data));
+ EXPECT_TRUE(utils::ReadFile(new_part.path(), &partition_data));
return partition_data;
}
@@ -515,8 +511,8 @@
TEST_F(DeltaPerformerTest, ReplaceXzOperationTest) {
brillo::Blob xz_data(std::begin(kXzCompressedData),
std::end(kXzCompressedData));
- // The compressed xz data contains only a single "a", but the operation should
- // pad the rest of the two blocks with zeros.
+ // The compressed xz data contains a single "a" and padded with zero for the
+ // rest of the block.
brillo::Blob expected_data = brillo::Blob(4096, 0);
expected_data[0] = 'a';
@@ -568,15 +564,10 @@
brillo::Blob payload_data = GeneratePayload(brillo::Blob(), {aop}, false);
- string source_path;
- EXPECT_TRUE(utils::MakeTempFile("Source-XXXXXX",
- &source_path, nullptr));
- ScopedPathUnlinker path_unlinker(source_path);
- EXPECT_TRUE(utils::WriteFile(source_path.c_str(),
- expected_data.data(),
- expected_data.size()));
+ test_utils::ScopedTempFile source("Source-XXXXXX");
+ EXPECT_TRUE(test_utils::WriteFileVector(source.path(), expected_data));
- EXPECT_EQ(expected_data, ApplyPayload(payload_data, source_path, true));
+ EXPECT_EQ(expected_data, ApplyPayload(payload_data, source.path(), true));
}
TEST_F(DeltaPerformerTest, PuffdiffOperationTest) {
@@ -596,13 +587,11 @@
brillo::Blob payload_data = GeneratePayload(puffdiff_payload, {aop}, false);
- string source_path;
- EXPECT_TRUE(utils::MakeTempFile("Source-XXXXXX", &source_path, nullptr));
- ScopedPathUnlinker path_unlinker(source_path);
- EXPECT_TRUE(utils::WriteFile(source_path.c_str(), src.data(), src.size()));
+ test_utils::ScopedTempFile source("Source-XXXXXX");
+ EXPECT_TRUE(test_utils::WriteFileVector(source.path(), src));
brillo::Blob dst(std::begin(dst_deflates), std::end(dst_deflates));
- EXPECT_EQ(dst, ApplyPayload(payload_data, source_path, true));
+ EXPECT_EQ(dst, ApplyPayload(payload_data, source.path(), true));
}
TEST_F(DeltaPerformerTest, SourceHashMismatchTest) {
@@ -621,27 +610,21 @@
brillo::Blob payload_data = GeneratePayload(brillo::Blob(), {aop}, false);
- string source_path;
- EXPECT_TRUE(utils::MakeTempFile("Source-XXXXXX", &source_path, nullptr));
- ScopedPathUnlinker path_unlinker(source_path);
- EXPECT_TRUE(utils::WriteFile(source_path.c_str(), actual_data.data(),
- actual_data.size()));
+ test_utils::ScopedTempFile source("Source-XXXXXX");
+ EXPECT_TRUE(test_utils::WriteFileVector(source.path(), actual_data));
- EXPECT_EQ(actual_data, ApplyPayload(payload_data, source_path, false));
+ EXPECT_EQ(actual_data, ApplyPayload(payload_data, source.path(), false));
}
// Test that the error-corrected file descriptor is used to read the partition
// since the source partition doesn't match the operation hash.
TEST_F(DeltaPerformerTest, ErrorCorrectionSourceCopyFallbackTest) {
- const size_t kCopyOperationSize = 4 * 4096;
- string source_path;
- EXPECT_TRUE(utils::MakeTempFile("Source-XXXXXX", &source_path, nullptr));
- ScopedPathUnlinker path_unlinker(source_path);
+ constexpr size_t kCopyOperationSize = 4 * 4096;
+ test_utils::ScopedTempFile source("Source-XXXXXX");
// Write invalid data to the source image, which doesn't match the expected
// hash.
brillo::Blob invalid_data(kCopyOperationSize, 0x55);
- EXPECT_TRUE(utils::WriteFile(
- source_path.c_str(), invalid_data.data(), invalid_data.size()));
+ EXPECT_TRUE(test_utils::WriteFileVector(source.path(), invalid_data));
// Setup the fec file descriptor as the fake stream, which matches
// |expected_data|.
@@ -649,7 +632,7 @@
brillo::Blob expected_data = FakeFileDescriptorData(kCopyOperationSize);
brillo::Blob payload_data = GenerateSourceCopyPayload(expected_data, true);
- EXPECT_EQ(expected_data, ApplyPayload(payload_data, source_path, true));
+ EXPECT_EQ(expected_data, ApplyPayload(payload_data, source.path(), true));
// Verify that the fake_fec was actually used.
EXPECT_EQ(1U, fake_fec->GetReadOps().size());
EXPECT_EQ(1U, GetSourceEccRecoveredFailures());
@@ -659,14 +642,11 @@
// when no hash is available for SOURCE_COPY but it falls back to the normal
// file descriptor when the size of the error corrected one is too small.
TEST_F(DeltaPerformerTest, ErrorCorrectionSourceCopyWhenNoHashFallbackTest) {
- const size_t kCopyOperationSize = 4 * 4096;
- string source_path;
- EXPECT_TRUE(utils::MakeTempFile("Source-XXXXXX", &source_path, nullptr));
- ScopedPathUnlinker path_unlinker(source_path);
+ constexpr size_t kCopyOperationSize = 4 * 4096;
+ test_utils::ScopedTempFile source("Source-XXXXXX");
// Setup the source path with the right expected data.
brillo::Blob expected_data = FakeFileDescriptorData(kCopyOperationSize);
- EXPECT_TRUE(utils::WriteFile(
- source_path.c_str(), expected_data.data(), expected_data.size()));
+ EXPECT_TRUE(test_utils::WriteFileVector(source.path(), expected_data));
// Setup the fec file descriptor as the fake stream, with smaller data than
// the expected.
@@ -674,7 +654,7 @@
// The payload operation doesn't include an operation hash.
brillo::Blob payload_data = GenerateSourceCopyPayload(expected_data, false);
- EXPECT_EQ(expected_data, ApplyPayload(payload_data, source_path, true));
+ EXPECT_EQ(expected_data, ApplyPayload(payload_data, source.path(), true));
// Verify that the fake_fec was attempted to be used. Since the file
// descriptor is shorter it can actually do more than one read to realize it
// reached the EOF.
@@ -685,18 +665,15 @@
}
TEST_F(DeltaPerformerTest, ChooseSourceFDTest) {
- const size_t kSourceSize = 4 * 4096;
- string source_path;
- EXPECT_TRUE(utils::MakeTempFile("Source-XXXXXX", &source_path, nullptr));
- ScopedPathUnlinker path_unlinker(source_path);
+ constexpr size_t kSourceSize = 4 * 4096;
+ test_utils::ScopedTempFile source("Source-XXXXXX");
// Write invalid data to the source image, which doesn't match the expected
// hash.
brillo::Blob invalid_data(kSourceSize, 0x55);
- EXPECT_TRUE(utils::WriteFile(
- source_path.c_str(), invalid_data.data(), invalid_data.size()));
+ EXPECT_TRUE(test_utils::WriteFileVector(source.path(), invalid_data));
performer_.source_fd_ = std::make_shared<EintrSafeFileDescriptor>();
- performer_.source_fd_->Open(source_path.c_str(), O_RDONLY);
+ performer_.source_fd_->Open(source.path().c_str(), O_RDONLY);
performer_.block_size_ = 4096;
// Setup the fec file descriptor as the fake stream, which matches
@@ -861,6 +838,20 @@
ErrorCode::kUnsupportedMinorPayloadVersion);
}
+TEST_F(DeltaPerformerTest, ValidateManifestDowngrade) {
+ // The Manifest we are validating.
+ DeltaArchiveManifest manifest;
+
+ manifest.set_minor_version(kFullPayloadMinorVersion);
+ manifest.set_max_timestamp(1);
+ fake_hardware_.SetBuildTimestamp(2);
+
+ RunManifestValidation(manifest,
+ kMaxSupportedMajorPayloadVersion,
+ InstallPayloadType::kFull,
+ ErrorCode::kPayloadTimestampError);
+}
+
TEST_F(DeltaPerformerTest, BrilloMetadataSignatureSizeTest) {
unsigned int seed = time(nullptr);
EXPECT_TRUE(performer_.Write(kDeltaMagic, sizeof(kDeltaMagic)));
diff --git a/payload_consumer/download_action.cc b/payload_consumer/download_action.cc
index ab9f2e8..516a456 100644
--- a/payload_consumer/download_action.cc
+++ b/payload_consumer/download_action.cc
@@ -56,7 +56,9 @@
delegate_(nullptr),
p2p_sharing_fd_(-1),
p2p_visible_(true) {
+#if BASE_VER < 576279
base::StatisticsRecorder::Initialize();
+#endif
}
DownloadAction::~DownloadAction() {}
diff --git a/payload_consumer/extent_writer.h b/payload_consumer/extent_writer.h
index 2c15861..9e53561 100644
--- a/payload_consumer/extent_writer.h
+++ b/payload_consumer/extent_writer.h
@@ -35,9 +35,7 @@
class ExtentWriter {
public:
ExtentWriter() = default;
- virtual ~ExtentWriter() {
- LOG_IF(ERROR, !end_called_) << "End() not called on ExtentWriter.";
- }
+ virtual ~ExtentWriter() = default;
// Returns true on success.
virtual bool Init(FileDescriptorPtr fd,
@@ -46,16 +44,6 @@
// Returns true on success.
virtual bool Write(const void* bytes, size_t count) = 0;
-
- // Should be called when all writing is complete. Returns true on success.
- // The fd is not closed. Caller is responsible for closing it.
- bool End() {
- end_called_ = true;
- return EndImpl();
- }
- virtual bool EndImpl() = 0;
- private:
- bool end_called_{false};
};
// DirectExtentWriter is probably the simplest ExtentWriter implementation.
@@ -76,7 +64,6 @@
return true;
}
bool Write(const void* bytes, size_t count) override;
- bool EndImpl() override { return true; }
private:
FileDescriptorPtr fd_{nullptr};
@@ -89,48 +76,6 @@
google::protobuf::RepeatedPtrField<Extent>::iterator cur_extent_;
};
-// Takes an underlying ExtentWriter to which all operations are delegated.
-// When End() is called, ZeroPadExtentWriter ensures that the total number
-// of bytes written is a multiple of block_size_. If not, it writes zeros
-// to pad as needed.
-
-class ZeroPadExtentWriter : public ExtentWriter {
- public:
- explicit ZeroPadExtentWriter(
- std::unique_ptr<ExtentWriter> underlying_extent_writer)
- : underlying_extent_writer_(std::move(underlying_extent_writer)) {}
- ~ZeroPadExtentWriter() override = default;
-
- bool Init(FileDescriptorPtr fd,
- const google::protobuf::RepeatedPtrField<Extent>& extents,
- uint32_t block_size) override {
- block_size_ = block_size;
- return underlying_extent_writer_->Init(fd, extents, block_size);
- }
- bool Write(const void* bytes, size_t count) override {
- if (underlying_extent_writer_->Write(bytes, count)) {
- bytes_written_mod_block_size_ += count;
- bytes_written_mod_block_size_ %= block_size_;
- return true;
- }
- return false;
- }
- bool EndImpl() override {
- if (bytes_written_mod_block_size_) {
- const size_t write_size = block_size_ - bytes_written_mod_block_size_;
- brillo::Blob zeros(write_size, 0);
- TEST_AND_RETURN_FALSE(underlying_extent_writer_->Write(zeros.data(),
- write_size));
- }
- return underlying_extent_writer_->End();
- }
-
- private:
- std::unique_ptr<ExtentWriter> underlying_extent_writer_;
- size_t block_size_{0};
- size_t bytes_written_mod_block_size_{0};
-};
-
} // namespace chromeos_update_engine
#endif // UPDATE_ENGINE_PAYLOAD_CONSUMER_EXTENT_WRITER_H_
diff --git a/payload_consumer/extent_writer_unittest.cc b/payload_consumer/extent_writer_unittest.cc
index 48b27cb..580c4a6 100644
--- a/payload_consumer/extent_writer_unittest.cc
+++ b/payload_consumer/extent_writer_unittest.cc
@@ -59,7 +59,6 @@
// resultant file should look like and ensure that the extent writer
// wrote the file correctly.
void WriteAlignedExtents(size_t chunk_size, size_t first_chunk_size);
- void TestZeroPad(bool aligned_size);
FileDescriptorPtr fd_;
test_utils::ScopedTempFile temp_file_{"ExtentWriterTest-file.XXXXXX"};
@@ -72,7 +71,6 @@
EXPECT_TRUE(
direct_writer.Init(fd_, {extents.begin(), extents.end()}, kBlockSize));
EXPECT_TRUE(direct_writer.Write(bytes.data(), bytes.size()));
- EXPECT_TRUE(direct_writer.End());
EXPECT_EQ(static_cast<off_t>(kBlockSize + bytes.size()),
utils::FileSize(temp_file_.path()));
@@ -92,7 +90,6 @@
EXPECT_TRUE(
direct_writer.Init(fd_, {extents.begin(), extents.end()}, kBlockSize));
EXPECT_TRUE(direct_writer.Write(nullptr, 0));
- EXPECT_TRUE(direct_writer.End());
}
TEST_F(ExtentWriterTest, OverflowExtentTest) {
@@ -127,7 +124,6 @@
EXPECT_TRUE(direct_writer.Write(&data[bytes_written], bytes_to_write));
bytes_written += bytes_to_write;
}
- EXPECT_TRUE(direct_writer.End());
EXPECT_EQ(static_cast<off_t>(data.size()),
utils::FileSize(temp_file_.path()));
@@ -146,50 +142,6 @@
ExpectVectorsEq(expected_file, result_file);
}
-TEST_F(ExtentWriterTest, ZeroPadNullTest) {
- TestZeroPad(true);
-}
-
-TEST_F(ExtentWriterTest, ZeroPadFillTest) {
- TestZeroPad(false);
-}
-
-void ExtentWriterTest::TestZeroPad(bool aligned_size) {
- vector<Extent> extents = {ExtentForRange(1, 1), ExtentForRange(0, 1)};
- brillo::Blob data(kBlockSize * 2);
- test_utils::FillWithData(&data);
-
- ZeroPadExtentWriter zero_pad_writer(std::make_unique<DirectExtentWriter>());
-
- EXPECT_TRUE(
- zero_pad_writer.Init(fd_, {extents.begin(), extents.end()}, kBlockSize));
- size_t bytes_to_write = data.size();
- const size_t missing_bytes = (aligned_size ? 0 : 9);
- bytes_to_write -= missing_bytes;
- fd_->Seek(kBlockSize - missing_bytes, SEEK_SET);
- EXPECT_EQ(3, fd_->Write("xxx", 3));
- ASSERT_TRUE(zero_pad_writer.Write(data.data(), bytes_to_write));
- EXPECT_TRUE(zero_pad_writer.End());
-
- EXPECT_EQ(static_cast<off_t>(data.size()),
- utils::FileSize(temp_file_.path()));
-
- brillo::Blob result_file;
- EXPECT_TRUE(utils::ReadFile(temp_file_.path(), &result_file));
-
- brillo::Blob expected_file;
- expected_file.insert(expected_file.end(),
- data.begin() + kBlockSize,
- data.begin() + kBlockSize * 2);
- expected_file.insert(expected_file.end(),
- data.begin(), data.begin() + kBlockSize);
- if (missing_bytes) {
- memset(&expected_file[kBlockSize - missing_bytes], 0, missing_bytes);
- }
-
- ExpectVectorsEq(expected_file, result_file);
-}
-
TEST_F(ExtentWriterTest, SparseFileTest) {
vector<Extent> extents = {ExtentForRange(1, 1),
ExtentForRange(kSparseHole, 2),
@@ -211,7 +163,6 @@
EXPECT_TRUE(direct_writer.Write(data.data(), bytes_to_write));
bytes_written += bytes_to_write;
}
- EXPECT_TRUE(direct_writer.End());
// check file size, then data inside
ASSERT_EQ(static_cast<off_t>(2 * kBlockSize),
diff --git a/payload_consumer/fake_extent_writer.h b/payload_consumer/fake_extent_writer.h
index 4418a9e..7b2b7ac 100644
--- a/payload_consumer/fake_extent_writer.h
+++ b/payload_consumer/fake_extent_writer.h
@@ -40,26 +40,20 @@
return true;
};
bool Write(const void* bytes, size_t count) override {
- if (!init_called_ || end_called_)
+ if (!init_called_)
return false;
written_data_.insert(written_data_.end(),
reinterpret_cast<const uint8_t*>(bytes),
reinterpret_cast<const uint8_t*>(bytes) + count);
return true;
}
- bool EndImpl() override {
- end_called_ = true;
- return true;
- }
// Fake methods.
bool InitCalled() { return init_called_; }
- bool EndCalled() { return end_called_; }
brillo::Blob WrittenData() { return written_data_; }
private:
bool init_called_{false};
- bool end_called_{false};
brillo::Blob written_data_;
DISALLOW_COPY_AND_ASSIGN(FakeExtentWriter);
diff --git a/payload_consumer/file_descriptor_utils.cc b/payload_consumer/file_descriptor_utils.cc
index ebfb977..846cbd7 100644
--- a/payload_consumer/file_descriptor_utils.cc
+++ b/payload_consumer/file_descriptor_utils.cc
@@ -88,7 +88,6 @@
utils::BlocksInExtents(tgt_extents));
TEST_AND_RETURN_FALSE(
CommonHashExtents(source, src_extents, &writer, block_size, hash_out));
- TEST_AND_RETURN_FALSE(writer.End());
return true;
}
diff --git a/payload_consumer/file_writer_unittest.cc b/payload_consumer/file_writer_unittest.cc
index 92837c8..05df307 100644
--- a/payload_consumer/file_writer_unittest.cc
+++ b/payload_consumer/file_writer_unittest.cc
@@ -36,19 +36,17 @@
TEST(FileWriterTest, SimpleTest) {
// Create a uniquely named file for testing.
- string path;
- ASSERT_TRUE(utils::MakeTempFile("FileWriterTest-XXXXXX", &path, nullptr));
- ScopedPathUnlinker path_unlinker(path);
-
+ test_utils::ScopedTempFile file("FileWriterTest-XXXXXX");
DirectFileWriter file_writer;
- EXPECT_EQ(0, file_writer.Open(path.c_str(),
- O_CREAT | O_LARGEFILE | O_TRUNC | O_WRONLY,
- 0644));
+ EXPECT_EQ(0,
+ file_writer.Open(file.path().c_str(),
+ O_CREAT | O_LARGEFILE | O_TRUNC | O_WRONLY,
+ 0644));
EXPECT_TRUE(file_writer.Write("test", 4));
brillo::Blob actual_data;
- EXPECT_TRUE(utils::ReadFile(path, &actual_data));
+ EXPECT_TRUE(utils::ReadFile(file.path(), &actual_data));
- EXPECT_FALSE(memcmp("test", actual_data.data(), actual_data.size()));
+ EXPECT_EQ("test", string(actual_data.begin(), actual_data.end()));
EXPECT_EQ(0, file_writer.Close());
}
@@ -61,14 +59,12 @@
TEST(FileWriterTest, WriteErrorTest) {
// Create a uniquely named file for testing.
- string path;
- ASSERT_TRUE(utils::MakeTempFile("FileWriterTest-XXXXXX", &path, nullptr));
- ScopedPathUnlinker path_unlinker(path);
-
+ test_utils::ScopedTempFile file("FileWriterTest-XXXXXX");
DirectFileWriter file_writer;
- EXPECT_EQ(0, file_writer.Open(path.c_str(),
- O_CREAT | O_LARGEFILE | O_TRUNC | O_RDONLY,
- 0644));
+ EXPECT_EQ(0,
+ file_writer.Open(file.path().c_str(),
+ O_CREAT | O_LARGEFILE | O_TRUNC | O_RDONLY,
+ 0644));
EXPECT_FALSE(file_writer.Write("x", 1));
EXPECT_EQ(0, file_writer.Close());
}
diff --git a/payload_consumer/filesystem_verifier_action.cc b/payload_consumer/filesystem_verifier_action.cc
index 6a379e5..c9cb5af 100644
--- a/payload_consumer/filesystem_verifier_action.cc
+++ b/payload_consumer/filesystem_verifier_action.cc
@@ -29,10 +29,7 @@
#include <brillo/data_encoding.h>
#include <brillo/streams/file_stream.h>
-#include "update_engine/common/boot_control_interface.h"
#include "update_engine/common/utils.h"
-#include "update_engine/payload_consumer/delta_performer.h"
-#include "update_engine/payload_consumer/payload_constants.h"
using brillo::data_encoding::Base64Encode;
using std::string;
@@ -87,24 +84,38 @@
Cleanup(ErrorCode::kSuccess);
return;
}
- InstallPlan::Partition& partition =
+ const InstallPlan::Partition& partition =
install_plan_.partitions[partition_index_];
string part_path;
switch (verifier_step_) {
case VerifierStep::kVerifySourceHash:
part_path = partition.source_path;
- remaining_size_ = partition.source_size;
+ partition_size_ = partition.source_size;
break;
case VerifierStep::kVerifyTargetHash:
part_path = partition.target_path;
- remaining_size_ = partition.target_size;
+ partition_size_ = partition.target_size;
break;
}
+
+ if (part_path.empty()) {
+ if (partition_size_ == 0) {
+ LOG(INFO) << "Skip hashing partition " << partition_index_ << " ("
+ << partition.name << ") because size is 0.";
+ partition_index_++;
+ StartPartitionHashing();
+ return;
+ }
+ LOG(ERROR) << "Cannot hash partition " << partition_index_ << " ("
+ << partition.name
+ << ") because its device path cannot be determined.";
+ Cleanup(ErrorCode::kFilesystemVerifierError);
+ return;
+ }
+
LOG(INFO) << "Hashing partition " << partition_index_ << " ("
<< partition.name << ") on device " << part_path;
- if (part_path.empty())
- return Cleanup(ErrorCode::kFilesystemVerifierError);
brillo::ErrorPtr error;
src_stream_ = brillo::FileStream::Open(
@@ -115,33 +126,55 @@
if (!src_stream_) {
LOG(ERROR) << "Unable to open " << part_path << " for reading";
- return Cleanup(ErrorCode::kFilesystemVerifierError);
+ Cleanup(ErrorCode::kFilesystemVerifierError);
+ return;
}
buffer_.resize(kReadFileBufferSize);
- read_done_ = false;
- hasher_.reset(new HashCalculator());
+ hasher_ = std::make_unique<HashCalculator>();
+
+ offset_ = 0;
+ if (verifier_step_ == VerifierStep::kVerifyTargetHash &&
+ install_plan_.write_verity) {
+ if (!verity_writer_->Init(partition)) {
+ Cleanup(ErrorCode::kVerityCalculationError);
+ return;
+ }
+ }
// Start the first read.
ScheduleRead();
}
void FilesystemVerifierAction::ScheduleRead() {
- size_t bytes_to_read = std::min(static_cast<int64_t>(buffer_.size()),
- remaining_size_);
+ const InstallPlan::Partition& partition =
+ install_plan_.partitions[partition_index_];
+
+ // We can only start reading anything past |hash_tree_offset| after we have
+ // already read all the data blocks that the hash tree covers. The same
+ // applies to FEC.
+ uint64_t read_end = partition_size_;
+ if (partition.hash_tree_size != 0 &&
+ offset_ < partition.hash_tree_data_offset + partition.hash_tree_data_size)
+ read_end = std::min(read_end, partition.hash_tree_offset);
+ if (partition.fec_size != 0 &&
+ offset_ < partition.fec_data_offset + partition.fec_data_size)
+ read_end = std::min(read_end, partition.fec_offset);
+ size_t bytes_to_read =
+ std::min(static_cast<uint64_t>(buffer_.size()), read_end - offset_);
if (!bytes_to_read) {
- OnReadDoneCallback(0);
+ FinishPartitionHashing();
return;
}
bool read_async_ok = src_stream_->ReadAsync(
- buffer_.data(),
- bytes_to_read,
- base::Bind(&FilesystemVerifierAction::OnReadDoneCallback,
- base::Unretained(this)),
- base::Bind(&FilesystemVerifierAction::OnReadErrorCallback,
- base::Unretained(this)),
- nullptr);
+ buffer_.data(),
+ bytes_to_read,
+ base::Bind(&FilesystemVerifierAction::OnReadDoneCallback,
+ base::Unretained(this)),
+ base::Bind(&FilesystemVerifierAction::OnReadErrorCallback,
+ base::Unretained(this)),
+ nullptr);
if (!read_async_ok) {
LOG(ERROR) << "Unable to schedule an asynchronous read from the stream.";
@@ -150,31 +183,40 @@
}
void FilesystemVerifierAction::OnReadDoneCallback(size_t bytes_read) {
+ if (cancelled_) {
+ Cleanup(ErrorCode::kError);
+ return;
+ }
+
if (bytes_read == 0) {
- read_done_ = true;
- } else {
- remaining_size_ -= bytes_read;
- CHECK(!read_done_);
- if (!hasher_->Update(buffer_.data(), bytes_read)) {
- LOG(ERROR) << "Unable to update the hash.";
- Cleanup(ErrorCode::kError);
+ LOG(ERROR) << "Failed to read the remaining " << partition_size_ - offset_
+ << " bytes from partition "
+ << install_plan_.partitions[partition_index_].name;
+ Cleanup(ErrorCode::kFilesystemVerifierError);
+ return;
+ }
+
+ if (!hasher_->Update(buffer_.data(), bytes_read)) {
+ LOG(ERROR) << "Unable to update the hash.";
+ Cleanup(ErrorCode::kError);
+ return;
+ }
+
+ if (verifier_step_ == VerifierStep::kVerifyTargetHash &&
+ install_plan_.write_verity) {
+ if (!verity_writer_->Update(offset_, buffer_.data(), bytes_read)) {
+ Cleanup(ErrorCode::kVerityCalculationError);
return;
}
}
- // We either terminate the current partition or have more data to read.
- if (cancelled_)
- return Cleanup(ErrorCode::kError);
+ offset_ += bytes_read;
- if (read_done_ || remaining_size_ == 0) {
- if (remaining_size_ != 0) {
- LOG(ERROR) << "Failed to read the remaining " << remaining_size_
- << " bytes from partition "
- << install_plan_.partitions[partition_index_].name;
- return Cleanup(ErrorCode::kFilesystemVerifierError);
- }
- return FinishPartitionHashing();
+ if (offset_ == partition_size_) {
+ FinishPartitionHashing();
+ return;
}
+
ScheduleRead();
}
@@ -188,7 +230,8 @@
void FilesystemVerifierAction::FinishPartitionHashing() {
if (!hasher_->Finalize()) {
LOG(ERROR) << "Unable to finalize the hash.";
- return Cleanup(ErrorCode::kError);
+ Cleanup(ErrorCode::kError);
+ return;
}
InstallPlan::Partition& partition =
install_plan_.partitions[partition_index_];
@@ -202,7 +245,8 @@
<< "' partition verification failed.";
if (partition.source_hash.empty()) {
// No need to verify source if it is a full payload.
- return Cleanup(ErrorCode::kNewRootfsVerificationError);
+ Cleanup(ErrorCode::kNewRootfsVerificationError);
+ return;
}
// If we have not verified source partition yet, now that the target
// partition does not match, and it's not a full payload, we need to
@@ -238,7 +282,8 @@
"-binary | openssl base64";
LOG(INFO) << "To get the checksum of partitions in a bin file, "
<< "run: .../src/scripts/sha256_partitions.sh .../file.bin";
- return Cleanup(ErrorCode::kDownloadStateInitializationError);
+ Cleanup(ErrorCode::kDownloadStateInitializationError);
+ return;
}
// The action will skip kVerifySourceHash step if target partition hash
// matches, if we are in this step, it means target hash does not match,
@@ -246,7 +291,8 @@
// code to reflect the error in target partition.
// We only need to verify the source partition which the target hash does
// not match, the rest of the partitions don't matter.
- return Cleanup(ErrorCode::kNewRootfsVerificationError);
+ Cleanup(ErrorCode::kNewRootfsVerificationError);
+ return;
}
// Start hashing the next partition, if any.
hasher_.reset();
diff --git a/payload_consumer/filesystem_verifier_action.h b/payload_consumer/filesystem_verifier_action.h
index a21fc2a..83d6668 100644
--- a/payload_consumer/filesystem_verifier_action.h
+++ b/payload_consumer/filesystem_verifier_action.h
@@ -29,6 +29,7 @@
#include "update_engine/common/action.h"
#include "update_engine/common/hash_calculator.h"
#include "update_engine/payload_consumer/install_plan.h"
+#include "update_engine/payload_consumer/verity_writer_interface.h"
// This action will hash all the partitions of the target slot involved in the
// update. The hashes are then verified against the ones in the InstallPlan.
@@ -50,7 +51,9 @@
class FilesystemVerifierAction : public InstallPlanAction {
public:
- FilesystemVerifierAction() = default;
+ FilesystemVerifierAction()
+ : verity_writer_(verity_writer::CreateVerityWriter()) {}
+ ~FilesystemVerifierAction() override = default;
void PerformAction() override;
void TerminateProcessing() override;
@@ -95,7 +98,6 @@
// Buffer for storing data we read.
brillo::Blob buffer_;
- bool read_done_{false}; // true if reached EOF on the input stream.
bool cancelled_{false}; // true if the action has been cancelled.
// The install plan we're passed in via the input pipe.
@@ -104,10 +106,18 @@
// Calculates the hash of the data.
std::unique_ptr<HashCalculator> hasher_;
- // Reads and hashes this many bytes from the head of the input stream. This
- // field is initialized from the corresponding InstallPlan::Partition size,
- // when the partition starts to be hashed.
- int64_t remaining_size_{0};
+ // Write verity data of the current partition.
+ std::unique_ptr<VerityWriterInterface> verity_writer_;
+
+ // Reads and hashes this many bytes from the head of the input stream. When
+ // the partition starts to be hashed, this field is initialized from the
+ // corresponding InstallPlan::Partition size, which is the total size
+ // update_engine is expected to write, and may be smaller than the size of the
+ // partition in gpt.
+ uint64_t partition_size_{0};
+
+ // The byte offset that we are reading in the current partition.
+ uint64_t offset_{0};
DISALLOW_COPY_AND_ASSIGN(FilesystemVerifierAction);
};
diff --git a/payload_consumer/filesystem_verifier_action_unittest.cc b/payload_consumer/filesystem_verifier_action_unittest.cc
index 33f6cc7..f7789f4 100644
--- a/payload_consumer/filesystem_verifier_action_unittest.cc
+++ b/payload_consumer/filesystem_verifier_action_unittest.cc
@@ -16,32 +16,23 @@
#include "update_engine/payload_consumer/filesystem_verifier_action.h"
-#include <fcntl.h>
-
#include <memory>
-#include <set>
#include <string>
#include <utility>
-#include <vector>
#include <base/bind.h>
#include <base/posix/eintr_wrapper.h>
-#include <base/strings/string_util.h>
-#include <base/strings/stringprintf.h>
#include <brillo/message_loops/fake_message_loop.h>
#include <brillo/message_loops/message_loop_utils.h>
-#include <gmock/gmock.h>
+#include <brillo/secure_blob.h>
#include <gtest/gtest.h>
#include "update_engine/common/hash_calculator.h"
#include "update_engine/common/test_utils.h"
#include "update_engine/common/utils.h"
-#include "update_engine/payload_consumer/payload_constants.h"
using brillo::MessageLoop;
-using std::set;
using std::string;
-using std::vector;
namespace chromeos_update_engine {
@@ -58,7 +49,10 @@
// Returns true iff test has completed successfully.
bool DoTest(bool terminate_early, bool hash_fail);
+ void BuildActions(const InstallPlan& install_plan);
+
brillo::FakeMessageLoop loop_{nullptr};
+ ActionProcessor processor_;
};
class FilesystemVerifierActionTestDelegate : public ActionProcessorDelegate {
@@ -98,13 +92,7 @@
bool FilesystemVerifierActionTest::DoTest(bool terminate_early,
bool hash_fail) {
- string a_loop_file;
-
- if (!(utils::MakeTempFile("a_loop_file.XXXXXX", &a_loop_file, nullptr))) {
- ADD_FAILURE();
- return false;
- }
- ScopedPathUnlinker a_loop_file_unlinker(a_loop_file);
+ test_utils::ScopedTempFile a_loop_file("a_loop_file.XXXXXX");
// Make random data for a.
const size_t kLoopFileSize = 10 * 1024 * 1024 + 512;
@@ -112,7 +100,7 @@
test_utils::FillWithData(&a_loop_data);
// Write data to disk
- if (!(test_utils::WriteFileVector(a_loop_file, a_loop_data))) {
+ if (!(test_utils::WriteFileVector(a_loop_file.path(), a_loop_data))) {
ADD_FAILURE();
return false;
}
@@ -120,13 +108,13 @@
// Attach loop devices to the files
string a_dev;
test_utils::ScopedLoopbackDeviceBinder a_dev_releaser(
- a_loop_file, false, &a_dev);
+ a_loop_file.path(), false, &a_dev);
if (!(a_dev_releaser.is_bound())) {
ADD_FAILURE();
return false;
}
- LOG(INFO) << "verifying: " << a_loop_file << " (" << a_dev << ")";
+ LOG(INFO) << "verifying: " << a_loop_file.path() << " (" << a_dev << ")";
bool success = true;
@@ -150,21 +138,10 @@
}
install_plan.partitions = {part};
- auto feeder_action = std::make_unique<ObjectFeederAction<InstallPlan>>();
- feeder_action->set_obj(install_plan);
- auto copier_action = std::make_unique<FilesystemVerifierAction>();
- auto collector_action =
- std::make_unique<ObjectCollectorAction<InstallPlan>>();
+ BuildActions(install_plan);
- BondActions(feeder_action.get(), copier_action.get());
- BondActions(copier_action.get(), collector_action.get());
-
- ActionProcessor processor;
FilesystemVerifierActionTestDelegate delegate;
- processor.set_delegate(&delegate);
- processor.EnqueueAction(std::move(feeder_action));
- processor.EnqueueAction(std::move(copier_action));
- processor.EnqueueAction(std::move(collector_action));
+ processor_.set_delegate(&delegate);
loop_.PostTask(FROM_HERE,
base::Bind(
@@ -174,7 +151,7 @@
processor->StopProcessing();
}
},
- base::Unretained(&processor),
+ base::Unretained(&processor_),
terminate_early));
loop_.Run();
@@ -210,6 +187,23 @@
return success;
}
+void FilesystemVerifierActionTest::BuildActions(
+ const InstallPlan& install_plan) {
+ auto feeder_action = std::make_unique<ObjectFeederAction<InstallPlan>>();
+ auto verifier_action = std::make_unique<FilesystemVerifierAction>();
+ auto collector_action =
+ std::make_unique<ObjectCollectorAction<InstallPlan>>();
+
+ feeder_action->set_obj(install_plan);
+
+ BondActions(feeder_action.get(), verifier_action.get());
+ BondActions(verifier_action.get(), collector_action.get());
+
+ processor_.EnqueueAction(std::move(feeder_action));
+ processor_.EnqueueAction(std::move(verifier_action));
+ processor_.EnqueueAction(std::move(collector_action));
+}
+
class FilesystemVerifierActionTest2Delegate : public ActionProcessorDelegate {
public:
void ActionCompleted(ActionProcessor* processor,
@@ -225,31 +219,25 @@
};
TEST_F(FilesystemVerifierActionTest, MissingInputObjectTest) {
- ActionProcessor processor;
- FilesystemVerifierActionTest2Delegate delegate;
-
- processor.set_delegate(&delegate);
-
auto copier_action = std::make_unique<FilesystemVerifierAction>();
auto collector_action =
std::make_unique<ObjectCollectorAction<InstallPlan>>();
BondActions(copier_action.get(), collector_action.get());
- processor.EnqueueAction(std::move(copier_action));
- processor.EnqueueAction(std::move(collector_action));
- processor.StartProcessing();
- EXPECT_FALSE(processor.IsRunning());
+ processor_.EnqueueAction(std::move(copier_action));
+ processor_.EnqueueAction(std::move(collector_action));
+
+ FilesystemVerifierActionTest2Delegate delegate;
+ processor_.set_delegate(&delegate);
+
+ processor_.StartProcessing();
+ EXPECT_FALSE(processor_.IsRunning());
EXPECT_TRUE(delegate.ran_);
EXPECT_EQ(ErrorCode::kError, delegate.code_);
}
TEST_F(FilesystemVerifierActionTest, NonExistentDriveTest) {
- ActionProcessor processor;
- FilesystemVerifierActionTest2Delegate delegate;
-
- processor.set_delegate(&delegate);
-
InstallPlan install_plan;
InstallPlan::Partition part;
part.name = "nope";
@@ -257,22 +245,15 @@
part.target_path = "/no/such/file";
install_plan.partitions = {part};
- auto feeder_action = std::make_unique<ObjectFeederAction<InstallPlan>>();
- auto verifier_action = std::make_unique<FilesystemVerifierAction>();
- auto collector_action =
- std::make_unique<ObjectCollectorAction<InstallPlan>>();
+ BuildActions(install_plan);
- feeder_action->set_obj(install_plan);
+ FilesystemVerifierActionTest2Delegate delegate;
+ processor_.set_delegate(&delegate);
- BondActions(verifier_action.get(), collector_action.get());
-
- processor.EnqueueAction(std::move(feeder_action));
- processor.EnqueueAction(std::move(verifier_action));
- processor.EnqueueAction(std::move(collector_action));
- processor.StartProcessing();
- EXPECT_FALSE(processor.IsRunning());
+ processor_.StartProcessing();
+ EXPECT_FALSE(processor_.IsRunning());
EXPECT_TRUE(delegate.ran_);
- EXPECT_EQ(ErrorCode::kError, delegate.code_);
+ EXPECT_EQ(ErrorCode::kFilesystemVerifierError, delegate.code_);
}
TEST_F(FilesystemVerifierActionTest, RunAsRootVerifyHashTest) {
@@ -292,4 +273,112 @@
while (loop_.RunOnce(false)) {}
}
+#ifdef __ANDROID__
+TEST_F(FilesystemVerifierActionTest, RunAsRootWriteVerityTest) {
+ test_utils::ScopedTempFile part_file("part_file.XXXXXX");
+ constexpr size_t filesystem_size = 200 * 4096;
+ constexpr size_t part_size = 256 * 4096;
+ brillo::Blob part_data(filesystem_size, 0x1);
+ part_data.resize(part_size);
+ ASSERT_TRUE(test_utils::WriteFileVector(part_file.path(), part_data));
+ string target_path;
+ test_utils::ScopedLoopbackDeviceBinder target_device(
+ part_file.path(), true, &target_path);
+
+ InstallPlan install_plan;
+ InstallPlan::Partition part;
+ part.name = "part";
+ part.target_path = target_path;
+ part.target_size = part_size;
+ part.block_size = 4096;
+ part.hash_tree_algorithm = "sha1";
+ part.hash_tree_data_offset = 0;
+ part.hash_tree_data_size = filesystem_size;
+ part.hash_tree_offset = filesystem_size;
+ part.hash_tree_size = 3 * 4096;
+ part.fec_data_offset = 0;
+ part.fec_data_size = filesystem_size + part.hash_tree_size;
+ part.fec_offset = part.fec_data_size;
+ part.fec_size = 2 * 4096;
+ part.fec_roots = 2;
+ // for i in {1..$((200 * 4096))}; do echo -n -e '\x1' >> part; done
+ // avbtool add_hashtree_footer --image part --partition_size $((256 * 4096))
+ // --partition_name part --do_not_append_vbmeta_image
+ // --output_vbmeta_image vbmeta
+ // truncate -s $((256 * 4096)) part
+ // sha256sum part | xxd -r -p | hexdump -v -e '/1 "0x%02x, "'
+ part.target_hash = {0x28, 0xd4, 0x96, 0x75, 0x4c, 0xf5, 0x8a, 0x3e,
+ 0x31, 0x85, 0x08, 0x92, 0x85, 0x62, 0xf0, 0x37,
+ 0xbc, 0x8d, 0x7e, 0xa4, 0xcb, 0x24, 0x18, 0x7b,
+ 0xf3, 0xeb, 0xb5, 0x8d, 0x6f, 0xc8, 0xd8, 0x1a};
+ // avbtool info_image --image vbmeta | grep Salt | cut -d':' -f 2 |
+ // xxd -r -p | hexdump -v -e '/1 "0x%02x, "'
+ part.hash_tree_salt = {0x9e, 0xcb, 0xf8, 0xd5, 0x0b, 0xb4, 0x43,
+ 0x0a, 0x7a, 0x10, 0xad, 0x96, 0xd7, 0x15,
+ 0x70, 0xba, 0xed, 0x27, 0xe2, 0xae};
+ install_plan.partitions = {part};
+
+ BuildActions(install_plan);
+
+ FilesystemVerifierActionTestDelegate delegate;
+ processor_.set_delegate(&delegate);
+
+ loop_.PostTask(
+ FROM_HERE,
+ base::Bind(
+ [](ActionProcessor* processor) { processor->StartProcessing(); },
+ base::Unretained(&processor_)));
+ loop_.Run();
+
+ EXPECT_FALSE(processor_.IsRunning());
+ EXPECT_TRUE(delegate.ran());
+ EXPECT_EQ(ErrorCode::kSuccess, delegate.code());
+}
+#endif // __ANDROID__
+
+TEST_F(FilesystemVerifierActionTest, RunAsRootSkipWriteVerityTest) {
+ test_utils::ScopedTempFile part_file("part_file.XXXXXX");
+ constexpr size_t filesystem_size = 200 * 4096;
+ constexpr size_t part_size = 256 * 4096;
+ brillo::Blob part_data(part_size);
+ test_utils::FillWithData(&part_data);
+ ASSERT_TRUE(test_utils::WriteFileVector(part_file.path(), part_data));
+ string target_path;
+ test_utils::ScopedLoopbackDeviceBinder target_device(
+ part_file.path(), true, &target_path);
+
+ InstallPlan install_plan;
+ install_plan.write_verity = false;
+ InstallPlan::Partition part;
+ part.name = "part";
+ part.target_path = target_path;
+ part.target_size = part_size;
+ part.block_size = 4096;
+ part.hash_tree_data_offset = 0;
+ part.hash_tree_data_size = filesystem_size;
+ part.hash_tree_offset = filesystem_size;
+ part.hash_tree_size = 3 * 4096;
+ part.fec_data_offset = 0;
+ part.fec_data_size = filesystem_size + part.hash_tree_size;
+ part.fec_offset = part.fec_data_size;
+ part.fec_size = 2 * 4096;
+ EXPECT_TRUE(HashCalculator::RawHashOfData(part_data, &part.target_hash));
+ install_plan.partitions = {part};
+
+ BuildActions(install_plan);
+
+ FilesystemVerifierActionTestDelegate delegate;
+ processor_.set_delegate(&delegate);
+
+ loop_.PostTask(
+ FROM_HERE,
+ base::Bind(
+ [](ActionProcessor* processor) { processor->StartProcessing(); },
+ base::Unretained(&processor_)));
+ loop_.Run();
+
+ EXPECT_FALSE(processor_.IsRunning());
+ EXPECT_TRUE(delegate.ran());
+ EXPECT_EQ(ErrorCode::kSuccess, delegate.code());
+}
} // namespace chromeos_update_engine
diff --git a/payload_consumer/install_plan.cc b/payload_consumer/install_plan.cc
index 45112d6..1fa27ab 100644
--- a/payload_consumer/install_plan.cc
+++ b/payload_consumer/install_plan.cc
@@ -90,7 +90,9 @@
<< ", powerwash_required: " << utils::ToString(powerwash_required)
<< ", switch_slot_on_reboot: "
<< utils::ToString(switch_slot_on_reboot)
- << ", run_post_install: " << utils::ToString(run_post_install);
+ << ", run_post_install: " << utils::ToString(run_post_install)
+ << ", is_rollback: " << utils::ToString(is_rollback)
+ << ", write_verity: " << utils::ToString(write_verity);
}
bool InstallPlan::LoadPartitionsFromSlots(BootControlInterface* boot_control) {
@@ -98,14 +100,17 @@
for (Partition& partition : partitions) {
if (source_slot != BootControlInterface::kInvalidSlot) {
result = boot_control->GetPartitionDevice(
- partition.name, source_slot, &partition.source_path) && result;
+ partition.name, source_slot, &partition.source_path) &&
+ result;
} else {
partition.source_path.clear();
}
- if (target_slot != BootControlInterface::kInvalidSlot) {
+ if (target_slot != BootControlInterface::kInvalidSlot &&
+ partition.target_size > 0) {
result = boot_control->GetPartitionDevice(
- partition.name, target_slot, &partition.target_path) && result;
+ partition.name, target_slot, &partition.target_path) &&
+ result;
} else {
partition.target_path.clear();
}
diff --git a/payload_consumer/install_plan.h b/payload_consumer/install_plan.h
index 929cad3..755d913 100644
--- a/payload_consumer/install_plan.h
+++ b/payload_consumer/install_plan.h
@@ -46,7 +46,7 @@
void Dump() const;
- // Load the |source_path| and |target_path| of all |partitions| based on the
+ // Loads the |source_path| and |target_path| of all |partitions| based on the
// |source_slot| and |target_slot| if available. Returns whether it succeeded
// to load all the partitions for the valid slots.
bool LoadPartitionsFromSlots(BootControlInterface* boot_control);
@@ -101,6 +101,7 @@
std::string target_path;
uint64_t target_size{0};
brillo::Blob target_hash;
+ uint32_t block_size{0};
// Whether we should run the postinstall script from this partition and the
// postinstall parameters.
@@ -108,6 +109,21 @@
std::string postinstall_path;
std::string filesystem_type;
bool postinstall_optional{false};
+
+ // Verity hash tree and FEC config. See update_metadata.proto for details.
+ // All offsets and sizes are in bytes.
+ uint64_t hash_tree_data_offset{0};
+ uint64_t hash_tree_data_size{0};
+ uint64_t hash_tree_offset{0};
+ uint64_t hash_tree_size{0};
+ std::string hash_tree_algorithm;
+ brillo::Blob hash_tree_salt;
+
+ uint64_t fec_data_offset{0};
+ uint64_t fec_data_size{0};
+ uint64_t fec_offset{0};
+ uint64_t fec_size{0};
+ uint32_t fec_roots{0};
};
std::vector<Partition> partitions;
@@ -130,6 +146,10 @@
// True if this update is a rollback.
bool is_rollback{false};
+ // True if the update should write verity.
+ // When false, verity writing is skipped during filesystem verification.
+ bool write_verity{true};
+
// If not blank, a base-64 encoded representation of the PEM-encoded
// public key in the response.
std::string public_key_rsa;
diff --git a/payload_consumer/payload_constants.cc b/payload_consumer/payload_constants.cc
index 6e7cd00..213d798 100644
--- a/payload_consumer/payload_constants.cc
+++ b/payload_consumer/payload_constants.cc
@@ -22,7 +22,7 @@
const uint64_t kBrilloMajorPayloadVersion = 2;
const uint32_t kMinSupportedMinorPayloadVersion = 1;
-const uint32_t kMaxSupportedMinorPayloadVersion = 5;
+const uint32_t kMaxSupportedMinorPayloadVersion = 6;
const uint32_t kFullPayloadMinorVersion = 0;
const uint32_t kInPlaceMinorPayloadVersion = 1;
@@ -30,6 +30,7 @@
const uint32_t kOpSrcHashMinorPayloadVersion = 3;
const uint32_t kBrotliBsdiffMinorPayloadVersion = 4;
const uint32_t kPuffdiffMinorPayloadVersion = 5;
+const uint32_t kVerityMinorPayloadVersion = 6;
const uint64_t kMinSupportedMajorPayloadVersion = 1;
const uint64_t kMaxSupportedMajorPayloadVersion = 2;
diff --git a/payload_consumer/payload_constants.h b/payload_consumer/payload_constants.h
index 0833484..7f76898 100644
--- a/payload_consumer/payload_constants.h
+++ b/payload_consumer/payload_constants.h
@@ -53,6 +53,9 @@
// The minor version that allows PUFFDIFF operation.
extern const uint32_t kPuffdiffMinorPayloadVersion;
+// The minor version that allows Verity hash tree and FEC generation.
+extern const uint32_t kVerityMinorPayloadVersion;
+
// The minimum and maximum supported minor version.
extern const uint32_t kMinSupportedMinorPayloadVersion;
extern const uint32_t kMaxSupportedMinorPayloadVersion;
diff --git a/payload_consumer/payload_metadata.cc b/payload_consumer/payload_metadata.cc
index f700228..3079feb 100644
--- a/payload_consumer/payload_metadata.cc
+++ b/payload_consumer/payload_metadata.cc
@@ -107,6 +107,13 @@
kDeltaManifestSizeSize);
manifest_size_ = be64toh(manifest_size_); // switch big endian to host
+ metadata_size_ = manifest_offset + manifest_size_;
+ if (metadata_size_ < manifest_size_) {
+ // Overflow detected.
+ *error = ErrorCode::kDownloadInvalidMetadataSize;
+ return MetadataParseResult::kError;
+ }
+
if (GetMajorVersion() == kBrilloMajorPayloadVersion) {
// Parse the metadata signature size.
static_assert(
@@ -121,11 +128,21 @@
&payload[metadata_signature_size_offset],
kDeltaMetadataSignatureSizeSize);
metadata_signature_size_ = be32toh(metadata_signature_size_);
+
+ if (metadata_size_ + metadata_signature_size_ < metadata_size_) {
+ // Overflow detected.
+ *error = ErrorCode::kDownloadInvalidMetadataSize;
+ return MetadataParseResult::kError;
+ }
}
- metadata_size_ = manifest_offset + manifest_size_;
return MetadataParseResult::kSuccess;
}
+bool PayloadMetadata::ParsePayloadHeader(const brillo::Blob& payload) {
+ ErrorCode error;
+ return ParsePayloadHeader(payload, &error) == MetadataParseResult::kSuccess;
+}
+
bool PayloadMetadata::GetManifest(const brillo::Blob& payload,
DeltaArchiveManifest* out_manifest) const {
uint64_t manifest_offset;
diff --git a/payload_consumer/payload_metadata.h b/payload_consumer/payload_metadata.h
index fc1d128..8748f6f 100644
--- a/payload_consumer/payload_metadata.h
+++ b/payload_consumer/payload_metadata.h
@@ -55,6 +55,8 @@
// the payload.
MetadataParseResult ParsePayloadHeader(const brillo::Blob& payload,
ErrorCode* error);
+ // Simpler version of the above; returns true on success.
+ bool ParsePayloadHeader(const brillo::Blob& payload);
// Given the |payload|, verifies that the signed hash of its metadata matches
// |metadata_signature| (if present) or the metadata signature in payload
diff --git a/payload_consumer/postinstall_runner_action_unittest.cc b/payload_consumer/postinstall_runner_action_unittest.cc
index 17c8909..8381472 100644
--- a/payload_consumer/postinstall_runner_action_unittest.cc
+++ b/payload_consumer/postinstall_runner_action_unittest.cc
@@ -329,7 +329,7 @@
// SElinux labels are only set on Android.
TEST_F(PostinstallRunnerActionTest, RunAsRootCheckFileContextsTest) {
ScopedLoopbackDeviceBinder loop(postinstall_image_, false, nullptr);
- RunPosinstallAction(loop.dev(), "bin/self_check_context", false, false);
+ RunPostinstallAction(loop.dev(), "bin/self_check_context", false, false);
EXPECT_EQ(ErrorCode::kSuccess, processor_delegate_.code_);
}
#endif // __ANDROID__
diff --git a/payload_consumer/verity_writer_android.cc b/payload_consumer/verity_writer_android.cc
new file mode 100644
index 0000000..06d1489
--- /dev/null
+++ b/payload_consumer/verity_writer_android.cc
@@ -0,0 +1,192 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_consumer/verity_writer_android.h"
+
+#include <fcntl.h>
+
+#include <algorithm>
+#include <memory>
+
+#include <base/logging.h>
+#include <base/posix/eintr_wrapper.h>
+#include <fec/ecc.h>
+extern "C" {
+#include <fec.h>
+}
+
+#include "update_engine/common/utils.h"
+
+namespace chromeos_update_engine {
+
+namespace verity_writer {
+std::unique_ptr<VerityWriterInterface> CreateVerityWriter() {
+ return std::make_unique<VerityWriterAndroid>();
+}
+} // namespace verity_writer
+
+bool VerityWriterAndroid::Init(const InstallPlan::Partition& partition) {
+ partition_ = &partition;
+
+ if (partition_->hash_tree_size != 0) {
+ auto hash_function =
+ HashTreeBuilder::HashFunction(partition_->hash_tree_algorithm);
+ if (hash_function == nullptr) {
+ LOG(ERROR) << "Verity hash algorithm not supported: "
+ << partition_->hash_tree_algorithm;
+ return false;
+ }
+ hash_tree_builder_ = std::make_unique<HashTreeBuilder>(
+ partition_->block_size, hash_function);
+ TEST_AND_RETURN_FALSE(hash_tree_builder_->Initialize(
+ partition_->hash_tree_data_size, partition_->hash_tree_salt));
+ if (hash_tree_builder_->CalculateSize(partition_->hash_tree_data_size) !=
+ partition_->hash_tree_size) {
+ LOG(ERROR) << "Verity hash tree size does not match, stored: "
+ << partition_->hash_tree_size << ", calculated: "
+ << hash_tree_builder_->CalculateSize(
+ partition_->hash_tree_data_size);
+ return false;
+ }
+ }
+ return true;
+}
+
+bool VerityWriterAndroid::Update(uint64_t offset,
+ const uint8_t* buffer,
+ size_t size) {
+ if (partition_->hash_tree_size != 0) {
+ uint64_t hash_tree_data_end =
+ partition_->hash_tree_data_offset + partition_->hash_tree_data_size;
+ uint64_t start_offset = std::max(offset, partition_->hash_tree_data_offset);
+ uint64_t end_offset = std::min(offset + size, hash_tree_data_end);
+ if (start_offset < end_offset) {
+ TEST_AND_RETURN_FALSE(hash_tree_builder_->Update(
+ buffer + start_offset - offset, end_offset - start_offset));
+
+ if (end_offset == hash_tree_data_end) {
+        // All hash tree data blocks have been hashed, write hash tree to disk.
+ int fd = HANDLE_EINTR(open(partition_->target_path.c_str(), O_WRONLY));
+ if (fd < 0) {
+ PLOG(ERROR) << "Failed to open " << partition_->target_path
+ << " to write hash tree.";
+ return false;
+ }
+ ScopedFdCloser fd_closer(&fd);
+
+ LOG(INFO) << "Writing verity hash tree to " << partition_->target_path;
+ TEST_AND_RETURN_FALSE(hash_tree_builder_->BuildHashTree());
+ TEST_AND_RETURN_FALSE(hash_tree_builder_->WriteHashTreeToFd(
+ fd, partition_->hash_tree_offset));
+ hash_tree_builder_.reset();
+ }
+ }
+ }
+ if (partition_->fec_size != 0) {
+ uint64_t fec_data_end =
+ partition_->fec_data_offset + partition_->fec_data_size;
+ if (offset < fec_data_end && offset + size >= fec_data_end) {
+ LOG(INFO) << "Writing verity FEC to " << partition_->target_path;
+ TEST_AND_RETURN_FALSE(EncodeFEC(partition_->target_path,
+ partition_->fec_data_offset,
+ partition_->fec_data_size,
+ partition_->fec_offset,
+ partition_->fec_size,
+ partition_->fec_roots,
+ partition_->block_size,
+ false /* verify_mode */));
+ }
+ }
+ return true;
+}
+
+bool VerityWriterAndroid::EncodeFEC(const std::string& path,
+ uint64_t data_offset,
+ uint64_t data_size,
+ uint64_t fec_offset,
+ uint64_t fec_size,
+ uint32_t fec_roots,
+ uint32_t block_size,
+ bool verify_mode) {
+ TEST_AND_RETURN_FALSE(data_size % block_size == 0);
+  TEST_AND_RETURN_FALSE(fec_roots < FEC_RSM);
+ // This is the N in RS(M, N), which is the number of bytes for each rs block.
+ size_t rs_n = FEC_RSM - fec_roots;
+ uint64_t rounds = utils::DivRoundUp(data_size / block_size, rs_n);
+ TEST_AND_RETURN_FALSE(rounds * fec_roots * block_size == fec_size);
+
+ std::unique_ptr<void, decltype(&free_rs_char)> rs_char(
+ init_rs_char(FEC_PARAMS(fec_roots)), &free_rs_char);
+ TEST_AND_RETURN_FALSE(rs_char != nullptr);
+
+ int fd = HANDLE_EINTR(open(path.c_str(), verify_mode ? O_RDONLY : O_RDWR));
+ if (fd < 0) {
+ PLOG(ERROR) << "Failed to open " << path << " to write FEC.";
+ return false;
+ }
+ ScopedFdCloser fd_closer(&fd);
+
+ for (size_t i = 0; i < rounds; i++) {
+ // Encodes |block_size| number of rs blocks each round so that we can read
+ // one block each time instead of 1 byte to increase random read
+ // performance. This uses about 1 MiB memory for 4K block size.
+ brillo::Blob rs_blocks(block_size * rs_n);
+ for (size_t j = 0; j < rs_n; j++) {
+ brillo::Blob buffer(block_size, 0);
+ uint64_t offset =
+ fec_ecc_interleave(i * rs_n * block_size + j, rs_n, rounds);
+ // Don't read past |data_size|, treat them as 0.
+ if (offset < data_size) {
+ ssize_t bytes_read = 0;
+ TEST_AND_RETURN_FALSE(utils::PReadAll(fd,
+ buffer.data(),
+ buffer.size(),
+ data_offset + offset,
+ &bytes_read));
+ TEST_AND_RETURN_FALSE(bytes_read ==
+ static_cast<ssize_t>(buffer.size()));
+ }
+ for (size_t k = 0; k < buffer.size(); k++) {
+ rs_blocks[k * rs_n + j] = buffer[k];
+ }
+ }
+ brillo::Blob fec(block_size * fec_roots);
+ for (size_t j = 0; j < block_size; j++) {
+ // Encode [j * rs_n : (j + 1) * rs_n) in |rs_blocks| and write |fec_roots|
+ // number of parity bytes to |j * fec_roots| in |fec|.
+ encode_rs_char(rs_char.get(),
+ rs_blocks.data() + j * rs_n,
+ fec.data() + j * fec_roots);
+ }
+
+ if (verify_mode) {
+ brillo::Blob fec_read(fec.size());
+ ssize_t bytes_read = 0;
+ TEST_AND_RETURN_FALSE(utils::PReadAll(
+ fd, fec_read.data(), fec_read.size(), fec_offset, &bytes_read));
+ TEST_AND_RETURN_FALSE(bytes_read ==
+ static_cast<ssize_t>(fec_read.size()));
+ TEST_AND_RETURN_FALSE(fec == fec_read);
+ } else {
+ TEST_AND_RETURN_FALSE(
+ utils::PWriteAll(fd, fec.data(), fec.size(), fec_offset));
+ }
+ fec_offset += fec.size();
+ }
+
+ return true;
+}
+} // namespace chromeos_update_engine
diff --git a/payload_consumer/verity_writer_android.h b/payload_consumer/verity_writer_android.h
new file mode 100644
index 0000000..05a5856
--- /dev/null
+++ b/payload_consumer/verity_writer_android.h
@@ -0,0 +1,62 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_PAYLOAD_CONSUMER_VERITY_WRITER_ANDROID_H_
+#define UPDATE_ENGINE_PAYLOAD_CONSUMER_VERITY_WRITER_ANDROID_H_
+
+#include <memory>
+#include <string>
+
+#include <verity/hash_tree_builder.h>
+
+#include "update_engine/payload_consumer/verity_writer_interface.h"
+
+namespace chromeos_update_engine {
+
+class VerityWriterAndroid : public VerityWriterInterface {
+ public:
+ VerityWriterAndroid() = default;
+ ~VerityWriterAndroid() override = default;
+
+ bool Init(const InstallPlan::Partition& partition) override;
+ bool Update(uint64_t offset, const uint8_t* buffer, size_t size) override;
+
+ // Read [data_offset : data_offset + data_size) from |path| and encode FEC
+ // data, if |verify_mode|, then compare the encoded FEC with the one in
+ // |path|, otherwise write the encoded FEC to |path|. We can't encode as we go
+ // in each Update() like hash tree, because for every rs block, its data are
+  // spread across the entire |data_size|; unless we can cache all data in
+ // memory, we have to re-read them from disk.
+ static bool EncodeFEC(const std::string& path,
+ uint64_t data_offset,
+ uint64_t data_size,
+ uint64_t fec_offset,
+ uint64_t fec_size,
+ uint32_t fec_roots,
+ uint32_t block_size,
+ bool verify_mode);
+
+ private:
+ const InstallPlan::Partition* partition_ = nullptr;
+
+ std::unique_ptr<HashTreeBuilder> hash_tree_builder_;
+
+ DISALLOW_COPY_AND_ASSIGN(VerityWriterAndroid);
+};
+
+} // namespace chromeos_update_engine
+
+#endif // UPDATE_ENGINE_PAYLOAD_CONSUMER_VERITY_WRITER_ANDROID_H_
diff --git a/payload_consumer/verity_writer_android_unittest.cc b/payload_consumer/verity_writer_android_unittest.cc
new file mode 100644
index 0000000..f943ce8
--- /dev/null
+++ b/payload_consumer/verity_writer_android_unittest.cc
@@ -0,0 +1,120 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_consumer/verity_writer_android.h"
+
+#include <brillo/secure_blob.h>
+#include <gtest/gtest.h>
+
+#include "update_engine/common/test_utils.h"
+#include "update_engine/common/utils.h"
+
+namespace chromeos_update_engine {
+
+class VerityWriterAndroidTest : public ::testing::Test {
+ protected:
+ void SetUp() override {
+ partition_.target_path = temp_file_.path();
+ partition_.block_size = 4096;
+ partition_.hash_tree_data_offset = 0;
+ partition_.hash_tree_data_size = 4096;
+ partition_.hash_tree_offset = 4096;
+ partition_.hash_tree_size = 4096;
+ partition_.hash_tree_algorithm = "sha1";
+ partition_.fec_roots = 2;
+ }
+
+ VerityWriterAndroid verity_writer_;
+ InstallPlan::Partition partition_;
+ test_utils::ScopedTempFile temp_file_;
+};
+
+TEST_F(VerityWriterAndroidTest, SimpleTest) {
+ brillo::Blob part_data(8192);
+ test_utils::WriteFileVector(partition_.target_path, part_data);
+ ASSERT_TRUE(verity_writer_.Init(partition_));
+ EXPECT_TRUE(verity_writer_.Update(0, part_data.data(), 4096));
+ EXPECT_TRUE(verity_writer_.Update(4096, part_data.data() + 4096, 4096));
+ brillo::Blob actual_part;
+ utils::ReadFile(partition_.target_path, &actual_part);
+ // dd if=/dev/zero bs=4096 count=1 2>/dev/null | sha1sum | xxd -r -p |
+ // hexdump -v -e '/1 "0x%02x, "'
+ brillo::Blob hash = {0x1c, 0xea, 0xf7, 0x3d, 0xf4, 0x0e, 0x53,
+ 0x1d, 0xf3, 0xbf, 0xb2, 0x6b, 0x4f, 0xb7,
+ 0xcd, 0x95, 0xfb, 0x7b, 0xff, 0x1d};
+ memcpy(part_data.data() + 4096, hash.data(), hash.size());
+ EXPECT_EQ(part_data, actual_part);
+}
+
+TEST_F(VerityWriterAndroidTest, NoOpTest) {
+ partition_.hash_tree_data_size = 0;
+ partition_.hash_tree_size = 0;
+ brillo::Blob part_data(4096);
+ ASSERT_TRUE(verity_writer_.Init(partition_));
+ EXPECT_TRUE(verity_writer_.Update(0, part_data.data(), part_data.size()));
+ EXPECT_TRUE(verity_writer_.Update(4096, part_data.data(), part_data.size()));
+ EXPECT_TRUE(verity_writer_.Update(8192, part_data.data(), part_data.size()));
+}
+
+TEST_F(VerityWriterAndroidTest, InvalidHashAlgorithmTest) {
+ partition_.hash_tree_algorithm = "sha123";
+ EXPECT_FALSE(verity_writer_.Init(partition_));
+}
+
+TEST_F(VerityWriterAndroidTest, WrongHashTreeSizeTest) {
+ partition_.hash_tree_size = 8192;
+ EXPECT_FALSE(verity_writer_.Init(partition_));
+}
+
+TEST_F(VerityWriterAndroidTest, SHA256Test) {
+ partition_.hash_tree_algorithm = "sha256";
+ brillo::Blob part_data(8192);
+ test_utils::WriteFileVector(partition_.target_path, part_data);
+ ASSERT_TRUE(verity_writer_.Init(partition_));
+ EXPECT_TRUE(verity_writer_.Update(0, part_data.data(), 4096));
+ EXPECT_TRUE(verity_writer_.Update(4096, part_data.data() + 4096, 4096));
+ brillo::Blob actual_part;
+ utils::ReadFile(partition_.target_path, &actual_part);
+ // dd if=/dev/zero bs=4096 count=1 2>/dev/null | sha256sum | xxd -r -p |
+ // hexdump -v -e '/1 "0x%02x, "'
+ brillo::Blob hash = {0xad, 0x7f, 0xac, 0xb2, 0x58, 0x6f, 0xc6, 0xe9,
+ 0x66, 0xc0, 0x04, 0xd7, 0xd1, 0xd1, 0x6b, 0x02,
+ 0x4f, 0x58, 0x05, 0xff, 0x7c, 0xb4, 0x7c, 0x7a,
+ 0x85, 0xda, 0xbd, 0x8b, 0x48, 0x89, 0x2c, 0xa7};
+ memcpy(part_data.data() + 4096, hash.data(), hash.size());
+ EXPECT_EQ(part_data, actual_part);
+}
+
+TEST_F(VerityWriterAndroidTest, FECTest) {
+ partition_.fec_data_offset = 0;
+ partition_.fec_data_size = 4096;
+ partition_.fec_offset = 4096;
+ partition_.fec_size = 2 * 4096;
+ brillo::Blob part_data(3 * 4096, 0x1);
+ test_utils::WriteFileVector(partition_.target_path, part_data);
+ ASSERT_TRUE(verity_writer_.Init(partition_));
+ EXPECT_TRUE(verity_writer_.Update(0, part_data.data(), part_data.size()));
+ brillo::Blob actual_part;
+ utils::ReadFile(partition_.target_path, &actual_part);
+ // Write FEC data.
+ for (size_t i = 4096; i < part_data.size(); i += 2) {
+ part_data[i] = 0x8e;
+ part_data[i + 1] = 0x8f;
+ }
+ EXPECT_EQ(part_data, actual_part);
+}
+
+} // namespace chromeos_update_engine
diff --git a/payload_consumer/verity_writer_interface.h b/payload_consumer/verity_writer_interface.h
new file mode 100644
index 0000000..a3ecef3
--- /dev/null
+++ b/payload_consumer/verity_writer_interface.h
@@ -0,0 +1,53 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_PAYLOAD_CONSUMER_VERITY_WRITER_INTERFACE_H_
+#define UPDATE_ENGINE_PAYLOAD_CONSUMER_VERITY_WRITER_INTERFACE_H_
+
+#include <cstdint>
+#include <memory>
+
+#include <base/macros.h>
+
+#include "update_engine/payload_consumer/install_plan.h"
+
+namespace chromeos_update_engine {
+
+class VerityWriterInterface {
+ public:
+ virtual ~VerityWriterInterface() = default;
+
+ virtual bool Init(const InstallPlan::Partition& partition) = 0;
+ // Update partition data at [offset : offset + size) stored in |buffer|.
+ // Data not in |hash_tree_data_extent| or |fec_data_extent| is ignored.
+ // Will write verity data to the target partition once all the necessary
+  // blocks have passed.
+ virtual bool Update(uint64_t offset, const uint8_t* buffer, size_t size) = 0;
+
+ protected:
+ VerityWriterInterface() = default;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(VerityWriterInterface);
+};
+
+namespace verity_writer {
+std::unique_ptr<VerityWriterInterface> CreateVerityWriter();
+}  // namespace verity_writer
+
+} // namespace chromeos_update_engine
+
+#endif // UPDATE_ENGINE_PAYLOAD_CONSUMER_VERITY_WRITER_INTERFACE_H_
diff --git a/payload_consumer/verity_writer_stub.cc b/payload_consumer/verity_writer_stub.cc
new file mode 100644
index 0000000..a0e2467
--- /dev/null
+++ b/payload_consumer/verity_writer_stub.cc
@@ -0,0 +1,39 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_consumer/verity_writer_stub.h"
+
+#include <memory>
+
+namespace chromeos_update_engine {
+
+namespace verity_writer {
+std::unique_ptr<VerityWriterInterface> CreateVerityWriter() {
+ return std::make_unique<VerityWriterStub>();
+}
+} // namespace verity_writer
+
+bool VerityWriterStub::Init(const InstallPlan::Partition& partition) {
+ return partition.hash_tree_size == 0 && partition.fec_size == 0;
+}
+
+bool VerityWriterStub::Update(uint64_t offset,
+ const uint8_t* buffer,
+ size_t size) {
+ return true;
+}
+
+} // namespace chromeos_update_engine
diff --git a/payload_consumer/verity_writer_stub.h b/payload_consumer/verity_writer_stub.h
new file mode 100644
index 0000000..ea5e574
--- /dev/null
+++ b/payload_consumer/verity_writer_stub.h
@@ -0,0 +1,38 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_PAYLOAD_CONSUMER_VERITY_WRITER_STUB_H_
+#define UPDATE_ENGINE_PAYLOAD_CONSUMER_VERITY_WRITER_STUB_H_
+
+#include "update_engine/payload_consumer/verity_writer_interface.h"
+
+namespace chromeos_update_engine {
+
+class VerityWriterStub : public VerityWriterInterface {
+ public:
+ VerityWriterStub() = default;
+ ~VerityWriterStub() override = default;
+
+ bool Init(const InstallPlan::Partition& partition) override;
+ bool Update(uint64_t offset, const uint8_t* buffer, size_t size) override;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(VerityWriterStub);
+};
+
+} // namespace chromeos_update_engine
+
+#endif // UPDATE_ENGINE_PAYLOAD_CONSUMER_VERITY_WRITER_STUB_H_
diff --git a/payload_consumer/xz_extent_writer.cc b/payload_consumer/xz_extent_writer.cc
index 343ed80..835dcf7 100644
--- a/payload_consumer/xz_extent_writer.cc
+++ b/payload_consumer/xz_extent_writer.cc
@@ -52,6 +52,7 @@
XzExtentWriter::~XzExtentWriter() {
xz_dec_end(stream_);
+ TEST_AND_RETURN(input_buffer_.empty());
}
bool XzExtentWriter::Init(FileDescriptorPtr fd,
@@ -110,9 +111,4 @@
return true;
}
-bool XzExtentWriter::EndImpl() {
- TEST_AND_RETURN_FALSE(input_buffer_.empty());
- return underlying_writer_->End();
-}
-
} // namespace chromeos_update_engine
diff --git a/payload_consumer/xz_extent_writer.h b/payload_consumer/xz_extent_writer.h
index 5e50256..e022274 100644
--- a/payload_consumer/xz_extent_writer.h
+++ b/payload_consumer/xz_extent_writer.h
@@ -43,7 +43,6 @@
const google::protobuf::RepeatedPtrField<Extent>& extents,
uint32_t block_size) override;
bool Write(const void* bytes, size_t count) override;
- bool EndImpl() override;
private:
// The underlying ExtentWriter.
diff --git a/payload_consumer/xz_extent_writer_unittest.cc b/payload_consumer/xz_extent_writer_unittest.cc
index c8bcdf9..76a53a4 100644
--- a/payload_consumer/xz_extent_writer_unittest.cc
+++ b/payload_consumer/xz_extent_writer_unittest.cc
@@ -89,10 +89,8 @@
void WriteAll(const brillo::Blob& compressed) {
EXPECT_TRUE(xz_writer_->Init(fd_, {}, 1024));
EXPECT_TRUE(xz_writer_->Write(compressed.data(), compressed.size()));
- EXPECT_TRUE(xz_writer_->End());
EXPECT_TRUE(fake_extent_writer_->InitCalled());
- EXPECT_TRUE(fake_extent_writer_->EndCalled());
}
// Owned by |xz_writer_|. This object is invalidated after |xz_writer_| is
@@ -109,7 +107,6 @@
TEST_F(XzExtentWriterTest, CreateAndDestroy) {
// Test that no Init() or End() called doesn't crash the program.
EXPECT_FALSE(fake_extent_writer_->InitCalled());
- EXPECT_FALSE(fake_extent_writer_->EndCalled());
}
TEST_F(XzExtentWriterTest, CompressedSampleData) {
@@ -137,9 +134,6 @@
EXPECT_TRUE(xz_writer_->Init(fd_, {}, 1024));
// The sample_data_ is an uncompressed string.
EXPECT_FALSE(xz_writer_->Write(sample_data_.data(), sample_data_.size()));
- EXPECT_TRUE(xz_writer_->End());
-
- EXPECT_TRUE(fake_extent_writer_->EndCalled());
}
TEST_F(XzExtentWriterTest, PartialDataIsKept) {
@@ -149,7 +143,6 @@
for (uint8_t byte : compressed) {
EXPECT_TRUE(xz_writer_->Write(&byte, 1));
}
- EXPECT_TRUE(xz_writer_->End());
// The sample_data_ is an uncompressed string.
brillo::Blob expected_data(30 * 1024, 'a');