update_engine: Merge remote-tracking branch 'cros/upstream' into cros/master
Done with:
git merge cros/upstream --commit -s recursive
- Added EC key support and its unittests.
- Resolved a conflict on error codes. Since the Android versions are not
  uploading any UMA metrics, I gave priority to the Android version, as its
  values can't be changed.
- Changed the OpenSSL functions from the get0 to the get1 variants because of
  a current issue with gale. Once the issue is resolved, we need to change
  them back. (See the ownership sketch after this list.)
- Fixed some remaining styling issues with clang-format.
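
For context on the EC key and get0/get1 bullets above, here is a minimal
standalone sketch (not taken from this change; the helper name is
illustrative) of the ownership difference between the two OpenSSL variants
and of how a public key pulled from a certificate, RSA or EC alike, can
verify a SHA-256 signature through the generic EVP interface:

  // X509_get0_pubkey() returns a pointer borrowed from the certificate,
  // while X509_get_pubkey() (get1 semantics) returns a new reference that
  // the caller must release with EVP_PKEY_free().
  #include <cstddef>
  #include <cstdint>

  #include <openssl/evp.h>
  #include <openssl/x509.h>

  bool VerifyWithCertPubKey(X509* cert,
                            const uint8_t* data, size_t data_len,
                            const uint8_t* sig, size_t sig_len) {
    EVP_PKEY* key = X509_get_pubkey(cert);  // owned reference, freed below
    if (!key)
      return false;
    EVP_MD_CTX* ctx = EVP_MD_CTX_new();
    bool ok = ctx != nullptr &&
              EVP_DigestVerifyInit(ctx, nullptr, EVP_sha256(), nullptr, key) == 1 &&
              EVP_DigestVerify(ctx, sig, sig_len, data, data_len) == 1;
    EVP_MD_CTX_free(ctx);  // safe to call on nullptr
    EVP_PKEY_free(key);
    return ok;
  }
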
BUG=b:163153182
TEST=CQ passes
TEST=unittests
Change-Id: Ib95034422b92433ce26e28336bc4806b34910d38
diff --git a/payload_consumer/certificate_parser_android.cc b/payload_consumer/certificate_parser_android.cc
new file mode 100644
index 0000000..4a20547
--- /dev/null
+++ b/payload_consumer/certificate_parser_android.cc
@@ -0,0 +1,121 @@
+//
+// Copyright (C) 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_consumer/certificate_parser_android.h"
+
+#include <memory>
+#include <utility>
+
+#include <base/logging.h>
+#include <openssl/bio.h>
+#include <openssl/pem.h>
+#include <ziparchive/zip_archive.h>
+
+#include "update_engine/payload_consumer/certificate_parser_interface.h"
+
+namespace {
+bool IterateZipEntriesAndSearchForKeys(
+ const ZipArchiveHandle& handle, std::vector<std::vector<uint8_t>>* result) {
+ void* cookie;
+ int32_t iter_status = StartIteration(handle, &cookie, "", "x509.pem");
+ if (iter_status != 0) {
+ LOG(ERROR) << "Failed to iterate over entries in the certificate zipfile: "
+ << ErrorCodeString(iter_status);
+ return false;
+ }
+ std::unique_ptr<void, decltype(&EndIteration)> guard(cookie, EndIteration);
+
+ std::vector<std::vector<uint8_t>> pem_keys;
+ std::string_view name;
+ ZipEntry entry;
+ while ((iter_status = Next(cookie, &entry, &name)) == 0) {
+ std::vector<uint8_t> pem_content(entry.uncompressed_length);
+ if (int32_t extract_status = ExtractToMemory(
+ handle, &entry, pem_content.data(), pem_content.size());
+ extract_status != 0) {
+ LOG(ERROR) << "Failed to extract " << name << ": "
+ << ErrorCodeString(extract_status);
+ return false;
+ }
+ pem_keys.push_back(pem_content);
+ }
+
+ if (iter_status != -1) {
+ LOG(ERROR) << "Error while iterating over zip entries: "
+ << ErrorCodeString(iter_status);
+ return false;
+ }
+
+ *result = std::move(pem_keys);
+ return true;
+}
+
+} // namespace
+
+namespace chromeos_update_engine {
+bool CertificateParserAndroid::ReadPublicKeysFromCertificates(
+ const std::string& path,
+ std::vector<std::unique_ptr<EVP_PKEY, decltype(&EVP_PKEY_free)>>*
+ out_public_keys) {
+ out_public_keys->clear();
+
+ ZipArchiveHandle handle;
+ if (int32_t open_status = OpenArchive(path.c_str(), &handle);
+ open_status != 0) {
+ LOG(ERROR) << "Failed to open " << path << ": "
+ << ErrorCodeString(open_status);
+ return false;
+ }
+
+ std::vector<std::vector<uint8_t>> pem_certs;
+ if (!IterateZipEntriesAndSearchForKeys(handle, &pem_certs)) {
+ CloseArchive(handle);
+ return false;
+ }
+ CloseArchive(handle);
+
+ // Convert the certificates into public keys. Stop and return false if we
+ // encounter an error.
+ std::vector<std::unique_ptr<EVP_PKEY, decltype(&EVP_PKEY_free)>> result;
+ for (const auto& cert : pem_certs) {
+ std::unique_ptr<BIO, decltype(&BIO_free)> input(
+ BIO_new_mem_buf(cert.data(), cert.size()), BIO_free);
+
+ std::unique_ptr<X509, decltype(&X509_free)> x509(
+ PEM_read_bio_X509(input.get(), nullptr, nullptr, nullptr), X509_free);
+ if (!x509) {
+ LOG(ERROR) << "Failed to read x509 certificate";
+ return false;
+ }
+
+ std::unique_ptr<EVP_PKEY, decltype(&EVP_PKEY_free)> public_key(
+ X509_get_pubkey(x509.get()), EVP_PKEY_free);
+ if (!public_key) {
+ LOG(ERROR) << "Failed to extract the public key from x509 certificate";
+ return false;
+ }
+ result.push_back(std::move(public_key));
+ }
+
+ *out_public_keys = std::move(result);
+ return true;
+}
+
+std::unique_ptr<CertificateParserInterface> CreateCertificateParser() {
+ return std::make_unique<CertificateParserAndroid>();
+}
+
+} // namespace chromeos_update_engine
diff --git a/payload_consumer/certificate_parser_android.h b/payload_consumer/certificate_parser_android.h
new file mode 100644
index 0000000..ccb9293
--- /dev/null
+++ b/payload_consumer/certificate_parser_android.h
@@ -0,0 +1,46 @@
+//
+// Copyright (C) 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_CERTIFICATE_PARSER_ANDROID_H_
+#define UPDATE_ENGINE_CERTIFICATE_PARSER_ANDROID_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include <base/macros.h>
+
+#include "payload_consumer/certificate_parser_interface.h"
+
+namespace chromeos_update_engine {
+// This class parses the certificates from a zip file, because the Android
+// build system stores the certs in otacerts.zip.
+class CertificateParserAndroid : public CertificateParserInterface {
+ public:
+ CertificateParserAndroid() = default;
+
+ bool ReadPublicKeysFromCertificates(
+ const std::string& path,
+ std::vector<std::unique_ptr<EVP_PKEY, decltype(&EVP_PKEY_free)>>*
+ out_public_keys) override;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(CertificateParserAndroid);
+};
+
+} // namespace chromeos_update_engine
+
+#endif
diff --git a/payload_consumer/certificate_parser_android_unittest.cc b/payload_consumer/certificate_parser_android_unittest.cc
new file mode 100644
index 0000000..e300414
--- /dev/null
+++ b/payload_consumer/certificate_parser_android_unittest.cc
@@ -0,0 +1,61 @@
+//
+// Copyright (C) 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_consumer/certificate_parser_interface.h"
+
+#include <string>
+
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+
+#include "update_engine/common/hash_calculator.h"
+#include "update_engine/common/test_utils.h"
+#include "update_engine/common/utils.h"
+#include "update_engine/payload_consumer/payload_verifier.h"
+#include "update_engine/payload_generator/payload_signer.h"
+
+namespace chromeos_update_engine {
+
+extern const char* kUnittestPrivateKeyPath;
+const char* kUnittestOtacertsPath = "otacerts.zip";
+
+TEST(CertificateParserAndroidTest, ParseZipArchive) {
+ std::string ota_cert =
+ test_utils::GetBuildArtifactsPath(kUnittestOtacertsPath);
+ ASSERT_TRUE(utils::FileExists(ota_cert.c_str()));
+
+ std::vector<std::unique_ptr<EVP_PKEY, decltype(&EVP_PKEY_free)>> keys;
+ auto parser = CreateCertificateParser();
+ ASSERT_TRUE(parser->ReadPublicKeysFromCertificates(ota_cert, &keys));
+ ASSERT_EQ(1u, keys.size());
+}
+
+TEST(CertificateParserAndroidTest, VerifySignature) {
+ brillo::Blob hash_blob;
+ ASSERT_TRUE(HashCalculator::RawHashOfData({'x'}, &hash_blob));
+ brillo::Blob sig_blob;
+ ASSERT_TRUE(PayloadSigner::SignHash(
+ hash_blob,
+ test_utils::GetBuildArtifactsPath(kUnittestPrivateKeyPath),
+ &sig_blob));
+
+ auto verifier = PayloadVerifier::CreateInstanceFromZipPath(
+ test_utils::GetBuildArtifactsPath(kUnittestOtacertsPath));
+ ASSERT_TRUE(verifier != nullptr);
+ ASSERT_TRUE(verifier->VerifyRawSignature(sig_blob, hash_blob, nullptr));
+}
+
+} // namespace chromeos_update_engine
diff --git a/payload_consumer/certificate_parser_interface.h b/payload_consumer/certificate_parser_interface.h
new file mode 100644
index 0000000..dad23d2
--- /dev/null
+++ b/payload_consumer/certificate_parser_interface.h
@@ -0,0 +1,44 @@
+//
+// Copyright (C) 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_CERTIFICATE_PARSER_INTERFACE_H_
+#define UPDATE_ENGINE_CERTIFICATE_PARSER_INTERFACE_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include <openssl/pem.h>
+
+namespace chromeos_update_engine {
+
+// This class parses the PEM encoded X509 certificates from |path|; and
+// passes the parsed public keys to the caller.
+class CertificateParserInterface {
+ public:
+ virtual ~CertificateParserInterface() = default;
+
+ virtual bool ReadPublicKeysFromCertificates(
+ const std::string& path,
+ std::vector<std::unique_ptr<EVP_PKEY, decltype(&EVP_PKEY_free)>>*
+ out_public_keys) = 0;
+};
+
+std::unique_ptr<CertificateParserInterface> CreateCertificateParser();
+
+} // namespace chromeos_update_engine
+
+#endif
diff --git a/payload_consumer/certificate_parser_stub.cc b/payload_consumer/certificate_parser_stub.cc
new file mode 100644
index 0000000..a365ab8
--- /dev/null
+++ b/payload_consumer/certificate_parser_stub.cc
@@ -0,0 +1,31 @@
+//
+// Copyright (C) 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_consumer/certificate_parser_stub.h"
+
+namespace chromeos_update_engine {
+bool CertificateParserStub::ReadPublicKeysFromCertificates(
+ const std::string& path,
+ std::vector<std::unique_ptr<EVP_PKEY, decltype(&EVP_PKEY_free)>>*
+ out_public_keys) {
+ return true;
+}
+
+std::unique_ptr<CertificateParserInterface> CreateCertificateParser() {
+ return std::make_unique<CertificateParserStub>();
+}
+
+} // namespace chromeos_update_engine
diff --git a/payload_consumer/certificate_parser_stub.h b/payload_consumer/certificate_parser_stub.h
new file mode 100644
index 0000000..a51c2c6
--- /dev/null
+++ b/payload_consumer/certificate_parser_stub.h
@@ -0,0 +1,44 @@
+//
+// Copyright (C) 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_CERTIFICATE_PARSER_STUB_H_
+#define UPDATE_ENGINE_CERTIFICATE_PARSER_STUB_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include <base/macros.h>
+
+#include "update_engine/payload_consumer/certificate_parser_interface.h"
+
+namespace chromeos_update_engine {
+class CertificateParserStub : public CertificateParserInterface {
+ public:
+ CertificateParserStub() = default;
+
+ bool ReadPublicKeysFromCertificates(
+ const std::string& path,
+ std::vector<std::unique_ptr<EVP_PKEY, decltype(&EVP_PKEY_free)>>*
+ out_public_keys) override;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(CertificateParserStub);
+};
+
+} // namespace chromeos_update_engine
+
+#endif // UPDATE_ENGINE_CERTIFICATE_PARSER_STUB_H_
diff --git a/payload_consumer/delta_performer.cc b/payload_consumer/delta_performer.cc
index 95dfbcc..7375d37 100644
--- a/payload_consumer/delta_performer.cc
+++ b/payload_consumer/delta_performer.cc
@@ -23,6 +23,7 @@
#include <cstring>
#include <map>
#include <memory>
+#include <set>
#include <string>
#include <utility>
#include <vector>
@@ -40,15 +41,19 @@
#include <puffin/puffpatch.h>
#include "update_engine/common/constants.h"
+#include "update_engine/common/error_code.h"
+#include "update_engine/common/error_code_utils.h"
#include "update_engine/common/hardware_interface.h"
#include "update_engine/common/prefs_interface.h"
#include "update_engine/common/subprocess.h"
#include "update_engine/common/terminator.h"
#include "update_engine/payload_consumer/bzip_extent_writer.h"
#include "update_engine/payload_consumer/cached_file_descriptor.h"
+#include "update_engine/payload_consumer/certificate_parser_interface.h"
#include "update_engine/payload_consumer/download_action.h"
#include "update_engine/payload_consumer/extent_reader.h"
#include "update_engine/payload_consumer/extent_writer.h"
+#include "update_engine/payload_consumer/partition_update_generator_interface.h"
#if USE_FEC
#include "update_engine/payload_consumer/fec_file_descriptor.h"
#endif // USE_FEC
@@ -315,8 +320,14 @@
install_plan_->partitions.size() - partitions_.size();
const InstallPlan::Partition& install_part =
install_plan_->partitions[num_previous_partitions + current_partition_];
- // Open source fds if we have a delta payload.
- if (payload_->type == InstallPayloadType::kDelta) {
+ // Open source fds if we have a delta payload, or for partitions in the
+ // partial update.
+ bool source_may_exist = manifest_.partial_update() ||
+ payload_->type == InstallPayloadType::kDelta;
+ // We shouldn't open the source partition in certain cases, e.g. some dynamic
+ // partitions in delta payload, partitions included in the full payload for
+ // partial updates. Use the source size as the indicator.
+ if (source_may_exist && install_part.source_size > 0) {
source_path_ = install_part.source_path;
int err;
source_fd_ = OpenFile(source_path_.c_str(), O_RDONLY, false, &err);
@@ -495,17 +506,19 @@
<< "Trusting metadata size in payload = " << metadata_size_;
}
- string public_key;
- if (!GetPublicKey(&public_key)) {
- LOG(ERROR) << "Failed to get public key.";
+ auto [payload_verifier, perform_verification] = CreatePayloadVerifier();
+ if (!payload_verifier) {
+ LOG(ERROR) << "Failed to create payload verifier.";
*error = ErrorCode::kDownloadMetadataSignatureVerificationError;
- return MetadataParseResult::kError;
+ if (perform_verification) {
+ return MetadataParseResult::kError;
+ }
+ } else {
+ // We have the full metadata in |payload|. Verify its integrity
+ // and authenticity based on the information we have in Omaha response.
+ *error = payload_metadata_.ValidateMetadataSignature(
+ payload, payload_->metadata_signature, *payload_verifier);
}
-
- // We have the full metadata in |payload|. Verify its integrity
- // and authenticity based on the information we have in Omaha response.
- *error = payload_metadata_.ValidateMetadataSignature(
- payload, payload_->metadata_signature, public_key);
if (*error != ErrorCode::kSuccess) {
if (install_plan_->hash_checks_mandatory) {
// The autoupdate_CatchBadSignatures test checks for this string
@@ -574,6 +587,9 @@
if ((*error = ValidateManifest()) != ErrorCode::kSuccess)
return false;
manifest_valid_ = true;
+ if (!install_plan_->is_resume) {
+ prefs_->SetString(kPrefsManifestBytes, {buffer_.begin(), buffer_.end()});
+ }
// Clear the download buffer.
DiscardBuffer(false, metadata_size_);
@@ -726,7 +742,7 @@
CheckpointUpdateProgress(false);
}
- // In major version 2, we don't add dummy operation to the payload.
+ // In major version 2, we don't add an unused operation to the payload.
// If we already extracted the signature we should skip this step.
if (manifest_.has_signatures_offset() && manifest_.has_signatures_size() &&
signatures_message_data_.empty()) {
@@ -767,7 +783,61 @@
for (const PartitionUpdate& partition : manifest_.partitions()) {
partitions_.push_back(partition);
}
+
+ // For VAB and partial updates, the partition preparation will copy the
+ // dynamic partitions metadata to the target metadata slot, and rename the
+ // slot suffix of the partitions in the metadata.
+ if (install_plan_->target_slot != BootControlInterface::kInvalidSlot) {
+ uint64_t required_size = 0;
+ if (!PreparePartitionsForUpdate(&required_size)) {
+ if (required_size > 0) {
+ *error = ErrorCode::kNotEnoughSpace;
+ } else {
+ *error = ErrorCode::kInstallDeviceOpenError;
+ }
+ return false;
+ }
+ }
+
+ // Partitions in manifest are no longer needed after preparing partitions.
manifest_.clear_partitions();
+ // TODO(xunchang) TBD: allow partial update only on devices with dynamic
+ // partition.
+ if (manifest_.partial_update()) {
+ std::set<std::string> touched_partitions;
+ for (const auto& partition_update : partitions_) {
+ touched_partitions.insert(partition_update.partition_name());
+ }
+
+ auto generator = partition_update_generator::Create(boot_control_,
+ manifest_.block_size());
+ std::vector<PartitionUpdate> untouched_static_partitions;
+ TEST_AND_RETURN_FALSE(
+ generator->GenerateOperationsForPartitionsNotInPayload(
+ install_plan_->source_slot,
+ install_plan_->target_slot,
+ touched_partitions,
+ &untouched_static_partitions));
+ partitions_.insert(partitions_.end(),
+ untouched_static_partitions.begin(),
+ untouched_static_partitions.end());
+
+ // Save the untouched dynamic partitions in install plan.
+ std::vector<std::string> dynamic_partitions;
+ if (!boot_control_->GetDynamicPartitionControl()
+ ->ListDynamicPartitionsForSlot(install_plan_->source_slot,
+ &dynamic_partitions)) {
+ LOG(ERROR) << "Failed to load dynamic partitions from slot "
+ << install_plan_->source_slot;
+ return false;
+ }
+ install_plan_->untouched_dynamic_partitions.clear();
+ for (const auto& name : dynamic_partitions) {
+ if (touched_partitions.find(name) == touched_partitions.end()) {
+ install_plan_->untouched_dynamic_partitions.push_back(name);
+ }
+ }
+ }
// Fill in the InstallPlan::partitions based on the partitions from the
// payload.
@@ -842,13 +912,9 @@
install_plan_->partitions.push_back(install_part);
}
- if (install_plan_->target_slot != BootControlInterface::kInvalidSlot) {
- if (!InitPartitionMetadata()) {
- *error = ErrorCode::kInstallDeviceOpenError;
- return false;
- }
- }
-
+ // TODO(xunchang) only need to load the partitions for those in payload.
+ // Because we have already loaded the others once when generating SOURCE_COPY
+ // operations.
if (!install_plan_->LoadPartitionsFromSlots(boot_control_)) {
LOG(ERROR) << "Unable to determine all the partition devices.";
*error = ErrorCode::kInstallDeviceOpenError;
@@ -858,45 +924,57 @@
return true;
}
-bool DeltaPerformer::InitPartitionMetadata() {
- BootControlInterface::PartitionMetadata partition_metadata;
- if (manifest_.has_dynamic_partition_metadata()) {
- std::map<string, uint64_t> partition_sizes;
- for (const auto& partition : install_plan_->partitions) {
- partition_sizes.emplace(partition.name, partition.target_size);
- }
- for (const auto& group : manifest_.dynamic_partition_metadata().groups()) {
- BootControlInterface::PartitionMetadata::Group e;
- e.name = group.name();
- e.size = group.size();
- for (const auto& partition_name : group.partition_names()) {
- auto it = partition_sizes.find(partition_name);
- if (it == partition_sizes.end()) {
- // TODO(tbao): Support auto-filling partition info for framework-only
- // OTA.
- LOG(ERROR) << "dynamic_partition_metadata contains partition "
- << partition_name
- << " but it is not part of the manifest. "
- << "This is not supported.";
- return false;
- }
- e.partitions.push_back({partition_name, it->second});
- }
- partition_metadata.groups.push_back(std::move(e));
- }
+bool DeltaPerformer::PreparePartitionsForUpdate(uint64_t* required_size) {
+ // Call the static PreparePartitionsForUpdate with the hash from
+ // kPrefsUpdateCheckResponseHash to ensure that the hash of the payload that
+ // space was preallocated for matches the hash of the payload being applied.
+ string update_check_response_hash;
+ ignore_result(prefs_->GetString(kPrefsUpdateCheckResponseHash,
+ &update_check_response_hash));
+ return PreparePartitionsForUpdate(prefs_,
+ boot_control_,
+ install_plan_->target_slot,
+ manifest_,
+ update_check_response_hash,
+ required_size);
+}
+
+bool DeltaPerformer::PreparePartitionsForUpdate(
+ PrefsInterface* prefs,
+ BootControlInterface* boot_control,
+ BootControlInterface::Slot target_slot,
+ const DeltaArchiveManifest& manifest,
+ const std::string& update_check_response_hash,
+ uint64_t* required_size) {
+ string last_hash;
+ ignore_result(
+ prefs->GetString(kPrefsDynamicPartitionMetadataUpdated, &last_hash));
+
+ bool is_resume = !update_check_response_hash.empty() &&
+ last_hash == update_check_response_hash;
+
+ if (is_resume) {
+ LOG(INFO) << "Using previously prepared partitions for update. hash = "
+ << last_hash;
+ } else {
+ LOG(INFO) << "Preparing partitions for new update. last hash = "
+ << last_hash << ", new hash = " << update_check_response_hash;
}
- bool metadata_updated = false;
- prefs_->GetBoolean(kPrefsDynamicPartitionMetadataUpdated, &metadata_updated);
- if (!boot_control_->InitPartitionMetadata(
- install_plan_->target_slot, partition_metadata, !metadata_updated)) {
+ if (!boot_control->GetDynamicPartitionControl()->PreparePartitionsForUpdate(
+ boot_control->GetCurrentSlot(),
+ target_slot,
+ manifest,
+ !is_resume /* should update */,
+ required_size)) {
LOG(ERROR) << "Unable to initialize partition metadata for slot "
- << BootControlInterface::SlotName(install_plan_->target_slot);
+ << BootControlInterface::SlotName(target_slot);
return false;
}
- TEST_AND_RETURN_FALSE(
- prefs_->SetBoolean(kPrefsDynamicPartitionMetadataUpdated, true));
- LOG(INFO) << "InitPartitionMetadata done.";
+
+ TEST_AND_RETURN_FALSE(prefs->SetString(kPrefsDynamicPartitionMetadataUpdated,
+ update_check_response_hash));
+ LOG(INFO) << "PreparePartitionsForUpdate done.";
return true;
}
@@ -1031,7 +1109,21 @@
if (operation.has_dst_length())
TEST_AND_RETURN_FALSE(operation.dst_length() % block_size_ == 0);
+ TEST_AND_RETURN_FALSE(source_fd_ != nullptr);
+
+ // The device may optimize the SOURCE_COPY operation.
+ // Since this is a device-specific optimization, let DynamicPartitionController
+ // decide if the operation should be skipped.
+ const PartitionUpdate& partition = partitions_[current_partition_];
+ const auto& partition_control = boot_control_->GetDynamicPartitionControl();
+
+ InstallOperation buf;
+ bool should_optimize = partition_control->OptimizeOperation(
+ partition.partition_name(), operation, &buf);
+ const InstallOperation& optimized = should_optimize ? buf : operation;
+
if (operation.has_src_sha256_hash()) {
+ bool read_ok;
brillo::Blob source_hash;
brillo::Blob expected_source_hash(operation.src_sha256_hash().begin(),
operation.src_sha256_hash().end());
@@ -1040,15 +1132,30 @@
// device doesn't match or there was an error reading the source partition.
// Note that this code will also fall back if writing the target partition
// fails.
- bool read_ok = fd_utils::CopyAndHashExtents(source_fd_,
- operation.src_extents(),
- target_fd_,
- operation.dst_extents(),
- block_size_,
- &source_hash);
+ if (should_optimize) {
+ // Hash operation.src_extents(), then copy optimized.src_extents to
+ // optimized.dst_extents.
+ read_ok =
+ fd_utils::ReadAndHashExtents(
+ source_fd_, operation.src_extents(), block_size_, &source_hash) &&
+ fd_utils::CopyAndHashExtents(source_fd_,
+ optimized.src_extents(),
+ target_fd_,
+ optimized.dst_extents(),
+ block_size_,
+ nullptr /* skip hashing */);
+ } else {
+ read_ok = fd_utils::CopyAndHashExtents(source_fd_,
+ operation.src_extents(),
+ target_fd_,
+ operation.dst_extents(),
+ block_size_,
+ &source_hash);
+ }
if (read_ok && expected_source_hash == source_hash)
return true;
-
+ LOG(WARNING) << "Source hash from RAW device mismatched, attempting to "
+ "correct using ECC";
if (!OpenCurrentECCPartition()) {
// The following function call will return false since the source hash
// mismatches, but we still want to call it so it prints the appropriate
@@ -1061,13 +1168,25 @@
<< ", expected "
<< base::HexEncode(expected_source_hash.data(),
expected_source_hash.size());
-
- TEST_AND_RETURN_FALSE(fd_utils::CopyAndHashExtents(source_ecc_fd_,
- operation.src_extents(),
- target_fd_,
- operation.dst_extents(),
- block_size_,
- &source_hash));
+ if (should_optimize) {
+ TEST_AND_RETURN_FALSE(fd_utils::ReadAndHashExtents(
+ source_ecc_fd_, operation.src_extents(), block_size_, &source_hash));
+ TEST_AND_RETURN_FALSE(
+ fd_utils::CopyAndHashExtents(source_ecc_fd_,
+ optimized.src_extents(),
+ target_fd_,
+ optimized.dst_extents(),
+ block_size_,
+ nullptr /* skip hashing */));
+ } else {
+ TEST_AND_RETURN_FALSE(
+ fd_utils::CopyAndHashExtents(source_ecc_fd_,
+ operation.src_extents(),
+ target_fd_,
+ operation.dst_extents(),
+ block_size_,
+ &source_hash));
+ }
TEST_AND_RETURN_FALSE(
ValidateSourceHash(source_hash, operation, source_ecc_fd_, error));
// At this point reading from the the error corrected device worked, but
@@ -1079,19 +1198,20 @@
// corrected device first since we can't verify the block in the raw device
// at this point, but we fall back to the raw device since the error
// corrected device can be shorter or not available.
+
if (OpenCurrentECCPartition() &&
fd_utils::CopyAndHashExtents(source_ecc_fd_,
- operation.src_extents(),
+ optimized.src_extents(),
target_fd_,
- operation.dst_extents(),
+ optimized.dst_extents(),
block_size_,
nullptr)) {
return true;
}
TEST_AND_RETURN_FALSE(fd_utils::CopyAndHashExtents(source_fd_,
- operation.src_extents(),
+ optimized.src_extents(),
target_fd_,
- operation.dst_extents(),
+ optimized.dst_extents(),
block_size_,
nullptr));
}
@@ -1100,6 +1220,11 @@
FileDescriptorPtr DeltaPerformer::ChooseSourceFD(
const InstallOperation& operation, ErrorCode* error) {
+ if (source_fd_ == nullptr) {
+ LOG(ERROR) << "ChooseSourceFD fail: source_fd_ == nullptr";
+ return nullptr;
+ }
+
if (!operation.has_src_sha256_hash()) {
// When the operation doesn't include a source hash, we attempt the error
// corrected device first since we can't verify the block in the raw device
@@ -1396,8 +1521,7 @@
// blob and the signed sha-256 context.
LOG_IF(WARNING,
!prefs_->SetString(kPrefsUpdateStateSignatureBlob,
- string(signatures_message_data_.begin(),
- signatures_message_data_.end())))
+ signatures_message_data_))
<< "Unable to store the signature blob.";
LOG(INFO) << "Extracted signature data of size "
@@ -1421,14 +1545,35 @@
return brillo::data_encoding::Base64Decode(install_plan_->public_key_rsa,
out_public_key);
}
-
+ LOG(INFO) << "No public keys found for verification.";
return true;
}
-ErrorCode DeltaPerformer::ValidateManifest() {
- // Perform assorted checks to sanity check the manifest, make sure it
- // matches data from other sources, and that it is a supported version.
+std::pair<std::unique_ptr<PayloadVerifier>, bool>
+DeltaPerformer::CreatePayloadVerifier() {
+ if (utils::FileExists(update_certificates_path_.c_str())) {
+ LOG(INFO) << "Verifying using certificates: " << update_certificates_path_;
+ return {
+ PayloadVerifier::CreateInstanceFromZipPath(update_certificates_path_),
+ true};
+ }
+ string public_key;
+ if (!GetPublicKey(&public_key)) {
+ LOG(ERROR) << "Failed to read public key";
+ return {nullptr, true};
+ }
+
+ // Skips the verification if the public key is empty.
+ if (public_key.empty()) {
+ return {nullptr, false};
+ }
+ return {PayloadVerifier::CreateInstance(public_key), true};
+}
+
+ErrorCode DeltaPerformer::ValidateManifest() {
+ // Perform assorted checks to validate the manifest, make sure it
+ // matches data from other sources, and that it is a supported version.
bool has_old_fields = std::any_of(manifest_.partitions().begin(),
manifest_.partitions().end(),
[](const PartitionUpdate& partition) {
@@ -1436,9 +1581,12 @@
});
// The presence of an old partition hash is the sole indicator for a delta
- // update.
+ // update. Also, always treat the partial update as delta so that we can
+ // perform the minor version check correctly.
InstallPayloadType actual_payload_type =
- has_old_fields ? InstallPayloadType::kDelta : InstallPayloadType::kFull;
+ (has_old_fields || manifest_.partial_update())
+ ? InstallPayloadType::kDelta
+ : InstallPayloadType::kFull;
if (payload_->type == InstallPayloadType::kUnknown) {
LOG(INFO) << "Detected a '"
@@ -1453,8 +1601,8 @@
<< "' payload.";
return ErrorCode::kPayloadMismatchedType;
}
-
// Check that the minor version is compatible.
+ // TODO(xunchang) increment minor version & add check for partial update
if (actual_payload_type == InstallPayloadType::kFull) {
if (manifest_.minor_version() != kFullPayloadMinorVersion) {
LOG(ERROR) << "Manifest contains minor version "
@@ -1475,6 +1623,88 @@
}
}
+ ErrorCode error_code = CheckTimestampError();
+ if (error_code != ErrorCode::kSuccess) {
+ if (error_code == ErrorCode::kPayloadTimestampError) {
+ if (!hardware_->AllowDowngrade()) {
+ return ErrorCode::kPayloadTimestampError;
+ }
+ LOG(INFO) << "The current OS build allows downgrade, continuing to apply"
+ " the payload with an older timestamp.";
+ } else {
+ LOG(ERROR) << "Timestamp check returned "
+ << utils::ErrorCodeToString(error_code);
+ return error_code;
+ }
+ }
+
+ // TODO(crbug.com/37661) we should be adding more and more manifest checks,
+ // such as partition boundaries, etc.
+
+ return ErrorCode::kSuccess;
+}
+
+ErrorCode DeltaPerformer::CheckTimestampError() const {
+ bool is_partial_update =
+ manifest_.has_partial_update() && manifest_.partial_update();
+ const auto& partitions = manifest_.partitions();
+
+ // Checks the version field of a given PartitionUpdate object. If a
+ // downgrade is detected, |downgrade_detected| is set. Returns the error
+ // code of the check; the caller decides whether to continue with the next
+ // partition or to exit early.
+ auto&& timestamp_valid = [this](const PartitionUpdate& partition,
+ bool allow_empty_version,
+ bool* downgrade_detected) -> ErrorCode {
+ if (!partition.has_version()) {
+ if (allow_empty_version) {
+ return ErrorCode::kSuccess;
+ }
+ LOG(ERROR)
+ << "PartitionUpdate " << partition.partition_name()
+ << " does not have a version field. Not allowed in partial updates.";
+ return ErrorCode::kDownloadManifestParseError;
+ }
+
+ auto error_code = hardware_->IsPartitionUpdateValid(
+ partition.partition_name(), partition.version());
+ switch (error_code) {
+ case ErrorCode::kSuccess:
+ break;
+ case ErrorCode::kPayloadTimestampError:
+ *downgrade_detected = true;
+ LOG(WARNING) << "PartitionUpdate " << partition.partition_name()
+ << " has an older version than partition on device.";
+ break;
+ default:
+ LOG(ERROR) << "IsPartitionUpdateValid(" << partition.partition_name()
+ << ") returned " << utils::ErrorCodeToString(error_code);
+ break;
+ }
+ return error_code;
+ };
+
+ bool downgrade_detected = false;
+
+ if (is_partial_update) {
+ // For partial updates, all partitions MUST have valid timestamps, but
+ // max_timestamp can be empty.
+ for (const auto& partition : partitions) {
+ auto error_code = timestamp_valid(
+ partition, false /* allow_empty_version */, &downgrade_detected);
+ if (error_code != ErrorCode::kSuccess &&
+ error_code != ErrorCode::kPayloadTimestampError) {
+ return error_code;
+ }
+ }
+ if (downgrade_detected) {
+ return ErrorCode::kPayloadTimestampError;
+ }
+ return ErrorCode::kSuccess;
+ }
+
+ // For non-partial updates, check max_timestamp first.
if (manifest_.max_timestamp() < hardware_->GetBuildTimestamp()) {
LOG(ERROR) << "The current OS build timestamp ("
<< hardware_->GetBuildTimestamp()
@@ -1482,10 +1712,18 @@
<< manifest_.max_timestamp() << ")";
return ErrorCode::kPayloadTimestampError;
}
-
- // TODO(crbug.com/37661) we should be adding more and more manifest checks,
- // such as partition boundaries, etc.
-
+ // Otherwise... partitions can have empty timestamps.
+ for (const auto& partition : partitions) {
+ auto error_code = timestamp_valid(
+ partition, true /* allow_empty_version */, &downgrade_detected);
+ if (error_code != ErrorCode::kSuccess &&
+ error_code != ErrorCode::kPayloadTimestampError) {
+ return error_code;
+ }
+ }
+ if (downgrade_detected) {
+ return ErrorCode::kPayloadTimestampError;
+ }
return ErrorCode::kSuccess;
}
@@ -1505,7 +1743,7 @@
// corresponding update should have been produced with the operation
// hashes. So if it happens it means either we've turned operation hash
// generation off in DeltaDiffGenerator or it's a regression of some sort.
- // One caveat though: The last operation is a dummy signature operation
+ // One caveat though: The last operation is an unused signature operation
// that doesn't have a hash at the time the manifest is created. So we
// should not complain about that operation. This operation can be
// recognized by the fact that its offset is mentioned in the manifest.
@@ -1563,12 +1801,6 @@
ErrorCode DeltaPerformer::VerifyPayload(
const brillo::Blob& update_check_response_hash,
const uint64_t update_check_response_size) {
- string public_key;
- if (!GetPublicKey(&public_key)) {
- LOG(ERROR) << "Failed to get public key.";
- return ErrorCode::kDownloadPayloadPubKeyVerificationError;
- }
-
// Verifies the download size.
if (update_check_response_size !=
metadata_size_ + metadata_signature_size_ + buffer_offset_) {
@@ -1579,6 +1811,16 @@
return ErrorCode::kPayloadSizeMismatchError;
}
+ auto [payload_verifier, perform_verification] = CreatePayloadVerifier();
+ if (!perform_verification) {
+ LOG(WARNING) << "Not verifying signed delta payload -- missing public key.";
+ return ErrorCode::kSuccess;
+ }
+ if (!payload_verifier) {
+ LOG(ERROR) << "Failed to create the payload verifier.";
+ return ErrorCode::kDownloadPayloadPubKeyVerificationError;
+ }
+
// Verifies the payload hash.
TEST_AND_RETURN_VAL(ErrorCode::kDownloadPayloadVerificationError,
!payload_hash_calculator_.raw_hash().empty());
@@ -1586,21 +1828,13 @@
ErrorCode::kPayloadHashMismatchError,
payload_hash_calculator_.raw_hash() == update_check_response_hash);
- // Verifies the signed payload hash.
- if (public_key.empty()) {
- LOG(WARNING) << "Not verifying signed delta payload -- missing public key.";
- return ErrorCode::kSuccess;
- }
TEST_AND_RETURN_VAL(ErrorCode::kSignedDeltaPayloadExpectedError,
!signatures_message_data_.empty());
brillo::Blob hash_data = signed_hash_calculator_.raw_hash();
TEST_AND_RETURN_VAL(ErrorCode::kDownloadPayloadPubKeyVerificationError,
- PayloadVerifier::PadRSA2048SHA256Hash(&hash_data));
- TEST_AND_RETURN_VAL(ErrorCode::kDownloadPayloadPubKeyVerificationError,
- !hash_data.empty());
+ hash_data.size() == kSHA256Size);
- if (!PayloadVerifier::VerifySignature(
- signatures_message_data_, public_key, hash_data)) {
+ if (!payload_verifier->VerifySignature(signatures_message_data_, hash_data)) {
// The autoupdate_CatchBadSignatures test checks for this string
// in log-files. Keep in sync.
LOG(ERROR) << "Public key verification failed, thus update failed.";
@@ -1645,7 +1879,7 @@
resumed_update_failures > kMaxResumedUpdateFailures)
return false;
- // Sanity check the rest.
+ // Validate the rest.
int64_t next_data_offset = -1;
if (!(prefs->GetInt64(kPrefsUpdateStateNextDataOffset, &next_data_offset) &&
next_data_offset >= 0))
@@ -1670,7 +1904,10 @@
return true;
}
-bool DeltaPerformer::ResetUpdateProgress(PrefsInterface* prefs, bool quick) {
+bool DeltaPerformer::ResetUpdateProgress(
+ PrefsInterface* prefs,
+ bool quick,
+ bool skip_dynamic_partititon_metadata_updated) {
TEST_AND_RETURN_FALSE(prefs->SetInt64(kPrefsUpdateStateNextOperation,
kUpdateStateOperationInvalid));
if (!quick) {
@@ -1684,7 +1921,11 @@
prefs->SetInt64(kPrefsResumedUpdateFailures, 0);
prefs->Delete(kPrefsPostInstallSucceeded);
prefs->Delete(kPrefsVerityWritten);
- prefs->Delete(kPrefsDynamicPartitionMetadataUpdated);
+
+ if (!skip_dynamic_partititon_metadata_updated) {
+ LOG(INFO) << "Resetting recorded hash for prepared partitions.";
+ prefs->Delete(kPrefsDynamicPartitionMetadataUpdated);
+ }
}
return true;
}
@@ -1758,11 +1999,7 @@
signed_hash_calculator_.SetContext(signed_hash_context));
}
- string signature_blob;
- if (prefs_->GetString(kPrefsUpdateStateSignatureBlob, &signature_blob)) {
- signatures_message_data_.assign(signature_blob.begin(),
- signature_blob.end());
- }
+ prefs_->GetString(kPrefsUpdateStateSignatureBlob, &signatures_message_data_);
string hash_context;
TEST_AND_RETURN_FALSE(
diff --git a/payload_consumer/delta_performer.h b/payload_consumer/delta_performer.h
index 7860747..88076af 100644
--- a/payload_consumer/delta_performer.h
+++ b/payload_consumer/delta_performer.h
@@ -20,7 +20,9 @@
#include <inttypes.h>
#include <limits>
+#include <memory>
#include <string>
+#include <utility>
#include <vector>
#include <base/time/time.h>
@@ -34,6 +36,7 @@
#include "update_engine/payload_consumer/file_writer.h"
#include "update_engine/payload_consumer/install_plan.h"
#include "update_engine/payload_consumer/payload_metadata.h"
+#include "update_engine/payload_consumer/payload_verifier.h"
#include "update_engine/update_metadata.pb.h"
namespace chromeos_update_engine {
@@ -45,7 +48,6 @@
// This class performs the actions in a delta update synchronously. The delta
// update itself should be passed in in chunks as it is received.
-
class DeltaPerformer : public FileWriter {
public:
// Defines the granularity of progress logging in terms of how many "completed
@@ -75,7 +77,9 @@
download_delegate_(download_delegate),
install_plan_(install_plan),
payload_(payload),
- interactive_(interactive) {}
+ interactive_(interactive) {
+ CHECK(install_plan_);
+ }
// FileWriter's Write implementation where caller doesn't care about
// error codes.
@@ -140,9 +144,14 @@
// Resets the persistent update progress state to indicate that an update
// can't be resumed. Performs a quick update-in-progress reset if |quick| is
- // true, otherwise resets all progress-related update state. Returns true on
- // success, false otherwise.
- static bool ResetUpdateProgress(PrefsInterface* prefs, bool quick);
+ // true, otherwise resets all progress-related update state.
+ // If |skip_dynamic_partititon_metadata_updated| is true, do not reset
+ // dynamic-partition-metadata-updated.
+ // Returns true on success, false otherwise.
+ static bool ResetUpdateProgress(
+ PrefsInterface* prefs,
+ bool quick,
+ bool skip_dynamic_partititon_metadata_updated = false);
// Attempts to parse the update metadata starting from the beginning of
// |payload|. On success, returns kMetadataParseSuccess. Returns
@@ -156,6 +165,11 @@
public_key_path_ = public_key_path;
}
+ void set_update_certificates_path(
+ const std::string& update_certificates_path) {
+ update_certificates_path_ = update_certificates_path;
+ }
+
// Return true if header parsing is finished and no errors occurred.
bool IsHeaderParsed() const;
@@ -171,6 +185,24 @@
const FileDescriptorPtr source_fd,
ErrorCode* error);
+ // Initialize partitions and allocate required space for an update with the
+ // given |manifest|. |update_check_response_hash| is used to check if the
+ // previous call to this function corresponds to the same payload.
+ // - Same payload: makes no persistent modifications (does not write to disk)
+ // - Different payload: makes persistent modifications (writes to disk)
+ // In both cases, in-memory flags are updated. This function must be called
+ // on the payload at least once (to update in-memory flags) before writing
+ // (applying) the payload.
+ // If the failure is due to insufficient space, |required_size| is set to the
+ // size required on the device to apply the payload.
+ static bool PreparePartitionsForUpdate(
+ PrefsInterface* prefs,
+ BootControlInterface* boot_control,
+ BootControlInterface::Slot target_slot,
+ const DeltaArchiveManifest& manifest,
+ const std::string& update_check_response_hash,
+ uint64_t* required_size);
+
private:
friend class DeltaPerformerTest;
friend class DeltaPerformerIntegrationTest;
@@ -266,9 +298,25 @@
// |out_public_key|. Returns false on failures.
bool GetPublicKey(std::string* out_public_key);
+ // Creates a PayloadVerifier from the zip file containing certificates. If the
+ // path to the zip file doesn't exist, falls back to using the public key.
+ // Returns a pair of the created PayloadVerifier and whether we should perform
+ // the verification.
+ std::pair<std::unique_ptr<PayloadVerifier>, bool> CreatePayloadVerifier();
+
// After install_plan_ is filled with partition names and sizes, initialize
// metadata of partitions and map necessary devices before opening devices.
- bool InitPartitionMetadata();
+ // Also see comment for the static PreparePartitionsForUpdate().
+ bool PreparePartitionsForUpdate(uint64_t* required_size);
+
+ // Check if current manifest contains timestamp errors.
+ // Return:
+ // - kSuccess if update is valid.
+ // - kPayloadTimestampError if downgrade is detected
+ // - kDownloadManifestParseError if |new_version| has an incorrect format
+ // - Other error values if the source of error is known, or kError for
+ // a generic error on the device.
+ ErrorCode CheckTimestampError() const;
// Update Engine preference store.
PrefsInterface* prefs_;
@@ -370,12 +418,15 @@
HashCalculator signed_hash_calculator_;
// Signatures message blob extracted directly from the payload.
- brillo::Blob signatures_message_data_;
+ std::string signatures_message_data_;
// The public key to be used. Provided as a member so that tests can
// override with test keys.
std::string public_key_path_{constants::kUpdatePayloadPublicKeyPath};
+ // The path to the zip file with X509 certificates.
+ std::string update_certificates_path_{constants::kUpdateCertificatesPath};
+
// The number of bytes received so far, used for progress tracking.
size_t total_bytes_received_{0};
diff --git a/payload_consumer/delta_performer_integration_test.cc b/payload_consumer/delta_performer_integration_test.cc
index af6682a..f2aeb03 100644
--- a/payload_consumer/delta_performer_integration_test.cc
+++ b/payload_consumer/delta_performer_integration_test.cc
@@ -28,6 +28,7 @@
#include <base/stl_util.h>
#include <base/strings/string_util.h>
#include <base/strings/stringprintf.h>
+#include <gmock/gmock-matchers.h>
#include <google/protobuf/repeated_field.h>
#include <gtest/gtest.h>
#include <openssl/pem.h>
@@ -35,9 +36,12 @@
#include "update_engine/common/constants.h"
#include "update_engine/common/fake_boot_control.h"
#include "update_engine/common/fake_hardware.h"
+#include "update_engine/common/fake_prefs.h"
#include "update_engine/common/mock_prefs.h"
#include "update_engine/common/test_utils.h"
#include "update_engine/common/utils.h"
+#include "update_engine/hardware_android.h"
+#include "update_engine/payload_consumer/install_plan.h"
#include "update_engine/payload_consumer/mock_download_action.h"
#include "update_engine/payload_consumer/payload_constants.h"
#include "update_engine/payload_consumer/payload_metadata.h"
@@ -61,6 +65,8 @@
extern const char* kUnittestPublicKeyPath;
extern const char* kUnittestPrivateKey2Path;
extern const char* kUnittestPublicKey2Path;
+extern const char* kUnittestPrivateKeyECPath;
+extern const char* kUnittestPublicKeyECPath;
static const uint32_t kDefaultKernelSize = 4096; // Something small for a test
// clang-format off
@@ -109,6 +115,7 @@
kSignatureGeneratedPlaceholder, // Insert placeholder signatures, then real.
kSignatureGeneratedPlaceholderMismatch, // Insert a wrong sized placeholder.
kSignatureGeneratedShell, // Sign the generated payload through shell cmds.
+ kSignatureGeneratedShellECKey, // Sign with a EC key through shell cmds.
kSignatureGeneratedShellBadKey, // Sign with a bad key through shell cmds.
kSignatureGeneratedShellRotateCl1, // Rotate key, test client v1
kSignatureGeneratedShellRotateCl2, // Rotate key, test client v2
@@ -121,7 +128,41 @@
} // namespace
-class DeltaPerformerIntegrationTest : public ::testing::Test {};
+class DeltaPerformerIntegrationTest : public ::testing::Test {
+ public:
+ void RunManifestValidation(const DeltaArchiveManifest& manifest,
+ uint64_t major_version,
+ ErrorCode expected) {
+ FakePrefs prefs;
+ InstallPlan::Payload payload;
+ InstallPlan install_plan;
+ DeltaPerformer performer{&prefs,
+ nullptr,
+ &fake_hardware_,
+ nullptr,
+ &install_plan,
+ &payload,
+ false /* interactive*/};
+ // Delta performer will treat manifest as kDelta payload
+ // if it's a partial update.
+ payload.type = manifest.partial_update() ? InstallPayloadType::kDelta
+ : InstallPayloadType::kFull;
+
+ // The Manifest we are validating.
+ performer.manifest_.CopyFrom(manifest);
+ performer.major_payload_version_ = major_version;
+
+ EXPECT_EQ(expected, performer.ValidateManifest());
+ }
+ void AddPartition(DeltaArchiveManifest* manifest,
+ std::string name,
+ int timestamp) {
+ auto& partition = *manifest->add_partitions();
+ partition.set_version(std::to_string(timestamp));
+ partition.set_partition_name(name);
+ }
+ FakeHardware fake_hardware_;
+};
static void CompareFilesByBlock(const string& a_file,
const string& b_file,
@@ -166,29 +207,26 @@
return true;
}
-static size_t GetSignatureSize(const string& private_key_path) {
- const brillo::Blob data(1, 'x');
- brillo::Blob hash;
- EXPECT_TRUE(HashCalculator::RawHashOfData(data, &hash));
- brillo::Blob signature;
- EXPECT_TRUE(PayloadSigner::SignHash(hash, private_key_path, &signature));
- return signature.size();
-}
-
-static bool InsertSignaturePlaceholder(int signature_size,
+static bool InsertSignaturePlaceholder(size_t signature_size,
const string& payload_path,
uint64_t* out_metadata_size) {
vector<brillo::Blob> signatures;
signatures.push_back(brillo::Blob(signature_size, 0));
- return PayloadSigner::AddSignatureToPayload(
- payload_path, signatures, {}, payload_path, out_metadata_size);
+ return PayloadSigner::AddSignatureToPayload(payload_path,
+ {signature_size},
+ signatures,
+ {},
+ payload_path,
+ out_metadata_size);
}
static void SignGeneratedPayload(const string& payload_path,
uint64_t* out_metadata_size) {
string private_key_path = GetBuildArtifactsPath(kUnittestPrivateKeyPath);
- int signature_size = GetSignatureSize(private_key_path);
+ size_t signature_size;
+ ASSERT_TRUE(PayloadSigner::GetMaximumSignatureSize(private_key_path,
+ &signature_size));
brillo::Blob metadata_hash, payload_hash;
ASSERT_TRUE(PayloadSigner::HashPayloadForSigning(
payload_path, {signature_size}, &payload_hash, &metadata_hash));
@@ -198,6 +236,7 @@
ASSERT_TRUE(PayloadSigner::SignHash(
metadata_hash, private_key_path, &metadata_signature));
ASSERT_TRUE(PayloadSigner::AddSignatureToPayload(payload_path,
+ {signature_size},
{payload_signature},
{metadata_signature},
payload_path,
@@ -206,28 +245,112 @@
payload_path, GetBuildArtifactsPath(kUnittestPublicKeyPath)));
}
-static void SignHashToFile(const string& hash_file,
- const string& signature_file,
- const string& private_key_file) {
- brillo::Blob hash, signature;
- ASSERT_TRUE(utils::ReadFile(hash_file, &hash));
- ASSERT_TRUE(PayloadSigner::SignHash(hash, private_key_file, &signature));
- ASSERT_TRUE(test_utils::WriteFileVector(signature_file, signature));
+static void SignGeneratedShellPayloadWithKeys(
+ const string& payload_path,
+ const vector<string>& private_key_paths,
+ const string& public_key_path,
+ bool verification_success) {
+ vector<string> signature_size_strings;
+ for (const auto& key_path : private_key_paths) {
+ size_t signature_size;
+ ASSERT_TRUE(
+ PayloadSigner::GetMaximumSignatureSize(key_path, &signature_size));
+ signature_size_strings.push_back(base::StringPrintf("%zu", signature_size));
+ }
+ string signature_size_string = base::JoinString(signature_size_strings, ":");
+
+ test_utils::ScopedTempFile hash_file("hash.XXXXXX"),
+ metadata_hash_file("hash.XXXXXX");
+ string delta_generator_path = GetBuildArtifactsPath("delta_generator");
+ ASSERT_EQ(0,
+ System(base::StringPrintf(
+ "%s -in_file=%s -signature_size=%s -out_hash_file=%s "
+ "-out_metadata_hash_file=%s",
+ delta_generator_path.c_str(),
+ payload_path.c_str(),
+ signature_size_string.c_str(),
+ hash_file.path().c_str(),
+ metadata_hash_file.path().c_str())));
+
+ // Sign the hash with all private keys.
+ vector<test_utils::ScopedTempFile> sig_files, metadata_sig_files;
+ vector<string> sig_file_paths, metadata_sig_file_paths;
+ for (const auto& key_path : private_key_paths) {
+ brillo::Blob hash, signature;
+ ASSERT_TRUE(utils::ReadFile(hash_file.path(), &hash));
+ ASSERT_TRUE(PayloadSigner::SignHash(hash, key_path, &signature));
+
+ test_utils::ScopedTempFile sig_file("signature.XXXXXX");
+ ASSERT_TRUE(test_utils::WriteFileVector(sig_file.path(), signature));
+ sig_file_paths.push_back(sig_file.path());
+ sig_files.push_back(std::move(sig_file));
+
+ brillo::Blob metadata_hash, metadata_signature;
+ ASSERT_TRUE(utils::ReadFile(metadata_hash_file.path(), &metadata_hash));
+ ASSERT_TRUE(
+ PayloadSigner::SignHash(metadata_hash, key_path, &metadata_signature));
+
+ test_utils::ScopedTempFile metadata_sig_file("signature.XXXXXX");
+ ASSERT_TRUE(test_utils::WriteFileVector(metadata_sig_file.path(),
+ metadata_signature));
+
+ metadata_sig_file_paths.push_back(metadata_sig_file.path());
+ metadata_sig_files.push_back(std::move(metadata_sig_file));
+ }
+ string sig_files_string = base::JoinString(sig_file_paths, ":");
+ string metadata_sig_files_string =
+ base::JoinString(metadata_sig_file_paths, ":");
+
+ // Add the signature to the payload.
+ ASSERT_EQ(0,
+ System(base::StringPrintf("%s --signature_size=%s -in_file=%s "
+ "-payload_signature_file=%s "
+ "-metadata_signature_file=%s "
+ "-out_file=%s",
+ delta_generator_path.c_str(),
+ signature_size_string.c_str(),
+ payload_path.c_str(),
+ sig_files_string.c_str(),
+ metadata_sig_files_string.c_str(),
+ payload_path.c_str())));
+
+ int verify_result = System(base::StringPrintf("%s -in_file=%s -public_key=%s",
+ delta_generator_path.c_str(),
+ payload_path.c_str(),
+ public_key_path.c_str()));
+
+ if (verification_success) {
+ ASSERT_EQ(0, verify_result);
+ } else {
+ ASSERT_NE(0, verify_result);
+ }
}
static void SignGeneratedShellPayload(SignatureTest signature_test,
const string& payload_path) {
- string private_key_path = GetBuildArtifactsPath(kUnittestPrivateKeyPath);
+ vector<SignatureTest> supported_test = {
+ kSignatureGeneratedShell,
+ kSignatureGeneratedShellBadKey,
+ kSignatureGeneratedShellECKey,
+ kSignatureGeneratedShellRotateCl1,
+ kSignatureGeneratedShellRotateCl2,
+ };
+ ASSERT_TRUE(std::find(supported_test.begin(),
+ supported_test.end(),
+ signature_test) != supported_test.end());
+
+ string private_key_path;
if (signature_test == kSignatureGeneratedShellBadKey) {
ASSERT_TRUE(utils::MakeTempFile("key.XXXXXX", &private_key_path, nullptr));
+ } else if (signature_test == kSignatureGeneratedShellECKey) {
+ private_key_path = GetBuildArtifactsPath(kUnittestPrivateKeyECPath);
} else {
- ASSERT_TRUE(signature_test == kSignatureGeneratedShell ||
- signature_test == kSignatureGeneratedShellRotateCl1 ||
- signature_test == kSignatureGeneratedShellRotateCl2);
+ private_key_path = GetBuildArtifactsPath(kUnittestPrivateKeyPath);
}
ScopedPathUnlinker key_unlinker(private_key_path);
key_unlinker.set_should_remove(signature_test ==
kSignatureGeneratedShellBadKey);
+
// Generates a new private key that will not match the public key.
if (signature_test == kSignatureGeneratedShellBadKey) {
LOG(INFO) << "Generating a mismatched private key.";
@@ -246,78 +369,26 @@
fclose(fprikey);
RSA_free(rsa);
}
- int signature_size = GetSignatureSize(private_key_path);
- test_utils::ScopedTempFile payload_hash_file("hash.XXXXXX"),
- metadata_hash_file("hash.XXXXXX");
- string signature_size_string;
- if (signature_test == kSignatureGeneratedShellRotateCl1 ||
- signature_test == kSignatureGeneratedShellRotateCl2)
- signature_size_string =
- base::StringPrintf("%d:%d", signature_size, signature_size);
- else
- signature_size_string = base::StringPrintf("%d", signature_size);
- string delta_generator_path = GetBuildArtifactsPath("delta_generator");
- ASSERT_EQ(0,
- System(base::StringPrintf(
- "%s -in_file=%s -signature_size=%s -out_hash_file=%s "
- "-out_metadata_hash_file=%s",
- delta_generator_path.c_str(),
- payload_path.c_str(),
- signature_size_string.c_str(),
- payload_hash_file.path().c_str(),
- metadata_hash_file.path().c_str())));
- // Sign the payload hash.
- test_utils::ScopedTempFile payload_signature_file("signature.XXXXXX");
- SignHashToFile(payload_hash_file.path(),
- payload_signature_file.path(),
- private_key_path);
- string payload_sig_files = payload_signature_file.path();
- // Sign the metadata hash.
- test_utils::ScopedTempFile metadata_signature_file("signature.XXXXXX");
- SignHashToFile(metadata_hash_file.path(),
- metadata_signature_file.path(),
- private_key_path);
- string metadata_sig_files = metadata_signature_file.path();
-
- test_utils::ScopedTempFile payload_signature_file2("signature.XXXXXX");
- test_utils::ScopedTempFile metadata_signature_file2("signature.XXXXXX");
+ vector<string> private_key_paths = {private_key_path};
if (signature_test == kSignatureGeneratedShellRotateCl1 ||
signature_test == kSignatureGeneratedShellRotateCl2) {
- SignHashToFile(payload_hash_file.path(),
- payload_signature_file2.path(),
- GetBuildArtifactsPath(kUnittestPrivateKey2Path));
- SignHashToFile(metadata_hash_file.path(),
- metadata_signature_file2.path(),
- GetBuildArtifactsPath(kUnittestPrivateKey2Path));
- // Append second sig file to first path
- payload_sig_files += ":" + payload_signature_file2.path();
- metadata_sig_files += ":" + metadata_signature_file2.path();
+ private_key_paths.push_back(
+ GetBuildArtifactsPath(kUnittestPrivateKey2Path));
}
- ASSERT_EQ(
- 0,
- System(base::StringPrintf("%s -in_file=%s -payload_signature_file=%s "
- "-metadata_signature_file=%s -out_file=%s",
- delta_generator_path.c_str(),
- payload_path.c_str(),
- payload_sig_files.c_str(),
- metadata_sig_files.c_str(),
- payload_path.c_str())));
- int verify_result = System(base::StringPrintf(
- "%s -in_file=%s -public_key=%s -public_key_version=%d",
- delta_generator_path.c_str(),
- payload_path.c_str(),
- (signature_test == kSignatureGeneratedShellRotateCl2
- ? GetBuildArtifactsPath(kUnittestPublicKey2Path)
- : GetBuildArtifactsPath(kUnittestPublicKeyPath))
- .c_str(),
- signature_test == kSignatureGeneratedShellRotateCl2 ? 2 : 1));
- if (signature_test == kSignatureGeneratedShellBadKey) {
- ASSERT_NE(0, verify_result);
+ std::string public_key;
+ if (signature_test == kSignatureGeneratedShellRotateCl2) {
+ public_key = GetBuildArtifactsPath(kUnittestPublicKey2Path);
+ } else if (signature_test == kSignatureGeneratedShellECKey) {
+ public_key = GetBuildArtifactsPath(kUnittestPublicKeyECPath);
} else {
- ASSERT_EQ(0, verify_result);
+ public_key = GetBuildArtifactsPath(kUnittestPublicKeyPath);
}
+
+ bool verification_success = signature_test != kSignatureGeneratedShellBadKey;
+ SignGeneratedShellPayloadWithKeys(
+ payload_path, private_key_paths, public_key, verification_success);
}
static void GenerateDeltaFile(bool full_kernel,
@@ -549,8 +620,9 @@
if (signature_test == kSignatureGeneratedPlaceholder ||
signature_test == kSignatureGeneratedPlaceholderMismatch) {
- int signature_size =
- GetSignatureSize(GetBuildArtifactsPath(kUnittestPrivateKeyPath));
+ size_t signature_size;
+ ASSERT_TRUE(PayloadSigner::GetMaximumSignatureSize(
+ GetBuildArtifactsPath(kUnittestPrivateKeyPath), &signature_size));
LOG(INFO) << "Inserting placeholder signature.";
ASSERT_TRUE(InsertSignaturePlaceholder(
signature_size, state->delta_path, &state->metadata_size));
@@ -573,6 +645,7 @@
LOG(INFO) << "Signing payload.";
SignGeneratedPayload(state->delta_path, &state->metadata_size);
} else if (signature_test == kSignatureGeneratedShell ||
+ signature_test == kSignatureGeneratedShellECKey ||
signature_test == kSignatureGeneratedShellBadKey ||
signature_test == kSignatureGeneratedShellRotateCl1 ||
signature_test == kSignatureGeneratedShellRotateCl2) {
@@ -617,15 +690,16 @@
EXPECT_EQ(2, sigs_message.signatures_size());
else
EXPECT_EQ(1, sigs_message.signatures_size());
- const Signatures_Signature& signature = sigs_message.signatures(0);
- EXPECT_EQ(1U, signature.version());
+ const Signatures::Signature& signature = sigs_message.signatures(0);
- uint64_t expected_sig_data_length = 0;
vector<string> key_paths{GetBuildArtifactsPath(kUnittestPrivateKeyPath)};
- if (signature_test == kSignatureGeneratedShellRotateCl1 ||
- signature_test == kSignatureGeneratedShellRotateCl2) {
+ if (signature_test == kSignatureGeneratedShellECKey) {
+ key_paths = {GetBuildArtifactsPath(kUnittestPrivateKeyECPath)};
+ } else if (signature_test == kSignatureGeneratedShellRotateCl1 ||
+ signature_test == kSignatureGeneratedShellRotateCl2) {
key_paths.push_back(GetBuildArtifactsPath(kUnittestPrivateKey2Path));
}
+ uint64_t expected_sig_data_length = 0;
EXPECT_TRUE(PayloadSigner::SignatureBlobLength(
key_paths, &expected_sig_data_length));
EXPECT_EQ(expected_sig_data_length, manifest.signatures_size());
@@ -701,7 +775,12 @@
.WillRepeatedly(Return(true));
EXPECT_CALL(prefs, SetString(kPrefsUpdateStateSignedSHA256Context, _))
.WillRepeatedly(Return(true));
- EXPECT_CALL(prefs, SetBoolean(kPrefsDynamicPartitionMetadataUpdated, _))
+ EXPECT_CALL(prefs, SetString(kPrefsDynamicPartitionMetadataUpdated, _))
+ .WillRepeatedly(Return(true));
+ EXPECT_CALL(prefs,
+ SetString(kPrefsManifestBytes,
+ testing::SizeIs(state->metadata_signature_size +
+ state->metadata_size)))
.WillRepeatedly(Return(true));
if (op_hash_test == kValidOperationData && signature_test != kSignatureNone) {
EXPECT_CALL(prefs, SetString(kPrefsUpdateStateSignatureBlob, _))
@@ -733,7 +812,9 @@
ASSERT_TRUE(PayloadSigner::GetMetadataSignature(
state->delta.data(),
state->metadata_size,
- GetBuildArtifactsPath(kUnittestPrivateKeyPath),
+ (signature_test == kSignatureGeneratedShellECKey)
+ ? GetBuildArtifactsPath(kUnittestPrivateKeyECPath)
+ : GetBuildArtifactsPath(kUnittestPrivateKeyPath),
&install_plan->payloads[0].metadata_signature));
EXPECT_FALSE(install_plan->payloads[0].metadata_signature.empty());
@@ -744,9 +825,12 @@
install_plan,
&install_plan->payloads[0],
false /* interactive */);
- string public_key_path = GetBuildArtifactsPath(kUnittestPublicKeyPath);
+ string public_key_path = signature_test == kSignatureGeneratedShellECKey
+ ? GetBuildArtifactsPath(kUnittestPublicKeyECPath)
+ : GetBuildArtifactsPath(kUnittestPublicKeyPath);
EXPECT_TRUE(utils::FileExists(public_key_path.c_str()));
(*performer)->set_public_key_path(public_key_path);
+ (*performer)->set_update_certificates_path("");
EXPECT_EQ(static_cast<off_t>(state->image_size),
HashCalculator::RawHashOfFile(
@@ -948,13 +1032,13 @@
delete performer;
}
-TEST(DeltaPerformerIntegrationTest, RunAsRootSmallImageTest) {
+TEST_F(DeltaPerformerIntegrationTest, RunAsRootSmallImageTest) {
DoSmallImageTest(
false, false, -1, kSignatureGenerator, false, kSourceMinorPayloadVersion);
}
-TEST(DeltaPerformerIntegrationTest,
- RunAsRootSmallImageSignaturePlaceholderTest) {
+TEST_F(DeltaPerformerIntegrationTest,
+ RunAsRootSmallImageSignaturePlaceholderTest) {
DoSmallImageTest(false,
false,
-1,
@@ -963,8 +1047,8 @@
kSourceMinorPayloadVersion);
}
-TEST(DeltaPerformerIntegrationTest,
- RunAsRootSmallImageSignaturePlaceholderMismatchTest) {
+TEST_F(DeltaPerformerIntegrationTest,
+ RunAsRootSmallImageSignaturePlaceholderMismatchTest) {
DeltaState state;
GenerateDeltaFile(false,
false,
@@ -974,7 +1058,7 @@
kSourceMinorPayloadVersion);
}
-TEST(DeltaPerformerIntegrationTest, RunAsRootSmallImageChunksTest) {
+TEST_F(DeltaPerformerIntegrationTest, RunAsRootSmallImageChunksTest) {
DoSmallImageTest(false,
false,
kBlockSize,
@@ -983,31 +1067,28 @@
kSourceMinorPayloadVersion);
}
-TEST(DeltaPerformerIntegrationTest, RunAsRootFullKernelSmallImageTest) {
+TEST_F(DeltaPerformerIntegrationTest, RunAsRootFullKernelSmallImageTest) {
DoSmallImageTest(
true, false, -1, kSignatureGenerator, false, kSourceMinorPayloadVersion);
}
-TEST(DeltaPerformerIntegrationTest, RunAsRootFullSmallImageTest) {
- DoSmallImageTest(true,
- true,
- -1,
- kSignatureGenerator,
- true,
- kFullPayloadMinorVersion);
+TEST_F(DeltaPerformerIntegrationTest, RunAsRootFullSmallImageTest) {
+ DoSmallImageTest(
+ true, true, -1, kSignatureGenerator, true, kFullPayloadMinorVersion);
}
-TEST(DeltaPerformerIntegrationTest, RunAsRootSmallImageSignNoneTest) {
+TEST_F(DeltaPerformerIntegrationTest, RunAsRootSmallImageSignNoneTest) {
DoSmallImageTest(
false, false, -1, kSignatureNone, false, kSourceMinorPayloadVersion);
}
-TEST(DeltaPerformerIntegrationTest, RunAsRootSmallImageSignGeneratedTest) {
+TEST_F(DeltaPerformerIntegrationTest, RunAsRootSmallImageSignGeneratedTest) {
DoSmallImageTest(
false, false, -1, kSignatureGenerated, true, kSourceMinorPayloadVersion);
}
-TEST(DeltaPerformerIntegrationTest, RunAsRootSmallImageSignGeneratedShellTest) {
+TEST_F(DeltaPerformerIntegrationTest,
+ RunAsRootSmallImageSignGeneratedShellTest) {
DoSmallImageTest(false,
false,
-1,
@@ -1016,8 +1097,18 @@
kSourceMinorPayloadVersion);
}
-TEST(DeltaPerformerIntegrationTest,
- RunAsRootSmallImageSignGeneratedShellBadKeyTest) {
+TEST_F(DeltaPerformerIntegrationTest,
+ RunAsRootSmallImageSignGeneratedShellECKeyTest) {
+ DoSmallImageTest(false,
+ false,
+ -1,
+ kSignatureGeneratedShellECKey,
+ false,
+ kSourceMinorPayloadVersion);
+}
+
+TEST_F(DeltaPerformerIntegrationTest,
+ RunAsRootSmallImageSignGeneratedShellBadKeyTest) {
DoSmallImageTest(false,
false,
-1,
@@ -1026,8 +1117,8 @@
kSourceMinorPayloadVersion);
}
-TEST(DeltaPerformerIntegrationTest,
- RunAsRootSmallImageSignGeneratedShellRotateCl1Test) {
+TEST_F(DeltaPerformerIntegrationTest,
+ RunAsRootSmallImageSignGeneratedShellRotateCl1Test) {
DoSmallImageTest(false,
false,
-1,
@@ -1036,8 +1127,8 @@
kSourceMinorPayloadVersion);
}
-TEST(DeltaPerformerIntegrationTest,
- RunAsRootSmallImageSignGeneratedShellRotateCl2Test) {
+TEST_F(DeltaPerformerIntegrationTest,
+ RunAsRootSmallImageSignGeneratedShellRotateCl2Test) {
DoSmallImageTest(false,
false,
-1,
@@ -1046,18 +1137,137 @@
kSourceMinorPayloadVersion);
}
-TEST(DeltaPerformerIntegrationTest, RunAsRootSmallImageSourceOpsTest) {
- DoSmallImageTest(false,
- false,
- -1,
- kSignatureGenerator,
- false,
- kSourceMinorPayloadVersion);
+TEST_F(DeltaPerformerIntegrationTest, RunAsRootSmallImageSourceOpsTest) {
+ DoSmallImageTest(
+ false, false, -1, kSignatureGenerator, false, kSourceMinorPayloadVersion);
}
-TEST(DeltaPerformerIntegrationTest,
- RunAsRootMandatoryOperationHashMismatchTest) {
+TEST_F(DeltaPerformerIntegrationTest,
+ RunAsRootMandatoryOperationHashMismatchTest) {
DoOperationHashMismatchTest(kInvalidOperationData, true);
}
+TEST_F(DeltaPerformerIntegrationTest, ValidatePerPartitionTimestampSuccess) {
+ // The Manifest we are validating.
+ DeltaArchiveManifest manifest;
+
+ fake_hardware_.SetVersion("system", "5");
+ fake_hardware_.SetVersion("product", "99");
+ fake_hardware_.SetBuildTimestamp(1);
+
+ manifest.set_minor_version(kFullPayloadMinorVersion);
+ manifest.set_max_timestamp(2);
+ AddPartition(&manifest, "system", 10);
+ AddPartition(&manifest, "product", 100);
+
+ RunManifestValidation(
+ manifest, kMaxSupportedMajorPayloadVersion, ErrorCode::kSuccess);
+}
+
+TEST_F(DeltaPerformerIntegrationTest, ValidatePerPartitionTimestampFailure) {
+ // The Manifest we are validating.
+ DeltaArchiveManifest manifest;
+
+ fake_hardware_.SetVersion("system", "5");
+ fake_hardware_.SetVersion("product", "99");
+ fake_hardware_.SetBuildTimestamp(1);
+
+ manifest.set_minor_version(kFullPayloadMinorVersion);
+ manifest.set_max_timestamp(2);
+ AddPartition(&manifest, "system", 10);
+ AddPartition(&manifest, "product", 98);
+
+ RunManifestValidation(manifest,
+ kMaxSupportedMajorPayloadVersion,
+ ErrorCode::kPayloadTimestampError);
+}
+
+TEST_F(DeltaPerformerIntegrationTest,
+ ValidatePerPartitionTimestampMissingTimestamp) {
+ // The Manifest we are validating.
+ DeltaArchiveManifest manifest;
+
+ fake_hardware_.SetVersion("system", "5");
+ fake_hardware_.SetVersion("product", "99");
+ fake_hardware_.SetBuildTimestamp(1);
+
+ manifest.set_minor_version(kFullPayloadMinorVersion);
+ manifest.set_max_timestamp(2);
+ AddPartition(&manifest, "system", 10);
+ {
+ auto& partition = *manifest.add_partitions();
+    // For complete updates, a missing timestamp should not trigger a
+    // timestamp error.
+ partition.set_partition_name("product");
+ }
+
+ RunManifestValidation(
+ manifest, kMaxSupportedMajorPayloadVersion, ErrorCode::kSuccess);
+}
+
+TEST_F(DeltaPerformerIntegrationTest,
+ ValidatePerPartitionTimestampPartialUpdatePass) {
+ fake_hardware_.SetVersion("system", "5");
+ fake_hardware_.SetVersion("product", "99");
+
+ DeltaArchiveManifest manifest;
+ manifest.set_minor_version(kPartialUpdateMinorPayloadVersion);
+ manifest.set_partial_update(true);
+ AddPartition(&manifest, "product", 100);
+ RunManifestValidation(
+ manifest, kMaxSupportedMajorPayloadVersion, ErrorCode::kSuccess);
+}
+
+TEST_F(DeltaPerformerIntegrationTest,
+ ValidatePerPartitionTimestampPartialUpdateDowngrade) {
+ fake_hardware_.SetVersion("system", "5");
+ fake_hardware_.SetVersion("product", "99");
+
+ DeltaArchiveManifest manifest;
+ manifest.set_minor_version(kPartialUpdateMinorPayloadVersion);
+ manifest.set_partial_update(true);
+ AddPartition(&manifest, "product", 98);
+ RunManifestValidation(manifest,
+ kMaxSupportedMajorPayloadVersion,
+ ErrorCode::kPayloadTimestampError);
+}
+
+TEST_F(DeltaPerformerIntegrationTest,
+ ValidatePerPartitionTimestampPartialUpdateMissingVersion) {
+ fake_hardware_.SetVersion("system", "5");
+ fake_hardware_.SetVersion("product", "99");
+
+ DeltaArchiveManifest manifest;
+ manifest.set_minor_version(kPartialUpdateMinorPayloadVersion);
+ manifest.set_partial_update(true);
+ {
+ auto& partition = *manifest.add_partitions();
+    // For partial updates, a missing timestamp should trigger an error.
+ partition.set_partition_name("product");
+ // has_version() == false.
+ }
+ RunManifestValidation(manifest,
+ kMaxSupportedMajorPayloadVersion,
+ ErrorCode::kDownloadManifestParseError);
+}
+
+TEST_F(DeltaPerformerIntegrationTest,
+ ValidatePerPartitionTimestampPartialUpdateEmptyVersion) {
+ fake_hardware_.SetVersion("system", "5");
+ fake_hardware_.SetVersion("product", "99");
+
+ DeltaArchiveManifest manifest;
+ manifest.set_minor_version(kPartialUpdateMinorPayloadVersion);
+ manifest.set_partial_update(true);
+ {
+ auto& partition = *manifest.add_partitions();
+    // For partial updates, an invalid timestamp should trigger an error.
+ partition.set_partition_name("product");
+ partition.set_version("something");
+ }
+ RunManifestValidation(manifest,
+ kMaxSupportedMajorPayloadVersion,
+ ErrorCode::kDownloadManifestParseError);
+}
+
} // namespace chromeos_update_engine
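The timestamp tests above encode the per-partition downgrade policy that comes with partial updates. The following is a rough sketch of the policy they exercise, not the actual DeltaPerformer implementation; GetVersionForLogging and the use of base::StringToInt64 are assumptions made here for illustration.

ErrorCode CheckPerPartitionTimestamps(const DeltaArchiveManifest& manifest,
                                      HardwareInterface* hardware) {
  for (const auto& partition : manifest.partitions()) {
    if (!partition.has_version()) {
      // Tolerated for complete updates, rejected for partial updates.
      if (manifest.partial_update())
        return ErrorCode::kDownloadManifestParseError;
      continue;
    }
    int64_t new_version;
    if (!base::StringToInt64(partition.version(), &new_version))
      return ErrorCode::kDownloadManifestParseError;  // e.g. "something".
    int64_t old_version;
    if (!base::StringToInt64(
            hardware->GetVersionForLogging(partition.partition_name()),
            &old_version))
      continue;  // No device-side version recorded for this partition.
    if (new_version < old_version)
      return ErrorCode::kPayloadTimestampError;  // Downgrade, e.g. 98 < 99.
  }
  return ErrorCode::kSuccess;
}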
diff --git a/payload_consumer/delta_performer_unittest.cc b/payload_consumer/delta_performer_unittest.cc
index 3901195..449201c 100644
--- a/payload_consumer/delta_performer_unittest.cc
+++ b/payload_consumer/delta_performer_unittest.cc
@@ -36,9 +36,11 @@
#include <gtest/gtest.h>
#include "update_engine/common/constants.h"
+#include "update_engine/common/error_code.h"
#include "update_engine/common/fake_boot_control.h"
#include "update_engine/common/fake_hardware.h"
#include "update_engine/common/fake_prefs.h"
+#include "update_engine/common/hardware_interface.h"
#include "update_engine/common/test_utils.h"
#include "update_engine/common/utils.h"
#include "update_engine/payload_consumer/fake_file_descriptor.h"
@@ -161,6 +163,11 @@
install_plan_.target_slot = 1;
EXPECT_CALL(mock_delegate_, ShouldCancel(_))
.WillRepeatedly(testing::Return(false));
+ performer_.set_update_certificates_path("");
+ // Set the public key corresponding to the unittest private key.
+ string public_key_path = GetBuildArtifactsPath(kUnittestPublicKeyPath);
+ EXPECT_TRUE(utils::FileExists(public_key_path.c_str()));
+ performer_.set_public_key_path(public_key_path);
}
// Test helper placed where it can easily be friended from DeltaPerformer.
@@ -179,19 +186,22 @@
brillo::Blob GeneratePayload(const brillo::Blob& blob_data,
const vector<AnnotatedOperation>& aops,
- bool sign_payload) {
+ bool sign_payload,
+ PartitionConfig* old_part = nullptr) {
return GeneratePayload(blob_data,
aops,
sign_payload,
kMaxSupportedMajorPayloadVersion,
- kMaxSupportedMinorPayloadVersion);
+ kMaxSupportedMinorPayloadVersion,
+ old_part);
}
brillo::Blob GeneratePayload(const brillo::Blob& blob_data,
const vector<AnnotatedOperation>& aops,
bool sign_payload,
uint64_t major_version,
- uint32_t minor_version) {
+ uint32_t minor_version,
+ PartitionConfig* old_part = nullptr) {
test_utils::ScopedTempFile blob_file("Blob-XXXXXX");
EXPECT_TRUE(test_utils::WriteFileVector(blob_file.path(), blob_data));
@@ -202,24 +212,29 @@
PayloadFile payload;
EXPECT_TRUE(payload.Init(config));
- PartitionConfig old_part(kPartitionNameRoot);
+ std::unique_ptr<PartitionConfig> old_part_uptr;
+ if (!old_part) {
+ old_part_uptr = std::make_unique<PartitionConfig>(kPartitionNameRoot);
+ old_part = old_part_uptr.get();
+ }
if (minor_version != kFullPayloadMinorVersion) {
// When generating a delta payload we need to include the old partition
// information to mark it as a delta payload.
- old_part.path = "/dev/null";
- old_part.size = 0;
+ if (old_part->path.empty()) {
+ old_part->path = "/dev/null";
+ }
}
PartitionConfig new_part(kPartitionNameRoot);
new_part.path = "/dev/zero";
new_part.size = 1234;
- payload.AddPartition(old_part, new_part, aops);
+ payload.AddPartition(*old_part, new_part, aops, {});
// We include a kernel partition without operations.
- old_part.name = kPartitionNameKernel;
+ old_part->name = kPartitionNameKernel;
new_part.name = kPartitionNameKernel;
new_part.size = 0;
- payload.AddPartition(old_part, new_part, {});
+ payload.AddPartition(*old_part, new_part, {}, {});
test_utils::ScopedTempFile payload_file("Payload-XXXXXX");
string private_key =
@@ -235,7 +250,8 @@
}
brillo::Blob GenerateSourceCopyPayload(const brillo::Blob& copied_data,
- bool add_hash) {
+ bool add_hash,
+ PartitionConfig* old_part = nullptr) {
PayloadGenerationConfig config;
const uint64_t kDefaultBlockSize = config.block_size;
EXPECT_EQ(0U, copied_data.size() % kDefaultBlockSize);
@@ -249,7 +265,7 @@
if (add_hash)
aop.op.set_src_sha256_hash(src_hash.data(), src_hash.size());
- return GeneratePayload(brillo::Blob(), {aop}, false);
+ return GeneratePayload(brillo::Blob(), {aop}, false, old_part);
}
// Apply |payload_data| on partition specified in |source_path|.
@@ -316,7 +332,7 @@
// When filling in size in manifest, exclude the size of the 24-byte header.
uint64_t size_in_manifest = htobe64(actual_metadata_size - 24);
performer_.Write(&size_in_manifest, 8, &error_code);
- uint32_t signature_size = htobe64(10);
+ auto signature_size = htobe64(10);
bool result = performer_.Write(&signature_size, 4, &error_code);
if (expected_metadata_size == actual_metadata_size ||
!hash_checks_mandatory) {
@@ -389,12 +405,6 @@
expected_error = ErrorCode::kSuccess;
}
- // Use the public key corresponding to the private key used above to
- // sign the metadata.
- string public_key_path = GetBuildArtifactsPath(kUnittestPublicKeyPath);
- EXPECT_TRUE(utils::FileExists(public_key_path.c_str()));
- performer_.set_public_key_path(public_key_path);
-
// Init actual_error with an invalid value so that we make sure
// ParsePayloadMetadata properly populates it in all cases.
actual_error = ErrorCode::kUmaReportedMax;
@@ -581,11 +591,16 @@
EXPECT_TRUE(HashCalculator::RawHashOfData(expected_data, &src_hash));
aop.op.set_src_sha256_hash(src_hash.data(), src_hash.size());
- brillo::Blob payload_data = GeneratePayload(brillo::Blob(), {aop}, false);
-
test_utils::ScopedTempFile source("Source-XXXXXX");
EXPECT_TRUE(test_utils::WriteFileVector(source.path(), expected_data));
+ PartitionConfig old_part(kPartitionNameRoot);
+ old_part.path = source.path();
+ old_part.size = expected_data.size();
+
+ brillo::Blob payload_data =
+ GeneratePayload(brillo::Blob(), {aop}, false, &old_part);
+
EXPECT_EQ(expected_data, ApplyPayload(payload_data, source.path(), true));
}
@@ -604,11 +619,16 @@
EXPECT_TRUE(HashCalculator::RawHashOfData(src, &src_hash));
aop.op.set_src_sha256_hash(src_hash.data(), src_hash.size());
- brillo::Blob payload_data = GeneratePayload(puffdiff_payload, {aop}, false);
-
test_utils::ScopedTempFile source("Source-XXXXXX");
EXPECT_TRUE(test_utils::WriteFileVector(source.path(), src));
+ PartitionConfig old_part(kPartitionNameRoot);
+ old_part.path = source.path();
+ old_part.size = src.size();
+
+ brillo::Blob payload_data =
+ GeneratePayload(puffdiff_payload, {aop}, false, &old_part);
+
brillo::Blob dst(std::begin(dst_deflates), std::end(dst_deflates));
EXPECT_EQ(dst, ApplyPayload(payload_data, source.path(), true));
}
@@ -627,11 +647,16 @@
EXPECT_TRUE(HashCalculator::RawHashOfData(expected_data, &src_hash));
aop.op.set_src_sha256_hash(src_hash.data(), src_hash.size());
- brillo::Blob payload_data = GeneratePayload(brillo::Blob(), {aop}, false);
-
test_utils::ScopedTempFile source("Source-XXXXXX");
EXPECT_TRUE(test_utils::WriteFileVector(source.path(), actual_data));
+ PartitionConfig old_part(kPartitionNameRoot);
+ old_part.path = source.path();
+ old_part.size = actual_data.size();
+
+ brillo::Blob payload_data =
+ GeneratePayload(brillo::Blob(), {aop}, false, &old_part);
+
EXPECT_EQ(actual_data, ApplyPayload(payload_data, source.path(), false));
}
@@ -650,7 +675,12 @@
FakeFileDescriptor* fake_fec = SetFakeECCFile(kCopyOperationSize);
brillo::Blob expected_data = FakeFileDescriptorData(kCopyOperationSize);
- brillo::Blob payload_data = GenerateSourceCopyPayload(expected_data, true);
+ PartitionConfig old_part(kPartitionNameRoot);
+ old_part.path = source.path();
+ old_part.size = invalid_data.size();
+
+ brillo::Blob payload_data =
+ GenerateSourceCopyPayload(expected_data, true, &old_part);
EXPECT_EQ(expected_data, ApplyPayload(payload_data, source.path(), true));
// Verify that the fake_fec was actually used.
EXPECT_EQ(1U, fake_fec->GetReadOps().size());
@@ -671,8 +701,13 @@
// the expected.
FakeFileDescriptor* fake_fec = SetFakeECCFile(kCopyOperationSize / 2);
+ PartitionConfig old_part(kPartitionNameRoot);
+ old_part.path = source.path();
+ old_part.size = expected_data.size();
+
// The payload operation doesn't include an operation hash.
- brillo::Blob payload_data = GenerateSourceCopyPayload(expected_data, false);
+ brillo::Blob payload_data =
+ GenerateSourceCopyPayload(expected_data, false, &old_part);
EXPECT_EQ(expected_data, ApplyPayload(payload_data, source.path(), true));
// Verify that the fake_fec was attempted to be used. Since the file
// descriptor is shorter it can actually do more than one read to realize it
@@ -866,6 +901,24 @@
ErrorCode::kPayloadTimestampError);
}
+TEST_F(DeltaPerformerTest, ValidatePerPartitionTimestampSuccess) {
+ // The Manifest we are validating.
+ DeltaArchiveManifest manifest;
+
+ manifest.set_minor_version(kFullPayloadMinorVersion);
+ manifest.set_max_timestamp(2);
+ fake_hardware_.SetBuildTimestamp(1);
+ auto& partition = *manifest.add_partitions();
+ partition.set_version("10");
+ partition.set_partition_name("system");
+ fake_hardware_.SetVersion("system", "5");
+
+ RunManifestValidation(manifest,
+ kMaxSupportedMajorPayloadVersion,
+ InstallPayloadType::kFull,
+ ErrorCode::kSuccess);
+}
+
TEST_F(DeltaPerformerTest, BrilloMetadataSignatureSizeTest) {
unsigned int seed = time(nullptr);
EXPECT_TRUE(performer_.Write(kDeltaMagic, sizeof(kDeltaMagic)));
@@ -968,7 +1021,6 @@
brillo::Blob payload_data = GeneratePayload(
{}, {}, true, kBrilloMajorPayloadVersion, kSourceMinorPayloadVersion);
install_plan_.hash_checks_mandatory = true;
- performer_.set_public_key_path(GetBuildArtifactsPath(kUnittestPublicKeyPath));
payload_.size = payload_data.size();
ErrorCode error;
EXPECT_EQ(MetadataParseResult::kSuccess,
diff --git a/payload_consumer/download_action.cc b/payload_consumer/download_action.cc
index 45df5a9..ea99892 100644
--- a/payload_consumer/download_action.cc
+++ b/payload_consumer/download_action.cc
@@ -55,8 +55,7 @@
code_(ErrorCode::kSuccess),
delegate_(nullptr),
p2p_sharing_fd_(-1),
- p2p_visible_(true) {
-}
+ p2p_visible_(true) {}
DownloadAction::~DownloadAction() {}
@@ -203,18 +202,76 @@
StartDownloading();
}
+bool DownloadAction::LoadCachedManifest(int64_t manifest_size) {
+ std::string cached_manifest_bytes;
+ if (!prefs_->GetString(kPrefsManifestBytes, &cached_manifest_bytes) ||
+ cached_manifest_bytes.size() <= 0) {
+ LOG(INFO) << "Cached manifest data not found";
+ return false;
+ }
+ if (static_cast<int64_t>(cached_manifest_bytes.size()) != manifest_size) {
+ LOG(WARNING) << "Cached metadata has unexpected size: "
+ << cached_manifest_bytes.size() << " vs. " << manifest_size;
+ return false;
+ }
+
+ ErrorCode error;
+ const bool success =
+ delta_performer_->Write(
+ cached_manifest_bytes.data(), cached_manifest_bytes.size(), &error) &&
+ delta_performer_->IsManifestValid();
+ if (success) {
+ LOG(INFO) << "Successfully parsed cached manifest";
+ } else {
+ // If parsing the cached data failed, fall back to fetching it over HTTP.
+ LOG(WARNING) << "Cached manifest data failed to load, error code: "
+ << static_cast<int>(error) << ", " << error;
+ }
+ return success;
+}
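For context, the cached bytes come from the kPrefsManifestBytes pref that DeltaPerformer is now expected to populate while downloading (see the SetString expectation added in the integration test above). A minimal, illustrative round trip using FakePrefs; this snippet is not part of the patch and the sizes are stand-ins:

FakePrefs prefs;
std::string manifest_bytes(1024 + 256, 'x');  // metadata + signature stand-in
prefs.SetString(kPrefsManifestBytes, manifest_bytes);

std::string cached;
EXPECT_TRUE(prefs.GetString(kPrefsManifestBytes, &cached));
EXPECT_EQ(manifest_bytes.size(), cached.size());
// On resume, these bytes are replayed through delta_performer_->Write()
// instead of being re-fetched; a size mismatch or parse failure falls back to
// http_fetcher_->AddRange() for the metadata, exactly as before.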
+
void DownloadAction::StartDownloading() {
download_active_ = true;
http_fetcher_->ClearRanges();
+
+ if (writer_ && writer_ != delta_performer_.get()) {
+ LOG(INFO) << "Using writer for test.";
+ } else {
+ delta_performer_.reset(new DeltaPerformer(prefs_,
+ boot_control_,
+ hardware_,
+ delegate_,
+ &install_plan_,
+ payload_,
+ interactive_));
+ writer_ = delta_performer_.get();
+ }
+
if (install_plan_.is_resume &&
payload_ == &install_plan_.payloads[resume_payload_index_]) {
- // Resuming an update so fetch the update manifest metadata first.
+ // Resuming an update, so parse the cached manifest first.
int64_t manifest_metadata_size = 0;
int64_t manifest_signature_size = 0;
prefs_->GetInt64(kPrefsManifestMetadataSize, &manifest_metadata_size);
prefs_->GetInt64(kPrefsManifestSignatureSize, &manifest_signature_size);
- http_fetcher_->AddRange(base_offset_,
- manifest_metadata_size + manifest_signature_size);
+
+ // TODO(zhangkelvin) Add unittest for success and fallback route
+ if (!LoadCachedManifest(manifest_metadata_size + manifest_signature_size)) {
+ if (delta_performer_) {
+ // Create a new DeltaPerformer to reset all its state
+ delta_performer_ = std::make_unique<DeltaPerformer>(prefs_,
+ boot_control_,
+ hardware_,
+ delegate_,
+ &install_plan_,
+ payload_,
+ interactive_);
+ writer_ = delta_performer_.get();
+ }
+ http_fetcher_->AddRange(base_offset_,
+ manifest_metadata_size + manifest_signature_size);
+ }
+
// If there're remaining unprocessed data blobs, fetch them. Be careful not
// to request data beyond the end of the payload to avoid 416 HTTP response
// error codes.
@@ -238,18 +295,6 @@
}
}
- if (writer_ && writer_ != delta_performer_.get()) {
- LOG(INFO) << "Using writer for test.";
- } else {
- delta_performer_.reset(new DeltaPerformer(prefs_,
- boot_control_,
- hardware_,
- delegate_,
- &install_plan_,
- payload_,
- interactive_));
- writer_ = delta_performer_.get();
- }
if (system_state_ != nullptr) {
const PayloadStateInterface* payload_state = system_state_->payload_state();
string file_id = utils::CalculateP2PFileId(payload_->hash, payload_->size);
diff --git a/payload_consumer/download_action.h b/payload_consumer/download_action.h
index 1777e22..6928443 100644
--- a/payload_consumer/download_action.h
+++ b/payload_consumer/download_action.h
@@ -124,19 +124,20 @@
bool SetupP2PSharingFd();
// Writes |length| bytes of payload from |data| into |file_offset|
- // of the p2p file. Also does sanity checks; for example ensures we
+ // of the p2p file. Also does validation checks; for example ensures we
// don't end up with a file with holes in it.
//
// This method does nothing if SetupP2PSharingFd() hasn't been
// called or if CloseP2PSharingFd() has been called.
void WriteToP2PFile(const void* data, size_t length, off_t file_offset);
+ // Attempts to load cached manifest data from prefs.
+ // Returns true on success, false otherwise.
+ bool LoadCachedManifest(int64_t manifest_size);
+
// Start downloading the current payload using delta_performer.
void StartDownloading();
- // The InstallPlan passed in
- InstallPlan install_plan_;
-
// Pointer to the current payload in install_plan_.payloads.
InstallPlan::Payload* payload_{nullptr};
diff --git a/payload_consumer/download_action_android_unittest.cc b/payload_consumer/download_action_android_unittest.cc
new file mode 100644
index 0000000..f78845f
--- /dev/null
+++ b/payload_consumer/download_action_android_unittest.cc
@@ -0,0 +1,90 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "common/mock_action_processor.h"
+#include <gmock/gmock-actions.h>
+#include <gmock/gmock-function-mocker.h>
+#include <gmock/gmock-spec-builders.h>
+
+#include "payload_consumer/install_plan.h"
+#include "update_engine/common/action_pipe.h"
+#include "update_engine/common/boot_control_stub.h"
+#include "update_engine/common/constants.h"
+#include "update_engine/common/mock_http_fetcher.h"
+#include "update_engine/common/mock_prefs.h"
+#include "update_engine/common/test_utils.h"
+#include "update_engine/payload_consumer/download_action.h"
+
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+#include <memory>
+
+namespace chromeos_update_engine {
+using testing::_;
+using testing::DoAll;
+using testing::Return;
+using testing::SetArgPointee;
+
+class DownloadActionTest : public ::testing::Test {
+ public:
+ static constexpr int64_t METADATA_SIZE = 1024;
+ static constexpr int64_t SIGNATURE_SIZE = 256;
+ std::shared_ptr<ActionPipe<InstallPlan>> action_pipe{
+ new ActionPipe<InstallPlan>()};
+};
+
+TEST_F(DownloadActionTest, CacheManifestInvalid) {
+ std::string data(METADATA_SIZE + SIGNATURE_SIZE, '-');
+ MockPrefs prefs;
+ EXPECT_CALL(prefs, GetInt64(kPrefsUpdateStatePayloadIndex, _))
+ .WillRepeatedly(DoAll(SetArgPointee<1>(0L), Return(true)));
+ EXPECT_CALL(prefs, GetInt64(kPrefsManifestMetadataSize, _))
+ .WillRepeatedly(DoAll(SetArgPointee<1>(METADATA_SIZE), Return(true)));
+ EXPECT_CALL(prefs, GetInt64(kPrefsManifestSignatureSize, _))
+ .WillRepeatedly(DoAll(SetArgPointee<1>(SIGNATURE_SIZE), Return(true)));
+ EXPECT_CALL(prefs, GetInt64(kPrefsUpdateStateNextDataOffset, _))
+ .WillRepeatedly(DoAll(SetArgPointee<1>(0L), Return(true)));
+ EXPECT_CALL(prefs, GetString(kPrefsManifestBytes, _))
+ .WillRepeatedly(DoAll(SetArgPointee<1>(data), Return(true)));
+
+ BootControlStub boot_control;
+ MockHttpFetcher* http_fetcher =
+ new MockHttpFetcher(data.data(), data.size(), nullptr);
+ http_fetcher->set_delay(false);
+ InstallPlan install_plan;
+ auto& payload = install_plan.payloads.emplace_back();
+ install_plan.download_url = "http://fake_url.invalid";
+ payload.size = data.size();
+ payload.payload_urls.emplace_back("http://fake_url.invalid");
+ install_plan.is_resume = true;
+ action_pipe->set_contents(install_plan);
+
+ // Takes ownership of the passed-in HttpFetcher.
+ auto download_action =
+ std::make_unique<DownloadAction>(&prefs,
+ &boot_control,
+ nullptr,
+ nullptr,
+ http_fetcher,
+ false /* interactive */);
+ download_action->set_in_pipe(action_pipe);
+ MockActionProcessor mock_processor;
+ download_action->SetProcessor(&mock_processor);
+ download_action->PerformAction();
+ ASSERT_EQ(download_action->http_fetcher()->GetBytesDownloaded(), data.size());
+}
+
+} // namespace chromeos_update_engine
diff --git a/payload_consumer/filesystem_verifier_action.cc b/payload_consumer/filesystem_verifier_action.cc
index f9e7f81..61917ea 100644
--- a/payload_consumer/filesystem_verifier_action.cc
+++ b/payload_consumer/filesystem_verifier_action.cc
@@ -28,6 +28,7 @@
#include <base/bind.h>
#include <brillo/data_encoding.h>
#include <brillo/streams/file_stream.h>
+#include <base/strings/string_util.h>
#include "update_engine/common/utils.h"
@@ -77,11 +78,32 @@
return;
if (code == ErrorCode::kSuccess && HasOutputPipe())
SetOutputObject(install_plan_);
+ UpdateProgress(1.0);
processor_->ActionComplete(this, code);
}
+void FilesystemVerifierAction::UpdateProgress(double progress) {
+ if (delegate_ != nullptr) {
+ delegate_->OnVerifyProgressUpdate(progress);
+ }
+}
+
void FilesystemVerifierAction::StartPartitionHashing() {
if (partition_index_ == install_plan_.partitions.size()) {
+ if (!install_plan_.untouched_dynamic_partitions.empty()) {
+ LOG(INFO) << "Verifying extents of untouched dynamic partitions ["
+ << base::JoinString(install_plan_.untouched_dynamic_partitions,
+ ", ")
+ << "]";
+ if (!dynamic_control_->VerifyExtentsForUntouchedPartitions(
+ install_plan_.source_slot,
+ install_plan_.target_slot,
+ install_plan_.untouched_dynamic_partitions)) {
+ Cleanup(ErrorCode::kFilesystemVerifierError);
+ return;
+ }
+ }
+
Cleanup(ErrorCode::kSuccess);
return;
}
@@ -188,7 +210,6 @@
Cleanup(ErrorCode::kError);
return;
}
-
if (bytes_read == 0) {
LOG(ERROR) << "Failed to read the remaining " << partition_size_ - offset_
<< " bytes from partition "
@@ -203,6 +224,13 @@
return;
}
+ // We don't take the size of each partition into account; every partition
+ // contributes equally to the progress bar.
+ // TODO(zhangkelvin) Take the size of each partition into account.
+
+ UpdateProgress(
+ (static_cast<double>(offset_) / partition_size_ + partition_index_) /
+ install_plan_.partitions.size());
if (verifier_step_ == VerifierStep::kVerifyTargetHash &&
install_plan_.write_verity) {
if (!verity_writer_->Update(offset_, buffer_.data(), bytes_read)) {
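As a worked example of the progress formula above: with four partitions in the install plan, being halfway through the third one (partition_index_ == 2, offset_ == partition_size_ / 2) reports (0.5 + 2) / 4 = 0.625, i.e. 62.5% overall, regardless of how large each partition actually is.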
diff --git a/payload_consumer/filesystem_verifier_action.h b/payload_consumer/filesystem_verifier_action.h
index 83d6668..6a8823a 100644
--- a/payload_consumer/filesystem_verifier_action.h
+++ b/payload_consumer/filesystem_verifier_action.h
@@ -49,15 +49,34 @@
kVerifySourceHash,
};
+class FilesystemVerifyDelegate {
+ public:
+ virtual ~FilesystemVerifyDelegate() = default;
+ virtual void OnVerifyProgressUpdate(double progress) = 0;
+};
+
class FilesystemVerifierAction : public InstallPlanAction {
public:
- FilesystemVerifierAction()
- : verity_writer_(verity_writer::CreateVerityWriter()) {}
+ explicit FilesystemVerifierAction(
+ DynamicPartitionControlInterface* dynamic_control)
+ : verity_writer_(verity_writer::CreateVerityWriter()),
+ dynamic_control_(dynamic_control) {
+ CHECK(dynamic_control_);
+ }
+
~FilesystemVerifierAction() override = default;
void PerformAction() override;
void TerminateProcessing() override;
+ // Used for listening to progress updates
+ void set_delegate(FilesystemVerifyDelegate* delegate) {
+ this->delegate_ = delegate;
+ }
+ [[nodiscard]] FilesystemVerifyDelegate* get_delegate() const {
+ return this->delegate_;
+ }
+
// Debugging/logging
static std::string StaticType() { return "FilesystemVerifierAction"; }
std::string Type() const override { return StaticType(); }
@@ -85,6 +104,9 @@
// true if TerminateProcessing() was called.
void Cleanup(ErrorCode code);
+ // Invoke delegate callback to report progress, if delegate is not null
+ void UpdateProgress(double progress);
+
// The type of the partition that we are verifying.
VerifierStep verifier_step_ = VerifierStep::kVerifyTargetHash;
@@ -100,15 +122,15 @@
bool cancelled_{false}; // true if the action has been cancelled.
- // The install plan we're passed in via the input pipe.
- InstallPlan install_plan_;
-
// Calculates the hash of the data.
std::unique_ptr<HashCalculator> hasher_;
// Write verity data of the current partition.
std::unique_ptr<VerityWriterInterface> verity_writer_;
+ // Verifies the untouched dynamic partitions for partial updates.
+ DynamicPartitionControlInterface* dynamic_control_{nullptr};
+
// Reads and hashes this many bytes from the head of the input stream. When
// the partition starts to be hashed, this field is initialized from the
// corresponding InstallPlan::Partition size which is the total size
@@ -119,6 +141,9 @@
// The byte offset that we are reading in the current partition.
uint64_t offset_{0};
+ // An observer that observes progress updates of this action.
+ FilesystemVerifyDelegate* delegate_{};
+
DISALLOW_COPY_AND_ASSIGN(FilesystemVerifierAction);
};
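A minimal implementation of the new progress hook, shown only as an illustration (not part of the patch); the class, method, and setter names come from the header above:

class LoggingVerifyDelegate : public FilesystemVerifyDelegate {
 public:
  void OnVerifyProgressUpdate(double progress) override {
    LOG(INFO) << "Filesystem verification "
              << static_cast<int>(progress * 100) << "% complete";
  }
};

// Wiring it into the action:
//   FilesystemVerifierAction action(&dynamic_control);
//   LoggingVerifyDelegate delegate;
//   action.set_delegate(&delegate);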
diff --git a/payload_consumer/filesystem_verifier_action_unittest.cc b/payload_consumer/filesystem_verifier_action_unittest.cc
index cb33404..2971849 100644
--- a/payload_consumer/filesystem_verifier_action_unittest.cc
+++ b/payload_consumer/filesystem_verifier_action_unittest.cc
@@ -27,6 +27,7 @@
#include <brillo/secure_blob.h>
#include <gtest/gtest.h>
+#include "update_engine/common/dynamic_partition_control_stub.h"
#include "update_engine/common/hash_calculator.h"
#include "update_engine/common/test_utils.h"
#include "update_engine/common/utils.h"
@@ -51,6 +52,7 @@
brillo::FakeMessageLoop loop_{nullptr};
ActionProcessor processor_;
+ DynamicPartitionControlStub dynamic_control_stub_;
};
class FilesystemVerifierActionTestDelegate : public ActionProcessorDelegate {
@@ -188,7 +190,8 @@
void FilesystemVerifierActionTest::BuildActions(
const InstallPlan& install_plan) {
auto feeder_action = std::make_unique<ObjectFeederAction<InstallPlan>>();
- auto verifier_action = std::make_unique<FilesystemVerifierAction>();
+ auto verifier_action =
+ std::make_unique<FilesystemVerifierAction>(&dynamic_control_stub_);
auto collector_action =
std::make_unique<ObjectCollectorAction<InstallPlan>>();
@@ -217,7 +220,8 @@
};
TEST_F(FilesystemVerifierActionTest, MissingInputObjectTest) {
- auto copier_action = std::make_unique<FilesystemVerifierAction>();
+ auto copier_action =
+ std::make_unique<FilesystemVerifierAction>(&dynamic_control_stub_);
auto collector_action =
std::make_unique<ObjectCollectorAction<InstallPlan>>();
diff --git a/payload_consumer/install_plan.cc b/payload_consumer/install_plan.cc
index 4638fbe..c7ef7b2 100644
--- a/payload_consumer/install_plan.cc
+++ b/payload_consumer/install_plan.cc
@@ -87,12 +87,19 @@
base::StringPrintf(", system_version: %s", system_version.c_str());
}
+ string url_str = download_url;
+ if (base::StartsWith(
+ url_str, "fd://", base::CompareCase::INSENSITIVE_ASCII)) {
+ int fd = std::stoi(url_str.substr(strlen("fd://")));
+ url_str = utils::GetFilePath(fd);
+ }
+
LOG(INFO) << "InstallPlan: " << (is_resume ? "resume" : "new_update")
<< version_str
<< ", source_slot: " << BootControlInterface::SlotName(source_slot)
<< ", target_slot: " << BootControlInterface::SlotName(target_slot)
- << ", initial url: " << download_url << payloads_str
- << partitions_str << ", hash_checks_mandatory: "
+ << ", initial url: " << url_str << payloads_str << partitions_str
+ << ", hash_checks_mandatory: "
<< utils::ToString(hash_checks_mandatory)
<< ", powerwash_required: " << utils::ToString(powerwash_required)
<< ", switch_slot_on_reboot: "
@@ -105,7 +112,8 @@
bool InstallPlan::LoadPartitionsFromSlots(BootControlInterface* boot_control) {
bool result = true;
for (Partition& partition : partitions) {
- if (source_slot != BootControlInterface::kInvalidSlot) {
+ if (source_slot != BootControlInterface::kInvalidSlot &&
+ partition.source_size > 0) {
result = boot_control->GetPartitionDevice(
partition.name, source_slot, &partition.source_path) &&
result;
diff --git a/payload_consumer/install_plan.h b/payload_consumer/install_plan.h
index 7a95ab4..f04c650 100644
--- a/payload_consumer/install_plan.h
+++ b/payload_consumer/install_plan.h
@@ -158,6 +158,10 @@
// If not blank, a base-64 encoded representation of the PEM-encoded
// public key in the response.
std::string public_key_rsa;
+
+ // The names of dynamic partitions not included in the payload. Only used
+ // for partial updates.
+ std::vector<std::string> untouched_dynamic_partitions;
};
class InstallPlanAction;
@@ -195,9 +199,10 @@
typedef ActionTraits<InstallPlanAction>::InputObjectType InputObjectType;
typedef ActionTraits<InstallPlanAction>::OutputObjectType OutputObjectType;
- private:
+ protected:
InstallPlan install_plan_;
+ private:
DISALLOW_COPY_AND_ASSIGN(InstallPlanAction);
};
diff --git a/payload_consumer/partition_update_generator_android.cc b/payload_consumer/partition_update_generator_android.cc
new file mode 100644
index 0000000..25771e1
--- /dev/null
+++ b/payload_consumer/partition_update_generator_android.cc
@@ -0,0 +1,178 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_consumer/partition_update_generator_android.h"
+
+#include <filesystem>
+#include <memory>
+#include <utility>
+
+#include <android-base/properties.h>
+#include <android-base/strings.h>
+#include <base/logging.h>
+#include <base/strings/string_split.h>
+
+#include "update_engine/common/boot_control_interface.h"
+#include "update_engine/common/hash_calculator.h"
+#include "update_engine/common/utils.h"
+
+namespace chromeos_update_engine {
+
+PartitionUpdateGeneratorAndroid::PartitionUpdateGeneratorAndroid(
+ BootControlInterface* boot_control, size_t block_size)
+ : boot_control_(boot_control), block_size_(block_size) {}
+
+bool PartitionUpdateGeneratorAndroid::
+ GenerateOperationsForPartitionsNotInPayload(
+ BootControlInterface::Slot source_slot,
+ BootControlInterface::Slot target_slot,
+ const std::set<std::string>& partitions_in_payload,
+ std::vector<PartitionUpdate>* update_list) {
+ auto ab_partitions = GetAbPartitionsOnDevice();
+ if (ab_partitions.empty()) {
+ LOG(ERROR) << "Failed to load static a/b partitions";
+ return false;
+ }
+
+ std::vector<PartitionUpdate> partition_updates;
+ for (const auto& partition_name : ab_partitions) {
+ if (partitions_in_payload.find(partition_name) !=
+ partitions_in_payload.end()) {
+ LOG(INFO) << partition_name << " is already included in the payload";
+ continue;
+ }
+ bool is_source_dynamic = false;
+ std::string source_device;
+
+ TEST_AND_RETURN_FALSE(
+ boot_control_->GetPartitionDevice(partition_name,
+ source_slot,
+ true, /* not_in_payload */
+ &source_device,
+ &is_source_dynamic));
+ bool is_target_dynamic = false;
+ std::string target_device;
+ TEST_AND_RETURN_FALSE(boot_control_->GetPartitionDevice(
+ partition_name, target_slot, true, &target_device, &is_target_dynamic));
+
+ if (is_source_dynamic || is_target_dynamic) {
+ if (is_source_dynamic != is_target_dynamic) {
+ LOG(ERROR) << "Partition " << partition_name << " is expected to be a"
+ << " static partition. Source slot is "
+ << (is_source_dynamic ? "" : "not")
+ << " dynamic, and target slot " << target_slot << " is "
+ << (is_target_dynamic ? "" : "not") << " dynamic.";
+ return false;
+ } else {
+ continue;
+ }
+ }
+
+ auto source_size = utils::FileSize(source_device);
+ auto target_size = utils::FileSize(target_device);
+ if (source_size == -1 || target_size == -1 || source_size != target_size ||
+ source_size % block_size_ != 0) {
+ LOG(ERROR) << "Invalid partition size. source size " << source_size
+ << ", target size " << target_size;
+ return false;
+ }
+
+ auto partition_update = CreatePartitionUpdate(
+ partition_name, source_device, target_device, source_size);
+ if (!partition_update.has_value()) {
+ LOG(ERROR) << "Failed to create partition update for " << partition_name;
+ return false;
+ }
+ partition_updates.push_back(partition_update.value());
+ }
+ *update_list = std::move(partition_updates);
+ return true;
+}
+
+std::vector<std::string>
+PartitionUpdateGeneratorAndroid::GetAbPartitionsOnDevice() const {
+ auto partition_list_str =
+ android::base::GetProperty("ro.product.ab_ota_partitions", "");
+ return base::SplitString(partition_list_str,
+ ",",
+ base::TRIM_WHITESPACE,
+ base::SPLIT_WANT_NONEMPTY);
+}
+
+std::optional<PartitionUpdate>
+PartitionUpdateGeneratorAndroid::CreatePartitionUpdate(
+ const std::string& partition_name,
+ const std::string& source_device,
+ const std::string& target_device,
+ int64_t partition_size) {
+ PartitionUpdate partition_update;
+ partition_update.set_partition_name(partition_name);
+ auto old_partition_info = partition_update.mutable_old_partition_info();
+ old_partition_info->set_size(partition_size);
+
+ auto raw_hash = CalculateHashForPartition(source_device, partition_size);
+ if (!raw_hash.has_value()) {
+ LOG(ERROR) << "Failed to calculate hash for partition " << source_device
+ << " size: " << partition_size;
+ return {};
+ }
+ old_partition_info->set_hash(raw_hash->data(), raw_hash->size());
+ auto new_partition_info = partition_update.mutable_new_partition_info();
+ new_partition_info->set_size(partition_size);
+ new_partition_info->set_hash(raw_hash->data(), raw_hash->size());
+
+ auto copy_operation = partition_update.add_operations();
+ copy_operation->set_type(InstallOperation::SOURCE_COPY);
+ Extent copy_extent;
+ copy_extent.set_start_block(0);
+ copy_extent.set_num_blocks(partition_size / block_size_);
+
+ *copy_operation->add_src_extents() = copy_extent;
+ *copy_operation->add_dst_extents() = copy_extent;
+
+ return partition_update;
+}
+
+std::optional<brillo::Blob>
+PartitionUpdateGeneratorAndroid::CalculateHashForPartition(
+ const std::string& block_device, int64_t partition_size) {
+ // TODO(xunchang) Compute the hash with the ECC partitions first; the hashing
+ // behavior should match the one in SOURCE_COPY. Also, we don't have the
+ // correct hash for the source partition.
+ // An alternative is to verify that the written bytes match the read bytes
+ // during filesystem verification, which could probably save us a read of the
+ // partitions here.
+ brillo::Blob raw_hash;
+ if (HashCalculator::RawHashOfFile(block_device, partition_size, &raw_hash) !=
+ partition_size) {
+ LOG(ERROR) << "Failed to calculate hash for " << block_device;
+ return std::nullopt;
+ }
+
+ return raw_hash;
+}
+
+namespace partition_update_generator {
+std::unique_ptr<PartitionUpdateGeneratorInterface> Create(
+ BootControlInterface* boot_control, size_t block_size) {
+ CHECK(boot_control);
+
+ return std::unique_ptr<PartitionUpdateGeneratorInterface>(
+ new PartitionUpdateGeneratorAndroid(boot_control, block_size));
+}
+} // namespace partition_update_generator
+
+} // namespace chromeos_update_engine
diff --git a/payload_consumer/partition_update_generator_android.h b/payload_consumer/partition_update_generator_android.h
new file mode 100644
index 0000000..0330c99
--- /dev/null
+++ b/payload_consumer/partition_update_generator_android.h
@@ -0,0 +1,68 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_PAYLOAD_CONSUMER_PARTITION_UPDATE_GENERATOR_ANDROID_H_
+#define UPDATE_ENGINE_PAYLOAD_CONSUMER_PARTITION_UPDATE_GENERATOR_ANDROID_H_
+
+#include <optional>
+#include <set>
+#include <string>
+#include <vector>
+
+#include <brillo/secure_blob.h>
+#include <gtest/gtest_prod.h> // for FRIEND_TEST
+
+#include "update_engine/common/boot_control_interface.h"
+#include "update_engine/payload_consumer/partition_update_generator_interface.h"
+
+namespace chromeos_update_engine {
+
+class PartitionUpdateGeneratorAndroid
+ : public PartitionUpdateGeneratorInterface {
+ public:
+ PartitionUpdateGeneratorAndroid(BootControlInterface* boot_control,
+ size_t block_size);
+
+ bool GenerateOperationsForPartitionsNotInPayload(
+ BootControlInterface::Slot source_slot,
+ BootControlInterface::Slot target_slot,
+ const std::set<std::string>& partitions_in_payload,
+ std::vector<PartitionUpdate>* update_list) override;
+ virtual std::vector<std::string> GetAbPartitionsOnDevice() const;
+
+ private:
+ friend class PartitionUpdateGeneratorAndroidTest;
+ FRIEND_TEST(PartitionUpdateGeneratorAndroidTest, GetStaticPartitions);
+ FRIEND_TEST(PartitionUpdateGeneratorAndroidTest, CreatePartitionUpdate);
+
+ // Creates a PartitionUpdate object for a given partition to update from
+ // source to target. Returns std::nullopt on failure.
+ std::optional<PartitionUpdate> CreatePartitionUpdate(
+ const std::string& partition_name,
+ const std::string& source_device,
+ const std::string& target_device,
+ int64_t partition_size);
+
+ std::optional<brillo::Blob> CalculateHashForPartition(
+ const std::string& block_device, int64_t partition_size);
+
+ BootControlInterface* boot_control_;
+ size_t block_size_;
+};
+
+} // namespace chromeos_update_engine
+
+#endif
diff --git a/payload_consumer/partition_update_generator_android_unittest.cc b/payload_consumer/partition_update_generator_android_unittest.cc
new file mode 100644
index 0000000..86d025e
--- /dev/null
+++ b/payload_consumer/partition_update_generator_android_unittest.cc
@@ -0,0 +1,159 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_consumer/partition_update_generator_android.h"
+
+#include <map>
+#include <memory>
+#include <set>
+#include <utility>
+#include <vector>
+
+#include <android-base/strings.h>
+#include <brillo/secure_blob.h>
+#include <gtest/gtest.h>
+
+#include "update_engine/common/boot_control_interface.h"
+#include "update_engine/common/fake_boot_control.h"
+#include "update_engine/common/hash_calculator.h"
+#include "update_engine/common/test_utils.h"
+#include "update_engine/common/utils.h"
+
+namespace chromeos_update_engine {
+
+class FakePartitionUpdateGenerator : public PartitionUpdateGeneratorAndroid {
+ public:
+ std::vector<std::string> GetAbPartitionsOnDevice() const {
+ return ab_partitions_;
+ }
+ using PartitionUpdateGeneratorAndroid::PartitionUpdateGeneratorAndroid;
+ std::vector<std::string> ab_partitions_;
+};
+
+class PartitionUpdateGeneratorAndroidTest : public ::testing::Test {
+ protected:
+ void SetUp() override {
+ ASSERT_TRUE(device_dir_.CreateUniqueTempDir());
+ boot_control_ = std::make_unique<FakeBootControl>();
+ ASSERT_TRUE(boot_control_);
+ boot_control_->SetNumSlots(2);
+ generator_ = std::make_unique<FakePartitionUpdateGenerator>(
+ boot_control_.get(), 4096);
+ ASSERT_TRUE(generator_);
+ }
+
+ std::unique_ptr<FakePartitionUpdateGenerator> generator_;
+ std::unique_ptr<FakeBootControl> boot_control_;
+
+ base::ScopedTempDir device_dir_;
+ std::map<std::string, std::string> device_map_;
+
+ void SetUpBlockDevice(const std::map<std::string, std::string>& contents) {
+ std::set<std::string> partition_base_names;
+ for (const auto& [name, content] : contents) {
+ auto path = device_dir_.GetPath().value() + "/" + name;
+ ASSERT_TRUE(
+ utils::WriteFile(path.c_str(), content.data(), content.size()));
+
+ if (android::base::EndsWith(name, "_a")) {
+ auto prefix = name.substr(0, name.size() - 2);
+ boot_control_->SetPartitionDevice(prefix, 0, path);
+ partition_base_names.emplace(prefix);
+ } else if (android::base::EndsWith(name, "_b")) {
+ auto prefix = name.substr(0, name.size() - 2);
+ boot_control_->SetPartitionDevice(prefix, 1, path);
+ partition_base_names.emplace(prefix);
+ }
+ device_map_[name] = std::move(path);
+ }
+ generator_->ab_partitions_ = {partition_base_names.begin(),
+ partition_base_names.end()};
+ }
+
+ void CheckPartitionUpdate(const std::string& name,
+ const std::string& content,
+ const PartitionUpdate& partition_update) {
+ ASSERT_EQ(name, partition_update.partition_name());
+
+ brillo::Blob out_hash;
+ ASSERT_TRUE(HashCalculator::RawHashOfBytes(
+ content.data(), content.size(), &out_hash));
+ ASSERT_EQ(std::string(out_hash.begin(), out_hash.end()),
+ partition_update.old_partition_info().hash());
+ ASSERT_EQ(std::string(out_hash.begin(), out_hash.end()),
+ partition_update.new_partition_info().hash());
+
+ ASSERT_EQ(1, partition_update.operations_size());
+ const auto& operation = partition_update.operations(0);
+ ASSERT_EQ(InstallOperation::SOURCE_COPY, operation.type());
+
+ ASSERT_EQ(1, operation.src_extents_size());
+ ASSERT_EQ(0u, operation.src_extents(0).start_block());
+ ASSERT_EQ(content.size() / 4096, operation.src_extents(0).num_blocks());
+
+ ASSERT_EQ(1, operation.dst_extents_size());
+ ASSERT_EQ(0u, operation.dst_extents(0).start_block());
+ ASSERT_EQ(content.size() / 4096, operation.dst_extents(0).num_blocks());
+ }
+};
+
+TEST_F(PartitionUpdateGeneratorAndroidTest, CreatePartitionUpdate) {
+ auto system_contents = std::string(4096 * 2, '1');
+ auto boot_contents = std::string(4096 * 5, 'b');
+ std::map<std::string, std::string> contents = {
+ {"system_a", system_contents},
+ {"system_b", std::string(4096 * 2, 0)},
+ {"boot_a", boot_contents},
+ {"boot_b", std::string(4096 * 5, 0)},
+ };
+ SetUpBlockDevice(contents);
+
+ auto system_partition_update = generator_->CreatePartitionUpdate(
+ "system", device_map_["system_a"], device_map_["system_b"], 4096 * 2);
+ ASSERT_TRUE(system_partition_update.has_value());
+ CheckPartitionUpdate(
+ "system", system_contents, system_partition_update.value());
+
+ auto boot_partition_update = generator_->CreatePartitionUpdate(
+ "boot", device_map_["boot_a"], device_map_["boot_b"], 4096 * 5);
+ ASSERT_TRUE(boot_partition_update.has_value());
+ CheckPartitionUpdate("boot", boot_contents, boot_partition_update.value());
+}
+
+TEST_F(PartitionUpdateGeneratorAndroidTest, GenerateOperations) {
+ auto system_contents = std::string(4096 * 10, '2');
+ auto boot_contents = std::string(4096 * 5, 'b');
+ std::map<std::string, std::string> contents = {
+ {"system_a", system_contents},
+ {"system_b", std::string(4096 * 10, 0)},
+ {"boot_a", boot_contents},
+ {"boot_b", std::string(4096 * 5, 0)},
+ {"vendor_a", ""},
+ {"vendor_b", ""},
+ {"persist", ""},
+ };
+ SetUpBlockDevice(contents);
+
+ std::vector<PartitionUpdate> update_list;
+ ASSERT_TRUE(generator_->GenerateOperationsForPartitionsNotInPayload(
+ 0, 1, std::set<std::string>{"vendor"}, &update_list));
+
+ ASSERT_EQ(2u, update_list.size());
+ CheckPartitionUpdate("boot", boot_contents, update_list[0]);
+ CheckPartitionUpdate("system", system_contents, update_list[1]);
+}
+
+} // namespace chromeos_update_engine
diff --git a/payload_consumer/partition_update_generator_interface.h b/payload_consumer/partition_update_generator_interface.h
new file mode 100644
index 0000000..3fa3dfb
--- /dev/null
+++ b/payload_consumer/partition_update_generator_interface.h
@@ -0,0 +1,55 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_PAYLOAD_CONSUMER_PARTITION_UPDATE_GENERATOR_INTERFACE_H_
+#define UPDATE_ENGINE_PAYLOAD_CONSUMER_PARTITION_UPDATE_GENERATOR_INTERFACE_H_
+
+#include <memory>
+#include <set>
+#include <string>
+#include <vector>
+
+#include "update_engine/common/boot_control_interface.h"
+
+namespace chromeos_update_engine {
+class PartitionUpdate;
+
+// This class identifies the partitions that are not included in the payload
+// of a partial A/B update, and generates additional operations for those
+// partitions to make the update complete.
+class PartitionUpdateGeneratorInterface {
+ public:
+ virtual ~PartitionUpdateGeneratorInterface() = default;
+
+ // Adds a PartitionUpdate for each partition not included in the payload. For
+ // static partitions, it generates SOURCE_COPY operations to copy the bytes
+ // from the source slot to the target slot. For dynamic partitions, it only
+ // calculates the partition hash for later filesystem verification.
+ virtual bool GenerateOperationsForPartitionsNotInPayload(
+ BootControlInterface::Slot source_slot,
+ BootControlInterface::Slot target_slot,
+ const std::set<std::string>& partitions_in_payload,
+ std::vector<PartitionUpdate>* update_list) = 0;
+};
+
+namespace partition_update_generator {
+std::unique_ptr<PartitionUpdateGeneratorInterface> Create(
+ BootControlInterface* boot_control, size_t block_size);
+}
+
+} // namespace chromeos_update_engine
+
+#endif
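A hedged usage sketch of this interface; the surrounding variables (boot_control, manifest_block_size, source_slot, target_slot) are assumptions for illustration, not code from the patch:

auto generator =
    partition_update_generator::Create(boot_control, manifest_block_size);
std::vector<PartitionUpdate> extra_partition_updates;
if (!generator->GenerateOperationsForPartitionsNotInPayload(
        source_slot,
        target_slot,
        /*partitions_in_payload=*/{"product"},
        &extra_partition_updates)) {
  LOG(ERROR) << "Failed to generate updates for partitions not in the payload";
}
// |extra_partition_updates| now carries SOURCE_COPY operations for the
// remaining static A/B partitions; the caller appends them to the manifest's
// partition list so the partial update covers every slot-suffixed partition.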
diff --git a/payload_consumer/partition_update_generator_stub.cc b/payload_consumer/partition_update_generator_stub.cc
new file mode 100644
index 0000000..cfbd5e1
--- /dev/null
+++ b/payload_consumer/partition_update_generator_stub.cc
@@ -0,0 +1,38 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_consumer/partition_update_generator_stub.h"
+
+#include <memory>
+
+namespace chromeos_update_engine {
+
+bool PartitionUpdateGeneratorStub::GenerateOperationsForPartitionsNotInPayload(
+ chromeos_update_engine::BootControlInterface::Slot source_slot,
+ chromeos_update_engine::BootControlInterface::Slot target_slot,
+ const std::set<std::string>& partitions_in_payload,
+ std::vector<PartitionUpdate>* update_list) {
+ return true;
+}
+
+namespace partition_update_generator {
+std::unique_ptr<PartitionUpdateGeneratorInterface> Create(
+ BootControlInterface* boot_control, size_t block_size) {
+ return std::make_unique<PartitionUpdateGeneratorStub>();
+}
+} // namespace partition_update_generator
+
+} // namespace chromeos_update_engine
diff --git a/payload_consumer/partition_update_generator_stub.h b/payload_consumer/partition_update_generator_stub.h
new file mode 100644
index 0000000..282875e
--- /dev/null
+++ b/payload_consumer/partition_update_generator_stub.h
@@ -0,0 +1,40 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_PAYLOAD_CONSUMER_PARTITION_UPDATE_GENERATOR_STUB_H_
+#define UPDATE_ENGINE_PAYLOAD_CONSUMER_PARTITION_UPDATE_GENERATOR_STUB_H_
+
+#include <set>
+#include <string>
+#include <vector>
+
+#include "update_engine/common/boot_control_interface.h"
+#include "update_engine/payload_consumer/partition_update_generator_interface.h"
+
+namespace chromeos_update_engine {
+class PartitionUpdateGeneratorStub : public PartitionUpdateGeneratorInterface {
+ public:
+ PartitionUpdateGeneratorStub() = default;
+ bool GenerateOperationsForPartitionsNotInPayload(
+ BootControlInterface::Slot source_slot,
+ BootControlInterface::Slot target_slot,
+ const std::set<std::string>& partitions_in_payload,
+ std::vector<PartitionUpdate>* update_list) override;
+};
+
+} // namespace chromeos_update_engine
+
+#endif
diff --git a/payload_consumer/payload_constants.cc b/payload_consumer/payload_constants.cc
index 299bcfc..663ab81 100644
--- a/payload_consumer/payload_constants.cc
+++ b/payload_consumer/payload_constants.cc
@@ -33,9 +33,11 @@
const uint32_t kBrotliBsdiffMinorPayloadVersion = 4;
const uint32_t kPuffdiffMinorPayloadVersion = 5;
const uint32_t kVerityMinorPayloadVersion = 6;
+const uint32_t kPartialUpdateMinorPayloadVersion = 7;
const uint32_t kMinSupportedMinorPayloadVersion = kSourceMinorPayloadVersion;
-const uint32_t kMaxSupportedMinorPayloadVersion = kVerityMinorPayloadVersion;
+const uint32_t kMaxSupportedMinorPayloadVersion =
+ kPartialUpdateMinorPayloadVersion;
const uint64_t kMaxPayloadHeaderSize = 24;
@@ -44,7 +46,7 @@
const char kDeltaMagic[4] = {'C', 'r', 'A', 'U'};
-const char* InstallOperationTypeName(InstallOperation_Type op_type) {
+const char* InstallOperationTypeName(InstallOperation::Type op_type) {
switch (op_type) {
case InstallOperation::REPLACE:
return "REPLACE";
diff --git a/payload_consumer/payload_constants.h b/payload_consumer/payload_constants.h
index 888fa2a..03647ee 100644
--- a/payload_consumer/payload_constants.h
+++ b/payload_consumer/payload_constants.h
@@ -56,6 +56,9 @@
// The minor version that allows Verity hash tree and FEC generation.
extern const uint32_t kVerityMinorPayloadVersion;
+// The minor version that allows partial updates, e.g. a kernel-only update.
+extern const uint32_t kPartialUpdateMinorPayloadVersion;
+
// The minimum and maximum supported minor version.
extern const uint32_t kMinSupportedMinorPayloadVersion;
extern const uint32_t kMaxSupportedMinorPayloadVersion;
@@ -77,7 +80,7 @@
const uint64_t kSparseHole = std::numeric_limits<uint64_t>::max();
// Return the name of the operation type.
-const char* InstallOperationTypeName(InstallOperation_Type op_type);
+const char* InstallOperationTypeName(InstallOperation::Type op_type);
} // namespace chromeos_update_engine
diff --git a/payload_consumer/payload_metadata.cc b/payload_consumer/payload_metadata.cc
index b83001a..2cb73eb 100644
--- a/payload_consumer/payload_metadata.cc
+++ b/payload_consumer/payload_metadata.cc
@@ -18,8 +18,10 @@
#include <endian.h>
+#include <base/strings/stringprintf.h>
#include <brillo/data_encoding.h>
+#include "update_engine/common/constants.h"
#include "update_engine/common/hash_calculator.h"
#include "update_engine/common/utils.h"
#include "update_engine/payload_consumer/payload_constants.h"
@@ -54,7 +56,18 @@
// Validate the magic string.
if (memcmp(payload.data(), kDeltaMagic, sizeof(kDeltaMagic)) != 0) {
- LOG(ERROR) << "Bad payload format -- invalid delta magic.";
+ LOG(ERROR) << "Bad payload format -- invalid delta magic: "
+ << base::StringPrintf("%02x%02x%02x%02x",
+ payload[0],
+ payload[1],
+ payload[2],
+ payload[3])
+ << " Expected: "
+ << base::StringPrintf("%02x%02x%02x%02x",
+ kDeltaMagic[0],
+ kDeltaMagic[1],
+ kDeltaMagic[2],
+ kDeltaMagic[3]);
*error = ErrorCode::kDownloadInvalidMetadataMagicString;
return MetadataParseResult::kError;
}
@@ -131,12 +144,16 @@
ErrorCode PayloadMetadata::ValidateMetadataSignature(
const brillo::Blob& payload,
- const std::string& metadata_signature,
- const std::string& pem_public_key) const {
+ const string& metadata_signature,
+ const PayloadVerifier& payload_verifier) const {
if (payload.size() < metadata_size_ + metadata_signature_size_)
return ErrorCode::kDownloadMetadataSignatureError;
- brillo::Blob metadata_signature_blob, metadata_signature_protobuf_blob;
+ // A single signature in raw bytes.
+ brillo::Blob metadata_signature_blob;
+  // The serialized Signatures protobuf message stored in a major version >= 2
+  // payload; it may contain multiple signatures.
+ string metadata_signature_protobuf;
if (!metadata_signature.empty()) {
// Convert base64-encoded signature to raw bytes.
if (!brillo::data_encoding::Base64Decode(metadata_signature,
@@ -146,49 +163,43 @@
return ErrorCode::kDownloadMetadataSignatureError;
}
} else {
- metadata_signature_protobuf_blob.assign(
+ metadata_signature_protobuf.assign(
payload.begin() + metadata_size_,
payload.begin() + metadata_size_ + metadata_signature_size_);
}
- if (metadata_signature_blob.empty() &&
- metadata_signature_protobuf_blob.empty()) {
+ if (metadata_signature_blob.empty() && metadata_signature_protobuf.empty()) {
LOG(ERROR) << "Missing mandatory metadata signature in both Omaha "
<< "response and payload.";
return ErrorCode::kDownloadMetadataSignatureMissingError;
}
- brillo::Blob calculated_metadata_hash;
+ brillo::Blob metadata_hash;
if (!HashCalculator::RawHashOfBytes(
- payload.data(), metadata_size_, &calculated_metadata_hash)) {
+ payload.data(), metadata_size_, &metadata_hash)) {
LOG(ERROR) << "Unable to compute actual hash of manifest";
return ErrorCode::kDownloadMetadataSignatureVerificationError;
}
- PayloadVerifier::PadRSA2048SHA256Hash(&calculated_metadata_hash);
- if (calculated_metadata_hash.empty()) {
- LOG(ERROR) << "Computed actual hash of metadata is empty.";
+ if (metadata_hash.size() != kSHA256Size) {
+ LOG(ERROR) << "Computed actual hash of metadata has incorrect size: "
+ << metadata_hash.size();
return ErrorCode::kDownloadMetadataSignatureVerificationError;
}
if (!metadata_signature_blob.empty()) {
- brillo::Blob expected_metadata_hash;
- if (!PayloadVerifier::GetRawHashFromSignature(
- metadata_signature_blob, pem_public_key, &expected_metadata_hash)) {
- LOG(ERROR) << "Unable to compute expected hash from metadata signature";
- return ErrorCode::kDownloadMetadataSignatureError;
- }
- if (calculated_metadata_hash != expected_metadata_hash) {
- LOG(ERROR) << "Manifest hash verification failed. Expected hash = ";
- utils::HexDumpVector(expected_metadata_hash);
- LOG(ERROR) << "Calculated hash = ";
- utils::HexDumpVector(calculated_metadata_hash);
+ brillo::Blob decrypted_signature;
+ if (!payload_verifier.VerifyRawSignature(
+ metadata_signature_blob, metadata_hash, &decrypted_signature)) {
+ LOG(ERROR) << "Manifest hash verification failed. Decrypted hash = ";
+ utils::HexDumpVector(decrypted_signature);
+ LOG(ERROR) << "Calculated hash before padding = ";
+ utils::HexDumpVector(metadata_hash);
return ErrorCode::kDownloadMetadataSignatureMismatch;
}
} else {
- if (!PayloadVerifier::VerifySignature(metadata_signature_protobuf_blob,
- pem_public_key,
- calculated_metadata_hash)) {
+ if (!payload_verifier.VerifySignature(metadata_signature_protobuf,
+ metadata_hash)) {
LOG(ERROR) << "Manifest hash verification failed.";
return ErrorCode::kDownloadMetadataSignatureMismatch;
}
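As the comments above note, the metadata signature can come from either of two places: a base64-encoded signature in the Omaha response, or a serialized Signatures protobuf embedded right after the metadata in the payload itself. A minimal sketch of that split is shown below; the struct and function names are placeholders for illustration, not update_engine API.

#include <string>
#include <brillo/data_encoding.h>
#include <brillo/secure_blob.h>

struct MetadataSignatureSources {
  brillo::Blob raw_signature;       // Single signature from the Omaha response.
  std::string signatures_protobuf;  // Serialized Signatures message from the payload.
};

bool ExtractSignatureSources(const std::string& omaha_signature_base64,
                             const brillo::Blob& payload,
                             size_t metadata_size,
                             size_t metadata_signature_size,
                             MetadataSignatureSources* out) {
  if (!omaha_signature_base64.empty()) {
    // Out-of-band signature: base64-decode it into raw bytes.
    return brillo::data_encoding::Base64Decode(omaha_signature_base64,
                                               &out->raw_signature);
  }
  // In-band signature: the Signatures protobuf sits right after the metadata.
  out->signatures_protobuf.assign(
      payload.begin() + metadata_size,
      payload.begin() + metadata_size + metadata_signature_size);
  return true;
}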
diff --git a/payload_consumer/payload_metadata.h b/payload_consumer/payload_metadata.h
index be43c41..8b36f53 100644
--- a/payload_consumer/payload_metadata.h
+++ b/payload_consumer/payload_metadata.h
@@ -27,6 +27,7 @@
#include "update_engine/common/error_code.h"
#include "update_engine/common/platform_constants.h"
+#include "update_engine/payload_consumer/payload_verifier.h"
#include "update_engine/update_metadata.pb.h"
namespace chromeos_update_engine {
@@ -62,12 +63,13 @@
// |metadata_signature| (if present) or the metadata signature in payload
// itself (if present). Returns ErrorCode::kSuccess on match or a suitable
// error code otherwise. This method must be called before any part of the
- // metadata is parsed so that a man-in-the-middle attack on the SSL connection
+ // metadata is parsed so that an on-path attack on the SSL connection
// to the payload server doesn't exploit any vulnerability in the code that
// parses the protocol buffer.
- ErrorCode ValidateMetadataSignature(const brillo::Blob& payload,
- const std::string& metadata_signature,
- const std::string& pem_public_key) const;
+ ErrorCode ValidateMetadataSignature(
+ const brillo::Blob& payload,
+ const std::string& metadata_signature,
+ const PayloadVerifier& payload_verifier) const;
// Returns the major payload version. If the version was not yet parsed,
// returns zero.
diff --git a/payload_consumer/payload_verifier.cc b/payload_consumer/payload_verifier.cc
index 2f7c133..7fd2b8e 100644
--- a/payload_consumer/payload_verifier.cc
+++ b/payload_consumer/payload_verifier.cc
@@ -16,13 +16,16 @@
#include "update_engine/payload_consumer/payload_verifier.h"
+#include <utility>
#include <vector>
#include <base/logging.h>
#include <openssl/pem.h>
+#include "update_engine/common/constants.h"
#include "update_engine/common/hash_calculator.h"
#include "update_engine/common/utils.h"
+#include "update_engine/payload_consumer/certificate_parser_interface.h"
#include "update_engine/update_metadata.pb.h"
using std::string;
@@ -31,61 +34,73 @@
namespace {
-// The following is a standard PKCS1-v1_5 padding for SHA256 signatures, as
-// defined in RFC3447. It is prepended to the actual signature (32 bytes) to
-// form a sequence of 256 bytes (2048 bits) that is amenable to RSA signing. The
-// padded hash will look as follows:
+// The ASN.1 DigestInfo prefix for encoding a SHA256 digest. The complete
+// 51-byte DigestInfo consists of the 19-byte kSHA256DigestInfoPrefix below
+// followed by the 32-byte SHA256 digest.
//
-// 0x00 0x01 0xff ... 0xff 0x00 ASN1HEADER SHA256HASH
-// |--------------205-----------||----19----||----32----|
-//
-// where ASN1HEADER is the ASN.1 description of the signed data. The complete 51
-// bytes of actual data (i.e. the ASN.1 header complete with the hash) are
-// packed as follows:
-//
-// SEQUENCE(2+49) {
+// SEQUENCE(2+49) {
// SEQUENCE(2+13) {
-// OBJECT(2+9) id-sha256
-// NULL(2+0)
+// OBJECT(2+9) id-sha256
+// NULL(2+0)
// }
// OCTET STRING(2+32) <actual signature bytes...>
-// }
-// clang-format off
-const uint8_t kRSA2048SHA256Padding[] = {
- // PKCS1-v1_5 padding
- 0x00, 0x01, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0x00,
- // ASN.1 header
- 0x30, 0x31, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03,
- 0x04, 0x02, 0x01, 0x05, 0x00, 0x04, 0x20,
+// }
+const uint8_t kSHA256DigestInfoPrefix[] = {
+ 0x30, 0x31, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01,
+ 0x65, 0x03, 0x04, 0x02, 0x01, 0x05, 0x00, 0x04, 0x20,
};
-// clang-format on
} // namespace
-bool PayloadVerifier::VerifySignature(const brillo::Blob& signature_blob,
- const string& pem_public_key,
- const brillo::Blob& hash_data) {
+std::unique_ptr<PayloadVerifier> PayloadVerifier::CreateInstance(
+ const std::string& pem_public_key) {
+ std::unique_ptr<BIO, decltype(&BIO_free)> bp(
+ BIO_new_mem_buf(pem_public_key.data(), pem_public_key.size()), BIO_free);
+ if (!bp) {
+ LOG(ERROR) << "Failed to read " << pem_public_key << " into buffer.";
+ return nullptr;
+ }
+
+ auto pub_key = std::unique_ptr<EVP_PKEY, decltype(&EVP_PKEY_free)>(
+ PEM_read_bio_PUBKEY(bp.get(), nullptr, nullptr, nullptr), EVP_PKEY_free);
+ if (!pub_key) {
+ LOG(ERROR) << "Failed to parse the public key in: " << pem_public_key;
+ return nullptr;
+ }
+
+ std::vector<std::unique_ptr<EVP_PKEY, decltype(&EVP_PKEY_free)>> keys;
+ keys.emplace_back(std::move(pub_key));
+ return std::unique_ptr<PayloadVerifier>(new PayloadVerifier(std::move(keys)));
+}
+
+std::unique_ptr<PayloadVerifier> PayloadVerifier::CreateInstanceFromZipPath(
+ const std::string& certificate_zip_path) {
+ auto parser = CreateCertificateParser();
+ if (!parser) {
+ LOG(ERROR) << "Failed to create certificate parser from "
+ << certificate_zip_path;
+ return nullptr;
+ }
+
+ std::vector<std::unique_ptr<EVP_PKEY, decltype(&EVP_PKEY_free)>> public_keys;
+ if (!parser->ReadPublicKeysFromCertificates(certificate_zip_path,
+ &public_keys) ||
+ public_keys.empty()) {
+ LOG(ERROR) << "Failed to parse public keys in: " << certificate_zip_path;
+ return nullptr;
+ }
+
+ return std::unique_ptr<PayloadVerifier>(
+ new PayloadVerifier(std::move(public_keys)));
+}
+
+bool PayloadVerifier::VerifySignature(
+ const string& signature_proto, const brillo::Blob& sha256_hash_data) const {
+ TEST_AND_RETURN_FALSE(!public_keys_.empty());
+
Signatures signatures;
- LOG(INFO) << "signature blob size = " << signature_blob.size();
- TEST_AND_RETURN_FALSE(
- signatures.ParseFromArray(signature_blob.data(), signature_blob.size()));
+ LOG(INFO) << "signature blob size = " << signature_proto.size();
+ TEST_AND_RETURN_FALSE(signatures.ParseFromString(signature_proto));
if (!signatures.signatures_size()) {
LOG(ERROR) << "No signatures stored in the blob.";
@@ -95,41 +110,109 @@
std::vector<brillo::Blob> tested_hashes;
// Tries every signature in the signature blob.
for (int i = 0; i < signatures.signatures_size(); i++) {
- const Signatures_Signature& signature = signatures.signatures(i);
- brillo::Blob sig_data(signature.data().begin(), signature.data().end());
- brillo::Blob sig_hash_data;
- if (!GetRawHashFromSignature(sig_data, pem_public_key, &sig_hash_data))
- continue;
+ const Signatures::Signature& signature = signatures.signatures(i);
+ brillo::Blob sig_data;
+ if (signature.has_unpadded_signature_size()) {
+ TEST_AND_RETURN_FALSE(signature.unpadded_signature_size() <=
+ signature.data().size());
+ LOG(INFO) << "Truncating the signature to its unpadded size: "
+ << signature.unpadded_signature_size() << ".";
+ sig_data.assign(
+ signature.data().begin(),
+ signature.data().begin() + signature.unpadded_signature_size());
+ } else {
+ sig_data.assign(signature.data().begin(), signature.data().end());
+ }
- if (hash_data == sig_hash_data) {
+ brillo::Blob sig_hash_data;
+ if (VerifyRawSignature(sig_data, sha256_hash_data, &sig_hash_data)) {
LOG(INFO) << "Verified correct signature " << i + 1 << " out of "
<< signatures.signatures_size() << " signatures.";
return true;
}
- tested_hashes.push_back(sig_hash_data);
+ if (!sig_hash_data.empty()) {
+ tested_hashes.push_back(sig_hash_data);
+ }
}
LOG(ERROR) << "None of the " << signatures.signatures_size()
- << " signatures is correct. Expected:";
- utils::HexDumpVector(hash_data);
- LOG(ERROR) << "But found decrypted hashes:";
+ << " signatures is correct. Expected hash before padding:";
+ utils::HexDumpVector(sha256_hash_data);
+ LOG(ERROR) << "But found RSA decrypted hashes:";
for (const auto& sig_hash_data : tested_hashes) {
utils::HexDumpVector(sig_hash_data);
}
return false;
}
-bool PayloadVerifier::GetRawHashFromSignature(const brillo::Blob& sig_data,
- const string& pem_public_key,
- brillo::Blob* out_hash_data) {
+bool PayloadVerifier::VerifyRawSignature(
+ const brillo::Blob& sig_data,
+ const brillo::Blob& sha256_hash_data,
+ brillo::Blob* decrypted_sig_data) const {
+ TEST_AND_RETURN_FALSE(!public_keys_.empty());
+
+ for (const auto& public_key : public_keys_) {
+ int key_type = EVP_PKEY_id(public_key.get());
+ if (key_type == EVP_PKEY_RSA) {
+ brillo::Blob sig_hash_data;
+ if (!GetRawHashFromSignature(
+ sig_data, public_key.get(), &sig_hash_data)) {
+ LOG(WARNING)
+ << "Failed to get the raw hash with RSA key. Trying other keys.";
+ continue;
+ }
+
+ if (decrypted_sig_data != nullptr) {
+ *decrypted_sig_data = sig_hash_data;
+ }
+
+ brillo::Blob padded_hash_data = sha256_hash_data;
+ TEST_AND_RETURN_FALSE(
+ PadRSASHA256Hash(&padded_hash_data, sig_hash_data.size()));
+
+ if (padded_hash_data == sig_hash_data) {
+ return true;
+ }
+ }
+
+ if (key_type == EVP_PKEY_EC) {
+ // TODO(b/158580694): Switch back to get0 version and remove manual
+ // freeing of the object once the bug is resolved or gale has been moved
+ // to informational.
+ EC_KEY* ec_key = EVP_PKEY_get1_EC_KEY(public_key.get());
+ TEST_AND_RETURN_FALSE(ec_key != nullptr);
+ if (ECDSA_verify(0,
+ sha256_hash_data.data(),
+ sha256_hash_data.size(),
+ sig_data.data(),
+ sig_data.size(),
+ ec_key) == 1) {
+ EC_KEY_free(ec_key);
+ return true;
+ }
+ EC_KEY_free(ec_key);
+ }
+
+ LOG(ERROR) << "Unsupported key type " << key_type;
+ return false;
+ }
+ LOG(INFO) << "Failed to verify the signature with " << public_keys_.size()
+ << " keys.";
+ return false;
+}
+
+bool PayloadVerifier::GetRawHashFromSignature(
+ const brillo::Blob& sig_data,
+ const EVP_PKEY* public_key,
+ brillo::Blob* out_hash_data) const {
+ // TODO(b/158580694): Switch back to get0 version and remove manual freeing of
+ // the object once the bug is resolved or gale has been moved to
+ // informational.
+ //
// The code below executes the equivalent of:
//
- // openssl rsautl -verify -pubin -inkey <(echo |pem_public_key|)
+ // openssl rsautl -verify -pubin -inkey <(echo pem_public_key)
// -in |sig_data| -out |out_hash_data|
-
- BIO* bp = BIO_new_mem_buf(pem_public_key.data(), pem_public_key.size());
- char dummy_password[] = {' ', 0}; // Ensure no password is read from stdin.
- RSA* rsa = PEM_read_bio_RSA_PUBKEY(bp, nullptr, nullptr, dummy_password);
- BIO_free(bp);
+ RSA* rsa = EVP_PKEY_get1_RSA(const_cast<EVP_PKEY*>(public_key));
TEST_AND_RETURN_FALSE(rsa != nullptr);
unsigned int keysize = RSA_size(rsa);
@@ -151,13 +234,30 @@
return true;
}
-bool PayloadVerifier::PadRSA2048SHA256Hash(brillo::Blob* hash) {
- TEST_AND_RETURN_FALSE(hash->size() == 32);
- hash->insert(hash->begin(),
- reinterpret_cast<const char*>(kRSA2048SHA256Padding),
- reinterpret_cast<const char*>(kRSA2048SHA256Padding +
- sizeof(kRSA2048SHA256Padding)));
- TEST_AND_RETURN_FALSE(hash->size() == 256);
+bool PayloadVerifier::PadRSASHA256Hash(brillo::Blob* hash, size_t rsa_size) {
+ TEST_AND_RETURN_FALSE(hash->size() == kSHA256Size);
+ TEST_AND_RETURN_FALSE(rsa_size == 256 || rsa_size == 512);
+
+  // The following is the standard PKCS1-v1_5 padding for SHA256 signatures, as
+  // defined in RFC 3447 section 9.2. It is prepended to the 32-byte SHA256
+  // hash to form a sequence of 256 or 512 bytes (2048 or 4096 bits) that is
+  // amenable to RSA signing. The padded hash will look as follows:
+  //
+  //    0x00 0x01 0xff ... 0xff 0x00  ASN1HEADER  SHA256HASH
+  //   |---------205 or 461---------||----19----||----32----|
+ size_t padding_string_size =
+ rsa_size - hash->size() - sizeof(kSHA256DigestInfoPrefix) - 3;
+ brillo::Blob padded_result = brillo::CombineBlobs({
+ {0x00, 0x01},
+ brillo::Blob(padding_string_size, 0xff),
+ {0x00},
+ brillo::Blob(kSHA256DigestInfoPrefix,
+ kSHA256DigestInfoPrefix + sizeof(kSHA256DigestInfoPrefix)),
+ *hash,
+ });
+
+ *hash = std::move(padded_result);
+ TEST_AND_RETURN_FALSE(hash->size() == rsa_size);
return true;
}
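As a cross-check on the layout above, the sizes add up exactly to the RSA modulus size. The self-contained sketch below works through that arithmetic; the constant and function names are local to the example, not the ones used in this file.

#include <cstddef>

// Worked arithmetic for PKCS#1 v1.5 padding of a SHA256 digest:
//   0x00 0x01 | 0xff ... 0xff | 0x00 | 19-byte DigestInfo prefix | 32-byte hash
constexpr size_t kHashSize = 32;
constexpr size_t kDigestInfoPrefixSize = 19;
constexpr size_t PaddingStringSize(size_t rsa_size) {
  return rsa_size - kHashSize - kDigestInfoPrefixSize - 3;
}
// RSA-2048: 2 + 202 + 1 + 19 + 32 == 256 bytes.
static_assert(
    2 + PaddingStringSize(256) + 1 + kDigestInfoPrefixSize + kHashSize == 256,
    "RSA-2048 padded hash must be 256 bytes");
// RSA-4096: 2 + 458 + 1 + 19 + 32 == 512 bytes.
static_assert(
    2 + PaddingStringSize(512) + 1 + kDigestInfoPrefixSize + kHashSize == 512,
    "RSA-4096 padded hash must be 512 bytes");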
diff --git a/payload_consumer/payload_verifier.h b/payload_consumer/payload_verifier.h
index ec23ef2..bc5231f 100644
--- a/payload_consumer/payload_verifier.h
+++ b/payload_consumer/payload_verifier.h
@@ -17,47 +17,72 @@
#ifndef UPDATE_ENGINE_PAYLOAD_CONSUMER_PAYLOAD_VERIFIER_H_
#define UPDATE_ENGINE_PAYLOAD_CONSUMER_PAYLOAD_VERIFIER_H_
+#include <memory>
#include <string>
+#include <utility>
+#include <vector>
-#include <base/macros.h>
#include <brillo/secure_blob.h>
+#include <openssl/evp.h>
#include "update_engine/update_metadata.pb.h"
-// This class encapsulates methods used for payload signature verification.
-// See payload_generator/payload_signer.h for payload signing.
+// This class holds the public keys and implements methods used for payload
+// signature verification. See payload_generator/payload_signer.h for payload
+// signing.
namespace chromeos_update_engine {
class PayloadVerifier {
public:
- // Interprets |signature_blob| as a protocol buffer containing the Signatures
- // message and decrypts each signature data using the |pem_public_key|.
- // |pem_public_key| should be a PEM format RSA public key data.
- // Returns whether *any* of the decrypted hashes matches the |hash_data|.
- // In case of any error parsing the signatures or the public key, returns
- // false.
- static bool VerifySignature(const brillo::Blob& signature_blob,
- const std::string& pem_public_key,
- const brillo::Blob& hash_data);
+  // Pads a SHA256 hash so that it may be encrypted/signed with RSA2048 or
+  // RSA4096 using the PKCS#1 v1.5 scheme.
+  // |hash| should point to a vector of exactly 256 bits (32 bytes). |rsa_size|
+  // must be either 256 or 512 (bytes). The vector is modified in place and
+  // ends up with a length of 2048 or 4096 bits, depending on the RSA size.
+  // Returns true on success, false otherwise.
+ static bool PadRSASHA256Hash(brillo::Blob* hash, size_t rsa_size);
- // Decrypts |sig_data| with the given |pem_public_key| and populates
- // |out_hash_data| with the decoded raw hash. |pem_public_key| should be a PEM
- // format RSA public key data. Returns true if successful, false otherwise.
- static bool GetRawHashFromSignature(const brillo::Blob& sig_data,
- const std::string& pem_public_key,
- brillo::Blob* out_hash_data);
+  // Parses the input as a PEM-encoded public key and creates a PayloadVerifier
+  // with that public key for signature verification.
+ static std::unique_ptr<PayloadVerifier> CreateInstance(
+ const std::string& pem_public_key);
- // Pads a SHA256 hash so that it may be encrypted/signed with RSA2048
- // using the PKCS#1 v1.5 scheme.
- // hash should be a pointer to vector of exactly 256 bits. The vector
- // will be modified in place and will result in having a length of
- // 2048 bits. Returns true on success, false otherwise.
- static bool PadRSA2048SHA256Hash(brillo::Blob* hash);
+  // Extracts the public keys from the certificates contained in the input zip
+  // file and creates a PayloadVerifier with these public keys.
+ static std::unique_ptr<PayloadVerifier> CreateInstanceFromZipPath(
+ const std::string& certificate_zip_path);
+
+  // Interprets |signature_proto| as a protocol buffer containing the
+  // |Signatures| message and decrypts the data of each signature using the
+  // stored public keys. Pads the 32-byte |sha256_hash_data| to 256 or 512
+  // bytes according to the PKCS#1 v1.5 standard, and returns whether *any* of
+  // the decrypted hashes matches the padded hash data. In case of any error
+  // parsing the signatures, returns false.
+ bool VerifySignature(const std::string& signature_proto,
+ const brillo::Blob& sha256_hash_data) const;
+
+  // Verifies whether |sig_data| is a raw signature of the hash
+  // |sha256_hash_data|. If the PayloadVerifier holds an RSA public key, it
+  // also puts the decrypted data of |sig_data| into |decrypted_sig_data|.
+ bool VerifyRawSignature(const brillo::Blob& sig_data,
+ const brillo::Blob& sha256_hash_data,
+ brillo::Blob* decrypted_sig_data) const;
private:
- // This should never be constructed
- DISALLOW_IMPLICIT_CONSTRUCTORS(PayloadVerifier);
+ explicit PayloadVerifier(
+ std::vector<std::unique_ptr<EVP_PKEY, decltype(&EVP_PKEY_free)>>&&
+ public_keys)
+ : public_keys_(std::move(public_keys)) {}
+
+ // Decrypts |sig_data| with the given |public_key| and populates
+ // |out_hash_data| with the decoded raw hash. Returns true if successful,
+ // false otherwise.
+ bool GetRawHashFromSignature(const brillo::Blob& sig_data,
+ const EVP_PKEY* public_key,
+ brillo::Blob* out_hash_data) const;
+
+ std::vector<std::unique_ptr<EVP_PKEY, decltype(&EVP_PKEY_free)>> public_keys_;
};
} // namespace chromeos_update_engine
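A minimal usage sketch of the new instance-based API, assuming a PEM key string and a serialized Signatures blob are already in hand; VerifyMetadataSketch and its parameters are illustrative, not part of this change.

#include <string>
#include <brillo/secure_blob.h>
#include "update_engine/common/hash_calculator.h"
#include "update_engine/payload_consumer/payload_verifier.h"

bool VerifyMetadataSketch(const std::string& pem_public_key,
                          const std::string& signature_proto,
                          const void* metadata,
                          size_t metadata_size) {
  // Build a verifier from a single PEM-encoded public key.
  auto verifier =
      chromeos_update_engine::PayloadVerifier::CreateInstance(pem_public_key);
  if (!verifier)
    return false;  // Bad or unparsable PEM key.

  // Hash the metadata region, then check it against any of the signatures.
  brillo::Blob metadata_hash;
  if (!chromeos_update_engine::HashCalculator::RawHashOfBytes(
          metadata, metadata_size, &metadata_hash)) {
    return false;
  }
  // |signature_proto| is a serialized Signatures message; a match against any
  // contained signature makes the call succeed.
  return verifier->VerifySignature(signature_proto, metadata_hash);
}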
diff --git a/payload_consumer/postinstall_runner_action.cc b/payload_consumer/postinstall_runner_action.cc
index a0b67ea..e8fa81b 100644
--- a/payload_consumer/postinstall_runner_action.cc
+++ b/payload_consumer/postinstall_runner_action.cc
@@ -50,6 +50,7 @@
namespace chromeos_update_engine {
+using brillo::MessageLoop;
using std::string;
using std::vector;
@@ -75,10 +76,17 @@
partition_weight_.resize(install_plan_.partitions.size());
total_weight_ = 0;
for (size_t i = 0; i < install_plan_.partitions.size(); ++i) {
+ auto& partition = install_plan_.partitions[i];
+ if (!install_plan_.run_post_install && partition.postinstall_optional) {
+ partition.run_postinstall = false;
+ LOG(INFO) << "Skipping optional post-install for partition "
+ << partition.name << " according to install plan.";
+ }
+
// TODO(deymo): This code sets the weight to all the postinstall commands,
// but we could remember how long they took in the past and use those
// values.
- partition_weight_[i] = install_plan_.partitions[i].run_postinstall;
+ partition_weight_[i] = partition.run_postinstall;
total_weight_ += partition_weight_[i];
}
accumulated_weight_ = 0;
@@ -88,11 +96,6 @@
}
void PostinstallRunnerAction::PerformPartitionPostinstall() {
- if (!install_plan_.run_post_install) {
- LOG(INFO) << "Skipping post-install according to install plan.";
- return CompletePostinstall(ErrorCode::kSuccess);
- }
-
if (install_plan_.download_url.empty()) {
LOG(INFO) << "Skipping post-install during rollback";
return CompletePostinstall(ErrorCode::kSuccess);
@@ -290,6 +293,7 @@
progress_fd_ = -1;
progress_controller_.reset();
+
progress_buffer_.clear();
}
@@ -336,8 +340,13 @@
// steps succeeded.
if (error_code == ErrorCode::kSuccess) {
if (install_plan_.switch_slot_on_reboot) {
- if (!boot_control_->SetActiveBootSlot(install_plan_.target_slot)) {
+ if (!boot_control_->GetDynamicPartitionControl()->FinishUpdate(
+ install_plan_.powerwash_required) ||
+ !boot_control_->SetActiveBootSlot(install_plan_.target_slot)) {
error_code = ErrorCode::kPostinstallRunnerError;
+ } else {
+        // Schedules a warm reset on the next reboot, ignoring any error.
+ hardware_->SetWarmReset(true);
}
} else {
error_code = ErrorCode::kUpdatedButNotActive;
diff --git a/payload_consumer/postinstall_runner_action.h b/payload_consumer/postinstall_runner_action.h
index 838b235..e404107 100644
--- a/payload_consumer/postinstall_runner_action.h
+++ b/payload_consumer/postinstall_runner_action.h
@@ -97,8 +97,6 @@
// ready. Called when the post-install script was run for all the partitions.
void CompletePostinstall(ErrorCode error_code);
- InstallPlan install_plan_;
-
// The path where the filesystem will be mounted during post-install.
std::string fs_mount_dir_;
@@ -141,6 +139,7 @@
// The parent progress file descriptor used to watch for progress reports from
// the postinstall program and the task watching for them.
int progress_fd_{-1};
+
std::unique_ptr<base::FileDescriptorWatcher::Controller> progress_controller_;
// A buffer of a partial read line from the progress file descriptor.
diff --git a/payload_consumer/postinstall_runner_action_unittest.cc b/payload_consumer/postinstall_runner_action_unittest.cc
index 84f2c2c..cf5158b 100644
--- a/payload_consumer/postinstall_runner_action_unittest.cc
+++ b/payload_consumer/postinstall_runner_action_unittest.cc
@@ -103,6 +103,8 @@
bool is_rollback,
bool save_rollback_data);
+ void RunPostinstallActionWithInstallPlan(const InstallPlan& install_plan);
+
public:
void ResumeRunningAction() {
ASSERT_NE(nullptr, postinstall_action_);
@@ -180,9 +182,6 @@
bool powerwash_required,
bool is_rollback,
bool save_rollback_data) {
- ActionProcessor processor;
- processor_ = &processor;
- auto feeder_action = std::make_unique<ObjectFeederAction<InstallPlan>>();
InstallPlan::Partition part;
part.name = "part";
part.target_path = device_path;
@@ -194,6 +193,14 @@
install_plan.powerwash_required = powerwash_required;
install_plan.is_rollback = is_rollback;
install_plan.rollback_data_save_requested = save_rollback_data;
+ RunPostinstallActionWithInstallPlan(install_plan);
+}
+
+void PostinstallRunnerActionTest::RunPostinstallActionWithInstallPlan(
+ const chromeos_update_engine::InstallPlan& install_plan) {
+ ActionProcessor processor;
+ processor_ = &processor;
+ auto feeder_action = std::make_unique<ObjectFeederAction<InstallPlan>>();
feeder_action->set_obj(install_plan);
auto runner_action = std::make_unique<PostinstallRunnerAction>(
&fake_boot_control_, &fake_hardware_);
@@ -220,7 +227,7 @@
EXPECT_TRUE(processor_delegate_.processing_stopped_called_ ||
processor_delegate_.processing_done_called_);
if (processor_delegate_.processing_done_called_) {
- // Sanity check that the code was set when the processor finishes.
+ // Validation check that the code was set when the processor finishes.
EXPECT_TRUE(processor_delegate_.code_set_);
}
}
@@ -335,6 +342,27 @@
EXPECT_FALSE(fake_hardware_.GetIsRollbackPowerwashScheduled());
}
+TEST_F(PostinstallRunnerActionTest, RunAsRootSkipOptionalPostinstallTest) {
+ InstallPlan::Partition part;
+ part.name = "part";
+ part.target_path = "/dev/null";
+ part.run_postinstall = true;
+ part.postinstall_path = kPostinstallDefaultScript;
+ part.postinstall_optional = true;
+ InstallPlan install_plan;
+ install_plan.partitions = {part};
+ install_plan.download_url = "http://127.0.0.1:8080/update";
+
+  // The optional postinstall is skipped, and the postinstall action succeeds.
+ RunPostinstallActionWithInstallPlan(install_plan);
+ EXPECT_EQ(ErrorCode::kSuccess, processor_delegate_.code_);
+
+ part.postinstall_optional = false;
+ install_plan.partitions = {part};
+ RunPostinstallActionWithInstallPlan(install_plan);
+ EXPECT_EQ(ErrorCode::kPostinstallRunnerError, processor_delegate_.code_);
+}
+
// Check that the failures from the postinstall script cause the action to
// fail.
TEST_F(PostinstallRunnerActionTest, RunAsRootErrScriptTest) {
diff --git a/payload_consumer/verity_writer_android.cc b/payload_consumer/verity_writer_android.cc
index 06d1489..d5437b6 100644
--- a/payload_consumer/verity_writer_android.cc
+++ b/payload_consumer/verity_writer_android.cc
@@ -41,6 +41,9 @@
bool VerityWriterAndroid::Init(const InstallPlan::Partition& partition) {
partition_ = &partition;
+ if (partition_->hash_tree_size != 0 || partition_->fec_size != 0) {
+ utils::SetBlockDeviceReadOnly(partition_->target_path, false);
+ }
if (partition_->hash_tree_size != 0) {
auto hash_function =
HashTreeBuilder::HashFunction(partition_->hash_tree_algorithm);