Add maximum timestamp to the payload. am: 8d853bab09 am: d7edd85fdf am: d494488b30 am: 0b6878b7b6 -s ours am: aec8cf10d9 -s ours am: e0e6604695 -s ours
Change-Id: Iafcf14b9c8f0f24106ba1925adce0c0e8f11f9ef
diff --git a/Android.mk b/Android.mk
index 4801a49..a3a7017 100644
--- a/Android.mk
+++ b/Android.mk
@@ -976,6 +976,7 @@
LOCAL_SRC_FILES += \
common_service_unittest.cc \
fake_system_state.cc \
+ image_properties_android_unittest.cc \
metrics_utils_unittest.cc \
omaha_request_action_unittest.cc \
omaha_request_params_unittest.cc \
@@ -1013,7 +1014,7 @@
# Update payload signing public key.
# ========================================================
-ifdef BRILLO
+ifeq ($(PRODUCT_IOT),true)
include $(CLEAR_VARS)
LOCAL_MODULE := brillo-update-payload-key
LOCAL_MODULE_CLASS := ETC
@@ -1022,7 +1023,7 @@
LOCAL_SRC_FILES := update_payload_key/brillo-update-payload-key.pub.pem
LOCAL_BUILT_MODULE_STEM := update_payload_key/brillo-update-payload-key.pub.pem
include $(BUILD_PREBUILT)
-endif # BRILLO
+endif # PRODUCT_IOT
# Brillo update payload generation script
# ========================================================
diff --git a/binder_bindings/android/os/IUpdateEngine.aidl b/binder_bindings/android/os/IUpdateEngine.aidl
index 67f828a..7e26752 100644
--- a/binder_bindings/android/os/IUpdateEngine.aidl
+++ b/binder_bindings/android/os/IUpdateEngine.aidl
@@ -28,6 +28,8 @@
/** @hide */
boolean bind(IUpdateEngineCallback callback);
/** @hide */
+ boolean unbind(IUpdateEngineCallback callback);
+ /** @hide */
void suspend();
/** @hide */
void resume();
diff --git a/binder_service_android.cc b/binder_service_android.cc
index 872f64c..e179c62 100644
--- a/binder_service_android.cc
+++ b/binder_service_android.cc
@@ -63,12 +63,15 @@
const android::sp<IUpdateEngineCallback>& callback, bool* return_value) {
callbacks_.emplace_back(callback);
+ const android::sp<IBinder>& callback_binder =
+ IUpdateEngineCallback::asBinder(callback);
auto binder_wrapper = android::BinderWrapper::Get();
binder_wrapper->RegisterForDeathNotifications(
- IUpdateEngineCallback::asBinder(callback),
- base::Bind(&BinderUpdateEngineAndroidService::UnbindCallback,
- base::Unretained(this),
- base::Unretained(callback.get())));
+ callback_binder,
+ base::Bind(
+ base::IgnoreResult(&BinderUpdateEngineAndroidService::UnbindCallback),
+ base::Unretained(this),
+ base::Unretained(callback_binder.get())));
// Send a status update on connection (except when no update was sent so far),
// since the status update is oneway and we don't need to wait for the
@@ -80,6 +83,17 @@
return Status::ok();
}
+Status BinderUpdateEngineAndroidService::unbind(
+ const android::sp<IUpdateEngineCallback>& callback, bool* return_value) {
+ const android::sp<IBinder>& callback_binder =
+ IUpdateEngineCallback::asBinder(callback);
+ auto binder_wrapper = android::BinderWrapper::Get();
+ binder_wrapper->UnregisterForDeathNotifications(callback_binder);
+
+ *return_value = UnbindCallback(callback_binder.get());
+ return Status::ok();
+}
+
Status BinderUpdateEngineAndroidService::applyPayload(
const android::String16& url,
int64_t payload_offset,
@@ -128,19 +142,19 @@
return Status::ok();
}
-void BinderUpdateEngineAndroidService::UnbindCallback(
- IUpdateEngineCallback* callback) {
- auto it =
- std::find_if(callbacks_.begin(),
- callbacks_.end(),
- [&callback](const android::sp<IUpdateEngineCallback>& elem) {
- return elem.get() == callback;
- });
+bool BinderUpdateEngineAndroidService::UnbindCallback(const IBinder* callback) {
+ auto it = std::find_if(
+ callbacks_.begin(),
+ callbacks_.end(),
+ [&callback](const android::sp<IUpdateEngineCallback>& elem) {
+ return IUpdateEngineCallback::asBinder(elem).get() == callback;
+ });
if (it == callbacks_.end()) {
- LOG(ERROR) << "Got death notification for unknown callback.";
- return;
+ LOG(ERROR) << "Unable to unbind unknown callback.";
+ return false;
}
callbacks_.erase(it);
+ return true;
}
} // namespace chromeos_update_engine
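Note on the new unbind() entry point: it lets a client detach its callback explicitly instead of waiting for the binder death notification. A minimal client-side sketch, assuming the caller already holds a proxy to the service and an IUpdateEngineCallback implementation; the function name, header paths and error handling below are illustrative and not part of this change.

#include <android/os/IUpdateEngine.h>
#include <android/os/IUpdateEngineCallback.h>
#include <base/logging.h>
#include <utils/StrongPointer.h>

void BindThenUnbind(
    const android::sp<android::os::IUpdateEngine>& service,
    const android::sp<android::os::IUpdateEngineCallback>& callback) {
  bool bound = false;
  android::binder::Status status = service->bind(callback, &bound);
  if (!status.isOk() || !bound) {
    LOG(ERROR) << "bind() failed";
    return;
  }
  // ... receive status updates through |callback| ...
  // Explicitly detach; if this step is skipped, UnbindCallback() still runs
  // when the callback binder dies.
  bool unbound = false;
  status = service->unbind(callback, &unbound);
  if (!status.isOk() || !unbound)
    LOG(WARNING) << "unbind() failed for an unknown callback";
}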
diff --git a/binder_service_android.h b/binder_service_android.h
index 3293c0e..7d66fcc 100644
--- a/binder_service_android.h
+++ b/binder_service_android.h
@@ -61,6 +61,9 @@
android::binder::Status bind(
const android::sp<android::os::IUpdateEngineCallback>& callback,
bool* return_value) override;
+ android::binder::Status unbind(
+ const android::sp<android::os::IUpdateEngineCallback>& callback,
+ bool* return_value) override;
android::binder::Status suspend() override;
android::binder::Status resume() override;
android::binder::Status cancel() override;
@@ -68,8 +71,9 @@
private:
// Remove the passed |callback| from the list of registered callbacks. Called
- // whenever the callback object is destroyed.
- void UnbindCallback(android::os::IUpdateEngineCallback* callback);
+ // on unbind() or whenever the callback object is destroyed.
+ // Returns true on success.
+ bool UnbindCallback(const IBinder* callback);
// List of currently bound callbacks.
std::vector<android::sp<android::os::IUpdateEngineCallback>> callbacks_;
diff --git a/common/constants.cc b/common/constants.cc
index 88d0445..c0a6e27 100644
--- a/common/constants.cc
+++ b/common/constants.cc
@@ -78,6 +78,7 @@
const char kPrefsUpdateStateNextDataLength[] = "update-state-next-data-length";
const char kPrefsUpdateStateNextDataOffset[] = "update-state-next-data-offset";
const char kPrefsUpdateStateNextOperation[] = "update-state-next-operation";
+const char kPrefsUpdateStatePayloadIndex[] = "update-state-payload-index";
const char kPrefsUpdateStateSHA256Context[] = "update-state-sha-256-context";
const char kPrefsUpdateStateSignatureBlob[] = "update-state-signature-blob";
const char kPrefsUpdateStateSignedSHA256Context[] =
diff --git a/common/constants.h b/common/constants.h
index ab66921..776e726 100644
--- a/common/constants.h
+++ b/common/constants.h
@@ -79,6 +79,7 @@
extern const char kPrefsUpdateStateNextDataLength[];
extern const char kPrefsUpdateStateNextDataOffset[];
extern const char kPrefsUpdateStateNextOperation[];
+extern const char kPrefsUpdateStatePayloadIndex[];
extern const char kPrefsUpdateStateSHA256Context[];
extern const char kPrefsUpdateStateSignatureBlob[];
extern const char kPrefsUpdateStateSignedSHA256Context[];
diff --git a/common/hash_calculator.cc b/common/hash_calculator.cc
index de6e0f9..ebfdb6e 100644
--- a/common/hash_calculator.cc
+++ b/common/hash_calculator.cc
@@ -20,7 +20,6 @@
#include <base/logging.h>
#include <base/posix/eintr_wrapper.h>
-#include <brillo/data_encoding.h>
#include "update_engine/common/utils.h"
@@ -37,7 +36,7 @@
// Mostly just passes the data through to OpenSSL's SHA256_Update()
bool HashCalculator::Update(const void* data, size_t length) {
TEST_AND_RETURN_FALSE(valid_);
- TEST_AND_RETURN_FALSE(hash_.empty());
+ TEST_AND_RETURN_FALSE(raw_hash_.empty());
static_assert(sizeof(size_t) <= sizeof(unsigned long), // NOLINT(runtime/int)
"length param may be truncated in SHA256_Update");
TEST_AND_RETURN_FALSE(SHA256_Update(&ctx_, data, length) == 1);
@@ -73,16 +72,11 @@
}
// Call Finalize() when all data has been passed in. This mostly just
-// calls OpenSSL's SHA256_Final() and then base64 encodes the hash.
+// calls OpenSSL's SHA256_Final().
bool HashCalculator::Finalize() {
- TEST_AND_RETURN_FALSE(hash_.empty());
TEST_AND_RETURN_FALSE(raw_hash_.empty());
raw_hash_.resize(SHA256_DIGEST_LENGTH);
TEST_AND_RETURN_FALSE(SHA256_Final(raw_hash_.data(), &ctx_) == 1);
-
- // Convert raw_hash_ to base64 encoding and store it in hash_.
- hash_ = brillo::data_encoding::Base64Encode(raw_hash_.data(),
- raw_hash_.size());
return true;
}
@@ -115,21 +109,6 @@
return res;
}
-string HashCalculator::HashOfBytes(const void* data, size_t length) {
- HashCalculator calc;
- calc.Update(data, length);
- calc.Finalize();
- return calc.hash();
-}
-
-string HashCalculator::HashOfString(const string& str) {
- return HashOfBytes(str.data(), str.size());
-}
-
-string HashCalculator::HashOfData(const brillo::Blob& data) {
- return HashOfBytes(data.data(), data.size());
-}
-
string HashCalculator::GetContext() const {
return string(reinterpret_cast<const char*>(&ctx_), sizeof(ctx_));
}
diff --git a/common/hash_calculator.h b/common/hash_calculator.h
index f749585..06d2cfb 100644
--- a/common/hash_calculator.h
+++ b/common/hash_calculator.h
@@ -27,11 +27,11 @@
#include <base/macros.h>
#include <brillo/secure_blob.h>
-// Omaha uses base64 encoded SHA-256 as the hash. This class provides a simple
-// wrapper around OpenSSL providing such a formatted hash of data passed in.
+// This class provides a simple wrapper around OpenSSL providing a hash of data
+// passed in.
// The methods of this class must be called in a very specific order: First the
// ctor (of course), then 0 or more calls to Update(), then Finalize(), then 0
-// or more calls to hash().
+// or more calls to raw_hash().
namespace chromeos_update_engine {
@@ -50,17 +50,10 @@
off_t UpdateFile(const std::string& name, off_t length);
// Call Finalize() when all data has been passed in. This method tells
- // OpenSSl that no more data will come in and base64 encodes the resulting
- // hash.
+ // OpenSSL that no more data will come in.
// Returns true on success.
bool Finalize();
- // Gets the hash. Finalize() must have been called.
- const std::string& hash() const {
- DCHECK(!hash_.empty()) << "Call Finalize() first";
- return hash_;
- }
-
const brillo::Blob& raw_hash() const {
DCHECK(!raw_hash_.empty()) << "Call Finalize() first";
return raw_hash_;
@@ -83,15 +76,9 @@
static off_t RawHashOfFile(const std::string& name, off_t length,
brillo::Blob* out_hash);
- // Used by tests
- static std::string HashOfBytes(const void* data, size_t length);
- static std::string HashOfString(const std::string& str);
- static std::string HashOfData(const brillo::Blob& data);
-
private:
- // If non-empty, the final base64 encoded hash and the raw hash. Will only be
- // set to non-empty when Finalize is called.
- std::string hash_;
+ // If non-empty, the final raw hash. Will only be set to non-empty when
+ // Finalize is called.
brillo::Blob raw_hash_;
// Init success
diff --git a/common/hash_calculator_unittest.cc b/common/hash_calculator_unittest.cc
index 436e6a7..233237b 100644
--- a/common/hash_calculator_unittest.cc
+++ b/common/hash_calculator_unittest.cc
@@ -22,6 +22,7 @@
#include <string>
#include <vector>
+#include <brillo/data_encoding.h>
#include <brillo/secure_blob.h>
#include <gtest/gtest.h>
@@ -33,9 +34,8 @@
namespace chromeos_update_engine {
// Generated by running this on a linux shell:
-// $ echo -n hi | openssl dgst -sha256 -binary | openssl base64
-static const char kExpectedHash[] =
- "j0NDRmSPa5bfid2pAcUXaxCm2Dlh3TwayItZstwyeqQ=";
+// $ echo -n hi | openssl dgst -sha256 -binary |
+// hexdump -v -e '" " 12/1 "0x%02x, " "\n"'
static const uint8_t kExpectedRawHash[] = {
0x8f, 0x43, 0x43, 0x46, 0x64, 0x8f, 0x6b, 0x96,
0xdf, 0x89, 0xdd, 0xa9, 0x01, 0xc5, 0x17, 0x6b,
@@ -52,7 +52,6 @@
HashCalculator calc;
calc.Update("hi", 2);
calc.Finalize();
- EXPECT_EQ(kExpectedHash, calc.hash());
brillo::Blob raw_hash(std::begin(kExpectedRawHash),
std::end(kExpectedRawHash));
EXPECT_TRUE(raw_hash == calc.raw_hash());
@@ -63,7 +62,6 @@
calc.Update("h", 1);
calc.Update("i", 1);
calc.Finalize();
- EXPECT_EQ(kExpectedHash, calc.hash());
brillo::Blob raw_hash(std::begin(kExpectedRawHash),
std::end(kExpectedRawHash));
EXPECT_TRUE(raw_hash == calc.raw_hash());
@@ -78,7 +76,6 @@
calc_next.SetContext(calc_context);
calc_next.Update("i", 1);
calc_next.Finalize();
- EXPECT_EQ(kExpectedHash, calc_next.hash());
brillo::Blob raw_hash(std::begin(kExpectedRawHash),
std::end(kExpectedRawHash));
EXPECT_TRUE(raw_hash == calc_next.raw_hash());
@@ -106,7 +103,8 @@
// echo -n $C
// let C=C+1
// done | openssl dgst -sha256 -binary | openssl base64
- EXPECT_EQ("NZf8k6SPBkYMvhaX8YgzuMgbkLP1XZ+neM8K5wcSsf8=", calc.hash());
+ EXPECT_EQ("NZf8k6SPBkYMvhaX8YgzuMgbkLP1XZ+neM8K5wcSsf8=",
+ brillo::data_encoding::Base64Encode(calc.raw_hash()));
}
TEST_F(HashCalculatorTest, UpdateFileSimpleTest) {
@@ -121,7 +119,6 @@
HashCalculator calc;
EXPECT_EQ(2, calc.UpdateFile(data_path, kLengths[i]));
EXPECT_TRUE(calc.Finalize());
- EXPECT_EQ(kExpectedHash, calc.hash());
brillo::Blob raw_hash(std::begin(kExpectedRawHash),
std::end(kExpectedRawHash));
EXPECT_TRUE(raw_hash == calc.raw_hash());
@@ -132,7 +129,8 @@
EXPECT_EQ(1, calc.UpdateFile(data_path, 1));
EXPECT_TRUE(calc.Finalize());
// echo -n h | openssl dgst -sha256 -binary | openssl base64
- EXPECT_EQ("qqlAJmTxpB9A67xSyZk+tmrrNmYClY/fqig7ceZNsSM=", calc.hash());
+ EXPECT_EQ("qqlAJmTxpB9A67xSyZk+tmrrNmYClY/fqig7ceZNsSM=",
+ brillo::data_encoding::Base64Encode(calc.raw_hash()));
}
TEST_F(HashCalculatorTest, RawHashOfFileSimpleTest) {
diff --git a/common/utils.cc b/common/utils.cc
index ea748c1..f528660 100644
--- a/common/utils.cc
+++ b/common/utils.cc
@@ -944,8 +944,16 @@
return str;
}
-string CalculateP2PFileId(const string& payload_hash, size_t payload_size) {
- string encoded_hash = brillo::data_encoding::Base64Encode(payload_hash);
+// The P2P file id should be the same for devices running the new version and
+// the old version so that they can share payloads with each other. The hash
+// in the response used to be base64 encoded, but now that we switched to the
+// hex-encoded "hash_sha256" field, we have to convert it back to base64 for
+// P2P. Historically the base64-encoded hash was base64 encoded here a second
+// time for some reason, so we keep the same behavior here.
+string CalculateP2PFileId(const brillo::Blob& payload_hash,
+ size_t payload_size) {
+ string encoded_hash = brillo::data_encoding::Base64Encode(
+ brillo::data_encoding::Base64Encode(payload_hash));
return base::StringPrintf("cros_update_size_%" PRIuS "_hash_%s",
payload_size,
encoded_hash.c_str());
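A short sketch of the resulting call path with the new signature; the helper name is illustrative. It mirrors the caller added later in omaha_request_action.cc, which converts the hex "hash_sha256" value to raw bytes before computing the file id.

#include <string>

#include <base/strings/string_number_conversions.h>
#include <brillo/secure_blob.h>

#include "update_engine/common/utils.h"

std::string FileIdFromHexHash(const std::string& hex_hash,
                              size_t payload_size) {
  brillo::Blob raw_hash;
  if (!base::HexStringToBytes(hex_hash, &raw_hash))
    return std::string();  // Malformed hash in the response; no p2p sharing.
  // Yields "cros_update_size_<size>_hash_<Base64(Base64(raw_hash))>", the same
  // id format older clients derived from the base64 "sha256" attribute.
  return chromeos_update_engine::utils::CalculateP2PFileId(raw_hash,
                                                           payload_size);
}

For reference, the updated test value "4841534831323334" further down is simply the ASCII string "HASH1234" written in hex.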
diff --git a/common/utils.h b/common/utils.h
index 8cccc24..eaf2640 100644
--- a/common/utils.h
+++ b/common/utils.h
@@ -53,7 +53,7 @@
std::string StringVectorToString(const std::vector<std::string> &vec_str);
// Calculates the p2p file id from payload hash and size
-std::string CalculateP2PFileId(const std::string& payload_hash,
+std::string CalculateP2PFileId(const brillo::Blob& payload_hash,
size_t payload_size);
// Parse the firmware version from one line of output from the
diff --git a/image_properties.h b/image_properties.h
index ba6ce44..4f94eeb 100644
--- a/image_properties.h
+++ b/image_properties.h
@@ -33,14 +33,22 @@
std::string product_id;
// The canary-channel product id.
std::string canary_product_id;
+ // The system id for the Android Things SoM, empty for Chrome OS.
+ std::string system_id;
// The product version of this image.
std::string version;
+ // The system version of this image.
+ std::string system_version;
// A unique string that identifies this build. Normally a combination of the
// the version, signing keys and build target.
std::string build_fingerprint;
+ // The Android build type, which should be 'user', 'userdebug' or 'eng'.
+ // It's an empty string on other platforms.
+ std::string build_type;
+
// The board name this image was built for.
std::string board;
diff --git a/image_properties_android.cc b/image_properties_android.cc
index e3b7616..d52c40b 100644
--- a/image_properties_android.cc
+++ b/image_properties_android.cc
@@ -20,6 +20,7 @@
#include <base/logging.h>
#include <brillo/osrelease_reader.h>
+#include <brillo/strings/string_utils.h>
#include <cutils/properties.h>
#include "update_engine/common/boot_control_interface.h"
@@ -28,13 +29,16 @@
#include "update_engine/common/prefs_interface.h"
#include "update_engine/system_state.h"
+using std::string;
+
namespace chromeos_update_engine {
namespace {
-// Build time properties name used in Brillo.
+// Build-time property names used in Android Things.
const char kProductId[] = "product_id";
const char kProductVersion[] = "product_version";
+const char kSystemId[] = "system_id";
const char kSystemVersion[] = "system_version";
// Prefs used to store the target channel and powerwash settings.
@@ -44,11 +48,15 @@
// System properties that identify the "board".
const char kPropProductName[] = "ro.product.name";
const char kPropBuildFingerprint[] = "ro.build.fingerprint";
+const char kPropBuildType[] = "ro.build.type";
-std::string GetStringWithDefault(const brillo::OsReleaseReader& osrelease,
- const std::string& key,
- const std::string& default_value) {
- std::string result;
+// A prefix added to the path, used for testing.
+const char* root_prefix = nullptr;
+
+string GetStringWithDefault(const brillo::OsReleaseReader& osrelease,
+ const string& key,
+ const string& default_value) {
+ string result;
if (osrelease.GetString(key, &result))
return result;
LOG(INFO) << "Cannot load ImageProperty " << key << ", using default value "
@@ -59,22 +67,35 @@
} // namespace
namespace test {
-void SetImagePropertiesRootPrefix(const char* /* test_root_prefix */) {}
+void SetImagePropertiesRootPrefix(const char* test_root_prefix) {
+ root_prefix = test_root_prefix;
+}
} // namespace test
ImageProperties LoadImageProperties(SystemState* system_state) {
ImageProperties result;
brillo::OsReleaseReader osrelease;
- osrelease.Load();
- result.product_id = GetStringWithDefault(
- osrelease, kProductId, "developer-boards:brillo-starter-board");
+ if (root_prefix)
+ osrelease.LoadTestingOnly(base::FilePath(root_prefix));
+ else
+ osrelease.Load();
+ result.product_id =
+ GetStringWithDefault(osrelease, kProductId, "invalid-product");
+ result.system_id = GetStringWithDefault(
+ osrelease, kSystemId, "developer-boards:brillo-starter-board");
+ // Update the system id to match the prefix of product id for testing.
+ string prefix, not_used, system_id;
+ if (brillo::string_utils::SplitAtFirst(
+ result.product_id, ":", &prefix, ¬_used, false) &&
+ brillo::string_utils::SplitAtFirst(
+ result.system_id, ":", ¬_used, &system_id, false)) {
+ result.system_id = prefix + ":" + system_id;
+ }
result.canary_product_id = result.product_id;
- std::string system_version =
- GetStringWithDefault(osrelease, kSystemVersion, "0.0.0");
- std::string product_version =
- GetStringWithDefault(osrelease, kProductVersion, "0");
- result.version = system_version + "." + product_version;
+ result.version = GetStringWithDefault(osrelease, kProductVersion, "0.0.0.0");
+ result.system_version =
+ GetStringWithDefault(osrelease, kSystemVersion, "0.0.0.0");
char prop[PROPERTY_VALUE_MAX];
property_get(kPropProductName, prop, "brillo");
@@ -83,14 +104,17 @@
property_get(kPropBuildFingerprint, prop, "none");
result.build_fingerprint = prop;
+ property_get(kPropBuildType, prop, "");
+ result.build_type = prop;
+
// Brillo images don't have a channel assigned. We stored the name of the
// channel where we got the image from in prefs at the time of the update, so
// we use that as the current channel if available. During provisioning, there
// is no value assigned, so we default to the "stable-channel".
- std::string current_channel_key =
+ string current_channel_key =
kPrefsChannelOnSlotPrefix +
std::to_string(system_state->boot_control()->GetCurrentSlot());
- std::string current_channel;
+ string current_channel;
if (!system_state->prefs()->Exists(current_channel_key) ||
!system_state->prefs()->GetString(current_channel_key, ¤t_channel))
current_channel = "stable-channel";
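A condensed sketch of the system id prefix rewrite above, equivalent to what the IDPrefixTest in the new unit test file exercises; the function name is illustrative, and the rewrite only applies when both ids contain a ':'.

#include <string>

#include <brillo/strings/string_utils.h>

std::string RewriteSystemId(const std::string& product_id,
                            const std::string& system_id) {
  std::string prefix, not_used, suffix;
  if (brillo::string_utils::SplitAtFirst(
          product_id, ":", &prefix, &not_used, false) &&
      brillo::string_utils::SplitAtFirst(
          system_id, ":", &not_used, &suffix, false)) {
    // e.g. product_id "abc:def" and system_id "foo:bar" give "abc:bar".
    return prefix + ":" + suffix;
  }
  return system_id;  // One of the ids has no ':'; leave it unchanged.
}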
diff --git a/image_properties_android_unittest.cc b/image_properties_android_unittest.cc
new file mode 100644
index 0000000..9bbb8b0
--- /dev/null
+++ b/image_properties_android_unittest.cc
@@ -0,0 +1,90 @@
+//
+// Copyright (C) 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/image_properties.h"
+
+#include <string>
+
+#include <base/files/file_util.h>
+#include <base/files/scoped_temp_dir.h>
+#include <gtest/gtest.h>
+
+#include "update_engine/common/constants.h"
+#include "update_engine/common/test_utils.h"
+#include "update_engine/fake_system_state.h"
+
+using chromeos_update_engine::test_utils::WriteFileString;
+using std::string;
+
+namespace chromeos_update_engine {
+
+class ImagePropertiesTest : public ::testing::Test {
+ protected:
+ void SetUp() override {
+ // Create a uniquely named test directory.
+ ASSERT_TRUE(tempdir_.CreateUniqueTempDir());
+ osrelease_dir_ = tempdir_.path().Append("etc/os-release.d");
+ EXPECT_TRUE(base::CreateDirectory(osrelease_dir_));
+ test::SetImagePropertiesRootPrefix(tempdir_.path().value().c_str());
+ }
+
+ void WriteOsRelease(const string& key, const string& value) {
+ ASSERT_TRUE(WriteFileString(osrelease_dir_.Append(key).value(), value));
+ }
+
+ FakeSystemState fake_system_state_;
+
+ base::ScopedTempDir tempdir_;
+ base::FilePath osrelease_dir_;
+};
+
+TEST_F(ImagePropertiesTest, SimpleTest) {
+ WriteOsRelease("product_id", "abc");
+ WriteOsRelease("system_id", "def");
+ WriteOsRelease("product_version", "1.2.3.4");
+ WriteOsRelease("system_version", "5.6.7.8");
+ ImageProperties props = LoadImageProperties(&fake_system_state_);
+ EXPECT_EQ("abc", props.product_id);
+ EXPECT_EQ("def", props.system_id);
+ EXPECT_EQ("1.2.3.4", props.version);
+ EXPECT_EQ("5.6.7.8", props.system_version);
+ EXPECT_EQ("stable-channel", props.current_channel);
+ EXPECT_EQ(constants::kOmahaDefaultProductionURL, props.omaha_url);
+}
+
+TEST_F(ImagePropertiesTest, IDPrefixTest) {
+ WriteOsRelease("product_id", "abc:def");
+ WriteOsRelease("system_id", "foo:bar");
+ ImageProperties props = LoadImageProperties(&fake_system_state_);
+ EXPECT_EQ("abc:def", props.product_id);
+ EXPECT_EQ("abc:bar", props.system_id);
+}
+
+TEST_F(ImagePropertiesTest, IDInvalidPrefixTest) {
+ WriteOsRelease("product_id", "def");
+ WriteOsRelease("system_id", "foo:bar");
+ ImageProperties props = LoadImageProperties(&fake_system_state_);
+ EXPECT_EQ("def", props.product_id);
+ EXPECT_EQ("foo:bar", props.system_id);
+
+ WriteOsRelease("product_id", "abc:def");
+ WriteOsRelease("system_id", "bar");
+ props = LoadImageProperties(&fake_system_state_);
+ EXPECT_EQ("abc:def", props.product_id);
+ EXPECT_EQ("bar", props.system_id);
+}
+
+} // namespace chromeos_update_engine
diff --git a/mock_payload_state.h b/mock_payload_state.h
index 2f654c7..6dccc64 100644
--- a/mock_payload_state.h
+++ b/mock_payload_state.h
@@ -52,6 +52,7 @@
MOCK_METHOD1(SetUsingP2PForSharing, void(bool value));
MOCK_METHOD1(SetScatteringWaitPeriod, void(base::TimeDelta));
MOCK_METHOD1(SetP2PUrl, void(const std::string&));
+ MOCK_METHOD0(NextPayload, bool());
// Getters.
MOCK_METHOD0(GetResponseSignature, std::string());
diff --git a/omaha_request_action.cc b/omaha_request_action.cc
index b06de09..c3bbf9d 100644
--- a/omaha_request_action.cc
+++ b/omaha_request_action.cc
@@ -27,6 +27,7 @@
#include <base/logging.h>
#include <base/rand_util.h>
#include <base/strings/string_number_conversions.h>
+#include <base/strings/string_split.h>
#include <base/strings/string_util.h>
#include <base/strings/stringprintf.h>
#include <base/time/time.h>
@@ -70,7 +71,6 @@
static const char* kTagMoreInfo = "MoreInfo";
// Deprecated: "NeedsAdmin"
static const char* kTagPrompt = "Prompt";
-static const char* kTagSha256 = "sha256";
static const char* kTagDisableP2PForDownloading = "DisableP2PForDownloading";
static const char* kTagDisableP2PForSharing = "DisableP2PForSharing";
static const char* kTagPublicKeyRsa = "PublicKeyRsa";
@@ -204,10 +204,16 @@
arg_name.c_str(), escaped_xml_value.c_str());
}
+struct OmahaAppData {
+ string id;
+ string version;
+};
+
// Returns an XML that corresponds to the entire <app> node of the Omaha
// request based on the given parameters.
string GetAppXml(const OmahaEvent* event,
OmahaRequestParams* params,
+ const OmahaAppData& app_data,
bool ping_only,
bool include_ping,
int ping_active_days,
@@ -226,10 +232,10 @@
LOG(INFO) << "Passing OS version as 0.0.0.0 as we are set to powerwash "
<< "on downgrading to the version in the more stable channel";
app_versions = "version=\"0.0.0.0\" from_version=\"" +
- XmlEncodeWithDefault(params->app_version(), "0.0.0.0") + "\" ";
+ XmlEncodeWithDefault(app_data.version, "0.0.0.0") + "\" ";
} else {
app_versions = "version=\"" +
- XmlEncodeWithDefault(params->app_version(), "0.0.0.0") + "\" ";
+ XmlEncodeWithDefault(app_data.version, "0.0.0.0") + "\" ";
}
string download_channel = params->download_channel();
@@ -264,12 +270,19 @@
"fingerprint=\"" + XmlEncodeWithDefault(params->os_build_fingerprint(), "") + "\" ";
}
+ string buildtype_arg;
+ if (!params->os_build_type().empty()) {
+ buildtype_arg = "os_build_type=\"" +
+ XmlEncodeWithDefault(params->os_build_type(), "") + "\" ";
+ }
+
string app_xml = " <app "
- "appid=\"" + XmlEncodeWithDefault(params->GetAppId(), "") + "\" " +
+ "appid=\"" + XmlEncodeWithDefault(app_data.id, "") + "\" " +
app_cohort_args +
app_versions +
app_channels +
fingerprint_arg +
+ buildtype_arg +
"lang=\"" + XmlEncodeWithDefault(params->app_lang(), "en-US") + "\" " +
"board=\"" + XmlEncodeWithDefault(params->os_board(), "") + "\" " +
"hardware_class=\"" + XmlEncodeWithDefault(params->hwid(), "") + "\" " +
@@ -306,9 +319,30 @@
int install_date_in_days,
SystemState* system_state) {
string os_xml = GetOsXml(params);
- string app_xml = GetAppXml(event, params, ping_only, include_ping,
- ping_active_days, ping_roll_call_days,
- install_date_in_days, system_state);
+ OmahaAppData product_app = {.id = params->GetAppId(),
+ .version = params->app_version()};
+ string app_xml = GetAppXml(event,
+ params,
+ product_app,
+ ping_only,
+ include_ping,
+ ping_active_days,
+ ping_roll_call_days,
+ install_date_in_days,
+ system_state);
+ if (!params->system_app_id().empty()) {
+ OmahaAppData system_app = {.id = params->system_app_id(),
+ .version = params->system_version()};
+ app_xml += GetAppXml(event,
+ params,
+ system_app,
+ ping_only,
+ include_ping,
+ ping_active_days,
+ ping_roll_call_days,
+ install_date_in_days,
+ system_state);
+ }
string install_source = base::StringPrintf("installsource=\"%s\" ",
(params->interactive() ? "ondemandupdate" : "scheduler"));
@@ -352,16 +386,25 @@
bool app_cohort_set = false;
bool app_cohorthint_set = false;
bool app_cohortname_set = false;
- string updatecheck_status;
string updatecheck_poll_interval;
map<string, string> updatecheck_attrs;
string daystart_elapsed_days;
string daystart_elapsed_seconds;
- vector<string> url_codebase;
- string package_name;
- string package_size;
- string manifest_version;
- map<string, string> action_postinstall_attrs;
+
+ struct App {
+ vector<string> url_codebase;
+ string manifest_version;
+ map<string, string> action_postinstall_attrs;
+ string updatecheck_status;
+
+ struct Package {
+ string name;
+ string size;
+ string hash;
+ };
+ vector<Package> packages;
+ };
+ vector<App> apps;
};
namespace {
@@ -386,6 +429,7 @@
}
if (data->current_path == "/response/app") {
+ data->apps.emplace_back();
if (attrs.find("cohort") != attrs.end()) {
data->app_cohort_set = true;
data->app_cohort = attrs["cohort"];
@@ -399,9 +443,10 @@
data->app_cohortname = attrs["cohortname"];
}
} else if (data->current_path == "/response/app/updatecheck") {
- // There is only supposed to be a single <updatecheck> element.
- data->updatecheck_status = attrs["status"];
- data->updatecheck_poll_interval = attrs["PollInterval"];
+ if (!data->apps.empty())
+ data->apps.back().updatecheck_status = attrs["status"];
+ if (data->updatecheck_poll_interval.empty())
+ data->updatecheck_poll_interval = attrs["PollInterval"];
// Omaha sends arbitrary key-value pairs as extra attributes starting with
// an underscore.
for (const auto& attr : attrs) {
@@ -414,20 +459,24 @@
data->daystart_elapsed_seconds = attrs["elapsed_seconds"];
} else if (data->current_path == "/response/app/updatecheck/urls/url") {
// Look at all <url> elements.
- data->url_codebase.push_back(attrs["codebase"]);
- } else if (data->package_name.empty() && data->current_path ==
+ if (!data->apps.empty())
+ data->apps.back().url_codebase.push_back(attrs["codebase"]);
+ } else if (data->current_path ==
"/response/app/updatecheck/manifest/packages/package") {
- // Only look at the first <package>.
- data->package_name = attrs["name"];
- data->package_size = attrs["size"];
+ // Look at all <package> elements.
+ if (!data->apps.empty())
+ data->apps.back().packages.push_back({.name = attrs["name"],
+ .size = attrs["size"],
+ .hash = attrs["hash_sha256"]});
} else if (data->current_path == "/response/app/updatecheck/manifest") {
// Get the version.
- data->manifest_version = attrs[kTagVersion];
+ if (!data->apps.empty())
+ data->apps.back().manifest_version = attrs[kTagVersion];
} else if (data->current_path ==
"/response/app/updatecheck/manifest/actions/action") {
// We only care about the postinstall action.
- if (attrs["event"] == "postinstall") {
- data->action_postinstall_attrs = attrs;
+ if (attrs["event"] == "postinstall" && !data->apps.empty()) {
+ data->apps.back().action_postinstall_attrs = std::move(attrs);
}
}
}
@@ -726,15 +775,112 @@
prefs->SetInt64(kPrefsLastRollCallPingDay, daystart.ToInternalValue());
return true;
}
+
+// Parses the package node in the given XML document and populates
+// |output_object| if valid. Returns true if we should continue the parsing.
+// False otherwise, in which case it sets any error code using |completer|.
+bool ParsePackage(OmahaParserData::App* app,
+ OmahaResponse* output_object,
+ ScopedActionCompleter* completer) {
+ if (app->updatecheck_status == "noupdate") {
+ if (!app->packages.empty()) {
+ LOG(ERROR) << "No update in this <app> but <package> is not empty.";
+ completer->set_code(ErrorCode::kOmahaResponseInvalid);
+ return false;
+ }
+ return true;
+ }
+ if (app->packages.empty()) {
+ LOG(ERROR) << "Omaha Response has no packages";
+ completer->set_code(ErrorCode::kOmahaResponseInvalid);
+ return false;
+ }
+ if (app->url_codebase.empty()) {
+ LOG(ERROR) << "No Omaha Response URLs";
+ completer->set_code(ErrorCode::kOmahaResponseInvalid);
+ return false;
+ }
+ LOG(INFO) << "Found " << app->url_codebase.size() << " url(s)";
+ vector<string> metadata_sizes =
+ base::SplitString(app->action_postinstall_attrs[kTagMetadataSize],
+ ":",
+ base::TRIM_WHITESPACE,
+ base::SPLIT_WANT_ALL);
+ vector<string> metadata_signatures =
+ base::SplitString(app->action_postinstall_attrs[kTagMetadataSignatureRsa],
+ ":",
+ base::TRIM_WHITESPACE,
+ base::SPLIT_WANT_ALL);
+ vector<string> is_delta_payloads =
+ base::SplitString(app->action_postinstall_attrs[kTagIsDeltaPayload],
+ ":",
+ base::TRIM_WHITESPACE,
+ base::SPLIT_WANT_ALL);
+ for (size_t i = 0; i < app->packages.size(); i++) {
+ const auto& package = app->packages[i];
+ if (package.name.empty()) {
+ LOG(ERROR) << "Omaha Response has empty package name";
+ completer->set_code(ErrorCode::kOmahaResponseInvalid);
+ return false;
+ }
+ LOG(INFO) << "Found package " << package.name;
+
+ OmahaResponse::Package out_package;
+ for (const string& codebase : app->url_codebase) {
+ if (codebase.empty()) {
+ LOG(ERROR) << "Omaha Response URL has empty codebase";
+ completer->set_code(ErrorCode::kOmahaResponseInvalid);
+ return false;
+ }
+ out_package.payload_urls.push_back(codebase + package.name);
+ }
+ // Parse the payload size.
+ base::StringToUint64(package.size, &out_package.size);
+ if (out_package.size <= 0) {
+ LOG(ERROR) << "Omaha Response has invalid payload size: " << package.size;
+ completer->set_code(ErrorCode::kOmahaResponseInvalid);
+ return false;
+ }
+ LOG(INFO) << "Payload size = " << out_package.size << " bytes";
+
+ if (i < metadata_sizes.size())
+ base::StringToUint64(metadata_sizes[i], &out_package.metadata_size);
+ LOG(INFO) << "Payload metadata size = " << out_package.metadata_size
+ << " bytes";
+
+ if (i < metadata_signatures.size())
+ out_package.metadata_signature = metadata_signatures[i];
+ LOG(INFO) << "Payload metadata signature = "
+ << out_package.metadata_signature;
+
+ out_package.hash = package.hash;
+ if (out_package.hash.empty()) {
+ LOG(ERROR) << "Omaha Response has empty hash_sha256 value";
+ completer->set_code(ErrorCode::kOmahaResponseInvalid);
+ return false;
+ }
+ LOG(INFO) << "Payload hash = " << out_package.hash;
+
+ if (i < is_delta_payloads.size())
+ out_package.is_delta = ParseBool(is_delta_payloads[i]);
+ LOG(INFO) << "Payload is delta = " << utils::ToString(out_package.is_delta);
+
+ output_object->packages.push_back(std::move(out_package));
+ }
+
+ return true;
+}
+
} // namespace
bool OmahaRequestAction::ParseResponse(OmahaParserData* parser_data,
OmahaResponse* output_object,
ScopedActionCompleter* completer) {
- if (parser_data->updatecheck_status.empty()) {
+ if (parser_data->apps.empty()) {
completer->set_code(ErrorCode::kOmahaResponseInvalid);
return false;
}
+ LOG(INFO) << "Found " << parser_data->apps.size() << " <app>.";
// chromium-os:37289: The PollInterval is not supported by Omaha server
// currently. But still keeping this existing code in case we ever decide to
@@ -786,97 +932,62 @@
if (!ParseStatus(parser_data, output_object, completer))
return false;
- // Note: ParseUrls MUST be called before ParsePackage as ParsePackage
- // appends the package name to the URLs populated in this method.
- if (!ParseUrls(parser_data, output_object, completer))
- return false;
-
- if (!ParsePackage(parser_data, output_object, completer))
- return false;
-
if (!ParseParams(parser_data, output_object, completer))
return false;
+ // Packages have to be parsed after Params now because ParseParams needs to
+ // make sure that the postinstall action exists.
+ for (auto& app : parser_data->apps)
+ if (!ParsePackage(&app, output_object, completer))
+ return false;
+
return true;
}
bool OmahaRequestAction::ParseStatus(OmahaParserData* parser_data,
OmahaResponse* output_object,
ScopedActionCompleter* completer) {
- const string& status = parser_data->updatecheck_status;
- if (status == "noupdate") {
- LOG(INFO) << "No update.";
- output_object->update_exists = false;
- SetOutputObject(*output_object);
- completer->set_code(ErrorCode::kSuccess);
- return false;
- }
-
- if (status != "ok") {
- LOG(ERROR) << "Unknown Omaha response status: " << status;
- completer->set_code(ErrorCode::kOmahaResponseInvalid);
- return false;
- }
-
- return true;
-}
-
-bool OmahaRequestAction::ParseUrls(OmahaParserData* parser_data,
- OmahaResponse* output_object,
- ScopedActionCompleter* completer) {
- if (parser_data->url_codebase.empty()) {
- LOG(ERROR) << "No Omaha Response URLs";
- completer->set_code(ErrorCode::kOmahaResponseInvalid);
- return false;
- }
-
- LOG(INFO) << "Found " << parser_data->url_codebase.size() << " url(s)";
- output_object->payload_urls.clear();
- for (const auto& codebase : parser_data->url_codebase) {
- if (codebase.empty()) {
- LOG(ERROR) << "Omaha Response URL has empty codebase";
+ output_object->update_exists = false;
+ for (size_t i = 0; i < parser_data->apps.size(); i++) {
+ const string& status = parser_data->apps[i].updatecheck_status;
+ if (status == "noupdate") {
+ // Don't update if any app has status="noupdate".
+ LOG(INFO) << "No update for <app> " << i;
+ output_object->update_exists = false;
+ break;
+ } else if (status == "ok") {
+ if (parser_data->apps[i].action_postinstall_attrs["noupdate"] == "true") {
+ // noupdate="true" in the postinstall attributes means it's an update to
+ // self; only update if at least one app really has an update.
+ LOG(INFO) << "Update to self for <app> " << i;
+ } else {
+ LOG(INFO) << "Update for <app> " << i;
+ output_object->update_exists = true;
+ }
+ } else {
+ LOG(ERROR) << "Unknown Omaha response status: " << status;
completer->set_code(ErrorCode::kOmahaResponseInvalid);
return false;
}
- output_object->payload_urls.push_back(codebase);
+ }
+ if (!output_object->update_exists) {
+ SetOutputObject(*output_object);
+ completer->set_code(ErrorCode::kSuccess);
}
- return true;
-}
-
-bool OmahaRequestAction::ParsePackage(OmahaParserData* parser_data,
- OmahaResponse* output_object,
- ScopedActionCompleter* completer) {
- if (parser_data->package_name.empty()) {
- LOG(ERROR) << "Omaha Response has empty package name";
- completer->set_code(ErrorCode::kOmahaResponseInvalid);
- return false;
- }
-
- // Append the package name to each URL in our list so that we don't
- // propagate the urlBase vs packageName distinctions beyond this point.
- // From now on, we only need to use payload_urls.
- for (auto& payload_url : output_object->payload_urls)
- payload_url += parser_data->package_name;
-
- // Parse the payload size.
- off_t size = ParseInt(parser_data->package_size);
- if (size <= 0) {
- LOG(ERROR) << "Omaha Response has invalid payload size: " << size;
- completer->set_code(ErrorCode::kOmahaResponseInvalid);
- return false;
- }
- output_object->size = size;
-
- LOG(INFO) << "Payload size = " << output_object->size << " bytes";
-
- return true;
+ return output_object->update_exists;
}
bool OmahaRequestAction::ParseParams(OmahaParserData* parser_data,
OmahaResponse* output_object,
ScopedActionCompleter* completer) {
- output_object->version = parser_data->manifest_version;
+ map<string, string> attrs;
+ for (auto& app : parser_data->apps) {
+ if (!app.manifest_version.empty() && output_object->version.empty())
+ output_object->version = app.manifest_version;
+ if (!app.action_postinstall_attrs.empty() && attrs.empty())
+ attrs = app.action_postinstall_attrs;
+ }
if (output_object->version.empty()) {
LOG(ERROR) << "Omaha Response does not have version in manifest!";
completer->set_code(ErrorCode::kOmahaResponseInvalid);
@@ -886,24 +997,14 @@
LOG(INFO) << "Received omaha response to update to version "
<< output_object->version;
- map<string, string> attrs = parser_data->action_postinstall_attrs;
if (attrs.empty()) {
LOG(ERROR) << "Omaha Response has no postinstall event action";
completer->set_code(ErrorCode::kOmahaResponseInvalid);
return false;
}
- output_object->hash = attrs[kTagSha256];
- if (output_object->hash.empty()) {
- LOG(ERROR) << "Omaha Response has empty sha256 value";
- completer->set_code(ErrorCode::kOmahaResponseInvalid);
- return false;
- }
-
// Get the optional properties one by one.
output_object->more_info_url = attrs[kTagMoreInfo];
- output_object->metadata_size = ParseInt(attrs[kTagMetadataSize]);
- output_object->metadata_signature = attrs[kTagMetadataSignatureRsa];
output_object->prompt = ParseBool(attrs[kTagPrompt]);
output_object->deadline = attrs[kTagDeadline];
output_object->max_days_to_scatter = ParseInt(attrs[kTagMaxDaysToScatter]);
@@ -917,8 +1018,6 @@
if (!base::StringToUint(max, &output_object->max_failure_count_per_url))
output_object->max_failure_count_per_url = kDefaultMaxFailureCountPerUrl;
- output_object->is_delta_payload = ParseBool(attrs[kTagIsDeltaPayload]);
-
output_object->disable_payload_backoff =
ParseBool(attrs[kTagDisablePayloadBackoff]);
@@ -1133,10 +1232,15 @@
next_data_offset + next_data_length;
}
- string file_id = utils::CalculateP2PFileId(response.hash, response.size);
+ // TODO(senj): Fix P2P for multiple packages.
+ brillo::Blob raw_hash;
+ if (!base::HexStringToBytes(response.packages[0].hash, &raw_hash))
+ return;
+ string file_id =
+ utils::CalculateP2PFileId(raw_hash, response.packages[0].size);
if (system_state_->p2p_manager()) {
- LOG(INFO) << "Checking if payload is available via p2p, file_id="
- << file_id << " minimum_size=" << minimum_size;
+ LOG(INFO) << "Checking if payload is available via p2p, file_id=" << file_id
+ << " minimum_size=" << minimum_size;
system_state_->p2p_manager()->LookupUrlForFile(
file_id,
minimum_size,
diff --git a/omaha_request_action.h b/omaha_request_action.h
index 2915a6a..924da40 100644
--- a/omaha_request_action.h
+++ b/omaha_request_action.h
@@ -274,13 +274,6 @@
OmahaResponse* output_object,
ScopedActionCompleter* completer);
- // Parses the package node in the given XML document and populates
- // |output_object| if valid. Returns true if we should continue the parsing.
- // False otherwise, in which case it sets any error code using |completer|.
- bool ParsePackage(OmahaParserData* parser_data,
- OmahaResponse* output_object,
- ScopedActionCompleter* completer);
-
// Parses the other parameters in the given XML document and populates
// |output_object| if valid. Returns true if we should continue the parsing.
// False otherwise, in which case it sets any error code using |completer|.
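With ParsePackage() now living in omaha_request_action.cc's anonymous namespace, per-package values arrive as colon-separated lists in single postinstall attributes (kTagMetadataSize, kTagMetadataSignatureRsa, kTagIsDeltaPayload) and are matched to <package> elements by index; with the test response below, MetadataSize="11:22" gives packages[0].metadata_size = 11 and packages[1].metadata_size = 22. A small illustrative helper showing the split ParsePackage() performs:

#include <string>
#include <vector>

#include <base/strings/string_split.h>

// Splits a per-package postinstall attribute such as MetadataSize="11:22" or
// IsDeltaPayload="true:false"; element i applies to the i-th <package>.
std::vector<std::string> SplitPerPackageAttribute(const std::string& value) {
  return base::SplitString(
      value, ":", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
}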
diff --git a/omaha_request_action_unittest.cc b/omaha_request_action_unittest.cc
index 1c1d25c..9091031 100644
--- a/omaha_request_action_unittest.cc
+++ b/omaha_request_action_unittest.cc
@@ -76,47 +76,88 @@
string entity_str;
if (include_entity)
entity_str = "<!DOCTYPE response [<!ENTITY CrOS \"ChromeOS\">]>";
- return
- "<?xml version=\"1.0\" encoding=\"UTF-8\"?>" +
- entity_str + "<response protocol=\"3.0\">"
- "<daystart elapsed_seconds=\"100\"/>"
- "<app appid=\"" + app_id + "\" " +
- (include_cohorts ? "cohort=\"" + cohort + "\" cohorthint=\"" +
- cohorthint + "\" cohortname=\"" + cohortname + "\" " : "") +
- " status=\"ok\">"
- "<ping status=\"ok\"/>"
- "<updatecheck status=\"noupdate\"/></app></response>";
+ return "<?xml version=\"1.0\" encoding=\"UTF-8\"?>" + entity_str +
+ "<response protocol=\"3.0\">"
+ "<daystart elapsed_seconds=\"100\"/>"
+ "<app appid=\"" +
+ app_id + "\" " +
+ (include_cohorts
+ ? "cohort=\"" + cohort + "\" cohorthint=\"" + cohorthint +
+ "\" cohortname=\"" + cohortname + "\" "
+ : "") +
+ " status=\"ok\">"
+ "<ping status=\"ok\"/>"
+ "<updatecheck status=\"noupdate\"/></app>" +
+ (multi_app_no_update
+ ? "<app><updatecheck status=\"noupdate\"/></app>"
+ : "") +
+ "</response>";
}
string GetUpdateResponse() const {
- return
- "<?xml version=\"1.0\" encoding=\"UTF-8\"?><response "
- "protocol=\"3.0\">"
- "<daystart elapsed_seconds=\"100\"" +
- (elapsed_days.empty() ? "" : (" elapsed_days=\"" + elapsed_days + "\""))
- + "/>"
- "<app appid=\"" + app_id + "\" " +
- (include_cohorts ? "cohort=\"" + cohort + "\" cohorthint=\"" +
- cohorthint + "\" cohortname=\"" + cohortname + "\" " : "") +
- " status=\"ok\">"
- "<ping status=\"ok\"/><updatecheck status=\"ok\">"
- "<urls><url codebase=\"" + codebase + "\"/></urls>"
- "<manifest version=\"" + version + "\">"
- "<packages><package hash=\"not-used\" name=\"" + filename + "\" "
- "size=\"" + base::Int64ToString(size) + "\"/></packages>"
- "<actions><action event=\"postinstall\" "
- "ChromeOSVersion=\"" + version + "\" "
- "MoreInfo=\"" + more_info_url + "\" Prompt=\"" + prompt + "\" "
- "IsDelta=\"true\" "
- "IsDeltaPayload=\"true\" "
- "MaxDaysToScatter=\"" + max_days_to_scatter + "\" "
- "sha256=\"" + hash + "\" "
- "needsadmin=\"" + needsadmin + "\" " +
- (deadline.empty() ? "" : ("deadline=\"" + deadline + "\" ")) +
- (disable_p2p_for_downloading ?
- "DisableP2PForDownloading=\"true\" " : "") +
- (disable_p2p_for_sharing ? "DisableP2PForSharing=\"true\" " : "") +
- "/></actions></manifest></updatecheck></app></response>";
+ return "<?xml version=\"1.0\" encoding=\"UTF-8\"?><response "
+ "protocol=\"3.0\">"
+ "<daystart elapsed_seconds=\"100\"" +
+ (elapsed_days.empty() ? ""
+ : (" elapsed_days=\"" + elapsed_days + "\"")) +
+ "/>"
+ "<app appid=\"" +
+ app_id + "\" " +
+ (include_cohorts
+ ? "cohort=\"" + cohort + "\" cohorthint=\"" + cohorthint +
+ "\" cohortname=\"" + cohortname + "\" "
+ : "") +
+ " status=\"ok\">"
+ "<ping status=\"ok\"/><updatecheck status=\"ok\">"
+ "<urls><url codebase=\"" +
+ codebase +
+ "\"/></urls>"
+ "<manifest version=\"" +
+ version +
+ "\">"
+ "<packages><package hash=\"not-used\" name=\"" +
+ filename + "\" size=\"" + base::Int64ToString(size) +
+ "\" hash_sha256=\"" + hash + "\"/>" +
+ (multi_package ? "<package name=\"package2\" size=\"222\" "
+ "hash_sha256=\"hash2\"/>"
+ : "") +
+ "</packages>"
+ "<actions><action event=\"postinstall\" MetadataSize=\"11" +
+ (multi_package ? ":22" : "") + "\" ChromeOSVersion=\"" + version +
+ "\" MoreInfo=\"" + more_info_url + "\" Prompt=\"" + prompt +
+ "\" "
+ "IsDelta=\"true\" "
+ "IsDeltaPayload=\"true" +
+ (multi_package ? ":false" : "") +
+ "\" "
+ "MaxDaysToScatter=\"" +
+ max_days_to_scatter +
+ "\" "
+ "sha256=\"not-used\" "
+ "needsadmin=\"" +
+ needsadmin + "\" " +
+ (deadline.empty() ? "" : ("deadline=\"" + deadline + "\" ")) +
+ (disable_p2p_for_downloading ? "DisableP2PForDownloading=\"true\" "
+ : "") +
+ (disable_p2p_for_sharing ? "DisableP2PForSharing=\"true\" " : "") +
+ "/></actions></manifest></updatecheck></app>" +
+ (multi_app
+ ? "<app><updatecheck status=\"ok\"><urls><url codebase=\"" +
+ codebase2 +
+ "\"/></urls><manifest><packages>"
+ "<package name=\"package3\" size=\"333\" "
+ "hash_sha256=\"hash3\"/></packages>"
+ "<actions><action event=\"postinstall\" " +
+ (multi_app_self_update
+ ? "noupdate=\"true\" IsDeltaPayload=\"true\" "
+ : "IsDeltaPayload=\"false\" ") +
+ "MetadataSize=\"33\"/></actions>"
+ "</manifest></updatecheck></app>"
+ : "") +
+ (multi_app_no_update
+ ? "<app><updatecheck status=\"noupdate\"/></app>"
+ : "") +
+ "</response>";
}
// Return the payload URL, which is split in two fields in the XML response.
@@ -129,10 +170,11 @@
string more_info_url = "http://more/info";
string prompt = "true";
string codebase = "http://code/base/";
+ string codebase2 = "http://code/base/2/";
string filename = "file.signed";
- string hash = "HASH1234=";
+ string hash = "4841534831323334";
string needsadmin = "false";
- int64_t size = 123;
+ uint64_t size = 123;
string deadline = "";
string max_days_to_scatter = "7";
string elapsed_days = "42";
@@ -149,6 +191,15 @@
// Whether to include the CrOS <!ENTITY> in the XML response.
bool include_entity = false;
+
+ // Whether to include more than one app.
+ bool multi_app = false;
+ // Whether to include an app with noupdate="true".
+ bool multi_app_self_update = false;
+ // Whether to include an additional app with status="noupdate".
+ bool multi_app_no_update = false;
+ // Whether to include more than one package in an app.
+ bool multi_package = false;
};
} // namespace
@@ -430,6 +481,56 @@
EXPECT_FALSE(response.update_exists);
}
+TEST_F(OmahaRequestActionTest, MultiAppNoUpdateTest) {
+ OmahaResponse response;
+ fake_update_response_.multi_app_no_update = true;
+ ASSERT_TRUE(TestUpdateCheck(nullptr, // request_params
+ fake_update_response_.GetNoUpdateResponse(),
+ -1,
+ false, // ping_only
+ ErrorCode::kSuccess,
+ metrics::CheckResult::kNoUpdateAvailable,
+ metrics::CheckReaction::kUnset,
+ metrics::DownloadErrorCode::kUnset,
+ &response,
+ nullptr));
+ EXPECT_FALSE(response.update_exists);
+}
+
+TEST_F(OmahaRequestActionTest, MultiAppNoPartialUpdateTest) {
+ OmahaResponse response;
+ fake_update_response_.multi_app_no_update = true;
+ ASSERT_TRUE(TestUpdateCheck(nullptr, // request_params
+ fake_update_response_.GetUpdateResponse(),
+ -1,
+ false, // ping_only
+ ErrorCode::kSuccess,
+ metrics::CheckResult::kNoUpdateAvailable,
+ metrics::CheckReaction::kUnset,
+ metrics::DownloadErrorCode::kUnset,
+ &response,
+ nullptr));
+ EXPECT_FALSE(response.update_exists);
+}
+
+TEST_F(OmahaRequestActionTest, NoSelfUpdateTest) {
+ OmahaResponse response;
+ ASSERT_TRUE(TestUpdateCheck(
+ nullptr, // request_params
+ "<response><app><updatecheck status=\"ok\"><manifest><actions><action "
+ "event=\"postinstall\" noupdate=\"true\"/></actions>"
+ "</manifest></updatecheck></app></response>",
+ -1,
+ false, // ping_only
+ ErrorCode::kSuccess,
+ metrics::CheckResult::kNoUpdateAvailable,
+ metrics::CheckReaction::kUnset,
+ metrics::DownloadErrorCode::kUnset,
+ &response,
+ nullptr));
+ EXPECT_FALSE(response.update_exists);
+}
+
// Test that all the values in the response are parsed in a normal update
// response.
TEST_F(OmahaRequestActionTest, ValidUpdateTest) {
@@ -447,12 +548,13 @@
&response,
nullptr));
EXPECT_TRUE(response.update_exists);
- EXPECT_TRUE(response.update_exists);
EXPECT_EQ(fake_update_response_.version, response.version);
- EXPECT_EQ(fake_update_response_.GetPayloadUrl(), response.payload_urls[0]);
+ EXPECT_EQ(fake_update_response_.GetPayloadUrl(),
+ response.packages[0].payload_urls[0]);
EXPECT_EQ(fake_update_response_.more_info_url, response.more_info_url);
- EXPECT_EQ(fake_update_response_.hash, response.hash);
- EXPECT_EQ(fake_update_response_.size, response.size);
+ EXPECT_EQ(fake_update_response_.hash, response.packages[0].hash);
+ EXPECT_EQ(fake_update_response_.size, response.packages[0].size);
+ EXPECT_EQ(true, response.packages[0].is_delta);
EXPECT_EQ(fake_update_response_.prompt == "true", response.prompt);
EXPECT_EQ(fake_update_response_.deadline, response.deadline);
// Omaha cohort attributes are not set in the response, so they should not be
@@ -462,6 +564,131 @@
EXPECT_FALSE(fake_prefs_.Exists(kPrefsOmahaCohortName));
}
+TEST_F(OmahaRequestActionTest, MultiPackageUpdateTest) {
+ OmahaResponse response;
+ fake_update_response_.multi_package = true;
+ ASSERT_TRUE(TestUpdateCheck(nullptr, // request_params
+ fake_update_response_.GetUpdateResponse(),
+ -1,
+ false, // ping_only
+ ErrorCode::kSuccess,
+ metrics::CheckResult::kUpdateAvailable,
+ metrics::CheckReaction::kUpdating,
+ metrics::DownloadErrorCode::kUnset,
+ &response,
+ nullptr));
+ EXPECT_TRUE(response.update_exists);
+ EXPECT_EQ(fake_update_response_.version, response.version);
+ EXPECT_EQ(fake_update_response_.GetPayloadUrl(),
+ response.packages[0].payload_urls[0]);
+ EXPECT_EQ(fake_update_response_.codebase + "package2",
+ response.packages[1].payload_urls[0]);
+ EXPECT_EQ(fake_update_response_.hash, response.packages[0].hash);
+ EXPECT_EQ(fake_update_response_.size, response.packages[0].size);
+ EXPECT_EQ(true, response.packages[0].is_delta);
+ EXPECT_EQ(11u, response.packages[0].metadata_size);
+ ASSERT_EQ(2u, response.packages.size());
+ EXPECT_EQ(string("hash2"), response.packages[1].hash);
+ EXPECT_EQ(222u, response.packages[1].size);
+ EXPECT_EQ(22u, response.packages[1].metadata_size);
+ EXPECT_EQ(false, response.packages[1].is_delta);
+}
+
+TEST_F(OmahaRequestActionTest, MultiAppUpdateTest) {
+ OmahaResponse response;
+ fake_update_response_.multi_app = true;
+ ASSERT_TRUE(TestUpdateCheck(nullptr, // request_params
+ fake_update_response_.GetUpdateResponse(),
+ -1,
+ false, // ping_only
+ ErrorCode::kSuccess,
+ metrics::CheckResult::kUpdateAvailable,
+ metrics::CheckReaction::kUpdating,
+ metrics::DownloadErrorCode::kUnset,
+ &response,
+ nullptr));
+ EXPECT_TRUE(response.update_exists);
+ EXPECT_EQ(fake_update_response_.version, response.version);
+ EXPECT_EQ(fake_update_response_.GetPayloadUrl(),
+ response.packages[0].payload_urls[0]);
+ EXPECT_EQ(fake_update_response_.codebase2 + "package3",
+ response.packages[1].payload_urls[0]);
+ EXPECT_EQ(fake_update_response_.hash, response.packages[0].hash);
+ EXPECT_EQ(fake_update_response_.size, response.packages[0].size);
+ EXPECT_EQ(11u, response.packages[0].metadata_size);
+ EXPECT_EQ(true, response.packages[0].is_delta);
+ ASSERT_EQ(2u, response.packages.size());
+ EXPECT_EQ(string("hash3"), response.packages[1].hash);
+ EXPECT_EQ(333u, response.packages[1].size);
+ EXPECT_EQ(33u, response.packages[1].metadata_size);
+ EXPECT_EQ(false, response.packages[1].is_delta);
+}
+
+TEST_F(OmahaRequestActionTest, MultiAppPartialUpdateTest) {
+ OmahaResponse response;
+ fake_update_response_.multi_app = true;
+ fake_update_response_.multi_app_self_update = true;
+ ASSERT_TRUE(TestUpdateCheck(nullptr, // request_params
+ fake_update_response_.GetUpdateResponse(),
+ -1,
+ false, // ping_only
+ ErrorCode::kSuccess,
+ metrics::CheckResult::kUpdateAvailable,
+ metrics::CheckReaction::kUpdating,
+ metrics::DownloadErrorCode::kUnset,
+ &response,
+ nullptr));
+ EXPECT_TRUE(response.update_exists);
+ EXPECT_EQ(fake_update_response_.version, response.version);
+ EXPECT_EQ(fake_update_response_.GetPayloadUrl(),
+ response.packages[0].payload_urls[0]);
+ EXPECT_EQ(fake_update_response_.hash, response.packages[0].hash);
+ EXPECT_EQ(fake_update_response_.size, response.packages[0].size);
+ EXPECT_EQ(11u, response.packages[0].metadata_size);
+ ASSERT_EQ(2u, response.packages.size());
+ EXPECT_EQ(string("hash3"), response.packages[1].hash);
+ EXPECT_EQ(333u, response.packages[1].size);
+ EXPECT_EQ(33u, response.packages[1].metadata_size);
+ EXPECT_EQ(true, response.packages[1].is_delta);
+}
+
+TEST_F(OmahaRequestActionTest, MultiAppMultiPackageUpdateTest) {
+ OmahaResponse response;
+ fake_update_response_.multi_app = true;
+ fake_update_response_.multi_package = true;
+ ASSERT_TRUE(TestUpdateCheck(nullptr, // request_params
+ fake_update_response_.GetUpdateResponse(),
+ -1,
+ false, // ping_only
+ ErrorCode::kSuccess,
+ metrics::CheckResult::kUpdateAvailable,
+ metrics::CheckReaction::kUpdating,
+ metrics::DownloadErrorCode::kUnset,
+ &response,
+ nullptr));
+ EXPECT_TRUE(response.update_exists);
+ EXPECT_EQ(fake_update_response_.version, response.version);
+ EXPECT_EQ(fake_update_response_.GetPayloadUrl(),
+ response.packages[0].payload_urls[0]);
+ EXPECT_EQ(fake_update_response_.codebase + "package2",
+ response.packages[1].payload_urls[0]);
+ EXPECT_EQ(fake_update_response_.codebase2 + "package3",
+ response.packages[2].payload_urls[0]);
+ EXPECT_EQ(fake_update_response_.hash, response.packages[0].hash);
+ EXPECT_EQ(fake_update_response_.size, response.packages[0].size);
+ EXPECT_EQ(11u, response.packages[0].metadata_size);
+ EXPECT_EQ(true, response.packages[0].is_delta);
+ ASSERT_EQ(3u, response.packages.size());
+ EXPECT_EQ(string("hash2"), response.packages[1].hash);
+ EXPECT_EQ(222u, response.packages[1].size);
+ EXPECT_EQ(22u, response.packages[1].metadata_size);
+ EXPECT_EQ(false, response.packages[1].is_delta);
+ EXPECT_EQ(string("hash3"), response.packages[2].hash);
+ EXPECT_EQ(333u, response.packages[2].size);
+ EXPECT_EQ(33u, response.packages[2].metadata_size);
+ EXPECT_EQ(false, response.packages[2].is_delta);
+}
+
TEST_F(OmahaRequestActionTest, ExtraHeadersSentTest) {
const string http_response = "<?xml invalid response";
request_params_.set_interactive(true);
@@ -1041,13 +1268,13 @@
"<urls><url codebase=\"http://missing/field/test/\"/></urls>"
"<manifest version=\"10.2.3.4\">"
"<packages><package hash=\"not-used\" name=\"f\" "
- "size=\"587\"/></packages>"
+ "size=\"587\" hash_sha256=\"lkq34j5345\"/></packages>"
"<actions><action event=\"postinstall\" "
"ChromeOSVersion=\"10.2.3.4\" "
"Prompt=\"false\" "
"IsDelta=\"true\" "
"IsDeltaPayload=\"false\" "
- "sha256=\"lkq34j5345\" "
+ "sha256=\"not-used\" "
"needsadmin=\"true\" "
"/></actions></manifest></updatecheck></app></response>";
LOG(INFO) << "Input Response = " << input_response;
@@ -1065,10 +1292,11 @@
nullptr));
EXPECT_TRUE(response.update_exists);
EXPECT_EQ("10.2.3.4", response.version);
- EXPECT_EQ("http://missing/field/test/f", response.payload_urls[0]);
+ EXPECT_EQ("http://missing/field/test/f",
+ response.packages[0].payload_urls[0]);
EXPECT_EQ("", response.more_info_url);
- EXPECT_EQ("lkq34j5345", response.hash);
- EXPECT_EQ(587, response.size);
+ EXPECT_EQ("lkq34j5345", response.packages[0].hash);
+ EXPECT_EQ(587u, response.packages[0].size);
EXPECT_FALSE(response.prompt);
EXPECT_TRUE(response.deadline.empty());
}
@@ -1202,15 +1430,16 @@
&response,
nullptr));
- EXPECT_EQ(response.more_info_url, "testthe<url");
- EXPECT_EQ(response.payload_urls[0], "testthe&codebase/file.signed");
- EXPECT_EQ(response.deadline, "<20110101");
+ EXPECT_EQ("testthe<url", response.more_info_url);
+ EXPECT_EQ("testthe&codebase/file.signed",
+ response.packages[0].payload_urls[0]);
+ EXPECT_EQ("<20110101", response.deadline);
}
TEST_F(OmahaRequestActionTest, ParseIntTest) {
OmahaResponse response;
// overflows int32_t:
- fake_update_response_.size = 123123123123123ll;
+ fake_update_response_.size = 123123123123123ull;
ASSERT_TRUE(
TestUpdateCheck(nullptr, // request_params
fake_update_response_.GetUpdateResponse(),
@@ -1223,7 +1452,7 @@
&response,
nullptr));
- EXPECT_EQ(response.size, 123123123123123ll);
+ EXPECT_EQ(fake_update_response_.size, response.packages[0].size);
}
TEST_F(OmahaRequestActionTest, FormatUpdateCheckOutputTest) {
diff --git a/omaha_request_params.cc b/omaha_request_params.cc
index 3402451..3ba7037 100644
--- a/omaha_request_params.cc
+++ b/omaha_request_params.cc
@@ -77,7 +77,10 @@
LOG(INFO) << "Running from channel " << image_props_.current_channel;
os_platform_ = constants::kOmahaPlatformName;
- os_version_ = OmahaRequestParams::kOsVersion;
+ if (!image_props_.system_version.empty())
+ os_version_ = image_props_.system_version;
+ else
+ os_version_ = OmahaRequestParams::kOsVersion;
if (!in_app_version.empty())
image_props_.version = in_app_version;
diff --git a/omaha_request_params.h b/omaha_request_params.h
index 3a28ed1..f8e9438 100644
--- a/omaha_request_params.h
+++ b/omaha_request_params.h
@@ -105,10 +105,12 @@
inline std::string os_build_fingerprint() const {
return image_props_.build_fingerprint;
}
+ inline std::string os_build_type() const { return image_props_.build_type; }
inline std::string board_app_id() const { return image_props_.product_id; }
inline std::string canary_app_id() const {
return image_props_.canary_product_id;
}
+ inline std::string system_app_id() const { return image_props_.system_id; }
inline void set_app_id(const std::string& app_id) {
image_props_.product_id = app_id;
image_props_.canary_product_id = app_id;
@@ -122,6 +124,9 @@
image_props_.version = version;
}
inline std::string app_version() const { return image_props_.version; }
+ inline std::string system_version() const {
+ return image_props_.system_version;
+ }
inline std::string current_channel() const {
return image_props_.current_channel;
diff --git a/omaha_response.h b/omaha_response.h
index 60ec4ac..c702068 100644
--- a/omaha_response.h
+++ b/omaha_response.h
@@ -38,16 +38,22 @@
// These are only valid if update_exists is true:
std::string version;
- // The ordered list of URLs in the Omaha response. Each item is a complete
- // URL (i.e. in terms of Omaha XML, each value is a urlBase + packageName)
- std::vector<std::string> payload_urls;
+ struct Package {
+ // The ordered list of URLs in the Omaha response. Each item is a complete
+ // URL (i.e. in terms of Omaha XML, each value is a urlBase + packageName)
+ std::vector<std::string> payload_urls;
+ uint64_t size = 0;
+ uint64_t metadata_size = 0;
+ std::string metadata_signature;
+ std::string hash;
+ // True if the payload described in this response is a delta payload.
+ // False if it's a full payload.
+ bool is_delta = false;
+ };
+ std::vector<Package> packages;
std::string more_info_url;
- std::string hash;
- std::string metadata_signature;
std::string deadline;
- off_t size = 0;
- off_t metadata_size = 0;
int max_days_to_scatter = 0;
// The number of URL-related failures to tolerate before moving on to the
// next URL in the current pass. This is a configurable value from the
@@ -55,10 +61,6 @@
uint32_t max_failure_count_per_url = 0;
bool prompt = false;
- // True if the payload described in this response is a delta payload.
- // False if it's a full payload.
- bool is_delta_payload = false;
-
// True if the Omaha rule instructs us to disable the back-off logic
// on the client altogether. False otherwise.
bool disable_payload_backoff = false;
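For context on the new per-package layout, here is a minimal, self-contained sketch of how a consumer might concatenate the per-package hashes into the single resume-tracking string used further down in omaha_response_handler_action.cc; the Package struct below is a simplified local mirror for illustration, not the real OmahaResponse::Package.

    #include <cstdint>
    #include <string>
    #include <vector>

    // Simplified local mirror of the Package struct added above (illustrative only).
    struct Package {
      std::vector<std::string> payload_urls;
      uint64_t size = 0;
      uint64_t metadata_size = 0;
      std::string hash;  // hex-encoded SHA-256 of the payload
      bool is_delta = false;
    };

    // Joins the per-package hashes with ':' separators, matching the shape of
    // the value stored under kPrefsUpdateCheckResponseHash in the handler below.
    std::string JoinPackageHashes(const std::vector<Package>& packages) {
      std::string joined;
      for (const Package& package : packages)
        joined += package.hash + ":";
      return joined;
    }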
diff --git a/omaha_response_handler_action.cc b/omaha_response_handler_action.cc
index 33380d7..189fe6b 100644
--- a/omaha_response_handler_action.cc
+++ b/omaha_response_handler_action.cc
@@ -19,6 +19,7 @@
#include <string>
#include <base/logging.h>
+#include <base/strings/string_number_conversions.h>
#include <base/strings/string_util.h>
#include <policy/device_policy.h>
@@ -85,28 +86,40 @@
}
// Fill up the other properties based on the response.
- install_plan_.payload_size = response.size;
- install_plan_.payload_hash = response.hash;
- install_plan_.metadata_size = response.metadata_size;
- install_plan_.metadata_signature = response.metadata_signature;
+ string update_check_response_hash;
+ for (const auto& package : response.packages) {
+ brillo::Blob raw_hash;
+ if (!base::HexStringToBytes(package.hash, &raw_hash)) {
+ LOG(ERROR) << "Failed to convert payload hash from hex string to bytes: "
+ << package.hash;
+ completer.set_code(ErrorCode::kOmahaResponseInvalid);
+ return;
+ }
+ install_plan_.payloads.push_back(
+ {.size = package.size,
+ .metadata_size = package.metadata_size,
+ .metadata_signature = package.metadata_signature,
+ .hash = raw_hash,
+ .type = package.is_delta ? InstallPayloadType::kDelta
+ : InstallPayloadType::kFull});
+ update_check_response_hash += package.hash + ":";
+ }
install_plan_.public_key_rsa = response.public_key_rsa;
install_plan_.hash_checks_mandatory = AreHashChecksMandatory(response);
- install_plan_.is_resume =
- DeltaPerformer::CanResumeUpdate(system_state_->prefs(), response.hash);
+ install_plan_.is_resume = DeltaPerformer::CanResumeUpdate(
+ system_state_->prefs(), update_check_response_hash);
if (install_plan_.is_resume) {
payload_state->UpdateResumed();
} else {
payload_state->UpdateRestarted();
- LOG_IF(WARNING, !DeltaPerformer::ResetUpdateProgress(
- system_state_->prefs(), false))
+ LOG_IF(WARNING,
+ !DeltaPerformer::ResetUpdateProgress(system_state_->prefs(), false))
<< "Unable to reset the update progress.";
- LOG_IF(WARNING, !system_state_->prefs()->SetString(
- kPrefsUpdateCheckResponseHash, response.hash))
+ LOG_IF(WARNING,
+ !system_state_->prefs()->SetString(kPrefsUpdateCheckResponseHash,
+ update_check_response_hash))
<< "Unable to save the update check response hash.";
}
- install_plan_.payload_type = response.is_delta_payload
- ? InstallPayloadType::kDelta
- : InstallPayloadType::kFull;
install_plan_.source_slot = system_state_->boot_control()->GetCurrentSlot();
install_plan_.target_slot = install_plan_.source_slot == 0 ? 1 : 0;
@@ -193,12 +206,14 @@
// mandatory because we could be downloading the payload from any URL later
// on. It's really hard to do book-keeping based on each byte being
// downloaded to see whether we only used HTTPS throughout.
- for (size_t i = 0; i < response.payload_urls.size(); i++) {
- if (!base::StartsWith(response.payload_urls[i], "https://",
- base::CompareCase::INSENSITIVE_ASCII)) {
- LOG(INFO) << "Mandating payload hash checks since Omaha response "
- << "contains non-HTTPS URL(s)";
- return true;
+ for (const auto& package : response.packages) {
+ for (const string& payload_url : package.payload_urls) {
+ if (!base::StartsWith(
+ payload_url, "https://", base::CompareCase::INSENSITIVE_ASCII)) {
+ LOG(INFO) << "Mandating payload hash checks since Omaha response "
+ << "contains non-HTTPS URL(s)";
+ return true;
+ }
}
}
diff --git a/omaha_response_handler_action_unittest.cc b/omaha_response_handler_action_unittest.cc
index 60b139b..75cd819 100644
--- a/omaha_response_handler_action_unittest.cc
+++ b/omaha_response_handler_action_unittest.cc
@@ -59,6 +59,8 @@
InstallPlan* out);
FakeSystemState fake_system_state_;
+ // "Hash+"
+ const brillo::Blob expected_hash_ = {0x48, 0x61, 0x73, 0x68, 0x2b};
};
class OmahaResponseHandlerActionProcessorDelegate
@@ -90,6 +92,7 @@
"very_long_name_and_no_slashes-very_long_name_and_no_slashes"
"-the_update_a.b.c.d_DELTA_.tgz";
const char* const kBadVersion = "don't update me";
+const char* const kPayloadHashHex = "486173682b";
} // namespace
bool OmahaResponseHandlerActionTest::DoTest(
@@ -103,8 +106,11 @@
ObjectFeederAction<OmahaResponse> feeder_action;
feeder_action.set_obj(in);
if (in.update_exists && in.version != kBadVersion) {
+ string expected_hash;
+ for (const auto& package : in.packages)
+ expected_hash += package.hash + ":";
EXPECT_CALL(*(fake_system_state_.mock_prefs()),
- SetString(kPrefsUpdateCheckResponseHash, in.hash))
+ SetString(kPrefsUpdateCheckResponseHash, expected_hash))
.WillOnce(Return(true));
int slot = 1 - fake_system_state_.fake_boot_control()->GetCurrentSlot();
@@ -113,7 +119,7 @@
.WillOnce(Return(true));
}
- string current_url = in.payload_urls.size() ? in.payload_urls[0] : "";
+ string current_url = in.packages.size() ? in.packages[0].payload_urls[0] : "";
EXPECT_CALL(*(fake_system_state_.mock_payload_state()), GetCurrentUrl())
.WillRepeatedly(Return(current_url));
@@ -146,16 +152,17 @@
OmahaResponse in;
in.update_exists = true;
in.version = "a.b.c.d";
- in.payload_urls.push_back("http://foo/the_update_a.b.c.d.tgz");
+ in.packages.push_back(
+ {.payload_urls = {"http://foo/the_update_a.b.c.d.tgz"},
+ .size = 12,
+ .hash = kPayloadHashHex});
in.more_info_url = "http://more/info";
- in.hash = "HASH+";
- in.size = 12;
in.prompt = false;
in.deadline = "20101020";
InstallPlan install_plan;
EXPECT_TRUE(DoTest(in, test_deadline_file, &install_plan));
- EXPECT_EQ(in.payload_urls[0], install_plan.download_url);
- EXPECT_EQ(in.hash, install_plan.payload_hash);
+ EXPECT_EQ(in.packages[0].payload_urls[0], install_plan.download_url);
+ EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash);
EXPECT_EQ(1U, install_plan.target_slot);
string deadline;
EXPECT_TRUE(utils::ReadFile(test_deadline_file, &deadline));
@@ -171,17 +178,18 @@
OmahaResponse in;
in.update_exists = true;
in.version = "a.b.c.d";
- in.payload_urls.push_back("http://foo/the_update_a.b.c.d.tgz");
+ in.packages.push_back(
+ {.payload_urls = {"http://foo/the_update_a.b.c.d.tgz"},
+ .size = 12,
+ .hash = kPayloadHashHex});
in.more_info_url = "http://more/info";
- in.hash = "HASHj+";
- in.size = 12;
in.prompt = true;
InstallPlan install_plan;
// Set the other slot as current.
fake_system_state_.fake_boot_control()->SetCurrentSlot(1);
EXPECT_TRUE(DoTest(in, test_deadline_file, &install_plan));
- EXPECT_EQ(in.payload_urls[0], install_plan.download_url);
- EXPECT_EQ(in.hash, install_plan.payload_hash);
+ EXPECT_EQ(in.packages[0].payload_urls[0], install_plan.download_url);
+ EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash);
EXPECT_EQ(0U, install_plan.target_slot);
string deadline;
EXPECT_TRUE(utils::ReadFile(test_deadline_file, &deadline) &&
@@ -192,17 +200,16 @@
OmahaResponse in;
in.update_exists = true;
in.version = "a.b.c.d";
- in.payload_urls.push_back(kLongName);
+ in.packages.push_back(
+ {.payload_urls = {kLongName}, .size = 12, .hash = kPayloadHashHex});
in.more_info_url = "http://more/info";
- in.hash = "HASHj+";
- in.size = 12;
in.prompt = true;
in.deadline = "some-deadline";
InstallPlan install_plan;
fake_system_state_.fake_boot_control()->SetCurrentSlot(0);
EXPECT_TRUE(DoTest(in, test_deadline_file, &install_plan));
- EXPECT_EQ(in.payload_urls[0], install_plan.download_url);
- EXPECT_EQ(in.hash, install_plan.payload_hash);
+ EXPECT_EQ(in.packages[0].payload_urls[0], install_plan.download_url);
+ EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash);
EXPECT_EQ(1U, install_plan.target_slot);
string deadline;
EXPECT_TRUE(utils::ReadFile(test_deadline_file, &deadline));
@@ -219,22 +226,45 @@
EXPECT_TRUE(install_plan.partitions.empty());
}
+TEST_F(OmahaResponseHandlerActionTest, MultiPackageTest) {
+ OmahaResponse in;
+ in.update_exists = true;
+ in.version = "a.b.c.d";
+ in.packages.push_back({.payload_urls = {"http://package/1"},
+ .size = 1,
+ .hash = kPayloadHashHex});
+ in.packages.push_back({.payload_urls = {"http://package/2"},
+ .size = 2,
+ .hash = kPayloadHashHex});
+ in.more_info_url = "http://more/info";
+ InstallPlan install_plan;
+ EXPECT_TRUE(DoTest(in, "", &install_plan));
+ EXPECT_EQ(in.packages[0].payload_urls[0], install_plan.download_url);
+ EXPECT_EQ(2u, install_plan.payloads.size());
+ EXPECT_EQ(in.packages[0].size, install_plan.payloads[0].size);
+ EXPECT_EQ(in.packages[1].size, install_plan.payloads[1].size);
+ EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash);
+ EXPECT_EQ(expected_hash_, install_plan.payloads[1].hash);
+ EXPECT_EQ(in.version, install_plan.version);
+}
+
TEST_F(OmahaResponseHandlerActionTest, HashChecksForHttpTest) {
OmahaResponse in;
in.update_exists = true;
in.version = "a.b.c.d";
- in.payload_urls.push_back("http://test.should/need/hash.checks.signed");
+ in.packages.push_back(
+ {.payload_urls = {"http://test.should/need/hash.checks.signed"},
+ .size = 12,
+ .hash = kPayloadHashHex});
in.more_info_url = "http://more/info";
- in.hash = "HASHj+";
- in.size = 12;
// Hash checks are always skipped for non-official update URLs.
EXPECT_CALL(*(fake_system_state_.mock_request_params()),
IsUpdateUrlOfficial())
.WillRepeatedly(Return(true));
InstallPlan install_plan;
EXPECT_TRUE(DoTest(in, "", &install_plan));
- EXPECT_EQ(in.payload_urls[0], install_plan.download_url);
- EXPECT_EQ(in.hash, install_plan.payload_hash);
+ EXPECT_EQ(in.packages[0].payload_urls[0], install_plan.download_url);
+ EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash);
EXPECT_TRUE(install_plan.hash_checks_mandatory);
EXPECT_EQ(in.version, install_plan.version);
}
@@ -243,17 +273,18 @@
OmahaResponse in;
in.update_exists = true;
in.version = "a.b.c.d";
- in.payload_urls.push_back("http://url.normally/needs/hash.checks.signed");
+ in.packages.push_back(
+ {.payload_urls = {"http://url.normally/needs/hash.checks.signed"},
+ .size = 12,
+ .hash = kPayloadHashHex});
in.more_info_url = "http://more/info";
- in.hash = "HASHj+";
- in.size = 12;
EXPECT_CALL(*(fake_system_state_.mock_request_params()),
IsUpdateUrlOfficial())
.WillRepeatedly(Return(false));
InstallPlan install_plan;
EXPECT_TRUE(DoTest(in, "", &install_plan));
- EXPECT_EQ(in.payload_urls[0], install_plan.download_url);
- EXPECT_EQ(in.hash, install_plan.payload_hash);
+ EXPECT_EQ(in.packages[0].payload_urls[0], install_plan.download_url);
+ EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash);
EXPECT_FALSE(install_plan.hash_checks_mandatory);
EXPECT_EQ(in.version, install_plan.version);
}
@@ -264,18 +295,19 @@
OmahaResponse in;
in.update_exists = true;
in.version = "a.b.c.d";
- in.payload_urls.push_back("http://url.normally/needs/hash.checks.signed");
+ in.packages.push_back(
+ {.payload_urls = {"http://url.normally/needs/hash.checks.signed"},
+ .size = 12,
+ .hash = kPayloadHashHex});
in.more_info_url = "http://more/info";
- in.hash = "HASHj+";
- in.size = 12;
EXPECT_CALL(*(fake_system_state_.mock_request_params()),
IsUpdateUrlOfficial())
.WillRepeatedly(Return(true));
fake_system_state_.fake_hardware()->SetIsOfficialBuild(false);
InstallPlan install_plan;
EXPECT_TRUE(DoTest(in, "", &install_plan));
- EXPECT_EQ(in.payload_urls[0], install_plan.download_url);
- EXPECT_EQ(in.hash, install_plan.payload_hash);
+ EXPECT_EQ(in.packages[0].payload_urls[0], install_plan.download_url);
+ EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash);
EXPECT_FALSE(install_plan.hash_checks_mandatory);
EXPECT_EQ(in.version, install_plan.version);
}
@@ -284,17 +316,18 @@
OmahaResponse in;
in.update_exists = true;
in.version = "a.b.c.d";
- in.payload_urls.push_back("https://test.should.not/need/hash.checks.signed");
+ in.packages.push_back(
+ {.payload_urls = {"https://test.should/need/hash.checks.signed"},
+ .size = 12,
+ .hash = kPayloadHashHex});
in.more_info_url = "http://more/info";
- in.hash = "HASHj+";
- in.size = 12;
EXPECT_CALL(*(fake_system_state_.mock_request_params()),
IsUpdateUrlOfficial())
.WillRepeatedly(Return(true));
InstallPlan install_plan;
EXPECT_TRUE(DoTest(in, "", &install_plan));
- EXPECT_EQ(in.payload_urls[0], install_plan.download_url);
- EXPECT_EQ(in.hash, install_plan.payload_hash);
+ EXPECT_EQ(in.packages[0].payload_urls[0], install_plan.download_url);
+ EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash);
EXPECT_FALSE(install_plan.hash_checks_mandatory);
EXPECT_EQ(in.version, install_plan.version);
}
@@ -303,18 +336,19 @@
OmahaResponse in;
in.update_exists = true;
in.version = "a.b.c.d";
- in.payload_urls.push_back("http://test.should.still/need/hash.checks");
- in.payload_urls.push_back("https://test.should.still/need/hash.checks");
+ in.packages.push_back(
+ {.payload_urls = {"http://test.should.still/need/hash.checks",
+ "https://test.should.still/need/hash.checks"},
+ .size = 12,
+ .hash = kPayloadHashHex});
in.more_info_url = "http://more/info";
- in.hash = "HASHj+";
- in.size = 12;
EXPECT_CALL(*(fake_system_state_.mock_request_params()),
IsUpdateUrlOfficial())
.WillRepeatedly(Return(true));
InstallPlan install_plan;
EXPECT_TRUE(DoTest(in, "", &install_plan));
- EXPECT_EQ(in.payload_urls[0], install_plan.download_url);
- EXPECT_EQ(in.hash, install_plan.payload_hash);
+ EXPECT_EQ(in.packages[0].payload_urls[0], install_plan.download_url);
+ EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash);
EXPECT_TRUE(install_plan.hash_checks_mandatory);
EXPECT_EQ(in.version, install_plan.version);
}
@@ -323,10 +357,10 @@
OmahaResponse in;
in.update_exists = true;
in.version = "a.b.c.d";
- in.payload_urls.push_back("https://MoreStableChannelTest");
+ in.packages.push_back({.payload_urls = {"https://MoreStableChannelTest"},
+ .size = 1,
+ .hash = kPayloadHashHex});
in.more_info_url = "http://more/info";
- in.hash = "HASHjk";
- in.size = 15;
// Create a uniquely named test directory.
base::ScopedTempDir tempdir;
@@ -358,10 +392,10 @@
OmahaResponse in;
in.update_exists = true;
in.version = "a.b.c.d";
- in.payload_urls.push_back("https://LessStableChannelTest");
+ in.packages.push_back({.payload_urls = {"https://LessStableChannelTest"},
+ .size = 15,
+ .hash = kPayloadHashHex});
in.more_info_url = "http://more/info";
- in.hash = "HASHjk";
- in.size = 15;
// Create a uniquely named test directory.
base::ScopedTempDir tempdir;
@@ -393,10 +427,11 @@
OmahaResponse in;
in.update_exists = true;
in.version = "a.b.c.d";
- in.payload_urls.push_back("https://would.not/cause/hash/checks");
+ in.packages.push_back(
+ {.payload_urls = {"https://would.not/cause/hash/checks"},
+ .size = 12,
+ .hash = kPayloadHashHex});
in.more_info_url = "http://more/info";
- in.hash = "HASHj+";
- in.size = 12;
OmahaRequestParams params(&fake_system_state_);
// We're using a real OmahaRequestParams object here so we can't mock
@@ -416,8 +451,8 @@
InstallPlan install_plan;
EXPECT_TRUE(DoTest(in, "", &install_plan));
- EXPECT_EQ(in.hash, install_plan.payload_hash);
- EXPECT_EQ(install_plan.download_url, p2p_url);
+ EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash);
+ EXPECT_EQ(p2p_url, install_plan.download_url);
EXPECT_TRUE(install_plan.hash_checks_mandatory);
}
diff --git a/payload_consumer/delta_performer.cc b/payload_consumer/delta_performer.cc
index c406493..e158b33 100644
--- a/payload_consumer/delta_performer.cc
+++ b/payload_consumer/delta_performer.cc
@@ -145,8 +145,8 @@
const char* name;
};
const vector<blkioctl_request> blkioctl_requests = {
- {BLKSECDISCARD, "BLKSECDISCARD"},
{BLKDISCARD, "BLKDISCARD"},
+ {BLKSECDISCARD, "BLKSECDISCARD"},
#ifdef BLKZEROOUT
{BLKZEROOUT, "BLKZEROOUT"},
#endif
@@ -187,7 +187,7 @@
}
// Format download total count and percentage.
- size_t payload_size = install_plan_->payload_size;
+ size_t payload_size = payload_->size;
string payload_size_str("?");
string downloaded_percentage_str("");
if (payload_size) {
@@ -222,7 +222,7 @@
// eliminated once we ensure that the payload_size in the install plan is
// always given and is non-zero. This currently isn't the case during unit
// tests (see chromium-os:37969).
- size_t payload_size = install_plan_->payload_size;
+ size_t payload_size = payload_->size;
unsigned actual_operations_weight = kProgressOperationsWeight;
if (payload_size)
new_overall_progress += min(
@@ -335,10 +335,14 @@
return false;
const PartitionUpdate& partition = partitions_[current_partition_];
+ size_t num_previous_partitions =
+ install_plan_->partitions.size() - partitions_.size();
+ const InstallPlan::Partition& install_part =
+ install_plan_->partitions[num_previous_partitions + current_partition_];
// Open source fds if we have a delta payload with minor version >= 2.
- if (install_plan_->payload_type == InstallPayloadType::kDelta &&
+ if (payload_->type == InstallPayloadType::kDelta &&
GetMinorVersion() != kInPlaceMinorPayloadVersion) {
- source_path_ = install_plan_->partitions[current_partition_].source_path;
+ source_path_ = install_part.source_path;
int err;
source_fd_ = OpenFile(source_path_.c_str(), O_RDONLY, &err);
if (!source_fd_) {
@@ -350,7 +354,7 @@
}
}
- target_path_ = install_plan_->partitions[current_partition_].target_path;
+ target_path_ = install_part.target_path;
int err;
target_fd_ = OpenFile(target_path_.c_str(), O_RDWR, &err);
if (!target_fd_) {
@@ -366,8 +370,7 @@
<< "\"";
// Discard the end of the partition, but ignore failures.
- DiscardPartitionTail(
- target_fd_, install_plan_->partitions[current_partition_].target_size);
+ DiscardPartitionTail(target_fd_, install_part.target_size);
return true;
}
@@ -428,7 +431,7 @@
if (manifest_.has_minor_version()) {
return manifest_.minor_version();
} else {
- return install_plan_->payload_type == InstallPayloadType::kDelta
+ return payload_->type == InstallPayloadType::kDelta
? kSupportedMinorPayloadVersion
: kFullPayloadMinorVersion;
}
@@ -518,9 +521,9 @@
// beyond the expected metadata size.
metadata_size_ = manifest_offset + manifest_size_;
if (install_plan_->hash_checks_mandatory) {
- if (install_plan_->metadata_size != metadata_size_) {
+ if (payload_->metadata_size != metadata_size_) {
LOG(ERROR) << "Mandatory metadata size in Omaha response ("
- << install_plan_->metadata_size
+ << payload_->metadata_size
<< ") is missing/incorrect, actual = " << metadata_size_;
*error = ErrorCode::kDownloadInvalidMetadataSize;
return kMetadataParseError;
@@ -537,13 +540,13 @@
// here. This is logged here (after we received the full metadata data) so
// that we just log once (instead of logging n times) if it takes n
// DeltaPerformer::Write calls to download the full manifest.
- if (install_plan_->metadata_size == metadata_size_) {
+ if (payload_->metadata_size == metadata_size_) {
LOG(INFO) << "Manifest size in payload matches expected value from Omaha";
} else {
// For mandatory-cases, we'd have already returned a kMetadataParseError
// above. We'll be here only for non-mandatory cases. Just send a UMA stat.
LOG(WARNING) << "Ignoring missing/incorrect metadata size ("
- << install_plan_->metadata_size
+ << payload_->metadata_size
<< ") in Omaha response as validation is not mandatory. "
<< "Trusting metadata size in payload = " << metadata_size_;
}
@@ -623,6 +626,12 @@
if (!ParseManifestPartitions(error))
return false;
+ // |install_plan.partitions| was filled in, so nothing needs to be done here
+ // if the payload was already applied; return false to terminate the HTTP
+ // fetcher, but keep |error| as ErrorCode::kSuccess.
+ if (payload_->already_applied)
+ return false;
+
num_total_operations_ = 0;
for (const auto& partition : partitions_) {
num_total_operations_ += partition.operations_size();
@@ -687,7 +696,7 @@
// NOTE: If hash checks are mandatory and if metadata_signature is empty,
// we would have already failed in ParsePayloadMetadata method and thus not
// even be here. So no need to handle that case again here.
- if (!install_plan_->metadata_signature.empty()) {
+ if (!payload_->metadata_signature.empty()) {
// Note: Validate must be called only if CanPerformInstallOperation is
// called. Otherwise, we might be failing operations before even if there
// isn't sufficient data to compute the proper hash.
@@ -831,7 +840,6 @@
// Fill in the InstallPlan::partitions based on the partitions from the
// payload.
- install_plan_->partitions.clear();
for (const auto& partition : partitions_) {
InstallPlan::Partition install_part;
install_part.name = partition.partition_name();
@@ -1323,18 +1331,18 @@
return ErrorCode::kDownloadMetadataSignatureError;
brillo::Blob metadata_signature_blob, metadata_signature_protobuf_blob;
- if (!install_plan_->metadata_signature.empty()) {
+ if (!payload_->metadata_signature.empty()) {
// Convert base64-encoded signature to raw bytes.
- if (!brillo::data_encoding::Base64Decode(
- install_plan_->metadata_signature, &metadata_signature_blob)) {
+ if (!brillo::data_encoding::Base64Decode(payload_->metadata_signature,
+ &metadata_signature_blob)) {
LOG(ERROR) << "Unable to decode base64 metadata signature: "
- << install_plan_->metadata_signature;
+ << payload_->metadata_signature;
return ErrorCode::kDownloadMetadataSignatureError;
}
} else if (major_payload_version_ == kBrilloMajorPayloadVersion) {
- metadata_signature_protobuf_blob.assign(payload.begin() + metadata_size_,
- payload.begin() + metadata_size_ +
- metadata_signature_size_);
+ metadata_signature_protobuf_blob.assign(
+ payload.begin() + metadata_size_,
+ payload.begin() + metadata_size_ + metadata_signature_size_);
}
if (metadata_signature_blob.empty() &&
@@ -1361,14 +1369,13 @@
LOG(INFO) << "Verifying metadata hash signature using public key: "
<< path_to_public_key.value();
- HashCalculator metadata_hasher;
- metadata_hasher.Update(payload.data(), metadata_size_);
- if (!metadata_hasher.Finalize()) {
+ brillo::Blob calculated_metadata_hash;
+ if (!HashCalculator::RawHashOfBytes(
+ payload.data(), metadata_size_, &calculated_metadata_hash)) {
LOG(ERROR) << "Unable to compute actual hash of manifest";
return ErrorCode::kDownloadMetadataSignatureVerificationError;
}
- brillo::Blob calculated_metadata_hash = metadata_hasher.raw_hash();
PayloadVerifier::PadRSA2048SHA256Hash(&calculated_metadata_hash);
if (calculated_metadata_hash.empty()) {
LOG(ERROR) << "Computed actual hash of metadata is empty.";
@@ -1420,14 +1427,14 @@
InstallPayloadType actual_payload_type =
has_old_fields ? InstallPayloadType::kDelta : InstallPayloadType::kFull;
- if (install_plan_->payload_type == InstallPayloadType::kUnknown) {
+ if (payload_->type == InstallPayloadType::kUnknown) {
LOG(INFO) << "Detected a '"
<< InstallPayloadTypeToString(actual_payload_type)
<< "' payload.";
- install_plan_->payload_type = actual_payload_type;
- } else if (install_plan_->payload_type != actual_payload_type) {
+ payload_->type = actual_payload_type;
+ } else if (payload_->type != actual_payload_type) {
LOG(ERROR) << "InstallPlan expected a '"
- << InstallPayloadTypeToString(install_plan_->payload_type)
+ << InstallPayloadTypeToString(payload_->type)
<< "' payload but the downloaded manifest contains a '"
<< InstallPayloadTypeToString(actual_payload_type)
<< "' payload.";
@@ -1523,15 +1530,14 @@
(operation.data_sha256_hash().data() +
operation.data_sha256_hash().size()));
- HashCalculator operation_hasher;
- operation_hasher.Update(buffer_.data(), operation.data_length());
- if (!operation_hasher.Finalize()) {
+ brillo::Blob calculated_op_hash;
+ if (!HashCalculator::RawHashOfBytes(
+ buffer_.data(), operation.data_length(), &calculated_op_hash)) {
LOG(ERROR) << "Unable to compute actual hash of operation "
<< next_operation_num_;
return ErrorCode::kDownloadOperationHashVerificationError;
}
- brillo::Blob calculated_op_hash = operation_hasher.raw_hash();
if (calculated_op_hash != expected_op_hash) {
LOG(ERROR) << "Hash verification failed for operation "
<< next_operation_num_ << ". Expected hash = ";
@@ -1554,7 +1560,7 @@
} while (0);
ErrorCode DeltaPerformer::VerifyPayload(
- const string& update_check_response_hash,
+ const brillo::Blob& update_check_response_hash,
const uint64_t update_check_response_size) {
// See if we should use the public RSA key in the Omaha response.
@@ -1576,11 +1582,11 @@
buffer_offset_);
// Verifies the payload hash.
- const string& payload_hash_data = payload_hash_calculator_.hash();
TEST_AND_RETURN_VAL(ErrorCode::kDownloadPayloadVerificationError,
- !payload_hash_data.empty());
- TEST_AND_RETURN_VAL(ErrorCode::kPayloadHashMismatchError,
- payload_hash_data == update_check_response_hash);
+ !payload_hash_calculator_.raw_hash().empty());
+ TEST_AND_RETURN_VAL(
+ ErrorCode::kPayloadHashMismatchError,
+ payload_hash_calculator_.raw_hash() == update_check_response_hash);
// Verifies the signed payload hash.
if (!utils::FileExists(path_to_public_key.value().c_str())) {
@@ -1681,7 +1687,6 @@
TEST_AND_RETURN_FALSE(prefs->SetInt64(kPrefsUpdateStateNextOperation,
kUpdateStateOperationInvalid));
if (!quick) {
- prefs->SetString(kPrefsUpdateCheckResponseHash, "");
prefs->SetInt64(kPrefsUpdateStateNextDataOffset, -1);
prefs->SetInt64(kPrefsUpdateStateNextDataLength, 0);
prefs->SetString(kPrefsUpdateStateSHA256Context, "");
diff --git a/payload_consumer/delta_performer.h b/payload_consumer/delta_performer.h
index 71d7178..f363a4c 100644
--- a/payload_consumer/delta_performer.h
+++ b/payload_consumer/delta_performer.h
@@ -78,12 +78,14 @@
BootControlInterface* boot_control,
HardwareInterface* hardware,
DownloadActionDelegate* download_delegate,
- InstallPlan* install_plan)
+ InstallPlan* install_plan,
+ InstallPlan::Payload* payload)
: prefs_(prefs),
boot_control_(boot_control),
hardware_(hardware),
download_delegate_(download_delegate),
- install_plan_(install_plan) {}
+ install_plan_(install_plan),
+ payload_(payload) {}
// FileWriter's Write implementation where caller doesn't care about
// error codes.
@@ -113,13 +115,13 @@
bool IsManifestValid();
// Verifies the downloaded payload against the signed hash included in the
- // payload, against the update check hash (which is in base64 format) and
- // size using the public key and returns ErrorCode::kSuccess on success, an
- // error code on failure. This method should be called after closing the
- // stream. Note this method skips the signed hash check if the public key is
- // unavailable; it returns ErrorCode::kSignedDeltaPayloadExpectedError if the
- // public key is available but the delta payload doesn't include a signature.
- ErrorCode VerifyPayload(const std::string& update_check_response_hash,
+ // payload, against the update check hash and size using the public key and
+ // returns ErrorCode::kSuccess on success, an error code on failure.
+ // This method should be called after closing the stream. Note this method
+ // skips the signed hash check if the public key is unavailable; it returns
+ // ErrorCode::kSignedDeltaPayloadExpectedError if the public key is available
+ // but the delta payload doesn't include a signature.
+ ErrorCode VerifyPayload(const brillo::Blob& update_check_response_hash,
const uint64_t update_check_response_size);
// Converts an ordered collection of Extent objects which contain data of
@@ -303,6 +305,9 @@
// Install Plan based on Omaha Response.
InstallPlan* install_plan_;
+ // Pointer to the current payload in install_plan_.payloads.
+ InstallPlan::Payload* payload_{nullptr};
+
// File descriptor of the source partition. Only set while updating a
// partition when using a delta payload.
FileDescriptorPtr source_fd_{nullptr};
diff --git a/payload_consumer/delta_performer_integration_test.cc b/payload_consumer/delta_performer_integration_test.cc
index afbb8dc..bc67d93 100644
--- a/payload_consumer/delta_performer_integration_test.cc
+++ b/payload_consumer/delta_performer_integration_test.cc
@@ -720,10 +720,10 @@
// Update the A image in place.
InstallPlan* install_plan = &state->install_plan;
install_plan->hash_checks_mandatory = hash_checks_mandatory;
- install_plan->metadata_size = state->metadata_size;
- install_plan->payload_type = (full_kernel && full_rootfs)
- ? InstallPayloadType::kFull
- : InstallPayloadType::kDelta;
+ install_plan->payloads = {{.metadata_size = state->metadata_size,
+ .type = (full_kernel && full_rootfs)
+ ? InstallPayloadType::kFull
+ : InstallPayloadType::kDelta}};
install_plan->source_slot = 0;
install_plan->target_slot = 1;
@@ -739,14 +739,15 @@
state->delta.data(),
state->metadata_size,
GetBuildArtifactsPath(kUnittestPrivateKeyPath),
- &install_plan->metadata_signature));
- EXPECT_FALSE(install_plan->metadata_signature.empty());
+ &install_plan->payloads[0].metadata_signature));
+ EXPECT_FALSE(install_plan->payloads[0].metadata_signature.empty());
*performer = new DeltaPerformer(&prefs,
&state->fake_boot_control_,
&state->fake_hardware_,
&state->mock_delegate_,
- install_plan);
+ install_plan,
+ &install_plan->payloads[0]);
string public_key_path = GetBuildArtifactsPath(kUnittestPublicKeyPath);
EXPECT_TRUE(utils::FileExists(public_key_path.c_str()));
(*performer)->set_public_key_path(public_key_path);
@@ -761,9 +762,8 @@
state->old_kernel_data,
&kernel_part.source_hash));
- // This partitions are normally filed by the FilesystemVerifierAction with
- // the source hashes used for deltas.
- install_plan->partitions = {root_part, kernel_part};
+ // The partitions should be empty before DeltaPerformer.
+ install_plan->partitions.clear();
// With minor version 2, we want the target to be the new image, result_img,
// but with version 1, we want to update A in place.
@@ -854,11 +854,11 @@
int expected_times = (expected_result == ErrorCode::kSuccess) ? 1 : 0;
EXPECT_CALL(state->mock_delegate_, DownloadComplete()).Times(expected_times);
- LOG(INFO) << "Verifying payload for expected result "
- << expected_result;
- EXPECT_EQ(expected_result, performer->VerifyPayload(
- HashCalculator::HashOfData(state->delta),
- state->delta.size()));
+ LOG(INFO) << "Verifying payload for expected result " << expected_result;
+ brillo::Blob expected_hash;
+ HashCalculator::RawHashOfData(state->delta, &expected_hash);
+ EXPECT_EQ(expected_result,
+ performer->VerifyPayload(expected_hash, state->delta.size()));
LOG(INFO) << "Verified payload.";
if (expected_result != ErrorCode::kSuccess) {
diff --git a/payload_consumer/delta_performer_unittest.cc b/payload_consumer/delta_performer_unittest.cc
index ad0c301..fbdf1ab 100644
--- a/payload_consumer/delta_performer_unittest.cc
+++ b/payload_consumer/delta_performer_unittest.cc
@@ -103,7 +103,7 @@
uint64_t major_version,
InstallPayloadType payload_type,
ErrorCode expected) {
- install_plan_.payload_type = payload_type;
+ payload_.type = payload_type;
// The Manifest we are validating.
performer_.manifest_.CopyFrom(manifest);
@@ -164,7 +164,7 @@
string private_key =
sign_payload ? GetBuildArtifactsPath(kUnittestPrivateKeyPath) : "";
EXPECT_TRUE(payload.WritePayload(
- payload_path, blob_path, private_key, &install_plan_.metadata_size));
+ payload_path, blob_path, private_key, &payload_.metadata_size));
brillo::Blob payload_data;
EXPECT_TRUE(utils::ReadFile(payload_path, &payload_data));
@@ -231,7 +231,7 @@
uint64_t version = htobe64(kChromeOSMajorPayloadVersion);
EXPECT_TRUE(performer_.Write(&version, 8));
- install_plan_.metadata_size = expected_metadata_size;
+ payload_.metadata_size = expected_metadata_size;
ErrorCode error_code;
// When filling in size in manifest, exclude the size of the 20-byte header.
uint64_t size_in_manifest = htobe64(actual_metadata_size - 20);
@@ -268,13 +268,13 @@
// Fill up the metadata signature in install plan according to the test.
switch (metadata_signature_test) {
case kEmptyMetadataSignature:
- install_plan_.metadata_signature.clear();
+ payload_.metadata_signature.clear();
expected_result = DeltaPerformer::kMetadataParseError;
expected_error = ErrorCode::kDownloadMetadataSignatureMissingError;
break;
case kInvalidMetadataSignature:
- install_plan_.metadata_signature = kBogusMetadataSignature1;
+ payload_.metadata_signature = kBogusMetadataSignature1;
expected_result = DeltaPerformer::kMetadataParseError;
expected_error = ErrorCode::kDownloadMetadataSignatureMismatch;
break;
@@ -286,10 +286,10 @@
// then we can get to manifest signature checks.
ASSERT_TRUE(PayloadSigner::GetMetadataSignature(
payload.data(),
- install_plan_.metadata_size,
+ payload_.metadata_size,
GetBuildArtifactsPath(kUnittestPrivateKeyPath),
- &install_plan_.metadata_signature));
- EXPECT_FALSE(install_plan_.metadata_signature.empty());
+ &payload_.metadata_signature));
+ EXPECT_FALSE(payload_.metadata_signature.empty());
expected_result = DeltaPerformer::kMetadataParseSuccess;
expected_error = ErrorCode::kSuccess;
break;
@@ -317,7 +317,7 @@
// Check that the parsed metadata size is what's expected. This test
// implicitly confirms that the metadata signature is valid, if required.
- EXPECT_EQ(install_plan_.metadata_size, performer_.GetMetadataSize());
+ EXPECT_EQ(payload_.metadata_size, performer_.GetMetadataSize());
}
void SetSupportedMajorVersion(uint64_t major_version) {
@@ -325,15 +325,20 @@
}
FakePrefs prefs_;
InstallPlan install_plan_;
+ InstallPlan::Payload payload_;
FakeBootControl fake_boot_control_;
FakeHardware fake_hardware_;
MockDownloadActionDelegate mock_delegate_;
- DeltaPerformer performer_{
- &prefs_, &fake_boot_control_, &fake_hardware_, &mock_delegate_, &install_plan_};
+ DeltaPerformer performer_{&prefs_,
+ &fake_boot_control_,
+ &fake_hardware_,
+ &mock_delegate_,
+ &install_plan_,
+ &payload_};
};
TEST_F(DeltaPerformerTest, FullPayloadWriteTest) {
- install_plan_.payload_type = InstallPayloadType::kFull;
+ payload_.type = InstallPayloadType::kFull;
brillo::Blob expected_data = brillo::Blob(std::begin(kRandomString),
std::end(kRandomString));
expected_data.resize(4096); // block size
@@ -352,7 +357,7 @@
}
TEST_F(DeltaPerformerTest, ShouldCancelTest) {
- install_plan_.payload_type = InstallPayloadType::kFull;
+ payload_.type = InstallPayloadType::kFull;
brillo::Blob expected_data = brillo::Blob(std::begin(kRandomString),
std::end(kRandomString));
expected_data.resize(4096); // block size
@@ -680,7 +685,7 @@
install_plan_.hash_checks_mandatory = true;
// Just set these value so that we can use ValidateMetadataSignature directly.
performer_.major_payload_version_ = kBrilloMajorPayloadVersion;
- performer_.metadata_size_ = install_plan_.metadata_size;
+ performer_.metadata_size_ = payload_.metadata_size;
uint64_t signature_length;
EXPECT_TRUE(PayloadSigner::SignatureBlobLength(
{GetBuildArtifactsPath(kUnittestPrivateKeyPath)}, &signature_length));
diff --git a/payload_consumer/download_action.cc b/payload_consumer/download_action.cc
index 084848e..c3a5016 100644
--- a/payload_consumer/download_action.cc
+++ b/payload_consumer/download_action.cc
@@ -23,11 +23,11 @@
#include <vector>
#include <base/files/file_path.h>
-#include <base/strings/stringprintf.h>
#include "update_engine/common/action_pipe.h"
#include "update_engine/common/boot_control_interface.h"
#include "update_engine/common/error_code_utils.h"
+#include "update_engine/common/multi_range_http_fetcher.h"
#include "update_engine/common/utils.h"
#include "update_engine/omaha_request_params.h"
#include "update_engine/p2p_manager.h"
@@ -48,14 +48,12 @@
boot_control_(boot_control),
hardware_(hardware),
system_state_(system_state),
- http_fetcher_(http_fetcher),
+ http_fetcher_(new MultiRangeHttpFetcher(http_fetcher)),
writer_(nullptr),
code_(ErrorCode::kSuccess),
delegate_(nullptr),
- bytes_received_(0),
p2p_sharing_fd_(-1),
- p2p_visible_(true) {
-}
+ p2p_visible_(true) {}
DownloadAction::~DownloadAction() {}
@@ -84,7 +82,7 @@
bool DownloadAction::SetupP2PSharingFd() {
P2PManager *p2p_manager = system_state_->p2p_manager();
- if (!p2p_manager->FileShare(p2p_file_id_, install_plan_.payload_size)) {
+ if (!p2p_manager->FileShare(p2p_file_id_, payload_->size)) {
LOG(ERROR) << "Unable to share file via p2p";
CloseP2PSharingFd(true); // delete p2p file
return false;
@@ -172,10 +170,28 @@
// Get the InstallPlan and read it
CHECK(HasInputObject());
install_plan_ = GetInputObject();
- bytes_received_ = 0;
-
install_plan_.Dump();
+ bytes_received_ = 0;
+ bytes_total_ = 0;
+ for (const auto& payload : install_plan_.payloads)
+ bytes_total_ += payload.size;
+
+ if (install_plan_.is_resume) {
+ int64_t payload_index = 0;
+ if (prefs_->GetInt64(kPrefsUpdateStatePayloadIndex, &payload_index) &&
+ static_cast<size_t>(payload_index) < install_plan_.payloads.size()) {
+ // Save the index for the resume payload before downloading any previous
+ // payload, otherwise it will be overwritten.
+ resume_payload_index_ = payload_index;
+ for (int i = 0; i < payload_index; i++)
+ install_plan_.payloads[i].already_applied = true;
+ }
+ }
+ // TODO(senj): check that install plan has at least one payload.
+ if (!payload_)
+ payload_ = &install_plan_.payloads[0];
+
LOG(INFO) << "Marking new slot as unbootable";
if (!boot_control_->MarkSlotUnbootable(install_plan_.target_slot)) {
LOG(WARNING) << "Unable to mark new slot "
@@ -183,19 +199,54 @@
<< ". Proceeding with the update anyway.";
}
- if (writer_) {
+ StartDownloading();
+}
+
+void DownloadAction::StartDownloading() {
+ download_active_ = true;
+ http_fetcher_->ClearRanges();
+ if (install_plan_.is_resume &&
+ payload_ == &install_plan_.payloads[resume_payload_index_]) {
+ // Resuming an update so fetch the update manifest metadata first.
+ int64_t manifest_metadata_size = 0;
+ int64_t manifest_signature_size = 0;
+ prefs_->GetInt64(kPrefsManifestMetadataSize, &manifest_metadata_size);
+ prefs_->GetInt64(kPrefsManifestSignatureSize, &manifest_signature_size);
+ http_fetcher_->AddRange(base_offset_,
+ manifest_metadata_size + manifest_signature_size);
+ // If there're remaining unprocessed data blobs, fetch them. Be careful not
+ // to request data beyond the end of the payload to avoid 416 HTTP response
+ // error codes.
+ int64_t next_data_offset = 0;
+ prefs_->GetInt64(kPrefsUpdateStateNextDataOffset, &next_data_offset);
+ uint64_t resume_offset =
+ manifest_metadata_size + manifest_signature_size + next_data_offset;
+ if (!payload_->size) {
+ http_fetcher_->AddRange(base_offset_ + resume_offset);
+ } else if (resume_offset < payload_->size) {
+ http_fetcher_->AddRange(base_offset_ + resume_offset,
+ payload_->size - resume_offset);
+ }
+ } else {
+ if (payload_->size) {
+ http_fetcher_->AddRange(base_offset_, payload_->size);
+ } else {
+ // If no payload size is passed, we assume we read until the end of the
+ // stream.
+ http_fetcher_->AddRange(base_offset_);
+ }
+ }
+
+ if (writer_ && writer_ != delta_performer_.get()) {
LOG(INFO) << "Using writer for test.";
} else {
delta_performer_.reset(new DeltaPerformer(
- prefs_, boot_control_, hardware_, delegate_, &install_plan_));
+ prefs_, boot_control_, hardware_, delegate_, &install_plan_, payload_));
writer_ = delta_performer_.get();
}
- download_active_ = true;
-
if (system_state_ != nullptr) {
const PayloadStateInterface* payload_state = system_state_->payload_state();
- string file_id = utils::CalculateP2PFileId(install_plan_.payload_hash,
- install_plan_.payload_size);
+ string file_id = utils::CalculateP2PFileId(payload_->hash, payload_->size);
if (payload_state->GetUsingP2PForSharing()) {
// If we're sharing the update, store the file_id to convey
// that we should write to the file.
@@ -267,13 +318,14 @@
bytes_received_ += length;
if (delegate_ && download_active_) {
- delegate_->BytesReceived(
- length, bytes_received_, install_plan_.payload_size);
+ delegate_->BytesReceived(length, bytes_received_, bytes_total_);
}
if (writer_ && !writer_->Write(bytes, length, &code_)) {
- LOG(ERROR) << "Error " << utils::ErrorCodeToString(code_) << " (" << code_
- << ") in DeltaPerformer's Write method when "
- << "processing the received payload -- Terminating processing";
+ if (code_ != ErrorCode::kSuccess) {
+ LOG(ERROR) << "Error " << utils::ErrorCodeToString(code_) << " (" << code_
+ << ") in DeltaPerformer's Write method when "
+ << "processing the received payload -- Terminating processing";
+ }
// Delete p2p file, if applicable.
if (!p2p_file_id_.empty())
CloseP2PSharingFd(true);
@@ -303,14 +355,25 @@
ErrorCode code =
successful ? ErrorCode::kSuccess : ErrorCode::kDownloadTransferError;
if (code == ErrorCode::kSuccess && delta_performer_.get()) {
- code = delta_performer_->VerifyPayload(install_plan_.payload_hash,
- install_plan_.payload_size);
+ if (!payload_->already_applied)
+ code = delta_performer_->VerifyPayload(payload_->hash, payload_->size);
if (code != ErrorCode::kSuccess) {
LOG(ERROR) << "Download of " << install_plan_.download_url
<< " failed due to payload verification error.";
// Delete p2p file, if applicable.
if (!p2p_file_id_.empty())
CloseP2PSharingFd(true);
+ } else if (payload_ < &install_plan_.payloads.back() &&
+ system_state_->payload_state()->NextPayload()) {
+ // No need to reset if this payload was already applied.
+ if (!payload_->already_applied)
+ DeltaPerformer::ResetUpdateProgress(prefs_, false);
+ // Start downloading next payload.
+ payload_++;
+ install_plan_.download_url =
+ system_state_->payload_state()->GetCurrentUrl();
+ StartDownloading();
+ return;
}
}
@@ -320,9 +383,13 @@
processor_->ActionComplete(this, code);
}
-void DownloadAction::TransferTerminated(HttpFetcher *fetcher) {
+void DownloadAction::TransferTerminated(HttpFetcher* fetcher) {
if (code_ != ErrorCode::kSuccess) {
processor_->ActionComplete(this, code_);
+ } else if (payload_->already_applied) {
+ LOG(INFO) << "TransferTerminated with ErrorCode::kSuccess when the current "
+ "payload has already applied, treating as TransferComplete.";
+ TransferComplete(fetcher, true);
}
}
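As a rough, self-contained illustration of the resume-range arithmetic in StartDownloading() above, the sketch below computes the (offset, length) pairs that would be handed to the range fetcher; the function name and types are local to the example, not part of the change.

    #include <cstdint>
    #include <utility>
    #include <vector>

    // Byte ranges to fetch when resuming a payload: first the manifest metadata
    // plus its signature, then any data not yet applied. A length of 0 in the
    // second range stands for "read to the end of the stream".
    std::vector<std::pair<uint64_t, uint64_t>> ResumeRanges(
        uint64_t base_offset,
        uint64_t metadata_size,
        uint64_t signature_size,
        uint64_t next_data_offset,
        uint64_t payload_size) {
      std::vector<std::pair<uint64_t, uint64_t>> ranges;
      ranges.emplace_back(base_offset, metadata_size + signature_size);
      const uint64_t resume_offset =
          metadata_size + signature_size + next_data_offset;
      if (payload_size == 0) {
        ranges.emplace_back(base_offset + resume_offset, 0);
      } else if (resume_offset < payload_size) {
        ranges.emplace_back(base_offset + resume_offset,
                            payload_size - resume_offset);
      }
      return ranges;
    }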
diff --git a/payload_consumer/download_action.h b/payload_consumer/download_action.h
index 285930a..d0e6000 100644
--- a/payload_consumer/download_action.h
+++ b/payload_consumer/download_action.h
@@ -27,6 +27,7 @@
#include "update_engine/common/action.h"
#include "update_engine/common/boot_control_interface.h"
#include "update_engine/common/http_fetcher.h"
+#include "update_engine/common/multi_range_http_fetcher.h"
#include "update_engine/payload_consumer/delta_performer.h"
#include "update_engine/payload_consumer/install_plan.h"
#include "update_engine/system_state.h"
@@ -106,6 +107,8 @@
delegate_ = delegate;
}
+ void set_base_offset(int64_t base_offset) { base_offset_ = base_offset; }
+
HttpFetcher* http_fetcher() { return http_fetcher_.get(); }
// Returns the p2p file id for the file being written or the empty
@@ -131,9 +134,15 @@
// called or if CloseP2PSharingFd() has been called.
void WriteToP2PFile(const void* data, size_t length, off_t file_offset);
+ // Start downloading the current payload using delta_performer.
+ void StartDownloading();
+
// The InstallPlan passed in
InstallPlan install_plan_;
+ // Pointer to the current payload in install_plan_.payloads.
+ InstallPlan::Payload* payload_{nullptr};
+
// SystemState required pointers.
PrefsInterface* prefs_;
BootControlInterface* boot_control_;
@@ -142,8 +151,8 @@
// Global context for the system.
SystemState* system_state_;
- // Pointer to the HttpFetcher that does the http work.
- std::unique_ptr<HttpFetcher> http_fetcher_;
+ // Pointer to the MultiRangeHttpFetcher that does the http work.
+ std::unique_ptr<MultiRangeHttpFetcher> http_fetcher_;
// The FileWriter that downloaded data should be written to. It will
// either point to *decompressing_file_writer_ or *delta_performer_.
@@ -157,7 +166,8 @@
// For reporting status to outsiders
DownloadActionDelegate* delegate_;
- uint64_t bytes_received_;
+ uint64_t bytes_received_{0};
+ uint64_t bytes_total_{0};
bool download_active_{false};
// The file-id for the file we're sharing or the empty string
@@ -171,6 +181,12 @@
// Set to |false| if p2p file is not visible.
bool p2p_visible_;
+ // Loaded from prefs before downloading any payload.
+ size_t resume_payload_index_{0};
+
+ // Offset of the payload in the download URL, used by UpdateAttempterAndroid.
+ int64_t base_offset_{0};
+
DISALLOW_COPY_AND_ASSIGN(DownloadAction);
};
diff --git a/payload_consumer/download_action_unittest.cc b/payload_consumer/download_action_unittest.cc
index 5e9ef5c..7d3ac6c 100644
--- a/payload_consumer/download_action_unittest.cc
+++ b/payload_consumer/download_action_unittest.cc
@@ -139,13 +139,13 @@
0, writer.Open(output_temp_file.path().c_str(), O_WRONLY | O_CREAT, 0));
writer.set_fail_write(fail_write);
- // We pull off the first byte from data and seek past it.
- string hash = HashCalculator::HashOfBytes(&data[1], data.size() - 1);
- uint64_t size = data.size();
+ uint64_t size = data.size() - 1;
InstallPlan install_plan;
- install_plan.payload_type = InstallPayloadType::kDelta;
- install_plan.payload_size = size;
- install_plan.payload_hash = hash;
+ install_plan.payloads.push_back(
+ {.size = size, .type = InstallPayloadType::kDelta});
+ // We pull off the first byte from data and seek past it.
+ EXPECT_TRUE(HashCalculator::RawHashOfBytes(
+ &data[1], data.size() - 1, &install_plan.payloads[0].hash));
install_plan.source_slot = 0;
install_plan.target_slot = 1;
// We mark both slots as bootable. Only the target slot should be unbootable
@@ -174,7 +174,7 @@
download_action.set_delegate(&download_delegate);
if (data.size() > kMockHttpFetcherChunkSize)
EXPECT_CALL(download_delegate,
- BytesReceived(_, 1 + kMockHttpFetcherChunkSize, _));
+ BytesReceived(_, kMockHttpFetcherChunkSize, _));
EXPECT_CALL(download_delegate, BytesReceived(_, _, _)).Times(AtLeast(1));
}
ErrorCode expected_code = ErrorCode::kSuccess;
@@ -272,6 +272,7 @@
// takes ownership of passed in HttpFetcher
ObjectFeederAction<InstallPlan> feeder_action;
InstallPlan install_plan;
+ install_plan.payloads.resize(1);
feeder_action.set_obj(install_plan);
FakeSystemState fake_system_state_;
MockPrefs prefs;
@@ -370,8 +371,9 @@
// takes ownership of passed in HttpFetcher
InstallPlan install_plan;
- install_plan.payload_size = 1;
- install_plan.payload_hash = HashCalculator::HashOfString("x");
+ install_plan.payloads.push_back({.size = 1});
+ EXPECT_TRUE(
+ HashCalculator::RawHashOfData({'x'}, &install_plan.payloads[0].hash));
ObjectFeederAction<InstallPlan> feeder_action;
feeder_action.set_obj(install_plan);
MockPrefs prefs;
@@ -455,8 +457,9 @@
EXPECT_EQ(
0, writer.Open(output_temp_file.path().c_str(), O_WRONLY | O_CREAT, 0));
InstallPlan install_plan;
- install_plan.payload_size = data_.length();
- install_plan.payload_hash = "1234hash";
+ install_plan.payloads.push_back(
+ {.size = data_.length(),
+ .hash = {'1', '2', '3', '4', 'h', 'a', 's', 'h'}});
ObjectFeederAction<InstallPlan> feeder_action;
feeder_action.set_obj(install_plan);
MockPrefs prefs;
@@ -569,7 +572,8 @@
// Prepare the file with existing data before starting to write to
// it via DownloadAction.
- string file_id = utils::CalculateP2PFileId("1234hash", data_.length());
+ string file_id = utils::CalculateP2PFileId(
+ {'1', '2', '3', '4', 'h', 'a', 's', 'h'}, data_.length());
ASSERT_TRUE(p2p_manager_->FileShare(file_id, data_.length()));
string existing_data;
for (unsigned int i = 0; i < 1000; i++)
@@ -606,7 +610,8 @@
// Prepare the file with all existing data before starting to write
// to it via DownloadAction.
- string file_id = utils::CalculateP2PFileId("1234hash", data_.length());
+ string file_id = utils::CalculateP2PFileId(
+ {'1', '2', '3', '4', 'h', 'a', 's', 'h'}, data_.length());
ASSERT_TRUE(p2p_manager_->FileShare(file_id, data_.length()));
string existing_data;
for (unsigned int i = 0; i < 1000; i++)
diff --git a/payload_consumer/filesystem_verifier_action.cc b/payload_consumer/filesystem_verifier_action.cc
index 5156f96..5edde9e 100644
--- a/payload_consumer/filesystem_verifier_action.cc
+++ b/payload_consumer/filesystem_verifier_action.cc
@@ -34,16 +34,13 @@
#include "update_engine/payload_consumer/delta_performer.h"
#include "update_engine/payload_consumer/payload_constants.h"
+using brillo::data_encoding::Base64Encode;
using std::string;
namespace chromeos_update_engine {
namespace {
const off_t kReadFileBufferSize = 128 * 1024;
-
-string StringForHashBytes(const brillo::Blob& hash) {
- return brillo::data_encoding::Base64Encode(hash.data(), hash.size());
-}
} // namespace
void FilesystemVerifierAction::PerformAction() {
@@ -199,15 +196,18 @@
}
InstallPlan::Partition& partition =
install_plan_.partitions[partition_index_];
- LOG(INFO) << "Hash of " << partition.name << ": " << hasher_->hash();
+ LOG(INFO) << "Hash of " << partition.name << ": "
+ << Base64Encode(hasher_->raw_hash());
switch (verifier_step_) {
case VerifierStep::kVerifyTargetHash:
if (partition.target_hash != hasher_->raw_hash()) {
LOG(ERROR) << "New '" << partition.name
<< "' partition verification failed.";
- if (install_plan_.payload_type == InstallPayloadType::kFull)
+ if (partition.source_hash.empty()) {
+ // No need to verify source if it is a full payload.
return Cleanup(ErrorCode::kNewRootfsVerificationError);
+ }
// If we have not verified source partition yet, now that the target
// partition does not match, and it's not a full payload, we need to
// switch to kVerifySourceHash step to check if it's because the source
@@ -231,9 +231,9 @@
" means that the delta I've been given doesn't match my"
" existing system. The "
<< partition.name << " partition I have has hash: "
- << StringForHashBytes(hasher_->raw_hash())
+ << Base64Encode(hasher_->raw_hash())
<< " but the update expected me to have "
- << StringForHashBytes(partition.source_hash) << " .";
+ << Base64Encode(partition.source_hash) << " .";
LOG(INFO) << "To get the checksum of the " << partition.name
<< " partition run this command: dd if="
<< partition.source_path
diff --git a/payload_consumer/install_plan.cc b/payload_consumer/install_plan.cc
index b04da74..d5d745b 100644
--- a/payload_consumer/install_plan.cc
+++ b/payload_consumer/install_plan.cc
@@ -18,6 +18,7 @@
#include <base/format_macros.h>
#include <base/logging.h>
+#include <base/strings/string_number_conversions.h>
#include <base/strings/stringprintf.h>
#include "update_engine/common/utils.h"
@@ -41,15 +42,9 @@
bool InstallPlan::operator==(const InstallPlan& that) const {
return ((is_resume == that.is_resume) &&
- (payload_type == that.payload_type) &&
- (download_url == that.download_url) &&
- (payload_size == that.payload_size) &&
- (payload_hash == that.payload_hash) &&
- (metadata_size == that.metadata_size) &&
- (metadata_signature == that.metadata_signature) &&
+ (download_url == that.download_url) && (payloads == that.payloads) &&
(source_slot == that.source_slot) &&
- (target_slot == that.target_slot) &&
- (partitions == that.partitions));
+ (target_slot == that.target_slot) && (partitions == that.partitions));
}
bool InstallPlan::operator!=(const InstallPlan& that) const {
@@ -67,20 +62,24 @@
partition.target_size,
utils::ToString(partition.run_postinstall).c_str());
}
+ string payloads_str;
+ for (const auto& payload : payloads) {
+ payloads_str += base::StringPrintf(
+ ", payload: (size: %" PRIu64 ", metadata_size: %" PRIu64
+ ", metadata signature: %s, hash: %s, payload type: %s)",
+ payload.size,
+ payload.metadata_size,
+ payload.metadata_signature.c_str(),
+ base::HexEncode(payload.hash.data(), payload.hash.size()).c_str(),
+ InstallPayloadTypeToString(payload.type).c_str());
+ }
- LOG(INFO) << "InstallPlan: "
- << (is_resume ? "resume" : "new_update")
- << ", payload type: " << InstallPayloadTypeToString(payload_type)
+ LOG(INFO) << "InstallPlan: " << (is_resume ? "resume" : "new_update")
<< ", source_slot: " << BootControlInterface::SlotName(source_slot)
<< ", target_slot: " << BootControlInterface::SlotName(target_slot)
- << ", url: " << download_url
- << ", payload size: " << payload_size
- << ", payload hash: " << payload_hash
- << ", metadata size: " << metadata_size
- << ", metadata signature: " << metadata_signature
- << partitions_str
- << ", hash_checks_mandatory: " << utils::ToString(
- hash_checks_mandatory)
+ << ", url: " << download_url << payloads_str << partitions_str
+ << ", hash_checks_mandatory: "
+ << utils::ToString(hash_checks_mandatory)
<< ", powerwash_required: " << utils::ToString(powerwash_required);
}
diff --git a/payload_consumer/install_plan.h b/payload_consumer/install_plan.h
index 3f0005c..6dd5a73 100644
--- a/payload_consumer/install_plan.h
+++ b/payload_consumer/install_plan.h
@@ -52,14 +52,28 @@
bool LoadPartitionsFromSlots(BootControlInterface* boot_control);
bool is_resume{false};
- InstallPayloadType payload_type{InstallPayloadType::kUnknown};
std::string download_url; // url to download from
std::string version; // version we are installing.
- uint64_t payload_size{0}; // size of the payload
- std::string payload_hash; // SHA256 hash of the payload
- uint64_t metadata_size{0}; // size of the metadata
- std::string metadata_signature; // signature of the metadata
+ struct Payload {
+ uint64_t size = 0; // size of the payload
+ uint64_t metadata_size = 0; // size of the metadata
+ std::string metadata_signature; // signature of the metadata in base64
+ brillo::Blob hash; // SHA256 hash of the payload
+ InstallPayloadType type{InstallPayloadType::kUnknown};
+ // If true, only download the manifest and fill in the partitions in the
+ // install plan without applying the payload. Set by DownloadAction when
+ // resuming a multi-payload update.
+ bool already_applied = false;
+
+ bool operator==(const Payload& that) const {
+ return size == that.size && metadata_size == that.metadata_size &&
+ metadata_signature == that.metadata_signature &&
+ hash == that.hash && type == that.type &&
+ already_applied == that.already_applied;
+ }
+ };
+ std::vector<Payload> payloads;
// The partition slots used for the update.
BootControlInterface::Slot source_slot{BootControlInterface::kInvalidSlot};
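To make the multi-payload bookkeeping concrete, here is a small, self-contained sketch of summing payload sizes for overall download progress, mirroring the bytes_total_ accumulation added to DownloadAction above; the Payload struct is a local stand-in for InstallPlan::Payload, not the real header.

    #include <cstdint>
    #include <vector>

    // Local stand-in for InstallPlan::Payload (illustrative only).
    struct Payload {
      uint64_t size = 0;
      uint64_t metadata_size = 0;
      bool already_applied = false;
    };

    // Total bytes across all payloads, used as the denominator when reporting
    // download progress for a multi-payload update.
    uint64_t TotalPayloadBytes(const std::vector<Payload>& payloads) {
      uint64_t total = 0;
      for (const Payload& payload : payloads)
        total += payload.size;
      return total;
    }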
diff --git a/payload_generator/delta_diff_utils.cc b/payload_generator/delta_diff_utils.cc
index 045d52f..e928912 100644
--- a/payload_generator/delta_diff_utils.cc
+++ b/payload_generator/delta_diff_utils.cc
@@ -31,6 +31,7 @@
#include <base/format_macros.h>
#include <base/strings/stringprintf.h>
#include <base/threading/simple_thread.h>
+#include <brillo/data_encoding.h>
#include "update_engine/common/hash_calculator.h"
#include "update_engine/common/subprocess.h"
@@ -845,7 +846,8 @@
TEST_AND_RETURN_FALSE(hasher.Finalize());
const brillo::Blob& hash = hasher.raw_hash();
info->set_hash(hash.data(), hash.size());
- LOG(INFO) << part.path << ": size=" << part.size << " hash=" << hasher.hash();
+ LOG(INFO) << part.path << ": size=" << part.size
+ << " hash=" << brillo::data_encoding::Base64Encode(hash);
return true;
}
diff --git a/payload_generator/generate_delta_main.cc b/payload_generator/generate_delta_main.cc
index 85785c5..cd99a51 100644
--- a/payload_generator/generate_delta_main.cc
+++ b/payload_generator/generate_delta_main.cc
@@ -211,8 +211,13 @@
part.target_path = old_part.path;
install_plan.partitions.push_back(part);
}
-
- DeltaPerformer performer(&prefs, nullptr, nullptr, nullptr, &install_plan);
+ install_plan.payloads.resize(1);
+ DeltaPerformer performer(&prefs,
+ nullptr,
+ nullptr,
+ nullptr,
+ &install_plan,
+ &install_plan.payloads[0]);
brillo::Blob buf(1024 * 1024);
int fd = open(in_file.c_str(), O_RDONLY, 0);
CHECK_GE(fd, 0);
diff --git a/payload_generator/payload_file.cc b/payload_generator/payload_file.cc
index c650fc4..4cb117d 100644
--- a/payload_generator/payload_file.cc
+++ b/payload_generator/payload_file.cc
@@ -313,10 +313,8 @@
bool PayloadFile::AddOperationHash(InstallOperation* op,
const brillo::Blob& buf) {
- HashCalculator hasher;
- TEST_AND_RETURN_FALSE(hasher.Update(buf.data(), buf.size()));
- TEST_AND_RETURN_FALSE(hasher.Finalize());
- const brillo::Blob& hash = hasher.raw_hash();
+ brillo::Blob hash;
+ TEST_AND_RETURN_FALSE(HashCalculator::RawHashOfData(buf, &hash));
op->set_data_sha256_hash(hash.data(), hash.size());
return true;
}
diff --git a/payload_state.cc b/payload_state.cc
index 5b86ec2..96181ea 100644
--- a/payload_state.cc
+++ b/payload_state.cc
@@ -122,11 +122,16 @@
return;
}
+ // Always start from payload index 0, even for resume, to download partition
+ // info from previous payloads.
+ payload_index_ = 0;
+
// This is the earliest point at which we can validate whether the URL index
// we loaded from the persisted state is a valid value. If the response
// hasn't changed but the URL index is invalid, it's indicative of some
// tampering of the persisted state.
- if (static_cast<uint32_t>(url_index_) >= candidate_urls_.size()) {
+ if (payload_index_ >= candidate_urls_.size() ||
+ url_index_ >= candidate_urls_[payload_index_].size()) {
LOG(INFO) << "Resetting all payload state as the url index seems to have "
"been tampered with";
ResetPersistedState();
@@ -239,6 +244,7 @@
// Reset the number of responses seen since it counts from the last
// successful update, e.g. now.
SetNumResponsesSeen(0);
+ SetPayloadIndex(0);
CreateSystemUpdatedMarkerFile();
}
@@ -386,14 +392,16 @@
LOG(INFO) << "Payload backoff disabled for interactive update checks.";
return false;
}
- if (response_.is_delta_payload) {
- // If delta payloads fail, we want to fallback quickly to full payloads as
- // they are more likely to succeed. Exponential backoffs would greatly
- // slow down the fallback to full payloads. So we don't backoff for delta
- // payloads.
- LOG(INFO) << "No backoffs for delta payloads. "
- << "Can proceed with the download";
- return false;
+ for (const auto& package : response_.packages) {
+ if (package.is_delta) {
+ // If delta payloads fail, we want to fall back quickly to full payloads as
+ // they are more likely to succeed. Exponential backoffs would greatly
+ // slow down the fallback to full payloads. So we don't backoff for delta
+ // payloads.
+ LOG(INFO) << "No backoffs for delta payloads. "
+ << "Can proceed with the download";
+ return false;
+ }
}
if (!system_state_->hardware()->IsOfficialBuild()) {
@@ -434,7 +442,7 @@
void PayloadState::IncrementFullPayloadAttemptNumber() {
// Update the payload attempt number for full payloads and the backoff time.
- if (response_.is_delta_payload) {
+ if (response_.packages[payload_index_].is_delta) {
LOG(INFO) << "Not incrementing payload attempt number for delta payloads";
return;
}
@@ -445,21 +453,23 @@
}
void PayloadState::IncrementUrlIndex() {
- uint32_t next_url_index = GetUrlIndex() + 1;
- if (next_url_index < candidate_urls_.size()) {
+ size_t next_url_index = url_index_ + 1;
+ size_t max_url_size = 0;
+ for (const auto& urls : candidate_urls_)
+ max_url_size = std::max(max_url_size, urls.size());
+ if (next_url_index < max_url_size) {
LOG(INFO) << "Incrementing the URL index for next attempt";
SetUrlIndex(next_url_index);
} else {
- LOG(INFO) << "Resetting the current URL index (" << GetUrlIndex() << ") to "
- << "0 as we only have " << candidate_urls_.size()
- << " candidate URL(s)";
+ LOG(INFO) << "Resetting the current URL index (" << url_index_ << ") to "
+ << "0 as we only have " << max_url_size << " candidate URL(s)";
SetUrlIndex(0);
IncrementPayloadAttemptNumber();
IncrementFullPayloadAttemptNumber();
}
// If we have multiple URLs, record that we just switched to another one
- if (candidate_urls_.size() > 1)
+ if (max_url_size > 1)
SetUrlSwitchCount(url_switch_count_ + 1);
// Whenever we update the URL index, we should also clear the URL failure
@@ -520,12 +530,14 @@
if (using_p2p_for_downloading_) {
current_download_source_ = kDownloadSourceHttpPeer;
- } else if (GetUrlIndex() < candidate_urls_.size()) {
- string current_url = candidate_urls_[GetUrlIndex()];
- if (base::StartsWith(current_url, "https://",
- base::CompareCase::INSENSITIVE_ASCII)) {
+ } else if (payload_index_ < candidate_urls_.size() &&
+ candidate_urls_[payload_index_].size() != 0) {
+ const string& current_url = candidate_urls_[payload_index_][GetUrlIndex()];
+ if (base::StartsWith(
+ current_url, "https://", base::CompareCase::INSENSITIVE_ASCII)) {
current_download_source_ = kDownloadSourceHttpsServer;
- } else if (base::StartsWith(current_url, "http://",
+ } else if (base::StartsWith(current_url,
+ "http://",
base::CompareCase::INSENSITIVE_ASCII)) {
current_download_source_ = kDownloadSourceHttpServer;
}
@@ -549,16 +561,17 @@
}
PayloadType PayloadState::CalculatePayloadType() {
- PayloadType payload_type;
- OmahaRequestParams* params = system_state_->request_params();
- if (response_.is_delta_payload) {
- payload_type = kPayloadTypeDelta;
- } else if (params->delta_okay()) {
- payload_type = kPayloadTypeFull;
- } else { // Full payload, delta was not allowed by request.
- payload_type = kPayloadTypeForcedFull;
+ for (const auto& package : response_.packages) {
+ if (package.is_delta) {
+ return kPayloadTypeDelta;
+ }
}
- return payload_type;
+ OmahaRequestParams* params = system_state_->request_params();
+ if (params->delta_okay()) {
+ return kPayloadTypeFull;
+ }
+ // Full payload, delta was not allowed by request.
+ return kPayloadTypeForcedFull;
}
// TODO(zeuthen): Currently we don't report the UpdateEngine.Attempt.*
@@ -570,7 +583,7 @@
PayloadType payload_type = CalculatePayloadType();
- int64_t payload_size = response_.size;
+ int64_t payload_size = GetPayloadSize();
int64_t payload_bytes_downloaded = attempt_num_bytes_downloaded_;
@@ -716,7 +729,7 @@
PayloadType payload_type = CalculatePayloadType();
- int64_t payload_size = response_.size;
+ int64_t payload_size = GetPayloadSize();
int attempt_count = GetPayloadAttemptNumber();
@@ -755,6 +768,7 @@
void PayloadState::ResetPersistedState() {
SetPayloadAttemptNumber(0);
SetFullPayloadAttemptNumber(0);
+ SetPayloadIndex(0);
SetUrlIndex(0);
SetUrlFailureCount(0);
SetUrlSwitchCount(0);
@@ -804,27 +818,33 @@
}
string PayloadState::CalculateResponseSignature() {
- string response_sign = base::StringPrintf(
- "NumURLs = %d\n", static_cast<int>(candidate_urls_.size()));
+ string response_sign;
+ for (size_t i = 0; i < response_.packages.size(); i++) {
+ const auto& package = response_.packages[i];
+ response_sign += base::StringPrintf(
+ "Payload %zu:\n"
+ " Size = %ju\n"
+ " Sha256 Hash = %s\n"
+ " Metadata Size = %ju\n"
+ " Metadata Signature = %s\n"
+ " Is Delta = %d\n"
+ " NumURLs = %zu\n",
+ i,
+ static_cast<uintmax_t>(package.size),
+ package.hash.c_str(),
+ static_cast<uintmax_t>(package.metadata_size),
+ package.metadata_signature.c_str(),
+ package.is_delta,
+ candidate_urls_[i].size());
- for (size_t i = 0; i < candidate_urls_.size(); i++)
- response_sign += base::StringPrintf("Candidate Url%d = %s\n",
- static_cast<int>(i),
- candidate_urls_[i].c_str());
+ for (size_t j = 0; j < candidate_urls_[i].size(); j++)
+ response_sign += base::StringPrintf(
+ " Candidate Url%zu = %s\n", j, candidate_urls_[i][j].c_str());
+ }
response_sign += base::StringPrintf(
- "Payload Size = %ju\n"
- "Payload Sha256 Hash = %s\n"
- "Metadata Size = %ju\n"
- "Metadata Signature = %s\n"
- "Is Delta Payload = %d\n"
"Max Failure Count Per Url = %d\n"
"Disable Payload Backoff = %d\n",
- static_cast<uintmax_t>(response_.size),
- response_.hash.c_str(),
- static_cast<uintmax_t>(response_.metadata_size),
- response_.metadata_signature.c_str(),
- response_.is_delta_payload,
response_.max_failure_count_per_url,
response_.disable_payload_backoff);
return response_sign;
@@ -871,6 +891,20 @@
full_payload_attempt_number_);
}
+void PayloadState::SetPayloadIndex(size_t payload_index) {
+ CHECK(prefs_);
+ payload_index_ = payload_index;
+ LOG(INFO) << "Payload Index = " << payload_index_;
+ prefs_->SetInt64(kPrefsUpdateStatePayloadIndex, payload_index_);
+}
+
+bool PayloadState::NextPayload() {
+ if (payload_index_ + 1 >= candidate_urls_.size())
+ return false;
+ SetPayloadIndex(payload_index_ + 1);
+ return true;
+}
+
void PayloadState::LoadUrlIndex() {
SetUrlIndex(GetPersistedValue(kPrefsCurrentUrlIndex));
}
@@ -1173,20 +1207,22 @@
}
candidate_urls_.clear();
- for (size_t i = 0; i < response_.payload_urls.size(); i++) {
- string candidate_url = response_.payload_urls[i];
- if (base::StartsWith(candidate_url, "http://",
- base::CompareCase::INSENSITIVE_ASCII) &&
- !http_url_ok) {
- continue;
+ for (const auto& package : response_.packages) {
+ candidate_urls_.emplace_back();
+ for (const string& candidate_url : package.payload_urls) {
+ if (base::StartsWith(
+ candidate_url, "http://", base::CompareCase::INSENSITIVE_ASCII) &&
+ !http_url_ok) {
+ continue;
+ }
+ candidate_urls_.back().push_back(candidate_url);
+ LOG(INFO) << "Candidate Url" << (candidate_urls_.back().size() - 1)
+ << ": " << candidate_url;
}
- candidate_urls_.push_back(candidate_url);
- LOG(INFO) << "Candidate Url" << (candidate_urls_.size() - 1)
- << ": " << candidate_url;
+ LOG(INFO) << "Found " << candidate_urls_.back().size() << " candidate URLs "
+ << "out of " << package.payload_urls.size()
+ << " URLs supplied in package " << candidate_urls_.size() - 1;
}
-
- LOG(INFO) << "Found " << candidate_urls_.size() << " candidate URLs "
- << "out of " << response_.payload_urls.size() << " URLs supplied";
}
void PayloadState::CreateSystemUpdatedMarkerFile() {
@@ -1395,4 +1431,11 @@
return true;
}
+int64_t PayloadState::GetPayloadSize() {
+ int64_t payload_size = 0;
+ for (const auto& package : response_.packages)
+ payload_size += package.size;
+ return payload_size;
+}
+
} // namespace chromeos_update_engine
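Editor's note: with the payload_state.cc changes above, candidate_urls_ becomes a per-payload list of URL lists; url_index_ now wraps around the longest of those lists, while payload_index_ selects which list GetCurrentUrl reads from, and NextPayload() advances between payloads. A simplified standalone sketch of that bookkeeping (names mirror the diff, but the class is a stand-in, not the real PayloadState, and it omits the pref persistence and attempt counters):

#include <algorithm>
#include <cstddef>
#include <string>
#include <vector>

class PayloadUrlTracker {
 public:
  explicit PayloadUrlTracker(std::vector<std::vector<std::string>> candidate_urls)
      : candidate_urls_(std::move(candidate_urls)) {}

  // Mirrors PayloadState::NextPayload(): advance to the next payload, if any.
  bool NextPayload() {
    if (payload_index_ + 1 >= candidate_urls_.size())
      return false;
    ++payload_index_;
    return true;
  }

  // Mirrors the new IncrementUrlIndex(): the URL index wraps around the
  // longest per-payload URL list rather than a single flat list.
  void IncrementUrlIndex() {
    size_t max_url_size = 0;
    for (const auto& urls : candidate_urls_)
      max_url_size = std::max(max_url_size, urls.size());
    if (url_index_ + 1 < max_url_size)
      ++url_index_;
    else
      url_index_ = 0;  // the real code also bumps the payload attempt counters here
  }

  // Mirrors GetCurrentUrl(): index into the current payload's URL list,
  // clamping the shared url_index_ to that list's size.
  std::string CurrentUrl() const {
    if (payload_index_ >= candidate_urls_.size() ||
        candidate_urls_[payload_index_].empty())
      return "";
    size_t index =
        std::min(url_index_, candidate_urls_[payload_index_].size() - 1);
    return candidate_urls_[payload_index_][index];
  }

 private:
  std::vector<std::vector<std::string>> candidate_urls_;
  size_t payload_index_ = 0;
  size_t url_index_ = 0;
};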
diff --git a/payload_state.h b/payload_state.h
index 46711b6..699fc74 100644
--- a/payload_state.h
+++ b/payload_state.h
@@ -79,7 +79,9 @@
}
inline std::string GetCurrentUrl() override {
- return candidate_urls_.size() ? candidate_urls_[url_index_] : "";
+ return candidate_urls_.size() && candidate_urls_[payload_index_].size()
+ ? candidate_urls_[payload_index_][url_index_]
+ : "";
}
inline uint32_t GetUrlFailureCount() override {
@@ -151,6 +153,8 @@
return attempt_error_code_;
}
+ bool NextPayload() override;
+
private:
enum class AttemptType {
kUpdate,
@@ -270,6 +274,11 @@
// of a process restart.
void SetFullPayloadAttemptNumber(int payload_attempt_number);
+ // Sets the current payload index to the given value. Also persists the value
+ // being set so that we resume from the same value in case of a process
+ // restart.
+ void SetPayloadIndex(size_t payload_index);
+
// Initializes the current URL index from the persisted state.
void LoadUrlIndex();
@@ -368,7 +377,9 @@
void ResetRollbackVersion();
inline uint32_t GetUrlIndex() {
- return url_index_;
+ return url_index_ ? std::min(candidate_urls_[payload_index_].size() - 1,
+ url_index_)
+ : 0;
}
// Computes the list of candidate URLs from the total list of payload URLs in
@@ -420,6 +431,9 @@
// Loads the persisted scattering wallclock-based wait period.
void LoadScatteringWaitPeriod();
+ // Gets the total size of all payloads.
+ int64_t GetPayloadSize();
+
// The global state of the system.
SystemState* system_state_;
@@ -468,12 +482,15 @@
// we resume from the same value in case of a process restart.
int full_payload_attempt_number_;
+ // The index of the current payload.
+ size_t payload_index_ = 0;
+
// The index of the current URL. This type is different from the one in the
// accessor methods because PrefsInterface supports only int64_t but we want
// to provide a stronger abstraction of uint32_t. Each update to this value
// is persisted so we resume from the same value in case of a process
// restart.
- int64_t url_index_;
+ size_t url_index_;
// The count of failures encountered in the current attempt to download using
// the current URL (specified by url_index_). Each update to this value is
@@ -543,7 +560,7 @@
// The ordered list of the subset of payload URL candidates which are
// allowed as per device policy.
- std::vector<std::string> candidate_urls_;
+ std::vector<std::vector<std::string>> candidate_urls_;
// This stores a blacklisted version set as part of rollback. When we rollback
// we store the version of the os from which we are rolling back from in order
diff --git a/payload_state_interface.h b/payload_state_interface.h
index 68798ee..4aa25e3 100644
--- a/payload_state_interface.h
+++ b/payload_state_interface.h
@@ -193,6 +193,9 @@
virtual void SetP2PUrl(const std::string& url) = 0;
virtual std::string GetP2PUrl() const = 0;
virtual ErrorCode GetAttemptErrorCode() const = 0;
+
+ // Switches to the next payload. Returns false if there is no next payload.
+ virtual bool NextPayload() = 0;
};
} // namespace chromeos_update_engine
diff --git a/payload_state_unittest.cc b/payload_state_unittest.cc
index b671722..4546180 100644
--- a/payload_state_unittest.cc
+++ b/payload_state_unittest.cc
@@ -61,41 +61,44 @@
static void SetupPayloadStateWith2Urls(string hash,
bool http_enabled,
+ bool is_delta_payload,
PayloadState* payload_state,
OmahaResponse* response) {
- response->payload_urls.clear();
- response->payload_urls.push_back("http://test");
- response->payload_urls.push_back("https://test");
- response->size = 523456789;
- response->hash = hash;
- response->metadata_size = 558123;
- response->metadata_signature = "metasign";
+ response->packages.clear();
+ response->packages.push_back({.payload_urls = {"http://test", "https://test"},
+ .size = 523456789,
+ .metadata_size = 558123,
+ .metadata_signature = "metasign",
+ .hash = hash,
+ .is_delta = is_delta_payload});
response->max_failure_count_per_url = 3;
payload_state->SetResponse(*response);
string stored_response_sign = payload_state->GetResponseSignature();
string expected_url_https_only =
- "NumURLs = 1\n"
- "Candidate Url0 = https://test\n";
+ " NumURLs = 1\n"
+ " Candidate Url0 = https://test\n";
string expected_urls_both =
- "NumURLs = 2\n"
- "Candidate Url0 = http://test\n"
- "Candidate Url1 = https://test\n";
+ " NumURLs = 2\n"
+ " Candidate Url0 = http://test\n"
+ " Candidate Url1 = https://test\n";
- string expected_response_sign =
- (http_enabled ? expected_urls_both : expected_url_https_only) +
- base::StringPrintf("Payload Size = 523456789\n"
- "Payload Sha256 Hash = %s\n"
- "Metadata Size = 558123\n"
- "Metadata Signature = metasign\n"
- "Is Delta Payload = %d\n"
- "Max Failure Count Per Url = %d\n"
- "Disable Payload Backoff = %d\n",
- hash.c_str(),
- response->is_delta_payload,
- response->max_failure_count_per_url,
- response->disable_payload_backoff);
+ string expected_response_sign = base::StringPrintf(
+ "Payload 0:\n"
+ " Size = 523456789\n"
+ " Sha256 Hash = %s\n"
+ " Metadata Size = 558123\n"
+ " Metadata Signature = metasign\n"
+ " Is Delta = %d\n"
+ "%s"
+ "Max Failure Count Per Url = %d\n"
+ "Disable Payload Backoff = %d\n",
+ hash.c_str(),
+ response->packages[0].is_delta,
+ (http_enabled ? expected_urls_both : expected_url_https_only).c_str(),
+ response->max_failure_count_per_url,
+ response->disable_payload_backoff);
EXPECT_EQ(expected_response_sign, stored_response_sign);
}
@@ -129,14 +132,9 @@
EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
payload_state.SetResponse(response);
string stored_response_sign = payload_state.GetResponseSignature();
- string expected_response_sign = "NumURLs = 0\n"
- "Payload Size = 0\n"
- "Payload Sha256 Hash = \n"
- "Metadata Size = 0\n"
- "Metadata Signature = \n"
- "Is Delta Payload = 0\n"
- "Max Failure Count Per Url = 0\n"
- "Disable Payload Backoff = 0\n";
+ string expected_response_sign =
+ "Max Failure Count Per Url = 0\n"
+ "Disable Payload Backoff = 0\n";
EXPECT_EQ(expected_response_sign, stored_response_sign);
EXPECT_EQ("", payload_state.GetCurrentUrl());
EXPECT_EQ(0U, payload_state.GetUrlFailureCount());
@@ -146,11 +144,11 @@
TEST(PayloadStateTest, SetResponseWorksWithSingleUrl) {
OmahaResponse response;
- response.payload_urls.push_back("https://single.url.test");
- response.size = 123456789;
- response.hash = "hash";
- response.metadata_size = 58123;
- response.metadata_signature = "msign";
+ response.packages.push_back({.payload_urls = {"https://single.url.test"},
+ .size = 123456789,
+ .metadata_size = 58123,
+ .metadata_signature = "msign",
+ .hash = "hash"});
FakeSystemState fake_system_state;
NiceMock<MockPrefs>* prefs = fake_system_state.mock_prefs();
EXPECT_CALL(*prefs, SetInt64(_, _)).Times(AnyNumber());
@@ -180,15 +178,17 @@
EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
payload_state.SetResponse(response);
string stored_response_sign = payload_state.GetResponseSignature();
- string expected_response_sign = "NumURLs = 1\n"
- "Candidate Url0 = https://single.url.test\n"
- "Payload Size = 123456789\n"
- "Payload Sha256 Hash = hash\n"
- "Metadata Size = 58123\n"
- "Metadata Signature = msign\n"
- "Is Delta Payload = 0\n"
- "Max Failure Count Per Url = 0\n"
- "Disable Payload Backoff = 0\n";
+ string expected_response_sign =
+ "Payload 0:\n"
+ " Size = 123456789\n"
+ " Sha256 Hash = hash\n"
+ " Metadata Size = 58123\n"
+ " Metadata Signature = msign\n"
+ " Is Delta = 0\n"
+ " NumURLs = 1\n"
+ " Candidate Url0 = https://single.url.test\n"
+ "Max Failure Count Per Url = 0\n"
+ "Disable Payload Backoff = 0\n";
EXPECT_EQ(expected_response_sign, stored_response_sign);
EXPECT_EQ("https://single.url.test", payload_state.GetCurrentUrl());
EXPECT_EQ(0U, payload_state.GetUrlFailureCount());
@@ -198,12 +198,12 @@
TEST(PayloadStateTest, SetResponseWorksWithMultipleUrls) {
OmahaResponse response;
- response.payload_urls.push_back("http://multiple.url.test");
- response.payload_urls.push_back("https://multiple.url.test");
- response.size = 523456789;
- response.hash = "rhash";
- response.metadata_size = 558123;
- response.metadata_signature = "metasign";
+ response.packages.push_back({.payload_urls = {"http://multiple.url.test",
+ "https://multiple.url.test"},
+ .size = 523456789,
+ .metadata_size = 558123,
+ .metadata_signature = "metasign",
+ .hash = "rhash"});
FakeSystemState fake_system_state;
NiceMock<MockPrefs>* prefs = fake_system_state.mock_prefs();
EXPECT_CALL(*prefs, SetInt64(_, _)).Times(AnyNumber());
@@ -230,16 +230,18 @@
EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
payload_state.SetResponse(response);
string stored_response_sign = payload_state.GetResponseSignature();
- string expected_response_sign = "NumURLs = 2\n"
- "Candidate Url0 = http://multiple.url.test\n"
- "Candidate Url1 = https://multiple.url.test\n"
- "Payload Size = 523456789\n"
- "Payload Sha256 Hash = rhash\n"
- "Metadata Size = 558123\n"
- "Metadata Signature = metasign\n"
- "Is Delta Payload = 0\n"
- "Max Failure Count Per Url = 0\n"
- "Disable Payload Backoff = 0\n";
+ string expected_response_sign =
+ "Payload 0:\n"
+ " Size = 523456789\n"
+ " Sha256 Hash = rhash\n"
+ " Metadata Size = 558123\n"
+ " Metadata Signature = metasign\n"
+ " Is Delta = 0\n"
+ " NumURLs = 2\n"
+ " Candidate Url0 = http://multiple.url.test\n"
+ " Candidate Url1 = https://multiple.url.test\n"
+ "Max Failure Count Per Url = 0\n"
+ "Disable Payload Backoff = 0\n";
EXPECT_EQ(expected_response_sign, stored_response_sign);
EXPECT_EQ("http://multiple.url.test", payload_state.GetCurrentUrl());
EXPECT_EQ(0U, payload_state.GetUrlFailureCount());
@@ -281,7 +283,8 @@
// This does a SetResponse which causes all the states to be set to 0 for
// the first time.
- SetupPayloadStateWith2Urls("Hash1235", true, &payload_state, &response);
+ SetupPayloadStateWith2Urls(
+ "Hash1235", true, false, &payload_state, &response);
EXPECT_EQ("http://test", payload_state.GetCurrentUrl());
// Verify that on the first error, the URL index advances to 1.
@@ -314,7 +317,8 @@
.Times(AnyNumber());
// Set the first response.
- SetupPayloadStateWith2Urls("Hash5823", true, &payload_state, &response);
+ SetupPayloadStateWith2Urls(
+ "Hash5823", true, false, &payload_state, &response);
EXPECT_EQ(1, payload_state.GetNumResponsesSeen());
// Advance the URL index to 1 by faking an error.
@@ -324,7 +328,8 @@
EXPECT_EQ(1U, payload_state.GetUrlSwitchCount());
// Now, slightly change the response and set it again.
- SetupPayloadStateWith2Urls("Hash8225", true, &payload_state, &response);
+ SetupPayloadStateWith2Urls(
+ "Hash8225", true, false, &payload_state, &response);
EXPECT_EQ(2, payload_state.GetNumResponsesSeen());
// Fake an error again.
@@ -333,7 +338,8 @@
EXPECT_EQ(1U, payload_state.GetUrlSwitchCount());
// Return a third different response.
- SetupPayloadStateWith2Urls("Hash9999", true, &payload_state, &response);
+ SetupPayloadStateWith2Urls(
+ "Hash9999", true, false, &payload_state, &response);
EXPECT_EQ(3, payload_state.GetNumResponsesSeen());
// Make sure the url index was reset to 0 because of the new response.
@@ -404,7 +410,8 @@
EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
- SetupPayloadStateWith2Urls("Hash5873", true, &payload_state, &response);
+ SetupPayloadStateWith2Urls(
+ "Hash5873", true, false, &payload_state, &response);
EXPECT_EQ(1, payload_state.GetNumResponsesSeen());
// This should advance the URL index.
@@ -483,7 +490,8 @@
EXPECT_TRUE(payload_state.ShouldBackoffDownload());
// Now, slightly change the response and set it again.
- SetupPayloadStateWith2Urls("Hash8532", true, &payload_state, &response);
+ SetupPayloadStateWith2Urls(
+ "Hash8532", true, false, &payload_state, &response);
EXPECT_EQ(2, payload_state.GetNumResponsesSeen());
// Make sure the url index was reset to 0 because of the new response.
@@ -497,7 +505,6 @@
TEST(PayloadStateTest, PayloadAttemptNumberIncreasesOnSuccessfulFullDownload) {
OmahaResponse response;
- response.is_delta_payload = false;
PayloadState payload_state;
FakeSystemState fake_system_state;
NiceMock<MockPrefs>* prefs = fake_system_state.mock_prefs();
@@ -523,7 +530,8 @@
EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
- SetupPayloadStateWith2Urls("Hash8593", true, &payload_state, &response);
+ SetupPayloadStateWith2Urls(
+ "Hash8593", true, false, &payload_state, &response);
// This should just advance the payload attempt number;
EXPECT_EQ(0, payload_state.GetPayloadAttemptNumber());
@@ -538,7 +546,6 @@
TEST(PayloadStateTest, PayloadAttemptNumberIncreasesOnSuccessfulDeltaDownload) {
OmahaResponse response;
- response.is_delta_payload = true;
PayloadState payload_state;
FakeSystemState fake_system_state;
NiceMock<MockPrefs>* prefs = fake_system_state.mock_prefs();
@@ -563,7 +570,7 @@
EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
- SetupPayloadStateWith2Urls("Hash8593", true, &payload_state, &response);
+ SetupPayloadStateWith2Urls("Hash8593", true, true, &payload_state, &response);
// This should just advance the payload attempt number;
EXPECT_EQ(0, payload_state.GetPayloadAttemptNumber());
@@ -582,7 +589,8 @@
FakeSystemState fake_system_state;
EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
- SetupPayloadStateWith2Urls("Hash4427", true, &payload_state, &response);
+ SetupPayloadStateWith2Urls(
+ "Hash4427", true, false, &payload_state, &response);
// Generate enough events to advance URL index, failure count and
// payload attempt number all to 1.
@@ -618,7 +626,8 @@
// response was different. We want to specifically test that even if the
// response is same, we should reset the state if we find it corrupted.
EXPECT_TRUE(payload_state.Initialize(&fake_system_state2));
- SetupPayloadStateWith2Urls("Hash4427", true, &payload_state, &response);
+ SetupPayloadStateWith2Urls(
+ "Hash4427", true, false, &payload_state, &response);
// Make sure all counters get reset to 0 because of the corrupted URL index
// we supplied above.
@@ -631,7 +640,6 @@
TEST(PayloadStateTest, NoBackoffInteractiveChecks) {
OmahaResponse response;
- response.is_delta_payload = false;
PayloadState payload_state;
FakeSystemState fake_system_state;
OmahaRequestParams params(&fake_system_state);
@@ -639,7 +647,8 @@
fake_system_state.set_request_params(&params);
EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
- SetupPayloadStateWith2Urls("Hash6437", true, &payload_state, &response);
+ SetupPayloadStateWith2Urls(
+ "Hash6437", true, false, &payload_state, &response);
// Simulate two failures (enough to cause payload backoff) and check
// again that we're ready to re-download without any backoff as this is
@@ -654,7 +663,6 @@
TEST(PayloadStateTest, NoBackoffForP2PUpdates) {
OmahaResponse response;
- response.is_delta_payload = false;
PayloadState payload_state;
FakeSystemState fake_system_state;
OmahaRequestParams params(&fake_system_state);
@@ -662,7 +670,8 @@
fake_system_state.set_request_params(&params);
EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
- SetupPayloadStateWith2Urls("Hash6437", true, &payload_state, &response);
+ SetupPayloadStateWith2Urls(
+ "Hash6437", true, false, &payload_state, &response);
// Simulate two failures (enough to cause payload backoff) and check
// again that we're ready to re-download without any backoff as this is
@@ -685,12 +694,11 @@
TEST(PayloadStateTest, NoBackoffForDeltaPayloads) {
OmahaResponse response;
- response.is_delta_payload = true;
PayloadState payload_state;
FakeSystemState fake_system_state;
EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
- SetupPayloadStateWith2Urls("Hash6437", true, &payload_state, &response);
+ SetupPayloadStateWith2Urls("Hash6437", true, true, &payload_state, &response);
// Simulate a successful download and see that we're ready to download
// again without any backoff as this is a delta payload.
@@ -730,12 +738,12 @@
TEST(PayloadStateTest, BackoffPeriodsAreInCorrectRange) {
OmahaResponse response;
- response.is_delta_payload = false;
PayloadState payload_state;
FakeSystemState fake_system_state;
EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
- SetupPayloadStateWith2Urls("Hash8939", true, &payload_state, &response);
+ SetupPayloadStateWith2Urls(
+ "Hash8939", true, false, &payload_state, &response);
CheckPayloadBackoffState(&payload_state, 1, TimeDelta::FromDays(1));
CheckPayloadBackoffState(&payload_state, 2, TimeDelta::FromDays(2));
@@ -756,7 +764,8 @@
FakeSystemState fake_system_state;
EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
- SetupPayloadStateWith2Urls("Hash8939", true, &payload_state, &response);
+ SetupPayloadStateWith2Urls(
+ "Hash8939", true, false, &payload_state, &response);
// Simulate a successful download and see that we are ready to download
// again without any backoff.
@@ -784,7 +793,8 @@
uint64_t http_total = 0;
EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
- SetupPayloadStateWith2Urls("Hash3286", true, &payload_state, &response);
+ SetupPayloadStateWith2Urls(
+ "Hash3286", true, false, &payload_state, &response);
EXPECT_EQ(1, payload_state.GetNumResponsesSeen());
// Simulate a previous attempt with in order to set an initial non-zero value
@@ -801,7 +811,8 @@
// Change the response hash so as to simulate a new response which will
// reset the current bytes downloaded, but not the total bytes downloaded.
- SetupPayloadStateWith2Urls("Hash9904", true, &payload_state, &response);
+ SetupPayloadStateWith2Urls(
+ "Hash9904", true, false, &payload_state, &response);
EXPECT_EQ(2, payload_state.GetNumResponsesSeen());
// First, simulate successful download of a few bytes over HTTP.
@@ -903,7 +914,8 @@
FakeSystemState fake_system_state;
EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
- SetupPayloadStateWith2Urls("Hash3286", true, &payload_state, &response);
+ SetupPayloadStateWith2Urls(
+ "Hash3286", true, false, &payload_state, &response);
// Simulate progress in order to mark HTTP as one of the sources used.
uint64_t num_bytes = 42 * 1000 * 1000;
@@ -934,7 +946,8 @@
EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
// Set the first response.
- SetupPayloadStateWith2Urls("Hash5823", true, &payload_state, &response);
+ SetupPayloadStateWith2Urls(
+ "Hash5823", true, false, &payload_state, &response);
uint64_t num_bytes = 10000;
payload_state.DownloadProgress(num_bytes);
@@ -1033,6 +1046,7 @@
TEST(PayloadStateTest, DurationsAreCorrect) {
OmahaResponse response;
+ response.packages.resize(1);
PayloadState payload_state;
FakeSystemState fake_system_state;
FakeClock fake_clock;
@@ -1050,7 +1064,8 @@
// Check that durations are correct for a successful update where
// time has advanced 7 seconds on the wall clock and 4 seconds on
// the monotonic clock.
- SetupPayloadStateWith2Urls("Hash8593", true, &payload_state, &response);
+ SetupPayloadStateWith2Urls(
+ "Hash8593", true, false, &payload_state, &response);
fake_clock.SetWallclockTime(Time::FromInternalValue(8000000));
fake_clock.SetMonotonicTime(Time::FromInternalValue(6000000));
payload_state.UpdateSucceeded();
@@ -1058,7 +1073,8 @@
EXPECT_EQ(payload_state.GetUpdateDurationUptime().InMicroseconds(), 4000000);
// Check that durations are reset when a new response comes in.
- SetupPayloadStateWith2Urls("Hash8594", true, &payload_state, &response);
+ SetupPayloadStateWith2Urls(
+ "Hash8594", true, false, &payload_state, &response);
EXPECT_EQ(payload_state.GetUpdateDuration().InMicroseconds(), 0);
EXPECT_EQ(payload_state.GetUpdateDurationUptime().InMicroseconds(), 0);
@@ -1076,6 +1092,7 @@
fake_clock.SetMonotonicTime(Time::FromInternalValue(5000));
PayloadState payload_state2;
EXPECT_TRUE(payload_state2.Initialize(&fake_system_state));
+ payload_state2.SetResponse(response);
EXPECT_EQ(payload_state2.GetUpdateDuration().InMicroseconds(), 10000000);
EXPECT_EQ(payload_state2.GetUpdateDurationUptime().InMicroseconds(),
10000000);
@@ -1106,7 +1123,8 @@
EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
// Make the update succeed.
- SetupPayloadStateWith2Urls("Hash8593", true, &payload_state, &response);
+ SetupPayloadStateWith2Urls(
+ "Hash8593", true, false, &payload_state, &response);
payload_state.UpdateSucceeded();
// Check that the marker was written.
@@ -1206,6 +1224,9 @@
// abnormally).
fake_system_state.set_prefs(&fake_prefs);
EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
+ OmahaResponse response;
+ response.packages.resize(1);
+ payload_state.SetResponse(response);
EXPECT_CALL(*fake_system_state.mock_metrics_lib(), SendToUMA(_, _, _, _, _))
.Times(AnyNumber());
@@ -1245,7 +1266,8 @@
.WillRepeatedly(Return(false));
// Set the first response.
- SetupPayloadStateWith2Urls("Hash8433", true, &payload_state, &response);
+ SetupPayloadStateWith2Urls(
+ "Hash8433", true, false, &payload_state, &response);
// Check that we use the HTTP URL since there is no value set for allowing
// http.
@@ -1256,7 +1278,8 @@
.WillRepeatedly(DoAll(SetArgumentPointee<0>(false), Return(true)));
// Reset state and set again.
- SetupPayloadStateWith2Urls("Hash8433", false, &payload_state, &response);
+ SetupPayloadStateWith2Urls(
+ "Hash8433", false, false, &payload_state, &response);
// Check that we skip the HTTP URL and use only the HTTPS url.
EXPECT_EQ("https://test", payload_state.GetCurrentUrl());
@@ -1270,7 +1293,8 @@
EXPECT_EQ(0U, payload_state.GetUrlSwitchCount());
// Now, slightly change the response and set it again.
- SetupPayloadStateWith2Urls("Hash2399", false, &payload_state, &response);
+ SetupPayloadStateWith2Urls(
+ "Hash2399", false, false, &payload_state, &response);
// Check that we still skip the HTTP URL and use only the HTTPS url.
EXPECT_EQ("https://test", payload_state.GetCurrentUrl());
@@ -1286,7 +1310,8 @@
// so that we can test that the state is reset not because of the
// hash but because of the policy change which results in candidate url
// list change.
- SetupPayloadStateWith2Urls("Hash2399", true, &payload_state, &response);
+ SetupPayloadStateWith2Urls(
+ "Hash2399", true, false, &payload_state, &response);
// Check that we use the HTTP URL now and the failure count is reset.
EXPECT_EQ("http://test", payload_state.GetCurrentUrl());
@@ -1302,12 +1327,11 @@
TEST(PayloadStateTest, PayloadTypeMetricWhenTypeIsDelta) {
OmahaResponse response;
- response.is_delta_payload = true;
PayloadState payload_state;
FakeSystemState fake_system_state;
EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
- SetupPayloadStateWith2Urls("Hash6437", true, &payload_state, &response);
+ SetupPayloadStateWith2Urls("Hash6437", true, true, &payload_state, &response);
// Simulate a successful download and update.
payload_state.DownloadComplete();
@@ -1328,7 +1352,7 @@
fake_system_state.set_request_params(&params);
EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
- SetupPayloadStateWith2Urls("Hash6437", true, &payload_state, &response);
+ SetupPayloadStateWith2Urls("Hash6437", true, true, &payload_state, &response);
payload_state.DownloadComplete();
@@ -1343,12 +1367,12 @@
TEST(PayloadStateTest, PayloadTypeMetricWhenTypeIsForcedFull) {
OmahaResponse response;
- response.is_delta_payload = false;
PayloadState payload_state;
FakeSystemState fake_system_state;
EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
- SetupPayloadStateWith2Urls("Hash6437", true, &payload_state, &response);
+ SetupPayloadStateWith2Urls(
+ "Hash6437", true, false, &payload_state, &response);
// Mock the request to a request where the delta was disabled.
OmahaRequestParams params(&fake_system_state);
@@ -1371,12 +1395,12 @@
TEST(PayloadStateTest, PayloadTypeMetricWhenTypeIsFull) {
OmahaResponse response;
- response.is_delta_payload = false;
PayloadState payload_state;
FakeSystemState fake_system_state;
EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
- SetupPayloadStateWith2Urls("Hash6437", true, &payload_state, &response);
+ SetupPayloadStateWith2Urls(
+ "Hash6437", true, false, &payload_state, &response);
// Mock the request to a request where the delta is enabled, although the
// result is full.
@@ -1406,7 +1430,8 @@
fake_system_state.set_prefs(&fake_prefs);
EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
- SetupPayloadStateWith2Urls("Hash3141", true, &payload_state, &response);
+ SetupPayloadStateWith2Urls(
+ "Hash3141", true, false, &payload_state, &response);
// Simulate a successful download and update.
payload_state.DownloadComplete();
@@ -1448,7 +1473,8 @@
fake_boot_control->SetCurrentSlot(0);
EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
- SetupPayloadStateWith2Urls("Hash3141", true, &payload_state, &response);
+ SetupPayloadStateWith2Urls(
+ "Hash3141", true, false, &payload_state, &response);
// Simulate a successful download and update.
payload_state.DownloadComplete();
@@ -1477,7 +1503,8 @@
fake_system_state.set_prefs(&fake_prefs);
EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
- SetupPayloadStateWith2Urls("Hash3141", true, &payload_state, &response);
+ SetupPayloadStateWith2Urls(
+ "Hash3141", true, false, &payload_state, &response);
// Simulate a successful download and update.
payload_state.DownloadComplete();
@@ -1519,7 +1546,8 @@
fake_system_state.set_prefs(&fake_prefs);
EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
- SetupPayloadStateWith2Urls("Hash8593", true, &payload_state, &response);
+ SetupPayloadStateWith2Urls(
+ "Hash8593", true, false, &payload_state, &response);
// Should allow exactly kMaxP2PAttempts...
for (int n = 0; n < kMaxP2PAttempts; n++) {
@@ -1541,7 +1569,8 @@
fake_system_state.set_clock(&fake_clock);
fake_system_state.set_prefs(&fake_prefs);
EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
- SetupPayloadStateWith2Urls("Hash8593", true, &payload_state, &response);
+ SetupPayloadStateWith2Urls(
+ "Hash8593", true, false, &payload_state, &response);
// Set the clock to 1 second.
Time epoch = Time::FromInternalValue(1000000);
@@ -1584,7 +1613,8 @@
fake_system_state.set_prefs(&fake_prefs);
EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
- SetupPayloadStateWith2Urls("Hash8593", true, &payload_state, &response);
+ SetupPayloadStateWith2Urls(
+ "Hash8593", true, false, &payload_state, &response);
Time null_time = Time();
EXPECT_EQ(null_time, payload_state.GetP2PFirstAttemptTimestamp());
@@ -1600,7 +1630,8 @@
fake_system_state.set_clock(&fake_clock);
fake_system_state.set_prefs(&fake_prefs);
EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
- SetupPayloadStateWith2Urls("Hash8593", true, &payload_state, &response);
+ SetupPayloadStateWith2Urls(
+ "Hash8593", true, false, &payload_state, &response);
// Set the clock to something known.
Time time = Time::FromInternalValue(12345);
@@ -1629,7 +1660,8 @@
fake_system_state.set_prefs(&fake_prefs);
EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
- SetupPayloadStateWith2Urls("Hash8593", true, &payload_state, &response);
+ SetupPayloadStateWith2Urls(
+ "Hash8593", true, false, &payload_state, &response);
// Set the clock to something known.
Time time = Time::FromInternalValue(12345);
@@ -1641,7 +1673,8 @@
EXPECT_EQ(time, payload_state.GetP2PFirstAttemptTimestamp());
// Set a new response...
- SetupPayloadStateWith2Urls("Hash9904", true, &payload_state, &response);
+ SetupPayloadStateWith2Urls(
+ "Hash9904", true, false, &payload_state, &response);
// ... and check that it clears the P2P state vars.
Time null_time = Time();
diff --git a/update_attempter.cc b/update_attempter.cc
index 8e25819..ff3b046 100644
--- a/update_attempter.cc
+++ b/update_attempter.cc
@@ -32,6 +32,7 @@
#include <base/strings/string_util.h>
#include <base/strings/stringprintf.h>
#include <brillo/bind_lambda.h>
+#include <brillo/data_encoding.h>
#include <brillo/errors/error_codes.h>
#include <brillo/make_unique_ptr.h>
#include <brillo/message_loops/message_loop.h>
@@ -44,7 +45,6 @@
#include "update_engine/common/clock_interface.h"
#include "update_engine/common/constants.h"
#include "update_engine/common/hardware_interface.h"
-#include "update_engine/common/multi_range_http_fetcher.h"
#include "update_engine/common/platform_constants.h"
#include "update_engine/common/prefs_interface.h"
#include "update_engine/common/subprocess.h"
@@ -618,12 +618,12 @@
LibcurlHttpFetcher* download_fetcher =
new LibcurlHttpFetcher(GetProxyResolver(), system_state_->hardware());
download_fetcher->set_server_to_check(ServerToCheck::kDownload);
- shared_ptr<DownloadAction> download_action(new DownloadAction(
- prefs_,
- system_state_->boot_control(),
- system_state_->hardware(),
- system_state_,
- new MultiRangeHttpFetcher(download_fetcher))); // passes ownership
+ shared_ptr<DownloadAction> download_action(
+ new DownloadAction(prefs_,
+ system_state_->boot_control(),
+ system_state_->hardware(),
+ system_state_,
+ download_fetcher)); // passes ownership
shared_ptr<OmahaRequestAction> download_finished_action(
new OmahaRequestAction(
system_state_,
@@ -938,8 +938,12 @@
response_handler_action_->install_plan();
// Generate an unique payload identifier.
- const string target_version_uid =
- install_plan.payload_hash + ":" + install_plan.metadata_signature;
+ string target_version_uid;
+ for (const auto& payload : install_plan.payloads) {
+ target_version_uid +=
+ brillo::data_encoding::Base64Encode(payload.hash) + ":" +
+ payload.metadata_signature + ":";
+ }
// Expect to reboot into the new version to send the proper metric during
// next boot.
@@ -1030,8 +1034,9 @@
const InstallPlan& plan = response_handler_action_->install_plan();
UpdateLastCheckedTime();
new_version_ = plan.version;
- new_payload_size_ = plan.payload_size;
- SetupDownload();
+ new_payload_size_ = 0;
+ for (const auto& payload : plan.payloads)
+ new_payload_size_ += payload.size;
cpu_limiter_.StartLimiter();
SetStatusAndNotify(UpdateStatus::UPDATE_AVAILABLE);
} else if (type == DownloadAction::StaticType()) {
@@ -1319,32 +1324,6 @@
prefs_->SetInt64(kPrefsDeltaUpdateFailures, ++delta_failures);
}
-void UpdateAttempter::SetupDownload() {
- MultiRangeHttpFetcher* fetcher =
- static_cast<MultiRangeHttpFetcher*>(download_action_->http_fetcher());
- fetcher->ClearRanges();
- if (response_handler_action_->install_plan().is_resume) {
- // Resuming an update so fetch the update manifest metadata first.
- int64_t manifest_metadata_size = 0;
- int64_t manifest_signature_size = 0;
- prefs_->GetInt64(kPrefsManifestMetadataSize, &manifest_metadata_size);
- prefs_->GetInt64(kPrefsManifestSignatureSize, &manifest_signature_size);
- fetcher->AddRange(0, manifest_metadata_size + manifest_signature_size);
- // If there're remaining unprocessed data blobs, fetch them. Be careful not
- // to request data beyond the end of the payload to avoid 416 HTTP response
- // error codes.
- int64_t next_data_offset = 0;
- prefs_->GetInt64(kPrefsUpdateStateNextDataOffset, &next_data_offset);
- uint64_t resume_offset =
- manifest_metadata_size + manifest_signature_size + next_data_offset;
- if (resume_offset < response_handler_action_->install_plan().payload_size) {
- fetcher->AddRange(resume_offset);
- }
- } else {
- fetcher->AddRange(0);
- }
-}
-
void UpdateAttempter::PingOmaha() {
if (!processor_->IsRunning()) {
shared_ptr<OmahaRequestAction> ping_action(new OmahaRequestAction(
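Editor's note: the unique payload identifier in the update_attempter.cc hunk above is now assembled by concatenating, for each payload, the Base64 of its hash and its metadata signature, each terminated by ':'. A tiny standalone sketch of that concatenation, using plain std::string fields in place of brillo::Blob plus Base64Encode, which is what the real code uses:

#include <string>
#include <vector>

struct PayloadId {
  std::string hash_base64;         // Base64Encode(payload.hash) in the real code
  std::string metadata_signature;  // metadata signature string from the response
};

// Mirrors the new target_version_uid construction: "<hash>:<sig>:" per payload.
std::string TargetVersionUid(const std::vector<PayloadId>& payloads) {
  std::string uid;
  for (const auto& payload : payloads)
    uid += payload.hash_base64 + ":" + payload.metadata_signature + ":";
  return uid;
}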
diff --git a/update_attempter.h b/update_attempter.h
index 193e172..7780357 100644
--- a/update_attempter.h
+++ b/update_attempter.h
@@ -281,9 +281,6 @@
// Sets the status to the given status and notifies a status update over dbus.
void SetStatusAndNotify(UpdateStatus status);
- // Sets up the download parameters after receiving the update check response.
- void SetupDownload();
-
// Creates an error event object in |error_event_| to be included in an
// OmahaRequestAction once the current action processor is done.
void CreatePendingErrorEvent(AbstractAction* action, ErrorCode code);
diff --git a/update_attempter_android.cc b/update_attempter_android.cc
index da43b6a..6c992c8 100644
--- a/update_attempter_android.cc
+++ b/update_attempter_android.cc
@@ -24,13 +24,13 @@
#include <base/logging.h>
#include <base/strings/string_number_conversions.h>
#include <brillo/bind_lambda.h>
+#include <brillo/data_encoding.h>
#include <brillo/message_loops/message_loop.h>
#include <brillo/strings/string_utils.h>
#include <log/log.h>
#include "update_engine/common/constants.h"
#include "update_engine/common/file_fetcher.h"
-#include "update_engine/common/multi_range_http_fetcher.h"
#include "update_engine/common/utils.h"
#include "update_engine/daemon_state_interface.h"
#include "update_engine/network_selector.h"
@@ -145,19 +145,27 @@
install_plan_.download_url = payload_url;
install_plan_.version = "";
base_offset_ = payload_offset;
- install_plan_.payload_size = payload_size;
- if (!install_plan_.payload_size) {
+ InstallPlan::Payload payload;
+ payload.size = payload_size;
+ if (!payload.size) {
if (!base::StringToUint64(headers[kPayloadPropertyFileSize],
- &install_plan_.payload_size)) {
- install_plan_.payload_size = 0;
+ &payload.size)) {
+ payload.size = 0;
}
}
- install_plan_.payload_hash = headers[kPayloadPropertyFileHash];
- if (!base::StringToUint64(headers[kPayloadPropertyMetadataSize],
- &install_plan_.metadata_size)) {
- install_plan_.metadata_size = 0;
+ if (!brillo::data_encoding::Base64Decode(headers[kPayloadPropertyFileHash],
+ &payload.hash)) {
+ LOG(WARNING) << "Unable to decode base64 file hash: "
+ << headers[kPayloadPropertyFileHash];
}
- install_plan_.metadata_signature = "";
+ if (!base::StringToUint64(headers[kPayloadPropertyMetadataSize],
+ &payload.metadata_size)) {
+ payload.metadata_size = 0;
+ }
+ // The |payload.type| is no longer used since minor_version 3.
+ payload.type = InstallPayloadType::kUnknown;
+ install_plan_.payloads.push_back(payload);
+
// The |public_key_rsa| key would override the public key stored on disk.
install_plan_.public_key_rsa = "";
@@ -172,9 +180,6 @@
LOG(WARNING) << "Unable to save the update check response hash.";
}
}
- // The |payload_type| is not used anymore since minor_version 3.
- install_plan_.payload_type = InstallPayloadType::kUnknown;
-
install_plan_.source_slot = boot_control_->GetCurrentSlot();
install_plan_.target_slot = install_plan_.source_slot == 0 ? 1 : 0;
@@ -193,7 +198,10 @@
"Invalid network_id: " + headers[kPayloadPropertyNetworkId]);
}
if (!network_selector_->SetProcessNetwork(network_id)) {
- LOG(WARNING) << "Unable to set network_id, continuing with the update.";
+ return LogAndSetError(
+ error,
+ FROM_HERE,
+ "Unable to set network_id: " + headers[kPayloadPropertyNetworkId]);
}
}
@@ -201,7 +209,6 @@
install_plan_.Dump();
BuildUpdateActions(payload_url);
- SetupDownload();
// Setup extra headers.
HttpFetcher* fetcher = download_action_->http_fetcher();
if (!headers[kPayloadPropertyAuthorization].empty())
@@ -429,9 +436,11 @@
void UpdateAttempterAndroid::SetStatusAndNotify(UpdateStatus status) {
status_ = status;
+ size_t payload_size =
+ install_plan_.payloads.empty() ? 0 : install_plan_.payloads[0].size;
for (auto observer : daemon_state_->service_observers()) {
observer->SendStatusUpdate(
- 0, download_progress_, status_, "", install_plan_.payload_size);
+ 0, download_progress_, status_, "", payload_size);
}
last_notify_time_ = TimeTicks::Now();
}
@@ -458,12 +467,12 @@
download_fetcher = libcurl_fetcher;
#endif // _UE_SIDELOAD
}
- shared_ptr<DownloadAction> download_action(new DownloadAction(
- prefs_,
- boot_control_,
- hardware_,
- nullptr, // system_state, not used.
- new MultiRangeHttpFetcher(download_fetcher))); // passes ownership
+ shared_ptr<DownloadAction> download_action(
+ new DownloadAction(prefs_,
+ boot_control_,
+ hardware_,
+ nullptr, // system_state, not used.
+ download_fetcher)); // passes ownership
shared_ptr<FilesystemVerifierAction> filesystem_verifier_action(
new FilesystemVerifierAction());
@@ -471,6 +480,7 @@
new PostinstallRunnerAction(boot_control_, hardware_));
download_action->set_delegate(this);
+ download_action->set_base_offset(base_offset_);
download_action_ = download_action;
postinstall_runner_action->set_delegate(this);
@@ -491,42 +501,6 @@
processor_->EnqueueAction(action.get());
}
-void UpdateAttempterAndroid::SetupDownload() {
- MultiRangeHttpFetcher* fetcher =
- static_cast<MultiRangeHttpFetcher*>(download_action_->http_fetcher());
- fetcher->ClearRanges();
- if (install_plan_.is_resume) {
- // Resuming an update so fetch the update manifest metadata first.
- int64_t manifest_metadata_size = 0;
- int64_t manifest_signature_size = 0;
- prefs_->GetInt64(kPrefsManifestMetadataSize, &manifest_metadata_size);
- prefs_->GetInt64(kPrefsManifestSignatureSize, &manifest_signature_size);
- fetcher->AddRange(base_offset_,
- manifest_metadata_size + manifest_signature_size);
- // If there're remaining unprocessed data blobs, fetch them. Be careful not
- // to request data beyond the end of the payload to avoid 416 HTTP response
- // error codes.
- int64_t next_data_offset = 0;
- prefs_->GetInt64(kPrefsUpdateStateNextDataOffset, &next_data_offset);
- uint64_t resume_offset =
- manifest_metadata_size + manifest_signature_size + next_data_offset;
- if (!install_plan_.payload_size) {
- fetcher->AddRange(base_offset_ + resume_offset);
- } else if (resume_offset < install_plan_.payload_size) {
- fetcher->AddRange(base_offset_ + resume_offset,
- install_plan_.payload_size - resume_offset);
- }
- } else {
- if (install_plan_.payload_size) {
- fetcher->AddRange(base_offset_, install_plan_.payload_size);
- } else {
- // If no payload size is passed we assume we read until the end of the
- // stream.
- fetcher->AddRange(base_offset_);
- }
- }
-}
-
bool UpdateAttempterAndroid::WriteUpdateCompletedMarker() {
string boot_id;
TEST_AND_RETURN_FALSE(utils::GetBootId(&boot_id));
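Editor's note: earlier in this file, ApplyPayload now fills an InstallPlan::Payload from the kPayloadProperty* headers: the size falls back to the file-size header when the caller passes 0, the file hash is Base64-decoded into a blob (with a warning if decoding fails), and the metadata size defaults to 0 when the header is absent or malformed. A simplified standalone sketch of the numeric fallback parsing; std::map and std::stoull stand in for the real header container and base::StringToUint64, the string keys are illustrative stand-ins for the kPayloadProperty* constants, and the hash decode is omitted since the real code relies on brillo::data_encoding::Base64Decode:

#include <cstdint>
#include <map>
#include <string>

struct PayloadInfo {
  uint64_t size = 0;
  uint64_t metadata_size = 0;
};

// Parses an unsigned decimal header value; returns 0 on missing or invalid
// input, matching the "reset to 0 on failure" behavior in ApplyPayload above.
uint64_t ParseUintHeader(const std::map<std::string, std::string>& headers,
                         const std::string& key) {
  auto it = headers.find(key);
  if (it == headers.end())
    return 0;
  try {
    return std::stoull(it->second);
  } catch (...) {
    return 0;
  }
}

PayloadInfo ParsePayloadHeaders(const std::map<std::string, std::string>& headers,
                                uint64_t payload_size_arg) {
  PayloadInfo info;
  // Prefer the explicit argument; fall back to the file-size header
  // (kPayloadPropertyFileSize in the real code).
  info.size = payload_size_arg ? payload_size_arg
                               : ParseUintHeader(headers, "FILE_SIZE");
  // kPayloadPropertyMetadataSize in the real code.
  info.metadata_size = ParseUintHeader(headers, "METADATA_SIZE");
  return info;
}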
diff --git a/update_attempter_android.h b/update_attempter_android.h
index 6a5c227..167191e 100644
--- a/update_attempter_android.h
+++ b/update_attempter_android.h
@@ -110,10 +110,6 @@
// applying an update from the given |url|.
void BuildUpdateActions(const std::string& url);
- // Sets up the download parameters based on the update requested on the
- // |install_plan_|.
- void SetupDownload();
-
// Writes to the processing completed marker. Does nothing if
// |update_completed_marker_| is empty.
bool WriteUpdateCompletedMarker();
diff --git a/update_attempter_unittest.cc b/update_attempter_unittest.cc
index 93bcc5c..4928477 100644
--- a/update_attempter_unittest.cc
+++ b/update_attempter_unittest.cc
@@ -218,7 +218,6 @@
DownloadAction action(prefs_, nullptr, nullptr, nullptr, fetcher.release());
EXPECT_CALL(*prefs_, GetInt64(kPrefsDeltaUpdateFailures, _)).Times(0);
attempter_.ActionCompleted(nullptr, &action, ErrorCode::kSuccess);
- EXPECT_EQ(503, attempter_.http_response_code());
EXPECT_EQ(UpdateStatus::FINALIZING, attempter_.status());
ASSERT_EQ(nullptr, attempter_.error_event_.get());
}
@@ -339,8 +338,7 @@
.Times(0);
OmahaResponse response;
string url1 = "http://url1";
- response.payload_urls.push_back(url1);
- response.payload_urls.push_back("https://url");
+ response.packages.push_back({.payload_urls = {url1, "https://url"}});
EXPECT_CALL(*(fake_system_state_.mock_payload_state()), GetCurrentUrl())
.WillRepeatedly(Return(url1));
fake_system_state_.mock_payload_state()->SetResponse(response);