Merge Android R (rvc-dev-plus-aosp-without-vendor@6692709)
Bug: 166295507
Merged-In: I2134676a178c19da694127050b3998b7fc1a357b
Change-Id: Iafc4e1359f6c16296fdaa5bdf8a99e8c76abf343
diff --git a/Android.bp b/Android.bp
index ecf3585..b6ee476 100644
--- a/Android.bp
+++ b/Android.bp
@@ -272,6 +272,7 @@
"libbrillo-binder",
"libcurl",
"libcutils",
+ "libupdate_engine_stable-cpp",
"liblog",
"libssl",
"libstatslog",
@@ -298,13 +299,13 @@
srcs: [
":libupdate_engine_aidl",
"binder_service_android.cc",
+ "binder_service_stable_android.cc",
"certificate_checker.cc",
"daemon_android.cc",
"daemon_state_android.cc",
"hardware_android.cc",
"libcurl_http_fetcher.cc",
"logging_android.cc",
- "metrics_reporter_android.cc",
"metrics_utils.cc",
"network_selector_android.cc",
"update_attempter_android.cc",
@@ -329,7 +330,7 @@
"otacerts",
],
- srcs: ["main.cc"],
+ srcs: ["main.cc", "metrics_reporter_android.cc"],
init_rc: ["update_engine.rc"],
}
@@ -671,6 +672,7 @@
"payload_consumer/certificate_parser_android_unittest.cc",
"payload_consumer/delta_performer_integration_test.cc",
"payload_consumer/delta_performer_unittest.cc",
+ "payload_consumer/download_action_android_unittest.cc",
"payload_consumer/extent_reader_unittest.cc",
"payload_consumer/extent_writer_unittest.cc",
"payload_consumer/fake_file_descriptor.cc",
@@ -703,6 +705,7 @@
"testrunner.cc",
"update_attempter_android_unittest.cc",
"update_status_utils_unittest.cc",
+ "metrics_reporter_stub.cc",
],
}
@@ -726,3 +729,12 @@
},
},
}
+
+// update_engine header library
+cc_library_headers {
+ name: "libupdate_engine_headers",
+ export_include_dirs: ["."],
+ apex_available: [
+ "com.android.gki.*",
+ ],
+}
diff --git a/binder_service_android.cc b/binder_service_android.cc
index 6b8a552..0c8bc2f 100644
--- a/binder_service_android.cc
+++ b/binder_service_android.cc
@@ -24,6 +24,8 @@
#include <brillo/errors/error.h>
#include <utils/String8.h>
+#include "update_engine/binder_service_android_common.h"
+
using android::binder::Status;
using android::os::IUpdateEngineCallback;
using android::os::ParcelFileDescriptor;
@@ -31,23 +33,6 @@
using std::vector;
using update_engine::UpdateEngineStatus;
-namespace {
-Status ErrorPtrToStatus(const brillo::ErrorPtr& error) {
- return Status::fromServiceSpecificError(
- 1, android::String8{error->GetMessage().c_str()});
-}
-
-vector<string> ToVecString(const vector<android::String16>& inp) {
- vector<string> out;
- out.reserve(inp.size());
- for (const auto& e : inp) {
- out.emplace_back(android::String8{e}.string());
- }
- return out;
-}
-
-} // namespace
-
namespace chromeos_update_engine {
BinderUpdateEngineAndroidService::BinderUpdateEngineAndroidService(
diff --git a/binder_service_android_common.h b/binder_service_android_common.h
new file mode 100644
index 0000000..fc621d9
--- /dev/null
+++ b/binder_service_android_common.h
@@ -0,0 +1,48 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_BINDER_SERVICE_ANDROID_COMMON_H_
+#define UPDATE_ENGINE_BINDER_SERVICE_ANDROID_COMMON_H_
+
+#include <string>
+#include <vector>
+
+#include <binder/Status.h>
+#include <brillo/errors/error.h>
+#include <utils/String16.h>
+#include <utils/String8.h>
+
+namespace chromeos_update_engine {
+
+static inline android::binder::Status ErrorPtrToStatus(
+ const brillo::ErrorPtr& error) {
+ return android::binder::Status::fromServiceSpecificError(
+ 1, android::String8{error->GetMessage().c_str()});
+}
+
+static inline std::vector<std::string> ToVecString(
+ const std::vector<android::String16>& inp) {
+ std::vector<std::string> out;
+ out.reserve(inp.size());
+ for (const auto& e : inp) {
+ out.emplace_back(android::String8{e}.string());
+ }
+ return out;
+}
+
+} // namespace chromeos_update_engine
+
+#endif // UPDATE_ENGINE_BINDER_SERVICE_ANDROID_COMMON_H_
diff --git a/binder_service_stable_android.cc b/binder_service_stable_android.cc
new file mode 100644
index 0000000..a12b349
--- /dev/null
+++ b/binder_service_stable_android.cc
@@ -0,0 +1,132 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/binder_service_stable_android.h"
+
+#include <memory>
+
+#include <base/bind.h>
+#include <base/logging.h>
+#include <binderwrapper/binder_wrapper.h>
+#include <brillo/errors/error.h>
+#include <utils/String8.h>
+
+#include "update_engine/binder_service_android_common.h"
+
+using android::binder::Status;
+using android::os::IUpdateEngineStableCallback;
+using android::os::ParcelFileDescriptor;
+using std::string;
+using std::vector;
+using update_engine::UpdateEngineStatus;
+
+namespace chromeos_update_engine {
+
+BinderUpdateEngineAndroidStableService::BinderUpdateEngineAndroidStableService(
+ ServiceDelegateAndroidInterface* service_delegate)
+ : service_delegate_(service_delegate) {}
+
+void BinderUpdateEngineAndroidStableService::SendStatusUpdate(
+ const UpdateEngineStatus& update_engine_status) {
+ last_status_ = static_cast<int>(update_engine_status.status);
+ last_progress_ = update_engine_status.progress;
+ if (callback_) {
+ callback_->onStatusUpdate(last_status_, last_progress_);
+ }
+}
+
+void BinderUpdateEngineAndroidStableService::SendPayloadApplicationComplete(
+ ErrorCode error_code) {
+ if (callback_) {
+ callback_->onPayloadApplicationComplete(static_cast<int>(error_code));
+ }
+}
+
+Status BinderUpdateEngineAndroidStableService::bind(
+ const android::sp<IUpdateEngineStableCallback>& callback,
+ bool* return_value) {
+ // Reject binding if another callback is already bound.
+ if (callback_ != nullptr) {
+ LOG(ERROR) << "Another callback is already bound. Can't bind new callback.";
+ *return_value = false;
+ return Status::ok();
+ }
+
+ // See BinderUpdateEngineAndroidService::bind.
+ if (last_status_ != -1) {
+ auto status = callback->onStatusUpdate(last_status_, last_progress_);
+ if (!status.isOk()) {
+ LOG(ERROR) << "Failed to call onStatusUpdate() from callback: "
+ << status.toString8();
+ *return_value = false;
+ return Status::ok();
+ }
+ }
+
+ callback_ = callback;
+
+ const android::sp<IBinder>& callback_binder =
+ IUpdateEngineStableCallback::asBinder(callback);
+ auto binder_wrapper = android::BinderWrapper::Get();
+ binder_wrapper->RegisterForDeathNotifications(
+ callback_binder,
+ base::Bind(base::IgnoreResult(
+ &BinderUpdateEngineAndroidStableService::UnbindCallback),
+ base::Unretained(this),
+ base::Unretained(callback_binder.get())));
+
+ *return_value = true;
+ return Status::ok();
+}
+
+Status BinderUpdateEngineAndroidStableService::unbind(
+ const android::sp<IUpdateEngineStableCallback>& callback,
+ bool* return_value) {
+ const android::sp<IBinder>& callback_binder =
+ IUpdateEngineStableCallback::asBinder(callback);
+ auto binder_wrapper = android::BinderWrapper::Get();
+ binder_wrapper->UnregisterForDeathNotifications(callback_binder);
+
+ *return_value = UnbindCallback(callback_binder.get());
+ return Status::ok();
+}
+
+Status BinderUpdateEngineAndroidStableService::applyPayloadFd(
+ const ParcelFileDescriptor& pfd,
+ int64_t payload_offset,
+ int64_t payload_size,
+ const vector<android::String16>& header_kv_pairs) {
+ vector<string> str_headers = ToVecString(header_kv_pairs);
+
+ brillo::ErrorPtr error;
+ if (!service_delegate_->ApplyPayload(
+ pfd.get(), payload_offset, payload_size, str_headers, &error)) {
+ return ErrorPtrToStatus(error);
+ }
+ return Status::ok();
+}
+
+bool BinderUpdateEngineAndroidStableService::UnbindCallback(
+ const IBinder* callback) {
+ if (IUpdateEngineStableCallback::asBinder(callback_).get() != callback) {
+ LOG(ERROR) << "Unable to unbind unknown callback.";
+ return false;
+ }
+ callback_ = nullptr;
+ return true;
+}
+
+} // namespace chromeos_update_engine
diff --git a/binder_service_stable_android.h b/binder_service_stable_android.h
new file mode 100644
index 0000000..1667798
--- /dev/null
+++ b/binder_service_stable_android.h
@@ -0,0 +1,85 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_BINDER_SERVICE_STABLE_ANDROID_H_
+#define UPDATE_ENGINE_BINDER_SERVICE_STABLE_ANDROID_H_
+
+#include <stdint.h>
+
+#include <string>
+#include <vector>
+
+#include <utils/Errors.h>
+#include <utils/String16.h>
+#include <utils/StrongPointer.h>
+
+#include "android/os/BnUpdateEngineStable.h"
+#include "android/os/IUpdateEngineStableCallback.h"
+#include "update_engine/service_delegate_android_interface.h"
+#include "update_engine/service_observer_interface.h"
+
+namespace chromeos_update_engine {
+
+class BinderUpdateEngineAndroidStableService
+ : public android::os::BnUpdateEngineStable,
+ public ServiceObserverInterface {
+ public:
+ explicit BinderUpdateEngineAndroidStableService(
+ ServiceDelegateAndroidInterface* service_delegate);
+ ~BinderUpdateEngineAndroidStableService() override = default;
+
+ const char* ServiceName() const {
+ return "android.os.UpdateEngineStableService";
+ }
+
+ // ServiceObserverInterface overrides.
+ void SendStatusUpdate(
+ const update_engine::UpdateEngineStatus& update_engine_status) override;
+ void SendPayloadApplicationComplete(ErrorCode error_code) override;
+
+ // android::os::BnUpdateEngineStable overrides.
+ android::binder::Status applyPayloadFd(
+ const ::android::os::ParcelFileDescriptor& pfd,
+ int64_t payload_offset,
+ int64_t payload_size,
+ const std::vector<android::String16>& header_kv_pairs) override;
+ android::binder::Status bind(
+ const android::sp<android::os::IUpdateEngineStableCallback>& callback,
+ bool* return_value) override;
+ android::binder::Status unbind(
+ const android::sp<android::os::IUpdateEngineStableCallback>& callback,
+ bool* return_value) override;
+
+ private:
+ // Remove the passed |callback| from the list of registered callbacks. Called
+ // on unbind() or whenever the callback object is destroyed.
+ // Returns true on success.
+ bool UnbindCallback(const IBinder* callback);
+
+ // Bound callback. The stable interface only supports one callback at a time.
+ android::sp<android::os::IUpdateEngineStableCallback> callback_;
+
+ // Cached copy of the last status update sent. Used to send an initial
+ // notification when bind() is called from the client.
+ int last_status_{-1};
+ double last_progress_{0.0};
+
+ ServiceDelegateAndroidInterface* service_delegate_;
+};
+
+} // namespace chromeos_update_engine
+
+#endif // UPDATE_ENGINE_BINDER_SERVICE_STABLE_ANDROID_H_
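Reviewer note: the sketch below illustrates how a client might reach the new stable service over Binder. The interface, callback, and method names come from the AIDL headers included above and the service name from ServiceName(); the helper function, the empty header list, and the use of defaultServiceManager()/interface_cast are illustrative assumptions, not part of this change.

// Hypothetical client-side sketch -- not part of this change.
#include <android-base/unique_fd.h>
#include <binder/IInterface.h>
#include <binder/IServiceManager.h>
#include <binder/ParcelFileDescriptor.h>
#include <utils/String16.h>

#include <vector>

#include "android/os/IUpdateEngineStable.h"

bool ApplyPayloadViaStableService(int payload_fd,
                                  int64_t payload_offset,
                                  int64_t payload_size) {
  using android::interface_cast;
  using android::sp;
  using android::String16;
  using android::os::IUpdateEngineStable;

  // The service is registered under the name returned by ServiceName() above.
  sp<android::IBinder> binder = android::defaultServiceManager()->getService(
      String16("android.os.UpdateEngineStableService"));
  if (binder == nullptr) {
    return false;
  }
  sp<IUpdateEngineStable> service = interface_cast<IUpdateEngineStable>(binder);

  // Headers use the same "key=value" convention as the non-stable interface;
  // left empty here for brevity.
  std::vector<String16> headers;
  android::os::ParcelFileDescriptor pfd(android::base::unique_fd(payload_fd));
  return service
      ->applyPayloadFd(pfd, payload_offset, payload_size, headers)
      .isOk();
}

A progress callback would be registered beforehand via bind(); unlike the non-stable service, only one callback can be bound at a time.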
diff --git a/boot_control_chromeos.h b/boot_control_chromeos.h
index f90e65b..4271672 100644
--- a/boot_control_chromeos.h
+++ b/boot_control_chromeos.h
@@ -82,7 +82,7 @@
// Extracts DLC module ID and package ID from partition name. The structure of
// the partition name is dlc/<dlc-id>/<dlc-package>. For example:
- // dlc/dummy-dlc/dummy-package
+ // dlc/fake-dlc/fake-package
bool ParseDlcPartitionName(const std::string partition_name,
std::string* dlc_id,
std::string* dlc_package) const;
diff --git a/common/action_pipe.h b/common/action_pipe.h
index 0c98ee1..4c56812 100644
--- a/common/action_pipe.h
+++ b/common/action_pipe.h
@@ -79,6 +79,8 @@
private:
ObjectType contents_;
+ // Give unit tests access to the private constructor and |contents_|.
+ friend class DownloadActionTest;
// The ctor is private. This is because this class should construct itself
// via the static Bond() method.
diff --git a/common/action_processor.h b/common/action_processor.h
index 735a106..ad98cc9 100644
--- a/common/action_processor.h
+++ b/common/action_processor.h
@@ -89,7 +89,7 @@
// But this call deletes the action if there no other object has a reference
// to it, so in that case, the caller should not try to access any of its
// member variables after this call.
- void ActionComplete(AbstractAction* actionptr, ErrorCode code);
+ virtual void ActionComplete(AbstractAction* actionptr, ErrorCode code);
private:
FRIEND_TEST(ActionProcessorTest, ChainActionsTest);
diff --git a/common/fake_boot_control.h b/common/fake_boot_control.h
index adbacd6..5d8823a 100644
--- a/common/fake_boot_control.h
+++ b/common/fake_boot_control.h
@@ -57,6 +57,9 @@
if (part_it == devices_[slot].end())
return false;
*device = part_it->second;
+ if (is_dynamic != nullptr) {
+ *is_dynamic = false;
+ }
return true;
}
diff --git a/common/fake_hardware.h b/common/fake_hardware.h
index 2a8e81d..30c0897 100644
--- a/common/fake_hardware.h
+++ b/common/fake_hardware.h
@@ -19,10 +19,12 @@
#include <map>
#include <string>
+#include <utility>
#include <base/time/time.h>
#include "update_engine/common/hardware_interface.h"
+#include "update_engine/common/utils.h"
namespace chromeos_update_engine {
@@ -207,6 +209,18 @@
bool GetIsRollbackPowerwashScheduled() const {
return powerwash_scheduled_ && save_rollback_data_;
}
+ std::string GetVersionForLogging(
+ const std::string& partition_name) const override {
+ return partition_timestamps_[partition_name];
+ }
+ void SetVersion(const std::string& partition_name, std::string timestamp) {
+ partition_timestamps_[partition_name] = std::move(timestamp);
+ }
+ bool IsPartitionUpdateValid(const std::string& partition_name,
+ const std::string& new_version) const override {
+ const auto old_version = GetVersionForLogging(partition_name);
+ return utils::IsTimestampNewer(old_version, new_version);
+ }
private:
bool is_official_build_{true};
@@ -230,6 +244,7 @@
int64_t build_timestamp_{0};
bool first_active_omaha_ping_sent_{false};
bool warm_reset_{false};
+ mutable std::map<std::string, std::string> partition_timestamps_;
DISALLOW_COPY_AND_ASSIGN(FakeHardware);
};
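
Reviewer note: a minimal gtest-style sketch (not part of this change) of how the new FakeHardware timestamp hooks could be exercised; the test name is invented.

#include <gtest/gtest.h>

#include "update_engine/common/fake_hardware.h"

namespace chromeos_update_engine {

TEST(FakeHardwareTest, PartitionTimestampSketch) {
  FakeHardware fake_hardware;
  // Unset partitions report an empty version and accept any update.
  EXPECT_EQ("", fake_hardware.GetVersionForLogging("product"));
  EXPECT_TRUE(fake_hardware.IsPartitionUpdateValid("product", "5"));

  // Once a timestamp is set, only equal or newer versions are valid.
  fake_hardware.SetVersion("product", "10");
  EXPECT_TRUE(fake_hardware.IsPartitionUpdateValid("product", "11"));
  EXPECT_FALSE(fake_hardware.IsPartitionUpdateValid("product", "5"));
}

}  // namespace chromeos_update_engine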
diff --git a/common/hardware_interface.h b/common/hardware_interface.h
index 4f0305f..0fffbfb 100644
--- a/common/hardware_interface.h
+++ b/common/hardware_interface.h
@@ -142,6 +142,19 @@
// If |warm_reset| is true, sets the warm reset to indicate a warm reset is
// needed on the next reboot. Otherwise, clears the flag.
virtual void SetWarmReset(bool warm_reset) = 0;
+
+ // Return the version/timestamp for partition `partition_name`.
+ // Don't make any assumptions about the formatting of the returned string.
+ // Only used for logging/debugging purposes.
+ virtual std::string GetVersionForLogging(
+ const std::string& partition_name) const = 0;
+
+ // Return true if and only if `new_version` is "newer" than the
+ // version number of partition `partition_name`. The notion of
+ // "newer" is defined by this function. Caller should not make
+ // any assumption about the underlying logic.
+ virtual bool IsPartitionUpdateValid(const std::string& partition_name,
+ const std::string& new_version) const = 0;
};
} // namespace chromeos_update_engine
diff --git a/common/mock_action_processor.h b/common/mock_action_processor.h
index 4c62109..9785776 100644
--- a/common/mock_action_processor.h
+++ b/common/mock_action_processor.h
@@ -32,6 +32,8 @@
MOCK_METHOD0(StartProcessing, void());
MOCK_METHOD1(EnqueueAction, void(AbstractAction* action));
+ MOCK_METHOD2(ActionComplete, void(AbstractAction*, ErrorCode));
+
// This is a legacy workaround described in:
// https://github.com/google/googletest/blob/master/googlemock/docs/CookBook.md#legacy-workarounds-for-move-only-types-legacymoveonly
void EnqueueAction(std::unique_ptr<AbstractAction> action) override {
diff --git a/common/mock_http_fetcher.cc b/common/mock_http_fetcher.cc
index 10e3b9e..1b3cd7d 100644
--- a/common/mock_http_fetcher.cc
+++ b/common/mock_http_fetcher.cc
@@ -22,6 +22,7 @@
#include <base/logging.h>
#include <base/strings/string_util.h>
#include <base/time/time.h>
+#include <brillo/message_loops/message_loop.h>
#include <gtest/gtest.h>
// This is a mock implementation of HttpFetcher which is useful for testing.
@@ -43,12 +44,12 @@
SignalTransferComplete();
return;
}
- if (sent_size_ < data_.size())
+ if (sent_offset_ < data_.size())
SendData(true);
}
void MockHttpFetcher::SendData(bool skip_delivery) {
- if (fail_transfer_ || sent_size_ == data_.size()) {
+ if (fail_transfer_ || sent_offset_ == data_.size()) {
SignalTransferComplete();
return;
}
@@ -60,19 +61,22 @@
// Setup timeout callback even if the transfer is about to be completed in
// order to get a call to |TransferComplete|.
- if (timeout_id_ == MessageLoop::kTaskIdNull) {
+ if (timeout_id_ == MessageLoop::kTaskIdNull && delay_) {
+ CHECK(MessageLoop::current());
timeout_id_ = MessageLoop::current()->PostDelayedTask(
FROM_HERE,
base::Bind(&MockHttpFetcher::TimeoutCallback, base::Unretained(this)),
base::TimeDelta::FromMilliseconds(10));
}
- if (!skip_delivery) {
+ if (!skip_delivery || !delay_) {
const size_t chunk_size =
- min(kMockHttpFetcherChunkSize, data_.size() - sent_size_);
- sent_size_ += chunk_size;
+ min(kMockHttpFetcherChunkSize, data_.size() - sent_offset_);
+ sent_offset_ += chunk_size;
+ bytes_sent_ += chunk_size;
CHECK(delegate_);
- delegate_->ReceivedBytes(this, &data_[sent_size_ - chunk_size], chunk_size);
+ delegate_->ReceivedBytes(
+ this, &data_[sent_offset_ - chunk_size], chunk_size);
}
// We may get terminated and deleted right after |ReceivedBytes| call, so we
// should not access any class member variable after this call.
@@ -81,7 +85,7 @@
void MockHttpFetcher::TimeoutCallback() {
CHECK(!paused_);
timeout_id_ = MessageLoop::kTaskIdNull;
- CHECK_LE(sent_size_, data_.size());
+ CHECK_LE(sent_offset_, data_.size());
// Same here, we should not access any member variable after this call.
SendData(false);
}
@@ -90,10 +94,15 @@
// The transfer cannot be resumed.
void MockHttpFetcher::TerminateTransfer() {
LOG(INFO) << "Terminating transfer.";
- // Kill any timeout, it is ok to call with kTaskIdNull.
- MessageLoop::current()->CancelTask(timeout_id_);
- timeout_id_ = MessageLoop::kTaskIdNull;
- delegate_->TransferTerminated(this);
+ // During testing, MessageLoop may or may not be available.
+ // So don't call CancelTask() unless necessary.
+ if (timeout_id_ != MessageLoop::kTaskIdNull) {
+ MessageLoop::current()->CancelTask(timeout_id_);
+ timeout_id_ = MessageLoop::kTaskIdNull;
+ }
+ if (delegate_) {
+ delegate_->TransferTerminated(this);
+ }
}
void MockHttpFetcher::SetHeader(const std::string& header_name,
diff --git a/common/mock_http_fetcher.h b/common/mock_http_fetcher.h
index 0f04319..ea5b83d 100644
--- a/common/mock_http_fetcher.h
+++ b/common/mock_http_fetcher.h
@@ -46,7 +46,7 @@
size_t size,
ProxyResolver* proxy_resolver)
: HttpFetcher(proxy_resolver),
- sent_size_(0),
+ sent_offset_(0),
timeout_id_(brillo::MessageLoop::kTaskIdNull),
paused_(false),
fail_transfer_(false),
@@ -64,7 +64,7 @@
// Ignores this.
void SetOffset(off_t offset) override {
- sent_size_ = offset;
+ sent_offset_ = offset;
if (delegate_)
delegate_->SeekToOffset(offset);
}
@@ -76,8 +76,8 @@
void set_connect_timeout(int connect_timeout_seconds) override {}
void set_max_retry_count(int max_retry_count) override {}
- // Dummy: no bytes were downloaded.
- size_t GetBytesDownloaded() override { return sent_size_; }
+ // The mock downloads nothing; report the bytes handed to the delegate.
+ size_t GetBytesDownloaded() override { return bytes_sent_; }
// Begins the transfer if it hasn't already begun.
void BeginTransfer(const std::string& url) override;
@@ -113,6 +113,8 @@
const brillo::Blob& post_data() const { return post_data_; }
+ void set_delay(bool delay) { delay_ = delay; }
+
private:
// Sends data to the delegate and sets up a timeout callback if needed. There
// must be a delegate. If |skip_delivery| is true, no bytes will be delivered,
@@ -129,8 +131,11 @@
// A full copy of the data we'll return to the delegate
brillo::Blob data_;
- // The number of bytes we've sent so far
- size_t sent_size_;
+ // The current offset; marks the first byte that will be sent next.
+ size_t sent_offset_{0};
+
+ // Total number of bytes transferred
+ size_t bytes_sent_{0};
// The extra headers set.
std::map<std::string, std::string> extra_headers_;
@@ -140,13 +145,16 @@
brillo::MessageLoop::TaskId timeout_id_;
// True iff the fetcher is paused.
- bool paused_;
+ bool paused_{false};
// Set to true if the transfer should fail.
- bool fail_transfer_;
+ bool fail_transfer_{false};
// Set to true if BeginTransfer should EXPECT fail.
- bool never_use_;
+ bool never_use_{false};
+
+ // Whether to wait 10ms before sending data to the delegate.
+ bool delay_{true};
DISALLOW_COPY_AND_ASSIGN(MockHttpFetcher);
};
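
Reviewer note: a small sketch (not part of this change) of what the new set_delay() knob buys in tests; the helper name, payload contents, and URL are made up.

// Illustrative only: drive MockHttpFetcher without a MessageLoop.
#include <brillo/secure_blob.h>

#include "update_engine/common/mock_http_fetcher.h"

namespace chromeos_update_engine {

void NoDelayFetchSketch(HttpFetcherDelegate* delegate) {
  brillo::Blob payload(128, 0xab);
  MockHttpFetcher fetcher(payload.data(), payload.size(), nullptr);
  fetcher.set_delegate(delegate);
  // With delay disabled, SendData() hands bytes to the delegate right away and
  // never schedules the 10ms TimeoutCallback(), so tests that run under
  // brillo::FakeMessageLoop (or no loop at all) still receive the data.
  fetcher.set_delay(false);
  fetcher.BeginTransfer("http://fake.url/payload");
}

}  // namespace chromeos_update_engine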
diff --git a/common/subprocess.cc b/common/subprocess.cc
index 298a65c..3e197fb 100644
--- a/common/subprocess.cc
+++ b/common/subprocess.cc
@@ -129,12 +129,7 @@
if (!ok || eof) {
// There was either an error or an EOF condition, so we are done watching
// the file descriptor.
-#ifdef __ANDROID__
- MessageLoop::current()->CancelTask(record->stdout_task_id);
- record->stdout_task_id = MessageLoop::kTaskIdNull;
-#else
record->stdout_controller.reset();
-#endif // __ANDROID__
return;
}
} while (bytes_read);
@@ -149,12 +144,7 @@
// Make sure we read any remaining process output and then close the pipe.
OnStdoutReady(record);
-#ifdef __ANDROID__
- MessageLoop::current()->CancelTask(record->stdout_task_id);
- record->stdout_task_id = MessageLoop::kTaskIdNull;
-#else
record->stdout_controller.reset();
-#endif // __ANDROID__
// Don't print any log if the subprocess exited with exit code 0.
if (info.si_code != CLD_EXITED) {
@@ -209,18 +199,9 @@
<< record->stdout_fd << ".";
}
-#ifdef __ANDROID__
- record->stdout_task_id = MessageLoop::current()->WatchFileDescriptor(
- FROM_HERE,
- record->stdout_fd,
- MessageLoop::WatchMode::kWatchRead,
- true,
- base::Bind(&Subprocess::OnStdoutReady, record.get()));
-#else
record->stdout_controller = base::FileDescriptorWatcher::WatchReadable(
record->stdout_fd,
base::BindRepeating(&Subprocess::OnStdoutReady, record.get()));
-#endif // __ANDROID__
subprocess_records_[pid] = std::move(record);
return pid;
diff --git a/common/subprocess.h b/common/subprocess.h
index f1b9f1f..432d4cb 100644
--- a/common/subprocess.h
+++ b/common/subprocess.h
@@ -123,12 +123,8 @@
// These are used to monitor the stdout of the running process, including
// the stderr if it was redirected.
-#ifdef __ANDROID__
- brillo::MessageLoop::TaskId stdout_task_id{
- brillo::MessageLoop::kTaskIdNull};
-#else
std::unique_ptr<base::FileDescriptorWatcher::Controller> stdout_controller;
-#endif // __ANDROID__
+
int stdout_fd{-1};
std::string stdout;
};
diff --git a/common/subprocess_unittest.cc b/common/subprocess_unittest.cc
index bc52b83..74fee61 100644
--- a/common/subprocess_unittest.cc
+++ b/common/subprocess_unittest.cc
@@ -74,9 +74,7 @@
brillo::BaseMessageLoop loop_{&base_loop_};
brillo::AsynchronousSignalHandler async_signal_handler_;
Subprocess subprocess_;
-#ifndef __ANDROID__
unique_ptr<base::FileDescriptorWatcher::Controller> watcher_;
-#endif // __ANDROID__
};
@@ -261,23 +259,6 @@
int fifo_fd = HANDLE_EINTR(open(fifo_path.c_str(), O_RDONLY));
EXPECT_GE(fifo_fd, 0);
-#ifdef __ANDROID__
- loop_.WatchFileDescriptor(FROM_HERE,
- fifo_fd,
- MessageLoop::WatchMode::kWatchRead,
- false,
- base::Bind(
- [](int fifo_fd, uint32_t tag) {
- char c;
- EXPECT_EQ(1,
- HANDLE_EINTR(read(fifo_fd, &c, 1)));
- EXPECT_EQ('X', c);
- LOG(INFO) << "Killing tag " << tag;
- Subprocess::Get().KillExec(tag);
- },
- fifo_fd,
- tag));
-#else
watcher_ = base::FileDescriptorWatcher::WatchReadable(
fifo_fd,
base::Bind(
@@ -295,12 +276,11 @@
base::Unretained(&watcher_),
fifo_fd,
tag));
-#endif // __ANDROID__
// This test would leak a callback that runs when the child process exits
// unless we wait for it to run.
brillo::MessageLoopRunUntil(
- &loop_, TimeDelta::FromSeconds(120), base::Bind([] {
+ &loop_, TimeDelta::FromSeconds(20), base::Bind([] {
return Subprocess::Get().subprocess_records_.empty();
}));
EXPECT_TRUE(Subprocess::Get().subprocess_records_.empty());
diff --git a/common/test_utils.h b/common/test_utils.h
index 44b7aa1..63ea749 100644
--- a/common/test_utils.h
+++ b/common/test_utils.h
@@ -78,7 +78,7 @@
void FillWithData(brillo::Blob* buffer);
-// Compare the value of native array for download source parameter.
+// Compare the value of the built-in array for the download source parameter.
MATCHER_P(DownloadSourceMatcher, source_array, "") {
return std::equal(source_array, source_array + kNumDownloadSources, arg);
}
diff --git a/common/utils.cc b/common/utils.cc
index 3e3d830..bbb155f 100644
--- a/common/utils.cc
+++ b/common/utils.cc
@@ -820,7 +820,7 @@
return base_code;
}
-string StringVectorToString(const vector<string> &vec_str) {
+string StringVectorToString(const vector<string>& vec_str) {
string str = "[";
for (vector<string>::const_iterator i = vec_str.begin(); i != vec_str.end();
++i) {
@@ -849,7 +849,7 @@
encoded_hash.c_str());
}
-bool ConvertToOmahaInstallDate(Time time, int *out_num_days) {
+bool ConvertToOmahaInstallDate(Time time, int* out_num_days) {
time_t unix_time = time.ToTimeT();
// Output of: date +"%s" --date="Jan 1, 2007 0:00 PST".
const time_t kOmahaEpoch = 1167638400;
@@ -982,6 +982,29 @@
return base::NumberToString(base::StringPieceHash()(str_to_convert));
}
+static bool ParseTimestamp(const std::string& str, int64_t* out) {
+ if (!base::StringToInt64(str, out)) {
+ LOG(WARNING) << "Invalid timestamp: " << str;
+ return false;
+ }
+ return true;
+}
+
+bool IsTimestampNewer(const std::string& old_version,
+ const std::string& new_version) {
+ if (old_version.empty() || new_version.empty()) {
+ LOG(WARNING)
+ << "One of old/new timestamp is empty, permit update anyway. Old: "
+ << old_version << " New: " << new_version;
+ return true;
+ }
+ int64_t old_ver = 0;
+ TEST_AND_RETURN_FALSE(ParseTimestamp(old_version, &old_ver));
+ int64_t new_ver = 0;
+ TEST_AND_RETURN_FALSE(ParseTimestamp(new_version, &new_ver));
+ return old_ver <= new_ver;
+}
+
} // namespace utils
} // namespace chromeos_update_engine
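
Reviewer note: a compact illustration of the comparison semantics implemented above; the expected results mirror the new unit test added later in this patch.

#include <cassert>

#include "update_engine/common/utils.h"

namespace chromeos_update_engine {

void TimestampSemanticsSketch() {
  // Numeric comparison: equal or newer timestamps are accepted.
  assert(utils::IsTimestampNewer("100", "100"));
  assert(utils::IsTimestampNewer("100", "101"));
  assert(!utils::IsTimestampNewer("100", "99"));
  // Missing values are permissive; malformed values are rejected.
  assert(utils::IsTimestampNewer("", "99"));
  assert(!utils::IsTimestampNewer("100", "abc"));
}

}  // namespace chromeos_update_engine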
diff --git a/common/utils.h b/common/utils.h
index 23ac03d..5dfee3b 100644
--- a/common/utils.h
+++ b/common/utils.h
@@ -323,6 +323,12 @@
// with |Excluder| as the exclusion name.
std::string GetExclusionName(const std::string& str_to_convert);
+// Parse `old_version` and `new_version` as integer timestamps and
+// return true if `new_version` is larger/newer.
+// Returns true if either one is empty. Returns false if either timestamp
+// fails to parse as an integer.
+bool IsTimestampNewer(const std::string& old_version,
+ const std::string& new_version);
+
} // namespace utils
// Utility class to close a file descriptor
diff --git a/common/utils_unittest.cc b/common/utils_unittest.cc
index ebcc548..37871d2 100644
--- a/common/utils_unittest.cc
+++ b/common/utils_unittest.cc
@@ -481,4 +481,12 @@
IGNORE_EINTR(close(fd));
}
+TEST(UtilsTest, ValidatePerPartitionTimestamp) {
+ ASSERT_FALSE(utils::IsTimestampNewer("10", "5"));
+ ASSERT_TRUE(utils::IsTimestampNewer("10", "11"));
+ ASSERT_FALSE(utils::IsTimestampNewer("10", "lol"));
+ ASSERT_FALSE(utils::IsTimestampNewer("lol", "ZZZ"));
+ ASSERT_TRUE(utils::IsTimestampNewer("10", ""));
+}
+
} // namespace chromeos_update_engine
diff --git a/daemon_android.cc b/daemon_android.cc
index 1aa921f..313d7dd 100644
--- a/daemon_android.cc
+++ b/daemon_android.cc
@@ -47,16 +47,26 @@
LOG_IF(ERROR, !daemon_state_android->Initialize())
<< "Failed to initialize system state.";
+ auto binder_wrapper = android::BinderWrapper::Get();
+
// Create the Binder Service.
binder_service_ = new BinderUpdateEngineAndroidService{
daemon_state_android->service_delegate()};
- auto binder_wrapper = android::BinderWrapper::Get();
if (!binder_wrapper->RegisterService(binder_service_->ServiceName(),
binder_service_)) {
LOG(ERROR) << "Failed to register binder service.";
}
-
daemon_state_->AddObserver(binder_service_.get());
+
+ // Create the stable binder service.
+ stable_binder_service_ = new BinderUpdateEngineAndroidStableService{
+ daemon_state_android->service_delegate()};
+ if (!binder_wrapper->RegisterService(stable_binder_service_->ServiceName(),
+ stable_binder_service_)) {
+ LOG(ERROR) << "Failed to register stable binder service.";
+ }
+ daemon_state_->AddObserver(stable_binder_service_.get());
+
daemon_state_->StartUpdater();
return EX_OK;
}
diff --git a/daemon_android.h b/daemon_android.h
index baead37..f0c028e 100644
--- a/daemon_android.h
+++ b/daemon_android.h
@@ -22,6 +22,7 @@
#include <brillo/binder_watcher.h>
#include "update_engine/binder_service_android.h"
+#include "update_engine/binder_service_stable_android.h"
#include "update_engine/common/subprocess.h"
#include "update_engine/daemon_base.h"
#include "update_engine/daemon_state_interface.h"
@@ -43,6 +44,7 @@
brillo::BinderWatcher binder_watcher_;
android::sp<BinderUpdateEngineAndroidService> binder_service_;
+ android::sp<BinderUpdateEngineAndroidStableService> stable_binder_service_;
// The daemon state with all the required daemon classes for the configured
// platform.
diff --git a/dynamic_partition_control_android.cc b/dynamic_partition_control_android.cc
index ba749d9..ccb99ba 100644
--- a/dynamic_partition_control_android.cc
+++ b/dynamic_partition_control_android.cc
@@ -434,17 +434,17 @@
return false;
}
+ if (!SetTargetBuildVars(manifest)) {
+ return false;
+ }
+
// Although the current build supports dynamic partitions, the given payload
// doesn't use it for target partitions. This could happen when applying a
// retrofit update. Skip updating the partition metadata for the target slot.
- is_target_dynamic_ = !manifest.dynamic_partition_metadata().groups().empty();
if (!is_target_dynamic_) {
return true;
}
- target_supports_snapshot_ =
- manifest.dynamic_partition_metadata().snapshot_enabled();
-
if (!update)
return true;
@@ -505,6 +505,52 @@
return true;
}
+bool DynamicPartitionControlAndroid::SetTargetBuildVars(
+ const DeltaArchiveManifest& manifest) {
+ // Precondition: current build supports dynamic partition.
+ CHECK(GetDynamicPartitionsFeatureFlag().IsEnabled());
+
+ bool is_target_dynamic =
+ !manifest.dynamic_partition_metadata().groups().empty();
+ bool target_supports_snapshot =
+ manifest.dynamic_partition_metadata().snapshot_enabled();
+
+ if (manifest.partial_update()) {
+ // Partial updates require DAP. A partial update that does not involve
+ // dynamic partitions can have empty groups(), so also assume
+ // is_target_dynamic in this case. This assumption should be safe because
+ // we also check target_supports_snapshot below, which presumably also
+ // implies that the target build supports dynamic partitions.
+ if (!is_target_dynamic) {
+ LOG(INFO) << "Assuming target build supports dynamic partitions for "
+ "partial updates.";
+ is_target_dynamic = true;
+ }
+
+ // Partial updates require Virtual A/B. Double-check that both the current
+ // build and the target build support Virtual A/B.
+ if (!GetVirtualAbFeatureFlag().IsEnabled()) {
+ LOG(ERROR) << "Partial update cannot be applied on a device that does "
+ "not support snapshots.";
+ return false;
+ }
+ if (!target_supports_snapshot) {
+ LOG(ERROR) << "Cannot apply partial update to a build that does not "
+ "support snapshots.";
+ return false;
+ }
+ }
+
+ // Store the flags.
+ is_target_dynamic_ = is_target_dynamic;
+ // If !is_target_dynamic_, leave target_supports_snapshot_ unset because
+ // snapshots would not work without dynamic partitions.
+ if (is_target_dynamic_) {
+ target_supports_snapshot_ = target_supports_snapshot;
+ }
+ return true;
+}
+
namespace {
// Try our best to erase AVB footer.
class AvbFooterEraser {
@@ -792,6 +838,11 @@
MetadataBuilder* builder,
uint32_t target_slot,
const DeltaArchiveManifest& manifest) {
+ // Check preconditions.
+ CHECK(!GetVirtualAbFeatureFlag().IsEnabled() || IsRecovery())
+ << "UpdatePartitionMetadata is called on a Virtual A/B device "
+ "but source partitions is not deleted. This is not allowed.";
+
// If applying downgrade from Virtual A/B to non-Virtual A/B, the left-over
// COW group needs to be deleted to ensure there are enough space to create
// target partitions.
@@ -807,7 +858,12 @@
std::string expr;
uint64_t allocatable_space = builder->AllocatableSpace();
- if (!GetDynamicPartitionsFeatureFlag().IsRetrofit()) {
+ // On devices retrofitting dynamic partitions, allocatable_space = super.
+ // On devices launching dynamic partitions w/o VAB,
+ // allocatable_space = super / 2.
+ // On devices launching dynamic partitions with VAB, allocatable_space = super.
+ if (!GetDynamicPartitionsFeatureFlag().IsRetrofit() &&
+ !GetVirtualAbFeatureFlag().IsEnabled()) {
allocatable_space /= 2;
expr = "half of ";
}
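
Reviewer note: a sketch of the kind of manifest that exercises the new partial-update branch in SetTargetBuildVars() above. The protobuf setters are the standard generated ones for the fields referenced in the code; treat the exact names and include path as assumptions.

#include "update_engine/update_metadata.pb.h"

namespace chromeos_update_engine {

DeltaArchiveManifest PartialUpdateManifestSketch() {
  DeltaArchiveManifest manifest;
  // A partial update may carry no dynamic partition groups at all, so
  // SetTargetBuildVars() assumes the target is dynamic anyway...
  manifest.set_partial_update(true);
  // ...but it must target a snapshot-enabled (Virtual A/B) build, otherwise
  // SetTargetBuildVars() fails.
  manifest.mutable_dynamic_partition_metadata()->set_snapshot_enabled(true);
  return manifest;
}

}  // namespace chromeos_update_engine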
diff --git a/dynamic_partition_control_android.h b/dynamic_partition_control_android.h
index 08656fd..49967f6 100644
--- a/dynamic_partition_control_android.h
+++ b/dynamic_partition_control_android.h
@@ -203,8 +203,11 @@
bool force_writable,
std::string* path);
- // Update |builder| according to |partition_metadata|, assuming the device
- // does not have Virtual A/B.
+ // Update |builder| according to |partition_metadata|.
+ // - In Android mode, this is only called when the device
+ // does not have Virtual A/B.
+ // - When sideloading, this may be called as a fallback path if CoW cannot
+ // be created.
bool UpdatePartitionMetadata(android::fs_mgr::MetadataBuilder* builder,
uint32_t target_slot,
const DeltaArchiveManifest& manifest);
@@ -267,6 +270,10 @@
// doing anything.
bool EnsureMetadataMounted();
+ // Set boolean flags related to target build. This includes flags like
+ // target_supports_snapshot_ and is_target_dynamic_.
+ bool SetTargetBuildVars(const DeltaArchiveManifest& manifest);
+
std::set<std::string> mapped_devices_;
const FeatureFlag dynamic_partitions_;
const FeatureFlag virtual_ab_;
diff --git a/dynamic_partition_control_android_unittest.cc b/dynamic_partition_control_android_unittest.cc
index 4154b36..223e177 100644
--- a/dynamic_partition_control_android_unittest.cc
+++ b/dynamic_partition_control_android_unittest.cc
@@ -113,21 +113,24 @@
// |slot|.
void SetMetadata(uint32_t slot,
const PartitionSuffixSizes& sizes,
- uint32_t partition_attr = 0) {
+ uint32_t partition_attr = 0,
+ uint64_t super_size = kDefaultSuperSize) {
EXPECT_CALL(dynamicControl(),
LoadMetadataBuilder(GetSuperDevice(slot), slot))
.Times(AnyNumber())
- .WillRepeatedly(Invoke([sizes, partition_attr](auto, auto) {
+ .WillRepeatedly(Invoke([=](auto, auto) {
return NewFakeMetadata(PartitionSuffixSizesToManifest(sizes),
- partition_attr);
+ partition_attr,
+ super_size);
}));
EXPECT_CALL(dynamicControl(),
LoadMetadataBuilder(GetSuperDevice(slot), slot, _))
.Times(AnyNumber())
- .WillRepeatedly(Invoke([sizes, partition_attr](auto, auto, auto) {
+ .WillRepeatedly(Invoke([=](auto, auto, auto) {
return NewFakeMetadata(PartitionSuffixSizesToManifest(sizes),
- partition_attr);
+ partition_attr,
+ super_size);
}));
}
@@ -1006,8 +1009,11 @@
return dynamicControl().RealPrepareDynamicPartitionsForUpdate(
source_slot, target_slot, manifest, delete_source);
}));
+ // Only one slot of space in super
+ uint64_t super_size = kDefaultGroupSize + 1_MiB;
// Expectation on PrepareDynamicPartitionsForUpdate
- SetMetadata(source(), {{S("system"), 2_GiB}, {S("vendor"), 1_GiB}});
+ SetMetadata(
+ source(), {{S("system"), 2_GiB}, {S("vendor"), 1_GiB}}, 0, super_size);
ExpectUnmap({T("system"), T("vendor")});
// Expect that the source partitions aren't present in target super metadata.
ExpectStoreMetadata({{T("system"), 3_GiB}, {T("vendor"), 1_GiB}});
diff --git a/dynamic_partition_test_utils.h b/dynamic_partition_test_utils.h
index 70a176b..d701dce 100644
--- a/dynamic_partition_test_utils.h
+++ b/dynamic_partition_test_utils.h
@@ -175,9 +175,11 @@
}
inline std::unique_ptr<MetadataBuilder> NewFakeMetadata(
- const DeltaArchiveManifest& manifest, uint32_t partition_attr = 0) {
+ const DeltaArchiveManifest& manifest,
+ uint32_t partition_attr = 0,
+ uint64_t super_size = kDefaultSuperSize) {
auto builder =
- MetadataBuilder::New(kDefaultSuperSize, kFakeMetadataSize, kMaxNumSlots);
+ MetadataBuilder::New(super_size, kFakeMetadataSize, kMaxNumSlots);
for (const auto& group : manifest.dynamic_partition_metadata().groups()) {
EXPECT_TRUE(builder->AddGroup(group.name(), group.size()));
for (const auto& partition_name : group.partition_names()) {
diff --git a/excluder_chromeos_unittest.cc b/excluder_chromeos_unittest.cc
index a8c14b3..dba77e4 100644
--- a/excluder_chromeos_unittest.cc
+++ b/excluder_chromeos_unittest.cc
@@ -29,7 +29,7 @@
namespace chromeos_update_engine {
-constexpr char kDummyHash[] =
+constexpr char kFakeHash[] =
"71ff43d76e2488e394e46872f5b066cc25e394c2c3e3790dd319517883b33db1";
class ExcluderChromeOSTest : public ::testing::Test {
@@ -47,20 +47,20 @@
};
TEST_F(ExcluderChromeOSTest, ExclusionCheck) {
- EXPECT_FALSE(excluder_->IsExcluded(kDummyHash));
- EXPECT_TRUE(excluder_->Exclude(kDummyHash));
- EXPECT_TRUE(excluder_->IsExcluded(kDummyHash));
+ EXPECT_FALSE(excluder_->IsExcluded(kFakeHash));
+ EXPECT_TRUE(excluder_->Exclude(kFakeHash));
+ EXPECT_TRUE(excluder_->IsExcluded(kFakeHash));
}
TEST_F(ExcluderChromeOSTest, ResetFlow) {
EXPECT_TRUE(excluder_->Exclude("abc"));
- EXPECT_TRUE(excluder_->Exclude(kDummyHash));
+ EXPECT_TRUE(excluder_->Exclude(kFakeHash));
EXPECT_TRUE(excluder_->IsExcluded("abc"));
- EXPECT_TRUE(excluder_->IsExcluded(kDummyHash));
+ EXPECT_TRUE(excluder_->IsExcluded(kFakeHash));
EXPECT_TRUE(excluder_->Reset());
EXPECT_FALSE(excluder_->IsExcluded("abc"));
- EXPECT_FALSE(excluder_->IsExcluded(kDummyHash));
+ EXPECT_FALSE(excluder_->IsExcluded(kFakeHash));
}
} // namespace chromeos_update_engine
diff --git a/hardware_android.cc b/hardware_android.cc
index 0bf05e4..659e67e 100644
--- a/hardware_android.cc
+++ b/hardware_android.cc
@@ -19,13 +19,17 @@
#include <sys/types.h>
#include <memory>
+#include <string>
+#include <string_view>
+#include <android-base/parseint.h>
#include <android-base/properties.h>
#include <base/files/file_util.h>
#include <bootloader_message/bootloader_message.h>
#include "update_engine/common/hardware.h"
#include "update_engine/common/platform_constants.h"
+#include "update_engine/common/utils.h"
using android::base::GetBoolProperty;
using android::base::GetIntProperty;
@@ -223,4 +227,24 @@
}
}
+std::string HardwareAndroid::GetVersionForLogging(
+ const std::string& partition_name) const {
+ return android::base::GetProperty("ro." + partition_name + ".build.date.utc",
+ "");
+}
+
+bool HardwareAndroid::IsPartitionUpdateValid(
+ const std::string& partition_name, const std::string& new_version) const {
+ const auto old_version = GetVersionForLogging(partition_name);
+ // TODO(zhangkelvin) for some partitions, missing a current timestamp should
+ // be an error, e.g. system, vendor, product etc.
+ auto applicable = utils::IsTimestampNewer(old_version, new_version);
+ if (!applicable) {
+ LOG(ERROR) << "Timestamp on partition " << partition_name
+ << " is newer than update. Partition timestamp: " << old_version
+ << " Update timestamp: " << new_version;
+ }
+ return applicable;
+}
+
} // namespace chromeos_update_engine
diff --git a/hardware_android.h b/hardware_android.h
index e0368f9..2e55f97 100644
--- a/hardware_android.h
+++ b/hardware_android.h
@@ -18,6 +18,7 @@
#define UPDATE_ENGINE_HARDWARE_ANDROID_H_
#include <string>
+#include <string_view>
#include <base/macros.h>
#include <base/time/time.h>
@@ -28,7 +29,7 @@
namespace chromeos_update_engine {
// Implements the real interface with the hardware in the Android platform.
-class HardwareAndroid final : public HardwareInterface {
+class HardwareAndroid : public HardwareInterface {
public:
HardwareAndroid() = default;
~HardwareAndroid() override = default;
@@ -58,6 +59,11 @@
bool GetFirstActiveOmahaPingSent() const override;
bool SetFirstActiveOmahaPingSent() override;
void SetWarmReset(bool warm_reset) override;
+ [[nodiscard]] std::string GetVersionForLogging(
+ const std::string& partition_name) const override;
+ [[nodiscard]] bool IsPartitionUpdateValid(
+ const std::string& partition_name,
+ const std::string& new_version) const override;
private:
DISALLOW_COPY_AND_ASSIGN(HardwareAndroid);
diff --git a/hardware_chromeos.cc b/hardware_chromeos.cc
index 5ff1b29..58f30db 100644
--- a/hardware_chromeos.cc
+++ b/hardware_chromeos.cc
@@ -47,7 +47,7 @@
const char kOOBECompletedMarker[] = "/home/chronos/.oobe_completed";
// The stateful directory used by update_engine to store powerwash-safe files.
-// The files stored here must be whitelisted in the powerwash scripts.
+// The files stored here must be added to the powerwash script allowlist.
const char kPowerwashSafeDirectory[] =
"/mnt/stateful_partition/unencrypted/preserve";
@@ -384,4 +384,15 @@
void HardwareChromeOS::SetWarmReset(bool warm_reset) {}
+std::string HardwareChromeOS::GetVersionForLogging(
+ const std::string& partition_name) const {
+ // TODO(zhangkelvin) Implement per-partition timestamp for Chrome OS.
+ return "";
+}
+bool HardwareChromeOS::IsPartitionUpdateValid(
+ const std::string& partition_name, const std::string& new_version) const {
+ // TODO(zhangkelvin) Implement per-partition timestamp for Chrome OS.
+ return true;
+}
+
} // namespace chromeos_update_engine
diff --git a/hardware_chromeos.h b/hardware_chromeos.h
index e14ae9a..49fed88 100644
--- a/hardware_chromeos.h
+++ b/hardware_chromeos.h
@@ -63,6 +63,10 @@
bool GetFirstActiveOmahaPingSent() const override;
bool SetFirstActiveOmahaPingSent() override;
void SetWarmReset(bool warm_reset) override;
+ std::string GetVersionForLogging(
+ const std::string& partition_name) const override;
+ bool IsPartitionUpdateValid(const std::string& partition_name,
+ const std::string& new_version) const override;
private:
friend class HardwareChromeOSTest;
diff --git a/libcurl_http_fetcher.cc b/libcurl_http_fetcher.cc
index 7c53a2d..f8aed7c 100644
--- a/libcurl_http_fetcher.cc
+++ b/libcurl_http_fetcher.cc
@@ -31,6 +31,8 @@
#include <base/strings/string_split.h>
#include <base/strings/string_util.h>
#include <base/strings/stringprintf.h>
+#include <base/threading/thread_task_runner_handle.h>
+
#ifdef __ANDROID__
#include <cutils/qtaguid.h>
@@ -81,23 +83,9 @@
LibcurlHttpFetcher* fetcher = static_cast<LibcurlHttpFetcher*>(clientp);
// Stop watching the socket before closing it.
-#ifdef __ANDROID__
- for (size_t t = 0; t < arraysize(fetcher->fd_task_maps_); ++t) {
- const auto fd_task_pair = fetcher->fd_task_maps_[t].find(item);
- if (fd_task_pair != fetcher->fd_task_maps_[t].end()) {
- if (!MessageLoop::current()->CancelTask(fd_task_pair->second)) {
- LOG(WARNING) << "Error canceling the watch task "
- << fd_task_pair->second << " for "
- << (t ? "writing" : "reading") << " the fd " << item;
- }
- fetcher->fd_task_maps_[t].erase(item);
- }
- }
-#else
for (size_t t = 0; t < base::size(fetcher->fd_controller_maps_); ++t) {
fetcher->fd_controller_maps_[t].erase(item);
}
-#endif // __ANDROID__
// Documentation for this callback says to return 0 on success or 1 on error.
if (!IGNORE_EINTR(close(item)))
@@ -471,6 +459,19 @@
// There's either more work to do or we are paused, so we just keep the
// file descriptors to watch up to date and exit, until we are done with the
// work and we are not paused.
+#ifdef __ANDROID__
+ // When there's no base::SingleThreadTaskRunner on the current thread, it's
+ // not possible to watch file descriptors. Just poll again later. This
+ // usually happens when brillo::FakeMessageLoop is used.
+ if (!base::ThreadTaskRunnerHandle::IsSet()) {
+ MessageLoop::current()->PostDelayedTask(
+ FROM_HERE,
+ base::Bind(&LibcurlHttpFetcher::CurlPerformOnce,
+ base::Unretained(this)),
+ TimeDelta::FromSeconds(1));
+ return;
+ }
+#endif
SetupMessageLoopSources();
return;
}
@@ -691,63 +692,6 @@
// We should iterate through all file descriptors up to libcurl's fd_max or
// the highest one we're tracking, whichever is larger.
-#ifdef __ANDROID__
- for (size_t t = 0; t < arraysize(fd_task_maps_); ++t) {
- if (!fd_task_maps_[t].empty())
- fd_max = max(fd_max, fd_task_maps_[t].rbegin()->first);
- }
-
- // For each fd, if we're not tracking it, track it. If we are tracking it, but
- // libcurl doesn't care about it anymore, stop tracking it. After this loop,
- // there should be exactly as many tasks scheduled in fd_task_maps_[0|1] as
- // there are read/write fds that we're tracking.
- for (int fd = 0; fd <= fd_max; ++fd) {
- // Note that fd_exc is unused in the current version of libcurl so is_exc
- // should always be false.
- bool is_exc = FD_ISSET(fd, &fd_exc) != 0;
- bool must_track[2] = {
- is_exc || (FD_ISSET(fd, &fd_read) != 0), // track 0 -- read
- is_exc || (FD_ISSET(fd, &fd_write) != 0) // track 1 -- write
- };
- MessageLoop::WatchMode watch_modes[2] = {
- MessageLoop::WatchMode::kWatchRead,
- MessageLoop::WatchMode::kWatchWrite,
- };
-
- for (size_t t = 0; t < arraysize(fd_task_maps_); ++t) {
- auto fd_task_it = fd_task_maps_[t].find(fd);
- bool tracked = fd_task_it != fd_task_maps_[t].end();
-
- if (!must_track[t]) {
- // If we have an outstanding io_channel, remove it.
- if (tracked) {
- MessageLoop::current()->CancelTask(fd_task_it->second);
- fd_task_maps_[t].erase(fd_task_it);
- }
- continue;
- }
-
- // If we are already tracking this fd, continue -- nothing to do.
- if (tracked)
- continue;
-
- // Track a new fd.
- fd_task_maps_[t][fd] = MessageLoop::current()->WatchFileDescriptor(
- FROM_HERE,
- fd,
- watch_modes[t],
- true, // persistent
- base::Bind(&LibcurlHttpFetcher::CurlPerformOnce,
- base::Unretained(this)));
-
- static int io_counter = 0;
- io_counter++;
- if (io_counter % 50 == 0) {
- LOG(INFO) << "io_counter = " << io_counter;
- }
- }
- }
-#else
for (size_t t = 0; t < base::size(fd_controller_maps_); ++t) {
if (!fd_controller_maps_[t].empty())
fd_max = max(fd_max, fd_controller_maps_[t].rbegin()->first);
@@ -803,7 +747,6 @@
}
}
}
-#endif // __ANDROID__
// Set up a timeout callback for libcurl.
if (timeout_id_ == MessageLoop::kTaskIdNull) {
@@ -848,22 +791,9 @@
MessageLoop::current()->CancelTask(timeout_id_);
timeout_id_ = MessageLoop::kTaskIdNull;
-#ifdef __ANDROID__
- for (size_t t = 0; t < arraysize(fd_task_maps_); ++t) {
- for (const auto& fd_taks_pair : fd_task_maps_[t]) {
- if (!MessageLoop::current()->CancelTask(fd_taks_pair.second)) {
- LOG(WARNING) << "Error canceling the watch task " << fd_taks_pair.second
- << " for " << (t ? "writing" : "reading") << " the fd "
- << fd_taks_pair.first;
- }
- }
- fd_task_maps_[t].clear();
- }
-#else
for (size_t t = 0; t < base::size(fd_controller_maps_); ++t) {
fd_controller_maps_[t].clear();
}
-#endif // __ANDROID__
if (curl_http_headers_) {
curl_slist_free_all(curl_http_headers_);
diff --git a/libcurl_http_fetcher.h b/libcurl_http_fetcher.h
index 4854f40..4e91b69 100644
--- a/libcurl_http_fetcher.h
+++ b/libcurl_http_fetcher.h
@@ -255,12 +255,8 @@
// the message loop. libcurl may open/close descriptors and switch their
// directions so maintain two separate lists so that watch conditions can be
// set appropriately.
-#ifdef __ANDROID__
- std::map<int, brillo::MessageLoop::TaskId> fd_task_maps_[2];
-#else
std::map<int, std::unique_ptr<base::FileDescriptorWatcher::Controller>>
fd_controller_maps_[2];
-#endif // __ANDROID__
// The TaskId of the timer we're waiting on. kTaskIdNull if we are not waiting
// on it.
diff --git a/libcurl_http_fetcher_unittest.cc b/libcurl_http_fetcher_unittest.cc
index 8064b99..874ef2e 100644
--- a/libcurl_http_fetcher_unittest.cc
+++ b/libcurl_http_fetcher_unittest.cc
@@ -94,37 +94,24 @@
no_network_max_retries);
}
-#ifdef __ANDROID__
-TEST_F(LibcurlHttpFetcherTest, CouldntResolveHostTest) {
- int no_network_max_retries = 1;
- libcurl_fetcher_.set_no_network_max_retries(no_network_max_retries);
-
- // This test actually sends request to internet but according to
- // https://tools.ietf.org/html/rfc2606#section-2, .invalid domain names are
- // reserved and sure to be invalid. Ideally we should mock libcurl or
- // reorganize LibcurlHttpFetcher so the part that sends request can be mocked
- // easily.
- // TODO(xiaochu) Refactor LibcurlHttpFetcher (and its relates) so it's
- // easier to mock the part that depends on internet connectivity.
- libcurl_fetcher_.BeginTransfer("https://An-uNres0lvable-uRl.invalid");
- while (loop_.PendingTasks()) {
- loop_.RunOnce(true);
- }
-
- // If libcurl fails to resolve the name, we call res_init() to reload
- // resolv.conf and retry exactly once more. See crbug.com/982813 for details.
- EXPECT_EQ(libcurl_fetcher_.get_no_network_max_retries(),
- no_network_max_retries + 1);
-}
-#else
TEST_F(LibcurlHttpFetcherTest, CouldNotResolveHostTest) {
int no_network_max_retries = 1;
libcurl_fetcher_.set_no_network_max_retries(no_network_max_retries);
libcurl_fetcher_.BeginTransfer("https://An-uNres0lvable-uRl.invalid");
+#ifdef __ANDROID__
+ // Android is slower, so the libcurl handle may not finish within one cycle.
+ // Run more cycles until it finishes. The original test didn't correctly
+ // handle the case where the libcurl fds need to be re-watched.
+ while (loop_.PendingTasks() &&
+ libcurl_fetcher_.GetAuxiliaryErrorCode() == ErrorCode::kSuccess) {
+ loop_.RunOnce(true);
+ }
+#else
// The first time it can't resolve.
loop_.RunOnce(true);
+#endif
EXPECT_EQ(libcurl_fetcher_.GetAuxiliaryErrorCode(),
ErrorCode::kUnresolvedHostError);
@@ -154,8 +141,18 @@
// easier to mock the part that depends on internet connectivity.
libcurl_fetcher_.BeginTransfer("https://An-uNres0lvable-uRl.invalid");
+#ifdef __ANDROID__
+ // Android is slower, so the libcurl handle may not finish within one cycle.
+ // Run more cycles until it finishes. The original test didn't correctly
+ // handle the case where the libcurl fds need to be re-watched.
+ while (loop_.PendingTasks() &&
+ libcurl_fetcher_.GetAuxiliaryErrorCode() == ErrorCode::kSuccess) {
+ loop_.RunOnce(true);
+ }
+#else
// The first time it can't resolve.
loop_.RunOnce(true);
+#endif
EXPECT_EQ(libcurl_fetcher_.GetAuxiliaryErrorCode(),
ErrorCode::kUnresolvedHostError);
@@ -168,9 +165,19 @@
[this]() { libcurl_fetcher_.http_response_code_ = 0; }));
libcurl_fetcher_.transfer_size_ = 10;
+#ifdef __ANDROID__
+ // Android is slower, so the libcurl handle may not finish within one cycle.
+ // Run more cycles until it finishes. The original test didn't correctly
+ // handle the case where the libcurl fds need to be re-watched.
+ while (loop_.PendingTasks() && libcurl_fetcher_.GetAuxiliaryErrorCode() ==
+ ErrorCode::kUnresolvedHostError) {
+ loop_.RunOnce(true);
+ }
+#else
// This time the host is resolved. But after that again we can't resolve
// anymore (See above).
loop_.RunOnce(true);
+#endif
EXPECT_EQ(libcurl_fetcher_.GetAuxiliaryErrorCode(),
ErrorCode::kUnresolvedHostRecovered);
@@ -186,7 +193,6 @@
EXPECT_EQ(libcurl_fetcher_.get_no_network_max_retries(),
no_network_max_retries + 1);
}
-#endif // __ANDROID__
TEST_F(LibcurlHttpFetcherTest, HttpFetcherStateMachineRetryFailedTest) {
state_machine_.UpdateState(true);
diff --git a/omaha_request_action.cc b/omaha_request_action.cc
index 3a0b91c..95e1250 100644
--- a/omaha_request_action.cc
+++ b/omaha_request_action.cc
@@ -351,7 +351,7 @@
// If we have the value stored on disk, just return it.
int64_t stored_value;
if (prefs->GetInt64(kPrefsInstallDateDays, &stored_value)) {
- // Convert and sanity-check.
+ // Convert and validity-check.
int install_date_days = static_cast<int>(stored_value);
if (install_date_days >= 0)
return install_date_days;
@@ -952,10 +952,10 @@
int code = GetHTTPResponseCode();
LOG(ERROR) << "Omaha request network transfer failed with HTTPResponseCode="
<< code;
- // Makes sure we send sane error values.
+ // Makes sure we send proper error values.
if (code < 0 || code >= 1000) {
code = 999;
- LOG(WARNING) << "Converting to sane HTTPResponseCode=" << code;
+ LOG(WARNING) << "Converting to proper HTTPResponseCode=" << code;
}
completer.set_code(static_cast<ErrorCode>(
static_cast<int>(ErrorCode::kOmahaRequestHTTPResponseBase) + code));
diff --git a/omaha_request_builder_xml.cc b/omaha_request_builder_xml.cc
index 097b9f1..e2857f1 100644
--- a/omaha_request_builder_xml.cc
+++ b/omaha_request_builder_xml.cc
@@ -216,7 +216,7 @@
if (!prefs_->GetString(prefs_key, &cohort_value) || cohort_value.empty())
return "";
}
- // This is a sanity check to avoid sending a huge XML file back to Ohama due
+ // This is a validity check to avoid sending a huge XML file back to Omaha due
// to a compromised stateful partition making the update check fail in low
// network environments envent after a reboot.
if (cohort_value.size() > 1024) {
diff --git a/omaha_request_params.cc b/omaha_request_params.cc
index d4b8d64..8a2e3dc 100644
--- a/omaha_request_params.cc
+++ b/omaha_request_params.cc
@@ -66,7 +66,7 @@
image_props_ = LoadImageProperties(system_state_);
mutable_image_props_ = LoadMutableImageProperties(system_state_);
- // Sanity check the channel names.
+ // Validity check the channel names.
if (!IsValidChannel(image_props_.current_channel))
image_props_.current_channel = "stable-channel";
if (!IsValidChannel(mutable_image_props_.target_channel))
diff --git a/omaha_request_params.h b/omaha_request_params.h
index 3452965..76fc806 100644
--- a/omaha_request_params.h
+++ b/omaha_request_params.h
@@ -328,7 +328,7 @@
bool ToMoreStableChannel() const;
// Returns True if we should store the fw/ec versions based on our hwid_.
- // Compares hwid to a set of whitelisted prefixes.
+ // Compares hwid to a set of prefixes in the allowlist.
bool CollectECFWVersions() const;
// Gets the machine type (e.g. "i686").
diff --git a/payload_consumer/delta_performer.cc b/payload_consumer/delta_performer.cc
index 68f38df..aa0b4f5 100644
--- a/payload_consumer/delta_performer.cc
+++ b/payload_consumer/delta_performer.cc
@@ -740,7 +740,7 @@
CheckpointUpdateProgress(false);
}
- // In major version 2, we don't add dummy operation to the payload.
+ // In major version 2, we don't add an unused operation to the payload.
// If we already extracted the signature we should skip this step.
if (manifest_.has_signatures_offset() && manifest_.has_signatures_size() &&
signatures_message_data_.empty()) {
@@ -1152,7 +1152,8 @@
}
if (read_ok && expected_source_hash == source_hash)
return true;
-
+ LOG(WARNING) << "Source hash from RAW device mismatched, attempting to "
+ "correct using ECC";
if (!OpenCurrentECCPartition()) {
// The following function call will return false since the source hash
// mismatches, but we still want to call it so it prints the appropriate
@@ -1165,7 +1166,6 @@
<< ", expected "
<< base::HexEncode(expected_source_hash.data(),
expected_source_hash.size());
-
if (should_optimize) {
TEST_AND_RETURN_FALSE(fd_utils::ReadAndHashExtents(
source_ecc_fd_, operation.src_extents(), block_size_, &source_hash));
@@ -1570,7 +1570,7 @@
}
ErrorCode DeltaPerformer::ValidateManifest() {
- // Perform assorted checks to sanity check the manifest, make sure it
+ // Perform assorted checks to validate the manifest, make sure it
// matches data from other sources, and that it is a supported version.
bool has_old_fields = std::any_of(manifest_.partitions().begin(),
manifest_.partitions().end(),
@@ -1579,9 +1579,12 @@
});
// The presence of an old partition hash is the sole indicator for a delta
- // update.
+ // update. Also, always treat a partial update as a delta update so that we
+ // can perform the minor version check correctly.
InstallPayloadType actual_payload_type =
- has_old_fields ? InstallPayloadType::kDelta : InstallPayloadType::kFull;
+ (has_old_fields || manifest_.partial_update())
+ ? InstallPayloadType::kDelta
+ : InstallPayloadType::kFull;
if (payload_->type == InstallPayloadType::kUnknown) {
LOG(INFO) << "Detected a '"
@@ -1625,17 +1628,15 @@
LOG(ERROR) << "Manifest contains deprecated fields.";
return ErrorCode::kPayloadMismatchedType;
}
-
- if (manifest_.max_timestamp() < hardware_->GetBuildTimestamp()) {
- LOG(ERROR) << "The current OS build timestamp ("
- << hardware_->GetBuildTimestamp()
- << ") is newer than the maximum timestamp in the manifest ("
- << manifest_.max_timestamp() << ")";
+ TimestampCheckResult result = CheckTimestampError();
+ if (result == TimestampCheckResult::DOWNGRADE) {
if (!hardware_->AllowDowngrade()) {
return ErrorCode::kPayloadTimestampError;
}
LOG(INFO) << "The current OS build allows downgrade, continuing to apply"
" the payload with an older timestamp.";
+ } else if (result == TimestampCheckResult::FAILURE) {
+ return ErrorCode::kPayloadTimestampError;
}
// TODO(crbug.com/37661) we should be adding more and more manifest checks,
@@ -1644,6 +1645,53 @@
return ErrorCode::kSuccess;
}
+TimestampCheckResult DeltaPerformer::CheckTimestampError() const {
+ bool is_partial_update =
+ manifest_.has_partial_update() && manifest_.partial_update();
+ const auto& partitions = manifest_.partitions();
+ auto&& timestamp_valid = [this](const PartitionUpdate& partition) {
+ return hardware_->IsPartitionUpdateValid(partition.partition_name(),
+ partition.version());
+ };
+ if (is_partial_update) {
+ // For partial updates, all partitions MUST have valid timestamps,
+ // but max_timestamp can be empty.
+ for (const auto& partition : partitions) {
+ if (!partition.has_version()) {
+ LOG(ERROR)
+ << "PartitionUpdate " << partition.partition_name()
+ << " does ot have a version field. Not allowed in partial updates.";
+ return TimestampCheckResult::FAILURE;
+ }
+ if (!timestamp_valid(partition)) {
+ // Warning because the system might allow downgrade.
+ LOG(WARNING) << "PartitionUpdate " << partition.partition_name()
+ << " has an older version than partition on device.";
+ return TimestampCheckResult::DOWNGRADE;
+ }
+ }
+
+ return TimestampCheckResult::SUCCESS;
+ }
+ if (manifest_.max_timestamp() < hardware_->GetBuildTimestamp()) {
+ LOG(ERROR) << "The current OS build timestamp ("
+ << hardware_->GetBuildTimestamp()
+ << ") is newer than the maximum timestamp in the manifest ("
+ << manifest_.max_timestamp() << ")";
+ return TimestampCheckResult::DOWNGRADE;
+ }
+ // Otherwise... partitions can have empty timestamps.
+ for (const auto& partition : partitions) {
+ if (partition.has_version() && !timestamp_valid(partition)) {
+ // Warning because the system might allow downgrade.
+ LOG(WARNING) << "PartitionUpdate " << partition.partition_name()
+ << " has an older version than partition on device.";
+ return TimestampCheckResult::DOWNGRADE;
+ }
+ }
+ return TimestampCheckResult::SUCCESS;
+}
+
ErrorCode DeltaPerformer::ValidateOperationHash(
const InstallOperation& operation) {
if (!operation.data_sha256_hash().size()) {
@@ -1660,7 +1708,7 @@
// corresponding update should have been produced with the operation
// hashes. So if it happens it means either we've turned operation hash
// generation off in DeltaDiffGenerator or it's a regression of some sort.
- // One caveat though: The last operation is a dummy signature operation
+ // One caveat though: The last operation is an unused signature operation
// that doesn't have a hash at the time the manifest is created. So we
// should not complain about that operation. This operation can be
// recognized by the fact that its offset is mentioned in the manifest.
@@ -1795,7 +1843,7 @@
resumed_update_failures > kMaxResumedUpdateFailures)
return false;
- // Sanity check the rest.
+ // Validate the rest.
int64_t next_data_offset = -1;
if (!(prefs->GetInt64(kPrefsUpdateStateNextDataOffset, &next_data_offset) &&
next_data_offset >= 0))
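
The CheckTimestampError() helper added above splits the timestamp policy in two: for partial updates every listed partition must carry its own version, while full/delta updates fall back to the manifest-wide max_timestamp and only validate per-partition versions when present. A minimal, standard-library-only sketch of that decision logic (the stand-in Partition struct and timestamp_valid callback are assumptions, not the real DeltaArchiveManifest or HardwareInterface):

    #include <cstdint>
    #include <functional>
    #include <optional>
    #include <string>
    #include <vector>

    enum class TimestampCheckResult { SUCCESS, FAILURE, DOWNGRADE };

    // Stand-in for a manifest PartitionUpdate entry (assumption, not the proto).
    struct Partition {
      std::string name;
      std::optional<std::string> version;  // unset == no version field
    };

    TimestampCheckResult CheckTimestamps(
        bool is_partial_update,
        int64_t manifest_max_timestamp,
        int64_t build_timestamp,
        const std::vector<Partition>& partitions,
        const std::function<bool(const Partition&)>& timestamp_valid) {
      if (is_partial_update) {
        // Partial updates: every partition must carry a version field.
        for (const auto& p : partitions) {
          if (!p.version)
            return TimestampCheckResult::FAILURE;
          if (!timestamp_valid(p))
            return TimestampCheckResult::DOWNGRADE;
        }
        return TimestampCheckResult::SUCCESS;
      }
      // Full/delta updates: the global max_timestamp gates the whole payload.
      if (manifest_max_timestamp < build_timestamp)
        return TimestampCheckResult::DOWNGRADE;
      // Per-partition versions are optional here; validate only those present.
      for (const auto& p : partitions) {
        if (p.version && !timestamp_valid(p))
          return TimestampCheckResult::DOWNGRADE;
      }
      return TimestampCheckResult::SUCCESS;
    }

ValidateManifest() then maps DOWNGRADE to kPayloadTimestampError unless the hardware allows downgrades, and FAILURE to kPayloadTimestampError unconditionally.
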
diff --git a/payload_consumer/delta_performer.h b/payload_consumer/delta_performer.h
index 2d1768d..0718ef6 100644
--- a/payload_consumer/delta_performer.h
+++ b/payload_consumer/delta_performer.h
@@ -49,6 +49,12 @@
// This class performs the actions in a delta update synchronously. The delta
// update itself should be passed in in chunks as it is received.
+enum class TimestampCheckResult {
+ SUCCESS,
+ FAILURE,
+ DOWNGRADE,
+};
+
class DeltaPerformer : public FileWriter {
public:
// Defines the granularity of progress logging in terms of how many "completed
@@ -310,6 +316,10 @@
// Also see comment for the static PreparePartitionsForUpdate().
bool PreparePartitionsForUpdate(uint64_t* required_size);
+ // Checks whether the current manifest contains timestamp errors
+ // (ill-formed or downgrade).
+ TimestampCheckResult CheckTimestampError() const;
+
// Update Engine preference store.
PrefsInterface* prefs_;
diff --git a/payload_consumer/delta_performer_integration_test.cc b/payload_consumer/delta_performer_integration_test.cc
index acbecad..c257b28 100644
--- a/payload_consumer/delta_performer_integration_test.cc
+++ b/payload_consumer/delta_performer_integration_test.cc
@@ -36,9 +36,12 @@
#include "update_engine/common/constants.h"
#include "update_engine/common/fake_boot_control.h"
#include "update_engine/common/fake_hardware.h"
+#include "update_engine/common/fake_prefs.h"
#include "update_engine/common/mock_prefs.h"
#include "update_engine/common/test_utils.h"
#include "update_engine/common/utils.h"
+#include "update_engine/hardware_android.h"
+#include "update_engine/payload_consumer/install_plan.h"
#include "update_engine/payload_consumer/mock_download_action.h"
#include "update_engine/payload_consumer/payload_constants.h"
#include "update_engine/payload_consumer/payload_metadata.h"
@@ -125,7 +128,41 @@
} // namespace
-class DeltaPerformerIntegrationTest : public ::testing::Test {};
+class DeltaPerformerIntegrationTest : public ::testing::Test {
+ public:
+ void RunManifestValidation(const DeltaArchiveManifest& manifest,
+ uint64_t major_version,
+ ErrorCode expected) {
+ FakePrefs prefs;
+ InstallPlan::Payload payload;
+ InstallPlan install_plan;
+ DeltaPerformer performer{&prefs,
+ nullptr,
+ &fake_hardware_,
+ nullptr,
+ &install_plan,
+ &payload,
+ false /* interactive*/};
+ // Delta performer will treat manifest as kDelta payload
+ // if it's a partial update.
+ payload.type = manifest.partial_update() ? InstallPayloadType::kDelta
+ : InstallPayloadType::kFull;
+
+ // The Manifest we are validating.
+ performer.manifest_.CopyFrom(manifest);
+ performer.major_payload_version_ = major_version;
+
+ EXPECT_EQ(expected, performer.ValidateManifest());
+ }
+ void AddPartition(DeltaArchiveManifest* manifest,
+ std::string name,
+ int timestamp) {
+ auto& partition = *manifest->add_partitions();
+ partition.set_version(std::to_string(timestamp));
+ partition.set_partition_name(name);
+ }
+ FakeHardware fake_hardware_;
+};
static void CompareFilesByBlock(const string& a_file,
const string& b_file,
@@ -995,13 +1032,13 @@
delete performer;
}
-TEST(DeltaPerformerIntegrationTest, RunAsRootSmallImageTest) {
+TEST_F(DeltaPerformerIntegrationTest, RunAsRootSmallImageTest) {
DoSmallImageTest(
false, false, -1, kSignatureGenerator, false, kSourceMinorPayloadVersion);
}
-TEST(DeltaPerformerIntegrationTest,
- RunAsRootSmallImageSignaturePlaceholderTest) {
+TEST_F(DeltaPerformerIntegrationTest,
+ RunAsRootSmallImageSignaturePlaceholderTest) {
DoSmallImageTest(false,
false,
-1,
@@ -1010,8 +1047,8 @@
kSourceMinorPayloadVersion);
}
-TEST(DeltaPerformerIntegrationTest,
- RunAsRootSmallImageSignaturePlaceholderMismatchTest) {
+TEST_F(DeltaPerformerIntegrationTest,
+ RunAsRootSmallImageSignaturePlaceholderMismatchTest) {
DeltaState state;
GenerateDeltaFile(false,
false,
@@ -1021,7 +1058,7 @@
kSourceMinorPayloadVersion);
}
-TEST(DeltaPerformerIntegrationTest, RunAsRootSmallImageChunksTest) {
+TEST_F(DeltaPerformerIntegrationTest, RunAsRootSmallImageChunksTest) {
DoSmallImageTest(false,
false,
kBlockSize,
@@ -1030,27 +1067,28 @@
kSourceMinorPayloadVersion);
}
-TEST(DeltaPerformerIntegrationTest, RunAsRootFullKernelSmallImageTest) {
+TEST_F(DeltaPerformerIntegrationTest, RunAsRootFullKernelSmallImageTest) {
DoSmallImageTest(
true, false, -1, kSignatureGenerator, false, kSourceMinorPayloadVersion);
}
-TEST(DeltaPerformerIntegrationTest, RunAsRootFullSmallImageTest) {
+TEST_F(DeltaPerformerIntegrationTest, RunAsRootFullSmallImageTest) {
DoSmallImageTest(
true, true, -1, kSignatureGenerator, true, kFullPayloadMinorVersion);
}
-TEST(DeltaPerformerIntegrationTest, RunAsRootSmallImageSignNoneTest) {
+TEST_F(DeltaPerformerIntegrationTest, RunAsRootSmallImageSignNoneTest) {
DoSmallImageTest(
false, false, -1, kSignatureNone, false, kSourceMinorPayloadVersion);
}
-TEST(DeltaPerformerIntegrationTest, RunAsRootSmallImageSignGeneratedTest) {
+TEST_F(DeltaPerformerIntegrationTest, RunAsRootSmallImageSignGeneratedTest) {
DoSmallImageTest(
false, false, -1, kSignatureGenerated, true, kSourceMinorPayloadVersion);
}
-TEST(DeltaPerformerIntegrationTest, RunAsRootSmallImageSignGeneratedShellTest) {
+TEST_F(DeltaPerformerIntegrationTest,
+ RunAsRootSmallImageSignGeneratedShellTest) {
DoSmallImageTest(false,
false,
-1,
@@ -1059,8 +1097,8 @@
kSourceMinorPayloadVersion);
}
-TEST(DeltaPerformerIntegrationTest,
- RunAsRootSmallImageSignGeneratedShellECKeyTest) {
+TEST_F(DeltaPerformerIntegrationTest,
+ RunAsRootSmallImageSignGeneratedShellECKeyTest) {
DoSmallImageTest(false,
false,
-1,
@@ -1069,8 +1107,8 @@
kSourceMinorPayloadVersion);
}
-TEST(DeltaPerformerIntegrationTest,
- RunAsRootSmallImageSignGeneratedShellBadKeyTest) {
+TEST_F(DeltaPerformerIntegrationTest,
+ RunAsRootSmallImageSignGeneratedShellBadKeyTest) {
DoSmallImageTest(false,
false,
-1,
@@ -1079,8 +1117,8 @@
kSourceMinorPayloadVersion);
}
-TEST(DeltaPerformerIntegrationTest,
- RunAsRootSmallImageSignGeneratedShellRotateCl1Test) {
+TEST_F(DeltaPerformerIntegrationTest,
+ RunAsRootSmallImageSignGeneratedShellRotateCl1Test) {
DoSmallImageTest(false,
false,
-1,
@@ -1089,8 +1127,8 @@
kSourceMinorPayloadVersion);
}
-TEST(DeltaPerformerIntegrationTest,
- RunAsRootSmallImageSignGeneratedShellRotateCl2Test) {
+TEST_F(DeltaPerformerIntegrationTest,
+ RunAsRootSmallImageSignGeneratedShellRotateCl2Test) {
DoSmallImageTest(false,
false,
-1,
@@ -1099,14 +1137,97 @@
kSourceMinorPayloadVersion);
}
-TEST(DeltaPerformerIntegrationTest, RunAsRootSmallImageSourceOpsTest) {
+TEST_F(DeltaPerformerIntegrationTest, RunAsRootSmallImageSourceOpsTest) {
DoSmallImageTest(
false, false, -1, kSignatureGenerator, false, kSourceMinorPayloadVersion);
}
-TEST(DeltaPerformerIntegrationTest,
- RunAsRootMandatoryOperationHashMismatchTest) {
+TEST_F(DeltaPerformerIntegrationTest,
+ RunAsRootMandatoryOperationHashMismatchTest) {
DoOperationHashMismatchTest(kInvalidOperationData, true);
}
+TEST_F(DeltaPerformerIntegrationTest, ValidatePerPartitionTimestampSuccess) {
+ // The Manifest we are validating.
+ DeltaArchiveManifest manifest;
+
+ fake_hardware_.SetVersion("system", "5");
+ fake_hardware_.SetVersion("product", "99");
+ fake_hardware_.SetBuildTimestamp(1);
+
+ manifest.set_minor_version(kFullPayloadMinorVersion);
+ manifest.set_max_timestamp(2);
+ AddPartition(&manifest, "system", 10);
+ AddPartition(&manifest, "product", 100);
+
+ RunManifestValidation(
+ manifest, kMaxSupportedMajorPayloadVersion, ErrorCode::kSuccess);
+}
+
+TEST_F(DeltaPerformerIntegrationTest, ValidatePerPartitionTimestampFailure) {
+ // The Manifest we are validating.
+ DeltaArchiveManifest manifest;
+
+ fake_hardware_.SetVersion("system", "5");
+ fake_hardware_.SetVersion("product", "99");
+ fake_hardware_.SetBuildTimestamp(1);
+
+ manifest.set_minor_version(kFullPayloadMinorVersion);
+ manifest.set_max_timestamp(2);
+ AddPartition(&manifest, "system", 10);
+ AddPartition(&manifest, "product", 98);
+
+ RunManifestValidation(manifest,
+ kMaxSupportedMajorPayloadVersion,
+ ErrorCode::kPayloadTimestampError);
+}
+
+TEST_F(DeltaPerformerIntegrationTest,
+ ValidatePerPartitionTimestampMissingTimestamp) {
+ // The Manifest we are validating.
+ DeltaArchiveManifest manifest;
+
+ fake_hardware_.SetVersion("system", "5");
+ fake_hardware_.SetVersion("product", "99");
+ fake_hardware_.SetBuildTimestamp(1);
+
+ manifest.set_minor_version(kFullPayloadMinorVersion);
+ manifest.set_max_timestamp(2);
+ AddPartition(&manifest, "system", 10);
+ {
+ auto& partition = *manifest.add_partitions();
+ // For complete updates, missing timestamp should not trigger
+ // timestamp error.
+ partition.set_partition_name("product");
+ }
+
+ RunManifestValidation(
+ manifest, kMaxSupportedMajorPayloadVersion, ErrorCode::kSuccess);
+}
+
+TEST_F(DeltaPerformerIntegrationTest,
+ ValidatePerPartitionTimestampPartialUpdate) {
+ // The Manifest we are validating.
+ DeltaArchiveManifest manifest;
+
+ fake_hardware_.SetVersion("system", "5");
+ fake_hardware_.SetVersion("product", "99");
+ fake_hardware_.SetBuildTimestamp(1);
+
+ manifest.set_minor_version(kPartialUpdateMinorPayloadVersion);
+ manifest.set_max_timestamp(2);
+ manifest.set_partial_update(true);
+ AddPartition(&manifest, "system", 10);
+ {
+ auto& partition = *manifest.add_partitions();
+ // For partial updates, missing timestamp should
+ // trigger an error
+ partition.set_partition_name("product");
+ }
+
+ RunManifestValidation(manifest,
+ kMaxSupportedMajorPayloadVersion,
+ ErrorCode::kPayloadTimestampError);
+}
+
} // namespace chromeos_update_engine
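
The tests above switch from TEST to TEST_F so that each case can reuse fixture members such as fake_hardware_ and the AddPartition()/RunManifestValidation() helpers. A generic illustration of the TEST_F pattern (names are hypothetical):

    #include <gtest/gtest.h>

    // TEST(...) has no fixture; TEST_F(Fixture, ...) constructs a fresh Fixture
    // for each test, so members and helper methods are shared across cases.
    class ExampleFixture : public ::testing::Test {
     protected:
      int shared_counter_ = 0;
    };

    TEST_F(ExampleFixture, StartsAtZero) {
      EXPECT_EQ(shared_counter_, 0);
    }
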
diff --git a/payload_consumer/delta_performer_unittest.cc b/payload_consumer/delta_performer_unittest.cc
index 44107cd..fbd754f 100644
--- a/payload_consumer/delta_performer_unittest.cc
+++ b/payload_consumer/delta_performer_unittest.cc
@@ -36,9 +36,11 @@
#include <gtest/gtest.h>
#include "update_engine/common/constants.h"
+#include "update_engine/common/error_code.h"
#include "update_engine/common/fake_boot_control.h"
#include "update_engine/common/fake_hardware.h"
#include "update_engine/common/fake_prefs.h"
+#include "update_engine/common/hardware_interface.h"
#include "update_engine/common/test_utils.h"
#include "update_engine/common/utils.h"
#include "update_engine/payload_consumer/fake_file_descriptor.h"
@@ -899,6 +901,24 @@
ErrorCode::kPayloadTimestampError);
}
+TEST_F(DeltaPerformerTest, ValidatePerPartitionTimestampSuccess) {
+ // The Manifest we are validating.
+ DeltaArchiveManifest manifest;
+
+ manifest.set_minor_version(kFullPayloadMinorVersion);
+ manifest.set_max_timestamp(2);
+ fake_hardware_.SetBuildTimestamp(1);
+ auto& partition = *manifest.add_partitions();
+ partition.set_version("10");
+ partition.set_partition_name("system");
+ fake_hardware_.SetVersion("system", "5");
+
+ RunManifestValidation(manifest,
+ kMaxSupportedMajorPayloadVersion,
+ InstallPayloadType::kFull,
+ ErrorCode::kSuccess);
+}
+
TEST_F(DeltaPerformerTest, BrilloMetadataSignatureSizeTest) {
unsigned int seed = time(nullptr);
EXPECT_TRUE(performer_.Write(kDeltaMagic, sizeof(kDeltaMagic)));
diff --git a/payload_consumer/download_action.h b/payload_consumer/download_action.h
index 740416d..6928443 100644
--- a/payload_consumer/download_action.h
+++ b/payload_consumer/download_action.h
@@ -124,7 +124,7 @@
bool SetupP2PSharingFd();
// Writes |length| bytes of payload from |data| into |file_offset|
- // of the p2p file. Also does sanity checks; for example ensures we
+ // of the p2p file. Also does validation checks; for example ensures we
// don't end up with a file with holes in it.
//
// This method does nothing if SetupP2PSharingFd() hasn't been
diff --git a/payload_consumer/download_action_android_unittest.cc b/payload_consumer/download_action_android_unittest.cc
new file mode 100644
index 0000000..f78845f
--- /dev/null
+++ b/payload_consumer/download_action_android_unittest.cc
@@ -0,0 +1,90 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "common/mock_action_processor.h"
+#include <gmock/gmock-actions.h>
+#include <gmock/gmock-function-mocker.h>
+#include <gmock/gmock-spec-builders.h>
+
+#include "payload_consumer/install_plan.h"
+#include "update_engine/common/action_pipe.h"
+#include "update_engine/common/boot_control_stub.h"
+#include "update_engine/common/constants.h"
+#include "update_engine/common/mock_http_fetcher.h"
+#include "update_engine/common/mock_prefs.h"
+#include "update_engine/common/test_utils.h"
+#include "update_engine/payload_consumer/download_action.h"
+
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+#include <memory>
+
+namespace chromeos_update_engine {
+using testing::_;
+using testing::DoAll;
+using testing::Return;
+using testing::SetArgPointee;
+
+class DownloadActionTest : public ::testing::Test {
+ public:
+ static constexpr int64_t METADATA_SIZE = 1024;
+ static constexpr int64_t SIGNATURE_SIZE = 256;
+ std::shared_ptr<ActionPipe<InstallPlan>> action_pipe{
+ new ActionPipe<InstallPlan>()};
+};
+
+TEST_F(DownloadActionTest, CacheManifestInvalid) {
+ std::string data(METADATA_SIZE + SIGNATURE_SIZE, '-');
+ MockPrefs prefs;
+ EXPECT_CALL(prefs, GetInt64(kPrefsUpdateStatePayloadIndex, _))
+ .WillRepeatedly(DoAll(SetArgPointee<1>(0L), Return(true)));
+ EXPECT_CALL(prefs, GetInt64(kPrefsManifestMetadataSize, _))
+ .WillRepeatedly(DoAll(SetArgPointee<1>(METADATA_SIZE), Return(true)));
+ EXPECT_CALL(prefs, GetInt64(kPrefsManifestSignatureSize, _))
+ .WillRepeatedly(DoAll(SetArgPointee<1>(SIGNATURE_SIZE), Return(true)));
+ EXPECT_CALL(prefs, GetInt64(kPrefsUpdateStateNextDataOffset, _))
+ .WillRepeatedly(DoAll(SetArgPointee<1>(0L), Return(true)));
+ EXPECT_CALL(prefs, GetString(kPrefsManifestBytes, _))
+ .WillRepeatedly(DoAll(SetArgPointee<1>(data), Return(true)));
+
+ BootControlStub boot_control;
+ MockHttpFetcher* http_fetcher =
+ new MockHttpFetcher(data.data(), data.size(), nullptr);
+ http_fetcher->set_delay(false);
+ InstallPlan install_plan;
+ auto& payload = install_plan.payloads.emplace_back();
+ install_plan.download_url = "http://fake_url.invalid";
+ payload.size = data.size();
+ payload.payload_urls.emplace_back("http://fake_url.invalid");
+ install_plan.is_resume = true;
+ action_pipe->set_contents(install_plan);
+
+ // takes ownership of passed in HttpFetcher
+ auto download_action =
+ std::make_unique<DownloadAction>(&prefs,
+ &boot_control,
+ nullptr,
+ nullptr,
+ http_fetcher,
+ false /* interactive */);
+ download_action->set_in_pipe(action_pipe);
+ MockActionProcessor mock_processor;
+ download_action->SetProcessor(&mock_processor);
+ download_action->PerformAction();
+ ASSERT_EQ(download_action->http_fetcher()->GetBytesDownloaded(), data.size());
+}
+
+} // namespace chromeos_update_engine
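
The new unit test stubs the prefs with gmock so that cached manifest metadata appears to be present but invalid. A self-contained sketch of the same out-parameter stubbing pattern, using a hypothetical KeyValueStore interface rather than the real PrefsInterface:

    #include <gmock/gmock.h>
    #include <gtest/gtest.h>

    #include <cstdint>
    #include <string>

    using testing::_;
    using testing::DoAll;
    using testing::Return;
    using testing::SetArgPointee;

    // Hypothetical interface standing in for PrefsInterface.
    class KeyValueStore {
     public:
      virtual ~KeyValueStore() = default;
      virtual bool GetInt64(const std::string& key, int64_t* value) const = 0;
    };

    class MockKeyValueStore : public KeyValueStore {
     public:
      MOCK_METHOD(bool, GetInt64, (const std::string&, int64_t*),
                  (const, override));
    };

    TEST(MockOutParamExample, FillsValueAndReturnsTrue) {
      MockKeyValueStore store;
      // DoAll(SetArgPointee<1>(...), Return(true)) fills the out-parameter and
      // reports success, mirroring how the prefs getters are stubbed above.
      EXPECT_CALL(store, GetInt64("manifest-metadata-size", _))
          .WillRepeatedly(DoAll(SetArgPointee<1>(1024), Return(true)));

      int64_t value = 0;
      ASSERT_TRUE(store.GetInt64("manifest-metadata-size", &value));
      EXPECT_EQ(value, 1024);
    }
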
diff --git a/payload_consumer/partition_update_generator_android.cc b/payload_consumer/partition_update_generator_android.cc
index 5768dd6..d5d5313 100644
--- a/payload_consumer/partition_update_generator_android.cc
+++ b/payload_consumer/partition_update_generator_android.cc
@@ -18,30 +18,23 @@
#include <filesystem>
#include <memory>
-#include <set>
-#include <string_view>
#include <utility>
+#include <android-base/properties.h>
#include <android-base/strings.h>
#include <base/logging.h>
+#include <base/strings/string_split.h>
+#include "update_engine/common/boot_control_interface.h"
#include "update_engine/common/hash_calculator.h"
#include "update_engine/common/utils.h"
-namespace {
-// TODO(xunchang) use definition in fs_mgr, e.g. fs_mgr_get_slot_suffix
-const char* SUFFIX_A = "_a";
-const char* SUFFIX_B = "_b";
-} // namespace
-
namespace chromeos_update_engine {
PartitionUpdateGeneratorAndroid::PartitionUpdateGeneratorAndroid(
BootControlInterface* boot_control,
- std::string device_dir,
size_t block_size)
: boot_control_(boot_control),
- block_device_dir_(std::move(device_dir)),
block_size_(block_size) {}
bool PartitionUpdateGeneratorAndroid::
@@ -50,22 +43,57 @@
BootControlInterface::Slot target_slot,
const std::set<std::string>& partitions_in_payload,
std::vector<PartitionUpdate>* update_list) {
- auto ab_partitions = GetStaticAbPartitionsOnDevice();
- if (!ab_partitions.has_value()) {
+ auto ab_partitions = GetAbPartitionsOnDevice();
+ if (ab_partitions.empty()) {
LOG(ERROR) << "Failed to load static a/b partitions";
return false;
}
std::vector<PartitionUpdate> partition_updates;
- for (const auto& partition_name : ab_partitions.value()) {
+ for (const auto& partition_name : ab_partitions) {
if (partitions_in_payload.find(partition_name) !=
partitions_in_payload.end()) {
LOG(INFO) << partition_name << " is already included in the payload";
continue;
}
+ bool is_source_dynamic = false;
+ std::string source_device;
- auto partition_update =
- CreatePartitionUpdate(partition_name, source_slot, target_slot);
+ TEST_AND_RETURN_FALSE(
+ boot_control_->GetPartitionDevice(partition_name,
+ source_slot,
+ true, /* not_in_payload */
+ &source_device,
+ &is_source_dynamic));
+ bool is_target_dynamic = false;
+ std::string target_device;
+ TEST_AND_RETURN_FALSE(boot_control_->GetPartitionDevice(
+ partition_name, target_slot, true, &target_device, &is_target_dynamic));
+
+ if (is_source_dynamic || is_target_dynamic) {
+ if (is_source_dynamic != is_target_dynamic) {
+ LOG(ERROR) << "Partition " << partition_name << " is expected to be a"
+ << " static partition. source slot is "
+ << (is_source_dynamic ? "" : "not")
+ << " dynamic, and target slot " << target_slot << " is "
+ << (is_target_dynamic ? "" : "not") << " dynamic.";
+ return false;
+ } else {
+ continue;
+ }
+ }
+
+ auto source_size = utils::FileSize(source_device);
+ auto target_size = utils::FileSize(target_device);
+ if (source_size == -1 || target_size == -1 || source_size != target_size ||
+ source_size % block_size_ != 0) {
+ LOG(ERROR) << "Invalid partition size. source size " << source_size
+ << ", target size " << target_size;
+ return false;
+ }
+
+ auto partition_update = CreatePartitionUpdate(
+ partition_name, source_device, target_device, source_size);
if (!partition_update.has_value()) {
LOG(ERROR) << "Failed to create partition update for " << partition_name;
return false;
@@ -76,98 +104,14 @@
return true;
}
-std::optional<std::set<std::string>>
-PartitionUpdateGeneratorAndroid::GetStaticAbPartitionsOnDevice() {
- if (std::error_code error_code;
- !std::filesystem::exists(block_device_dir_, error_code) || error_code) {
- LOG(ERROR) << "Failed to find " << block_device_dir_ << " "
- << error_code.message();
- return std::nullopt;
- }
-
- std::error_code error_code;
- auto it = std::filesystem::directory_iterator(block_device_dir_, error_code);
- if (error_code) {
- LOG(ERROR) << "Failed to iterate " << block_device_dir_ << " "
- << error_code.message();
- return std::nullopt;
- }
-
- std::set<std::string> partitions_with_suffix;
- for (const auto& entry : it) {
- auto partition_name = entry.path().filename().string();
- if (android::base::EndsWith(partition_name, SUFFIX_A) ||
- android::base::EndsWith(partition_name, SUFFIX_B)) {
- partitions_with_suffix.insert(partition_name);
- }
- }
-
- // Second iteration to add the partition name without suffixes.
- std::set<std::string> ab_partitions;
- for (std::string_view name : partitions_with_suffix) {
- if (!android::base::ConsumeSuffix(&name, SUFFIX_A)) {
- continue;
- }
-
- // Add to the output list if the partition exist for both slot a and b.
- auto base_name = std::string(name);
- if (partitions_with_suffix.find(base_name + SUFFIX_B) !=
- partitions_with_suffix.end()) {
- ab_partitions.insert(base_name);
- } else {
- LOG(WARNING) << "Failed to find the b partition for " << base_name;
- }
- }
-
- return ab_partitions;
-}
-
-std::optional<PartitionUpdate>
-PartitionUpdateGeneratorAndroid::CreatePartitionUpdate(
- const std::string& partition_name,
- BootControlInterface::Slot source_slot,
- BootControlInterface::Slot target_slot) {
- bool is_source_dynamic = false;
- std::string source_device;
- if (!boot_control_->GetPartitionDevice(partition_name,
- source_slot,
- true, /* not_in_payload */
- &source_device,
- &is_source_dynamic)) {
- LOG(ERROR) << "Failed to load source " << partition_name;
- return std::nullopt;
- }
- bool is_target_dynamic = false;
- std::string target_device;
- if (!boot_control_->GetPartitionDevice(partition_name,
- target_slot,
- true,
- &target_device,
- &is_target_dynamic)) {
- LOG(ERROR) << "Failed to load target " << partition_name;
- return std::nullopt;
- }
-
- if (is_source_dynamic || is_target_dynamic) {
- LOG(ERROR) << "Partition " << partition_name << " is expected to be a"
- << " static partition. source slot is "
- << (is_source_dynamic ? "" : "not")
- << " dynamic, and target slot " << target_slot << " is "
- << (is_target_dynamic ? "" : "not") << " dynamic.";
- return std::nullopt;
- }
-
- auto source_size = utils::FileSize(source_device);
- auto target_size = utils::FileSize(target_device);
- if (source_size == -1 || target_size == -1 || source_size != target_size ||
- source_size % block_size_ != 0) {
- LOG(ERROR) << "Invalid partition size. source size " << source_size
- << ", target size " << target_size;
- return std::nullopt;
- }
-
- return CreatePartitionUpdate(
- partition_name, source_device, target_device, source_size);
+std::vector<std::string>
+PartitionUpdateGeneratorAndroid::GetAbPartitionsOnDevice() const {
+ auto partition_list_str =
+ android::base::GetProperty("ro.product.ab_ota_partitions", "");
+ return base::SplitString(partition_list_str,
+ ",",
+ base::TRIM_WHITESPACE,
+ base::SPLIT_WANT_NONEMPTY);
}
std::optional<PartitionUpdate>
@@ -183,6 +127,8 @@
auto raw_hash = CalculateHashForPartition(source_device, partition_size);
if (!raw_hash.has_value()) {
+ LOG(ERROR) << "Failed to calculate hash for partition " << source_device
+ << " size: " << partition_size;
return {};
}
old_partition_info->set_hash(raw_hash->data(), raw_hash->size());
@@ -225,16 +171,9 @@
std::unique_ptr<PartitionUpdateGeneratorInterface> Create(
BootControlInterface* boot_control, size_t block_size) {
CHECK(boot_control);
- auto dynamic_control = boot_control->GetDynamicPartitionControl();
- CHECK(dynamic_control);
- std::string dir_path;
- if (!dynamic_control->GetDeviceDir(&dir_path)) {
- return nullptr;
- }
return std::unique_ptr<PartitionUpdateGeneratorInterface>(
- new PartitionUpdateGeneratorAndroid(
- boot_control, std::move(dir_path), block_size));
+ new PartitionUpdateGeneratorAndroid(boot_control, block_size));
}
} // namespace partition_update_generator
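
GetAbPartitionsOnDevice() now derives the static a/b partition list from the ro.product.ab_ota_partitions property instead of scanning the block-device directory. The real code uses android::base::GetProperty plus base::SplitString with TRIM_WHITESPACE and SPLIT_WANT_NONEMPTY; a standard-library-only sketch of the equivalent splitting:

    #include <sstream>
    #include <string>
    #include <vector>

    // Splits a comma-separated property value such as "boot,system,vendor"
    // into trimmed, non-empty partition names.
    std::vector<std::string> SplitAbPartitionList(const std::string& value) {
      std::vector<std::string> partitions;
      std::stringstream stream(value);
      std::string token;
      while (std::getline(stream, token, ',')) {
        const auto first = token.find_first_not_of(" \t");
        if (first == std::string::npos)
          continue;  // skip empty or whitespace-only tokens
        const auto last = token.find_last_not_of(" \t");
        partitions.push_back(token.substr(first, last - first + 1));
      }
      return partitions;
    }
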
diff --git a/payload_consumer/partition_update_generator_android.h b/payload_consumer/partition_update_generator_android.h
index 97b7d83..0330c99 100644
--- a/payload_consumer/partition_update_generator_android.h
+++ b/payload_consumer/partition_update_generator_android.h
@@ -29,11 +29,11 @@
#include "update_engine/payload_consumer/partition_update_generator_interface.h"
namespace chromeos_update_engine {
+
class PartitionUpdateGeneratorAndroid
: public PartitionUpdateGeneratorInterface {
public:
PartitionUpdateGeneratorAndroid(BootControlInterface* boot_control,
- std::string device_dir,
size_t block_size);
bool GenerateOperationsForPartitionsNotInPayload(
@@ -41,15 +41,13 @@
BootControlInterface::Slot target_slot,
const std::set<std::string>& partitions_in_payload,
std::vector<PartitionUpdate>* update_list) override;
+ virtual std::vector<std::string> GetAbPartitionsOnDevice() const;
private:
friend class PartitionUpdateGeneratorAndroidTest;
FRIEND_TEST(PartitionUpdateGeneratorAndroidTest, GetStaticPartitions);
FRIEND_TEST(PartitionUpdateGeneratorAndroidTest, CreatePartitionUpdate);
- // Gets the name of the static a/b partitions on the device.
- std::optional<std::set<std::string>> GetStaticAbPartitionsOnDevice();
-
// Creates a PartitionUpdate object for a given partition to update from
// source to target. Returns std::nullopt on failure.
std::optional<PartitionUpdate> CreatePartitionUpdate(
@@ -58,17 +56,10 @@
const std::string& target_device,
int64_t partition_size);
- std::optional<PartitionUpdate> CreatePartitionUpdate(
- const std::string& partition_name,
- BootControlInterface::Slot source_slot,
- BootControlInterface::Slot target_slot);
-
std::optional<brillo::Blob> CalculateHashForPartition(
const std::string& block_device, int64_t partition_size);
BootControlInterface* boot_control_;
- // Path to look for a/b partitions
- std::string block_device_dir_;
size_t block_size_;
};
diff --git a/payload_consumer/partition_update_generator_android_unittest.cc b/payload_consumer/partition_update_generator_android_unittest.cc
index c3be9db..86d025e 100644
--- a/payload_consumer/partition_update_generator_android_unittest.cc
+++ b/payload_consumer/partition_update_generator_android_unittest.cc
@@ -19,12 +19,14 @@
#include <map>
#include <memory>
#include <set>
+#include <utility>
#include <vector>
#include <android-base/strings.h>
#include <brillo/secure_blob.h>
#include <gtest/gtest.h>
+#include "update_engine/common/boot_control_interface.h"
#include "update_engine/common/fake_boot_control.h"
#include "update_engine/common/hash_calculator.h"
#include "update_engine/common/test_utils.h"
@@ -32,40 +34,53 @@
namespace chromeos_update_engine {
+class FakePartitionUpdateGenerator : public PartitionUpdateGeneratorAndroid {
+ public:
+ std::vector<std::string> GetAbPartitionsOnDevice() const {
+ return ab_partitions_;
+ }
+ using PartitionUpdateGeneratorAndroid::PartitionUpdateGeneratorAndroid;
+ std::vector<std::string> ab_partitions_;
+};
+
class PartitionUpdateGeneratorAndroidTest : public ::testing::Test {
protected:
void SetUp() override {
ASSERT_TRUE(device_dir_.CreateUniqueTempDir());
boot_control_ = std::make_unique<FakeBootControl>();
- boot_control_->SetNumSlots(2);
- auto generator =
- partition_update_generator::Create(boot_control_.get(), 4096);
- generator_.reset(
- static_cast<PartitionUpdateGeneratorAndroid*>(generator.release()));
ASSERT_TRUE(boot_control_);
+ boot_control_->SetNumSlots(2);
+ generator_ = std::make_unique<FakePartitionUpdateGenerator>(
+ boot_control_.get(), 4096);
ASSERT_TRUE(generator_);
- generator_->block_device_dir_ = device_dir_.GetPath().value();
}
- std::unique_ptr<PartitionUpdateGeneratorAndroid> generator_;
+ std::unique_ptr<FakePartitionUpdateGenerator> generator_;
std::unique_ptr<FakeBootControl> boot_control_;
base::ScopedTempDir device_dir_;
+ std::map<std::string, std::string> device_map_;
void SetUpBlockDevice(const std::map<std::string, std::string>& contents) {
+ std::set<std::string> partition_base_names;
for (const auto& [name, content] : contents) {
- auto path = generator_->block_device_dir_ + "/" + name;
+ auto path = device_dir_.GetPath().value() + "/" + name;
ASSERT_TRUE(
utils::WriteFile(path.c_str(), content.data(), content.size()));
if (android::base::EndsWith(name, "_a")) {
- boot_control_->SetPartitionDevice(
- name.substr(0, name.size() - 2), 0, path);
+ auto prefix = name.substr(0, name.size() - 2);
+ boot_control_->SetPartitionDevice(prefix, 0, path);
+ partition_base_names.emplace(prefix);
} else if (android::base::EndsWith(name, "_b")) {
- boot_control_->SetPartitionDevice(
- name.substr(0, name.size() - 2), 1, path);
+ auto prefix = name.substr(0, name.size() - 2);
+ boot_control_->SetPartitionDevice(prefix, 1, path);
+ partition_base_names.emplace(prefix);
}
+ device_map_[name] = std::move(path);
}
+ generator_->ab_partitions_ = {partition_base_names.begin(),
+ partition_base_names.end()};
}
void CheckPartitionUpdate(const std::string& name,
@@ -95,25 +110,6 @@
}
};
-TEST_F(PartitionUpdateGeneratorAndroidTest, GetStaticPartitions) {
- std::map<std::string, std::string> contents = {
- {"system_a", ""},
- {"system_b", ""},
- {"vendor_a", ""},
- {"vendor_b", ""},
- {"persist", ""},
- {"vbmeta_a", ""},
- {"vbmeta_b", ""},
- {"boot_a", ""},
- {"boot_b", ""},
- };
-
- SetUpBlockDevice(contents);
- auto partitions = generator_->GetStaticAbPartitionsOnDevice();
- ASSERT_EQ(std::set<std::string>({"system", "vendor", "vbmeta", "boot"}),
- partitions);
-}
-
TEST_F(PartitionUpdateGeneratorAndroidTest, CreatePartitionUpdate) {
auto system_contents = std::string(4096 * 2, '1');
auto boot_contents = std::string(4096 * 5, 'b');
@@ -125,13 +121,14 @@
};
SetUpBlockDevice(contents);
- auto system_partition_update =
- generator_->CreatePartitionUpdate("system", 0, 1);
+ auto system_partition_update = generator_->CreatePartitionUpdate(
+ "system", device_map_["system_a"], device_map_["system_b"], 4096 * 2);
ASSERT_TRUE(system_partition_update.has_value());
CheckPartitionUpdate(
"system", system_contents, system_partition_update.value());
- auto boot_partition_update = generator_->CreatePartitionUpdate("boot", 0, 1);
+ auto boot_partition_update = generator_->CreatePartitionUpdate(
+ "boot", device_map_["boot_a"], device_map_["boot_b"], 4096 * 5);
ASSERT_TRUE(boot_partition_update.has_value());
CheckPartitionUpdate("boot", boot_contents, boot_partition_update.value());
}
diff --git a/payload_consumer/payload_constants.cc b/payload_consumer/payload_constants.cc
index 1c987bd..d62a0ec 100644
--- a/payload_consumer/payload_constants.cc
+++ b/payload_consumer/payload_constants.cc
@@ -33,9 +33,11 @@
const uint32_t kBrotliBsdiffMinorPayloadVersion = 4;
const uint32_t kPuffdiffMinorPayloadVersion = 5;
const uint32_t kVerityMinorPayloadVersion = 6;
+const uint32_t kPartialUpdateMinorPayloadVersion = 7;
const uint32_t kMinSupportedMinorPayloadVersion = kSourceMinorPayloadVersion;
-const uint32_t kMaxSupportedMinorPayloadVersion = kVerityMinorPayloadVersion;
+const uint32_t kMaxSupportedMinorPayloadVersion =
+ kPartialUpdateMinorPayloadVersion;
const uint64_t kMaxPayloadHeaderSize = 24;
diff --git a/payload_consumer/payload_constants.h b/payload_consumer/payload_constants.h
index 5c2d17c..03647ee 100644
--- a/payload_consumer/payload_constants.h
+++ b/payload_consumer/payload_constants.h
@@ -56,6 +56,9 @@
// The minor version that allows Verity hash tree and FEC generation.
extern const uint32_t kVerityMinorPayloadVersion;
+// The minor version that allows partial update, e.g. kernel only update.
+extern const uint32_t kPartialUpdateMinorPayloadVersion;
+
// The minimum and maximum supported minor version.
extern const uint32_t kMinSupportedMinorPayloadVersion;
extern const uint32_t kMaxSupportedMinorPayloadVersion;
diff --git a/payload_consumer/payload_metadata.cc b/payload_consumer/payload_metadata.cc
index 01f3b62..2cb73eb 100644
--- a/payload_consumer/payload_metadata.cc
+++ b/payload_consumer/payload_metadata.cc
@@ -18,6 +18,7 @@
#include <endian.h>
+#include <base/strings/stringprintf.h>
#include <brillo/data_encoding.h>
#include "update_engine/common/constants.h"
@@ -55,7 +56,18 @@
// Validate the magic string.
if (memcmp(payload.data(), kDeltaMagic, sizeof(kDeltaMagic)) != 0) {
- LOG(ERROR) << "Bad payload format -- invalid delta magic.";
+ LOG(ERROR) << "Bad payload format -- invalid delta magic: "
+ << base::StringPrintf("%02x%02x%02x%02x",
+ payload[0],
+ payload[1],
+ payload[2],
+ payload[3])
+ << " Expected: "
+ << base::StringPrintf("%02x%02x%02x%02x",
+ kDeltaMagic[0],
+ kDeltaMagic[1],
+ kDeltaMagic[2],
+ kDeltaMagic[3]);
*error = ErrorCode::kDownloadInvalidMetadataMagicString;
return MetadataParseResult::kError;
}
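
The richer error message above prints the first four payload bytes next to the expected magic ('CrAU', the same value exposed as PAYLOAD_MAGIC_HEADER in scripts/update_device.py). A standalone sketch of that check, with plain fprintf/snprintf standing in for LOG(ERROR) and base::StringPrintf:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>
    #include <string>
    #include <vector>

    // 'CrAU', matching PAYLOAD_MAGIC_HEADER in scripts/update_device.py.
    constexpr uint8_t kMagic[4] = {'C', 'r', 'A', 'U'};

    bool HasValidMagic(const std::vector<uint8_t>& payload) {
      if (payload.size() < sizeof(kMagic))
        return false;
      if (memcmp(payload.data(), kMagic, sizeof(kMagic)) == 0)
        return true;
      // On mismatch, report both the observed and the expected bytes in hex.
      auto hex = [](const uint8_t* p) {
        char buf[9];
        snprintf(buf, sizeof(buf), "%02x%02x%02x%02x", p[0], p[1], p[2], p[3]);
        return std::string(buf);
      };
      fprintf(stderr, "invalid delta magic: %s expected: %s\n",
              hex(payload.data()).c_str(), hex(kMagic).c_str());
      return false;
    }
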
diff --git a/payload_consumer/payload_metadata.h b/payload_consumer/payload_metadata.h
index cc42253..8b36f53 100644
--- a/payload_consumer/payload_metadata.h
+++ b/payload_consumer/payload_metadata.h
@@ -63,7 +63,7 @@
// |metadata_signature| (if present) or the metadata signature in payload
// itself (if present). Returns ErrorCode::kSuccess on match or a suitable
// error code otherwise. This method must be called before any part of the
- // metadata is parsed so that a man-in-the-middle attack on the SSL connection
+ // metadata is parsed so that an on-path attack on the SSL connection
// to the payload server doesn't exploit any vulnerability in the code that
// parses the protocol buffer.
ErrorCode ValidateMetadataSignature(
diff --git a/payload_consumer/postinstall_runner_action.cc b/payload_consumer/postinstall_runner_action.cc
index c520c7e..94d0392 100644
--- a/payload_consumer/postinstall_runner_action.cc
+++ b/payload_consumer/postinstall_runner_action.cc
@@ -220,20 +220,10 @@
PLOG(ERROR) << "Unable to set non-blocking I/O mode on fd " << progress_fd_;
}
-#ifdef __ANDROID__
- progress_task_ = MessageLoop::current()->WatchFileDescriptor(
- FROM_HERE,
- progress_fd_,
- MessageLoop::WatchMode::kWatchRead,
- true,
- base::Bind(&PostinstallRunnerAction::OnProgressFdReady,
- base::Unretained(this)));
-#else
progress_controller_ = base::FileDescriptorWatcher::WatchReadable(
progress_fd_,
base::BindRepeating(&PostinstallRunnerAction::OnProgressFdReady,
base::Unretained(this)));
-#endif // __ANDROID__
}
@@ -259,12 +249,7 @@
if (!ok || eof) {
// There was either an error or an EOF condition, so we are done watching
// the file descriptor.
-#ifdef __ANDROID__
- MessageLoop::current()->CancelTask(progress_task_);
- progress_task_ = MessageLoop::kTaskIdNull;
-#else
progress_controller_.reset();
-#endif // __ANDROID__
return;
}
} while (bytes_read);
@@ -308,14 +293,7 @@
fs_mount_dir_.clear();
progress_fd_ = -1;
-#ifdef __ANDROID__
- if (progress_task_ != MessageLoop::kTaskIdNull) {
- MessageLoop::current()->CancelTask(progress_task_);
- progress_task_ = MessageLoop::kTaskIdNull;
- }
-#else
progress_controller_.reset();
-#endif // __ANDROID__
progress_buffer_.clear();
}
diff --git a/payload_consumer/postinstall_runner_action.h b/payload_consumer/postinstall_runner_action.h
index bbc9e8c..e404107 100644
--- a/payload_consumer/postinstall_runner_action.h
+++ b/payload_consumer/postinstall_runner_action.h
@@ -140,11 +140,7 @@
// the postinstall program and the task watching for them.
int progress_fd_{-1};
-#ifdef __ANDROID__
- brillo::MessageLoop::TaskId progress_task_{brillo::MessageLoop::kTaskIdNull};
-#else
std::unique_ptr<base::FileDescriptorWatcher::Controller> progress_controller_;
-#endif // __ANDROID__
// A buffer of a partial read line from the progress file descriptor.
std::string progress_buffer_;
diff --git a/payload_consumer/postinstall_runner_action_unittest.cc b/payload_consumer/postinstall_runner_action_unittest.cc
index 0041d31..cf5158b 100644
--- a/payload_consumer/postinstall_runner_action_unittest.cc
+++ b/payload_consumer/postinstall_runner_action_unittest.cc
@@ -227,7 +227,7 @@
EXPECT_TRUE(processor_delegate_.processing_stopped_called_ ||
processor_delegate_.processing_done_called_);
if (processor_delegate_.processing_done_called_) {
- // Sanity check that the code was set when the processor finishes.
+ // Check that the code was set when the processor finishes.
EXPECT_TRUE(processor_delegate_.code_set_);
}
}
diff --git a/payload_generator/blob_file_writer.cc b/payload_generator/blob_file_writer.cc
index 7cdeb35..a1afe87 100644
--- a/payload_generator/blob_file_writer.cc
+++ b/payload_generator/blob_file_writer.cc
@@ -38,9 +38,9 @@
return result;
}
-void BlobFileWriter::SetTotalBlobs(size_t total_blobs) {
- total_blobs_ = total_blobs;
- stored_blobs_ = 0;
+void BlobFileWriter::IncTotalBlobs(size_t increment) {
+ base::AutoLock auto_lock(blob_mutex_);
+ total_blobs_ += increment;
}
} // namespace chromeos_update_engine
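
SetTotalBlobs() becomes IncTotalBlobs() because several partition workers can now report blobs concurrently, so the counter update must be safe against other writers. A minimal sketch of the same idea with std::mutex (the real class guards the increment with base::AutoLock on blob_mutex_):

    #include <cstddef>
    #include <mutex>

    // Several worker threads may report blobs concurrently, so the counter
    // increment is guarded by a mutex (the real class uses base::AutoLock).
    class BlobCounter {
     public:
      void IncTotalBlobs(size_t increment) {
        std::lock_guard<std::mutex> lock(mutex_);
        total_blobs_ += increment;
      }
      size_t total_blobs() {
        std::lock_guard<std::mutex> lock(mutex_);
        return total_blobs_;
      }

     private:
      std::mutex mutex_;
      size_t total_blobs_{0};
    };
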
diff --git a/payload_generator/blob_file_writer.h b/payload_generator/blob_file_writer.h
index 48553be..bdd4c08 100644
--- a/payload_generator/blob_file_writer.h
+++ b/payload_generator/blob_file_writer.h
@@ -35,10 +35,8 @@
// was stored, or -1 in case of failure.
off_t StoreBlob(const brillo::Blob& blob);
- // The number of |total_blobs| is the number of blobs that will be stored but
- // is only used for logging purposes. If not set or set to 0, logging will be
- // skipped. This function will also reset the number of stored blobs to 0.
- void SetTotalBlobs(size_t total_blobs);
+ // Increase |total_blobs| by |increment|. Thread safe.
+ void IncTotalBlobs(size_t increment);
private:
size_t total_blobs_{0};
diff --git a/payload_generator/delta_diff_generator.cc b/payload_generator/delta_diff_generator.cc
index 595a41e..aa49252 100644
--- a/payload_generator/delta_diff_generator.cc
+++ b/payload_generator/delta_diff_generator.cc
@@ -29,11 +29,13 @@
#include <vector>
#include <base/logging.h>
+#include <base/threading/simple_thread.h>
#include "update_engine/common/utils.h"
#include "update_engine/payload_consumer/delta_performer.h"
#include "update_engine/payload_consumer/payload_constants.h"
#include "update_engine/payload_generator/ab_generator.h"
+#include "update_engine/payload_generator/annotated_operation.h"
#include "update_engine/payload_generator/blob_file_writer.h"
#include "update_engine/payload_generator/delta_diff_utils.h"
#include "update_engine/payload_generator/full_update_generator.h"
@@ -49,6 +51,45 @@
const size_t kRootFSPartitionSize = static_cast<size_t>(2) * 1024 * 1024 * 1024;
const size_t kBlockSize = 4096; // bytes
+class PartitionProcessor : public base::DelegateSimpleThread::Delegate {
+ public:
+ explicit PartitionProcessor(
+ const PayloadGenerationConfig& config,
+ const PartitionConfig& old_part,
+ const PartitionConfig& new_part,
+ BlobFileWriter* file_writer,
+ std::vector<AnnotatedOperation>* aops,
+ std::unique_ptr<chromeos_update_engine::OperationsGenerator> strategy)
+ : config_(config),
+ old_part_(old_part),
+ new_part_(new_part),
+ file_writer_(file_writer),
+ aops_(aops),
+ strategy_(std::move(strategy)) {}
+ PartitionProcessor(PartitionProcessor&&) noexcept = default;
+ void Run() override {
+ LOG(INFO) << "Started an async task to process partition "
+ << old_part_.name;
+ bool success = strategy_->GenerateOperations(
+ config_, old_part_, new_part_, file_writer_, aops_);
+ if (!success) {
+ // ABORT the entire process, so that the developer can look
+ // at recent logs and diagnose what happened.
+ LOG(FATAL) << "GenerateOperations(" << old_part_.name << ", "
+ << new_part_.name << ") failed";
+ }
+ }
+
+ private:
+ const PayloadGenerationConfig& config_;
+ const PartitionConfig& old_part_;
+ const PartitionConfig& new_part_;
+ BlobFileWriter* file_writer_;
+ std::vector<AnnotatedOperation>* aops_;
+ std::unique_ptr<chromeos_update_engine::OperationsGenerator> strategy_;
+ DISALLOW_COPY_AND_ASSIGN(PartitionProcessor);
+};
+
bool GenerateUpdatePayloadFile(const PayloadGenerationConfig& config,
const string& output_path,
const string& private_key_path,
@@ -80,6 +121,13 @@
config.target.partitions.size());
}
PartitionConfig empty_part("");
+ std::vector<std::vector<AnnotatedOperation>> all_aops;
+ all_aops.resize(config.target.partitions.size());
+ std::vector<PartitionProcessor> partition_tasks{};
+ auto thread_count = std::min<int>(diff_utils::GetMaxThreads(),
+ config.target.partitions.size());
+ base::DelegateSimpleThreadPool thread_pool{"partition-thread-pool",
+ thread_count};
for (size_t i = 0; i < config.target.partitions.size(); i++) {
const PartitionConfig& old_part =
config.is_delta ? config.source.partitions[i] : empty_part;
@@ -99,12 +147,26 @@
strategy.reset(new FullUpdateGenerator());
}
- vector<AnnotatedOperation> aops;
// Generate the operations using the strategy we selected above.
- TEST_AND_RETURN_FALSE(strategy->GenerateOperations(
- config, old_part, new_part, &blob_file, &aops));
+ partition_tasks.push_back(PartitionProcessor(config,
+ old_part,
+ new_part,
+ &blob_file,
+ &all_aops[i],
+ std::move(strategy)));
+ }
+ thread_pool.Start();
+ for (auto& processor : partition_tasks) {
+ thread_pool.AddWork(&processor);
+ }
+ thread_pool.JoinAll();
- TEST_AND_RETURN_FALSE(payload.AddPartition(old_part, new_part, aops));
+ for (size_t i = 0; i < config.target.partitions.size(); i++) {
+ const PartitionConfig& old_part =
+ config.is_delta ? config.source.partitions[i] : empty_part;
+ const PartitionConfig& new_part = config.target.partitions[i];
+ TEST_AND_RETURN_FALSE(
+ payload.AddPartition(old_part, new_part, std::move(all_aops[i])));
}
}
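
The generator above fans each partition out to its own worker via base::DelegateSimpleThreadPool (Start/AddWork/JoinAll) and only aggregates the per-partition operation lists after JoinAll() returns. A rough standard-library sketch of the same fan-out/join shape, using std::thread instead of the chromium thread pool (names are hypothetical):

    #include <cstdio>
    #include <string>
    #include <thread>
    #include <vector>

    // Hypothetical per-partition task; the real PartitionProcessor wraps an
    // OperationsGenerator and writes blobs through a shared BlobFileWriter.
    void ProcessPartition(const std::string& name) {
      std::printf("processing partition %s\n", name.c_str());
    }

    int main() {
      const std::vector<std::string> partitions = {"boot", "system", "vendor"};
      // The diff caps concurrency at min(max threads, number of partitions);
      // this sketch simply spawns one thread per partition.
      std::vector<std::thread> workers;
      workers.reserve(partitions.size());
      for (const auto& name : partitions)
        workers.emplace_back(ProcessPartition, name);
      for (auto& worker : workers)
        worker.join();  // Like JoinAll(): aggregate results only after this.
      return 0;
    }
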
diff --git a/payload_generator/delta_diff_utils.cc b/payload_generator/delta_diff_utils.cc
index 22752e8..220c7ae 100644
--- a/payload_generator/delta_diff_utils.cc
+++ b/payload_generator/delta_diff_utils.cc
@@ -708,15 +708,15 @@
version.OperationAllowed(InstallOperation::SOURCE_BSDIFF);
if (bsdiff_allowed &&
blocks_to_read * kBlockSize > kMaxBsdiffDestinationSize) {
- LOG(INFO) << "bsdiff blacklisted, data too big: "
- << blocks_to_read * kBlockSize << " bytes";
+ LOG(INFO) << "bsdiff ignored, data too big: " << blocks_to_read * kBlockSize
+ << " bytes";
bsdiff_allowed = false;
}
bool puffdiff_allowed = version.OperationAllowed(InstallOperation::PUFFDIFF);
if (puffdiff_allowed &&
blocks_to_read * kBlockSize > kMaxPuffdiffDestinationSize) {
- LOG(INFO) << "puffdiff blacklisted, data too big: "
+ LOG(INFO) << "puffdiff ignored, data too big: "
<< blocks_to_read * kBlockSize << " bytes";
puffdiff_allowed = false;
}
@@ -938,7 +938,7 @@
if (magic != EXT2_SUPER_MAGIC)
return false;
- // Sanity check the parameters.
+ // Validate the parameters.
TEST_AND_RETURN_FALSE(log_block_size >= EXT2_MIN_BLOCK_LOG_SIZE &&
log_block_size <= EXT2_MAX_BLOCK_LOG_SIZE);
TEST_AND_RETURN_FALSE(block_count > 0);
diff --git a/payload_generator/full_update_generator.cc b/payload_generator/full_update_generator.cc
index 94a43ab..4a5f63a 100644
--- a/payload_generator/full_update_generator.cc
+++ b/payload_generator/full_update_generator.cc
@@ -153,7 +153,7 @@
aops->resize(num_chunks);
vector<ChunkProcessor> chunk_processors;
chunk_processors.reserve(num_chunks);
- blob_file->SetTotalBlobs(num_chunks);
+ blob_file->IncTotalBlobs(num_chunks);
for (size_t i = 0; i < num_chunks; ++i) {
size_t start_block = i * chunk_blocks;
@@ -187,9 +187,6 @@
thread_pool.AddWork(&processor);
thread_pool.JoinAll();
- // All the work done, disable logging.
- blob_file->SetTotalBlobs(0);
-
// All the operations must have a type set at this point. Otherwise, a
// ChunkProcessor failed to complete.
for (const AnnotatedOperation& aop : *aops) {
diff --git a/payload_generator/generate_delta_main.cc b/payload_generator/generate_delta_main.cc
index eb00333..18cff4b 100644
--- a/payload_generator/generate_delta_main.cc
+++ b/payload_generator/generate_delta_main.cc
@@ -445,6 +445,10 @@
out_maximum_signature_size_file,
"",
"Path to the output maximum signature size given a private key.");
+ DEFINE_bool(is_partial_update,
+ false,
+ "The payload only targets a subset of partitions on the device,"
+ "e.g. generic kernel image update.");
brillo::FlagHelper::Init(
argc,
@@ -629,6 +633,10 @@
CHECK(payload_config.target.ValidateDynamicPartitionMetadata());
}
+ if (FLAGS_is_partial_update) {
+ payload_config.is_partial_update = true;
+ }
+
CHECK(!FLAGS_out_file.empty());
// Ignore failures. These are optional arguments.
@@ -702,7 +710,8 @@
payload_config.max_timestamp = FLAGS_max_timestamp;
- if (payload_config.version.minor >= kVerityMinorPayloadVersion)
+ if (payload_config.is_delta &&
+ payload_config.version.minor >= kVerityMinorPayloadVersion)
CHECK(payload_config.target.LoadVerityConfig());
LOG(INFO) << "Generating " << (payload_config.is_delta ? "delta" : "full")
diff --git a/payload_generator/payload_file.cc b/payload_generator/payload_file.cc
index 69325d7..c1594c7 100644
--- a/payload_generator/payload_file.cc
+++ b/payload_generator/payload_file.cc
@@ -78,15 +78,18 @@
*(manifest_.mutable_dynamic_partition_metadata()) =
*(config.target.dynamic_partition_metadata);
+ if (config.is_partial_update) {
+ manifest_.set_partial_update(true);
+ }
return true;
}
bool PayloadFile::AddPartition(const PartitionConfig& old_conf,
const PartitionConfig& new_conf,
- const vector<AnnotatedOperation>& aops) {
+ vector<AnnotatedOperation> aops) {
Partition part;
part.name = new_conf.name;
- part.aops = aops;
+ part.aops = std::move(aops);
part.postinstall = new_conf.postinstall;
part.verity = new_conf.verity;
// Initialize the PartitionInfo objects if present.
@@ -169,9 +172,7 @@
TEST_AND_RETURN_FALSE(PayloadSigner::SignatureBlobLength(
{private_key_path}, &signature_blob_length));
PayloadSigner::AddSignatureToManifest(
- next_blob_offset,
- signature_blob_length,
- &manifest_);
+ next_blob_offset, signature_blob_length, &manifest_);
}
// Serialize protobuf
diff --git a/payload_generator/payload_file.h b/payload_generator/payload_file.h
index 9dc80a7..d1f8196 100644
--- a/payload_generator/payload_file.h
+++ b/payload_generator/payload_file.h
@@ -43,7 +43,7 @@
// reference a blob stored in the file provided to WritePayload().
bool AddPartition(const PartitionConfig& old_conf,
const PartitionConfig& new_conf,
- const std::vector<AnnotatedOperation>& aops);
+ std::vector<AnnotatedOperation> aops);
// Write the payload to the |payload_file| file. The operations reference
// blobs in the |data_blobs_path| file and the blobs will be reordered in the
@@ -60,9 +60,9 @@
// Computes a SHA256 hash of the given buf and sets the hash value in the
// operation so that update_engine could verify. This hash should be set
// for all operations that have a non-zero data blob. One exception is the
- // dummy operation for signature blob because the contents of the signature
+ // fake operation for signature blob because the contents of the signature
// blob will not be available at payload creation time. So, update_engine will
- // gracefully ignore the dummy signature operation.
+ // gracefully ignore the fake signature operation.
static bool AddOperationHash(InstallOperation* op, const brillo::Blob& buf);
// Install operations in the manifest may reference data blobs, which
diff --git a/payload_generator/payload_generation_config.cc b/payload_generator/payload_generation_config.cc
index b653a03..9c5832d 100644
--- a/payload_generator/payload_generation_config.cc
+++ b/payload_generator/payload_generation_config.cc
@@ -234,7 +234,8 @@
minor == kOpSrcHashMinorPayloadVersion ||
minor == kBrotliBsdiffMinorPayloadVersion ||
minor == kPuffdiffMinorPayloadVersion ||
- minor == kVerityMinorPayloadVersion);
+ minor == kVerityMinorPayloadVersion ||
+ minor == kPartialUpdateMinorPayloadVersion);
return true;
}
@@ -273,13 +274,14 @@
return false;
}
-bool PayloadVersion::IsDelta() const {
+bool PayloadVersion::IsDeltaOrPartial() const {
return minor != kFullPayloadMinorVersion;
}
bool PayloadGenerationConfig::Validate() const {
TEST_AND_RETURN_FALSE(version.Validate());
- TEST_AND_RETURN_FALSE(version.IsDelta() == is_delta);
+ TEST_AND_RETURN_FALSE(version.IsDeltaOrPartial() ==
+ (is_delta || is_partial_update));
if (is_delta) {
for (const PartitionConfig& part : source.partitions) {
if (!part.path.empty()) {
@@ -307,6 +309,10 @@
TEST_AND_RETURN_FALSE(part.verity.IsEmpty());
}
+ if (version.minor < kPartialUpdateMinorPayloadVersion) {
+ TEST_AND_RETURN_FALSE(!is_partial_update);
+ }
+
TEST_AND_RETURN_FALSE(hard_chunk_size == -1 ||
hard_chunk_size % block_size == 0);
TEST_AND_RETURN_FALSE(soft_chunk_size % block_size == 0);
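
The added validation ties the new flag to the minor version: a non-full minor version must correspond to a delta or partial payload, and is_partial_update requires at least kPartialUpdateMinorPayloadVersion (7). A compact sketch of those two rules, assuming kFullPayloadMinorVersion is 0 as in payload_constants:

    #include <cstdint>

    constexpr uint32_t kFullPayloadMinorVersion = 0;  // assumption
    constexpr uint32_t kPartialUpdateMinorPayloadVersion = 7;

    // A non-full minor version must correspond to a delta or partial payload,
    // and partial updates require at least minor version 7.
    bool ValidatePartialUpdateConfig(uint32_t minor,
                                     bool is_delta,
                                     bool is_partial_update) {
      const bool is_delta_or_partial = (minor != kFullPayloadMinorVersion);
      if (is_delta_or_partial != (is_delta || is_partial_update))
        return false;
      if (is_partial_update && minor < kPartialUpdateMinorPayloadVersion)
        return false;
      return true;
    }
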
diff --git a/payload_generator/payload_generation_config.h b/payload_generator/payload_generation_config.h
index af6f181..9abb97f 100644
--- a/payload_generator/payload_generation_config.h
+++ b/payload_generator/payload_generation_config.h
@@ -170,8 +170,8 @@
// Return whether the passed |operation| is allowed by this payload.
bool OperationAllowed(InstallOperation::Type operation) const;
- // Whether this payload version is a delta payload.
- bool IsDelta() const;
+ // Whether this payload version is a delta or partial payload.
+ bool IsDeltaOrPartial() const;
// The major version of the payload.
uint64_t major;
@@ -198,6 +198,10 @@
// Whether the requested payload is a delta payload.
bool is_delta = false;
+ // Whether the requested payload is a partial payload, i.e. only update a
+ // subset of partitions on device.
+ bool is_partial_update = false;
+
// The major/minor version of the payload.
PayloadVersion version;
diff --git a/payload_generator/payload_signer.cc b/payload_generator/payload_signer.cc
index 7e5fd4e..c3264c1 100644
--- a/payload_generator/payload_signer.cc
+++ b/payload_generator/payload_signer.cc
@@ -82,7 +82,7 @@
// Given an unsigned payload under |payload_path| and the |payload_signature|
// and |metadata_signature| generates an updated payload that includes the
// signatures. It populates |out_metadata_size| with the size of the final
-// manifest after adding the dummy signature operation, and
+// manifest after adding the fake signature operation, and
// |out_signatures_offset| with the expected offset for the new blob, and
// |out_metadata_signature_size| which will be size of |metadata_signature|
// if the payload major version supports metadata signature, 0 otherwise.
diff --git a/payload_generator/payload_signer.h b/payload_generator/payload_signer.h
index 06e4823..9676b71 100644
--- a/payload_generator/payload_signer.h
+++ b/payload_generator/payload_signer.h
@@ -62,7 +62,7 @@
// size in |metadata_signature_size| and signatures offset in
// |signatures_offset|, calculates the payload signature blob into
// |out_serialized_signature|. Note that the payload must already have an
- // updated manifest that includes the dummy signature op and correct metadata
+ // updated manifest that includes the fake signature op and correct metadata
// signature size in header. Returns true on success, false otherwise.
static bool SignPayload(const std::string& unsigned_payload_path,
const std::vector<std::string>& private_key_paths,
@@ -92,7 +92,7 @@
brillo::Blob* out_payload_hash_data,
brillo::Blob* out_metadata_hash);
- // Given an unsigned payload in |payload_path| (with no dummy signature op)
+ // Given an unsigned payload in |payload_path| (with no fake signature op)
// and the raw |payload_signatures| and |metadata_signatures| updates the
// payload to include the signature thus turning it into a signed payload. The
// new payload is stored in |signed_payload_path|. |payload_path| and
diff --git a/payload_generator/squashfs_filesystem.cc b/payload_generator/squashfs_filesystem.cc
index eb4fda3..6152d7d 100644
--- a/payload_generator/squashfs_filesystem.cc
+++ b/payload_generator/squashfs_filesystem.cc
@@ -275,7 +275,7 @@
auto last = std::unique(zlib_blks.begin(), zlib_blks.end());
zlib_blks.erase(last, zlib_blks.end());
- // Sanity check. Make sure zlib blocks are not overlapping.
+ // Make sure zlib blocks are not overlapping.
auto result = std::adjacent_find(
zlib_blks.begin(),
zlib_blks.end(),
diff --git a/payload_state.cc b/payload_state.cc
index bde7999..4945fe7 100644
--- a/payload_state.cc
+++ b/payload_state.cc
@@ -1058,7 +1058,7 @@
stored_time = Time::FromInternalValue(stored_value);
}
- // Sanity check: If the time read from disk is in the future
+ // Validation check: If the time read from disk is in the future
// (modulo some slack to account for possible NTP drift
// adjustments), something is fishy and we should report and
// reset.
@@ -1105,7 +1105,7 @@
stored_delta = TimeDelta::FromInternalValue(stored_value);
}
- // Sanity-check: Uptime can never be greater than the wall-clock
+ // Validation check: Uptime can never be greater than the wall-clock
// difference (modulo some slack). If it is, report and reset
// to the wall-clock difference.
TimeDelta diff = GetUpdateDuration() - stored_delta;
@@ -1154,7 +1154,7 @@
void PayloadState::SetRollbackVersion(const string& rollback_version) {
CHECK(powerwash_safe_prefs_);
- LOG(INFO) << "Blacklisting version " << rollback_version;
+ LOG(INFO) << "Excluding version " << rollback_version;
rollback_version_ = rollback_version;
powerwash_safe_prefs_->SetString(kPrefsRollbackVersion, rollback_version);
}
diff --git a/payload_state.h b/payload_state.h
index d13c642..427836b 100644
--- a/payload_state.h
+++ b/payload_state.h
@@ -366,14 +366,14 @@
// check where policy was available. This info is preserved over powerwash.
void LoadRollbackHappened();
- // Loads the blacklisted version from our prefs file.
+ // Loads the excluded version from our prefs file.
void LoadRollbackVersion();
- // Blacklists this version from getting AU'd to until we receive a new update
+ // Excludes this version from getting AU'd to until we receive a new update
// response.
void SetRollbackVersion(const std::string& rollback_version);
- // Clears any blacklisted version.
+ // Clears any excluded version.
void ResetRollbackVersion();
inline uint32_t GetUrlIndex() {
@@ -565,7 +565,7 @@
// forced updates to avoid update-rollback loops.
bool rollback_happened_;
- // This stores a blacklisted version set as part of rollback. When we rollback
+ // This stores an excluded version set as part of rollback. When we rollback
// we store the version of the os from which we are rolling back from in order
// to guarantee that we do not re-update to it on the next au attempt after
// reboot.
diff --git a/payload_state_unittest.cc b/payload_state_unittest.cc
index bf9aed4..c33bda4 100644
--- a/payload_state_unittest.cc
+++ b/payload_state_unittest.cc
@@ -1016,7 +1016,7 @@
NiceMock<MockPrefs>* mock_powerwash_safe_prefs =
fake_system_state.mock_powerwash_safe_prefs();
- // Mock out the os version and make sure it's blacklisted correctly.
+ // Mock out the os version and make sure it's excluded correctly.
string rollback_version = "2345.0.0";
OmahaRequestParams params(&fake_system_state);
params.Init(rollback_version, "", false);
diff --git a/pylintrc b/pylintrc
index 33adec2..a433868 100644
--- a/pylintrc
+++ b/pylintrc
@@ -24,7 +24,7 @@
# Profiled execution.
profile=no
-# Add files or directories to the blacklist. They should be base names, not
+# Add files or directories to the ignorelist. They should be base names, not
# paths.
ignore=CVS,.svn,.git,update_metadata_pb2.py
diff --git a/scripts/brillo_update_payload b/scripts/brillo_update_payload
index d9c18ff..9bae74e 100755
--- a/scripts/brillo_update_payload
+++ b/scripts/brillo_update_payload
@@ -189,6 +189,9 @@
DEFINE_string disable_fec_computation "" \
"Optional: Disables the on device fec data computation for incremental \
update. This feature is enabled by default."
+ DEFINE_string is_partial_update "" \
+ "Optional: True if the payload is for partial update. i.e. it only updates \
+a subset of partitions on device."
fi
if [[ "${COMMAND}" == "hash" || "${COMMAND}" == "sign" ]]; then
DEFINE_string unsigned_payload "" "Path to the input unsigned payload."
@@ -654,21 +657,33 @@
--new_mapfiles="${new_mapfiles}"
)
+ if [[ "${FLAGS_is_partial_update}" == "true" ]]; then
+ GENERATOR_ARGS+=( --is_partial_update="true" )
+ # Need at least minor version 7 for partial update, so generate with minor
+ # version 7 if we don't have a source image. Let delta_generator fail on
+ # other incompatible minor versions.
+ if [[ -z "${FORCE_MINOR_VERSION}" ]]; then
+ FORCE_MINOR_VERSION="7"
+ fi
+ fi
+
if [[ "${payload_type}" == "delta" ]]; then
# Source image args:
GENERATOR_ARGS+=(
--old_partitions="${old_partitions}"
--old_mapfiles="${old_mapfiles}"
)
- if [[ -n "${FORCE_MINOR_VERSION}" ]]; then
- GENERATOR_ARGS+=( --minor_version="${FORCE_MINOR_VERSION}" )
- fi
if [[ -n "${FLAGS_disable_fec_computation}" ]]; then
GENERATOR_ARGS+=(
--disable_fec_computation="${FLAGS_disable_fec_computation}" )
fi
fi
+ # Minor version is set only for delta or partial payloads.
+ if [[ -n "${FORCE_MINOR_VERSION}" ]]; then
+ GENERATOR_ARGS+=( --minor_version="${FORCE_MINOR_VERSION}" )
+ fi
+
if [[ -n "${FORCE_MAJOR_VERSION}" ]]; then
GENERATOR_ARGS+=( --major_version="${FORCE_MAJOR_VERSION}" )
fi
@@ -881,8 +896,8 @@
check_update_payload ${PAYCHECK_ARGS[@]} --check
}
-# Sanity check that the real generator exists:
-GENERATOR="$(which delta_generator || true)"
+# Check that the real generator exists:
+[[ -x "${GENERATOR}" ]] || GENERATOR="$(which delta_generator || true)"
[[ -x "${GENERATOR}" ]] || die "can't find delta_generator"
case "$COMMAND" in
diff --git a/scripts/update_device.py b/scripts/update_device.py
index 7be3edb..1cd4b6a 100755
--- a/scripts/update_device.py
+++ b/scripts/update_device.py
@@ -20,12 +20,14 @@
from __future__ import absolute_import
import argparse
+import binascii
import hashlib
import logging
import os
import socket
import subprocess
import sys
+import struct
import threading
import xml.etree.ElementTree
import zipfile
@@ -89,6 +91,7 @@
OTA_PAYLOAD_PROPERTIES_TXT = 'payload_properties.txt'
SECONDARY_OTA_PAYLOAD_BIN = 'secondary/payload.bin'
SECONDARY_OTA_PAYLOAD_PROPERTIES_TXT = 'secondary/payload_properties.txt'
+ PAYLOAD_MAGIC_HEADER = b'CrAU'
def __init__(self, otafilename, secondary_payload=False):
self.otafilename = otafilename
@@ -97,10 +100,34 @@
payload_entry = (self.SECONDARY_OTA_PAYLOAD_BIN if secondary_payload else
self.OTA_PAYLOAD_BIN)
payload_info = otazip.getinfo(payload_entry)
- self.offset = payload_info.header_offset
- self.offset += zipfile.sizeFileHeader
- self.offset += len(payload_info.extra) + len(payload_info.filename)
- self.size = payload_info.file_size
+
+ if payload_info.compress_type != 0:
+ logging.error(
+ "Expected layload to be uncompressed, got compression method %d",
+ payload_info.compress_type)
+ # Don't use len(payload_info.extra), because that returns the size of the
+ # extra fields in the central directory. We need to look at the local file
+ # header, as the two might have different sizes.
+ with open(otafilename, "rb") as fp:
+ fp.seek(payload_info.header_offset)
+ data = fp.read(zipfile.sizeFileHeader)
+ fheader = struct.unpack(zipfile.structFileHeader, data)
+ # Last two fields of local file header are filename length and
+ # extra length
+ filename_len = fheader[-2]
+ extra_len = fheader[-1]
+ self.offset = payload_info.header_offset
+ self.offset += zipfile.sizeFileHeader
+ self.offset += filename_len + extra_len
+ self.size = payload_info.file_size
+ fp.seek(self.offset)
+ payload_header = fp.read(4)
+ if payload_header != self.PAYLOAD_MAGIC_HEADER:
+ logging.warning(
+ "Invalid header, expeted %s, got %s."
+ "Either the offset is not correct, or payload is corrupted",
+ binascii.hexlify(self.PAYLOAD_MAGIC_HEADER),
+ payload_header)
property_entry = (self.SECONDARY_OTA_PAYLOAD_PROPERTIES_TXT if
secondary_payload else self.OTA_PAYLOAD_PROPERTIES_TXT)
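
For reference, the offset arithmetic introduced above can be exercised on its own. Below is a minimal standalone sketch (not part of this change) that derives the absolute offset and size of an uncompressed payload.bin entry from the zip local file header, the same way update_device.py now does; the function name and the 'ota.zip' path are illustrative only.

import struct
import zipfile

def payload_offset_and_size(ota_path, entry='payload.bin'):
    """Return (offset, size) of an uncompressed zip entry, per its local file header."""
    with zipfile.ZipFile(ota_path) as z:
        info = z.getinfo(entry)
    if info.compress_type != zipfile.ZIP_STORED:
        raise ValueError('payload entry must be stored uncompressed')
    with open(ota_path, 'rb') as fp:
        fp.seek(info.header_offset)
        header = struct.unpack(zipfile.structFileHeader,
                               fp.read(zipfile.sizeFileHeader))
    # The local file header, not the central directory, carries the filename and
    # extra-field lengths that precede the file data; they are its last two fields.
    filename_len, extra_len = header[-2], header[-1]
    offset = info.header_offset + zipfile.sizeFileHeader + filename_len + extra_len
    return offset, info.file_size

A caller can then seek to that offset and compare the first four bytes against b'CrAU', just as the updated script does.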
diff --git a/scripts/update_payload/payload.py b/scripts/update_payload/payload.py
index ea5ed30..78b8e2c 100644
--- a/scripts/update_payload/payload.py
+++ b/scripts/update_payload/payload.py
@@ -20,7 +20,9 @@
from __future__ import print_function
import hashlib
+import io
import struct
+import zipfile
from update_payload import applier
from update_payload import checker
@@ -119,6 +121,10 @@
payload_file: update payload file object open for reading
payload_file_offset: the offset of the actual payload
"""
+ if zipfile.is_zipfile(payload_file):
+ with zipfile.ZipFile(payload_file) as zfp:
+ with zfp.open("payload.bin") as payload_fp:
+ payload_file = io.BytesIO(payload_fp.read())
self.payload_file = payload_file
self.payload_file_offset = payload_file_offset
self.manifest_hasher = None
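
With this change the parser accepts either a raw payload or a full OTA zip containing payload.bin. A minimal usage sketch, assuming the scripts directory is on PYTHONPATH and relying on the module's existing Init()/manifest API; 'ota.zip' is an illustrative path:

from update_payload.payload import Payload

# The constructor now detects zip input and reads payload.bin into a BytesIO.
with open('ota.zip', 'rb') as fp:
    payload = Payload(fp)
    payload.Init()
    print(payload.manifest.minor_version)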
diff --git a/scripts/update_payload/update_metadata_pb2.py b/scripts/update_payload/update_metadata_pb2.py
index d41c1da..841cd22 100644
--- a/scripts/update_payload/update_metadata_pb2.py
+++ b/scripts/update_payload/update_metadata_pb2.py
@@ -2,8 +2,6 @@
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: update_metadata.proto
-import sys
-_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
@@ -19,8 +17,8 @@
name='update_metadata.proto',
package='chromeos_update_engine',
syntax='proto2',
- serialized_options=_b('H\003'),
- serialized_pb=_b('\n\x15update_metadata.proto\x12\x16\x63hromeos_update_engine\"1\n\x06\x45xtent\x12\x13\n\x0bstart_block\x18\x01 \x01(\x04\x12\x12\n\nnum_blocks\x18\x02 \x01(\x04\"\x9f\x01\n\nSignatures\x12@\n\nsignatures\x18\x01 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x1aO\n\tSignature\x12\x13\n\x07version\x18\x01 \x01(\rB\x02\x18\x01\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\x12\x1f\n\x17unpadded_signature_size\x18\x03 \x01(\x07\"+\n\rPartitionInfo\x12\x0c\n\x04size\x18\x01 \x01(\x04\x12\x0c\n\x04hash\x18\x02 \x01(\x0c\"w\n\tImageInfo\x12\r\n\x05\x62oard\x18\x01 \x01(\t\x12\x0b\n\x03key\x18\x02 \x01(\t\x12\x0f\n\x07\x63hannel\x18\x03 \x01(\t\x12\x0f\n\x07version\x18\x04 \x01(\t\x12\x15\n\rbuild_channel\x18\x05 \x01(\t\x12\x15\n\rbuild_version\x18\x06 \x01(\t\"\xee\x03\n\x10InstallOperation\x12;\n\x04type\x18\x01 \x02(\x0e\x32-.chromeos_update_engine.InstallOperation.Type\x12\x13\n\x0b\x64\x61ta_offset\x18\x02 \x01(\x04\x12\x13\n\x0b\x64\x61ta_length\x18\x03 \x01(\x04\x12\x33\n\x0bsrc_extents\x18\x04 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\nsrc_length\x18\x05 \x01(\x04\x12\x33\n\x0b\x64st_extents\x18\x06 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\ndst_length\x18\x07 \x01(\x04\x12\x18\n\x10\x64\x61ta_sha256_hash\x18\x08 \x01(\x0c\x12\x17\n\x0fsrc_sha256_hash\x18\t \x01(\x0c\"\xad\x01\n\x04Type\x12\x0b\n\x07REPLACE\x10\x00\x12\x0e\n\nREPLACE_BZ\x10\x01\x12\x0c\n\x04MOVE\x10\x02\x1a\x02\x08\x01\x12\x0e\n\x06\x42SDIFF\x10\x03\x1a\x02\x08\x01\x12\x0f\n\x0bSOURCE_COPY\x10\x04\x12\x11\n\rSOURCE_BSDIFF\x10\x05\x12\x0e\n\nREPLACE_XZ\x10\x08\x12\x08\n\x04ZERO\x10\x06\x12\x0b\n\x07\x44ISCARD\x10\x07\x12\x11\n\rBROTLI_BSDIFF\x10\n\x12\x0c\n\x08PUFFDIFF\x10\t\"\xd7\x05\n\x0fPartitionUpdate\x12\x16\n\x0epartition_name\x18\x01 \x02(\t\x12\x17\n\x0frun_postinstall\x18\x02 \x01(\x08\x12\x18\n\x10postinstall_path\x18\x03 \x01(\t\x12\x17\n\x0f\x66ilesystem_type\x18\x04 \x01(\t\x12M\n\x17new_partition_signature\x18\x05 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x12\x41\n\x12old_partition_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12\x41\n\x12new_partition_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12<\n\noperations\x18\x08 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12\x1c\n\x14postinstall_optional\x18\t \x01(\x08\x12=\n\x15hash_tree_data_extent\x18\n \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x38\n\x10hash_tree_extent\x18\x0b \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x1b\n\x13hash_tree_algorithm\x18\x0c \x01(\t\x12\x16\n\x0ehash_tree_salt\x18\r \x01(\x0c\x12\x37\n\x0f\x66\x65\x63_data_extent\x18\x0e \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x32\n\nfec_extent\x18\x0f \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x14\n\tfec_roots\x18\x10 \x01(\r:\x01\x32\"L\n\x15\x44ynamicPartitionGroup\x12\x0c\n\x04name\x18\x01 \x02(\t\x12\x0c\n\x04size\x18\x02 \x01(\x04\x12\x17\n\x0fpartition_names\x18\x03 \x03(\t\"s\n\x18\x44ynamicPartitionMetadata\x12=\n\x06groups\x18\x01 \x03(\x0b\x32-.chromeos_update_engine.DynamicPartitionGroup\x12\x18\n\x10snapshot_enabled\x18\x02 \x01(\x08\"\xe1\x06\n\x14\x44\x65ltaArchiveManifest\x12H\n\x12install_operations\x18\x01 \x03(\x0b\x32(.chromeos_update_engine.InstallOperationB\x02\x18\x01\x12O\n\x19kernel_install_operations\x18\x02 \x03(\x0b\x32(.chromeos_update_engine.InstallOperationB\x02\x18\x01\x12\x18\n\nblock_size\x18\x03 \x01(\r:\x04\x34\x30\x39\x36\x12\x19\n\x11signatures_offset\x18\x04 
\x01(\x04\x12\x17\n\x0fsignatures_size\x18\x05 \x01(\x04\x12\x42\n\x0fold_kernel_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfoB\x02\x18\x01\x12\x42\n\x0fnew_kernel_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfoB\x02\x18\x01\x12\x42\n\x0fold_rootfs_info\x18\x08 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfoB\x02\x18\x01\x12\x42\n\x0fnew_rootfs_info\x18\t \x01(\x0b\x32%.chromeos_update_engine.PartitionInfoB\x02\x18\x01\x12\x39\n\x0eold_image_info\x18\n \x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x39\n\x0enew_image_info\x18\x0b \x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x18\n\rminor_version\x18\x0c \x01(\r:\x01\x30\x12;\n\npartitions\x18\r \x03(\x0b\x32\'.chromeos_update_engine.PartitionUpdate\x12\x15\n\rmax_timestamp\x18\x0e \x01(\x03\x12T\n\x1a\x64ynamic_partition_metadata\x18\x0f \x01(\x0b\x32\x30.chromeos_update_engine.DynamicPartitionMetadata\x12\x16\n\x0epartial_update\x18\x10 \x01(\x08\x42\x02H\x03')
+ serialized_options=b'H\003',
+ serialized_pb=b'\n\x15update_metadata.proto\x12\x16\x63hromeos_update_engine\"1\n\x06\x45xtent\x12\x13\n\x0bstart_block\x18\x01 \x01(\x04\x12\x12\n\nnum_blocks\x18\x02 \x01(\x04\"\x9f\x01\n\nSignatures\x12@\n\nsignatures\x18\x01 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x1aO\n\tSignature\x12\x13\n\x07version\x18\x01 \x01(\rB\x02\x18\x01\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\x12\x1f\n\x17unpadded_signature_size\x18\x03 \x01(\x07\"+\n\rPartitionInfo\x12\x0c\n\x04size\x18\x01 \x01(\x04\x12\x0c\n\x04hash\x18\x02 \x01(\x0c\"w\n\tImageInfo\x12\r\n\x05\x62oard\x18\x01 \x01(\t\x12\x0b\n\x03key\x18\x02 \x01(\t\x12\x0f\n\x07\x63hannel\x18\x03 \x01(\t\x12\x0f\n\x07version\x18\x04 \x01(\t\x12\x15\n\rbuild_channel\x18\x05 \x01(\t\x12\x15\n\rbuild_version\x18\x06 \x01(\t\"\xee\x03\n\x10InstallOperation\x12;\n\x04type\x18\x01 \x02(\x0e\x32-.chromeos_update_engine.InstallOperation.Type\x12\x13\n\x0b\x64\x61ta_offset\x18\x02 \x01(\x04\x12\x13\n\x0b\x64\x61ta_length\x18\x03 \x01(\x04\x12\x33\n\x0bsrc_extents\x18\x04 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\nsrc_length\x18\x05 \x01(\x04\x12\x33\n\x0b\x64st_extents\x18\x06 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\ndst_length\x18\x07 \x01(\x04\x12\x18\n\x10\x64\x61ta_sha256_hash\x18\x08 \x01(\x0c\x12\x17\n\x0fsrc_sha256_hash\x18\t \x01(\x0c\"\xad\x01\n\x04Type\x12\x0b\n\x07REPLACE\x10\x00\x12\x0e\n\nREPLACE_BZ\x10\x01\x12\x0c\n\x04MOVE\x10\x02\x1a\x02\x08\x01\x12\x0e\n\x06\x42SDIFF\x10\x03\x1a\x02\x08\x01\x12\x0f\n\x0bSOURCE_COPY\x10\x04\x12\x11\n\rSOURCE_BSDIFF\x10\x05\x12\x0e\n\nREPLACE_XZ\x10\x08\x12\x08\n\x04ZERO\x10\x06\x12\x0b\n\x07\x44ISCARD\x10\x07\x12\x11\n\rBROTLI_BSDIFF\x10\n\x12\x0c\n\x08PUFFDIFF\x10\t\"\xe8\x05\n\x0fPartitionUpdate\x12\x16\n\x0epartition_name\x18\x01 \x02(\t\x12\x17\n\x0frun_postinstall\x18\x02 \x01(\x08\x12\x18\n\x10postinstall_path\x18\x03 \x01(\t\x12\x17\n\x0f\x66ilesystem_type\x18\x04 \x01(\t\x12M\n\x17new_partition_signature\x18\x05 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x12\x41\n\x12old_partition_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12\x41\n\x12new_partition_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12<\n\noperations\x18\x08 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12\x1c\n\x14postinstall_optional\x18\t \x01(\x08\x12=\n\x15hash_tree_data_extent\x18\n \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x38\n\x10hash_tree_extent\x18\x0b \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x1b\n\x13hash_tree_algorithm\x18\x0c \x01(\t\x12\x16\n\x0ehash_tree_salt\x18\r \x01(\x0c\x12\x37\n\x0f\x66\x65\x63_data_extent\x18\x0e \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x32\n\nfec_extent\x18\x0f \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x14\n\tfec_roots\x18\x10 \x01(\r:\x01\x32\x12\x0f\n\x07version\x18\x11 \x01(\t\"L\n\x15\x44ynamicPartitionGroup\x12\x0c\n\x04name\x18\x01 \x02(\t\x12\x0c\n\x04size\x18\x02 \x01(\x04\x12\x17\n\x0fpartition_names\x18\x03 \x03(\t\"s\n\x18\x44ynamicPartitionMetadata\x12=\n\x06groups\x18\x01 \x03(\x0b\x32-.chromeos_update_engine.DynamicPartitionGroup\x12\x18\n\x10snapshot_enabled\x18\x02 \x01(\x08\"\xe1\x06\n\x14\x44\x65ltaArchiveManifest\x12H\n\x12install_operations\x18\x01 \x03(\x0b\x32(.chromeos_update_engine.InstallOperationB\x02\x18\x01\x12O\n\x19kernel_install_operations\x18\x02 \x03(\x0b\x32(.chromeos_update_engine.InstallOperationB\x02\x18\x01\x12\x18\n\nblock_size\x18\x03 
\x01(\r:\x04\x34\x30\x39\x36\x12\x19\n\x11signatures_offset\x18\x04 \x01(\x04\x12\x17\n\x0fsignatures_size\x18\x05 \x01(\x04\x12\x42\n\x0fold_kernel_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfoB\x02\x18\x01\x12\x42\n\x0fnew_kernel_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfoB\x02\x18\x01\x12\x42\n\x0fold_rootfs_info\x18\x08 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfoB\x02\x18\x01\x12\x42\n\x0fnew_rootfs_info\x18\t \x01(\x0b\x32%.chromeos_update_engine.PartitionInfoB\x02\x18\x01\x12\x39\n\x0eold_image_info\x18\n \x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x39\n\x0enew_image_info\x18\x0b \x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x18\n\rminor_version\x18\x0c \x01(\r:\x01\x30\x12;\n\npartitions\x18\r \x03(\x0b\x32\'.chromeos_update_engine.PartitionUpdate\x12\x15\n\rmax_timestamp\x18\x0e \x01(\x03\x12T\n\x1a\x64ynamic_partition_metadata\x18\x0f \x01(\x0b\x32\x30.chromeos_update_engine.DynamicPartitionMetadata\x12\x16\n\x0epartial_update\x18\x10 \x01(\x08\x42\x02H\x03'
)
@@ -41,11 +39,11 @@
type=None),
_descriptor.EnumValueDescriptor(
name='MOVE', index=2, number=2,
- serialized_options=_b('\010\001'),
+ serialized_options=b'\010\001',
type=None),
_descriptor.EnumValueDescriptor(
name='BSDIFF', index=3, number=3,
- serialized_options=_b('\010\001'),
+ serialized_options=b'\010\001',
type=None),
_descriptor.EnumValueDescriptor(
name='SOURCE_COPY', index=4, number=4,
@@ -135,11 +133,11 @@
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
- serialized_options=_b('\030\001'), file=DESCRIPTOR),
+ serialized_options=b'\030\001', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='chromeos_update_engine.Signatures.Signature.data', index=1,
number=2, type=12, cpp_type=9, label=1,
- has_default_value=False, default_value=_b(""),
+ has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
@@ -214,7 +212,7 @@
_descriptor.FieldDescriptor(
name='hash', full_name='chromeos_update_engine.PartitionInfo.hash', index=1,
number=2, type=12, cpp_type=9, label=1,
- has_default_value=False, default_value=_b(""),
+ has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
@@ -245,42 +243,42 @@
_descriptor.FieldDescriptor(
name='board', full_name='chromeos_update_engine.ImageInfo.board', index=0,
number=1, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=_b("").decode('utf-8'),
+ has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='key', full_name='chromeos_update_engine.ImageInfo.key', index=1,
number=2, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=_b("").decode('utf-8'),
+ has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='channel', full_name='chromeos_update_engine.ImageInfo.channel', index=2,
number=3, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=_b("").decode('utf-8'),
+ has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='version', full_name='chromeos_update_engine.ImageInfo.version', index=3,
number=4, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=_b("").decode('utf-8'),
+ has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='build_channel', full_name='chromeos_update_engine.ImageInfo.build_channel', index=4,
number=5, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=_b("").decode('utf-8'),
+ has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='build_version', full_name='chromeos_update_engine.ImageInfo.build_version', index=5,
number=6, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=_b("").decode('utf-8'),
+ has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
@@ -360,14 +358,14 @@
_descriptor.FieldDescriptor(
name='data_sha256_hash', full_name='chromeos_update_engine.InstallOperation.data_sha256_hash', index=7,
number=8, type=12, cpp_type=9, label=1,
- has_default_value=False, default_value=_b(""),
+ has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='src_sha256_hash', full_name='chromeos_update_engine.InstallOperation.src_sha256_hash', index=8,
number=9, type=12, cpp_type=9, label=1,
- has_default_value=False, default_value=_b(""),
+ has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
@@ -399,7 +397,7 @@
_descriptor.FieldDescriptor(
name='partition_name', full_name='chromeos_update_engine.PartitionUpdate.partition_name', index=0,
number=1, type=9, cpp_type=9, label=2,
- has_default_value=False, default_value=_b("").decode('utf-8'),
+ has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
@@ -413,14 +411,14 @@
_descriptor.FieldDescriptor(
name='postinstall_path', full_name='chromeos_update_engine.PartitionUpdate.postinstall_path', index=2,
number=3, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=_b("").decode('utf-8'),
+ has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='filesystem_type', full_name='chromeos_update_engine.PartitionUpdate.filesystem_type', index=3,
number=4, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=_b("").decode('utf-8'),
+ has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
@@ -476,14 +474,14 @@
_descriptor.FieldDescriptor(
name='hash_tree_algorithm', full_name='chromeos_update_engine.PartitionUpdate.hash_tree_algorithm', index=11,
number=12, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=_b("").decode('utf-8'),
+ has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hash_tree_salt', full_name='chromeos_update_engine.PartitionUpdate.hash_tree_salt', index=12,
number=13, type=12, cpp_type=9, label=1,
- has_default_value=False, default_value=_b(""),
+ has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
@@ -508,6 +506,13 @@
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='version', full_name='chromeos_update_engine.PartitionUpdate.version', index=16,
+ number=17, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=b"".decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
@@ -521,7 +526,7 @@
oneofs=[
],
serialized_start=926,
- serialized_end=1653,
+ serialized_end=1670,
)
@@ -535,7 +540,7 @@
_descriptor.FieldDescriptor(
name='name', full_name='chromeos_update_engine.DynamicPartitionGroup.name', index=0,
number=1, type=9, cpp_type=9, label=2,
- has_default_value=False, default_value=_b("").decode('utf-8'),
+ has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
@@ -565,8 +570,8 @@
extension_ranges=[],
oneofs=[
],
- serialized_start=1655,
- serialized_end=1731,
+ serialized_start=1672,
+ serialized_end=1748,
)
@@ -603,8 +608,8 @@
extension_ranges=[],
oneofs=[
],
- serialized_start=1733,
- serialized_end=1848,
+ serialized_start=1750,
+ serialized_end=1865,
)
@@ -621,14 +626,14 @@
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
- serialized_options=_b('\030\001'), file=DESCRIPTOR),
+ serialized_options=b'\030\001', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='kernel_install_operations', full_name='chromeos_update_engine.DeltaArchiveManifest.kernel_install_operations', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
- serialized_options=_b('\030\001'), file=DESCRIPTOR),
+ serialized_options=b'\030\001', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='block_size', full_name='chromeos_update_engine.DeltaArchiveManifest.block_size', index=2,
number=3, type=13, cpp_type=3, label=1,
@@ -656,28 +661,28 @@
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
- serialized_options=_b('\030\001'), file=DESCRIPTOR),
+ serialized_options=b'\030\001', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='new_kernel_info', full_name='chromeos_update_engine.DeltaArchiveManifest.new_kernel_info', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
- serialized_options=_b('\030\001'), file=DESCRIPTOR),
+ serialized_options=b'\030\001', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='old_rootfs_info', full_name='chromeos_update_engine.DeltaArchiveManifest.old_rootfs_info', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
- serialized_options=_b('\030\001'), file=DESCRIPTOR),
+ serialized_options=b'\030\001', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='new_rootfs_info', full_name='chromeos_update_engine.DeltaArchiveManifest.new_rootfs_info', index=8,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
- serialized_options=_b('\030\001'), file=DESCRIPTOR),
+ serialized_options=b'\030\001', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='old_image_info', full_name='chromeos_update_engine.DeltaArchiveManifest.old_image_info', index=9,
number=10, type=11, cpp_type=10, label=1,
@@ -739,8 +744,8 @@
extension_ranges=[],
oneofs=[
],
- serialized_start=1851,
- serialized_end=2716,
+ serialized_start=1868,
+ serialized_end=2733,
)
_SIGNATURES_SIGNATURE.containing_type = _SIGNATURES
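
For context on the regenerated bindings above: the removed _b helper was a Python 2/3 shim that left str literals untouched on Python 2 and encoded them to bytes on Python 3. With plain bytes literals now written directly, the shim is unnecessary; an illustrative equivalent:

import sys

# Identity on Python 2 (str is already bytes there); latin-1 encode on Python 3,
# which preserves every code point 0-255 and so round-trips the descriptor bytes.
_b = (lambda x: x) if sys.version_info[0] < 3 else (lambda x: x.encode('latin1'))

assert _b('H\003') == b'H\003'  # on Python 3, identical to the new literal form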
diff --git a/stable/Android.bp b/stable/Android.bp
new file mode 100644
index 0000000..337ae96
--- /dev/null
+++ b/stable/Android.bp
@@ -0,0 +1,65 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// Stable AIDL interface between update_engine and other APEXes
+// ========================================================
+aidl_interface {
+ name: "libupdate_engine_stable",
+ srcs: [
+ "android/os/IUpdateEngineStable.aidl",
+ "android/os/IUpdateEngineStableCallback.aidl",
+ ],
+ backend: {
+ cpp: {
+ enabled: true,
+ },
+ java: {
+ enabled: false,
+ },
+ ndk: {
+ enabled: true,
+ apex_available: [
+ "com.android.gki.*",
+ ],
+ },
+ },
+}
+
+// update_engine_stable_client (type: executable)
+// ========================================================
+// update_engine console client installed to APEXes
+cc_binary {
+ name: "update_engine_stable_client",
+
+ header_libs: [
+ "libupdate_engine_headers",
+ ],
+ shared_libs: [
+ "libbinder_ndk",
+ "libbase",
+ "liblog",
+ ],
+ static_libs: [
+ "libgflags",
+ "libupdate_engine_stable-ndk_platform",
+ ],
+ srcs: [
+ "update_engine_stable_client.cc",
+ ],
+ apex_available: [
+ "com.android.gki.*",
+ ],
+}
diff --git a/stable/aidl_api/libupdate_engine_stable/current/android/os/IUpdateEngineStable.aidl b/stable/aidl_api/libupdate_engine_stable/current/android/os/IUpdateEngineStable.aidl
new file mode 100644
index 0000000..82c3ca5
--- /dev/null
+++ b/stable/aidl_api/libupdate_engine_stable/current/android/os/IUpdateEngineStable.aidl
@@ -0,0 +1,23 @@
+///////////////////////////////////////////////////////////////////////////////
+// THIS FILE IS IMMUTABLE. DO NOT EDIT IN ANY CASE. //
+///////////////////////////////////////////////////////////////////////////////
+
+// This file is a snapshot of an AIDL interface (or parcelable). Do not try to
+// edit this file. It looks like you are doing that because you have modified
+// an AIDL interface in a backward-incompatible way, e.g., deleting a function
+// from an interface or a field from a parcelable and it broke the build. That
+// breakage is intended.
+//
+// You must not make backward incompatible changes to the AIDL files built
+// with the aidl_interface module type with versions property set. The module
+// type is used to build AIDL files in a way that they can be used across
+// independently updatable components of the system. If a device is shipped
+// with such a backward incompatible change, it has a high risk of breaking
+// later when a module using the interface is updated, e.g., Mainline modules.
+
+package android.os;
+interface IUpdateEngineStable {
+ void applyPayloadFd(in ParcelFileDescriptor pfd, in long payload_offset, in long payload_size, in String[] headerKeyValuePairs);
+ boolean bind(android.os.IUpdateEngineStableCallback callback);
+ boolean unbind(android.os.IUpdateEngineStableCallback callback);
+}
diff --git a/stable/aidl_api/libupdate_engine_stable/current/android/os/IUpdateEngineStableCallback.aidl b/stable/aidl_api/libupdate_engine_stable/current/android/os/IUpdateEngineStableCallback.aidl
new file mode 100644
index 0000000..4c72b49
--- /dev/null
+++ b/stable/aidl_api/libupdate_engine_stable/current/android/os/IUpdateEngineStableCallback.aidl
@@ -0,0 +1,22 @@
+///////////////////////////////////////////////////////////////////////////////
+// THIS FILE IS IMMUTABLE. DO NOT EDIT IN ANY CASE. //
+///////////////////////////////////////////////////////////////////////////////
+
+// This file is a snapshot of an AIDL interface (or parcelable). Do not try to
+// edit this file. It looks like you are doing that because you have modified
+// an AIDL interface in a backward-incompatible way, e.g., deleting a function
+// from an interface or a field from a parcelable and it broke the build. That
+// breakage is intended.
+//
+// You must not make backward incompatible changes to the AIDL files built
+// with the aidl_interface module type with versions property set. The module
+// type is used to build AIDL files in a way that they can be used across
+// independently updatable components of the system. If a device is shipped
+// with such a backward incompatible change, it has a high risk of breaking
+// later when a module using the interface is updated, e.g., Mainline modules.
+
+package android.os;
+interface IUpdateEngineStableCallback {
+ oneway void onStatusUpdate(int status_code, float percentage);
+ oneway void onPayloadApplicationComplete(int error_code);
+}
diff --git a/stable/android/os/IUpdateEngineStable.aidl b/stable/android/os/IUpdateEngineStable.aidl
new file mode 100644
index 0000000..b3b6674
--- /dev/null
+++ b/stable/android/os/IUpdateEngineStable.aidl
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.os;
+
+import android.os.IUpdateEngineStableCallback;
+import android.os.ParcelFileDescriptor;
+
+/**
+ * The stable interface exposed by the update engine daemon.
+ */
+interface IUpdateEngineStable {
+ /**
+ * Apply the given payload as provided in the given file descriptor.
+ *
+ * See {@link #bind(IUpdateEngineStableCallback)} for status updates.
+ *
+ * @param pfd The file descriptor opened at the payload file. Note that the daemon must have
+ * enough permission to operate on the file descriptor.
+ * @param payload_offset offset into pfd where the payload binary starts.
+ * @param payload_size length after payload_offset to read from pfd. If 0, it will be auto
+ * detected.
+ * @param headerKeyValuePairs additional header key value pairs, in the format of "key=value".
+ * @see android.os.UpdateEngine#applyPayload(android.content.res.AssetFileDescriptor, String[])
+ */
+ void applyPayloadFd(in ParcelFileDescriptor pfd,
+ in long payload_offset,
+ in long payload_size,
+ in String[] headerKeyValuePairs);
+
+ /**
+ * Bind a callback for status updates on payload application.
+ *
+ * At any given time, only one callback can be bound. If a callback is already bound,
+ * subsequent binding will fail and return false until the bound callback is unbound. That is,
+ * binding is first-come, first-serve.
+ *
+ * A bound callback may be unbound explicitly by calling
+ * {@link #unbind(IUpdateEngineStableCallback)}, or
+ * implicitly when the process implementing the callback dies.
+ *
+ * @param callback See {@link IUpdateEngineStableCallback}
+ * @return true if binding is successful, false otherwise.
+ * @see android.os.UpdateEngine#bind(android.os.UpdateEngineCallback)
+ */
+ boolean bind(IUpdateEngineStableCallback callback);
+
+ /**
+ * Unbind a possibly bound callback.
+ *
+ * If the provided callback does not match the previously bound callback, unbinding fails.
+ *
+ * Note that a callback may also be unbound when the process implementing the callback dies.
+ * Hence, a client usually does not need to explicitly unbind a callback unless it wants to change
+ * the bound callback.
+ *
+ * @param callback The callback to be unbound. See {@link IUpdateEngineStableCallback}.
+ * @return true if unbinding is successful, false otherwise.
+ * @see android.os.UpdateEngine#unbind(android.os.UpdateEngineCallback)
+ */
+ boolean unbind(IUpdateEngineStableCallback callback);
+}
diff --git a/stable/android/os/IUpdateEngineStableCallback.aidl b/stable/android/os/IUpdateEngineStableCallback.aidl
new file mode 100644
index 0000000..d8fc333
--- /dev/null
+++ b/stable/android/os/IUpdateEngineStableCallback.aidl
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.os;
+
+/**
+ * The stable Callback interface for IUpdateEngineStable.
+ */
+oneway interface IUpdateEngineStableCallback {
+ /**
+ * Invoked when a payload is being applied and there is a status update.
+ *
+ * @param status_code see {@link android.os.UpdateEngine.UpdateStatusConstants}.
+ * @param percentage percentage of progress of the current stage.
+ * @see android.os.UpdateEngineCallback#onStatusUpdate(int, float)
+ */
+ void onStatusUpdate(int status_code, float percentage);
+
+ /**
+ * Invoked when a payload has finished being applied.
+ *
+ * @param error_code see {@link android.os.UpdateEngine.ErrorCodeConstants}
+ * @see android.os.UpdateEngineCallback#onPayloadApplicationComplete(int)
+ */
+ void onPayloadApplicationComplete(int error_code);
+}
diff --git a/stable/update_engine_stable_client.cc b/stable/update_engine_stable_client.cc
new file mode 100644
index 0000000..da203c4
--- /dev/null
+++ b/stable/update_engine_stable_client.cc
@@ -0,0 +1,188 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// update_engine console client installed to APEXes for scripts to invoke
+// directly. Uses the stable API.
+
+#include <fcntl.h>
+#include <sysexits.h>
+#include <unistd.h>
+
+#include <vector>
+
+#include <aidl/android/os/BnUpdateEngineStableCallback.h>
+#include <aidl/android/os/IUpdateEngineStable.h>
+#include <android-base/logging.h>
+#include <android-base/strings.h>
+#include <android/binder_manager.h>
+#include <android/binder_process.h>
+#include <android/binder_ibinder.h>
+#include <common/error_code.h>
+#include <gflags/gflags.h>
+#include <utils/StrongPointer.h>
+
+namespace chromeos_update_engine::internal {
+
+DEFINE_string(payload,
+ "file:///path/to/payload.bin",
+ "The file URI to the update payload to use, or path to the file");
+DEFINE_int64(offset,
+ 0,
+ "The offset in the payload where the CrAU update starts.");
+DEFINE_int64(size,
+ 0,
+ "The size of the CrAU part of the payload. If 0 is passed, it "
+ "will be autodetected.");
+DEFINE_string(headers,
+ "",
+ "A list of key-value pairs, one element of the list per line.");
+
+[[noreturn]] int Exit(int return_code) {
+ LOG(INFO) << "Exit: " << return_code;
+ exit(return_code);
+}
+// Called whenever the UpdateEngine daemon dies.
+void UpdateEngineServiceDied(void*) {
+ LOG(ERROR) << "UpdateEngineService died.";
+ Exit(EX_SOFTWARE);
+}
+
+class UpdateEngineClientAndroid {
+ public:
+ UpdateEngineClientAndroid() = default;
+ int Run();
+
+ private:
+ class UECallback : public aidl::android::os::BnUpdateEngineStableCallback {
+ public:
+ UECallback() = default;
+
+ // android::os::BnUpdateEngineStableCallback overrides.
+ ndk::ScopedAStatus onStatusUpdate(int status_code, float progress) override;
+ ndk::ScopedAStatus onPayloadApplicationComplete(int error_code) override;
+ };
+
+ static std::vector<std::string> ParseHeaders(const std::string& arg);
+
+ const ndk::ScopedAIBinder_DeathRecipient death_recipient_{
+ AIBinder_DeathRecipient_new(&UpdateEngineServiceDied)};
+ std::shared_ptr<aidl::android::os::IUpdateEngineStable> service_;
+ std::shared_ptr<aidl::android::os::BnUpdateEngineStableCallback> callback_;
+};
+
+ndk::ScopedAStatus UpdateEngineClientAndroid::UECallback::onStatusUpdate(
+ int status_code, float progress) {
+ LOG(INFO) << "onStatusUpdate(" << status_code << ", " << progress << ")";
+ return ndk::ScopedAStatus::ok();
+}
+
+ndk::ScopedAStatus
+UpdateEngineClientAndroid::UECallback::onPayloadApplicationComplete(
+ int error_code) {
+ LOG(INFO) << "onPayloadApplicationComplete(" << error_code << ")";
+ auto code = static_cast<ErrorCode>(error_code);
+ Exit((code == ErrorCode::kSuccess || code == ErrorCode::kUpdatedButNotActive)
+ ? EX_OK
+ : EX_SOFTWARE);
+}
+
+int UpdateEngineClientAndroid::Run() {
+ service_ = aidl::android::os::IUpdateEngineStable::fromBinder(ndk::SpAIBinder(
+ AServiceManager_getService("android.os.UpdateEngineStableService")));
+ if (service_ == nullptr) {
+ LOG(ERROR)
+ << "Failed to get IUpdateEngineStable binder from service manager.";
+ return EX_SOFTWARE;
+ }
+
+ // Register a callback object with the service.
+ callback_ = ndk::SharedRefBase::make<UECallback>();
+ bool bound;
+ if (!service_->bind(callback_, &bound).isOk() || !bound) {
+ LOG(ERROR) << "Failed to bind() the UpdateEngine daemon.";
+ return EX_SOFTWARE;
+ }
+
+ auto headers = ParseHeaders(FLAGS_headers);
+ ndk::ScopedAStatus status;
+ const char* payload_path;
+ std::string file_prefix = "file://";
+ if (android::base::StartsWith(FLAGS_payload, file_prefix)) {
+ payload_path = FLAGS_payload.data() + file_prefix.length();
+ } else {
+ payload_path = FLAGS_payload.data();
+ }
+ ndk::ScopedFileDescriptor ufd(
+ TEMP_FAILURE_RETRY(open(payload_path, O_RDONLY)));
+ if (ufd.get() < 0) {
+ PLOG(ERROR) << "Can't open " << payload_path;
+ return EX_SOFTWARE;
+ }
+ status = service_->applyPayloadFd(ufd, FLAGS_offset, FLAGS_size, headers);
+ if (!status.isOk()) {
+ LOG(ERROR) << "Cannot apply payload: " << status.getDescription();
+ return EX_SOFTWARE;
+ }
+
+ // When following updates status changes, exit if the update_engine daemon
+ // dies.
+ if (AIBinder_linkToDeath(service_->asBinder().get(),
+ death_recipient_.get(),
+ nullptr) != STATUS_OK) {
+ return EX_SOFTWARE;
+ }
+
+ return EX_OK;
+}
+
+std::vector<std::string> UpdateEngineClientAndroid::ParseHeaders(
+ const std::string& arg) {
+ std::vector<std::string> lines = android::base::Split(arg, "\n");
+ std::vector<std::string> headers;
+ for (const auto& line : lines) {
+ auto header = android::base::Trim(line);
+ if (!header.empty()) {
+ headers.push_back(header);
+ }
+ }
+ return headers;
+}
+
+} // namespace chromeos_update_engine::internal
+
+int main(int argc, char** argv) {
+ android::base::InitLogging(argv);
+ gflags::ParseCommandLineFlags(&argc, &argv, true);
+
+ // Unlike other update_engine* processes that use message loops,
+ // update_engine_stable_client uses a thread pool model. However, the number
+ // of threads is limited to 1; that is, 0 additional threads should be spawned.
+ // This avoids some race conditions.
+ if (!ABinderProcess_setThreadPoolMaxThreadCount(0)) {
+ LOG(ERROR) << "Cannot set thread pool max thread count";
+ return EX_SOFTWARE;
+ }
+ ABinderProcess_startThreadPool();
+
+ chromeos_update_engine::internal::UpdateEngineClientAndroid client{};
+ int code = client.Run();
+ if (code != EX_OK)
+ return code;
+
+ ABinderProcess_joinThreadPool();
+ LOG(ERROR) << "Exited from joinThreadPool.";
+ return EX_SOFTWARE;
+}
diff --git a/update_engine.conf b/update_engine.conf
index af213ad..b6ca3c4 100644
--- a/update_engine.conf
+++ b/update_engine.conf
@@ -1,2 +1,2 @@
PAYLOAD_MAJOR_VERSION=2
-PAYLOAD_MINOR_VERSION=6
+PAYLOAD_MINOR_VERSION=7
diff --git a/update_manager/chromeos_policy.cc b/update_manager/chromeos_policy.cc
index b96e29d..be5f914 100644
--- a/update_manager/chromeos_policy.cc
+++ b/update_manager/chromeos_policy.cc
@@ -466,7 +466,7 @@
// ConnectionManager::IsUpdateAllowedOver(); be sure to deprecate the latter.
//
// TODO(garnold) The current logic generally treats the list of allowed
-// connections coming from the device policy as a whitelist, meaning that it
+// connections coming from the device policy as an allowlist, meaning that it
// can only be used for enabling connections, but not disable them. Further,
// certain connection types cannot be enabled even by policy.
// In effect, the only thing that device policy can change is to enable
@@ -598,7 +598,6 @@
string* error,
UpdateBackoffAndDownloadUrlResult* result,
const UpdateState& update_state) const {
- // Sanity checks.
DCHECK_GE(update_state.download_errors_max, 0);
// Set default result values.
@@ -670,7 +669,7 @@
Time prev_err_time;
bool is_first = true;
for (const auto& err_tuple : update_state.download_errors) {
- // Do some sanity checks.
+ // Do some validation checks.
int used_url_idx = get<0>(err_tuple);
if (is_first && url_idx >= 0 && used_url_idx != url_idx) {
LOG(WARNING) << "First URL in error log (" << used_url_idx
diff --git a/update_metadata.proto b/update_metadata.proto
index e6a067e..f79e38b 100644
--- a/update_metadata.proto
+++ b/update_metadata.proto
@@ -288,6 +288,11 @@
// The number of FEC roots.
optional uint32 fec_roots = 16 [default = 2];
+
+ // Per-partition version used for downgrade detection, added as part of the
+ // effort to support partial updates. For most partitions, this is the build
+ // timestamp.
+ optional string version = 17;
}
message DynamicPartitionGroup {
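
As a quick illustration of how the new per-partition field can be consumed with the regenerated Python bindings, a minimal sketch (the serialized manifest bytes are assumed to come from elsewhere, e.g. an already-extracted payload):

from update_payload import update_metadata_pb2

def partition_versions(manifest_bytes):
    # Map partition name -> per-partition version (field 17), where present.
    manifest = update_metadata_pb2.DeltaArchiveManifest()
    manifest.ParseFromString(manifest_bytes)
    return {p.partition_name: p.version
            for p in manifest.partitions if p.HasField('version')}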