Add SafetyNet logging for payload timestamp error. am: 15c6d372ee -s ours am: 8186fba1e3 -s ours am: 5b7432ad44 -s ours am: 25f492ec72 -s ours am: 3cbb235071 -s ours am: 475c758720 -s ours am: 62fb7eb258 -s ours
am: 0c363aff69 -s ours
Change-Id: Idb186b081de51667b164c8d32c96dc22cb5dbdec
diff --git a/Android.bp b/Android.bp
new file mode 100644
index 0000000..c3d164b
--- /dev/null
+++ b/Android.bp
@@ -0,0 +1,8 @@
+// AIDL interface between libupdate_engine and framework.jar
+filegroup {
+ name: "libupdate_engine_aidl",
+ srcs: [
+ "binder_bindings/android/os/IUpdateEngine.aidl",
+ "binder_bindings/android/os/IUpdateEngineCallback.aidl",
+ ],
+}
diff --git a/Android.mk b/Android.mk
index a3a7017..ddf633d 100644
--- a/Android.mk
+++ b/Android.mk
@@ -14,6 +14,8 @@
# limitations under the License.
#
+ifneq ($(TARGET_BUILD_PDK),true)
+
LOCAL_PATH := $(my-dir)
# Default values for the USE flags. Override these USE flags from your product
@@ -22,18 +24,18 @@
local_use_binder := $(if $(BRILLO_USE_BINDER),$(BRILLO_USE_BINDER),1)
local_use_hwid_override := \
$(if $(BRILLO_USE_HWID_OVERRIDE),$(BRILLO_USE_HWID_OVERRIDE),0)
-# "libcros" gates the LibCrosService exposed by the Chrome OS' chrome browser to
-# the system layer.
-local_use_libcros := $(if $(BRILLO_USE_LIBCROS),$(BRILLO_USE_LIBCROS),0)
local_use_mtd := $(if $(BRILLO_USE_MTD),$(BRILLO_USE_MTD),0)
+local_use_chrome_network_proxy := 0
+local_use_chrome_kiosk_app := 0
# IoT devices use Omaha for updates.
local_use_omaha := $(if $(filter true,$(PRODUCT_IOT)),1,0)
ue_common_cflags := \
-DUSE_BINDER=$(local_use_binder) \
+ -DUSE_CHROME_NETWORK_PROXY=$(local_use_chrome_network_proxy) \
+ -DUSE_CHROME_KIOSK_APP=$(local_use_chrome_kiosk_app) \
-DUSE_HWID_OVERRIDE=$(local_use_hwid_override) \
- -DUSE_LIBCROS=$(local_use_libcros) \
-DUSE_MTD=$(local_use_mtd) \
-DUSE_OMAHA=$(local_use_omaha) \
-D_FILE_OFFSET_BITS=64 \
@@ -82,6 +84,7 @@
generated_sources_dir := $(call local-generated-sources-dir)
LOCAL_EXPORT_C_INCLUDE_DIRS := $(generated_sources_dir)/proto/system
LOCAL_SRC_FILES := $(ue_update_metadata_protos_src_files)
+LOCAL_CFLAGS := -Wall -Werror
include $(BUILD_HOST_STATIC_LIBRARY)
# Build for the target.
@@ -91,6 +94,7 @@
generated_sources_dir := $(call local-generated-sources-dir)
LOCAL_EXPORT_C_INCLUDE_DIRS := $(generated_sources_dir)/proto/system
LOCAL_SRC_FILES := $(ue_update_metadata_protos_src_files)
+LOCAL_CFLAGS := -Wall -Werror
include $(BUILD_STATIC_LIBRARY)
# libpayload_consumer (type: static_library)
@@ -101,6 +105,8 @@
libxz \
libbz \
libbspatch \
+ libbrotli \
+ libpuffpatch \
$(ue_update_metadata_protos_exported_static_libraries)
ue_libpayload_consumer_exported_shared_libraries := \
libcrypto \
@@ -125,10 +131,13 @@
common/terminator.cc \
common/utils.cc \
payload_consumer/bzip_extent_writer.cc \
+ payload_consumer/cached_file_descriptor.cc \
payload_consumer/delta_performer.cc \
payload_consumer/download_action.cc \
+ payload_consumer/extent_reader.cc \
payload_consumer/extent_writer.cc \
payload_consumer/file_descriptor.cc \
+ payload_consumer/file_descriptor_utils.cc \
payload_consumer/file_writer.cc \
payload_consumer/filesystem_verifier_action.cc \
payload_consumer/install_plan.cc \
@@ -143,7 +152,6 @@
LOCAL_MODULE := libpayload_consumer
LOCAL_MODULE_CLASS := STATIC_LIBRARIES
LOCAL_CPP_EXTENSION := .cc
-LOCAL_CLANG := true
LOCAL_CFLAGS := $(ue_common_cflags)
LOCAL_CPPFLAGS := $(ue_common_cppflags)
LOCAL_LDFLAGS := $(ue_common_ldflags)
@@ -167,7 +175,6 @@
LOCAL_MODULE := libpayload_consumer
LOCAL_MODULE_CLASS := STATIC_LIBRARIES
LOCAL_CPP_EXTENSION := .cc
-LOCAL_CLANG := true
LOCAL_CFLAGS := $(ue_common_cflags)
LOCAL_CPPFLAGS := $(ue_common_cppflags)
LOCAL_LDFLAGS := $(ue_common_ldflags)
@@ -203,7 +210,6 @@
LOCAL_MODULE := libupdate_engine_boot_control
LOCAL_MODULE_CLASS := STATIC_LIBRARIES
LOCAL_CPP_EXTENSION := .cc
-LOCAL_CLANG := true
LOCAL_CFLAGS := $(ue_common_cflags)
LOCAL_CPPFLAGS := $(ue_common_cppflags)
LOCAL_LDFLAGS := $(ue_common_ldflags)
@@ -261,7 +267,6 @@
LOCAL_MODULE := libupdate_engine
LOCAL_MODULE_CLASS := STATIC_LIBRARIES
LOCAL_CPP_EXTENSION := .cc
-LOCAL_CLANG := true
LOCAL_EXPORT_C_INCLUDE_DIRS := $(ue_libupdate_engine_exported_c_includes)
LOCAL_CFLAGS := $(ue_common_cflags)
LOCAL_CPPFLAGS := $(ue_common_cppflags)
@@ -291,7 +296,7 @@
hardware_android.cc \
image_properties_android.cc \
libcurl_http_fetcher.cc \
- metrics.cc \
+ metrics_reporter_omaha.cc \
metrics_utils.cc \
omaha_request_action.cc \
omaha_request_params.cc \
@@ -303,10 +308,15 @@
proxy_resolver.cc \
real_system_state.cc \
update_attempter.cc \
+ update_manager/android_things_policy.cc \
+ update_manager/api_restricted_downloads_policy_impl.cc \
update_manager/boxed_value.cc \
- update_manager/chromeos_policy.cc \
update_manager/default_policy.cc \
+ update_manager/enough_slots_ab_updates_policy_impl.cc \
update_manager/evaluation_context.cc \
+ update_manager/interactive_update_policy_impl.cc \
+ update_manager/next_update_check_policy_impl.cc \
+ update_manager/official_build_check_policy_impl.cc \
update_manager/policy.cc \
update_manager/real_config_provider.cc \
update_manager/real_device_policy_provider.cc \
@@ -326,10 +336,10 @@
binder_service_brillo.cc \
parcelable_update_engine_status.cc
endif # local_use_binder == 1
-ifeq ($(local_use_libcros),1)
+ifeq ($(local_use_chrome_network_proxy),1)
LOCAL_SRC_FILES += \
chrome_browser_proxy_resolver.cc
-endif # local_use_libcros == 1
+endif # local_use_chrome_network_proxy == 1
include $(BUILD_STATIC_LIBRARY)
else # local_use_omaha == 1
@@ -359,6 +369,7 @@
libbrillo-binder \
libcutils \
libcurl \
+ libmetricslogger \
libssl \
libutils
@@ -366,7 +377,6 @@
LOCAL_MODULE := libupdate_engine_android
LOCAL_MODULE_CLASS := STATIC_LIBRARIES
LOCAL_CPP_EXTENSION := .cc
-LOCAL_CLANG := true
LOCAL_CFLAGS := $(ue_common_cflags)
LOCAL_CPPFLAGS := $(ue_common_cppflags)
LOCAL_LDFLAGS := $(ue_common_ldflags)
@@ -393,6 +403,8 @@
daemon_state_android.cc \
hardware_android.cc \
libcurl_http_fetcher.cc \
+ metrics_reporter_android.cc \
+ metrics_utils.cc \
network_selector_android.cc \
proxy_resolver.cc \
update_attempter_android.cc \
@@ -411,7 +423,6 @@
LOCAL_REQUIRED_MODULES := \
cacerts_google
LOCAL_CPP_EXTENSION := .cc
-LOCAL_CLANG := true
LOCAL_CFLAGS := $(ue_common_cflags)
LOCAL_CPPFLAGS := $(ue_common_cppflags)
LOCAL_LDFLAGS := $(ue_common_ldflags)
@@ -453,7 +464,6 @@
LOCAL_MODULE_PATH := $(TARGET_RECOVERY_ROOT_OUT)/sbin
LOCAL_MODULE_CLASS := EXECUTABLES
LOCAL_CPP_EXTENSION := .cc
-LOCAL_CLANG := true
LOCAL_CFLAGS := \
$(ue_common_cflags) \
-D_UE_SIDELOAD
@@ -469,6 +479,8 @@
LOCAL_SRC_FILES := \
boot_control_recovery.cc \
hardware_android.cc \
+ metrics_reporter_stub.cc \
+ metrics_utils.cc \
network_selector_stub.cc \
proxy_resolver.cc \
sideload_main.cc \
@@ -489,7 +501,7 @@
# library dependencies of these static libraries.
LOCAL_STATIC_LIBRARIES += \
$(ue_common_shared_libraries) \
- libcutils \
+ libbase \
liblog \
$(ue_libpayload_consumer_exported_shared_libraries:-host=) \
$(ue_update_metadata_protos_exported_shared_libraries) \
@@ -522,7 +534,6 @@
-Werror \
-Wno-unused-parameter \
-DUSE_BINDER=$(local_use_binder)
-LOCAL_CLANG := true
LOCAL_CPP_EXTENSION := .cc
# TODO(deymo): Remove "external/cros/system_api/dbus" when dbus is not used.
LOCAL_C_INCLUDES := \
@@ -560,7 +571,6 @@
LOCAL_MODULE := update_engine_client
LOCAL_MODULE_CLASS := EXECUTABLES
LOCAL_CPP_EXTENSION := .cc
-LOCAL_CLANG := true
LOCAL_CFLAGS := $(ue_common_cflags)
LOCAL_CPPFLAGS := $(ue_common_cppflags)
LOCAL_LDFLAGS := $(ue_common_ldflags)
@@ -599,9 +609,14 @@
# server-side code. This is used for delta_generator and unittests but not
# for any client code.
ue_libpayload_generator_exported_static_libraries := \
- libpayload_consumer \
- update_metadata-protos \
+ libbsdiff \
+ libdivsufsort \
+ libdivsufsort64 \
+ libbrotli \
liblzma \
+ libpayload_consumer \
+ libpuffdiff \
+ update_metadata-protos \
$(ue_libpayload_consumer_exported_static_libraries) \
$(ue_update_metadata_protos_exported_static_libraries)
ue_libpayload_generator_exported_shared_libraries := \
@@ -616,6 +631,7 @@
payload_generator/block_mapping.cc \
payload_generator/bzip.cc \
payload_generator/cycle_breaker.cc \
+ payload_generator/deflate_utils.cc \
payload_generator/delta_diff_generator.cc \
payload_generator/delta_diff_utils.cc \
payload_generator/ext2_filesystem.cc \
@@ -630,6 +646,7 @@
payload_generator/payload_generation_config.cc \
payload_generator/payload_signer.cc \
payload_generator/raw_filesystem.cc \
+ payload_generator/squashfs_filesystem.cc \
payload_generator/tarjan.cc \
payload_generator/topological_sort.cc \
payload_generator/xz_android.cc
@@ -640,15 +657,18 @@
LOCAL_MODULE := libpayload_generator
LOCAL_MODULE_CLASS := STATIC_LIBRARIES
LOCAL_CPP_EXTENSION := .cc
-LOCAL_CLANG := true
LOCAL_CFLAGS := $(ue_common_cflags)
LOCAL_CPPFLAGS := $(ue_common_cppflags)
LOCAL_LDFLAGS := $(ue_common_ldflags)
LOCAL_C_INCLUDES := $(ue_common_c_includes)
LOCAL_STATIC_LIBRARIES := \
- libpayload_consumer \
- update_metadata-protos \
+ libbsdiff \
+ libdivsufsort \
+ libdivsufsort64 \
liblzma \
+ libpayload_consumer \
+ libpuffdiff \
+ update_metadata-protos \
$(ue_common_static_libraries) \
$(ue_libpayload_consumer_exported_static_libraries) \
$(ue_update_metadata_protos_exported_static_libraries)
@@ -666,12 +686,14 @@
LOCAL_MODULE := libpayload_generator
LOCAL_MODULE_CLASS := STATIC_LIBRARIES
LOCAL_CPP_EXTENSION := .cc
-LOCAL_CLANG := true
LOCAL_CFLAGS := $(ue_common_cflags)
LOCAL_CPPFLAGS := $(ue_common_cppflags)
LOCAL_LDFLAGS := $(ue_common_ldflags)
LOCAL_C_INCLUDES := $(ue_common_c_includes)
LOCAL_STATIC_LIBRARIES := \
+ libbsdiff \
+ libdivsufsort \
+ libdivsufsort64 \
libpayload_consumer \
update_metadata-protos \
liblzma \
@@ -696,12 +718,8 @@
# Build for the host.
include $(CLEAR_VARS)
LOCAL_MODULE := delta_generator
-LOCAL_REQUIRED_MODULES := \
- bsdiff \
- imgdiff
LOCAL_MODULE_CLASS := EXECUTABLES
LOCAL_CPP_EXTENSION := .cc
-LOCAL_CLANG := true
LOCAL_CFLAGS := $(ue_common_cflags)
LOCAL_CPPFLAGS := $(ue_common_cppflags)
LOCAL_LDFLAGS := $(ue_common_ldflags)
@@ -727,7 +745,6 @@
LOCAL_MODULE_STEM := delta_generator
LOCAL_MODULE_CLASS := EXECUTABLES
LOCAL_CPP_EXTENSION := .cc
-LOCAL_CLANG := true
LOCAL_CFLAGS := $(ue_common_cflags)
LOCAL_CPPFLAGS := $(ue_common_cppflags)
LOCAL_LDFLAGS := $(ue_common_ldflags)
@@ -802,15 +819,6 @@
$(call ue-unittest-sample-image,disk_ext2_4k_empty.img)
$(call ue-unittest-sample-image,disk_ext2_unittest.img)
-# Zlib Fingerprint
-# ========================================================
-include $(CLEAR_VARS)
-LOCAL_MODULE := zlib_fingerprint
-LOCAL_MODULE_CLASS := ETC
-LOCAL_MODULE_PATH := $(TARGET_OUT_DATA_NATIVE_TESTS)/update_engine_unittests
-LOCAL_PREBUILT_MODULE_FILE := $(TARGET_OUT_COMMON_GEN)/zlib_fingerprint
-include $(BUILD_PREBUILT)
-
# update_engine.conf
# ========================================================
include $(CLEAR_VARS)
@@ -829,7 +837,6 @@
LOCAL_MODULE_PATH := $(TARGET_OUT_DATA_NATIVE_TESTS)/update_engine_unittests
LOCAL_MODULE_CLASS := EXECUTABLES
LOCAL_CPP_EXTENSION := .cc
-LOCAL_CLANG := true
LOCAL_CFLAGS := $(ue_common_cflags)
LOCAL_CPPFLAGS := $(ue_common_cppflags)
LOCAL_LDFLAGS := $(ue_common_ldflags)
@@ -841,28 +848,6 @@
test_http_server.cc
include $(BUILD_EXECUTABLE)
-# bsdiff (type: executable)
-# ========================================================
-# We need bsdiff in the update_engine_unittests directory, so we build it here.
-include $(CLEAR_VARS)
-LOCAL_MODULE := ue_unittest_bsdiff
-LOCAL_MODULE_PATH := $(TARGET_OUT_DATA_NATIVE_TESTS)/update_engine_unittests
-LOCAL_MODULE_STEM := bsdiff
-LOCAL_CPP_EXTENSION := .cc
-LOCAL_SRC_FILES := ../../external/bsdiff/bsdiff_main.cc
-LOCAL_CFLAGS := \
- -D_FILE_OFFSET_BITS=64 \
- -Wall \
- -Werror \
- -Wextra \
- -Wno-unused-parameter
-LOCAL_STATIC_LIBRARIES := \
- libbsdiff \
- libbz \
- libdivsufsort64 \
- libdivsufsort
-include $(BUILD_EXECUTABLE)
-
# test_subprocess (type: executable)
# ========================================================
# Test helper subprocess program.
@@ -871,7 +856,6 @@
LOCAL_MODULE_PATH := $(TARGET_OUT_DATA_NATIVE_TESTS)/update_engine_unittests
LOCAL_MODULE_CLASS := EXECUTABLES
LOCAL_CPP_EXTENSION := .cc
-LOCAL_CLANG := true
LOCAL_CFLAGS := $(ue_common_cflags)
LOCAL_CPPFLAGS := $(ue_common_cppflags)
LOCAL_LDFLAGS := $(ue_common_ldflags)
@@ -889,7 +873,6 @@
LOCAL_REQUIRED_MODULES := \
test_http_server \
test_subprocess \
- ue_unittest_bsdiff \
ue_unittest_delta_generator \
ue_unittest_disk_ext2_1k.img \
ue_unittest_disk_ext2_4k.img \
@@ -899,10 +882,8 @@
ue_unittest_key.pub.pem \
ue_unittest_key2.pem \
ue_unittest_key2.pub.pem \
- ue_unittest_update_engine.conf \
- zlib_fingerprint
+ ue_unittest_update_engine.conf
LOCAL_CPP_EXTENSION := .cc
-LOCAL_CLANG := true
LOCAL_CFLAGS := $(ue_common_cflags)
LOCAL_CPPFLAGS := $(ue_common_cppflags)
LOCAL_LDFLAGS := $(ue_common_ldflags)
@@ -937,9 +918,13 @@
common/test_utils.cc \
common/utils_unittest.cc \
payload_consumer/bzip_extent_writer_unittest.cc \
+ payload_consumer/cached_file_descriptor_unittest.cc \
payload_consumer/delta_performer_integration_test.cc \
payload_consumer/delta_performer_unittest.cc \
+ payload_consumer/extent_reader_unittest.cc \
payload_consumer/extent_writer_unittest.cc \
+ payload_consumer/fake_file_descriptor.cc \
+ payload_consumer/file_descriptor_utils_unittest.cc \
payload_consumer/file_writer_unittest.cc \
payload_consumer/filesystem_verifier_action_unittest.cc \
payload_consumer/postinstall_runner_action_unittest.cc \
@@ -948,6 +933,7 @@
payload_generator/blob_file_writer_unittest.cc \
payload_generator/block_mapping_unittest.cc \
payload_generator/cycle_breaker_unittest.cc \
+ payload_generator/deflate_utils_unittest.cc \
payload_generator/delta_diff_utils_unittest.cc \
payload_generator/ext2_filesystem_unittest.cc \
payload_generator/extent_ranges_unittest.cc \
@@ -960,6 +946,7 @@
payload_generator/payload_file_unittest.cc \
payload_generator/payload_generation_config_unittest.cc \
payload_generator/payload_signer_unittest.cc \
+ payload_generator/squashfs_filesystem_unittest.cc \
payload_generator/tarjan_unittest.cc \
payload_generator/topological_sort_unittest.cc \
payload_generator/zip_unittest.cc \
@@ -977,6 +964,7 @@
common_service_unittest.cc \
fake_system_state.cc \
image_properties_android_unittest.cc \
+ metrics_reporter_omaha_unittest.cc \
metrics_utils_unittest.cc \
omaha_request_action_unittest.cc \
omaha_request_params_unittest.cc \
@@ -985,11 +973,16 @@
p2p_manager_unittest.cc \
payload_consumer/download_action_unittest.cc \
payload_state_unittest.cc \
+ parcelable_update_engine_status_unittest.cc \
update_attempter_unittest.cc \
+ update_manager/android_things_policy_unittest.cc \
update_manager/boxed_value_unittest.cc \
+ update_manager/chromeos_policy.cc \
update_manager/chromeos_policy_unittest.cc \
update_manager/evaluation_context_unittest.cc \
update_manager/generic_variables_unittest.cc \
+ update_manager/next_update_check_policy_impl_unittest.cc \
+ update_manager/policy_test_utils.cc \
update_manager/prng_unittest.cc \
update_manager/real_device_policy_provider_unittest.cc \
update_manager/real_random_provider_unittest.cc \
@@ -1005,11 +998,9 @@
$(ue_libupdate_engine_android_exported_static_libraries:-host=)
LOCAL_SHARED_LIBRARIES += \
$(ue_libupdate_engine_android_exported_shared_libraries:-host=)
-endif # local_use_omaha == 1
-ifeq ($(local_use_libcros),1)
LOCAL_SRC_FILES += \
- chrome_browser_proxy_resolver_unittest.cc
-endif # local_use_libcros == 1
+ update_attempter_android_unittest.cc
+endif # local_use_omaha == 1
include $(BUILD_NATIVE_TEST)
# Update payload signing public key.
@@ -1036,6 +1027,9 @@
LOCAL_MODULE_TAGS := optional
LOCAL_REQUIRED_MODULES := \
delta_generator \
- shflags
+ shflags \
+ simg2img
include $(BUILD_PREBUILT)
endif # HOST_OS == linux
+
+endif # ifneq ($(TARGET_BUILD_PDK),true)
diff --git a/COMMIT-QUEUE.ini b/COMMIT-QUEUE.ini
new file mode 100644
index 0000000..ed99b9f
--- /dev/null
+++ b/COMMIT-QUEUE.ini
@@ -0,0 +1,11 @@
+# Copyright 2017 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Per-project Commit Queue settings.
+# Documentation: http://goo.gl/5J7oND
+
+[GENERAL]
+
+# Moblab testing is needed because of the update_payloads ebuild.
+pre-cq-configs: default guado_moblab-no-vmtest-pre-cq
diff --git a/CPPLINT.cfg b/CPPLINT.cfg
new file mode 100644
index 0000000..3dd0f35
--- /dev/null
+++ b/CPPLINT.cfg
@@ -0,0 +1,3 @@
+# This should be kept in sync with platform2/CPPLINT.cfg
+set noparent
+filter=-build/include_order,+build/include_alpha,-build/header_guard
diff --git a/OWNERS b/OWNERS
index f64bd32..0bf7587 100644
--- a/OWNERS
+++ b/OWNERS
@@ -6,3 +6,4 @@
# Chromium OS maintainers:
benchan@google.com
+ahassani@google.com
diff --git a/PRESUBMIT.cfg b/PRESUBMIT.cfg
index 087dfa3..3b8b271 100644
--- a/PRESUBMIT.cfg
+++ b/PRESUBMIT.cfg
@@ -1,3 +1,7 @@
+[Hook Scripts]
+hook0=../../../../chromite/bin/cros lint ${PRESUBMIT_FILES}
+hook1=../../../platform2/common-mk/gyplint.py ${PRESUBMIT_FILES}
+
[Hook Overrides]
cros_license_check: false
aosp_license_check: true
diff --git a/PREUPLOAD.cfg b/PREUPLOAD.cfg
index 706dfa8..40ddcd1 100644
--- a/PREUPLOAD.cfg
+++ b/PREUPLOAD.cfg
@@ -1,3 +1,4 @@
[Builtin Hooks]
clang_format = true
+cpplint = true
pylint = true
diff --git a/binder_bindings/android/brillo/IUpdateEngine.aidl b/binder_bindings/android/brillo/IUpdateEngine.aidl
index b1a1b4f..e549a4d 100644
--- a/binder_bindings/android/brillo/IUpdateEngine.aidl
+++ b/binder_bindings/android/brillo/IUpdateEngine.aidl
@@ -20,7 +20,8 @@
import android.brillo.ParcelableUpdateEngineStatus;
interface IUpdateEngine {
- void AttemptUpdate(in String app_version, in String omaha_url, in int flags);
+ void SetUpdateAttemptFlags(in int flags);
+ boolean AttemptUpdate(in String app_version, in String omaha_url, in int flags);
void AttemptRollback(in boolean powerwash);
boolean CanRollback();
void ResetStatus();
diff --git a/binder_bindings/android/brillo/IUpdateEngineStatusCallback.aidl b/binder_bindings/android/brillo/IUpdateEngineStatusCallback.aidl
index 42b6438..837d44d 100644
--- a/binder_bindings/android/brillo/IUpdateEngineStatusCallback.aidl
+++ b/binder_bindings/android/brillo/IUpdateEngineStatusCallback.aidl
@@ -16,8 +16,9 @@
package android.brillo;
+import android.brillo.ParcelableUpdateEngineStatus;
+
interface IUpdateEngineStatusCallback {
oneway
- void HandleStatusUpdate(in long last_checked_time, in double progress,
- in String current_operation, in String new_version, in long new_size);
+ void HandleStatusUpdate(in ParcelableUpdateEngineStatus status);
}
diff --git a/binder_service_android.cc b/binder_service_android.cc
index e179c62..0305727 100644
--- a/binder_service_android.cc
+++ b/binder_service_android.cc
@@ -24,6 +24,7 @@
using android::binder::Status;
using android::os::IUpdateEngineCallback;
+using update_engine::UpdateEngineStatus;
namespace {
Status ErrorPtrToStatus(const brillo::ErrorPtr& error) {
@@ -40,13 +41,9 @@
}
void BinderUpdateEngineAndroidService::SendStatusUpdate(
- int64_t /* last_checked_time */,
- double progress,
- update_engine::UpdateStatus status,
- const std::string& /* new_version */,
- int64_t /* new_size */) {
- last_status_ = static_cast<int>(status);
- last_progress_ = progress;
+ const UpdateEngineStatus& update_engine_status) {
+ last_status_ = static_cast<int>(update_engine_status.status);
+ last_progress_ = update_engine_status.progress;
for (auto& callback : callbacks_) {
callback->onStatusUpdate(last_status_, last_progress_);
}
diff --git a/binder_service_android.h b/binder_service_android.h
index 7d66fcc..eb36e4c 100644
--- a/binder_service_android.h
+++ b/binder_service_android.h
@@ -45,11 +45,8 @@
}
// ServiceObserverInterface overrides.
- void SendStatusUpdate(int64_t last_checked_time,
- double progress,
- update_engine::UpdateStatus status,
- const std::string& new_version,
- int64_t new_size) override;
+ void SendStatusUpdate(
+ const update_engine::UpdateEngineStatus& update_engine_status) override;
void SendPayloadApplicationComplete(ErrorCode error_code) override;
// android::os::BnUpdateEngine overrides.
diff --git a/binder_service_brillo.cc b/binder_service_brillo.cc
index 5e74159..3f01e42 100644
--- a/binder_service_brillo.cc
+++ b/binder_service_brillo.cc
@@ -33,6 +33,7 @@
using android::sp;
using brillo::ErrorPtr;
using std::string;
+using update_engine::UpdateEngineStatus;
namespace chromeos_update_engine {
@@ -57,12 +58,20 @@
return ToStatus(&error);
}
+Status BinderUpdateEngineBrilloService::SetUpdateAttemptFlags(int flags) {
+ return CallCommonHandler(&UpdateEngineService::SetUpdateAttemptFlags, flags);
+}
+
Status BinderUpdateEngineBrilloService::AttemptUpdate(
- const String16& app_version, const String16& omaha_url, int flags) {
+ const String16& app_version,
+ const String16& omaha_url,
+ int flags,
+ bool* out_result) {
return CallCommonHandler(&UpdateEngineService::AttemptUpdate,
NormalString(app_version),
NormalString(omaha_url),
- flags);
+ flags,
+ out_result);
}
Status BinderUpdateEngineBrilloService::AttemptRollback(bool powerwash) {
@@ -79,19 +88,12 @@
Status BinderUpdateEngineBrilloService::GetStatus(
ParcelableUpdateEngineStatus* status) {
- string current_op;
- string new_version;
-
- auto ret = CallCommonHandler(&UpdateEngineService::GetStatus,
- &status->last_checked_time_,
- &status->progress_,
- &current_op,
- &new_version,
- &status->new_size_);
+ UpdateEngineStatus update_engine_status;
+ auto ret =
+ CallCommonHandler(&UpdateEngineService::GetStatus, &update_engine_status);
if (ret.isOk()) {
- status->current_operation_ = String16{current_op.c_str()};
- status->new_version_ = String16{new_version.c_str()};
+ *status = ParcelableUpdateEngineStatus(update_engine_status);
}
return ret;
@@ -228,18 +230,10 @@
}
void BinderUpdateEngineBrilloService::SendStatusUpdate(
- int64_t last_checked_time,
- double progress,
- update_engine::UpdateStatus status,
- const string& new_version,
- int64_t new_size) {
- const string str_status = UpdateStatusToString(status);
+ const UpdateEngineStatus& update_engine_status) {
+ ParcelableUpdateEngineStatus parcelable_status(update_engine_status);
for (auto& callback : callbacks_) {
- callback->HandleStatusUpdate(last_checked_time,
- progress,
- String16{str_status.c_str()},
- String16{new_version.c_str()},
- new_size);
+ callback->HandleStatusUpdate(parcelable_status);
}
}
diff --git a/binder_service_brillo.h b/binder_service_brillo.h
index 982c7b1..c802fca 100644
--- a/binder_service_brillo.h
+++ b/binder_service_brillo.h
@@ -46,17 +46,16 @@
}
// ServiceObserverInterface overrides.
- void SendStatusUpdate(int64_t last_checked_time,
- double progress,
- update_engine::UpdateStatus status,
- const std::string& new_version,
- int64_t new_size) override;
+ void SendStatusUpdate(
+ const update_engine::UpdateEngineStatus& update_engine_status) override;
void SendPayloadApplicationComplete(ErrorCode error_code) override {}
// android::brillo::BnUpdateEngine overrides.
+ android::binder::Status SetUpdateAttemptFlags(int flags) override;
android::binder::Status AttemptUpdate(const android::String16& app_version,
const android::String16& omaha_url,
- int flags) override;
+ int flags,
+ bool* out_result) override;
android::binder::Status AttemptRollback(bool powerwash) override;
android::binder::Status CanRollback(bool* out_can_rollback) override;
android::binder::Status ResetStatus() override;
diff --git a/boot_control_android.cc b/boot_control_android.cc
index e3ea66d..8c1603b 100644
--- a/boot_control_android.cc
+++ b/boot_control_android.cc
@@ -20,7 +20,6 @@
#include <base/files/file_util.h>
#include <base/logging.h>
#include <base/strings/string_util.h>
-#include <brillo/make_unique_ptr.h>
#include <brillo/message_loops/message_loop.h>
#include "update_engine/common/utils.h"
diff --git a/boot_control_chromeos.cc b/boot_control_chromeos.cc
index e9ad698..aa94d3c 100644
--- a/boot_control_chromeos.cc
+++ b/boot_control_chromeos.cc
@@ -22,7 +22,6 @@
#include <base/files/file_path.h>
#include <base/files/file_util.h>
#include <base/strings/string_util.h>
-#include <brillo/make_unique_ptr.h>
#include <rootdev/rootdev.h>
extern "C" {
diff --git a/boot_control_recovery.cc b/boot_control_recovery.cc
index 39b5ff1..b74f4aa 100644
--- a/boot_control_recovery.cc
+++ b/boot_control_recovery.cc
@@ -20,7 +20,6 @@
#include <base/files/file_util.h>
#include <base/logging.h>
#include <base/strings/string_util.h>
-#include <brillo/make_unique_ptr.h>
#include <brillo/message_loops/message_loop.h>
#include "update_engine/common/utils.h"
diff --git a/certificate_checker_unittest.cc b/certificate_checker_unittest.cc
index 20efce9..66b92d6 100644
--- a/certificate_checker_unittest.cc
+++ b/certificate_checker_unittest.cc
@@ -29,7 +29,7 @@
using ::testing::DoAll;
using ::testing::Return;
-using ::testing::SetArgumentPointee;
+using ::testing::SetArgPointee;
using ::testing::SetArrayArgument;
using ::testing::_;
using std::string;
@@ -78,8 +78,8 @@
TEST_F(CertificateCheckerTest, NewCertificate) {
EXPECT_CALL(openssl_wrapper_, GetCertificateDigest(nullptr, _, _, _))
.WillOnce(DoAll(
- SetArgumentPointee<1>(depth_),
- SetArgumentPointee<2>(length_),
+ SetArgPointee<1>(depth_),
+ SetArgPointee<2>(length_),
SetArrayArgument<3>(digest_, digest_ + 4),
Return(true)));
EXPECT_CALL(prefs_, GetString(cert_key_, _)).WillOnce(Return(false));
@@ -95,12 +95,12 @@
TEST_F(CertificateCheckerTest, SameCertificate) {
EXPECT_CALL(openssl_wrapper_, GetCertificateDigest(nullptr, _, _, _))
.WillOnce(DoAll(
- SetArgumentPointee<1>(depth_),
- SetArgumentPointee<2>(length_),
+ SetArgPointee<1>(depth_),
+ SetArgPointee<2>(length_),
SetArrayArgument<3>(digest_, digest_ + 4),
Return(true)));
EXPECT_CALL(prefs_, GetString(cert_key_, _))
- .WillOnce(DoAll(SetArgumentPointee<1>(digest_hex_), Return(true)));
+ .WillOnce(DoAll(SetArgPointee<1>(digest_hex_), Return(true)));
EXPECT_CALL(prefs_, SetString(_, _)).Times(0);
EXPECT_CALL(observer_,
CertificateChecked(server_to_check_,
@@ -113,12 +113,12 @@
TEST_F(CertificateCheckerTest, ChangedCertificate) {
EXPECT_CALL(openssl_wrapper_, GetCertificateDigest(nullptr, _, _, _))
.WillOnce(DoAll(
- SetArgumentPointee<1>(depth_),
- SetArgumentPointee<2>(length_),
+ SetArgPointee<1>(depth_),
+ SetArgPointee<2>(length_),
SetArrayArgument<3>(digest_, digest_ + 4),
Return(true)));
EXPECT_CALL(prefs_, GetString(cert_key_, _))
- .WillOnce(DoAll(SetArgumentPointee<1>(diff_digest_hex_), Return(true)));
+ .WillOnce(DoAll(SetArgPointee<1>(diff_digest_hex_), Return(true)));
EXPECT_CALL(observer_,
CertificateChecked(server_to_check_,
CertificateCheckResult::kValidChanged));
diff --git a/chrome_browser_proxy_resolver.cc b/chrome_browser_proxy_resolver.cc
index 12a8328..5beecc1 100644
--- a/chrome_browser_proxy_resolver.cc
+++ b/chrome_browser_proxy_resolver.cc
@@ -20,85 +20,26 @@
#include <base/bind.h>
#include <base/memory/ptr_util.h>
-#include <base/strings/string_tokenizer.h>
#include <base/strings/string_util.h>
+#include <brillo/http/http_proxy.h>
-#include "network_proxy/dbus-proxies.h"
+#include "update_engine/dbus_connection.h"
namespace chromeos_update_engine {
-using base::StringTokenizer;
-using std::deque;
-using std::string;
-
-namespace {
-
-// Timeout for D-Bus calls in milliseconds.
-constexpr int kTimeoutMs = 5000;
-
-} // namespace
-
-ChromeBrowserProxyResolver::ChromeBrowserProxyResolver(
- org::chromium::NetworkProxyServiceInterfaceProxyInterface* dbus_proxy)
- : dbus_proxy_(dbus_proxy),
- next_request_id_(kProxyRequestIdNull + 1),
+ChromeBrowserProxyResolver::ChromeBrowserProxyResolver()
+ : next_request_id_(kProxyRequestIdNull + 1),
weak_ptr_factory_(this) {}
ChromeBrowserProxyResolver::~ChromeBrowserProxyResolver() = default;
-// static
-deque<string> ChromeBrowserProxyResolver::ParseProxyString(
- const string& input) {
- deque<string> ret;
- // Some of this code taken from
- // http://src.chromium.org/svn/trunk/src/net/proxy/proxy_server.cc and
- // http://src.chromium.org/svn/trunk/src/net/proxy/proxy_list.cc
- StringTokenizer entry_tok(input, ";");
- while (entry_tok.GetNext()) {
- string token = entry_tok.token();
- base::TrimWhitespaceASCII(token, base::TRIM_ALL, &token);
-
- // Start by finding the first space (if any).
- string::iterator space;
- for (space = token.begin(); space != token.end(); ++space) {
- if (base::IsAsciiWhitespace(*space)) {
- break;
- }
- }
-
- string scheme = base::ToLowerASCII(string(token.begin(), space));
- // Chrome uses "socks" to mean socks4 and "proxy" to mean http.
- if (scheme == "socks")
- scheme += "4";
- else if (scheme == "proxy")
- scheme = "http";
- else if (scheme != "https" &&
- scheme != "socks4" &&
- scheme != "socks5" &&
- scheme != "direct")
- continue; // Invalid proxy scheme
-
- string host_and_port = string(space, token.end());
- base::TrimWhitespaceASCII(host_and_port, base::TRIM_ALL, &host_and_port);
- if (scheme != "direct" && host_and_port.empty())
- continue; // Must supply host/port when non-direct proxy used.
- ret.push_back(scheme + "://" + host_and_port);
- }
- if (ret.empty() || *ret.rbegin() != kNoProxy)
- ret.push_back(kNoProxy);
- return ret;
-}
-
ProxyRequestId ChromeBrowserProxyResolver::GetProxiesForUrl(
- const string& url, const ProxiesResolvedFn& callback) {
+ const std::string& url, const ProxiesResolvedFn& callback) {
const ProxyRequestId id = next_request_id_++;
- dbus_proxy_->ResolveProxyAsync(
- url,
- base::Bind(&ChromeBrowserProxyResolver::OnResolveProxyResponse,
- weak_ptr_factory_.GetWeakPtr(), id),
- base::Bind(&ChromeBrowserProxyResolver::OnResolveProxyError,
- weak_ptr_factory_.GetWeakPtr(), id),
- kTimeoutMs);
+ brillo::http::GetChromeProxyServersAsync(
+ DBusConnection::Get()->GetDBus(), url,
+ base::Bind(&ChromeBrowserProxyResolver::OnGetChromeProxyServers,
+ weak_ptr_factory_.GetWeakPtr(), id));
pending_callbacks_[id] = callback;
return id;
}
@@ -107,32 +48,18 @@
return pending_callbacks_.erase(request) != 0;
}
-void ChromeBrowserProxyResolver::OnResolveProxyResponse(
- ProxyRequestId request_id,
- const std::string& proxy_info,
- const std::string& error_message) {
- if (!error_message.empty())
- LOG(WARNING) << "Got error resolving proxy: " << error_message;
- RunCallback(request_id, ParseProxyString(proxy_info));
-}
-
-void ChromeBrowserProxyResolver::OnResolveProxyError(ProxyRequestId request_id,
- brillo::Error* error) {
- LOG(WARNING) << "Failed to resolve proxy: "
- << (error ? error->GetMessage() : "[null]");
- RunCallback(request_id, deque<string>{kNoProxy});
-}
-
-void ChromeBrowserProxyResolver::RunCallback(
- ProxyRequestId request_id,
- const std::deque<std::string>& proxies) {
+void ChromeBrowserProxyResolver::OnGetChromeProxyServers(
+ ProxyRequestId request_id, bool success,
+ const std::vector<std::string>& proxies) {
+ // If |success| is false, |proxies| will still hold the direct proxy option
+ // which is what we do in our error case.
auto it = pending_callbacks_.find(request_id);
if (it == pending_callbacks_.end())
return;
ProxiesResolvedFn callback = it->second;
pending_callbacks_.erase(it);
- callback.Run(proxies);
+ callback.Run(std::deque<std::string>(proxies.begin(), proxies.end()));
}
-} // namespace chromeos_update_engine
+} // namespace chromeos_update_engine
diff --git a/chrome_browser_proxy_resolver.h b/chrome_browser_proxy_resolver.h
index 03dbdad..fcf85b6 100644
--- a/chrome_browser_proxy_resolver.h
+++ b/chrome_browser_proxy_resolver.h
@@ -20,46 +20,29 @@
#include <deque>
#include <map>
#include <string>
+#include <vector>
#include <base/memory/weak_ptr.h>
#include "update_engine/proxy_resolver.h"
-namespace brillo {
-class Error;
-} // namespace brillo
-
-namespace org {
-namespace chromium {
-class NetworkProxyServiceInterfaceProxyInterface;
-} // namespace chromium
-} // namespace org
-
namespace chromeos_update_engine {
class ChromeBrowserProxyResolver : public ProxyResolver {
public:
- explicit ChromeBrowserProxyResolver(
- org::chromium::NetworkProxyServiceInterfaceProxyInterface* dbus_proxy);
+ ChromeBrowserProxyResolver();
~ChromeBrowserProxyResolver() override;
- // Parses a string-encoded list of proxies and returns a deque
- // of individual proxies. The last one will always be kNoProxy.
- static std::deque<std::string> ParseProxyString(const std::string& input);
-
// ProxyResolver:
ProxyRequestId GetProxiesForUrl(const std::string& url,
const ProxiesResolvedFn& callback) override;
bool CancelProxyRequest(ProxyRequestId request) override;
-private:
- // Callback for successful D-Bus calls made by GetProxiesForUrl().
- void OnResolveProxyResponse(ProxyRequestId request_id,
- const std::string& proxy_info,
- const std::string& error_message);
-
- // Callback for failed D-Bus calls made by GetProxiesForUrl().
- void OnResolveProxyError(ProxyRequestId request_id, brillo::Error* error);
+ private:
+ // Callback for calls made by GetProxiesForUrl().
+ void OnGetChromeProxyServers(ProxyRequestId request_id,
+ bool success,
+ const std::vector<std::string>& proxies);
// Finds the callback identified by |request_id| in |pending_callbacks_|,
// passes |proxies| to it, and deletes it. Does nothing if the request has
@@ -67,9 +50,6 @@
void RunCallback(ProxyRequestId request_id,
const std::deque<std::string>& proxies);
- // D-Bus proxy for resolving network proxies.
- org::chromium::NetworkProxyServiceInterfaceProxyInterface* dbus_proxy_;
-
// Next ID to return from GetProxiesForUrl().
ProxyRequestId next_request_id_;
diff --git a/chrome_browser_proxy_resolver_unittest.cc b/chrome_browser_proxy_resolver_unittest.cc
deleted file mode 100644
index dc71d2b..0000000
--- a/chrome_browser_proxy_resolver_unittest.cc
+++ /dev/null
@@ -1,180 +0,0 @@
-//
-// Copyright (C) 2011 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#include "update_engine/chrome_browser_proxy_resolver.h"
-
-#include <deque>
-#include <string>
-#include <vector>
-
-#include <gmock/gmock.h>
-#include <gtest/gtest.h>
-
-#include <base/macros.h>
-#include <brillo/errors/error.h>
-
-#include "network_proxy/dbus-proxies.h"
-#include "network_proxy/dbus-proxy-mocks.h"
-#include "update_engine/dbus_test_utils.h"
-
-using ::testing::DoAll;
-using ::testing::SaveArg;
-using ::testing::StrEq;
-using ::testing::_;
-using org::chromium::NetworkProxyServiceInterfaceProxyMock;
-using std::deque;
-using std::string;
-using std::vector;
-
-namespace chromeos_update_engine {
-
-namespace {
-
-// Callback for ProxyResolver::GetProxiesForUrl() that copies |src| to |dest|.
-void CopyProxies(deque<string>* dest, const deque<string>& src) {
- *dest = src;
-}
-
-} // namespace
-
-class ChromeBrowserProxyResolverTest : public ::testing::Test {
- public:
- ChromeBrowserProxyResolverTest() = default;
- ~ChromeBrowserProxyResolverTest() override = default;
-
- protected:
- // Adds a GoogleMock expectation for a call to |dbus_proxy_|'s
- // ResolveProxyAsync method to resolve |url|.
- void AddResolveProxyExpectation(const std::string& url) {
- EXPECT_CALL(dbus_proxy_, ResolveProxyAsync(StrEq(url), _, _, _))
- .WillOnce(DoAll(SaveArg<1>(&success_callback_),
- SaveArg<2>(&error_callback_)));
- }
-
- NetworkProxyServiceInterfaceProxyMock dbus_proxy_;
- ChromeBrowserProxyResolver resolver_{&dbus_proxy_};
-
- // Callbacks that were passed to |dbus_proxy_|'s ResolveProxyAsync method.
- base::Callback<void(const std::string&, const std::string&)>
- success_callback_;
- base::Callback<void(brillo::Error*)> error_callback_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(ChromeBrowserProxyResolverTest);
-};
-
-TEST_F(ChromeBrowserProxyResolverTest, Parse) {
- // Test ideas from
- // http://src.chromium.org/svn/trunk/src/net/proxy/proxy_list_unittest.cc
- vector<string> inputs = {
- "PROXY foopy:10",
- " DIRECT", // leading space.
- "PROXY foopy1 ; proxy foopy2;\t DIRECT",
- "proxy foopy1 ; SOCKS foopy2",
- "DIRECT ; proxy foopy1 ; DIRECT ; SOCKS5 foopy2;DIRECT ",
- "DIRECT ; proxy foopy1:80; DIRECT ; DIRECT",
- "PROXY-foopy:10",
- "PROXY",
- "PROXY foopy1 ; JUNK ; JUNK ; SOCKS5 foopy2 ; ;",
- "HTTP foopy1; SOCKS5 foopy2",
- };
- vector<deque<string>> outputs = {
- {"http://foopy:10", kNoProxy},
- {kNoProxy},
- {"http://foopy1", "http://foopy2", kNoProxy},
- {"http://foopy1", "socks4://foopy2", kNoProxy},
- {kNoProxy, "http://foopy1", kNoProxy, "socks5://foopy2", kNoProxy},
- {kNoProxy, "http://foopy1:80", kNoProxy, kNoProxy},
- {kNoProxy},
- {kNoProxy},
- {"http://foopy1", "socks5://foopy2", kNoProxy},
- {"socks5://foopy2", kNoProxy},
- };
- ASSERT_EQ(inputs.size(), outputs.size());
-
- for (size_t i = 0; i < inputs.size(); i++) {
- deque<string> results =
- ChromeBrowserProxyResolver::ParseProxyString(inputs[i]);
- deque<string>& expected = outputs[i];
- EXPECT_EQ(results.size(), expected.size()) << "i = " << i;
- if (expected.size() != results.size())
- continue;
- for (size_t j = 0; j < expected.size(); j++) {
- EXPECT_EQ(expected[j], results[j]) << "i = " << i;
- }
- }
-}
-
-TEST_F(ChromeBrowserProxyResolverTest, Success) {
- const char kUrl[] = "http://example.com/blah";
- const char kProxyConfig[] = "SOCKS5 192.168.52.83:5555;DIRECT";
- AddResolveProxyExpectation(kUrl);
- deque<string> proxies;
- resolver_.GetProxiesForUrl(kUrl, base::Bind(&CopyProxies, &proxies));
-
- // Run the D-Bus success callback and verify that the proxies are passed to
- // the supplied function.
- ASSERT_FALSE(success_callback_.is_null());
- success_callback_.Run(kProxyConfig, string());
- ASSERT_EQ(2u, proxies.size());
- EXPECT_EQ("socks5://192.168.52.83:5555", proxies[0]);
- EXPECT_EQ(kNoProxy, proxies[1]);
-}
-
-TEST_F(ChromeBrowserProxyResolverTest, Failure) {
- const char kUrl[] = "http://example.com/blah";
- AddResolveProxyExpectation(kUrl);
- deque<string> proxies;
- resolver_.GetProxiesForUrl(kUrl, base::Bind(&CopyProxies, &proxies));
-
- // Run the D-Bus error callback and verify that the supplied function is
- // instructed to use a direct connection.
- ASSERT_FALSE(error_callback_.is_null());
- brillo::ErrorPtr error = brillo::Error::Create(FROM_HERE, "", "", "");
- error_callback_.Run(error.get());
- ASSERT_EQ(1u, proxies.size());
- EXPECT_EQ(kNoProxy, proxies[0]);
-}
-
-TEST_F(ChromeBrowserProxyResolverTest, CancelCallback) {
- const char kUrl[] = "http://example.com/blah";
- AddResolveProxyExpectation(kUrl);
- int called = 0;
- auto callback = base::Bind(
- [](int* called, const deque<string>& proxies) { (*called)++; }, &called);
- ProxyRequestId request = resolver_.GetProxiesForUrl(kUrl, callback);
-
- // Cancel the request and then run the D-Bus success callback. The original
- // callback shouldn't be run.
- EXPECT_TRUE(resolver_.CancelProxyRequest(request));
- ASSERT_FALSE(success_callback_.is_null());
- success_callback_.Run("DIRECT", string());
- EXPECT_EQ(0, called);
-}
-
-TEST_F(ChromeBrowserProxyResolverTest, CancelCallbackTwice) {
- const char kUrl[] = "http://example.com/blah";
- AddResolveProxyExpectation(kUrl);
- deque<string> proxies;
- ProxyRequestId request =
- resolver_.GetProxiesForUrl(kUrl, base::Bind(&CopyProxies, &proxies));
-
- // Cancel the same request twice. The second call should fail.
- EXPECT_TRUE(resolver_.CancelProxyRequest(request));
- EXPECT_FALSE(resolver_.CancelProxyRequest(request));
-}
-
-} // namespace chromeos_update_engine
diff --git a/client_library/client_binder.cc b/client_library/client_binder.cc
index e98c225..54b33ed 100644
--- a/client_library/client_binder.cc
+++ b/client_library/client_binder.cc
@@ -25,15 +25,15 @@
#include "update_engine/parcelable_update_engine_status.h"
#include "update_engine/update_status_utils.h"
-using android::OK;
-using android::String16;
-using android::String8;
using android::binder::Status;
using android::brillo::ParcelableUpdateEngineStatus;
using android::getService;
+using android::OK;
+using android::String16;
+using android::String8;
using chromeos_update_engine::StringToUpdateStatus;
-using chromeos_update_engine::UpdateEngineService;
using std::string;
+using update_engine::UpdateAttemptFlags;
namespace update_engine {
namespace internal {
@@ -48,10 +48,14 @@
bool BinderUpdateEngineClient::AttemptUpdate(const string& in_app_version,
const string& in_omaha_url,
bool at_user_request) {
- return service_->AttemptUpdate(String16{in_app_version.c_str()},
- String16{in_omaha_url.c_str()},
- at_user_request ? 0 :
- UpdateEngineService::kAttemptUpdateFlagNonInteractive).isOk();
+ bool started;
+ return service_
+ ->AttemptUpdate(
+ String16{in_app_version.c_str()},
+ String16{in_omaha_url.c_str()},
+ at_user_request ? 0 : UpdateAttemptFlags::kFlagNonInteractive,
+ &started)
+ .isOk();
}
bool BinderUpdateEngineClient::GetStatus(int64_t* out_last_checked_time,
@@ -143,18 +147,18 @@
}
Status BinderUpdateEngineClient::StatusUpdateCallback::HandleStatusUpdate(
- int64_t last_checked_time,
- double progress,
- const String16& current_operation,
- const String16& new_version,
- int64_t new_size) {
+ const ParcelableUpdateEngineStatus& status) {
UpdateStatus update_status;
- StringToUpdateStatus(String8{current_operation}.string(), &update_status);
+ StringToUpdateStatus(String8{status.current_operation_}.string(),
+ &update_status);
for (auto& handler : client_->handlers_) {
- handler->HandleStatusUpdate(last_checked_time, progress, update_status,
- String8{new_version}.string(), new_size);
+ handler->HandleStatusUpdate(status.last_checked_time_,
+ status.progress_,
+ update_status,
+ String8{status.new_version_}.string(),
+ status.new_size_);
}
return Status::ok();
diff --git a/client_library/client_binder.h b/client_library/client_binder.h
index b1b34da..17f2beb 100644
--- a/client_library/client_binder.h
+++ b/client_library/client_binder.h
@@ -94,11 +94,7 @@
: client_(client) {}
android::binder::Status HandleStatusUpdate(
- int64_t last_checked_time,
- double progress,
- const android::String16& current_operation,
- const android::String16& new_version,
- int64_t new_size) override;
+ const android::brillo::ParcelableUpdateEngineStatus& status) override;
private:
BinderUpdateEngineClient* client_;
diff --git a/client_library/include/update_engine/update_status.h b/client_library/include/update_engine/update_status.h
index 3e9af5b..41fab48 100644
--- a/client_library/include/update_engine/update_status.h
+++ b/client_library/include/update_engine/update_status.h
@@ -17,6 +17,10 @@
#ifndef UPDATE_ENGINE_CLIENT_LIBRARY_INCLUDE_UPDATE_ENGINE_UPDATE_STATUS_H_
#define UPDATE_ENGINE_CLIENT_LIBRARY_INCLUDE_UPDATE_ENGINE_UPDATE_STATUS_H_
+#include <string>
+
+#include <brillo/enum_flags.h>
+
namespace update_engine {
enum class UpdateStatus {
@@ -32,6 +36,39 @@
DISABLED,
};
+// Enum of bit-wise flags for controlling how updates are attempted.
+enum UpdateAttemptFlags : int32_t {
+ kNone = 0,
+ // Treat the update like a non-interactive update, even when being triggered
+ // by the interactive APIs.
+ kFlagNonInteractive = (1 << 0),
+ // Restrict (disallow) downloading of updates.
+ kFlagRestrictDownload = (1 << 1),
+};
+
+// Enable bit-wise operators for the above enumeration of flag values.
+DECLARE_FLAGS_ENUM(UpdateAttemptFlags);
+
+struct UpdateEngineStatus {
+ // When the update_engine last checked for updates (time_t: seconds from unix
+ // epoch)
+ int64_t last_checked_time;
+ // the current status/operation of the update_engine
+ UpdateStatus status;
+ // the current product version (oem bundle id)
+ std::string current_version;
+ // the current system version
+ std::string current_system_version;
+ // The current progress (0.0f-1.0f).
+ double progress;
+ // the size of the update (bytes)
+ uint64_t new_size_bytes;
+ // the new product version
+ std::string new_version;
+ // the new system version, if there is one (empty, otherwise)
+ std::string new_system_version;
+};
+
} // namespace update_engine
#endif // UPDATE_ENGINE_CLIENT_LIBRARY_INCLUDE_UPDATE_ENGINE_UPDATE_STATUS_H_
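For reference, the UpdateAttemptFlags values added above are bit-wise, and DECLARE_FLAGS_ENUM (from brillo/enum_flags.h) generates the bitwise operators for the enum, so callers can compose flags before handing them to SetUpdateAttemptFlags()/AttemptUpdate() as the int |flags| argument. A minimal sketch under that assumption; the include path and main() are illustrative only:

#include "update_engine/client_library/include/update_engine/update_status.h"

int main() {
  using update_engine::UpdateAttemptFlags;
  // Compose flags with the operators generated by DECLARE_FLAGS_ENUM: request
  // a non-interactive check that is not yet allowed to download the payload.
  UpdateAttemptFlags flags = UpdateAttemptFlags::kFlagNonInteractive |
                             UpdateAttemptFlags::kFlagRestrictDownload;
  // kFlagNonInteractive == 1 << 0 and kFlagRestrictDownload == 1 << 1, so the
  // combined value is 0x3.
  return static_cast<int32_t>(flags) == 0x3 ? 0 : 1;
}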
diff --git a/common/constants.cc b/common/constants.cc
index c0a6e27..5941c93 100644
--- a/common/constants.cc
+++ b/common/constants.cc
@@ -57,6 +57,7 @@
const char kPrefsP2PFirstAttemptTimestamp[] = "p2p-first-attempt-timestamp";
const char kPrefsP2PNumAttempts[] = "p2p-num-attempts";
const char kPrefsPayloadAttemptNumber[] = "payload-attempt-number";
+const char kPrefsPostInstallSucceeded[] = "post-install-succeeded";
const char kPrefsPreviousVersion[] = "previous-version";
const char kPrefsResumedUpdateFailures[] = "resumed-update-failures";
const char kPrefsRollbackVersion[] = "rollback-version";
@@ -103,5 +104,12 @@
// This can be used to zero-rate OTA traffic by sending it over the correct
// network.
const char kPayloadPropertyNetworkId[] = "NETWORK_ID";
+// Set "SWITCH_SLOT_ON_REBOOT=0" to skip marking the updated partitions active.
+// The default is 1 (always switch slot if update succeeded).
+const char kPayloadPropertySwitchSlotOnReboot[] = "SWITCH_SLOT_ON_REBOOT";
+// Set "RUN_POST_INSTALL=0" to skip running post install, this will only be
+// honored if we're resuming an update and post install has already succeeded.
+// The default is 1 (always run post install).
+const char kPayloadPropertyRunPostInstall[] = "RUN_POST_INSTALL";
} // namespace chromeos_update_engine
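The two properties added above are plain KEY=VALUE strings like the rest of this file. A hedged sketch of how a caller might build the corresponding payload headers; only the property names and their 0/1 semantics come from this change, the helper itself is illustrative:

#include <string>
#include <vector>

#include "update_engine/common/constants.h"

namespace chromeos_update_engine {

// SWITCH_SLOT_ON_REBOOT=0 applies the update without marking the new slot
// active; RUN_POST_INSTALL=0 skips post-install when resuming an update whose
// post-install step already succeeded (see the comments above).
std::vector<std::string> BuildResumeWithoutSwitchHeaders() {
  return {
      std::string(kPayloadPropertySwitchSlotOnReboot) + "=0",
      std::string(kPayloadPropertyRunPostInstall) + "=0",
  };
}

}  // namespace chromeos_update_engine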
diff --git a/common/constants.h b/common/constants.h
index 776e726..26773cf 100644
--- a/common/constants.h
+++ b/common/constants.h
@@ -59,6 +59,7 @@
extern const char kPrefsP2PFirstAttemptTimestamp[];
extern const char kPrefsP2PNumAttempts[];
extern const char kPrefsPayloadAttemptNumber[];
+extern const char kPrefsPostInstallSucceeded[];
extern const char kPrefsPreviousVersion[];
extern const char kPrefsResumedUpdateFailures[];
extern const char kPrefsRollbackVersion[];
@@ -96,6 +97,8 @@
extern const char kPayloadPropertyUserAgent[];
extern const char kPayloadPropertyPowerwash[];
extern const char kPayloadPropertyNetworkId[];
+extern const char kPayloadPropertySwitchSlotOnReboot[];
+extern const char kPayloadPropertyRunPostInstall[];
// A download source is any combination of protocol and server (that's of
// interest to us when looking at UMA metrics) using which we may download
@@ -173,6 +176,7 @@
// succeeding. When using p2p, this is low in order to fail fast.
const int kDownloadMaxRetryCount = 20;
const int kDownloadMaxRetryCountOobeNotComplete = 3;
+const int kDownloadMaxRetryCountInteractive = 3;
const int kDownloadP2PMaxRetryCount = 5;
// The connect timeout, in seconds.
diff --git a/common/error_code.h b/common/error_code.h
index 3800bf0..0b08005 100644
--- a/common/error_code.h
+++ b/common/error_code.h
@@ -75,6 +75,7 @@
kNonCriticalUpdateInOOBE = 49,
// kOmahaUpdateIgnoredOverCellular = 50,
kPayloadTimestampError = 51,
+ kUpdatedButNotActive = 52,
// VERY IMPORTANT! When adding new error codes:
//
diff --git a/common/error_code_utils.cc b/common/error_code_utils.cc
index 0a015eb..313a15f 100644
--- a/common/error_code_utils.cc
+++ b/common/error_code_utils.cc
@@ -146,8 +146,10 @@
return "ErrorCode::kNonCriticalUpdateInOOBE";
case ErrorCode::kPayloadTimestampError:
return "ErrorCode::kPayloadTimestampError";
- // Don't add a default case to let the compiler warn about newly added
- // error codes which should be added here.
+ case ErrorCode::kUpdatedButNotActive:
+ return "ErrorCode::kUpdatedButNotActive";
+ // Don't add a default case to let the compiler warn about newly added
+ // error codes which should be added here.
}
return "Unknown error: " + base::UintToString(static_cast<unsigned>(code));
diff --git a/common/fake_hardware.h b/common/fake_hardware.h
index a384597..f2b2c9d 100644
--- a/common/fake_hardware.h
+++ b/common/fake_hardware.h
@@ -83,6 +83,14 @@
int64_t GetBuildTimestamp() const override { return build_timestamp_; }
+ bool GetFirstActiveOmahaPingSent() const override {
+ return first_active_omaha_ping_sent_;
+ }
+
+ void SetFirstActiveOmahaPingSent() override {
+ first_active_omaha_ping_sent_ = true;
+ }
+
// Setters
void SetIsOfficialBuild(bool is_official_build) {
is_official_build_ = is_official_build;
@@ -144,6 +152,7 @@
int powerwash_count_{kPowerwashCountNotSet};
bool powerwash_scheduled_{false};
int64_t build_timestamp_{0};
+ bool first_active_omaha_ping_sent_{false};
DISALLOW_COPY_AND_ASSIGN(FakeHardware);
};
diff --git a/common/hardware_interface.h b/common/hardware_interface.h
index c9e2f85..94442d1 100644
--- a/common/hardware_interface.h
+++ b/common/hardware_interface.h
@@ -94,6 +94,15 @@
// Returns the timestamp of the current OS build.
virtual int64_t GetBuildTimestamp() const = 0;
+
+ // Returns whether the first active ping was sent to Omaha at some point, and
+ // that the value is persisted across recovery (and powerwash) once set with
+ // |SetFirstActiveOmahaPingSent()|.
+ virtual bool GetFirstActiveOmahaPingSent() const = 0;
+
+ // Persist the fact that first active ping was sent to omaha. It bails out if
+ // it fails.
+ virtual void SetFirstActiveOmahaPingSent() = 0;
};
} // namespace chromeos_update_engine
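Based only on the interface comments above, the intended call pattern for the two new methods looks roughly like this; the surrounding helper is illustrative, not the actual Omaha request code:

#include "update_engine/common/hardware_interface.h"

namespace chromeos_update_engine {

// Record that the "first active" ping reached Omaha, exactly once. The value
// persists across recovery and powerwash once set.
void MaybeMarkFirstActivePingSent(HardwareInterface* hardware,
                                  bool ping_succeeded) {
  if (hardware->GetFirstActiveOmahaPingSent())
    return;  // Already recorded on an earlier attempt.
  if (ping_succeeded)
    hardware->SetFirstActiveOmahaPingSent();
}

}  // namespace chromeos_update_engine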
diff --git a/common/http_fetcher_unittest.cc b/common/http_fetcher_unittest.cc
index dcc1573..867216e 100644
--- a/common/http_fetcher_unittest.cc
+++ b/common/http_fetcher_unittest.cc
@@ -32,7 +32,6 @@
#include <base/strings/string_util.h>
#include <base/strings/stringprintf.h>
#include <base/time/time.h>
-#include <brillo/bind_lambda.h>
#include <brillo/message_loops/base_message_loop.h>
#include <brillo/message_loops/message_loop.h>
#include <brillo/message_loops/message_loop_utils.h>
@@ -634,7 +633,7 @@
fetcher->Unpause();
fetcher->Pause();
// Proxy resolver comes back after we paused the fetcher.
- ASSERT_TRUE(proxy_callback);
+ ASSERT_FALSE(proxy_callback.is_null());
proxy_callback.Run({1, kNoProxy});
}
diff --git a/common/hwid_override_unittest.cc b/common/hwid_override_unittest.cc
index 26ef30a..35e6438 100644
--- a/common/hwid_override_unittest.cc
+++ b/common/hwid_override_unittest.cc
@@ -32,7 +32,7 @@
void SetUp() override {
ASSERT_TRUE(tempdir_.CreateUniqueTempDir());
- ASSERT_TRUE(base::CreateDirectory(tempdir_.path().Append("etc")));
+ ASSERT_TRUE(base::CreateDirectory(tempdir_.GetPath().Append("etc")));
}
protected:
@@ -46,22 +46,24 @@
std::string expected_hwid("expected");
std::string keyval(HwidOverride::kHwidOverrideKey);
keyval += ("=" + expected_hwid);
- ASSERT_EQ(base::WriteFile(tempdir_.path().Append("etc/lsb-release"),
- keyval.c_str(), keyval.length()),
+ ASSERT_EQ(base::WriteFile(tempdir_.GetPath().Append("etc/lsb-release"),
+ keyval.c_str(),
+ keyval.length()),
static_cast<int>(keyval.length()));
- EXPECT_EQ(expected_hwid, HwidOverride::Read(tempdir_.path()));
+ EXPECT_EQ(expected_hwid, HwidOverride::Read(tempdir_.GetPath()));
}
TEST_F(HwidOverrideTest, ReadNothing) {
std::string keyval("SOMETHING_ELSE=UNINTERESTING");
- ASSERT_EQ(base::WriteFile(tempdir_.path().Append("etc/lsb-release"),
- keyval.c_str(), keyval.length()),
+ ASSERT_EQ(base::WriteFile(tempdir_.GetPath().Append("etc/lsb-release"),
+ keyval.c_str(),
+ keyval.length()),
static_cast<int>(keyval.length()));
- EXPECT_EQ(std::string(), HwidOverride::Read(tempdir_.path()));
+ EXPECT_EQ(std::string(), HwidOverride::Read(tempdir_.GetPath()));
}
TEST_F(HwidOverrideTest, ReadFailure) {
- EXPECT_EQ(std::string(), HwidOverride::Read(tempdir_.path()));
+ EXPECT_EQ(std::string(), HwidOverride::Read(tempdir_.GetPath()));
}
} // namespace chromeos_update_engine
diff --git a/common/mock_hardware.h b/common/mock_hardware.h
index 1c4253a..42fa7ba 100644
--- a/common/mock_hardware.h
+++ b/common/mock_hardware.h
@@ -63,6 +63,12 @@
ON_CALL(*this, GetPowerwashSafeDirectory(testing::_))
.WillByDefault(testing::Invoke(&fake_,
&FakeHardware::GetPowerwashSafeDirectory));
+ ON_CALL(*this, GetFirstActiveOmahaPingSent())
+ .WillByDefault(testing::Invoke(&fake_,
+ &FakeHardware::GetFirstActiveOmahaPingSent));
+ ON_CALL(*this, SetFirstActiveOmahaPingSent())
+ .WillByDefault(testing::Invoke(&fake_,
+ &FakeHardware::SetFirstActiveOmahaPingSent));
}
~MockHardware() override = default;
@@ -78,6 +84,7 @@
MOCK_CONST_METHOD0(GetPowerwashCount, int());
MOCK_CONST_METHOD1(GetNonVolatileDirectory, bool(base::FilePath*));
MOCK_CONST_METHOD1(GetPowerwashSafeDirectory, bool(base::FilePath*));
+ MOCK_CONST_METHOD0(GetFirstActiveOmahaPingSent, bool());
// Returns a reference to the underlying FakeHardware.
FakeHardware& fake() {
diff --git a/common/prefs_unittest.cc b/common/prefs_unittest.cc
index 73ceb00..aa2eb04 100644
--- a/common/prefs_unittest.cc
+++ b/common/prefs_unittest.cc
@@ -44,7 +44,7 @@
protected:
void SetUp() override {
ASSERT_TRUE(temp_dir_.CreateUniqueTempDir());
- prefs_dir_ = temp_dir_.path();
+ prefs_dir_ = temp_dir_.GetPath();
ASSERT_TRUE(prefs_.Init(prefs_dir_));
}
diff --git a/common/subprocess_unittest.cc b/common/subprocess_unittest.cc
index cbc9a85..c8996db 100644
--- a/common/subprocess_unittest.cc
+++ b/common/subprocess_unittest.cc
@@ -32,7 +32,6 @@
#include <base/strings/string_util.h>
#include <base/strings/stringprintf.h>
#include <base/time/time.h>
-#include <brillo/bind_lambda.h>
#include <brillo/message_loops/base_message_loop.h>
#include <brillo/message_loops/message_loop.h>
#include <brillo/message_loops/message_loop_utils.h>
@@ -225,7 +224,7 @@
TEST_F(SubprocessTest, CancelTest) {
base::ScopedTempDir tempdir;
ASSERT_TRUE(tempdir.CreateUniqueTempDir());
- string fifo_path = tempdir.path().Append("fifo").value();
+ string fifo_path = tempdir.GetPath().Append("fifo").value();
EXPECT_EQ(0, mkfifo(fifo_path.c_str(), 0666));
// Start a process, make sure it is running and try to cancel it. We write
diff --git a/common/test_utils.cc b/common/test_utils.cc
index fb22c80..85f78f9 100644
--- a/common/test_utils.cc
+++ b/common/test_utils.cc
@@ -24,6 +24,7 @@
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
+#include <sys/mount.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>
#include <sys/types.h>
@@ -201,6 +202,13 @@
device_info.lo_file_name[LO_NAME_SIZE - 1] = '\0';
TEST_AND_RETURN_FALSE_ERRNO(
ioctl(loop_device_fd, LOOP_SET_STATUS64, &device_info) == 0);
+ if (writable) {
+ // Make sure loop device isn't read only.
+ int ro = 0;
+ if (ioctl(loop_device_fd, BLKROSET, &ro) != 0) {
+ PLOG(WARNING) << "Failed to mark loop device writable.";
+ }
+ }
return true;
}
@@ -248,7 +256,7 @@
string* mnt_path,
unsigned long flags) { // NOLINT - long
EXPECT_TRUE(temp_dir_.CreateUniqueTempDir());
- *mnt_path = temp_dir_.path().value();
+ *mnt_path = temp_dir_.GetPath().value();
string loop_dev;
loop_binder_.reset(
diff --git a/common/test_utils.h b/common/test_utils.h
index ba9f5f2..ddb3d34 100644
--- a/common/test_utils.h
+++ b/common/test_utils.h
@@ -31,6 +31,7 @@
#include <base/callback.h>
#include <base/files/file_path.h>
#include <base/files/scoped_temp_dir.h>
+#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "update_engine/common/action.h"
@@ -101,6 +102,11 @@
void FillWithData(brillo::Blob* buffer);
+// Compare the value of native array for download source parameter.
+MATCHER_P(DownloadSourceMatcher, source_array, "") {
+ return std::equal(source_array, source_array + kNumDownloadSources, arg);
+}
+
// Class to unmount FS when object goes out of scope
class ScopedFilesystemUnmounter {
public:
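The DownloadSourceMatcher added above compares the first kNumDownloadSources elements of a native-array argument against an expected array. A sketch of one possible use in a test; the mock class is hypothetical and exists only to exercise the matcher, and the matcher is assumed to live alongside FillWithData() in test_utils.h:

#include <stdint.h>

#include <gmock/gmock.h>
#include <gtest/gtest.h>

#include "update_engine/common/constants.h"
#include "update_engine/common/test_utils.h"

namespace chromeos_update_engine {

// Hypothetical collaborator, used only to demonstrate the matcher.
class MockByteReporter {
 public:
  MOCK_METHOD1(ReportBytes, void(const uint64_t* bytes_by_source));
};

TEST(DownloadSourceMatcherExample, MatchesExpectedArray) {
  MockByteReporter reporter;
  uint64_t expected[kNumDownloadSources] = {};  // All sources at zero bytes.
  EXPECT_CALL(reporter,
              ReportBytes(test_utils::DownloadSourceMatcher(expected)));
  reporter.ReportBytes(expected);
}

}  // namespace chromeos_update_engine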
diff --git a/common/utils.cc b/common/utils.cc
index f528660..f651823 100644
--- a/common/utils.cc
+++ b/common/utils.cc
@@ -57,7 +57,6 @@
#include "update_engine/common/prefs_interface.h"
#include "update_engine/common/subprocess.h"
#include "update_engine/payload_consumer/file_descriptor.h"
-#include "update_engine/payload_consumer/payload_constants.h"
using base::Time;
using base::TimeDelta;
@@ -1035,19 +1034,6 @@
return false;
}
-bool IsZlibCompatible(const string& fingerprint) {
- if (fingerprint.size() != sizeof(kCompatibleZlibFingerprint[0]) - 1) {
- LOG(ERROR) << "Invalid fingerprint: " << fingerprint;
- return false;
- }
- for (auto& f : kCompatibleZlibFingerprint) {
- if (base::CompareCaseInsensitiveASCII(fingerprint, f) == 0) {
- return true;
- }
- }
- return false;
-}
-
bool ReadExtents(const string& path, const vector<Extent>& extents,
brillo::Blob* out_data, ssize_t out_data_size,
size_t block_size) {
diff --git a/common/utils.h b/common/utils.h
index eaf2640..e4ffcf8 100644
--- a/common/utils.h
+++ b/common/utils.h
@@ -88,8 +88,8 @@
bool PReadAll(int fd, void* buf, size_t count, off_t offset,
ssize_t* out_bytes_read);
-bool PReadAll(const FileDescriptorPtr& fd, void* buf, size_t count, off_t offset,
- ssize_t* out_bytes_read);
+bool PReadAll(const FileDescriptorPtr& fd, void* buf, size_t count,
+ off_t offset, ssize_t* out_bytes_read);
// Opens |path| for reading and appends its entire content to the container
// pointed to by |out_p|. Returns true upon successfully reading all of the
@@ -256,6 +256,16 @@
}
}
+// Return the total number of blocks in the passed |extents| collection.
+template <class T>
+uint64_t BlocksInExtents(const T& extents) {
+ uint64_t sum = 0;
+ for (const auto& ext : extents) {
+ sum += ext.num_blocks();
+ }
+ return sum;
+}
+
// Converts seconds into human readable notation including days, hours, minutes
// and seconds. For example, 185 will yield 3m5s, 4300 will yield 1h11m40s, and
// 360000 will yield 4d4h0m0s. Zero padding not applied. Seconds are always
@@ -308,9 +318,6 @@
bool GetMinorVersion(const brillo::KeyValueStore& store,
uint32_t* minor_version);
-// Returns whether zlib |fingerprint| is compatible with zlib we are using.
-bool IsZlibCompatible(const std::string& fingerprint);
-
// This function reads the specified data in |extents| into |out_data|. The
// extents are read from the file at |path|. |out_data_size| is the size of
// |out_data|. Returns false if the number of bytes to read given in
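
The BlocksInExtents() template added above simply sums num_blocks() over any iterable of Extent protos. A small usage sketch (the extent sizes are made up; qualify the call with the enclosing namespace as needed):

  // Sketch only: total blocks covered by two hypothetical extents.
  std::vector<Extent> extents(2);
  extents[0].set_start_block(0);
  extents[0].set_num_blocks(10);
  extents[1].set_start_block(100);
  extents[1].set_num_blocks(5);
  uint64_t total = BlocksInExtents(extents);  // total == 15
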
diff --git a/common/utils_unittest.cc b/common/utils_unittest.cc
index 6e9a911..62f9f6c 100644
--- a/common/utils_unittest.cc
+++ b/common/utils_unittest.cc
@@ -100,11 +100,11 @@
TEST(UtilsTest, IsSymlinkTest) {
base::ScopedTempDir temp_dir;
ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
- string temp_file = temp_dir.path().Append("temp-file").value();
+ string temp_file = temp_dir.GetPath().Append("temp-file").value();
EXPECT_TRUE(utils::WriteFile(temp_file.c_str(), "", 0));
- string temp_symlink = temp_dir.path().Append("temp-symlink").value();
+ string temp_symlink = temp_dir.GetPath().Append("temp-symlink").value();
EXPECT_EQ(0, symlink(temp_file.c_str(), temp_symlink.c_str()));
- EXPECT_FALSE(utils::IsSymlink(temp_dir.path().value().c_str()));
+ EXPECT_FALSE(utils::IsSymlink(temp_dir.GetPath().value().c_str()));
EXPECT_FALSE(utils::IsSymlink(temp_file.c_str()));
EXPECT_TRUE(utils::IsSymlink(temp_symlink.c_str()));
EXPECT_FALSE(utils::IsSymlink("/non/existent/path"));
@@ -303,8 +303,9 @@
base::Time::Exploded exploded = (base::Time::Exploded) {
.year = 2001, .month = 9, .day_of_week = 0, .day_of_month = 9,
.hour = 1, .minute = 46, .second = 40, .millisecond = 42};
- EXPECT_EQ(base::Time::FromUTCExploded(exploded),
- utils::TimeFromStructTimespec(&ts));
+ base::Time time;
+ EXPECT_TRUE(base::Time::FromUTCExploded(exploded, &time));
+ EXPECT_EQ(time, utils::TimeFromStructTimespec(&ts));
}
TEST(UtilsTest, DecodeAndStoreBase64String) {
@@ -478,23 +479,23 @@
test_utils::ScopedLoopbackDeviceBinder loop_binder(
tmp_image, true, &loop_dev);
- EXPECT_FALSE(utils::IsMountpoint(mnt_dir.path().value()));
+ EXPECT_FALSE(utils::IsMountpoint(mnt_dir.GetPath().value()));
// This is the actual test part. While we hold a file descriptor open for the
// mounted filesystem, umount should still succeed.
EXPECT_TRUE(utils::MountFilesystem(
- loop_dev, mnt_dir.path().value(), MS_RDONLY, "ext4", ""));
+ loop_dev, mnt_dir.GetPath().value(), MS_RDONLY, "ext4", ""));
// Verify the directory is a mount point now.
- EXPECT_TRUE(utils::IsMountpoint(mnt_dir.path().value()));
+ EXPECT_TRUE(utils::IsMountpoint(mnt_dir.GetPath().value()));
- string target_file = mnt_dir.path().Append("empty-file").value();
+ string target_file = mnt_dir.GetPath().Append("empty-file").value();
int fd = HANDLE_EINTR(open(target_file.c_str(), O_RDONLY));
EXPECT_GE(fd, 0);
- EXPECT_TRUE(utils::UnmountFilesystem(mnt_dir.path().value()));
+ EXPECT_TRUE(utils::UnmountFilesystem(mnt_dir.GetPath().value()));
// The filesystem should be already unmounted at this point.
- EXPECT_FALSE(utils::IsMountpoint(mnt_dir.path().value()));
+ EXPECT_FALSE(utils::IsMountpoint(mnt_dir.GetPath().value()));
IGNORE_EINTR(close(fd));
// The filesystem was already unmounted so this call should fail.
- EXPECT_FALSE(utils::UnmountFilesystem(mnt_dir.path().value()));
+ EXPECT_FALSE(utils::UnmountFilesystem(mnt_dir.GetPath().value()));
}
TEST(UtilsTest, IsMountpointTest) {
@@ -503,7 +504,7 @@
base::ScopedTempDir mnt_dir;
EXPECT_TRUE(mnt_dir.CreateUniqueTempDir());
- EXPECT_FALSE(utils::IsMountpoint(mnt_dir.path().value()));
+ EXPECT_FALSE(utils::IsMountpoint(mnt_dir.GetPath().value()));
base::FilePath file;
EXPECT_TRUE(base::CreateTemporaryFile(&file));
diff --git a/common_service.cc b/common_service.cc
index 1a41b63..9f3b862 100644
--- a/common_service.cc
+++ b/common_service.cc
@@ -19,10 +19,10 @@
#include <set>
#include <string>
+#include <base/bind.h>
#include <base/location.h>
#include <base/logging.h>
#include <base/strings/stringprintf.h>
-#include <brillo/bind_lambda.h>
#include <brillo/message_loops/message_loop.h>
#include <brillo/strings/string_utils.h>
#include <policy/device_policy.h>
@@ -43,6 +43,8 @@
using brillo::string_utils::ToString;
using std::set;
using std::string;
+using update_engine::UpdateAttemptFlags;
+using update_engine::UpdateEngineStatus;
namespace chromeos_update_engine {
@@ -71,19 +73,35 @@
// org::chromium::UpdateEngineInterfaceInterface methods implementation.
+bool UpdateEngineService::SetUpdateAttemptFlags(ErrorPtr* /* error */,
+ int32_t in_flags_as_int) {
+ auto flags = static_cast<UpdateAttemptFlags>(in_flags_as_int);
+ LOG(INFO) << "Setting Update Attempt Flags: "
+ << "flags=0x" << std::hex << flags << " "
+ << "RestrictDownload="
+ << ((flags & UpdateAttemptFlags::kFlagRestrictDownload) ? "yes"
+ : "no");
+ system_state_->update_attempter()->SetUpdateAttemptFlags(flags);
+ return true;
+}
+
bool UpdateEngineService::AttemptUpdate(ErrorPtr* /* error */,
const string& in_app_version,
const string& in_omaha_url,
- int32_t in_flags_as_int) {
- AttemptUpdateFlags flags = static_cast<AttemptUpdateFlags>(in_flags_as_int);
- bool interactive = !(flags & kAttemptUpdateFlagNonInteractive);
+ int32_t in_flags_as_int,
+ bool* out_result) {
+ auto flags = static_cast<UpdateAttemptFlags>(in_flags_as_int);
+ bool interactive = !(flags & UpdateAttemptFlags::kFlagNonInteractive);
+ bool restrict_downloads = (flags & UpdateAttemptFlags::kFlagRestrictDownload);
LOG(INFO) << "Attempt update: app_version=\"" << in_app_version << "\" "
<< "omaha_url=\"" << in_omaha_url << "\" "
<< "flags=0x" << std::hex << flags << " "
- << "interactive=" << (interactive ? "yes" : "no");
- system_state_->update_attempter()->CheckForUpdate(
- in_app_version, in_omaha_url, interactive);
+ << "interactive=" << (interactive ? "yes " : "no ")
+ << "RestrictDownload=" << (restrict_downloads ? "yes " : "no ");
+
+ *out_result = system_state_->update_attempter()->CheckForUpdate(
+ in_app_version, in_omaha_url, flags);
return true;
}
@@ -116,16 +134,8 @@
}
bool UpdateEngineService::GetStatus(ErrorPtr* error,
- int64_t* out_last_checked_time,
- double* out_progress,
- string* out_current_operation,
- string* out_new_version,
- int64_t* out_new_size) {
- if (!system_state_->update_attempter()->GetStatus(out_last_checked_time,
- out_progress,
- out_current_operation,
- out_new_version,
- out_new_size)) {
+ UpdateEngineStatus* out_status) {
+ if (!system_state_->update_attempter()->GetStatus(out_status)) {
LogAndSetError(error, FROM_HERE, "GetStatus failed.");
return false;
}
diff --git a/common_service.h b/common_service.h
index 69368fb..544dd93 100644
--- a/common_service.h
+++ b/common_service.h
@@ -24,17 +24,13 @@
#include <base/memory/ref_counted.h>
#include <brillo/errors/error.h>
+#include "update_engine/client_library/include/update_engine/update_status.h"
#include "update_engine/system_state.h"
namespace chromeos_update_engine {
class UpdateEngineService {
public:
- // Flags used in the AttemptUpdateWithFlags() D-Bus method.
- typedef enum {
- kAttemptUpdateFlagNonInteractive = (1<<0)
- } AttemptUpdateFlags;
-
// Error domain for all the service errors.
static const char* const kErrorDomain;
@@ -44,10 +40,17 @@
explicit UpdateEngineService(SystemState* system_state);
virtual ~UpdateEngineService() = default;
+ // Set flags that influence how updates and checks are performed. These
+ // influence all future checks and updates until changed or the device
+ // reboots. The |in_flags_as_int| values are a union of values from
+ // |UpdateAttemptFlags|.
+ bool SetUpdateAttemptFlags(brillo::ErrorPtr* error, int32_t in_flags_as_int);
+
bool AttemptUpdate(brillo::ErrorPtr* error,
const std::string& in_app_version,
const std::string& in_omaha_url,
- int32_t in_flags_as_int);
+ int32_t in_flags_as_int,
+ bool* out_result);
bool AttemptRollback(brillo::ErrorPtr* error, bool in_powerwash);
@@ -63,11 +66,7 @@
// progress, the number of operations, size to download and overall progress
// is reported.
bool GetStatus(brillo::ErrorPtr* error,
- int64_t* out_last_checked_time,
- double* out_progress,
- std::string* out_current_operation,
- std::string* out_new_version,
- int64_t* out_new_size);
+ update_engine::UpdateEngineStatus* out_status);
// Reboots the device if an update is applied and a reboot is required.
bool RebootIfNeeded(brillo::ErrorPtr* error);
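
The |in_flags_as_int| argument is a bit-union of update_engine::UpdateAttemptFlags values, so callers combine flags before passing the integer over D-Bus. A hedged sketch of a caller building that value (the flag names come from this change; the |service| object and the chosen combination are illustrative):

  // Illustrative only: request a non-interactive, download-restricted update.
  int32_t flags_as_int =
      static_cast<int32_t>(UpdateAttemptFlags::kFlagNonInteractive) |
      static_cast<int32_t>(UpdateAttemptFlags::kFlagRestrictDownload);
  bool result = false;
  brillo::ErrorPtr error;
  service.AttemptUpdate(&error, "app_version", "omaha_url", flags_as_int,
                        &result);
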
diff --git a/common_service_unittest.cc b/common_service_unittest.cc
index 0a7bfc3..d9ef567 100644
--- a/common_service_unittest.cc
+++ b/common_service_unittest.cc
@@ -28,9 +28,10 @@
#include "update_engine/omaha_utils.h"
using std::string;
-using testing::Return;
-using testing::SetArgumentPointee;
using testing::_;
+using testing::Return;
+using testing::SetArgPointee;
+using update_engine::UpdateAttemptFlags;
namespace chromeos_update_engine {
@@ -56,13 +57,32 @@
};
TEST_F(UpdateEngineServiceTest, AttemptUpdate) {
- EXPECT_CALL(*mock_update_attempter_, CheckForUpdate(
- "app_ver", "url", false /* interactive */));
- // The update is non-interactive when we pass the non-interactive flag.
- EXPECT_TRUE(common_service_.AttemptUpdate(
- &error_, "app_ver", "url",
- UpdateEngineService::kAttemptUpdateFlagNonInteractive));
+ EXPECT_CALL(
+ *mock_update_attempter_,
+ CheckForUpdate("app_ver", "url", UpdateAttemptFlags::kFlagNonInteractive))
+ .WillOnce(Return(true));
+
+ // The non-interactive flag needs to be passed through to CheckForUpdate.
+ bool result = false;
+ EXPECT_TRUE(
+ common_service_.AttemptUpdate(&error_,
+ "app_ver",
+ "url",
+ UpdateAttemptFlags::kFlagNonInteractive,
+ &result));
EXPECT_EQ(nullptr, error_);
+ EXPECT_TRUE(result);
+}
+
+TEST_F(UpdateEngineServiceTest, AttemptUpdateReturnsFalse) {
+ EXPECT_CALL(*mock_update_attempter_,
+ CheckForUpdate("app_ver", "url", UpdateAttemptFlags::kNone))
+ .WillOnce(Return(false));
+ bool result = true;
+ EXPECT_TRUE(common_service_.AttemptUpdate(
+ &error_, "app_ver", "url", UpdateAttemptFlags::kNone, &result));
+ EXPECT_EQ(nullptr, error_);
+ EXPECT_FALSE(result);
}
// SetChannel is allowed when there's no device policy (the device is not
@@ -82,7 +102,7 @@
policy::MockDevicePolicy mock_device_policy;
fake_system_state_.set_device_policy(&mock_device_policy);
EXPECT_CALL(mock_device_policy, GetReleaseChannelDelegated(_))
- .WillOnce(DoAll(SetArgumentPointee<0>(true), Return(true)));
+ .WillOnce(DoAll(SetArgPointee<0>(true), Return(true)));
EXPECT_CALL(*fake_system_state_.mock_request_params(),
SetTargetChannel("beta-channel", true, _))
.WillOnce(Return(true));
diff --git a/connection_manager.cc b/connection_manager.cc
index f72d9e8..d15faf0 100644
--- a/connection_manager.cc
+++ b/connection_manager.cc
@@ -73,7 +73,7 @@
if (device_policy->GetAllowedConnectionTypesForUpdate(&allowed_types)) {
// The update setting is enforced by the device policy.
- if (!ContainsKey(allowed_types, shill::kTypeCellular)) {
+ if (!base::ContainsKey(allowed_types, shill::kTypeCellular)) {
LOG(INFO) << "Disabling updates over cellular connection as it's not "
"allowed in the device policy.";
return false;
diff --git a/connection_manager_unittest.cc b/connection_manager_unittest.cc
index 0bb5547..e26a686 100644
--- a/connection_manager_unittest.cc
+++ b/connection_manager_unittest.cc
@@ -21,7 +21,6 @@
#include <base/logging.h>
#include <brillo/any.h>
-#include <brillo/make_unique_ptr.h>
#include <brillo/message_loops/fake_message_loop.h>
#include <brillo/variant_dictionary.h>
#include <gmock/gmock.h>
diff --git a/dbus_bindings/org.chromium.NetworkProxyService.dbus-xml b/dbus_bindings/org.chromium.NetworkProxyService.dbus-xml
deleted file mode 100644
index 90686ca..0000000
--- a/dbus_bindings/org.chromium.NetworkProxyService.dbus-xml
+++ /dev/null
@@ -1,13 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" ?>
-
-<node name="/org/chromium/NetworkProxyService"
- xmlns:tp="http://telepathy.freedesktop.org/wiki/DbusSpec#extensions-v0">
- <interface name="org.chromium.NetworkProxyServiceInterface">
- <method name="ResolveProxy">
- <arg name="source_url" type="s" direction="in" />
- <arg name="proxy_info" type="s" direction="out" />
- <arg name="error_message" type="s" direction="out" />
- <annotation name="org.chromium.DBus.Method.Kind" value="async" />
- </method>
- </interface>
-</node>
diff --git a/dbus_service.cc b/dbus_service.cc
index 0a7ad5b..3a35c71 100644
--- a/dbus_service.cc
+++ b/dbus_service.cc
@@ -25,6 +25,7 @@
using brillo::ErrorPtr;
using chromeos_update_engine::UpdateEngineService;
using std::string;
+using update_engine::UpdateEngineStatus;
DBusUpdateEngineService::DBusUpdateEngineService(SystemState* system_state)
: common_(new UpdateEngineService{system_state}) {
@@ -172,14 +173,13 @@
dbus::Bus::REQUIRE_PRIMARY);
}
-void UpdateEngineAdaptor::SendStatusUpdate(int64_t last_checked_time,
- double progress,
- update_engine::UpdateStatus status,
- const string& new_version,
- int64_t new_size) {
- const string str_status = UpdateStatusToString(status);
- SendStatusUpdateSignal(
- last_checked_time, progress, str_status, new_version, new_size);
+void UpdateEngineAdaptor::SendStatusUpdate(
+ const UpdateEngineStatus& update_engine_status) {
+ SendStatusUpdateSignal(update_engine_status.last_checked_time,
+ update_engine_status.progress,
+ UpdateStatusToString(update_engine_status.status),
+ update_engine_status.new_version,
+ update_engine_status.new_size);
}
} // namespace chromeos_update_engine
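
The refactor above collapses the five out-parameters into a single update_engine::UpdateEngineStatus value. From the fields referenced in SendStatusUpdate(), the struct carries at least the following members (an assumed shape inferred from this diff, not the authoritative definition in client_library/include/update_engine/update_status.h):

  // Assumed shape, inferred from the fields used in SendStatusUpdate() above.
  struct UpdateEngineStatus {
    int64_t last_checked_time;
    double progress;
    UpdateStatus status;
    std::string new_version;
    int64_t new_size;
  };
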
diff --git a/dbus_service.h b/dbus_service.h
index 2b36ae9..b754661 100644
--- a/dbus_service.h
+++ b/dbus_service.h
@@ -167,11 +167,8 @@
bool RequestOwnership();
// ServiceObserverInterface overrides.
- void SendStatusUpdate(int64_t last_checked_time,
- double progress,
- update_engine::UpdateStatus status,
- const std::string& new_version,
- int64_t new_size) override;
+ void SendStatusUpdate(
+ const update_engine::UpdateEngineStatus& update_engine_status) override;
void SendPayloadApplicationComplete(ErrorCode error_code) override {}
diff --git a/fake_p2p_manager_configuration.h b/fake_p2p_manager_configuration.h
index 1bc1dc8..c1cf4f2 100644
--- a/fake_p2p_manager_configuration.h
+++ b/fake_p2p_manager_configuration.h
@@ -37,9 +37,7 @@
}
// P2PManager::Configuration override
- base::FilePath GetP2PDir() override {
- return p2p_dir_.path();
- }
+ base::FilePath GetP2PDir() override { return p2p_dir_.GetPath(); }
// P2PManager::Configuration override
std::vector<std::string> GetInitctlArgs(bool is_start) override {
diff --git a/fake_system_state.cc b/fake_system_state.cc
index d51f775..1bfcafa 100644
--- a/fake_system_state.cc
+++ b/fake_system_state.cc
@@ -21,13 +21,13 @@
// Mock the SystemStateInterface so that we could lie that
// OOBE is completed even when there's no such marker file, etc.
FakeSystemState::FakeSystemState()
- : mock_update_attempter_(this, nullptr, nullptr),
+ : mock_update_attempter_(this, nullptr),
mock_request_params_(this),
fake_update_manager_(&fake_clock_),
clock_(&fake_clock_),
connection_manager_(&mock_connection_manager_),
hardware_(&fake_hardware_),
- metrics_lib_(&mock_metrics_lib_),
+ metrics_reporter_(&mock_metrics_reporter_),
prefs_(&mock_prefs_),
powerwash_safe_prefs_(&mock_powerwash_safe_prefs_),
payload_state_(&mock_payload_state_),
diff --git a/fake_system_state.h b/fake_system_state.h
index 2225933..67ad3aa 100644
--- a/fake_system_state.h
+++ b/fake_system_state.h
@@ -27,6 +27,7 @@
#include "update_engine/common/fake_hardware.h"
#include "update_engine/common/mock_prefs.h"
#include "update_engine/mock_connection_manager.h"
+#include "update_engine/mock_metrics_reporter.h"
#include "update_engine/mock_omaha_request_params.h"
#include "update_engine/mock_p2p_manager.h"
#include "update_engine/mock_payload_state.h"
@@ -66,8 +67,9 @@
inline HardwareInterface* hardware() override { return hardware_; }
- inline MetricsLibraryInterface* metrics_lib() override {
- return metrics_lib_;
+ inline MetricsReporterInterface* metrics_reporter() override {
+ CHECK(metrics_reporter_ != nullptr);
+ return metrics_reporter_;
}
inline PrefsInterface* prefs() override { return prefs_; }
@@ -122,8 +124,9 @@
hardware_ = hardware ? hardware : &fake_hardware_;
}
- inline void set_metrics_lib(MetricsLibraryInterface* metrics_lib) {
- metrics_lib_ = metrics_lib ? metrics_lib : &mock_metrics_lib_;
+ inline void set_metrics_reporter(MetricsReporterInterface* metrics_reporter) {
+ metrics_reporter_ =
+ metrics_reporter ? metrics_reporter : &mock_metrics_reporter_;
}
inline void set_prefs(PrefsInterface* prefs) {
@@ -187,9 +190,9 @@
return &fake_hardware_;
}
- inline testing::NiceMock<MetricsLibraryMock>* mock_metrics_lib() {
- CHECK(metrics_lib_ == &mock_metrics_lib_);
- return &mock_metrics_lib_;
+ inline testing::NiceMock<MockMetricsReporter>* mock_metrics_reporter() {
+ CHECK(metrics_reporter_ == &mock_metrics_reporter_);
+ return &mock_metrics_reporter_;
}
inline testing::NiceMock<MockPrefs> *mock_prefs() {
@@ -233,7 +236,7 @@
FakeClock fake_clock_;
testing::NiceMock<MockConnectionManager> mock_connection_manager_;
FakeHardware fake_hardware_;
- testing::NiceMock<MetricsLibraryMock> mock_metrics_lib_;
+ testing::NiceMock<MockMetricsReporter> mock_metrics_reporter_;
testing::NiceMock<MockPrefs> mock_prefs_;
testing::NiceMock<MockPrefs> mock_powerwash_safe_prefs_;
testing::NiceMock<MockPayloadState> mock_payload_state_;
@@ -249,7 +252,7 @@
ClockInterface* clock_;
ConnectionManagerInterface* connection_manager_;
HardwareInterface* hardware_;
- MetricsLibraryInterface* metrics_lib_;
+ MetricsReporterInterface* metrics_reporter_;
PrefsInterface* prefs_;
PrefsInterface* powerwash_safe_prefs_;
PayloadStateInterface* payload_state_;
diff --git a/hardware_android.cc b/hardware_android.cc
index 9490c24..947b13a 100644
--- a/hardware_android.cc
+++ b/hardware_android.cc
@@ -21,19 +21,22 @@
#include <sys/types.h>
#include <algorithm>
+#include <memory>
#include <bootloader.h>
+#include <android-base/properties.h>
#include <base/files/file_util.h>
#include <base/strings/stringprintf.h>
-#include <brillo/make_unique_ptr.h>
-#include <cutils/properties.h>
#include "update_engine/common/hardware.h"
#include "update_engine/common/platform_constants.h"
#include "update_engine/common/utils.h"
#include "update_engine/utils_android.h"
+using android::base::GetBoolProperty;
+using android::base::GetIntProperty;
+using android::base::GetProperty;
using std::string;
namespace chromeos_update_engine {
@@ -72,8 +75,7 @@
std::min(message.size(), sizeof(boot.recovery) - 1));
}
- int fd =
- HANDLE_EINTR(open(misc_device.value().c_str(), O_WRONLY | O_SYNC, 0600));
+ int fd = HANDLE_EINTR(open(misc_device.value().c_str(), O_WRONLY | O_SYNC));
if (fd < 0) {
PLOG(ERROR) << "Opening misc";
return false;
@@ -96,7 +98,7 @@
// Factory defined in hardware.h.
std::unique_ptr<HardwareInterface> CreateHardware() {
- return brillo::make_unique_ptr(new HardwareAndroid());
+ return std::make_unique<HardwareAndroid>();
}
} // namespace hardware
@@ -122,7 +124,7 @@
//
// In case of a non-bool value, we take the most restrictive option and
// assume we are in an official-build.
- return property_get_bool("ro.secure", 1) != 0;
+ return GetBoolProperty("ro.secure", true);
}
bool HardwareAndroid::IsNormalBootMode() const {
@@ -130,7 +132,7 @@
// update_engine will allow extra developers options, such as providing a
// different update URL. In case of error, we assume the build is in
// normal-mode.
- return property_get_bool("ro.debuggable", 0) != 1;
+ return !GetBoolProperty("ro.debuggable", false);
}
bool HardwareAndroid::AreDevFeaturesEnabled() const {
@@ -150,26 +152,19 @@
}
string HardwareAndroid::GetHardwareClass() const {
- char manufacturer[PROPERTY_VALUE_MAX];
- char sku[PROPERTY_VALUE_MAX];
- char revision[PROPERTY_VALUE_MAX];
- property_get(kPropBootHardwareSKU, sku, "");
- property_get(kPropProductManufacturer, manufacturer, "");
- property_get(kPropBootRevision, revision, "");
+ auto manufacturer = GetProperty(kPropProductManufacturer, "");
+ auto sku = GetProperty(kPropBootHardwareSKU, "");
+ auto revision = GetProperty(kPropBootRevision, "");
- return base::StringPrintf("%s:%s:%s", manufacturer, sku, revision);
+ return manufacturer + ":" + sku + ":" + revision;
}
string HardwareAndroid::GetFirmwareVersion() const {
- char bootloader[PROPERTY_VALUE_MAX];
- property_get(kPropBootBootloader, bootloader, "");
- return bootloader;
+ return GetProperty(kPropBootBootloader, "");
}
string HardwareAndroid::GetECVersion() const {
- char baseband[PROPERTY_VALUE_MAX];
- property_get(kPropBootBaseband, baseband, "");
- return baseband;
+ return GetProperty(kPropBootBaseband, "");
}
int HardwareAndroid::GetPowerwashCount() const {
@@ -202,7 +197,17 @@
}
int64_t HardwareAndroid::GetBuildTimestamp() const {
- return property_get_int64(kPropBuildDateUTC, 0);
+ return GetIntProperty<int64_t>(kPropBuildDateUTC, 0);
+}
+
+bool HardwareAndroid::GetFirstActiveOmahaPingSent() const {
+ LOG(WARNING) << "STUB: Assuming first active omaha was never set.";
+ return false;
+}
+
+void HardwareAndroid::SetFirstActiveOmahaPingSent() {
+ LOG(WARNING) << "STUB: Assuming first active omaha is never set.";
+ return;
}
} // namespace chromeos_update_engine
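
The cutils property_get() buffers above are replaced with the typed helpers from <android-base/properties.h>. A short sketch of the three getters as used in this file (only "ro.secure" appears verbatim in the diff; the other property names are illustrative stand-ins for the kProp* constants):

  #include <android-base/properties.h>

  // Sketch only: typed property reads with explicit defaults.
  std::string fingerprint =
      android::base::GetProperty("ro.build.fingerprint", /*default_value=*/"");
  bool secure =
      android::base::GetBoolProperty("ro.secure", /*default_value=*/true);
  int64_t build_date = android::base::GetIntProperty<int64_t>(
      "ro.build.date.utc", /*default_value=*/0);
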
diff --git a/hardware_android.h b/hardware_android.h
index 80fd9df..ca90b62 100644
--- a/hardware_android.h
+++ b/hardware_android.h
@@ -48,6 +48,8 @@
bool GetNonVolatileDirectory(base::FilePath* path) const override;
bool GetPowerwashSafeDirectory(base::FilePath* path) const override;
int64_t GetBuildTimestamp() const override;
+ bool GetFirstActiveOmahaPingSent() const override;
+ void SetFirstActiveOmahaPingSent() override;
private:
DISALLOW_COPY_AND_ASSIGN(HardwareAndroid);
diff --git a/hardware_chromeos.cc b/hardware_chromeos.cc
index f0f3ea9..f2bb28a 100644
--- a/hardware_chromeos.cc
+++ b/hardware_chromeos.cc
@@ -22,7 +22,6 @@
#include <base/strings/string_number_conversions.h>
#include <base/strings/string_util.h>
#include <brillo/key_value_store.h>
-#include <brillo/make_unique_ptr.h>
#include <debugd/dbus-constants.h>
#include <vboot/crossystem.h>
@@ -69,6 +68,8 @@
// UpdateManager config options:
const char* kConfigOptsIsOOBEEnabled = "is_oobe_enabled";
+const char* kActivePingKey = "first_active_omaha_ping_sent";
+
} // namespace
namespace chromeos_update_engine {
@@ -253,4 +254,49 @@
is_oobe_enabled_ = true; // Default value.
}
+bool HardwareChromeOS::GetFirstActiveOmahaPingSent() const {
+ int exit_code = 0;
+ string active_ping_str;
+ vector<string> cmd = { "vpd_get_value", kActivePingKey };
+ if (!Subprocess::SynchronousExec(cmd, &exit_code, &active_ping_str) ||
+ exit_code) {
+ LOG(ERROR) << "Failed to get vpd key for " << kActivePingKey
+ << " with exit code: " << exit_code;
+ return false;
+ }
+
+ base::TrimWhitespaceASCII(active_ping_str,
+ base::TRIM_ALL,
+ &active_ping_str);
+ int active_ping;
+ if (active_ping_str.empty() ||
+ !base::StringToInt(active_ping_str, &active_ping)) {
+ LOG(INFO) << "Failed to parse active_ping value: " << active_ping_str;
+ return false;
+ }
+ return static_cast<bool>(active_ping);
+}
+
+void HardwareChromeOS::SetFirstActiveOmahaPingSent() {
+ int exit_code = 0;
+ string output;
+ vector<string> vpd_set_cmd = {
+ "vpd", "-i", "RW_VPD", "-s", string(kActivePingKey) + "=1" };
+ if (!Subprocess::SynchronousExec(vpd_set_cmd, &exit_code, &output) ||
+ exit_code) {
+ LOG(ERROR) << "Failed to set vpd key for " << kActivePingKey
+ << " with exit code: " << exit_code
+ << " with error: " << output;
+ return;
+ }
+
+ vector<string> vpd_dump_cmd = { "dump_vpd_log", "--force" };
+ if (!Subprocess::SynchronousExec(vpd_dump_cmd, &exit_code, &output) ||
+ exit_code) {
+ LOG(ERROR) << "Failed to cache " << kActivePingKey << " using dump_vpd_log"
+ << " with exit code: " << exit_code
+ << " with error: " << output;
+ }
+}
+
} // namespace chromeos_update_engine
diff --git a/hardware_chromeos.h b/hardware_chromeos.h
index a756a9b..0cf1214 100644
--- a/hardware_chromeos.h
+++ b/hardware_chromeos.h
@@ -52,6 +52,8 @@
bool GetNonVolatileDirectory(base::FilePath* path) const override;
bool GetPowerwashSafeDirectory(base::FilePath* path) const override;
int64_t GetBuildTimestamp() const override;
+ bool GetFirstActiveOmahaPingSent() const override;
+ void SetFirstActiveOmahaPingSent() override;
private:
friend class HardwareChromeOSTest;
diff --git a/hardware_chromeos_unittest.cc b/hardware_chromeos_unittest.cc
index a6bad54..162dec4 100644
--- a/hardware_chromeos_unittest.cc
+++ b/hardware_chromeos_unittest.cc
@@ -37,21 +37,22 @@
void SetUp() override { ASSERT_TRUE(root_dir_.CreateUniqueTempDir()); }
void WriteStatefulConfig(const string& config) {
- base::FilePath kFile(root_dir_.path().value() + kStatefulPartition +
+ base::FilePath kFile(root_dir_.GetPath().value() + kStatefulPartition +
"/etc/update_manager.conf");
ASSERT_TRUE(base::CreateDirectory(kFile.DirName()));
ASSERT_TRUE(WriteFileString(kFile.value(), config));
}
void WriteRootfsConfig(const string& config) {
- base::FilePath kFile(root_dir_.path().value() + "/etc/update_manager.conf");
+ base::FilePath kFile(root_dir_.GetPath().value() +
+ "/etc/update_manager.conf");
ASSERT_TRUE(base::CreateDirectory(kFile.DirName()));
ASSERT_TRUE(WriteFileString(kFile.value(), config));
}
// Helper method to call HardwareChromeOS::LoadConfig with the test directory.
void CallLoadConfig(bool normal_mode) {
- hardware_.LoadConfig(root_dir_.path().value(), normal_mode);
+ hardware_.LoadConfig(root_dir_.GetPath().value(), normal_mode);
}
HardwareChromeOS hardware_;
diff --git a/image_properties.h b/image_properties.h
index 4f94eeb..f8fe095 100644
--- a/image_properties.h
+++ b/image_properties.h
@@ -41,6 +41,9 @@
// The system version of this image.
std::string system_version;
+ // The version of all product components in key values pairs.
+ std::string product_components;
+
// A unique string that identifies this build. Normally a combination of the
// the version, signing keys and build target.
std::string build_fingerprint;
@@ -76,8 +79,8 @@
// value may be returned instead.
ImageProperties LoadImageProperties(SystemState* system_state);
-// Loads the mutable image properties from the stateful partition if found or the
-// system image otherwise.
+// Loads the mutable image properties from the stateful partition if found or
+// the system image otherwise.
MutableImageProperties LoadMutableImageProperties(SystemState* system_state);
// Stores the mutable image properties in the stateful partition. Returns
@@ -85,6 +88,9 @@
bool StoreMutableImageProperties(SystemState* system_state,
const MutableImageProperties& properties);
+// Logs the image properties.
+void LogImageProperties();
+
// Sets the root_prefix used to load files from during unittests to
// |test_root_prefix|. Passing a nullptr value resets it to the default.
namespace test {
diff --git a/image_properties_android.cc b/image_properties_android.cc
index d52c40b..1ec425d 100644
--- a/image_properties_android.cc
+++ b/image_properties_android.cc
@@ -18,17 +18,19 @@
#include <string>
+#include <android-base/properties.h>
#include <base/logging.h>
#include <brillo/osrelease_reader.h>
#include <brillo/strings/string_utils.h>
-#include <cutils/properties.h>
#include "update_engine/common/boot_control_interface.h"
#include "update_engine/common/constants.h"
#include "update_engine/common/platform_constants.h"
#include "update_engine/common/prefs_interface.h"
+#include "update_engine/common/utils.h"
#include "update_engine/system_state.h"
+using android::base::GetProperty;
using std::string;
namespace chromeos_update_engine {
@@ -41,6 +43,10 @@
const char kSystemId[] = "system_id";
const char kSystemVersion[] = "system_version";
+// The path to the product_components file, which stores the version of each
+// component in the OEM partition.
+const char kProductComponentsPath[] = "/oem/os-release.d/product_components";
+
// Prefs used to store the target channel and powerwash settings.
const char kPrefsImgPropChannelName[] = "img-prop-channel-name";
const char kPrefsImgPropPowerwashAllowed[] = "img-prop-powerwash-allowed";
@@ -96,16 +102,12 @@
result.version = GetStringWithDefault(osrelease, kProductVersion, "0.0.0.0");
result.system_version =
GetStringWithDefault(osrelease, kSystemVersion, "0.0.0.0");
+ // Can't read it with OsReleaseReader because it has multiple lines.
+ utils::ReadFile(kProductComponentsPath, &result.product_components);
- char prop[PROPERTY_VALUE_MAX];
- property_get(kPropProductName, prop, "brillo");
- result.board = prop;
-
- property_get(kPropBuildFingerprint, prop, "none");
- result.build_fingerprint = prop;
-
- property_get(kPropBuildType, prop, "");
- result.build_type = prop;
+ result.board = GetProperty(kPropProductName, "brillo");
+ result.build_fingerprint = GetProperty(kPropBuildFingerprint, "none");
+ result.build_type = GetProperty(kPropBuildType, "");
// Brillo images don't have a channel assigned. We stored the name of the
// channel where we got the image from in prefs at the time of the update, so
@@ -147,4 +149,8 @@
properties.is_powerwash_allowed));
}
+void LogImageProperties() {
+ // TODO(*): Implement this.
+}
+
} // namespace chromeos_update_engine
diff --git a/image_properties_android_unittest.cc b/image_properties_android_unittest.cc
index 9bbb8b0..7327554 100644
--- a/image_properties_android_unittest.cc
+++ b/image_properties_android_unittest.cc
@@ -36,9 +36,9 @@
void SetUp() override {
// Create a uniquely named test directory.
ASSERT_TRUE(tempdir_.CreateUniqueTempDir());
- osrelease_dir_ = tempdir_.path().Append("etc/os-release.d");
+ osrelease_dir_ = tempdir_.GetPath().Append("etc/os-release.d");
EXPECT_TRUE(base::CreateDirectory(osrelease_dir_));
- test::SetImagePropertiesRootPrefix(tempdir_.path().value().c_str());
+ test::SetImagePropertiesRootPrefix(tempdir_.GetPath().value().c_str());
}
void WriteOsRelease(const string& key, const string& value) {
diff --git a/image_properties_chromeos.cc b/image_properties_chromeos.cc
index 6bab63f..39ddeb3 100644
--- a/image_properties_chromeos.cc
+++ b/image_properties_chromeos.cc
@@ -26,6 +26,7 @@
#include "update_engine/common/constants.h"
#include "update_engine/common/hardware_interface.h"
#include "update_engine/common/platform_constants.h"
+#include "update_engine/common/utils.h"
#include "update_engine/system_state.h"
namespace {
@@ -149,4 +150,17 @@
return lsb_release.Save(path);
}
+void LogImageProperties() {
+ std::string lsb_release;
+ if (utils::ReadFile(kLsbRelease, &lsb_release)) {
+ LOG(INFO) << "lsb-release inside the old rootfs:\n" << lsb_release;
+ }
+
+ std::string stateful_lsb_release;
+ if (utils::ReadFile(std::string(kStatefulPartition) + kLsbRelease,
+ &stateful_lsb_release)) {
+ LOG(INFO) << "stateful lsb-release:\n" << stateful_lsb_release;
+ }
+}
+
} // namespace chromeos_update_engine
diff --git a/image_properties_chromeos_unittest.cc b/image_properties_chromeos_unittest.cc
index 12c2039..d9ed688 100644
--- a/image_properties_chromeos_unittest.cc
+++ b/image_properties_chromeos_unittest.cc
@@ -36,10 +36,10 @@
void SetUp() override {
// Create a uniquely named test directory.
ASSERT_TRUE(tempdir_.CreateUniqueTempDir());
- EXPECT_TRUE(base::CreateDirectory(tempdir_.path().Append("etc")));
- EXPECT_TRUE(base::CreateDirectory(
- base::FilePath(tempdir_.path().value() + kStatefulPartition + "/etc")));
- test::SetImagePropertiesRootPrefix(tempdir_.path().value().c_str());
+ EXPECT_TRUE(base::CreateDirectory(tempdir_.GetPath().Append("etc")));
+ EXPECT_TRUE(base::CreateDirectory(base::FilePath(
+ tempdir_.GetPath().value() + kStatefulPartition + "/etc")));
+ test::SetImagePropertiesRootPrefix(tempdir_.GetPath().value().c_str());
SetLockDown(false);
}
@@ -54,12 +54,13 @@
};
TEST_F(ImagePropertiesTest, SimpleTest) {
- ASSERT_TRUE(WriteFileString(tempdir_.path().Append("etc/lsb-release").value(),
- "CHROMEOS_RELEASE_BOARD=arm-generic\n"
- "CHROMEOS_RELEASE_FOO=bar\n"
- "CHROMEOS_RELEASE_VERSION=0.2.2.3\n"
- "CHROMEOS_RELEASE_TRACK=dev-channel\n"
- "CHROMEOS_AUSERVER=http://www.google.com"));
+ ASSERT_TRUE(
+ WriteFileString(tempdir_.GetPath().Append("etc/lsb-release").value(),
+ "CHROMEOS_RELEASE_BOARD=arm-generic\n"
+ "CHROMEOS_RELEASE_FOO=bar\n"
+ "CHROMEOS_RELEASE_VERSION=0.2.2.3\n"
+ "CHROMEOS_RELEASE_TRACK=dev-channel\n"
+ "CHROMEOS_AUSERVER=http://www.google.com"));
ImageProperties props = LoadImageProperties(&fake_system_state_);
EXPECT_EQ("arm-generic", props.board);
EXPECT_EQ("{87efface-864d-49a5-9bb3-4b050a7c227a}", props.product_id);
@@ -70,7 +71,7 @@
TEST_F(ImagePropertiesTest, AppIDTest) {
ASSERT_TRUE(WriteFileString(
- tempdir_.path().Append("etc/lsb-release").value(),
+ tempdir_.GetPath().Append("etc/lsb-release").value(),
"CHROMEOS_RELEASE_APPID={58c35cef-9d30-476e-9098-ce20377d535d}"));
ImageProperties props = LoadImageProperties(&fake_system_state_);
EXPECT_EQ("{58c35cef-9d30-476e-9098-ce20377d535d}", props.product_id);
@@ -78,7 +79,7 @@
TEST_F(ImagePropertiesTest, ConfusingReleaseTest) {
ASSERT_TRUE(
- WriteFileString(tempdir_.path().Append("etc/lsb-release").value(),
+ WriteFileString(tempdir_.GetPath().Append("etc/lsb-release").value(),
"CHROMEOS_RELEASE_FOO=CHROMEOS_RELEASE_VERSION=1.2.3.4\n"
"CHROMEOS_RELEASE_VERSION=0.2.2.3"));
ImageProperties props = LoadImageProperties(&fake_system_state_);
@@ -91,13 +92,14 @@
}
TEST_F(ImagePropertiesTest, OverrideTest) {
- ASSERT_TRUE(WriteFileString(tempdir_.path().Append("etc/lsb-release").value(),
- "CHROMEOS_RELEASE_BOARD=arm-generic\n"
- "CHROMEOS_RELEASE_FOO=bar\n"
- "CHROMEOS_RELEASE_TRACK=dev-channel\n"
- "CHROMEOS_AUSERVER=http://www.google.com"));
+ ASSERT_TRUE(
+ WriteFileString(tempdir_.GetPath().Append("etc/lsb-release").value(),
+ "CHROMEOS_RELEASE_BOARD=arm-generic\n"
+ "CHROMEOS_RELEASE_FOO=bar\n"
+ "CHROMEOS_RELEASE_TRACK=dev-channel\n"
+ "CHROMEOS_AUSERVER=http://www.google.com"));
ASSERT_TRUE(WriteFileString(
- tempdir_.path().value() + kStatefulPartition + "/etc/lsb-release",
+ tempdir_.GetPath().value() + kStatefulPartition + "/etc/lsb-release",
"CHROMEOS_RELEASE_BOARD=x86-generic\n"
"CHROMEOS_RELEASE_TRACK=beta-channel\n"
"CHROMEOS_AUSERVER=https://www.google.com"));
@@ -111,13 +113,14 @@
}
TEST_F(ImagePropertiesTest, OverrideLockDownTest) {
- ASSERT_TRUE(WriteFileString(tempdir_.path().Append("etc/lsb-release").value(),
- "CHROMEOS_RELEASE_BOARD=arm-generic\n"
- "CHROMEOS_RELEASE_FOO=bar\n"
- "CHROMEOS_RELEASE_TRACK=dev-channel\n"
- "CHROMEOS_AUSERVER=https://www.google.com"));
+ ASSERT_TRUE(
+ WriteFileString(tempdir_.GetPath().Append("etc/lsb-release").value(),
+ "CHROMEOS_RELEASE_BOARD=arm-generic\n"
+ "CHROMEOS_RELEASE_FOO=bar\n"
+ "CHROMEOS_RELEASE_TRACK=dev-channel\n"
+ "CHROMEOS_AUSERVER=https://www.google.com"));
ASSERT_TRUE(WriteFileString(
- tempdir_.path().value() + kStatefulPartition + "/etc/lsb-release",
+ tempdir_.GetPath().value() + kStatefulPartition + "/etc/lsb-release",
"CHROMEOS_RELEASE_BOARD=x86-generic\n"
"CHROMEOS_RELEASE_TRACK=stable-channel\n"
"CHROMEOS_AUSERVER=http://www.google.com"));
@@ -132,32 +135,35 @@
}
TEST_F(ImagePropertiesTest, BoardAppIdUsedForNonCanaryChannelTest) {
- ASSERT_TRUE(WriteFileString(tempdir_.path().Append("etc/lsb-release").value(),
- "CHROMEOS_RELEASE_APPID=r\n"
- "CHROMEOS_BOARD_APPID=b\n"
- "CHROMEOS_CANARY_APPID=c\n"
- "CHROMEOS_RELEASE_TRACK=stable-channel\n"));
+ ASSERT_TRUE(
+ WriteFileString(tempdir_.GetPath().Append("etc/lsb-release").value(),
+ "CHROMEOS_RELEASE_APPID=r\n"
+ "CHROMEOS_BOARD_APPID=b\n"
+ "CHROMEOS_CANARY_APPID=c\n"
+ "CHROMEOS_RELEASE_TRACK=stable-channel\n"));
ImageProperties props = LoadImageProperties(&fake_system_state_);
EXPECT_EQ("stable-channel", props.current_channel);
EXPECT_EQ("b", props.product_id);
}
TEST_F(ImagePropertiesTest, CanaryAppIdUsedForCanaryChannelTest) {
- ASSERT_TRUE(WriteFileString(tempdir_.path().Append("etc/lsb-release").value(),
- "CHROMEOS_RELEASE_APPID=r\n"
- "CHROMEOS_BOARD_APPID=b\n"
- "CHROMEOS_CANARY_APPID=c\n"
- "CHROMEOS_RELEASE_TRACK=canary-channel\n"));
+ ASSERT_TRUE(
+ WriteFileString(tempdir_.GetPath().Append("etc/lsb-release").value(),
+ "CHROMEOS_RELEASE_APPID=r\n"
+ "CHROMEOS_BOARD_APPID=b\n"
+ "CHROMEOS_CANARY_APPID=c\n"
+ "CHROMEOS_RELEASE_TRACK=canary-channel\n"));
ImageProperties props = LoadImageProperties(&fake_system_state_);
EXPECT_EQ("canary-channel", props.current_channel);
EXPECT_EQ("c", props.canary_product_id);
}
TEST_F(ImagePropertiesTest, ReleaseAppIdUsedAsDefaultTest) {
- ASSERT_TRUE(WriteFileString(tempdir_.path().Append("etc/lsb-release").value(),
- "CHROMEOS_RELEASE_APPID=r\n"
- "CHROMEOS_CANARY_APPID=c\n"
- "CHROMEOS_RELEASE_TRACK=stable-channel\n"));
+ ASSERT_TRUE(
+ WriteFileString(tempdir_.GetPath().Append("etc/lsb-release").value(),
+ "CHROMEOS_RELEASE_APPID=r\n"
+ "CHROMEOS_CANARY_APPID=c\n"
+ "CHROMEOS_RELEASE_TRACK=stable-channel\n"));
ImageProperties props = LoadImageProperties(&fake_system_state_);
EXPECT_EQ("stable-channel", props.current_channel);
EXPECT_EQ("r", props.product_id);
diff --git a/libcurl_http_fetcher.cc b/libcurl_http_fetcher.cc
index eac6ea0..87f30ad 100644
--- a/libcurl_http_fetcher.cc
+++ b/libcurl_http_fetcher.cc
@@ -272,7 +272,7 @@
} else if (base::StartsWith(
url_, "https://", base::CompareCase::INSENSITIVE_ASCII)) {
SetCurlOptionsForHttps();
-#if !defined(__CHROMEOS__) && !defined(__BRILLO__)
+#if !USE_OMAHA
} else if (base::StartsWith(
url_, "file://", base::CompareCase::INSENSITIVE_ASCII)) {
SetCurlOptionsForFile();
diff --git a/main.cc b/main.cc
index 4275bc1..0b96307 100644
--- a/main.cc
+++ b/main.cc
@@ -38,6 +38,7 @@
namespace chromeos_update_engine {
namespace {
+#ifndef __ANDROID__
void SetupLogSymlink(const string& symlink_path, const string& log_path) {
// TODO(petkov): To ensure a smooth transition between non-timestamped and
// timestamped logs, move an existing log to start the first timestamped
@@ -75,30 +76,41 @@
SetupLogSymlink(kLogSymlink, kLogPath);
return kLogSymlink;
}
+#endif // __ANDROID__
-void SetupLogging(bool log_to_std_err) {
- string log_file;
+void SetupLogging(bool log_to_system, bool log_to_file) {
logging::LoggingSettings log_settings;
log_settings.lock_log = logging::DONT_LOCK_LOG_FILE;
- log_settings.delete_old = logging::APPEND_TO_OLD_LOG_FILE;
+ log_settings.logging_dest = static_cast<logging::LoggingDestination>(
+ (log_to_system ? logging::LOG_TO_SYSTEM_DEBUG_LOG : 0) |
+ (log_to_file ? logging::LOG_TO_FILE : 0));
+ log_settings.log_file = nullptr;
- if (log_to_std_err) {
- // Log to stderr initially.
- log_settings.log_file = nullptr;
- log_settings.logging_dest = logging::LOG_TO_SYSTEM_DEBUG_LOG;
- } else {
+ string log_file;
+ if (log_to_file) {
+#ifdef __ANDROID__
+ log_file = "/data/misc/update_engine_log/update_engine.log";
+ log_settings.delete_old = logging::DELETE_OLD_LOG_FILE;
+#else
log_file = SetupLogFile("/var/log");
+ log_settings.delete_old = logging::APPEND_TO_OLD_LOG_FILE;
+#endif // __ANDROID__
log_settings.log_file = log_file.c_str();
- log_settings.logging_dest = logging::LOG_TO_FILE;
}
-
logging::InitLogging(log_settings);
+
+#ifdef __ANDROID__
+ // The log file will have AID_LOG as group ID; this GID is inherited from the
+ // parent directory "/data/misc/update_engine_log" which sets the SGID bit.
+ chmod(log_file.c_str(), 0640);
+#endif
}
} // namespace
} // namespace chromeos_update_engine
int main(int argc, char** argv) {
+ DEFINE_bool(logtofile, false, "Write logs to a file in log_dir.");
DEFINE_bool(logtostderr, false,
"Write logs to stderr instead of to a file in log_dir.");
DEFINE_bool(foreground, false,
@@ -106,7 +118,15 @@
chromeos_update_engine::Terminator::Init();
brillo::FlagHelper::Init(argc, argv, "Chromium OS Update Engine");
- chromeos_update_engine::SetupLogging(FLAGS_logtostderr);
+
+ // We have two logging flags, "--logtostderr" and "--logtofile"; the logic
+ // to choose the logging destination is:
+ // 1. --logtostderr --logtofile -> logs to both
+ // 2. --logtostderr -> logs to system debug
+ // 3. --logtofile or no flags -> logs to file
+ bool log_to_system = FLAGS_logtostderr;
+ bool log_to_file = FLAGS_logtofile || !FLAGS_logtostderr;
+ chromeos_update_engine::SetupLogging(log_to_system, log_to_file);
if (!FLAGS_foreground)
PLOG_IF(FATAL, daemon(0, 0) == 1) << "daemon() failed";
diff --git a/metrics.cc b/metrics.cc
deleted file mode 100644
index 742ba7e..0000000
--- a/metrics.cc
+++ /dev/null
@@ -1,526 +0,0 @@
-//
-// Copyright (C) 2014 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#include "update_engine/metrics.h"
-
-#include <string>
-
-#include <base/logging.h>
-#include <metrics/metrics_library.h>
-
-#include "update_engine/common/clock_interface.h"
-#include "update_engine/common/constants.h"
-#include "update_engine/common/prefs_interface.h"
-#include "update_engine/common/utils.h"
-#include "update_engine/metrics_utils.h"
-#include "update_engine/system_state.h"
-
-using std::string;
-
-namespace chromeos_update_engine {
-
-namespace metrics {
-
-// UpdateEngine.Daily.* metrics.
-const char kMetricDailyOSAgeDays[] = "UpdateEngine.Daily.OSAgeDays";
-
-// UpdateEngine.Check.* metrics.
-const char kMetricCheckDownloadErrorCode[] =
- "UpdateEngine.Check.DownloadErrorCode";
-const char kMetricCheckReaction[] = "UpdateEngine.Check.Reaction";
-const char kMetricCheckResult[] = "UpdateEngine.Check.Result";
-const char kMetricCheckTimeSinceLastCheckMinutes[] =
- "UpdateEngine.Check.TimeSinceLastCheckMinutes";
-const char kMetricCheckTimeSinceLastCheckUptimeMinutes[] =
- "UpdateEngine.Check.TimeSinceLastCheckUptimeMinutes";
-
-// UpdateEngine.Attempt.* metrics.
-const char kMetricAttemptNumber[] = "UpdateEngine.Attempt.Number";
-const char kMetricAttemptPayloadType[] =
- "UpdateEngine.Attempt.PayloadType";
-const char kMetricAttemptPayloadSizeMiB[] =
- "UpdateEngine.Attempt.PayloadSizeMiB";
-const char kMetricAttemptConnectionType[] =
- "UpdateEngine.Attempt.ConnectionType";
-const char kMetricAttemptDurationMinutes[] =
- "UpdateEngine.Attempt.DurationMinutes";
-const char kMetricAttemptDurationUptimeMinutes[] =
- "UpdateEngine.Attempt.DurationUptimeMinutes";
-const char kMetricAttemptTimeSinceLastAttemptMinutes[] =
- "UpdateEngine.Attempt.TimeSinceLastAttemptMinutes";
-const char kMetricAttemptTimeSinceLastAttemptUptimeMinutes[] =
- "UpdateEngine.Attempt.TimeSinceLastAttemptUptimeMinutes";
-const char kMetricAttemptPayloadBytesDownloadedMiB[] =
- "UpdateEngine.Attempt.PayloadBytesDownloadedMiB";
-const char kMetricAttemptPayloadDownloadSpeedKBps[] =
- "UpdateEngine.Attempt.PayloadDownloadSpeedKBps";
-const char kMetricAttemptDownloadSource[] =
- "UpdateEngine.Attempt.DownloadSource";
-const char kMetricAttemptResult[] =
- "UpdateEngine.Attempt.Result";
-const char kMetricAttemptInternalErrorCode[] =
- "UpdateEngine.Attempt.InternalErrorCode";
-const char kMetricAttemptDownloadErrorCode[] =
- "UpdateEngine.Attempt.DownloadErrorCode";
-
-// UpdateEngine.SuccessfulUpdate.* metrics.
-const char kMetricSuccessfulUpdateAttemptCount[] =
- "UpdateEngine.SuccessfulUpdate.AttemptCount";
-const char kMetricSuccessfulUpdateBytesDownloadedMiB[] =
- "UpdateEngine.SuccessfulUpdate.BytesDownloadedMiB";
-const char kMetricSuccessfulUpdateDownloadOverheadPercentage[] =
- "UpdateEngine.SuccessfulUpdate.DownloadOverheadPercentage";
-const char kMetricSuccessfulUpdateDownloadSourcesUsed[] =
- "UpdateEngine.SuccessfulUpdate.DownloadSourcesUsed";
-const char kMetricSuccessfulUpdatePayloadType[] =
- "UpdateEngine.SuccessfulUpdate.PayloadType";
-const char kMetricSuccessfulUpdatePayloadSizeMiB[] =
- "UpdateEngine.SuccessfulUpdate.PayloadSizeMiB";
-const char kMetricSuccessfulUpdateRebootCount[] =
- "UpdateEngine.SuccessfulUpdate.RebootCount";
-const char kMetricSuccessfulUpdateTotalDurationMinutes[] =
- "UpdateEngine.SuccessfulUpdate.TotalDurationMinutes";
-const char kMetricSuccessfulUpdateUpdatesAbandonedCount[] =
- "UpdateEngine.SuccessfulUpdate.UpdatesAbandonedCount";
-const char kMetricSuccessfulUpdateUrlSwitchCount[] =
- "UpdateEngine.SuccessfulUpdate.UrlSwitchCount";
-
-// UpdateEngine.Rollback.* metric.
-const char kMetricRollbackResult[] = "UpdateEngine.Rollback.Result";
-
-// UpdateEngine.CertificateCheck.* metrics.
-const char kMetricCertificateCheckUpdateCheck[] =
- "UpdateEngine.CertificateCheck.UpdateCheck";
-const char kMetricCertificateCheckDownload[] =
- "UpdateEngine.CertificateCheck.Download";
-
-// UpdateEngine.* metrics.
-const char kMetricFailedUpdateCount[] = "UpdateEngine.FailedUpdateCount";
-const char kMetricInstallDateProvisioningSource[] =
- "UpdateEngine.InstallDateProvisioningSource";
-const char kMetricTimeToRebootMinutes[] =
- "UpdateEngine.TimeToRebootMinutes";
-
-void ReportDailyMetrics(SystemState *system_state,
- base::TimeDelta os_age) {
- string metric = metrics::kMetricDailyOSAgeDays;
- LOG(INFO) << "Uploading " << utils::FormatTimeDelta(os_age)
- << " for metric " << metric;
- system_state->metrics_lib()->SendToUMA(
- metric,
- static_cast<int>(os_age.InDays()),
- 0, // min: 0 days
- 6*30, // max: 6 months (approx)
- 50); // num_buckets
-}
-
-void ReportUpdateCheckMetrics(SystemState *system_state,
- CheckResult result,
- CheckReaction reaction,
- DownloadErrorCode download_error_code) {
- string metric;
- int value;
- int max_value;
-
- if (result != metrics::CheckResult::kUnset) {
- metric = metrics::kMetricCheckResult;
- value = static_cast<int>(result);
- max_value = static_cast<int>(metrics::CheckResult::kNumConstants) - 1;
- LOG(INFO) << "Sending " << value << " for metric " << metric << " (enum)";
- system_state->metrics_lib()->SendEnumToUMA(metric, value, max_value);
- }
- if (reaction != metrics::CheckReaction::kUnset) {
- metric = metrics::kMetricCheckReaction;
- value = static_cast<int>(reaction);
- max_value = static_cast<int>(metrics::CheckReaction::kNumConstants) - 1;
- LOG(INFO) << "Sending " << value << " for metric " << metric << " (enum)";
- system_state->metrics_lib()->SendEnumToUMA(metric, value, max_value);
- }
- if (download_error_code != metrics::DownloadErrorCode::kUnset) {
- metric = metrics::kMetricCheckDownloadErrorCode;
- value = static_cast<int>(download_error_code);
- LOG(INFO) << "Sending " << value << " for metric " << metric << " (sparse)";
- system_state->metrics_lib()->SendSparseToUMA(metric, value);
- }
-
- base::TimeDelta time_since_last;
- if (metrics_utils::WallclockDurationHelper(
- system_state,
- kPrefsMetricsCheckLastReportingTime,
- &time_since_last)) {
- metric = kMetricCheckTimeSinceLastCheckMinutes;
- LOG(INFO) << "Sending " << utils::FormatTimeDelta(time_since_last)
- << " for metric " << metric;
- system_state->metrics_lib()->SendToUMA(
- metric,
- time_since_last.InMinutes(),
- 0, // min: 0 min
- 30*24*60, // max: 30 days
- 50); // num_buckets
- }
-
- base::TimeDelta uptime_since_last;
- static int64_t uptime_since_last_storage = 0;
- if (metrics_utils::MonotonicDurationHelper(system_state,
- &uptime_since_last_storage,
- &uptime_since_last)) {
- metric = kMetricCheckTimeSinceLastCheckUptimeMinutes;
- LOG(INFO) << "Sending " << utils::FormatTimeDelta(uptime_since_last)
- << " for metric " << metric;
- system_state->metrics_lib()->SendToUMA(
- metric,
- uptime_since_last.InMinutes(),
- 0, // min: 0 min
- 30*24*60, // max: 30 days
- 50); // num_buckets
- }
-}
-
-void ReportAbnormallyTerminatedUpdateAttemptMetrics(
- SystemState *system_state) {
-
- string metric = metrics::kMetricAttemptResult;
- AttemptResult attempt_result = AttemptResult::kAbnormalTermination;
-
- LOG(INFO) << "Uploading " << static_cast<int>(attempt_result)
- << " for metric " << metric;
- system_state->metrics_lib()->SendEnumToUMA(
- metric,
- static_cast<int>(attempt_result),
- static_cast<int>(AttemptResult::kNumConstants));
-}
-
-void ReportUpdateAttemptMetrics(
- SystemState *system_state,
- int attempt_number,
- PayloadType payload_type,
- base::TimeDelta duration,
- base::TimeDelta duration_uptime,
- int64_t payload_size,
- int64_t payload_bytes_downloaded,
- int64_t payload_download_speed_bps,
- DownloadSource download_source,
- AttemptResult attempt_result,
- ErrorCode internal_error_code,
- DownloadErrorCode payload_download_error_code,
- ConnectionType connection_type) {
- string metric;
-
- metric = metrics::kMetricAttemptNumber;
- LOG(INFO) << "Uploading " << attempt_number << " for metric " << metric;
- system_state->metrics_lib()->SendToUMA(metric,
- attempt_number,
- 0, // min: 0 attempts
- 49, // max: 49 attempts
- 50); // num_buckets
-
- metric = metrics::kMetricAttemptPayloadType;
- LOG(INFO) << "Uploading " << utils::ToString(payload_type)
- << " for metric " << metric;
- system_state->metrics_lib()->SendEnumToUMA(metric,
- payload_type,
- kNumPayloadTypes);
-
- metric = metrics::kMetricAttemptDurationMinutes;
- LOG(INFO) << "Uploading " << utils::FormatTimeDelta(duration)
- << " for metric " << metric;
- system_state->metrics_lib()->SendToUMA(metric,
- duration.InMinutes(),
- 0, // min: 0 min
- 10*24*60, // max: 10 days
- 50); // num_buckets
-
- metric = metrics::kMetricAttemptDurationUptimeMinutes;
- LOG(INFO) << "Uploading " << utils::FormatTimeDelta(duration_uptime)
- << " for metric " << metric;
- system_state->metrics_lib()->SendToUMA(metric,
- duration_uptime.InMinutes(),
- 0, // min: 0 min
- 10*24*60, // max: 10 days
- 50); // num_buckets
-
- metric = metrics::kMetricAttemptPayloadSizeMiB;
- int64_t payload_size_mib = payload_size / kNumBytesInOneMiB;
- LOG(INFO) << "Uploading " << payload_size_mib << " for metric " << metric;
- system_state->metrics_lib()->SendToUMA(metric,
- payload_size_mib,
- 0, // min: 0 MiB
- 1024, // max: 1024 MiB = 1 GiB
- 50); // num_buckets
-
- metric = metrics::kMetricAttemptPayloadBytesDownloadedMiB;
- int64_t payload_bytes_downloaded_mib =
- payload_bytes_downloaded / kNumBytesInOneMiB;
- LOG(INFO) << "Uploading " << payload_bytes_downloaded_mib
- << " for metric " << metric;
- system_state->metrics_lib()->SendToUMA(metric,
- payload_bytes_downloaded_mib,
- 0, // min: 0 MiB
- 1024, // max: 1024 MiB = 1 GiB
- 50); // num_buckets
-
- metric = metrics::kMetricAttemptPayloadDownloadSpeedKBps;
- int64_t payload_download_speed_kbps = payload_download_speed_bps / 1000;
- LOG(INFO) << "Uploading " << payload_download_speed_kbps
- << " for metric " << metric;
- system_state->metrics_lib()->SendToUMA(metric,
- payload_download_speed_kbps,
- 0, // min: 0 kB/s
- 10*1000, // max: 10000 kB/s = 10 MB/s
- 50); // num_buckets
-
- metric = metrics::kMetricAttemptDownloadSource;
- LOG(INFO) << "Uploading " << download_source
- << " for metric " << metric;
- system_state->metrics_lib()->SendEnumToUMA(metric,
- download_source,
- kNumDownloadSources);
-
- metric = metrics::kMetricAttemptResult;
- LOG(INFO) << "Uploading " << static_cast<int>(attempt_result)
- << " for metric " << metric;
- system_state->metrics_lib()->SendEnumToUMA(
- metric,
- static_cast<int>(attempt_result),
- static_cast<int>(AttemptResult::kNumConstants));
-
- if (internal_error_code != ErrorCode::kSuccess) {
- metric = metrics::kMetricAttemptInternalErrorCode;
- LOG(INFO) << "Uploading " << internal_error_code
- << " for metric " << metric;
- system_state->metrics_lib()->SendEnumToUMA(
- metric,
- static_cast<int>(internal_error_code),
- static_cast<int>(ErrorCode::kUmaReportedMax));
- }
-
- if (payload_download_error_code != DownloadErrorCode::kUnset) {
- metric = metrics::kMetricAttemptDownloadErrorCode;
- LOG(INFO) << "Uploading " << static_cast<int>(payload_download_error_code)
- << " for metric " << metric << " (sparse)";
- system_state->metrics_lib()->SendSparseToUMA(
- metric,
- static_cast<int>(payload_download_error_code));
- }
-
- base::TimeDelta time_since_last;
- if (metrics_utils::WallclockDurationHelper(
- system_state,
- kPrefsMetricsAttemptLastReportingTime,
- &time_since_last)) {
- metric = kMetricAttemptTimeSinceLastAttemptMinutes;
- LOG(INFO) << "Sending " << utils::FormatTimeDelta(time_since_last)
- << " for metric " << metric;
- system_state->metrics_lib()->SendToUMA(
- metric,
- time_since_last.InMinutes(),
- 0, // min: 0 min
- 30*24*60, // max: 30 days
- 50); // num_buckets
- }
-
- static int64_t uptime_since_last_storage = 0;
- base::TimeDelta uptime_since_last;
- if (metrics_utils::MonotonicDurationHelper(system_state,
- &uptime_since_last_storage,
- &uptime_since_last)) {
- metric = kMetricAttemptTimeSinceLastAttemptUptimeMinutes;
- LOG(INFO) << "Sending " << utils::FormatTimeDelta(uptime_since_last)
- << " for metric " << metric;
- system_state->metrics_lib()->SendToUMA(
- metric,
- uptime_since_last.InMinutes(),
- 0, // min: 0 min
- 30*24*60, // max: 30 days
- 50); // num_buckets
- }
-
- metric = metrics::kMetricAttemptConnectionType;
- LOG(INFO) << "Uploading " << static_cast<int>(connection_type)
- << " for metric " << metric;
- system_state->metrics_lib()->SendEnumToUMA(
- metric,
- static_cast<int>(connection_type),
- static_cast<int>(ConnectionType::kNumConstants));
-}
-
-
-void ReportSuccessfulUpdateMetrics(
- SystemState *system_state,
- int attempt_count,
- int updates_abandoned_count,
- PayloadType payload_type,
- int64_t payload_size,
- int64_t num_bytes_downloaded[kNumDownloadSources],
- int download_overhead_percentage,
- base::TimeDelta total_duration,
- int reboot_count,
- int url_switch_count) {
- string metric;
- int64_t mbs;
-
- metric = kMetricSuccessfulUpdatePayloadSizeMiB;
- mbs = payload_size / kNumBytesInOneMiB;
- LOG(INFO) << "Uploading " << mbs << " (MiBs) for metric " << metric;
- system_state->metrics_lib()->SendToUMA(metric,
- mbs,
- 0, // min: 0 MiB
- 1024, // max: 1024 MiB = 1 GiB
- 50); // num_buckets
-
- int64_t total_bytes = 0;
- int download_sources_used = 0;
- for (int i = 0; i < kNumDownloadSources + 1; i++) {
- DownloadSource source = static_cast<DownloadSource>(i);
-
- // Only consider this download source (and send byte counts) as
- // having been used if we downloaded a non-trivial amount of bytes
- // (e.g. at least 1 MiB) that contributed to the
- // update. Otherwise we're going to end up with a lot of zero-byte
- // events in the histogram.
-
- metric = metrics::kMetricSuccessfulUpdateBytesDownloadedMiB;
- if (i < kNumDownloadSources) {
- metric += utils::ToString(source);
- mbs = num_bytes_downloaded[i] / kNumBytesInOneMiB;
- total_bytes += num_bytes_downloaded[i];
- if (mbs > 0)
- download_sources_used |= (1 << i);
- } else {
- mbs = total_bytes / kNumBytesInOneMiB;
- }
-
- if (mbs > 0) {
- LOG(INFO) << "Uploading " << mbs << " (MiBs) for metric " << metric;
- system_state->metrics_lib()->SendToUMA(metric,
- mbs,
- 0, // min: 0 MiB
- 1024, // max: 1024 MiB = 1 GiB
- 50); // num_buckets
- }
- }
-
- metric = metrics::kMetricSuccessfulUpdateDownloadSourcesUsed;
- LOG(INFO) << "Uploading 0x" << std::hex << download_sources_used
- << " (bit flags) for metric " << metric;
- system_state->metrics_lib()->SendToUMA(
- metric,
- download_sources_used,
- 0, // min
- (1 << kNumDownloadSources) - 1, // max
- 1 << kNumDownloadSources); // num_buckets
-
- metric = metrics::kMetricSuccessfulUpdateDownloadOverheadPercentage;
- LOG(INFO) << "Uploading " << download_overhead_percentage
- << "% for metric " << metric;
- system_state->metrics_lib()->SendToUMA(metric,
- download_overhead_percentage,
- 0, // min: 0% overhead
- 1000, // max: 1000% overhead
- 50); // num_buckets
-
- metric = metrics::kMetricSuccessfulUpdateUrlSwitchCount;
- LOG(INFO) << "Uploading " << url_switch_count
- << " (count) for metric " << metric;
- system_state->metrics_lib()->SendToUMA(metric,
- url_switch_count,
- 0, // min: 0 URL switches
- 49, // max: 49 URL switches
- 50); // num_buckets
-
- metric = metrics::kMetricSuccessfulUpdateTotalDurationMinutes;
- LOG(INFO) << "Uploading " << utils::FormatTimeDelta(total_duration)
- << " for metric " << metric;
- system_state->metrics_lib()->SendToUMA(
- metric,
- static_cast<int>(total_duration.InMinutes()),
- 0, // min: 0 min
- 365*24*60, // max: 365 days ~= 1 year
- 50); // num_buckets
-
- metric = metrics::kMetricSuccessfulUpdateRebootCount;
- LOG(INFO) << "Uploading reboot count of " << reboot_count << " for metric "
- << metric;
- system_state->metrics_lib()->SendToUMA(metric,
- reboot_count,
- 0, // min: 0 reboots
- 49, // max: 49 reboots
- 50); // num_buckets
-
- metric = metrics::kMetricSuccessfulUpdatePayloadType;
- system_state->metrics_lib()->SendEnumToUMA(metric,
- payload_type,
- kNumPayloadTypes);
- LOG(INFO) << "Uploading " << utils::ToString(payload_type)
- << " for metric " << metric;
-
- metric = metrics::kMetricSuccessfulUpdateAttemptCount;
- system_state->metrics_lib()->SendToUMA(metric,
- attempt_count,
- 1, // min: 1 attempt
- 50, // max: 50 attempts
- 50); // num_buckets
- LOG(INFO) << "Uploading " << attempt_count
- << " for metric " << metric;
-
- metric = metrics::kMetricSuccessfulUpdateUpdatesAbandonedCount;
- LOG(INFO) << "Uploading " << updates_abandoned_count
- << " (count) for metric " << metric;
- system_state->metrics_lib()->SendToUMA(metric,
- updates_abandoned_count,
- 0, // min: 0 counts
- 49, // max: 49 counts
- 50); // num_buckets
-}
-
-void ReportRollbackMetrics(SystemState *system_state,
- RollbackResult result) {
- string metric;
- int value;
-
- metric = metrics::kMetricRollbackResult;
- value = static_cast<int>(result);
- LOG(INFO) << "Sending " << value << " for metric " << metric << " (enum)";
- system_state->metrics_lib()->SendEnumToUMA(
- metric,
- value,
- static_cast<int>(metrics::RollbackResult::kNumConstants));
-}
-
-void ReportCertificateCheckMetrics(SystemState* system_state,
- ServerToCheck server_to_check,
- CertificateCheckResult result) {
- string metric;
- switch (server_to_check) {
- case ServerToCheck::kUpdate:
- metric = kMetricCertificateCheckUpdateCheck;
- break;
- case ServerToCheck::kDownload:
- metric = kMetricCertificateCheckDownload;
- break;
- case ServerToCheck::kNone:
- return;
- }
- LOG(INFO) << "Uploading " << static_cast<int>(result) << " for metric "
- << metric;
- system_state->metrics_lib()->SendEnumToUMA(
- metric, static_cast<int>(result),
- static_cast<int>(CertificateCheckResult::kNumConstants));
-}
-
-} // namespace metrics
-
-} // namespace chromeos_update_engine
diff --git a/metrics.h b/metrics.h
deleted file mode 100644
index 7c369ee..0000000
--- a/metrics.h
+++ /dev/null
@@ -1,321 +0,0 @@
-//
-// Copyright (C) 2014 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#ifndef UPDATE_ENGINE_METRICS_H_
-#define UPDATE_ENGINE_METRICS_H_
-
-#include <base/time/time.h>
-
-#include "update_engine/certificate_checker.h"
-#include "update_engine/common/constants.h"
-#include "update_engine/common/error_code.h"
-
-namespace chromeos_update_engine {
-
-class SystemState;
-
-namespace metrics {
-
-// UpdateEngine.Daily.* metrics.
-extern const char kMetricDailyOSAgeDays[];
-
-// UpdateEngine.Check.* metrics.
-extern const char kMetricCheckDownloadErrorCode[];
-extern const char kMetricCheckReaction[];
-extern const char kMetricCheckResult[];
-extern const char kMetricCheckTimeSinceLastCheckMinutes[];
-extern const char kMetricCheckTimeSinceLastCheckUptimeMinutes[];
-
-// UpdateEngine.Attempt.* metrics.
-extern const char kMetricAttemptNumber[];
-extern const char kMetricAttemptPayloadType[];
-extern const char kMetricAttemptPayloadSizeMiB[];
-extern const char kMetricAttemptConnectionType[];
-extern const char kMetricAttemptDurationMinutes[];
-extern const char kMetricAttemptDurationUptimeMinutes[];
-extern const char kMetricAttemptTimeSinceLastAttemptSeconds[];
-extern const char kMetricAttemptTimeSinceLastAttemptUptimeSeconds[];
-extern const char kMetricAttemptPayloadBytesDownloaded[];
-extern const char kMetricAttemptPayloadDownloadSpeedKBps[];
-extern const char kMetricAttemptDownloadSource[];
-extern const char kMetricAttemptResult[];
-extern const char kMetricAttemptInternalErrorCode[];
-extern const char kMetricAttemptDownloadErrorCode[];
-
-// UpdateEngine.SuccessfulUpdate.* metrics.
-extern const char kMetricSuccessfulUpdateAttemptCount[];
-extern const char kMetricSuccessfulUpdateBytesDownloadedMiB[];
-extern const char kMetricSuccessfulUpdateDownloadOverheadPercentage[];
-extern const char kMetricSuccessfulUpdateDownloadSourcesUsed[];
-extern const char kMetricSuccessfulUpdatePayloadType[];
-extern const char kMetricSuccessfulUpdatePayloadSizeMiB[];
-extern const char kMetricSuccessfulUpdateTotalDurationMinutes[];
-extern const char kMetricSuccessfulUpdateRebootCount[];
-extern const char kMetricSuccessfulUpdateUpdatesAbandonedCount[];
-extern const char kMetricSuccessfulUpdateUrlSwitchCount[];
-
-// UpdateEngine.Rollback.* metric.
-extern const char kMetricRollbackResult[];
-
-// UpdateEngine.* metrics.
-extern const char kMetricFailedUpdateCount[];
-extern const char kMetricInstallDateProvisioningSource[];
-extern const char kMetricTimeToRebootMinutes[];
-
-// The possible outcomes when checking for updates.
-//
-// This is used in the UpdateEngine.Check.Result histogram.
-enum class CheckResult {
- kUpdateAvailable, // Response indicates an update is available.
- kNoUpdateAvailable, // Response indicates no updates are available.
- kDownloadError, // Error downloading response from Omaha.
- kParsingError, // Error parsing response.
- kRebootPending, // No update check was performed a reboot is pending.
-
- kNumConstants,
- kUnset = -1
-};
-
-// Possible ways a device can react to a new update being available.
-//
-// This is used in the UpdateEngine.Check.Reaction histogram.
-enum class CheckReaction {
- kUpdating, // Device proceeds to download and apply update.
- kIgnored , // Device-policy dictates ignoring the update.
- kDeferring, // Device-policy dictates waiting.
- kBackingOff, // Previous errors dictates waiting.
-
- kNumConstants,
- kUnset = -1
-};
-
-// The possible ways that downloading from a HTTP or HTTPS server can fail.
-//
-// This is used in the UpdateEngine.Check.DownloadErrorCode and
-// UpdateEngine.Attempt.DownloadErrorCode histograms.
-enum class DownloadErrorCode {
- // Errors that can happen in the field. See http://crbug.com/355745
- // for how we plan to add more detail in the future.
- kDownloadError = 0, // Error downloading data from server.
-
- // IMPORTANT: When adding a new error code, add at the bottom of the
- // above block and before the kInputMalformed field. This
- // is to ensure that error codes are not reordered.
-
- // This error code is used to convey that malformed input was given
- // to the utils::GetDownloadErrorCode() function. This should never
- // happen but if it does it's because of an internal update_engine
- // error and we're interested in knowing this.
- kInputMalformed = 100,
-
- // Bucket for capturing HTTP status codes not in the 200-599
- // range. This should never happen in practice but if it does we
- // want to know.
- kHttpStatusOther = 101,
-
- // Above 200 and below 600, the value is the HTTP status code.
- kHttpStatus200 = 200,
-
- kNumConstants = 600,
-
- kUnset = -1
-};
-
-// Possible ways an update attempt can end.
-//
-// This is used in the UpdateEngine.Attempt.Result histogram.
-enum class AttemptResult {
- kUpdateSucceeded, // The update succeeded.
- kInternalError, // An internal error occurred.
- kPayloadDownloadError, // Failure while downloading payload.
- kMetadataMalformed, // Metadata was malformed.
- kOperationMalformed, // An operation was malformed.
- kOperationExecutionError, // An operation failed to execute.
- kMetadataVerificationFailed, // Metadata verification failed.
- kPayloadVerificationFailed, // Payload verification failed.
- kVerificationFailed, // Root or Kernel partition verification failed.
- kPostInstallFailed, // The postinstall step failed.
- kAbnormalTermination, // The attempt ended abnormally.
- kUpdateCanceled, // Update canceled by the user.
-
- kNumConstants,
-
- kUnset = -1
-};
-
-// Possible ways the device is connected to the Internet.
-//
-// This is used in the UpdateEngine.Attempt.ConnectionType histogram.
-enum class ConnectionType {
- kUnknown, // Unknown.
- kEthernet, // Ethernet.
- kWifi, // Wireless.
- kWimax, // WiMax.
- kBluetooth, // Bluetooth.
- kCellular, // Cellular.
- kTetheredEthernet, // Tethered (Ethernet).
- kTetheredWifi, // Tethered (Wifi).
-
- kNumConstants,
- kUnset = -1
-};
-
-// Possible ways a rollback can end.
-//
-// This is used in the UpdateEngine.Rollback histogram.
-enum class RollbackResult {
- kFailed,
- kSuccess,
-
- kNumConstants
-};
-
-// Helper function to report metrics related to rollback. The
-// following metrics are reported:
-//
-// |kMetricRollbackResult|
-void ReportRollbackMetrics(SystemState *system_state,
- RollbackResult result);
-
-// Helper function to report metrics reported once a day. The
-// following metrics are reported:
-//
-// |kMetricDailyOSAgeDays|
-void ReportDailyMetrics(SystemState *system_state,
- base::TimeDelta os_age);
-
-// Helper function to report metrics after completing an update check
-// with the ChromeOS update server ("Omaha"). The following metrics
-// are reported:
-//
-// |kMetricCheckResult|
-// |kMetricCheckReaction|
-// |kMetricCheckDownloadErrorCode|
-// |kMetricCheckTimeSinceLastCheckMinutes|
-// |kMetricCheckTimeSinceLastCheckUptimeMinutes|
-//
-// The |kMetricCheckResult| metric will only be reported if |result|
-// is not |kUnset|.
-//
-// The |kMetricCheckReaction| metric will only be reported if
-// |reaction| is not |kUnset|.
-//
-// The |kMetricCheckDownloadErrorCode| will only be reported if
-// |download_error_code| is not |kUnset|.
-//
-// The values for the |kMetricCheckTimeSinceLastCheckMinutes| and
-// |kMetricCheckTimeSinceLastCheckUptimeMinutes| metrics are
-// automatically reported and calculated by maintaining persistent
-// and process-local state variables.
-void ReportUpdateCheckMetrics(SystemState *system_state,
- CheckResult result,
- CheckReaction reaction,
- DownloadErrorCode download_error_code);
-
-
-// Helper function to report metrics after the completion of each
-// update attempt. The following metrics are reported:
-//
-// |kMetricAttemptNumber|
-// |kMetricAttemptPayloadType|
-// |kMetricAttemptPayloadSizeMiB|
-// |kMetricAttemptDurationSeconds|
-// |kMetricAttemptDurationUptimeSeconds|
-// |kMetricAttemptTimeSinceLastAttemptMinutes|
-// |kMetricAttemptTimeSinceLastAttemptUptimeMinutes|
-// |kMetricAttemptPayloadBytesDownloadedMiB|
-// |kMetricAttemptPayloadDownloadSpeedKBps|
-// |kMetricAttemptDownloadSource|
-// |kMetricAttemptResult|
-// |kMetricAttemptInternalErrorCode|
-// |kMetricAttemptDownloadErrorCode|
-//
-// The |kMetricAttemptInternalErrorCode| metric will only be reported
-// if |internal_error_code| is not |kErrorSuccess|.
-//
-// The |kMetricAttemptDownloadErrorCode| metric will only be
-// reported if |payload_download_error_code| is not |kUnset|.
-//
-// The values for the |kMetricAttemptTimeSinceLastAttemptMinutes| and
-// |kMetricAttemptTimeSinceLastAttemptUptimeMinutes| metrics are
-// automatically calculated and reported by maintaining persistent and
-// process-local state variables.
-void ReportUpdateAttemptMetrics(
- SystemState *system_state,
- int attempt_number,
- PayloadType payload_type,
- base::TimeDelta duration,
- base::TimeDelta duration_uptime,
- int64_t payload_size,
- int64_t payload_bytes_downloaded,
- int64_t payload_download_speed_bps,
- DownloadSource download_source,
- AttemptResult attempt_result,
- ErrorCode internal_error_code,
- DownloadErrorCode payload_download_error_code,
- ConnectionType connection_type);
-
-// Reports the |kAbnormalTermination| for the |kMetricAttemptResult|
-// metric. No other metrics in the UpdateEngine.Attempt.* namespace
-// will be reported.
-void ReportAbnormallyTerminatedUpdateAttemptMetrics(SystemState *system_state);
-
-// Helper function to report the after the completion of a successful
-// update attempt. The following metrics are reported:
-//
-// |kMetricSuccessfulUpdateAttemptCount|
-// |kMetricSuccessfulUpdateUpdatesAbandonedCount|
-// |kMetricSuccessfulUpdatePayloadType|
-// |kMetricSuccessfulUpdatePayloadSizeMiB|
-// |kMetricSuccessfulUpdateBytesDownloadedMiBHttpsServer|
-// |kMetricSuccessfulUpdateBytesDownloadedMiBHttpServer|
-// |kMetricSuccessfulUpdateBytesDownloadedMiBHttpPeer|
-// |kMetricSuccessfulUpdateBytesDownloadedMiB|
-// |kMetricSuccessfulUpdateDownloadSourcesUsed|
-// |kMetricSuccessfulUpdateDownloadOverheadPercentage|
-// |kMetricSuccessfulUpdateTotalDurationMinutes|
-// |kMetricSuccessfulUpdateRebootCount|
-// |kMetricSuccessfulUpdateUrlSwitchCount|
-//
-// The values for the |kMetricSuccessfulUpdateDownloadSourcesUsed| are
-// |kMetricSuccessfulUpdateBytesDownloadedMiB| metrics automatically
-// calculated from examining the |num_bytes_downloaded| array.
-void ReportSuccessfulUpdateMetrics(
- SystemState *system_state,
- int attempt_count,
- int updates_abandoned_count,
- PayloadType payload_type,
- int64_t payload_size,
- int64_t num_bytes_downloaded[kNumDownloadSources],
- int download_overhead_percentage,
- base::TimeDelta total_duration,
- int reboot_count,
- int url_switch_count);
-
-// Helper function to report the after the completion of a SSL certificate
-// check. One of the following metrics is reported:
-//
-// |kMetricCertificateCheckUpdateCheck|
-// |kMetricCertificateCheckDownload|
-void ReportCertificateCheckMetrics(SystemState* system_state,
- ServerToCheck server_to_check,
- CertificateCheckResult result);
-
-} // namespace metrics
-
-} // namespace chromeos_update_engine
-
-#endif // UPDATE_ENGINE_METRICS_H_
diff --git a/metrics_constants.h b/metrics_constants.h
new file mode 100644
index 0000000..abec2ad
--- /dev/null
+++ b/metrics_constants.h
@@ -0,0 +1,137 @@
+//
+// Copyright (C) 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_METRICS_CONSTANTS_H_
+#define UPDATE_ENGINE_METRICS_CONSTANTS_H_
+
+namespace chromeos_update_engine {
+
+namespace metrics {
+// The possible outcomes when checking for updates.
+//
+// This is used in the UpdateEngine.Check.Result histogram.
+enum class CheckResult {
+ kUpdateAvailable, // Response indicates an update is available.
+ kNoUpdateAvailable, // Response indicates no updates are available.
+ kDownloadError, // Error downloading response from Omaha.
+ kParsingError, // Error parsing response.
+ kRebootPending, // No update check was performed; a reboot is pending.
+
+ kNumConstants,
+ kUnset = -1
+};
+
+// Possible ways a device can react to a new update being available.
+//
+// This is used in the UpdateEngine.Check.Reaction histogram.
+enum class CheckReaction {
+ kUpdating, // Device proceeds to download and apply update.
+ kIgnored, // Device-policy dictates ignoring the update.
+ kDeferring, // Device-policy dictates waiting.
+ kBackingOff, // Previous errors dictate waiting.
+
+ kNumConstants,
+ kUnset = -1
+};
+
+// The possible ways that downloading from an HTTP or HTTPS server can fail.
+//
+// This is used in the UpdateEngine.Check.DownloadErrorCode and
+// UpdateEngine.Attempt.DownloadErrorCode histograms.
+enum class DownloadErrorCode {
+ // Errors that can happen in the field. See http://crbug.com/355745
+ // for how we plan to add more detail in the future.
+ kDownloadError = 0, // Error downloading data from server.
+
+ // IMPORTANT: When adding a new error code, add at the bottom of the
+ // above block and before the kInputMalformed field. This
+ // is to ensure that error codes are not reordered.
+
+ // This error code is used to convey that malformed input was given
+ // to the utils::GetDownloadErrorCode() function. This should never
+ // happen but if it does it's because of an internal update_engine
+ // error and we're interested in knowing this.
+ kInputMalformed = 100,
+
+ // Bucket for capturing HTTP status codes not in the 200-599
+ // range. This should never happen in practice but if it does we
+ // want to know.
+ kHttpStatusOther = 101,
+
+ // Above 200 and below 600, the value is the HTTP status code.
+ kHttpStatus200 = 200,
+
+ kNumConstants = 600,
+
+ kUnset = -1
+};
+
+// Possible ways an update attempt can end.
+//
+// This is used in the UpdateEngine.Attempt.Result histogram.
+enum class AttemptResult {
+ kUpdateSucceeded, // The update succeeded.
+ kInternalError, // An internal error occurred.
+ kPayloadDownloadError, // Failure while downloading payload.
+ kMetadataMalformed, // Metadata was malformed.
+ kOperationMalformed, // An operation was malformed.
+ kOperationExecutionError, // An operation failed to execute.
+ kMetadataVerificationFailed, // Metadata verification failed.
+ kPayloadVerificationFailed, // Payload verification failed.
+ kVerificationFailed, // Root or Kernel partition verification failed.
+ kPostInstallFailed, // The postinstall step failed.
+ kAbnormalTermination, // The attempt ended abnormally.
+ kUpdateCanceled, // Update canceled by the user.
+ kUpdateSucceededNotActive, // Update succeeded but the new slot is not
+ // active.
+
+ kNumConstants,
+
+ kUnset = -1
+};
+
+// Possible ways the device is connected to the Internet.
+//
+// This is used in the UpdateEngine.Attempt.ConnectionType histogram.
+enum class ConnectionType {
+ kUnknown, // Unknown.
+ kEthernet, // Ethernet.
+ kWifi, // Wireless.
+ kWimax, // WiMax.
+ kBluetooth, // Bluetooth.
+ kCellular, // Cellular.
+ kTetheredEthernet, // Tethered (Ethernet).
+ kTetheredWifi, // Tethered (Wifi).
+
+ kNumConstants,
+ kUnset = -1
+};
+
+// Possible ways a rollback can end.
+//
+// This is used in the UpdateEngine.Rollback histogram.
+enum class RollbackResult {
+ kFailed,
+ kSuccess,
+
+ kNumConstants
+};
+
+} // namespace metrics
+
+} // namespace chromeos_update_engine
+
+#endif // UPDATE_ENGINE_METRICS_CONSTANTS_H_
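For reference, the DownloadErrorCode convention above (HTTP statuses in the 200-599 range map directly onto enum values, everything else falls into kHttpStatusOther) can be exercised with a small sketch. The helper below is illustrative only and is not part of this change:

#include "update_engine/metrics_constants.h"

namespace chromeos_update_engine {

// Illustrative sketch: maps a raw HTTP status onto DownloadErrorCode
// following the convention documented in metrics_constants.h.
metrics::DownloadErrorCode HttpStatusToDownloadErrorCode(int http_status) {
  if (http_status >= 200 && http_status < 600) {
    // In the 200-599 range the enum value is the status code itself.
    return static_cast<metrics::DownloadErrorCode>(http_status);
  }
  // Anything outside 200-599 goes to the catch-all bucket.
  return metrics::DownloadErrorCode::kHttpStatusOther;
}

}  // namespace chromeos_update_engine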
diff --git a/metrics_reporter_android.cc b/metrics_reporter_android.cc
new file mode 100644
index 0000000..3cb356f
--- /dev/null
+++ b/metrics_reporter_android.cc
@@ -0,0 +1,158 @@
+//
+// Copyright (C) 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/metrics_reporter_android.h"
+
+#include <memory>
+#include <string>
+
+#include <metricslogger/metrics_logger.h>
+
+#include "update_engine/common/constants.h"
+
+namespace {
+void LogHistogram(const std::string& metrics, int value) {
+ android::metricslogger::LogHistogram(metrics, value);
+ LOG(INFO) << "uploading " << value << " to histogram for metric " << metrics;
+}
+} // namespace
+
+namespace chromeos_update_engine {
+
+namespace metrics {
+
+// The histograms are defined in:
+// depot/google3/analysis/uma/configs/clearcut/TRON/histograms.xml
+constexpr char kMetricsUpdateEngineAttemptNumber[] =
+ "ota_update_engine_attempt_number";
+constexpr char kMetricsUpdateEngineAttemptResult[] =
+ "ota_update_engine_attempt_result";
+constexpr char kMetricsUpdateEngineAttemptDurationInMinutes[] =
+ "ota_update_engine_attempt_duration_boottime_in_minutes";
+constexpr char kMetricsUpdateEngineAttemptDurationUptimeInMinutes[] =
+ "ota_update_engine_attempt_duration_monotonic_in_minutes";
+constexpr char kMetricsUpdateEngineAttemptErrorCode[] =
+ "ota_update_engine_attempt_error_code";
+constexpr char kMetricsUpdateEngineAttemptPayloadSizeMiB[] =
+ "ota_update_engine_attempt_payload_size_mib";
+constexpr char kMetricsUpdateEngineAttemptPayloadType[] =
+ "ota_update_engine_attempt_payload_type";
+constexpr char kMetricsUpdateEngineAttemptCurrentBytesDownloadedMiB[] =
+ "ota_update_engine_attempt_current_bytes_downloaded_mib";
+
+constexpr char kMetricsUpdateEngineSuccessfulUpdateAttemptCount[] =
+ "ota_update_engine_successful_update_attempt_count";
+constexpr char kMetricsUpdateEngineSuccessfulUpdateTotalDurationInMinutes[] =
+ "ota_update_engine_successful_update_total_duration_in_minutes";
+constexpr char kMetricsUpdateEngineSuccessfulUpdatePayloadSizeMiB[] =
+ "ota_update_engine_successful_update_payload_size_mib";
+constexpr char kMetricsUpdateEngineSuccessfulUpdatePayloadType[] =
+ "ota_update_engine_successful_update_payload_type";
+constexpr char kMetricsUpdateEngineSuccessfulUpdateRebootCount[] =
+ "ota_update_engine_successful_update_reboot_count";
+constexpr char kMetricsUpdateEngineSuccessfulUpdateTotalBytesDownloadedMiB[] =
+ "ota_update_engine_successful_update_total_bytes_downloaded_mib";
+constexpr char
+ kMetricsUpdateEngineSuccessfulUpdateDownloadOverheadPercentage[] =
+ "ota_update_engine_successful_update_download_overhead_percentage";
+
+std::unique_ptr<MetricsReporterInterface> CreateMetricsReporter() {
+ return std::make_unique<MetricsReporterAndroid>();
+}
+
+} // namespace metrics
+
+void MetricsReporterAndroid::ReportUpdateAttemptMetrics(
+ SystemState* /* system_state */,
+ int attempt_number,
+ PayloadType payload_type,
+ base::TimeDelta duration,
+ base::TimeDelta duration_uptime,
+ int64_t payload_size,
+ metrics::AttemptResult attempt_result,
+ ErrorCode error_code) {
+ LogHistogram(metrics::kMetricsUpdateEngineAttemptNumber, attempt_number);
+ LogHistogram(metrics::kMetricsUpdateEngineAttemptPayloadType,
+ static_cast<int>(payload_type));
+ LogHistogram(metrics::kMetricsUpdateEngineAttemptDurationInMinutes,
+ duration.InMinutes());
+ LogHistogram(metrics::kMetricsUpdateEngineAttemptDurationUptimeInMinutes,
+ duration_uptime.InMinutes());
+
+ int64_t payload_size_mib = payload_size / kNumBytesInOneMiB;
+ LogHistogram(metrics::kMetricsUpdateEngineAttemptPayloadSizeMiB,
+ payload_size_mib);
+
+ LogHistogram(metrics::kMetricsUpdateEngineAttemptResult,
+ static_cast<int>(attempt_result));
+ LogHistogram(metrics::kMetricsUpdateEngineAttemptErrorCode,
+ static_cast<int>(error_code));
+}
+
+void MetricsReporterAndroid::ReportUpdateAttemptDownloadMetrics(
+ int64_t payload_bytes_downloaded,
+ int64_t /* payload_download_speed_bps */,
+ DownloadSource /* download_source */,
+ metrics::DownloadErrorCode /* payload_download_error_code */,
+ metrics::ConnectionType /* connection_type */) {
+ LogHistogram(metrics::kMetricsUpdateEngineAttemptCurrentBytesDownloadedMiB,
+ payload_bytes_downloaded);
+}
+
+void MetricsReporterAndroid::ReportSuccessfulUpdateMetrics(
+ int attempt_count,
+ int /* updates_abandoned_count */,
+ PayloadType payload_type,
+ int64_t payload_size,
+ int64_t num_bytes_downloaded[kNumDownloadSources],
+ int download_overhead_percentage,
+ base::TimeDelta total_duration,
+ int reboot_count,
+ int /* url_switch_count */) {
+ LogHistogram(metrics::kMetricsUpdateEngineSuccessfulUpdateAttemptCount,
+ attempt_count);
+ LogHistogram(metrics::kMetricsUpdateEngineSuccessfulUpdatePayloadType,
+ static_cast<int>(payload_type));
+
+ int64_t payload_size_mib = payload_size / kNumBytesInOneMiB;
+ LogHistogram(metrics::kMetricsUpdateEngineSuccessfulUpdatePayloadSizeMiB,
+ payload_size_mib);
+
+ int64_t total_bytes_downloaded = 0;
+ for (size_t i = 0; i < kNumDownloadSources; i++) {
+ total_bytes_downloaded += num_bytes_downloaded[i] / kNumBytesInOneMiB;
+ }
+ LogHistogram(
+ metrics::kMetricsUpdateEngineSuccessfulUpdateTotalBytesDownloadedMiB,
+ total_bytes_downloaded);
+ LogHistogram(
+ metrics::kMetricsUpdateEngineSuccessfulUpdateDownloadOverheadPercentage,
+ download_overhead_percentage);
+
+ LogHistogram(
+ metrics::kMetricsUpdateEngineSuccessfulUpdateTotalDurationInMinutes,
+ total_duration.InMinutes());
+ LogHistogram(metrics::kMetricsUpdateEngineSuccessfulUpdateRebootCount,
+ reboot_count);
+}
+
+void MetricsReporterAndroid::ReportAbnormallyTerminatedUpdateAttemptMetrics() {
+ int attempt_result =
+ static_cast<int>(metrics::AttemptResult::kAbnormalTermination);
+ LogHistogram(metrics::kMetricsUpdateEngineAttemptResult, attempt_result);
+}
+
+} // namespace chromeos_update_engine
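A minimal caller sketch for the Android reporter follows; every value is invented for illustration, and kPayloadTypeFull, kNumBytesInOneMiB, and ErrorCode::kSuccess are assumed to come from the update_engine common headers. On Android the system_state argument is unused.

#include <memory>

#include <base/time/time.h>

#include "update_engine/metrics_reporter_interface.h"

// Illustrative sketch only: attempt number, durations and payload size
// below are made up.
void ReportOneAttemptSketch(chromeos_update_engine::SystemState* system_state) {
  using namespace chromeos_update_engine;  // For brevity in this sketch.
  std::unique_ptr<MetricsReporterInterface> reporter =
      metrics::CreateMetricsReporter();
  reporter->Initialize();
  reporter->ReportUpdateAttemptMetrics(
      system_state,
      1,                                  // attempt_number
      kPayloadTypeFull,                   // assumed PayloadType enumerator
      base::TimeDelta::FromMinutes(12),   // duration (boottime)
      base::TimeDelta::FromMinutes(10),   // duration_uptime (monotonic)
      512 * kNumBytesInOneMiB,            // payload_size: 512 MiB
      metrics::AttemptResult::kUpdateSucceeded,
      ErrorCode::kSuccess);
}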
diff --git a/metrics_reporter_android.h b/metrics_reporter_android.h
new file mode 100644
index 0000000..ee94e43
--- /dev/null
+++ b/metrics_reporter_android.h
@@ -0,0 +1,88 @@
+//
+// Copyright (C) 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_METRICS_REPORTER_ANDROID_H_
+#define UPDATE_ENGINE_METRICS_REPORTER_ANDROID_H_
+
+#include "update_engine/common/error_code.h"
+#include "update_engine/metrics_constants.h"
+#include "update_engine/metrics_reporter_interface.h"
+
+namespace chromeos_update_engine {
+
+class MetricsReporterAndroid : public MetricsReporterInterface {
+ public:
+ MetricsReporterAndroid() = default;
+
+ ~MetricsReporterAndroid() override = default;
+
+ void Initialize() override {}
+
+ void ReportRollbackMetrics(metrics::RollbackResult result) override {}
+
+ void ReportDailyMetrics(base::TimeDelta os_age) override {}
+
+ void ReportUpdateCheckMetrics(
+ SystemState* system_state,
+ metrics::CheckResult result,
+ metrics::CheckReaction reaction,
+ metrics::DownloadErrorCode download_error_code) override {}
+
+ void ReportUpdateAttemptMetrics(SystemState* system_state,
+ int attempt_number,
+ PayloadType payload_type,
+ base::TimeDelta duration,
+ base::TimeDelta duration_uptime,
+ int64_t payload_size,
+ metrics::AttemptResult attempt_result,
+ ErrorCode internal_error_code) override;
+
+ void ReportUpdateAttemptDownloadMetrics(
+ int64_t payload_bytes_downloaded,
+ int64_t payload_download_speed_bps,
+ DownloadSource download_source,
+ metrics::DownloadErrorCode payload_download_error_code,
+ metrics::ConnectionType connection_type) override;
+
+ void ReportAbnormallyTerminatedUpdateAttemptMetrics() override;
+
+ void ReportSuccessfulUpdateMetrics(
+ int attempt_count,
+ int updates_abandoned_count,
+ PayloadType payload_type,
+ int64_t payload_size,
+ int64_t num_bytes_downloaded[kNumDownloadSources],
+ int download_overhead_percentage,
+ base::TimeDelta total_duration,
+ int reboot_count,
+ int url_switch_count) override;
+
+ void ReportCertificateCheckMetrics(ServerToCheck server_to_check,
+ CertificateCheckResult result) override {}
+
+ void ReportFailedUpdateCount(int target_attempt) override {}
+
+ void ReportTimeToReboot(int time_to_reboot_minutes) override {}
+
+ void ReportInstallDateProvisioningSource(int source, int max) override {}
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MetricsReporterAndroid);
+};
+
+} // namespace chromeos_update_engine
+
+#endif // UPDATE_ENGINE_METRICS_REPORTER_ANDROID_H_
diff --git a/metrics_reporter_interface.h b/metrics_reporter_interface.h
new file mode 100644
index 0000000..2c7ce5b
--- /dev/null
+++ b/metrics_reporter_interface.h
@@ -0,0 +1,200 @@
+//
+// Copyright (C) 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_METRICS_REPORTER_INTERFACE_H_
+#define UPDATE_ENGINE_METRICS_REPORTER_INTERFACE_H_
+
+#include <memory>
+
+#include <base/time/time.h>
+
+#include "update_engine/common/constants.h"
+#include "update_engine/common/error_code.h"
+#include "update_engine/metrics_constants.h"
+#include "update_engine/system_state.h"
+
+namespace chromeos_update_engine {
+
+enum class ServerToCheck;
+enum class CertificateCheckResult;
+
+class MetricsReporterInterface;
+
+namespace metrics {
+
+std::unique_ptr<MetricsReporterInterface> CreateMetricsReporter();
+
+} // namespace metrics
+
+class MetricsReporterInterface {
+ public:
+ virtual ~MetricsReporterInterface() = default;
+
+ virtual void Initialize() = 0;
+
+ // Helper function to report metrics related to rollback. The
+ // following metrics are reported:
+ //
+ // |kMetricRollbackResult|
+ virtual void ReportRollbackMetrics(metrics::RollbackResult result) = 0;
+
+ // Helper function to report metrics reported once a day. The
+ // following metrics are reported:
+ //
+ // |kMetricDailyOSAgeDays|
+ virtual void ReportDailyMetrics(base::TimeDelta os_age) = 0;
+
+ // Helper function to report metrics after completing an update check
+ // with the ChromeOS update server ("Omaha"). The following metrics
+ // are reported:
+ //
+ // |kMetricCheckResult|
+ // |kMetricCheckReaction|
+ // |kMetricCheckDownloadErrorCode|
+ // |kMetricCheckTimeSinceLastCheckMinutes|
+ // |kMetricCheckTimeSinceLastCheckUptimeMinutes|
+ //
+ // The |kMetricCheckResult| metric will only be reported if |result|
+ // is not |kUnset|.
+ //
+ // The |kMetricCheckReaction| metric will only be reported if
+ // |reaction| is not |kUnset|.
+ //
+ // The |kMetricCheckDownloadErrorCode| will only be reported if
+ // |download_error_code| is not |kUnset|.
+ //
+ // The values for the |kMetricCheckTimeSinceLastCheckMinutes| and
+ // |kMetricCheckTimeSinceLastCheckUptimeMinutes| metrics are
+ // automatically reported and calculated by maintaining persistent
+ // and process-local state variables.
+ virtual void ReportUpdateCheckMetrics(
+ SystemState* system_state,
+ metrics::CheckResult result,
+ metrics::CheckReaction reaction,
+ metrics::DownloadErrorCode download_error_code) = 0;
+
+ // Helper function to report metrics after the completion of each
+ // update attempt. The following metrics are reported:
+ //
+ // |kMetricAttemptNumber|
+ // |kMetricAttemptPayloadType|
+ // |kMetricAttemptPayloadSizeMiB|
+ // |kMetricAttemptDurationMinutes|
+ // |kMetricAttemptDurationUptimeMinutes|
+ // |kMetricAttemptTimeSinceLastAttemptMinutes|
+ // |kMetricAttemptTimeSinceLastAttemptUptimeMinutes|
+ // |kMetricAttemptResult|
+ // |kMetricAttemptInternalErrorCode|
+ //
+ // The |kMetricAttemptInternalErrorCode| metric will only be reported
+ // if |internal_error_code| is not |kErrorSuccess|.
+ //
+ // The |kMetricAttemptDownloadErrorCode| metric will only be
+ // reported if |payload_download_error_code| is not |kUnset|.
+ //
+ // The values for the |kMetricAttemptTimeSinceLastAttemptMinutes| and
+ // |kMetricAttemptTimeSinceLastAttemptUptimeMinutes| metrics are
+ // automatically calculated and reported by maintaining persistent and
+ // process-local state variables.
+ virtual void ReportUpdateAttemptMetrics(SystemState* system_state,
+ int attempt_number,
+ PayloadType payload_type,
+ base::TimeDelta duration,
+ base::TimeDelta duration_uptime,
+ int64_t payload_size,
+ metrics::AttemptResult attempt_result,
+ ErrorCode internal_error_code) = 0;
+
+ // Helper function to report download metrics after the completion of each
+ // update attempt. The following metrics are reported:
+ //
+ // |kMetricAttemptPayloadBytesDownloadedMiB|
+ // |kMetricAttemptPayloadDownloadSpeedKBps|
+ // |kMetricAttemptDownloadSource|
+ // |kMetricAttemptDownloadErrorCode|
+ // |kMetricAttemptConnectionType|
+ virtual void ReportUpdateAttemptDownloadMetrics(
+ int64_t payload_bytes_downloaded,
+ int64_t payload_download_speed_bps,
+ DownloadSource download_source,
+ metrics::DownloadErrorCode payload_download_error_code,
+ metrics::ConnectionType connection_type) = 0;
+
+ // Reports the |kAbnormalTermination| for the |kMetricAttemptResult|
+ // metric. No other metrics in the UpdateEngine.Attempt.* namespace
+ // will be reported.
+ virtual void ReportAbnormallyTerminatedUpdateAttemptMetrics() = 0;
+
+ // Helper function to report metrics after the completion of a successful
+ // update attempt. The following metrics are reported:
+ //
+ // |kMetricSuccessfulUpdateAttemptCount|
+ // |kMetricSuccessfulUpdateUpdatesAbandonedCount|
+ // |kMetricSuccessfulUpdatePayloadType|
+ // |kMetricSuccessfulUpdatePayloadSizeMiB|
+ // |kMetricSuccessfulUpdateBytesDownloadedMiBHttpsServer|
+ // |kMetricSuccessfulUpdateBytesDownloadedMiBHttpServer|
+ // |kMetricSuccessfulUpdateBytesDownloadedMiBHttpPeer|
+ // |kMetricSuccessfulUpdateBytesDownloadedMiB|
+ // |kMetricSuccessfulUpdateDownloadSourcesUsed|
+ // |kMetricSuccessfulUpdateDownloadOverheadPercentage|
+ // |kMetricSuccessfulUpdateTotalDurationMinutes|
+ // |kMetricSuccessfulUpdateRebootCount|
+ // |kMetricSuccessfulUpdateUrlSwitchCount|
+ //
+ // The values for the |kMetricSuccessfulUpdateDownloadSourcesUsed| and
+ // |kMetricSuccessfulUpdateBytesDownloadedMiB| metrics are automatically
+ // calculated from examining the |num_bytes_downloaded| array.
+ virtual void ReportSuccessfulUpdateMetrics(
+ int attempt_count,
+ int updates_abandoned_count,
+ PayloadType payload_type,
+ int64_t payload_size,
+ int64_t num_bytes_downloaded[kNumDownloadSources],
+ int download_overhead_percentage,
+ base::TimeDelta total_duration,
+ int reboot_count,
+ int url_switch_count) = 0;
+
+ // Helper function to report metrics after the completion of an SSL certificate
+ // check. One of the following metrics is reported:
+ //
+ // |kMetricCertificateCheckUpdateCheck|
+ // |kMetricCertificateCheckDownload|
+ virtual void ReportCertificateCheckMetrics(ServerToCheck server_to_check,
+ CertificateCheckResult result) = 0;
+
+ // Helper function to report the number of failed update attempts. The following
+ // metrics are reported:
+ //
+ // |kMetricFailedUpdateCount|
+ virtual void ReportFailedUpdateCount(int target_attempt) = 0;
+
+ // Helper function to report the time interval in minutes between a
+ // successful update and the reboot into the updated system. The following
+ // metrics are reported:
+ //
+ // |kMetricTimeToRebootMinutes|
+ virtual void ReportTimeToReboot(int time_to_reboot_minutes) = 0;
+
+ // Helper function to report the source of installation data. The following
+ // metrics are reported:
+ //
+ // |kMetricInstallDateProvisioningSource|
+ virtual void ReportInstallDateProvisioningSource(int source, int max) = 0;
+};
+
+} // namespace chromeos_update_engine
+
+#endif // UPDATE_ENGINE_METRICS_REPORTER_INTERFACE_H_
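The factory declared in the metrics namespace is the only coupling point between callers and the concrete reporter: whichever of metrics_reporter_android.cc or metrics_reporter_omaha.cc is linked in provides CreateMetricsReporter(). A short sketch of the intended call pattern, with the wrapper function name invented for illustration:

#include <memory>

#include "update_engine/metrics_reporter_interface.h"

namespace chromeos_update_engine {

// Illustrative sketch only; not part of this change.
void ReportRebootLatencySketch(int time_to_reboot_minutes) {
  std::unique_ptr<MetricsReporterInterface> reporter =
      metrics::CreateMetricsReporter();
  reporter->Initialize();
  // A no-op in MetricsReporterAndroid; sent as
  // UpdateEngine.TimeToRebootMinutes by MetricsReporterOmaha.
  reporter->ReportTimeToReboot(time_to_reboot_minutes);
}

}  // namespace chromeos_update_engine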
diff --git a/metrics_reporter_omaha.cc b/metrics_reporter_omaha.cc
new file mode 100644
index 0000000..0397b83
--- /dev/null
+++ b/metrics_reporter_omaha.cc
@@ -0,0 +1,538 @@
+//
+// Copyright (C) 2014 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/metrics_reporter_omaha.h"
+
+#include <memory>
+#include <string>
+
+#include <base/logging.h>
+#include <metrics/metrics_library.h>
+
+#include "update_engine/common/clock_interface.h"
+#include "update_engine/common/constants.h"
+#include "update_engine/common/prefs_interface.h"
+#include "update_engine/common/utils.h"
+#include "update_engine/metrics_utils.h"
+#include "update_engine/system_state.h"
+
+using std::string;
+
+namespace chromeos_update_engine {
+
+namespace metrics {
+
+// UpdateEngine.Daily.* metrics.
+const char kMetricDailyOSAgeDays[] = "UpdateEngine.Daily.OSAgeDays";
+
+// UpdateEngine.Check.* metrics.
+const char kMetricCheckDownloadErrorCode[] =
+ "UpdateEngine.Check.DownloadErrorCode";
+const char kMetricCheckReaction[] = "UpdateEngine.Check.Reaction";
+const char kMetricCheckResult[] = "UpdateEngine.Check.Result";
+const char kMetricCheckTimeSinceLastCheckMinutes[] =
+ "UpdateEngine.Check.TimeSinceLastCheckMinutes";
+const char kMetricCheckTimeSinceLastCheckUptimeMinutes[] =
+ "UpdateEngine.Check.TimeSinceLastCheckUptimeMinutes";
+
+// UpdateEngine.Attempt.* metrics.
+const char kMetricAttemptNumber[] = "UpdateEngine.Attempt.Number";
+const char kMetricAttemptPayloadType[] = "UpdateEngine.Attempt.PayloadType";
+const char kMetricAttemptPayloadSizeMiB[] =
+ "UpdateEngine.Attempt.PayloadSizeMiB";
+const char kMetricAttemptConnectionType[] =
+ "UpdateEngine.Attempt.ConnectionType";
+const char kMetricAttemptDurationMinutes[] =
+ "UpdateEngine.Attempt.DurationMinutes";
+const char kMetricAttemptDurationUptimeMinutes[] =
+ "UpdateEngine.Attempt.DurationUptimeMinutes";
+const char kMetricAttemptTimeSinceLastAttemptMinutes[] =
+ "UpdateEngine.Attempt.TimeSinceLastAttemptMinutes";
+const char kMetricAttemptTimeSinceLastAttemptUptimeMinutes[] =
+ "UpdateEngine.Attempt.TimeSinceLastAttemptUptimeMinutes";
+const char kMetricAttemptPayloadBytesDownloadedMiB[] =
+ "UpdateEngine.Attempt.PayloadBytesDownloadedMiB";
+const char kMetricAttemptPayloadDownloadSpeedKBps[] =
+ "UpdateEngine.Attempt.PayloadDownloadSpeedKBps";
+const char kMetricAttemptDownloadSource[] =
+ "UpdateEngine.Attempt.DownloadSource";
+const char kMetricAttemptResult[] = "UpdateEngine.Attempt.Result";
+const char kMetricAttemptInternalErrorCode[] =
+ "UpdateEngine.Attempt.InternalErrorCode";
+const char kMetricAttemptDownloadErrorCode[] =
+ "UpdateEngine.Attempt.DownloadErrorCode";
+
+// UpdateEngine.SuccessfulUpdate.* metrics.
+const char kMetricSuccessfulUpdateAttemptCount[] =
+ "UpdateEngine.SuccessfulUpdate.AttemptCount";
+const char kMetricSuccessfulUpdateBytesDownloadedMiB[] =
+ "UpdateEngine.SuccessfulUpdate.BytesDownloadedMiB";
+const char kMetricSuccessfulUpdateDownloadOverheadPercentage[] =
+ "UpdateEngine.SuccessfulUpdate.DownloadOverheadPercentage";
+const char kMetricSuccessfulUpdateDownloadSourcesUsed[] =
+ "UpdateEngine.SuccessfulUpdate.DownloadSourcesUsed";
+const char kMetricSuccessfulUpdatePayloadType[] =
+ "UpdateEngine.SuccessfulUpdate.PayloadType";
+const char kMetricSuccessfulUpdatePayloadSizeMiB[] =
+ "UpdateEngine.SuccessfulUpdate.PayloadSizeMiB";
+const char kMetricSuccessfulUpdateRebootCount[] =
+ "UpdateEngine.SuccessfulUpdate.RebootCount";
+const char kMetricSuccessfulUpdateTotalDurationMinutes[] =
+ "UpdateEngine.SuccessfulUpdate.TotalDurationMinutes";
+const char kMetricSuccessfulUpdateUpdatesAbandonedCount[] =
+ "UpdateEngine.SuccessfulUpdate.UpdatesAbandonedCount";
+const char kMetricSuccessfulUpdateUrlSwitchCount[] =
+ "UpdateEngine.SuccessfulUpdate.UrlSwitchCount";
+
+// UpdateEngine.Rollback.* metric.
+const char kMetricRollbackResult[] = "UpdateEngine.Rollback.Result";
+
+// UpdateEngine.CertificateCheck.* metrics.
+const char kMetricCertificateCheckUpdateCheck[] =
+ "UpdateEngine.CertificateCheck.UpdateCheck";
+const char kMetricCertificateCheckDownload[] =
+ "UpdateEngine.CertificateCheck.Download";
+
+// UpdateEngine.* metrics.
+const char kMetricFailedUpdateCount[] = "UpdateEngine.FailedUpdateCount";
+const char kMetricInstallDateProvisioningSource[] =
+ "UpdateEngine.InstallDateProvisioningSource";
+const char kMetricTimeToRebootMinutes[] = "UpdateEngine.TimeToRebootMinutes";
+
+std::unique_ptr<MetricsReporterInterface> CreateMetricsReporter() {
+ return std::make_unique<MetricsReporterOmaha>();
+}
+
+} // namespace metrics
+
+MetricsReporterOmaha::MetricsReporterOmaha()
+ : metrics_lib_(new MetricsLibrary()) {}
+
+void MetricsReporterOmaha::Initialize() {
+ metrics_lib_->Init();
+}
+
+void MetricsReporterOmaha::ReportDailyMetrics(base::TimeDelta os_age) {
+ string metric = metrics::kMetricDailyOSAgeDays;
+ LOG(INFO) << "Uploading " << utils::FormatTimeDelta(os_age) << " for metric "
+ << metric;
+ metrics_lib_->SendToUMA(metric,
+ static_cast<int>(os_age.InDays()),
+ 0, // min: 0 days
+ 6 * 30, // max: 6 months (approx)
+ 50); // num_buckets
+}
+
+void MetricsReporterOmaha::ReportUpdateCheckMetrics(
+ SystemState* system_state,
+ metrics::CheckResult result,
+ metrics::CheckReaction reaction,
+ metrics::DownloadErrorCode download_error_code) {
+ string metric;
+ int value;
+ int max_value;
+
+ if (result != metrics::CheckResult::kUnset) {
+ metric = metrics::kMetricCheckResult;
+ value = static_cast<int>(result);
+ max_value = static_cast<int>(metrics::CheckResult::kNumConstants) - 1;
+ LOG(INFO) << "Sending " << value << " for metric " << metric << " (enum)";
+ metrics_lib_->SendEnumToUMA(metric, value, max_value);
+ }
+ if (reaction != metrics::CheckReaction::kUnset) {
+ metric = metrics::kMetricCheckReaction;
+ value = static_cast<int>(reaction);
+ max_value = static_cast<int>(metrics::CheckReaction::kNumConstants) - 1;
+ LOG(INFO) << "Sending " << value << " for metric " << metric << " (enum)";
+ metrics_lib_->SendEnumToUMA(metric, value, max_value);
+ }
+ if (download_error_code != metrics::DownloadErrorCode::kUnset) {
+ metric = metrics::kMetricCheckDownloadErrorCode;
+ value = static_cast<int>(download_error_code);
+ LOG(INFO) << "Sending " << value << " for metric " << metric << " (sparse)";
+ metrics_lib_->SendSparseToUMA(metric, value);
+ }
+
+ base::TimeDelta time_since_last;
+ if (metrics_utils::WallclockDurationHelper(
+ system_state,
+ kPrefsMetricsCheckLastReportingTime,
+ &time_since_last)) {
+ metric = metrics::kMetricCheckTimeSinceLastCheckMinutes;
+ LOG(INFO) << "Sending " << utils::FormatTimeDelta(time_since_last)
+ << " for metric " << metric;
+ metrics_lib_->SendToUMA(metric,
+ time_since_last.InMinutes(),
+ 0, // min: 0 min
+ 30 * 24 * 60, // max: 30 days
+ 50); // num_buckets
+ }
+
+ base::TimeDelta uptime_since_last;
+ static int64_t uptime_since_last_storage = 0;
+ if (metrics_utils::MonotonicDurationHelper(
+ system_state, &uptime_since_last_storage, &uptime_since_last)) {
+ metric = metrics::kMetricCheckTimeSinceLastCheckUptimeMinutes;
+ LOG(INFO) << "Sending " << utils::FormatTimeDelta(uptime_since_last)
+ << " for metric " << metric;
+ metrics_lib_->SendToUMA(metric,
+ uptime_since_last.InMinutes(),
+ 0, // min: 0 min
+ 30 * 24 * 60, // max: 30 days
+ 50); // num_buckets
+ }
+}
+
+void MetricsReporterOmaha::ReportAbnormallyTerminatedUpdateAttemptMetrics() {
+ string metric = metrics::kMetricAttemptResult;
+ metrics::AttemptResult attempt_result =
+ metrics::AttemptResult::kAbnormalTermination;
+
+ LOG(INFO) << "Uploading " << static_cast<int>(attempt_result)
+ << " for metric " << metric;
+ metrics_lib_->SendEnumToUMA(
+ metric,
+ static_cast<int>(attempt_result),
+ static_cast<int>(metrics::AttemptResult::kNumConstants));
+}
+
+void MetricsReporterOmaha::ReportUpdateAttemptMetrics(
+ SystemState* system_state,
+ int attempt_number,
+ PayloadType payload_type,
+ base::TimeDelta duration,
+ base::TimeDelta duration_uptime,
+ int64_t payload_size,
+ metrics::AttemptResult attempt_result,
+ ErrorCode internal_error_code) {
+ string metric = metrics::kMetricAttemptNumber;
+ LOG(INFO) << "Uploading " << attempt_number << " for metric " << metric;
+ metrics_lib_->SendToUMA(metric,
+ attempt_number,
+ 0, // min: 0 attempts
+ 49, // max: 49 attempts
+ 50); // num_buckets
+
+ metric = metrics::kMetricAttemptPayloadType;
+ LOG(INFO) << "Uploading " << utils::ToString(payload_type) << " for metric "
+ << metric;
+ metrics_lib_->SendEnumToUMA(metric, payload_type, kNumPayloadTypes);
+
+ metric = metrics::kMetricAttemptDurationMinutes;
+ LOG(INFO) << "Uploading " << utils::FormatTimeDelta(duration)
+ << " for metric " << metric;
+ metrics_lib_->SendToUMA(metric,
+ duration.InMinutes(),
+ 0, // min: 0 min
+ 10 * 24 * 60, // max: 10 days
+ 50); // num_buckets
+
+ metric = metrics::kMetricAttemptDurationUptimeMinutes;
+ LOG(INFO) << "Uploading " << utils::FormatTimeDelta(duration_uptime)
+ << " for metric " << metric;
+ metrics_lib_->SendToUMA(metric,
+ duration_uptime.InMinutes(),
+ 0, // min: 0 min
+ 10 * 24 * 60, // max: 10 days
+ 50); // num_buckets
+
+ metric = metrics::kMetricAttemptPayloadSizeMiB;
+ int64_t payload_size_mib = payload_size / kNumBytesInOneMiB;
+ LOG(INFO) << "Uploading " << payload_size_mib << " for metric " << metric;
+ metrics_lib_->SendToUMA(metric,
+ payload_size_mib,
+ 0, // min: 0 MiB
+ 1024, // max: 1024 MiB = 1 GiB
+ 50); // num_buckets
+
+
+
+ metric = metrics::kMetricAttemptResult;
+ LOG(INFO) << "Uploading " << static_cast<int>(attempt_result)
+ << " for metric " << metric;
+ metrics_lib_->SendEnumToUMA(
+ metric,
+ static_cast<int>(attempt_result),
+ static_cast<int>(metrics::AttemptResult::kNumConstants));
+
+ if (internal_error_code != ErrorCode::kSuccess) {
+ metric = metrics::kMetricAttemptInternalErrorCode;
+ LOG(INFO) << "Uploading " << internal_error_code << " for metric "
+ << metric;
+ metrics_lib_->SendEnumToUMA(metric,
+ static_cast<int>(internal_error_code),
+ static_cast<int>(ErrorCode::kUmaReportedMax));
+ }
+
+ base::TimeDelta time_since_last;
+ if (metrics_utils::WallclockDurationHelper(
+ system_state,
+ kPrefsMetricsAttemptLastReportingTime,
+ &time_since_last)) {
+ metric = metrics::kMetricAttemptTimeSinceLastAttemptMinutes;
+ LOG(INFO) << "Sending " << utils::FormatTimeDelta(time_since_last)
+ << " for metric " << metric;
+ metrics_lib_->SendToUMA(metric,
+ time_since_last.InMinutes(),
+ 0, // min: 0 min
+ 30 * 24 * 60, // max: 30 days
+ 50); // num_buckets
+ }
+
+ static int64_t uptime_since_last_storage = 0;
+ base::TimeDelta uptime_since_last;
+ if (metrics_utils::MonotonicDurationHelper(
+ system_state, &uptime_since_last_storage, &uptime_since_last)) {
+ metric = metrics::kMetricAttemptTimeSinceLastAttemptUptimeMinutes;
+ LOG(INFO) << "Sending " << utils::FormatTimeDelta(uptime_since_last)
+ << " for metric " << metric;
+ metrics_lib_->SendToUMA(metric,
+ uptime_since_last.InMinutes(),
+ 0, // min: 0 min
+ 30 * 24 * 60, // max: 30 days
+ 50); // num_buckets
+ }
+}
+
+void MetricsReporterOmaha::ReportUpdateAttemptDownloadMetrics(
+ int64_t payload_bytes_downloaded,
+ int64_t payload_download_speed_bps,
+ DownloadSource download_source,
+ metrics::DownloadErrorCode payload_download_error_code,
+ metrics::ConnectionType connection_type) {
+ string metric = metrics::kMetricAttemptPayloadBytesDownloadedMiB;
+ int64_t payload_bytes_downloaded_mib =
+ payload_bytes_downloaded / kNumBytesInOneMiB;
+ LOG(INFO) << "Uploading " << payload_bytes_downloaded_mib << " for metric "
+ << metric;
+ metrics_lib_->SendToUMA(metric,
+ payload_bytes_downloaded_mib,
+ 0, // min: 0 MiB
+ 1024, // max: 1024 MiB = 1 GiB
+ 50); // num_buckets
+
+ metric = metrics::kMetricAttemptPayloadDownloadSpeedKBps;
+ int64_t payload_download_speed_kbps = payload_download_speed_bps / 1000;
+ LOG(INFO) << "Uploading " << payload_download_speed_kbps << " for metric "
+ << metric;
+ metrics_lib_->SendToUMA(metric,
+ payload_download_speed_kbps,
+ 0, // min: 0 kB/s
+ 10 * 1000, // max: 10000 kB/s = 10 MB/s
+ 50); // num_buckets
+
+ metric = metrics::kMetricAttemptDownloadSource;
+ LOG(INFO) << "Uploading " << download_source << " for metric " << metric;
+ metrics_lib_->SendEnumToUMA(metric, download_source, kNumDownloadSources);
+
+ if (payload_download_error_code != metrics::DownloadErrorCode::kUnset) {
+ metric = metrics::kMetricAttemptDownloadErrorCode;
+ LOG(INFO) << "Uploading " << static_cast<int>(payload_download_error_code)
+ << " for metric " << metric << " (sparse)";
+ metrics_lib_->SendSparseToUMA(
+ metric, static_cast<int>(payload_download_error_code));
+ }
+
+ metric = metrics::kMetricAttemptConnectionType;
+ LOG(INFO) << "Uploading " << static_cast<int>(connection_type)
+ << " for metric " << metric;
+ metrics_lib_->SendEnumToUMA(
+ metric,
+ static_cast<int>(connection_type),
+ static_cast<int>(metrics::ConnectionType::kNumConstants));
+}
+
+void MetricsReporterOmaha::ReportSuccessfulUpdateMetrics(
+ int attempt_count,
+ int updates_abandoned_count,
+ PayloadType payload_type,
+ int64_t payload_size,
+ int64_t num_bytes_downloaded[kNumDownloadSources],
+ int download_overhead_percentage,
+ base::TimeDelta total_duration,
+ int reboot_count,
+ int url_switch_count) {
+ string metric = metrics::kMetricSuccessfulUpdatePayloadSizeMiB;
+ int64_t mbs = payload_size / kNumBytesInOneMiB;
+ LOG(INFO) << "Uploading " << mbs << " (MiBs) for metric " << metric;
+ metrics_lib_->SendToUMA(metric,
+ mbs,
+ 0, // min: 0 MiB
+ 1024, // max: 1024 MiB = 1 GiB
+ 50); // num_buckets
+
+ int64_t total_bytes = 0;
+ int download_sources_used = 0;
+ for (int i = 0; i < kNumDownloadSources + 1; i++) {
+ DownloadSource source = static_cast<DownloadSource>(i);
+
+ // Only consider this download source (and send byte counts) as
+ // having been used if we downloaded a non-trivial amount of bytes
+ // (e.g. at least 1 MiB) that contributed to the
+ // update. Otherwise we're going to end up with a lot of zero-byte
+ // events in the histogram.
+
+ metric = metrics::kMetricSuccessfulUpdateBytesDownloadedMiB;
+ if (i < kNumDownloadSources) {
+ metric += utils::ToString(source);
+ mbs = num_bytes_downloaded[i] / kNumBytesInOneMiB;
+ total_bytes += num_bytes_downloaded[i];
+ if (mbs > 0)
+ download_sources_used |= (1 << i);
+ } else {
+ mbs = total_bytes / kNumBytesInOneMiB;
+ }
+
+ if (mbs > 0) {
+ LOG(INFO) << "Uploading " << mbs << " (MiBs) for metric " << metric;
+ metrics_lib_->SendToUMA(metric,
+ mbs,
+ 0, // min: 0 MiB
+ 1024, // max: 1024 MiB = 1 GiB
+ 50); // num_buckets
+ }
+ }
+
+ metric = metrics::kMetricSuccessfulUpdateDownloadSourcesUsed;
+ LOG(INFO) << "Uploading 0x" << std::hex << download_sources_used
+ << " (bit flags) for metric " << metric;
+ metrics_lib_->SendToUMA(metric,
+ download_sources_used,
+ 0, // min
+ (1 << kNumDownloadSources) - 1, // max
+ 1 << kNumDownloadSources); // num_buckets
+
+ metric = metrics::kMetricSuccessfulUpdateDownloadOverheadPercentage;
+ LOG(INFO) << "Uploading " << download_overhead_percentage << "% for metric "
+ << metric;
+ metrics_lib_->SendToUMA(metric,
+ download_overhead_percentage,
+ 0, // min: 0% overhead
+ 1000, // max: 1000% overhead
+ 50); // num_buckets
+
+ metric = metrics::kMetricSuccessfulUpdateUrlSwitchCount;
+ LOG(INFO) << "Uploading " << url_switch_count << " (count) for metric "
+ << metric;
+ metrics_lib_->SendToUMA(metric,
+ url_switch_count,
+ 0, // min: 0 URL switches
+ 49, // max: 49 URL switches
+ 50); // num_buckets
+
+ metric = metrics::kMetricSuccessfulUpdateTotalDurationMinutes;
+ LOG(INFO) << "Uploading " << utils::FormatTimeDelta(total_duration)
+ << " for metric " << metric;
+ metrics_lib_->SendToUMA(metric,
+ static_cast<int>(total_duration.InMinutes()),
+ 0, // min: 0 min
+ 365 * 24 * 60, // max: 365 days ~= 1 year
+ 50); // num_buckets
+
+ metric = metrics::kMetricSuccessfulUpdateRebootCount;
+ LOG(INFO) << "Uploading reboot count of " << reboot_count << " for metric "
+ << metric;
+ metrics_lib_->SendToUMA(metric,
+ reboot_count,
+ 0, // min: 0 reboots
+ 49, // max: 49 reboots
+ 50); // num_buckets
+
+ metric = metrics::kMetricSuccessfulUpdatePayloadType;
+ metrics_lib_->SendEnumToUMA(metric, payload_type, kNumPayloadTypes);
+ LOG(INFO) << "Uploading " << utils::ToString(payload_type) << " for metric "
+ << metric;
+
+ metric = metrics::kMetricSuccessfulUpdateAttemptCount;
+ metrics_lib_->SendToUMA(metric,
+ attempt_count,
+ 1, // min: 1 attempt
+ 50, // max: 50 attempts
+ 50); // num_buckets
+ LOG(INFO) << "Uploading " << attempt_count << " for metric " << metric;
+
+ metric = metrics::kMetricSuccessfulUpdateUpdatesAbandonedCount;
+ LOG(INFO) << "Uploading " << updates_abandoned_count << " (count) for metric "
+ << metric;
+ metrics_lib_->SendToUMA(metric,
+ updates_abandoned_count,
+ 0, // min: 0 counts
+ 49, // max: 49 counts
+ 50); // num_buckets
+}
+
+void MetricsReporterOmaha::ReportRollbackMetrics(
+ metrics::RollbackResult result) {
+ string metric = metrics::kMetricRollbackResult;
+ int value = static_cast<int>(result);
+ LOG(INFO) << "Sending " << value << " for metric " << metric << " (enum)";
+ metrics_lib_->SendEnumToUMA(
+ metric, value, static_cast<int>(metrics::RollbackResult::kNumConstants));
+}
+
+void MetricsReporterOmaha::ReportCertificateCheckMetrics(
+ ServerToCheck server_to_check, CertificateCheckResult result) {
+ string metric;
+ switch (server_to_check) {
+ case ServerToCheck::kUpdate:
+ metric = metrics::kMetricCertificateCheckUpdateCheck;
+ break;
+ case ServerToCheck::kDownload:
+ metric = metrics::kMetricCertificateCheckDownload;
+ break;
+ case ServerToCheck::kNone:
+ return;
+ }
+ LOG(INFO) << "Uploading " << static_cast<int>(result) << " for metric "
+ << metric;
+ metrics_lib_->SendEnumToUMA(
+ metric,
+ static_cast<int>(result),
+ static_cast<int>(CertificateCheckResult::kNumConstants));
+}
+
+void MetricsReporterOmaha::ReportFailedUpdateCount(int target_attempt) {
+ string metric = metrics::kMetricFailedUpdateCount;
+ metrics_lib_->SendToUMA(metric,
+ target_attempt,
+ 1, // min value
+ 50, // max value
+ kNumDefaultUmaBuckets);
+
+ LOG(INFO) << "Uploading " << target_attempt << " (count) for metric "
+ << metric;
+}
+
+void MetricsReporterOmaha::ReportTimeToReboot(int time_to_reboot_minutes) {
+ string metric = metrics::kMetricTimeToRebootMinutes;
+ metrics_lib_->SendToUMA(metric,
+ time_to_reboot_minutes,
+ 0, // min: 0 minute
+ 30 * 24 * 60, // max: 1 month (approx)
+ kNumDefaultUmaBuckets);
+
+ LOG(INFO) << "Uploading " << time_to_reboot_minutes << " for metric "
+ << metric;
+}
+
+void MetricsReporterOmaha::ReportInstallDateProvisioningSource(int source,
+ int max) {
+ metrics_lib_->SendEnumToUMA(metrics::kMetricInstallDateProvisioningSource,
+ source, // Sample.
+ max);
+}
+
+} // namespace chromeos_update_engine
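The DownloadSourcesUsed value reported above is a bitmask with one bit per DownloadSource, set only when at least a full MiB was fetched from that source. A standalone sketch of that encoding, with names invented for illustration (the real logic lives in ReportSuccessfulUpdateMetrics):

#include <stdint.h>

// Illustrative sketch of the bit-flag encoding used for
// UpdateEngine.SuccessfulUpdate.DownloadSourcesUsed.
int EncodeDownloadSourcesUsed(const int64_t bytes_per_source[],
                              int num_sources) {
  const int64_t kMiB = 1024 * 1024;
  int flags = 0;
  for (int i = 0; i < num_sources; i++) {
    // A source counts as "used" only if it contributed at least 1 MiB.
    if (bytes_per_source[i] / kMiB > 0)
      flags |= (1 << i);
  }
  return flags;  // Range: 0 .. (1 << num_sources) - 1.
}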
diff --git a/metrics_reporter_omaha.h b/metrics_reporter_omaha.h
new file mode 100644
index 0000000..c19fe86
--- /dev/null
+++ b/metrics_reporter_omaha.h
@@ -0,0 +1,156 @@
+//
+// Copyright (C) 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_METRICS_REPORTER_OMAHA_H_
+#define UPDATE_ENGINE_METRICS_REPORTER_OMAHA_H_
+
+#include <memory>
+
+#include <base/time/time.h>
+#include <metrics/metrics_library.h>
+
+#include "update_engine/certificate_checker.h"
+#include "update_engine/common/constants.h"
+#include "update_engine/common/error_code.h"
+#include "update_engine/metrics_constants.h"
+#include "update_engine/metrics_reporter_interface.h"
+#include "update_engine/system_state.h"
+
+namespace chromeos_update_engine {
+
+class SystemState;
+
+namespace metrics {
+
+// UpdateEngine.Daily.* metrics.
+extern const char kMetricDailyOSAgeDays[];
+
+// UpdateEngine.Check.* metrics.
+extern const char kMetricCheckDownloadErrorCode[];
+extern const char kMetricCheckReaction[];
+extern const char kMetricCheckResult[];
+extern const char kMetricCheckTimeSinceLastCheckMinutes[];
+extern const char kMetricCheckTimeSinceLastCheckUptimeMinutes[];
+
+// UpdateEngine.Attempt.* metrics.
+extern const char kMetricAttemptNumber[];
+extern const char kMetricAttemptPayloadType[];
+extern const char kMetricAttemptPayloadSizeMiB[];
+extern const char kMetricAttemptConnectionType[];
+extern const char kMetricAttemptDurationMinutes[];
+extern const char kMetricAttemptDurationUptimeMinutes[];
+extern const char kMetricAttemptTimeSinceLastAttemptMinutes[];
+extern const char kMetricAttemptTimeSinceLastAttemptUptimeMinutes[];
+extern const char kMetricAttemptPayloadBytesDownloadedMiB[];
+extern const char kMetricAttemptPayloadDownloadSpeedKBps[];
+extern const char kMetricAttemptDownloadSource[];
+extern const char kMetricAttemptResult[];
+extern const char kMetricAttemptInternalErrorCode[];
+extern const char kMetricAttemptDownloadErrorCode[];
+
+// UpdateEngine.SuccessfulUpdate.* metrics.
+extern const char kMetricSuccessfulUpdateAttemptCount[];
+extern const char kMetricSuccessfulUpdateBytesDownloadedMiB[];
+extern const char kMetricSuccessfulUpdateDownloadOverheadPercentage[];
+extern const char kMetricSuccessfulUpdateDownloadSourcesUsed[];
+extern const char kMetricSuccessfulUpdatePayloadType[];
+extern const char kMetricSuccessfulUpdatePayloadSizeMiB[];
+extern const char kMetricSuccessfulUpdateRebootCount[];
+extern const char kMetricSuccessfulUpdateTotalDurationMinutes[];
+extern const char kMetricSuccessfulUpdateUpdatesAbandonedCount[];
+extern const char kMetricSuccessfulUpdateUrlSwitchCount[];
+
+// UpdateEngine.Rollback.* metric.
+extern const char kMetricRollbackResult[];
+
+// UpdateEngine.CertificateCheck.* metrics.
+extern const char kMetricCertificateCheckUpdateCheck[];
+extern const char kMetricCertificateCheckDownload[];
+
+// UpdateEngine.* metrics.
+extern const char kMetricFailedUpdateCount[];
+extern const char kMetricInstallDateProvisioningSource[];
+extern const char kMetricTimeToRebootMinutes[];
+
+} // namespace metrics
+
+class MetricsReporterOmaha : public MetricsReporterInterface {
+ public:
+ MetricsReporterOmaha();
+
+ ~MetricsReporterOmaha() override = default;
+
+ void Initialize() override;
+
+ void ReportRollbackMetrics(metrics::RollbackResult result) override;
+
+ void ReportDailyMetrics(base::TimeDelta os_age) override;
+
+ void ReportUpdateCheckMetrics(
+ SystemState* system_state,
+ metrics::CheckResult result,
+ metrics::CheckReaction reaction,
+ metrics::DownloadErrorCode download_error_code) override;
+
+ void ReportUpdateAttemptMetrics(SystemState* system_state,
+ int attempt_number,
+ PayloadType payload_type,
+ base::TimeDelta duration,
+ base::TimeDelta duration_uptime,
+ int64_t payload_size,
+ metrics::AttemptResult attempt_result,
+ ErrorCode internal_error_code) override;
+
+ void ReportUpdateAttemptDownloadMetrics(
+ int64_t payload_bytes_downloaded,
+ int64_t payload_download_speed_bps,
+ DownloadSource download_source,
+ metrics::DownloadErrorCode payload_download_error_code,
+ metrics::ConnectionType connection_type) override;
+
+ void ReportAbnormallyTerminatedUpdateAttemptMetrics() override;
+
+ void ReportSuccessfulUpdateMetrics(
+ int attempt_count,
+ int updates_abandoned_count,
+ PayloadType payload_type,
+ int64_t payload_size,
+ int64_t num_bytes_downloaded[kNumDownloadSources],
+ int download_overhead_percentage,
+ base::TimeDelta total_duration,
+ int reboot_count,
+ int url_switch_count) override;
+
+ void ReportCertificateCheckMetrics(ServerToCheck server_to_check,
+ CertificateCheckResult result) override;
+
+ void ReportFailedUpdateCount(int target_attempt) override;
+
+ void ReportTimeToReboot(int time_to_reboot_minutes) override;
+
+ void ReportInstallDateProvisioningSource(int source, int max) override;
+
+ private:
+ friend class MetricsReporterOmahaTest;
+
+ std::unique_ptr<MetricsLibraryInterface> metrics_lib_;
+
+ DISALLOW_COPY_AND_ASSIGN(MetricsReporterOmaha);
+}; // class MetricsReporterOmaha
+
+} // namespace chromeos_update_engine
+
+#endif // UPDATE_ENGINE_METRICS_REPORTER_OMAHA_H_
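A minimal sketch of how this reporter is consumed elsewhere in this patch: callers go through a SystemState::metrics_reporter() accessor (as used in the omaha_request_action.cc hunks below); the wiring of that accessor is assumed here and is not part of this excerpt.

  #include "update_engine/metrics_reporter_interface.h"
  #include "update_engine/system_state.h"

  namespace chromeos_update_engine {

  // Illustrative only: report a failed-attempt count and a time-to-reboot.
  void ReportExampleMetrics(SystemState* system_state) {
    MetricsReporterInterface* reporter = system_state->metrics_reporter();
    reporter->ReportFailedUpdateCount(3);  // Third attempt at this payload.
    reporter->ReportTimeToReboot(42);      // 42 minutes from update to reboot.
  }

  }  // namespace chromeos_update_engine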
diff --git a/metrics_reporter_omaha_unittest.cc b/metrics_reporter_omaha_unittest.cc
new file mode 100644
index 0000000..76e33c6
--- /dev/null
+++ b/metrics_reporter_omaha_unittest.cc
@@ -0,0 +1,394 @@
+//
+// Copyright (C) 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/metrics_reporter_omaha.h"
+
+#include <memory>
+#include <string>
+
+#include <base/time/time.h>
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+#include <metrics/metrics_library_mock.h>
+
+#include "update_engine/common/fake_clock.h"
+#include "update_engine/common/fake_prefs.h"
+#include "update_engine/fake_system_state.h"
+
+using base::TimeDelta;
+using testing::AnyNumber;
+using testing::_;
+
+namespace chromeos_update_engine {
+class MetricsReporterOmahaTest : public ::testing::Test {
+ protected:
+ MetricsReporterOmahaTest() = default;
+
+ // Reset the metrics_lib_ to a mock library.
+ void SetUp() override {
+ mock_metrics_lib_ = new testing::NiceMock<MetricsLibraryMock>();
+ reporter_.metrics_lib_.reset(mock_metrics_lib_);
+ }
+
+ testing::NiceMock<MetricsLibraryMock>* mock_metrics_lib_;
+ MetricsReporterOmaha reporter_;
+};
+
+TEST_F(MetricsReporterOmahaTest, ReportDailyMetrics) {
+ TimeDelta age = TimeDelta::FromDays(10);
+ EXPECT_CALL(*mock_metrics_lib_,
+ SendToUMA(metrics::kMetricDailyOSAgeDays, _, _, _, _))
+ .Times(1);
+
+ reporter_.ReportDailyMetrics(age);
+}
+
+TEST_F(MetricsReporterOmahaTest, ReportUpdateCheckMetrics) {
+ FakeSystemState fake_system_state;
+ FakeClock fake_clock;
+ FakePrefs fake_prefs;
+
+ // We need to execute the report twice to test the time since last report.
+ fake_system_state.set_clock(&fake_clock);
+ fake_system_state.set_prefs(&fake_prefs);
+ fake_clock.SetWallclockTime(base::Time::FromInternalValue(1000000));
+ fake_clock.SetMonotonicTime(base::Time::FromInternalValue(1000000));
+
+ metrics::CheckResult result = metrics::CheckResult::kUpdateAvailable;
+ metrics::CheckReaction reaction = metrics::CheckReaction::kIgnored;
+ metrics::DownloadErrorCode error_code =
+ metrics::DownloadErrorCode::kHttpStatus200;
+
+ EXPECT_CALL(
+ *mock_metrics_lib_,
+ SendEnumToUMA(metrics::kMetricCheckResult, static_cast<int>(result), _))
+ .Times(2);
+ EXPECT_CALL(*mock_metrics_lib_,
+ SendEnumToUMA(
+ metrics::kMetricCheckReaction, static_cast<int>(reaction), _))
+ .Times(2);
+ EXPECT_CALL(*mock_metrics_lib_,
+ SendSparseToUMA(metrics::kMetricCheckDownloadErrorCode,
+ static_cast<int>(error_code)))
+ .Times(2);
+
+ EXPECT_CALL(
+ *mock_metrics_lib_,
+ SendToUMA(metrics::kMetricCheckTimeSinceLastCheckMinutes, 1, _, _, _))
+ .Times(1);
+ EXPECT_CALL(
+ *mock_metrics_lib_,
+ SendToUMA(
+ metrics::kMetricCheckTimeSinceLastCheckUptimeMinutes, 1, _, _, _))
+ .Times(1);
+
+ reporter_.ReportUpdateCheckMetrics(
+ &fake_system_state, result, reaction, error_code);
+
+ // Advance the clock by 1 minute and report the same metrics again.
+ fake_clock.SetWallclockTime(base::Time::FromInternalValue(61000000));
+ fake_clock.SetMonotonicTime(base::Time::FromInternalValue(61000000));
+ reporter_.ReportUpdateCheckMetrics(
+ &fake_system_state, result, reaction, error_code);
+}
+
+TEST_F(MetricsReporterOmahaTest,
+ ReportAbnormallyTerminatedUpdateAttemptMetrics) {
+ EXPECT_CALL(*mock_metrics_lib_,
+ SendEnumToUMA(metrics::kMetricAttemptResult,
+ static_cast<int>(
+ metrics::AttemptResult::kAbnormalTermination),
+ _))
+ .Times(1);
+
+ reporter_.ReportAbnormallyTerminatedUpdateAttemptMetrics();
+}
+
+TEST_F(MetricsReporterOmahaTest, ReportUpdateAttemptMetrics) {
+ FakeSystemState fake_system_state;
+ FakeClock fake_clock;
+ FakePrefs fake_prefs;
+
+ fake_system_state.set_clock(&fake_clock);
+ fake_system_state.set_prefs(&fake_prefs);
+ fake_clock.SetWallclockTime(base::Time::FromInternalValue(1000000));
+ fake_clock.SetMonotonicTime(base::Time::FromInternalValue(1000000));
+
+ int attempt_number = 1;
+ PayloadType payload_type = kPayloadTypeFull;
+ TimeDelta duration = TimeDelta::FromMinutes(1000);
+ TimeDelta duration_uptime = TimeDelta::FromMinutes(1000);
+
+ int64_t payload_size = 100 * kNumBytesInOneMiB;
+
+ metrics::AttemptResult attempt_result =
+ metrics::AttemptResult::kInternalError;
+ ErrorCode internal_error_code = ErrorCode::kDownloadInvalidMetadataSignature;
+
+ EXPECT_CALL(*mock_metrics_lib_,
+ SendToUMA(metrics::kMetricAttemptNumber, attempt_number, _, _, _))
+ .Times(2);
+ EXPECT_CALL(*mock_metrics_lib_,
+ SendEnumToUMA(metrics::kMetricAttemptPayloadType,
+ static_cast<int>(payload_type),
+ _))
+ .Times(2);
+ EXPECT_CALL(*mock_metrics_lib_,
+ SendToUMA(metrics::kMetricAttemptDurationMinutes,
+ duration.InMinutes(),
+ _,
+ _,
+ _))
+ .Times(2);
+ EXPECT_CALL(*mock_metrics_lib_,
+ SendToUMA(metrics::kMetricAttemptDurationUptimeMinutes,
+ duration_uptime.InMinutes(),
+ _,
+ _,
+ _))
+ .Times(2);
+
+
+ // Check the report of attempt result.
+ EXPECT_CALL(
+ *mock_metrics_lib_,
+ SendEnumToUMA(
+ metrics::kMetricAttemptResult, static_cast<int>(attempt_result), _))
+ .Times(2);
+ EXPECT_CALL(*mock_metrics_lib_,
+ SendEnumToUMA(metrics::kMetricAttemptInternalErrorCode,
+ static_cast<int>(internal_error_code),
+ _))
+ .Times(2);
+ EXPECT_CALL(*mock_metrics_lib_,
+ SendToUMA(metrics::kMetricAttemptPayloadSizeMiB, 100, _, _, _))
+ .Times(2);
+
+ // Check the duration between two reports.
+ EXPECT_CALL(
+ *mock_metrics_lib_,
+ SendToUMA(metrics::kMetricAttemptTimeSinceLastAttemptMinutes, 1, _, _, _))
+ .Times(1);
+ EXPECT_CALL(
+ *mock_metrics_lib_,
+ SendToUMA(
+ metrics::kMetricAttemptTimeSinceLastAttemptUptimeMinutes, 1, _, _, _))
+ .Times(1);
+
+ reporter_.ReportUpdateAttemptMetrics(&fake_system_state,
+ attempt_number,
+ payload_type,
+ duration,
+ duration_uptime,
+ payload_size,
+ attempt_result,
+ internal_error_code);
+
+ // Advance the clock by 1 minute and report the same metrics again.
+ fake_clock.SetWallclockTime(base::Time::FromInternalValue(61000000));
+ fake_clock.SetMonotonicTime(base::Time::FromInternalValue(61000000));
+ reporter_.ReportUpdateAttemptMetrics(&fake_system_state,
+ attempt_number,
+ payload_type,
+ duration,
+ duration_uptime,
+ payload_size,
+ attempt_result,
+ internal_error_code);
+}
+
+TEST_F(MetricsReporterOmahaTest, ReportUpdateAttemptDownloadMetrics) {
+ int64_t payload_bytes_downloaded = 200 * kNumBytesInOneMiB;
+ int64_t payload_download_speed_bps = 100 * 1000;
+ DownloadSource download_source = kDownloadSourceHttpServer;
+ metrics::DownloadErrorCode payload_download_error_code =
+ metrics::DownloadErrorCode::kDownloadError;
+ metrics::ConnectionType connection_type = metrics::ConnectionType::kCellular;
+
+ EXPECT_CALL(
+ *mock_metrics_lib_,
+ SendToUMA(metrics::kMetricAttemptPayloadBytesDownloadedMiB, 200, _, _, _))
+ .Times(1);
+ EXPECT_CALL(
+ *mock_metrics_lib_,
+ SendToUMA(metrics::kMetricAttemptPayloadDownloadSpeedKBps, 100, _, _, _))
+ .Times(1);
+ EXPECT_CALL(*mock_metrics_lib_,
+ SendEnumToUMA(metrics::kMetricAttemptDownloadSource,
+ static_cast<int>(download_source),
+ _))
+ .Times(1);
+ EXPECT_CALL(*mock_metrics_lib_,
+ SendSparseToUMA(metrics::kMetricAttemptDownloadErrorCode,
+ static_cast<int>(payload_download_error_code)))
+ .Times(1);
+ EXPECT_CALL(*mock_metrics_lib_,
+ SendEnumToUMA(metrics::kMetricAttemptConnectionType,
+ static_cast<int>(connection_type),
+ _))
+ .Times(1);
+
+ reporter_.ReportUpdateAttemptDownloadMetrics(payload_bytes_downloaded,
+ payload_download_speed_bps,
+ download_source,
+ payload_download_error_code,
+ connection_type);
+}
+
+TEST_F(MetricsReporterOmahaTest, ReportSuccessfulUpdateMetrics) {
+ int attempt_count = 3;
+ int updates_abandoned_count = 2;
+ PayloadType payload_type = kPayloadTypeDelta;
+ int64_t payload_size = 200 * kNumBytesInOneMiB;
+ int64_t num_bytes_downloaded[kNumDownloadSources] = {};
+ // 200MiB payload downloaded from HttpsServer.
+ num_bytes_downloaded[0] = 200 * kNumBytesInOneMiB;
+ int download_overhead_percentage = 20;
+ TimeDelta total_duration = TimeDelta::FromMinutes(30);
+ int reboot_count = 2;
+ int url_switch_count = 2;
+
+ EXPECT_CALL(
+ *mock_metrics_lib_,
+ SendToUMA(metrics::kMetricSuccessfulUpdatePayloadSizeMiB, 200, _, _, _))
+ .Times(1);
+
+ // Check that we report to both BytesDownloadedMiBHttpsServer and
+ // BytesDownloadedMiB.
+ std::string DownloadedMiBMetric =
+ metrics::kMetricSuccessfulUpdateBytesDownloadedMiB;
+ DownloadedMiBMetric += "HttpsServer";
+ EXPECT_CALL(*mock_metrics_lib_, SendToUMA(DownloadedMiBMetric, 200, _, _, _))
+ .Times(1);
+ EXPECT_CALL(
+ *mock_metrics_lib_,
+ SendToUMA(
+ metrics::kMetricSuccessfulUpdateBytesDownloadedMiB, 200, _, _, _))
+ .Times(1);
+
+ EXPECT_CALL(
+ *mock_metrics_lib_,
+ SendToUMA(
+ metrics::kMetricSuccessfulUpdateDownloadSourcesUsed, 1, _, _, _))
+ .Times(1);
+ EXPECT_CALL(
+ *mock_metrics_lib_,
+ SendToUMA(metrics::kMetricSuccessfulUpdateDownloadOverheadPercentage,
+ 20,
+ _,
+ _,
+ _));
+
+ EXPECT_CALL(*mock_metrics_lib_,
+ SendToUMA(metrics::kMetricSuccessfulUpdateUrlSwitchCount,
+ url_switch_count,
+ _,
+ _,
+ _))
+ .Times(1);
+ EXPECT_CALL(
+ *mock_metrics_lib_,
+ SendToUMA(
+ metrics::kMetricSuccessfulUpdateTotalDurationMinutes, 30, _, _, _))
+ .Times(1);
+ EXPECT_CALL(
+ *mock_metrics_lib_,
+ SendToUMA(
+ metrics::kMetricSuccessfulUpdateRebootCount, reboot_count, _, _, _))
+ .Times(1);
+ EXPECT_CALL(*mock_metrics_lib_,
+ SendEnumToUMA(
+ metrics::kMetricSuccessfulUpdatePayloadType, payload_type, _))
+ .Times(1);
+ EXPECT_CALL(
+ *mock_metrics_lib_,
+ SendToUMA(
+ metrics::kMetricSuccessfulUpdateAttemptCount, attempt_count, _, _, _))
+ .Times(1);
+ EXPECT_CALL(*mock_metrics_lib_,
+ SendToUMA(metrics::kMetricSuccessfulUpdateUpdatesAbandonedCount,
+ updates_abandoned_count,
+ _,
+ _,
+ _))
+ .Times(1);
+
+ reporter_.ReportSuccessfulUpdateMetrics(attempt_count,
+ updates_abandoned_count,
+ payload_type,
+ payload_size,
+ num_bytes_downloaded,
+ download_overhead_percentage,
+ total_duration,
+ reboot_count,
+ url_switch_count);
+}
+
+TEST_F(MetricsReporterOmahaTest, ReportRollbackMetrics) {
+ metrics::RollbackResult result = metrics::RollbackResult::kSuccess;
+ EXPECT_CALL(*mock_metrics_lib_,
+ SendEnumToUMA(
+ metrics::kMetricRollbackResult, static_cast<int>(result), _))
+ .Times(1);
+
+ reporter_.ReportRollbackMetrics(result);
+}
+
+TEST_F(MetricsReporterOmahaTest, ReportCertificateCheckMetrics) {
+ ServerToCheck server_to_check = ServerToCheck::kUpdate;
+ CertificateCheckResult result = CertificateCheckResult::kValid;
+ EXPECT_CALL(*mock_metrics_lib_,
+ SendEnumToUMA(metrics::kMetricCertificateCheckUpdateCheck,
+ static_cast<int>(result),
+ _))
+ .Times(1);
+
+ reporter_.ReportCertificateCheckMetrics(server_to_check, result);
+}
+
+TEST_F(MetricsReporterOmahaTest, ReportFailedUpdateCount) {
+ int target_attempt = 3;
+ EXPECT_CALL(
+ *mock_metrics_lib_,
+ SendToUMA(metrics::kMetricFailedUpdateCount, target_attempt, _, _, _))
+ .Times(1);
+
+ reporter_.ReportFailedUpdateCount(target_attempt);
+}
+
+TEST_F(MetricsReporterOmahaTest, ReportTimeToReboot) {
+ int time_to_reboot_minutes = 1000;
+ EXPECT_CALL(
+ *mock_metrics_lib_,
+ SendToUMA(
+ metrics::kMetricTimeToRebootMinutes, time_to_reboot_minutes, _, _, _))
+ .Times(1);
+
+ reporter_.ReportTimeToReboot(time_to_reboot_minutes);
+}
+
+TEST_F(MetricsReporterOmahaTest, ReportInstallDateProvisioningSource) {
+ int source = 2;
+ int max = 5;
+ EXPECT_CALL(
+ *mock_metrics_lib_,
+ SendEnumToUMA(metrics::kMetricInstallDateProvisioningSource, source, max))
+ .Times(1);
+
+ reporter_.ReportInstallDateProvisioningSource(source, max);
+}
+
+} // namespace chromeos_update_engine
diff --git a/metrics_reporter_stub.cc b/metrics_reporter_stub.cc
new file mode 100644
index 0000000..81664a5
--- /dev/null
+++ b/metrics_reporter_stub.cc
@@ -0,0 +1,31 @@
+//
+// Copyright (C) 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/metrics_reporter_stub.h"
+
+#include <memory>
+
+namespace chromeos_update_engine {
+
+namespace metrics {
+
+std::unique_ptr<MetricsReporterInterface> CreateMetricsReporter() {
+ return std::make_unique<MetricsReporterStub>();
+}
+
+} // namespace metrics
+
+} // namespace chromeos_update_engine
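A rough usage sketch for the factory above (illustrative only; it assumes CreateMetricsReporter() is declared alongside MetricsReporterInterface, which is outside this excerpt, and that a non-stub build supplies its own definition of the factory):

  #include <memory>

  #include "update_engine/metrics_reporter_interface.h"

  void InitReporterExample() {
    // Whichever translation unit defining metrics::CreateMetricsReporter()
    // gets linked in (this stub, or an Omaha-backed one) decides the concrete
    // reporter type.
    std::unique_ptr<chromeos_update_engine::MetricsReporterInterface> reporter =
        chromeos_update_engine::metrics::CreateMetricsReporter();
    reporter->Initialize();
    reporter->ReportAbnormallyTerminatedUpdateAttemptMetrics();
  }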
diff --git a/metrics_reporter_stub.h b/metrics_reporter_stub.h
new file mode 100644
index 0000000..d0f75ab
--- /dev/null
+++ b/metrics_reporter_stub.h
@@ -0,0 +1,88 @@
+//
+// Copyright (C) 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_METRICS_REPORTER_STUB_H_
+#define UPDATE_ENGINE_METRICS_REPORTER_STUB_H_
+
+#include "update_engine/common/error_code.h"
+#include "update_engine/metrics_constants.h"
+#include "update_engine/metrics_reporter_interface.h"
+
+namespace chromeos_update_engine {
+
+class MetricsReporterStub : public MetricsReporterInterface {
+ public:
+ MetricsReporterStub() = default;
+
+ ~MetricsReporterStub() override = default;
+
+ void Initialize() override {}
+
+ void ReportRollbackMetrics(metrics::RollbackResult result) override {}
+
+ void ReportDailyMetrics(base::TimeDelta os_age) override {}
+
+ void ReportUpdateCheckMetrics(
+ SystemState* system_state,
+ metrics::CheckResult result,
+ metrics::CheckReaction reaction,
+ metrics::DownloadErrorCode download_error_code) override {}
+
+ void ReportUpdateAttemptMetrics(SystemState* system_state,
+ int attempt_number,
+ PayloadType payload_type,
+ base::TimeDelta duration,
+ base::TimeDelta duration_uptime,
+ int64_t payload_size,
+ metrics::AttemptResult attempt_result,
+ ErrorCode internal_error_code) override {}
+
+ void ReportUpdateAttemptDownloadMetrics(
+ int64_t payload_bytes_downloaded,
+ int64_t payload_download_speed_bps,
+ DownloadSource download_source,
+ metrics::DownloadErrorCode payload_download_error_code,
+ metrics::ConnectionType connection_type) override {}
+
+ void ReportAbnormallyTerminatedUpdateAttemptMetrics() override {}
+
+ void ReportSuccessfulUpdateMetrics(
+ int attempt_count,
+ int updates_abandoned_count,
+ PayloadType payload_type,
+ int64_t payload_size,
+ int64_t num_bytes_downloaded[kNumDownloadSources],
+ int download_overhead_percentage,
+ base::TimeDelta total_duration,
+ int reboot_count,
+ int url_switch_count) override {}
+
+ void ReportCertificateCheckMetrics(ServerToCheck server_to_check,
+ CertificateCheckResult result) override {}
+
+ void ReportFailedUpdateCount(int target_attempt) override {}
+
+ void ReportTimeToReboot(int time_to_reboot_minutes) override {}
+
+ void ReportInstallDateProvisioningSource(int source, int max) override {}
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MetricsReporterStub);
+};
+
+} // namespace chromeos_update_engine
+
+#endif // UPDATE_ENGINE_METRICS_REPORTER_STUB_H_
diff --git a/metrics_utils.cc b/metrics_utils.cc
index 425dc4d..46530f0 100644
--- a/metrics_utils.cc
+++ b/metrics_utils.cc
@@ -21,7 +21,8 @@
#include <base/time/time.h>
#include "update_engine/common/clock_interface.h"
-#include "update_engine/common/prefs_interface.h"
+#include "update_engine/common/constants.h"
+#include "update_engine/common/utils.h"
#include "update_engine/system_state.h"
using base::Time;
@@ -38,6 +39,9 @@
case ErrorCode::kSuccess:
return metrics::AttemptResult::kUpdateSucceeded;
+ case ErrorCode::kUpdatedButNotActive:
+ return metrics::AttemptResult::kUpdateSucceededNotActive;
+
case ErrorCode::kDownloadTransferError:
return metrics::AttemptResult::kPayloadDownloadError;
@@ -211,6 +215,7 @@
case ErrorCode::kFilesystemVerifierError:
case ErrorCode::kUserCanceled:
case ErrorCode::kPayloadTimestampError:
+ case ErrorCode::kUpdatedButNotActive:
break;
// Special flags. These can't happen (we mask them out above) but
@@ -307,5 +312,76 @@
return ret;
}
+int64_t GetPersistedValue(const std::string& key, PrefsInterface* prefs) {
+ CHECK(prefs);
+ if (!prefs->Exists(key))
+ return 0;
+
+ int64_t stored_value;
+ if (!prefs->GetInt64(key, &stored_value))
+ return 0;
+
+ if (stored_value < 0) {
+ LOG(ERROR) << key << ": Invalid value (" << stored_value
+ << ") in persisted state. Defaulting to 0";
+ return 0;
+ }
+
+ return stored_value;
+}
+
+void SetNumReboots(int64_t num_reboots, PrefsInterface* prefs) {
+ CHECK(prefs);
+ prefs->SetInt64(kPrefsNumReboots, num_reboots);
+ LOG(INFO) << "Number of Reboots during current update attempt = "
+ << num_reboots;
+}
+
+void SetPayloadAttemptNumber(int64_t payload_attempt_number,
+ PrefsInterface* prefs) {
+ CHECK(prefs);
+ prefs->SetInt64(kPrefsPayloadAttemptNumber, payload_attempt_number);
+ LOG(INFO) << "Payload Attempt Number = " << payload_attempt_number;
+}
+
+void SetSystemUpdatedMarker(ClockInterface* clock, PrefsInterface* prefs) {
+ CHECK(prefs);
+ CHECK(clock);
+ Time update_finish_time = clock->GetMonotonicTime();
+ prefs->SetInt64(kPrefsSystemUpdatedMarker,
+ update_finish_time.ToInternalValue());
+ LOG(INFO) << "Updated Marker = " << utils::ToString(update_finish_time);
+}
+
+void SetUpdateTimestampStart(const Time& update_start_time,
+ PrefsInterface* prefs) {
+ CHECK(prefs);
+ prefs->SetInt64(kPrefsUpdateTimestampStart,
+ update_start_time.ToInternalValue());
+ LOG(INFO) << "Update Timestamp Start = "
+ << utils::ToString(update_start_time);
+}
+
+bool LoadAndReportTimeToReboot(MetricsReporterInterface* metrics_reporter,
+ PrefsInterface* prefs,
+ ClockInterface* clock) {
+ CHECK(prefs);
+ CHECK(clock);
+ int64_t stored_value = GetPersistedValue(kPrefsSystemUpdatedMarker, prefs);
+ if (stored_value == 0)
+ return false;
+
+ Time system_updated_at = Time::FromInternalValue(stored_value);
+ base::TimeDelta time_to_reboot =
+ clock->GetMonotonicTime() - system_updated_at;
+ if (time_to_reboot.ToInternalValue() < 0) {
+ LOG(ERROR) << "time_to_reboot is negative - system_updated_at: "
+ << utils::ToString(system_updated_at);
+ return false;
+ }
+ metrics_reporter->ReportTimeToReboot(time_to_reboot.InMinutes());
+ return true;
+}
+
} // namespace metrics_utils
} // namespace chromeos_update_engine
diff --git a/metrics_utils.h b/metrics_utils.h
index d9826c1..d08cc4a 100644
--- a/metrics_utils.h
+++ b/metrics_utils.h
@@ -17,8 +17,16 @@
#ifndef UPDATE_ENGINE_METRICS_UTILS_H_
#define UPDATE_ENGINE_METRICS_UTILS_H_
+#include <string>
+
+#include <base/time/time.h>
+
+#include "update_engine/common/clock_interface.h"
+#include "update_engine/common/error_code.h"
+#include "update_engine/common/prefs_interface.h"
#include "update_engine/connection_utils.h"
-#include "update_engine/metrics.h"
+#include "update_engine/metrics_constants.h"
+#include "update_engine/metrics_reporter_interface.h"
namespace chromeos_update_engine {
@@ -65,6 +73,33 @@
int64_t* storage,
base::TimeDelta* out_duration);
+// Returns the persisted value from prefs for the given key. It also
+// validates that the value returned is non-negative.
+int64_t GetPersistedValue(const std::string& key, PrefsInterface* prefs);
+
+// Persists the reboot count of the update attempt to |kPrefsNumReboots|.
+void SetNumReboots(int64_t num_reboots, PrefsInterface* prefs);
+
+// Persists the payload attempt number to |kPrefsPayloadAttemptNumber|.
+void SetPayloadAttemptNumber(int64_t payload_attempt_number,
+ PrefsInterface* prefs);
+
+// Persists the finished time of an update to the |kPrefsSystemUpdatedMarker|.
+void SetSystemUpdatedMarker(ClockInterface* clock, PrefsInterface* prefs);
+
+// Persists the start time of an update to |kPrefsUpdateTimestampStart|.
+void SetUpdateTimestampStart(const base::Time& update_start_time,
+ PrefsInterface* prefs);
+
+// Called at program startup if the device booted into a new update.
+// Reads the (monotonic-clock) time at which the update successfully completed
+// from |kPrefsSystemUpdatedMarker|, computes the elapsed duration until the
+// current boot, and reports it through |metrics_reporter|. Returns false if
+// the marker is missing or the computed duration is negative.
+bool LoadAndReportTimeToReboot(MetricsReporterInterface* metrics_reporter,
+ PrefsInterface* prefs,
+ ClockInterface* clock);
+
} // namespace metrics_utils
} // namespace chromeos_update_engine
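A compact sketch of how the two reboot-related helpers above are intended to pair across a reboot (illustrative only; the real call sites are not part of this hunk, and the SystemState accessors clock(), prefs() and metrics_reporter() are assumed from their use elsewhere in this patch):

  #include "update_engine/metrics_utils.h"
  #include "update_engine/system_state.h"

  namespace chromeos_update_engine {

  // Right after an update is applied: persist the monotonic "updated at" time.
  void OnUpdateAppliedExample(SystemState* system_state) {
    metrics_utils::SetSystemUpdatedMarker(system_state->clock(),
                                          system_state->prefs());
  }

  // At the next startup: compute (now - marker) and report it in minutes.
  void OnStartupExample(SystemState* system_state) {
    metrics_utils::LoadAndReportTimeToReboot(system_state->metrics_reporter(),
                                             system_state->prefs(),
                                             system_state->clock());
  }

  }  // namespace chromeos_update_engine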
diff --git a/mock_file_writer.h b/mock_file_writer.h
index 72d6a86..26cd45d 100644
--- a/mock_file_writer.h
+++ b/mock_file_writer.h
@@ -24,7 +24,8 @@
class MockFileWriter : public FileWriter {
public:
- MOCK_METHOD2(Write, ssize_t(const void* bytes, size_t count));
+ MOCK_METHOD2(Write, bool(const void* bytes, size_t count));
+ MOCK_METHOD3(Write, bool(const void* bytes, size_t count, ErrorCode* error));
MOCK_METHOD0(Close, int());
};
diff --git a/mock_metrics_reporter.h b/mock_metrics_reporter.h
new file mode 100644
index 0000000..a0f164b
--- /dev/null
+++ b/mock_metrics_reporter.h
@@ -0,0 +1,83 @@
+//
+// Copyright (C) 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_MOCK_METRICS_REPORTER_H_
+#define UPDATE_ENGINE_MOCK_METRICS_REPORTER_H_
+
+#include <gmock/gmock.h>
+
+#include "update_engine/metrics_reporter_interface.h"
+
+namespace chromeos_update_engine {
+
+class MockMetricsReporter : public MetricsReporterInterface {
+ public:
+ MOCK_METHOD0(Initialize, void());
+
+ MOCK_METHOD1(ReportRollbackMetrics, void(metrics::RollbackResult result));
+
+ MOCK_METHOD1(ReportDailyMetrics, void(base::TimeDelta os_age));
+
+ MOCK_METHOD4(ReportUpdateCheckMetrics,
+ void(SystemState* system_state,
+ metrics::CheckResult result,
+ metrics::CheckReaction reaction,
+ metrics::DownloadErrorCode download_error_code));
+
+ MOCK_METHOD8(ReportUpdateAttemptMetrics,
+ void(SystemState* system_state,
+ int attempt_number,
+ PayloadType payload_type,
+ base::TimeDelta duration,
+ base::TimeDelta duration_uptime,
+ int64_t payload_size,
+ metrics::AttemptResult attempt_result,
+ ErrorCode internal_error_code));
+
+ MOCK_METHOD5(ReportUpdateAttemptDownloadMetrics,
+ void(int64_t payload_bytes_downloaded,
+ int64_t payload_download_speed_bps,
+ DownloadSource download_source,
+ metrics::DownloadErrorCode payload_download_error_code,
+ metrics::ConnectionType connection_type));
+
+ MOCK_METHOD0(ReportAbnormallyTerminatedUpdateAttemptMetrics, void());
+
+ MOCK_METHOD9(ReportSuccessfulUpdateMetrics,
+ void(int attempt_count,
+ int updates_abandoned_count,
+ PayloadType payload_type,
+ int64_t payload_size,
+ int64_t num_bytes_downloaded[kNumDownloadSources],
+ int download_overhead_percentage,
+ base::TimeDelta total_duration,
+ int reboot_count,
+ int url_switch_count));
+
+ MOCK_METHOD2(ReportCertificateCheckMetrics,
+ void(ServerToCheck server_to_check,
+ CertificateCheckResult result));
+
+ MOCK_METHOD1(ReportFailedUpdateCount, void(int target_attempt));
+
+ MOCK_METHOD1(ReportTimeToReboot, void(int time_to_reboot_minutes));
+
+ MOCK_METHOD2(ReportInstallDateProvisioningSource, void(int source, int max));
+};
+
+} // namespace chromeos_update_engine
+
+#endif // UPDATE_ENGINE_MOCK_METRICS_REPORTER_H_
diff --git a/mock_service_observer.h b/mock_service_observer.h
new file mode 100644
index 0000000..e434eab
--- /dev/null
+++ b/mock_service_observer.h
@@ -0,0 +1,35 @@
+//
+// Copyright (C) 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_MOCK_SERVICE_OBSERVER_H_
+#define UPDATE_ENGINE_MOCK_SERVICE_OBSERVER_H_
+
+#include <gmock/gmock.h>
+#include "update_engine/service_observer_interface.h"
+
+namespace chromeos_update_engine {
+
+class MockServiceObserver : public ServiceObserverInterface {
+ public:
+ MOCK_METHOD1(
+ SendStatusUpdate,
+ void(const update_engine::UpdateEngineStatus& update_engine_status));
+ MOCK_METHOD1(SendPayloadApplicationComplete, void(ErrorCode error_code));
+};
+
+} // namespace chromeos_update_engine
+
+#endif // UPDATE_ENGINE_MOCK_SERVICE_OBSERVER_H_
diff --git a/mock_update_attempter.h b/mock_update_attempter.h
index 89f163e..d88b840 100644
--- a/mock_update_attempter.h
+++ b/mock_update_attempter.h
@@ -36,19 +36,18 @@
bool obey_proxies,
bool interactive));
- MOCK_METHOD5(GetStatus, bool(int64_t* last_checked_time,
- double* progress,
- std::string* current_operation,
- std::string* new_version,
- int64_t* new_size));
+ MOCK_METHOD1(GetStatus, bool(update_engine::UpdateEngineStatus* out_status));
MOCK_METHOD1(GetBootTimeAtUpdate, bool(base::Time* out_boot_time));
MOCK_METHOD0(ResetStatus, bool(void));
- MOCK_METHOD3(CheckForUpdate, void(const std::string& app_version,
- const std::string& omaha_url,
- bool is_interactive));
+ MOCK_METHOD0(GetCurrentUpdateAttemptFlags, UpdateAttemptFlags(void));
+
+ MOCK_METHOD3(CheckForUpdate,
+ bool(const std::string& app_version,
+ const std::string& omaha_url,
+ UpdateAttemptFlags flags));
MOCK_METHOD0(RefreshDevicePolicy, void(void));
diff --git a/network_selector_android.cc b/network_selector_android.cc
index 6879b69..55ba799 100644
--- a/network_selector_android.cc
+++ b/network_selector_android.cc
@@ -16,9 +16,10 @@
#include "update_engine/network_selector_android.h"
+#include <memory>
+
#include <android/multinetwork.h>
#include <base/logging.h>
-#include <brillo/make_unique_ptr.h>
namespace chromeos_update_engine {
@@ -26,7 +27,7 @@
// Factory defined in network_selector.h.
std::unique_ptr<NetworkSelectorInterface> CreateNetworkSelector() {
- return brillo::make_unique_ptr(new NetworkSelectorAndroid());
+ return std::make_unique<NetworkSelectorAndroid>();
}
} // namespace network
diff --git a/network_selector_stub.cc b/network_selector_stub.cc
index 218d454..67925f4 100644
--- a/network_selector_stub.cc
+++ b/network_selector_stub.cc
@@ -16,8 +16,9 @@
#include "update_engine/network_selector_stub.h"
+#include <memory>
+
#include <base/logging.h>
-#include <brillo/make_unique_ptr.h>
namespace chromeos_update_engine {
@@ -25,7 +26,7 @@
// Factory defined in network_selector.h.
std::unique_ptr<NetworkSelectorInterface> CreateNetworkSelector() {
- return brillo::make_unique_ptr(new NetworkSelectorStub());
+ return std::make_unique<NetworkSelectorStub>();
}
} // namespace network
diff --git a/omaha_request_action.cc b/omaha_request_action.cc
index c3bbf9d..c4db0c7 100644
--- a/omaha_request_action.cc
+++ b/omaha_request_action.cc
@@ -31,6 +31,7 @@
#include <base/strings/string_util.h>
#include <base/strings/stringprintf.h>
#include <base/time/time.h>
+#include <brillo/key_value_store.h>
#include <expat.h>
#include <metrics/metrics_library.h>
@@ -42,7 +43,7 @@
#include "update_engine/common/prefs_interface.h"
#include "update_engine/common/utils.h"
#include "update_engine/connection_manager_interface.h"
-#include "update_engine/metrics.h"
+#include "update_engine/metrics_reporter_interface.h"
#include "update_engine/metrics_utils.h"
#include "update_engine/omaha_request_params.h"
#include "update_engine/p2p_manager.h"
@@ -207,8 +208,17 @@
struct OmahaAppData {
string id;
string version;
+ string product_components;
};
+bool IsValidComponentID(const string& id) {
+ for (char c : id) {
+ if (!isalnum(c) && c != '-' && c != '_' && c != '.')
+ return false;
+ }
+ return true;
+}
+
// Returns an XML that corresponds to the entire <app> node of the Omaha
// request based on the given parameters.
string GetAppXml(const OmahaEvent* event,
@@ -276,11 +286,39 @@
XmlEncodeWithDefault(params->os_build_type(), "") + "\" ";
}
+ string product_components_args;
+ if (!app_data.product_components.empty()) {
+ brillo::KeyValueStore store;
+ if (store.LoadFromString(app_data.product_components)) {
+ for (const string& key : store.GetKeys()) {
+ if (!IsValidComponentID(key)) {
+ LOG(ERROR) << "Invalid component id: " << key;
+ continue;
+ }
+ string version;
+ if (!store.GetString(key, &version)) {
+ LOG(ERROR) << "Failed to get version for " << key
+ << " in product_components.";
+ continue;
+ }
+ product_components_args +=
+ base::StringPrintf("_%s.version=\"%s\" ",
+ key.c_str(),
+ XmlEncodeWithDefault(version, "").c_str());
+ }
+ } else {
+ LOG(ERROR) << "Failed to parse product_components:\n"
+ << app_data.product_components;
+ }
+ }
+
+ // clang-format off
string app_xml = " <app "
"appid=\"" + XmlEncodeWithDefault(app_data.id, "") + "\" " +
app_cohort_args +
app_versions +
app_channels +
+ product_components_args +
fingerprint_arg +
buildtype_arg +
"lang=\"" + XmlEncodeWithDefault(params->app_lang(), "en-US") + "\" " +
@@ -293,7 +331,7 @@
">\n" +
app_body +
" </app>\n";
-
+ // clang-format on
return app_xml;
}
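To make the product_components handling above concrete: the value appears to be a key=value-per-line blob (the format brillo::KeyValueStore parses), where each key is a component id constrained by IsValidComponentID() and each value is that component's version. A small sketch with made-up values:

  #include <brillo/key_value_store.h>

  void ProductComponentsExample() {
    // With params->product_components() returning:
    //   foo-bar=1.2.3.4
    //   baz=2.0
    // the loop above emits one attribute per key on the <app> element, i.e.
    //   _foo-bar.version="1.2.3.4"  and  _baz.version="2.0"
    // (keys that fail IsValidComponentID() are logged and skipped).
    brillo::KeyValueStore store;
    bool ok = store.LoadFromString("foo-bar=1.2.3.4\nbaz=2.0\n");
    // If parsing fails, the caller above logs an error and emits nothing.
    (void)ok;
  }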
@@ -319,8 +357,10 @@
int install_date_in_days,
SystemState* system_state) {
string os_xml = GetOsXml(params);
- OmahaAppData product_app = {.id = params->GetAppId(),
- .version = params->app_version()};
+ OmahaAppData product_app = {
+ .id = params->GetAppId(),
+ .version = params->app_version(),
+ .product_components = params->product_components()};
string app_xml = GetAppXml(event,
params,
product_app,
@@ -380,22 +420,23 @@
string current_path;
// These are the values extracted from the XML.
- string app_cohort;
- string app_cohorthint;
- string app_cohortname;
- bool app_cohort_set = false;
- bool app_cohorthint_set = false;
- bool app_cohortname_set = false;
string updatecheck_poll_interval;
map<string, string> updatecheck_attrs;
string daystart_elapsed_days;
string daystart_elapsed_seconds;
struct App {
+ string id;
vector<string> url_codebase;
string manifest_version;
map<string, string> action_postinstall_attrs;
string updatecheck_status;
+ string cohort;
+ string cohorthint;
+ string cohortname;
+ bool cohort_set = false;
+ bool cohorthint_set = false;
+ bool cohortname_set = false;
struct Package {
string name;
@@ -429,19 +470,23 @@
}
if (data->current_path == "/response/app") {
- data->apps.emplace_back();
+ OmahaParserData::App app;
+ if (attrs.find("appid") != attrs.end()) {
+ app.id = attrs["appid"];
+ }
if (attrs.find("cohort") != attrs.end()) {
- data->app_cohort_set = true;
- data->app_cohort = attrs["cohort"];
+ app.cohort_set = true;
+ app.cohort = attrs["cohort"];
}
if (attrs.find("cohorthint") != attrs.end()) {
- data->app_cohorthint_set = true;
- data->app_cohorthint = attrs["cohorthint"];
+ app.cohorthint_set = true;
+ app.cohorthint = attrs["cohorthint"];
}
if (attrs.find("cohortname") != attrs.end()) {
- data->app_cohortname_set = true;
- data->app_cohortname = attrs["cohortname"];
+ app.cohortname_set = true;
+ app.cohortname = attrs["cohortname"];
}
+ data->apps.push_back(std::move(app));
} else if (data->current_path == "/response/app/updatecheck") {
if (!data->apps.empty())
data->apps.back().updatecheck_status = attrs["status"];
@@ -623,6 +668,11 @@
<< "powerwash_count is " << powerwash_count;
return false;
}
+ if (system_state_->hardware()->GetFirstActiveOmahaPingSent()) {
+ LOG(INFO) << "Not sending ping with a=-1 r=-1 to omaha because "
+ << "the first_active_omaha_ping_sent is true";
+ return false;
+ }
return true;
}
return ping_active_days_ > 0 || ping_roll_call_days_ > 0;
@@ -919,12 +969,17 @@
}
// We persist the cohorts sent by omaha even if the status is "noupdate".
- if (parser_data->app_cohort_set)
- PersistCohortData(kPrefsOmahaCohort, parser_data->app_cohort);
- if (parser_data->app_cohorthint_set)
- PersistCohortData(kPrefsOmahaCohortHint, parser_data->app_cohorthint);
- if (parser_data->app_cohortname_set)
- PersistCohortData(kPrefsOmahaCohortName, parser_data->app_cohortname);
+ for (const auto& app : parser_data->apps) {
+ if (app.id == params_->GetAppId()) {
+ if (app.cohort_set)
+ PersistCohortData(kPrefsOmahaCohort, app.cohort);
+ if (app.cohorthint_set)
+ PersistCohortData(kPrefsOmahaCohortHint, app.cohorthint);
+ if (app.cohortname_set)
+ PersistCohortData(kPrefsOmahaCohortName, app.cohortname);
+ break;
+ }
+ }
// Parse the updatecheck attributes.
PersistEolStatus(parser_data->updatecheck_attrs);
@@ -983,10 +1038,18 @@
ScopedActionCompleter* completer) {
map<string, string> attrs;
for (auto& app : parser_data->apps) {
- if (!app.manifest_version.empty() && output_object->version.empty())
+ if (app.id == params_->GetAppId()) {
+ // This is the app (potentially the only app).
output_object->version = app.manifest_version;
- if (!app.action_postinstall_attrs.empty() && attrs.empty())
+ } else if (!params_->system_app_id().empty() &&
+ app.id == params_->system_app_id()) {
+ // This is the system app (the check is intentionally skipped when no
+ // system_app_id is set).
+ output_object->system_version = app.manifest_version;
+ }
+ if (!app.action_postinstall_attrs.empty() && attrs.empty()) {
attrs = app.action_postinstall_attrs;
+ }
}
if (output_object->version.empty()) {
LOG(ERROR) << "Omaha Response does not have version in manifest!";
@@ -1087,6 +1150,16 @@
LOG_IF(ERROR, !UpdateLastPingDays(&parser_data, system_state_->prefs()))
<< "Failed to update the last ping day preferences!";
+ // Sets first_active_omaha_ping_sent to true (stored in VPD on Chrome OS). We
+ // only do this once we have received a response from Omaha and the value has
+ // never been set to true before. Failures here should be ignored. There is no
+ // need to check whether a=-1 has been sent, because older devices already
+ // sent their a=-1 in the past, and first_active_omaha_ping_sent still has to
+ // be set for future checks.
+ if (!system_state_->hardware()->GetFirstActiveOmahaPingSent()) {
+ system_state_->hardware()->SetFirstActiveOmahaPingSent();
+ }
+
if (!HasOutputPipe()) {
// Just set success to whether or not the http transfer succeeded,
// which must be true at this point in the code.
@@ -1503,12 +1576,9 @@
if (!prefs->SetInt64(kPrefsInstallDateDays, install_date_days))
return false;
- string metric_name = metrics::kMetricInstallDateProvisioningSource;
- system_state->metrics_lib()->SendEnumToUMA(
- metric_name,
+ system_state->metrics_reporter()->ReportInstallDateProvisioningSource(
static_cast<int>(source), // Sample.
kProvisionedMax); // Maximum.
-
return true;
}
@@ -1594,8 +1664,8 @@
break;
}
- metrics::ReportUpdateCheckMetrics(system_state_,
- result, reaction, download_error_code);
+ system_state_->metrics_reporter()->ReportUpdateCheckMetrics(
+ system_state_, result, reaction, download_error_code);
}
bool OmahaRequestAction::ShouldIgnoreUpdate(
diff --git a/omaha_request_action_unittest.cc b/omaha_request_action_unittest.cc
index 9091031..d57abe5 100644
--- a/omaha_request_action_unittest.cc
+++ b/omaha_request_action_unittest.cc
@@ -18,18 +18,19 @@
#include <stdint.h>
+#include <memory>
#include <string>
#include <vector>
#include <base/bind.h>
#include <base/files/file_util.h>
#include <base/files/scoped_temp_dir.h>
+#include <base/memory/ptr_util.h>
#include <base/strings/string_number_conversions.h>
#include <base/strings/string_util.h>
#include <base/strings/stringprintf.h>
#include <base/time/time.h>
#include <brillo/bind_lambda.h>
-#include <brillo/make_unique_ptr.h>
#include <brillo/message_loops/fake_message_loop.h>
#include <brillo/message_loops/message_loop.h>
#include <brillo/message_loops/message_loop_utils.h>
@@ -44,7 +45,7 @@
#include "update_engine/common/prefs.h"
#include "update_engine/common/test_utils.h"
#include "update_engine/fake_system_state.h"
-#include "update_engine/metrics.h"
+#include "update_engine/metrics_reporter_interface.h"
#include "update_engine/mock_connection_manager.h"
#include "update_engine/mock_payload_state.h"
#include "update_engine/omaha_request_params.h"
@@ -62,12 +63,13 @@
using testing::Return;
using testing::ReturnPointee;
using testing::SaveArg;
-using testing::SetArgumentPointee;
+using testing::SetArgPointee;
using testing::_;
namespace {
const char kTestAppId[] = "test-app-id";
+const char kTestAppId2[] = "test-app2-id";
// This is a helper struct to allow unit tests build an update response with the
// values they care about.
@@ -89,7 +91,8 @@
"<ping status=\"ok\"/>"
"<updatecheck status=\"noupdate\"/></app>" +
(multi_app_no_update
- ? "<app><updatecheck status=\"noupdate\"/></app>"
+ ? "<app appid=\"" + app_id2 +
+ "\"><updatecheck status=\"noupdate\"/></app>"
: "") +
"</response>";
}
@@ -142,9 +145,11 @@
(disable_p2p_for_sharing ? "DisableP2PForSharing=\"true\" " : "") +
"/></actions></manifest></updatecheck></app>" +
(multi_app
- ? "<app><updatecheck status=\"ok\"><urls><url codebase=\"" +
- codebase2 +
- "\"/></urls><manifest><packages>"
+ ? "<app appid=\"" + app_id2 + "\"" +
+ (include_cohorts ? " cohort=\"cohort2\"" : "") +
+ "><updatecheck status=\"ok\"><urls><url codebase=\"" +
+ codebase2 + "\"/></urls><manifest version=\"" + version2 +
+ "\"><packages>"
"<package name=\"package3\" size=\"333\" "
"hash_sha256=\"hash3\"/></packages>"
"<actions><action event=\"postinstall\" " +
@@ -166,7 +171,9 @@
}
string app_id = kTestAppId;
+ string app_id2 = kTestAppId2;
string version = "1.2.3.4";
+ string version2 = "2.3.4.5";
string more_info_url = "http://more/info";
string prompt = "true";
string codebase = "http://code/base/";
@@ -372,7 +379,7 @@
fake_system_state_.set_request_params(request_params);
OmahaRequestAction action(&fake_system_state_,
nullptr,
- brillo::make_unique_ptr(fetcher),
+ base::WrapUnique(fetcher),
ping_only);
OmahaRequestActionTestProcessorDelegate delegate;
delegate.expected_code_ = expected_code;
@@ -385,23 +392,16 @@
BondActions(&action, &collector_action);
processor.EnqueueAction(&collector_action);
- EXPECT_CALL(*fake_system_state_.mock_metrics_lib(), SendEnumToUMA(_, _, _))
+ EXPECT_CALL(*fake_system_state_.mock_metrics_reporter(),
+ ReportUpdateCheckMetrics(_, _, _, _))
.Times(AnyNumber());
- EXPECT_CALL(*fake_system_state_.mock_metrics_lib(),
- SendEnumToUMA(metrics::kMetricCheckResult,
- static_cast<int>(expected_check_result),
- static_cast<int>(metrics::CheckResult::kNumConstants) - 1))
- .Times(expected_check_result == metrics::CheckResult::kUnset ? 0 : 1);
- EXPECT_CALL(*fake_system_state_.mock_metrics_lib(),
- SendEnumToUMA(metrics::kMetricCheckReaction,
- static_cast<int>(expected_check_reaction),
- static_cast<int>(metrics::CheckReaction::kNumConstants) - 1))
- .Times(expected_check_reaction == metrics::CheckReaction::kUnset ? 0 : 1);
- EXPECT_CALL(*fake_system_state_.mock_metrics_lib(),
- SendSparseToUMA(metrics::kMetricCheckDownloadErrorCode,
- static_cast<int>(expected_download_error_code)))
- .Times(expected_download_error_code == metrics::DownloadErrorCode::kUnset
- ? 0 : 1);
+
+ EXPECT_CALL(*fake_system_state_.mock_metrics_reporter(),
+ ReportUpdateCheckMetrics(_,
+ expected_check_result,
+ expected_check_reaction,
+ expected_download_error_code))
+ .Times(ping_only ? 0 : 1);
loop.PostTask(base::Bind(
[](ActionProcessor* processor) { processor->StartProcessing(); },
@@ -431,7 +431,7 @@
fake_system_state.set_request_params(¶ms);
OmahaRequestAction action(&fake_system_state,
event,
- brillo::make_unique_ptr(fetcher),
+ base::WrapUnique(fetcher),
false);
OmahaRequestActionTestProcessorDelegate delegate;
ActionProcessor processor;
@@ -549,6 +549,7 @@
nullptr));
EXPECT_TRUE(response.update_exists);
EXPECT_EQ(fake_update_response_.version, response.version);
+ EXPECT_EQ("", response.system_version);
EXPECT_EQ(fake_update_response_.GetPayloadUrl(),
response.packages[0].payload_urls[0]);
EXPECT_EQ(fake_update_response_.more_info_url, response.more_info_url);
@@ -557,7 +558,7 @@
EXPECT_EQ(true, response.packages[0].is_delta);
EXPECT_EQ(fake_update_response_.prompt == "true", response.prompt);
EXPECT_EQ(fake_update_response_.deadline, response.deadline);
- // Omaha cohort attribets are not set in the response, so they should not be
+ // Omaha cohort attributes are not set in the response, so they should not be
// persisted.
EXPECT_FALSE(fake_prefs_.Exists(kPrefsOmahaCohort));
EXPECT_FALSE(fake_prefs_.Exists(kPrefsOmahaCohortHint));
@@ -624,6 +625,40 @@
EXPECT_EQ(false, response.packages[1].is_delta);
}
+TEST_F(OmahaRequestActionTest, MultiAppAndSystemUpdateTest) {
+ OmahaResponse response;
+ fake_update_response_.multi_app = true;
+ // Trigger the lining up of the app and system versions.
+ request_params_.set_system_app_id(fake_update_response_.app_id2);
+
+ ASSERT_TRUE(TestUpdateCheck(nullptr, // request_params
+ fake_update_response_.GetUpdateResponse(),
+ -1,
+ false, // ping_only
+ ErrorCode::kSuccess,
+ metrics::CheckResult::kUpdateAvailable,
+ metrics::CheckReaction::kUpdating,
+ metrics::DownloadErrorCode::kUnset,
+ &response,
+ nullptr));
+ EXPECT_TRUE(response.update_exists);
+ EXPECT_EQ(fake_update_response_.version, response.version);
+ EXPECT_EQ(fake_update_response_.version2, response.system_version);
+ EXPECT_EQ(fake_update_response_.GetPayloadUrl(),
+ response.packages[0].payload_urls[0]);
+ EXPECT_EQ(fake_update_response_.codebase2 + "package3",
+ response.packages[1].payload_urls[0]);
+ EXPECT_EQ(fake_update_response_.hash, response.packages[0].hash);
+ EXPECT_EQ(fake_update_response_.size, response.packages[0].size);
+ EXPECT_EQ(11u, response.packages[0].metadata_size);
+ EXPECT_EQ(true, response.packages[0].is_delta);
+ ASSERT_EQ(2u, response.packages.size());
+ EXPECT_EQ(string("hash3"), response.packages[1].hash);
+ EXPECT_EQ(333u, response.packages[1].size);
+ EXPECT_EQ(33u, response.packages[1].metadata_size);
+ EXPECT_EQ(false, response.packages[1].is_delta);
+}
+
TEST_F(OmahaRequestActionTest, MultiAppPartialUpdateTest) {
OmahaResponse response;
fake_update_response_.multi_app = true;
@@ -640,6 +675,7 @@
nullptr));
EXPECT_TRUE(response.update_exists);
EXPECT_EQ(fake_update_response_.version, response.version);
+ EXPECT_EQ("", response.system_version);
EXPECT_EQ(fake_update_response_.GetPayloadUrl(),
response.packages[0].payload_urls[0]);
EXPECT_EQ(fake_update_response_.hash, response.packages[0].hash);
@@ -668,6 +704,7 @@
nullptr));
EXPECT_TRUE(response.update_exists);
EXPECT_EQ(fake_update_response_.version, response.version);
+ EXPECT_EQ("", response.system_version);
EXPECT_EQ(fake_update_response_.GetPayloadUrl(),
response.packages[0].payload_urls[0]);
EXPECT_EQ(fake_update_response_.codebase + "package2",
@@ -698,8 +735,8 @@
MockHttpFetcher* fetcher =
new MockHttpFetcher(http_response.data(), http_response.size(), nullptr);
- OmahaRequestAction action(
- &fake_system_state_, nullptr, brillo::make_unique_ptr(fetcher), false);
+ OmahaRequestAction action(&fake_system_state_, nullptr,
+ base::WrapUnique(fetcher), false);
ActionProcessor processor;
processor.EnqueueAction(&action);
@@ -725,8 +762,8 @@
EXPECT_CALL(mock_cm, GetConnectionProperties(_, _))
.WillRepeatedly(
- DoAll(SetArgumentPointee<0>(ConnectionType::kEthernet),
- SetArgumentPointee<1>(ConnectionTethering::kUnknown),
+ DoAll(SetArgPointee<0>(ConnectionType::kEthernet),
+ SetArgPointee<1>(ConnectionTethering::kUnknown),
Return(true)));
EXPECT_CALL(mock_cm, IsUpdateAllowedOver(ConnectionType::kEthernet, _))
.WillRepeatedly(Return(false));
@@ -775,18 +812,19 @@
TEST_F(OmahaRequestActionTest, SkipNonCriticalUpdatesBeforeOOBE) {
OmahaResponse response;
+ // TODO: Set a better default value for metrics::CheckResult in
+ // OmahaRequestAction::ActionCompleted.
fake_system_state_.fake_hardware()->UnsetIsOOBEComplete();
- ASSERT_FALSE(
- TestUpdateCheck(nullptr, // request_params
- fake_update_response_.GetUpdateResponse(),
- -1,
- false, // ping_only
- ErrorCode::kNonCriticalUpdateInOOBE,
- metrics::CheckResult::kUnset,
- metrics::CheckReaction::kUnset,
- metrics::DownloadErrorCode::kUnset,
- &response,
- nullptr));
+ ASSERT_FALSE(TestUpdateCheck(nullptr, // request_params
+ fake_update_response_.GetUpdateResponse(),
+ -1,
+ false, // ping_only
+ ErrorCode::kNonCriticalUpdateInOOBE,
+ metrics::CheckResult::kParsingError,
+ metrics::CheckReaction::kUnset,
+ metrics::DownloadErrorCode::kUnset,
+ &response,
+ nullptr));
EXPECT_FALSE(response.update_exists);
// The IsOOBEComplete() value is ignored when the OOBE flow is not enabled.
@@ -1140,6 +1178,37 @@
EXPECT_EQ(fake_update_response_.cohortname, value);
}
+TEST_F(OmahaRequestActionTest, MultiAppCohortTest) {
+ OmahaResponse response;
+ OmahaRequestParams params = request_params_;
+ fake_update_response_.multi_app = true;
+ fake_update_response_.include_cohorts = true;
+ fake_update_response_.cohort = "s/154454/8479665";
+ fake_update_response_.cohorthint = "please-put-me-on-beta";
+ fake_update_response_.cohortname = "stable";
+
+ ASSERT_TRUE(TestUpdateCheck(¶ms,
+ fake_update_response_.GetUpdateResponse(),
+ -1,
+ false, // ping_only
+ ErrorCode::kSuccess,
+ metrics::CheckResult::kUpdateAvailable,
+ metrics::CheckReaction::kUpdating,
+ metrics::DownloadErrorCode::kUnset,
+ &response,
+ nullptr));
+
+ string value;
+ EXPECT_TRUE(fake_prefs_.GetString(kPrefsOmahaCohort, &value));
+ EXPECT_EQ(fake_update_response_.cohort, value);
+
+ EXPECT_TRUE(fake_prefs_.GetString(kPrefsOmahaCohortHint, &value));
+ EXPECT_EQ(fake_update_response_.cohorthint, value);
+
+ EXPECT_TRUE(fake_prefs_.GetString(kPrefsOmahaCohortName, &value));
+ EXPECT_EQ(fake_update_response_.cohortname, value);
+}
+
TEST_F(OmahaRequestActionTest, NoOutputPipeTest) {
const string http_response(fake_update_response_.GetNoUpdateResponse());
@@ -1148,12 +1217,13 @@
OmahaRequestParams params = request_params_;
fake_system_state_.set_request_params(¶ms);
- OmahaRequestAction action(&fake_system_state_, nullptr,
- brillo::make_unique_ptr(
- new MockHttpFetcher(http_response.data(),
- http_response.size(),
- nullptr)),
- false);
+ OmahaRequestAction action(
+ &fake_system_state_,
+ nullptr,
+ std::make_unique<MockHttpFetcher>(http_response.data(),
+ http_response.size(),
+ nullptr),
+ false);
OmahaRequestActionTestProcessorDelegate delegate;
ActionProcessor processor;
processor.set_delegate(&delegate);
@@ -1263,7 +1333,10 @@
string input_response =
"<?xml version=\"1.0\" encoding=\"UTF-8\"?><response protocol=\"3.0\">"
"<daystart elapsed_seconds=\"100\"/>"
- "<app appid=\"xyz\" status=\"ok\">"
+ // The appid needs to match the one in the request params.
+ "<app appid=\"" +
+ fake_update_response_.app_id +
+ "\" status=\"ok\">"
"<updatecheck status=\"ok\">"
"<urls><url codebase=\"http://missing/field/test/\"/></urls>"
"<manifest version=\"10.2.3.4\">"
@@ -1321,12 +1394,13 @@
loop.SetAsCurrent();
string http_response("doesn't matter");
- OmahaRequestAction action(&fake_system_state_, nullptr,
- brillo::make_unique_ptr(
- new MockHttpFetcher(http_response.data(),
- http_response.size(),
- nullptr)),
- false);
+ OmahaRequestAction action(
+ &fake_system_state_,
+ nullptr,
+ std::make_unique<MockHttpFetcher>(http_response.data(),
+ http_response.size(),
+ nullptr),
+ false);
TerminateEarlyTestProcessorDelegate delegate;
ActionProcessor processor;
processor.set_delegate(&delegate);
@@ -1461,7 +1535,7 @@
fake_system_state_.set_prefs(&prefs);
EXPECT_CALL(prefs, GetString(kPrefsPreviousVersion, _))
- .WillOnce(DoAll(SetArgumentPointee<1>(string("")), Return(true)));
+ .WillOnce(DoAll(SetArgPointee<1>(string("")), Return(true)));
// An existing but empty previous version means that we didn't reboot to a new
// update, therefore, no need to update the previous version.
EXPECT_CALL(prefs, SetString(kPrefsPreviousVersion, _)).Times(0);
@@ -1537,10 +1611,9 @@
OmahaRequestAction update_check_action(
&fake_system_state_,
nullptr,
- brillo::make_unique_ptr(
- new MockHttpFetcher(http_response.data(),
- http_response.size(),
- nullptr)),
+ std::make_unique<MockHttpFetcher>(http_response.data(),
+ http_response.size(),
+ nullptr),
false);
EXPECT_FALSE(update_check_action.IsEvent());
@@ -1549,10 +1622,9 @@
OmahaRequestAction event_action(
&fake_system_state_,
new OmahaEvent(OmahaEvent::kTypeUpdateComplete),
- brillo::make_unique_ptr(
- new MockHttpFetcher(http_response.data(),
- http_response.size(),
- nullptr)),
+ std::make_unique<MockHttpFetcher>(http_response.data(),
+ http_response.size(),
+ nullptr),
false);
EXPECT_TRUE(event_action.IsEvent());
}
@@ -1669,23 +1741,22 @@
int64_t six_days_ago =
(Time::Now() - TimeDelta::FromHours(6 * 24 + 11)).ToInternalValue();
EXPECT_CALL(prefs, GetInt64(kPrefsInstallDateDays, _))
- .WillOnce(DoAll(SetArgumentPointee<1>(0), Return(true)));
+ .WillOnce(DoAll(SetArgPointee<1>(0), Return(true)));
EXPECT_CALL(prefs, GetInt64(kPrefsLastActivePingDay, _))
- .WillOnce(DoAll(SetArgumentPointee<1>(six_days_ago), Return(true)));
+ .WillOnce(DoAll(SetArgPointee<1>(six_days_ago), Return(true)));
EXPECT_CALL(prefs, GetInt64(kPrefsLastRollCallPingDay, _))
- .WillOnce(DoAll(SetArgumentPointee<1>(five_days_ago), Return(true)));
+ .WillOnce(DoAll(SetArgPointee<1>(five_days_ago), Return(true)));
brillo::Blob post_data;
- ASSERT_TRUE(
- TestUpdateCheck(nullptr, // request_params
- fake_update_response_.GetNoUpdateResponse(),
- -1,
- ping_only,
- ErrorCode::kSuccess,
- metrics::CheckResult::kUnset,
- metrics::CheckReaction::kUnset,
- metrics::DownloadErrorCode::kUnset,
- nullptr,
- &post_data));
+ ASSERT_TRUE(TestUpdateCheck(nullptr, // request_params
+ fake_update_response_.GetNoUpdateResponse(),
+ -1,
+ ping_only,
+ ErrorCode::kSuccess,
+ metrics::CheckResult::kNoUpdateAvailable,
+ metrics::CheckReaction::kUnset,
+ metrics::DownloadErrorCode::kUnset,
+ nullptr,
+ &post_data));
string post_str(post_data.begin(), post_data.end());
EXPECT_NE(post_str.find("<ping active=\"1\" a=\"6\" r=\"5\"></ping>"),
string::npos);
@@ -1716,11 +1787,11 @@
(Time::Now() - TimeDelta::FromHours(3 * 24 + 12)).ToInternalValue();
int64_t now = Time::Now().ToInternalValue();
EXPECT_CALL(prefs, GetInt64(kPrefsInstallDateDays, _))
- .WillOnce(DoAll(SetArgumentPointee<1>(0), Return(true)));
+ .WillOnce(DoAll(SetArgPointee<1>(0), Return(true)));
EXPECT_CALL(prefs, GetInt64(kPrefsLastActivePingDay, _))
- .WillOnce(DoAll(SetArgumentPointee<1>(three_days_ago), Return(true)));
+ .WillOnce(DoAll(SetArgPointee<1>(three_days_ago), Return(true)));
EXPECT_CALL(prefs, GetInt64(kPrefsLastRollCallPingDay, _))
- .WillOnce(DoAll(SetArgumentPointee<1>(now), Return(true)));
+ .WillOnce(DoAll(SetArgPointee<1>(now), Return(true)));
brillo::Blob post_data;
ASSERT_TRUE(
TestUpdateCheck(nullptr, // request_params
@@ -1748,11 +1819,11 @@
(Time::Now() - TimeDelta::FromHours(4 * 24)).ToInternalValue();
int64_t now = Time::Now().ToInternalValue();
EXPECT_CALL(prefs, GetInt64(kPrefsInstallDateDays, _))
- .WillOnce(DoAll(SetArgumentPointee<1>(0), Return(true)));
+ .WillOnce(DoAll(SetArgPointee<1>(0), Return(true)));
EXPECT_CALL(prefs, GetInt64(kPrefsLastActivePingDay, _))
- .WillOnce(DoAll(SetArgumentPointee<1>(now), Return(true)));
+ .WillOnce(DoAll(SetArgPointee<1>(now), Return(true)));
EXPECT_CALL(prefs, GetInt64(kPrefsLastRollCallPingDay, _))
- .WillOnce(DoAll(SetArgumentPointee<1>(four_days_ago), Return(true)));
+ .WillOnce(DoAll(SetArgPointee<1>(four_days_ago), Return(true)));
brillo::Blob post_data;
ASSERT_TRUE(
TestUpdateCheck(nullptr, // request_params
@@ -1779,11 +1850,11 @@
int64_t one_hour_ago =
(Time::Now() - TimeDelta::FromHours(1)).ToInternalValue();
EXPECT_CALL(prefs, GetInt64(kPrefsInstallDateDays, _))
- .WillOnce(DoAll(SetArgumentPointee<1>(0), Return(true)));
+ .WillOnce(DoAll(SetArgPointee<1>(0), Return(true)));
EXPECT_CALL(prefs, GetInt64(kPrefsLastActivePingDay, _))
- .WillOnce(DoAll(SetArgumentPointee<1>(one_hour_ago), Return(true)));
+ .WillOnce(DoAll(SetArgPointee<1>(one_hour_ago), Return(true)));
EXPECT_CALL(prefs, GetInt64(kPrefsLastRollCallPingDay, _))
- .WillOnce(DoAll(SetArgumentPointee<1>(one_hour_ago), Return(true)));
+ .WillOnce(DoAll(SetArgPointee<1>(one_hour_ago), Return(true)));
// LastActivePingDay and PrefsLastRollCallPingDay are set even if we didn't
// send a ping.
EXPECT_CALL(prefs, SetInt64(kPrefsLastActivePingDay, _))
@@ -1812,9 +1883,9 @@
fake_system_state_.set_prefs(&prefs);
int64_t now = Time::Now().ToInternalValue();
EXPECT_CALL(prefs, GetInt64(kPrefsLastActivePingDay, _))
- .WillOnce(DoAll(SetArgumentPointee<1>(now), Return(true)));
+ .WillOnce(DoAll(SetArgPointee<1>(now), Return(true)));
EXPECT_CALL(prefs, GetInt64(kPrefsLastRollCallPingDay, _))
- .WillOnce(DoAll(SetArgumentPointee<1>(now), Return(true)));
+ .WillOnce(DoAll(SetArgPointee<1>(now), Return(true)));
EXPECT_CALL(prefs, SetInt64(kPrefsLastActivePingDay, _)).Times(0);
EXPECT_CALL(prefs, SetInt64(kPrefsLastRollCallPingDay, _)).Times(0);
brillo::Blob post_data;
@@ -1841,11 +1912,11 @@
int64_t future =
(Time::Now() + TimeDelta::FromHours(3 * 24 + 4)).ToInternalValue();
EXPECT_CALL(prefs, GetInt64(kPrefsInstallDateDays, _))
- .WillOnce(DoAll(SetArgumentPointee<1>(0), Return(true)));
+ .WillOnce(DoAll(SetArgPointee<1>(0), Return(true)));
EXPECT_CALL(prefs, GetInt64(kPrefsLastActivePingDay, _))
- .WillOnce(DoAll(SetArgumentPointee<1>(future), Return(true)));
+ .WillOnce(DoAll(SetArgPointee<1>(future), Return(true)));
EXPECT_CALL(prefs, GetInt64(kPrefsLastRollCallPingDay, _))
- .WillOnce(DoAll(SetArgumentPointee<1>(future), Return(true)));
+ .WillOnce(DoAll(SetArgPointee<1>(future), Return(true)));
EXPECT_CALL(prefs, SetInt64(kPrefsLastActivePingDay, _))
.WillOnce(Return(true));
EXPECT_CALL(prefs, SetInt64(kPrefsLastRollCallPingDay, _))
@@ -2038,7 +2109,7 @@
params.set_update_check_count_wait_enabled(false);
Time arbitrary_date;
- Time::FromString("6/4/1989", &arbitrary_date);
+ ASSERT_TRUE(Time::FromString("6/4/1989", &arbitrary_date));
fake_system_state_.fake_clock()->SetWallclockTime(arbitrary_date);
ASSERT_FALSE(TestUpdateCheck(&params,
fake_update_response_.GetUpdateResponse(),
@@ -2079,8 +2150,8 @@
params.set_update_check_count_wait_enabled(false);
Time t1, t2;
- Time::FromString("1/1/2012", &t1);
- Time::FromString("1/3/2012", &t2);
+ ASSERT_TRUE(Time::FromString("1/1/2012", &t1));
+ ASSERT_TRUE(Time::FromString("1/3/2012", &t2));
ASSERT_TRUE(
fake_prefs_.SetInt64(kPrefsUpdateFirstSeenAt, t1.ToInternalValue()));
fake_system_state_.fake_clock()->SetWallclockTime(t2);
@@ -2110,7 +2181,7 @@
brillo::Blob post_data;
OmahaRequestParams params(&fake_system_state_);
- params.set_root(tempdir.path().value());
+ params.set_root(tempdir.GetPath().value());
params.set_app_id("{22222222-2222-2222-2222-222222222222}");
params.set_app_version("1.2.3.4");
params.set_current_channel("canary-channel");
@@ -2143,7 +2214,7 @@
brillo::Blob post_data;
OmahaRequestParams params(&fake_system_state_);
- params.set_root(tempdir.path().value());
+ params.set_root(tempdir.GetPath().value());
params.set_app_id("{11111111-1111-1111-1111-111111111111}");
params.set_app_version("5.6.7.8");
params.set_current_channel("stable-channel");
@@ -2195,6 +2266,35 @@
EXPECT_EQ(string::npos, post_str.find("<ping"));
}
+// Checks that the initial ping with a=-1 r=-1 is not sent when the device's
+// first_active_omaha_ping_sent flag is already set.
+TEST_F(OmahaRequestActionTest, PingWhenFirstActiveOmahaPingIsSent) {
+ fake_prefs_.SetString(kPrefsPreviousVersion, "");
+
+ // Flag that the device was not powerwashed in the past.
+ fake_system_state_.fake_hardware()->SetPowerwashCount(0);
+
+ // Flag that the device has sent first active ping in the past.
+ fake_system_state_.fake_hardware()->SetFirstActiveOmahaPingSent();
+
+ brillo::Blob post_data;
+ ASSERT_TRUE(
+ TestUpdateCheck(nullptr, // request_params
+ fake_update_response_.GetNoUpdateResponse(),
+ -1,
+ false, // ping_only
+ ErrorCode::kSuccess,
+ metrics::CheckResult::kNoUpdateAvailable,
+ metrics::CheckReaction::kUnset,
+ metrics::DownloadErrorCode::kUnset,
+ nullptr,
+ &post_data));
+ // We shouldn't send a ping in this case since
+ // first_active_omaha_ping_sent is true.
+ string post_str(post_data.begin(), post_data.end());
+ EXPECT_EQ(string::npos, post_str.find("<ping"));
+}
+
// Checks that the event 54 is sent on a reboot to a new update.
TEST_F(OmahaRequestActionTest, RebootAfterUpdateEvent) {
// Flag that the device was updated in a previous boot.
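The hunks above replace brillo::make_unique_ptr(new T(...)) with std::make_unique<T>(...) throughout. A minimal sketch of the equivalence, reusing the MockHttpFetcher arguments from these tests (variable names are illustrative only):

    // Old style: explicit new wrapped by the Brillo helper.
    auto old_fetcher = brillo::make_unique_ptr(new MockHttpFetcher(
        http_response.data(), http_response.size(), nullptr));
    // New style: the C++14 standard helper; same ownership, no explicit new.
    auto new_fetcher = std::make_unique<MockHttpFetcher>(
        http_response.data(), http_response.size(), nullptr);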
diff --git a/omaha_request_params.h b/omaha_request_params.h
index f8e9438..73edd6f 100644
--- a/omaha_request_params.h
+++ b/omaha_request_params.h
@@ -111,6 +111,9 @@
return image_props_.canary_product_id;
}
inline std::string system_app_id() const { return image_props_.system_id; }
+ inline void set_system_app_id(const std::string& system_app_id) {
+ image_props_.system_id = system_app_id;
+ }
inline void set_app_id(const std::string& app_id) {
image_props_.product_id = app_id;
image_props_.canary_product_id = app_id;
@@ -127,6 +130,9 @@
inline std::string system_version() const {
return image_props_.system_version;
}
+ inline std::string product_components() const {
+ return image_props_.product_components;
+ }
inline std::string current_channel() const {
return image_props_.current_channel;
diff --git a/omaha_request_params_unittest.cc b/omaha_request_params_unittest.cc
index 7d4dc2d..57ecf24 100644
--- a/omaha_request_params_unittest.cc
+++ b/omaha_request_params_unittest.cc
@@ -47,7 +47,7 @@
// Create a fresh copy of the params for each test, so there's no
// unintended reuse of state across tests.
params_ = OmahaRequestParams(&fake_system_state_);
- params_.set_root(tempdir_.path().value());
+ params_.set_root(tempdir_.GetPath().value());
SetLockDown(false);
fake_system_state_.set_prefs(&fake_prefs_);
}
@@ -105,7 +105,8 @@
}
TEST_F(OmahaRequestParamsTest, NoDeltasTest) {
- ASSERT_TRUE(WriteFileString(tempdir_.path().Append(".nodelta").value(), ""));
+ ASSERT_TRUE(
+ WriteFileString(tempdir_.GetPath().Append(".nodelta").value(), ""));
EXPECT_TRUE(params_.Init("", "", false));
EXPECT_FALSE(params_.delta_okay());
}
@@ -113,12 +114,12 @@
TEST_F(OmahaRequestParamsTest, SetTargetChannelTest) {
{
OmahaRequestParams params(&fake_system_state_);
- params.set_root(tempdir_.path().value());
+ params.set_root(tempdir_.GetPath().value());
EXPECT_TRUE(params.Init("", "", false));
EXPECT_TRUE(params.SetTargetChannel("canary-channel", false, nullptr));
EXPECT_FALSE(params.is_powerwash_allowed());
}
- params_.set_root(tempdir_.path().value());
+ params_.set_root(tempdir_.GetPath().value());
EXPECT_TRUE(params_.Init("", "", false));
EXPECT_EQ("canary-channel", params_.target_channel());
EXPECT_FALSE(params_.is_powerwash_allowed());
@@ -127,12 +128,12 @@
TEST_F(OmahaRequestParamsTest, SetIsPowerwashAllowedTest) {
{
OmahaRequestParams params(&fake_system_state_);
- params.set_root(tempdir_.path().value());
+ params.set_root(tempdir_.GetPath().value());
EXPECT_TRUE(params.Init("", "", false));
EXPECT_TRUE(params.SetTargetChannel("canary-channel", true, nullptr));
EXPECT_TRUE(params.is_powerwash_allowed());
}
- params_.set_root(tempdir_.path().value());
+ params_.set_root(tempdir_.GetPath().value());
EXPECT_TRUE(params_.Init("", "", false));
EXPECT_EQ("canary-channel", params_.target_channel());
EXPECT_TRUE(params_.is_powerwash_allowed());
@@ -141,7 +142,7 @@
TEST_F(OmahaRequestParamsTest, SetTargetChannelInvalidTest) {
{
OmahaRequestParams params(&fake_system_state_);
- params.set_root(tempdir_.path().value());
+ params.set_root(tempdir_.GetPath().value());
SetLockDown(true);
EXPECT_TRUE(params.Init("", "", false));
string error_message;
@@ -151,7 +152,7 @@
EXPECT_NE(string::npos, error_message.find("stable-channel"));
EXPECT_FALSE(params.is_powerwash_allowed());
}
- params_.set_root(tempdir_.path().value());
+ params_.set_root(tempdir_.GetPath().value());
EXPECT_TRUE(params_.Init("", "", false));
EXPECT_EQ("stable-channel", params_.target_channel());
EXPECT_FALSE(params_.is_powerwash_allowed());
diff --git a/omaha_response.h b/omaha_response.h
index c702068..b973eb5 100644
--- a/omaha_response.h
+++ b/omaha_response.h
@@ -37,6 +37,7 @@
// These are only valid if update_exists is true:
std::string version;
+ std::string system_version;
struct Package {
// The ordered list of URLs in the Omaha response. Each item is a complete
diff --git a/omaha_response_handler_action.cc b/omaha_response_handler_action.cc
index 189fe6b..9c5fb4a 100644
--- a/omaha_response_handler_action.cc
+++ b/omaha_response_handler_action.cc
@@ -31,7 +31,11 @@
#include "update_engine/omaha_request_params.h"
#include "update_engine/payload_consumer/delta_performer.h"
#include "update_engine/payload_state_interface.h"
+#include "update_engine/update_manager/policy.h"
+#include "update_engine/update_manager/update_manager.h"
+using chromeos_update_manager::Policy;
+using chromeos_update_manager::UpdateManager;
using std::string;
namespace chromeos_update_engine {
@@ -69,8 +73,10 @@
return;
}
+ // This is the URL of the first package, not of all packages.
install_plan_.download_url = current_url;
install_plan_.version = response.version;
+ install_plan_.system_version = response.system_version;
OmahaRequestParams* const params = system_state_->request_params();
PayloadStateInterface* const payload_state = system_state_->payload_state();
@@ -156,7 +162,14 @@
chmod(deadline_file_.c_str(), S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
}
- completer.set_code(ErrorCode::kSuccess);
+ // Check the generated install-plan with the Policy to confirm that
+ // it can be applied at this time (or at all).
+ UpdateManager* const update_manager = system_state_->update_manager();
+ CHECK(update_manager);
+ auto ec = ErrorCode::kSuccess;
+ update_manager->PolicyRequest(
+ &Policy::UpdateCanBeApplied, &ec, &install_plan_);
+ completer.set_code(ec);
}
bool OmahaResponseHandlerAction::AreHashChecksMandatory(
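Because the action now completes with whatever code Policy::UpdateCanBeApplied returns, downstream consumers need to distinguish a policy deferral from a hard failure. A hypothetical delegate sketch (the delegate class and the two helpers are assumptions for illustration, not part of this change):

    void MyDelegate::ActionCompleted(ActionProcessor* /* processor */,
                                     AbstractAction* /* action */,
                                     ErrorCode code) {
      if (code == ErrorCode::kOmahaUpdateDeferredPerPolicy) {
        // The install plan was built, but policy blocked applying it now.
        ScheduleRetryLater();  // assumed helper
      } else if (code != ErrorCode::kSuccess) {
        ReportFailure(code);   // assumed helper
      }
    }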
diff --git a/omaha_response_handler_action.h b/omaha_response_handler_action.h
index 51dfa7a..2974841 100644
--- a/omaha_response_handler_action.h
+++ b/omaha_response_handler_action.h
@@ -89,6 +89,7 @@
friend class OmahaResponseHandlerActionTest;
FRIEND_TEST(UpdateAttempterTest, CreatePendingErrorEventResumedTest);
+ FRIEND_TEST(UpdateAttempterTest, UpdateDeferredByPolicyTest);
DISALLOW_COPY_AND_ASSIGN(OmahaResponseHandlerAction);
};
diff --git a/omaha_response_handler_action_unittest.cc b/omaha_response_handler_action_unittest.cc
index 75cd819..568d11d 100644
--- a/omaha_response_handler_action_unittest.cc
+++ b/omaha_response_handler_action_unittest.cc
@@ -16,10 +16,12 @@
#include "update_engine/omaha_response_handler_action.h"
+#include <memory>
#include <string>
#include <base/files/file_util.h>
#include <base/files/scoped_temp_dir.h>
+#include <brillo/message_loops/fake_message_loop.h>
#include <gtest/gtest.h>
#include "update_engine/common/constants.h"
@@ -29,12 +31,18 @@
#include "update_engine/fake_system_state.h"
#include "update_engine/mock_payload_state.h"
#include "update_engine/payload_consumer/payload_constants.h"
+#include "update_engine/update_manager/mock_policy.h"
using chromeos_update_engine::test_utils::System;
using chromeos_update_engine::test_utils::WriteFileString;
+using chromeos_update_manager::EvalStatus;
+using chromeos_update_manager::FakeUpdateManager;
+using chromeos_update_manager::MockPolicy;
using std::string;
-using testing::Return;
using testing::_;
+using testing::DoAll;
+using testing::Return;
+using testing::SetArgPointee;
namespace chromeos_update_engine {
@@ -58,6 +66,13 @@
const string& deadline_file,
InstallPlan* out);
+ // Pointer to the Action, valid after |DoTest|, released when the test is
+ // finished.
+ std::unique_ptr<OmahaResponseHandlerAction> action_;
+ // Captures the action's result code, for tests that need to directly verify
+ // it in non-success cases.
+ ErrorCode action_result_code_;
+
FakeSystemState fake_system_state_;
// "Hash+"
const brillo::Blob expected_hash_ = {0x48, 0x61, 0x73, 0x68, 0x2b};
@@ -67,8 +82,7 @@
: public ActionProcessorDelegate {
public:
OmahaResponseHandlerActionProcessorDelegate()
- : code_(ErrorCode::kError),
- code_set_(false) {}
+ : code_(ErrorCode::kError), code_set_(false) {}
void ActionCompleted(ActionProcessor* processor,
AbstractAction* action,
ErrorCode code) {
@@ -95,10 +109,11 @@
const char* const kPayloadHashHex = "486173682b";
} // namespace
-bool OmahaResponseHandlerActionTest::DoTest(
- const OmahaResponse& in,
- const string& test_deadline_file,
- InstallPlan* out) {
+bool OmahaResponseHandlerActionTest::DoTest(const OmahaResponse& in,
+ const string& test_deadline_file,
+ InstallPlan* out) {
+ brillo::FakeMessageLoop loop(nullptr);
+ loop.SetAsCurrent();
ActionProcessor processor;
OmahaResponseHandlerActionProcessorDelegate delegate;
processor.set_delegate(&delegate);
@@ -123,15 +138,15 @@
EXPECT_CALL(*(fake_system_state_.mock_payload_state()), GetCurrentUrl())
.WillRepeatedly(Return(current_url));
- OmahaResponseHandlerAction response_handler_action(
+ action_.reset(new OmahaResponseHandlerAction(
&fake_system_state_,
- (test_deadline_file.empty() ?
- constants::kOmahaResponseDeadlineFile : test_deadline_file));
- BondActions(&feeder_action, &response_handler_action);
+ (test_deadline_file.empty() ? constants::kOmahaResponseDeadlineFile
+ : test_deadline_file)));
+ BondActions(&feeder_action, action_.get());
ObjectCollectorAction<InstallPlan> collector_action;
- BondActions(&response_handler_action, &collector_action);
+ BondActions(action_.get(), &collector_action);
processor.EnqueueAction(&feeder_action);
- processor.EnqueueAction(&response_handler_action);
+ processor.EnqueueAction(action_.get());
processor.EnqueueAction(&collector_action);
processor.StartProcessing();
EXPECT_TRUE(!processor.IsRunning())
@@ -139,14 +154,15 @@
if (out)
*out = collector_action.object();
EXPECT_TRUE(delegate.code_set_);
+ action_result_code_ = delegate.code_;
return delegate.code_ == ErrorCode::kSuccess;
}
TEST_F(OmahaResponseHandlerActionTest, SimpleTest) {
string test_deadline_file;
- CHECK(utils::MakeTempFile(
- "omaha_response_handler_action_unittest-XXXXXX",
- &test_deadline_file, nullptr));
+ CHECK(utils::MakeTempFile("omaha_response_handler_action_unittest-XXXXXX",
+ &test_deadline_file,
+ nullptr));
ScopedPathUnlinker deadline_unlinker(test_deadline_file);
{
OmahaResponse in;
@@ -368,7 +384,7 @@
OmahaRequestParams params(&fake_system_state_);
fake_system_state_.fake_hardware()->SetIsOfficialBuild(false);
- params.set_root(tempdir.path().value());
+ params.set_root(tempdir.GetPath().value());
params.set_current_channel("canary-channel");
// The ImageProperties in Android uses prefs to store MutableImageProperties.
#ifdef __ANDROID__
@@ -403,7 +419,7 @@
OmahaRequestParams params(&fake_system_state_);
fake_system_state_.fake_hardware()->SetIsOfficialBuild(false);
- params.set_root(tempdir.path().value());
+ params.set_root(tempdir.GetPath().value());
params.set_current_channel("stable-channel");
// The ImageProperties in Android uses prefs to store MutableImageProperties.
#ifdef __ANDROID__
@@ -447,7 +463,8 @@
EXPECT_CALL(*fake_system_state_.mock_payload_state(), GetP2PUrl())
.WillRepeatedly(Return(p2p_url));
EXPECT_CALL(*fake_system_state_.mock_payload_state(),
- GetUsingP2PForDownloading()).WillRepeatedly(Return(true));
+ GetUsingP2PForDownloading())
+ .WillRepeatedly(Return(true));
InstallPlan install_plan;
EXPECT_TRUE(DoTest(in, "", &install_plan));
@@ -456,4 +473,60 @@
EXPECT_TRUE(install_plan.hash_checks_mandatory);
}
+TEST_F(OmahaResponseHandlerActionTest, SystemVersionTest) {
+ OmahaResponse in;
+ in.update_exists = true;
+ in.version = "a.b.c.d";
+ in.system_version = "b.c.d.e";
+ in.packages.push_back({.payload_urls = {"http://package/1"},
+ .size = 1,
+ .hash = kPayloadHashHex});
+ in.packages.push_back({.payload_urls = {"http://package/2"},
+ .size = 2,
+ .hash = kPayloadHashHex});
+ in.more_info_url = "http://more/info";
+ InstallPlan install_plan;
+ EXPECT_TRUE(DoTest(in, "", &install_plan));
+ EXPECT_EQ(in.packages[0].payload_urls[0], install_plan.download_url);
+ EXPECT_EQ(2u, install_plan.payloads.size());
+ EXPECT_EQ(in.packages[0].size, install_plan.payloads[0].size);
+ EXPECT_EQ(in.packages[1].size, install_plan.payloads[1].size);
+ EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash);
+ EXPECT_EQ(expected_hash_, install_plan.payloads[1].hash);
+ EXPECT_EQ(in.version, install_plan.version);
+ EXPECT_EQ(in.system_version, install_plan.system_version);
+}
+
+TEST_F(OmahaResponseHandlerActionTest, TestDeferredByPolicy) {
+ OmahaResponse in;
+ in.update_exists = true;
+ in.version = "a.b.c.d";
+ in.packages.push_back({.payload_urls = {"http://foo/the_update_a.b.c.d.tgz"},
+ .size = 12,
+ .hash = kPayloadHashHex});
+ // Setup the UpdateManager to disallow the update.
+ FakeClock fake_clock;
+ MockPolicy* mock_policy = new MockPolicy(&fake_clock);
+ FakeUpdateManager* fake_update_manager =
+ fake_system_state_.fake_update_manager();
+ fake_update_manager->set_policy(mock_policy);
+ EXPECT_CALL(*mock_policy, UpdateCanBeApplied(_, _, _, _, _))
+ .WillOnce(
+ DoAll(SetArgPointee<3>(ErrorCode::kOmahaUpdateDeferredPerPolicy),
+ Return(EvalStatus::kSucceeded)));
+ // Perform the Action. It should "fail" with kOmahaUpdateDeferredPerPolicy.
+ InstallPlan install_plan;
+ EXPECT_FALSE(DoTest(in, "", &install_plan));
+ EXPECT_EQ(ErrorCode::kOmahaUpdateDeferredPerPolicy, action_result_code_);
+ // Verify that DoTest() didn't set the output install plan.
+ EXPECT_EQ("", install_plan.version);
+ // Copy the underlying InstallPlan from the Action (like a real Delegate).
+ install_plan = action_->install_plan();
+ // Now verify the InstallPlan that was generated.
+ EXPECT_EQ(in.packages[0].payload_urls[0], install_plan.download_url);
+ EXPECT_EQ(expected_hash_, install_plan.payloads[0].hash);
+ EXPECT_EQ(1U, install_plan.target_slot);
+ EXPECT_EQ(in.version, install_plan.version);
+}
+
} // namespace chromeos_update_engine
diff --git a/parcelable_update_engine_status.cc b/parcelable_update_engine_status.cc
index d8eb6db..8a2dbeb 100644
--- a/parcelable_update_engine_status.cc
+++ b/parcelable_update_engine_status.cc
@@ -15,12 +15,27 @@
//
#include "update_engine/parcelable_update_engine_status.h"
+#include "update_engine/update_status_utils.h"
#include <binder/Parcel.h>
+using update_engine::UpdateEngineStatus;
+
namespace android {
namespace brillo {
+ParcelableUpdateEngineStatus::ParcelableUpdateEngineStatus(
+ const UpdateEngineStatus& status)
+ : last_checked_time_(status.last_checked_time),
+ current_operation_(
+ chromeos_update_engine::UpdateStatusToString(status.status)),
+ progress_(status.progress),
+ current_version_(String16{status.current_version.c_str()}),
+ current_system_version_(String16{status.current_system_version.c_str()}),
+ new_size_(status.new_size_bytes),
+ new_version_(String16{status.new_version.c_str()}),
+ new_system_version_(String16{status.new_system_version.c_str()}) {}
+
status_t ParcelableUpdateEngineStatus::writeToParcel(Parcel* parcel) const {
status_t status;
@@ -29,12 +44,27 @@
return status;
}
+ status = parcel->writeString16(current_operation_);
+ if (status != OK) {
+ return status;
+ }
+
status = parcel->writeDouble(progress_);
if (status != OK) {
return status;
}
- status = parcel->writeString16(current_operation_);
+ status = parcel->writeString16(current_version_);
+ if (status != OK) {
+ return status;
+ }
+
+ status = parcel->writeString16(current_system_version_);
+ if (status != OK) {
+ return status;
+ }
+
+ status = parcel->writeInt64(new_size_);
if (status != OK) {
return status;
}
@@ -44,7 +74,7 @@
return status;
}
- return parcel->writeInt64(new_size_);
+ return parcel->writeString16(new_system_version_);
}
status_t ParcelableUpdateEngineStatus::readFromParcel(const Parcel* parcel) {
@@ -55,12 +85,27 @@
return status;
}
+ status = parcel->readString16(&current_operation_);
+ if (status != OK) {
+ return status;
+ }
+
status = parcel->readDouble(&progress_);
if (status != OK) {
return status;
}
- status = parcel->readString16(&current_operation_);
+ status = parcel->readString16(&current_version_);
+ if (status != OK) {
+ return status;
+ }
+
+ status = parcel->readString16(&current_system_version_);
+ if (status != OK) {
+ return status;
+ }
+
+ status = parcel->readInt64(&new_size_);
if (status != OK) {
return status;
}
@@ -70,7 +115,7 @@
return status;
}
- return parcel->readInt64(&new_size_);
+ return parcel->readString16(&new_system_version_);
}
} // namespace brillo
diff --git a/parcelable_update_engine_status.h b/parcelable_update_engine_status.h
index 2cfedd9..82006e4 100644
--- a/parcelable_update_engine_status.h
+++ b/parcelable_update_engine_status.h
@@ -20,6 +20,8 @@
#include <binder/Parcelable.h>
#include <utils/String16.h>
+#include "update_engine/client_library/include/update_engine/update_status.h"
+
namespace android {
namespace brillo {
@@ -28,16 +30,31 @@
class ParcelableUpdateEngineStatus : public Parcelable {
public:
ParcelableUpdateEngineStatus() = default;
+ explicit ParcelableUpdateEngineStatus(
+ const update_engine::UpdateEngineStatus& status);
virtual ~ParcelableUpdateEngineStatus() = default;
status_t writeToParcel(Parcel* parcel) const override;
status_t readFromParcel(const Parcel* parcel) override;
+ // This list is kept in the Parcelable serialization order.
+
+ // When the update_engine last checked for updates (seconds since the Unix epoch).
int64_t last_checked_time_;
- double progress_;
+ // The current status/operation of the update_engine.
android::String16 current_operation_;
- android::String16 new_version_;
+ // The current progress (0.0f-1.0f).
+ double progress_;
+ // The current product version.
+ android::String16 current_version_;
+ // The current system version.
+ android::String16 current_system_version_;
+ // The size of the update (bytes). This is int64_t for Java compatibility.
int64_t new_size_;
+ // The new product version.
+ android::String16 new_version_;
+ // The new system version, if there is one (empty otherwise).
+ android::String16 new_system_version_;
};
} // namespace brillo
diff --git a/parcelable_update_engine_status_unittest.cc b/parcelable_update_engine_status_unittest.cc
new file mode 100644
index 0000000..f4bd518
--- /dev/null
+++ b/parcelable_update_engine_status_unittest.cc
@@ -0,0 +1,92 @@
+//
+// Copyright (C) 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/parcelable_update_engine_status.h"
+#include "update_engine/update_status_utils.h"
+
+#include <binder/Parcel.h>
+#include <gtest/gtest.h>
+
+using android::Parcel;
+using android::String16;
+using android::brillo::ParcelableUpdateEngineStatus;
+using android::status_t;
+using update_engine::UpdateEngineStatus;
+using update_engine::UpdateStatus;
+
+TEST(ParcelableUpdateEngineStatusTest, TestCreationFromUpdateEngineStatus) {
+ // This test creates an object and verifies that all the UpdateEngineStatus
+ // values are properly reflected in the Parcelable version of the class.
+
+ UpdateEngineStatus ue_status = {123456789,
+ UpdateStatus::DOWNLOADING,
+ "0.1.2.3",
+ "1.2.3.4",
+ 0.5f,
+ 34567,
+ "2.3.4.5",
+ "3.4.5.6"};
+ ParcelableUpdateEngineStatus parcelable_status(ue_status);
+ EXPECT_EQ(ue_status.last_checked_time, parcelable_status.last_checked_time_);
+ EXPECT_EQ(
+ String16{chromeos_update_engine::UpdateStatusToString(ue_status.status)},
+ parcelable_status.current_operation_);
+ EXPECT_EQ(String16{ue_status.current_version.c_str()},
+ parcelable_status.current_version_);
+ EXPECT_EQ(String16{ue_status.current_system_version.c_str()},
+ parcelable_status.current_system_version_);
+ EXPECT_EQ(ue_status.progress, parcelable_status.progress_);
+ EXPECT_EQ(static_cast<int64_t>(ue_status.new_size_bytes),
+ parcelable_status.new_size_);
+ EXPECT_EQ(String16{ue_status.new_version.c_str()},
+ parcelable_status.new_version_);
+ EXPECT_EQ(String16{ue_status.new_system_version.c_str()},
+ parcelable_status.new_system_version_);
+}
+
+TEST(ParcelableUpdateEngineStatusTest, TestParceling) {
+ // This verifies that writeToParcel and readFromParcel serialize and
+ // deserialize the fields in the same order.
+ UpdateEngineStatus ue_status = {123456789,
+ UpdateStatus::DOWNLOADING,
+ "0.1.2.3",
+ "1.2.3.4",
+ 0.5f,
+ 34567,
+ "2.3.4.5",
+ "3.4.5.6"};
+ ParcelableUpdateEngineStatus source_status(ue_status);
+ Parcel parcel_source, parcel_target;
+ status_t status = source_status.writeToParcel(&parcel_source);
+ EXPECT_EQ(::android::OK, status);
+ size_t parcel_len = parcel_source.dataSize();
+ status = parcel_target.setData(parcel_source.data(), parcel_len);
+ EXPECT_EQ(::android::OK, status);
+ ParcelableUpdateEngineStatus target_status;
+ status = target_status.readFromParcel(&parcel_target);
+ EXPECT_EQ(::android::OK, status);
+
+ EXPECT_EQ(source_status.last_checked_time_, target_status.last_checked_time_);
+ EXPECT_EQ(source_status.current_operation_, target_status.current_operation_);
+ EXPECT_EQ(source_status.current_version_, target_status.current_version_);
+ EXPECT_EQ(source_status.current_system_version_,
+ target_status.current_system_version_);
+ EXPECT_EQ(source_status.progress_, target_status.progress_);
+ EXPECT_EQ(source_status.new_size_, target_status.new_size_);
+ EXPECT_EQ(source_status.new_version_, target_status.new_version_);
+ EXPECT_EQ(source_status.new_system_version_,
+ target_status.new_system_version_);
+}
diff --git a/payload_consumer/bzip_extent_writer.cc b/payload_consumer/bzip_extent_writer.cc
index 0fcc8ba..39d9d67 100644
--- a/payload_consumer/bzip_extent_writer.cc
+++ b/payload_consumer/bzip_extent_writer.cc
@@ -16,7 +16,7 @@
#include "update_engine/payload_consumer/bzip_extent_writer.h"
-using std::vector;
+using google::protobuf::RepeatedPtrField;
namespace chromeos_update_engine {
@@ -25,7 +25,7 @@
}
bool BzipExtentWriter::Init(FileDescriptorPtr fd,
- const vector<Extent>& extents,
+ const RepeatedPtrField<Extent>& extents,
uint32_t block_size) {
// Init bzip2 stream
int rc = BZ2_bzDecompressInit(&stream_,
diff --git a/payload_consumer/bzip_extent_writer.h b/payload_consumer/bzip_extent_writer.h
index 0ad542e..86b346a 100644
--- a/payload_consumer/bzip_extent_writer.h
+++ b/payload_consumer/bzip_extent_writer.h
@@ -19,7 +19,7 @@
#include <bzlib.h>
#include <memory>
-#include <vector>
+#include <utility>
#include <brillo/secure_blob.h>
@@ -41,7 +41,7 @@
~BzipExtentWriter() override = default;
bool Init(FileDescriptorPtr fd,
- const std::vector<Extent>& extents,
+ const google::protobuf::RepeatedPtrField<Extent>& extents,
uint32_t block_size) override;
bool Write(const void* bytes, size_t count) override;
bool EndImpl() override;
diff --git a/payload_consumer/bzip_extent_writer_unittest.cc b/payload_consumer/bzip_extent_writer_unittest.cc
index 8ac3e59..bf050ef 100644
--- a/payload_consumer/bzip_extent_writer_unittest.cc
+++ b/payload_consumer/bzip_extent_writer_unittest.cc
@@ -19,15 +19,17 @@
#include <fcntl.h>
#include <algorithm>
+#include <memory>
#include <string>
#include <vector>
-#include <brillo/make_unique_ptr.h>
#include <gtest/gtest.h>
#include "update_engine/common/test_utils.h"
#include "update_engine/common/utils.h"
+#include "update_engine/payload_generator/extent_ranges.h"
+using google::protobuf::RepeatedPtrField;
using std::min;
using std::string;
using std::vector;
@@ -55,11 +57,7 @@
};
TEST_F(BzipExtentWriterTest, SimpleTest) {
- vector<Extent> extents;
- Extent extent;
- extent.set_start_block(0);
- extent.set_num_blocks(1);
- extents.push_back(extent);
+ vector<Extent> extents = {ExtentForRange(0, 1)};
// 'echo test | bzip2 | hexdump' yields:
static const char test_uncompressed[] = "test\n";
@@ -70,9 +68,9 @@
0x22, 0x9c, 0x28, 0x48, 0x66, 0x61, 0xb8, 0xea, 0x00,
};
- BzipExtentWriter bzip_writer(
- brillo::make_unique_ptr(new DirectExtentWriter()));
- EXPECT_TRUE(bzip_writer.Init(fd_, extents, kBlockSize));
+ BzipExtentWriter bzip_writer(std::make_unique<DirectExtentWriter>());
+ EXPECT_TRUE(
+ bzip_writer.Init(fd_, {extents.begin(), extents.end()}, kBlockSize));
EXPECT_TRUE(bzip_writer.Write(test, sizeof(test)));
EXPECT_TRUE(bzip_writer.End());
@@ -102,15 +100,12 @@
for (size_t i = 0; i < decompressed_data.size(); ++i)
decompressed_data[i] = static_cast<uint8_t>("ABC\n"[i % 4]);
- vector<Extent> extents;
- Extent extent;
- extent.set_start_block(0);
- extent.set_num_blocks((kDecompressedLength + kBlockSize - 1) / kBlockSize);
- extents.push_back(extent);
+ vector<Extent> extents = {
+ ExtentForRange(0, (kDecompressedLength + kBlockSize - 1) / kBlockSize)};
- BzipExtentWriter bzip_writer(
- brillo::make_unique_ptr(new DirectExtentWriter()));
- EXPECT_TRUE(bzip_writer.Init(fd_, extents, kBlockSize));
+ BzipExtentWriter bzip_writer(std::make_unique<DirectExtentWriter>());
+ EXPECT_TRUE(
+ bzip_writer.Init(fd_, {extents.begin(), extents.end()}, kBlockSize));
brillo::Blob original_compressed_data = compressed_data;
for (brillo::Blob::size_type i = 0; i < compressed_data.size();
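The Init() change above (std::vector<Extent> to google::protobuf::RepeatedPtrField<Extent>) is why these tests now pass {extents.begin(), extents.end()}: RepeatedPtrField can be constructed from an iterator pair, so test code holding a vector keeps working while production code avoids a copy. A small sketch under those assumptions:

    // Test code: a std::vector<Extent> still works via the iterator-pair
    // constructor of RepeatedPtrField (fd_ and kBlockSize as in the fixture).
    std::vector<Extent> extents = {ExtentForRange(0, 1)};
    BzipExtentWriter bzip_writer(std::make_unique<DirectExtentWriter>());
    bzip_writer.Init(fd_, {extents.begin(), extents.end()}, kBlockSize);
    // Production code: the proto field can be passed directly, with no copy
    // into a vector (|op| is an InstallOperation, as in delta_performer).
    DirectExtentWriter writer;
    writer.Init(target_fd, op.dst_extents(), block_size);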
diff --git a/payload_consumer/cached_file_descriptor.cc b/payload_consumer/cached_file_descriptor.cc
new file mode 100644
index 0000000..7f2515e
--- /dev/null
+++ b/payload_consumer/cached_file_descriptor.cc
@@ -0,0 +1,98 @@
+//
+// Copyright (C) 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_consumer/cached_file_descriptor.h"
+
+#include <unistd.h>
+
+#include <algorithm>
+
+#include <base/logging.h>
+
+#include "update_engine/common/utils.h"
+
+namespace chromeos_update_engine {
+
+off64_t CachedFileDescriptor::Seek(off64_t offset, int whence) {
+ // Only SEEK_SET and SEEK_CUR are supported; they are sufficient for the
+ // current callers. Supporting SEEK_END would require querying the size of
+ // the underlying file descriptor on every call, which is not worth the cost.
+ CHECK(whence == SEEK_SET || whence == SEEK_CUR);
+ off64_t next_offset = whence == SEEK_SET ? offset : offset_ + offset;
+
+ if (next_offset != offset_) {
+ // The requested offset differs from the current one, so flush the cache
+ // and move to the new offset.
+ if (!FlushCache()) {
+ return -1;
+ }
+ // Then we have to seek there.
+ if (fd_->Seek(next_offset, SEEK_SET) < 0) {
+ return -1;
+ }
+ offset_ = next_offset;
+ }
+ return offset_;
+}
+
+ssize_t CachedFileDescriptor::Write(const void* buf, size_t count) {
+ auto bytes = static_cast<const uint8_t*>(buf);
+ size_t total_bytes_wrote = 0;
+ while (total_bytes_wrote < count) {
+ auto bytes_to_cache =
+ std::min(count - total_bytes_wrote, cache_.size() - bytes_cached_);
+ if (bytes_to_cache > 0) { // |cache_| still has free space.
+ memcpy(cache_.data() + bytes_cached_,
+ bytes + total_bytes_wrote,
+ bytes_to_cache);
+ total_bytes_wrote += bytes_to_cache;
+ bytes_cached_ += bytes_to_cache;
+ }
+ if (bytes_cached_ == cache_.size()) {
+ // The cache is full; flush it to |fd_|.
+ if (!FlushCache()) {
+ return -1;
+ }
+ }
+ }
+ offset_ += total_bytes_wrote;
+ return total_bytes_wrote;
+}
+
+bool CachedFileDescriptor::Flush() {
+ return FlushCache() && fd_->Flush();
+}
+
+bool CachedFileDescriptor::Close() {
+ offset_ = 0;
+ return FlushCache() && fd_->Close();
+}
+
+bool CachedFileDescriptor::FlushCache() {
+ size_t begin = 0;
+ while (begin < bytes_cached_) {
+ auto bytes_wrote = fd_->Write(cache_.data() + begin, bytes_cached_ - begin);
+ if (bytes_wrote < 0) {
+ PLOG(ERROR) << "Failed to flush cached data!";
+ return false;
+ }
+ begin += bytes_wrote;
+ }
+ bytes_cached_ = 0;
+ return true;
+}
+
+} // namespace chromeos_update_engine
diff --git a/payload_consumer/cached_file_descriptor.h b/payload_consumer/cached_file_descriptor.h
new file mode 100644
index 0000000..28c48f7
--- /dev/null
+++ b/payload_consumer/cached_file_descriptor.h
@@ -0,0 +1,76 @@
+//
+// Copyright (C) 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_PAYLOAD_CONSUMER_CACHED_FILE_DESCRIPTOR_H_
+#define UPDATE_ENGINE_PAYLOAD_CONSUMER_CACHED_FILE_DESCRIPTOR_H_
+
+#include <errno.h>
+#include <sys/types.h>
+
+#include <memory>
+#include <vector>
+
+#include <brillo/secure_blob.h>
+
+#include "update_engine/payload_consumer/file_descriptor.h"
+
+namespace chromeos_update_engine {
+
+class CachedFileDescriptor : public FileDescriptor {
+ public:
+ CachedFileDescriptor(FileDescriptorPtr fd, size_t cache_size) : fd_(fd) {
+ cache_.resize(cache_size);
+ }
+ ~CachedFileDescriptor() override = default;
+
+ bool Open(const char* path, int flags, mode_t mode) override {
+ return fd_->Open(path, flags, mode);
+ }
+ bool Open(const char* path, int flags) override {
+ return fd_->Open(path, flags);
+ }
+ ssize_t Read(void* buf, size_t count) override {
+ return fd_->Read(buf, count);
+ }
+ ssize_t Write(const void* buf, size_t count) override;
+ off64_t Seek(off64_t offset, int whence) override;
+ uint64_t BlockDevSize() override { return fd_->BlockDevSize(); }
+ bool BlkIoctl(int request,
+ uint64_t start,
+ uint64_t length,
+ int* result) override {
+ return fd_->BlkIoctl(request, start, length, result);
+ }
+ bool Flush() override;
+ bool Close() override;
+ bool IsSettingErrno() override { return fd_->IsSettingErrno(); }
+ bool IsOpen() override { return fd_->IsOpen(); }
+
+ private:
+ // Internal flush without the need to call |fd_->Flush()|.
+ bool FlushCache();
+
+ FileDescriptorPtr fd_;
+ brillo::Blob cache_;
+ size_t bytes_cached_{0};
+ off64_t offset_{0};
+
+ DISALLOW_COPY_AND_ASSIGN(CachedFileDescriptor);
+};
+
+} // namespace chromeos_update_engine
+
+#endif // UPDATE_ENGINE_PAYLOAD_CONSUMER_CACHED_FILE_DESCRIPTOR_H_
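A minimal usage sketch for the new CachedFileDescriptor (the path and the 1 MiB cache size are placeholders; EintrSafeFileDescriptor is the same underlying descriptor the unit test below uses). Writes stay in the in-memory cache until it fills, the caller seeks elsewhere, or Flush()/Close() is called:

    FileDescriptorPtr raw(new EintrSafeFileDescriptor());
    FileDescriptorPtr cached(new CachedFileDescriptor(raw, 1024 * 1024));
    cached->Open("/tmp/example-target.img", O_RDWR, 0600);
    const char data[] = "payload bytes";
    cached->Write(data, sizeof(data));  // buffered; not yet written through
    cached->Flush();                    // drains the cache, then flushes |raw|
    cached->Close();                    // also flushes any remaining cached bytes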
diff --git a/payload_consumer/cached_file_descriptor_unittest.cc b/payload_consumer/cached_file_descriptor_unittest.cc
new file mode 100644
index 0000000..6a6302a
--- /dev/null
+++ b/payload_consumer/cached_file_descriptor_unittest.cc
@@ -0,0 +1,204 @@
+//
+// Copyright (C) 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_consumer/cached_file_descriptor.h"
+
+#include <fcntl.h>
+
+#include <algorithm>
+#include <string>
+#include <vector>
+
+#include <gtest/gtest.h>
+
+#include "update_engine/common/test_utils.h"
+#include "update_engine/common/utils.h"
+
+using chromeos_update_engine::test_utils::ExpectVectorsEq;
+using std::min;
+using std::string;
+using std::vector;
+
+namespace chromeos_update_engine {
+
+namespace {
+const size_t kCacheSize = 100;
+const size_t kFileSize = 1024;
+const size_t kRandomIterations = 1000;
+} // namespace
+
+class CachedFileDescriptorTest : public ::testing::Test {
+ public:
+ void Open() {
+ cfd_.reset(new CachedFileDescriptor(fd_, kCacheSize));
+ EXPECT_TRUE(cfd_->Open(temp_file_.path().c_str(), O_RDWR, 0600));
+ }
+
+ void Write(uint8_t* buffer, size_t count) {
+ size_t total_bytes_wrote = 0;
+ while (total_bytes_wrote < count) {
+ auto bytes_wrote =
+ cfd_->Write(buffer + total_bytes_wrote, count - total_bytes_wrote);
+ ASSERT_NE(bytes_wrote, -1);
+ total_bytes_wrote += bytes_wrote;
+ }
+ }
+
+ void Close() { EXPECT_TRUE(cfd_->Close()); }
+
+ void SetUp() override {
+ brillo::Blob zero_blob(kFileSize, 0);
+ EXPECT_TRUE(utils::WriteFile(
+ temp_file_.path().c_str(), zero_blob.data(), zero_blob.size()));
+ Open();
+ }
+
+ void TearDown() override {
+ Close();
+ EXPECT_FALSE(cfd_->IsOpen());
+ }
+
+ protected:
+ FileDescriptorPtr fd_{new EintrSafeFileDescriptor};
+ test_utils::ScopedTempFile temp_file_{"CachedFileDescriptor-file.XXXXXX"};
+ int value_{1};
+ FileDescriptorPtr cfd_;
+};
+
+TEST_F(CachedFileDescriptorTest, IsOpenTest) {
+ EXPECT_TRUE(cfd_->IsOpen());
+}
+
+TEST_F(CachedFileDescriptorTest, SimpleWriteTest) {
+ EXPECT_EQ(cfd_->Seek(0, SEEK_SET), 0);
+ brillo::Blob blob_in(kFileSize, value_);
+ Write(blob_in.data(), blob_in.size());
+ EXPECT_TRUE(cfd_->Flush());
+
+ brillo::Blob blob_out;
+ EXPECT_TRUE(utils::ReadFile(temp_file_.path(), &blob_out));
+ EXPECT_EQ(blob_in, blob_out);
+}
+
+TEST_F(CachedFileDescriptorTest, OneBytePerWriteTest) {
+ EXPECT_EQ(cfd_->Seek(0, SEEK_SET), 0);
+ brillo::Blob blob_in(kFileSize, value_);
+ for (size_t idx = 0; idx < blob_in.size(); idx++) {
+ Write(&blob_in[idx], 1);
+ }
+ EXPECT_TRUE(cfd_->Flush());
+
+ brillo::Blob blob_out;
+ EXPECT_TRUE(utils::ReadFile(temp_file_.path(), &blob_out));
+ EXPECT_EQ(blob_in, blob_out);
+}
+
+TEST_F(CachedFileDescriptorTest, RandomWriteTest) {
+ EXPECT_EQ(cfd_->Seek(0, SEEK_SET), 0);
+
+ brillo::Blob blob_in(kFileSize, 0);
+ // rand_r() takes an explicit seed; srand() would have no effect on it.
+ uint32_t rand_seed = time(nullptr);
+ for (size_t idx = 0; idx < kRandomIterations; idx++) {
+ // Pick a random start anywhere in the file and a size that fits before EOF.
+ size_t start = rand_r(&rand_seed) % blob_in.size();
+ size_t size = rand_r(&rand_seed) % (blob_in.size() - start);
+ std::fill_n(&blob_in[start], size, idx % 256);
+ EXPECT_EQ(cfd_->Seek(start, SEEK_SET), static_cast<off64_t>(start));
+ Write(&blob_in[start], size);
+ }
+ EXPECT_TRUE(cfd_->Flush());
+
+ brillo::Blob blob_out;
+ EXPECT_TRUE(utils::ReadFile(temp_file_.path(), &blob_out));
+ EXPECT_EQ(blob_in, blob_out);
+}
+
+TEST_F(CachedFileDescriptorTest, SeekTest) {
+ EXPECT_EQ(cfd_->Seek(0, SEEK_SET), 0);
+ EXPECT_EQ(cfd_->Seek(1, SEEK_SET), 1);
+ EXPECT_EQ(cfd_->Seek(kFileSize - 1, SEEK_SET),
+ static_cast<off64_t>(kFileSize - 1));
+ EXPECT_EQ(cfd_->Seek(kFileSize, SEEK_SET), static_cast<off64_t>(kFileSize));
+ EXPECT_EQ(cfd_->Seek(kFileSize + 1, SEEK_SET),
+ static_cast<off64_t>(kFileSize + 1));
+
+ EXPECT_EQ(cfd_->Seek(0, SEEK_SET), 0);
+ EXPECT_EQ(cfd_->Seek(1, SEEK_CUR), 1);
+ EXPECT_EQ(cfd_->Seek(1, SEEK_CUR), 2);
+ EXPECT_EQ(cfd_->Seek(kFileSize - 1, SEEK_SET),
+ static_cast<off64_t>(kFileSize - 1));
+ EXPECT_EQ(cfd_->Seek(1, SEEK_CUR), static_cast<off64_t>(kFileSize));
+ EXPECT_EQ(cfd_->Seek(1, SEEK_CUR), static_cast<off64_t>(kFileSize + 1));
+}
+
+TEST_F(CachedFileDescriptorTest, NoFlushTest) {
+ EXPECT_EQ(cfd_->Seek(0, SEEK_SET), 0);
+ brillo::Blob blob_in(kFileSize, value_);
+ Write(blob_in.data(), blob_in.size());
+
+ brillo::Blob blob_out;
+ EXPECT_TRUE(utils::ReadFile(temp_file_.path(), &blob_out));
+ EXPECT_NE(blob_in, blob_out);
+}
+
+TEST_F(CachedFileDescriptorTest, CacheSizeWriteTest) {
+ off64_t seek = 10;
+ brillo::Blob blob_in(kFileSize, 0);
+ std::fill_n(&blob_in[seek], kCacheSize, value_);
+ // We write exactly one cache size worth of data, so it should be committed.
+ EXPECT_EQ(cfd_->Seek(seek, SEEK_SET), seek);
+ Write(&blob_in[seek], kCacheSize);
+
+ brillo::Blob blob_out;
+ EXPECT_TRUE(utils::ReadFile(temp_file_.path(), &blob_out));
+ EXPECT_EQ(blob_in, blob_out);
+}
+
+TEST_F(CachedFileDescriptorTest, UnderCacheSizeWriteTest) {
+ off64_t seek = 100;
+ size_t less_than_cache_size = kCacheSize - 1;
+ EXPECT_EQ(cfd_->Seek(seek, SEEK_SET), seek);
+ brillo::Blob blob_in(kFileSize, 0);
+ std::fill_n(&blob_in[seek], less_than_cache_size, value_);
+ // We write less than one cache size, so it should not be committed yet.
+ Write(&blob_in[seek], less_than_cache_size);
+
+ // Revert the changes in |blob_in|.
+ std::fill_n(&blob_in[seek], less_than_cache_size, 0);
+ brillo::Blob blob_out;
+ EXPECT_TRUE(utils::ReadFile(temp_file_.path(), &blob_out));
+ EXPECT_EQ(blob_in, blob_out);
+}
+
+TEST_F(CachedFileDescriptorTest, SeekAfterWriteTest) {
+ off64_t seek = 100;
+ size_t less_than_cache_size = kCacheSize - 3;
+ EXPECT_EQ(cfd_->Seek(seek, SEEK_SET), seek);
+ brillo::Blob blob_in(kFileSize, 0);
+ std::fill_n(&blob_in[seek], less_than_cache_size, value_);
+ // We write less than one cache size, so it should not be committed yet.
+ Write(&blob_in[seek], less_than_cache_size);
+
+ // Seeking away should flush the cached data to the file.
+ EXPECT_EQ(cfd_->Seek(200, SEEK_SET), 200);
+
+ brillo::Blob blob_out;
+ EXPECT_TRUE(utils::ReadFile(temp_file_.path(), &blob_out));
+ EXPECT_EQ(blob_in, blob_out);
+}
+
+} // namespace chromeos_update_engine
diff --git a/payload_consumer/delta_performer.cc b/payload_consumer/delta_performer.cc
index e158b33..001c84a 100644
--- a/payload_consumer/delta_performer.cc
+++ b/payload_consumer/delta_performer.cc
@@ -24,17 +24,20 @@
#include <cstring>
#include <memory>
#include <string>
+#include <utility>
#include <vector>
#include <base/files/file_util.h>
#include <base/format_macros.h>
+#include <base/metrics/histogram_macros.h>
#include <base/strings/string_number_conversions.h>
#include <base/strings/string_util.h>
#include <base/strings/stringprintf.h>
+#include <base/time/time.h>
#include <brillo/data_encoding.h>
-#include <brillo/make_unique_ptr.h>
#include <bsdiff/bspatch.h>
#include <google/protobuf/repeated_field.h>
+#include <puffin/puffpatch.h>
#include "update_engine/common/constants.h"
#include "update_engine/common/hardware_interface.h"
@@ -42,8 +45,11 @@
#include "update_engine/common/subprocess.h"
#include "update_engine/common/terminator.h"
#include "update_engine/payload_consumer/bzip_extent_writer.h"
+#include "update_engine/payload_consumer/cached_file_descriptor.h"
#include "update_engine/payload_consumer/download_action.h"
+#include "update_engine/payload_consumer/extent_reader.h"
#include "update_engine/payload_consumer/extent_writer.h"
+#include "update_engine/payload_consumer/file_descriptor_utils.h"
#if USE_MTD
#include "update_engine/payload_consumer/mtd_file_descriptor.h"
#endif
@@ -66,7 +72,7 @@
const uint64_t DeltaPerformer::kDeltaMetadataSignatureSizeSize = 4;
const uint64_t DeltaPerformer::kMaxPayloadHeaderSize = 24;
const uint64_t DeltaPerformer::kSupportedMajorPayloadVersion = 2;
-const uint32_t DeltaPerformer::kSupportedMinorPayloadVersion = 3;
+const uint32_t DeltaPerformer::kSupportedMinorPayloadVersion = 4;
const unsigned DeltaPerformer::kProgressLogMaxChunks = 10;
const unsigned DeltaPerformer::kProgressLogTimeoutSeconds = 30;
@@ -80,6 +86,8 @@
const int kUbiVolumeAttachTimeout = 5 * 60;
#endif
+const uint64_t kCacheSize = 1024 * 1024; // 1MB
+
FileDescriptorPtr CreateFileDescriptor(const char* path) {
FileDescriptorPtr ret;
#if USE_MTD
@@ -110,12 +118,20 @@
// Opens path for read/write. On success returns an open FileDescriptor
// and sets *err to 0. On failure, sets *err to errno and returns nullptr.
-FileDescriptorPtr OpenFile(const char* path, int mode, int* err) {
+FileDescriptorPtr OpenFile(const char* path,
+ int mode,
+ bool cache_writes,
+ int* err) {
// Try to mark the block device read-only based on the mode. Ignore any
// failure since this won't work when passing regular files.
- utils::SetBlockDeviceReadOnly(path, (mode & O_ACCMODE) == O_RDONLY);
+ bool read_only = (mode & O_ACCMODE) == O_RDONLY;
+ utils::SetBlockDeviceReadOnly(path, read_only);
FileDescriptorPtr fd = CreateFileDescriptor(path);
+ if (cache_writes && !read_only) {
+ fd = FileDescriptorPtr(new CachedFileDescriptor(fd, kCacheSize));
+ LOG(INFO) << "Caching writes.";
+ }
#if USE_MTD
// On NAND devices, we can either read, or write, but not both. So here we
// use O_WRONLY.
@@ -271,6 +287,7 @@
size_t read_len = min(count, max - buffer_.size());
const char* bytes_start = *bytes_p;
const char* bytes_end = bytes_start + read_len;
+ buffer_.reserve(max);
buffer_.insert(buffer_.end(), bytes_start, bytes_end);
*bytes_p = bytes_end;
*count_p = count - read_len;
@@ -344,7 +361,7 @@
GetMinorVersion() != kInPlaceMinorPayloadVersion) {
source_path_ = install_part.source_path;
int err;
- source_fd_ = OpenFile(source_path_.c_str(), O_RDONLY, &err);
+ source_fd_ = OpenFile(source_path_.c_str(), O_RDONLY, false, &err);
if (!source_fd_) {
LOG(ERROR) << "Unable to open source partition "
<< partition.partition_name() << " on slot "
@@ -356,7 +373,15 @@
target_path_ = install_part.target_path;
int err;
- target_fd_ = OpenFile(target_path_.c_str(), O_RDWR, &err);
+
+ int flags = O_RDWR;
+ if (!is_interactive_)
+ flags |= O_DSYNC;
+
+ LOG(INFO) << "Opening " << target_path_ << " partition with"
+ << (is_interactive_ ? "out" : "") << " O_DSYNC";
+
+ target_fd_ = OpenFile(target_path_.c_str(), flags, true, &err);
if (!target_fd_) {
LOG(ERROR) << "Unable to open target partition "
<< partition.partition_name() << " on slot "
@@ -582,12 +607,20 @@
return kMetadataParseSuccess;
}
+#define OP_DURATION_HISTOGRAM(_op_name, _start_time) \
+ LOCAL_HISTOGRAM_CUSTOM_TIMES( \
+ "UpdateEngine.DownloadAction.InstallOperation::" \
+ _op_name ".Duration", \
+ base::TimeTicks::Now() - _start_time, \
+ base::TimeDelta::FromMilliseconds(10), \
+ base::TimeDelta::FromMinutes(5), \
+ 20);
+
// Wrapper around write. Returns true if all requested bytes
// were written, or false on any error, regardless of progress
// and stores an action exit code in |error|.
bool DeltaPerformer::Write(const void* bytes, size_t count, ErrorCode *error) {
*error = ErrorCode::kSuccess;
-
const char* c_bytes = reinterpret_cast<const char*>(bytes);
// Update the total byte downloaded count and the progress logs.
@@ -717,32 +750,41 @@
ScopedTerminatorExitUnblocker exit_unblocker =
ScopedTerminatorExitUnblocker(); // Avoids a compiler unused var bug.
+ base::TimeTicks op_start_time = base::TimeTicks::Now();
+
bool op_result;
switch (op.type()) {
case InstallOperation::REPLACE:
case InstallOperation::REPLACE_BZ:
case InstallOperation::REPLACE_XZ:
op_result = PerformReplaceOperation(op);
+ OP_DURATION_HISTOGRAM("REPLACE", op_start_time);
break;
case InstallOperation::ZERO:
case InstallOperation::DISCARD:
op_result = PerformZeroOrDiscardOperation(op);
+ OP_DURATION_HISTOGRAM("ZERO_OR_DISCARD", op_start_time);
break;
case InstallOperation::MOVE:
op_result = PerformMoveOperation(op);
+ OP_DURATION_HISTOGRAM("MOVE", op_start_time);
break;
case InstallOperation::BSDIFF:
op_result = PerformBsdiffOperation(op);
+ OP_DURATION_HISTOGRAM("BSDIFF", op_start_time);
break;
case InstallOperation::SOURCE_COPY:
op_result = PerformSourceCopyOperation(op, error);
+ OP_DURATION_HISTOGRAM("SOURCE_COPY", op_start_time);
break;
case InstallOperation::SOURCE_BSDIFF:
+ case InstallOperation::BROTLI_BSDIFF:
op_result = PerformSourceBsdiffOperation(op, error);
+ OP_DURATION_HISTOGRAM("SOURCE_BSDIFF", op_start_time);
break;
- case InstallOperation::IMGDIFF:
- // TODO(deymo): Replace with PUFFIN operation.
- op_result = false;
+ case InstallOperation::PUFFDIFF:
+ op_result = PerformPuffDiffOperation(op, error);
+ OP_DURATION_HISTOGRAM("PUFFDIFF", op_start_time);
break;
default:
op_result = false;
@@ -750,6 +792,10 @@
if (!HandleOpResult(op_result, InstallOperationTypeName(op.type()), error))
return false;
+ if (!target_fd_->Flush()) {
+ return false;
+ }
+
next_operation_num_++;
UpdateOverallProgress(false, "Completed ");
CheckpointUpdateProgress();
@@ -917,9 +963,8 @@
}
// Setup the ExtentWriter stack based on the operation type.
- std::unique_ptr<ExtentWriter> writer =
- brillo::make_unique_ptr(new ZeroPadExtentWriter(
- brillo::make_unique_ptr(new DirectExtentWriter())));
+ std::unique_ptr<ExtentWriter> writer = std::make_unique<ZeroPadExtentWriter>(
+ std::make_unique<DirectExtentWriter>());
if (operation.type() == InstallOperation::REPLACE_BZ) {
writer.reset(new BzipExtentWriter(std::move(writer)));
@@ -927,13 +972,8 @@
writer.reset(new XzExtentWriter(std::move(writer)));
}
- // Create a vector of extents to pass to the ExtentWriter.
- vector<Extent> extents;
- for (int i = 0; i < operation.dst_extents_size(); i++) {
- extents.push_back(operation.dst_extents(i));
- }
-
- TEST_AND_RETURN_FALSE(writer->Init(target_fd_, extents, block_size_));
+ TEST_AND_RETURN_FALSE(
+ writer->Init(target_fd_, operation.dst_extents(), block_size_));
TEST_AND_RETURN_FALSE(writer->Write(buffer_.data(), operation.data_length()));
TEST_AND_RETURN_FALSE(writer->End());
@@ -975,8 +1015,8 @@
for (uint64_t offset = 0; offset < length; offset += zeros.size()) {
uint64_t chunk_length = min(length - offset,
static_cast<uint64_t>(zeros.size()));
- TEST_AND_RETURN_FALSE(
- utils::PWriteAll(target_fd_, zeros.data(), chunk_length, start + offset));
+ TEST_AND_RETURN_FALSE(utils::PWriteAll(
+ target_fd_, zeros.data(), chunk_length, start + offset));
}
}
return true;
@@ -1035,25 +1075,6 @@
namespace {
-// Takes |extents| and fills an empty vector |blocks| with a block index for
-// each block in |extents|. For example, [(3, 2), (8, 1)] would give [3, 4, 8].
-void ExtentsToBlocks(const RepeatedPtrField<Extent>& extents,
- vector<uint64_t>* blocks) {
- for (const Extent& ext : extents) {
- for (uint64_t j = 0; j < ext.num_blocks(); j++)
- blocks->push_back(ext.start_block() + j);
- }
-}
-
-// Takes |extents| and returns the number of blocks in those extents.
-uint64_t GetBlockCount(const RepeatedPtrField<Extent>& extents) {
- uint64_t sum = 0;
- for (const Extent& ext : extents) {
- sum += ext.num_blocks();
- }
- return sum;
-}
-
// Compare |calculated_hash| with source hash in |operation|, return false and
// dump hash and set |error| if don't match.
bool ValidateSourceHash(const brillo::Blob& calculated_hash,
@@ -1099,57 +1120,18 @@
if (operation.has_dst_length())
TEST_AND_RETURN_FALSE(operation.dst_length() % block_size_ == 0);
- uint64_t blocks_to_read = GetBlockCount(operation.src_extents());
- uint64_t blocks_to_write = GetBlockCount(operation.dst_extents());
- TEST_AND_RETURN_FALSE(blocks_to_write == blocks_to_read);
-
- // Create vectors of all the individual src/dst blocks.
- vector<uint64_t> src_blocks;
- vector<uint64_t> dst_blocks;
- ExtentsToBlocks(operation.src_extents(), &src_blocks);
- ExtentsToBlocks(operation.dst_extents(), &dst_blocks);
- DCHECK_EQ(src_blocks.size(), blocks_to_read);
- DCHECK_EQ(src_blocks.size(), dst_blocks.size());
-
- brillo::Blob buf(block_size_);
- ssize_t bytes_read = 0;
- HashCalculator source_hasher;
- // Read/write one block at a time.
- for (uint64_t i = 0; i < blocks_to_read; i++) {
- ssize_t bytes_read_this_iteration = 0;
- uint64_t src_block = src_blocks[i];
- uint64_t dst_block = dst_blocks[i];
-
- // Read in bytes.
- TEST_AND_RETURN_FALSE(
- utils::PReadAll(source_fd_,
- buf.data(),
- block_size_,
- src_block * block_size_,
- &bytes_read_this_iteration));
-
- // Write bytes out.
- TEST_AND_RETURN_FALSE(
- utils::PWriteAll(target_fd_,
- buf.data(),
- block_size_,
- dst_block * block_size_));
-
- bytes_read += bytes_read_this_iteration;
- TEST_AND_RETURN_FALSE(bytes_read_this_iteration ==
- static_cast<ssize_t>(block_size_));
-
- if (operation.has_src_sha256_hash())
- TEST_AND_RETURN_FALSE(source_hasher.Update(buf.data(), buf.size()));
- }
+ brillo::Blob source_hash;
+ TEST_AND_RETURN_FALSE(fd_utils::CopyAndHashExtents(source_fd_,
+ operation.src_extents(),
+ target_fd_,
+ operation.dst_extents(),
+ block_size_,
+ &source_hash));
if (operation.has_src_sha256_hash()) {
- TEST_AND_RETURN_FALSE(source_hasher.Finalize());
- TEST_AND_RETURN_FALSE(
- ValidateSourceHash(source_hasher.raw_hash(), operation, error));
+ TEST_AND_RETURN_FALSE(ValidateSourceHash(source_hash, operation, error));
}
- DCHECK_EQ(bytes_read, static_cast<ssize_t>(blocks_to_read * block_size_));
return true;
}
@@ -1210,12 +1192,102 @@
const uint64_t begin_byte =
end_byte - (block_size_ - operation.dst_length() % block_size_);
brillo::Blob zeros(end_byte - begin_byte);
- TEST_AND_RETURN_FALSE(
- utils::PWriteAll(target_fd_, zeros.data(), end_byte - begin_byte, begin_byte));
+ TEST_AND_RETURN_FALSE(utils::PWriteAll(
+ target_fd_, zeros.data(), end_byte - begin_byte, begin_byte));
}
return true;
}
+bool DeltaPerformer::CalculateAndValidateSourceHash(
+ const InstallOperation& operation, ErrorCode* error) {
+ const uint64_t kMaxBlocksToRead = 256; // 1MB if block size is 4KB
+ auto total_blocks = utils::BlocksInExtents(operation.src_extents());
+ brillo::Blob buf(std::min(kMaxBlocksToRead, total_blocks) * block_size_);
+ DirectExtentReader reader;
+ TEST_AND_RETURN_FALSE(
+ reader.Init(source_fd_, operation.src_extents(), block_size_));
+ HashCalculator source_hasher;
+ while (total_blocks > 0) {
+ auto read_blocks = std::min(total_blocks, kMaxBlocksToRead);
+ TEST_AND_RETURN_FALSE(reader.Read(buf.data(), read_blocks * block_size_));
+ TEST_AND_RETURN_FALSE(
+ source_hasher.Update(buf.data(), read_blocks * block_size_));
+ total_blocks -= read_blocks;
+ }
+ TEST_AND_RETURN_FALSE(source_hasher.Finalize());
+ TEST_AND_RETURN_FALSE(
+ ValidateSourceHash(source_hasher.raw_hash(), operation, error));
+ return true;
+}
+
+namespace {
+
+class BsdiffExtentFile : public bsdiff::FileInterface {
+ public:
+ BsdiffExtentFile(std::unique_ptr<ExtentReader> reader, size_t size)
+ : BsdiffExtentFile(std::move(reader), nullptr, size) {}
+ BsdiffExtentFile(std::unique_ptr<ExtentWriter> writer, size_t size)
+ : BsdiffExtentFile(nullptr, std::move(writer), size) {}
+
+ ~BsdiffExtentFile() override = default;
+
+ bool Read(void* buf, size_t count, size_t* bytes_read) override {
+ TEST_AND_RETURN_FALSE(reader_->Read(buf, count));
+ *bytes_read = count;
+ offset_ += count;
+ return true;
+ }
+
+ bool Write(const void* buf, size_t count, size_t* bytes_written) override {
+ TEST_AND_RETURN_FALSE(writer_->Write(buf, count));
+ *bytes_written = count;
+ offset_ += count;
+ return true;
+ }
+
+ bool Seek(off_t pos) override {
+ if (reader_ != nullptr) {
+ TEST_AND_RETURN_FALSE(reader_->Seek(pos));
+ offset_ = pos;
+ } else {
+ // For writes there should technically be no change of position, or the
+ // requested position should equal the current offset.
+ TEST_AND_RETURN_FALSE(offset_ == static_cast<uint64_t>(pos));
+ }
+ return true;
+ }
+
+ bool Close() override {
+ if (writer_ != nullptr) {
+ TEST_AND_RETURN_FALSE(writer_->End());
+ }
+ return true;
+ }
+
+ bool GetSize(uint64_t* size) override {
+ *size = size_;
+ return true;
+ }
+
+ private:
+ BsdiffExtentFile(std::unique_ptr<ExtentReader> reader,
+ std::unique_ptr<ExtentWriter> writer,
+ size_t size)
+ : reader_(std::move(reader)),
+ writer_(std::move(writer)),
+ size_(size),
+ offset_(0) {}
+
+ std::unique_ptr<ExtentReader> reader_;
+ std::unique_ptr<ExtentWriter> writer_;
+ uint64_t size_;
+ uint64_t offset_;
+
+ DISALLOW_COPY_AND_ASSIGN(BsdiffExtentFile);
+};
+
+} // namespace
+
bool DeltaPerformer::PerformSourceBsdiffOperation(
const InstallOperation& operation, ErrorCode* error) {
// Since we delete data off the beginning of the buffer as we use it,
@@ -1228,45 +1300,142 @@
TEST_AND_RETURN_FALSE(operation.dst_length() % block_size_ == 0);
if (operation.has_src_sha256_hash()) {
- HashCalculator source_hasher;
- const uint64_t kMaxBlocksToRead = 512; // 2MB if block size is 4KB
- brillo::Blob buf(kMaxBlocksToRead * block_size_);
- for (const Extent& extent : operation.src_extents()) {
- for (uint64_t i = 0; i < extent.num_blocks(); i += kMaxBlocksToRead) {
- uint64_t blocks_to_read = min(
- kMaxBlocksToRead, static_cast<uint64_t>(extent.num_blocks()) - i);
- ssize_t bytes_to_read = blocks_to_read * block_size_;
- ssize_t bytes_read_this_iteration = 0;
- TEST_AND_RETURN_FALSE(
- utils::PReadAll(source_fd_, buf.data(), bytes_to_read,
- (extent.start_block() + i) * block_size_,
- &bytes_read_this_iteration));
- TEST_AND_RETURN_FALSE(bytes_read_this_iteration == bytes_to_read);
- TEST_AND_RETURN_FALSE(source_hasher.Update(buf.data(), bytes_to_read));
- }
- }
- TEST_AND_RETURN_FALSE(source_hasher.Finalize());
- TEST_AND_RETURN_FALSE(
- ValidateSourceHash(source_hasher.raw_hash(), operation, error));
+ TEST_AND_RETURN_FALSE(CalculateAndValidateSourceHash(operation, error));
}
- string input_positions;
- TEST_AND_RETURN_FALSE(ExtentsToBsdiffPositionsString(operation.src_extents(),
- block_size_,
- operation.src_length(),
- &input_positions));
- string output_positions;
- TEST_AND_RETURN_FALSE(ExtentsToBsdiffPositionsString(operation.dst_extents(),
- block_size_,
- operation.dst_length(),
- &output_positions));
+ auto reader = std::make_unique<DirectExtentReader>();
+ TEST_AND_RETURN_FALSE(
+ reader->Init(source_fd_, operation.src_extents(), block_size_));
+ auto src_file = std::make_unique<BsdiffExtentFile>(
+ std::move(reader),
+ utils::BlocksInExtents(operation.src_extents()) * block_size_);
- TEST_AND_RETURN_FALSE(bsdiff::bspatch(source_path_.c_str(),
- target_path_.c_str(),
+ auto writer = std::make_unique<DirectExtentWriter>();
+ TEST_AND_RETURN_FALSE(
+ writer->Init(target_fd_, operation.dst_extents(), block_size_));
+ auto dst_file = std::make_unique<BsdiffExtentFile>(
+ std::move(writer),
+ utils::BlocksInExtents(operation.dst_extents()) * block_size_);
+
+ TEST_AND_RETURN_FALSE(bsdiff::bspatch(std::move(src_file),
+ std::move(dst_file),
buffer_.data(),
- buffer_.size(),
- input_positions.c_str(),
- output_positions.c_str()) == 0);
+ buffer_.size()) == 0);
+ DiscardBuffer(true, buffer_.size());
+ return true;
+}
+
+namespace {
+
+// A class to be passed to |puffpatch| for reading from |source_fd_| and writing
+// into |target_fd_|.
+class PuffinExtentStream : public puffin::StreamInterface {
+ public:
+ // Constructor for creating a stream for reading from an |ExtentReader|.
+ PuffinExtentStream(std::unique_ptr<ExtentReader> reader, size_t size)
+ : PuffinExtentStream(std::move(reader), nullptr, size) {}
+
+ // Constructor for creating a stream for writing to an |ExtentWriter|.
+ PuffinExtentStream(std::unique_ptr<ExtentWriter> writer, size_t size)
+ : PuffinExtentStream(nullptr, std::move(writer), size) {}
+
+ ~PuffinExtentStream() override = default;
+
+ bool GetSize(size_t* size) const override {
+ *size = size_;
+ return true;
+ }
+
+ bool GetOffset(size_t* offset) const override {
+ *offset = offset_;
+ return true;
+ }
+
+ bool Seek(size_t offset) override {
+ if (is_read_) {
+ TEST_AND_RETURN_FALSE(reader_->Seek(offset));
+ offset_ = offset;
+ } else {
+ // For writes, technically there should be no change of position, or it
+ // should be equivalent to the current offset.
+ TEST_AND_RETURN_FALSE(offset_ == offset);
+ }
+ return true;
+ }
+
+ bool Read(void* buffer, size_t count) override {
+ TEST_AND_RETURN_FALSE(is_read_);
+ TEST_AND_RETURN_FALSE(reader_->Read(buffer, count));
+ offset_ += count;
+ return true;
+ }
+
+ bool Write(const void* buffer, size_t count) override {
+ TEST_AND_RETURN_FALSE(!is_read_);
+ TEST_AND_RETURN_FALSE(writer_->Write(buffer, count));
+ offset_ += count;
+ return true;
+ }
+
+ bool Close() override {
+ if (!is_read_) {
+ TEST_AND_RETURN_FALSE(writer_->End());
+ }
+ return true;
+ }
+
+ private:
+ PuffinExtentStream(std::unique_ptr<ExtentReader> reader,
+ std::unique_ptr<ExtentWriter> writer,
+ size_t size)
+ : reader_(std::move(reader)),
+ writer_(std::move(writer)),
+ size_(size),
+ offset_(0),
+ is_read_(reader_ ? true : false) {}
+
+ std::unique_ptr<ExtentReader> reader_;
+ std::unique_ptr<ExtentWriter> writer_;
+ uint64_t size_;
+ uint64_t offset_;
+ bool is_read_;
+
+ DISALLOW_COPY_AND_ASSIGN(PuffinExtentStream);
+};
+
+} // namespace
+
+bool DeltaPerformer::PerformPuffDiffOperation(const InstallOperation& operation,
+ ErrorCode* error) {
+ // Since we delete data off the beginning of the buffer as we use it,
+ // the data we need should be exactly at the beginning of the buffer.
+ TEST_AND_RETURN_FALSE(buffer_offset_ == operation.data_offset());
+ TEST_AND_RETURN_FALSE(buffer_.size() >= operation.data_length());
+
+ if (operation.has_src_sha256_hash()) {
+ TEST_AND_RETURN_FALSE(CalculateAndValidateSourceHash(operation, error));
+ }
+
+ auto reader = std::make_unique<DirectExtentReader>();
+ TEST_AND_RETURN_FALSE(
+ reader->Init(source_fd_, operation.src_extents(), block_size_));
+ puffin::UniqueStreamPtr src_stream(new PuffinExtentStream(
+ std::move(reader),
+ utils::BlocksInExtents(operation.src_extents()) * block_size_));
+
+ auto writer = std::make_unique<DirectExtentWriter>();
+ TEST_AND_RETURN_FALSE(
+ writer->Init(target_fd_, operation.dst_extents(), block_size_));
+ puffin::UniqueStreamPtr dst_stream(new PuffinExtentStream(
+ std::move(writer),
+ utils::BlocksInExtents(operation.dst_extents()) * block_size_));
+
+ const size_t kMaxCacheSize = 5 * 1024 * 1024; // Total 5MB cache.
+ TEST_AND_RETURN_FALSE(puffin::PuffPatch(std::move(src_stream),
+ std::move(dst_stream),
+ buffer_.data(),
+ buffer_.size(),
+ kMaxCacheSize));
DiscardBuffer(true, buffer_.size());
return true;
}
@@ -1695,6 +1864,7 @@
prefs->SetInt64(kPrefsManifestMetadataSize, -1);
prefs->SetInt64(kPrefsManifestSignatureSize, -1);
prefs->SetInt64(kPrefsResumedUpdateFailures, 0);
+ prefs->Delete(kPrefsPostInstallSucceeded);
}
return true;
}
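CalculateAndValidateSourceHash() above hashes the source extents in bounded chunks of at most kMaxBlocksToRead blocks instead of streaming whole extents at once. A minimal standalone sketch of that loop, with a plain byte vector standing in for DirectExtentReader and a simple byte sum standing in for HashCalculator (both stand-ins are hypothetical):

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    // Sketch of the bounded-buffer hashing loop; not part of the patch.
    uint64_t ChunkedDigest(const std::vector<uint8_t>& source, size_t block_size) {
      const uint64_t kMaxBlocksToRead = 256;  // 1MB if the block size is 4KB.
      uint64_t total_blocks = source.size() / block_size;
      uint64_t digest = 0;  // Stand-in for the HashCalculator state.
      size_t offset = 0;
      while (total_blocks > 0) {
        uint64_t read_blocks = std::min(total_blocks, kMaxBlocksToRead);
        size_t count = read_blocks * block_size;
        // The real code calls reader.Read(buf.data(), count) and then
        // source_hasher.Update(buf.data(), count) here.
        for (size_t i = 0; i < count; ++i)
          digest += source[offset + i];
        offset += count;
        total_blocks -= read_blocks;
      }
      // The real code finalizes the hash and compares it against the
      // manifest's src_sha256_hash via ValidateSourceHash().
      return digest;
    }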
diff --git a/payload_consumer/delta_performer.h b/payload_consumer/delta_performer.h
index f363a4c..731e7f1 100644
--- a/payload_consumer/delta_performer.h
+++ b/payload_consumer/delta_performer.h
@@ -19,6 +19,7 @@
#include <inttypes.h>
+#include <limits>
#include <string>
#include <vector>
@@ -79,13 +80,15 @@
HardwareInterface* hardware,
DownloadActionDelegate* download_delegate,
InstallPlan* install_plan,
- InstallPlan::Payload* payload)
+ InstallPlan::Payload* payload,
+ bool is_interactive)
: prefs_(prefs),
boot_control_(boot_control),
hardware_(hardware),
download_delegate_(download_delegate),
install_plan_(install_plan),
- payload_(payload) {}
+ payload_(payload),
+ is_interactive_(is_interactive) {}
// FileWriter's Write implementation where caller doesn't care about
// error codes.
@@ -162,9 +165,9 @@
public_key_path_ = public_key_path;
}
- // Set |*out_offset| to the byte offset where the size of the metadata signature
- // is stored in a payload. Return true on success, if this field is not
- // present in the payload, return false.
+ // Set |*out_offset| to the byte offset where the size of the metadata
+ // signature is stored in a payload. Return true on success, if this field is
+ // not present in the payload, return false.
bool GetMetadataSignatureSizeOffset(uint64_t* out_offset) const;
// Set |*out_offset| to the byte offset at which the manifest protobuf begins
@@ -242,6 +245,10 @@
// buffer.
ErrorCode ValidateMetadataSignature(const brillo::Blob& payload);
+ // Calculates and validates the source hash of the operation |operation|.
+ bool CalculateAndValidateSourceHash(const InstallOperation& operation,
+ ErrorCode* error);
+
// Returns true on success.
bool PerformInstallOperation(const InstallOperation& operation);
@@ -256,6 +263,8 @@
ErrorCode* error);
bool PerformSourceBsdiffOperation(const InstallOperation& operation,
ErrorCode* error);
+ bool PerformPuffDiffOperation(const InstallOperation& operation,
+ ErrorCode* error);
// Extracts the payload signature message from the blob on the |operation| if
// the offset matches the one specified by the manifest. Returns whether the
@@ -390,6 +399,9 @@
// The last progress chunk recorded.
unsigned last_progress_chunk_{0};
+ // If |true|, the update is user-initiated (vs. periodic update checks).
+ bool is_interactive_{false};
+
// The timeout after which we should force emitting a progress log (constant),
// and the actual point in time for the next forced log to be emitted.
const base::TimeDelta forced_progress_log_wait_{
diff --git a/payload_consumer/delta_performer_integration_test.cc b/payload_consumer/delta_performer_integration_test.cc
index bc67d93..3572a6d 100644
--- a/payload_consumer/delta_performer_integration_test.cc
+++ b/payload_consumer/delta_performer_integration_test.cc
@@ -747,7 +747,8 @@
&state->fake_hardware_,
&state->mock_delegate_,
install_plan,
- &install_plan->payloads[0]);
+ &install_plan->payloads[0],
+ false /* is_interactive */);
string public_key_path = GetBuildArtifactsPath(kUnittestPublicKeyPath);
EXPECT_TRUE(utils::FileExists(public_key_path.c_str()));
(*performer)->set_public_key_path(public_key_path);
diff --git a/payload_consumer/delta_performer_unittest.cc b/payload_consumer/delta_performer_unittest.cc
index fbdf1ab..420efd2 100644
--- a/payload_consumer/delta_performer_unittest.cc
+++ b/payload_consumer/delta_performer_unittest.cc
@@ -87,6 +87,61 @@
0x00, 0x00, 0x59, 0x5a,
};
+const uint8_t src_deflates[] = {
+ /* raw 0 */ 0x11, 0x22,
+ /* deflate 2 */ 0x63, 0x64, 0x62, 0x66, 0x61, 0x05, 0x00,
+ /* raw 9 */ 0x33,
+ /* deflate 10 */ 0x03, 0x00,
+ /* raw 12 */
+ /* deflate 12 */ 0x63, 0x04, 0x00,
+ /* raw 15 */ 0x44, 0x55
+};
+
+const uint8_t dst_deflates[] = {
+ /* deflate 0 */ 0x63, 0x64, 0x62, 0x66, 0x61, 0x05, 0x00,
+ /* raw 7 */ 0x33, 0x66,
+ /* deflate 9 */ 0x01, 0x05, 0x00, 0xFA, 0xFF, 0x01, 0x02, 0x03, 0x04, 0x05,
+ /* deflate 19 */ 0x63, 0x04, 0x00
+};
+
+// To generate this patch either:
+// - Use puffin/src/patching_unittest.cc:TestPatching
+// Or
+// - Use the following approach:
+// * Make src_deflate a string of hex with only spaces. (e.g. "0XTE 0xST")
+// * echo "0XTE 0xST" | xxd -r -p > src.bin
+// * Find the location of deflates in src_deflates (in bytes) in the format of
+// "offset:length,...". (e.g. "2:7,10:2,12:3")
+// * Do previous three steps for dst_deflates.
+// * puffin --operation=puffdiff --src_file=src.bin --dst_file=dst.bin \
+// --src_deflates_byte="2:7,10:2,12:3" --dst_deflates_byte="0:7,9:10,19:3" \
+// --patch_file=patch.bin
+// * hexdump -ve '" " 12/1 "0x%02x, " "\n"' patch.bin
+const uint8_t puffdiff_patch[] = {
+ 0x50, 0x55, 0x46, 0x31, 0x00, 0x00, 0x00, 0x51, 0x08, 0x01, 0x12, 0x27,
+ 0x0A, 0x04, 0x08, 0x10, 0x10, 0x32, 0x0A, 0x04, 0x08, 0x50, 0x10, 0x0A,
+ 0x0A, 0x04, 0x08, 0x60, 0x10, 0x12, 0x12, 0x04, 0x08, 0x10, 0x10, 0x58,
+ 0x12, 0x04, 0x08, 0x78, 0x10, 0x28, 0x12, 0x05, 0x08, 0xA8, 0x01, 0x10,
+ 0x38, 0x18, 0x1F, 0x1A, 0x24, 0x0A, 0x02, 0x10, 0x32, 0x0A, 0x04, 0x08,
+ 0x48, 0x10, 0x50, 0x0A, 0x05, 0x08, 0x98, 0x01, 0x10, 0x12, 0x12, 0x02,
+ 0x10, 0x58, 0x12, 0x04, 0x08, 0x70, 0x10, 0x58, 0x12, 0x05, 0x08, 0xC8,
+ 0x01, 0x10, 0x38, 0x18, 0x21, 0x42, 0x53, 0x44, 0x49, 0x46, 0x46, 0x34,
+ 0x30, 0x38, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x42, 0x5A, 0x68, 0x39, 0x31, 0x41, 0x59, 0x26, 0x53, 0x59, 0x65,
+ 0x29, 0x8C, 0x9B, 0x00, 0x00, 0x03, 0x60, 0x40, 0x7A, 0x0E, 0x08, 0x00,
+ 0x40, 0x00, 0x20, 0x00, 0x21, 0x22, 0x9A, 0x3D, 0x4F, 0x50, 0x40, 0x0C,
+ 0x3B, 0xC7, 0x9B, 0xB2, 0x21, 0x0E, 0xE9, 0x15, 0x98, 0x7A, 0x7C, 0x5D,
+ 0xC9, 0x14, 0xE1, 0x42, 0x41, 0x94, 0xA6, 0x32, 0x6C, 0x42, 0x5A, 0x68,
+ 0x39, 0x31, 0x41, 0x59, 0x26, 0x53, 0x59, 0xF1, 0x20, 0x5F, 0x0D, 0x00,
+ 0x00, 0x02, 0x41, 0x15, 0x42, 0x08, 0x20, 0x00, 0x40, 0x00, 0x00, 0x02,
+ 0x40, 0x00, 0x20, 0x00, 0x22, 0x3D, 0x23, 0x10, 0x86, 0x03, 0x96, 0x54,
+ 0x11, 0x16, 0x5F, 0x17, 0x72, 0x45, 0x38, 0x50, 0x90, 0xF1, 0x20, 0x5F,
+ 0x0D, 0x42, 0x5A, 0x68, 0x39, 0x31, 0x41, 0x59, 0x26, 0x53, 0x59, 0x07,
+ 0xD4, 0xCB, 0x6E, 0x00, 0x00, 0x00, 0x01, 0x00, 0x01, 0x00, 0x20, 0x00,
+ 0x21, 0x18, 0x46, 0x82, 0xEE, 0x48, 0xA7, 0x0A, 0x12, 0x00, 0xFA, 0x99,
+ 0x6D, 0xC0};
+
} // namespace
class DeltaPerformerTest : public ::testing::Test {
@@ -334,7 +389,8 @@
&fake_hardware_,
&mock_delegate_,
&install_plan_,
- &payload_};
+ &payload_,
+ false /* is_interactive*/};
};
TEST_F(DeltaPerformerTest, FullPayloadWriteTest) {
@@ -375,7 +431,7 @@
testing::Mock::VerifyAndClearExpectations(&mock_delegate_);
EXPECT_CALL(mock_delegate_, ShouldCancel(_))
.WillOnce(
- testing::DoAll(testing::SetArgumentPointee<0>(ErrorCode::kError),
+ testing::DoAll(testing::SetArgPointee<0>(ErrorCode::kError),
testing::Return(true)));
ApplyPayload(payload_data, "/dev/null", false);
@@ -485,6 +541,32 @@
EXPECT_EQ(expected_data, ApplyPayload(payload_data, source_path, true));
}
+TEST_F(DeltaPerformerTest, PuffdiffOperationTest) {
+ AnnotatedOperation aop;
+ *(aop.op.add_src_extents()) = ExtentForRange(0, 1);
+ *(aop.op.add_dst_extents()) = ExtentForRange(0, 1);
+ brillo::Blob puffdiff_payload(std::begin(puffdiff_patch),
+ std::end(puffdiff_patch));
+ aop.op.set_data_offset(0);
+ aop.op.set_data_length(puffdiff_payload.size());
+ aop.op.set_type(InstallOperation::PUFFDIFF);
+ brillo::Blob src(std::begin(src_deflates), std::end(src_deflates));
+ src.resize(4096); // block size
+ brillo::Blob src_hash;
+ EXPECT_TRUE(HashCalculator::RawHashOfData(src, &src_hash));
+ aop.op.set_src_sha256_hash(src_hash.data(), src_hash.size());
+
+ brillo::Blob payload_data = GeneratePayload(puffdiff_payload, {aop}, false);
+
+ string source_path;
+ EXPECT_TRUE(utils::MakeTempFile("Source-XXXXXX", &source_path, nullptr));
+ ScopedPathUnlinker path_unlinker(source_path);
+ EXPECT_TRUE(utils::WriteFile(source_path.c_str(), src.data(), src.size()));
+
+ brillo::Blob dst(std::begin(dst_deflates), std::end(dst_deflates));
+ EXPECT_EQ(dst, ApplyPayload(payload_data, source_path, true));
+}
+
TEST_F(DeltaPerformerTest, SourceHashMismatchTest) {
brillo::Blob expected_data = {'f', 'o', 'o'};
brillo::Blob actual_data = {'b', 'a', 'r'};
@@ -766,8 +848,8 @@
base::ScopedTempDir temp_dir;
ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
- string non_existing_file = temp_dir.path().Append("non-existing").value();
- string existing_file = temp_dir.path().Append("existing").value();
+ string non_existing_file = temp_dir.GetPath().Append("non-existing").value();
+ string existing_file = temp_dir.GetPath().Append("existing").value();
EXPECT_EQ(0, System(base::StringPrintf("touch %s", existing_file.c_str())));
// Non-official build, non-existing public-key, key in response -> true
@@ -832,20 +914,4 @@
EXPECT_EQ(DeltaPerformer::kSupportedMajorPayloadVersion, major_version);
}
-// Test that we recognize our own zlib compressor implementation as supported.
-// All other equivalent implementations should be added to
-// kCompatibleZlibFingerprint.
-TEST_F(DeltaPerformerTest, ZlibFingerprintMatch) {
- string fingerprint;
-#ifdef __ANDROID__
- const std::string kZlibFingerprintPath =
- test_utils::GetBuildArtifactsPath("zlib_fingerprint");
-#else
- const std::string kZlibFingerprintPath = "/etc/zlib_fingerprint";
-#endif // __ANDROID__
- EXPECT_TRUE(base::ReadFileToString(base::FilePath(kZlibFingerprintPath),
- &fingerprint));
- EXPECT_TRUE(utils::IsZlibCompatible(fingerprint));
-}
-
} // namespace chromeos_update_engine
diff --git a/payload_consumer/download_action.cc b/payload_consumer/download_action.cc
index c3a5016..f1b6e33 100644
--- a/payload_consumer/download_action.cc
+++ b/payload_consumer/download_action.cc
@@ -20,9 +20,10 @@
#include <algorithm>
#include <string>
-#include <vector>
#include <base/files/file_path.h>
+#include <base/metrics/statistics_recorder.h>
+#include <base/strings/stringprintf.h>
#include "update_engine/common/action_pipe.h"
#include "update_engine/common/boot_control_interface.h"
@@ -35,7 +36,6 @@
using base::FilePath;
using std::string;
-using std::vector;
namespace chromeos_update_engine {
@@ -43,17 +43,21 @@
BootControlInterface* boot_control,
HardwareInterface* hardware,
SystemState* system_state,
- HttpFetcher* http_fetcher)
+ HttpFetcher* http_fetcher,
+ bool is_interactive)
: prefs_(prefs),
boot_control_(boot_control),
hardware_(hardware),
system_state_(system_state),
http_fetcher_(new MultiRangeHttpFetcher(http_fetcher)),
+ is_interactive_(is_interactive),
writer_(nullptr),
code_(ErrorCode::kSuccess),
delegate_(nullptr),
p2p_sharing_fd_(-1),
- p2p_visible_(true) {}
+ p2p_visible_(true) {
+ base::StatisticsRecorder::Initialize();
+}
DownloadAction::~DownloadAction() {}
@@ -173,6 +177,7 @@
install_plan_.Dump();
bytes_received_ = 0;
+ bytes_received_previous_payloads_ = 0;
bytes_total_ = 0;
for (const auto& payload : install_plan_.payloads)
bytes_total_ += payload.size;
@@ -240,8 +245,13 @@
if (writer_ && writer_ != delta_performer_.get()) {
LOG(INFO) << "Using writer for test.";
} else {
- delta_performer_.reset(new DeltaPerformer(
- prefs_, boot_control_, hardware_, delegate_, &install_plan_, payload_));
+ delta_performer_.reset(new DeltaPerformer(prefs_,
+ boot_control_,
+ hardware_,
+ delegate_,
+ &install_plan_,
+ payload_,
+ is_interactive_));
writer_ = delta_performer_.get();
}
if (system_state_ != nullptr) {
@@ -317,8 +327,10 @@
}
bytes_received_ += length;
+ uint64_t bytes_downloaded_total =
+ bytes_received_previous_payloads_ + bytes_received_;
if (delegate_ && download_active_) {
- delegate_->BytesReceived(length, bytes_received_, bytes_total_);
+ delegate_->BytesReceived(length, bytes_downloaded_total, bytes_total_);
}
if (writer_ && !writer_->Write(bytes, length, &code_)) {
if (code_ != ErrorCode::kSuccess) {
@@ -349,31 +361,44 @@
void DownloadAction::TransferComplete(HttpFetcher* fetcher, bool successful) {
if (writer_) {
LOG_IF(WARNING, writer_->Close() != 0) << "Error closing the writer.";
- writer_ = nullptr;
+ if (delta_performer_.get() == writer_) {
+ // No delta_performer_ in tests, so leave the test writer in place.
+ writer_ = nullptr;
+ }
}
download_active_ = false;
ErrorCode code =
successful ? ErrorCode::kSuccess : ErrorCode::kDownloadTransferError;
- if (code == ErrorCode::kSuccess && delta_performer_.get()) {
- if (!payload_->already_applied)
+ if (code == ErrorCode::kSuccess) {
+ if (delta_performer_ && !payload_->already_applied)
code = delta_performer_->VerifyPayload(payload_->hash, payload_->size);
- if (code != ErrorCode::kSuccess) {
+ if (code == ErrorCode::kSuccess) {
+ if (payload_ < &install_plan_.payloads.back() &&
+ system_state_->payload_state()->NextPayload()) {
+ LOG(INFO) << "Incrementing to next payload";
+ // No need to reset if this payload was already applied.
+ if (delta_performer_ && !payload_->already_applied)
+ DeltaPerformer::ResetUpdateProgress(prefs_, false);
+ // Start downloading next payload.
+ bytes_received_previous_payloads_ += payload_->size;
+ payload_++;
+ install_plan_.download_url =
+ system_state_->payload_state()->GetCurrentUrl();
+ StartDownloading();
+ return;
+ }
+ // Log UpdateEngine.DownloadAction.* histograms to help diagnose
+ // long-blocking operations.
+ std::string histogram_output;
+ base::StatisticsRecorder::WriteGraph(
+ "UpdateEngine.DownloadAction.", &histogram_output);
+ LOG(INFO) << histogram_output;
+ } else {
LOG(ERROR) << "Download of " << install_plan_.download_url
<< " failed due to payload verification error.";
// Delete p2p file, if applicable.
if (!p2p_file_id_.empty())
CloseP2PSharingFd(true);
- } else if (payload_ < &install_plan_.payloads.back() &&
- system_state_->payload_state()->NextPayload()) {
- // No need to reset if this payload was already applied.
- if (!payload_->already_applied)
- DeltaPerformer::ResetUpdateProgress(prefs_, false);
- // Start downloading next payload.
- payload_++;
- install_plan_.download_url =
- system_state_->payload_state()->GetCurrentUrl();
- StartDownloading();
- return;
}
}
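With multiple payloads in one install plan, the delegate's BytesReceived() total must stay monotonic across payload boundaries, which is why TransferComplete() above folds the finished payload's size into bytes_received_previous_payloads_ before starting the next download. A small worked sketch of that accounting, using the same shape of numbers as MultiPayloadProgressTest below (the chunk size here is hypothetical):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint64_t kChunk = 8 * 1024;  // Stand-in for kMockHttpFetcherChunkSize.
      const uint64_t payload1 = 4 * kChunk + 256;
      const uint64_t payload2 = 2 * kChunk;

      uint64_t previous = 0;  // bytes_received_previous_payloads_
      uint64_t received = 0;  // bytes_received_ (per payload)

      received = payload1;   // First payload fully downloaded.
      previous += payload1;  // TransferComplete() moves on to the next payload.
      received = kChunk;     // One chunk into the second payload.

      // The delegate sees previous + received, i.e. 5 * kChunk + 256 out of
      // payload1 + payload2 total bytes.
      std::printf("%llu of %llu bytes\n",
                  static_cast<unsigned long long>(previous + received),
                  static_cast<unsigned long long>(payload1 + payload2));
      return 0;
    }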
diff --git a/payload_consumer/download_action.h b/payload_consumer/download_action.h
index d0e6000..81d7333 100644
--- a/payload_consumer/download_action.h
+++ b/payload_consumer/download_action.h
@@ -73,12 +73,13 @@
// Takes ownership of the passed in HttpFetcher. Useful for testing.
// A good calling pattern is:
// DownloadAction(prefs, boot_contol, hardware, system_state,
- // new WhateverHttpFetcher);
+ // new WhateverHttpFetcher, false);
DownloadAction(PrefsInterface* prefs,
BootControlInterface* boot_control,
HardwareInterface* hardware,
SystemState* system_state,
- HttpFetcher* http_fetcher);
+ HttpFetcher* http_fetcher,
+ bool is_interactive);
~DownloadAction() override;
// InstallPlanAction overrides.
@@ -154,6 +155,11 @@
// Pointer to the MultiRangeHttpFetcher that does the http work.
std::unique_ptr<MultiRangeHttpFetcher> http_fetcher_;
+ // If |true|, the update is user-initiated (vs. periodic update checks), so
+ // |delta_performer_| can decide not to use the O_DSYNC flag for a faster
+ // update.
+ bool is_interactive_;
+
// The FileWriter that downloaded data should be written to. It will
// either point to *decompressing_file_writer_ or *delta_performer_.
FileWriter* writer_;
@@ -166,7 +172,8 @@
// For reporting status to outsiders
DownloadActionDelegate* delegate_;
- uint64_t bytes_received_{0};
+ uint64_t bytes_received_{0}; // per file/range
+ uint64_t bytes_received_previous_payloads_{0};
uint64_t bytes_total_{0};
bool download_active_{false};
diff --git a/payload_consumer/download_action_unittest.cc b/payload_consumer/download_action_unittest.cc
index 7d3ac6c..21ce461 100644
--- a/payload_consumer/download_action_unittest.cc
+++ b/payload_consumer/download_action_unittest.cc
@@ -22,14 +22,12 @@
#include <memory>
#include <string>
#include <utility>
-#include <vector>
#include <base/bind.h>
#include <base/files/file_path.h>
#include <base/files/file_util.h>
#include <base/location.h>
#include <base/strings/stringprintf.h>
-#include <brillo/bind_lambda.h>
#include <brillo/message_loops/fake_message_loop.h>
#include <brillo/message_loops/message_loop.h>
@@ -41,6 +39,7 @@
#include "update_engine/common/utils.h"
#include "update_engine/fake_p2p_manager_configuration.h"
#include "update_engine/fake_system_state.h"
+#include "update_engine/mock_file_writer.h"
#include "update_engine/payload_consumer/mock_download_action.h"
#include "update_engine/update_manager/fake_update_manager.h"
@@ -51,11 +50,11 @@
using base::WriteFile;
using std::string;
using std::unique_ptr;
-using std::vector;
using test_utils::ScopedTempFile;
using testing::AtLeast;
using testing::InSequence;
using testing::Return;
+using testing::SetArgPointee;
using testing::_;
class DownloadActionTest : public ::testing::Test { };
@@ -165,7 +164,8 @@
fake_system_state.boot_control(),
fake_system_state.hardware(),
&fake_system_state,
- http_fetcher);
+ http_fetcher,
+ false /* is_interactive */);
download_action.SetTestFileWriter(&writer);
BondActions(&feeder_action, &download_action);
MockDownloadActionDelegate download_delegate;
@@ -242,6 +242,93 @@
false); // use_download_delegate
}
+TEST(DownloadActionTest, MultiPayloadProgressTest) {
+ std::vector<brillo::Blob> payload_datas;
+ // the first payload must be the largest, as it's the actual payload used by
+ // the MockHttpFetcher for all downloaded data.
+ payload_datas.emplace_back(4 * kMockHttpFetcherChunkSize + 256);
+ payload_datas.emplace_back(2 * kMockHttpFetcherChunkSize);
+ brillo::FakeMessageLoop loop(nullptr);
+ loop.SetAsCurrent();
+ FakeSystemState fake_system_state;
+ EXPECT_CALL(*fake_system_state.mock_payload_state(), NextPayload())
+ .WillOnce(Return(true));
+
+ MockFileWriter mock_file_writer;
+ EXPECT_CALL(mock_file_writer, Close()).WillRepeatedly(Return(0));
+ EXPECT_CALL(mock_file_writer, Write(_, _, _))
+ .WillRepeatedly(
+ DoAll(SetArgPointee<2>(ErrorCode::kSuccess), Return(true)));
+
+ InstallPlan install_plan;
+ uint64_t total_expected_download_size{0};
+ for (const auto& data : payload_datas) {
+ uint64_t size = data.size();
+ install_plan.payloads.push_back(
+ {.size = size, .type = InstallPayloadType::kFull});
+ total_expected_download_size += size;
+ }
+ ObjectFeederAction<InstallPlan> feeder_action;
+ feeder_action.set_obj(install_plan);
+ MockPrefs prefs;
+ MockHttpFetcher* http_fetcher = new MockHttpFetcher(
+ payload_datas[0].data(), payload_datas[0].size(), nullptr);
+ // Takes ownership of the passed-in HttpFetcher.
+ DownloadAction download_action(&prefs,
+ fake_system_state.boot_control(),
+ fake_system_state.hardware(),
+ &fake_system_state,
+ http_fetcher,
+ false /* is_interactive */);
+ download_action.SetTestFileWriter(&mock_file_writer);
+ BondActions(&feeder_action, &download_action);
+ MockDownloadActionDelegate download_delegate;
+ {
+ InSequence s;
+ download_action.set_delegate(&download_delegate);
+ // these are hand-computed based on the payloads specified above
+ EXPECT_CALL(download_delegate,
+ BytesReceived(kMockHttpFetcherChunkSize,
+ kMockHttpFetcherChunkSize,
+ total_expected_download_size));
+ EXPECT_CALL(download_delegate,
+ BytesReceived(kMockHttpFetcherChunkSize,
+ kMockHttpFetcherChunkSize * 2,
+ total_expected_download_size));
+ EXPECT_CALL(download_delegate,
+ BytesReceived(kMockHttpFetcherChunkSize,
+ kMockHttpFetcherChunkSize * 3,
+ total_expected_download_size));
+ EXPECT_CALL(download_delegate,
+ BytesReceived(kMockHttpFetcherChunkSize,
+ kMockHttpFetcherChunkSize * 4,
+ total_expected_download_size));
+ EXPECT_CALL(download_delegate,
+ BytesReceived(256,
+ kMockHttpFetcherChunkSize * 4 + 256,
+ total_expected_download_size));
+ EXPECT_CALL(download_delegate,
+ BytesReceived(kMockHttpFetcherChunkSize,
+ kMockHttpFetcherChunkSize * 5 + 256,
+ total_expected_download_size));
+ EXPECT_CALL(download_delegate,
+ BytesReceived(kMockHttpFetcherChunkSize,
+ total_expected_download_size,
+ total_expected_download_size));
+ }
+ ActionProcessor processor;
+ processor.EnqueueAction(&feeder_action);
+ processor.EnqueueAction(&download_action);
+
+ loop.PostTask(
+ FROM_HERE,
+ base::Bind(
+ [](ActionProcessor* processor) { processor->StartProcessing(); },
+ base::Unretained(&processor)));
+ loop.Run();
+ EXPECT_FALSE(loop.PendingTasks());
+}
+
namespace {
class TerminateEarlyTestProcessorDelegate : public ActionProcessorDelegate {
public:
@@ -281,7 +368,8 @@
fake_system_state_.boot_control(),
fake_system_state_.hardware(),
&fake_system_state_,
- new MockHttpFetcher(data.data(), data.size(), nullptr));
+ new MockHttpFetcher(data.data(), data.size(), nullptr),
+ false /* is_interactive */);
download_action.SetTestFileWriter(&writer);
MockDownloadActionDelegate download_delegate;
if (use_download_delegate) {
@@ -382,7 +470,8 @@
fake_system_state_.boot_control(),
fake_system_state_.hardware(),
&fake_system_state_,
- new MockHttpFetcher("x", 1, nullptr));
+ new MockHttpFetcher("x", 1, nullptr),
+ false /* is_interactive */);
download_action.SetTestFileWriter(&writer);
DownloadActionTestAction test_action;
@@ -471,7 +560,8 @@
fake_system_state_.boot_control(),
fake_system_state_.hardware(),
&fake_system_state_,
- http_fetcher_));
+ http_fetcher_,
+ false /* is_interactive */));
download_action_->SetTestFileWriter(&writer);
BondActions(&feeder_action, download_action_.get());
DownloadActionTestProcessorDelegate delegate(ErrorCode::kSuccess);
diff --git a/payload_consumer/extent_reader.cc b/payload_consumer/extent_reader.cc
new file mode 100644
index 0000000..96ea918
--- /dev/null
+++ b/payload_consumer/extent_reader.cc
@@ -0,0 +1,98 @@
+//
+// Copyright (C) 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_consumer/extent_reader.h"
+
+#include <sys/types.h>
+#include <unistd.h>
+
+#include "update_engine/common/utils.h"
+#include "update_engine/payload_consumer/payload_constants.h"
+
+using google::protobuf::RepeatedPtrField;
+
+namespace chromeos_update_engine {
+
+bool DirectExtentReader::Init(FileDescriptorPtr fd,
+ const RepeatedPtrField<Extent>& extents,
+ uint32_t block_size) {
+ fd_ = fd;
+ extents_ = extents;
+ block_size_ = block_size;
+ cur_extent_ = extents_.begin();
+
+ extents_upper_bounds_.reserve(extents_.size() + 1);
+ // We add this pad as the first element to not bother with boundary checks
+ // later.
+ extents_upper_bounds_.emplace_back(0);
+ for (const auto& extent : extents_) {
+ total_size_ += extent.num_blocks() * block_size_;
+ extents_upper_bounds_.emplace_back(total_size_);
+ }
+ return true;
+}
+
+bool DirectExtentReader::Seek(uint64_t offset) {
+ TEST_AND_RETURN_FALSE(offset <= total_size_);
+ if (offset_ == offset) {
+ return true;
+ }
+ // The first item is zero and upper_bound never returns it because it always
+ // returns the first item that is strictly greater than the given value.
+ auto extent_idx = std::upper_bound(
+ extents_upper_bounds_.begin(), extents_upper_bounds_.end(), offset) -
+ extents_upper_bounds_.begin() - 1;
+ cur_extent_ = std::next(extents_.begin(), extent_idx);
+ offset_ = offset;
+ cur_extent_bytes_read_ = offset_ - extents_upper_bounds_[extent_idx];
+ return true;
+}
+
+bool DirectExtentReader::Read(void* buffer, size_t count) {
+ auto bytes = reinterpret_cast<uint8_t*>(buffer);
+ uint64_t bytes_read = 0;
+ while (bytes_read < count) {
+ if (cur_extent_ == extents_.end()) {
+ TEST_AND_RETURN_FALSE(bytes_read == count);
+ }
+ uint64_t cur_extent_bytes_left =
+ cur_extent_->num_blocks() * block_size_ - cur_extent_bytes_read_;
+ uint64_t bytes_to_read =
+ std::min(count - bytes_read, cur_extent_bytes_left);
+
+ ssize_t out_bytes_read;
+ TEST_AND_RETURN_FALSE(utils::PReadAll(
+ fd_,
+ bytes + bytes_read,
+ bytes_to_read,
+ cur_extent_->start_block() * block_size_ + cur_extent_bytes_read_,
+ &out_bytes_read));
+ TEST_AND_RETURN_FALSE(out_bytes_read ==
+ static_cast<ssize_t>(bytes_to_read));
+
+ bytes_read += bytes_to_read;
+ cur_extent_bytes_read_ += bytes_to_read;
+ offset_ += bytes_to_read;
+ if (cur_extent_bytes_read_ == cur_extent_->num_blocks() * block_size_) {
+ // We have to advance |cur_extent_|.
+ cur_extent_++;
+ cur_extent_bytes_read_ = 0;
+ }
+ }
+ return true;
+}
+
+} // namespace chromeos_update_engine
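DirectExtentReader::Seek() above runs in O(log n) because Init() precomputes extents_upper_bounds_, a prefix-sum vector of the concatenated extent sizes with a leading zero pad, and Seek() locates the containing extent with std::upper_bound. A standalone sketch of that lookup with hypothetical extent sizes (block size 8, as in the unit tests below):

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    int main() {
      const uint64_t kBlockSize = 8;
      // Three hypothetical extents of 2, 3 and 1 blocks.
      const std::vector<uint64_t> extent_blocks = {2, 3, 1};

      // Same construction as DirectExtentReader::Init(): {0, 16, 40, 48}.
      std::vector<uint64_t> upper_bounds = {0};
      uint64_t total = 0;
      for (uint64_t blocks : extent_blocks) {
        total += blocks * kBlockSize;
        upper_bounds.push_back(total);
      }

      // Same lookup as DirectExtentReader::Seek(20).
      const uint64_t offset = 20;
      auto extent_idx =
          std::upper_bound(upper_bounds.begin(), upper_bounds.end(), offset) -
          upper_bounds.begin() - 1;

      // Prints "extent 1, 4 bytes already consumed in it".
      std::printf("extent %lld, %llu bytes already consumed in it\n",
                  static_cast<long long>(extent_idx),
                  static_cast<unsigned long long>(offset - upper_bounds[extent_idx]));
      return 0;
    }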
diff --git a/payload_consumer/extent_reader.h b/payload_consumer/extent_reader.h
new file mode 100644
index 0000000..3f9e4c8
--- /dev/null
+++ b/payload_consumer/extent_reader.h
@@ -0,0 +1,82 @@
+//
+// Copyright (C) 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_PAYLOAD_CONSUMER_EXTENT_READER_H_
+#define UPDATE_ENGINE_PAYLOAD_CONSUMER_EXTENT_READER_H_
+
+#include <vector>
+
+#include "update_engine/payload_consumer/file_descriptor.h"
+#include "update_engine/update_metadata.pb.h"
+
+namespace chromeos_update_engine {
+
+// ExtentReader is an abstract class that reads from a given file descriptor at
+// the given extents.
+class ExtentReader {
+ public:
+ virtual ~ExtentReader() = default;
+
+ // Initializes |ExtentReader|
+ virtual bool Init(FileDescriptorPtr fd,
+ const google::protobuf::RepeatedPtrField<Extent>& extents,
+ uint32_t block_size) = 0;
+
+ // Seeks to the given |offset| assuming all extents are concatenated together.
+ virtual bool Seek(uint64_t offset) = 0;
+
+ // Returns true on success.
+ virtual bool Read(void* buffer, size_t count) = 0;
+};
+
+// DirectExtentReader is probably the simplest ExtentReader implementation.
+// It reads the data directly from the extents.
+class DirectExtentReader : public ExtentReader {
+ public:
+ DirectExtentReader() = default;
+ ~DirectExtentReader() override = default;
+
+ bool Init(FileDescriptorPtr fd,
+ const google::protobuf::RepeatedPtrField<Extent>& extents,
+ uint32_t block_size) override;
+ bool Seek(uint64_t offset) override;
+ bool Read(void* bytes, size_t count) override;
+
+ private:
+ FileDescriptorPtr fd_{nullptr};
+ google::protobuf::RepeatedPtrField<Extent> extents_;
+ size_t block_size_{0};
+
+ // Current extent being read from |fd_|.
+ google::protobuf::RepeatedPtrField<Extent>::iterator cur_extent_;
+
+ // Bytes read from |cur_extent_| thus far.
+ uint64_t cur_extent_bytes_read_{0};
+
+ // Offset assuming all extents are concatenated.
+ uint64_t offset_{0};
+
+ // Cumulative upper bounds for |extents_| (used to speed up Seek()) if we
+ // assume all extents are concatenated.
+ std::vector<uint64_t> extents_upper_bounds_;
+ uint64_t total_size_{0};
+
+ DISALLOW_COPY_AND_ASSIGN(DirectExtentReader);
+};
+
+} // namespace chromeos_update_engine
+
+#endif // UPDATE_ENGINE_PAYLOAD_CONSUMER_EXTENT_READER_H_
diff --git a/payload_consumer/extent_reader_unittest.cc b/payload_consumer/extent_reader_unittest.cc
new file mode 100644
index 0000000..b7059bc
--- /dev/null
+++ b/payload_consumer/extent_reader_unittest.cc
@@ -0,0 +1,170 @@
+//
+// Copyright (C) 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_consumer/extent_reader.h"
+
+#include <fcntl.h>
+
+#include <algorithm>
+#include <string>
+#include <vector>
+
+#include <brillo/secure_blob.h>
+#include <gtest/gtest.h>
+
+#include "update_engine/common/test_utils.h"
+#include "update_engine/common/utils.h"
+#include "update_engine/payload_consumer/file_descriptor.h"
+#include "update_engine/payload_consumer/payload_constants.h"
+#include "update_engine/payload_generator/extent_ranges.h"
+#include "update_engine/payload_generator/extent_utils.h"
+
+using chromeos_update_engine::test_utils::ExpectVectorsEq;
+using std::min;
+using std::string;
+using std::vector;
+
+namespace chromeos_update_engine {
+
+namespace {
+const size_t kBlockSize = 8;
+const size_t kRandomIterations = 1000;
+} // namespace
+
+class ExtentReaderTest : public ::testing::Test {
+ protected:
+ void SetUp() override {
+ sample_.resize(4096 * 10);
+ srand(time(nullptr));
+ unsigned int rand_seed;
+ for (size_t i = 0; i < sample_.size(); i++) {
+ sample_[i] = rand_r(&rand_seed) % 256;
+ }
+ ASSERT_TRUE(utils::WriteFile(
+ temp_file_.path().c_str(), sample_.data(), sample_.size()));
+
+ fd_.reset(new EintrSafeFileDescriptor());
+ ASSERT_TRUE(fd_->Open(temp_file_.path().c_str(), O_RDONLY, 0600));
+ }
+ void TearDown() override { fd_->Close(); }
+
+ void ReadExtents(vector<Extent> extents, brillo::Blob* blob) {
+ blob->clear();
+ for (const auto& extent : extents) {
+ blob->insert(
+ blob->end(),
+ &sample_[extent.start_block() * kBlockSize],
+ &sample_[(extent.start_block() + extent.num_blocks()) * kBlockSize]);
+ }
+ }
+
+ FileDescriptorPtr fd_;
+ test_utils::ScopedTempFile temp_file_{"ExtentReaderTest-file.XXXXXX"};
+ brillo::Blob sample_;
+};
+
+TEST_F(ExtentReaderTest, SimpleTest) {
+ vector<Extent> extents = {ExtentForRange(1, 1)};
+ DirectExtentReader reader;
+ EXPECT_TRUE(reader.Init(fd_, {extents.begin(), extents.end()}, kBlockSize));
+ EXPECT_TRUE(reader.Seek(0));
+ brillo::Blob blob1(utils::BlocksInExtents(extents) * kBlockSize);
+ EXPECT_TRUE(reader.Read(blob1.data(), blob1.size()));
+ brillo::Blob blob2;
+ ReadExtents(extents, &blob2);
+ ExpectVectorsEq(blob1, blob2);
+}
+
+TEST_F(ExtentReaderTest, ZeroExtentLengthTest) {
+ vector<Extent> extents = {ExtentForRange(1, 0)};
+ DirectExtentReader reader;
+ EXPECT_TRUE(reader.Init(fd_, {extents.begin(), extents.end()}, kBlockSize));
+ EXPECT_TRUE(reader.Seek(0));
+ brillo::Blob blob(1);
+ EXPECT_TRUE(reader.Read(blob.data(), 0));
+ EXPECT_FALSE(reader.Read(blob.data(), 1));
+}
+
+TEST_F(ExtentReaderTest, NoExtentTest) {
+ DirectExtentReader reader;
+ EXPECT_TRUE(reader.Init(fd_, {}, kBlockSize));
+ EXPECT_TRUE(reader.Seek(0));
+ brillo::Blob blob(1);
+ EXPECT_TRUE(reader.Read(blob.data(), 0));
+ EXPECT_FALSE(reader.Read(blob.data(), 1));
+}
+
+TEST_F(ExtentReaderTest, OverflowExtentTest) {
+ vector<Extent> extents = {ExtentForRange(1, 1)};
+ DirectExtentReader reader;
+ EXPECT_TRUE(reader.Init(fd_, {extents.begin(), extents.end()}, kBlockSize));
+ EXPECT_TRUE(reader.Seek(0));
+ brillo::Blob blob(utils::BlocksInExtents(extents) * kBlockSize + 1);
+ EXPECT_FALSE(reader.Read(blob.data(), blob.size()));
+}
+
+TEST_F(ExtentReaderTest, SeekOverflow1Test) {
+ vector<Extent> extents = {ExtentForRange(1, 0)};
+ DirectExtentReader reader;
+ EXPECT_TRUE(reader.Init(fd_, {extents.begin(), extents.end()}, kBlockSize));
+ EXPECT_TRUE(reader.Seek(0));
+ EXPECT_FALSE(reader.Seek(1));
+}
+
+TEST_F(ExtentReaderTest, SeekOverflow2Test) {
+ DirectExtentReader reader;
+ reader.Init(fd_, {}, kBlockSize);
+ EXPECT_TRUE(reader.Seek(0));
+ EXPECT_FALSE(reader.Seek(1));
+}
+
+TEST_F(ExtentReaderTest, SeekOverflow3Test) {
+ vector<Extent> extents = {ExtentForRange(1, 1)};
+ DirectExtentReader reader;
+ EXPECT_TRUE(reader.Init(fd_, {extents.begin(), extents.end()}, kBlockSize));
+ // Seeking to the end of the extents should be fine as long as nothing is read.
+ EXPECT_TRUE(reader.Seek(kBlockSize));
+ EXPECT_FALSE(reader.Seek(kBlockSize + 1));
+}
+
+TEST_F(ExtentReaderTest, RandomReadTest) {
+ vector<Extent> extents = {ExtentForRange(0, 0),
+ ExtentForRange(1, 1),
+ ExtentForRange(3, 0),
+ ExtentForRange(4, 2),
+ ExtentForRange(7, 1)};
+ DirectExtentReader reader;
+ EXPECT_TRUE(reader.Init(fd_, {extents.begin(), extents.end()}, kBlockSize));
+
+ brillo::Blob result;
+ ReadExtents(extents, &result);
+
+ brillo::Blob blob(utils::BlocksInExtents(extents) * kBlockSize);
+ srand(time(nullptr));
+ uint32_t rand_seed;
+ for (size_t idx = 0; idx < kRandomIterations; idx++) {
+ // zero to full size available.
+ size_t start = rand_r(&rand_seed) % blob.size();
+ size_t size = rand_r(&rand_seed) % (blob.size() - start);
+ EXPECT_TRUE(reader.Seek(start));
+ EXPECT_TRUE(reader.Read(blob.data(), size));
+ for (size_t i = 0; i < size; i++) {
+ ASSERT_EQ(blob[i], result[start + i]);
+ }
+ }
+}
+
+} // namespace chromeos_update_engine
diff --git a/payload_consumer/extent_writer.cc b/payload_consumer/extent_writer.cc
index 5501e22..c5776ec 100644
--- a/payload_consumer/extent_writer.cc
+++ b/payload_consumer/extent_writer.cc
@@ -34,21 +34,19 @@
return true;
const char* c_bytes = reinterpret_cast<const char*>(bytes);
size_t bytes_written = 0;
- while (count - bytes_written > 0) {
- TEST_AND_RETURN_FALSE(next_extent_index_ < extents_.size());
- uint64_t bytes_remaining_next_extent =
- extents_[next_extent_index_].num_blocks() * block_size_ -
- extent_bytes_written_;
- CHECK_NE(bytes_remaining_next_extent, static_cast<uint64_t>(0));
+ while (bytes_written < count) {
+ TEST_AND_RETURN_FALSE(cur_extent_ != extents_.end());
+ uint64_t bytes_remaining_cur_extent =
+ cur_extent_->num_blocks() * block_size_ - extent_bytes_written_;
+ CHECK_NE(bytes_remaining_cur_extent, static_cast<uint64_t>(0));
size_t bytes_to_write =
static_cast<size_t>(min(static_cast<uint64_t>(count - bytes_written),
- bytes_remaining_next_extent));
+ bytes_remaining_cur_extent));
TEST_AND_RETURN_FALSE(bytes_to_write > 0);
- if (extents_[next_extent_index_].start_block() != kSparseHole) {
+ if (cur_extent_->start_block() != kSparseHole) {
const off64_t offset =
- extents_[next_extent_index_].start_block() * block_size_ +
- extent_bytes_written_;
+ cur_extent_->start_block() * block_size_ + extent_bytes_written_;
TEST_AND_RETURN_FALSE_ERRNO(fd_->Seek(offset, SEEK_SET) !=
static_cast<off64_t>(-1));
TEST_AND_RETURN_FALSE(
@@ -56,13 +54,12 @@
}
bytes_written += bytes_to_write;
extent_bytes_written_ += bytes_to_write;
- if (bytes_remaining_next_extent == bytes_to_write) {
+ if (bytes_remaining_cur_extent == bytes_to_write) {
// We filled this extent
- CHECK_EQ(extent_bytes_written_,
- extents_[next_extent_index_].num_blocks() * block_size_);
+ CHECK_EQ(extent_bytes_written_, cur_extent_->num_blocks() * block_size_);
// move to next extent
extent_bytes_written_ = 0;
- next_extent_index_++;
+ cur_extent_++;
}
}
return true;
diff --git a/payload_consumer/extent_writer.h b/payload_consumer/extent_writer.h
index 6484ebf..2c15861 100644
--- a/payload_consumer/extent_writer.h
+++ b/payload_consumer/extent_writer.h
@@ -17,7 +17,8 @@
#ifndef UPDATE_ENGINE_PAYLOAD_CONSUMER_EXTENT_WRITER_H_
#define UPDATE_ENGINE_PAYLOAD_CONSUMER_EXTENT_WRITER_H_
-#include <vector>
+#include <memory>
+#include <utility>
#include <base/logging.h>
#include <brillo/secure_blob.h>
@@ -40,7 +41,7 @@
// Returns true on success.
virtual bool Init(FileDescriptorPtr fd,
- const std::vector<Extent>& extents,
+ const google::protobuf::RepeatedPtrField<Extent>& extents,
uint32_t block_size) = 0;
// Returns true on success.
@@ -66,11 +67,12 @@
~DirectExtentWriter() override = default;
bool Init(FileDescriptorPtr fd,
- const std::vector<Extent>& extents,
+ const google::protobuf::RepeatedPtrField<Extent>& extents,
uint32_t block_size) override {
fd_ = fd;
block_size_ = block_size;
extents_ = extents;
+ cur_extent_ = extents_.begin();
return true;
}
bool Write(const void* bytes, size_t count) override;
@@ -80,11 +82,11 @@
FileDescriptorPtr fd_{nullptr};
size_t block_size_{0};
- // Bytes written into next_extent_index_ thus far
+ // Bytes written into |cur_extent_| thus far.
uint64_t extent_bytes_written_{0};
- std::vector<Extent> extents_;
- // The next call to write should correspond to extents_[next_extent_index_]
- std::vector<Extent>::size_type next_extent_index_{0};
+ google::protobuf::RepeatedPtrField<Extent> extents_;
+ // The next call to Write() should correspond to |cur_extent_|.
+ google::protobuf::RepeatedPtrField<Extent>::iterator cur_extent_;
};
// Takes an underlying ExtentWriter to which all operations are delegated.
@@ -100,7 +102,7 @@
~ZeroPadExtentWriter() override = default;
bool Init(FileDescriptorPtr fd,
- const std::vector<Extent>& extents,
+ const google::protobuf::RepeatedPtrField<Extent>& extents,
uint32_t block_size) override {
block_size_ = block_size;
return underlying_extent_writer_->Init(fd, extents, block_size);
diff --git a/payload_consumer/extent_writer_unittest.cc b/payload_consumer/extent_writer_unittest.cc
index 24d238e..48b27cb 100644
--- a/payload_consumer/extent_writer_unittest.cc
+++ b/payload_consumer/extent_writer_unittest.cc
@@ -19,16 +19,17 @@
#include <fcntl.h>
#include <algorithm>
+#include <memory>
#include <string>
#include <vector>
-#include <brillo/make_unique_ptr.h>
#include <brillo/secure_blob.h>
#include <gtest/gtest.h>
#include "update_engine/common/test_utils.h"
#include "update_engine/common/utils.h"
#include "update_engine/payload_consumer/payload_constants.h"
+#include "update_engine/payload_generator/extent_ranges.h"
using chromeos_update_engine::test_utils::ExpectVectorsEq;
using std::min;
@@ -65,16 +66,11 @@
};
TEST_F(ExtentWriterTest, SimpleTest) {
- vector<Extent> extents;
- Extent extent;
- extent.set_start_block(1);
- extent.set_num_blocks(1);
- extents.push_back(extent);
-
+ vector<Extent> extents = {ExtentForRange(1, 1)};
const string bytes = "1234";
-
DirectExtentWriter direct_writer;
- EXPECT_TRUE(direct_writer.Init(fd_, extents, kBlockSize));
+ EXPECT_TRUE(
+ direct_writer.Init(fd_, {extents.begin(), extents.end()}, kBlockSize));
EXPECT_TRUE(direct_writer.Write(bytes.data(), bytes.size()));
EXPECT_TRUE(direct_writer.End());
@@ -91,14 +87,10 @@
}
TEST_F(ExtentWriterTest, ZeroLengthTest) {
- vector<Extent> extents;
- Extent extent;
- extent.set_start_block(1);
- extent.set_num_blocks(1);
- extents.push_back(extent);
-
+ vector<Extent> extents = {ExtentForRange(1, 1)};
DirectExtentWriter direct_writer;
- EXPECT_TRUE(direct_writer.Init(fd_, extents, kBlockSize));
+ EXPECT_TRUE(
+ direct_writer.Init(fd_, {extents.begin(), extents.end()}, kBlockSize));
EXPECT_TRUE(direct_writer.Write(nullptr, 0));
EXPECT_TRUE(direct_writer.End());
}
@@ -117,23 +109,14 @@
void ExtentWriterTest::WriteAlignedExtents(size_t chunk_size,
size_t first_chunk_size) {
- vector<Extent> extents;
- Extent extent;
- extent.set_start_block(1);
- extent.set_num_blocks(1);
- extents.push_back(extent);
- extent.set_start_block(0);
- extent.set_num_blocks(1);
- extents.push_back(extent);
- extent.set_start_block(2);
- extent.set_num_blocks(1);
- extents.push_back(extent);
-
+ vector<Extent> extents = {
+ ExtentForRange(1, 1), ExtentForRange(0, 1), ExtentForRange(2, 1)};
brillo::Blob data(kBlockSize * 3);
test_utils::FillWithData(&data);
DirectExtentWriter direct_writer;
- EXPECT_TRUE(direct_writer.Init(fd_, extents, kBlockSize));
+ EXPECT_TRUE(
+ direct_writer.Init(fd_, {extents.begin(), extents.end()}, kBlockSize));
size_t bytes_written = 0;
while (bytes_written < data.size()) {
@@ -172,22 +155,14 @@
}
void ExtentWriterTest::TestZeroPad(bool aligned_size) {
- vector<Extent> extents;
- Extent extent;
- extent.set_start_block(1);
- extent.set_num_blocks(1);
- extents.push_back(extent);
- extent.set_start_block(0);
- extent.set_num_blocks(1);
- extents.push_back(extent);
-
+ vector<Extent> extents = {ExtentForRange(1, 1), ExtentForRange(0, 1)};
brillo::Blob data(kBlockSize * 2);
test_utils::FillWithData(&data);
- ZeroPadExtentWriter zero_pad_writer(
- brillo::make_unique_ptr(new DirectExtentWriter()));
+ ZeroPadExtentWriter zero_pad_writer(std::make_unique<DirectExtentWriter>());
- EXPECT_TRUE(zero_pad_writer.Init(fd_, extents, kBlockSize));
+ EXPECT_TRUE(
+ zero_pad_writer.Init(fd_, {extents.begin(), extents.end()}, kBlockSize));
size_t bytes_to_write = data.size();
const size_t missing_bytes = (aligned_size ? 0 : 9);
bytes_to_write -= missing_bytes;
@@ -216,17 +191,9 @@
}
TEST_F(ExtentWriterTest, SparseFileTest) {
- vector<Extent> extents;
- Extent extent;
- extent.set_start_block(1);
- extent.set_num_blocks(1);
- extents.push_back(extent);
- extent.set_start_block(kSparseHole);
- extent.set_num_blocks(2);
- extents.push_back(extent);
- extent.set_start_block(0);
- extent.set_num_blocks(1);
- extents.push_back(extent);
+ vector<Extent> extents = {ExtentForRange(1, 1),
+ ExtentForRange(kSparseHole, 2),
+ ExtentForRange(0, 1)};
const int block_count = 4;
const int on_disk_count = 2;
@@ -234,7 +201,8 @@
test_utils::FillWithData(&data);
DirectExtentWriter direct_writer;
- EXPECT_TRUE(direct_writer.Init(fd_, extents, kBlockSize));
+ EXPECT_TRUE(
+ direct_writer.Init(fd_, {extents.begin(), extents.end()}, kBlockSize));
size_t bytes_written = 0;
while (bytes_written < (block_count * kBlockSize)) {
diff --git a/payload_consumer/fake_extent_writer.h b/payload_consumer/fake_extent_writer.h
index 762c6d5..4418a9e 100644
--- a/payload_consumer/fake_extent_writer.h
+++ b/payload_consumer/fake_extent_writer.h
@@ -18,7 +18,6 @@
#define UPDATE_ENGINE_PAYLOAD_CONSUMER_FAKE_EXTENT_WRITER_H_
#include <memory>
-#include <vector>
#include <brillo/secure_blob.h>
@@ -35,7 +34,7 @@
// ExtentWriter overrides.
bool Init(FileDescriptorPtr /* fd */,
- const std::vector<Extent>& /* extents */,
+ const google::protobuf::RepeatedPtrField<Extent>& /* extents */,
uint32_t /* block_size */) override {
init_called_ = true;
return true;
diff --git a/payload_consumer/fake_file_descriptor.cc b/payload_consumer/fake_file_descriptor.cc
new file mode 100644
index 0000000..d54856b
--- /dev/null
+++ b/payload_consumer/fake_file_descriptor.cc
@@ -0,0 +1,76 @@
+//
+// Copyright (C) 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_consumer/fake_file_descriptor.h"
+
+namespace chromeos_update_engine {
+
+ssize_t FakeFileDescriptor::Read(void* buf, size_t count) {
+ // Record the read operation so it can later be inspected.
+ read_ops_.emplace_back(offset_, count);
+
+ // Check for the EOF condition first to avoid reporting it as a failure.
+ if (offset_ >= static_cast<uint64_t>(size_) || count == 0)
+ return 0;
+ // Find the first offset at or past the current position where a failure
+ // will occur. This marks the end of the read chunk.
+ uint64_t first_failure = size_;
+ for (const auto& failure : failure_ranges_) {
+ // A failure range that includes the current offset results in an
+ // immediate failure to read any bytes.
+ if (failure.first <= offset_ && offset_ < failure.first + failure.second) {
+ errno = EIO;
+ return -1;
+ }
+ if (failure.first > offset_)
+ first_failure = std::min(first_failure, failure.first);
+ }
+ count = std::min(static_cast<uint64_t>(count), first_failure - offset_);
+ static const char kHexChars[] = "0123456789ABCDEF";
+ for (size_t i = 0; i < count; ++i) {
+ // Encode the 16-bit number "offset_ / 4" as a hex digit in big-endian.
+ uint16_t current_num = offset_ / 4;
+ uint8_t current_digit = (current_num >> (4 * (3 - offset_ % 4))) & 0x0f;
+
+ static_cast<uint8_t*>(buf)[i] = kHexChars[current_digit];
+ offset_++;
+ }
+
+ return count;
+}
+
+off64_t FakeFileDescriptor::Seek(off64_t offset, int whence) {
+ switch (whence) {
+ case SEEK_SET:
+ offset_ = offset;
+ break;
+ case SEEK_CUR:
+ offset_ += offset;
+ break;
+ case SEEK_END:
+ if (offset > size_)
+ offset_ = 0;
+ else
+ offset_ = size_ - offset_;
+ break;
+ default:
+ errno = EINVAL;
+ return -1;
+ }
+ return offset_;
+}
+
+} // namespace chromeos_update_engine
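The data returned by FakeFileDescriptor::Read() above is fully determined by the read offset: byte i of the stream is one hex digit of the 16-bit value i / 4, selected big-endian by i % 4, which is what yields the 0000000100020003... pattern documented in the header below. A standalone restatement of that mapping (the helper name is hypothetical):

    #include <cstdint>
    #include <cstdio>

    // Hypothetical helper mirroring the byte generator in FakeFileDescriptor::Read().
    char FakeByteAt(uint64_t offset) {
      static const char kHexChars[] = "0123456789ABCDEF";
      uint16_t current_num = offset / 4;
      uint8_t current_digit = (current_num >> (4 * (3 - offset % 4))) & 0x0f;
      return kHexChars[current_digit];
    }

    int main() {
      // Prints the first 16 bytes of the fake stream: 0000000100020003
      for (uint64_t i = 0; i < 16; ++i)
        std::putchar(FakeByteAt(i));
      std::putchar('\n');
      return 0;
    }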
diff --git a/payload_consumer/fake_file_descriptor.h b/payload_consumer/fake_file_descriptor.h
new file mode 100644
index 0000000..f17820b
--- /dev/null
+++ b/payload_consumer/fake_file_descriptor.h
@@ -0,0 +1,126 @@
+//
+// Copyright (C) 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_PAYLOAD_CONSUMER_FAKE_FILE_DESCRIPTOR_H_
+#define UPDATE_ENGINE_PAYLOAD_CONSUMER_FAKE_FILE_DESCRIPTOR_H_
+
+#include <algorithm>
+#include <limits>
+#include <utility>
+#include <vector>
+
+#include "update_engine/payload_consumer/file_descriptor.h"
+
+namespace chromeos_update_engine {
+
+// A fake file descriptor with configurable errors. The file descriptor always
+// reads a fixed sequence of bytes: the concatenation of the numbers 0, 1,
+// 2..., each written as the 4-character big-endian hexadecimal representation
+// of a 16-bit value. For example, the beginning of the stream in ASCII is
+// 0000000100020003... which corresponds to the numbers 0, 1, 2 and 3.
+class FakeFileDescriptor : public FileDescriptor {
+ public:
+ FakeFileDescriptor() = default;
+ ~FakeFileDescriptor() override = default;
+
+ // FileDescriptor override methods.
+ bool Open(const char* path, int flags, mode_t mode) override {
+ if (open_)
+ return false;
+ open_ = true;
+ return true;
+ }
+
+ bool Open(const char* path, int flags) override {
+ return Open(path, flags, 0);
+ }
+
+ ssize_t Read(void* buf, size_t count) override;
+
+ ssize_t Write(const void* buf, size_t count) override {
+ // Read-only block device.
+ errno = EROFS;
+ return -1;
+ }
+
+ off64_t Seek(off64_t offset, int whence) override;
+
+ uint64_t BlockDevSize() override { return size_; }
+
+ bool BlkIoctl(int request,
+ uint64_t start,
+ uint64_t length,
+ int* result) override {
+ return false;
+ }
+
+ bool Flush() override {
+ return open_;
+ }
+
+ bool Close() override {
+ if (!open_)
+ return false;
+ open_ = false;
+ return true;
+ }
+
+ bool IsSettingErrno() override { return true; }
+
+ bool IsOpen() override { return open_; }
+
+ // Fake class configuration methods.
+
+ // Set the size of the file.
+ void SetFileSize(uint64_t size) { size_ = size; }
+
+ // Marks the |length|-byte range starting at |offset| bytes into the file as
+ // a failure range. Reads from this range will always fail.
+ void AddFailureRange(uint64_t offset, uint64_t length) {
+ if (length == 0)
+ return;
+ failure_ranges_.emplace_back(offset, length);
+ }
+
+ // Return the list of ranges of bytes requested with a Read() as (offset,
+ // length), regardless of the Read() return value.
+ std::vector<std::pair<uint64_t, uint64_t>> GetReadOps() const {
+ return read_ops_;
+ }
+
+ private:
+ // Whether the fake file is open.
+ bool open_{false};
+
+ // The current file pointer offset into the fake file.
+ uint64_t offset_{0};
+
+ // The size of the file. Reads beyond |size_| will return an EOF condition.
+ off64_t size_{std::numeric_limits<off64_t>::max()};
+
+ // The list of ranges represented as (start, length) in bytes where reads will
+ // always fail.
+ std::vector<std::pair<uint64_t, uint64_t>> failure_ranges_;
+
+ // List of reads performed as (offset, length) of the read request.
+ std::vector<std::pair<uint64_t, uint64_t>> read_ops_;
+
+ DISALLOW_COPY_AND_ASSIGN(FakeFileDescriptor);
+};
+
+} // namespace chromeos_update_engine
+
+#endif // UPDATE_ENGINE_PAYLOAD_CONSUMER_FAKE_FILE_DESCRIPTOR_H_
diff --git a/payload_consumer/file_descriptor.cc b/payload_consumer/file_descriptor.cc
index 8a23dea..4eabb8f 100644
--- a/payload_consumer/file_descriptor.cc
+++ b/payload_consumer/file_descriptor.cc
@@ -123,16 +123,17 @@
#endif // defined(BLKZEROOUT)
}
+bool EintrSafeFileDescriptor::Flush() {
+ CHECK_GE(fd_, 0);
+ return true;
+}
+
bool EintrSafeFileDescriptor::Close() {
CHECK_GE(fd_, 0);
if (IGNORE_EINTR(close(fd_)))
return false;
- Reset();
- return true;
-}
-
-void EintrSafeFileDescriptor::Reset() {
fd_ = -1;
+ return true;
}
} // namespace chromeos_update_engine
diff --git a/payload_consumer/file_descriptor.h b/payload_consumer/file_descriptor.h
index 7bb2974..5e524d9 100644
--- a/payload_consumer/file_descriptor.h
+++ b/payload_consumer/file_descriptor.h
@@ -39,12 +39,6 @@
// * Write() returns the number of bytes written: this appears to be more useful
// for clients, who may wish to retry or otherwise do something useful with
// the remaining data that was not written.
-//
-// * Provides a Reset() method, which will force to abandon a currently open
-// file descriptor and allow opening another file, without necessarily
-// properly closing the old one. This may be useful in cases where a "closer"
-// class does not care whether Close() was successful, but may need to reuse
-// the same file descriptor again.
namespace chromeos_update_engine {
@@ -93,15 +87,16 @@
uint64_t length,
int* result) = 0;
+ // Flushes any cached data. The descriptor must be opened prior to this
+ // call. Returns false if it fails to write data. Implementations may set
+ // errno accordingly.
+ virtual bool Flush() = 0;
+
// Closes a file descriptor. The descriptor must be open prior to this call.
// Returns true on success, false otherwise. Specific implementations may set
// errno accordingly.
virtual bool Close() = 0;
- // Resets the file descriptor, abandoning a currently open file and returning
- // the descriptor to the closed state.
- virtual void Reset() = 0;
-
// Indicates whether or not an implementation sets meaningful errno.
virtual bool IsSettingErrno() = 0;
@@ -128,8 +123,8 @@
uint64_t start,
uint64_t length,
int* result) override;
+ bool Flush() override;
bool Close() override;
- void Reset() override;
bool IsSettingErrno() override {
return true;
}
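With Reset() gone, callers that used to abandon a descriptor are expected to go through the regular Flush()/Close() sequence instead. A hedged sketch of the expected call pattern, where |path| and |buf| are the caller's (EintrSafeFileDescriptor's Flush() is effectively a no-op that only checks the descriptor is open; caching implementations may actually write data here):

  FileDescriptorPtr fd(new EintrSafeFileDescriptor());
  TEST_AND_RETURN_FALSE(fd->Open(path.c_str(), O_WRONLY));
  TEST_AND_RETURN_FALSE(fd->Write(buf.data(), buf.size()) ==
                        static_cast<ssize_t>(buf.size()));
  TEST_AND_RETURN_FALSE(fd->Flush());  // Write out any cached data.
  TEST_AND_RETURN_FALSE(fd->Close());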
diff --git a/payload_consumer/file_descriptor_utils.cc b/payload_consumer/file_descriptor_utils.cc
new file mode 100644
index 0000000..73f86df
--- /dev/null
+++ b/payload_consumer/file_descriptor_utils.cc
@@ -0,0 +1,85 @@
+//
+// Copyright (C) 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_consumer/file_descriptor_utils.h"
+
+#include <algorithm>
+
+#include <base/logging.h>
+
+#include "update_engine/common/hash_calculator.h"
+#include "update_engine/common/utils.h"
+#include "update_engine/payload_consumer/extent_reader.h"
+#include "update_engine/payload_consumer/extent_writer.h"
+
+using google::protobuf::RepeatedPtrField;
+using std::min;
+
+namespace chromeos_update_engine {
+
+namespace {
+
+// Size of the buffer used to copy blocks.
+const int kMaxCopyBufferSize = 1024 * 1024;
+
+} // namespace
+
+namespace fd_utils {
+
+bool CopyAndHashExtents(FileDescriptorPtr source,
+ const RepeatedPtrField<Extent>& src_extents,
+ FileDescriptorPtr target,
+ const RepeatedPtrField<Extent>& tgt_extents,
+ uint32_t block_size,
+ brillo::Blob* hash_out) {
+ uint64_t total_blocks = utils::BlocksInExtents(src_extents);
+ TEST_AND_RETURN_FALSE(total_blocks == utils::BlocksInExtents(tgt_extents));
+
+ DirectExtentReader reader;
+ TEST_AND_RETURN_FALSE(reader.Init(source, src_extents, block_size));
+ DirectExtentWriter writer;
+ TEST_AND_RETURN_FALSE(writer.Init(target, tgt_extents, block_size));
+
+ uint64_t buffer_blocks = kMaxCopyBufferSize / block_size;
+ // Ensure we copy at least one block at a time.
+ if (buffer_blocks < 1)
+ buffer_blocks = 1;
+ brillo::Blob buf(buffer_blocks * block_size);
+
+ HashCalculator source_hasher;
+ uint64_t blocks_left = total_blocks;
+ while (blocks_left > 0) {
+ uint64_t read_blocks = std::min(blocks_left, buffer_blocks);
+ TEST_AND_RETURN_FALSE(reader.Read(buf.data(), read_blocks * block_size));
+ if (hash_out) {
+ TEST_AND_RETURN_FALSE(
+ source_hasher.Update(buf.data(), read_blocks * block_size));
+ }
+ TEST_AND_RETURN_FALSE(writer.Write(buf.data(), read_blocks * block_size));
+ blocks_left -= read_blocks;
+ }
+ TEST_AND_RETURN_FALSE(writer.End());
+
+ if (hash_out) {
+ TEST_AND_RETURN_FALSE(source_hasher.Finalize());
+ *hash_out = source_hasher.raw_hash();
+ }
+ return true;
+}
+
+} // namespace fd_utils
+
+} // namespace chromeos_update_engine
diff --git a/payload_consumer/file_descriptor_utils.h b/payload_consumer/file_descriptor_utils.h
new file mode 100644
index 0000000..d1289d6
--- /dev/null
+++ b/payload_consumer/file_descriptor_utils.h
@@ -0,0 +1,48 @@
+//
+// Copyright (C) 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_PAYLOAD_CONSUMER_FILE_DESCRIPTOR_UTILS_H_
+#define UPDATE_ENGINE_PAYLOAD_CONSUMER_FILE_DESCRIPTOR_UTILS_H_
+
+#include <brillo/secure_blob.h>
+
+#include "update_engine/payload_consumer/file_descriptor.h"
+#include "update_engine/update_metadata.pb.h"
+
+namespace chromeos_update_engine {
+namespace fd_utils {
+
+// Copies blocks from the |source| file to the |target| file and hashes the
+// contents. The blocks to copy from |source| to |target| are specified by the
+// |src_extents| and |tgt_extents| lists of Extents, which must cover the same
+// number of blocks. Stores the hash of the copied blocks in the Blob pointed
+// to by |hash_out| if not null. The block size is passed as |block_size|. In
+// case of a read or write error, returns false and the value pointed to by
+// |hash_out| is undefined.
+// The |source| and |target| files must be different; otherwise, |src_extents|
+// and |tgt_extents| must not overlap.
+bool CopyAndHashExtents(
+ FileDescriptorPtr source,
+ const google::protobuf::RepeatedPtrField<Extent>& src_extents,
+ FileDescriptorPtr target,
+ const google::protobuf::RepeatedPtrField<Extent>& tgt_extents,
+ uint32_t block_size,
+ brillo::Blob* hash_out);
+
+} // namespace fd_utils
+} // namespace chromeos_update_engine
+
+#endif // UPDATE_ENGINE_PAYLOAD_CONSUMER_FILE_DESCRIPTOR_UTILS_H_
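In the simplest case CopyAndHashExtents() copies an operation's source blocks while computing the hash that is later checked against the payload. A hedged usage sketch (the descriptors, operation and block size are assumed to come from the caller; the hash is the raw HashCalculator output over the source data in |src_extents| order):

  brillo::Blob src_hash;
  TEST_AND_RETURN_FALSE(fd_utils::CopyAndHashExtents(source_fd,
                                                     operation.src_extents(),
                                                     target_fd,
                                                     operation.dst_extents(),
                                                     block_size,
                                                     &src_hash));
  // |src_hash| can now be compared against the expected source hash, or the
  // call can be made with a null |hash_out| when no verification is needed.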
diff --git a/payload_consumer/file_descriptor_utils_unittest.cc b/payload_consumer/file_descriptor_utils_unittest.cc
new file mode 100644
index 0000000..8ba8ce6
--- /dev/null
+++ b/payload_consumer/file_descriptor_utils_unittest.cc
@@ -0,0 +1,170 @@
+//
+// Copyright (C) 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_consumer/file_descriptor_utils.h"
+
+#include <fcntl.h>
+
+#include <string>
+#include <utility>
+#include <vector>
+
+#include <brillo/data_encoding.h>
+#include <gtest/gtest.h>
+
+#include "update_engine/common/hash_calculator.h"
+#include "update_engine/common/test_utils.h"
+#include "update_engine/common/utils.h"
+#include "update_engine/payload_consumer/fake_file_descriptor.h"
+#include "update_engine/payload_consumer/file_descriptor.h"
+#include "update_engine/payload_generator/extent_ranges.h"
+
+using google::protobuf::RepeatedPtrField;
+
+namespace chromeos_update_engine {
+
+namespace {
+
+RepeatedPtrField<Extent> CreateExtentList(
+ const std::vector<std::pair<uint64_t, uint64_t>>& lst) {
+ RepeatedPtrField<Extent> result;
+ for (const auto& item : lst) {
+ *result.Add() = ExtentForRange(item.first, item.second);
+ }
+ return result;
+}
+
+} // namespace
+
+class FileDescriptorUtilsTest : public ::testing::Test {
+ protected:
+ void SetUp() override {
+ EXPECT_TRUE(utils::MakeTempFile("fd_tgt.XXXXXX", &tgt_path_, nullptr));
+ EXPECT_TRUE(target_->Open(tgt_path_.c_str(), O_RDWR));
+ }
+
+ // Check that the |target_| file contains |expected_contents|.
+ void ExpectTarget(const std::string& expected_contents) {
+ std::string target_contents;
+ EXPECT_TRUE(utils::ReadFile(tgt_path_, &target_contents));
+ EXPECT_EQ(expected_contents.size(), target_contents.size());
+ if (target_contents != expected_contents) {
+ ADD_FAILURE() << "Contents don't match.";
+ LOG(INFO) << "Expected contents:";
+ utils::HexDumpString(expected_contents);
+ LOG(INFO) << "Actual contents:";
+ utils::HexDumpString(target_contents);
+ }
+ }
+
+ // Path to the target temporary file.
+ std::string tgt_path_;
+
+ // Source and target file descriptor used for testing the tools.
+ FakeFileDescriptor* fake_source_{new FakeFileDescriptor()};
+ FileDescriptorPtr source_{fake_source_};
+ FileDescriptorPtr target_{new EintrSafeFileDescriptor()};
+};
+
+// Source and target extents should have the same number of blocks.
+TEST_F(FileDescriptorUtilsTest, CopyAndHashExtentsMismatchBlocksTest) {
+ auto src_extents = CreateExtentList({{1, 4}});
+ auto tgt_extents = CreateExtentList({{0, 5}});
+
+ EXPECT_FALSE(fd_utils::CopyAndHashExtents(
+ source_, src_extents, target_, tgt_extents, 4, nullptr));
+}
+
+// Failing to read from the source should fail the copy.
+TEST_F(FileDescriptorUtilsTest, CopyAndHashExtentsReadFailureTest) {
+ auto extents = CreateExtentList({{0, 5}});
+ fake_source_->AddFailureRange(10, 5);
+
+ EXPECT_FALSE(fd_utils::CopyAndHashExtents(
+ source_, extents, target_, extents, 4, nullptr));
+}
+
+// Failing to write to the target should fail the copy.
+TEST_F(FileDescriptorUtilsTest, CopyAndHashExtentsWriteFailureTest) {
+ auto src_extents = CreateExtentList({{0, 2}});
+ auto tgt_extents = CreateExtentList({{5, 2}});
+ fake_source_->AddFailureRange(5 * 4, 10);
+
+ // Note that we pass |source_| as the target as well, which should fail to
+ // write.
+ EXPECT_FALSE(fd_utils::CopyAndHashExtents(
+ source_, src_extents, source_, tgt_extents, 4, nullptr));
+}
+
+// Test that we can copy extents without hashing them, by passing a null
+// |hash_out| pointer.
+TEST_F(FileDescriptorUtilsTest, CopyAndHashExtentsWithoutHashingTest) {
+ auto extents = CreateExtentList({{0, 5}});
+
+ EXPECT_TRUE(fd_utils::CopyAndHashExtents(
+ source_, extents, target_, extents, 4, nullptr));
+ ExpectTarget("00000001000200030004");
+}
+
+// CopyAndHashExtents() can take a different number of extents in the source
+// and target files, as long as the total number of blocks is the same. Test
+// that it handles this properly.
+TEST_F(FileDescriptorUtilsTest, CopyAndHashExtentsManyToOneTest) {
+ brillo::Blob hash_out;
+ // Reorder the input as 1 4 2 3 0.
+ auto src_extents = CreateExtentList({{1, 1}, {4, 1}, {2, 2}, {0, 1}});
+ auto tgt_extents = CreateExtentList({{0, 5}});
+
+ EXPECT_TRUE(fd_utils::CopyAndHashExtents(
+ source_, src_extents, target_, tgt_extents, 4, &hash_out));
+ const char kExpectedResult[] = "00010004000200030000";
+ ExpectTarget(kExpectedResult);
+
+ brillo::Blob expected_hash;
+ EXPECT_TRUE(HashCalculator::RawHashOfBytes(
+ kExpectedResult, strlen(kExpectedResult), &expected_hash));
+ EXPECT_EQ(expected_hash, hash_out);
+}
+
+TEST_F(FileDescriptorUtilsTest, CopyAndHashExtentsManyToManyTest) {
+ brillo::Blob hash_out;
+ auto src_extents = CreateExtentList({{1, 1}, {4, 1}, {2, 2}, {0, 1}});
+ auto tgt_extents = CreateExtentList({{2, 3}, {0, 2}});
+
+ EXPECT_TRUE(fd_utils::CopyAndHashExtents(
+ source_, src_extents, target_, tgt_extents, 4, &hash_out));
+ // The reads always match the source extent list of blocks (up to the
+ // internal buffer size).
+ std::vector<std::pair<uint64_t, uint64_t>> kExpectedOps = {
+ {4, 4}, {16, 4}, {8, 8}, {0, 4}};
+ EXPECT_EQ(kExpectedOps, fake_source_->GetReadOps());
+
+ // The output here is as in the previous test but the first 3 4-byte blocks
+ // are at the end of the stream. The expected hash is as in the previous
+ // example anyway since the hash doesn't depend on the order of the target
+ // blocks.
+ const char kExpectedResult[] = "00030000000100040002";
+ ExpectTarget(kExpectedResult);
+
+ // The data in the order that the reader processes (and hashes) it.
+ const char kExpectedOrderedData[] = "00010004000200030000";
+ brillo::Blob expected_hash;
+ EXPECT_TRUE(HashCalculator::RawHashOfBytes(
+ kExpectedOrderedData, strlen(kExpectedOrderedData), &expected_hash));
+ EXPECT_EQ(expected_hash, hash_out);
+}
+
+} // namespace chromeos_update_engine
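The expected-content strings in the tests above follow from the fake source data: they are consistent with each aligned 4-byte group at byte offset o containing the zero-padded decimal value of o / 4 (an assumption about FakeFileDescriptor::Read(), whose body is not part of this hunk). With a 4-byte block size that value is simply the block number, so copying blocks {1, 4, 2, 3, 0} in that order yields:

  std::string expected;
  for (int block : {1, 4, 2, 3, 0})
    expected += base::StringPrintf("%04d", block);
  // expected == "00010004000200030000", the kExpectedResult in the test above.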
diff --git a/payload_consumer/file_writer_unittest.cc b/payload_consumer/file_writer_unittest.cc
index debb4c3..92837c8 100644
--- a/payload_consumer/file_writer_unittest.cc
+++ b/payload_consumer/file_writer_unittest.cc
@@ -21,7 +21,6 @@
#include <unistd.h>
#include <string>
-#include <vector>
#include <brillo/secure_blob.h>
#include <gtest/gtest.h>
@@ -30,7 +29,6 @@
#include "update_engine/common/utils.h"
using std::string;
-using std::vector;
namespace chromeos_update_engine {
diff --git a/payload_consumer/filesystem_verifier_action_unittest.cc b/payload_consumer/filesystem_verifier_action_unittest.cc
index 2e1d95d..b4f7f7f 100644
--- a/payload_consumer/filesystem_verifier_action_unittest.cc
+++ b/payload_consumer/filesystem_verifier_action_unittest.cc
@@ -26,7 +26,6 @@
#include <base/posix/eintr_wrapper.h>
#include <base/strings/string_util.h>
#include <base/strings/stringprintf.h>
-#include <brillo/bind_lambda.h>
#include <brillo/message_loops/fake_message_loop.h>
#include <brillo/message_loops/message_loop_utils.h>
#include <gmock/gmock.h>
diff --git a/payload_consumer/install_plan.cc b/payload_consumer/install_plan.cc
index d5d745b..45112d6 100644
--- a/payload_consumer/install_plan.cc
+++ b/payload_consumer/install_plan.cc
@@ -74,13 +74,23 @@
InstallPayloadTypeToString(payload.type).c_str());
}
+ string version_str = base::StringPrintf(", version: %s", version.c_str());
+ if (!system_version.empty()) {
+ version_str +=
+ base::StringPrintf(", system_version: %s", system_version.c_str());
+ }
+
LOG(INFO) << "InstallPlan: " << (is_resume ? "resume" : "new_update")
+ << version_str
<< ", source_slot: " << BootControlInterface::SlotName(source_slot)
<< ", target_slot: " << BootControlInterface::SlotName(target_slot)
<< ", url: " << download_url << payloads_str << partitions_str
<< ", hash_checks_mandatory: "
<< utils::ToString(hash_checks_mandatory)
- << ", powerwash_required: " << utils::ToString(powerwash_required);
+ << ", powerwash_required: " << utils::ToString(powerwash_required)
+ << ", switch_slot_on_reboot: "
+ << utils::ToString(switch_slot_on_reboot)
+ << ", run_post_install: " << utils::ToString(run_post_install);
}
bool InstallPlan::LoadPartitionsFromSlots(BootControlInterface* boot_control) {
diff --git a/payload_consumer/install_plan.h b/payload_consumer/install_plan.h
index 6dd5a73..5cdfbc1 100644
--- a/payload_consumer/install_plan.h
+++ b/payload_consumer/install_plan.h
@@ -54,6 +54,8 @@
bool is_resume{false};
std::string download_url; // url to download from
std::string version; // version we are installing.
+ // system version, if present and separate from version
+ std::string system_version;
struct Payload {
uint64_t size = 0; // size of the payload
@@ -117,6 +119,14 @@
// False otherwise.
bool powerwash_required{false};
+ // True if the updated slot should be marked active on success.
+ // False otherwise.
+ bool switch_slot_on_reboot{true};
+
+ // True if the update should run its post-install step.
+ // False otherwise.
+ bool run_post_install{true};
+
// If not blank, a base-64 encoded representation of the PEM-encoded
// public key in the response.
std::string public_key_rsa;
diff --git a/payload_consumer/mtd_file_descriptor.cc b/payload_consumer/mtd_file_descriptor.cc
index 3f0a33f..5d7758a 100644
--- a/payload_consumer/mtd_file_descriptor.cc
+++ b/payload_consumer/mtd_file_descriptor.cc
@@ -18,11 +18,12 @@
#include <fcntl.h>
#include <mtd/ubi-user.h>
-#include <string>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <sys/types.h>
-#include <vector>
+
+#include <memory>
+#include <string>
#include <base/files/file_path.h>
#include <base/strings/string_number_conversions.h>
@@ -33,7 +34,6 @@
#include "update_engine/common/utils.h"
using std::string;
-using std::vector;
namespace {
diff --git a/payload_consumer/payload_constants.cc b/payload_consumer/payload_constants.cc
index de0fd74..ad193a0 100644
--- a/payload_consumer/payload_constants.cc
+++ b/payload_consumer/payload_constants.cc
@@ -25,24 +25,13 @@
const uint32_t kInPlaceMinorPayloadVersion = 1;
const uint32_t kSourceMinorPayloadVersion = 2;
const uint32_t kOpSrcHashMinorPayloadVersion = 3;
-const uint32_t kImgdiffMinorPayloadVersion = 4;
+const uint32_t kPuffdiffMinorPayloadVersion = 4;
const char kLegacyPartitionNameKernel[] = "boot";
const char kLegacyPartitionNameRoot[] = "system";
const char kDeltaMagic[4] = {'C', 'r', 'A', 'U'};
-// The zlib in Android and Chrome OS are currently compatible with each other,
-// so they are sharing the same array, but if in the future they are no longer
-// compatible with each other, we coule make the same change on the other one to
-// make them compatible again or use ifdef here.
-const char kCompatibleZlibFingerprint[][65] = {
- "ea973605ccbbdb24f59f449c5f65861a1a9bc7a4353377aaaa06cb3e0f1cfbd7",
- "3747fa404cceb00a5ec3606fc779510aaa784d5864ab1d5c28b9e267c40aad5c",
- // zlib 1.2.11
- "61514794a2985bee78135fd67a2f1fd18e56f3c3e410fbc4552a0e05a701e47a",
-};
-
const char* InstallOperationTypeName(InstallOperation_Type op_type) {
switch (op_type) {
case InstallOperation::BSDIFF:
@@ -63,8 +52,10 @@
return "DISCARD";
case InstallOperation::REPLACE_XZ:
return "REPLACE_XZ";
- case InstallOperation::IMGDIFF:
- return "IMGDIFF";
+ case InstallOperation::PUFFDIFF:
+ return "PUFFDIFF";
+ case InstallOperation::BROTLI_BSDIFF:
+ return "BROTLI_BSDIFF";
}
return "<unknown_op>";
}
diff --git a/payload_consumer/payload_constants.h b/payload_consumer/payload_constants.h
index 7509ed2..1e2e810 100644
--- a/payload_consumer/payload_constants.h
+++ b/payload_consumer/payload_constants.h
@@ -43,9 +43,8 @@
// The minor version that allows per-operation source hash.
extern const uint32_t kOpSrcHashMinorPayloadVersion;
-// The minor version that allows IMGDIFF operation.
-extern const uint32_t kImgdiffMinorPayloadVersion;
-
+// The minor version that allows PUFFDIFF operation.
+extern const uint32_t kPuffdiffMinorPayloadVersion;
// The kernel and rootfs partition names used by the BootControlInterface when
// handling update payloads with a major version 1. The names of the updated
@@ -56,15 +55,6 @@
extern const char kBspatchPath[];
extern const char kDeltaMagic[4];
-// The list of compatible SHA256 hashes of zlib source code.
-// This is used to check if the source image have a compatible zlib (produce
-// same compressed result given the same input).
-// When a new fingerprint is found, please examine the changes in zlib source
-// carefully and determine if it's still compatible with previous version, if
-// yes then add the new fingerprint to this array, otherwise remove all previous
-// fingerprints in the array first, and only include the new fingerprint.
-extern const char kCompatibleZlibFingerprint[3][65];
-
// A block number denoting a hole on a sparse file. Used on Extents to refer to
// section of blocks not present on disk on a sparse file.
const uint64_t kSparseHole = std::numeric_limits<uint64_t>::max();
diff --git a/payload_consumer/payload_verifier.h b/payload_consumer/payload_verifier.h
index 22ced40..8caef35 100644
--- a/payload_consumer/payload_verifier.h
+++ b/payload_consumer/payload_verifier.h
@@ -18,7 +18,6 @@
#define UPDATE_ENGINE_PAYLOAD_CONSUMER_PAYLOAD_VERIFIER_H_
#include <string>
-#include <vector>
#include <base/macros.h>
#include <brillo/secure_blob.h>
diff --git a/payload_consumer/postinstall_runner_action.cc b/payload_consumer/postinstall_runner_action.cc
index 27a9ed6..cedecda 100644
--- a/payload_consumer/postinstall_runner_action.cc
+++ b/payload_consumer/postinstall_runner_action.cc
@@ -82,6 +82,11 @@
}
void PostinstallRunnerAction::PerformPartitionPostinstall() {
+ if (!install_plan_.run_post_install) {
+ LOG(INFO) << "Skipping post-install according to install plan.";
+ return CompletePostinstall(ErrorCode::kSuccess);
+ }
+
if (install_plan_.download_url.empty()) {
LOG(INFO) << "Skipping post-install during rollback";
return CompletePostinstall(ErrorCode::kSuccess);
@@ -331,15 +336,21 @@
void PostinstallRunnerAction::CompletePostinstall(ErrorCode error_code) {
// We only attempt to mark the new slot as active if all the postinstall
// steps succeeded.
- if (error_code == ErrorCode::kSuccess &&
- !boot_control_->SetActiveBootSlot(install_plan_.target_slot)) {
- error_code = ErrorCode::kPostinstallRunnerError;
+ if (error_code == ErrorCode::kSuccess) {
+ if (install_plan_.switch_slot_on_reboot) {
+ if (!boot_control_->SetActiveBootSlot(install_plan_.target_slot)) {
+ error_code = ErrorCode::kPostinstallRunnerError;
+ }
+ } else {
+ error_code = ErrorCode::kUpdatedButNotActive;
+ }
}
ScopedActionCompleter completer(processor_, this);
completer.set_code(error_code);
- if (error_code != ErrorCode::kSuccess) {
+ if (error_code != ErrorCode::kSuccess &&
+ error_code != ErrorCode::kUpdatedButNotActive) {
LOG(ERROR) << "Postinstall action failed.";
// Undo any changes done to trigger Powerwash.
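Taken together with the InstallPlan fields added above, this lets a client stage an update without activating it. A hedged sketch of how an install plan might be configured for that flow (the surrounding attempter wiring is assumed):

  InstallPlan plan;
  plan.run_post_install = false;       // Apply the payload but skip the post-install step.
  plan.switch_slot_on_reboot = false;  // Do not mark the target slot active yet.
  // When the action processor finishes, PostinstallRunnerAction completes with
  // ErrorCode::kUpdatedButNotActive instead of kSuccess; per the change above,
  // that code is not treated as a post-install failure, and the slot can be
  // switched later.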
diff --git a/payload_consumer/postinstall_runner_action_unittest.cc b/payload_consumer/postinstall_runner_action_unittest.cc
index e82a866..f15171b 100644
--- a/payload_consumer/postinstall_runner_action_unittest.cc
+++ b/payload_consumer/postinstall_runner_action_unittest.cc
@@ -22,14 +22,12 @@
#include <memory>
#include <string>
-#include <vector>
#include <base/bind.h>
#include <base/files/file_util.h>
#include <base/message_loop/message_loop.h>
#include <base/strings/string_util.h>
#include <base/strings/stringprintf.h>
-#include <brillo/bind_lambda.h>
#include <brillo/message_loops/base_message_loop.h>
#include <brillo/message_loops/message_loop_utils.h>
#include <gmock/gmock.h>
@@ -44,7 +42,6 @@
using brillo::MessageLoop;
using chromeos_update_engine::test_utils::ScopedLoopbackDeviceBinder;
using std::string;
-using std::vector;
namespace chromeos_update_engine {
diff --git a/payload_consumer/xz_extent_writer.cc b/payload_consumer/xz_extent_writer.cc
index 4bd893d..343ed80 100644
--- a/payload_consumer/xz_extent_writer.cc
+++ b/payload_consumer/xz_extent_writer.cc
@@ -16,7 +16,7 @@
#include "update_engine/payload_consumer/xz_extent_writer.h"
-using std::vector;
+using google::protobuf::RepeatedPtrField;
namespace chromeos_update_engine {
@@ -47,7 +47,7 @@
return "<unknown xz error>";
}
#undef __XZ_ERROR_STRING_CASE
-};
+}
} // namespace
XzExtentWriter::~XzExtentWriter() {
@@ -55,7 +55,7 @@
}
bool XzExtentWriter::Init(FileDescriptorPtr fd,
- const vector<Extent>& extents,
+ const RepeatedPtrField<Extent>& extents,
uint32_t block_size) {
stream_ = xz_dec_init(XZ_DYNALLOC, kXzMaxDictSize);
TEST_AND_RETURN_FALSE(stream_ != nullptr);
diff --git a/payload_consumer/xz_extent_writer.h b/payload_consumer/xz_extent_writer.h
index a6b3257..5e50256 100644
--- a/payload_consumer/xz_extent_writer.h
+++ b/payload_consumer/xz_extent_writer.h
@@ -20,7 +20,7 @@
#include <xz.h>
#include <memory>
-#include <vector>
+#include <utility>
#include <brillo/secure_blob.h>
@@ -40,7 +40,7 @@
~XzExtentWriter() override;
bool Init(FileDescriptorPtr fd,
- const std::vector<Extent>& extents,
+ const google::protobuf::RepeatedPtrField<Extent>& extents,
uint32_t block_size) override;
bool Write(const void* bytes, size_t count) override;
bool EndImpl() override;
diff --git a/payload_consumer/xz_extent_writer_unittest.cc b/payload_consumer/xz_extent_writer_unittest.cc
index fb8bb40..c8bcdf9 100644
--- a/payload_consumer/xz_extent_writer_unittest.cc
+++ b/payload_consumer/xz_extent_writer_unittest.cc
@@ -22,19 +22,14 @@
#include <unistd.h>
#include <algorithm>
-#include <string>
-#include <vector>
-#include <brillo/make_unique_ptr.h>
+#include <base/memory/ptr_util.h>
#include <gtest/gtest.h>
#include "update_engine/common/test_utils.h"
#include "update_engine/common/utils.h"
#include "update_engine/payload_consumer/fake_extent_writer.h"
-using std::string;
-using std::vector;
-
namespace chromeos_update_engine {
namespace {
@@ -88,8 +83,7 @@
protected:
void SetUp() override {
fake_extent_writer_ = new FakeExtentWriter();
- xz_writer_.reset(
- new XzExtentWriter(brillo::make_unique_ptr(fake_extent_writer_)));
+ xz_writer_.reset(new XzExtentWriter(base::WrapUnique(fake_extent_writer_)));
}
void WriteAll(const brillo::Blob& compressed) {
diff --git a/payload_generator/ab_generator.cc b/payload_generator/ab_generator.cc
index 3b0d012..089dfd9 100644
--- a/payload_generator/ab_generator.cc
+++ b/payload_generator/ab_generator.cc
@@ -17,6 +17,7 @@
#include "update_engine/payload_generator/ab_generator.h"
#include <algorithm>
+#include <utility>
#include <base/strings/stringprintf.h>
@@ -173,7 +174,6 @@
InstallOperation new_op;
*(new_op.add_dst_extents()) = dst_ext;
uint32_t data_size = dst_ext.num_blocks() * kBlockSize;
- new_op.set_dst_length(data_size);
// If this is a REPLACE, attempt to reuse portions of the existing blob.
if (is_replace) {
new_op.set_type(InstallOperation::REPLACE);
@@ -238,15 +238,9 @@
if (is_delta_op) {
ExtendExtents(last_aop.op.mutable_src_extents(),
curr_aop.op.src_extents());
- if (curr_aop.op.src_length() > 0)
- last_aop.op.set_src_length(last_aop.op.src_length() +
- curr_aop.op.src_length());
}
ExtendExtents(last_aop.op.mutable_dst_extents(),
curr_aop.op.dst_extents());
- if (curr_aop.op.dst_length() > 0)
- last_aop.op.set_dst_length(last_aop.op.dst_length() +
- curr_aop.op.dst_length());
// Set the data length to zero so we know to add the blob later.
if (is_a_replace)
last_aop.op.set_data_length(0);
@@ -276,9 +270,9 @@
BlobFileWriter* blob_file) {
TEST_AND_RETURN_FALSE(IsAReplaceOperation(aop->op.type()));
- brillo::Blob data(aop->op.dst_length());
vector<Extent> dst_extents;
ExtentsToVector(aop->op.dst_extents(), &dst_extents);
+ brillo::Blob data(utils::BlocksInExtents(dst_extents) * kBlockSize);
TEST_AND_RETURN_FALSE(utils::ReadExtents(target_part_path,
dst_extents,
&data,
@@ -312,7 +306,7 @@
uint64_t src_length =
aop.op.has_src_length()
? aop.op.src_length()
- : BlocksInExtents(aop.op.src_extents()) * kBlockSize;
+ : utils::BlocksInExtents(aop.op.src_extents()) * kBlockSize;
TEST_AND_RETURN_FALSE(utils::ReadExtents(
source_part_path, src_extents, &src_data, src_length, kBlockSize));
TEST_AND_RETURN_FALSE(HashCalculator::RawHashOfData(src_data, &src_hash));
diff --git a/payload_generator/ab_generator.h b/payload_generator/ab_generator.h
index 77afb87..343b546 100644
--- a/payload_generator/ab_generator.h
+++ b/payload_generator/ab_generator.h
@@ -55,13 +55,13 @@
BlobFileWriter* blob_file,
std::vector<AnnotatedOperation>* aops) override;
- // Split the operations in the vector of AnnotatedOperations |aops|
- // such that for every operation there is only one dst extent and updates
- // |aops| with the new list of operations. All kinds of operations are
- // fragmented except BSDIFF and SOURCE_BSDIFF operations.
- // The |target_part_path| is the filename of the new image, where the
- // destination extents refer to. The blobs of the operations in |aops| should
- // reference |blob_file|. |blob_file| are updated if needed.
+ // Splits the operations in the vector of AnnotatedOperations |aops| so that
+ // every operation has only one dst extent, and updates |aops| with the new
+ // list of operations. All kinds of operations are fragmented except the
+ // BSDIFF, SOURCE_BSDIFF, PUFFDIFF and BROTLI_BSDIFF operations.
+ // |target_part_path| is the filename of the new image that the destination
+ // extents refer to. The blobs of the operations in |aops| should reference
+ // |blob_file|. |blob_file| is updated if needed.
static bool FragmentOperations(const PayloadVersion& version,
std::vector<AnnotatedOperation>* aops,
const std::string& target_part_path,
diff --git a/payload_generator/ab_generator_unittest.cc b/payload_generator/ab_generator_unittest.cc
index ab4b164..25609c7 100644
--- a/payload_generator/ab_generator_unittest.cc
+++ b/payload_generator/ab_generator_unittest.cc
@@ -42,7 +42,9 @@
namespace {
-bool ExtentEquals(const Extent& ext, uint64_t start_block, uint64_t num_blocks) {
+bool ExtentEquals(const Extent& ext,
+ uint64_t start_block,
+ uint64_t num_blocks) {
return ext.start_block() == start_block && ext.num_blocks() == num_blocks;
}
@@ -85,7 +87,6 @@
op_ex1_num_blocks);
*(op.add_dst_extents()) = ExtentForRange(op_ex2_start_block,
op_ex2_num_blocks);
- op.set_dst_length(op_ex1_num_blocks + op_ex2_num_blocks);
brillo::Blob op_data;
op_data.insert(op_data.end(),
@@ -136,7 +137,8 @@
EXPECT_EQ("SplitTestOp:0", result_ops[0].name);
InstallOperation first_op = result_ops[0].op;
EXPECT_EQ(expected_type, first_op.type());
- EXPECT_EQ(op_ex1_size, first_op.dst_length());
+ EXPECT_FALSE(first_op.has_src_length());
+ EXPECT_FALSE(first_op.has_dst_length());
EXPECT_EQ(1, first_op.dst_extents().size());
EXPECT_TRUE(ExtentEquals(first_op.dst_extents(0), op_ex1_start_block,
op_ex1_num_blocks));
@@ -165,7 +167,8 @@
EXPECT_EQ("SplitTestOp:1", result_ops[1].name);
InstallOperation second_op = result_ops[1].op;
EXPECT_EQ(expected_type, second_op.type());
- EXPECT_EQ(op_ex2_size, second_op.dst_length());
+ EXPECT_FALSE(second_op.has_src_length());
+ EXPECT_FALSE(second_op.has_dst_length());
EXPECT_EQ(1, second_op.dst_extents().size());
EXPECT_TRUE(ExtentEquals(second_op.dst_extents(0), op_ex2_start_block,
op_ex2_num_blocks));
@@ -235,7 +238,6 @@
InstallOperation first_op;
first_op.set_type(orig_type);
const size_t first_op_size = first_op_num_blocks * kBlockSize;
- first_op.set_dst_length(first_op_size);
*(first_op.add_dst_extents()) = ExtentForRange(0, first_op_num_blocks);
brillo::Blob first_op_data(part_data.begin(),
part_data.begin() + first_op_size);
@@ -255,8 +257,6 @@
InstallOperation second_op;
second_op.set_type(orig_type);
- const size_t second_op_size = second_op_num_blocks * kBlockSize;
- second_op.set_dst_length(second_op_size);
*(second_op.add_dst_extents()) = ExtentForRange(first_op_num_blocks,
second_op_num_blocks);
brillo::Blob second_op_data(part_data.begin() + first_op_size,
@@ -302,7 +302,7 @@
InstallOperation new_op = aops[0].op;
EXPECT_EQ(expected_op_type, new_op.type());
EXPECT_FALSE(new_op.has_src_length());
- EXPECT_EQ(total_op_num_blocks * kBlockSize, new_op.dst_length());
+ EXPECT_FALSE(new_op.has_dst_length());
EXPECT_EQ(1, new_op.dst_extents().size());
EXPECT_TRUE(ExtentEquals(new_op.dst_extents(0), 0, total_op_num_blocks));
EXPECT_EQ("first,second", aops[0].name);
diff --git a/payload_generator/deflate_utils.cc b/payload_generator/deflate_utils.cc
new file mode 100644
index 0000000..88e42e0
--- /dev/null
+++ b/payload_generator/deflate_utils.cc
@@ -0,0 +1,295 @@
+//
+// Copyright (C) 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_generator/deflate_utils.h"
+
+#include <algorithm>
+#include <string>
+#include <utility>
+
+#include <base/files/file_util.h>
+#include <base/logging.h>
+#include <base/strings/string_util.h>
+
+#include "update_engine/common/utils.h"
+#include "update_engine/payload_generator/delta_diff_generator.h"
+#include "update_engine/payload_generator/extent_ranges.h"
+#include "update_engine/payload_generator/extent_utils.h"
+#include "update_engine/payload_generator/squashfs_filesystem.h"
+#include "update_engine/update_metadata.pb.h"
+
+using std::string;
+using std::vector;
+using puffin::BitExtent;
+using puffin::ByteExtent;
+
+namespace chromeos_update_engine {
+namespace deflate_utils {
+namespace {
+
+// The minimum size for a squashfs image to be processed.
+const uint64_t kMinimumSquashfsImageSize = 1 * 1024 * 1024; // bytes
+
+// TODO(*): Optimize this so we don't have to read all extents into memory in
+// case it is large.
+bool CopyExtentsToFile(const string& in_path,
+ const vector<Extent> extents,
+ const string& out_path,
+ size_t block_size) {
+ brillo::Blob data(utils::BlocksInExtents(extents) * block_size);
+ TEST_AND_RETURN_FALSE(
+ utils::ReadExtents(in_path, extents, &data, data.size(), block_size));
+ TEST_AND_RETURN_FALSE(
+ utils::WriteFile(out_path.c_str(), data.data(), data.size()));
+ return true;
+}
+
+bool IsSquashfsImage(const string& part_path,
+ const FilesystemInterface::File& file) {
+ // Only check for files with img postfix.
+ if (base::EndsWith(file.name, ".img", base::CompareCase::SENSITIVE) &&
+ utils::BlocksInExtents(file.extents) >=
+ kMinimumSquashfsImageSize / kBlockSize) {
+ brillo::Blob super_block;
+ TEST_AND_RETURN_FALSE(
+ utils::ReadFileChunk(part_path,
+ file.extents[0].start_block() * kBlockSize,
+ 100,
+ &super_block));
+ return SquashfsFilesystem::IsSquashfsImage(super_block);
+ }
+ return false;
+}
+
+// Realigns the subfiles |files| of a split file |file| into their correct
+// positions. This can be used for squashfs, zip, apk, etc.
+bool RealignSplittedFiles(const FilesystemInterface::File& file,
+ vector<FilesystemInterface::File>* files) {
+ // We have to shift all the Extents in |files|, based on the Extents of the
+ // |file| itself.
+ size_t num_blocks = 0;
+ for (auto& in_file : *files) { // We need to modify, so not const.
+ TEST_AND_RETURN_FALSE(
+ ShiftExtentsOverExtents(file.extents, &in_file.extents));
+ TEST_AND_RETURN_FALSE(
+ ShiftBitExtentsOverExtents(file.extents, &in_file.deflates));
+
+ in_file.name = file.name + "/" + in_file.name;
+ num_blocks += utils::BlocksInExtents(in_file.extents);
+ }
+
+ // Check that all files in |files| cover the entire image.
+ TEST_AND_RETURN_FALSE(utils::BlocksInExtents(file.extents) == num_blocks);
+ return true;
+}
+
+bool IsBitExtentInExtent(const Extent& extent, const BitExtent& bit_extent) {
+ return (bit_extent.offset / 8) >= (extent.start_block() * kBlockSize) &&
+ ((bit_extent.offset + bit_extent.length + 7) / 8) <=
+ ((extent.start_block() + extent.num_blocks()) * kBlockSize);
+}
+
+} // namespace
+
+ByteExtent ExpandToByteExtent(const BitExtent& extent) {
+ uint64_t offset = extent.offset / 8;
+ uint64_t length = ((extent.offset + extent.length + 7) / 8) - offset;
+ return {offset, length};
+}
+
+bool ShiftExtentsOverExtents(const vector<Extent>& base_extents,
+ vector<Extent>* over_extents) {
+ if (utils::BlocksInExtents(base_extents) <
+ utils::BlocksInExtents(*over_extents)) {
+ LOG(ERROR) << "over_extents have more blocks than base_extents! Invalid!";
+ return false;
+ }
+ for (size_t idx = 0; idx < over_extents->size(); idx++) {
+ auto over_ext = &over_extents->at(idx);
+ auto gap_blocks = base_extents[0].start_block();
+ auto last_end_block = base_extents[0].start_block();
+ for (auto base_ext : base_extents) { // We need to modify |base_ext|, so we
+ // use a copy.
+ gap_blocks += base_ext.start_block() - last_end_block;
+ last_end_block = base_ext.start_block() + base_ext.num_blocks();
+ base_ext.set_start_block(base_ext.start_block() - gap_blocks);
+ if (over_ext->start_block() >= base_ext.start_block() &&
+ over_ext->start_block() <
+ base_ext.start_block() + base_ext.num_blocks()) {
+ if (over_ext->start_block() + over_ext->num_blocks() <=
+ base_ext.start_block() + base_ext.num_blocks()) {
+ // |over_ext| is inside |base_ext|, increase its start block.
+ over_ext->set_start_block(over_ext->start_block() + gap_blocks);
+ } else {
+ // |over_ext| spills over this |base_ext|, split it into two.
+ auto new_blocks = base_ext.start_block() + base_ext.num_blocks() -
+ over_ext->start_block();
+ vector<Extent> new_extents = {
+ ExtentForRange(gap_blocks + over_ext->start_block(), new_blocks),
+ ExtentForRange(over_ext->start_block() + new_blocks,
+ over_ext->num_blocks() - new_blocks)};
+ *over_ext = new_extents[0];
+ over_extents->insert(std::next(over_extents->begin(), idx + 1),
+ new_extents[1]);
+ }
+ break; // We processed |over_ext|, so break the loop.
+ }
+ }
+ }
+ return true;
+}
+
+bool ShiftBitExtentsOverExtents(const vector<Extent>& base_extents,
+ vector<BitExtent>* over_extents) {
+ if (over_extents->empty()) {
+ return true;
+ }
+
+ // This check is needed to make sure the number of bytes in |over_extents|
+ // does not exceed the number of bytes covered by |base_extents|.
+ auto last_extent = ExpandToByteExtent(over_extents->back());
+ TEST_AND_RETURN_FALSE(last_extent.offset + last_extent.length <=
+ utils::BlocksInExtents(base_extents) * kBlockSize);
+
+ for (auto o_ext = over_extents->begin(); o_ext != over_extents->end();) {
+ size_t gap_blocks = base_extents[0].start_block();
+ size_t last_end_block = base_extents[0].start_block();
+ bool o_ext_processed = false;
+ for (auto b_ext : base_extents) { // We need to modify |b_ext|, so we copy.
+ gap_blocks += b_ext.start_block() - last_end_block;
+ last_end_block = b_ext.start_block() + b_ext.num_blocks();
+ b_ext.set_start_block(b_ext.start_block() - gap_blocks);
+ auto byte_o_ext = ExpandToByteExtent(*o_ext);
+ if (byte_o_ext.offset >= b_ext.start_block() * kBlockSize &&
+ byte_o_ext.offset <
+ (b_ext.start_block() + b_ext.num_blocks()) * kBlockSize) {
+ if ((byte_o_ext.offset + byte_o_ext.length) <=
+ (b_ext.start_block() + b_ext.num_blocks()) * kBlockSize) {
+ // |o_ext| is inside |b_ext|, increase its start block.
+ o_ext->offset += gap_blocks * kBlockSize * 8;
+ ++o_ext;
+ } else {
+ // |o_ext| spills over this |b_ext|, remove it.
+ o_ext = over_extents->erase(o_ext);
+ }
+ o_ext_processed = true;
+ break; // We processed |o_ext|, so break the loop.
+ }
+ }
+ TEST_AND_RETURN_FALSE(o_ext_processed);
+ }
+ return true;
+}
+
+vector<BitExtent> FindDeflates(const vector<Extent>& extents,
+ const vector<BitExtent>& in_deflates) {
+ vector<BitExtent> result;
+ // TODO(ahassani): Replace this with binary_search style search.
+ for (const auto& deflate : in_deflates) {
+ for (const auto& extent : extents) {
+ if (IsBitExtentInExtent(extent, deflate)) {
+ result.push_back(deflate);
+ break;
+ }
+ }
+ }
+ return result;
+}
+
+bool CompactDeflates(const vector<Extent>& extents,
+ const vector<BitExtent>& in_deflates,
+ vector<BitExtent>* out_deflates) {
+ size_t bytes_passed = 0;
+ out_deflates->reserve(in_deflates.size());
+ for (const auto& extent : extents) {
+ size_t gap_bytes = extent.start_block() * kBlockSize - bytes_passed;
+ for (const auto& deflate : in_deflates) {
+ if (IsBitExtentInExtent(extent, deflate)) {
+ out_deflates->emplace_back(deflate.offset - (gap_bytes * 8),
+ deflate.length);
+ }
+ }
+ bytes_passed += extent.num_blocks() * kBlockSize;
+ }
+
+ // All given |in_deflates| items should have been inside one of the extents
+ // in |extents|.
+ TEST_AND_RETURN_FALSE(in_deflates.size() == out_deflates->size());
+
+ // Make sure all outgoing deflates are ordered and non-overlapping.
+ auto result = std::adjacent_find(out_deflates->begin(),
+ out_deflates->end(),
+ [](const BitExtent& a, const BitExtent& b) {
+ return (a.offset + a.length) > b.offset;
+ });
+ TEST_AND_RETURN_FALSE(result == out_deflates->end());
+ return true;
+}
+
+bool FindAndCompactDeflates(const vector<Extent>& extents,
+ const vector<BitExtent>& in_deflates,
+ vector<BitExtent>* out_deflates) {
+ auto found_deflates = FindDeflates(extents, in_deflates);
+ TEST_AND_RETURN_FALSE(CompactDeflates(extents, found_deflates, out_deflates));
+ return true;
+}
+
+bool PreprocessParitionFiles(const PartitionConfig& part,
+ vector<FilesystemInterface::File>* result_files,
+ bool extract_deflates) {
+ // Get the file system files.
+ vector<FilesystemInterface::File> tmp_files;
+ part.fs_interface->GetFiles(&tmp_files);
+ result_files->reserve(tmp_files.size());
+
+ for (const auto& file : tmp_files) {
+ if (IsSquashfsImage(part.path, file)) {
+ // Read the image into a file.
+ base::FilePath path;
+ TEST_AND_RETURN_FALSE(base::CreateTemporaryFile(&path));
+ ScopedPathUnlinker old_unlinker(path.value());
+ TEST_AND_RETURN_FALSE(
+ CopyExtentsToFile(part.path, file.extents, path.value(), kBlockSize));
+ // Test if it is actually a Squashfs file.
+ auto sqfs =
+ SquashfsFilesystem::CreateFromFile(path.value(), extract_deflates);
+ if (sqfs) {
+ // It is a squashfs file. Get its files to replace it with.
+ vector<FilesystemInterface::File> files;
+ sqfs->GetFiles(&files);
+
+ // Replace the squashfs file with its files only if |files| has at least two
+ // files, or if it has some deflates (since it is better to replace it and
+ // take advantage of the deflates).
+ if (files.size() > 1 ||
+ (files.size() == 1 && !files[0].deflates.empty())) {
+ TEST_AND_RETURN_FALSE(RealignSplittedFiles(file, &files));
+ result_files->insert(result_files->end(), files.begin(), files.end());
+ continue;
+ }
+ } else {
+ LOG(WARNING) << "We thought file: " << file.name
+ << " was a Squashfs file, but it was not.";
+ }
+ }
+ // TODO(ahassani): Process other types of files like apk, zip, etc.
+ result_files->push_back(file);
+ }
+ return true;
+}
+
+} // namespace deflate_utils
+} // namespace chromeos_update_engine
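A hedged sketch of what PreprocessParitionFiles() produces for a partition that embeds a squashfs image (the file names are illustrative): the container entry is replaced by its contained files, each with extents shifted into partition coordinates by RealignSplittedFiles() and with its deflate locations preserved for later PUFFDIFF use.

  std::vector<FilesystemInterface::File> files;
  TEST_AND_RETURN_FALSE(deflate_utils::PreprocessParitionFiles(
      part, &files, /*extract_deflates=*/true));
  // An entry such as "/foo.img" is replaced by entries like "/foo.img/bin/bar",
  // unless the image contains a single file with no deflates, in which case the
  // original "/foo.img" entry is kept.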
diff --git a/payload_generator/deflate_utils.h b/payload_generator/deflate_utils.h
new file mode 100644
index 0000000..798ce25
--- /dev/null
+++ b/payload_generator/deflate_utils.h
@@ -0,0 +1,97 @@
+//
+// Copyright (C) 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_PAYLOAD_GENERATOR_DEFLATE_UTILS_H_
+#define UPDATE_ENGINE_PAYLOAD_GENERATOR_DEFLATE_UTILS_H_
+
+#include <puffin/puffdiff.h>
+#include <vector>
+
+#include "update_engine/payload_generator/filesystem_interface.h"
+#include "update_engine/payload_generator/payload_generation_config.h"
+
+namespace chromeos_update_engine {
+namespace deflate_utils {
+
+// Gets the files from the partition and processes all its files. Processing
+// includes:
+// - splitting large Squashfs containers into their smaller files.
+bool PreprocessParitionFiles(const PartitionConfig& part,
+ std::vector<FilesystemInterface::File>* result,
+ bool extract_deflates);
+
+// Spreads all extents in |over_extents| over |base_extents|. Here we assume the
+// |over_extents| are non-overlapping and sorted by their offset.
+//
+// |base_extents|:
+// | ----------------------- ------ --------------
+// |over_extents|:
+// | ========== ==== ========== ======
+// |over_extents| is transformed to:
+// | ========== ==== = ====== === ======
+//
+bool ShiftExtentsOverExtents(const std::vector<Extent>& base_extents,
+ std::vector<Extent>* over_extents);
+
+// Spreads all extents in |over_extents| over |base_extents|. Here we assume the
+// |over_extents| are non-overlapping and sorted by their offset. An item in
+// |over_extents| is removed if it is spread over two or more extents in
+// |base_extents|.
+//
+// |base_extents|:
+// | ----------------------- ------ --------------
+// |over_extents|:
+// | ========== ==== ========== ======
+// |over_extents| is transformed to:
+// | ========== ==== ======
+//
+bool ShiftBitExtentsOverExtents(const std::vector<Extent>& base_extents,
+ std::vector<puffin::BitExtent>* over_extents);
+
+// Finds all deflate locations in |deflates| that are inside an Extent in
+// |extents|. This function should not change the order of deflates.
+std::vector<puffin::BitExtent> FindDeflates(
+ const std::vector<Extent>& extents,
+ const std::vector<puffin::BitExtent>& deflates);
+
+// Creates a new list of deflate locations (|out_deflates|) from |in_deflates|
+// by assuming all extents in the |extents| have been put together
+// linearly. This function assumes that all deflate locations given in
+// |in_deflates| are located somewhere in the |extents|. |out_deflates| should
+// be empty on call.
+//
+// |extents|:
+// | ----------------------- ------ --------------
+// |in_deflates|:
+// | ======== ==== ==== ======
+// |out_deflates|:
+// | ======== ==== ==== ======
+//
+bool CompactDeflates(const std::vector<Extent>& extents,
+ const std::vector<puffin::BitExtent>& in_deflates,
+ std::vector<puffin::BitExtent>* out_deflates);
+
+// Combines |FindDeflates| and |CompactDeflates| for ease of use.
+bool FindAndCompactDeflates(const std::vector<Extent>& extents,
+ const std::vector<puffin::BitExtent>& in_deflates,
+ std::vector<puffin::BitExtent>* out_deflates);
+
+// Expands a BitExtent to a ByteExtent.
+puffin::ByteExtent ExpandToByteExtent(const puffin::BitExtent& extent);
+
+} // namespace deflate_utils
+} // namespace chromeos_update_engine
+#endif // UPDATE_ENGINE_PAYLOAD_GENERATOR_DEFLATE_UTILS_H_
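ExpandToByteExtent() rounds a bit range out to the bytes it touches. A small worked example matching the arithmetic in deflate_utils.cc above: a deflate spanning bits [10, 30) starts in byte 1 and ends in byte 3, so it expands to a 3-byte ByteExtent.

  puffin::BitExtent deflate(10, 20);  // offset 10 bits, length 20 bits
  puffin::ByteExtent bytes = deflate_utils::ExpandToByteExtent(deflate);
  // bytes == {offset: 1, length: 3}

  // Typical flow when carving per-file deflates out of a partition-wide list
  // (|file.extents| and |all_deflates| are assumed to come from the caller):
  std::vector<puffin::BitExtent> file_deflates;
  TEST_AND_RETURN_FALSE(deflate_utils::FindAndCompactDeflates(
      file.extents, all_deflates, &file_deflates));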
diff --git a/payload_generator/deflate_utils_unittest.cc b/payload_generator/deflate_utils_unittest.cc
new file mode 100644
index 0000000..cb9476a
--- /dev/null
+++ b/payload_generator/deflate_utils_unittest.cc
@@ -0,0 +1,190 @@
+//
+// Copyright (C) 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_generator/deflate_utils.h"
+
+#include <unistd.h>
+
+#include <algorithm>
+#include <utility>
+#include <vector>
+
+#include <gtest/gtest.h>
+
+#include "update_engine/common/test_utils.h"
+#include "update_engine/common/utils.h"
+#include "update_engine/payload_generator/extent_ranges.h"
+#include "update_engine/payload_generator/extent_utils.h"
+
+using std::vector;
+using puffin::BitExtent;
+using puffin::ByteExtent;
+
+namespace chromeos_update_engine {
+namespace deflate_utils {
+
+// This creates pseudo-random BitExtents from ByteExtents for simpler testing.
+vector<BitExtent> ByteToBitExtent(const vector<ByteExtent>& byte_extents) {
+ vector<BitExtent> bit_extents;
+ for (auto& byte_extent : byte_extents) {
+ bit_extents.emplace_back(byte_extent.offset * 8 + (byte_extent.offset & 7),
+ byte_extent.length * 8 - (byte_extent.length & 7));
+ }
+ return bit_extents;
+}
+
+TEST(DeflateUtilsTest, ExtentsShiftTest) {
+ vector<Extent> base_extents = {ExtentForRange(10, 10),
+ ExtentForRange(70, 10),
+ ExtentForRange(50, 10),
+ ExtentForRange(30, 10),
+ ExtentForRange(90, 10)};
+ vector<Extent> over_extents = {ExtentForRange(2, 2),
+ ExtentForRange(5, 2),
+ ExtentForRange(7, 3),
+ ExtentForRange(13, 10),
+ ExtentForRange(25, 20),
+ ExtentForRange(47, 3)};
+ vector<Extent> out_over_extents = {ExtentForRange(12, 2),
+ ExtentForRange(15, 2),
+ ExtentForRange(17, 3),
+ ExtentForRange(73, 7),
+ ExtentForRange(50, 3),
+ ExtentForRange(55, 5),
+ ExtentForRange(30, 10),
+ ExtentForRange(90, 5),
+ ExtentForRange(97, 3)};
+ EXPECT_TRUE(ShiftExtentsOverExtents(base_extents, &over_extents));
+ EXPECT_EQ(over_extents, out_over_extents);
+
+ // Failure case
+ base_extents = {ExtentForRange(10, 10)};
+ over_extents = {ExtentForRange(2, 12)};
+ EXPECT_FALSE(ShiftExtentsOverExtents(base_extents, &over_extents));
+}
+
+TEST(DeflateUtilsTest, ShiftBitExtentsOverExtentsTest) {
+ vector<Extent> base_extents = {ExtentForRange(3, 1),
+ ExtentForRange(1, 1),
+ ExtentForRange(5, 1),
+ ExtentForRange(7, 1),
+ ExtentForRange(9, 1)};
+ vector<BitExtent> over_extents =
+ ByteToBitExtent({{0, 0}, {100, 2000}, {4096, 0}, {5000, 5000}});
+ vector<BitExtent> out_over_extents =
+ ByteToBitExtent({{12288, 0}, {12388, 2000}, {4096, 0}});
+ ASSERT_TRUE(ShiftBitExtentsOverExtents(base_extents, &over_extents));
+ EXPECT_EQ(over_extents, out_over_extents);
+}
+
+TEST(DeflateUtilsTest, ShiftBitExtentsOverExtentsBoundaryTest) {
+ vector<Extent> base_extents = {ExtentForRange(1, 1)};
+ vector<BitExtent> over_extents = ByteToBitExtent({{2, 4096}});
+ vector<BitExtent> out_over_extents = {};
+ EXPECT_FALSE(ShiftBitExtentsOverExtents(base_extents, &over_extents));
+
+ base_extents = {ExtentForRange(1, 1)};
+ over_extents = {};
+ out_over_extents = {};
+ EXPECT_TRUE(ShiftBitExtentsOverExtents(base_extents, &over_extents));
+ EXPECT_EQ(over_extents, out_over_extents);
+
+ base_extents = {};
+ over_extents = {};
+ out_over_extents = {};
+ EXPECT_TRUE(ShiftBitExtentsOverExtents(base_extents, &over_extents));
+ EXPECT_EQ(over_extents, out_over_extents);
+
+ base_extents = {};
+ over_extents = ByteToBitExtent({{0, 1}});
+ out_over_extents = ByteToBitExtent({{0, 1}});
+ EXPECT_FALSE(ShiftBitExtentsOverExtents(base_extents, &over_extents));
+ EXPECT_EQ(over_extents, out_over_extents);
+
+ base_extents = {ExtentForRange(1, 2)};
+ over_extents = ByteToBitExtent({{0, 3 * 4096}, {4 * 4096, 4096}});
+ out_over_extents = ByteToBitExtent({{0, 3 * 4096}, {4 * 4096, 4096}});
+ EXPECT_FALSE(ShiftBitExtentsOverExtents(base_extents, &over_extents));
+ EXPECT_EQ(over_extents, out_over_extents);
+}
+
+TEST(DeflateUtilsTest, FindDeflatesTest) {
+ vector<Extent> extents = {
+ ExtentForRange(1, 1), ExtentForRange(3, 1), ExtentForRange(5, 1)};
+ vector<BitExtent> in_deflates = ByteToBitExtent({{0, 0},
+ {10, 400},
+ {4096, 0},
+ {3000, 2000},
+ {4096, 100},
+ {4097, 100},
+ {8100, 92},
+ {8100, 93},
+ {8100, 6000},
+ {25000, 1}});
+ vector<BitExtent> expected_out_deflates =
+ ByteToBitExtent({{4096, 0}, {4096, 100}, {4097, 100}, {8100, 92}});
+ vector<BitExtent> out_deflates;
+ out_deflates = FindDeflates(extents, in_deflates);
+ EXPECT_EQ(out_deflates, expected_out_deflates);
+}
+
+TEST(DeflateUtilsTest, FindDeflatesBoundaryTest) {
+ vector<Extent> extents = {};
+ vector<BitExtent> in_deflates = ByteToBitExtent({{0, 0}, {8100, 93}});
+ vector<BitExtent> expected_out_deflates = {};
+ vector<BitExtent> out_deflates;
+ out_deflates = FindDeflates(extents, in_deflates);
+ EXPECT_EQ(out_deflates, expected_out_deflates);
+
+ extents = {};
+ in_deflates = {};
+ out_deflates = FindDeflates(extents, in_deflates);
+ EXPECT_EQ(out_deflates, expected_out_deflates);
+}
+
+TEST(DeflateUtilsTest, CompactTest) {
+ vector<Extent> extents = {
+ ExtentForRange(1, 1), ExtentForRange(5, 1), ExtentForRange(3, 1)};
+ vector<BitExtent> in_deflates =
+ ByteToBitExtent({{4096, 0}, {12288, 4096}, {4096, 100}, {20480, 100}});
+ vector<BitExtent> expected_out_deflates =
+ ByteToBitExtent({{0, 0}, {0, 100}, {4096, 100}, {8192, 4096}});
+ vector<BitExtent> out_deflates;
+ ASSERT_TRUE(CompactDeflates(extents, in_deflates, &out_deflates));
+ EXPECT_EQ(out_deflates, expected_out_deflates);
+}
+
+TEST(DeflateUtilsTest, CompactBoundaryTest) {
+ vector<Extent> extents = {};
+ vector<BitExtent> in_deflates = ByteToBitExtent({{4096, 0}});
+ vector<BitExtent> expected_out_deflates = {};
+ vector<BitExtent> out_deflates;
+ EXPECT_FALSE(CompactDeflates(extents, in_deflates, &out_deflates));
+ EXPECT_EQ(out_deflates, expected_out_deflates);
+
+ extents = {};
+ in_deflates = {};
+ ASSERT_TRUE(CompactDeflates(extents, in_deflates, &out_deflates));
+ EXPECT_EQ(out_deflates, expected_out_deflates);
+
+ extents = {ExtentForRange(1, 1)};
+ in_deflates = {};
+ ASSERT_TRUE(CompactDeflates(extents, in_deflates, &out_deflates));
+ EXPECT_EQ(out_deflates, expected_out_deflates);
+}
+
+} // namespace deflate_utils
+} // namespace chromeos_update_engine
diff --git a/payload_generator/delta_diff_utils.cc b/payload_generator/delta_diff_utils.cc
index e928912..bcbc3a5 100644
--- a/payload_generator/delta_diff_utils.cc
+++ b/payload_generator/delta_diff_utils.cc
@@ -17,30 +17,41 @@
#include "update_engine/payload_generator/delta_diff_utils.h"
#include <endian.h>
-// TODO: Remove these pragmas when b/35721782 is fixed.
+#if defined(__clang__)
+// TODO(*): Remove these pragmas when b/35721782 is fixed.
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wmacro-redefined"
+#endif
#include <ext2fs/ext2fs.h>
+#if defined(__clang__)
#pragma clang diagnostic pop
+#endif
#include <unistd.h>
#include <algorithm>
#include <map>
+#include <memory>
+#include <utility>
#include <base/files/file_util.h>
#include <base/format_macros.h>
+#include <base/strings/string_util.h>
#include <base/strings/stringprintf.h>
#include <base/threading/simple_thread.h>
#include <brillo/data_encoding.h>
+#include <bsdiff/bsdiff.h>
#include "update_engine/common/hash_calculator.h"
#include "update_engine/common/subprocess.h"
#include "update_engine/common/utils.h"
+#include "update_engine/payload_consumer/payload_constants.h"
#include "update_engine/payload_generator/block_mapping.h"
#include "update_engine/payload_generator/bzip.h"
+#include "update_engine/payload_generator/deflate_utils.h"
#include "update_engine/payload_generator/delta_diff_generator.h"
#include "update_engine/payload_generator/extent_ranges.h"
#include "update_engine/payload_generator/extent_utils.h"
+#include "update_engine/payload_generator/squashfs_filesystem.h"
#include "update_engine/payload_generator/xz.h"
using std::map;
@@ -50,9 +61,6 @@
namespace chromeos_update_engine {
namespace {
-const char* const kBsdiffPath = "bsdiff";
-const char* const kImgdiffPath = "imgdiff";
-
// The maximum destination size allowed for bsdiff. In general, bsdiff should
// work for arbitrary big files, but the payload generation and payload
// application requires a significant amount of RAM. We put a hard-limit of
@@ -60,10 +68,10 @@
// Chrome binary in ASan builders.
const uint64_t kMaxBsdiffDestinationSize = 200 * 1024 * 1024; // bytes
-// The maximum destination size allowed for imgdiff. In general, imgdiff should
-// work for arbitrary big files, but the payload application is quite memory
-// intensive, so we limit these operations to 50 MiB.
-const uint64_t kMaxImgdiffDestinationSize = 50 * 1024 * 1024; // bytes
+// The maximum destination size allowed for puffdiff. In general, puffdiff
+// should work for arbitrary big files, but the payload application is quite
+// memory intensive, so we limit these operations to 150 MiB.
+const uint64_t kMaxPuffdiffDestinationSize = 150 * 1024 * 1024; // bytes
// Process a range of blocks from |range_start| to |range_end| in the extent at
// position |*idx_p| of |extents|. If |do_remove| is true, this range will be
@@ -160,15 +168,6 @@
return removed_bytes;
}
-// Returns true if the given blob |data| contains gzip header magic.
-bool ContainsGZip(const brillo::Blob& data) {
- const uint8_t kGZipMagic[] = {0x1f, 0x8b, 0x08, 0x00};
- return std::search(data.begin(),
- data.end(),
- std::begin(kGZipMagic),
- std::end(kGZipMagic)) != data.end();
-}
-
} // namespace
namespace diff_utils {
@@ -183,6 +182,8 @@
const PayloadVersion& version,
const vector<Extent>& old_extents,
const vector<Extent>& new_extents,
+ const vector<puffin::BitExtent>& old_deflates,
+ const vector<puffin::BitExtent>& new_deflates,
const string& name,
ssize_t chunk_blocks,
BlobFileWriter* blob_file)
@@ -191,6 +192,8 @@
version_(version),
old_extents_(old_extents),
new_extents_(new_extents),
+ old_deflates_(old_deflates),
+ new_deflates_(new_deflates),
name_(name),
chunk_blocks_(chunk_blocks),
blob_file_(blob_file) {}
@@ -215,6 +218,8 @@
// The block ranges of the old/new file within the src/tgt image
const vector<Extent> old_extents_;
const vector<Extent> new_extents_;
+ const vector<puffin::BitExtent> old_deflates_;
+ const vector<puffin::BitExtent> new_deflates_;
const string name_;
// Block limit of one aop.
ssize_t chunk_blocks_;
@@ -229,17 +234,22 @@
void FileDeltaProcessor::Run() {
TEST_AND_RETURN(blob_file_ != nullptr);
+ LOG(INFO) << "Encoding file " << name_ << " ("
+ << utils::BlocksInExtents(new_extents_) << " blocks)";
+
if (!DeltaReadFile(&file_aops_,
old_part_,
new_part_,
old_extents_,
new_extents_,
+ old_deflates_,
+ new_deflates_,
name_,
chunk_blocks_,
version_,
blob_file_)) {
LOG(ERROR) << "Failed to generate delta for " << name_ << " ("
- << BlocksInExtents(new_extents_) << " blocks)";
+ << utils::BlocksInExtents(new_extents_) << " blocks)";
}
}
@@ -270,17 +280,20 @@
&old_visited_blocks,
&new_visited_blocks));
- map<string, vector<Extent>> old_files_map;
+ bool puffdiff_allowed = version.OperationAllowed(InstallOperation::PUFFDIFF);
+ map<string, FilesystemInterface::File> old_files_map;
if (old_part.fs_interface) {
vector<FilesystemInterface::File> old_files;
- old_part.fs_interface->GetFiles(&old_files);
+ TEST_AND_RETURN_FALSE(deflate_utils::PreprocessParitionFiles(
+ old_part, &old_files, puffdiff_allowed));
for (const FilesystemInterface::File& file : old_files)
- old_files_map[file.name] = file.extents;
+ old_files_map[file.name] = file;
}
TEST_AND_RETURN_FALSE(new_part.fs_interface);
vector<FilesystemInterface::File> new_files;
- new_part.fs_interface->GetFiles(&new_files);
+ TEST_AND_RETURN_FALSE(deflate_utils::PreprocessParitionFiles(
+ new_part, &new_files, puffdiff_allowed));
vector<FileDeltaProcessor> file_delta_processors;
@@ -303,9 +316,6 @@
if (new_file_extents.empty())
continue;
- LOG(INFO) << "Encoding file " << new_file.name << " ("
- << BlocksInExtents(new_file_extents) << " blocks)";
-
// We can't visit each dst image inode more than once, as that would
// duplicate work. Here, we avoid visiting each source image inode
// more than once. Technically, we could have multiple operations
@@ -314,8 +324,9 @@
// from using a graph/cycle detection/etc to generate diffs, and at that
// time, it will be easy (non-complex) to have many operations read
// from the same source blocks. At that time, this code can die. -adlr
- vector<Extent> old_file_extents = FilterExtentRanges(
- old_files_map[new_file.name], old_visited_blocks);
+ auto old_file = old_files_map[new_file.name];
+ vector<Extent> old_file_extents =
+ FilterExtentRanges(old_file.extents, old_visited_blocks);
old_visited_blocks.AddExtents(old_file_extents);
file_delta_processors.emplace_back(old_part.path,
@@ -323,6 +334,8 @@
version,
std::move(old_file_extents),
std::move(new_file_extents),
+ old_file.deflates,
+ new_file.deflates,
new_file.name, // operation name
hard_chunk_blocks,
blob_file);
@@ -355,9 +368,9 @@
old_unvisited = FilterExtentRanges(old_unvisited, old_visited_blocks);
}
- LOG(INFO) << "Scanning " << BlocksInExtents(new_unvisited)
- << " unwritten blocks using chunk size of "
- << soft_chunk_blocks << " blocks.";
+ LOG(INFO) << "Scanning " << utils::BlocksInExtents(new_unvisited)
+ << " unwritten blocks using chunk size of " << soft_chunk_blocks
+ << " blocks.";
// We use the soft_chunk_blocks limit for the <non-file-data> as we don't
// really know the structure of this data and we should not expect it to have
// redundancy between partitions.
@@ -366,6 +379,8 @@
new_part.path,
old_unvisited,
new_unvisited,
+ {}, // old_deflates,
+ {}, // new_deflates
"<non-file-data>", // operation name
soft_chunk_blocks,
version,
@@ -470,13 +485,15 @@
new_part,
vector<Extent>(), // old_extents
vector<Extent>{extent}, // new_extents
+ {}, // old_deflates
+ {}, // new_deflates
"<zeros>",
chunk_blocks,
version,
blob_file));
}
LOG(INFO) << "Produced " << (aops->size() - num_ops) << " operations for "
- << BlocksInExtents(new_zeros) << " zeroed blocks";
+ << utils::BlocksInExtents(new_zeros) << " zeroed blocks";
// Produce MOVE/SOURCE_COPY operations for the moved blocks.
num_ops = aops->size();
@@ -530,6 +547,8 @@
const string& new_part,
const vector<Extent>& old_extents,
const vector<Extent>& new_extents,
+ const vector<puffin::BitExtent>& old_deflates,
+ const vector<puffin::BitExtent>& new_deflates,
const string& name,
ssize_t chunk_blocks,
const PayloadVersion& version,
@@ -537,7 +556,7 @@
brillo::Blob data;
InstallOperation operation;
- uint64_t total_blocks = BlocksInExtents(new_extents);
+ uint64_t total_blocks = utils::BlocksInExtents(new_extents);
if (chunk_blocks == -1)
chunk_blocks = total_blocks;
@@ -558,6 +577,8 @@
new_part,
old_extents_chunk,
new_extents_chunk,
+ old_deflates,
+ new_deflates,
version,
&data,
&operation));
@@ -648,16 +669,18 @@
const string& new_part,
const vector<Extent>& old_extents,
const vector<Extent>& new_extents,
+ const vector<puffin::BitExtent>& old_deflates,
+ const vector<puffin::BitExtent>& new_deflates,
const PayloadVersion& version,
brillo::Blob* out_data,
InstallOperation* out_op) {
InstallOperation operation;
// We read blocks from old_extents and write blocks to new_extents.
- uint64_t blocks_to_read = BlocksInExtents(old_extents);
- uint64_t blocks_to_write = BlocksInExtents(new_extents);
+ uint64_t blocks_to_read = utils::BlocksInExtents(old_extents);
+ uint64_t blocks_to_write = utils::BlocksInExtents(new_extents);
- // Disable bsdiff and imgdiff when the data is too big.
+ // Disable bsdiff and puffdiff when the data is too big.
bool bsdiff_allowed =
version.OperationAllowed(InstallOperation::SOURCE_BSDIFF) ||
version.OperationAllowed(InstallOperation::BSDIFF);
@@ -668,12 +691,12 @@
bsdiff_allowed = false;
}
- bool imgdiff_allowed = version.OperationAllowed(InstallOperation::IMGDIFF);
- if (imgdiff_allowed &&
- blocks_to_read * kBlockSize > kMaxImgdiffDestinationSize) {
- LOG(INFO) << "imgdiff blacklisted, data too big: "
+ bool puffdiff_allowed = version.OperationAllowed(InstallOperation::PUFFDIFF);
+ if (puffdiff_allowed &&
+ blocks_to_read * kBlockSize > kMaxPuffdiffDestinationSize) {
+ LOG(INFO) << "puffdiff blacklisted, data too big: "
<< blocks_to_read * kBlockSize << " bytes";
- imgdiff_allowed = false;
+ puffdiff_allowed = false;
}
// Make copies of the extents so we can modify them.
@@ -711,24 +734,21 @@
? InstallOperation::SOURCE_COPY
: InstallOperation::MOVE);
data_blob = brillo::Blob();
- } else if (bsdiff_allowed || imgdiff_allowed) {
- // If the source file is considered bsdiff safe (no bsdiff bugs
- // triggered), see if BSDIFF encoding is smaller.
- base::FilePath old_chunk;
- TEST_AND_RETURN_FALSE(base::CreateTemporaryFile(&old_chunk));
- ScopedPathUnlinker old_unlinker(old_chunk.value());
- TEST_AND_RETURN_FALSE(utils::WriteFile(
- old_chunk.value().c_str(), old_data.data(), old_data.size()));
- base::FilePath new_chunk;
- TEST_AND_RETURN_FALSE(base::CreateTemporaryFile(&new_chunk));
- ScopedPathUnlinker new_unlinker(new_chunk.value());
- TEST_AND_RETURN_FALSE(utils::WriteFile(
- new_chunk.value().c_str(), new_data.data(), new_data.size()));
-
+ } else {
if (bsdiff_allowed) {
+ base::FilePath patch;
+ TEST_AND_RETURN_FALSE(base::CreateTemporaryFile(&patch));
+ ScopedPathUnlinker unlinker(patch.value());
+
brillo::Blob bsdiff_delta;
- TEST_AND_RETURN_FALSE(DiffFiles(
- kBsdiffPath, old_chunk.value(), new_chunk.value(), &bsdiff_delta));
+ TEST_AND_RETURN_FALSE(0 == bsdiff::bsdiff(old_data.data(),
+ old_data.size(),
+ new_data.data(),
+ new_data.size(),
+ patch.value().c_str(),
+ nullptr));
+
+ TEST_AND_RETURN_FALSE(utils::ReadFile(patch.value(), &bsdiff_delta));
CHECK_GT(bsdiff_delta.size(), static_cast<brillo::Blob::size_type>(0));
if (bsdiff_delta.size() < data_blob.size()) {
operation.set_type(
@@ -738,82 +758,94 @@
data_blob = std::move(bsdiff_delta);
}
}
- if (imgdiff_allowed && ContainsGZip(old_data) && ContainsGZip(new_data)) {
- brillo::Blob imgdiff_delta;
- // Imgdiff might fail in some cases, only use the result if it succeed,
- // otherwise print the extents to analyze.
- if (DiffFiles(kImgdiffPath,
- old_chunk.value(),
- new_chunk.value(),
- &imgdiff_delta) &&
- imgdiff_delta.size() > 0) {
- if (imgdiff_delta.size() < data_blob.size()) {
- operation.set_type(InstallOperation::IMGDIFF);
- data_blob = std::move(imgdiff_delta);
+ if (puffdiff_allowed) {
+ // Find all deflate positions inside the given extents and then put all
+ // deflates together because we have already read all the extents into
+ // one buffer.
+ vector<puffin::BitExtent> src_deflates;
+ TEST_AND_RETURN_FALSE(deflate_utils::FindAndCompactDeflates(
+ src_extents, old_deflates, &src_deflates));
+
+ vector<puffin::BitExtent> dst_deflates;
+ TEST_AND_RETURN_FALSE(deflate_utils::FindAndCompactDeflates(
+ dst_extents, new_deflates, &dst_deflates));
+
+ // Remove equal deflates. TODO(*): We can do an N*N check using
+ // hashing. It will not reduce the payload size, but it will speed up
+ // the puffing on the client device.
+ auto src = src_deflates.begin();
+ auto dst = dst_deflates.begin();
+ for (; src != src_deflates.end() && dst != dst_deflates.end();) {
+ auto src_in_bytes = deflate_utils::ExpandToByteExtent(*src);
+ auto dst_in_bytes = deflate_utils::ExpandToByteExtent(*dst);
+ if (src_in_bytes.length == dst_in_bytes.length &&
+ !memcmp(old_data.data() + src_in_bytes.offset,
+ new_data.data() + dst_in_bytes.offset,
+ src_in_bytes.length)) {
+ src = src_deflates.erase(src);
+ dst = dst_deflates.erase(dst);
+ } else {
+ src++;
+ dst++;
}
- } else {
- LOG(ERROR) << "Imgdiff failed with source extents: "
- << ExtentsToString(src_extents)
- << ", destination extents: "
- << ExtentsToString(dst_extents);
+ }
+
+ // Only run puffdiff if both files have at least one deflate left.
+ if (!src_deflates.empty() && !dst_deflates.empty()) {
+ brillo::Blob puffdiff_delta;
+ string temp_file_path;
+ TEST_AND_RETURN_FALSE(utils::MakeTempFile(
+ "puffdiff-delta.XXXXXX", &temp_file_path, nullptr));
+ ScopedPathUnlinker temp_file_unlinker(temp_file_path);
+
+ // Perform PuffDiff operation.
+ TEST_AND_RETURN_FALSE(puffin::PuffDiff(old_data,
+ new_data,
+ src_deflates,
+ dst_deflates,
+ temp_file_path,
+ &puffdiff_delta));
+ TEST_AND_RETURN_FALSE(puffdiff_delta.size() > 0);
+ if (puffdiff_delta.size() < data_blob.size()) {
+ operation.set_type(InstallOperation::PUFFDIFF);
+ data_blob = std::move(puffdiff_delta);
+ }
}
}
}
}
- size_t removed_bytes = 0;
// Remove identical src/dst block ranges in MOVE operations.
if (operation.type() == InstallOperation::MOVE) {
- removed_bytes = RemoveIdenticalBlockRanges(
+ auto removed_bytes = RemoveIdenticalBlockRanges(
&src_extents, &dst_extents, new_data.size());
+ operation.set_src_length(old_data.size() - removed_bytes);
+ operation.set_dst_length(new_data.size() - removed_bytes);
}
- // Set legacy src_length and dst_length fields.
- operation.set_src_length(old_data.size() - removed_bytes);
- operation.set_dst_length(new_data.size() - removed_bytes);
- // Embed extents in the operation.
- StoreExtents(src_extents, operation.mutable_src_extents());
+ // WARNING: We always set legacy |src_length| and |dst_length| fields for
+ // BSDIFF. For SOURCE_BSDIFF we only set them for minor version 3 and
+ // lower. This is needed because we used to use these two parameters in the
+ // SOURCE_BSDIFF for minor version 3 and lower, but we do not need them
+ // anymore in higher minor versions. This means if we stop adding these
+ // parameters for those minor versions, the delta payloads will be invalid.
+ if (operation.type() == InstallOperation::BSDIFF ||
+ (operation.type() == InstallOperation::SOURCE_BSDIFF &&
+ version.minor <= kOpSrcHashMinorPayloadVersion)) {
+ operation.set_src_length(old_data.size());
+ operation.set_dst_length(new_data.size());
+ }
+
+ // Embed extents in the operation. Replace (all variants), zero and discard
+ // operations should not have source extents.
+ if (!IsNoSourceOperation(operation.type())) {
+ StoreExtents(src_extents, operation.mutable_src_extents());
+ }
+ // All operations have dst_extents.
StoreExtents(dst_extents, operation.mutable_dst_extents());
- // Replace operations should not have source extents.
- if (IsAReplaceOperation(operation.type())) {
- operation.clear_src_extents();
- operation.clear_src_length();
- }
-
*out_data = std::move(data_blob);
*out_op = operation;
-
- return true;
-}
-
-// Runs the bsdiff or imgdiff tool in |diff_path| on two files and returns the
-// resulting delta in |out|. Returns true on success.
-bool DiffFiles(const string& diff_path,
- const string& old_file,
- const string& new_file,
- brillo::Blob* out) {
- const string kPatchFile = "delta.patchXXXXXX";
- string patch_file_path;
-
- TEST_AND_RETURN_FALSE(
- utils::MakeTempFile(kPatchFile, &patch_file_path, nullptr));
-
- vector<string> cmd;
- cmd.push_back(diff_path);
- cmd.push_back(old_file);
- cmd.push_back(new_file);
- cmd.push_back(patch_file_path);
-
- int rc = 1;
- string stdout;
- TEST_AND_RETURN_FALSE(Subprocess::SynchronousExec(cmd, &rc, &stdout));
- if (rc != 0) {
- LOG(ERROR) << diff_path << " returned " << rc << std::endl << stdout;
- return false;
- }
- TEST_AND_RETURN_FALSE(utils::ReadFile(patch_file_path, out));
- unlink(patch_file_path.c_str());
return true;
}
@@ -823,6 +855,12 @@
op_type == InstallOperation::REPLACE_XZ);
}
+bool IsNoSourceOperation(InstallOperation_Type op_type) {
+ return (IsAReplaceOperation(op_type) ||
+ op_type == InstallOperation::ZERO ||
+ op_type == InstallOperation::DISCARD);
+}
+
// Returns true if |op| is a no-op operation that doesn't do any useful work
// (e.g., a move operation that copies blocks onto themselves).
bool IsNoopOperation(const InstallOperation& op) {
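The puffdiff branch added above compacts the deflate locations into the concatenated old/new extent buffers and then walks both lists pairwise, dropping deflate streams whose raw bytes are identical in both images, so PuffDiff only has to encode streams that actually changed. The following is a minimal standalone sketch of that pruning pass, not part of the patch; Stream is a simplified byte-offset/length stand-in for an expanded puffin::BitExtent and every name here is illustrative only.

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

struct Stream {
  size_t offset;  // Byte offset into the concatenated extent data.
  size_t length;  // Stream length in bytes.
};

// Drop entries whose bytes match in both images. The two lists are walked
// pairwise, mirroring the loop in the hunk above.
void RemoveEqualStreams(const std::vector<uint8_t>& old_data,
                        const std::vector<uint8_t>& new_data,
                        std::vector<Stream>* src,
                        std::vector<Stream>* dst) {
  auto s = src->begin();
  auto d = dst->begin();
  while (s != src->end() && d != dst->end()) {
    if (s->length == d->length &&
        memcmp(old_data.data() + s->offset,
               new_data.data() + d->offset,
               s->length) == 0) {
      s = src->erase(s);  // Identical stream: nothing for puffdiff to do.
      d = dst->erase(d);
    } else {
      ++s;
      ++d;
    }
  }
}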
diff --git a/payload_generator/delta_diff_utils.h b/payload_generator/delta_diff_utils.h
index c9fef17..dea8535 100644
--- a/payload_generator/delta_diff_utils.h
+++ b/payload_generator/delta_diff_utils.h
@@ -21,6 +21,7 @@
#include <vector>
#include <brillo/secure_blob.h>
+#include <puffin/puffdiff.h>
#include "update_engine/payload_generator/annotated_operation.h"
#include "update_engine/payload_generator/extent_ranges.h"
@@ -76,12 +77,15 @@
// stored in |new_part| in the blocks described by |new_extents| and, if it
// exists, the old version exists in |old_part| in the blocks described by
// |old_extents|. The operations added to |aops| reference the data blob
-// in the |blob_file|. Returns true on success.
+// in the |blob_file|. |old_deflates| and |new_deflates| are all deflate
+// locations in |old_part| and |new_part|. Returns true on success.
bool DeltaReadFile(std::vector<AnnotatedOperation>* aops,
const std::string& old_part,
const std::string& new_part,
const std::vector<Extent>& old_extents,
const std::vector<Extent>& new_extents,
+ const std::vector<puffin::BitExtent>& old_deflates,
+ const std::vector<puffin::BitExtent>& new_deflates,
const std::string& name,
ssize_t chunk_blocks,
const PayloadVersion& version,
@@ -93,23 +97,19 @@
// fills in |out_op|. If there's no change in old and new files, it creates a
// MOVE or SOURCE_COPY operation. If there is a change, the smallest of the
// operations allowed in the given |version| (REPLACE, REPLACE_BZ, BSDIFF,
-// SOURCE_BSDIFF or IMGDIFF) wins.
-// |new_extents| must not be empty. Returns true on success.
+// SOURCE_BSDIFF, or PUFFDIFF) wins.
+// |new_extents| must not be empty. |old_deflates| and |new_deflates| are all
+// the deflate locations in |old_part| and |new_part|. Returns true on success.
bool ReadExtentsToDiff(const std::string& old_part,
const std::string& new_part,
const std::vector<Extent>& old_extents,
const std::vector<Extent>& new_extents,
+ const std::vector<puffin::BitExtent>& old_deflates,
+ const std::vector<puffin::BitExtent>& new_deflates,
const PayloadVersion& version,
brillo::Blob* out_data,
InstallOperation* out_op);
-// Runs the bsdiff or imgdiff tool in |diff_path| on two files and returns the
-// resulting delta in |out|. Returns true on success.
-bool DiffFiles(const std::string& diff_path,
- const std::string& old_file,
- const std::string& new_file,
- brillo::Blob* out);
-
// Generates the best allowed full operation to produce |new_data|. The allowed
// operations are based on |payload_version|. The operation blob will be stored
// in |out_blob| and the resulting operation type in |out_type|. Returns whether
@@ -119,9 +119,12 @@
brillo::Blob* out_blob,
InstallOperation_Type* out_type);
-// Returns whether op_type is one of the REPLACE full operations.
+// Returns whether |op_type| is one of the REPLACE full operations.
bool IsAReplaceOperation(InstallOperation_Type op_type);
+// Returns true if an operation with type |op_type| has no |src_extents|.
+bool IsNoSourceOperation(InstallOperation_Type op_type);
+
// Returns true if |op| is a no-op operation that doesn't do any useful work
// (e.g., a move operation that copies blocks onto themselves).
bool IsNoopOperation(const InstallOperation& op);
diff --git a/payload_generator/delta_diff_utils_unittest.cc b/payload_generator/delta_diff_utils_unittest.cc
index 232eab7..a83cea2 100644
--- a/payload_generator/delta_diff_utils_unittest.cc
+++ b/payload_generator/delta_diff_utils_unittest.cc
@@ -131,7 +131,6 @@
uint32_t minor_version) {
BlobFileWriter blob_file(blob_fd_, &blob_size_);
PayloadVersion version(kChromeOSMajorPayloadVersion, minor_version);
- version.imgdiff_allowed = true; // Assume no fingerprint mismatch.
return diff_utils::DeltaMovedAndZeroBlocks(&aops_,
old_part_.path,
new_part_.path,
@@ -180,6 +179,8 @@
new_part_.path,
old_extents,
new_extents,
+ {}, // old_deflates
+ {}, // new_deflates
PayloadVersion(kChromeOSMajorPayloadVersion, kInPlaceMinorPayloadVersion),
&data,
&op));
@@ -193,9 +194,9 @@
EXPECT_EQ(kBlockSize, op.src_length());
EXPECT_EQ(1, op.dst_extents_size());
EXPECT_EQ(kBlockSize, op.dst_length());
- EXPECT_EQ(BlocksInExtents(op.src_extents()),
- BlocksInExtents(op.dst_extents()));
- EXPECT_EQ(1U, BlocksInExtents(op.dst_extents()));
+ EXPECT_EQ(utils::BlocksInExtents(op.src_extents()),
+ utils::BlocksInExtents(op.dst_extents()));
+ EXPECT_EQ(1U, utils::BlocksInExtents(op.dst_extents()));
}
TEST_F(DeltaDiffUtilsTest, MoveWithSameBlock) {
@@ -219,8 +220,8 @@
ExtentForRange(24, 3),
ExtentForRange(29, 1) };
- uint64_t num_blocks = BlocksInExtents(old_extents);
- EXPECT_EQ(num_blocks, BlocksInExtents(new_extents));
+ uint64_t num_blocks = utils::BlocksInExtents(old_extents);
+ EXPECT_EQ(num_blocks, utils::BlocksInExtents(new_extents));
// The size of the data should match the total number of blocks. Each block
// has a different content.
@@ -239,6 +240,8 @@
new_part_.path,
old_extents,
new_extents,
+ {}, // old_deflates
+ {}, // new_deflates
PayloadVersion(kChromeOSMajorPayloadVersion, kInPlaceMinorPayloadVersion),
&data,
&op));
@@ -259,7 +262,7 @@
ExtentForRange(18, 1),
ExtentForRange(20, 1),
ExtentForRange(26, 1) };
- num_blocks = BlocksInExtents(old_extents);
+ num_blocks = utils::BlocksInExtents(old_extents);
EXPECT_EQ(num_blocks * kBlockSize, op.src_length());
EXPECT_EQ(num_blocks * kBlockSize, op.dst_length());
@@ -302,6 +305,8 @@
new_part_.path,
old_extents,
new_extents,
+ {}, // old_deflates
+ {}, // new_deflates
PayloadVersion(kChromeOSMajorPayloadVersion, kInPlaceMinorPayloadVersion),
&data,
&op));
@@ -316,9 +321,9 @@
EXPECT_EQ(kBlockSize, op.src_length());
EXPECT_EQ(1, op.dst_extents_size());
EXPECT_EQ(kBlockSize, op.dst_length());
- EXPECT_EQ(BlocksInExtents(op.src_extents()),
- BlocksInExtents(op.dst_extents()));
- EXPECT_EQ(1U, BlocksInExtents(op.dst_extents()));
+ EXPECT_EQ(utils::BlocksInExtents(op.src_extents()),
+ utils::BlocksInExtents(op.dst_extents()));
+ EXPECT_EQ(1U, utils::BlocksInExtents(op.dst_extents()));
}
TEST_F(DeltaDiffUtilsTest, ReplaceSmallTest) {
@@ -350,6 +355,8 @@
new_part_.path,
old_extents,
new_extents,
+ {}, // old_deflates
+ {}, // new_deflates
PayloadVersion(kChromeOSMajorPayloadVersion,
kInPlaceMinorPayloadVersion),
&data,
@@ -365,8 +372,8 @@
EXPECT_EQ(0, op.src_extents_size());
EXPECT_FALSE(op.has_src_length());
EXPECT_EQ(1, op.dst_extents_size());
- EXPECT_EQ(data_to_test.size(), op.dst_length());
- EXPECT_EQ(1U, BlocksInExtents(op.dst_extents()));
+ EXPECT_FALSE(op.has_dst_length());
+ EXPECT_EQ(1U, utils::BlocksInExtents(op.dst_extents()));
}
}
@@ -391,6 +398,8 @@
new_part_.path,
old_extents,
new_extents,
+ {}, // old_deflates
+ {}, // new_deflates
PayloadVersion(kChromeOSMajorPayloadVersion, kSourceMinorPayloadVersion),
&data,
&op));
@@ -423,6 +432,8 @@
new_part_.path,
old_extents,
new_extents,
+ {}, // old_deflates
+ {}, // new_deflates
PayloadVersion(kChromeOSMajorPayloadVersion, kSourceMinorPayloadVersion),
&data,
&op));
@@ -623,7 +634,8 @@
// The last range is split since the old image has zeros in part of it.
ExtentForRange(30, 20),
};
- brillo::Blob zeros_data(BlocksInExtents(new_zeros) * block_size_, '\0');
+ brillo::Blob zeros_data(utils::BlocksInExtents(new_zeros) * block_size_,
+ '\0');
EXPECT_TRUE(WriteExtents(new_part_.path, new_zeros, block_size_, zeros_data));
vector<Extent> old_zeros = vector<Extent>{ExtentForRange(43, 7)};
diff --git a/payload_generator/ext2_filesystem.cc b/payload_generator/ext2_filesystem.cc
index ee2f8c2..07ec371 100644
--- a/payload_generator/ext2_filesystem.cc
+++ b/payload_generator/ext2_filesystem.cc
@@ -17,12 +17,16 @@
#include "update_engine/payload_generator/ext2_filesystem.h"
#include <et/com_err.h>
-// TODO: Remove these pragmas when b/35721782 is fixed.
+#if defined(__clang__)
+// TODO(*): Remove these pragmas when b/35721782 is fixed.
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wmacro-redefined"
+#endif
#include <ext2fs/ext2_io.h>
#include <ext2fs/ext2fs.h>
+#if defined(__clang__)
#pragma clang diagnostic pop
+#endif
#include <map>
#include <set>
@@ -344,7 +348,7 @@
return false;
brillo::Blob blob;
- uint64_t physical_size = BlocksInExtents(extents) * filsys_->blocksize;
+ uint64_t physical_size = utils::BlocksInExtents(extents) * filsys_->blocksize;
// Sparse holes in the settings file are not supported.
if (EXT2_I_SIZE(&ino_data) > physical_size)
return false;
diff --git a/payload_generator/ext2_filesystem.h b/payload_generator/ext2_filesystem.h
index 1a4e1a1..c0562d0 100644
--- a/payload_generator/ext2_filesystem.h
+++ b/payload_generator/ext2_filesystem.h
@@ -23,11 +23,15 @@
#include <string>
#include <vector>
+#if defined(__clang__)
// TODO: Remove these pragmas when b/35721782 is fixed.
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wmacro-redefined"
+#endif
#include <ext2fs/ext2fs.h>
+#if defined(__clang__)
#pragma clang diagnostic pop
+#endif
namespace chromeos_update_engine {
diff --git a/payload_generator/ext2_filesystem_unittest.cc b/payload_generator/ext2_filesystem_unittest.cc
index a3c7731..5360e6c 100644
--- a/payload_generator/ext2_filesystem_unittest.cc
+++ b/payload_generator/ext2_filesystem_unittest.cc
@@ -158,7 +158,8 @@
// Small symlinks don't actually have data blocks.
EXPECT_TRUE(map_files["/link-short_symlink"].extents.empty());
- EXPECT_EQ(1U, BlocksInExtents(map_files["/link-long_symlink"].extents));
+ EXPECT_EQ(1U,
+ utils::BlocksInExtents(map_files["/link-long_symlink"].extents));
// Hard-links report the same list of blocks.
EXPECT_EQ(map_files["/link-hard-regular-16k"].extents,
@@ -168,14 +169,19 @@
// The number of blocks in these files doesn't depend on the
// block size.
EXPECT_TRUE(map_files["/empty-file"].extents.empty());
- EXPECT_EQ(1U, BlocksInExtents(map_files["/regular-small"].extents));
- EXPECT_EQ(1U, BlocksInExtents(map_files["/regular-with_net_cap"].extents));
+ EXPECT_EQ(1U, utils::BlocksInExtents(map_files["/regular-small"].extents));
+ EXPECT_EQ(
+ 1U, utils::BlocksInExtents(map_files["/regular-with_net_cap"].extents));
EXPECT_TRUE(map_files["/sparse_empty-10k"].extents.empty());
EXPECT_TRUE(map_files["/sparse_empty-2blocks"].extents.empty());
- EXPECT_EQ(1U, BlocksInExtents(map_files["/sparse-16k-last_block"].extents));
- EXPECT_EQ(1U,
- BlocksInExtents(map_files["/sparse-16k-first_block"].extents));
- EXPECT_EQ(2U, BlocksInExtents(map_files["/sparse-16k-holes"].extents));
+ EXPECT_EQ(
+ 1U,
+ utils::BlocksInExtents(map_files["/sparse-16k-last_block"].extents));
+ EXPECT_EQ(
+ 1U,
+ utils::BlocksInExtents(map_files["/sparse-16k-first_block"].extents));
+ EXPECT_EQ(2U,
+ utils::BlocksInExtents(map_files["/sparse-16k-holes"].extents));
}
}
diff --git a/payload_generator/extent_ranges.cc b/payload_generator/extent_ranges.cc
index 0e0cdf7..c1d3d63 100644
--- a/payload_generator/extent_ranges.cc
+++ b/payload_generator/extent_ranges.cc
@@ -23,6 +23,7 @@
#include <base/logging.h>
+#include "update_engine/common/utils.h"
#include "update_engine/payload_consumer/payload_constants.h"
#include "update_engine/payload_generator/extent_utils.h"
@@ -250,7 +251,7 @@
out.back().set_num_blocks(blocks_needed);
break;
}
- CHECK(out_blocks == BlocksInExtents(out));
+ CHECK(out_blocks == utils::BlocksInExtents(out));
return out;
}
diff --git a/payload_generator/extent_utils.cc b/payload_generator/extent_utils.cc
index 89ccca2..47073f9 100644
--- a/payload_generator/extent_utils.cc
+++ b/payload_generator/extent_utils.cc
@@ -53,16 +53,6 @@
extents->push_back(new_extent);
}
-Extent GetElement(const vector<Extent>& collection, size_t index) {
- return collection[index];
-}
-
-Extent GetElement(
- const google::protobuf::RepeatedPtrField<Extent>& collection,
- size_t index) {
- return collection.Get(index);
-}
-
void ExtendExtents(
google::protobuf::RepeatedPtrField<Extent>* extents,
const google::protobuf::RepeatedPtrField<Extent>& extents_to_add) {
diff --git a/payload_generator/extent_utils.h b/payload_generator/extent_utils.h
index 3e45264..f5fbb0e 100644
--- a/payload_generator/extent_utils.h
+++ b/payload_generator/extent_utils.h
@@ -32,31 +32,12 @@
// into an arbitrary place in the extents.
void AppendBlockToExtents(std::vector<Extent>* extents, uint64_t block);
-// Get/SetElement are intentionally overloaded so that templated functions
-// can accept either type of collection of Extents.
-Extent GetElement(const std::vector<Extent>& collection, size_t index);
-Extent GetElement(
- const google::protobuf::RepeatedPtrField<Extent>& collection,
- size_t index);
-
-// Return the total number of blocks in a collection (vector or
-// RepeatedPtrField) of Extents.
-template<typename T>
-uint64_t BlocksInExtents(const T& collection) {
- uint64_t ret = 0;
- for (size_t i = 0; i < static_cast<size_t>(collection.size()); ++i) {
- ret += GetElement(collection, i).num_blocks();
- }
- return ret;
-}
-
// Takes a collection (vector or RepeatedPtrField) of Extent and
// returns a vector of the blocks referenced, in order.
template<typename T>
std::vector<uint64_t> ExpandExtents(const T& extents) {
std::vector<uint64_t> ret;
- for (size_t i = 0, e = static_cast<size_t>(extents.size()); i != e; ++i) {
- const Extent extent = GetElement(extents, i);
+ for (const auto& extent : extents) {
if (extent.start_block() == kSparseHole) {
ret.resize(ret.size() + extent.num_blocks(), kSparseHole);
} else {
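The removal above drops the local GetElement() overloads and the index-based BlocksInExtents() template in favor of range-based iteration; block counts now come from utils::BlocksInExtents(). A minimal sketch of what such a consolidated helper can look like, assuming only that Extent exposes num_blocks() and that both std::vector<Extent> and RepeatedPtrField<Extent> are range-iterable (the real implementation lives in common/utils.h):

#include <cstdint>

// Single range-for implementation covering both std::vector<Extent> and
// google::protobuf::RepeatedPtrField<Extent>, replacing the removed
// GetElement()-based loop.
template <typename ExtentCollection>
uint64_t BlocksInExtents(const ExtentCollection& extents) {
  uint64_t total = 0;
  for (const auto& extent : extents)
    total += extent.num_blocks();
  return total;
}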
diff --git a/payload_generator/extent_utils_unittest.cc b/payload_generator/extent_utils_unittest.cc
index d470e7b..eef4385 100644
--- a/payload_generator/extent_utils_unittest.cc
+++ b/payload_generator/extent_utils_unittest.cc
@@ -54,23 +54,23 @@
TEST(ExtentUtilsTest, BlocksInExtentsTest) {
{
vector<Extent> extents;
- EXPECT_EQ(0U, BlocksInExtents(extents));
+ EXPECT_EQ(0U, utils::BlocksInExtents(extents));
extents.push_back(ExtentForRange(0, 1));
- EXPECT_EQ(1U, BlocksInExtents(extents));
+ EXPECT_EQ(1U, utils::BlocksInExtents(extents));
extents.push_back(ExtentForRange(23, 55));
- EXPECT_EQ(56U, BlocksInExtents(extents));
+ EXPECT_EQ(56U, utils::BlocksInExtents(extents));
extents.push_back(ExtentForRange(1, 2));
- EXPECT_EQ(58U, BlocksInExtents(extents));
+ EXPECT_EQ(58U, utils::BlocksInExtents(extents));
}
{
google::protobuf::RepeatedPtrField<Extent> extents;
- EXPECT_EQ(0U, BlocksInExtents(extents));
+ EXPECT_EQ(0U, utils::BlocksInExtents(extents));
*extents.Add() = ExtentForRange(0, 1);
- EXPECT_EQ(1U, BlocksInExtents(extents));
+ EXPECT_EQ(1U, utils::BlocksInExtents(extents));
*extents.Add() = ExtentForRange(23, 55);
- EXPECT_EQ(56U, BlocksInExtents(extents));
+ EXPECT_EQ(56U, utils::BlocksInExtents(extents));
*extents.Add() = ExtentForRange(1, 2);
- EXPECT_EQ(58U, BlocksInExtents(extents));
+ EXPECT_EQ(58U, utils::BlocksInExtents(extents));
}
}
diff --git a/payload_generator/filesystem_interface.h b/payload_generator/filesystem_interface.h
index 866c46b..b1506e4 100644
--- a/payload_generator/filesystem_interface.h
+++ b/payload_generator/filesystem_interface.h
@@ -33,6 +33,7 @@
#include <base/macros.h>
#include <brillo/key_value_store.h>
+#include <puffin/utils.h>
#include "update_engine/update_metadata.pb.h"
@@ -62,6 +63,10 @@
// between 0 and GetBlockCount() - 1. The blocks are encoded in extents,
// indicating the starting block, and the number of consecutive blocks.
std::vector<Extent> extents;
+
+ // All the deflate locations in the file. These locations are not relative
+ // to the extents. They are relative to the file system itself.
+ std::vector<puffin::BitExtent> deflates;
};
virtual ~FilesystemInterface() = default;
diff --git a/payload_generator/full_update_generator_unittest.cc b/payload_generator/full_update_generator_unittest.cc
index 9e62de2..6da4d10 100644
--- a/payload_generator/full_update_generator_unittest.cc
+++ b/payload_generator/full_update_generator_unittest.cc
@@ -16,6 +16,7 @@
#include "update_engine/payload_generator/full_update_generator.h"
+#include <memory>
#include <string>
#include <vector>
@@ -116,9 +117,9 @@
// new_part has one chunk and a half.
EXPECT_EQ(2U, aops.size());
EXPECT_EQ(config_.hard_chunk_size / config_.block_size,
- BlocksInExtents(aops[0].op.dst_extents()));
+ utils::BlocksInExtents(aops[0].op.dst_extents()));
EXPECT_EQ((new_part.size() - config_.hard_chunk_size) / config_.block_size,
- BlocksInExtents(aops[1].op.dst_extents()));
+ utils::BlocksInExtents(aops[1].op.dst_extents()));
}
// Test that if the image size is much smaller than the chunk size, it handles
@@ -138,7 +139,7 @@
// new_part has less than one chunk.
EXPECT_EQ(1U, aops.size());
EXPECT_EQ(new_part.size() / config_.block_size,
- BlocksInExtents(aops[0].op.dst_extents()));
+ utils::BlocksInExtents(aops[0].op.dst_extents()));
}
} // namespace chromeos_update_engine
diff --git a/payload_generator/generate_delta_main.cc b/payload_generator/generate_delta_main.cc
index cd99a51..2729bc4 100644
--- a/payload_generator/generate_delta_main.cc
+++ b/payload_generator/generate_delta_main.cc
@@ -19,6 +19,7 @@
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
+#include <xz.h>
#include <string>
#include <vector>
@@ -29,13 +30,14 @@
#include <brillo/flag_helper.h>
#include <brillo/key_value_store.h>
+#include "update_engine/common/fake_boot_control.h"
+#include "update_engine/common/fake_hardware.h"
#include "update_engine/common/prefs.h"
#include "update_engine/common/terminator.h"
#include "update_engine/common/utils.h"
#include "update_engine/payload_consumer/delta_performer.h"
#include "update_engine/payload_consumer/payload_constants.h"
#include "update_engine/payload_generator/delta_diff_generator.h"
-#include "update_engine/payload_generator/delta_diff_utils.h"
#include "update_engine/payload_generator/payload_generation_config.h"
#include "update_engine/payload_generator/payload_signer.h"
#include "update_engine/payload_generator/xz.h"
@@ -165,73 +167,84 @@
}
}
-void VerifySignedPayload(const string& in_file,
- const string& public_key) {
+int VerifySignedPayload(const string& in_file, const string& public_key) {
LOG(INFO) << "Verifying signed payload.";
LOG_IF(FATAL, in_file.empty())
<< "Must pass --in_file to verify signed payload.";
LOG_IF(FATAL, public_key.empty())
<< "Must pass --public_key to verify signed payload.";
- CHECK(PayloadSigner::VerifySignedPayload(in_file, public_key));
+ if (!PayloadSigner::VerifySignedPayload(in_file, public_key)) {
+ LOG(INFO) << "VerifySignedPayload failed";
+ return 1;
+ }
+
LOG(INFO) << "Done verifying signed payload.";
+ return 0;
}
// TODO(deymo): This function is likely broken for deltas minor version 2 or
// newer. Move this function to a new file and make the delta_performer
// integration tests use this instead.
-void ApplyDelta(const string& in_file,
- const string& old_kernel,
- const string& old_rootfs,
- const string& prefs_dir) {
+bool ApplyPayload(const string& payload_file,
+ // Simply reuses the payload config used for payload
+ // generation.
+ const PayloadGenerationConfig& config) {
LOG(INFO) << "Applying delta.";
- LOG_IF(FATAL, old_rootfs.empty())
- << "Must pass --old_image to apply delta.";
- Prefs prefs;
+ FakeBootControl fake_boot_control;
+ FakeHardware fake_hardware;
+ MemoryPrefs prefs;
InstallPlan install_plan;
- LOG(INFO) << "Setting up preferences under: " << prefs_dir;
- LOG_IF(ERROR, !prefs.Init(base::FilePath(prefs_dir)))
- << "Failed to initialize preferences.";
- // Get original checksums
- LOG(INFO) << "Calculating original checksums";
- ImageConfig old_image;
- old_image.partitions.emplace_back(kLegacyPartitionNameRoot);
- old_image.partitions.back().path = old_rootfs;
- old_image.partitions.emplace_back(kLegacyPartitionNameKernel);
- old_image.partitions.back().path = old_kernel;
- CHECK(old_image.LoadImageSize());
- for (const auto& old_part : old_image.partitions) {
- PartitionInfo part_info;
- CHECK(diff_utils::InitializePartitionInfo(old_part, &part_info));
- InstallPlan::Partition part;
- part.name = old_part.name;
- part.source_hash.assign(part_info.hash().begin(),
- part_info.hash().end());
- part.source_path = old_part.path;
- // Apply the delta in-place to the old_part.
- part.target_path = old_part.path;
- install_plan.partitions.push_back(part);
+ InstallPlan::Payload payload;
+ install_plan.source_slot =
+ config.is_delta ? 0 : BootControlInterface::kInvalidSlot;
+ install_plan.target_slot = 1;
+ payload.type =
+ config.is_delta ? InstallPayloadType::kDelta : InstallPayloadType::kFull;
+
+ for (size_t i = 0; i < config.target.partitions.size(); i++) {
+ const string& part_name = config.target.partitions[i].name;
+ const string& target_path = config.target.partitions[i].path;
+ fake_boot_control.SetPartitionDevice(
+ part_name, install_plan.target_slot, target_path);
+
+ string source_path;
+ if (config.is_delta) {
+ TEST_AND_RETURN_FALSE(config.target.partitions.size() ==
+ config.source.partitions.size());
+ source_path = config.source.partitions[i].path;
+ fake_boot_control.SetPartitionDevice(
+ part_name, install_plan.source_slot, source_path);
+ }
+
+ LOG(INFO) << "Install partition:"
+ << " source: " << source_path << " target: " << target_path;
}
- install_plan.payloads.resize(1);
+
DeltaPerformer performer(&prefs,
- nullptr,
- nullptr,
+ &fake_boot_control,
+ &fake_hardware,
nullptr,
&install_plan,
- &install_plan.payloads[0]);
+ &payload,
+ true); // is_interactive
+
brillo::Blob buf(1024 * 1024);
- int fd = open(in_file.c_str(), O_RDONLY, 0);
+ int fd = open(payload_file.c_str(), O_RDONLY, 0);
CHECK_GE(fd, 0);
ScopedFdCloser fd_closer(&fd);
+ xz_crc32_init();
for (off_t offset = 0;; offset += buf.size()) {
ssize_t bytes_read;
CHECK(utils::PReadAll(fd, buf.data(), buf.size(), offset, &bytes_read));
if (bytes_read == 0)
break;
- CHECK_EQ(performer.Write(buf.data(), bytes_read), bytes_read);
+ TEST_AND_RETURN_FALSE(performer.Write(buf.data(), bytes_read));
}
CHECK_EQ(performer.Close(), 0);
DeltaPerformer::ResetUpdateProgress(&prefs, false);
- LOG(INFO) << "Done applying delta.";
+ LOG(INFO) << "Completed applying " << (config.is_delta ? "delta" : "full")
+ << " payload.";
+ return true;
}
int ExtractProperties(const string& payload_path, const string& props_file) {
@@ -293,8 +306,6 @@
DEFINE_string(public_key, "", "Path to public key in .pem format");
DEFINE_int32(public_key_version, -1,
"DEPRECATED. Key-check version # of client");
- DEFINE_string(prefs_dir, "/tmp/update_engine_prefs",
- "Preferences directory, used with apply_delta");
DEFINE_string(signature_size, "",
"Raw signature size used for hash calculation. "
"You may pass in multiple sizes by colon separating them. E.g. "
@@ -324,9 +335,6 @@
DEFINE_string(properties_file, "",
"If passed, dumps the payload properties of the payload passed "
"in --in_file and exits.");
- DEFINE_string(zlib_fingerprint, "",
- "The fingerprint of zlib in the source image in hash string "
- "format, used to check imgdiff compatibility.");
DEFINE_int64(max_timestamp,
0,
"The maximum timestamp of the OS allowed to apply this "
@@ -406,17 +414,11 @@
if (!FLAGS_public_key.empty()) {
LOG_IF(WARNING, FLAGS_public_key_version != -1)
<< "--public_key_version is deprecated and ignored.";
- VerifySignedPayload(FLAGS_in_file, FLAGS_public_key);
- return 0;
+ return VerifySignedPayload(FLAGS_in_file, FLAGS_public_key);
}
if (!FLAGS_properties_file.empty()) {
return ExtractProperties(FLAGS_in_file, FLAGS_properties_file) ? 0 : 1;
}
- if (!FLAGS_in_file.empty()) {
- ApplyDelta(FLAGS_in_file, FLAGS_old_kernel, FLAGS_old_image,
- FLAGS_prefs_dir);
- return 0;
- }
// A payload generation was requested. Convert the flags to a
// PayloadGenerationConfig.
@@ -499,6 +501,10 @@
}
}
+ if (!FLAGS_in_file.empty()) {
+ return ApplyPayload(FLAGS_in_file, payload_config) ? 0 : 1;
+ }
+
if (!FLAGS_new_postinstall_config_file.empty()) {
LOG_IF(FATAL, FLAGS_major_version == kChromeOSMajorPayloadVersion)
<< "Postinstall config is only allowed in major version 2 or newer.";
@@ -574,21 +580,10 @@
LOG(INFO) << "Using provided minor_version=" << FLAGS_minor_version;
}
- if (!FLAGS_zlib_fingerprint.empty()) {
- if (utils::IsZlibCompatible(FLAGS_zlib_fingerprint)) {
- payload_config.version.imgdiff_allowed = true;
- } else {
- LOG(INFO) << "IMGDIFF operation disabled due to fingerprint mismatch.";
- }
- }
-
payload_config.max_timestamp = FLAGS_max_timestamp;
- if (payload_config.is_delta) {
- LOG(INFO) << "Generating delta update";
- } else {
- LOG(INFO) << "Generating full update";
- }
+ LOG(INFO) << "Generating " << (payload_config.is_delta ? "delta" : "full")
+ << " update";
// From this point, all the options have been parsed.
if (!payload_config.Validate()) {
diff --git a/payload_generator/graph_utils.cc b/payload_generator/graph_utils.cc
index 2d5fb63..4829b21 100644
--- a/payload_generator/graph_utils.cc
+++ b/payload_generator/graph_utils.cc
@@ -104,9 +104,9 @@
template<typename T>
void DumpExtents(const T& field, int prepend_space_count) {
string header(prepend_space_count, ' ');
- for (int i = 0, e = field.size(); i != e; ++i) {
- LOG(INFO) << header << "(" << GetElement(field, i).start_block() << ", "
- << GetElement(field, i).num_blocks() << ")";
+ for (const auto& extent : field) {
+ LOG(INFO) << header << "(" << extent.start_block() << ", "
+ << extent.num_blocks() << ")";
}
}
diff --git a/payload_generator/inplace_generator.cc b/payload_generator/inplace_generator.cc
index bc140e8..b858c2b 100644
--- a/payload_generator/inplace_generator.cc
+++ b/payload_generator/inplace_generator.cc
@@ -299,8 +299,7 @@
template<typename T>
bool TempBlocksExistInExtents(const T& extents) {
- for (int i = 0, e = extents.size(); i < e; ++i) {
- Extent extent = GetElement(extents, i);
+ for (const auto& extent : extents) {
uint64_t start = extent.start_block();
uint64_t num = extent.num_blocks();
if (start >= kTempBlockStart || (start + num) >= kTempBlockStart) {
@@ -573,6 +572,8 @@
new_part,
vector<Extent>(), // old_extents
new_extents,
+ {}, // old_deflates
+ {}, // new_deflates
(*graph)[cut.old_dst].aop.name,
-1, // chunk_blocks, forces to have a single operation.
kInPlacePayloadVersion,
diff --git a/payload_generator/mapfile_filesystem.cc b/payload_generator/mapfile_filesystem.cc
index f4f0804..5264a9c 100644
--- a/payload_generator/mapfile_filesystem.cc
+++ b/payload_generator/mapfile_filesystem.cc
@@ -21,9 +21,9 @@
#include <base/files/file_util.h>
#include <base/logging.h>
+#include <base/memory/ptr_util.h>
#include <base/strings/string_number_conversions.h>
#include <base/strings/string_split.h>
-#include <brillo/make_unique_ptr.h>
#include "update_engine/common/utils.h"
#include "update_engine/payload_generator/extent_ranges.h"
@@ -61,8 +61,7 @@
return nullptr;
}
- return brillo::make_unique_ptr(
- new MapfileFilesystem(mapfile_filename, num_blocks));
+ return base::WrapUnique(new MapfileFilesystem(mapfile_filename, num_blocks));
}
MapfileFilesystem::MapfileFilesystem(const string& mapfile_filename,
diff --git a/payload_generator/payload_file.cc b/payload_generator/payload_file.cc
index 4cb117d..f48d2a2 100644
--- a/payload_generator/payload_file.cc
+++ b/payload_generator/payload_file.cc
@@ -19,6 +19,9 @@
#include <endian.h>
#include <algorithm>
+#include <map>
+
+#include <base/strings/stringprintf.h>
#include "update_engine/common/hash_calculator.h"
#include "update_engine/payload_consumer/delta_performer.h"
@@ -320,38 +323,39 @@
}
void PayloadFile::ReportPayloadUsage(uint64_t metadata_size) const {
- vector<DeltaObject> objects;
+ std::map<DeltaObject, int> object_counts;
off_t total_size = 0;
for (const auto& part : part_vec_) {
for (const AnnotatedOperation& aop : part.aops) {
- objects.push_back(DeltaObject(aop.name,
- aop.op.type(),
- aop.op.data_length()));
+ DeltaObject delta(aop.name, aop.op.type(), aop.op.data_length());
+ object_counts[delta]++;
total_size += aop.op.data_length();
}
}
- objects.push_back(DeltaObject("<manifest-metadata>",
- -1,
- metadata_size));
+ object_counts[DeltaObject("<manifest-metadata>", -1, metadata_size)] = 1;
total_size += metadata_size;
- std::sort(objects.begin(), objects.end());
-
- static const char kFormatString[] = "%6.2f%% %10jd %-10s %s\n";
- for (const DeltaObject& object : objects) {
- fprintf(
- stderr, kFormatString,
+ static const char kFormatString[] = "%6.2f%% %10jd %-13s %s %d";
+ for (const auto& object_count : object_counts) {
+ const DeltaObject& object = object_count.first;
+ LOG(INFO) << base::StringPrintf(
+ kFormatString,
object.size * 100.0 / total_size,
static_cast<intmax_t>(object.size),
(object.type >= 0 ? InstallOperationTypeName(
static_cast<InstallOperation_Type>(object.type))
: "-"),
- object.name.c_str());
+ object.name.c_str(),
+ object_count.second);
}
- fprintf(stderr, kFormatString,
- 100.0, static_cast<intmax_t>(total_size), "", "<total>");
+ LOG(INFO) << base::StringPrintf(kFormatString,
+ 100.0,
+ static_cast<intmax_t>(total_size),
+ "",
+ "<total>",
+ 1);
}
} // namespace chromeos_update_engine
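ReportPayloadUsage() now folds identical (name, type, size) entries into a std::map keyed on DeltaObject and logs one line per distinct object together with its count, instead of one line per operation. A small self-contained sketch of the same counting pattern, using a simplified pair key in place of DeltaObject; the names and sizes below are invented for illustration:

#include <cstdio>
#include <map>
#include <string>
#include <utility>

int main() {
  // (name, data_length) pairs standing in for DeltaObject; values are made up.
  const std::pair<std::string, long> aops[] = {
      {"system/app.odex", 4096},
      {"system/app.odex", 4096},
      {"<manifest-metadata>", 512}};
  std::map<std::pair<std::string, long>, int> object_counts;
  long total_size = 0;
  for (const auto& aop : aops) {
    object_counts[aop]++;  // Identical objects collapse into one entry.
    total_size += aop.second;
  }
  for (const auto& entry : object_counts) {
    std::printf("%6.2f%% %10ld %s %d\n",
                entry.first.second * 100.0 / total_size,
                entry.first.second,
                entry.first.first.c_str(),
                entry.second);
  }
  return 0;
}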
diff --git a/payload_generator/payload_generation_config.cc b/payload_generator/payload_generation_config.cc
index e85d693..836b481 100644
--- a/payload_generator/payload_generation_config.cc
+++ b/payload_generator/payload_generation_config.cc
@@ -128,7 +128,7 @@
minor == kInPlaceMinorPayloadVersion ||
minor == kSourceMinorPayloadVersion ||
minor == kOpSrcHashMinorPayloadVersion ||
- minor == kImgdiffMinorPayloadVersion);
+ minor == kPuffdiffMinorPayloadVersion);
return true;
}
@@ -151,7 +151,7 @@
// The implementation of these operations had a bug in earlier versions
// that prevents them from being used in any payload. We will enable
// them for delta payloads for now.
- return minor >= kImgdiffMinorPayloadVersion;
+ return minor >= kPuffdiffMinorPayloadVersion;
// Delta operations:
case InstallOperation::MOVE:
@@ -165,8 +165,9 @@
case InstallOperation::SOURCE_BSDIFF:
return minor >= kSourceMinorPayloadVersion;
- case InstallOperation::IMGDIFF:
- return minor >= kImgdiffMinorPayloadVersion && imgdiff_allowed;
+ case InstallOperation::BROTLI_BSDIFF:
+ case InstallOperation::PUFFDIFF:
+ return minor >= kPuffdiffMinorPayloadVersion;
}
return false;
}
diff --git a/payload_generator/payload_generation_config.h b/payload_generator/payload_generation_config.h
index dd3242a..c553d29 100644
--- a/payload_generator/payload_generation_config.h
+++ b/payload_generator/payload_generation_config.h
@@ -137,10 +137,6 @@
// The minor version of the payload.
uint32_t minor;
-
- // Wheter the IMGDIFF operation is allowed based on the available compressor
- // in the delta_generator and the one supported by the target.
- bool imgdiff_allowed = false;
};
// The PayloadGenerationConfig struct encapsulates all the configuration to
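With imgdiff_allowed removed, payload_generation_config.cc above gates the new BROTLI_BSDIFF and PUFFDIFF operations purely on the minor payload version. A reduced sketch of that gating follows; the enum and the numeric constants are placeholders for illustration only (the real values and names live in the update_engine headers), and only their relative ordering matters here.

#include <cstdint>

// Placeholder minor-version constants for illustration.
constexpr uint32_t kSourceMinorPayloadVersion = 2;
constexpr uint32_t kPuffdiffMinorPayloadVersion = 4;

enum class Op { kSourceCopy, kSourceBsdiff, kBrotliBsdiff, kPuffdiff };

// Mirrors the shape of PayloadVersion::OperationAllowed() after this change:
// puffdiff-era operations require the puffdiff minor version or newer.
bool OperationAllowed(Op op, uint32_t minor) {
  switch (op) {
    case Op::kSourceCopy:
    case Op::kSourceBsdiff:
      return minor >= kSourceMinorPayloadVersion;
    case Op::kBrotliBsdiff:
    case Op::kPuffdiff:
      return minor >= kPuffdiffMinorPayloadVersion;
  }
  return false;
}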
diff --git a/payload_generator/squashfs_filesystem.cc b/payload_generator/squashfs_filesystem.cc
new file mode 100644
index 0000000..c98ad12
--- /dev/null
+++ b/payload_generator/squashfs_filesystem.cc
@@ -0,0 +1,332 @@
+//
+// Copyright (C) 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_generator/squashfs_filesystem.h"
+
+#include <fcntl.h>
+
+#include <algorithm>
+#include <string>
+#include <utility>
+
+#include <base/files/file_util.h>
+#include <base/logging.h>
+#include <base/strings/string_number_conversions.h>
+#include <base/strings/string_split.h>
+#include <brillo/streams/file_stream.h>
+
+#include "update_engine/common/subprocess.h"
+#include "update_engine/common/utils.h"
+#include "update_engine/payload_generator/deflate_utils.h"
+#include "update_engine/payload_generator/delta_diff_generator.h"
+#include "update_engine/payload_generator/extent_ranges.h"
+#include "update_engine/payload_generator/extent_utils.h"
+#include "update_engine/update_metadata.pb.h"
+
+using std::string;
+using std::unique_ptr;
+using std::vector;
+
+namespace chromeos_update_engine {
+
+namespace {
+
+Extent ExtentForBytes(uint64_t block_size,
+ uint64_t start_bytes,
+ uint64_t size_bytes) {
+ uint64_t start_block = start_bytes / block_size;
+ uint64_t end_block = (start_bytes + size_bytes + block_size - 1) / block_size;
+ return ExtentForRange(start_block, end_block - start_block);
+}
+
+// The size of the squashfs super block.
+constexpr size_t kSquashfsSuperBlockSize = 96;
+constexpr uint64_t kSquashfsCompressedBit = 1 << 24;
+constexpr uint32_t kSquashfsZlibCompression = 1;
+
+bool ReadSquashfsHeader(const brillo::Blob blob,
+ SquashfsFilesystem::SquashfsHeader* header) {
+ if (blob.size() < kSquashfsSuperBlockSize) {
+ return false;
+ }
+
+ memcpy(&header->magic, blob.data(), 4);
+ memcpy(&header->block_size, blob.data() + 12, 4);
+ memcpy(&header->compression_type, blob.data() + 20, 2);
+ memcpy(&header->major_version, blob.data() + 28, 2);
+ return true;
+}
+
+bool CheckHeader(const SquashfsFilesystem::SquashfsHeader& header) {
+ return header.magic == 0x73717368 && header.major_version == 4;
+}
+
+bool GetFileMapContent(const string& sqfs_path, string* map) {
+ // Create a tmp file
+ string map_file;
+ TEST_AND_RETURN_FALSE(
+ utils::MakeTempFile("squashfs_file_map.XXXXXX", &map_file, nullptr));
+ ScopedPathUnlinker map_unlinker(map_file);
+
+ // Run unsquashfs to get the system file map.
+ // unsquashfs -m <map-file> <squashfs-file>
+ vector<string> cmd = {"unsquashfs", "-m", map_file, sqfs_path};
+ string stdout;
+ int exit_code;
+ if (!Subprocess::SynchronousExec(cmd, &exit_code, &stdout) ||
+ exit_code != 0) {
+ LOG(ERROR) << "Failed to run unsquashfs -m. The stdout content was: "
+ << stdout;
+ return false;
+ }
+ TEST_AND_RETURN_FALSE(utils::ReadFile(map_file, map));
+ return true;
+}
+
+} // namespace
+
+bool SquashfsFilesystem::Init(const string& map,
+ const string& sqfs_path,
+ size_t size,
+ const SquashfsHeader& header,
+ bool extract_deflates) {
+ size_ = size;
+
+ bool is_zlib = header.compression_type == kSquashfsZlibCompression;
+ if (!is_zlib) {
+ LOG(WARNING) << "Filesystem is not Gzipped. Not filling deflates!";
+ }
+ vector<puffin::ByteExtent> zlib_blks;
+
+ // Read the file map. For the format of the file map look at the comments for
+ // |CreateFromFileMap()|.
+ auto lines = base::SplitStringPiece(map,
+ "\n",
+ base::WhitespaceHandling::KEEP_WHITESPACE,
+ base::SplitResult::SPLIT_WANT_NONEMPTY);
+ for (const auto& line : lines) {
+ auto splits =
+ base::SplitStringPiece(line,
+ " \t",
+ base::WhitespaceHandling::TRIM_WHITESPACE,
+ base::SplitResult::SPLIT_WANT_NONEMPTY);
+ // A line with only a file name (and no further fields) is invalid.
+ TEST_AND_RETURN_FALSE(splits.size() > 1);
+ uint64_t start;
+ TEST_AND_RETURN_FALSE(base::StringToUint64(splits[1], &start));
+ uint64_t cur_offset = start;
+ for (size_t i = 2; i < splits.size(); ++i) {
+ uint64_t blk_size;
+ TEST_AND_RETURN_FALSE(base::StringToUint64(splits[i], &blk_size));
+ // TODO(ahassani): For puffin push it into a proper list if uncompressed.
+ auto new_blk_size = blk_size & ~kSquashfsCompressedBit;
+ TEST_AND_RETURN_FALSE(new_blk_size <= header.block_size);
+ if (new_blk_size > 0 && !(blk_size & kSquashfsCompressedBit)) {
+ // Compressed block
+ if (is_zlib && extract_deflates) {
+ zlib_blks.emplace_back(cur_offset, new_blk_size);
+ }
+ }
+ cur_offset += new_blk_size;
+ }
+
+ // If size is zero do not add the file.
+ if (cur_offset - start > 0) {
+ File file;
+ file.name = splits[0].as_string();
+ file.extents = {ExtentForBytes(kBlockSize, start, cur_offset - start)};
+ files_.emplace_back(file);
+ }
+ }
+
+ // Sort all files by their offset in the squashfs.
+ std::sort(files_.begin(), files_.end(), [](const File& a, const File& b) {
+ return a.extents[0].start_block() < b.extents[0].start_block();
+ });
+ // If there is any overlap between two consecutive extents, remove them. Here
+ // we are assuming all files have exactly one extent. If this assumption
+ // changes then this implementation needs to change too.
+ for (auto first = files_.begin(), second = first + 1;
+ first != files_.end() && second != files_.end();
+ second = first + 1) {
+ auto first_begin = first->extents[0].start_block();
+ auto first_end = first_begin + first->extents[0].num_blocks();
+ auto second_begin = second->extents[0].start_block();
+ auto second_end = second_begin + second->extents[0].num_blocks();
+ // Remove the first file if the size is zero.
+ if (first_end == first_begin) {
+ first = files_.erase(first);
+ } else if (first_end > second_begin) { // We found a collision.
+ if (second_end <= first_end) {
+ // Second file is inside the first file, remove the second file.
+ second = files_.erase(second);
+ } else if (first_begin == second_begin) {
+ // First file is inside the second file, remove the first file.
+ first = files_.erase(first);
+ } else {
+ // Remove overlapping extents from the first file.
+ first->extents[0].set_num_blocks(second_begin - first_begin);
+ ++first;
+ }
+ } else {
+ ++first;
+ }
+ }
+
+ // Find all the metadata including superblock and add them to the list of
+ // files.
+ ExtentRanges file_extents;
+ for (const auto& file : files_) {
+ file_extents.AddExtents(file.extents);
+ }
+ vector<Extent> full = {
+ ExtentForRange(0, (size_ + kBlockSize - 1) / kBlockSize)};
+ auto metadata_extents = FilterExtentRanges(full, file_extents);
+ // For now there should be at most two extents. One for superblock and one for
+ // metadata at the end. Just create appropriate files with <metadata-i> name.
+ // We can add all these extents as one metadata too, but that violates the
+ // contiguous write optimization.
+ for (size_t i = 0; i < metadata_extents.size(); i++) {
+ File file;
+ file.name = "<metadata-" + std::to_string(i) + ">";
+ file.extents = {metadata_extents[i]};
+ files_.emplace_back(file);
+ }
+
+ // Do one last sort before returning.
+ std::sort(files_.begin(), files_.end(), [](const File& a, const File& b) {
+ return a.extents[0].start_block() < b.extents[0].start_block();
+ });
+
+ if (is_zlib && extract_deflates) {
+ // If it is in fact gzipped, then the sqfs_path should be valid to read its
+ // content.
+ TEST_AND_RETURN_FALSE(!sqfs_path.empty());
+ if (zlib_blks.empty()) {
+ return true;
+ }
+
+ // Sort zlib blocks.
+ std::sort(zlib_blks.begin(),
+ zlib_blks.end(),
+ [](const puffin::ByteExtent& a, const puffin::ByteExtent& b) {
+ return a.offset < b.offset;
+ });
+
+ // Sanity check. Make sure zlib blocks are not overlapping.
+ auto result = std::adjacent_find(
+ zlib_blks.begin(),
+ zlib_blks.end(),
+ [](const puffin::ByteExtent& a, const puffin::ByteExtent& b) {
+ return (a.offset + a.length) > b.offset;
+ });
+ TEST_AND_RETURN_FALSE(result == zlib_blks.end());
+
+ vector<puffin::BitExtent> deflates;
+ TEST_AND_RETURN_FALSE(
+ puffin::LocateDeflatesInZlibBlocks(sqfs_path, zlib_blks, &deflates));
+
+ // Add deflates for each file.
+ for (auto& file : files_) {
+ file.deflates = deflate_utils::FindDeflates(file.extents, deflates);
+ }
+ }
+ return true;
+}
+
+unique_ptr<SquashfsFilesystem> SquashfsFilesystem::CreateFromFile(
+ const string& sqfs_path, bool extract_deflates) {
+ if (sqfs_path.empty())
+ return nullptr;
+
+ brillo::StreamPtr sqfs_file =
+ brillo::FileStream::Open(base::FilePath(sqfs_path),
+ brillo::Stream::AccessMode::READ,
+ brillo::FileStream::Disposition::OPEN_EXISTING,
+ nullptr);
+ if (!sqfs_file) {
+ LOG(ERROR) << "Unable to open " << sqfs_path << " for reading.";
+ return nullptr;
+ }
+
+ SquashfsHeader header;
+ brillo::Blob blob(kSquashfsSuperBlockSize);
+ if (!sqfs_file->ReadAllBlocking(blob.data(), blob.size(), nullptr)) {
+ LOG(ERROR) << "Unable to read from file: " << sqfs_path;
+ return nullptr;
+ }
+ if (!ReadSquashfsHeader(blob, &header) || !CheckHeader(header)) {
+ // This is not necessarily an error.
+ return nullptr;
+ }
+
+ // Read the map file.
+ string filemap;
+ if (!GetFileMapContent(sqfs_path, &filemap)) {
+ LOG(ERROR) << "Failed to produce squashfs map file: " << sqfs_path;
+ return nullptr;
+ }
+
+ unique_ptr<SquashfsFilesystem> sqfs(new SquashfsFilesystem());
+ if (!sqfs->Init(
+ filemap, sqfs_path, sqfs_file->GetSize(), header, extract_deflates)) {
+ LOG(ERROR) << "Failed to initialized the Squashfs file system";
+ return nullptr;
+ }
+
+ return sqfs;
+}
+
+unique_ptr<SquashfsFilesystem> SquashfsFilesystem::CreateFromFileMap(
+ const string& filemap, size_t size, const SquashfsHeader& header) {
+ if (!CheckHeader(header)) {
+ LOG(ERROR) << "Invalid Squashfs super block!";
+ return nullptr;
+ }
+
+ unique_ptr<SquashfsFilesystem> sqfs(new SquashfsFilesystem());
+ if (!sqfs->Init(filemap, "", size, header, false)) {
+ LOG(ERROR) << "Failed to initialize the Squashfs file system using filemap";
+ return nullptr;
+ }
+ // TODO(ahassani): Add a function that initializes the puffin related extents.
+ return sqfs;
+}
+
+size_t SquashfsFilesystem::GetBlockSize() const {
+ return kBlockSize;
+}
+
+size_t SquashfsFilesystem::GetBlockCount() const {
+ return size_ / kBlockSize;
+}
+
+bool SquashfsFilesystem::GetFiles(vector<File>* files) const {
+ files->insert(files->end(), files_.begin(), files_.end());
+ return true;
+}
+
+bool SquashfsFilesystem::LoadSettings(brillo::KeyValueStore* store) const {
+ // Settings not supported in squashfs.
+ LOG(ERROR) << "squashfs doesn't support LoadSettings().";
+ return false;
+}
+
+bool SquashfsFilesystem::IsSquashfsImage(const brillo::Blob& blob) {
+ SquashfsHeader header;
+ return ReadSquashfsHeader(blob, &header) && CheckHeader(header);
+}
+} // namespace chromeos_update_engine
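ReadSquashfsHeader() above copies four little-endian fields out of the 96-byte superblock (magic at byte 0, block size at 12, compression type at 20, major version at 28), and CheckHeader() only accepts magic 0x73717368 ("hsqs") with major version 4. A standalone sketch of the same parse over a raw byte buffer, assuming a little-endian host as the original code does:

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

struct SuperblockFields {
  uint32_t magic;
  uint32_t block_size;
  uint16_t compression_type;
  uint16_t major_version;
};

// Returns true if |blob| starts with a squashfs v4 superblock, mirroring the
// offsets and checks used by ReadSquashfsHeader()/CheckHeader() above.
bool ParseAndCheckSuperblock(const std::vector<uint8_t>& blob,
                             SuperblockFields* out) {
  constexpr size_t kSuperBlockSize = 96;
  if (blob.size() < kSuperBlockSize)
    return false;
  memcpy(&out->magic, blob.data() + 0, 4);
  memcpy(&out->block_size, blob.data() + 12, 4);
  memcpy(&out->compression_type, blob.data() + 20, 2);
  memcpy(&out->major_version, blob.data() + 28, 2);
  return out->magic == 0x73717368 && out->major_version == 4;
}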
diff --git a/payload_generator/squashfs_filesystem.h b/payload_generator/squashfs_filesystem.h
new file mode 100644
index 0000000..b79f8c7
--- /dev/null
+++ b/payload_generator/squashfs_filesystem.h
@@ -0,0 +1,120 @@
+//
+// Copyright (C) 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// This class implements a FilesystemInterface, which lets the caller obtain
+// basic information about what files are in the filesystem and where they are
+// located on the disk, but not full access to the uncompressed contents of
+// these files. This class uses the definitions found in
+// fs/squashfs/squashfs_fs.h in the kernel header tree. This class supports
+// squashfs version 4 in little-endian format only.
+
+#ifndef UPDATE_ENGINE_PAYLOAD_GENERATOR_SQUASHFS_FILESYSTEM_H_
+#define UPDATE_ENGINE_PAYLOAD_GENERATOR_SQUASHFS_FILESYSTEM_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include <brillo/secure_blob.h>
+
+#include "update_engine/payload_consumer/file_descriptor.h"
+#include "update_engine/payload_generator/extent_utils.h"
+#include "update_engine/payload_generator/filesystem_interface.h"
+
+namespace chromeos_update_engine {
+
+class SquashfsFilesystem : public FilesystemInterface {
+ public:
+ // From a squashfs image we need: (offset, bytes)
+ // - magic: (0, 4)
+ // * Acceptable value is: 0x73717368
+ // - block size: (12, 4)
+ // - compression type: (20, 2)
+ // * 1 is for zlib, gzip
+ // - major number: (28, 2)
+ // * We only support version 4 for now.
+ struct SquashfsHeader {
+ uint32_t magic;
+ uint32_t block_size;
+ uint16_t compression_type;
+ uint16_t major_version;
+ };
+
+ ~SquashfsFilesystem() override = default;
+
+ // Creates the file system from the Squashfs file itself. If
+ // |extract_deflates| is true, it will process files to find the location of
+ // all deflate streams.
+ static std::unique_ptr<SquashfsFilesystem> CreateFromFile(
+ const std::string& sqfs_path, bool extract_deflates);
+
+ // Creates the file system from a file map |filemap|, which is a multi-line
+ // string where each line has the following format:
+ //
+ // file_path start_byte compressed_size_1 ... compressed_size_n
+ //
+ // file_path: The name of the file inside the Squashfs image.
+ // start_byte: The byte address of the start of the file.
+ // compressed_size_i: The compressed size of the ith block of the file.
+ //
+ // The 25th bit of compressed_size_i is set if the block is uncompressed.
+ // |size| is the size of the Squashfs image.
+ static std::unique_ptr<SquashfsFilesystem> CreateFromFileMap(
+ const std::string& filemap, size_t size, const SquashfsHeader& header);
+
+ // FilesystemInterface overrides.
+ size_t GetBlockSize() const override;
+ size_t GetBlockCount() const override;
+
+ // Returns one FilesystemInterface::File for every file (that is not added to
+ // fragments) in the squashfs image.
+ //
+ // It also returns the following metadata files:
+ // <fragment-i>: The ith fragment in the Squashfs file.
+ // <metadata-i>: A part of the file system that does not belong to any
+ // file. Normally, there are only two: one for the superblock and
+ // one for the metadata at the end.
+ bool GetFiles(std::vector<File>* files) const override;
+
+ // Squashfs image does not support this yet.
+ bool LoadSettings(brillo::KeyValueStore* store) const override;
+
+ // Returns true if the first few bytes of a file indicate a valid Squashfs
+ // image. The size of |blob| should be at least sizeof(SquashfsHeader) or,
+ // for now, 96 bytes.
+ static bool IsSquashfsImage(const brillo::Blob& blob);
+
+ private:
+ SquashfsFilesystem() = default;
+
+ // Initializes and populates the files in the file system.
+ bool Init(const std::string& map,
+ const std::string& sqfs_path,
+ size_t size,
+ const SquashfsHeader& header,
+ bool extract_deflates);
+
+ // The size of the image in bytes.
+ size_t size_;
+
+ // All the files in the filesystem.
+ std::vector<File> files_;
+
+ DISALLOW_COPY_AND_ASSIGN(SquashfsFilesystem);
+};
+
+} // namespace chromeos_update_engine
+#endif // UPDATE_ENGINE_PAYLOAD_GENERATOR_SQUASHFS_FILESYSTEM_H_
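As a quick illustration of the CreateFromFileMap() contract documented above, here is a minimal sketch (not part of this change; the map contents, sizes, and the helper name are invented) that builds a two-file map the same way the unit tests below do, marking the second file's single block as uncompressed via bit 24:

  #include <memory>
  #include <string>

  #include "update_engine/payload_generator/squashfs_filesystem.h"

  namespace chromeos_update_engine {

  std::unique_ptr<SquashfsFilesystem> ExampleFromFileMap() {
    // "dir1/file1": one 4000-byte compressed block at byte offset 96.
    // "dir1/file2": one 100-byte block at offset 4096, flagged uncompressed.
    std::string filemap =
        "dir1/file1 96 4000\n"
        "dir1/file2 4096 " + std::to_string(100 | (1 << 24)) + "\n";
    SquashfsFilesystem::SquashfsHeader header{.magic = 0x73717368,
                                              .block_size = 1 << 15,
                                              .compression_type = 1,
                                              .major_version = 4};
    // |size| is the total image size in bytes (two 4K blocks here).
    return SquashfsFilesystem::CreateFromFileMap(filemap, 2 * 4096, header);
  }

  }  // namespace chromeos_update_engine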
diff --git a/payload_generator/squashfs_filesystem_unittest.cc b/payload_generator/squashfs_filesystem_unittest.cc
new file mode 100644
index 0000000..29fcf1c
--- /dev/null
+++ b/payload_generator/squashfs_filesystem_unittest.cc
@@ -0,0 +1,318 @@
+//
+// Copyright (C) 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_generator/squashfs_filesystem.h"
+
+#include <unistd.h>
+
+#include <algorithm>
+#include <map>
+#include <set>
+#include <string>
+#include <vector>
+
+#include <base/format_macros.h>
+#include <base/logging.h>
+#include <base/strings/string_number_conversions.h>
+#include <base/strings/string_util.h>
+#include <base/strings/stringprintf.h>
+#include <gtest/gtest.h>
+
+#include "update_engine/common/test_utils.h"
+#include "update_engine/common/utils.h"
+#include "update_engine/payload_generator/extent_utils.h"
+
+namespace chromeos_update_engine {
+
+using std::map;
+using std::set;
+using std::string;
+using std::unique_ptr;
+using std::vector;
+
+using test_utils::GetBuildArtifactsPath;
+
+namespace {
+
+constexpr uint64_t kTestBlockSize = 4096;
+constexpr uint64_t kTestSqfsBlockSize = 1 << 15;
+
+// Checks that all the blocks in |extents| are in the range [0, total_blocks).
+void ExpectBlocksInRange(const vector<Extent>& extents, uint64_t total_blocks) {
+ for (const Extent& extent : extents) {
+ EXPECT_LE(0U, extent.start_block());
+ EXPECT_LE(extent.start_block() + extent.num_blocks(), total_blocks);
+ }
+}
+
+SquashfsFilesystem::SquashfsHeader GetSimpleHeader() {
+ // These properties are enough for now. Add more as needed.
+ return {
+ .magic = 0x73717368,
+ .block_size = kTestSqfsBlockSize,
+ .compression_type = 1, // For gzip.
+ .major_version = 4,
+ };
+}
+
+} // namespace
+
+class SquashfsFilesystemTest : public ::testing::Test {
+ public:
+ void CheckSquashfs(const unique_ptr<SquashfsFilesystem>& fs) {
+ ASSERT_TRUE(fs);
+ EXPECT_EQ(kTestBlockSize, fs->GetBlockSize());
+
+ vector<FilesystemInterface::File> files;
+ ASSERT_TRUE(fs->GetFiles(&files));
+
+ map<string, FilesystemInterface::File> map_files;
+ for (const auto& file : files) {
+ EXPECT_EQ(map_files.end(), map_files.find(file.name))
+ << "File " << file.name << " repeated in the list.";
+ map_files[file.name] = file;
+ ExpectBlocksInRange(file.extents, fs->GetBlockCount());
+ }
+
+ // Check that the files are sorted by the start block of their first extent.
+ EXPECT_TRUE(std::is_sorted(files.begin(),
+ files.end(),
+ [](const FilesystemInterface::File& a,
+ const FilesystemInterface::File& b) {
+ return a.extents[0].start_block() <
+ b.extents[0].start_block();
+ }));
+
+ auto overlap_check = [](const FilesystemInterface::File& a,
+ const FilesystemInterface::File& b) {
+ // Return true if overlapping.
+ return a.extents[0].start_block() + a.extents[0].num_blocks() >
+ b.extents[0].start_block();
+ };
+ // Check files are not overlapping.
+ EXPECT_EQ(std::adjacent_find(files.begin(), files.end(), overlap_check),
+ files.end());
+ }
+};
+
+// CreateFromFile() depends on unsquashfs -m, which only exists in Chrome OS.
+#ifdef __CHROMEOS__
+TEST_F(SquashfsFilesystemTest, EmptyFilesystemTest) {
+ unique_ptr<SquashfsFilesystem> fs = SquashfsFilesystem::CreateFromFile(
+ GetBuildArtifactsPath("gen/disk_sqfs_empty.img"), true);
+ CheckSquashfs(fs);
+
+ // Even an empty squashfs filesystem is rounded up to 4K.
+ EXPECT_EQ(4096 / kTestBlockSize, fs->GetBlockCount());
+
+ vector<FilesystemInterface::File> files;
+ ASSERT_TRUE(fs->GetFiles(&files));
+ ASSERT_EQ(files.size(), 1u);
+
+ FilesystemInterface::File file;
+ file.name = "<metadata-0>";
+ file.extents.emplace_back();
+ file.extents[0].set_start_block(0);
+ file.extents[0].set_num_blocks(1);
+ EXPECT_EQ(files[0].name, file.name);
+ EXPECT_EQ(files[0].extents, file.extents);
+}
+
+TEST_F(SquashfsFilesystemTest, DefaultFilesystemTest) {
+ unique_ptr<SquashfsFilesystem> fs = SquashfsFilesystem::CreateFromFile(
+ GetBuildArtifactsPath("gen/disk_sqfs_default.img"), true);
+ CheckSquashfs(fs);
+
+ vector<FilesystemInterface::File> files;
+ ASSERT_TRUE(fs->GetFiles(&files));
+ ASSERT_EQ(files.size(), 1u);
+
+ FilesystemInterface::File file;
+ file.name = "<fragment-0>";
+ file.extents.emplace_back();
+ file.extents[0].set_start_block(0);
+ file.extents[0].set_num_blocks(1);
+ EXPECT_EQ(files[0].name, file.name);
+ EXPECT_EQ(files[0].extents, file.extents);
+}
+#endif // __CHROMEOS__
+
+TEST_F(SquashfsFilesystemTest, SimpleFileMapTest) {
+ string filemap = R"(dir1/file1 96 4000
+ dir1/file2 4096 100)";
+ unique_ptr<SquashfsFilesystem> fs = SquashfsFilesystem::CreateFromFileMap(
+ filemap, kTestBlockSize * 2, GetSimpleHeader());
+ CheckSquashfs(fs);
+
+ vector<FilesystemInterface::File> files;
+ ASSERT_TRUE(fs->GetFiles(&files));
+ EXPECT_EQ(files.size(), 2u);
+}
+
+TEST_F(SquashfsFilesystemTest, FileMapZeroSizeFileTest) {
+ // The second file's size is zero.
+ string filemap = R"(dir1/file1 96 4000
+ dir1/file2 4096
+ dir1/file3 4096 100)";
+ unique_ptr<SquashfsFilesystem> fs = SquashfsFilesystem::CreateFromFileMap(
+ filemap, kTestBlockSize * 2, GetSimpleHeader());
+ CheckSquashfs(fs);
+
+ vector<FilesystemInterface::File> files;
+ ASSERT_TRUE(fs->GetFiles(&files));
+ // The second file has no blocks (zero size), so it is dropped and only two files remain.
+ EXPECT_EQ(files.size(), 2u);
+}
+
+// Testing the compressed bit.
+TEST_F(SquashfsFilesystemTest, CompressedBitTest) {
+ string filemap = "dir1/file1 0 " + std::to_string(4000 | (1 << 24)) + "\n";
+ unique_ptr<SquashfsFilesystem> fs = SquashfsFilesystem::CreateFromFileMap(
+ filemap, kTestBlockSize, GetSimpleHeader());
+ CheckSquashfs(fs);
+
+ vector<FilesystemInterface::File> files;
+ ASSERT_TRUE(fs->GetFiles(&files));
+ ASSERT_EQ(files.size(), 1u);
+ EXPECT_EQ(files[0].extents[0].num_blocks(), 1u);
+}
+
+// Test overlap.
+TEST_F(SquashfsFilesystemTest, OverlapingFiles1Test) {
+ string filemap = R"(file1 0 6000
+ file2 5000 5000)";
+ unique_ptr<SquashfsFilesystem> fs = SquashfsFilesystem::CreateFromFileMap(
+ filemap, kTestBlockSize * 3, GetSimpleHeader());
+ CheckSquashfs(fs);
+
+ vector<FilesystemInterface::File> files;
+ ASSERT_TRUE(fs->GetFiles(&files));
+ ASSERT_EQ(files.size(), 2u);
+ EXPECT_EQ(files[0].extents[0].num_blocks(), 1u);
+ EXPECT_EQ(files[1].extents[0].num_blocks(), 2u);
+}
+
+// Test overlap, first inside second.
+TEST_F(SquashfsFilesystemTest, OverlapingFiles2Test) {
+ string filemap = R"(file1 0 4000
+ file2 0 6000)";
+ unique_ptr<SquashfsFilesystem> fs = SquashfsFilesystem::CreateFromFileMap(
+ filemap, kTestBlockSize * 2, GetSimpleHeader());
+ CheckSquashfs(fs);
+
+ vector<FilesystemInterface::File> files;
+ ASSERT_TRUE(fs->GetFiles(&files));
+ ASSERT_EQ(files.size(), 1u);
+ EXPECT_EQ(files[0].name, "file2");
+ EXPECT_EQ(files[0].extents[0].num_blocks(), 2u);
+}
+
+// Test overlap, second inside first.
+TEST_F(SquashfsFilesystemTest, OverlapingFiles3Test) {
+ string filemap = R"(file1 0 8000
+ file2 100 100)";
+ unique_ptr<SquashfsFilesystem> fs = SquashfsFilesystem::CreateFromFileMap(
+ filemap, kTestBlockSize * 2, GetSimpleHeader());
+ CheckSquashfs(fs);
+
+ vector<FilesystemInterface::File> files;
+ ASSERT_TRUE(fs->GetFiles(&files));
+ ASSERT_EQ(files.size(), 1u);
+ EXPECT_EQ(files[0].name, "file1");
+ EXPECT_EQ(files[0].extents[0].num_blocks(), 2u);
+}
+
+// Fail a line with only one argument.
+TEST_F(SquashfsFilesystemTest, FailOnlyFileNameTest) {
+ string filemap = "dir1/file1\n";
+ unique_ptr<SquashfsFilesystem> fs = SquashfsFilesystem::CreateFromFileMap(
+ filemap, kTestBlockSize, GetSimpleHeader());
+ EXPECT_FALSE(fs);
+}
+
+// Fail a line with a space-separated file name.
+TEST_F(SquashfsFilesystemTest, FailSpaceInFileNameTest) {
+ string filemap = "dir1 file1 0 10\n";
+ unique_ptr<SquashfsFilesystem> fs = SquashfsFilesystem::CreateFromFileMap(
+ filemap, kTestBlockSize, GetSimpleHeader());
+ EXPECT_FALSE(fs);
+}
+
+// Fail on an empty line.
+TEST_F(SquashfsFilesystemTest, FailEmptyLineTest) {
+ // The file map starts with an empty line, so parsing should fail.
+ string filemap = R"(
+ /t
+ dir1/file3 4096 100)";
+ unique_ptr<SquashfsFilesystem> fs = SquashfsFilesystem::CreateFromFileMap(
+ filemap, kTestBlockSize * 2, GetSimpleHeader());
+ EXPECT_FALSE(fs);
+}
+
+// Fail on bad magic or major
+TEST_F(SquashfsFilesystemTest, FailBadMagicOrMajorTest) {
+ string filemap = "dir1/file1 0 10\n";
+ auto header = GetSimpleHeader();
+ header.magic = 1;
+ EXPECT_FALSE(
+ SquashfsFilesystem::CreateFromFileMap(filemap, kTestBlockSize, header));
+
+ header = GetSimpleHeader();
+ header.major_version = 3;
+ EXPECT_FALSE(
+ SquashfsFilesystem::CreateFromFileMap(filemap, kTestBlockSize, header));
+}
+
+// Fail when a block's compressed size is larger than the squashfs block size.
+TEST_F(SquashfsFilesystemTest, FailLargerThanBlockSizeTest) {
+ string filemap = "file1 0 " + std::to_string(kTestSqfsBlockSize + 1) + "\n";
+ unique_ptr<SquashfsFilesystem> fs = SquashfsFilesystem::CreateFromFileMap(
+ filemap, kTestBlockSize, GetSimpleHeader());
+ EXPECT_FALSE(fs);
+}
+
+// Test IsSquashfsImage().
+TEST_F(SquashfsFilesystemTest, IsSquashfsImageTest) {
+ // A sample superblock from a recent squashfs file.
+ brillo::Blob super_block = {
+ 0x68, 0x73, 0x71, 0x73, 0x59, 0x05, 0x00, 0x00, 0x09, 0x3a, 0x89, 0x58,
+ 0x00, 0x00, 0x02, 0x00, 0x9a, 0x00, 0x00, 0x00, 0x01, 0x00, 0x11, 0x00,
+ 0xc0, 0x00, 0x06, 0x00, 0x04, 0x00, 0x00, 0x00, 0x89, 0x18, 0xf7, 0x7c,
+ 0x00, 0x00, 0x00, 0x00, 0x2e, 0x33, 0xcd, 0x16, 0x00, 0x00, 0x00, 0x00,
+ 0x3a, 0x30, 0xcd, 0x16, 0x00, 0x00, 0x00, 0x00, 0x16, 0x33, 0xcd, 0x16,
+ 0x00, 0x00, 0x00, 0x00, 0x07, 0x62, 0xcc, 0x16, 0x00, 0x00, 0x00, 0x00,
+ 0x77, 0xe6, 0xcc, 0x16, 0x00, 0x00, 0x00, 0x00, 0x1a, 0x25, 0xcd, 0x16,
+ 0x00, 0x00, 0x00, 0x00, 0x10, 0x30, 0xcd, 0x16, 0x00, 0x00, 0x00, 0x00};
+
+ EXPECT_TRUE(SquashfsFilesystem::IsSquashfsImage(super_block));
+
+ // Bad magic
+ auto bad_super_block = super_block;
+ bad_super_block[1] = 0x02;
+ EXPECT_FALSE(SquashfsFilesystem::IsSquashfsImage(bad_super_block));
+
+ // Bad major
+ bad_super_block = super_block;
+ bad_super_block[28] = 0x03;
+ EXPECT_FALSE(SquashfsFilesystem::IsSquashfsImage(bad_super_block));
+
+ // Small size.
+ bad_super_block = super_block;
+ bad_super_block.resize(10);
+ EXPECT_FALSE(SquashfsFilesystem::IsSquashfsImage(bad_super_block));
+}
+
+} // namespace chromeos_update_engine
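The IsSquashfsImageTest above flips raw bytes at fixed offsets; those offsets come straight from the header layout documented in squashfs_filesystem.h (magic at byte 0, block size at 12, compression type at 20, major version at 28, little-endian). The following is only a hedged sketch of how such a superblock blob could be decoded; the real ReadSquashfsHeader() in squashfs_filesystem.cc may differ in its details:

  #include <string.h>

  #include <brillo/secure_blob.h>

  #include "update_engine/payload_generator/squashfs_filesystem.h"

  namespace {

  using chromeos_update_engine::SquashfsFilesystem;

  // Reads the fields at the offsets documented in squashfs_filesystem.h.
  bool ParseHeaderSketch(const brillo::Blob& blob,
                         SquashfsFilesystem::SquashfsHeader* header) {
    if (blob.size() < 96)  // The header comment asks for at least 96 bytes.
      return false;
    memcpy(&header->magic, blob.data() + 0, 4);
    memcpy(&header->block_size, blob.data() + 12, 4);
    memcpy(&header->compression_type, blob.data() + 20, 2);
    memcpy(&header->major_version, blob.data() + 28, 2);
    return header->magic == 0x73717368 && header->major_version == 4;
  }

  }  // namespace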
diff --git a/payload_generator/zip_unittest.cc b/payload_generator/zip_unittest.cc
index 54adfcb..c750eb7 100644
--- a/payload_generator/zip_unittest.cc
+++ b/payload_generator/zip_unittest.cc
@@ -17,10 +17,10 @@
#include <string.h>
#include <unistd.h>
+#include <memory>
#include <string>
#include <vector>
-#include <brillo/make_unique_ptr.h>
#include <gtest/gtest.h>
#include "update_engine/common/test_utils.h"
@@ -31,6 +31,7 @@
#include "update_engine/payload_generator/xz.h"
using chromeos_update_engine::test_utils::kRandomString;
+using google::protobuf::RepeatedPtrField;
using std::string;
using std::vector;
@@ -50,7 +51,7 @@
~MemoryExtentWriter() override = default;
bool Init(FileDescriptorPtr fd,
- const vector<Extent>& extents,
+ const RepeatedPtrField<Extent>& extents,
uint32_t block_size) override {
return true;
}
@@ -70,7 +71,7 @@
template <typename W>
bool DecompressWithWriter(const brillo::Blob& in, brillo::Blob* out) {
std::unique_ptr<ExtentWriter> writer(
- new W(brillo::make_unique_ptr(new MemoryExtentWriter(out))));
+ new W(std::make_unique<MemoryExtentWriter>(out)));
// Init() parameters are ignored by the testing MemoryExtentWriter.
bool ok = writer->Init(nullptr, {}, 1);
ok = writer->Write(in.data(), in.size()) && ok;
diff --git a/payload_state.cc b/payload_state.cc
index 96181ea..4992606 100644
--- a/payload_state.cc
+++ b/payload_state.cc
@@ -32,6 +32,7 @@
#include "update_engine/common/prefs.h"
#include "update_engine/common/utils.h"
#include "update_engine/connection_manager_interface.h"
+#include "update_engine/metrics_reporter_interface.h"
#include "update_engine/metrics_utils.h"
#include "update_engine/omaha_request_params.h"
#include "update_engine/payload_consumer/install_plan.h"
@@ -44,6 +45,8 @@
namespace chromeos_update_engine {
+using metrics_utils::GetPersistedValue;
+
const TimeDelta PayloadState::kDurationSlack = TimeDelta::FromSeconds(600);
// We want to upperbound backoffs to 16 days
@@ -235,8 +238,8 @@
break;
case AttemptType::kRollback:
- metrics::ReportRollbackMetrics(system_state_,
- metrics::RollbackResult::kSuccess);
+ system_state_->metrics_reporter()->ReportRollbackMetrics(
+ metrics::RollbackResult::kSuccess);
break;
}
attempt_error_code_ = ErrorCode::kSuccess;
@@ -246,7 +249,7 @@
SetNumResponsesSeen(0);
SetPayloadIndex(0);
- CreateSystemUpdatedMarkerFile();
+ metrics_utils::SetSystemUpdatedMarker(system_state_->clock(), prefs_);
}
void PayloadState::UpdateFailed(ErrorCode error) {
@@ -270,8 +273,8 @@
break;
case AttemptType::kRollback:
- metrics::ReportRollbackMetrics(system_state_,
- metrics::RollbackResult::kFailed);
+ system_state_->metrics_reporter()->ReportRollbackMetrics(
+ metrics::RollbackResult::kFailed);
break;
}
@@ -355,6 +358,7 @@
case ErrorCode::kOmahaRequestXMLHasEntityDecl:
case ErrorCode::kFilesystemVerifierError:
case ErrorCode::kUserCanceled:
+ case ErrorCode::kUpdatedButNotActive:
LOG(INFO) << "Not incrementing URL index or failure count for this error";
break;
@@ -631,24 +635,28 @@
case metrics::AttemptResult::kPostInstallFailed:
case metrics::AttemptResult::kAbnormalTermination:
case metrics::AttemptResult::kUpdateCanceled:
+ case metrics::AttemptResult::kUpdateSucceededNotActive:
case metrics::AttemptResult::kNumConstants:
case metrics::AttemptResult::kUnset:
break;
}
- metrics::ReportUpdateAttemptMetrics(system_state_,
- attempt_number,
- payload_type,
- duration,
- duration_uptime,
- payload_size,
- payload_bytes_downloaded,
- payload_download_speed_bps,
- download_source,
- attempt_result,
- internal_error_code,
- payload_download_error_code,
- attempt_connection_type_);
+ system_state_->metrics_reporter()->ReportUpdateAttemptMetrics(
+ system_state_,
+ attempt_number,
+ payload_type,
+ duration,
+ duration_uptime,
+ payload_size,
+ attempt_result,
+ internal_error_code);
+
+ system_state_->metrics_reporter()->ReportUpdateAttemptDownloadMetrics(
+ payload_bytes_downloaded,
+ payload_download_speed_bps,
+ download_source,
+ payload_download_error_code,
+ attempt_connection_type_);
}
void PayloadState::PersistAttemptMetrics() {
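The hunk above splits the old single UMA call into an attempt-metrics call and a download-metrics call on the new metrics reporter. The interface itself is not part of this hunk; the declarations below are hypothetical, inferred only from the call sites (the parameter types in particular are assumptions), and metrics_reporter_interface.h is authoritative:

  // Hypothetical sketch; see metrics_reporter_interface.h for the real API.
  class MetricsReporterInterface {
   public:
    virtual ~MetricsReporterInterface() = default;

    virtual void ReportUpdateAttemptMetrics(SystemState* system_state,
                                            int attempt_number,
                                            PayloadType payload_type,
                                            base::TimeDelta duration,
                                            base::TimeDelta duration_uptime,
                                            int64_t payload_size,
                                            metrics::AttemptResult attempt_result,
                                            ErrorCode internal_error_code) = 0;

    virtual void ReportUpdateAttemptDownloadMetrics(
        int64_t payload_bytes_downloaded,
        int64_t payload_download_speed_bps,
        DownloadSource download_source,
        metrics::DownloadErrorCode payload_download_error_code,
        metrics::ConnectionType connection_type) = 0;
  };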
@@ -673,7 +681,8 @@
if (!attempt_in_progress)
return;
- metrics::ReportAbnormallyTerminatedUpdateAttemptMetrics(system_state_);
+ system_state_->metrics_reporter()
+ ->ReportAbnormallyTerminatedUpdateAttemptMetrics();
ClearPersistedAttemptMetrics();
}
@@ -735,16 +744,16 @@
int updates_abandoned_count = num_responses_seen_ - 1;
- metrics::ReportSuccessfulUpdateMetrics(system_state_,
- attempt_count,
- updates_abandoned_count,
- payload_type,
- payload_size,
- total_bytes_by_source,
- download_overhead_percentage,
- duration,
- reboot_count,
- url_switch_count);
+ system_state_->metrics_reporter()->ReportSuccessfulUpdateMetrics(
+ attempt_count,
+ updates_abandoned_count,
+ payload_type,
+ payload_size,
+ total_bytes_by_source,
+ download_overhead_percentage,
+ duration,
+ reboot_count,
+ url_switch_count);
}
void PayloadState::UpdateNumReboots() {
@@ -758,11 +767,8 @@
}
void PayloadState::SetNumReboots(uint32_t num_reboots) {
- CHECK(prefs_);
num_reboots_ = num_reboots;
- prefs_->SetInt64(kPrefsNumReboots, num_reboots);
- LOG(INFO) << "Number of Reboots during current update attempt = "
- << num_reboots_;
+ metrics_utils::SetNumReboots(num_reboots, prefs_);
}
void PayloadState::ResetPersistedState() {
@@ -799,24 +805,6 @@
}
}
-int64_t PayloadState::GetPersistedValue(const string& key) {
- CHECK(prefs_);
- if (!prefs_->Exists(key))
- return 0;
-
- int64_t stored_value;
- if (!prefs_->GetInt64(key, &stored_value))
- return 0;
-
- if (stored_value < 0) {
- LOG(ERROR) << key << ": Invalid value (" << stored_value
- << ") in persisted state. Defaulting to 0";
- return 0;
- }
-
- return stored_value;
-}
-
string PayloadState::CalculateResponseSignature() {
string response_sign;
for (size_t i = 0; i < response_.packages.size(); i++) {
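The per-class GetPersistedValue() removed in the hunk above has moved to metrics_utils and now takes the prefs object explicitly (note the `using metrics_utils::GetPersistedValue;` added earlier in this file). A rough sketch of what the relocated helper presumably looks like, reconstructed from the deleted body and the new call sites below:

  // Sketch only; the actual helper lives in metrics_utils.{h,cc}.
  int64_t GetPersistedValue(const std::string& key, PrefsInterface* prefs) {
    CHECK(prefs);
    if (!prefs->Exists(key))
      return 0;

    int64_t stored_value;
    if (!prefs->GetInt64(key, &stored_value))
      return 0;

    if (stored_value < 0) {
      LOG(ERROR) << key << ": Invalid value (" << stored_value
                 << ") in persisted state. Defaulting to 0";
      return 0;
    }
    return stored_value;
  }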
@@ -867,19 +855,18 @@
}
void PayloadState::LoadPayloadAttemptNumber() {
- SetPayloadAttemptNumber(GetPersistedValue(kPrefsPayloadAttemptNumber));
+ SetPayloadAttemptNumber(
+ GetPersistedValue(kPrefsPayloadAttemptNumber, prefs_));
}
void PayloadState::LoadFullPayloadAttemptNumber() {
- SetFullPayloadAttemptNumber(GetPersistedValue(
- kPrefsFullPayloadAttemptNumber));
+ SetFullPayloadAttemptNumber(
+ GetPersistedValue(kPrefsFullPayloadAttemptNumber, prefs_));
}
void PayloadState::SetPayloadAttemptNumber(int payload_attempt_number) {
- CHECK(prefs_);
payload_attempt_number_ = payload_attempt_number;
- LOG(INFO) << "Payload Attempt Number = " << payload_attempt_number_;
- prefs_->SetInt64(kPrefsPayloadAttemptNumber, payload_attempt_number_);
+ metrics_utils::SetPayloadAttemptNumber(payload_attempt_number, prefs_);
}
void PayloadState::SetFullPayloadAttemptNumber(
@@ -906,7 +893,7 @@
}
void PayloadState::LoadUrlIndex() {
- SetUrlIndex(GetPersistedValue(kPrefsCurrentUrlIndex));
+ SetUrlIndex(GetPersistedValue(kPrefsCurrentUrlIndex, prefs_));
}
void PayloadState::SetUrlIndex(uint32_t url_index) {
@@ -921,8 +908,8 @@
}
void PayloadState::LoadScatteringWaitPeriod() {
- SetScatteringWaitPeriod(
- TimeDelta::FromSeconds(GetPersistedValue(kPrefsWallClockWaitPeriod)));
+ SetScatteringWaitPeriod(TimeDelta::FromSeconds(
+ GetPersistedValue(kPrefsWallClockWaitPeriod, prefs_)));
}
void PayloadState::SetScatteringWaitPeriod(TimeDelta wait_period) {
@@ -939,7 +926,7 @@
}
void PayloadState::LoadUrlSwitchCount() {
- SetUrlSwitchCount(GetPersistedValue(kPrefsUrlSwitchCount));
+ SetUrlSwitchCount(GetPersistedValue(kPrefsUrlSwitchCount, prefs_));
}
void PayloadState::SetUrlSwitchCount(uint32_t url_switch_count) {
@@ -950,7 +937,7 @@
}
void PayloadState::LoadUrlFailureCount() {
- SetUrlFailureCount(GetPersistedValue(kPrefsCurrentUrlFailureCount));
+ SetUrlFailureCount(GetPersistedValue(kPrefsCurrentUrlFailureCount, prefs_));
}
void PayloadState::SetUrlFailureCount(uint32_t url_failure_count) {
@@ -1033,12 +1020,8 @@
}
void PayloadState::SetUpdateTimestampStart(const Time& value) {
- CHECK(prefs_);
update_timestamp_start_ = value;
- prefs_->SetInt64(kPrefsUpdateTimestampStart,
- update_timestamp_start_.ToInternalValue());
- LOG(INFO) << "Update Timestamp Start = "
- << utils::ToString(update_timestamp_start_);
+ metrics_utils::SetUpdateTimestampStart(value, prefs_);
}
void PayloadState::SetUpdateTimestampEnd(const Time& value) {
@@ -1084,7 +1067,7 @@
}
void PayloadState::LoadNumReboots() {
- SetNumReboots(GetPersistedValue(kPrefsNumReboots));
+ SetNumReboots(GetPersistedValue(kPrefsNumReboots, prefs_));
}
void PayloadState::LoadRollbackVersion() {
@@ -1136,7 +1119,7 @@
void PayloadState::LoadCurrentBytesDownloaded(DownloadSource source) {
string key = GetPrefsKey(kPrefsCurrentBytesDownloaded, source);
- SetCurrentBytesDownloaded(source, GetPersistedValue(key), true);
+ SetCurrentBytesDownloaded(source, GetPersistedValue(key, prefs_), true);
}
void PayloadState::SetCurrentBytesDownloaded(
@@ -1160,7 +1143,7 @@
void PayloadState::LoadTotalBytesDownloaded(DownloadSource source) {
string key = GetPrefsKey(kPrefsTotalBytesDownloaded, source);
- SetTotalBytesDownloaded(source, GetPersistedValue(key), true);
+ SetTotalBytesDownloaded(source, GetPersistedValue(key, prefs_), true);
}
void PayloadState::SetTotalBytesDownloaded(
@@ -1184,7 +1167,7 @@
}
void PayloadState::LoadNumResponsesSeen() {
- SetNumResponsesSeen(GetPersistedValue(kPrefsNumResponsesSeen));
+ SetNumResponsesSeen(GetPersistedValue(kPrefsNumResponsesSeen, prefs_));
}
void PayloadState::SetNumResponsesSeen(int num_responses_seen) {
@@ -1225,24 +1208,6 @@
}
}
-void PayloadState::CreateSystemUpdatedMarkerFile() {
- CHECK(prefs_);
- int64_t value = system_state_->clock()->GetWallclockTime().ToInternalValue();
- prefs_->SetInt64(kPrefsSystemUpdatedMarker, value);
-}
-
-void PayloadState::BootedIntoUpdate(TimeDelta time_to_reboot) {
- // Send |time_to_reboot| as a UMA stat.
- string metric = metrics::kMetricTimeToRebootMinutes;
- system_state_->metrics_lib()->SendToUMA(metric,
- time_to_reboot.InMinutes(),
- 0, // min: 0 minute
- 30*24*60, // max: 1 month (approx)
- kNumDefaultUmaBuckets);
- LOG(INFO) << "Uploading " << utils::FormatTimeDelta(time_to_reboot)
- << " for metric " << metric;
-}
-
void PayloadState::UpdateEngineStarted() {
// Flush previous state from abnormal attempt failure, if any.
ReportAndClearPersistedAttemptMetrics();
@@ -1252,24 +1217,11 @@
if (!system_state_->system_rebooted())
return;
- // Figure out if we just booted into a new update
- if (prefs_->Exists(kPrefsSystemUpdatedMarker)) {
- int64_t stored_value;
- if (prefs_->GetInt64(kPrefsSystemUpdatedMarker, &stored_value)) {
- Time system_updated_at = Time::FromInternalValue(stored_value);
- if (!system_updated_at.is_null()) {
- TimeDelta time_to_reboot =
- system_state_->clock()->GetWallclockTime() - system_updated_at;
- if (time_to_reboot.ToInternalValue() < 0) {
- LOG(ERROR) << "time_to_reboot is negative - system_updated_at: "
- << utils::ToString(system_updated_at);
- } else {
- BootedIntoUpdate(time_to_reboot);
- }
- }
- }
- prefs_->Delete(kPrefsSystemUpdatedMarker);
- }
+ // Report time_to_reboot if we booted into a new update.
+ metrics_utils::LoadAndReportTimeToReboot(
+ system_state_->metrics_reporter(), prefs_, system_state_->clock());
+ prefs_->Delete(kPrefsSystemUpdatedMarker);
+
// Check if it is needed to send metrics about a failed reboot into a new
// version.
ReportFailedBootIfNeeded();
@@ -1303,15 +1255,8 @@
}
// Report the UMA metric of the current boot failure.
- string metric = metrics::kMetricFailedUpdateCount;
- LOG(INFO) << "Uploading " << target_attempt
- << " (count) for metric " << metric;
- system_state_->metrics_lib()->SendToUMA(
- metric,
- target_attempt,
- 1, // min value
- 50, // max value
- kNumDefaultUmaBuckets);
+ system_state_->metrics_reporter()->ReportFailedUpdateCount(
+ target_attempt);
} else {
prefs_->Delete(kPrefsTargetVersionAttempt);
prefs_->Delete(kPrefsTargetVersionUniqueId);
@@ -1368,7 +1313,7 @@
}
void PayloadState::LoadP2PNumAttempts() {
- SetP2PNumAttempts(GetPersistedValue(kPrefsP2PNumAttempts));
+ SetP2PNumAttempts(GetPersistedValue(kPrefsP2PNumAttempts, prefs_));
}
Time PayloadState::GetP2PFirstAttemptTimestamp() {
@@ -1385,7 +1330,8 @@
}
void PayloadState::LoadP2PFirstAttemptTimestamp() {
- int64_t stored_value = GetPersistedValue(kPrefsP2PFirstAttemptTimestamp);
+ int64_t stored_value =
+ GetPersistedValue(kPrefsP2PFirstAttemptTimestamp, prefs_);
Time stored_time = Time::FromInternalValue(stored_value);
SetP2PFirstAttemptTimestamp(stored_time);
}
diff --git a/payload_state.h b/payload_state.h
index 699fc74..24e9900 100644
--- a/payload_state.h
+++ b/payload_state.h
@@ -17,6 +17,7 @@
#ifndef UPDATE_ENGINE_PAYLOAD_STATE_H_
#define UPDATE_ENGINE_PAYLOAD_STATE_H_
+#include <algorithm>
#include <string>
#include <vector>
@@ -24,7 +25,7 @@
#include <gtest/gtest_prod.h> // for FRIEND_TEST
#include "update_engine/common/prefs_interface.h"
-#include "update_engine/metrics.h"
+#include "update_engine/metrics_constants.h"
#include "update_engine/payload_state_interface.h"
namespace chromeos_update_engine {
@@ -240,10 +241,6 @@
// reset on a new update.
void ResetDownloadSourcesOnNewUpdate();
- // Returns the persisted value from prefs_ for the given key. It also
- // validates that the value returned is non-negative.
- int64_t GetPersistedValue(const std::string& key);
-
// Calculates the response "signature", which is basically a string composed
// of the subset of the fields in the current response that affect the
// behavior of the PayloadState.
@@ -404,16 +401,7 @@
// increments num_reboots.
void UpdateNumReboots();
- // Writes the current wall-clock time to the kPrefsSystemUpdatedMarker
- // state variable.
- void CreateSystemUpdatedMarkerFile();
- // Called at program startup if the device booted into a new update.
- // The |time_to_reboot| parameter contains the (wall-clock) duration
- // from when the update successfully completed (the value written
- // into the kPrefsSystemUpdatedMarker state variable) until the device
- // was booted into the update (current wall-clock time).
- void BootedIntoUpdate(base::TimeDelta time_to_reboot);
// Loads the |kPrefsP2PFirstAttemptTimestamp| state variable from disk
// into |p2p_first_attempt_timestamp_|.
diff --git a/payload_state_unittest.cc b/payload_state_unittest.cc
index 4546180..f1c3835 100644
--- a/payload_state_unittest.cc
+++ b/payload_state_unittest.cc
@@ -31,6 +31,7 @@
#include "update_engine/common/test_utils.h"
#include "update_engine/common/utils.h"
#include "update_engine/fake_system_state.h"
+#include "update_engine/metrics_reporter_interface.h"
#include "update_engine/omaha_request_action.h"
using base::Time;
@@ -41,7 +42,7 @@
using testing::Mock;
using testing::NiceMock;
using testing::Return;
-using testing::SetArgumentPointee;
+using testing::SetArgPointee;
using testing::_;
namespace chromeos_update_engine {
@@ -311,11 +312,6 @@
EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
- EXPECT_CALL(*fake_system_state.mock_metrics_lib(), SendToUMA(_, _, _, _, _))
- .Times(AnyNumber());
- EXPECT_CALL(*fake_system_state.mock_metrics_lib(), SendEnumToUMA(_, _, _))
- .Times(AnyNumber());
-
// Set the first response.
SetupPayloadStateWith2Urls(
"Hash5823", true, false, &payload_state, &response);
@@ -615,7 +611,7 @@
EXPECT_CALL(*prefs2, GetInt64(kPrefsFullPayloadAttemptNumber, _))
.Times(AtLeast(1));
EXPECT_CALL(*prefs2, GetInt64(kPrefsCurrentUrlIndex, _))
- .WillRepeatedly(DoAll(SetArgumentPointee<1>(2), Return(true)));
+ .WillRepeatedly(DoAll(SetArgPointee<1>(2), Return(true)));
EXPECT_CALL(*prefs2, GetInt64(kPrefsCurrentUrlFailureCount, _))
.Times(AtLeast(1));
EXPECT_CALL(*prefs2, GetInt64(kPrefsUrlSwitchCount, _))
@@ -873,26 +869,9 @@
EXPECT_EQ(p2p_total,
payload_state.GetTotalBytesDownloaded(kDownloadSourceHttpPeer));
- EXPECT_CALL(*fake_system_state.mock_metrics_lib(), SendToUMA(_, _, _, _, _))
- .Times(AnyNumber());
- EXPECT_CALL(*fake_system_state.mock_metrics_lib(), SendEnumToUMA(_, _, _))
- .Times(AnyNumber());
- EXPECT_CALL(*fake_system_state.mock_metrics_lib(), SendToUMA(
- metrics::kMetricSuccessfulUpdateUrlSwitchCount,
- 3, _, _, _));
- EXPECT_CALL(*fake_system_state.mock_metrics_lib(), SendToUMA(
- metrics::kMetricSuccessfulUpdateTotalDurationMinutes,
- _, _, _, _));
- EXPECT_CALL(*fake_system_state.mock_metrics_lib(), SendToUMA(
- metrics::kMetricSuccessfulUpdateDownloadOverheadPercentage,
- 314, _, _, _));
- EXPECT_CALL(*fake_system_state.mock_metrics_lib(), SendEnumToUMA(
- metrics::kMetricAttemptPayloadType, kPayloadTypeFull, kNumPayloadTypes));
- EXPECT_CALL(*fake_system_state.mock_metrics_lib(), SendEnumToUMA(
- metrics::kMetricSuccessfulUpdatePayloadType, kPayloadTypeFull,
- kNumPayloadTypes));
- EXPECT_CALL(*fake_system_state.mock_metrics_lib(), SendToUMA(
- metrics::kMetricSuccessfulUpdateAttemptCount, 1, _, _, _));
+ EXPECT_CALL(*fake_system_state.mock_metrics_reporter(),
+ ReportSuccessfulUpdateMetrics(
+ 1, _, kPayloadTypeFull, _, _, 314, _, _, 3));
payload_state.UpdateSucceeded();
@@ -928,12 +907,21 @@
payload_state.GetTotalBytesDownloaded(kDownloadSourceHttpServer));
// Check that only HTTP is reported as a download source.
- EXPECT_CALL(*fake_system_state.mock_metrics_lib(), SendToUMA(_, _, _, _, _))
- .Times(AnyNumber());
- EXPECT_CALL(*fake_system_state.mock_metrics_lib(), SendToUMA(
- metrics::kMetricSuccessfulUpdateDownloadSourcesUsed,
- (1 << kDownloadSourceHttpServer),
- _, _, _));
+ int64_t total_bytes[kNumDownloadSources] = {};
+ total_bytes[kDownloadSourceHttpServer] = num_bytes;
+
+ EXPECT_CALL(*fake_system_state.mock_metrics_reporter(),
+ ReportSuccessfulUpdateMetrics(
+ _,
+ _,
+ _,
+ _,
+ test_utils::DownloadSourceMatcher(total_bytes),
+ _,
+ _,
+ _,
+ _))
+ .Times(1);
payload_state.UpdateSucceeded();
}
@@ -1024,7 +1012,7 @@
// Let's verify we can reload it correctly.
EXPECT_CALL(*mock_powerwash_safe_prefs, GetString(
kPrefsRollbackVersion, _)).WillOnce(DoAll(
- SetArgumentPointee<1>(rollback_version), Return(true)));
+ SetArgPointee<1>(rollback_version), Return(true)));
EXPECT_CALL(*mock_powerwash_safe_prefs, SetString(kPrefsRollbackVersion,
rollback_version));
payload_state.LoadRollbackVersion();
@@ -1032,15 +1020,10 @@
// Check that we report only UpdateEngine.Rollback.* metrics in
// UpdateSucceeded().
- EXPECT_CALL(*fake_system_state.mock_metrics_lib(), SendToUMA(_, _, _, _, _))
- .Times(0);
- EXPECT_CALL(*fake_system_state.mock_metrics_lib(), SendEnumToUMA(_, _, _))
- .Times(0);
- EXPECT_CALL(*fake_system_state.mock_metrics_lib(),
- SendEnumToUMA(
- metrics::kMetricRollbackResult,
- static_cast<int>(metrics::RollbackResult::kSuccess),
- static_cast<int>(metrics::RollbackResult::kNumConstants)));
+ EXPECT_CALL(*fake_system_state.mock_metrics_reporter(),
+ ReportRollbackMetrics(metrics::RollbackResult::kSuccess))
+ .Times(1);
+
payload_state.UpdateSucceeded();
}
@@ -1115,8 +1098,8 @@
FakePrefs fake_prefs;
// Set the clock to a well-known time (t = 30 seconds).
- fake_clock.SetWallclockTime(Time::FromInternalValue(
- 30 * Time::kMicrosecondsPerSecond));
+ fake_clock.SetMonotonicTime(
+ Time::FromInternalValue(30 * Time::kMicrosecondsPerSecond));
fake_system_state.set_clock(&fake_clock);
fake_system_state.set_prefs(&fake_prefs);
@@ -1134,15 +1117,14 @@
// (t = 500 seconds). We do this by using a new PayloadState object
// and checking that it emits the right UMA metric with the right
// value.
- fake_clock.SetWallclockTime(Time::FromInternalValue(
- 500 * Time::kMicrosecondsPerSecond));
+ fake_clock.SetMonotonicTime(
+ Time::FromInternalValue(500 * Time::kMicrosecondsPerSecond));
PayloadState payload_state2;
EXPECT_TRUE(payload_state2.Initialize(&fake_system_state));
// Expect 500 - 30 seconds = 470 seconds ~= 7 min 50 sec
- EXPECT_CALL(*fake_system_state.mock_metrics_lib(), SendToUMA(
- metrics::kMetricTimeToRebootMinutes,
- 7, _, _, _));
+ EXPECT_CALL(*fake_system_state.mock_metrics_reporter(),
+ ReportTimeToReboot(7));
fake_system_state.set_system_rebooted(true);
payload_state2.UpdateEngineStarted();
@@ -1154,6 +1136,8 @@
TEST(PayloadStateTest, RestartAfterCrash) {
PayloadState payload_state;
FakeSystemState fake_system_state;
+ testing::StrictMock<MockMetricsReporter> mock_metrics_reporter;
+ fake_system_state.set_metrics_reporter(&mock_metrics_reporter);
NiceMock<MockPrefs>* prefs = fake_system_state.mock_prefs();
EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
@@ -1168,10 +1152,6 @@
EXPECT_CALL(*prefs, GetBoolean(_, _)).Times(0);
EXPECT_CALL(*prefs, GetBoolean(kPrefsAttemptInProgress, _));
- // No metrics are reported after a crash.
- EXPECT_CALL(*fake_system_state.mock_metrics_lib(),
- SendToUMA(_, _, _, _, _)).Times(0);
-
// Simulate an update_engine restart without a reboot.
fake_system_state.set_system_rebooted(false);
@@ -1184,11 +1164,9 @@
// If there's no marker at startup, ensure we don't report a metric.
EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
- EXPECT_CALL(*fake_system_state.mock_metrics_lib(),
- SendEnumToUMA(
- metrics::kMetricAttemptResult,
- static_cast<int>(metrics::AttemptResult::kAbnormalTermination),
- _)).Times(0);
+ EXPECT_CALL(*fake_system_state.mock_metrics_reporter(),
+ ReportAbnormallyTerminatedUpdateAttemptMetrics())
+ .Times(0);
payload_state.UpdateEngineStarted();
}
@@ -1204,11 +1182,9 @@
EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
- EXPECT_CALL(*fake_system_state.mock_metrics_lib(),
- SendEnumToUMA(
- metrics::kMetricAttemptResult,
- static_cast<int>(metrics::AttemptResult::kAbnormalTermination),
- _)).Times(1);
+ EXPECT_CALL(*fake_system_state.mock_metrics_reporter(),
+ ReportAbnormallyTerminatedUpdateAttemptMetrics())
+ .Times(1);
payload_state.UpdateEngineStarted();
EXPECT_FALSE(fake_prefs.Exists(kPrefsAttemptInProgress));
@@ -1228,15 +1204,9 @@
response.packages.resize(1);
payload_state.SetResponse(response);
- EXPECT_CALL(*fake_system_state.mock_metrics_lib(), SendToUMA(_, _, _, _, _))
- .Times(AnyNumber());
- EXPECT_CALL(*fake_system_state.mock_metrics_lib(), SendEnumToUMA(_, _, _))
- .Times(AnyNumber());
- EXPECT_CALL(*fake_system_state.mock_metrics_lib(),
- SendEnumToUMA(
- metrics::kMetricAttemptResult,
- static_cast<int>(metrics::AttemptResult::kAbnormalTermination),
- _)).Times(0);
+ EXPECT_CALL(*fake_system_state.mock_metrics_reporter(),
+ ReportAbnormallyTerminatedUpdateAttemptMetrics())
+ .Times(0);
// Attempt not in progress, should be clear.
EXPECT_FALSE(fake_prefs.Exists(kPrefsAttemptInProgress));
@@ -1275,7 +1245,7 @@
// Test with device policy not allowing http updates.
EXPECT_CALL(disable_http_policy, GetHttpDownloadsEnabled(_))
- .WillRepeatedly(DoAll(SetArgumentPointee<0>(false), Return(true)));
+ .WillRepeatedly(DoAll(SetArgPointee<0>(false), Return(true)));
// Reset state and set again.
SetupPayloadStateWith2Urls(
@@ -1304,7 +1274,7 @@
policy::MockDevicePolicy enable_http_policy;
fake_system_state.set_device_policy(&enable_http_policy);
EXPECT_CALL(enable_http_policy, GetHttpDownloadsEnabled(_))
- .WillRepeatedly(DoAll(SetArgumentPointee<0>(true), Return(true)));
+ .WillRepeatedly(DoAll(SetArgPointee<0>(true), Return(true)));
// Now, set the same response using the same hash
// so that we can test that the state is reset not because of the
@@ -1335,14 +1305,9 @@
// Simulate a successful download and update.
payload_state.DownloadComplete();
-
- EXPECT_CALL(*fake_system_state.mock_metrics_lib(), SendEnumToUMA(_, _, _))
- .Times(AnyNumber());
- EXPECT_CALL(*fake_system_state.mock_metrics_lib(), SendEnumToUMA(
- metrics::kMetricAttemptPayloadType, kPayloadTypeDelta, kNumPayloadTypes));
- EXPECT_CALL(*fake_system_state.mock_metrics_lib(), SendEnumToUMA(
- metrics::kMetricSuccessfulUpdatePayloadType, kPayloadTypeDelta,
- kNumPayloadTypes));
+ EXPECT_CALL(
+ *fake_system_state.mock_metrics_reporter(),
+ ReportSuccessfulUpdateMetrics(_, _, kPayloadTypeDelta, _, _, _, _, _, _));
payload_state.UpdateSucceeded();
// Mock the request to a request where the delta was disabled but Omaha sends
@@ -1356,12 +1321,9 @@
payload_state.DownloadComplete();
- EXPECT_CALL(*fake_system_state.mock_metrics_lib(), SendEnumToUMA(
- metrics::kMetricAttemptPayloadType, kPayloadTypeDelta,
- kNumPayloadTypes));
- EXPECT_CALL(*fake_system_state.mock_metrics_lib(), SendEnumToUMA(
- metrics::kMetricSuccessfulUpdatePayloadType, kPayloadTypeDelta,
- kNumPayloadTypes));
+ EXPECT_CALL(
+ *fake_system_state.mock_metrics_reporter(),
+ ReportSuccessfulUpdateMetrics(_, _, kPayloadTypeDelta, _, _, _, _, _, _));
payload_state.UpdateSucceeded();
}
@@ -1382,14 +1344,9 @@
// Simulate a successful download and update.
payload_state.DownloadComplete();
- EXPECT_CALL(*fake_system_state.mock_metrics_lib(), SendEnumToUMA(_, _, _))
- .Times(AnyNumber());
- EXPECT_CALL(*fake_system_state.mock_metrics_lib(), SendEnumToUMA(
- metrics::kMetricAttemptPayloadType, kPayloadTypeForcedFull,
- kNumPayloadTypes));
- EXPECT_CALL(*fake_system_state.mock_metrics_lib(), SendEnumToUMA(
- metrics::kMetricSuccessfulUpdatePayloadType, kPayloadTypeForcedFull,
- kNumPayloadTypes));
+ EXPECT_CALL(*fake_system_state.mock_metrics_reporter(),
+ ReportSuccessfulUpdateMetrics(
+ _, _, kPayloadTypeForcedFull, _, _, _, _, _, _));
payload_state.UpdateSucceeded();
}
@@ -1411,14 +1368,9 @@
// Simulate a successful download and update.
payload_state.DownloadComplete();
- EXPECT_CALL(*fake_system_state.mock_metrics_lib(), SendEnumToUMA(_, _, _))
- .Times(AnyNumber());
- EXPECT_CALL(*fake_system_state.mock_metrics_lib(), SendEnumToUMA(
- metrics::kMetricAttemptPayloadType, kPayloadTypeFull,
- kNumPayloadTypes));
- EXPECT_CALL(*fake_system_state.mock_metrics_lib(), SendEnumToUMA(
- metrics::kMetricSuccessfulUpdatePayloadType, kPayloadTypeFull,
- kNumPayloadTypes));
+ EXPECT_CALL(
+ *fake_system_state.mock_metrics_reporter(),
+ ReportSuccessfulUpdateMetrics(_, _, kPayloadTypeFull, _, _, _, _, _, _));
payload_state.UpdateSucceeded();
}
@@ -1439,27 +1391,27 @@
payload_state.ExpectRebootInNewVersion("Version:12345678");
// Reboot into the same environment to get an UMA metric with a value of 1.
- EXPECT_CALL(*fake_system_state.mock_metrics_lib(), SendToUMA(
- metrics::kMetricFailedUpdateCount, 1, _, _, _));
+ EXPECT_CALL(*fake_system_state.mock_metrics_reporter(),
+ ReportFailedUpdateCount(1));
payload_state.ReportFailedBootIfNeeded();
- Mock::VerifyAndClearExpectations(fake_system_state.mock_metrics_lib());
+ Mock::VerifyAndClearExpectations(fake_system_state.mock_metrics_reporter());
// Simulate a second update and reboot into the same environment, this should
// send a value of 2.
payload_state.ExpectRebootInNewVersion("Version:12345678");
- EXPECT_CALL(*fake_system_state.mock_metrics_lib(), SendToUMA(
- metrics::kMetricFailedUpdateCount, 2, _, _, _));
+ EXPECT_CALL(*fake_system_state.mock_metrics_reporter(),
+ ReportFailedUpdateCount(2));
payload_state.ReportFailedBootIfNeeded();
- Mock::VerifyAndClearExpectations(fake_system_state.mock_metrics_lib());
+ Mock::VerifyAndClearExpectations(fake_system_state.mock_metrics_reporter());
// Simulate a third failed reboot to new version, but this time for a
// different payload. This should send a value of 1 this time.
payload_state.ExpectRebootInNewVersion("Version:3141592");
- EXPECT_CALL(*fake_system_state.mock_metrics_lib(), SendToUMA(
- metrics::kMetricFailedUpdateCount, 1, _, _, _));
+ EXPECT_CALL(*fake_system_state.mock_metrics_reporter(),
+ ReportFailedUpdateCount(1));
payload_state.ReportFailedBootIfNeeded();
- Mock::VerifyAndClearExpectations(fake_system_state.mock_metrics_lib());
+ Mock::VerifyAndClearExpectations(fake_system_state.mock_metrics_reporter());
}
TEST(PayloadStateTest, RebootAfterUpdateSucceed) {
@@ -1484,8 +1436,8 @@
// Change the BootDevice to a different one, no metric should be sent.
fake_boot_control->SetCurrentSlot(1);
- EXPECT_CALL(*fake_system_state.mock_metrics_lib(), SendToUMA(
- metrics::kMetricFailedUpdateCount, _, _, _, _))
+ EXPECT_CALL(*fake_system_state.mock_metrics_reporter(),
+ ReportFailedUpdateCount(_))
.Times(0);
payload_state.ReportFailedBootIfNeeded();
@@ -1511,8 +1463,8 @@
payload_state.UpdateSucceeded();
payload_state.ExpectRebootInNewVersion("Version:12345678");
- EXPECT_CALL(*fake_system_state.mock_metrics_lib(), SendToUMA(
- metrics::kMetricFailedUpdateCount, _, _, _, _))
+ EXPECT_CALL(*fake_system_state.mock_metrics_reporter(),
+ ReportFailedUpdateCount(_))
.Times(0);
// Cancel the applied update.
@@ -1530,8 +1482,8 @@
fake_system_state.set_prefs(&fake_prefs);
EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
- EXPECT_CALL(*fake_system_state.mock_metrics_lib(), SendToUMA(
- metrics::kMetricFailedUpdateCount, _, _, _, _))
+ EXPECT_CALL(*fake_system_state.mock_metrics_reporter(),
+ ReportFailedUpdateCount(_))
.Times(0);
// Simulate a reboot in this environment.
diff --git a/power_manager_chromeos.cc b/power_manager_chromeos.cc
index e175f95..23fb032 100644
--- a/power_manager_chromeos.cc
+++ b/power_manager_chromeos.cc
@@ -16,6 +16,8 @@
#include "update_engine/power_manager_chromeos.h"
+#include <memory>
+
#include <power_manager/dbus-constants.h>
#include <power_manager/dbus-proxies.h>
@@ -37,7 +39,9 @@
<< ::power_manager::kRequestRestartMethod;
brillo::ErrorPtr error;
return power_manager_proxy_.RequestRestart(
- ::power_manager::REQUEST_RESTART_FOR_UPDATE, &error);
+ ::power_manager::REQUEST_RESTART_FOR_UPDATE,
+ "update_engine applying update",
+ &error);
}
} // namespace chromeos_update_engine
diff --git a/proxy_resolver_unittest.cc b/proxy_resolver_unittest.cc
index 070b361..484aae1 100644
--- a/proxy_resolver_unittest.cc
+++ b/proxy_resolver_unittest.cc
@@ -22,7 +22,6 @@
#include <gtest/gtest.h>
#include <base/bind.h>
-#include <brillo/bind_lambda.h>
#include <brillo/message_loops/fake_message_loop.h>
using std::deque;
diff --git a/pylintrc b/pylintrc
index f83f8c6..80a7605 100644
--- a/pylintrc
+++ b/pylintrc
@@ -149,7 +149,7 @@
# (useful for modules/projects where namespaces are manipulated during runtime
# and thus existing member attributes cannot be deduced by static analysis. It
# supports qualified module names, as well as Unix pattern matching.
-ignored-modules=
+ignored-modules=update_payload.update_metadata_pb2
# List of classes names for which member attributes should not be checked
# (useful for classes with attributes dynamically set). This supports can work
@@ -263,7 +263,7 @@
bad-functions=map,filter,input,apply,reduce
# Good variable names which should always be accepted, separated by a comma
-good-names=i,j,k,ex,x,_
+good-names=i,j,k,ex,x,_,main
# Bad variable names which should always be refused, separated by a comma
bad-names=foo,bar,baz,toto,tutu,tata
diff --git a/real_system_state.cc b/real_system_state.cc
index 5cbf723..8e7ad51 100644
--- a/real_system_state.cc
+++ b/real_system_state.cc
@@ -16,23 +16,24 @@
#include "update_engine/real_system_state.h"
+#include <memory>
#include <string>
#include <base/bind.h>
#include <base/files/file_util.h>
#include <base/location.h>
#include <base/time/time.h>
-#include <brillo/make_unique_ptr.h>
#include <brillo/message_loops/message_loop.h>
-#if USE_LIBCROS
+#if USE_CHROME_KIOSK_APP
#include <chromeos/dbus/service_constants.h>
-#endif // USE_LIBCROS
+#endif // USE_CHROME_KIOSK_APP
#include "update_engine/common/boot_control.h"
#include "update_engine/common/boot_control_stub.h"
#include "update_engine/common/constants.h"
#include "update_engine/common/hardware.h"
#include "update_engine/common/utils.h"
+#include "update_engine/metrics_reporter_omaha.h"
#if USE_DBUS
#include "update_engine/dbus_connection.h"
#endif // USE_DBUS
@@ -50,13 +51,13 @@
}
bool RealSystemState::Initialize() {
- metrics_lib_.Init();
+ metrics_reporter_.Initialize();
boot_control_ = boot_control::CreateBootControl();
if (!boot_control_) {
LOG(WARNING) << "Unable to create BootControl instance, using stub "
<< "instead. All update attempts will fail.";
- boot_control_ = brillo::make_unique_ptr(new BootControlStub());
+ boot_control_ = std::make_unique<BootControlStub>();
}
hardware_ = hardware::CreateHardware();
@@ -65,14 +66,10 @@
return false;
}
-#if USE_LIBCROS
+#if USE_CHROME_KIOSK_APP
libcros_proxy_.reset(new org::chromium::LibCrosServiceInterfaceProxy(
DBusConnection::Get()->GetDBus(), chromeos::kLibCrosServiceName));
- network_proxy_service_proxy_.reset(
- new org::chromium::NetworkProxyServiceInterfaceProxy(
- DBusConnection::Get()->GetDBus(),
- chromeos::kNetworkProxyServiceName));
-#endif // USE_LIBCROS
+#endif // USE_CHROME_KIOSK_APP
LOG_IF(INFO, !hardware_->IsNormalBootMode()) << "Booted in dev mode.";
LOG_IF(INFO, !hardware_->IsOfficialBuild()) << "Booted non-official build.";
@@ -143,27 +140,22 @@
new CertificateChecker(prefs_.get(), &openssl_wrapper_));
certificate_checker_->Init();
-#if USE_LIBCROS
- org::chromium::NetworkProxyServiceInterfaceProxyInterface* net_proxy =
- network_proxy_service_proxy_.get();
- org::chromium::LibCrosServiceInterfaceProxyInterface* libcros_proxy =
- libcros_proxy_.get();
-#else
- org::chromium::NetworkProxyServiceInterfaceProxyInterface* net_proxy =
- nullptr;
- org::chromium::LibCrosServiceInterfaceProxyInterface* libcros_proxy =
- nullptr;
-#endif // USE_LIBCROS
+ update_attempter_.reset(new UpdateAttempter(this,
+ certificate_checker_.get()));
// Initialize the UpdateAttempter before the UpdateManager.
- update_attempter_.reset(new UpdateAttempter(this, certificate_checker_.get(),
- net_proxy));
update_attempter_->Init();
// Initialize the Update Manager using the default state factory.
chromeos_update_manager::State* um_state =
- chromeos_update_manager::DefaultStateFactory(
- &policy_provider_, libcros_proxy, this);
+ chromeos_update_manager::DefaultStateFactory(&policy_provider_,
+#if USE_CHROME_KIOSK_APP
+ libcros_proxy_.get(),
+#else
+ nullptr,
+#endif // USE_CHROME_KIOSK_APP
+ this);
+
if (!um_state) {
LOG(ERROR) << "Failed to initialize the Update Manager.";
return false;
diff --git a/real_system_state.h b/real_system_state.h
index 64964cd..49f7c31 100644
--- a/real_system_state.h
+++ b/real_system_state.h
@@ -22,13 +22,11 @@
#include <memory>
#include <set>
-#include <metrics/metrics_library.h>
#include <policy/device_policy.h>
-#if USE_LIBCROS
+#if USE_CHROME_KIOSK_APP
#include <libcros/dbus-proxies.h>
-#include <network_proxy/dbus-proxies.h>
-#endif // USE_LIBCROS
+#endif // USE_CHROME_KIOSK_APP
#include "update_engine/certificate_checker.h"
#include "update_engine/common/boot_control_interface.h"
@@ -37,6 +35,8 @@
#include "update_engine/common/prefs.h"
#include "update_engine/connection_manager_interface.h"
#include "update_engine/daemon_state_interface.h"
+#include "update_engine/metrics_reporter_interface.h"
+#include "update_engine/metrics_reporter_omaha.h"
#include "update_engine/p2p_manager.h"
#include "update_engine/payload_state.h"
#include "update_engine/power_manager_interface.h"
@@ -92,8 +92,8 @@
inline HardwareInterface* hardware() override { return hardware_.get(); }
- inline MetricsLibraryInterface* metrics_lib() override {
- return &metrics_lib_;
+ inline MetricsReporterInterface* metrics_reporter() override {
+ return &metrics_reporter_;
}
inline PrefsInterface* prefs() override { return prefs_.get(); }
@@ -127,12 +127,10 @@
inline bool system_rebooted() override { return system_rebooted_; }
private:
-#if USE_LIBCROS
// Real DBus proxies using the DBus connection.
+#if USE_CHROME_KIOSK_APP
std::unique_ptr<org::chromium::LibCrosServiceInterfaceProxy> libcros_proxy_;
- std::unique_ptr<org::chromium::NetworkProxyServiceInterfaceProxy>
- network_proxy_service_proxy_;
-#endif // USE_LIBCROS
+#endif // USE_CHROME_KIOSK_APP
// Interface for the power manager.
std::unique_ptr<PowerManagerInterface> power_manager_;
@@ -153,8 +151,8 @@
// Interface for the hardware functions.
std::unique_ptr<HardwareInterface> hardware_;
- // The Metrics Library interface for reporting UMA stats.
- MetricsLibrary metrics_lib_;
+ // The Metrics reporter for reporting UMA stats.
+ MetricsReporterOmaha metrics_reporter_;
// Interface for persisted store.
std::unique_ptr<PrefsInterface> prefs_;
diff --git a/sample_images/generate_images.sh b/sample_images/generate_images.sh
index 6a0d1ea..8478682 100755
--- a/sample_images/generate_images.sh
+++ b/sample_images/generate_images.sh
@@ -26,12 +26,15 @@
# cleanup <path>
# Unmount and remove the mountpoint <path>
cleanup() {
- if ! sudo umount "$1" 2>/dev/null; then
- if mountpoint -q "$1"; then
- sync && sudo umount "$1"
+ local path="$1"
+ if ! sudo umount "${path}" 2>/dev/null; then
+ if mountpoint -q "${path}"; then
+ sync && sudo umount "${path}"
fi
fi
- rmdir "$1"
+ if [ -n "${path}" ]; then
+ sudo rm -rf "${path}"
+ fi
}
# add_files_default <mntdir> <block_size>
@@ -203,10 +206,11 @@
# generate_fs <filename> <kind> <size> [block_size] [block_groups]
generate_fs() {
local filename="$1"
- local kind="$2"
- local size="$3"
- local block_size="${4:-4096}"
- local block_groups="${5:-}"
+ local type="$2"
+ local kind="$3"
+ local size="$4"
+ local block_size="${5:-4096}"
+ local block_groups="${6:-}"
local mkfs_opts=( -q -F -b "${block_size}" -L "ROOT-TEST" -t ext2 )
if [[ -n "${block_groups}" ]]; then
@@ -215,16 +219,17 @@
local mntdir=$(mktemp --tmpdir -d generate_ext2.XXXXXX)
trap 'cleanup "${mntdir}"; rm -f "${filename}"' INT TERM EXIT
-
# Cleanup old image.
if [[ -e "${filename}" ]]; then
rm -f "${filename}"
fi
- truncate --size="${size}" "${filename}"
- mkfs.ext2 "${mkfs_opts[@]}" "${filename}"
- sudo mount "${filename}" "${mntdir}" -o loop
+ if [[ "${type}" == "ext2" ]]; then
+ truncate --size="${size}" "${filename}"
+ mkfs.ext2 "${mkfs_opts[@]}" "${filename}"
+ sudo mount "${filename}" "${mntdir}" -o loop
+ fi
case "${kind}" in
unittest)
add_files_ue_settings "${mntdir}" "${block_size}"
@@ -237,6 +242,10 @@
;;
esac
+ if [[ "${type}" == "sqfs" ]]; then
+ mksquashfs "${mntdir}" "${filename}"
+ fi
+
cleanup "${mntdir}"
trap - INT TERM EXIT
}
@@ -253,10 +262,14 @@
main() {
# Add more sample images here.
- generate_image disk_ext2_1k default $((1024 * 1024)) 1024
- generate_image disk_ext2_4k default $((1024 * 4096)) 4096
- generate_image disk_ext2_4k_empty empty $((1024 * 4096)) 4096
- generate_image disk_ext2_unittest unittest $((1024 * 4096)) 4096
+ generate_image disk_ext2_1k ext2 default $((1024 * 1024)) 1024
+ generate_image disk_ext2_4k ext2 default $((1024 * 4096)) 4096
+ generate_image disk_ext2_4k_empty ext2 empty $((1024 * 4096)) 4096
+ generate_image disk_ext2_unittest ext2 unittest $((1024 * 4096)) 4096
+
+ # Add squashfs sample images.
+ generate_image disk_sqfs_empty sqfs empty $((1024 * 4096)) 4096
+ generate_image disk_sqfs_default sqfs default $((1024 * 4096)) 4096
# Generate the tarball and delete temporary images.
echo "Packing tar file sample_images.tar.bz2"
diff --git a/sample_images/sample_images.tar.bz2 b/sample_images/sample_images.tar.bz2
index 72f4eb5..6215482 100644
--- a/sample_images/sample_images.tar.bz2
+++ b/sample_images/sample_images.tar.bz2
Binary files differ
diff --git a/scripts/blockdiff.py b/scripts/blockdiff.py
new file mode 100755
index 0000000..1dc60a6
--- /dev/null
+++ b/scripts/blockdiff.py
@@ -0,0 +1,115 @@
+#!/usr/bin/python2
+#
+# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Block diff utility."""
+
+from __future__ import print_function
+
+import optparse
+import sys
+
+
+class BlockDiffError(Exception):
+ pass
+
+
+def BlockDiff(block_size, file1, file2, name1, name2, max_length=-1):
+ """Performs a binary diff of two files by blocks.
+
+ Args:
+ block_size: the size of a block to diff by
+ file1: first file object
+ file2: second file object
+ name1: name of first file (for error reporting)
+ name2: name of second file (for error reporting)
+ max_length: the maximum length to read/diff in bytes (optional)
+ Returns:
+ A list of (start, length) pairs representing block extents that differ
+ between the two files.
+ Raises:
+ BlockDiffError if there were errors while diffing.
+
+ """
+ if max_length < 0:
+ max_length = sys.maxint
+ diff_list = []
+ num_blocks = extent_start = extent_length = 0
+ while max_length or extent_length:
+ read_length = min(max_length, block_size)
+ data1 = file1.read(read_length)
+ data2 = file2.read(read_length)
+ if len(data1) != len(data2):
+ raise BlockDiffError('read %d bytes from %s but %d bytes from %s' %
+ (len(data1), name1, len(data2), name2))
+
+ if data1 != data2:
+ # Data is different, mark it down.
+ if extent_length:
+ # Stretch the current diff extent.
+ extent_length += 1
+ else:
+ # Start a new diff extent.
+ extent_start = num_blocks
+ extent_length = 1
+ elif extent_length:
+ # Record the previous extent.
+ diff_list.append((extent_start, extent_length))
+ extent_length = 0
+
+ # Are we done reading?
+ if not data1:
+ break
+
+ max_length -= len(data1)
+ num_blocks += 1
+
+ return diff_list
+
+
+def main(argv):
+ # Parse command-line arguments.
+ parser = optparse.OptionParser(
+ usage='Usage: %prog FILE1 FILE2',
+ description='Compare FILE1 and FILE2 by blocks.')
+
+ parser.add_option('-b', '--block-size', metavar='NUM', type=int, default=4096,
+ help='the block size to use (default: %default)')
+ parser.add_option('-m', '--max-length', metavar='NUM', type=int, default=-1,
+ help='maximum number of bytes to compare')
+
+ opts, args = parser.parse_args(argv[1:])
+
+ try:
+ name1, name2 = args
+ except ValueError:
+ parser.error('unexpected number of arguments')
+
+ # Perform the block diff.
+ try:
+ with open(name1) as file1:
+ with open(name2) as file2:
+ diff_list = BlockDiff(opts.block_size, file1, file2, name1, name2,
+ opts.max_length)
+ except BlockDiffError as e:
+ print('Error: %s' % e, file=sys.stderr)
+ return 2
+
+ # Print the diff, if such was found.
+ if diff_list:
+ total_diff_blocks = 0
+ for extent_start, extent_length in diff_list:
+ total_diff_blocks += extent_length
+ print('%d->%d (%d)' %
+ (extent_start, extent_start + extent_length, extent_length))
+
+ print('total diff: %d blocks' % total_diff_blocks)
+ return 1
+
+ return 0
+
+
+if __name__ == '__main__':
+ sys.exit(main(sys.argv))
diff --git a/scripts/brillo_update_payload b/scripts/brillo_update_payload
index 375c0df..65c63f5 100755
--- a/scripts/brillo_update_payload
+++ b/scripts/brillo_update_payload
@@ -1,8 +1,20 @@
#!/bin/bash
-# Copyright 2015 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
+#
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
# Script to generate a Brillo update for use by the update engine.
#
@@ -12,14 +24,15 @@
# hash generate a payload or metadata hash
# sign generate a signed payload
# properties generate a properties file from a payload
+# verify verify a payload by recreating a target image.
#
# Generate command arguments:
# --payload generated unsigned payload output file
# --source_image if defined, generate a delta payload from the specified
# image to the target_image
# --target_image the target image that should be sent to clients
-# --metadata_size_file if defined, generate a file containing the size of the payload
-# metadata in bytes to the specified file
+# --metadata_size_file if defined, generate a file containing the size of the
+# payload metadata in bytes to the specified file
#
# Hash command arguments:
# --unsigned_payload the input unsigned payload to generate the hash from
@@ -50,6 +63,10 @@
# --payload the input signed or unsigned payload
# --properties_file the output path where to write the properties, or
# '-' for stdout.
+# Verify command arguments:
+# --payload payload input file
+ # --source_image if defined, the delta payload is applied to this source image.
+ # --target_image the target image to verify the applied payload against.
# Exit codes:
@@ -85,6 +102,7 @@
for signing."
HELP_SIGN="sign: Insert the signatures into the unsigned payload."
HELP_PROPERTIES="properties: Extract payload properties to a file."
+HELP_VERIFY="verify: Verify a (signed) update payload."
usage() {
echo "Supported commands:"
@@ -93,6 +111,7 @@
echo "${HELP_HASH}"
echo "${HELP_SIGN}"
echo "${HELP_PROPERTIES}"
+ echo "${HELP_VERIFY}"
echo
echo "Use: \"$0 <command> --help\" for more options."
}
@@ -123,6 +142,11 @@
properties)
FLAGS_HELP="${HELP_PROPERTIES}"
;;
+
+ verify)
+ FLAGS_HELP="${HELP_VERIFY}"
+ ;;
+
*)
echo "Unrecognized command: \"${COMMAND}\"" >&2
usage >&2
@@ -178,6 +202,15 @@
"Path to output the extracted property files. If '-' is passed stdout will \
be used."
fi
+if [[ "${COMMAND}" == "verify" ]]; then
+ DEFINE_string payload "" \
+ "Path to the input payload file."
+ DEFINE_string target_image "" \
+ "Path to the target image to verify upon."
+ DEFINE_string source_image "" \
+ "Optional: Path to a source image. If specified, the delta update is \
+applied to this."
+fi
DEFINE_string work_dir "${TMPDIR:-/tmp}" "Where to dump temporary files."
@@ -214,9 +247,6 @@
# Path to the postinstall config file in target image if exists.
POSTINSTALL_CONFIG_FILE=""
-# The fingerprint of zlib in the source image.
-ZLIB_FINGERPRINT=""
-
# read_option_int <file.txt> <option_key> [default_value]
#
# Reads the unsigned integer value associated with |option_key| in a key=value
@@ -336,11 +366,6 @@
# updater supports a newer major version.
FORCE_MAJOR_VERSION="1"
- if [[ "${partitions_array}" == "SRC_PARTITIONS" ]]; then
- # Copy from zlib_fingerprint in source image to stdout.
- ZLIB_FINGERPRINT=$(e2cp "${root}":/etc/zlib_fingerprint -)
- fi
-
# When generating legacy Chrome OS images, we need to use "boot" and "system"
# for the partition names to be compatible with updating Brillo devices with
# Chrome OS images.
@@ -376,7 +401,8 @@
if grep -v -E '^[a-zA-Z0-9_-]*$' "${ab_partitions_list}" >&2; then
die "Invalid partition names found in the partition list."
fi
- partitions=($(cat "${ab_partitions_list}"))
+ # Get partition list without duplicates.
+ partitions=($(awk '!seen[$0]++' "${ab_partitions_list}"))
if [[ ${#partitions[@]} -eq 0 ]]; then
die "The list of partitions is empty. Can't generate a payload."
fi
@@ -414,10 +440,6 @@
Disabling deltas for this source version."
exit ${EX_UNSUPPORTED_DELTA}
fi
-
- if [[ "${FORCE_MINOR_VERSION}" -ge 4 ]]; then
- ZLIB_FINGERPRINT=$(unzip -p "${image}" "META/zlib_fingerprint.txt")
- fi
else
# Target image
local postinstall_config=$(create_tempfile "postinstall_config.XXXXXX")
@@ -539,9 +561,6 @@
if [[ -n "${FORCE_MINOR_VERSION}" ]]; then
GENERATOR_ARGS+=( --minor_version="${FORCE_MINOR_VERSION}" )
fi
- if [[ -n "${ZLIB_FINGERPRINT}" ]]; then
- GENERATOR_ARGS+=( --zlib_fingerprint="${ZLIB_FINGERPRINT}" )
- fi
fi
if [[ -n "${FORCE_MAJOR_VERSION}" ]]; then
@@ -644,6 +663,91 @@
-properties_file="${FLAGS_properties_file}"
}
+validate_verify() {
+ [[ -n "${FLAGS_payload}" ]] ||
+ die "Error: you must specify an input filename with --payload FILENAME"
+
+ [[ -n "${FLAGS_target_image}" ]] ||
+ die "Error: you must specify a target image with --target_image FILENAME"
+}
+
+cmd_verify() {
+ local payload_type="delta"
+ if [[ -z "${FLAGS_source_image}" ]]; then
+ payload_type="full"
+ fi
+
+ echo "Extracting images for ${payload_type} update."
+
+ if [[ "${payload_type}" == "delta" ]]; then
+ extract_image "${FLAGS_source_image}" SRC_PARTITIONS
+ fi
+ extract_image "${FLAGS_target_image}" DST_PARTITIONS PARTITIONS_ORDER
+
+ declare -A TMP_PARTITIONS
+ for part in "${PARTITIONS_ORDER[@]}"; do
+ local tmp_part=$(create_tempfile "tmp_part.bin.XXXXXX")
+ echo "Creating temporary target partition ${tmp_part} for ${part}"
+ CLEANUP_FILES+=("${tmp_part}")
+ TMP_PARTITIONS[${part}]=${tmp_part}
+ local FILESIZE=$(stat -c%s "${DST_PARTITIONS[${part}]}")
+ echo "Truncating ${TMP_PARTITIONS[${part}]} to ${FILESIZE}"
+ truncate_file "${TMP_PARTITIONS[${part}]}" "${FILESIZE}"
+ done
+
+ echo "Verifying ${payload_type} update."
+ # Common payload args:
+ GENERATOR_ARGS=( -in_file="${FLAGS_payload}" )
+
+ local part old_partitions="" new_partitions="" partition_names=""
+ for part in "${PARTITIONS_ORDER[@]}"; do
+ if [[ -n "${partition_names}" ]]; then
+ partition_names+=":"
+ new_partitions+=":"
+ old_partitions+=":"
+ fi
+ partition_names+="${part}"
+ new_partitions+="${TMP_PARTITIONS[${part}]}"
+ old_partitions+="${SRC_PARTITIONS[${part}]:-}"
+ done
+
+ # Target image args:
+ GENERATOR_ARGS+=(
+ -partition_names="${partition_names}"
+ -new_partitions="${new_partitions}"
+ )
+
+ if [[ "${payload_type}" == "delta" ]]; then
+ # Source image args:
+ GENERATOR_ARGS+=(
+ -old_partitions="${old_partitions}"
+ )
+ fi
+
+ if [[ -n "${FORCE_MAJOR_VERSION}" ]]; then
+ GENERATOR_ARGS+=( --major_version="${FORCE_MAJOR_VERSION}" )
+ fi
+
+ echo "Running delta_generator to verify ${payload_type} payload with args: \
+${GENERATOR_ARGS[@]}"
+ "${GENERATOR}" "${GENERATOR_ARGS[@]}"
+
+ if [[ $? -eq 0 ]]; then
+ echo "Done applying ${payload_type} update."
+ echo "Checking the newly generated partitions against the target partitions"
+ for part in "${PARTITIONS_ORDER[@]}"; do
+ cmp "${TMP_PARTITIONS[${part}]}" "${DST_PARTITIONS[${part}]}"
+ local not_str=""
+ if [[ $? -ne 0 ]]; then
+ not_str="in"
+ fi
+ echo "The new partition (${part}) is ${not_str}valid."
+ done
+ else
+ echo "Failed to apply ${payload_type} update."
+ fi
+}
+
# Sanity check that the real generator exists:
GENERATOR="$(which delta_generator || true)"
[[ -x "${GENERATOR}" ]] || die "can't find delta_generator"
@@ -661,4 +765,7 @@
properties) validate_properties
cmd_properties
;;
+ verify) validate_verify
+ cmd_verify
+ ;;
esac
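To make the argument construction in cmd_verify() above easier to follow, here is a hedged Python sketch of the colon-joined lists that get handed to delta_generator; the partition names and paths are hypothetical:

    # Hypothetical two-partition layout, mirroring cmd_verify's joining logic.
    order = ['boot', 'system']
    tmp_parts = {'boot': '/tmp/tmp_boot.bin', 'system': '/tmp/tmp_system.bin'}
    src_parts = {'boot': '/tmp/src_boot.img'}  # may be empty for full updates

    partition_names = ':'.join(order)
    new_partitions = ':'.join(tmp_parts[p] for p in order)
    old_partitions = ':'.join(src_parts.get(p, '') for p in order)

    args = ['-in_file=payload.bin',
            '-partition_names=' + partition_names,  # boot:system
            '-new_partitions=' + new_partitions,    # temp files to reconstruct
            '-old_partitions=' + old_partitions]    # sources, '' where missing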
diff --git a/scripts/paycheck.py b/scripts/paycheck.py
index 0195f53..8df1bf0 100755
--- a/scripts/paycheck.py
+++ b/scripts/paycheck.py
@@ -12,7 +12,6 @@
import os
import sys
-# pylint: disable=F0401
lib_dir = os.path.join(os.path.dirname(__file__), 'lib')
if os.path.exists(lib_dir) and os.path.isdir(lib_dir):
sys.path.insert(1, lib_dir)
@@ -91,6 +90,8 @@
'operations (not in-place)'))
trace_opts.add_option('--bspatch-path', metavar='FILE',
help=('use the specified bspatch binary'))
+ trace_opts.add_option('--puffpatch-path', metavar='FILE',
+ help=('use the specified puffpatch binary'))
parser.add_option_group(trace_opts)
trace_opts = optparse.OptionGroup(parser, 'Block tracing')
@@ -147,6 +148,8 @@
parser.error('--extract-bsdiff can only be used when applying payloads')
if opts.bspatch_path:
parser.error('--bspatch-path can only be used when applying payloads')
+ if opts.puffpatch_path:
+ parser.error('--puffpatch-path can only be used when applying payloads')
else:
parser.error('unexpected number of arguments')
@@ -215,6 +218,8 @@
dargs = {'bsdiff_in_place': not options.extract_bsdiff}
if options.bspatch_path:
dargs['bspatch_path'] = options.bspatch_path
+ if options.puffpatch_path:
+ dargs['puffpatch_path'] = options.puffpatch_path
if options.assert_type == _TYPE_DELTA:
dargs['old_kernel_part'] = extra_args[2]
dargs['old_rootfs_part'] = extra_args[3]
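Downstream, the new --puffpatch-path flag ends up as a keyword argument on the payload applier. A hedged sketch, under the assumption that the applier class is PayloadApplier (as defined in applier.py) and that the binary paths below exist on the host:

    from update_payload import Payload
    from update_payload.applier import PayloadApplier

    # payload.bin is an illustrative path to an update payload.
    with open('payload.bin', 'rb') as f:
        payload = Payload(f)
        payload.Init()
        # When puffpatch_path is omitted, the applier falls back to 'puffin'.
        applier = PayloadApplier(payload,
                                 bspatch_path='/usr/bin/bspatch',
                                 puffpatch_path='/usr/local/bin/puffin')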
diff --git a/scripts/run_unittests b/scripts/run_unittests
new file mode 100755
index 0000000..c8e713d
--- /dev/null
+++ b/scripts/run_unittests
@@ -0,0 +1,28 @@
+#!/bin/bash
+#
+# Copyright 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Runs update_payload unittests
+
+set -e
+
+# Invoke unittest scripts.
+for unittest_script in update_payload/*_unittest.py; do
+ filename=$(basename "${unittest_script}")
+ python -m update_payload."${filename%.*}"
+done
+
+exit 0
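The same discovery loop, expressed as a hedged Python sketch (run from the scripts/ directory; the interpreter name is an assumption):

    import glob
    import os
    import subprocess

    for path in sorted(glob.glob('update_payload/*_unittest.py')):
        module = 'update_payload.' + os.path.splitext(os.path.basename(path))[0]
        subprocess.check_call(['python2', '-m', module])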
diff --git a/scripts/update_device.py b/scripts/update_device.py
index 75c58a7..64cfbe3 100755
--- a/scripts/update_device.py
+++ b/scripts/update_device.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/python2
#
# Copyright (C) 2017 The Android Open Source Project
#
@@ -19,18 +19,27 @@
import argparse
import BaseHTTPServer
+import hashlib
import logging
import os
import socket
import subprocess
import sys
import threading
+import xml.etree.ElementTree
import zipfile
+import update_payload.payload
+
# The path used to store the OTA package when applying the package from a file.
OTA_PACKAGE_PATH = '/data/ota_package'
+# The path to the payload public key on the device.
+PAYLOAD_KEY_PATH = '/etc/update_engine/update-payload-key.pub.pem'
+
+# The port on the device that update_engine should connect to.
+DEVICE_PORT = 1234
def CopyFileObjLength(fsrc, fdst, buffer_size=128 * 1024, copy_length=None):
"""Copy from a file object to another.
@@ -90,10 +99,11 @@
Attributes:
serving_payload: path to the only payload file we are serving.
+ serving_range: the start offset and size tuple of the payload.
"""
@staticmethod
- def _ParseRange(range_str, file_size):
+ def _parse_range(range_str, file_size):
"""Parse an HTTP range string.
Args:
@@ -140,12 +150,12 @@
else:
self.send_response(200)
- stat = os.fstat(f.fileno())
- start_range, end_range = self._ParseRange(self.headers.get('range'),
- stat.st_size)
+ serving_start, serving_size = self.serving_range
+ start_range, end_range = self._parse_range(self.headers.get('range'),
+ serving_size)
logging.info('Serving request for %s from %s [%d, %d) length: %d',
- self.path, self.serving_payload, start_range, end_range,
- end_range - start_range)
+ self.path, self.serving_payload, serving_start + start_range,
+ serving_start + end_range, end_range - start_range)
self.send_header('Accept-Ranges', 'bytes')
self.send_header('Content-Range',
@@ -153,22 +163,99 @@
'/' + str(end_range - start_range))
self.send_header('Content-Length', end_range - start_range)
+ stat = os.fstat(f.fileno())
self.send_header('Last-Modified', self.date_time_string(stat.st_mtime))
self.send_header('Content-type', 'application/octet-stream')
self.end_headers()
- f.seek(start_range)
+ f.seek(serving_start + start_range)
CopyFileObjLength(f, self.wfile, copy_length=end_range - start_range)
+ def do_POST(self): # pylint: disable=invalid-name
+ """Reply with the omaha response xml."""
+ if self.path != '/update':
+ self.send_error(404, 'Unknown request')
+ return
+
+ if not self.serving_payload:
+ self.send_error(500, 'No serving payload set')
+ return
+
+ try:
+ f = open(self.serving_payload, 'rb')
+ except IOError:
+ self.send_error(404, 'File not found')
+ return
+
+ content_length = int(self.headers.getheader('Content-Length'))
+ request_xml = self.rfile.read(content_length)
+ xml_root = xml.etree.ElementTree.fromstring(request_xml)
+ appid = None
+ for app in xml_root.iter('app'):
+ if 'appid' in app.attrib:
+ appid = app.attrib['appid']
+ break
+ if not appid:
+ self.send_error(400, 'No appid in Omaha request')
+ return
+
+ self.send_response(200)
+ self.send_header("Content-type", "text/xml")
+ self.end_headers()
+
+ serving_start, serving_size = self.serving_range
+ sha256 = hashlib.sha256()
+ f.seek(serving_start)
+ bytes_to_hash = serving_size
+ while bytes_to_hash:
+ buf = f.read(min(bytes_to_hash, 1024 * 1024))
+ if not buf:
+ self.send_error(500, 'Payload too small')
+ return
+ sha256.update(buf)
+ bytes_to_hash -= len(buf)
+
+ payload = update_payload.Payload(f, payload_file_offset=serving_start)
+ payload.Init()
+
+ response_xml = '''
+ <?xml version="1.0" encoding="UTF-8"?>
+ <response protocol="3.0">
+ <app appid="{appid}">
+ <updatecheck status="ok">
+ <urls>
+ <url codebase="http://127.0.0.1:{port}/"/>
+ </urls>
+ <manifest version="0.0.0.1">
+ <actions>
+ <action event="install" run="payload"/>
+ <action event="postinstall" MetadataSize="{metadata_size}"/>
+ </actions>
+ <packages>
+ <package hash_sha256="{payload_hash}" name="payload" size="{payload_size}"/>
+ </packages>
+ </manifest>
+ </updatecheck>
+ </app>
+ </response>
+ '''.format(appid=appid, port=DEVICE_PORT,
+ metadata_size=payload.metadata_size,
+ payload_hash=sha256.hexdigest(),
+ payload_size=serving_size)
+ self.wfile.write(response_xml.strip())
+ return
+
+
class ServerThread(threading.Thread):
"""A thread for serving HTTP requests."""
- def __init__(self, ota_filename):
+ def __init__(self, ota_filename, serving_range):
threading.Thread.__init__(self)
- # serving_payload is a class attribute and the UpdateHandler class is
- # instantiated with every request.
+ # serving_payload and serving_range are class attributes and the
+ # UpdateHandler class is instantiated with every request.
UpdateHandler.serving_payload = ota_filename
+ UpdateHandler.serving_range = serving_range
self._httpd = BaseHTTPServer.HTTPServer(('127.0.0.1', 0), UpdateHandler)
self.port = self._httpd.server_port
@@ -183,26 +270,31 @@
self._httpd.socket.close()
-def StartServer(ota_filename):
- t = ServerThread(ota_filename)
+def StartServer(ota_filename, serving_range):
+ t = ServerThread(ota_filename, serving_range)
t.start()
return t
-def AndroidUpdateCommand(ota_filename, payload_url):
+def AndroidUpdateCommand(ota_filename, payload_url, extra_headers):
"""Return the command to run to start the update in the Android device."""
ota = AndroidOTAPackage(ota_filename)
headers = ota.properties
headers += 'USER_AGENT=Dalvik (something, something)\n'
-
- # headers += 'POWERWASH=1\n'
headers += 'NETWORK_ID=0\n'
+ headers += extra_headers
return ['update_engine_client', '--update', '--follow',
'--payload=%s' % payload_url, '--offset=%d' % ota.offset,
'--size=%d' % ota.size, '--headers="%s"' % headers]
+def OmahaUpdateCommand(omaha_url):
+ """Return the command to run to start the update in a device using Omaha."""
+ return ['update_engine_client', '--update', '--follow',
+ '--omaha_url=%s' % omaha_url]
+
+
class AdbHost(object):
"""Represents a device connected via ADB."""
@@ -235,11 +327,28 @@
p.wait()
return p.returncode
+ def adb_output(self, command):
+ """Run an ADB command like "adb push" and return the output.
+
+ Args:
+ command: list of strings containing command and arguments to run
+
+ Returns:
+ the program's output as a string.
+
+ Raises:
+ subprocess.CalledProcessError on command exit != 0.
+ """
+ command = self._command_prefix + command
+ logging.info('Running: %s', ' '.join(str(x) for x in command))
+ return subprocess.check_output(command, universal_newlines=True)
+
def main():
parser = argparse.ArgumentParser(description='Android A/B OTA helper.')
- parser.add_argument('otafile', metavar='ZIP', type=str,
- help='the OTA package file (a .zip file).')
+ parser.add_argument('otafile', metavar='PAYLOAD', type=str,
+ help='the OTA package file (a .zip file) or raw payload \
+ if the device uses Omaha.')
parser.add_argument('--file', action='store_true',
help='Push the file to the device before updating.')
parser.add_argument('--no-push', action='store_true',
@@ -248,6 +357,10 @@
help='The specific device to use.')
parser.add_argument('--no-verbose', action='store_true',
help='Less verbose output')
+ parser.add_argument('--public-key', type=str, default='',
+ help='Override the public key used to verify payload.')
+ parser.add_argument('--extra-headers', type=str, default='',
+ help='Extra headers to pass to the device.')
args = parser.parse_args()
logging.basicConfig(
level=logging.WARNING if args.no_verbose else logging.INFO)
@@ -262,27 +375,57 @@
# List of commands to perform the update.
cmds = []
+ help_cmd = ['shell', 'su', '0', 'update_engine_client', '--help']
+ use_omaha = 'omaha' in dut.adb_output(help_cmd)
+
if args.file:
# Update via pushing a file to /data.
device_ota_file = os.path.join(OTA_PACKAGE_PATH, 'debug.zip')
payload_url = 'file://' + device_ota_file
if not args.no_push:
- cmds.append(['push', args.otafile, device_ota_file])
+ data_local_tmp_file = '/data/local/tmp/debug.zip'
+ cmds.append(['push', args.otafile, data_local_tmp_file])
+ cmds.append(['shell', 'su', '0', 'mv', data_local_tmp_file,
+ device_ota_file])
+ cmds.append(['shell', 'su', '0', 'chcon',
+ 'u:object_r:ota_package_file:s0', device_ota_file])
cmds.append(['shell', 'su', '0', 'chown', 'system:cache', device_ota_file])
cmds.append(['shell', 'su', '0', 'chmod', '0660', device_ota_file])
else:
# Update via sending the payload over the network with an "adb reverse"
# command.
- device_port = 1234
- payload_url = 'http://127.0.0.1:%d/payload' % device_port
- server_thread = StartServer(args.otafile)
+ payload_url = 'http://127.0.0.1:%d/payload' % DEVICE_PORT
+ if use_omaha and zipfile.is_zipfile(args.otafile):
+ ota = AndroidOTAPackage(args.otafile)
+ serving_range = (ota.offset, ota.size)
+ else:
+ serving_range = (0, os.stat(args.otafile).st_size)
+ server_thread = StartServer(args.otafile, serving_range)
cmds.append(
- ['reverse', 'tcp:%d' % device_port, 'tcp:%d' % server_thread.port])
- finalize_cmds.append(['reverse', '--remove', 'tcp:%d' % device_port])
+ ['reverse', 'tcp:%d' % DEVICE_PORT, 'tcp:%d' % server_thread.port])
+ finalize_cmds.append(['reverse', '--remove', 'tcp:%d' % DEVICE_PORT])
+
+ if args.public_key:
+ payload_key_dir = os.path.dirname(PAYLOAD_KEY_PATH)
+ cmds.append(
+ ['shell', 'su', '0', 'mount', '-t', 'tmpfs', 'tmpfs', payload_key_dir])
+ # Allow adb push to payload_key_dir
+ cmds.append(['shell', 'su', '0', 'chcon', 'u:object_r:shell_data_file:s0',
+ payload_key_dir])
+ cmds.append(['push', args.public_key, PAYLOAD_KEY_PATH])
+ # Allow update_engine to read it.
+ cmds.append(['shell', 'su', '0', 'chcon', '-R', 'u:object_r:system_file:s0',
+ payload_key_dir])
+ finalize_cmds.append(['shell', 'su', '0', 'umount', payload_key_dir])
try:
# The main update command using the configured payload_url.
- update_cmd = AndroidUpdateCommand(args.otafile, payload_url)
+ if use_omaha:
+ update_cmd = \
+ OmahaUpdateCommand('http://127.0.0.1:%d/update' % DEVICE_PORT)
+ else:
+ update_cmd = \
+ AndroidUpdateCommand(args.otafile, payload_url, args.extra_headers)
cmds.append(['shell', 'su', '0'] + update_cmd)
for cmd in cmds:
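The do_POST() handler above depends on the new payload_file_offset argument so that a payload embedded inside an OTA zip can be parsed in place. A hedged sketch of that usage; the file name and offset are illustrative (in the script the offset comes from AndroidOTAPackage(...).offset):

    import update_payload

    with open('ota.zip', 'rb') as f:
        payload = update_payload.Payload(f, payload_file_offset=4096)
        payload.Init()
        print('payload metadata size: %d' % payload.metadata_size)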
diff --git a/scripts/update_payload/__init__.py b/scripts/update_payload/__init__.py
index 1906a16..e4a5588 100644
--- a/scripts/update_payload/__init__.py
+++ b/scripts/update_payload/__init__.py
@@ -5,7 +5,6 @@
"""Library for processing, verifying and applying Chrome OS update payloads."""
# Just raise the interface classes to the root namespace.
-# pylint: disable=W0401
-from checker import CHECKS_TO_DISABLE
-from error import PayloadError
-from payload import Payload
+from update_payload.checker import CHECKS_TO_DISABLE
+from update_payload.error import PayloadError
+from update_payload.payload import Payload
diff --git a/scripts/update_payload/applier.py b/scripts/update_payload/applier.py
index e3708c7..e470ac4 100644
--- a/scripts/update_payload/applier.py
+++ b/scripts/update_payload/applier.py
@@ -24,8 +24,8 @@
import sys
import tempfile
-import common
-from error import PayloadError
+from update_payload import common
+from update_payload.error import PayloadError
#
@@ -44,7 +44,6 @@
PayloadError if computed hash doesn't match expected one, or if fails to
read the specified length of data.
"""
- # pylint: disable=E1101
hasher = hashlib.sha256()
block_length = 1024 * 1024
max_length = length if length >= 0 else sys.maxint
@@ -195,14 +194,14 @@
"""
def __init__(self, payload, bsdiff_in_place=True, bspatch_path=None,
- imgpatch_path=None, truncate_to_expected_size=True):
+ puffpatch_path=None, truncate_to_expected_size=True):
"""Initialize the applier.
Args:
payload: the payload object to check
bsdiff_in_place: whether to perform BSDIFF operation in-place (optional)
bspatch_path: path to the bspatch binary (optional)
- imgpatch_path: path to the imgpatch binary (optional)
+ puffpatch_path: path to the puffpatch binary (optional)
truncate_to_expected_size: whether to truncate the resulting partitions
to their expected sizes, as specified in the
payload (optional)
@@ -213,7 +212,7 @@
self.minor_version = payload.manifest.minor_version
self.bsdiff_in_place = bsdiff_in_place
self.bspatch_path = bspatch_path or 'bspatch'
- self.imgpatch_path = imgpatch_path or 'imgpatch'
+ self.puffpatch_path = puffpatch_path or 'puffin'
self.truncate_to_expected_size = truncate_to_expected_size
def _ApplyReplaceOperation(self, op, op_name, out_data, part_file, part_size):
@@ -301,22 +300,27 @@
_WriteExtents(part_file, in_data, op.dst_extents, block_size,
'%s.dst_extents' % op_name)
- def _ApplyBsdiffOperation(self, op, op_name, patch_data, new_part_file):
- """Applies a BSDIFF operation.
+ def _ApplyZeroOperation(self, op, op_name, part_file):
+ """Applies a ZERO operation.
Args:
op: the operation object
op_name: name string for error reporting
- patch_data: the binary patch content
- new_part_file: the target partition file object
+ part_file: the partition file object
Raises:
PayloadError if something goes wrong.
"""
- # Implemented using a SOURCE_BSDIFF operation with the source and target
- # partition set to the new partition.
- self._ApplyDiffOperation(op, op_name, patch_data, new_part_file,
- new_part_file)
+ block_size = self.block_size
+ base_name = '%s.dst_extents' % op_name
+
+ # Iterate over the extents and write zero.
+ # pylint: disable=unused-variable
+ for ex, ex_name in common.ExtentIter(op.dst_extents, base_name):
+ # Only do actual writing if this is not a pseudo-extent.
+ if ex.start_block != common.PSEUDO_EXTENT_MARKER:
+ part_file.seek(ex.start_block * block_size)
+ part_file.write('\0' * (ex.num_blocks * block_size))
def _ApplySourceCopyOperation(self, op, op_name, old_part_file,
new_part_file):
@@ -345,9 +349,26 @@
_WriteExtents(new_part_file, in_data, op.dst_extents, block_size,
'%s.dst_extents' % op_name)
+ def _BytesInExtents(self, extents, base_name):
+ """Counts the length of extents in bytes.
+
+ Args:
+ extents: The list of Extents.
+ base_name: For error reporting.
+
+ Returns:
+ The number of bytes in extents.
+ """
+
+ length = 0
+ # pylint: disable=unused-variable
+ for ex, ex_name in common.ExtentIter(extents, base_name):
+ length += ex.num_blocks * self.block_size
+ return length
+
def _ApplyDiffOperation(self, op, op_name, patch_data, old_part_file,
new_part_file):
- """Applies a SOURCE_BSDIFF or IMGDIFF operation.
+ """Applies a SOURCE_BSDIFF, BROTLI_BSDIFF or PUFFDIFF operation.
Args:
op: the operation object
@@ -372,24 +393,40 @@
patch_file.write(patch_data)
if (hasattr(new_part_file, 'fileno') and
- ((not old_part_file) or hasattr(old_part_file, 'fileno')) and
- op.type != common.OpType.IMGDIFF):
+ ((not old_part_file) or hasattr(old_part_file, 'fileno'))):
# Construct input and output extents argument for bspatch.
+
in_extents_arg, _, _ = _ExtentsToBspatchArg(
op.src_extents, block_size, '%s.src_extents' % op_name,
- data_length=op.src_length)
+ data_length=op.src_length if op.src_length else
+ self._BytesInExtents(op.src_extents, '%s.src_extents' % op_name))
out_extents_arg, pad_off, pad_len = _ExtentsToBspatchArg(
op.dst_extents, block_size, '%s.dst_extents' % op_name,
- data_length=op.dst_length)
+ data_length=op.dst_length if op.dst_length else
+ self._BytesInExtents(op.dst_extents, '%s.dst_extents' % op_name))
new_file_name = '/dev/fd/%d' % new_part_file.fileno()
# Diff from source partition.
old_file_name = '/dev/fd/%d' % old_part_file.fileno()
- # Invoke bspatch on partition file with extents args.
- bspatch_cmd = [self.bspatch_path, old_file_name, new_file_name,
- patch_file_name, in_extents_arg, out_extents_arg]
- subprocess.check_call(bspatch_cmd)
+ if op.type in (common.OpType.BSDIFF, common.OpType.SOURCE_BSDIFF,
+ common.OpType.BROTLI_BSDIFF):
+ # Invoke bspatch on partition file with extents args.
+ bspatch_cmd = [self.bspatch_path, old_file_name, new_file_name,
+ patch_file_name, in_extents_arg, out_extents_arg]
+ subprocess.check_call(bspatch_cmd)
+ elif op.type == common.OpType.PUFFDIFF:
+ # Invoke puffpatch on partition file with extents args.
+ puffpatch_cmd = [self.puffpatch_path,
+ "--operation=puffpatch",
+ "--src_file=%s" % old_file_name,
+ "--dst_file=%s" % new_file_name,
+ "--patch_file=%s" % patch_file_name,
+ "--src_extents=%s" % in_extents_arg,
+ "--dst_extents=%s" % out_extents_arg]
+ subprocess.check_call(puffpatch_cmd)
+ else:
+ raise PayloadError("Unknown operation %s", op.type)
# Pad with zeros past the total output length.
if pad_len:
@@ -399,7 +436,9 @@
# Gather input raw data and write to a temp file.
input_part_file = old_part_file if old_part_file else new_part_file
in_data = _ReadExtents(input_part_file, op.src_extents, block_size,
- max_length=op.src_length)
+ max_length=op.src_length if op.src_length else
+ self._BytesInExtents(op.src_extents,
+ "%s.src_extents"))
with tempfile.NamedTemporaryFile(delete=False) as in_file:
in_file_name = in_file.name
in_file.write(in_data)
@@ -408,12 +447,22 @@
with tempfile.NamedTemporaryFile(delete=False) as out_file:
out_file_name = out_file.name
- # Invoke bspatch.
- patch_cmd = [self.bspatch_path, in_file_name, out_file_name,
- patch_file_name]
- if op.type == common.OpType.IMGDIFF:
- patch_cmd[0] = self.imgpatch_path
- subprocess.check_call(patch_cmd)
+ if op.type in (common.OpType.BSDIFF, common.OpType.SOURCE_BSDIFF,
+ common.OpType.BROTLI_BSDIFF):
+ # Invoke bspatch.
+ bspatch_cmd = [self.bspatch_path, in_file_name, out_file_name,
+ patch_file_name]
+ subprocess.check_call(bspatch_cmd)
+ elif op.type == common.OpType.PUFFDIFF:
+ # Invoke puffpatch.
+ puffpatch_cmd = [self.puffpatch_path,
+ "--operation=puffpatch",
+ "--src_file=%s" % in_file_name,
+ "--dst_file=%s" % out_file_name,
+ "--patch_file=%s" % patch_file_name]
+ subprocess.check_call(puffpatch_cmd)
+ else:
+ raise PayloadError("Unknown operation %s", op.type)
# Read output.
with open(out_file_name, 'rb') as out_file:
@@ -463,12 +512,16 @@
self._ApplyReplaceOperation(op, op_name, data, new_part_file, part_size)
elif op.type == common.OpType.MOVE:
self._ApplyMoveOperation(op, op_name, new_part_file)
+ elif op.type == common.OpType.ZERO:
+ self._ApplyZeroOperation(op, op_name, new_part_file)
elif op.type == common.OpType.BSDIFF:
- self._ApplyBsdiffOperation(op, op_name, data, new_part_file)
+ self._ApplyDiffOperation(op, op_name, data, new_part_file,
+ new_part_file)
elif op.type == common.OpType.SOURCE_COPY:
self._ApplySourceCopyOperation(op, op_name, old_part_file,
new_part_file)
- elif op.type in (common.OpType.SOURCE_BSDIFF, common.OpType.IMGDIFF):
+ elif op.type in (common.OpType.SOURCE_BSDIFF, common.OpType.PUFFDIFF,
+ common.OpType.BROTLI_BSDIFF):
self._ApplyDiffOperation(op, op_name, data, old_part_file,
new_part_file)
else:
@@ -504,7 +557,7 @@
shutil.copyfile(old_part_file_name, new_part_file_name)
elif (self.minor_version == common.SOURCE_MINOR_PAYLOAD_VERSION or
self.minor_version == common.OPSRCHASH_MINOR_PAYLOAD_VERSION or
- self.minor_version == common.IMGDIFF_MINOR_PAYLOAD_VERSION):
+ self.minor_version == common.PUFFDIFF_MINOR_PAYLOAD_VERSION):
# In minor version >= 2, we don't want to copy the partitions, so
# instead just make the new partition file.
open(new_part_file_name, 'w').close()
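A self-contained sketch of the new length fallback: when a payload omits src_length/dst_length (minor version >= 4), the byte count is derived from the extents, which is what _BytesInExtents() above does. The Extent tuple and block size are stand-ins for the protobuf message and the manifest's block size:

    import collections

    Extent = collections.namedtuple('Extent', 'start_block num_blocks')
    BLOCK_SIZE = 4096  # illustrative; the real value comes from the manifest

    def bytes_in_extents(extents):
        # Mirrors _BytesInExtents(): total bytes covered by the extents.
        return sum(ex.num_blocks * BLOCK_SIZE for ex in extents)

    src_extents = [Extent(10, 4), Extent(100, 1)]
    src_length = 0  # field absent in newer payloads
    effective = src_length if src_length else bytes_in_extents(src_extents)
    print(effective)  # 20480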
diff --git a/scripts/update_payload/block_tracer.py b/scripts/update_payload/block_tracer.py
index f222b21..5caf7e3 100644
--- a/scripts/update_payload/block_tracer.py
+++ b/scripts/update_payload/block_tracer.py
@@ -16,7 +16,7 @@
from __future__ import print_function
-import common
+from update_payload import common
#
diff --git a/scripts/update_payload/checker.py b/scripts/update_payload/checker.py
index e13ea13..e4cb845 100644
--- a/scripts/update_payload/checker.py
+++ b/scripts/update_payload/checker.py
@@ -21,11 +21,11 @@
import os
import subprocess
-import common
-import error
-import format_utils
-import histogram
-import update_metadata_pb2
+from update_payload import common
+from update_payload import error
+from update_payload import format_utils
+from update_payload import histogram
+from update_payload import update_metadata_pb2
#
@@ -815,10 +815,30 @@
if dst_extent:
raise error.PayloadError('%s: excess dst blocks.' % op_name)
- def _CheckAnyDiffOperation(self, data_length, total_dst_blocks, op_name):
- """Specific checks for BSDIFF, SOURCE_BSDIFF and IMGDIFF operations.
+ def _CheckZeroOperation(self, op, op_name):
+ """Specific checks for ZERO operations.
Args:
+ op: The operation object from the manifest.
+ op_name: Operation name for error reporting.
+
+ Raises:
+ error.PayloadError if any check fails.
+ """
+ # Check: Does not contain src extents, data_length and data_offset.
+ if op.src_extents:
+ raise error.PayloadError('%s: contains src_extents.' % op_name)
+ if op.data_length:
+ raise error.PayloadError('%s: contains data_length.' % op_name)
+ if op.data_offset:
+ raise error.PayloadError('%s: contains data_offset.' % op_name)
+
+ def _CheckAnyDiffOperation(self, op, data_length, total_dst_blocks, op_name):
+ """Specific checks for BSDIFF, SOURCE_BSDIFF, PUFFDIFF and BROTLI_BSDIFF
+ operations.
+
+ Args:
+ op: The operation.
data_length: The length of the data blob associated with the operation.
total_dst_blocks: Total number of blocks in dst_extents.
op_name: Operation name for error reporting.
@@ -838,6 +858,15 @@
(op_name, data_length, total_dst_blocks, self.block_size,
total_dst_blocks * self.block_size))
+ # Check the existence of src_length and dst_length for legacy bsdiffs.
+ if (op.type == common.OpType.BSDIFF or
+ (op.type == common.OpType.SOURCE_BSDIFF and self.minor_version <= 3)):
+ if not op.HasField('src_length') or not op.HasField('dst_length'):
+ raise error.PayloadError('%s: require {src,dst}_length.' % op_name)
+ else:
+ if op.HasField('src_length') or op.HasField('dst_length'):
+ raise error.PayloadError('%s: unneeded {src,dst}_length.' % op_name)
+
def _CheckSourceCopyOperation(self, data_offset, total_src_blocks,
total_dst_blocks, op_name):
"""Specific checks for SOURCE_COPY.
@@ -941,8 +970,6 @@
op_name)
# Check: Hash verifies correctly.
- # pylint cannot find the method in hashlib, for some reason.
- # pylint: disable=E1101
actual_hash = hashlib.sha256(self.payload.ReadDataBlob(data_offset,
data_length))
if op.data_sha256_hash != actual_hash.digest():
@@ -972,17 +999,20 @@
elif op.type == common.OpType.MOVE and self.minor_version == 1:
self._CheckMoveOperation(op, data_offset, total_src_blocks,
total_dst_blocks, op_name)
+ elif op.type == common.OpType.ZERO and self.minor_version >= 4:
+ self._CheckZeroOperation(op, op_name)
elif op.type == common.OpType.BSDIFF and self.minor_version == 1:
- self._CheckAnyDiffOperation(data_length, total_dst_blocks, op_name)
+ self._CheckAnyDiffOperation(op, data_length, total_dst_blocks, op_name)
elif op.type == common.OpType.SOURCE_COPY and self.minor_version >= 2:
self._CheckSourceCopyOperation(data_offset, total_src_blocks,
total_dst_blocks, op_name)
self._CheckAnySourceOperation(op, total_src_blocks, op_name)
elif op.type == common.OpType.SOURCE_BSDIFF and self.minor_version >= 2:
- self._CheckAnyDiffOperation(data_length, total_dst_blocks, op_name)
+ self._CheckAnyDiffOperation(op, data_length, total_dst_blocks, op_name)
self._CheckAnySourceOperation(op, total_src_blocks, op_name)
- elif op.type == common.OpType.IMGDIFF and self.minor_version >= 4:
- self._CheckAnyDiffOperation(data_length, total_dst_blocks, op_name)
+ elif (op.type in (common.OpType.PUFFDIFF, common.OpType.BROTLI_BSDIFF) and
+ self.minor_version >= 4):
+ self._CheckAnyDiffOperation(op, data_length, total_dst_blocks, op_name)
self._CheckAnySourceOperation(op, total_src_blocks, op_name)
else:
raise error.PayloadError(
@@ -1011,8 +1041,8 @@
itertools.repeat(0, self._SizeToNumBlocks(total_size)))
def _CheckOperations(self, operations, report, base_name, old_fs_size,
- new_fs_size, new_usable_size, prev_data_offset,
- allow_signature):
+ new_fs_size, old_usable_size, new_usable_size,
+ prev_data_offset, allow_signature):
"""Checks a sequence of update operations.
Args:
@@ -1021,6 +1051,7 @@
base_name: The name of the operation block.
old_fs_size: The old filesystem size in bytes.
new_fs_size: The new filesystem size in bytes.
+ old_usable_size: The overall usable size of the old partition in bytes.
new_usable_size: The overall usable size of the new partition in bytes.
prev_data_offset: Offset of last used data bytes.
allow_signature: Whether this sequence may contain signature operations.
@@ -1038,10 +1069,12 @@
common.OpType.REPLACE: 0,
common.OpType.REPLACE_BZ: 0,
common.OpType.MOVE: 0,
+ common.OpType.ZERO: 0,
common.OpType.BSDIFF: 0,
common.OpType.SOURCE_COPY: 0,
common.OpType.SOURCE_BSDIFF: 0,
- common.OpType.IMGDIFF: 0,
+ common.OpType.PUFFDIFF: 0,
+ common.OpType.BROTLI_BSDIFF: 0,
}
# Total blob sizes for each operation type.
op_blob_totals = {
@@ -1051,7 +1084,8 @@
common.OpType.BSDIFF: 0,
# SOURCE_COPY operations don't have blobs.
common.OpType.SOURCE_BSDIFF: 0,
- common.OpType.IMGDIFF: 0,
+ common.OpType.PUFFDIFF: 0,
+ common.OpType.BROTLI_BSDIFF: 0,
}
# Counts of hashed vs unhashed operations.
blob_hash_counts = {
@@ -1062,7 +1096,7 @@
blob_hash_counts['signature'] = 0
# Allocate old and new block counters.
- old_block_counters = (self._AllocBlockCounters(new_usable_size)
+ old_block_counters = (self._AllocBlockCounters(old_usable_size)
if old_fs_size else None)
new_block_counters = self._AllocBlockCounters(new_usable_size)
@@ -1079,7 +1113,7 @@
is_last = op_num == len(operations)
curr_data_used = self._CheckOperation(
op, op_name, is_last, old_block_counters, new_block_counters,
- new_usable_size if old_fs_size else 0, new_usable_size,
+ old_usable_size, new_usable_size,
prev_data_offset + total_data_used, allow_signature,
blob_hash_counts)
if curr_data_used:
@@ -1132,8 +1166,6 @@
report.AddSection('signatures')
# Check: At least one signature present.
- # pylint cannot see through the protobuf object, it seems.
- # pylint: disable=E1101
if not sigs.signatures:
raise error.PayloadError('Signature block is empty.')
@@ -1229,10 +1261,13 @@
# exceed the filesystem size when moving data blocks around.
# - Otherwise, use the encoded filesystem size.
new_rootfs_usable_size = self.new_rootfs_fs_size
+ old_rootfs_usable_size = self.old_rootfs_fs_size
if rootfs_part_size:
new_rootfs_usable_size = rootfs_part_size
+ old_rootfs_usable_size = rootfs_part_size
elif self.payload_type == _TYPE_DELTA and self.minor_version in (None, 1):
new_rootfs_usable_size = _OLD_DELTA_USABLE_PART_SIZE
+ old_rootfs_usable_size = _OLD_DELTA_USABLE_PART_SIZE
# Part 3: Examine rootfs operations.
# TODO(garnold)(chromium:243559) only default to the filesystem size if
@@ -1242,7 +1277,8 @@
total_blob_size = self._CheckOperations(
self.payload.manifest.install_operations, report,
'install_operations', self.old_rootfs_fs_size,
- self.new_rootfs_fs_size, new_rootfs_usable_size, 0, False)
+ self.new_rootfs_fs_size, old_rootfs_usable_size,
+ new_rootfs_usable_size, 0, False)
# Part 4: Examine kernel operations.
# TODO(garnold)(chromium:243559) as above.
@@ -1251,6 +1287,7 @@
self.payload.manifest.kernel_install_operations, report,
'kernel_install_operations', self.old_kernel_fs_size,
self.new_kernel_fs_size,
+ kernel_part_size if kernel_part_size else self.old_kernel_fs_size,
kernel_part_size if kernel_part_size else self.new_kernel_fs_size,
total_blob_size, True)
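A compact sketch of the {src,dst}_length rule enforced above: legacy bsdiff operations must carry explicit lengths, while SOURCE_BSDIFF at minor version >= 4, PUFFDIFF and BROTLI_BSDIFF must not. Operation types are given as plain strings for illustration:

    def requires_src_dst_length(op_type, minor_version):
        # True when the checker expects explicit src_length/dst_length fields.
        return (op_type == 'BSDIFF' or
                (op_type == 'SOURCE_BSDIFF' and minor_version <= 3))

    assert requires_src_dst_length('BSDIFF', 1)
    assert requires_src_dst_length('SOURCE_BSDIFF', 2)
    assert not requires_src_dst_length('SOURCE_BSDIFF', 4)
    assert not requires_src_dst_length('PUFFDIFF', 4)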
diff --git a/scripts/update_payload/checker_unittest.py b/scripts/update_payload/checker_unittest.py
index 56b1a30..974519d 100755
--- a/scripts/update_payload/checker_unittest.py
+++ b/scripts/update_payload/checker_unittest.py
@@ -20,14 +20,16 @@
# pylint: disable=F0401
import mox
-import checker
-import common
-import payload as update_payload # Avoid name conflicts later.
-import test_utils
-import update_metadata_pb2
+from update_payload import checker
+from update_payload import common
+from update_payload import test_utils
+from update_payload import update_metadata_pb2
+from update_payload.error import PayloadError
+from update_payload.payload import Payload # Avoid name conflicts later.
def _OpTypeByName(op_name):
+ """Returns the type of an operation from itsname."""
op_name_to_type = {
'REPLACE': common.OpType.REPLACE,
'REPLACE_BZ': common.OpType.REPLACE_BZ,
@@ -38,7 +40,8 @@
'ZERO': common.OpType.ZERO,
'DISCARD': common.OpType.DISCARD,
'REPLACE_XZ': common.OpType.REPLACE_XZ,
- 'IMGDIFF': common.OpType.IMGDIFF,
+ 'PUFFDIFF': common.OpType.PUFFDIFF,
+ 'BROTLI_BSDIFF': common.OpType.BROTLI_BSDIFF,
}
return op_name_to_type[op_name]
@@ -54,7 +57,7 @@
payload_file = cStringIO.StringIO()
payload_gen_write_to_file_func(payload_file, **payload_gen_dargs)
payload_file.seek(0)
- payload = update_payload.Payload(payload_file)
+ payload = Payload(payload_file)
payload.Init()
return checker.PayloadChecker(payload, **checker_init_dargs)
@@ -64,7 +67,7 @@
payload_file = cStringIO.StringIO()
payload_gen.WriteToFile(payload_file)
payload_file.seek(0)
- payload = update_payload.Payload(payload_file)
+ payload = Payload(payload_file)
payload.Init()
return checker.PayloadChecker(payload)
@@ -90,7 +93,7 @@
def MockPayload(self):
"""Create a mock payload object, complete with a mock manifest."""
- payload = self.mox.CreateMock(update_payload.Payload)
+ payload = self.mox.CreateMock(Payload)
payload.is_init = True
payload.manifest = self.mox.CreateMock(
update_metadata_pb2.DeltaArchiveManifest)
@@ -194,7 +197,7 @@
args = (msg, name, report, is_mandatory, is_submsg)
kwargs = {'convert': convert, 'linebreak': linebreak, 'indent': indent}
if is_mandatory and not is_present:
- self.assertRaises(update_payload.PayloadError,
+ self.assertRaises(PayloadError,
checker.PayloadChecker._CheckElem, *args, **kwargs)
else:
ret_val, ret_subreport = checker.PayloadChecker._CheckElem(*args,
@@ -228,8 +231,7 @@
# Test the method call.
if is_mandatory and not is_present:
- self.assertRaises(update_payload.PayloadError, tested_func, *args,
- **kwargs)
+ self.assertRaises(PayloadError, tested_func, *args, **kwargs)
else:
ret_val = tested_func(*args, **kwargs)
self.assertEquals(val if is_present else None, ret_val)
@@ -253,7 +255,7 @@
# Test the method call.
if is_mandatory and not is_present:
- self.assertRaises(update_payload.PayloadError, tested_func, *args)
+ self.assertRaises(PayloadError, tested_func, *args)
else:
ret_val, ret_subreport = tested_func(*args)
self.assertEquals(val if is_present else None, ret_val)
@@ -265,11 +267,9 @@
None, None, 'foo', 'bar', 'baz'))
self.assertIsNone(checker.PayloadChecker._CheckPresentIff(
'a', 'b', 'foo', 'bar', 'baz'))
- self.assertRaises(update_payload.PayloadError,
- checker.PayloadChecker._CheckPresentIff,
+ self.assertRaises(PayloadError, checker.PayloadChecker._CheckPresentIff,
'a', None, 'foo', 'bar', 'baz')
- self.assertRaises(update_payload.PayloadError,
- checker.PayloadChecker._CheckPresentIff,
+ self.assertRaises(PayloadError, checker.PayloadChecker._CheckPresentIff,
None, 'b', 'foo', 'bar', 'baz')
def DoCheckSha256SignatureTest(self, expect_pass, expect_subprocess_call,
@@ -298,7 +298,7 @@
self.assertIsNone(checker.PayloadChecker._CheckSha256Signature(
sig_data, 'foo', expected_signed_hash, 'bar'))
else:
- self.assertRaises(update_payload.PayloadError,
+ self.assertRaises(PayloadError,
checker.PayloadChecker._CheckSha256Signature,
sig_data, 'foo', expected_signed_hash, 'bar')
finally:
@@ -358,31 +358,31 @@
def testCheckBlocksFitLength_TooManyBlocks(self):
"""Tests _CheckBlocksFitLength(); fails due to excess blocks."""
- self.assertRaises(update_payload.PayloadError,
+ self.assertRaises(PayloadError,
checker.PayloadChecker._CheckBlocksFitLength,
64, 5, 16, 'foo')
- self.assertRaises(update_payload.PayloadError,
+ self.assertRaises(PayloadError,
checker.PayloadChecker._CheckBlocksFitLength,
60, 5, 16, 'foo')
- self.assertRaises(update_payload.PayloadError,
+ self.assertRaises(PayloadError,
checker.PayloadChecker._CheckBlocksFitLength,
49, 5, 16, 'foo')
- self.assertRaises(update_payload.PayloadError,
+ self.assertRaises(PayloadError,
checker.PayloadChecker._CheckBlocksFitLength,
48, 4, 16, 'foo')
def testCheckBlocksFitLength_TooFewBlocks(self):
"""Tests _CheckBlocksFitLength(); fails due to insufficient blocks."""
- self.assertRaises(update_payload.PayloadError,
+ self.assertRaises(PayloadError,
checker.PayloadChecker._CheckBlocksFitLength,
64, 3, 16, 'foo')
- self.assertRaises(update_payload.PayloadError,
+ self.assertRaises(PayloadError,
checker.PayloadChecker._CheckBlocksFitLength,
60, 3, 16, 'foo')
- self.assertRaises(update_payload.PayloadError,
+ self.assertRaises(PayloadError,
checker.PayloadChecker._CheckBlocksFitLength,
49, 3, 16, 'foo')
- self.assertRaises(update_payload.PayloadError,
+ self.assertRaises(PayloadError,
checker.PayloadChecker._CheckBlocksFitLength,
48, 2, 16, 'foo')
@@ -475,8 +475,7 @@
fail_old_rootfs_fs_size or fail_new_kernel_fs_size or
fail_new_rootfs_fs_size)
if should_fail:
- self.assertRaises(update_payload.PayloadError,
- payload_checker._CheckManifest, report,
+ self.assertRaises(PayloadError, payload_checker._CheckManifest, report,
rootfs_part_size, kernel_part_size)
else:
self.assertIsNone(payload_checker._CheckManifest(report,
@@ -492,12 +491,10 @@
self.assertIsNone(payload_checker._CheckLength(
int(3.5 * block_size), 4, 'foo', 'bar'))
# Fails, too few blocks.
- self.assertRaises(update_payload.PayloadError,
- payload_checker._CheckLength,
+ self.assertRaises(PayloadError, payload_checker._CheckLength,
int(3.5 * block_size), 3, 'foo', 'bar')
# Fails, too many blocks.
- self.assertRaises(update_payload.PayloadError,
- payload_checker._CheckLength,
+ self.assertRaises(PayloadError, payload_checker._CheckLength,
int(3.5 * block_size), 5, 'foo', 'bar')
def testCheckExtents(self):
@@ -532,30 +529,26 @@
# Fails, extent missing a start block.
extents = self.NewExtentList((-1, 4), (8, 3), (1024, 16))
self.assertRaises(
- update_payload.PayloadError, payload_checker._CheckExtents,
- extents, (1024 + 16) * block_size, collections.defaultdict(int),
- 'foo')
+ PayloadError, payload_checker._CheckExtents, extents,
+ (1024 + 16) * block_size, collections.defaultdict(int), 'foo')
# Fails, extent missing block count.
extents = self.NewExtentList((0, -1), (8, 3), (1024, 16))
self.assertRaises(
- update_payload.PayloadError, payload_checker._CheckExtents,
- extents, (1024 + 16) * block_size, collections.defaultdict(int),
- 'foo')
+ PayloadError, payload_checker._CheckExtents, extents,
+ (1024 + 16) * block_size, collections.defaultdict(int), 'foo')
# Fails, extent has zero blocks.
extents = self.NewExtentList((0, 4), (8, 3), (1024, 0))
self.assertRaises(
- update_payload.PayloadError, payload_checker._CheckExtents,
- extents, (1024 + 16) * block_size, collections.defaultdict(int),
- 'foo')
+ PayloadError, payload_checker._CheckExtents, extents,
+ (1024 + 16) * block_size, collections.defaultdict(int), 'foo')
# Fails, extent exceeds partition boundaries.
extents = self.NewExtentList((0, 4), (8, 3), (1024, 16))
self.assertRaises(
- update_payload.PayloadError, payload_checker._CheckExtents,
- extents, (1024 + 15) * block_size, collections.defaultdict(int),
- 'foo')
+ PayloadError, payload_checker._CheckExtents, extents,
+ (1024 + 15) * block_size, collections.defaultdict(int), 'foo')
def testCheckReplaceOperation(self):
"""Tests _CheckReplaceOperation() where op.type == REPLACE."""
@@ -577,22 +570,19 @@
# Fail, src extents founds.
op.src_extents = ['bar']
self.assertRaises(
- update_payload.PayloadError,
- payload_checker._CheckReplaceOperation,
+ PayloadError, payload_checker._CheckReplaceOperation,
op, data_length, (data_length + block_size - 1) / block_size, 'foo')
# Fail, missing data.
op.src_extents = []
self.assertRaises(
- update_payload.PayloadError,
- payload_checker._CheckReplaceOperation,
+ PayloadError, payload_checker._CheckReplaceOperation,
op, None, (data_length + block_size - 1) / block_size, 'foo')
# Fail, length / block number mismatch.
op.src_extents = ['bar']
self.assertRaises(
- update_payload.PayloadError,
- payload_checker._CheckReplaceOperation,
+ PayloadError, payload_checker._CheckReplaceOperation,
op, data_length, (data_length + block_size - 1) / block_size + 1, 'foo')
def testCheckReplaceBzOperation(self):
@@ -615,22 +605,19 @@
# Fail, src extents founds.
op.src_extents = ['bar']
self.assertRaises(
- update_payload.PayloadError,
- payload_checker._CheckReplaceOperation,
+ PayloadError, payload_checker._CheckReplaceOperation,
op, data_length, (data_length + block_size - 1) / block_size + 5, 'foo')
# Fail, missing data.
op.src_extents = []
self.assertRaises(
- update_payload.PayloadError,
- payload_checker._CheckReplaceOperation,
+ PayloadError, payload_checker._CheckReplaceOperation,
op, None, (data_length + block_size - 1) / block_size, 'foo')
# Fail, too few blocks to justify BZ.
op.src_extents = []
self.assertRaises(
- update_payload.PayloadError,
- payload_checker._CheckReplaceOperation,
+ PayloadError, payload_checker._CheckReplaceOperation,
op, data_length, (data_length + block_size - 1) / block_size, 'foo')
def testCheckMoveOperation_Pass(self):
@@ -657,8 +644,7 @@
self.AddToMessage(op.dst_extents,
self.NewExtentList((16, 128), (512, 6)))
self.assertRaises(
- update_payload.PayloadError,
- payload_checker._CheckMoveOperation,
+ PayloadError, payload_checker._CheckMoveOperation,
op, 1024, 134, 134, 'foo')
def testCheckMoveOperation_FailInsufficientSrcBlocks(self):
@@ -672,8 +658,7 @@
self.AddToMessage(op.dst_extents,
self.NewExtentList((16, 128), (512, 6)))
self.assertRaises(
- update_payload.PayloadError,
- payload_checker._CheckMoveOperation,
+ PayloadError, payload_checker._CheckMoveOperation,
op, None, 134, 134, 'foo')
def testCheckMoveOperation_FailInsufficientDstBlocks(self):
@@ -687,8 +672,7 @@
self.AddToMessage(op.dst_extents,
self.NewExtentList((16, 128), (512, 5)))
self.assertRaises(
- update_payload.PayloadError,
- payload_checker._CheckMoveOperation,
+ PayloadError, payload_checker._CheckMoveOperation,
op, None, 134, 134, 'foo')
def testCheckMoveOperation_FailExcessSrcBlocks(self):
@@ -702,16 +686,14 @@
self.AddToMessage(op.dst_extents,
self.NewExtentList((16, 128), (512, 5)))
self.assertRaises(
- update_payload.PayloadError,
- payload_checker._CheckMoveOperation,
+ PayloadError, payload_checker._CheckMoveOperation,
op, None, 134, 134, 'foo')
self.AddToMessage(op.src_extents,
self.NewExtentList((1, 4), (12, 2), (1024, 129)))
self.AddToMessage(op.dst_extents,
self.NewExtentList((16, 128), (512, 6)))
self.assertRaises(
- update_payload.PayloadError,
- payload_checker._CheckMoveOperation,
+ PayloadError, payload_checker._CheckMoveOperation,
op, None, 134, 134, 'foo')
def testCheckMoveOperation_FailExcessDstBlocks(self):
@@ -725,8 +707,7 @@
self.AddToMessage(op.dst_extents,
self.NewExtentList((16, 128), (512, 7)))
self.assertRaises(
- update_payload.PayloadError,
- payload_checker._CheckMoveOperation,
+ PayloadError, payload_checker._CheckMoveOperation,
op, None, 134, 134, 'foo')
def testCheckMoveOperation_FailStagnantBlocks(self):
@@ -740,8 +721,7 @@
self.AddToMessage(op.dst_extents,
self.NewExtentList((8, 128), (512, 6)))
self.assertRaises(
- update_payload.PayloadError,
- payload_checker._CheckMoveOperation,
+ PayloadError, payload_checker._CheckMoveOperation,
op, None, 134, 134, 'foo')
def testCheckMoveOperation_FailZeroStartBlock(self):
@@ -755,8 +735,7 @@
self.AddToMessage(op.dst_extents,
self.NewExtentList((8, 128), (512, 6)))
self.assertRaises(
- update_payload.PayloadError,
- payload_checker._CheckMoveOperation,
+ PayloadError, payload_checker._CheckMoveOperation,
op, None, 134, 134, 'foo')
self.AddToMessage(op.src_extents,
@@ -764,29 +743,27 @@
self.AddToMessage(op.dst_extents,
self.NewExtentList((0, 128), (512, 6)))
self.assertRaises(
- update_payload.PayloadError,
- payload_checker._CheckMoveOperation,
+ PayloadError, payload_checker._CheckMoveOperation,
op, None, 134, 134, 'foo')
def testCheckAnyDiff(self):
"""Tests _CheckAnyDiffOperation()."""
payload_checker = checker.PayloadChecker(self.MockPayload())
+ op = update_metadata_pb2.InstallOperation()
# Pass.
self.assertIsNone(
- payload_checker._CheckAnyDiffOperation(10000, 3, 'foo'))
+ payload_checker._CheckAnyDiffOperation(op, 10000, 3, 'foo'))
# Fail, missing data blob.
self.assertRaises(
- update_payload.PayloadError,
- payload_checker._CheckAnyDiffOperation,
- None, 3, 'foo')
+ PayloadError, payload_checker._CheckAnyDiffOperation,
+ op, None, 3, 'foo')
# Fail, too big of a diff blob (unjustified).
self.assertRaises(
- update_payload.PayloadError,
- payload_checker._CheckAnyDiffOperation,
- 10000, 2, 'foo')
+ PayloadError, payload_checker._CheckAnyDiffOperation,
+ op, 10000, 2, 'foo')
def testCheckSourceCopyOperation_Pass(self):
"""Tests _CheckSourceCopyOperation(); pass case."""
@@ -797,15 +774,13 @@
def testCheckSourceCopyOperation_FailContainsData(self):
"""Tests _CheckSourceCopyOperation(); message contains data."""
payload_checker = checker.PayloadChecker(self.MockPayload())
- self.assertRaises(update_payload.PayloadError,
- payload_checker._CheckSourceCopyOperation,
+ self.assertRaises(PayloadError, payload_checker._CheckSourceCopyOperation,
134, 0, 0, 'foo')
def testCheckSourceCopyOperation_FailBlockCountsMismatch(self):
"""Tests _CheckSourceCopyOperation(); src and dst block totals not equal."""
payload_checker = checker.PayloadChecker(self.MockPayload())
- self.assertRaises(update_payload.PayloadError,
- payload_checker._CheckSourceCopyOperation,
+ self.assertRaises(PayloadError, payload_checker._CheckSourceCopyOperation,
None, 0, 1, 'foo')
def DoCheckOperationTest(self, op_type_name, is_last, allow_signature,
@@ -818,7 +793,7 @@
Args:
op_type_name: 'REPLACE', 'REPLACE_BZ', 'MOVE', 'BSDIFF', 'SOURCE_COPY',
- or 'SOURCE_BSDIFF'.
+ 'SOURCE_BSDIFF', 'BROTLI_BSDIFF' or 'PUFFDIFF'.
is_last: Whether we're testing the last operation in a sequence.
allow_signature: Whether we're testing a signature-capable operation.
allow_unhashed: Whether we're allowing to not hash the data.
@@ -857,7 +832,8 @@
total_src_blocks = 0
if op_type in (common.OpType.MOVE, common.OpType.BSDIFF,
- common.OpType.SOURCE_COPY, common.OpType.SOURCE_BSDIFF):
+ common.OpType.SOURCE_COPY, common.OpType.SOURCE_BSDIFF,
+ common.OpType.PUFFDIFF, common.OpType.BROTLI_BSDIFF):
if fail_src_extents:
self.AddToMessage(op.src_extents,
self.NewExtentList((1, 0)))
@@ -872,6 +848,9 @@
payload_checker.minor_version = 2 if fail_bad_minor_version else 1
elif op_type in (common.OpType.SOURCE_COPY, common.OpType.SOURCE_BSDIFF):
payload_checker.minor_version = 1 if fail_bad_minor_version else 2
+ elif op_type in (common.OpType.ZERO, common.OpType.DISCARD,
+ common.OpType.PUFFDIFF, common.OpType.BROTLI_BSDIFF):
+ payload_checker.minor_version = 3 if fail_bad_minor_version else 4
if op_type not in (common.OpType.MOVE, common.OpType.SOURCE_COPY):
if not fail_mismatched_data_offset_length:
@@ -889,6 +868,7 @@
op.data_sha256_hash = hashlib.sha256(fake_data).digest()
payload.ReadDataBlob(op.data_offset, op.data_length).AndReturn(
fake_data)
+
elif fail_data_hash:
# Create an invalid data blob hash.
op.data_sha256_hash = hashlib.sha256(
@@ -909,7 +889,9 @@
if total_src_blocks:
if fail_src_length:
op.src_length = total_src_blocks * block_size + 8
- else:
+ elif (op_type in (common.OpType.MOVE, common.OpType.BSDIFF,
+ common.OpType.SOURCE_BSDIFF) and
+ payload_checker.minor_version <= 3):
op.src_length = total_src_blocks * block_size
elif fail_src_length:
# Add an orphaned src_length.
@@ -918,7 +900,9 @@
if total_dst_blocks:
if fail_dst_length:
op.dst_length = total_dst_blocks * block_size + 8
- else:
+ elif (op_type in (common.OpType.MOVE, common.OpType.BSDIFF,
+ common.OpType.SOURCE_BSDIFF) and
+ payload_checker.minor_version <= 3):
op.dst_length = total_dst_blocks * block_size
self.mox.ReplayAll()
@@ -931,8 +915,7 @@
old_part_size, new_part_size, prev_data_offset, allow_signature,
blob_hash_counts)
if should_fail:
- self.assertRaises(update_payload.PayloadError,
- payload_checker._CheckOperation, *args)
+ self.assertRaises(PayloadError, payload_checker._CheckOperation, *args)
else:
self.assertEqual(op.data_length if op.HasField('data_length') else 0,
payload_checker._CheckOperation(*args))
@@ -954,6 +937,7 @@
self.assertEqual(17, len(result))
def DoCheckOperationsTest(self, fail_nonexhaustive_full_update):
+ """Tests _CheckOperations()."""
# Generate a test payload. For this test, we only care about one
# (arbitrary) set of operations, so we'll only be generating kernel and
# test with them.
@@ -982,11 +966,10 @@
payload_checker.payload_type = checker._TYPE_FULL
report = checker._PayloadReport()
- args = (payload_checker.payload.manifest.install_operations, report,
- 'foo', 0, rootfs_part_size, rootfs_part_size, 0, False)
+ args = (payload_checker.payload.manifest.install_operations, report, 'foo',
+ 0, rootfs_part_size, rootfs_part_size, rootfs_part_size, 0, False)
if fail_nonexhaustive_full_update:
- self.assertRaises(update_payload.PayloadError,
- payload_checker._CheckOperations, *args)
+ self.assertRaises(PayloadError, payload_checker._CheckOperations, *args)
else:
self.assertEqual(rootfs_data_length,
payload_checker._CheckOperations(*args))
@@ -994,6 +977,7 @@
def DoCheckSignaturesTest(self, fail_empty_sigs_blob, fail_missing_pseudo_op,
fail_mismatched_pseudo_op, fail_sig_missing_fields,
fail_unknown_sig_version, fail_incorrect_sig):
+ """Tests _CheckSignatures()."""
# Generate a test payload. For this test, we only care about the signature
# block and how it relates to the payload hash. Therefore, we're generating
# a random (otherwise useless) payload for this purpose.
@@ -1058,8 +1042,7 @@
fail_unknown_sig_version or fail_incorrect_sig)
args = (report, test_utils._PUBKEY_FILE_NAME)
if should_fail:
- self.assertRaises(update_payload.PayloadError,
- payload_checker._CheckSignatures, *args)
+ self.assertRaises(PayloadError, payload_checker._CheckSignatures, *args)
else:
self.assertIsNone(payload_checker._CheckSignatures(*args))
@@ -1088,7 +1071,7 @@
if should_succeed:
self.assertIsNone(payload_checker._CheckManifestMinorVersion(*args))
else:
- self.assertRaises(update_payload.PayloadError,
+ self.assertRaises(PayloadError,
payload_checker._CheckManifestMinorVersion, *args)
def DoRunTest(self, rootfs_part_size_provided, kernel_part_size_provided,
@@ -1096,6 +1079,7 @@
fail_mismatched_block_size, fail_excess_data,
fail_rootfs_part_size_exceeded,
fail_kernel_part_size_exceeded):
+ """Tests Run()."""
# Generate a test payload. For this test, we generate a full update that
# has sample kernel and rootfs operations. Since most testing is done with
# internal PayloadChecker methods that are tested elsewhere, here we only
@@ -1153,7 +1137,7 @@
'assert_type': 'delta' if fail_wrong_payload_type else 'full',
'block_size': use_block_size}}
if fail_invalid_block_size:
- self.assertRaises(update_payload.PayloadError, _GetPayloadChecker,
+ self.assertRaises(PayloadError, _GetPayloadChecker,
payload_gen.WriteToFileWithData, **kwargs)
else:
payload_checker = _GetPayloadChecker(payload_gen.WriteToFileWithData,
@@ -1167,8 +1151,7 @@
fail_rootfs_part_size_exceeded or
fail_kernel_part_size_exceeded)
if should_fail:
- self.assertRaises(update_payload.PayloadError, payload_checker.Run,
- **kwargs)
+ self.assertRaises(PayloadError, payload_checker.Run, **kwargs)
else:
self.assertIsNone(payload_checker.Run(**kwargs))
@@ -1275,7 +1258,8 @@
AddParametricTests('CheckOperation',
{'op_type_name': ('REPLACE', 'REPLACE_BZ', 'MOVE',
'BSDIFF', 'SOURCE_COPY',
- 'SOURCE_BSDIFF'),
+ 'SOURCE_BSDIFF', 'PUFFDIFF',
+ 'BROTLI_BSDIFF'),
'is_last': (True, False),
'allow_signature': (True, False),
'allow_unhashed': (True, False),
diff --git a/scripts/update_payload/common.py b/scripts/update_payload/common.py
index 678fc5d..231c504 100644
--- a/scripts/update_payload/common.py
+++ b/scripts/update_payload/common.py
@@ -6,8 +6,8 @@
from __future__ import print_function
-from error import PayloadError
-import update_metadata_pb2
+from update_payload import update_metadata_pb2
+from update_payload.error import PayloadError
#
@@ -27,7 +27,7 @@
INPLACE_MINOR_PAYLOAD_VERSION = 1
SOURCE_MINOR_PAYLOAD_VERSION = 2
OPSRCHASH_MINOR_PAYLOAD_VERSION = 3
-IMGDIFF_MINOR_PAYLOAD_VERSION = 4
+PUFFDIFF_MINOR_PAYLOAD_VERSION = 4
#
# Payload operation types.
@@ -35,7 +35,6 @@
class OpType(object):
"""Container for operation type constants."""
_CLASS = update_metadata_pb2.InstallOperation
- # pylint: disable=E1101
REPLACE = _CLASS.REPLACE
REPLACE_BZ = _CLASS.REPLACE_BZ
MOVE = _CLASS.MOVE
@@ -45,9 +44,10 @@
ZERO = _CLASS.ZERO
DISCARD = _CLASS.DISCARD
REPLACE_XZ = _CLASS.REPLACE_XZ
- IMGDIFF = _CLASS.IMGDIFF
+ PUFFDIFF = _CLASS.PUFFDIFF
+ BROTLI_BSDIFF = _CLASS.BROTLI_BSDIFF
ALL = (REPLACE, REPLACE_BZ, MOVE, BSDIFF, SOURCE_COPY, SOURCE_BSDIFF, ZERO,
- DISCARD, REPLACE_XZ, IMGDIFF)
+ DISCARD, REPLACE_XZ, PUFFDIFF, BROTLI_BSDIFF)
NAMES = {
REPLACE: 'REPLACE',
REPLACE_BZ: 'REPLACE_BZ',
@@ -58,7 +58,8 @@
ZERO: 'ZERO',
DISCARD: 'DISCARD',
REPLACE_XZ: 'REPLACE_XZ',
- IMGDIFF: 'IMGDIFF',
+ PUFFDIFF: 'PUFFDIFF',
+ BROTLI_BSDIFF: 'BROTLI_BSDIFF',
}
def __init__(self):
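
With this change the OpType container tracks the updated InstallOperation.Type enum: value 9 is now reported as PUFFDIFF instead of IMGDIFF, and the new BROTLI_BSDIFF value is included in ALL and NAMES. A small sketch of turning a numeric operation type into its display name using the NAMES map above; the fallback to the raw number is illustrative and not part of the module:

from update_payload import common

def op_type_name(op_type):
    # Known types map to 'REPLACE', 'PUFFDIFF', 'BROTLI_BSDIFF', etc.;
    # unknown values fall back to their numeric form.
    return common.OpType.NAMES.get(op_type, str(op_type))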
diff --git a/scripts/update_payload/format_utils_unittest.py b/scripts/update_payload/format_utils_unittest.py
index 8c5ba8e..7153f9e 100755
--- a/scripts/update_payload/format_utils_unittest.py
+++ b/scripts/update_payload/format_utils_unittest.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/python2
#
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
@@ -8,10 +8,11 @@
import unittest
-import format_utils
+from update_payload import format_utils
class NumToPercentTest(unittest.TestCase):
+ """ Tests number conversion to percentage format."""
def testHundredPercent(self):
self.assertEqual(format_utils.NumToPercent(1, 1), '100%')
@@ -43,6 +44,7 @@
class BytesToHumanReadableTest(unittest.TestCase):
+ """ Tests number conversion to human readable format."""
def testBaseTwo(self):
self.assertEqual(format_utils.BytesToHumanReadable(0x1000), '4 KiB')
self.assertEqual(format_utils.BytesToHumanReadable(0x400000), '4 MiB')
diff --git a/scripts/update_payload/histogram.py b/scripts/update_payload/histogram.py
index 9916329..f72db61 100644
--- a/scripts/update_payload/histogram.py
+++ b/scripts/update_payload/histogram.py
@@ -6,7 +6,7 @@
from collections import defaultdict
-import format_utils
+from update_payload import format_utils
class Histogram(object):
diff --git a/scripts/update_payload/histogram_unittest.py b/scripts/update_payload/histogram_unittest.py
index 421ff20..643bb32 100755
--- a/scripts/update_payload/histogram_unittest.py
+++ b/scripts/update_payload/histogram_unittest.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/python2
#
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
@@ -8,11 +8,12 @@
import unittest
-import format_utils
-import histogram
+from update_payload import format_utils
+from update_payload import histogram
class HistogramTest(unittest.TestCase):
+ """ Tests histogram"""
@staticmethod
def AddHumanReadableSize(size):
diff --git a/scripts/update_payload/payload.py b/scripts/update_payload/payload.py
index f76c0de..8d9a20e 100644
--- a/scripts/update_payload/payload.py
+++ b/scripts/update_payload/payload.py
@@ -9,12 +9,12 @@
import hashlib
import struct
-import applier
-import block_tracer
-import checker
-import common
-from error import PayloadError
-import update_metadata_pb2
+from update_payload import applier
+from update_payload import block_tracer
+from update_payload import checker
+from update_payload import common
+from update_payload import update_metadata_pb2
+from update_payload.error import PayloadError
#
@@ -101,13 +101,15 @@
hasher=hasher)
- def __init__(self, payload_file):
+ def __init__(self, payload_file, payload_file_offset=0):
"""Initialize the payload object.
Args:
payload_file: update payload file object open for reading
+ payload_file_offset: the offset of the actual payload inside payload_file
"""
self.payload_file = payload_file
+ self.payload_file_offset = payload_file_offset
self.manifest_hasher = None
self.is_init = False
self.header = None
@@ -159,7 +161,8 @@
return common.Read(
self.payload_file, self.header.metadata_signature_len,
- offset=self.header.size + self.header.manifest_len)
+ offset=self.payload_file_offset + self.header.size +
+ self.header.manifest_len)
def ReadDataBlob(self, offset, length):
"""Reads and returns a single data blob from the update payload.
@@ -175,7 +178,8 @@
PayloadError if a read error occurred.
"""
return common.Read(self.payload_file, length,
- offset=self.data_offset + offset)
+ offset=self.payload_file_offset + self.data_offset +
+ offset)
def Init(self):
"""Initializes the payload object.
@@ -189,11 +193,10 @@
if self.is_init:
raise PayloadError('payload object already initialized')
- # Initialize hash context.
- # pylint: disable=E1101
self.manifest_hasher = hashlib.sha256()
# Read the file header.
+ self.payload_file.seek(self.payload_file_offset)
self.header = self._ReadHeader()
# Read the manifest.
@@ -215,6 +218,7 @@
def Describe(self):
"""Emits the payload embedded description data to standard output."""
def _DescribeImageInfo(description, image_info):
+ """Display info about the image."""
def _DisplayIndentedValue(name, value):
print(' {:<14} {}'.format(name+':', value))
@@ -231,11 +235,9 @@
_DisplayIndentedValue('Build version', image_info.build_version)
if self.manifest.HasField('old_image_info'):
- # pylint: disable=E1101
_DescribeImageInfo('Old Image', self.manifest.old_image_info)
if self.manifest.HasField('new_image_info'):
- # pylint: disable=E1101
_DescribeImageInfo('New Image', self.manifest.new_image_info)
def _AssertInit(self):
@@ -245,7 +247,7 @@
def ResetFile(self):
"""Resets the offset of the payload file to right past the manifest."""
- self.payload_file.seek(self.data_offset)
+ self.payload_file.seek(self.payload_file_offset + self.data_offset)
def IsDelta(self):
"""Returns True iff the payload appears to be a delta."""
@@ -293,7 +295,7 @@
def Apply(self, new_kernel_part, new_rootfs_part, old_kernel_part=None,
old_rootfs_part=None, bsdiff_in_place=True, bspatch_path=None,
- truncate_to_expected_size=True):
+ puffpatch_path=None, truncate_to_expected_size=True):
"""Applies the update payload.
Args:
@@ -303,6 +305,7 @@
old_rootfs_part: name of source rootfs partition file (optional)
bsdiff_in_place: whether to perform BSDIFF operations in-place (optional)
bspatch_path: path to the bspatch binary (optional)
+ puffpatch_path: path to the puffpatch binary (optional)
truncate_to_expected_size: whether to truncate the resulting partitions
to their expected sizes, as specified in the
payload (optional)
@@ -315,6 +318,7 @@
# Create a short-lived payload applier object and run it.
helper = applier.PayloadApplier(
self, bsdiff_in_place=bsdiff_in_place, bspatch_path=bspatch_path,
+ puffpatch_path=puffpatch_path,
truncate_to_expected_size=truncate_to_expected_size)
helper.Run(new_kernel_part, new_rootfs_part,
old_kernel_part=old_kernel_part,
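
The new payload_file_offset argument is threaded through the header, manifest, metadata-signature and data-blob reads as well as ResetFile(), so every seek and read is shifted by that offset. This lets the scripts parse a payload that sits at a known byte offset inside a larger file without extracting it first. A minimal usage sketch, assuming the caller already knows that offset; the file name and the 1024-byte offset below are illustrative only:

from update_payload import payload

with open('package_with_embedded_payload.bin', 'rb') as f:
    update = payload.Payload(f, payload_file_offset=1024)  # illustrative offset
    update.Init()       # seeks to the offset, then reads the header and manifest
    update.Describe()   # prints the embedded description data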
diff --git a/scripts/update_payload/test_utils.py b/scripts/update_payload/test_utils.py
index 61a91f5..38712fb 100644
--- a/scripts/update_payload/test_utils.py
+++ b/scripts/update_payload/test_utils.py
@@ -12,9 +12,9 @@
import struct
import subprocess
-import common
-import payload
-import update_metadata_pb2
+from update_payload import common
+from update_payload import payload
+from update_payload import update_metadata_pb2
class TestError(Exception):
@@ -84,7 +84,6 @@
Raises:
TestError if something goes wrong.
"""
- # pylint: disable=E1101
data_sha256_hash = common.SIG_ASN1_HEADER + hashlib.sha256(data).digest()
sign_cmd = ['openssl', 'rsautl', '-sign', '-inkey', privkey_file_name]
try:
@@ -110,8 +109,6 @@
version: signature version (None means do not assign)
data: signature binary data (None means do not assign)
"""
- # Pylint fails to identify a member of the Signatures message.
- # pylint: disable=E1101
sig = self.sigs.signatures.add()
if version is not None:
sig.version = version
@@ -174,11 +171,9 @@
part_hash: the partition hash
"""
if is_kernel:
- # pylint: disable=E1101
part_info = (self.manifest.new_kernel_info if is_new
else self.manifest.old_kernel_info)
else:
- # pylint: disable=E1101
part_info = (self.manifest.new_rootfs_info if is_new
else self.manifest.old_rootfs_info)
_SetMsgField(part_info, 'size', part_size)
@@ -188,7 +183,6 @@
data_length=None, src_extents=None, src_length=None,
dst_extents=None, dst_length=None, data_sha256_hash=None):
"""Adds an InstallOperation entry."""
- # pylint: disable=E1101
operations = (self.manifest.kernel_install_operations if is_kernel
else self.manifest.install_operations)
@@ -293,7 +287,6 @@
data_offset = data_length = data_sha256_hash = None
if data_blob is not None:
if do_hash_data_blob:
- # pylint: disable=E1101
data_sha256_hash = hashlib.sha256(data_blob).digest()
data_length, data_offset = self.AddData(data_blob)
diff --git a/scripts/update_payload/update_metadata_pb2.py b/scripts/update_payload/update_metadata_pb2.py
index 46c475e..595f2f6 100644
--- a/scripts/update_payload/update_metadata_pb2.py
+++ b/scripts/update_payload/update_metadata_pb2.py
@@ -13,7 +13,7 @@
DESCRIPTOR = _descriptor.FileDescriptor(
name='update_metadata.proto',
package='chromeos_update_engine',
- serialized_pb='\n\x15update_metadata.proto\x12\x16\x63hromeos_update_engine\"1\n\x06\x45xtent\x12\x13\n\x0bstart_block\x18\x01 \x01(\x04\x12\x12\n\nnum_blocks\x18\x02 \x01(\x04\"z\n\nSignatures\x12@\n\nsignatures\x18\x01 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x1a*\n\tSignature\x12\x0f\n\x07version\x18\x01 \x01(\r\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\"+\n\rPartitionInfo\x12\x0c\n\x04size\x18\x01 \x01(\x04\x12\x0c\n\x04hash\x18\x02 \x01(\x0c\"w\n\tImageInfo\x12\r\n\x05\x62oard\x18\x01 \x01(\t\x12\x0b\n\x03key\x18\x02 \x01(\t\x12\x0f\n\x07\x63hannel\x18\x03 \x01(\t\x12\x0f\n\x07version\x18\x04 \x01(\t\x12\x15\n\rbuild_channel\x18\x05 \x01(\t\x12\x15\n\rbuild_version\x18\x06 \x01(\t\"\xd2\x03\n\x10InstallOperation\x12;\n\x04type\x18\x01 \x02(\x0e\x32-.chromeos_update_engine.InstallOperation.Type\x12\x13\n\x0b\x64\x61ta_offset\x18\x02 \x01(\r\x12\x13\n\x0b\x64\x61ta_length\x18\x03 \x01(\r\x12\x33\n\x0bsrc_extents\x18\x04 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\nsrc_length\x18\x05 \x01(\x04\x12\x33\n\x0b\x64st_extents\x18\x06 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\ndst_length\x18\x07 \x01(\x04\x12\x18\n\x10\x64\x61ta_sha256_hash\x18\x08 \x01(\x0c\x12\x17\n\x0fsrc_sha256_hash\x18\t \x01(\x0c\"\x91\x01\n\x04Type\x12\x0b\n\x07REPLACE\x10\x00\x12\x0e\n\nREPLACE_BZ\x10\x01\x12\x08\n\x04MOVE\x10\x02\x12\n\n\x06\x42SDIFF\x10\x03\x12\x0f\n\x0bSOURCE_COPY\x10\x04\x12\x11\n\rSOURCE_BSDIFF\x10\x05\x12\x08\n\x04ZERO\x10\x06\x12\x0b\n\x07\x44ISCARD\x10\x07\x12\x0e\n\nREPLACE_XZ\x10\x08\x12\x0b\n\x07IMGDIFF\x10\t\"\x88\x03\n\x0fPartitionUpdate\x12\x16\n\x0epartition_name\x18\x01 \x02(\t\x12\x17\n\x0frun_postinstall\x18\x02 \x01(\x08\x12\x18\n\x10postinstall_path\x18\x03 \x01(\t\x12\x17\n\x0f\x66ilesystem_type\x18\x04 \x01(\t\x12M\n\x17new_partition_signature\x18\x05 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x12\x41\n\x12old_partition_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12\x41\n\x12new_partition_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12<\n\noperations\x18\x08 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\"\xc4\x05\n\x14\x44\x65ltaArchiveManifest\x12\x44\n\x12install_operations\x18\x01 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12K\n\x19kernel_install_operations\x18\x02 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12\x18\n\nblock_size\x18\x03 \x01(\r:\x04\x34\x30\x39\x36\x12\x19\n\x11signatures_offset\x18\x04 \x01(\x04\x12\x17\n\x0fsignatures_size\x18\x05 \x01(\x04\x12>\n\x0fold_kernel_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12>\n\x0fnew_kernel_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12>\n\x0fold_rootfs_info\x18\x08 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12>\n\x0fnew_rootfs_info\x18\t \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12\x39\n\x0eold_image_info\x18\n \x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x39\n\x0enew_image_info\x18\x0b \x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x18\n\rminor_version\x18\x0c \x01(\r:\x01\x30\x12;\n\npartitions\x18\r \x03(\x0b\x32\'.chromeos_update_engine.PartitionUpdateB\x02H\x03')
+ serialized_pb='\n\x15update_metadata.proto\x12\x16\x63hromeos_update_engine\"1\n\x06\x45xtent\x12\x13\n\x0bstart_block\x18\x01 \x01(\x04\x12\x12\n\nnum_blocks\x18\x02 \x01(\x04\"z\n\nSignatures\x12@\n\nsignatures\x18\x01 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x1a*\n\tSignature\x12\x0f\n\x07version\x18\x01 \x01(\r\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\"+\n\rPartitionInfo\x12\x0c\n\x04size\x18\x01 \x01(\x04\x12\x0c\n\x04hash\x18\x02 \x01(\x0c\"w\n\tImageInfo\x12\r\n\x05\x62oard\x18\x01 \x01(\t\x12\x0b\n\x03key\x18\x02 \x01(\t\x12\x0f\n\x07\x63hannel\x18\x03 \x01(\t\x12\x0f\n\x07version\x18\x04 \x01(\t\x12\x15\n\rbuild_channel\x18\x05 \x01(\t\x12\x15\n\rbuild_version\x18\x06 \x01(\t\"\xe6\x03\n\x10InstallOperation\x12;\n\x04type\x18\x01 \x02(\x0e\x32-.chromeos_update_engine.InstallOperation.Type\x12\x13\n\x0b\x64\x61ta_offset\x18\x02 \x01(\r\x12\x13\n\x0b\x64\x61ta_length\x18\x03 \x01(\r\x12\x33\n\x0bsrc_extents\x18\x04 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\nsrc_length\x18\x05 \x01(\x04\x12\x33\n\x0b\x64st_extents\x18\x06 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\ndst_length\x18\x07 \x01(\x04\x12\x18\n\x10\x64\x61ta_sha256_hash\x18\x08 \x01(\x0c\x12\x17\n\x0fsrc_sha256_hash\x18\t \x01(\x0c\"\xa5\x01\n\x04Type\x12\x0b\n\x07REPLACE\x10\x00\x12\x0e\n\nREPLACE_BZ\x10\x01\x12\x08\n\x04MOVE\x10\x02\x12\n\n\x06\x42SDIFF\x10\x03\x12\x0f\n\x0bSOURCE_COPY\x10\x04\x12\x11\n\rSOURCE_BSDIFF\x10\x05\x12\x08\n\x04ZERO\x10\x06\x12\x0b\n\x07\x44ISCARD\x10\x07\x12\x0e\n\nREPLACE_XZ\x10\x08\x12\x0c\n\x08PUFFDIFF\x10\t\x12\x11\n\rBROTLI_BSDIFF\x10\n\"\xa6\x03\n\x0fPartitionUpdate\x12\x16\n\x0epartition_name\x18\x01 \x02(\t\x12\x17\n\x0frun_postinstall\x18\x02 \x01(\x08\x12\x18\n\x10postinstall_path\x18\x03 \x01(\t\x12\x17\n\x0f\x66ilesystem_type\x18\x04 \x01(\t\x12M\n\x17new_partition_signature\x18\x05 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x12\x41\n\x12old_partition_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12\x41\n\x12new_partition_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12<\n\noperations\x18\x08 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12\x1c\n\x14postinstall_optional\x18\t \x01(\x08\"\xc4\x05\n\x14\x44\x65ltaArchiveManifest\x12\x44\n\x12install_operations\x18\x01 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12K\n\x19kernel_install_operations\x18\x02 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12\x18\n\nblock_size\x18\x03 \x01(\r:\x04\x34\x30\x39\x36\x12\x19\n\x11signatures_offset\x18\x04 \x01(\x04\x12\x17\n\x0fsignatures_size\x18\x05 \x01(\x04\x12>\n\x0fold_kernel_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12>\n\x0fnew_kernel_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12>\n\x0fold_rootfs_info\x18\x08 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12>\n\x0fnew_rootfs_info\x18\t \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12\x39\n\x0eold_image_info\x18\n \x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x39\n\x0enew_image_info\x18\x0b \x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x18\n\rminor_version\x18\x0c \x01(\r:\x01\x30\x12;\n\npartitions\x18\r \x03(\x0b\x32\'.chromeos_update_engine.PartitionUpdateB\x02H\x03')
@@ -60,14 +60,18 @@
options=None,
type=None),
_descriptor.EnumValueDescriptor(
- name='IMGDIFF', index=9, number=9,
+ name='PUFFDIFF', index=9, number=9,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='BROTLI_BSDIFF', index=10, number=10,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=712,
- serialized_end=857,
+ serialized_end=877,
)
@@ -347,7 +351,7 @@
is_extendable=False,
extension_ranges=[],
serialized_start=391,
- serialized_end=857,
+ serialized_end=877,
)
@@ -414,6 +418,13 @@
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
+ _descriptor.FieldDescriptor(
+ name='postinstall_optional', full_name='chromeos_update_engine.PartitionUpdate.postinstall_optional', index=8,
+ number=9, type=8, cpp_type=7, label=1,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
],
extensions=[
],
@@ -423,8 +434,8 @@
options=None,
is_extendable=False,
extension_ranges=[],
- serialized_start=860,
- serialized_end=1252,
+ serialized_start=880,
+ serialized_end=1302,
)
@@ -535,8 +546,8 @@
options=None,
is_extendable=False,
extension_ranges=[],
- serialized_start=1255,
- serialized_end=1963,
+ serialized_start=1305,
+ serialized_end=2013,
)
_SIGNATURES_SIGNATURE.containing_type = _SIGNATURES;
diff --git a/service_observer_interface.h b/service_observer_interface.h
index 893df04..4edb0ac 100644
--- a/service_observer_interface.h
+++ b/service_observer_interface.h
@@ -31,11 +31,8 @@
// Called whenever the value of these parameters changes. For |progress|
// value changes, this method will be called only if it changes significantly.
- virtual void SendStatusUpdate(int64_t last_checked_time,
- double progress,
- update_engine::UpdateStatus status,
- const std::string& new_version,
- int64_t new_size) = 0;
+ virtual void SendStatusUpdate(
+ const update_engine::UpdateEngineStatus& update_engine_status) = 0;
// Called whenever an update attempt is completed.
virtual void SendPayloadApplicationComplete(ErrorCode error_code) = 0;
diff --git a/sideload_main.cc b/sideload_main.cc
index 574d062..ddb312e 100644
--- a/sideload_main.cc
+++ b/sideload_main.cc
@@ -25,7 +25,6 @@
#include <base/strings/stringprintf.h>
#include <brillo/asynchronous_signal_handler.h>
#include <brillo/flag_helper.h>
-#include <brillo/make_unique_ptr.h>
#include <brillo/message_loops/base_message_loop.h>
#include <brillo/streams/file_stream.h>
#include <brillo/streams/stream.h>
@@ -42,6 +41,7 @@
using std::string;
using std::vector;
using update_engine::UpdateStatus;
+using update_engine::UpdateEngineStatus;
namespace {
// The root directory used for temporary files in update_engine_sideload.
@@ -81,11 +81,10 @@
}
// ServiceObserverInterface overrides.
- void SendStatusUpdate(int64_t last_checked_time,
- double progress,
- UpdateStatus status,
- const string& new_version,
- int64_t new_size) override {
+ void SendStatusUpdate(
+ const UpdateEngineStatus& update_engine_status) override {
+ UpdateStatus status = update_engine_status.status;
+ double progress = update_engine_status.progress;
if (status_ != status && (status == UpdateStatus::DOWNLOADING ||
status == UpdateStatus::FINALIZING)) {
// Split the progress bar in two parts for the two stages DOWNLOADING and
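
The sideload observer now receives the whole UpdateEngineStatus struct and extracts the status and progress fields itself before mapping the two stages onto a single progress bar. A rough sketch of the stage-combining idea described in the comment above; the even 50/50 split is an assumption for illustration, not the weighting used by sideload_main.cc:

def overall_progress(status, stage_progress):
    # Assumed split: first half of the bar for DOWNLOADING, second half
    # for FINALIZING; other states report no progress.
    if status == 'DOWNLOADING':
        return 0.5 * stage_progress
    if status == 'FINALIZING':
        return 0.5 + 0.5 * stage_progress
    return 0.0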
diff --git a/system_state.h b/system_state.h
index d538427..1b0ad08 100644
--- a/system_state.h
+++ b/system_state.h
@@ -17,8 +17,6 @@
#ifndef UPDATE_ENGINE_SYSTEM_STATE_H_
#define UPDATE_ENGINE_SYSTEM_STATE_H_
-class MetricsLibraryInterface;
-
namespace chromeos_update_manager {
class UpdateManager;
@@ -40,6 +38,7 @@
class ClockInterface;
class ConnectionManagerInterface;
class HardwareInterface;
+class MetricsReporterInterface;
class OmahaRequestParams;
class P2PManager;
class PayloadStateInterface;
@@ -76,7 +75,7 @@
virtual HardwareInterface* hardware() = 0;
// Gets the Metrics Library interface for reporting UMA stats.
- virtual MetricsLibraryInterface* metrics_lib() = 0;
+ virtual MetricsReporterInterface* metrics_reporter() = 0;
// Gets the interface object for persisted store.
virtual PrefsInterface* prefs() = 0;
diff --git a/test_http_server.cc b/test_http_server.cc
index 2955e79..93aa11c 100644
--- a/test_http_server.cc
+++ b/test_http_server.cc
@@ -98,9 +98,11 @@
request->raw_headers = headers;
// Break header into lines.
- vector<string> lines;
- base::SplitStringUsingSubstr(
- headers.substr(0, headers.length() - strlen(EOL EOL)), EOL, &lines);
+ vector<string> lines = base::SplitStringUsingSubstr(
+ headers.substr(0, headers.length() - strlen(EOL EOL)),
+ EOL,
+ base::TRIM_WHITESPACE,
+ base::SPLIT_WANT_ALL);
// Decode URL line.
vector<string> terms = base::SplitString(lines[0], base::kWhitespaceASCII,
diff --git a/update_attempter.cc b/update_attempter.cc
index ff3b046..9cef154 100644
--- a/update_attempter.cc
+++ b/update_attempter.cc
@@ -31,10 +31,8 @@
#include <base/rand_util.h>
#include <base/strings/string_util.h>
#include <base/strings/stringprintf.h>
-#include <brillo/bind_lambda.h>
#include <brillo/data_encoding.h>
#include <brillo/errors/error_codes.h>
-#include <brillo/make_unique_ptr.h>
#include <brillo/message_loops/message_loop.h>
#include <policy/device_policy.h>
#include <policy/libpolicy.h>
@@ -50,7 +48,7 @@
#include "update_engine/common/subprocess.h"
#include "update_engine/common/utils.h"
#include "update_engine/libcurl_http_fetcher.h"
-#include "update_engine/metrics.h"
+#include "update_engine/metrics_reporter_interface.h"
#include "update_engine/omaha_request_action.h"
#include "update_engine/omaha_request_params.h"
#include "update_engine/omaha_response_handler_action.h"
@@ -78,6 +76,8 @@
using std::shared_ptr;
using std::string;
using std::vector;
+using update_engine::UpdateAttemptFlags;
+using update_engine::UpdateEngineStatus;
namespace chromeos_update_engine {
@@ -119,20 +119,11 @@
return code;
}
-UpdateAttempter::UpdateAttempter(
- SystemState* system_state,
- CertificateChecker* cert_checker,
- org::chromium::NetworkProxyServiceInterfaceProxyInterface*
- network_proxy_service_proxy)
+UpdateAttempter::UpdateAttempter(SystemState* system_state,
+ CertificateChecker* cert_checker)
: processor_(new ActionProcessor()),
system_state_(system_state),
-#if USE_LIBCROS
- cert_checker_(cert_checker),
- chrome_proxy_resolver_(network_proxy_service_proxy) {
-#else
- cert_checker_(cert_checker) {
-#endif // USE_LIBCROS
-}
+ cert_checker_(cert_checker) {}
UpdateAttempter::~UpdateAttempter() {
// CertificateChecker might not be initialized in unittests.
@@ -178,9 +169,8 @@
void UpdateAttempter::CertificateChecked(ServerToCheck server_to_check,
CertificateCheckResult result) {
- metrics::ReportCertificateCheckMetrics(system_state_,
- server_to_check,
- result);
+ system_state_->metrics_reporter()->ReportCertificateCheckMetrics(
+ server_to_check, result);
}
bool UpdateAttempter::CheckAndReportDailyMetrics() {
@@ -241,7 +231,7 @@
return;
}
- metrics::ReportDailyMetrics(system_state_, age);
+ system_state_->metrics_reporter()->ReportDailyMetrics(age);
}
void UpdateAttempter::Update(const string& app_version,
@@ -269,10 +259,11 @@
// not performing an update check because of this.
LOG(INFO) << "Not updating b/c we already updated and we're waiting for "
<< "reboot, we'll ping Omaha instead";
- metrics::ReportUpdateCheckMetrics(system_state_,
- metrics::CheckResult::kRebootPending,
- metrics::CheckReaction::kUnset,
- metrics::DownloadErrorCode::kUnset);
+ system_state_->metrics_reporter()->ReportUpdateCheckMetrics(
+ system_state_,
+ metrics::CheckResult::kRebootPending,
+ metrics::CheckReaction::kUnset,
+ metrics::DownloadErrorCode::kUnset);
PingOmaha();
return;
}
@@ -328,12 +319,11 @@
bool use_p2p_for_downloading = false;
bool use_p2p_for_sharing = false;
- // Never use p2p for downloading in interactive checks unless the
- // developer has opted in for it via a marker file.
+ // Never use p2p for downloading in interactive checks unless the developer
+ // has opted in for it via a marker file.
//
- // (Why would a developer want to opt in? If he's working on the
- // update_engine or p2p codebases so he can actually test his
- // code.).
+ // (Why would a developer want to opt in? If they are working on the
+ // update_engine or p2p codebases so they can actually test their code.)
if (system_state_ != nullptr) {
if (!system_state_->p2p_manager()->IsP2PEnabled()) {
@@ -606,42 +596,40 @@
shared_ptr<OmahaResponseHandlerAction> response_handler_action(
new OmahaResponseHandlerAction(system_state_));
- shared_ptr<OmahaRequestAction> download_started_action(
- new OmahaRequestAction(system_state_,
- new OmahaEvent(
- OmahaEvent::kTypeUpdateDownloadStarted),
- brillo::make_unique_ptr(new LibcurlHttpFetcher(
- GetProxyResolver(),
- system_state_->hardware())),
- false));
+ shared_ptr<OmahaRequestAction> download_started_action(new OmahaRequestAction(
+ system_state_,
+ new OmahaEvent(OmahaEvent::kTypeUpdateDownloadStarted),
+ std::make_unique<LibcurlHttpFetcher>(GetProxyResolver(),
+ system_state_->hardware()),
+ false));
LibcurlHttpFetcher* download_fetcher =
new LibcurlHttpFetcher(GetProxyResolver(), system_state_->hardware());
download_fetcher->set_server_to_check(ServerToCheck::kDownload);
+ if (interactive)
+ download_fetcher->set_max_retry_count(kDownloadMaxRetryCountInteractive);
shared_ptr<DownloadAction> download_action(
new DownloadAction(prefs_,
system_state_->boot_control(),
system_state_->hardware(),
system_state_,
- download_fetcher)); // passes ownership
+ download_fetcher, // passes ownership
+ interactive));
shared_ptr<OmahaRequestAction> download_finished_action(
new OmahaRequestAction(
system_state_,
new OmahaEvent(OmahaEvent::kTypeUpdateDownloadFinished),
- brillo::make_unique_ptr(
- new LibcurlHttpFetcher(GetProxyResolver(),
- system_state_->hardware())),
+ std::make_unique<LibcurlHttpFetcher>(GetProxyResolver(),
+ system_state_->hardware()),
false));
shared_ptr<FilesystemVerifierAction> filesystem_verifier_action(
new FilesystemVerifierAction());
shared_ptr<OmahaRequestAction> update_complete_action(
- new OmahaRequestAction(
- system_state_,
- new OmahaEvent(OmahaEvent::kTypeUpdateComplete),
- brillo::make_unique_ptr(
- new LibcurlHttpFetcher(GetProxyResolver(),
- system_state_->hardware())),
- false));
+ new OmahaRequestAction(system_state_,
+ new OmahaEvent(OmahaEvent::kTypeUpdateComplete),
+ std::make_unique<LibcurlHttpFetcher>(
+ GetProxyResolver(), system_state_->hardware()),
+ false));
download_action->set_delegate(this);
response_handler_action_ = response_handler_action;
@@ -771,9 +759,20 @@
return BootControlInterface::kInvalidSlot;
}
-void UpdateAttempter::CheckForUpdate(const string& app_version,
+bool UpdateAttempter::CheckForUpdate(const string& app_version,
const string& omaha_url,
- bool interactive) {
+ UpdateAttemptFlags flags) {
+ bool interactive = !(flags & UpdateAttemptFlags::kFlagNonInteractive);
+
+ if (interactive && status_ != UpdateStatus::IDLE) {
+ // An update check is either in progress, or an update has completed and the
+ // system is in UPDATED_NEED_REBOOT. Either way, don't do an interactive
+ // update at this time.
+ LOG(INFO) << "Refusing to do an interactive update with an update already "
+ "in progress";
+ return false;
+ }
+
LOG(INFO) << "Forced update check requested.";
forced_app_version_.clear();
forced_omaha_url_.clear();
@@ -795,12 +794,22 @@
forced_omaha_url_ = constants::kOmahaDefaultAUTestURL;
}
+ if (interactive) {
+ // Use the passed-in update attempt flags for this update attempt instead
+ // of the previously set ones.
+ current_update_attempt_flags_ = flags;
+ // Note: The caching for non-interactive update checks happens in
+ // OnUpdateScheduled().
+ }
+
if (forced_update_pending_callback_.get()) {
// Make sure that a scheduling request is made prior to calling the forced
// update pending callback.
ScheduleUpdates();
forced_update_pending_callback_->Run(true, interactive);
}
+
+ return true;
}
bool UpdateAttempter::RebootIfNeeded() {
@@ -858,6 +867,15 @@
<< (params.is_interactive ? "interactive" : "periodic")
<< " update.";
+ if (!params.is_interactive) {
+ // Cache the update attempt flags that will be used by this update attempt
+ // so that they can't be changed mid-way through.
+ current_update_attempt_flags_ = update_attempt_flags_;
+ }
+
+ LOG(INFO) << "Update attempt flags in use = 0x" << std::hex
+ << current_update_attempt_flags_;
+
Update(forced_app_version_, forced_omaha_url_, params.target_channel,
params.target_version_prefix, false, params.is_interactive);
// Always clear the forced app_version and omaha_url after an update attempt
@@ -891,6 +909,9 @@
// Reset cpu shares back to normal.
cpu_limiter_.StopLimiter();
+ // Reset the state that's only valid for a single update pass.
+ current_update_attempt_flags_ = UpdateAttemptFlags::kNone;
+
if (status_ == UpdateStatus::REPORTING_ERROR_EVENT) {
LOG(INFO) << "Error event sent.";
@@ -1011,7 +1032,28 @@
server_dictated_poll_interval_ =
std::max(0, omaha_request_action->GetOutputObject().poll_interval);
}
+ } else if (type == OmahaResponseHandlerAction::StaticType()) {
+ // Depending on the returned error code, note that an update is available.
+ if (code == ErrorCode::kOmahaUpdateDeferredPerPolicy ||
+ code == ErrorCode::kSuccess) {
+ // Note that the status will be updated to DOWNLOADING when some bytes
+ // get actually downloaded from the server and the BytesReceived
+ // callback is invoked. This avoids notifying the user that a download
+ // has started in cases when the server and the client are unable to
+ // initiate the download.
+ CHECK(action == response_handler_action_.get());
+ auto plan = response_handler_action_->install_plan();
+ UpdateLastCheckedTime();
+ new_version_ = plan.version;
+ new_system_version_ = plan.system_version;
+ new_payload_size_ = 0;
+ for (const auto& payload : plan.payloads)
+ new_payload_size_ += payload.size;
+ cpu_limiter_.StartLimiter();
+ SetStatusAndNotify(UpdateStatus::UPDATE_AVAILABLE);
+ }
}
+ // General failure cases.
if (code != ErrorCode::kSuccess) {
// If the current state is at or past the download phase, count the failure
// in case a switch to full update becomes necessary. Ignore network
@@ -1024,23 +1066,18 @@
CreatePendingErrorEvent(action, code);
return;
}
- // Find out which action completed.
- if (type == OmahaResponseHandlerAction::StaticType()) {
- // Note that the status will be updated to DOWNLOADING when some bytes get
- // actually downloaded from the server and the BytesReceived callback is
- // invoked. This avoids notifying the user that a download has started in
- // cases when the server and the client are unable to initiate the download.
- CHECK(action == response_handler_action_.get());
- const InstallPlan& plan = response_handler_action_->install_plan();
- UpdateLastCheckedTime();
- new_version_ = plan.version;
- new_payload_size_ = 0;
- for (const auto& payload : plan.payloads)
- new_payload_size_ += payload.size;
- cpu_limiter_.StartLimiter();
- SetStatusAndNotify(UpdateStatus::UPDATE_AVAILABLE);
- } else if (type == DownloadAction::StaticType()) {
+ // Find out which action completed (successfully).
+ if (type == DownloadAction::StaticType()) {
SetStatusAndNotify(UpdateStatus::FINALIZING);
+ } else if (type == FilesystemVerifierAction::StaticType()) {
+ // Log the system properties before the postinst and after the file system
+ // is verified. It used to be done in the postinst itself, but postinst
+ // cannot do this anymore. These logs are frequently looked at, and it is
+ // preferable not to scatter them at random locations in the log but to
+ // emit them right before the postinst. The reason not to do this in the
+ // |PostinstallRunnerAction| is to prevent a dependency from
+ // libpayload_consumer on libupdate_engine.
+ LogImageProperties();
}
}
@@ -1127,16 +1164,15 @@
}
}
-bool UpdateAttempter::GetStatus(int64_t* last_checked_time,
- double* progress,
- string* current_operation,
- string* new_version,
- int64_t* new_payload_size) {
- *last_checked_time = last_checked_time_;
- *progress = download_progress_;
- *current_operation = UpdateStatusToString(status_);
- *new_version = new_version_;
- *new_payload_size = new_payload_size_;
+bool UpdateAttempter::GetStatus(UpdateEngineStatus* out_status) {
+ out_status->last_checked_time = last_checked_time_;
+ out_status->status = status_;
+ out_status->current_version = omaha_request_params_->app_version();
+ out_status->current_system_version = omaha_request_params_->system_version();
+ out_status->progress = download_progress_;
+ out_status->new_size_bytes = new_payload_size_;
+ out_status->new_version = new_version_;
+ out_status->new_system_version = new_system_version_;
return true;
}
@@ -1173,12 +1209,12 @@
}
void UpdateAttempter::BroadcastStatus() {
+ UpdateEngineStatus broadcast_status;
+ // Use common method for generating the current status.
+ GetStatus(&broadcast_status);
+
for (const auto& observer : service_observers_) {
- observer->SendStatusUpdate(last_checked_time_,
- download_progress_,
- status_,
- new_version_,
- new_payload_size_);
+ observer->SendStatusUpdate(broadcast_status);
}
last_notify_time_ = TimeTicks::Now();
}
@@ -1283,9 +1319,8 @@
shared_ptr<OmahaRequestAction> error_event_action(
new OmahaRequestAction(system_state_,
error_event_.release(), // Pass ownership.
- brillo::make_unique_ptr(new LibcurlHttpFetcher(
- GetProxyResolver(),
- system_state_->hardware())),
+ std::make_unique<LibcurlHttpFetcher>(
+ GetProxyResolver(), system_state_->hardware()),
false));
actions_.push_back(shared_ptr<AbstractAction>(error_event_action));
processor_->EnqueueAction(error_event_action.get());
@@ -1329,9 +1364,8 @@
shared_ptr<OmahaRequestAction> ping_action(new OmahaRequestAction(
system_state_,
nullptr,
- brillo::make_unique_ptr(new LibcurlHttpFetcher(
- GetProxyResolver(),
- system_state_->hardware())),
+ std::make_unique<LibcurlHttpFetcher>(GetProxyResolver(),
+ system_state_->hardware()),
true));
actions_.push_back(shared_ptr<OmahaRequestAction>(ping_action));
processor_->set_delegate(nullptr);
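
CheckForUpdate() now reports whether a check was actually started: it derives interactivity from the UpdateAttemptFlags bitmask, refuses interactive requests unless the attempter is IDLE, and pins the flags for the attempt (interactive checks pin them here, periodic checks in OnUpdateScheduled()). A minimal Python sketch of that gating, with illustrative names rather than the real C++ API:

FLAG_NONE = 0
FLAG_NON_INTERACTIVE = 1 << 0   # stands in for kFlagNonInteractive

def check_for_update(flags, current_status):
    """Returns True if a check was started, False if one is already underway."""
    interactive = not (flags & FLAG_NON_INTERACTIVE)
    if interactive and current_status != 'IDLE':
        # A check is running or a reboot is pending; don't interrupt it.
        return False
    # The flags used for this attempt are fixed from here on.
    current_attempt_flags = flags
    # ... schedule the update check using current_attempt_flags ...
    return True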
diff --git a/update_attempter.h b/update_attempter.h
index 7780357..76e93a2 100644
--- a/update_attempter.h
+++ b/update_attempter.h
@@ -29,9 +29,9 @@
#include <base/time/time.h>
#include <gtest/gtest_prod.h> // for FRIEND_TEST
-#if USE_LIBCROS
+#if USE_CHROME_NETWORK_PROXY
#include "update_engine/chrome_browser_proxy_resolver.h"
-#endif // USE_LIBCROS
+#endif // USE_CHROME_NETWORK_PROXY
#include "update_engine/certificate_checker.h"
#include "update_engine/client_library/include/update_engine/update_status.h"
#include "update_engine/common/action_processor.h"
@@ -46,14 +46,6 @@
#include "update_engine/update_manager/policy.h"
#include "update_engine/update_manager/update_manager.h"
-class MetricsLibraryInterface;
-
-namespace org {
-namespace chromium {
-class NetworkProxyServiceInterfaceProxyInterface;
-} // namespace chromium
-} // namespace org
-
namespace policy {
class PolicyProvider;
}
@@ -68,12 +60,10 @@
public PostinstallRunnerAction::DelegateInterface {
public:
using UpdateStatus = update_engine::UpdateStatus;
+ using UpdateAttemptFlags = update_engine::UpdateAttemptFlags;
static const int kMaxDeltaUpdateFailures;
- UpdateAttempter(SystemState* system_state,
- CertificateChecker* cert_checker,
- org::chromium::NetworkProxyServiceInterfaceProxyInterface*
- network_proxy_service_proxy);
+ UpdateAttempter(SystemState* system_state, CertificateChecker* cert_checker);
~UpdateAttempter() override;
// Further initialization to be done post construction.
@@ -114,12 +104,8 @@
// for testing purposes.
virtual bool ResetStatus();
- // Returns the current status in the out params. Returns true on success.
- virtual bool GetStatus(int64_t* last_checked_time,
- double* progress,
- std::string* current_operation,
- std::string* new_version,
- int64_t* new_size);
+ // Returns the current status in the out param. Returns true on success.
+ virtual bool GetStatus(update_engine::UpdateEngineStatus* out_status);
// Runs chromeos-setgoodkernel, whose responsibility it is to mark the
// currently booted partition has high priority/permanent/etc. The execution
@@ -136,12 +122,28 @@
int http_response_code() const { return http_response_code_; }
void set_http_response_code(int code) { http_response_code_ = code; }
+ // Set flags that influence how updates and checks are performed. These
+ // influence all future checks and updates until changed or the device
+ // reboots.
+ void SetUpdateAttemptFlags(UpdateAttemptFlags flags) {
+ update_attempt_flags_ = flags;
+ }
+
+ // Returns the update attempt flags that are in place for the current update
+ // attempt. These are cached at the start of an update attempt so that they
+ // remain constant throughout the process.
+ virtual UpdateAttemptFlags GetCurrentUpdateAttemptFlags() {
+ return current_update_attempt_flags_;
+ }
+
// This is the internal entry point for going through an
// update. If the current status is idle invokes Update.
// This is called by the DBus implementation.
- virtual void CheckForUpdate(const std::string& app_version,
+ // This returns true if an update check was started, false if a check or an
+ // update was already in progress.
+ virtual bool CheckForUpdate(const std::string& app_version,
const std::string& omaha_url,
- bool is_interactive);
+ UpdateAttemptFlags flags);
// This is the internal entry point for going through a rollback. This will
// attempt to run the postinstall on the non-active partition and set it as
@@ -252,17 +254,23 @@
FRIEND_TEST(UpdateAttempterTest, ActionCompletedDownloadTest);
FRIEND_TEST(UpdateAttempterTest, ActionCompletedErrorTest);
FRIEND_TEST(UpdateAttempterTest, ActionCompletedOmahaRequestTest);
+ FRIEND_TEST(UpdateAttempterTest, BootTimeInUpdateMarkerFile);
+ FRIEND_TEST(UpdateAttempterTest, BroadcastCompleteDownloadTest);
+ FRIEND_TEST(UpdateAttempterTest, ChangeToDownloadingOnReceivedBytesTest);
FRIEND_TEST(UpdateAttempterTest, CreatePendingErrorEventTest);
FRIEND_TEST(UpdateAttempterTest, CreatePendingErrorEventResumedTest);
FRIEND_TEST(UpdateAttempterTest, DisableDeltaUpdateIfNeededTest);
+ FRIEND_TEST(UpdateAttempterTest, DownloadProgressAccumulationTest);
FRIEND_TEST(UpdateAttempterTest, MarkDeltaUpdateFailureTest);
FRIEND_TEST(UpdateAttempterTest, PingOmahaTest);
+ FRIEND_TEST(UpdateAttempterTest, ReportDailyMetrics);
FRIEND_TEST(UpdateAttempterTest, ScheduleErrorEventActionNoEventTest);
FRIEND_TEST(UpdateAttempterTest, ScheduleErrorEventActionTest);
- FRIEND_TEST(UpdateAttempterTest, UpdateTest);
- FRIEND_TEST(UpdateAttempterTest, ReportDailyMetrics);
- FRIEND_TEST(UpdateAttempterTest, BootTimeInUpdateMarkerFile);
FRIEND_TEST(UpdateAttempterTest, TargetVersionPrefixSetAndReset);
+ FRIEND_TEST(UpdateAttempterTest, UpdateAttemptFlagsCachedAtUpdateStart);
+ FRIEND_TEST(UpdateAttempterTest, UpdateDeferredByPolicyTest);
+ FRIEND_TEST(UpdateAttempterTest, UpdateIsNotRunningWhenUpdateAvailable);
+ FRIEND_TEST(UpdateAttempterTest, UpdateTest);
// CertificateChecker::Observer method.
// Report metrics about the certificate being checked.
@@ -304,13 +312,13 @@
void MarkDeltaUpdateFailure();
ProxyResolver* GetProxyResolver() {
-#if USE_LIBCROS
+#if USE_CHROME_NETWORK_PROXY
return obeying_proxies_ ?
reinterpret_cast<ProxyResolver*>(&chrome_proxy_resolver_) :
reinterpret_cast<ProxyResolver*>(&direct_proxy_resolver_);
#else
return &direct_proxy_resolver_;
-#endif // USE_LIBCROS
+#endif // USE_CHROME_NETWORK_PROXY
}
// Sends a ping to Omaha.
@@ -438,7 +446,13 @@
int64_t last_checked_time_ = 0;
std::string prev_version_;
std::string new_version_ = "0.0.0.0";
- int64_t new_payload_size_ = 0;
+ std::string new_system_version_;
+ uint64_t new_payload_size_ = 0;
+ // Flags influencing all periodic update checks
+ UpdateAttemptFlags update_attempt_flags_ = UpdateAttemptFlags::kNone;
+ // Flags influencing the currently in-progress check (cached at the start of
+ // the update check).
+ UpdateAttemptFlags current_update_attempt_flags_ = UpdateAttemptFlags::kNone;
// Common parameters for all Omaha requests.
OmahaRequestParams* omaha_request_params_ = nullptr;
@@ -452,9 +466,9 @@
// Our two proxy resolvers
DirectProxyResolver direct_proxy_resolver_;
-#if USE_LIBCROS
+#if USE_CHROME_NETWORK_PROXY
ChromeBrowserProxyResolver chrome_proxy_resolver_;
-#endif // USE_LIBCROS
+#endif // USE_CHROME_NETWORK_PROXY
// Originally, both of these flags are false. Once UpdateBootFlags is called,
// |update_boot_flags_running_| is set to true. As soon as UpdateBootFlags
diff --git a/update_attempter_android.cc b/update_attempter_android.cc
index 6c992c8..aacb06b 100644
--- a/update_attempter_android.cc
+++ b/update_attempter_android.cc
@@ -18,8 +18,10 @@
#include <algorithm>
#include <map>
+#include <memory>
#include <utility>
+#include <android-base/properties.h>
#include <base/bind.h>
#include <base/logging.h>
#include <base/strings/string_number_conversions.h>
@@ -27,12 +29,14 @@
#include <brillo/data_encoding.h>
#include <brillo/message_loops/message_loop.h>
#include <brillo/strings/string_utils.h>
-#include <log/log.h>
+#include <log/log_safetynet.h>
#include "update_engine/common/constants.h"
#include "update_engine/common/file_fetcher.h"
#include "update_engine/common/utils.h"
#include "update_engine/daemon_state_interface.h"
+#include "update_engine/metrics_reporter_interface.h"
+#include "update_engine/metrics_utils.h"
#include "update_engine/network_selector.h"
#include "update_engine/payload_consumer/download_action.h"
#include "update_engine/payload_consumer/filesystem_verifier_action.h"
@@ -46,11 +50,13 @@
#endif
using base::Bind;
+using base::Time;
using base::TimeDelta;
using base::TimeTicks;
using std::shared_ptr;
using std::string;
using std::vector;
+using update_engine::UpdateEngineStatus;
namespace chromeos_update_engine {
@@ -75,6 +81,13 @@
return false;
}
+bool GetHeaderAsBool(const string& header, bool default_value) {
+ int value = 0;
+ if (base::StringToInt(header, &value) && (value == 0 || value == 1))
+ return value == 1;
+ return default_value;
+}
+
} // namespace
UpdateAttempterAndroid::UpdateAttempterAndroid(
@@ -86,7 +99,9 @@
prefs_(prefs),
boot_control_(boot_control),
hardware_(hardware),
- processor_(new ActionProcessor()) {
+ processor_(new ActionProcessor()),
+ clock_(new Clock()) {
+ metrics_reporter_ = metrics::CreateMetricsReporter();
network_selector_ = network::CreateNetworkSelector();
}
@@ -99,10 +114,12 @@
void UpdateAttempterAndroid::Init() {
// In case of update_engine restart without a reboot we need to restore the
// reboot needed state.
- if (UpdateCompletedOnThisBoot())
+ if (UpdateCompletedOnThisBoot()) {
SetStatusAndNotify(UpdateStatus::UPDATED_NEED_REBOOT);
- else
+ } else {
SetStatusAndNotify(UpdateStatus::IDLE);
+ UpdatePrefsAndReportUpdateMetricsOnReboot();
+ }
}
bool UpdateAttempterAndroid::ApplyPayload(
@@ -183,10 +200,25 @@
install_plan_.source_slot = boot_control_->GetCurrentSlot();
install_plan_.target_slot = install_plan_.source_slot == 0 ? 1 : 0;
- int data_wipe = 0;
install_plan_.powerwash_required =
- base::StringToInt(headers[kPayloadPropertyPowerwash], &data_wipe) &&
- data_wipe != 0;
+ GetHeaderAsBool(headers[kPayloadPropertyPowerwash], false);
+
+ install_plan_.switch_slot_on_reboot =
+ GetHeaderAsBool(headers[kPayloadPropertySwitchSlotOnReboot], true);
+
+ install_plan_.run_post_install = true;
+ // Optionally skip post install if and only if:
+ // a) we're resuming
+ // b) post install has already succeeded before
+ // c) RUN_POST_INSTALL is set to 0.
+ if (install_plan_.is_resume && prefs_->Exists(kPrefsPostInstallSucceeded)) {
+ bool post_install_succeeded = false;
+ prefs_->GetBoolean(kPrefsPostInstallSucceeded, &post_install_succeeded);
+ if (post_install_succeeded) {
+ install_plan_.run_post_install =
+ GetHeaderAsBool(headers[kPayloadPropertyRunPostInstall], true);
+ }
+ }
NetworkId network_id = kDefaultNetworkId;
if (!headers[kPayloadPropertyNetworkId].empty()) {
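
The payload headers controlling powerwash, slot switching and postinstall are now parsed with GetHeaderAsBool(), and postinstall is only skipped when all three conditions (a)-(c) above hold. A small Python sketch of the same decisions, with simplified parsing and illustrative names:

def get_header_as_bool(header, default):
    # Simplified take on GetHeaderAsBool(): a header parsing to 0 or 1
    # selects False/True; anything else keeps the default.
    try:
        value = int(header)
    except (TypeError, ValueError):
        return default
    return value == 1 if value in (0, 1) else default

def should_run_post_install(is_resume, post_install_succeeded_before, header):
    # Postinstall runs unless we are resuming, it already succeeded, and the
    # caller explicitly set RUN_POST_INSTALL=0.
    if is_resume and post_install_succeeded_before:
        return get_header_as_bool(header, True)
    return True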
@@ -222,6 +254,10 @@
// Just in case we didn't update boot flags yet, make sure they're updated
// before any update processing starts. This will start the update process.
UpdateBootFlags();
+
+ UpdatePrefsOnUpdateStart(install_plan_.is_resume);
+ // TODO(xunchang) report the metrics for unresumable updates
+
return true;
}
@@ -259,6 +295,7 @@
// after resetting to idle state, it doesn't go back to
// UpdateStatus::UPDATED_NEED_REBOOT state.
bool ret_value = prefs_->Delete(kPrefsUpdateCompletedOnBootId);
+ ClearMetricsPrefs();
// Update the boot flags so the current slot has higher priority.
if (!boot_control_->SetActiveBootSlot(boot_control_->GetCurrentSlot()))
@@ -299,7 +336,6 @@
// Update succeeded.
WriteUpdateCompletedMarker();
prefs_->SetInt64(kPrefsDeltaUpdateFailures, 0);
- DeltaPerformer::ResetUpdateProgress(prefs_, false);
LOG(INFO) << "Update successfully applied, waiting to reboot.";
break;
@@ -342,6 +378,11 @@
if (type == DownloadAction::StaticType()) {
download_progress_ = 0;
}
+ if (type == PostinstallRunnerAction::StaticType()) {
+ bool succeeded =
+ code == ErrorCode::kSuccess || code == ErrorCode::kUpdatedButNotActive;
+ prefs_->SetBoolean(kPrefsPostInstallSucceeded, succeeded);
+ }
if (code != ErrorCode::kSuccess) {
// If an action failed, the ActionProcessor will cancel the whole thing.
return;
@@ -363,6 +404,16 @@
} else {
ProgressUpdate(progress);
}
+
+ // Update the bytes downloaded in prefs.
+ int64_t current_bytes_downloaded =
+ metrics_utils::GetPersistedValue(kPrefsCurrentBytesDownloaded, prefs_);
+ int64_t total_bytes_downloaded =
+ metrics_utils::GetPersistedValue(kPrefsTotalBytesDownloaded, prefs_);
+ prefs_->SetInt64(kPrefsCurrentBytesDownloaded,
+ current_bytes_downloaded + bytes_progressed);
+ prefs_->SetInt64(kPrefsTotalBytesDownloaded,
+ total_bytes_downloaded + bytes_progressed);
}
bool UpdateAttempterAndroid::ShouldCancel(ErrorCode* cancel_reason) {
@@ -430,17 +481,34 @@
SetStatusAndNotify(new_status);
ongoing_update_ = false;
+ // The network id is only applicable to one download attempt; once that
+ // attempt is done, the network id should not be reused.
+ if (!network_selector_->SetProcessNetwork(kDefaultNetworkId)) {
+ LOG(WARNING) << "Unable to unbind network.";
+ }
+
for (auto observer : daemon_state_->service_observers())
observer->SendPayloadApplicationComplete(error_code);
+
+ CollectAndReportUpdateMetricsOnUpdateFinished(error_code);
+ ClearMetricsPrefs();
+ if (error_code == ErrorCode::kSuccess) {
+ metrics_utils::SetSystemUpdatedMarker(clock_.get(), prefs_);
+ // Clear the total bytes downloaded if and only if the update succeeds.
+ prefs_->SetInt64(kPrefsTotalBytesDownloaded, 0);
+ }
}
void UpdateAttempterAndroid::SetStatusAndNotify(UpdateStatus status) {
status_ = status;
size_t payload_size =
install_plan_.payloads.empty() ? 0 : install_plan_.payloads[0].size;
+ UpdateEngineStatus status_to_send = {.status = status_,
+ .progress = download_progress_,
+ .new_size_bytes = payload_size};
+
for (auto observer : daemon_state_->service_observers()) {
- observer->SendStatusUpdate(
- 0, download_progress_, status_, "", payload_size);
+ observer->SendStatusUpdate(status_to_send);
}
last_notify_time_ = TimeTicks::Now();
}
@@ -471,8 +539,9 @@
new DownloadAction(prefs_,
boot_control_,
hardware_,
- nullptr, // system_state, not used.
- download_fetcher)); // passes ownership
+ nullptr, // system_state, not used.
+ download_fetcher, // passes ownership
+ true /* is_interactive */));
shared_ptr<FilesystemVerifierAction> filesystem_verifier_action(
new FilesystemVerifierAction());
@@ -522,4 +591,153 @@
update_completed_on_boot_id == boot_id);
}
+// Collect and report the Android metrics when we terminate the update.
+void UpdateAttempterAndroid::CollectAndReportUpdateMetricsOnUpdateFinished(
+ ErrorCode error_code) {
+ int64_t attempt_number =
+ metrics_utils::GetPersistedValue(kPrefsPayloadAttemptNumber, prefs_);
+ PayloadType payload_type = kPayloadTypeFull;
+ int64_t payload_size = 0;
+ for (const auto& p : install_plan_.payloads) {
+ if (p.type == InstallPayloadType::kDelta)
+ payload_type = kPayloadTypeDelta;
+ payload_size += p.size;
+ }
+
+ metrics::AttemptResult attempt_result =
+ metrics_utils::GetAttemptResult(error_code);
+ Time attempt_start_time = Time::FromInternalValue(
+ metrics_utils::GetPersistedValue(kPrefsUpdateTimestampStart, prefs_));
+ TimeDelta duration = clock_->GetBootTime() - attempt_start_time;
+ TimeDelta duration_uptime = clock_->GetMonotonicTime() - attempt_start_time;
+
+ metrics_reporter_->ReportUpdateAttemptMetrics(
+ nullptr, // system_state
+ static_cast<int>(attempt_number),
+ payload_type,
+ duration,
+ duration_uptime,
+ payload_size,
+ attempt_result,
+ error_code);
+
+ int64_t current_bytes_downloaded =
+ metrics_utils::GetPersistedValue(kPrefsCurrentBytesDownloaded, prefs_);
+ metrics_reporter_->ReportUpdateAttemptDownloadMetrics(
+ current_bytes_downloaded,
+ 0,
+ DownloadSource::kNumDownloadSources,
+ metrics::DownloadErrorCode::kUnset,
+ metrics::ConnectionType::kUnset);
+
+ if (error_code == ErrorCode::kSuccess) {
+ int64_t reboot_count =
+ metrics_utils::GetPersistedValue(kPrefsNumReboots, prefs_);
+ string build_version;
+ prefs_->GetString(kPrefsPreviousVersion, &build_version);
+
+ // For Android metrics, we only care about the total bytes downloaded
+ // for all sources; for now we assume the only download source is
+ // HttpsServer.
+ int64_t total_bytes_downloaded =
+ metrics_utils::GetPersistedValue(kPrefsTotalBytesDownloaded, prefs_);
+ int64_t num_bytes_downloaded[kNumDownloadSources] = {};
+ num_bytes_downloaded[DownloadSource::kDownloadSourceHttpsServer] =
+ total_bytes_downloaded;
+
+ int download_overhead_percentage = 0;
+ if (current_bytes_downloaded > 0) {
+ download_overhead_percentage =
+ (total_bytes_downloaded - current_bytes_downloaded) * 100ull /
+ current_bytes_downloaded;
+ }
+ metrics_reporter_->ReportSuccessfulUpdateMetrics(
+ static_cast<int>(attempt_number),
+ 0, // update abandoned count
+ payload_type,
+ payload_size,
+ num_bytes_downloaded,
+ download_overhead_percentage,
+ duration,
+ static_cast<int>(reboot_count),
+ 0); // url_switch_count
+ }
+}
+
+void UpdateAttempterAndroid::UpdatePrefsAndReportUpdateMetricsOnReboot() {
+ string current_boot_id;
+ TEST_AND_RETURN(utils::GetBootId(&current_boot_id));
+ // Example: [ro.build.version.incremental]: [4292972]
+ string current_version =
+ android::base::GetProperty("ro.build.version.incremental", "");
+ TEST_AND_RETURN(!current_version.empty());
+
+ // If there's no record of the previous version (e.g. due to a data wipe),
+ // save the info of the current boot and skip the metrics report.
+ if (!prefs_->Exists(kPrefsPreviousVersion)) {
+ prefs_->SetString(kPrefsBootId, current_boot_id);
+ prefs_->SetString(kPrefsPreviousVersion, current_version);
+ ClearMetricsPrefs();
+ return;
+ }
+ string previous_version;
+ // update_engine restarted under the same build.
+ // TODO(xunchang) identify and report rollback by checking UpdateMarker.
+ if (prefs_->GetString(kPrefsPreviousVersion, &previous_version) &&
+ previous_version == current_version) {
+ string last_boot_id;
+ bool is_reboot = prefs_->Exists(kPrefsBootId) &&
+ (prefs_->GetString(kPrefsBootId, &last_boot_id) &&
+ last_boot_id != current_boot_id);
+ // Increment the reboot number if |kPrefsNumReboots| exists. That pref is
+ // set when we start a new update.
+ if (is_reboot && prefs_->Exists(kPrefsNumReboots)) {
+ prefs_->SetString(kPrefsBootId, current_boot_id);
+ int64_t reboot_count =
+ metrics_utils::GetPersistedValue(kPrefsNumReboots, prefs_);
+ metrics_utils::SetNumReboots(reboot_count + 1, prefs_);
+ }
+ return;
+ }
+
+ // Now that the build version has changed, report the update metrics.
+ // TODO(xunchang) check the build version is larger than the previous one.
+ prefs_->SetString(kPrefsBootId, current_boot_id);
+ prefs_->SetString(kPrefsPreviousVersion, current_version);
+
+ bool previous_attempt_exists = prefs_->Exists(kPrefsPayloadAttemptNumber);
+ // |kPrefsPayloadAttemptNumber| should be cleared upon successful update.
+ if (previous_attempt_exists) {
+ metrics_reporter_->ReportAbnormallyTerminatedUpdateAttemptMetrics();
+ }
+
+ metrics_utils::LoadAndReportTimeToReboot(
+ metrics_reporter_.get(), prefs_, clock_.get());
+ ClearMetricsPrefs();
+}
+
+// Save the update start time. Reset the reboot count and attempt number if the
+// update isn't a resume; otherwise increment the attempt number.
+void UpdateAttempterAndroid::UpdatePrefsOnUpdateStart(bool is_resume) {
+ if (!is_resume) {
+ metrics_utils::SetNumReboots(0, prefs_);
+ metrics_utils::SetPayloadAttemptNumber(1, prefs_);
+ } else {
+ int64_t attempt_number =
+ metrics_utils::GetPersistedValue(kPrefsPayloadAttemptNumber, prefs_);
+ metrics_utils::SetPayloadAttemptNumber(attempt_number + 1, prefs_);
+ }
+ Time update_start_time = clock_->GetMonotonicTime();
+ metrics_utils::SetUpdateTimestampStart(update_start_time, prefs_);
+}
+
+void UpdateAttempterAndroid::ClearMetricsPrefs() {
+ CHECK(prefs_);
+ prefs_->Delete(kPrefsCurrentBytesDownloaded);
+ prefs_->Delete(kPrefsNumReboots);
+ prefs_->Delete(kPrefsPayloadAttemptNumber);
+ prefs_->Delete(kPrefsSystemUpdatedMarker);
+ prefs_->Delete(kPrefsUpdateTimestampStart);
+}
+
} // namespace chromeos_update_engine
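
Two of the prefs used above drive the success-path reporting: kPrefsCurrentBytesDownloaded counts the bytes fetched for the current payload, while kPrefsTotalBytesDownloaded keeps accumulating across attempts until an update finally succeeds, so their difference captures the cost of abandoned or resumed attempts. A worked example of the overhead percentage computed in CollectAndReportUpdateMetricsOnUpdateFinished(); this is plain arithmetic only, not the reporting API:

def download_overhead_percentage(total_bytes, current_bytes):
    # Extra bytes fetched relative to the bytes the applied payload needed.
    if current_bytes <= 0:
        return 0
    return (total_bytes - current_bytes) * 100 // current_bytes

# E.g. 1.5 GB fetched in total for a payload that needed 1.0 GB -> 50% overhead.
print(download_overhead_percentage(1500000000, 1000000000))  # prints 50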
diff --git a/update_attempter_android.h b/update_attempter_android.h
index 167191e..28bf90a 100644
--- a/update_attempter_android.h
+++ b/update_attempter_android.h
@@ -28,9 +28,12 @@
#include "update_engine/client_library/include/update_engine/update_status.h"
#include "update_engine/common/action_processor.h"
#include "update_engine/common/boot_control_interface.h"
+#include "update_engine/common/clock.h"
#include "update_engine/common/hardware_interface.h"
#include "update_engine/common/prefs_interface.h"
#include "update_engine/daemon_state_interface.h"
+#include "update_engine/metrics_reporter_interface.h"
+#include "update_engine/metrics_utils.h"
#include "update_engine/network_selector_interface.h"
#include "update_engine/payload_consumer/download_action.h"
#include "update_engine/payload_consumer/postinstall_runner_action.h"
@@ -86,6 +89,8 @@
void ProgressUpdate(double progress) override;
private:
+ friend class UpdateAttempterAndroidTest;
+
// Asynchronously marks the current slot as successful if needed. If already
// marked as good, CompleteUpdateBootFlags() is called starting the action
// processor.
@@ -117,6 +122,41 @@
// Returns whether an update was completed in the current boot.
bool UpdateCompletedOnThisBoot();
+ // Prefs used for metrics reporting:
+ // |kPrefsPayloadAttemptNumber|: number of update attempts for the current
+ // payload_id.
+ // |kPrefsNumReboots|: number of reboots when applying the current update.
+ // |kPrefsSystemUpdatedMarker|: end timestamp of the last successful update.
+ // |kPrefsUpdateTimestampStart|: start timestamp of the current update.
+ // |kPrefsCurrentBytesDownloaded|: number of bytes downloaded for the current
+ // payload_id.
+ // |kPrefsTotalBytesDownloaded|: number of bytes downloaded in total since
+ // the last successful update.
+
+ // Metrics report function to call:
+ // |ReportUpdateAttemptMetrics|
+ // |ReportSuccessfulUpdateMetrics|
+ // Prefs to update:
+ // |kPrefsSystemUpdatedMarker|
+ void CollectAndReportUpdateMetricsOnUpdateFinished(ErrorCode error_code);
+
+ // Metrics report function to call:
+ // |ReportAbnormallyTerminatedUpdateAttemptMetrics|
+ // |ReportTimeToRebootMetrics|
+ // Prefs to update:
+ // |kPrefsBootId|, |kPrefsPreviousVersion|
+ void UpdatePrefsAndReportUpdateMetricsOnReboot();
+
+ // Prefs to update:
+ // |kPrefsPayloadAttemptNumber|, |kPrefsUpdateTimestampStart|
+ void UpdatePrefsOnUpdateStart(bool is_resume);
+
+ // Prefs to delete:
+ // |kPrefsNumReboots|, |kPrefsPayloadAttemptNumber|,
+ // |kPrefsSystemUpdatedMarker|, |kPrefsUpdateTimestampStart|,
+ // |kPrefsCurrentBytesDownloaded|
+ void ClearMetricsPrefs();
+
DaemonStateInterface* daemon_state_;
// DaemonStateAndroid pointers.
@@ -162,6 +202,10 @@
// before applying an update to the other slot.
bool updated_boot_flags_ = false;
+ std::unique_ptr<ClockInterface> clock_;
+
+ std::unique_ptr<MetricsReporterInterface> metrics_reporter_;
+
DISALLOW_COPY_AND_ASSIGN(UpdateAttempterAndroid);
};
diff --git a/update_attempter_android_unittest.cc b/update_attempter_android_unittest.cc
new file mode 100644
index 0000000..94452df
--- /dev/null
+++ b/update_attempter_android_unittest.cc
@@ -0,0 +1,209 @@
+//
+// Copyright (C) 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/update_attempter_android.h"
+
+#include <memory>
+#include <string>
+
+#include <android-base/properties.h>
+#include <base/time/time.h>
+#include <gtest/gtest.h>
+
+#include "update_engine/common/fake_boot_control.h"
+#include "update_engine/common/fake_clock.h"
+#include "update_engine/common/fake_hardware.h"
+#include "update_engine/common/fake_prefs.h"
+#include "update_engine/common/mock_action_processor.h"
+#include "update_engine/common/test_utils.h"
+#include "update_engine/common/utils.h"
+#include "update_engine/daemon_state_android.h"
+#include "update_engine/mock_metrics_reporter.h"
+
+using base::Time;
+using base::TimeDelta;
+using testing::_;
+using update_engine::UpdateStatus;
+
+namespace chromeos_update_engine {
+
+class UpdateAttempterAndroidTest : public ::testing::Test {
+ protected:
+ UpdateAttempterAndroidTest() = default;
+
+ void SetUp() override {
+ clock_ = new FakeClock();
+ metrics_reporter_ = new testing::NiceMock<MockMetricsReporter>();
+ update_attempter_android_.metrics_reporter_.reset(metrics_reporter_);
+ update_attempter_android_.clock_.reset(clock_);
+ update_attempter_android_.processor_.reset(
+ new testing::NiceMock<MockActionProcessor>());
+ }
+
+ void SetUpdateStatus(update_engine::UpdateStatus status) {
+ update_attempter_android_.status_ = status;
+ }
+
+ UpdateAttempterAndroid update_attempter_android_{
+ &daemon_state_, &prefs_, &boot_control_, &hardware_};
+
+ DaemonStateAndroid daemon_state_;
+ FakePrefs prefs_;
+ FakeBootControl boot_control_;
+ FakeHardware hardware_;
+
+ FakeClock* clock_;
+ testing::NiceMock<MockMetricsReporter>* metrics_reporter_;
+};
+
+TEST_F(UpdateAttempterAndroidTest, UpdatePrefsSameBuildVersionOnInit) {
+ std::string build_version =
+ android::base::GetProperty("ro.build.version.incremental", "");
+ prefs_.SetString(kPrefsPreviousVersion, build_version);
+ prefs_.SetString(kPrefsBootId, "oldboot");
+ prefs_.SetInt64(kPrefsNumReboots, 1);
+
+ EXPECT_CALL(*metrics_reporter_, ReportTimeToReboot(_)).Times(0);
+ update_attempter_android_.Init();
+
+ // Check that the boot_id and reboot_count are updated.
+ std::string boot_id;
+ utils::GetBootId(&boot_id);
+ EXPECT_TRUE(prefs_.Exists(kPrefsBootId));
+ std::string prefs_boot_id;
+ EXPECT_TRUE(prefs_.GetString(kPrefsBootId, &prefs_boot_id));
+ EXPECT_EQ(boot_id, prefs_boot_id);
+
+ EXPECT_TRUE(prefs_.Exists(kPrefsNumReboots));
+ int64_t reboot_count;
+ EXPECT_TRUE(prefs_.GetInt64(kPrefsNumReboots, &reboot_count));
+ EXPECT_EQ(2, reboot_count);
+}
+
+TEST_F(UpdateAttempterAndroidTest, UpdatePrefsBuildVersionChangeOnInit) {
+ prefs_.SetString(kPrefsPreviousVersion, "00001"); // Set the fake version
+ prefs_.SetInt64(kPrefsPayloadAttemptNumber, 1);
+ prefs_.SetInt64(kPrefsSystemUpdatedMarker, 23456);
+
+ EXPECT_CALL(*metrics_reporter_,
+ ReportAbnormallyTerminatedUpdateAttemptMetrics())
+ .Times(1);
+
+ Time now = Time::FromInternalValue(34456);
+ clock_->SetMonotonicTime(now);
+ TimeDelta duration = now - Time::FromInternalValue(23456);
+ EXPECT_CALL(*metrics_reporter_, ReportTimeToReboot(duration.InMinutes()))
+ .Times(1);
+
+ update_attempter_android_.Init();
+ // Check that we reset the metric prefs.
+ EXPECT_FALSE(prefs_.Exists(kPrefsNumReboots));
+ EXPECT_FALSE(prefs_.Exists(kPrefsPayloadAttemptNumber));
+ EXPECT_FALSE(prefs_.Exists(kPrefsUpdateTimestampStart));
+ EXPECT_FALSE(prefs_.Exists(kPrefsSystemUpdatedMarker));
+}
+
+TEST_F(UpdateAttempterAndroidTest, ReportMetricsOnUpdateTerminated) {
+ prefs_.SetInt64(kPrefsNumReboots, 3);
+ prefs_.SetInt64(kPrefsPayloadAttemptNumber, 2);
+ prefs_.SetString(kPrefsPreviousVersion, "56789");
+ prefs_.SetInt64(kPrefsUpdateTimestampStart, 12345);
+
+ Time boot_time = Time::FromInternalValue(22345);
+ Time up_time = Time::FromInternalValue(21345);
+ clock_->SetBootTime(boot_time);
+ clock_->SetMonotonicTime(up_time);
+ TimeDelta duration = boot_time - Time::FromInternalValue(12345);
+ TimeDelta duration_uptime = up_time - Time::FromInternalValue(12345);
+ EXPECT_CALL(
+ *metrics_reporter_,
+ ReportUpdateAttemptMetrics(_,
+ 2,
+ _,
+ duration,
+ duration_uptime,
+ _,
+ metrics::AttemptResult::kUpdateSucceeded,
+ ErrorCode::kSuccess))
+ .Times(1);
+ EXPECT_CALL(*metrics_reporter_,
+ ReportSuccessfulUpdateMetrics(2, 0, _, _, _, _, duration, 3, _))
+ .Times(1);
+
+ SetUpdateStatus(UpdateStatus::UPDATE_AVAILABLE);
+ update_attempter_android_.ProcessingDone(nullptr, ErrorCode::kSuccess);
+
+ EXPECT_FALSE(prefs_.Exists(kPrefsNumReboots));
+ EXPECT_FALSE(prefs_.Exists(kPrefsPayloadAttemptNumber));
+ EXPECT_FALSE(prefs_.Exists(kPrefsUpdateTimestampStart));
+ EXPECT_TRUE(prefs_.Exists(kPrefsSystemUpdatedMarker));
+}
+
+TEST_F(UpdateAttempterAndroidTest, ReportMetricsForBytesDownloaded) {
+ // Check both prefs are updated correctly.
+ update_attempter_android_.BytesReceived(20, 50, 200);
+ EXPECT_EQ(
+ 20,
+ metrics_utils::GetPersistedValue(kPrefsCurrentBytesDownloaded, &prefs_));
+ EXPECT_EQ(
+ 20,
+ metrics_utils::GetPersistedValue(kPrefsTotalBytesDownloaded, &prefs_));
+
+ EXPECT_CALL(*metrics_reporter_,
+ ReportUpdateAttemptDownloadMetrics(50, _, _, _, _))
+ .Times(1);
+ EXPECT_CALL(*metrics_reporter_,
+ ReportUpdateAttemptDownloadMetrics(40, _, _, _, _))
+ .Times(1);
+
+ int64_t total_bytes[kNumDownloadSources] = {};
+ total_bytes[kDownloadSourceHttpsServer] = 90;
+ EXPECT_CALL(*metrics_reporter_,
+ ReportSuccessfulUpdateMetrics(
+ _,
+ _,
+ _,
+ _,
+ test_utils::DownloadSourceMatcher(total_bytes),
+ 125,
+ _,
+ _,
+ _))
+ .Times(1);
+
+ // The first update fails after receiving 50 bytes in total.
+ update_attempter_android_.BytesReceived(30, 50, 200);
+ update_attempter_android_.ProcessingDone(nullptr, ErrorCode::kError);
+ EXPECT_EQ(
+ 0,
+ metrics_utils::GetPersistedValue(kPrefsCurrentBytesDownloaded, &prefs_));
+ EXPECT_EQ(
+ 50,
+ metrics_utils::GetPersistedValue(kPrefsTotalBytesDownloaded, &prefs_));
+
+ // The second update succeeds after receiving 40 bytes, which leads to an
+ // overhead of 50 / 40 = 125%.
+ update_attempter_android_.BytesReceived(40, 40, 50);
+ update_attempter_android_.ProcessingDone(nullptr, ErrorCode::kSuccess);
+ // Both prefs should be cleared.
+ EXPECT_EQ(
+ 0,
+ metrics_utils::GetPersistedValue(kPrefsCurrentBytesDownloaded, &prefs_));
+ EXPECT_EQ(
+ 0, metrics_utils::GetPersistedValue(kPrefsTotalBytesDownloaded, &prefs_));
+}
+
+} // namespace chromeos_update_engine
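The 125% figure expected by ReportMetricsForBytesDownloaded above can be reproduced with a short worked example. This is a reading of the test's arithmetic rather than the metrics code itself: overhead is taken as the bytes downloaded beyond the successful payload, relative to the payload size.

// Worked example of the download-overhead percentage the test expects.
#include <cstdint>
#include <iostream>

int64_t DownloadOverheadPercent(int64_t total_bytes_downloaded,
                                int64_t payload_size) {
  // Bytes that did not end up in the applied payload, as a percentage of the
  // payload size.
  return (total_bytes_downloaded - payload_size) * 100 / payload_size;
}

int main() {
  // The failed first attempt downloads 20 + 30 = 50 bytes; the successful
  // second attempt downloads its full 40-byte payload, for 90 bytes in total.
  std::cout << DownloadOverheadPercent(90, 40) << "%" << std::endl;  // 125%
  return 0;
}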
diff --git a/update_attempter_unittest.cc b/update_attempter_unittest.cc
index 4928477..240e4ec 100644
--- a/update_attempter_unittest.cc
+++ b/update_attempter_unittest.cc
@@ -22,8 +22,6 @@
#include <base/files/file_util.h>
#include <base/message_loop/message_loop.h>
-#include <brillo/bind_lambda.h>
-#include <brillo/make_unique_ptr.h>
#include <brillo/message_loops/base_message_loop.h>
#include <brillo/message_loops/message_loop.h>
#include <brillo/message_loops/message_loop_utils.h>
@@ -31,10 +29,6 @@
#include <policy/libpolicy.h>
#include <policy/mock_device_policy.h>
-#if USE_LIBCROS
-#include "network_proxy/dbus-proxies.h"
-#include "network_proxy/dbus-proxy-mocks.h"
-#endif // USE_LIBCROS
#include "update_engine/common/fake_clock.h"
#include "update_engine/common/fake_prefs.h"
#include "update_engine/common/mock_action.h"
@@ -48,24 +42,21 @@
#include "update_engine/fake_system_state.h"
#include "update_engine/mock_p2p_manager.h"
#include "update_engine/mock_payload_state.h"
+#include "update_engine/mock_service_observer.h"
#include "update_engine/payload_consumer/filesystem_verifier_action.h"
#include "update_engine/payload_consumer/install_plan.h"
#include "update_engine/payload_consumer/payload_constants.h"
#include "update_engine/payload_consumer/postinstall_runner_action.h"
-namespace org {
-namespace chromium {
-class NetworkProxyServiceInterfaceProxyMock;
-} // namespace chromium
-} // namespace org
-
using base::Time;
using base::TimeDelta;
-using org::chromium::NetworkProxyServiceInterfaceProxyInterface;
-using org::chromium::NetworkProxyServiceInterfaceProxyMock;
+using chromeos_update_manager::EvalStatus;
+using chromeos_update_manager::UpdateCheckParams;
using std::string;
using std::unique_ptr;
+using testing::_;
using testing::DoAll;
+using testing::Field;
using testing::InSequence;
using testing::Ne;
using testing::NiceMock;
@@ -73,8 +64,9 @@
using testing::Return;
using testing::ReturnPointee;
using testing::SaveArg;
-using testing::SetArgumentPointee;
-using testing::_;
+using testing::SetArgPointee;
+using update_engine::UpdateAttemptFlags;
+using update_engine::UpdateEngineStatus;
using update_engine::UpdateStatus;
namespace chromeos_update_engine {
@@ -84,10 +76,8 @@
// methods.
class UpdateAttempterUnderTest : public UpdateAttempter {
public:
- UpdateAttempterUnderTest(
- SystemState* system_state,
- NetworkProxyServiceInterfaceProxyInterface* network_proxy_service_proxy)
- : UpdateAttempter(system_state, nullptr, network_proxy_service_proxy) {}
+ explicit UpdateAttempterUnderTest(SystemState* system_state)
+ : UpdateAttempter(system_state, nullptr) {}
// Wrap the update scheduling method, allowing us to opt out of scheduled
// updates for testing purposes.
@@ -136,7 +126,7 @@
EXPECT_EQ(0.0, attempter_.download_progress_);
EXPECT_EQ(0, attempter_.last_checked_time_);
EXPECT_EQ("0.0.0.0", attempter_.new_version_);
- EXPECT_EQ(0, attempter_.new_payload_size_);
+ EXPECT_EQ(0ULL, attempter_.new_payload_size_);
processor_ = new NiceMock<MockActionProcessor>();
attempter_.processor_.reset(processor_); // Transfers ownership.
prefs_ = fake_system_state_.mock_prefs();
@@ -187,13 +177,7 @@
brillo::BaseMessageLoop loop_{&base_loop_};
FakeSystemState fake_system_state_;
-#if USE_LIBCROS
- NetworkProxyServiceInterfaceProxyMock network_proxy_service_proxy_mock_;
- UpdateAttempterUnderTest attempter_{&fake_system_state_,
- &network_proxy_service_proxy_mock_};
-#else
- UpdateAttempterUnderTest attempter_{&fake_system_state_, nullptr};
-#endif // USE_LIBCROS
+ UpdateAttempterUnderTest attempter_{&fake_system_state_};
OpenSSLWrapper openssl_wrapper_;
CertificateChecker certificate_checker_;
@@ -215,10 +199,16 @@
TEST_F(UpdateAttempterTest, ActionCompletedDownloadTest) {
unique_ptr<MockHttpFetcher> fetcher(new MockHttpFetcher("", 0, nullptr));
fetcher->FailTransfer(503); // Sets the HTTP response code.
- DownloadAction action(prefs_, nullptr, nullptr, nullptr, fetcher.release());
+ DownloadAction action(prefs_,
+ nullptr,
+ nullptr,
+ nullptr,
+ fetcher.release(),
+ false /* is_interactive */);
EXPECT_CALL(*prefs_, GetInt64(kPrefsDeltaUpdateFailures, _)).Times(0);
attempter_.ActionCompleted(nullptr, &action, ErrorCode::kSuccess);
EXPECT_EQ(UpdateStatus::FINALIZING, attempter_.status());
+ EXPECT_EQ(0.0, attempter_.download_progress_);
ASSERT_EQ(nullptr, attempter_.error_event_.get());
}
@@ -232,6 +222,86 @@
ASSERT_NE(nullptr, attempter_.error_event_.get());
}
+TEST_F(UpdateAttempterTest, DownloadProgressAccumulationTest) {
+ // Simple test case, where all the values match (nothing was skipped)
+ uint64_t bytes_progressed_1 = 1024 * 1024; // 1MB
+ uint64_t bytes_progressed_2 = 1024 * 1024; // 1MB
+ uint64_t bytes_received_1 = bytes_progressed_1;
+ uint64_t bytes_received_2 = bytes_received_1 + bytes_progressed_2;
+ uint64_t bytes_total = 20 * 1024 * 1024; // 20MB
+
+ double progress_1 =
+ static_cast<double>(bytes_received_1) / static_cast<double>(bytes_total);
+ double progress_2 =
+ static_cast<double>(bytes_received_2) / static_cast<double>(bytes_total);
+
+ EXPECT_EQ(0.0, attempter_.download_progress_);
+ // This is set via inspecting the InstallPlan payloads when the
+ // OmahaResponseAction is completed
+ attempter_.new_payload_size_ = bytes_total;
+ NiceMock<MockServiceObserver> observer;
+ EXPECT_CALL(observer,
+ SendStatusUpdate(AllOf(
+ Field(&UpdateEngineStatus::progress, progress_1),
+ Field(&UpdateEngineStatus::status, UpdateStatus::DOWNLOADING),
+ Field(&UpdateEngineStatus::new_size_bytes, bytes_total))));
+ EXPECT_CALL(observer,
+ SendStatusUpdate(AllOf(
+ Field(&UpdateEngineStatus::progress, progress_2),
+ Field(&UpdateEngineStatus::status, UpdateStatus::DOWNLOADING),
+ Field(&UpdateEngineStatus::new_size_bytes, bytes_total))));
+ attempter_.AddObserver(&observer);
+ attempter_.BytesReceived(bytes_progressed_1, bytes_received_1, bytes_total);
+ EXPECT_EQ(progress_1, attempter_.download_progress_);
+ // This iteration validates that a later set of updates to the variables is
+ // properly handled (so that |getStatus()| will return the same progress
+ // info as the callback is receiving).
+ attempter_.BytesReceived(bytes_progressed_2, bytes_received_2, bytes_total);
+ EXPECT_EQ(progress_2, attempter_.download_progress_);
+}
+
+TEST_F(UpdateAttempterTest, ChangeToDownloadingOnReceivedBytesTest) {
+ // The transition into UpdateStatus::DOWNLOADING happens when the
+ // first bytes are received.
+ uint64_t bytes_progressed = 1024 * 1024; // 1MB
+ uint64_t bytes_received = 2 * 1024 * 1024; // 2MB
+ uint64_t bytes_total = 20 * 1024 * 1024; // 20MB
+ attempter_.status_ = UpdateStatus::CHECKING_FOR_UPDATE;
+ // This is set via inspecting the InstallPlan payloads when the
+ // OmahaResponseAction is completed
+ attempter_.new_payload_size_ = bytes_total;
+ EXPECT_EQ(0.0, attempter_.download_progress_);
+ NiceMock<MockServiceObserver> observer;
+ EXPECT_CALL(observer,
+ SendStatusUpdate(AllOf(
+ Field(&UpdateEngineStatus::status, UpdateStatus::DOWNLOADING),
+ Field(&UpdateEngineStatus::new_size_bytes, bytes_total))));
+ attempter_.AddObserver(&observer);
+ attempter_.BytesReceived(bytes_progressed, bytes_received, bytes_total);
+ EXPECT_EQ(UpdateStatus::DOWNLOADING, attempter_.status_);
+}
+
+TEST_F(UpdateAttempterTest, BroadcastCompleteDownloadTest) {
+ // There is a special case to ensure that at 100% downloaded,
+ // download_progress_ is updated and that value broadcast. This test confirms
+ // that.
+ uint64_t bytes_progressed = 0; // ignored
+ uint64_t bytes_received = 5 * 1024 * 1024; // ignored
+ uint64_t bytes_total = 5 * 1024 * 1024; // 5MB
+ attempter_.status_ = UpdateStatus::DOWNLOADING;
+ attempter_.new_payload_size_ = bytes_total;
+ EXPECT_EQ(0.0, attempter_.download_progress_);
+ NiceMock<MockServiceObserver> observer;
+ EXPECT_CALL(observer,
+ SendStatusUpdate(AllOf(
+ Field(&UpdateEngineStatus::progress, 1.0),
+ Field(&UpdateEngineStatus::status, UpdateStatus::DOWNLOADING),
+ Field(&UpdateEngineStatus::new_size_bytes, bytes_total))));
+ attempter_.AddObserver(&observer);
+ attempter_.BytesReceived(bytes_progressed, bytes_received, bytes_total);
+ EXPECT_EQ(1.0, attempter_.download_progress_);
+}
+
TEST_F(UpdateAttempterTest, ActionCompletedOmahaRequestTest) {
unique_ptr<MockHttpFetcher> fetcher(new MockHttpFetcher("", 0, nullptr));
fetcher->FailTransfer(500); // Sets the HTTP response code.
@@ -298,13 +368,13 @@
EXPECT_TRUE(attempter_.omaha_request_params_->delta_okay());
EXPECT_CALL(*prefs_, GetInt64(kPrefsDeltaUpdateFailures, _))
.WillOnce(DoAll(
- SetArgumentPointee<1>(UpdateAttempter::kMaxDeltaUpdateFailures - 1),
+ SetArgPointee<1>(UpdateAttempter::kMaxDeltaUpdateFailures - 1),
Return(true)));
attempter_.DisableDeltaUpdateIfNeeded();
EXPECT_TRUE(attempter_.omaha_request_params_->delta_okay());
EXPECT_CALL(*prefs_, GetInt64(kPrefsDeltaUpdateFailures, _))
.WillOnce(DoAll(
- SetArgumentPointee<1>(UpdateAttempter::kMaxDeltaUpdateFailures),
+ SetArgPointee<1>(UpdateAttempter::kMaxDeltaUpdateFailures),
Return(true)));
attempter_.DisableDeltaUpdateIfNeeded();
EXPECT_FALSE(attempter_.omaha_request_params_->delta_okay());
@@ -316,10 +386,10 @@
TEST_F(UpdateAttempterTest, MarkDeltaUpdateFailureTest) {
EXPECT_CALL(*prefs_, GetInt64(kPrefsDeltaUpdateFailures, _))
.WillOnce(Return(false))
- .WillOnce(DoAll(SetArgumentPointee<1>(-1), Return(true)))
- .WillOnce(DoAll(SetArgumentPointee<1>(1), Return(true)))
+ .WillOnce(DoAll(SetArgPointee<1>(-1), Return(true)))
+ .WillOnce(DoAll(SetArgPointee<1>(1), Return(true)))
.WillOnce(DoAll(
- SetArgumentPointee<1>(UpdateAttempter::kMaxDeltaUpdateFailures),
+ SetArgPointee<1>(UpdateAttempter::kMaxDeltaUpdateFailures),
Return(true)));
EXPECT_CALL(*prefs_, SetInt64(Ne(kPrefsDeltaUpdateFailures), _))
.WillRepeatedly(Return(true));
@@ -386,10 +456,11 @@
// Expect that the device policy is loaded by the UpdateAttempter at some
// point by calling RefreshDevicePolicy.
- policy::MockDevicePolicy* device_policy = new policy::MockDevicePolicy();
- attempter_.policy_provider_.reset(new policy::PolicyProvider(device_policy));
+ auto device_policy = std::make_unique<policy::MockDevicePolicy>();
EXPECT_CALL(*device_policy, LoadPolicy())
.Times(testing::AtLeast(1)).WillRepeatedly(Return(true));
+ attempter_.policy_provider_.reset(
+ new policy::PolicyProvider(std::move(device_policy)));
{
InSequence s;
@@ -428,11 +499,23 @@
void UpdateAttempterTest::RollbackTestStart(
bool enterprise_rollback, bool valid_slot) {
// Create a device policy so that we can change settings.
- policy::MockDevicePolicy* device_policy = new policy::MockDevicePolicy();
- attempter_.policy_provider_.reset(new policy::PolicyProvider(device_policy));
-
+ auto device_policy = std::make_unique<policy::MockDevicePolicy>();
EXPECT_CALL(*device_policy, LoadPolicy()).WillRepeatedly(Return(true));
- fake_system_state_.set_device_policy(device_policy);
+ fake_system_state_.set_device_policy(device_policy.get());
+ if (enterprise_rollback) {
+ // We return an empty owner as this is an enterprise.
+ EXPECT_CALL(*device_policy, GetOwner(_)).WillRepeatedly(
+ DoAll(SetArgPointee<0>(string("")),
+ Return(true)));
+ } else {
+ // We return a fake owner as this is an owned consumer device.
+ EXPECT_CALL(*device_policy, GetOwner(_)).WillRepeatedly(
+ DoAll(SetArgPointee<0>(string("fake.mail@fake.com")),
+ Return(true)));
+ }
+
+ attempter_.policy_provider_.reset(
+ new policy::PolicyProvider(std::move(device_policy)));
if (valid_slot) {
BootControlInterface::Slot rollback_slot = 1;
@@ -450,18 +533,6 @@
is_rollback_allowed = true;
}
- if (enterprise_rollback) {
- // We return an empty owner as this is an enterprise.
- EXPECT_CALL(*device_policy, GetOwner(_)).WillRepeatedly(
- DoAll(SetArgumentPointee<0>(string("")),
- Return(true)));
- } else {
- // We return a fake owner as this is an owned consumer device.
- EXPECT_CALL(*device_policy, GetOwner(_)).WillRepeatedly(
- DoAll(SetArgumentPointee<0>(string("fake.mail@fake.com")),
- Return(true)));
- }
-
if (is_rollback_allowed) {
InSequence s;
for (size_t i = 0; i < arraysize(kRollbackActionTypes); ++i) {
@@ -732,17 +803,18 @@
void UpdateAttempterTest::ReadScatterFactorFromPolicyTestStart() {
int64_t scatter_factor_in_seconds = 36000;
- policy::MockDevicePolicy* device_policy = new policy::MockDevicePolicy();
- attempter_.policy_provider_.reset(new policy::PolicyProvider(device_policy));
-
+ auto device_policy = std::make_unique<policy::MockDevicePolicy>();
EXPECT_CALL(*device_policy, LoadPolicy()).WillRepeatedly(Return(true));
- fake_system_state_.set_device_policy(device_policy);
+ fake_system_state_.set_device_policy(device_policy.get());
EXPECT_CALL(*device_policy, GetScatterFactorInSeconds(_))
.WillRepeatedly(DoAll(
- SetArgumentPointee<0>(scatter_factor_in_seconds),
+ SetArgPointee<0>(scatter_factor_in_seconds),
Return(true)));
+ attempter_.policy_provider_.reset(
+ new policy::PolicyProvider(std::move(device_policy)));
+
attempter_.Update("", "", "", "", false, false);
EXPECT_EQ(scatter_factor_in_seconds, attempter_.scatter_factor_.InSeconds());
@@ -770,17 +842,18 @@
int64_t scatter_factor_in_seconds = 10;
- policy::MockDevicePolicy* device_policy = new policy::MockDevicePolicy();
- attempter_.policy_provider_.reset(new policy::PolicyProvider(device_policy));
-
+ auto device_policy = std::make_unique<policy::MockDevicePolicy>();
EXPECT_CALL(*device_policy, LoadPolicy()).WillRepeatedly(Return(true));
- fake_system_state_.set_device_policy(device_policy);
+ fake_system_state_.set_device_policy(device_policy.get());
EXPECT_CALL(*device_policy, GetScatterFactorInSeconds(_))
.WillRepeatedly(DoAll(
- SetArgumentPointee<0>(scatter_factor_in_seconds),
+ SetArgPointee<0>(scatter_factor_in_seconds),
Return(true)));
+ attempter_.policy_provider_.reset(
+ new policy::PolicyProvider(std::move(device_policy)));
+
attempter_.Update("", "", "", "", false, false);
EXPECT_EQ(scatter_factor_in_seconds, attempter_.scatter_factor_.InSeconds());
@@ -829,17 +902,18 @@
// otherwise.
int64_t scatter_factor_in_seconds = 50;
- policy::MockDevicePolicy* device_policy = new policy::MockDevicePolicy();
- attempter_.policy_provider_.reset(new policy::PolicyProvider(device_policy));
-
+ auto device_policy = std::make_unique<policy::MockDevicePolicy>();
EXPECT_CALL(*device_policy, LoadPolicy()).WillRepeatedly(Return(true));
- fake_system_state_.set_device_policy(device_policy);
+ fake_system_state_.set_device_policy(device_policy.get());
EXPECT_CALL(*device_policy, GetScatterFactorInSeconds(_))
.WillRepeatedly(DoAll(
- SetArgumentPointee<0>(scatter_factor_in_seconds),
+ SetArgPointee<0>(scatter_factor_in_seconds),
Return(true)));
+ attempter_.policy_provider_.reset(
+ new policy::PolicyProvider(std::move(device_policy)));
+
// Trigger an interactive check so we can test that scattering is disabled.
attempter_.Update("", "", "", "", false, true);
EXPECT_EQ(scatter_factor_in_seconds, attempter_.scatter_factor_.InSeconds());
@@ -958,14 +1032,14 @@
TEST_F(UpdateAttempterTest, CheckForUpdateAUTest) {
fake_system_state_.fake_hardware()->SetIsOfficialBuild(true);
fake_system_state_.fake_hardware()->SetAreDevFeaturesEnabled(false);
- attempter_.CheckForUpdate("", "autest", true);
+ attempter_.CheckForUpdate("", "autest", UpdateAttemptFlags::kNone);
EXPECT_EQ(constants::kOmahaDefaultAUTestURL, attempter_.forced_omaha_url());
}
TEST_F(UpdateAttempterTest, CheckForUpdateScheduledAUTest) {
fake_system_state_.fake_hardware()->SetIsOfficialBuild(true);
fake_system_state_.fake_hardware()->SetAreDevFeaturesEnabled(false);
- attempter_.CheckForUpdate("", "autest-scheduled", true);
+ attempter_.CheckForUpdate("", "autest-scheduled", UpdateAttemptFlags::kNone);
EXPECT_EQ(constants::kOmahaDefaultAUTestURL, attempter_.forced_omaha_url());
}
@@ -979,4 +1053,90 @@
fake_system_state_.request_params()->target_version_prefix().empty());
}
+TEST_F(UpdateAttempterTest, UpdateDeferredByPolicyTest) {
+ // Construct an OmahaResponseHandlerAction that has processed an InstallPlan,
+ // but the update is being deferred by the Policy.
+ OmahaResponseHandlerAction* response_action =
+ new OmahaResponseHandlerAction(&fake_system_state_);
+ response_action->install_plan_.version = "a.b.c.d";
+ response_action->install_plan_.system_version = "b.c.d.e";
+ response_action->install_plan_.payloads.push_back(
+ {.size = 1234ULL, .type = InstallPayloadType::kFull});
+ attempter_.response_handler_action_.reset(response_action);
+ // Inform the UpdateAttempter that the OmahaResponseHandlerAction has
+ // completed, with the deferred-update error code.
+ attempter_.ActionCompleted(
+ nullptr, response_action, ErrorCode::kOmahaUpdateDeferredPerPolicy);
+ {
+ UpdateEngineStatus status;
+ attempter_.GetStatus(&status);
+ EXPECT_EQ(UpdateStatus::UPDATE_AVAILABLE, status.status);
+ EXPECT_EQ(response_action->install_plan_.version, status.new_version);
+ EXPECT_EQ(response_action->install_plan_.system_version,
+ status.new_system_version);
+ EXPECT_EQ(response_action->install_plan_.payloads[0].size,
+ status.new_size_bytes);
+ }
+ // An "error" event should have been created to tell Omaha that the update is
+ // being deferred.
+ EXPECT_TRUE(nullptr != attempter_.error_event_);
+ EXPECT_EQ(OmahaEvent::kTypeUpdateComplete, attempter_.error_event_->type);
+ EXPECT_EQ(OmahaEvent::kResultUpdateDeferred, attempter_.error_event_->result);
+ ErrorCode expected_code = static_cast<ErrorCode>(
+ static_cast<int>(ErrorCode::kOmahaUpdateDeferredPerPolicy) |
+ static_cast<int>(ErrorCode::kTestOmahaUrlFlag));
+ EXPECT_EQ(expected_code, attempter_.error_event_->error_code);
+ // End the processing
+ attempter_.ProcessingDone(nullptr, ErrorCode::kOmahaUpdateDeferredPerPolicy);
+ // Validate the state of the attempter.
+ {
+ UpdateEngineStatus status;
+ attempter_.GetStatus(&status);
+ EXPECT_EQ(UpdateStatus::REPORTING_ERROR_EVENT, status.status);
+ EXPECT_EQ(response_action->install_plan_.version, status.new_version);
+ EXPECT_EQ(response_action->install_plan_.system_version,
+ status.new_system_version);
+ EXPECT_EQ(response_action->install_plan_.payloads[0].size,
+ status.new_size_bytes);
+ }
+}
+
+TEST_F(UpdateAttempterTest, UpdateIsNotRunningWhenUpdateAvailable) {
+ EXPECT_FALSE(attempter_.IsUpdateRunningOrScheduled());
+ // Verify in-progress update with UPDATE_AVAILABLE is running
+ attempter_.status_ = UpdateStatus::UPDATE_AVAILABLE;
+ EXPECT_TRUE(attempter_.IsUpdateRunningOrScheduled());
+}
+
+TEST_F(UpdateAttempterTest, UpdateAttemptFlagsCachedAtUpdateStart) {
+ attempter_.SetUpdateAttemptFlags(UpdateAttemptFlags::kFlagRestrictDownload);
+
+ UpdateCheckParams params = {.updates_enabled = true};
+ attempter_.OnUpdateScheduled(EvalStatus::kSucceeded, params);
+
+ EXPECT_EQ(UpdateAttemptFlags::kFlagRestrictDownload,
+ attempter_.GetCurrentUpdateAttemptFlags());
+}
+
+TEST_F(UpdateAttempterTest, InteractiveUpdateUsesPassedRestrictions) {
+ attempter_.SetUpdateAttemptFlags(UpdateAttemptFlags::kFlagRestrictDownload);
+
+ attempter_.CheckForUpdate("", "", UpdateAttemptFlags::kNone);
+ EXPECT_EQ(UpdateAttemptFlags::kNone,
+ attempter_.GetCurrentUpdateAttemptFlags());
+}
+
+TEST_F(UpdateAttempterTest, NonInteractiveUpdateUsesSetRestrictions) {
+ attempter_.SetUpdateAttemptFlags(UpdateAttemptFlags::kNone);
+
+ // This tests that when CheckForUpdate() is called with the non-interactive
+ // flag set, that it doesn't change the current UpdateAttemptFlags.
+ attempter_.CheckForUpdate("",
+ "",
+ UpdateAttemptFlags::kFlagNonInteractive |
+ UpdateAttemptFlags::kFlagRestrictDownload);
+ EXPECT_EQ(UpdateAttemptFlags::kNone,
+ attempter_.GetCurrentUpdateAttemptFlags());
+}
+
} // namespace chromeos_update_engine
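The three download tests above (progress accumulation, the switch to DOWNLOADING on the first received bytes, and the forced broadcast at 100%) can be condensed into a minimal behavioral sketch. This is not UpdateAttempter's code; the update-and-broadcast condition below is a simplification that only reflects what the tests assert.

// Behavioral sketch of BytesReceived() as exercised by the tests above.
#include <cstdint>
#include <iostream>

enum class UpdateStatus { CHECKING_FOR_UPDATE, DOWNLOADING };

struct AttempterSketch {
  UpdateStatus status = UpdateStatus::CHECKING_FOR_UPDATE;
  double download_progress = 0.0;

  void BytesReceived(uint64_t bytes_received, uint64_t bytes_total) {
    double progress = bytes_total > 0
                          ? static_cast<double>(bytes_received) /
                                static_cast<double>(bytes_total)
                          : 0.0;
    // The first received bytes move the attempter into DOWNLOADING.
    status = UpdateStatus::DOWNLOADING;
    // Record forward progress, and always record completion so that 100% is
    // broadcast even when the incremental change is negligible.
    if (progress >= 1.0 || progress > download_progress) {
      download_progress = progress;
      std::cout << "broadcast progress=" << download_progress << std::endl;
    }
  }
};

int main() {
  AttempterSketch attempter;
  attempter.BytesReceived(2 * 1024 * 1024, 20 * 1024 * 1024);  // 10%
  attempter.BytesReceived(5 * 1024 * 1024, 5 * 1024 * 1024);   // 100%
  return 0;
}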
diff --git a/update_engine.conf b/update_engine.conf
index 449e669..e3f246f 100644
--- a/update_engine.conf
+++ b/update_engine.conf
@@ -1,2 +1,2 @@
PAYLOAD_MAJOR_VERSION=2
-PAYLOAD_MINOR_VERSION=3
+PAYLOAD_MINOR_VERSION=4
diff --git a/update_engine.gyp b/update_engine.gyp
index 6b7e5f4..f312a1d 100644
--- a/update_engine.gyp
+++ b/update_engine.gyp
@@ -14,6 +14,10 @@
# limitations under the License.
#
{
+ 'variables': {
+ 'USE_chrome_network_proxy': '1',
+ 'USE_chrome_kiosk_app': '1',
+ },
'target_defaults': {
'variables': {
'deps': [
@@ -47,7 +51,8 @@
'USE_BINDER=<(USE_binder)',
'USE_DBUS=<(USE_dbus)',
'USE_HWID_OVERRIDE=<(USE_hwid_override)',
- 'USE_LIBCROS=<(USE_libcros)',
+ 'USE_CHROME_KIOSK_APP=<(USE_chrome_kiosk_app)',
+ 'USE_CHROME_NETWORK_PROXY=<(USE_chrome_network_proxy)',
'USE_MTD=<(USE_mtd)',
'USE_OMAHA=1',
'USE_SHILL=1',
@@ -80,7 +85,7 @@
},
},
'sources': [
- 'update_metadata.proto'
+ 'update_metadata.proto',
],
'includes': ['../../../platform2/common-mk/protoc.gypi'],
},
@@ -98,32 +103,19 @@
'includes': ['../../../platform2/common-mk/generate-dbus-adaptors.gypi'],
},
{
- 'target_name': 'update_engine-other-dbus-proxies',
+ 'target_name': 'update_engine-dbus-libcros-client',
'type': 'none',
- 'actions': [
- {
- 'action_name': 'update_engine-dbus-libcros-client',
- 'variables': {
- 'mock_output_file': 'include/libcros/dbus-proxy-mocks.h',
- 'proxy_output_file': 'include/libcros/dbus-proxies.h'
- },
- 'sources': [
- 'dbus_bindings/org.chromium.LibCrosService.dbus-xml',
- ],
- 'includes': ['../../../platform2/common-mk/generate-dbus-proxies.gypi'],
+ 'actions': [{
+ 'action_name': 'update_engine-dbus-libcros-client-action',
+ 'variables': {
+ 'mock_output_file': 'include/libcros/dbus-proxy-mocks.h',
+ 'proxy_output_file': 'include/libcros/dbus-proxies.h',
},
- {
- 'action_name': 'update_engine-dbus-network_proxy-client',
- 'variables': {
- 'mock_output_file': 'include/network_proxy/dbus-proxy-mocks.h',
- 'proxy_output_file': 'include/network_proxy/dbus-proxies.h'
- },
- 'sources': [
- 'dbus_bindings/org.chromium.NetworkProxyService.dbus-xml',
- ],
- 'includes': ['../../../platform2/common-mk/generate-dbus-proxies.gypi'],
- },
- ],
+ 'sources': [
+ 'dbus_bindings/org.chromium.LibCrosService.dbus-xml',
+ ],
+ 'includes': ['../../../platform2/common-mk/generate-dbus-proxies.gypi'],
+ }],
},
# The payload application component and common dependencies.
{
@@ -138,6 +130,7 @@
'exported_deps': [
'libcrypto',
'xz-embedded',
+ 'libpuffpatch',
],
'deps': ['<@(exported_deps)'],
},
@@ -178,10 +171,13 @@
'common/terminator.cc',
'common/utils.cc',
'payload_consumer/bzip_extent_writer.cc',
+ 'payload_consumer/cached_file_descriptor.cc',
'payload_consumer/delta_performer.cc',
'payload_consumer/download_action.cc',
+ 'payload_consumer/extent_reader.cc',
'payload_consumer/extent_writer.cc',
'payload_consumer/file_descriptor.cc',
+ 'payload_consumer/file_descriptor_utils.cc',
'payload_consumer/file_writer.cc',
'payload_consumer/filesystem_verifier_action.cc',
'payload_consumer/install_plan.cc',
@@ -212,7 +208,6 @@
'libpayload_consumer',
'update_metadata-protos',
'update_engine-dbus-adaptor',
- 'update_engine-other-dbus-proxies',
],
'variables': {
'exported_deps': [
@@ -226,6 +221,7 @@
'libshill-client',
'libssl',
'libupdate_engine-client',
+ 'vboot_host',
],
'deps': ['<@(exported_deps)'],
},
@@ -247,7 +243,6 @@
'-lpolicy-<(libbase_ver)',
'-lrootdev',
'-lrt',
- '-lvboot_host',
],
},
'sources': [
@@ -262,7 +257,7 @@
'hardware_chromeos.cc',
'image_properties_chromeos.cc',
'libcurl_http_fetcher.cc',
- 'metrics.cc',
+ 'metrics_reporter_omaha.cc',
'metrics_utils.cc',
'omaha_request_action.cc',
'omaha_request_params.cc',
@@ -292,14 +287,16 @@
'update_status_utils.cc',
],
'conditions': [
- ['USE_libcros == 1', {
- 'dependencies': [
- 'update_engine-other-dbus-proxies',
- ],
+ ['USE_chrome_network_proxy == 1', {
'sources': [
'chrome_browser_proxy_resolver.cc',
],
}],
+ ['USE_chrome_kiosk_app == 1', {
+ 'dependencies': [
+ 'update_engine-dbus-libcros-client',
+ ],
+ }],
],
},
# update_engine daemon.
@@ -343,7 +340,7 @@
'common/error_code_utils.cc',
'omaha_utils.cc',
'update_engine_client.cc',
- ],
+ ],
},
# server-side code. This is used for delta_generator and unittests but not
# for any client code.
@@ -357,6 +354,7 @@
'variables': {
'exported_deps': [
'ext2fs',
+ 'libpuffdiff',
],
'deps': ['<@(exported_deps)'],
},
@@ -373,6 +371,9 @@
'<@(exported_deps)',
],
},
+ 'libraries': [
+ '-lbsdiff',
+ ],
},
'sources': [
'payload_generator/ab_generator.cc',
@@ -381,6 +382,7 @@
'payload_generator/block_mapping.cc',
'payload_generator/bzip.cc',
'payload_generator/cycle_breaker.cc',
+ 'payload_generator/deflate_utils.cc',
'payload_generator/delta_diff_generator.cc',
'payload_generator/delta_diff_utils.cc',
'payload_generator/ext2_filesystem.cc',
@@ -395,6 +397,7 @@
'payload_generator/payload_generation_config.cc',
'payload_generator/payload_signer.cc',
'payload_generator/raw_filesystem.cc',
+ 'payload_generator/squashfs_filesystem.cc',
'payload_generator/tarjan.cc',
'payload_generator/topological_sort.cc',
'payload_generator/xz_chromeos.cc',
@@ -468,7 +471,6 @@
{
'target_name': 'update_engine_unittests',
'type': 'executable',
- 'includes': ['../../../platform2/common-mk/common_test.gypi'],
'variables': {
'deps': [
'libbrillo-test-<(libbase_ver)',
@@ -508,6 +510,7 @@
'fake_system_state.cc',
'hardware_chromeos_unittest.cc',
'image_properties_chromeos_unittest.cc',
+ 'metrics_reporter_omaha_unittest.cc',
'metrics_utils_unittest.cc',
'omaha_request_action_unittest.cc',
'omaha_request_params_unittest.cc',
@@ -515,10 +518,14 @@
'omaha_utils_unittest.cc',
'p2p_manager_unittest.cc',
'payload_consumer/bzip_extent_writer_unittest.cc',
+ 'payload_consumer/cached_file_descriptor_unittest.cc',
'payload_consumer/delta_performer_integration_test.cc',
'payload_consumer/delta_performer_unittest.cc',
'payload_consumer/download_action_unittest.cc',
+ 'payload_consumer/extent_reader_unittest.cc',
'payload_consumer/extent_writer_unittest.cc',
+ 'payload_consumer/fake_file_descriptor.cc',
+ 'payload_consumer/file_descriptor_utils_unittest.cc',
'payload_consumer/file_writer_unittest.cc',
'payload_consumer/filesystem_verifier_action_unittest.cc',
'payload_consumer/postinstall_runner_action_unittest.cc',
@@ -527,6 +534,7 @@
'payload_generator/blob_file_writer_unittest.cc',
'payload_generator/block_mapping_unittest.cc',
'payload_generator/cycle_breaker_unittest.cc',
+ 'payload_generator/deflate_utils_unittest.cc',
'payload_generator/delta_diff_utils_unittest.cc',
'payload_generator/ext2_filesystem_unittest.cc',
'payload_generator/extent_ranges_unittest.cc',
@@ -539,11 +547,13 @@
'payload_generator/payload_file_unittest.cc',
'payload_generator/payload_generation_config_unittest.cc',
'payload_generator/payload_signer_unittest.cc',
+ 'payload_generator/squashfs_filesystem_unittest.cc',
'payload_generator/tarjan_unittest.cc',
'payload_generator/topological_sort_unittest.cc',
'payload_generator/zip_unittest.cc',
'payload_state_unittest.cc',
'proxy_resolver_unittest.cc',
+ 'testrunner.cc',
'update_attempter_unittest.cc',
'update_manager/boxed_value_unittest.cc',
'update_manager/chromeos_policy_unittest.cc',
@@ -559,15 +569,6 @@
'update_manager/umtest_utils.cc',
'update_manager/update_manager_unittest.cc',
'update_manager/variable_unittest.cc',
- # Main entry point for runnning tests.
- 'testrunner.cc',
- ],
- 'conditions': [
- ['USE_libcros == 1', {
- 'sources': [
- 'chrome_browser_proxy_resolver_unittest.cc',
- ],
- }],
],
},
],
diff --git a/update_engine.rc b/update_engine.rc
index b6a706a..a7d6235 100644
--- a/update_engine.rc
+++ b/update_engine.rc
@@ -1,5 +1,9 @@
-service update_engine /system/bin/update_engine --logtostderr --foreground
+service update_engine /system/bin/update_engine --logtostderr --logtofile --foreground
class late_start
user root
group root system wakelock inet cache
writepid /dev/cpuset/system-background/tasks
+ disabled
+
+on property:ro.boot.slot_suffix=*
+ enable update_engine
diff --git a/update_engine_client.cc b/update_engine_client.cc
index 44897e0..bb19632 100644
--- a/update_engine_client.cc
+++ b/update_engine_client.cc
@@ -26,6 +26,7 @@
#include <base/command_line.h>
#include <base/logging.h>
#include <base/macros.h>
+#include <base/threading/platform_thread.h>
#include <brillo/daemons/daemon.h>
#include <brillo/flag_helper.h>
@@ -52,6 +53,11 @@
// initialization.
const int kContinueRunning = -1;
+// The ShowStatus request will be retried |kShowStatusRetryCount| times at
+// |kShowStatusRetryIntervalInSeconds|-second intervals on failure.
+const int kShowStatusRetryCount = 30;
+const int kShowStatusRetryIntervalInSeconds = 2;
+
class UpdateEngineClient : public brillo::Daemon {
public:
UpdateEngineClient(int argc, char** argv) : argc_(argc), argv_(argv) {
@@ -73,7 +79,7 @@
// We can't call QuitWithExitCode from OnInit(), so we delay the execution
// of the ProcessFlags method after the Daemon initialization is done.
- base::MessageLoop::current()->PostTask(
+ base::MessageLoop::current()->task_runner()->PostTask(
FROM_HERE,
base::Bind(&UpdateEngineClient::ProcessFlagsAndExit,
base::Unretained(this)));
@@ -151,9 +157,18 @@
string new_version;
int64_t new_size = 0;
- if (!client_->GetStatus(&last_checked_time, &progress, ¤t_op,
- &new_version, &new_size)) {
- return false;
+ int retry_count = kShowStatusRetryCount;
+ while (retry_count > 0) {
+ if (client_->GetStatus(&last_checked_time, &progress, ¤t_op,
+ &new_version, &new_size)) {
+ break;
+ }
+ if (--retry_count == 0) {
+ return false;
+ }
+ LOG(WARNING) << "Will try " << retry_count << " more times!";
+ base::PlatformThread::Sleep(
+ base::TimeDelta::FromSeconds(kShowStatusRetryIntervalInSeconds));
}
printf("LAST_CHECKED_TIME=%" PRIi64
diff --git a/update_engine_client_android.cc b/update_engine_client_android.cc
index 989a97e..267f6e9 100644
--- a/update_engine_client_android.cc
+++ b/update_engine_client_android.cc
@@ -97,7 +97,10 @@
ErrorCode code = static_cast<ErrorCode>(error_code);
LOG(INFO) << "onPayloadApplicationComplete(" << utils::ErrorCodeToString(code)
<< " (" << error_code << "))";
- client_->ExitWhenIdle(code == ErrorCode::kSuccess ? EX_OK : 1);
+ client_->ExitWhenIdle(
+ (code == ErrorCode::kSuccess || code == ErrorCode::kUpdatedButNotActive)
+ ? EX_OK
+ : 1);
return Status::ok();
}
diff --git a/update_manager/android_things_policy.cc b/update_manager/android_things_policy.cc
new file mode 100644
index 0000000..5fbda46
--- /dev/null
+++ b/update_manager/android_things_policy.cc
@@ -0,0 +1,180 @@
+//
+// Copyright (C) 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/update_manager/android_things_policy.h"
+
+#include <string>
+#include <vector>
+
+#include <base/logging.h>
+#include <base/time/time.h>
+
+#include "update_engine/update_manager/api_restricted_downloads_policy_impl.h"
+#include "update_engine/update_manager/enough_slots_ab_updates_policy_impl.h"
+#include "update_engine/update_manager/interactive_update_policy_impl.h"
+#include "update_engine/update_manager/official_build_check_policy_impl.h"
+
+using base::Time;
+using chromeos_update_engine::ErrorCode;
+using std::string;
+using std::vector;
+
+namespace chromeos_update_manager {
+
+const NextUpdateCheckPolicyConstants
+ AndroidThingsPolicy::kNextUpdateCheckPolicyConstants = {
+ .timeout_initial_interval = 7 * 60,
+ .timeout_periodic_interval = 5 * 60 * 60,
+ .timeout_max_backoff_interval = 26 * 60 * 60,
+ .timeout_regular_fuzz = 10 * 60,
+ .attempt_backoff_max_interval_in_days = 16,
+ .attempt_backoff_fuzz_in_hours = 12,
+};
+
+EvalStatus AndroidThingsPolicy::UpdateCheckAllowed(
+ EvaluationContext* ec,
+ State* state,
+ string* error,
+ UpdateCheckParams* result) const {
+ // Set the default return values.
+ result->updates_enabled = true;
+ result->target_channel.clear();
+ result->target_version_prefix.clear();
+ result->is_interactive = false;
+
+ // Build a list of policies to consult. Note that each policy may modify the
+ // result structure, even if it signals kContinue.
+ EnoughSlotsAbUpdatesPolicyImpl enough_slots_ab_updates_policy;
+ OnlyUpdateOfficialBuildsPolicyImpl only_update_official_builds_policy;
+ InteractiveUpdatePolicyImpl interactive_update_policy;
+ NextUpdateCheckTimePolicyImpl next_update_check_time_policy(
+ kNextUpdateCheckPolicyConstants);
+
+ vector<Policy const*> policies_to_consult = {
+ // Do not perform any updates if there are not enough slots to do
+ // A/B updates
+ &enough_slots_ab_updates_policy,
+
+ // Unofficial builds should not perform periodic update checks.
+ &only_update_official_builds_policy,
+
+ // Check to see if an interactive update was requested.
+ &interactive_update_policy,
+
+ // Ensure that periodic update checks are timed properly.
+ &next_update_check_time_policy,
+ };
+
+ // Now that the list of policy implementations, and the order to consult
+ // them, has been set up, consult them. If none of the policies makes a
+ // definitive decision about whether or not to check for updates, allow the
+ // update check to happen.
+ EvalStatus status = ConsultPolicies(policies_to_consult,
+ &Policy::UpdateCheckAllowed,
+ ec,
+ state,
+ error,
+ result);
+ if (status != EvalStatus::kContinue) {
+ return status;
+ } else {
+ // It is time to check for an update.
+ LOG(INFO) << "Allowing update check.";
+ return EvalStatus::kSucceeded;
+ }
+}
+
+// Uses the |UpdateRestrictions| to determine if the download and apply can
+// occur at this time.
+EvalStatus AndroidThingsPolicy::UpdateCanBeApplied(
+ EvaluationContext* ec,
+ State* state,
+ string* error,
+ ErrorCode* result,
+ chromeos_update_engine::InstallPlan* install_plan) const {
+ // Build a list of policies to consult. Note that each policy may modify the
+ // result structure, even if it signals kContinue.
+ ApiRestrictedDownloadsPolicyImpl api_restricted_downloads_policy;
+
+ vector<Policy const*> policies_to_consult = {
+ // Do not apply the update if all updates are restricted by the API.
+ &api_restricted_downloads_policy,
+ };
+
+ // Now that the list of policy implementations, and the order to consult
+ // them, has been set up, consult them. If none of the policies makes a
+ // definitive decision about whether or not the update can be applied, allow
+ // the update to proceed.
+ EvalStatus status = ConsultPolicies(policies_to_consult,
+ &Policy::UpdateCanBeApplied,
+ ec,
+ state,
+ error,
+ result,
+ install_plan);
+ if (EvalStatus::kContinue != status) {
+ return status;
+ } else {
+ // The update can proceed.
+ LOG(INFO) << "Allowing update to be applied.";
+ *result = ErrorCode::kSuccess;
+ return EvalStatus::kSucceeded;
+ }
+}
+
+// Always returns |EvalStatus::kSucceeded|
+EvalStatus AndroidThingsPolicy::UpdateCanStart(EvaluationContext* ec,
+ State* state,
+ string* error,
+ UpdateDownloadParams* result,
+ UpdateState update_state) const {
+ // Update is good to go.
+ result->update_can_start = true;
+ return EvalStatus::kSucceeded;
+}
+
+// Always returns |EvalStatus::kSucceeded|
+EvalStatus AndroidThingsPolicy::UpdateDownloadAllowed(EvaluationContext* ec,
+ State* state,
+ string* error,
+ bool* result) const {
+ // By default, we allow updates.
+ *result = true;
+ return EvalStatus::kSucceeded;
+}
+
+// P2P is always disabled. Returns |result|==|false| and
+// |EvalStatus::kSucceeded|
+EvalStatus AndroidThingsPolicy::P2PEnabled(EvaluationContext* ec,
+ State* state,
+ string* error,
+ bool* result) const {
+ *result = false;
+ return EvalStatus::kSucceeded;
+}
+
+// This will return immediately with |EvalStatus::kSucceeded| and set
+// |result|==|false|
+EvalStatus AndroidThingsPolicy::P2PEnabledChanged(EvaluationContext* ec,
+ State* state,
+ string* error,
+ bool* result,
+ bool prev_result) const {
+ *result = false;
+ return EvalStatus::kSucceeded;
+}
+
+} // namespace chromeos_update_manager
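The consult-in-order pattern used by UpdateCheckAllowed() and UpdateCanBeApplied() above, where each policy implementation either makes a definitive decision or returns kContinue to defer to the next one, can be shown with a standalone sketch. The types below are simplified stand-ins, not the update_manager classes.

// Sketch of consulting a list of policy implementations in order.
#include <iostream>
#include <vector>

enum class EvalStatus { kContinue, kSucceeded, kAskMeAgainLater };

struct PolicyImpl {
  virtual ~PolicyImpl() = default;
  virtual EvalStatus Evaluate() const = 0;
};

struct DeferToNext : PolicyImpl {
  EvalStatus Evaluate() const override { return EvalStatus::kContinue; }
};

struct Block : PolicyImpl {
  EvalStatus Evaluate() const override { return EvalStatus::kAskMeAgainLater; }
};

EvalStatus ConsultPolicies(const std::vector<const PolicyImpl*>& policies) {
  for (const PolicyImpl* policy : policies) {
    EvalStatus status = policy->Evaluate();
    if (status != EvalStatus::kContinue)
      return status;  // The first definitive answer wins.
  }
  // No policy made a decision; the caller treats this as "allowed".
  return EvalStatus::kContinue;
}

int main() {
  DeferToNext defer;
  Block block;
  EvalStatus status = ConsultPolicies({&defer, &block});
  std::cout << (status == EvalStatus::kAskMeAgainLater ? "blocked" : "allowed")
            << std::endl;
  return 0;
}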
diff --git a/update_manager/android_things_policy.h b/update_manager/android_things_policy.h
new file mode 100644
index 0000000..9fd8bc4
--- /dev/null
+++ b/update_manager/android_things_policy.h
@@ -0,0 +1,92 @@
+//
+// Copyright (C) 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_UPDATE_MANAGER_ANDROID_THINGS_POLICY_H_
+#define UPDATE_ENGINE_UPDATE_MANAGER_ANDROID_THINGS_POLICY_H_
+
+#include <string>
+
+#include "update_engine/update_manager/next_update_check_policy_impl.h"
+#include "update_engine/update_manager/policy_utils.h"
+
+namespace chromeos_update_manager {
+
+// AndroidThingsPolicy implements the policy-related logic used in
+// AndroidThings.
+class AndroidThingsPolicy : public Policy {
+ public:
+ AndroidThingsPolicy() = default;
+ ~AndroidThingsPolicy() override = default;
+
+ // Policy overrides.
+ EvalStatus UpdateCheckAllowed(EvaluationContext* ec,
+ State* state,
+ std::string* error,
+ UpdateCheckParams* result) const override;
+
+ // Uses the |UpdateRestrictions| to determine if the download and apply can
+ // occur at this time.
+ EvalStatus UpdateCanBeApplied(
+ EvaluationContext* ec,
+ State* state,
+ std::string* error,
+ chromeos_update_engine::ErrorCode* result,
+ chromeos_update_engine::InstallPlan* install_plan) const override;
+
+ // Always returns |EvalStatus::kSucceeded|
+ EvalStatus UpdateCanStart(EvaluationContext* ec,
+ State* state,
+ std::string* error,
+ UpdateDownloadParams* result,
+ UpdateState update_state) const override;
+
+ // Always returns |EvalStatus::kSucceeded|
+ EvalStatus UpdateDownloadAllowed(EvaluationContext* ec,
+ State* state,
+ std::string* error,
+ bool* result) const override;
+
+ // P2P is always disabled. Returns |result|==|false| and
+ // |EvalStatus::kSucceeded|
+ EvalStatus P2PEnabled(EvaluationContext* ec,
+ State* state,
+ std::string* error,
+ bool* result) const override;
+
+ // This will return immediately with |EvalStatus::kSucceeded| and set
+ // |result|==|false|
+ EvalStatus P2PEnabledChanged(EvaluationContext* ec,
+ State* state,
+ std::string* error,
+ bool* result,
+ bool prev_result) const override;
+
+ protected:
+ // Policy override.
+ std::string PolicyName() const override { return "AndroidThingsPolicy"; }
+
+ private:
+ friend class UmAndroidThingsPolicyTest;
+ FRIEND_TEST(UmAndroidThingsPolicyTest, UpdateCheckAllowedWaitsForTheTimeout);
+
+ static const NextUpdateCheckPolicyConstants kNextUpdateCheckPolicyConstants;
+
+ DISALLOW_COPY_AND_ASSIGN(AndroidThingsPolicy);
+};
+
+} // namespace chromeos_update_manager
+
+#endif // UPDATE_ENGINE_UPDATE_MANAGER_ANDROID_THINGS_POLICY_H_
diff --git a/update_manager/android_things_policy_unittest.cc b/update_manager/android_things_policy_unittest.cc
new file mode 100644
index 0000000..8a50bc2
--- /dev/null
+++ b/update_manager/android_things_policy_unittest.cc
@@ -0,0 +1,188 @@
+//
+// Copyright (C) 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/update_manager/android_things_policy.h"
+
+#include <memory>
+
+#include "update_engine/update_manager/next_update_check_policy_impl.h"
+#include "update_engine/update_manager/policy_test_utils.h"
+
+using base::Time;
+using base::TimeDelta;
+using chromeos_update_engine::ErrorCode;
+using chromeos_update_engine::InstallPlan;
+
+namespace chromeos_update_manager {
+
+class UmAndroidThingsPolicyTest : public UmPolicyTestBase {
+ protected:
+ UmAndroidThingsPolicyTest() {
+ policy_ = std::make_unique<AndroidThingsPolicy>();
+ }
+
+ void SetUpDefaultState() override {
+ UmPolicyTestBase::SetUpDefaultState();
+
+ // For the purpose of the tests, this is an official build
+ fake_state_.system_provider()->var_is_official_build()->reset(
+ new bool(true));
+ // NOLINTNEXTLINE(readability/casting)
+ fake_state_.system_provider()->var_num_slots()->reset(new unsigned int(2));
+ }
+
+ // Configures the policy to return a desired value from UpdateCheckAllowed by
+ // faking the current wall clock time as needed. Restores the default state.
+ // This is used when testing policies that depend on this one.
+ virtual void SetUpdateCheckAllowed(bool allow_check) {
+ Time next_update_check;
+ CallMethodWithContext(&NextUpdateCheckTimePolicyImpl::NextUpdateCheckTime,
+ &next_update_check,
+ AndroidThingsPolicy::kNextUpdateCheckPolicyConstants);
+ SetUpDefaultState();
+ Time curr_time = next_update_check;
+ if (allow_check)
+ curr_time += TimeDelta::FromSeconds(1);
+ else
+ curr_time -= TimeDelta::FromSeconds(1);
+ fake_clock_.SetWallclockTime(curr_time);
+ }
+};
+
+TEST_F(UmAndroidThingsPolicyTest, UpdateCheckAllowedWaitsForTheTimeout) {
+ // We get the next update_check timestamp from the policy's private method
+ // and then check that the public method respects that value in the normal
+ // case.
+ Time next_update_check;
+ Time last_checked_time =
+ fake_clock_.GetWallclockTime() + TimeDelta::FromMinutes(1234);
+
+ LOG(INFO) << "last_checked_time: " << last_checked_time;
+ fake_state_.updater_provider()->var_last_checked_time()->reset(
+ new Time(last_checked_time));
+ CallMethodWithContext(&NextUpdateCheckTimePolicyImpl::NextUpdateCheckTime,
+ &next_update_check,
+ AndroidThingsPolicy::kNextUpdateCheckPolicyConstants);
+ LOG(INFO) << "Next check allowed at: " << next_update_check;
+
+ // Check that the policy blocks until the next_update_check is reached.
+ SetUpDefaultClock();
+ SetUpDefaultState();
+ fake_state_.updater_provider()->var_last_checked_time()->reset(
+ new Time(last_checked_time));
+ fake_clock_.SetWallclockTime(next_update_check - TimeDelta::FromSeconds(1));
+
+ UpdateCheckParams result;
+ ExpectPolicyStatus(
+ EvalStatus::kAskMeAgainLater, &Policy::UpdateCheckAllowed, &result);
+
+ SetUpDefaultClock();
+ SetUpDefaultState();
+ fake_state_.updater_provider()->var_last_checked_time()->reset(
+ new Time(last_checked_time));
+ fake_clock_.SetWallclockTime(next_update_check + TimeDelta::FromSeconds(1));
+ ExpectPolicyStatus(
+ EvalStatus::kSucceeded, &Policy::UpdateCheckAllowed, &result);
+ EXPECT_TRUE(result.updates_enabled);
+ EXPECT_FALSE(result.is_interactive);
+}
+
+TEST_F(UmAndroidThingsPolicyTest,
+ UpdateCheckAllowedUpdatesDisabledForUnofficialBuilds) {
+ // UpdateCheckAllowed should return kAskMeAgainLater if this is an unofficial
+ // build; we don't want periodic update checks on developer images.
+
+ fake_state_.system_provider()->var_is_official_build()->reset(
+ new bool(false));
+
+ UpdateCheckParams result;
+ ExpectPolicyStatus(
+ EvalStatus::kAskMeAgainLater, &Policy::UpdateCheckAllowed, &result);
+}
+
+TEST_F(UmAndroidThingsPolicyTest,
+ UpdateCheckAllowedUpdatesDisabledWhenNotEnoughSlotsAbUpdates) {
+ // UpdateCheckAllowed should return false (kSucceeded) if the image booted
+ // without enough slots to do A/B updates.
+
+ // NOLINTNEXTLINE(readability/casting)
+ fake_state_.system_provider()->var_num_slots()->reset(new unsigned int(1));
+
+ UpdateCheckParams result;
+ ExpectPolicyStatus(
+ EvalStatus::kSucceeded, &Policy::UpdateCheckAllowed, &result);
+ EXPECT_FALSE(result.updates_enabled);
+}
+
+TEST_F(UmAndroidThingsPolicyTest,
+ UpdateCheckAllowedForcedUpdateRequestedInteractive) {
+ // UpdateCheckAllowed should return true because a forced update request was
+ // signaled for an interactive update.
+
+ SetUpdateCheckAllowed(true);
+ fake_state_.updater_provider()->var_forced_update_requested()->reset(
+ new UpdateRequestStatus(UpdateRequestStatus::kInteractive));
+
+ UpdateCheckParams result;
+ ExpectPolicyStatus(
+ EvalStatus::kSucceeded, &Policy::UpdateCheckAllowed, &result);
+ EXPECT_TRUE(result.updates_enabled);
+ EXPECT_TRUE(result.is_interactive);
+}
+
+TEST_F(UmAndroidThingsPolicyTest,
+ UpdateCheckAllowedForcedUpdateRequestedPeriodic) {
+ // UpdateCheckAllowed should return true because a forced update request was
+ // signaled for a periodic check.
+
+ SetUpdateCheckAllowed(true);
+ fake_state_.updater_provider()->var_forced_update_requested()->reset(
+ new UpdateRequestStatus(UpdateRequestStatus::kPeriodic));
+
+ UpdateCheckParams result;
+ ExpectPolicyStatus(
+ EvalStatus::kSucceeded, &Policy::UpdateCheckAllowed, &result);
+ EXPECT_TRUE(result.updates_enabled);
+ EXPECT_FALSE(result.is_interactive);
+}
+
+TEST_F(UmAndroidThingsPolicyTest, UpdateCanBeAppliedOk) {
+ // UpdateCanBeApplied should return kSucceeded in the base case
+
+ InstallPlan plan;
+ ErrorCode result;
+ ExpectPolicyStatus(
+ EvalStatus::kSucceeded, &Policy::UpdateCanBeApplied, &result, &plan);
+
+ EXPECT_EQ(ErrorCode::kSuccess, result);
+}
+
+TEST_F(UmAndroidThingsPolicyTest, UpdateCanBeAppliedRestricted) {
+ // UpdateCanBeApplied should return kOmahaUpdateDeferredPerPolicy when the
+ // restricted flag is set in the Updater.
+
+ fake_state_.updater_provider()->var_update_restrictions()->reset(
+ new UpdateRestrictions(UpdateRestrictions::kRestrictDownloading));
+
+ InstallPlan plan;
+ ErrorCode result;
+ ExpectPolicyStatus(
+ EvalStatus::kSucceeded, &Policy::UpdateCanBeApplied, &result, &plan);
+
+ EXPECT_EQ(ErrorCode::kOmahaUpdateDeferredPerPolicy, result);
+}
+
+} // namespace chromeos_update_manager
diff --git a/update_manager/api_restricted_downloads_policy_impl.cc b/update_manager/api_restricted_downloads_policy_impl.cc
new file mode 100644
index 0000000..d413cca
--- /dev/null
+++ b/update_manager/api_restricted_downloads_policy_impl.cc
@@ -0,0 +1,47 @@
+//
+// Copyright (C) 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/update_manager/api_restricted_downloads_policy_impl.h"
+
+using chromeos_update_engine::ErrorCode;
+using std::string;
+using std::vector;
+
+namespace chromeos_update_manager {
+
+// Allow the API to restrict the downloading of updates.
+EvalStatus ApiRestrictedDownloadsPolicyImpl::UpdateCanBeApplied(
+ EvaluationContext* ec,
+ State* state,
+ std::string* error,
+ ErrorCode* result,
+ chromeos_update_engine::InstallPlan* install_plan) const {
+ // Check to see if updates can be applied (in general).
+ const UpdateRestrictions* update_restrictions_p =
+ ec->GetValue(state->updater_provider()->var_update_restrictions());
+ if (update_restrictions_p) {
+ if (*update_restrictions_p & UpdateRestrictions::kRestrictDownloading) {
+ *result = ErrorCode::kOmahaUpdateDeferredPerPolicy;
+ return EvalStatus::kSucceeded;
+ }
+ }
+
+ // The API isn't restricting downloads, so implicitly allow them to happen
+ // but don't explicitly return success from this policy implementation.
+ return EvalStatus::kContinue;
+}
+
+} // namespace chromeos_update_manager
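The flag test in this policy can be illustrated with a minimal sketch. UpdateRestrictions is modeled here as a plain bitmask enum, consistent with how the BoxedValue printer and the unit tests in this patch combine and inspect the flags; the surrounding types are stand-ins, not the update_engine declarations.

// Sketch of the download-restriction check performed by the policy above.
#include <cstdint>
#include <iostream>

enum UpdateRestrictions : int32_t {
  kNone = 0,
  kRestrictDownloading = (1 << 0),
};

enum class ErrorCode { kSuccess, kOmahaUpdateDeferredPerPolicy };

ErrorCode UpdateCanBeApplied(UpdateRestrictions restrictions) {
  if (restrictions & kRestrictDownloading)
    return ErrorCode::kOmahaUpdateDeferredPerPolicy;  // Defer the download.
  return ErrorCode::kSuccess;
}

int main() {
  bool deferred = UpdateCanBeApplied(kRestrictDownloading) ==
                  ErrorCode::kOmahaUpdateDeferredPerPolicy;
  std::cout << (deferred ? "deferred" : "allowed") << std::endl;
  return 0;
}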
diff --git a/update_manager/api_restricted_downloads_policy_impl.h b/update_manager/api_restricted_downloads_policy_impl.h
new file mode 100644
index 0000000..21457a5
--- /dev/null
+++ b/update_manager/api_restricted_downloads_policy_impl.h
@@ -0,0 +1,51 @@
+//
+// Copyright (C) 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_UPDATE_MANAGER_API_RESTRICTED_DOWNLOADS_POLICY_IMPL_H_
+#define UPDATE_ENGINE_UPDATE_MANAGER_API_RESTRICTED_DOWNLOADS_POLICY_IMPL_H_
+
+#include <string>
+
+#include "update_engine/update_manager/policy_utils.h"
+
+namespace chromeos_update_manager {
+
+// Allow the API to restrict the downloading of updates.
+class ApiRestrictedDownloadsPolicyImpl : public PolicyImplBase {
+ public:
+ ApiRestrictedDownloadsPolicyImpl() = default;
+ ~ApiRestrictedDownloadsPolicyImpl() override = default;
+
+ // Policy overrides.
+ EvalStatus UpdateCanBeApplied(
+ EvaluationContext* ec,
+ State* state,
+ std::string* error,
+ chromeos_update_engine::ErrorCode* result,
+ chromeos_update_engine::InstallPlan* install_plan) const override;
+
+ protected:
+ std::string PolicyName() const override {
+ return "ApiRestrictedDownloadsPolicyImpl";
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ApiRestrictedDownloadsPolicyImpl);
+};
+
+} // namespace chromeos_update_manager
+
+#endif // UPDATE_ENGINE_UPDATE_MANAGER_API_RESTRICTED_DOWNLOADS_POLICY_IMPL_H_
diff --git a/update_manager/boxed_value.cc b/update_manager/boxed_value.cc
index 9758d33..a437c02 100644
--- a/update_manager/boxed_value.cc
+++ b/update_manager/boxed_value.cc
@@ -176,4 +176,19 @@
return "Unknown";
}
+template <>
+string BoxedValue::ValuePrinter<UpdateRestrictions>(const void* value) {
+ const UpdateRestrictions* val =
+ reinterpret_cast<const UpdateRestrictions*>(value);
+
+ if (*val == UpdateRestrictions::kNone) {
+ return "None";
+ }
+ string retval = "Flags:";
+ if (*val & kRestrictDownloading) {
+ retval += " RestrictDownloading";
+ }
+ return retval;
+}
+
} // namespace chromeos_update_manager
diff --git a/update_manager/boxed_value_unittest.cc b/update_manager/boxed_value_unittest.cc
index 2a086a6..4aeaec8 100644
--- a/update_manager/boxed_value_unittest.cc
+++ b/update_manager/boxed_value_unittest.cc
@@ -21,6 +21,7 @@
#include <map>
#include <set>
#include <string>
+#include <utility>
#include <base/strings/stringprintf.h>
#include <base/time/time.h>
@@ -231,4 +232,14 @@
EXPECT_EQ("DeleterMarker:true", value.ToString());
}
+TEST(UmBoxedValueTest, UpdateRestrictionsToString) {
+ EXPECT_EQ(
+ "None",
+ BoxedValue(new UpdateRestrictions(UpdateRestrictions::kNone)).ToString());
+ EXPECT_EQ("Flags: RestrictDownloading",
+ BoxedValue(new UpdateRestrictions(
+ UpdateRestrictions::kRestrictDownloading))
+ .ToString());
+}
+
} // namespace chromeos_update_manager
diff --git a/update_manager/chromeos_policy.cc b/update_manager/chromeos_policy.cc
index 81a169f..b32b626 100644
--- a/update_manager/chromeos_policy.cc
+++ b/update_manager/chromeos_policy.cc
@@ -36,6 +36,7 @@
using chromeos_update_engine::ConnectionTethering;
using chromeos_update_engine::ConnectionType;
using chromeos_update_engine::ErrorCode;
+using chromeos_update_engine::InstallPlan;
using std::get;
using std::max;
using std::min;
@@ -135,6 +136,7 @@
case ErrorCode::kOmahaRequestXMLHasEntityDecl:
case ErrorCode::kFilesystemVerifierError:
case ErrorCode::kUserCanceled:
+ case ErrorCode::kUpdatedButNotActive:
LOG(INFO) << "Not changing URL index or failure count due to error "
<< chromeos_update_engine::utils::ErrorCodeToString(err_code)
<< " (" << static_cast<int>(err_code) << ")";
@@ -324,6 +326,15 @@
return EvalStatus::kSucceeded;
}
+EvalStatus ChromeOSPolicy::UpdateCanBeApplied(EvaluationContext* ec,
+ State* state,
+ std::string* error,
+ ErrorCode* result,
+ InstallPlan* install_plan) const {
+ *result = ErrorCode::kSuccess;
+ return EvalStatus::kSucceeded;
+}
+
EvalStatus ChromeOSPolicy::UpdateCanStart(
EvaluationContext* ec,
State* state,
diff --git a/update_manager/chromeos_policy.h b/update_manager/chromeos_policy.h
index b4370c4..283bedc 100644
--- a/update_manager/chromeos_policy.h
+++ b/update_manager/chromeos_policy.h
@@ -59,6 +59,13 @@
EvaluationContext* ec, State* state, std::string* error,
UpdateCheckParams* result) const override;
+ EvalStatus UpdateCanBeApplied(
+ EvaluationContext* ec,
+ State* state,
+ std::string* error,
+ chromeos_update_engine::ErrorCode* result,
+ chromeos_update_engine::InstallPlan* install_plan) const override;
+
EvalStatus UpdateCanStart(
EvaluationContext* ec,
State* state,
diff --git a/update_manager/chromeos_policy_unittest.cc b/update_manager/chromeos_policy_unittest.cc
index 0c38700..63fa0f7 100644
--- a/update_manager/chromeos_policy_unittest.cc
+++ b/update_manager/chromeos_policy_unittest.cc
@@ -93,6 +93,7 @@
new bool(true));
fake_state_.system_provider()->var_is_oobe_complete()->reset(
new bool(true));
+ // NOLINTNEXTLINE(readability/casting)
fake_state_.system_provider()->var_num_slots()->reset(new unsigned int(2));
// Connection is wifi, untethered.
@@ -418,10 +419,11 @@
}
TEST_F(UmChromeOSPolicyTest,
- UpdateCheckAllowedUpdatesDisabledForRemovableBootDevice) {
+ UpdateCheckAllowedUpdatesDisabledWhenNotEnoughSlotsAbUpdates) {
// UpdateCheckAllowed should return false (kSucceeded) if the image booted
- // from a removable device.
+ // without enough slots to do A/B updates.
+ // NOLINTNEXTLINE(readability/casting)
fake_state_.system_provider()->var_num_slots()->reset(new unsigned int(1));
UpdateCheckParams result;
diff --git a/update_manager/default_policy.cc b/update_manager/default_policy.cc
index 9a5ce7e..5da1520 100644
--- a/update_manager/default_policy.cc
+++ b/update_manager/default_policy.cc
@@ -16,6 +16,9 @@
#include "update_engine/update_manager/default_policy.h"
+using chromeos_update_engine::ErrorCode;
+using chromeos_update_engine::InstallPlan;
+
namespace {
// A fixed minimum interval between consecutive allowed update checks. This
@@ -53,6 +56,15 @@
return EvalStatus::kAskMeAgainLater;
}
+EvalStatus DefaultPolicy::UpdateCanBeApplied(EvaluationContext* ec,
+ State* state,
+ std::string* error,
+ ErrorCode* result,
+ InstallPlan* install_plan) const {
+ *result = ErrorCode::kSuccess;
+ return EvalStatus::kSucceeded;
+}
+
EvalStatus DefaultPolicy::UpdateCanStart(
EvaluationContext* ec,
State* state,
diff --git a/update_manager/default_policy.h b/update_manager/default_policy.h
index 3f41178..136ca35 100644
--- a/update_manager/default_policy.h
+++ b/update_manager/default_policy.h
@@ -69,6 +69,13 @@
EvaluationContext* ec, State* state, std::string* error,
UpdateCheckParams* result) const override;
+ EvalStatus UpdateCanBeApplied(
+ EvaluationContext* ec,
+ State* state,
+ std::string* error,
+ chromeos_update_engine::ErrorCode* result,
+ chromeos_update_engine::InstallPlan* install_plan) const override;
+
EvalStatus UpdateCanStart(
EvaluationContext* ec, State* state, std::string* error,
UpdateDownloadParams* result,
diff --git a/update_manager/enough_slots_ab_updates_policy_impl.cc b/update_manager/enough_slots_ab_updates_policy_impl.cc
new file mode 100644
index 0000000..70f15d4
--- /dev/null
+++ b/update_manager/enough_slots_ab_updates_policy_impl.cc
@@ -0,0 +1,38 @@
+//
+// Copyright (C) 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/update_manager/enough_slots_ab_updates_policy_impl.h"
+
+namespace chromeos_update_manager {
+
+// Do not perform any updates if there are not enough slots to do A/B updates.
+// This decision is final.
+EvalStatus EnoughSlotsAbUpdatesPolicyImpl::UpdateCheckAllowed(
+ EvaluationContext* ec,
+ State* state,
+ std::string* error,
+ UpdateCheckParams* result) const {
+ const auto* num_slots_p =
+ ec->GetValue(state->system_provider()->var_num_slots());
+ if (num_slots_p == nullptr || *num_slots_p < 2) {
+ LOG(INFO) << "Not enough slots for A/B updates, disabling update checks.";
+ result->updates_enabled = false;
+ return EvalStatus::kSucceeded;
+ }
+ return EvalStatus::kContinue;
+}
+
+} // namespace chromeos_update_manager
diff --git a/update_manager/enough_slots_ab_updates_policy_impl.h b/update_manager/enough_slots_ab_updates_policy_impl.h
new file mode 100644
index 0000000..1d45389
--- /dev/null
+++ b/update_manager/enough_slots_ab_updates_policy_impl.h
@@ -0,0 +1,49 @@
+//
+// Copyright (C) 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_UPDATE_MANAGER_ENOUGH_SLOTS_AB_UPDATES_POLICY_IMPL_H_
+#define UPDATE_ENGINE_UPDATE_MANAGER_ENOUGH_SLOTS_AB_UPDATES_POLICY_IMPL_H_
+
+#include <string>
+
+#include "update_engine/update_manager/policy_utils.h"
+
+namespace chromeos_update_manager {
+
+// Do not perform any updates if there are not enough slots for A/B updates.
+class EnoughSlotsAbUpdatesPolicyImpl : public PolicyImplBase {
+ public:
+ EnoughSlotsAbUpdatesPolicyImpl() = default;
+ ~EnoughSlotsAbUpdatesPolicyImpl() override = default;
+
+ // Policy overrides.
+ EvalStatus UpdateCheckAllowed(EvaluationContext* ec,
+ State* state,
+ std::string* error,
+ UpdateCheckParams* result) const override;
+
+ protected:
+ std::string PolicyName() const override {
+ return "EnoughSlotsAbUpdatesPolicyImpl";
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(EnoughSlotsAbUpdatesPolicyImpl);
+};
+
+} // namespace chromeos_update_manager
+
+#endif // UPDATE_ENGINE_UPDATE_MANAGER_ENOUGH_SLOTS_AB_UPDATES_POLICY_IMPL_H_
diff --git a/update_manager/evaluation_context.cc b/update_manager/evaluation_context.cc
index 98238f2..b6c7b91 100644
--- a/update_manager/evaluation_context.cc
+++ b/update_manager/evaluation_context.cc
@@ -24,7 +24,6 @@
#include <base/bind.h>
#include <base/json/json_writer.h>
#include <base/location.h>
-#include <base/memory/ptr_util.h>
#include <base/strings/string_util.h>
#include <base/values.h>
@@ -229,7 +228,7 @@
}
string EvaluationContext::DumpContext() const {
- auto variables = base::MakeUnique<base::DictionaryValue>();
+ auto variables = std::make_unique<base::DictionaryValue>();
for (auto& it : value_cache_) {
variables->SetString(it.first->GetName(), it.second.ToString());
}
diff --git a/update_manager/fake_updater_provider.h b/update_manager/fake_updater_provider.h
index 44389f4..7295765 100644
--- a/update_manager/fake_updater_provider.h
+++ b/update_manager/fake_updater_provider.h
@@ -41,19 +41,15 @@
return &var_update_completed_time_;
}
- FakeVariable<double>* var_progress() override {
- return &var_progress_;
- }
+ FakeVariable<double>* var_progress() override { return &var_progress_; }
- FakeVariable<Stage>* var_stage() override {
- return &var_stage_;
- }
+ FakeVariable<Stage>* var_stage() override { return &var_stage_; }
FakeVariable<std::string>* var_new_version() override {
return &var_new_version_;
}
- FakeVariable<int64_t>* var_payload_size() override {
+ FakeVariable<uint64_t>* var_payload_size() override {
return &var_payload_size_;
}
@@ -65,9 +61,7 @@
return &var_new_channel_;
}
- FakeVariable<bool>* var_p2p_enabled() override {
- return &var_p2p_enabled_;
- }
+ FakeVariable<bool>* var_p2p_enabled() override { return &var_p2p_enabled_; }
FakeVariable<bool>* var_cellular_enabled() override {
return &var_cellular_enabled_;
@@ -85,42 +79,35 @@
return &var_forced_update_requested_;
}
+ FakeVariable<UpdateRestrictions>* var_update_restrictions() override {
+ return &var_update_restrictions_;
+ }
+
private:
- FakeVariable<base::Time>
- var_updater_started_time_{ // NOLINT(whitespace/braces)
- "updater_started_time", kVariableModePoll};
- FakeVariable<base::Time> var_last_checked_time_{ // NOLINT(whitespace/braces)
- "last_checked_time", kVariableModePoll};
- FakeVariable<base::Time>
- var_update_completed_time_{ // NOLINT(whitespace/braces)
- "update_completed_time", kVariableModePoll};
- FakeVariable<double> var_progress_{ // NOLINT(whitespace/braces)
- "progress", kVariableModePoll};
- FakeVariable<Stage> var_stage_{ // NOLINT(whitespace/braces)
- "stage", kVariableModePoll};
- FakeVariable<std::string> var_new_version_{ // NOLINT(whitespace/braces)
- "new_version", kVariableModePoll};
- FakeVariable<int64_t> var_payload_size_{ // NOLINT(whitespace/braces)
- "payload_size", kVariableModePoll};
- FakeVariable<std::string> var_curr_channel_{ // NOLINT(whitespace/braces)
- "curr_channel", kVariableModePoll};
- FakeVariable<std::string> var_new_channel_{ // NOLINT(whitespace/braces)
- "new_channel", kVariableModePoll};
- FakeVariable<bool> var_p2p_enabled_{// NOLINT(whitespace/braces)
- "p2p_enabled",
- kVariableModeAsync};
- FakeVariable<bool> var_cellular_enabled_{// NOLINT(whitespace/braces)
- "cellular_enabled",
+ FakeVariable<base::Time> var_updater_started_time_{"updater_started_time",
+ kVariableModePoll};
+ FakeVariable<base::Time> var_last_checked_time_{"last_checked_time",
+ kVariableModePoll};
+ FakeVariable<base::Time> var_update_completed_time_{"update_completed_time",
+ kVariableModePoll};
+ FakeVariable<double> var_progress_{"progress", kVariableModePoll};
+ FakeVariable<Stage> var_stage_{"stage", kVariableModePoll};
+ FakeVariable<std::string> var_new_version_{"new_version", kVariableModePoll};
+ FakeVariable<uint64_t> var_payload_size_{"payload_size", kVariableModePoll};
+ FakeVariable<std::string> var_curr_channel_{"curr_channel",
+ kVariableModePoll};
+ FakeVariable<std::string> var_new_channel_{"new_channel", kVariableModePoll};
+ FakeVariable<bool> var_p2p_enabled_{"p2p_enabled", kVariableModeAsync};
+ FakeVariable<bool> var_cellular_enabled_{"cellular_enabled",
kVariableModeAsync};
- FakeVariable<unsigned int>
- var_consecutive_failed_update_checks_{ // NOLINT(whitespace/braces)
- "consecutive_failed_update_checks", kVariableModePoll};
- FakeVariable<unsigned int>
- var_server_dictated_poll_interval_{ // NOLINT(whitespace/braces)
- "server_dictated_poll_interval", kVariableModePoll};
- FakeVariable<UpdateRequestStatus>
- var_forced_update_requested_{ // NOLINT(whitespace/braces)
- "forced_update_requested", kVariableModeAsync};
+ FakeVariable<unsigned int> var_consecutive_failed_update_checks_{
+ "consecutive_failed_update_checks", kVariableModePoll};
+ FakeVariable<unsigned int> var_server_dictated_poll_interval_{
+ "server_dictated_poll_interval", kVariableModePoll};
+ FakeVariable<UpdateRequestStatus> var_forced_update_requested_{
+ "forced_update_requested", kVariableModeAsync};
+ FakeVariable<UpdateRestrictions> var_update_restrictions_{
+ "update_restrictions", kVariableModePoll};
DISALLOW_COPY_AND_ASSIGN(FakeUpdaterProvider);
};
diff --git a/update_manager/interactive_update_policy_impl.cc b/update_manager/interactive_update_policy_impl.cc
new file mode 100644
index 0000000..df7f17b
--- /dev/null
+++ b/update_manager/interactive_update_policy_impl.cc
@@ -0,0 +1,44 @@
+//
+// Copyright (C) 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/update_manager/interactive_update_policy_impl.h"
+
+namespace chromeos_update_manager {
+
+// Check to see if an interactive update was requested.
+EvalStatus InteractiveUpdatePolicyImpl::UpdateCheckAllowed(
+ EvaluationContext* ec,
+ State* state,
+ std::string* error,
+ UpdateCheckParams* result) const {
+ UpdaterProvider* const updater_provider = state->updater_provider();
+
+ // Check whether an interactive update was requested.
+ const UpdateRequestStatus* forced_update_requested_p =
+ ec->GetValue(updater_provider->var_forced_update_requested());
+ if (forced_update_requested_p != nullptr &&
+ *forced_update_requested_p != UpdateRequestStatus::kNone) {
+ result->is_interactive =
+ (*forced_update_requested_p == UpdateRequestStatus::kInteractive);
+ LOG(INFO) << "Forced update signaled ("
+ << (result->is_interactive ? "interactive" : "periodic")
+ << "), allowing update check.";
+ return EvalStatus::kSucceeded;
+ }
+ return EvalStatus::kContinue;
+}
+
+} // namespace chromeos_update_manager
diff --git a/update_manager/interactive_update_policy_impl.h b/update_manager/interactive_update_policy_impl.h
new file mode 100644
index 0000000..a431456
--- /dev/null
+++ b/update_manager/interactive_update_policy_impl.h
@@ -0,0 +1,49 @@
+//
+// Copyright (C) 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_UPDATE_MANAGER_INTERACTIVE_UPDATE_POLICY_IMPL_H_
+#define UPDATE_ENGINE_UPDATE_MANAGER_INTERACTIVE_UPDATE_POLICY_IMPL_H_
+
+#include <string>
+
+#include "update_engine/update_manager/policy_utils.h"
+
+namespace chromeos_update_manager {
+
+// Check to see if an interactive update was requested.
+class InteractiveUpdatePolicyImpl : public PolicyImplBase {
+ public:
+ InteractiveUpdatePolicyImpl() = default;
+ ~InteractiveUpdatePolicyImpl() override = default;
+
+ // Policy overrides.
+ EvalStatus UpdateCheckAllowed(EvaluationContext* ec,
+ State* state,
+ std::string* error,
+ UpdateCheckParams* result) const override;
+
+ protected:
+ std::string PolicyName() const override {
+ return "InteractiveUpdatePolicyImpl";
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(InteractiveUpdatePolicyImpl);
+};
+
+} // namespace chromeos_update_manager
+
+#endif // UPDATE_ENGINE_UPDATE_MANAGER_INTERACTIVE_UPDATE_POLICY_IMPL_H_
diff --git a/update_manager/mock_policy.h b/update_manager/mock_policy.h
index 14470e9..8060bf8 100644
--- a/update_manager/mock_policy.h
+++ b/update_manager/mock_policy.h
@@ -36,6 +36,11 @@
testing::_))
.WillByDefault(testing::Invoke(
&default_policy_, &DefaultPolicy::UpdateCheckAllowed));
+ ON_CALL(*this,
+ UpdateCanBeApplied(
+ testing::_, testing::_, testing::_, testing::_, testing::_))
+ .WillByDefault(testing::Invoke(&default_policy_,
+ &DefaultPolicy::UpdateCanBeApplied));
ON_CALL(*this, UpdateCanStart(testing::_, testing::_, testing::_,
testing::_, testing::_))
.WillByDefault(testing::Invoke(
@@ -61,6 +66,13 @@
EvalStatus(EvaluationContext*, State*, std::string*,
UpdateCheckParams*));
+ MOCK_CONST_METHOD5(UpdateCanBeApplied,
+ EvalStatus(EvaluationContext*,
+ State*,
+ std::string*,
+ chromeos_update_engine::ErrorCode*,
+ chromeos_update_engine::InstallPlan*));
+
MOCK_CONST_METHOD5(UpdateCanStart,
EvalStatus(EvaluationContext*, State*, std::string*,
UpdateDownloadParams*, UpdateState));
diff --git a/update_manager/next_update_check_policy_impl.cc b/update_manager/next_update_check_policy_impl.cc
new file mode 100644
index 0000000..6f9748e
--- /dev/null
+++ b/update_manager/next_update_check_policy_impl.cc
@@ -0,0 +1,150 @@
+//
+// Copyright (C) 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/update_manager/next_update_check_policy_impl.h"
+
+#include <algorithm>
+
+#include "update_engine/common/utils.h"
+
+using base::Time;
+using base::TimeDelta;
+using std::max;
+using std::string;
+
+namespace chromeos_update_manager {
+
+NextUpdateCheckTimePolicyImpl::NextUpdateCheckTimePolicyImpl(
+ const NextUpdateCheckPolicyConstants& constants)
+ : policy_constants_(constants) {}
+
+EvalStatus NextUpdateCheckTimePolicyImpl::UpdateCheckAllowed(
+ EvaluationContext* ec,
+ State* state,
+ string* error,
+ UpdateCheckParams* result) const {
+ // Ensure that periodic update checks are timed properly.
+ Time next_update_check;
+
+ if (NextUpdateCheckTime(
+ ec, state, error, &next_update_check, policy_constants_) !=
+ EvalStatus::kSucceeded) {
+ return EvalStatus::kFailed;
+ }
+ if (!ec->IsWallclockTimeGreaterThan(next_update_check)) {
+ LOG(INFO) << "Periodic check interval not satisfied, blocking until "
+ << chromeos_update_engine::utils::ToString(next_update_check);
+ return EvalStatus::kAskMeAgainLater;
+ }
+
+ return EvalStatus::kContinue;
+}
+
+EvalStatus NextUpdateCheckTimePolicyImpl::NextUpdateCheckTime(
+ EvaluationContext* ec,
+ State* state,
+ string* error,
+ Time* next_update_check,
+ const NextUpdateCheckPolicyConstants& constants) {
+ UpdaterProvider* const updater_provider = state->updater_provider();
+
+ // Don't check for updates too often. We limit update checks to at most one
+ // per interval: constants.timeout_initial_interval for the first check and
+ // constants.timeout_periodic_interval for subsequent checks. If an update
+ // check fails, the interval between checks grows exponentially, up to
+ // constants.timeout_max_backoff_interval. Finally, to avoid having many
+ // Chromebooks run their update checks at exactly the same time, we add some
+ // fuzz to the interval.
+ const Time* updater_started_time =
+ ec->GetValue(updater_provider->var_updater_started_time());
+ POLICY_CHECK_VALUE_AND_FAIL(updater_started_time, error);
+
+ const Time* last_checked_time =
+ ec->GetValue(updater_provider->var_last_checked_time());
+
+ const auto* seed = ec->GetValue(state->random_provider()->var_seed());
+ POLICY_CHECK_VALUE_AND_FAIL(seed, error);
+
+ PRNG prng(*seed);
+
+ // If this is the first attempt, compute and return an initial value.
+ if (last_checked_time == nullptr ||
+ *last_checked_time < *updater_started_time) {
+ *next_update_check = *updater_started_time +
+ FuzzedInterval(&prng,
+ constants.timeout_initial_interval,
+ constants.timeout_regular_fuzz);
+ return EvalStatus::kSucceeded;
+ }
+
+ // Check whether the server is enforcing a poll interval; if not, this value
+ // will be zero.
+ const unsigned int* server_dictated_poll_interval =
+ ec->GetValue(updater_provider->var_server_dictated_poll_interval());
+ POLICY_CHECK_VALUE_AND_FAIL(server_dictated_poll_interval, error);
+
+ int interval = *server_dictated_poll_interval;
+ int fuzz = 0;
+
+ // If no poll interval was dictated by the server, compute a back-off period
+ // starting from a predetermined base periodic interval and increasing it
+ // exponentially with the number of consecutive failed attempts.
+ if (interval == 0) {
+ const unsigned int* consecutive_failed_update_checks =
+ ec->GetValue(updater_provider->var_consecutive_failed_update_checks());
+ POLICY_CHECK_VALUE_AND_FAIL(consecutive_failed_update_checks, error);
+
+ interval = constants.timeout_periodic_interval;
+ unsigned int num_failures = *consecutive_failed_update_checks;
+ while (interval < constants.timeout_max_backoff_interval && num_failures) {
+ interval *= 2;
+ num_failures--;
+ }
+ }
+
+ // We cannot back off longer than the predetermined maximum interval.
+ if (interval > constants.timeout_max_backoff_interval)
+ interval = constants.timeout_max_backoff_interval;
+
+ // We cannot back off shorter than the predetermined periodic interval. Also,
+ // in this case set the fuzz to a predetermined regular value.
+ if (interval <= constants.timeout_periodic_interval) {
+ interval = constants.timeout_periodic_interval;
+ fuzz = constants.timeout_regular_fuzz;
+ }
+
+ // If not otherwise determined, defer to a fuzz of +/-(interval / 2).
+ if (fuzz == 0)
+ fuzz = interval;
+
+ *next_update_check =
+ *last_checked_time + FuzzedInterval(&prng, interval, fuzz);
+ return EvalStatus::kSucceeded;
+}
+
+TimeDelta NextUpdateCheckTimePolicyImpl::FuzzedInterval(PRNG* prng,
+ int interval,
+ int fuzz) {
+ DCHECK_GE(interval, 0);
+ DCHECK_GE(fuzz, 0);
+ int half_fuzz = fuzz / 2;
+ // This guarantees that the output interval is non-negative.
+ int interval_min = max(interval - half_fuzz, 0);
+ int interval_max = interval + half_fuzz;
+ return TimeDelta::FromSeconds(prng->RandMinMax(interval_min, interval_max));
+}
+
+} // namespace chromeos_update_manager
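
A worked example of the back-off arithmetic above, using the illustrative constants from the unit test that follows rather than production values: with timeout_periodic_interval = 2 h and two consecutive failed update checks, the interval doubles twice to 8 h and is then capped at timeout_max_backoff_interval = 8 h; because that exceeds the periodic interval, the fuzz defaults to the full interval, so the next check is scheduled uniformly within last_checked_time + 8 h +/- 4 h, which is exactly the window the RecurringCheckBackoffIntervalAndFuzz test asserts.
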
diff --git a/update_manager/next_update_check_policy_impl.h b/update_manager/next_update_check_policy_impl.h
new file mode 100644
index 0000000..291ea0f
--- /dev/null
+++ b/update_manager/next_update_check_policy_impl.h
@@ -0,0 +1,98 @@
+//
+// Copyright (C) 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_UPDATE_MANAGER_NEXT_UPDATE_CHECK_POLICY_IMPL_H_
+#define UPDATE_ENGINE_UPDATE_MANAGER_NEXT_UPDATE_CHECK_POLICY_IMPL_H_
+
+#include <string>
+
+#include <base/time/time.h>
+
+#include "update_engine/update_manager/policy_utils.h"
+#include "update_engine/update_manager/prng.h"
+
+namespace chromeos_update_manager {
+
+// Constants that are provided to the policy implementation.
+struct NextUpdateCheckPolicyConstants {
+ // Default update check timeout interval/fuzz values used to compute
+ // NextUpdateCheckTime(), in seconds. The actual fuzz is within +/- half of
+ // the indicated value.
+ int timeout_initial_interval;
+ int timeout_periodic_interval;
+ int timeout_max_backoff_interval;
+ int timeout_regular_fuzz;
+
+ // Maximum update attempt backoff interval and fuzz.
+ int attempt_backoff_max_interval_in_days;
+ int attempt_backoff_fuzz_in_hours;
+};
+
+// Ensure that periodic update checks are timed properly.
+class NextUpdateCheckTimePolicyImpl : public PolicyImplBase {
+ public:
+ explicit NextUpdateCheckTimePolicyImpl(
+ const NextUpdateCheckPolicyConstants& constants);
+
+ // Policy overrides.
+ EvalStatus UpdateCheckAllowed(EvaluationContext* ec,
+ State* state,
+ std::string* error,
+ UpdateCheckParams* result) const override;
+
+ // A policy helper returning the wallclock timestamp when the next update
+ // check should happen.
+ // TODO(garnold) We should probably change that to infer a monotonic
+ // timestamp, which will make the update check intervals more resilient to
+ // clock skews. Might require switching some of the variables exported by the
+ // UpdaterProvider to report monotonic time, as well.
+ //
+ // NOTE:
+ // Exposed as a public static so that its logic can be used to test Policy
+ // implementations that utilize this fragment for their timing, without
+ // needing to list them all with FRIEND_TEST (so that those Policy
+ // implementations can exist without modifying this class's definition).
+ //
+ // The output value of this method (|next_update_check|) isn't available
+ // via the UpdateCheckParams |result| of the Policy method, so this timing
+ // logic needs to be otherwise exposed.
+ static EvalStatus NextUpdateCheckTime(
+ EvaluationContext* ec,
+ State* state,
+ std::string* error,
+ base::Time* next_update_check,
+ const NextUpdateCheckPolicyConstants& constants);
+
+ // Returns a TimeDelta based on the provided |interval| seconds +/- half
+ // |fuzz| seconds. The return value is guaranteed to be a non-negative
+ // TimeDelta.
+ static base::TimeDelta FuzzedInterval(PRNG* prng, int interval, int fuzz);
+
+ protected:
+ std::string PolicyName() const override {
+ return "NextUpdateCheckTimePolicyImpl";
+ }
+
+ private:
+ const NextUpdateCheckPolicyConstants policy_constants_;
+
+ DISALLOW_COPY_AND_ASSIGN(NextUpdateCheckTimePolicyImpl);
+};
+
+} // namespace chromeos_update_manager
+
+#endif // UPDATE_ENGINE_UPDATE_MANAGER_NEXT_UPDATE_CHECK_POLICY_IMPL_H_
diff --git a/update_manager/next_update_check_policy_impl_unittest.cc b/update_manager/next_update_check_policy_impl_unittest.cc
new file mode 100644
index 0000000..58aff66
--- /dev/null
+++ b/update_manager/next_update_check_policy_impl_unittest.cc
@@ -0,0 +1,163 @@
+//
+// Copyright (C) 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/update_manager/next_update_check_policy_impl.h"
+
+#include <memory>
+
+#include "update_engine/update_manager/policy_test_utils.h"
+
+using base::Time;
+using base::TimeDelta;
+using std::string;
+
+namespace chromeos_update_manager {
+
+const NextUpdateCheckPolicyConstants policy_test_constants = {
+ // These are specifically NOT the values used by real Policy
+ // implementations.
+ .timeout_initial_interval = 3 * 60,
+ .timeout_periodic_interval = 2 * 60 * 60,
+ .timeout_max_backoff_interval = 8 * 60 * 60,
+ .timeout_regular_fuzz = 5 * 60,
+ .attempt_backoff_max_interval_in_days = 12,
+ .attempt_backoff_fuzz_in_hours = 10,
+};
+
+class UmNextUpdateCheckTimePolicyImplTest : public UmPolicyTestBase {
+ protected:
+ UmNextUpdateCheckTimePolicyImplTest() {
+ policy_ =
+ std::make_unique<NextUpdateCheckTimePolicyImpl>(policy_test_constants);
+ }
+};
+
+TEST_F(UmNextUpdateCheckTimePolicyImplTest,
+ FirstCheckIsAtMostInitialIntervalAfterStart) {
+ Time next_update_check;
+
+ // Set the last update time so it'll appear as if this is a first update check
+ // in the lifetime of the current updater.
+ fake_state_.updater_provider()->var_last_checked_time()->reset(
+ new Time(fake_clock_.GetWallclockTime() - TimeDelta::FromMinutes(10)));
+
+ CallMethodWithContext(&NextUpdateCheckTimePolicyImpl::NextUpdateCheckTime,
+ &next_update_check,
+ policy_test_constants);
+
+ EXPECT_LE(fake_clock_.GetWallclockTime(), next_update_check);
+ EXPECT_GE(fake_clock_.GetWallclockTime() +
+ TimeDelta::FromSeconds(
+ policy_test_constants.timeout_initial_interval +
+ policy_test_constants.timeout_regular_fuzz / 2),
+ next_update_check);
+}
+
+TEST_F(UmNextUpdateCheckTimePolicyImplTest, RecurringCheckBaseIntervalAndFuzz) {
+ // Ensure that we're using the correct interval (timeout_periodic_interval)
+ // and fuzz (timeout_regular_fuzz) as base values for periodic update checks.
+ Time next_update_check;
+
+ CallMethodWithContext(&NextUpdateCheckTimePolicyImpl::NextUpdateCheckTime,
+ &next_update_check,
+ policy_test_constants);
+
+ EXPECT_LE(fake_clock_.GetWallclockTime() +
+ TimeDelta::FromSeconds(
+ policy_test_constants.timeout_periodic_interval -
+ policy_test_constants.timeout_regular_fuzz / 2),
+ next_update_check);
+ EXPECT_GE(fake_clock_.GetWallclockTime() +
+ TimeDelta::FromSeconds(
+ policy_test_constants.timeout_periodic_interval +
+ policy_test_constants.timeout_regular_fuzz / 2),
+ next_update_check);
+}
+
+TEST_F(UmNextUpdateCheckTimePolicyImplTest,
+ RecurringCheckBackoffIntervalAndFuzz) {
+ // Ensure that we're properly backing off and fuzzing in the presence of
+ // failed update attempts.
+ Time next_update_check;
+
+ fake_state_.updater_provider()->var_consecutive_failed_update_checks()->reset(
+ new unsigned int{2});
+
+ ExpectStatus(EvalStatus::kSucceeded,
+ NextUpdateCheckTimePolicyImpl::NextUpdateCheckTime,
+ &next_update_check,
+ policy_test_constants);
+
+ int expected_interval = policy_test_constants.timeout_periodic_interval * 4;
+ EXPECT_LE(
+ fake_clock_.GetWallclockTime() +
+ TimeDelta::FromSeconds(expected_interval - expected_interval / 2),
+ next_update_check);
+ EXPECT_GE(
+ fake_clock_.GetWallclockTime() +
+ TimeDelta::FromSeconds(expected_interval + expected_interval / 2),
+ next_update_check);
+}
+
+TEST_F(UmNextUpdateCheckTimePolicyImplTest,
+ RecurringCheckServerDictatedPollInterval) {
+ // The policy honors the server-provided poll interval.
+ Time next_update_check;
+
+ const auto kInterval = policy_test_constants.timeout_periodic_interval * 4;
+ fake_state_.updater_provider()->var_server_dictated_poll_interval()->reset(
+ new unsigned int(kInterval)); // NOLINT(readability/casting)
+ // We should not be backing off in this case.
+ fake_state_.updater_provider()->var_consecutive_failed_update_checks()->reset(
+ new unsigned int(2)); // NOLINT(readability/casting)
+
+ ExpectStatus(EvalStatus::kSucceeded,
+ &NextUpdateCheckTimePolicyImpl::NextUpdateCheckTime,
+ &next_update_check,
+ policy_test_constants);
+
+ EXPECT_LE(fake_clock_.GetWallclockTime() +
+ TimeDelta::FromSeconds(kInterval - kInterval / 2),
+ next_update_check);
+ EXPECT_GE(fake_clock_.GetWallclockTime() +
+ TimeDelta::FromSeconds(kInterval + kInterval / 2),
+ next_update_check);
+}
+
+TEST_F(UmNextUpdateCheckTimePolicyImplTest, ExponentialBackoffIsCapped) {
+ Time next_update_check;
+
+ fake_state_.updater_provider()->var_consecutive_failed_update_checks()->reset(
+ new unsigned int(100)); // NOLINT(readability/casting)
+
+ ExpectStatus(EvalStatus::kSucceeded,
+ &NextUpdateCheckTimePolicyImpl::NextUpdateCheckTime,
+ &next_update_check,
+ policy_test_constants);
+
+ EXPECT_LE(fake_clock_.GetWallclockTime() +
+ TimeDelta::FromSeconds(
+ policy_test_constants.timeout_max_backoff_interval -
+ policy_test_constants.timeout_max_backoff_interval / 2),
+ next_update_check);
+ EXPECT_GE(fake_clock_.GetWallclockTime() +
+ TimeDelta::FromSeconds(
+ policy_test_constants.timeout_max_backoff_interval +
+ policy_test_constants.timeout_max_backoff_interval / 2),
+ next_update_check);
+}
+
+} // namespace chromeos_update_manager
diff --git a/update_manager/official_build_check_policy_impl.cc b/update_manager/official_build_check_policy_impl.cc
new file mode 100644
index 0000000..096f7bf
--- /dev/null
+++ b/update_manager/official_build_check_policy_impl.cc
@@ -0,0 +1,36 @@
+//
+// Copyright (C) 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/update_manager/official_build_check_policy_impl.h"
+
+namespace chromeos_update_manager {
+
+// Unofficial builds should not perform periodic update checks.
+EvalStatus OnlyUpdateOfficialBuildsPolicyImpl::UpdateCheckAllowed(
+ EvaluationContext* ec,
+ State* state,
+ std::string* error,
+ UpdateCheckParams* result) const {
+ const bool* is_official_build_p =
+ ec->GetValue(state->system_provider()->var_is_official_build());
+ if (is_official_build_p != nullptr && !(*is_official_build_p)) {
+ LOG(INFO) << "Unofficial build, blocking periodic update checks.";
+ return EvalStatus::kAskMeAgainLater;
+ }
+ return EvalStatus::kContinue;
+}
+
+} // namespace chromeos_update_manager
diff --git a/update_manager/official_build_check_policy_impl.h b/update_manager/official_build_check_policy_impl.h
new file mode 100644
index 0000000..6257209
--- /dev/null
+++ b/update_manager/official_build_check_policy_impl.h
@@ -0,0 +1,49 @@
+//
+// Copyright (C) 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_UPDATE_MANAGER_OFFICIAL_BUILD_CHECK_POLICY_IMPL_H_
+#define UPDATE_ENGINE_UPDATE_MANAGER_OFFICIAL_BUILD_CHECK_POLICY_IMPL_H_
+
+#include <string>
+
+#include "update_engine/update_manager/policy_utils.h"
+
+namespace chromeos_update_manager {
+
+// Unofficial builds should not perform periodic update checks.
+class OnlyUpdateOfficialBuildsPolicyImpl : public PolicyImplBase {
+ public:
+ OnlyUpdateOfficialBuildsPolicyImpl() = default;
+ ~OnlyUpdateOfficialBuildsPolicyImpl() override = default;
+
+ // Policy overrides.
+ EvalStatus UpdateCheckAllowed(EvaluationContext* ec,
+ State* state,
+ std::string* error,
+ UpdateCheckParams* result) const override;
+
+ protected:
+ std::string PolicyName() const override {
+ return "OnlyUpdateOfficialBuildsPolicyImpl";
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(OnlyUpdateOfficialBuildsPolicyImpl);
+};
+
+} // namespace chromeos_update_manager
+
+#endif // UPDATE_ENGINE_UPDATE_MANAGER_OFFICIAL_BUILD_CHECK_POLICY_IMPL_H_
diff --git a/update_manager/policy.cc b/update_manager/policy.cc
index 151c225..5f79a68 100644
--- a/update_manager/policy.cc
+++ b/update_manager/policy.cc
@@ -30,6 +30,8 @@
return "kSucceeded";
case EvalStatus::kAskMeAgainLater:
return "kAskMeAgainLater";
+ case EvalStatus::kContinue:
+ return "kContinue";
}
return "Invalid";
}
diff --git a/update_manager/policy.h b/update_manager/policy.h
index fae1494..b60c4da 100644
--- a/update_manager/policy.h
+++ b/update_manager/policy.h
@@ -22,6 +22,7 @@
#include <vector>
#include "update_engine/common/error_code.h"
+#include "update_engine/payload_consumer/install_plan.h"
#include "update_engine/update_manager/evaluation_context.h"
#include "update_engine/update_manager/state.h"
@@ -32,6 +33,7 @@
kFailed,
kSucceeded,
kAskMeAgainLater,
+ kContinue,
};
std::string ToString(EvalStatus status);
@@ -204,6 +206,9 @@
if (reinterpret_cast<typeof(&Policy::UpdateCheckAllowed)>(
policy_method) == &Policy::UpdateCheckAllowed)
return class_name + "UpdateCheckAllowed";
+ if (reinterpret_cast<typeof(&Policy::UpdateCanBeApplied)>(policy_method) ==
+ &Policy::UpdateCanBeApplied)
+ return class_name + "UpdateCanBeApplied";
if (reinterpret_cast<typeof(&Policy::UpdateCanStart)>(
policy_method) == &Policy::UpdateCanStart)
return class_name + "UpdateCanStart";
@@ -235,6 +240,17 @@
EvaluationContext* ec, State* state, std::string* error,
UpdateCheckParams* result) const = 0;
+ // UpdateCanBeApplied returns whether the given |install_plan| can be acted
+ // on at this time. The outcome (success, or the reason for deferring the
+ // update) is returned in |result|. The Policy may modify the passed-in
+ // |install_plan|, based on its implementation and on values provided by the
+ // EvaluationContext.
+ virtual EvalStatus UpdateCanBeApplied(
+ EvaluationContext* ec,
+ State* state,
+ std::string* error,
+ chromeos_update_engine::ErrorCode* result,
+ chromeos_update_engine::InstallPlan* install_plan) const = 0;
+
// Returns EvalStatus::kSucceeded if either an update can start being
// processed, or the attempt needs to be aborted. In cases where the update
// needs to wait for some condition to be satisfied, but none of the values
diff --git a/update_manager/policy_test_utils.cc b/update_manager/policy_test_utils.cc
new file mode 100644
index 0000000..fbfcb82
--- /dev/null
+++ b/update_manager/policy_test_utils.cc
@@ -0,0 +1,110 @@
+//
+// Copyright (C) 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/update_manager/policy_test_utils.h"
+
+#include <memory>
+#include <tuple>
+#include <vector>
+
+#include "update_engine/update_manager/next_update_check_policy_impl.h"
+
+using base::Time;
+using base::TimeDelta;
+using chromeos_update_engine::ErrorCode;
+using std::string;
+using std::tuple;
+using std::vector;
+
+namespace chromeos_update_manager {
+
+void UmPolicyTestBase::SetUp() {
+ loop_.SetAsCurrent();
+ SetUpDefaultClock();
+ eval_ctx_ = new EvaluationContext(&fake_clock_, TimeDelta::FromSeconds(5));
+ SetUpDefaultState();
+}
+
+void UmPolicyTestBase::TearDown() {
+ EXPECT_FALSE(loop_.PendingTasks());
+}
+
+// Sets the clock to fixed values.
+void UmPolicyTestBase::SetUpDefaultClock() {
+ fake_clock_.SetMonotonicTime(Time::FromInternalValue(12345678L));
+ fake_clock_.SetWallclockTime(Time::FromInternalValue(12345678901234L));
+}
+
+void UmPolicyTestBase::SetUpDefaultState() {
+ fake_state_.updater_provider()->var_updater_started_time()->reset(
+ new Time(fake_clock_.GetWallclockTime()));
+ fake_state_.updater_provider()->var_last_checked_time()->reset(
+ new Time(fake_clock_.GetWallclockTime()));
+ fake_state_.updater_provider()->var_consecutive_failed_update_checks()->reset(
+ new unsigned int(0)); // NOLINT(readability/casting)
+ fake_state_.updater_provider()->var_server_dictated_poll_interval()->reset(
+ new unsigned int(0)); // NOLINT(readability/casting)
+ fake_state_.updater_provider()->var_forced_update_requested()->reset(
+ new UpdateRequestStatus{UpdateRequestStatus::kNone});
+
+ // Chosen by fair dice roll. Guaranteed to be random.
+ fake_state_.random_provider()->var_seed()->reset(new uint64_t(4));
+}
+
+// Returns a default UpdateState structure:
+UpdateState UmPolicyTestBase::GetDefaultUpdateState(
+ TimeDelta first_seen_period) {
+ Time first_seen_time = fake_clock_.GetWallclockTime() - first_seen_period;
+ UpdateState update_state = UpdateState();
+
+ // This is a non-interactive check returning a delta payload, seen for the
+ // first time (|first_seen_period| ago). Clearly, there were no failed
+ // attempts so far.
+ update_state.is_interactive = false;
+ update_state.is_delta_payload = false;
+ update_state.first_seen = first_seen_time;
+ update_state.num_checks = 1;
+ update_state.num_failures = 0;
+ update_state.failures_last_updated = Time(); // Needs to be zero.
+ // There's a single HTTP download URL with a maximum of 10 retries.
+ update_state.download_urls = vector<string>{"http://fake/url/"};
+ update_state.download_errors_max = 10;
+ // Download was never attempted.
+ update_state.last_download_url_idx = -1;
+ update_state.last_download_url_num_errors = 0;
+ // There were no download errors.
+ update_state.download_errors = vector<tuple<int, ErrorCode, Time>>();
+ // P2P is not disabled by Omaha.
+ update_state.p2p_downloading_disabled = false;
+ update_state.p2p_sharing_disabled = false;
+ // P2P was not attempted.
+ update_state.p2p_num_attempts = 0;
+ update_state.p2p_first_attempted = Time();
+ // No active backoff period, backoff is not disabled by Omaha.
+ update_state.backoff_expiry = Time();
+ update_state.is_backoff_disabled = false;
+ // There is no active scattering wait period (max 7 days allowed) nor check
+ // threshold (none allowed).
+ update_state.scatter_wait_period = TimeDelta();
+ update_state.scatter_check_threshold = 0;
+ update_state.scatter_wait_period_max = TimeDelta::FromDays(7);
+ update_state.scatter_check_threshold_min = 0;
+ update_state.scatter_check_threshold_max = 0;
+
+ return update_state;
+}
+
+} // namespace chromeos_update_manager
diff --git a/update_manager/policy_test_utils.h b/update_manager/policy_test_utils.h
new file mode 100644
index 0000000..5b93f7b
--- /dev/null
+++ b/update_manager/policy_test_utils.h
@@ -0,0 +1,99 @@
+//
+// Copyright (C) 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_UPDATE_MANAGER_POLICY_TEST_UTILS_H_
+#define UPDATE_ENGINE_UPDATE_MANAGER_POLICY_TEST_UTILS_H_
+
+#include <memory>
+#include <string>
+
+#include <base/time/time.h>
+#include <brillo/message_loops/fake_message_loop.h>
+#include <gtest/gtest.h>
+
+#include "update_engine/common/fake_clock.h"
+#include "update_engine/update_manager/evaluation_context.h"
+#include "update_engine/update_manager/fake_state.h"
+#include "update_engine/update_manager/policy_utils.h"
+
+namespace chromeos_update_manager {
+
+class UmPolicyTestBase : public ::testing::Test {
+ protected:
+ UmPolicyTestBase() = default;
+
+ void SetUp() override;
+
+ void TearDown() override;
+
+ // Sets the clock to fixed values.
+ virtual void SetUpDefaultClock();
+
+ // Sets up the default state in fake_state_. Override to add Policy-specific
+ // items, but only after calling this class's implementation.
+ virtual void SetUpDefaultState();
+
+ // Returns a default UpdateState structure:
+ virtual UpdateState GetDefaultUpdateState(base::TimeDelta first_seen_period);
+
+ // Runs the passed |method| after resetting the EvaluationContext and expects
+ // it to return the |expected| return value.
+ template <typename T, typename R, typename... Args>
+ void ExpectStatus(EvalStatus expected, T method, R* result, Args... args) {
+ std::string error = "<None>";
+ eval_ctx_->ResetEvaluation();
+ EXPECT_EQ(expected,
+ (*method)(eval_ctx_.get(), &fake_state_, &error, result, args...))
+ << "Returned error: " << error
+ << "\nEvaluation context: " << eval_ctx_->DumpContext();
+ }
+
+ // Runs the passed |method| after resetting the EvaluationContext, in order
+ // to use the method to get a value for other testing (doesn't validate the
+ // return value, just returns it).
+ template <typename T, typename R, typename... Args>
+ EvalStatus CallMethodWithContext(T method, R* result, Args... args) {
+ std::string error = "<None>";
+ eval_ctx_->ResetEvaluation();
+ return (*method)(eval_ctx_.get(), &fake_state_, &error, result, args...);
+ }
+
+ // Runs the passed |policy_method| on the framework policy and expects it to
+ // return the |expected| return value.
+ template <typename T, typename R, typename... Args>
+ void ExpectPolicyStatus(EvalStatus expected,
+ T policy_method,
+ R* result,
+ Args... args) {
+ std::string error = "<None>";
+ eval_ctx_->ResetEvaluation();
+ EXPECT_EQ(expected,
+ (policy_.get()->*policy_method)(
+ eval_ctx_.get(), &fake_state_, &error, result, args...))
+ << "Returned error: " << error
+ << "\nEvaluation context: " << eval_ctx_->DumpContext();
+ }
+
+ brillo::FakeMessageLoop loop_{nullptr};
+ chromeos_update_engine::FakeClock fake_clock_;
+ FakeState fake_state_;
+ scoped_refptr<EvaluationContext> eval_ctx_;
+ std::unique_ptr<Policy> policy_;
+};
+
+} // namespace chromeos_update_manager
+
+#endif // UPDATE_ENGINE_UPDATE_MANAGER_POLICY_TEST_UTILS_H_
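
As a usage sketch, a test fixture derived from UmPolicyTestBase can exercise one of the new policy fragments as shown below; the fixture name and the specific expectation are assumptions for illustration, not tests added by this patch:

#include <memory>

#include "update_engine/update_manager/official_build_check_policy_impl.h"
#include "update_engine/update_manager/policy_test_utils.h"

namespace chromeos_update_manager {

// Hypothetical fixture: installs the fragment under test as |policy_| so that
// ExpectPolicyStatus() dispatches to it.
class UmOnlyUpdateOfficialBuildsPolicyImplTest : public UmPolicyTestBase {
 protected:
  UmOnlyUpdateOfficialBuildsPolicyImplTest() {
    policy_ = std::make_unique<OnlyUpdateOfficialBuildsPolicyImpl>();
  }
};

TEST_F(UmOnlyUpdateOfficialBuildsPolicyImplTest, UnofficialBuildsAreDeferred) {
  // Unofficial builds should get kAskMeAgainLater from this fragment.
  fake_state_.system_provider()->var_is_official_build()->reset(
      new bool(false));
  UpdateCheckParams result;
  ExpectPolicyStatus(
      EvalStatus::kAskMeAgainLater, &Policy::UpdateCheckAllowed, &result);
}

}  // namespace chromeos_update_manager
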
diff --git a/update_manager/policy_utils.h b/update_manager/policy_utils.h
index 960987e..eaf9ee9 100644
--- a/update_manager/policy_utils.h
+++ b/update_manager/policy_utils.h
@@ -17,6 +17,9 @@
#ifndef UPDATE_ENGINE_UPDATE_MANAGER_POLICY_UTILS_H_
#define UPDATE_ENGINE_UPDATE_MANAGER_POLICY_UTILS_H_
+#include <string>
+#include <vector>
+
#include "update_engine/update_manager/policy.h"
// Checks that the passed pointer value is not null, returning kFailed on the
@@ -35,4 +38,84 @@
} \
} while (false)
+namespace chromeos_update_manager {
+
+// Call the passed-in Policy method on a series of Policy implementations, until
+// one of them renders a decision by returning a value other than
+// |EvalStatus::kContinue|.
+template <typename T, typename R, typename... Args>
+EvalStatus ConsultPolicies(const std::vector<Policy const*> policies,
+ T policy_method,
+ EvaluationContext* ec,
+ State* state,
+ std::string* error,
+ R* result,
+ Args... args) {
+ for (auto policy : policies) {
+ EvalStatus status =
+ (policy->*policy_method)(ec, state, error, result, args...);
+ if (status != EvalStatus::kContinue) {
+ LOG(INFO) << "decision by " << policy->PolicyRequestName(policy_method);
+ return status;
+ }
+ }
+ return EvalStatus::kContinue;
+}
+
+// Base class implementation that returns |EvalStatus::kContinue| for all
+// decisions, to be used as a base-class for various Policy facets that only
+// pertain to certain situations. This might be better folded into Policy
+// instead of using pure-virtual methods on that class.
+class PolicyImplBase : public Policy {
+ public:
+ // Policy overrides.
+ EvalStatus UpdateCheckAllowed(EvaluationContext* ec,
+ State* state,
+ std::string* error,
+ UpdateCheckParams* result) const override {
+ return EvalStatus::kContinue;
+ };
+
+ EvalStatus UpdateCanBeApplied(
+ EvaluationContext* ec,
+ State* state,
+ std::string* error,
+ chromeos_update_engine::ErrorCode* result,
+ chromeos_update_engine::InstallPlan* install_plan) const override {
+ return EvalStatus::kContinue;
+ };
+
+ EvalStatus UpdateCanStart(EvaluationContext* ec,
+ State* state,
+ std::string* error,
+ UpdateDownloadParams* result,
+ UpdateState update_state) const override {
+ return EvalStatus::kContinue;
+ };
+
+ EvalStatus UpdateDownloadAllowed(EvaluationContext* ec,
+ State* state,
+ std::string* error,
+ bool* result) const override {
+ return EvalStatus::kContinue;
+ };
+
+ EvalStatus P2PEnabled(EvaluationContext* ec,
+ State* state,
+ std::string* error,
+ bool* result) const override {
+ return EvalStatus::kContinue;
+ };
+
+ EvalStatus P2PEnabledChanged(EvaluationContext* ec,
+ State* state,
+ std::string* error,
+ bool* result,
+ bool prev_result) const override {
+ return EvalStatus::kContinue;
+ };
+};
+
+} // namespace chromeos_update_manager
+
#endif // UPDATE_ENGINE_UPDATE_MANAGER_POLICY_UTILS_H_
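
A minimal sketch of how the fragments introduced by this change are meant to compose through ConsultPolicies and PolicyImplBase; the ordering and the timing constants below are illustrative assumptions, since this patch defines the fragments but does not itself rewire ChromeOSPolicy::UpdateCheckAllowed:

// Illustrative only: consult the gating fragments in order and fall through
// to the timing fragment when none of them renders a decision.
EvalStatus CompositeUpdateCheckAllowed(EvaluationContext* ec,
                                       State* state,
                                       std::string* error,
                                       UpdateCheckParams* result) {
  // Hypothetical timing constants; real policies supply their own values.
  const NextUpdateCheckPolicyConstants constants = {
      .timeout_initial_interval = 7 * 60,
      .timeout_periodic_interval = 45 * 60,
      .timeout_max_backoff_interval = 4 * 60 * 60,
      .timeout_regular_fuzz = 10 * 60,
      .attempt_backoff_max_interval_in_days = 16,
      .attempt_backoff_fuzz_in_hours = 12,
  };

  EnoughSlotsAbUpdatesPolicyImpl enough_slots_policy;
  OnlyUpdateOfficialBuildsPolicyImpl official_build_policy;
  InteractiveUpdatePolicyImpl interactive_policy;
  NextUpdateCheckTimePolicyImpl timing_policy(constants);

  EvalStatus status = ConsultPolicies({&enough_slots_policy,
                                       &official_build_policy,
                                       &interactive_policy,
                                       &timing_policy},
                                      &Policy::UpdateCheckAllowed,
                                      ec, state, error, result);
  if (status != EvalStatus::kContinue)
    return status;
  // Every fragment fell through: allow a regular periodic check.
  result->updates_enabled = true;
  result->is_interactive = false;
  return EvalStatus::kSucceeded;
}

Because each fragment is stateless apart from its constants, a real policy could equally hold these objects as members and reuse them across requests.
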
diff --git a/update_manager/real_device_policy_provider_unittest.cc b/update_manager/real_device_policy_provider_unittest.cc
index 71c95bb..167cbd9 100644
--- a/update_manager/real_device_policy_provider_unittest.cc
+++ b/update_manager/real_device_policy_provider_unittest.cc
@@ -18,7 +18,7 @@
#include <memory>
-#include <brillo/make_unique_ptr.h>
+#include <base/memory/ptr_util.h>
#include <brillo/message_loops/fake_message_loop.h>
#include <brillo/message_loops/message_loop.h>
#include <brillo/message_loops/message_loop_utils.h>
@@ -63,8 +63,7 @@
auto session_manager_proxy_mock =
new org::chromium::SessionManagerInterfaceProxyMock();
provider_.reset(new RealDevicePolicyProvider(
- brillo::make_unique_ptr(session_manager_proxy_mock),
- &mock_policy_provider_));
+ base::WrapUnique(session_manager_proxy_mock), &mock_policy_provider_));
#else
provider_.reset(new RealDevicePolicyProvider(&mock_policy_provider_));
#endif // USE_DBUS
diff --git a/update_manager/real_shill_provider_unittest.cc b/update_manager/real_shill_provider_unittest.cc
index e821dc7..6506923 100644
--- a/update_manager/real_shill_provider_unittest.cc
+++ b/update_manager/real_shill_provider_unittest.cc
@@ -18,8 +18,8 @@
#include <memory>
#include <utility>
+#include <base/memory/ptr_util.h>
#include <base/time/time.h>
-#include <brillo/make_unique_ptr.h>
#include <brillo/message_loops/fake_message_loop.h>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
@@ -94,7 +94,9 @@
now_exp.minute = 5;
now_exp.second = 33;
now_exp.millisecond = 675;
- return Time::FromLocalExploded(now_exp);
+ Time time;
+ ignore_result(Time::FromLocalExploded(now_exp, &time));
+ return time;
}
Time ConnChangedTime() {
@@ -261,9 +263,9 @@
EXPECT_CALL(*service_proxy_mock, GetProperties(_, _, _))
.WillOnce(DoAll(SetArgPointee<0>(reply_dict), Return(true)));
- fake_shill_proxy_->SetServiceForPath(
- dbus::ObjectPath(service_path),
- brillo::make_unique_ptr(service_proxy_mock));
+ fake_shill_proxy_->SetServiceForPath(dbus::ObjectPath(service_path),
+ base::WrapUnique(service_proxy_mock));
+
return service_proxy_mock;
}
diff --git a/update_manager/real_system_provider.cc b/update_manager/real_system_provider.cc
index 9b968ca..fdf7e86 100644
--- a/update_manager/real_system_provider.cc
+++ b/update_manager/real_system_provider.cc
@@ -20,9 +20,9 @@
#include <base/callback.h>
#include <base/logging.h>
#include <base/time/time.h>
-#if USE_LIBCROS
+#if USE_CHROME_KIOSK_APP
#include <libcros/dbus-proxies.h>
-#endif
+#endif // USE_CHROME_KIOSK_APP
#include "update_engine/common/utils.h"
#include "update_engine/update_manager/generic_variables.h"
@@ -124,7 +124,7 @@
bool RealSystemProvider::GetKioskAppRequiredPlatformVersion(
string* required_platform_version) {
-#if USE_LIBCROS
+#if USE_CHROME_KIOSK_APP
brillo::ErrorPtr error;
if (!libcros_proxy_->GetKioskAppRequiredPlatformVersion(
required_platform_version, &error)) {
@@ -132,7 +132,7 @@
required_platform_version->clear();
return false;
}
-#endif
+#endif // USE_CHROME_KIOSK_APP
return true;
}
diff --git a/update_manager/real_system_provider.h b/update_manager/real_system_provider.h
index a62e1ae..80a8615 100644
--- a/update_manager/real_system_provider.h
+++ b/update_manager/real_system_provider.h
@@ -40,8 +40,12 @@
chromeos_update_engine::BootControlInterface* boot_control,
org::chromium::LibCrosServiceInterfaceProxyInterface* libcros_proxy)
: hardware_(hardware),
+#if USE_CHROME_KIOSK_APP
boot_control_(boot_control),
libcros_proxy_(libcros_proxy) {}
+#else
+ boot_control_(boot_control) {}
+#endif // USE_CHROME_KIOSK_APP
// Initializes the provider and returns whether it succeeded.
bool Init();
@@ -78,8 +82,9 @@
chromeos_update_engine::HardwareInterface* const hardware_;
chromeos_update_engine::BootControlInterface* const boot_control_;
- org::chromium::LibCrosServiceInterfaceProxyInterface* const libcros_proxy_
- ALLOW_UNUSED_TYPE;
+#if USE_CHROME_KIOSK_APP
+ org::chromium::LibCrosServiceInterfaceProxyInterface* const libcros_proxy_;
+#endif // USE_CHROME_KIOSK_APP
DISALLOW_COPY_AND_ASSIGN(RealSystemProvider);
};
diff --git a/update_manager/real_system_provider_unittest.cc b/update_manager/real_system_provider_unittest.cc
index 821a6cc..103a35f 100644
--- a/update_manager/real_system_provider_unittest.cc
+++ b/update_manager/real_system_provider_unittest.cc
@@ -19,37 +19,36 @@
#include <memory>
#include <base/time/time.h>
-#include <brillo/make_unique_ptr.h>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "update_engine/common/fake_boot_control.h"
#include "update_engine/common/fake_hardware.h"
#include "update_engine/update_manager/umtest_utils.h"
-#if USE_LIBCROS
+#if USE_CHROME_KIOSK_APP
#include "libcros/dbus-proxies.h"
#include "libcros/dbus-proxy-mocks.h"
using org::chromium::LibCrosServiceInterfaceProxyMock;
-#endif // USE_LIBCROS
+#endif // USE_CHROME_KIOSK_APP
using std::unique_ptr;
using testing::_;
using testing::DoAll;
using testing::Return;
using testing::SetArgPointee;
-#if USE_LIBCROS
+#if USE_CHROME_KIOSK_APP
namespace {
const char kRequiredPlatformVersion[] ="1234.0.0";
} // namespace
-#endif // USE_LIBCROS
+#endif // USE_CHROME_KIOSK_APP
namespace chromeos_update_manager {
class UmRealSystemProviderTest : public ::testing::Test {
protected:
void SetUp() override {
-#if USE_LIBCROS
+#if USE_CHROME_KIOSK_APP
libcros_proxy_mock_.reset(new LibCrosServiceInterfaceProxyMock());
ON_CALL(*libcros_proxy_mock_,
GetKioskAppRequiredPlatformVersion(_, _, _))
@@ -61,7 +60,7 @@
#else
provider_.reset(
new RealSystemProvider(&fake_hardware_, &fake_boot_control_, nullptr));
-#endif // USE_LIBCROS
+#endif // USE_CHROME_KIOSK_APP
EXPECT_TRUE(provider_->Init());
}
@@ -69,9 +68,9 @@
chromeos_update_engine::FakeBootControl fake_boot_control_;
unique_ptr<RealSystemProvider> provider_;
-#if USE_LIBCROS
+#if USE_CHROME_KIOSK_APP
unique_ptr<LibCrosServiceInterfaceProxyMock> libcros_proxy_mock_;
-#endif // USE_LIBCROS
+#endif // USE_CHROME_KIOSK_APP
};
TEST_F(UmRealSystemProviderTest, InitTest) {
@@ -91,7 +90,7 @@
UmTestUtils::ExpectVariableHasValue(false, provider_->var_is_oobe_complete());
}
-#if USE_LIBCROS
+#if USE_CHROME_KIOSK_APP
TEST_F(UmRealSystemProviderTest, KioskRequiredPlatformVersion) {
UmTestUtils::ExpectVariableHasValue(
std::string(kRequiredPlatformVersion),
@@ -129,6 +128,6 @@
UmTestUtils::ExpectVariableHasValue(
std::string(), provider_->var_kiosk_required_platform_version());
}
-#endif
+#endif // USE_CHROME_KIOSK_APP
} // namespace chromeos_update_manager
diff --git a/update_manager/real_time_provider.cc b/update_manager/real_time_provider.cc
index ca3acad..db26816 100644
--- a/update_manager/real_time_provider.cc
+++ b/update_manager/real_time_provider.cc
@@ -43,7 +43,10 @@
Time::Exploded now_exp;
clock_->GetWallclockTime().LocalExplode(&now_exp);
now_exp.hour = now_exp.minute = now_exp.second = now_exp.millisecond = 0;
- return new Time(Time::FromLocalExploded(now_exp));
+ Time* now = new Time();
+ bool success = Time::FromLocalExploded(now_exp, now);
+ DCHECK(success);
+ return now;
}
private:
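For context on the Time change above: newer libchrome revisions declare base::Time::FromLocalExploded() with an out-parameter and a boolean result instead of returning a Time, which is why the provider now DCHECKs the conversion and the tests discard the result with ignore_result(). A minimal sketch of the pattern, assuming that signature; the function name here is illustrative only:

    #include <base/logging.h>
    #include <base/time/time.h>

    base::Time MidnightToday(const base::Time& now) {
      base::Time::Exploded exp;
      now.LocalExplode(&exp);
      exp.hour = exp.minute = exp.second = exp.millisecond = 0;
      base::Time midnight;
      // The conversion can fail for out-of-range exploded values, so the
      // boolean result is checked rather than silently dropped.
      if (!base::Time::FromLocalExploded(exp, &midnight))
        LOG(WARNING) << "FromLocalExploded failed; returning a null Time";
      return midnight;
    }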
diff --git a/update_manager/real_time_provider_unittest.cc b/update_manager/real_time_provider_unittest.cc
index 0e1ef34..f8db30b 100644
--- a/update_manager/real_time_provider_unittest.cc
+++ b/update_manager/real_time_provider_unittest.cc
@@ -51,7 +51,9 @@
now_exp.minute = 5;
now_exp.second = 33;
now_exp.millisecond = 675;
- return Time::FromLocalExploded(now_exp);
+ Time time;
+ ignore_result(Time::FromLocalExploded(now_exp, &time));
+ return time;
}
FakeClock fake_clock_;
@@ -66,7 +68,8 @@
exploded.minute = 0;
exploded.second = 0;
exploded.millisecond = 0;
- const Time expected = Time::FromLocalExploded(exploded);
+ Time expected;
+ ignore_result(Time::FromLocalExploded(exploded, &expected));
fake_clock_.SetWallclockTime(now);
UmTestUtils::ExpectVariableHasValue(expected, provider_->var_curr_date());
diff --git a/update_manager/real_updater_provider.cc b/update_manager/real_updater_provider.cc
index 1a3e65a..050bd42 100644
--- a/update_manager/real_updater_provider.cc
+++ b/update_manager/real_updater_provider.cc
@@ -25,10 +25,12 @@
#include <base/time/time.h>
#include <update_engine/dbus-constants.h>
+#include "update_engine/client_library/include/update_engine/update_status.h"
#include "update_engine/common/clock_interface.h"
#include "update_engine/common/prefs.h"
#include "update_engine/omaha_request_params.h"
#include "update_engine/update_attempter.h"
+#include "update_engine/update_status_utils.h"
using base::StringPrintf;
using base::Time;
@@ -36,6 +38,8 @@
using chromeos_update_engine::OmahaRequestParams;
using chromeos_update_engine::SystemState;
using std::string;
+using update_engine::UpdateAttemptFlags;
+using update_engine::UpdateEngineStatus;
namespace chromeos_update_manager {
@@ -60,27 +64,32 @@
class GetStatusHelper {
public:
GetStatusHelper(SystemState* system_state, string* errmsg) {
- is_success_ = system_state->update_attempter()->GetStatus(
- &last_checked_time_, &progress_, &update_status_, &new_version_,
- &payload_size_);
- if (!is_success_ && errmsg)
+ is_success_ =
+ system_state->update_attempter()->GetStatus(&update_engine_status_);
+ if (!is_success_ && errmsg) {
*errmsg = "Failed to get a status update from the update engine";
+ }
}
inline bool is_success() { return is_success_; }
- inline int64_t last_checked_time() { return last_checked_time_; }
- inline double progress() { return progress_; }
- inline const string& update_status() { return update_status_; }
- inline const string& new_version() { return new_version_; }
- inline int64_t payload_size() { return payload_size_; }
+ inline int64_t last_checked_time() {
+ return update_engine_status_.last_checked_time;
+ }
+ inline double progress() { return update_engine_status_.progress; }
+ inline const string update_status() {
+ return chromeos_update_engine::UpdateStatusToString(
+ update_engine_status_.status);
+ }
+ inline const string& new_version() {
+ return update_engine_status_.new_version;
+ }
+ inline uint64_t payload_size() {
+ return update_engine_status_.new_size_bytes;
+ }
private:
bool is_success_;
- int64_t last_checked_time_;
- double progress_;
- string update_status_;
- string new_version_;
- int64_t payload_size_;
+ UpdateEngineStatus update_engine_status_;
};
// A variable reporting the time when a last update check was issued.
@@ -196,24 +205,18 @@
};
// A variable reporting the size of the update being processed in bytes.
-class PayloadSizeVariable : public UpdaterVariableBase<int64_t> {
+class PayloadSizeVariable : public UpdaterVariableBase<uint64_t> {
public:
PayloadSizeVariable(const string& name, SystemState* system_state)
- : UpdaterVariableBase<int64_t>(name, kVariableModePoll, system_state) {}
+ : UpdaterVariableBase<uint64_t>(name, kVariableModePoll, system_state) {}
private:
- const int64_t* GetValue(TimeDelta /* timeout */, string* errmsg) override {
+ const uint64_t* GetValue(TimeDelta /* timeout */, string* errmsg) override {
GetStatusHelper raw(system_state(), errmsg);
if (!raw.is_success())
return nullptr;
- if (raw.payload_size() < 0) {
- if (errmsg)
- *errmsg = string("Invalid payload size: %" PRId64, raw.payload_size());
- return nullptr;
- }
-
- return new int64_t(raw.payload_size());
+ return new uint64_t(raw.payload_size());
}
DISALLOW_COPY_AND_ASSIGN(PayloadSizeVariable);
@@ -414,40 +417,66 @@
DISALLOW_COPY_AND_ASSIGN(ForcedUpdateRequestedVariable);
};
+// A variable returning the current update restrictions that are in effect.
+class UpdateRestrictionsVariable
+ : public UpdaterVariableBase<UpdateRestrictions> {
+ public:
+ UpdateRestrictionsVariable(const string& name, SystemState* system_state)
+ : UpdaterVariableBase<UpdateRestrictions>(
+ name, kVariableModePoll, system_state) {}
+
+ private:
+ const UpdateRestrictions* GetValue(TimeDelta /* timeout */,
+ string* /* errmsg */) override {
+ UpdateAttemptFlags attempt_flags =
+ system_state()->update_attempter()->GetCurrentUpdateAttemptFlags();
+ UpdateRestrictions restriction_flags = UpdateRestrictions::kNone;
+ // Don't blindly copy the whole value; test and set only the bits that
+ // should transfer from one set of flags to the other.
+ if (attempt_flags & UpdateAttemptFlags::kFlagRestrictDownload) {
+ restriction_flags = static_cast<UpdateRestrictions>(
+ restriction_flags | UpdateRestrictions::kRestrictDownloading);
+ }
+
+ return new UpdateRestrictions(restriction_flags);
+ }
+
+ DISALLOW_COPY_AND_ASSIGN(UpdateRestrictionsVariable);
+};
+
// RealUpdaterProvider methods.
RealUpdaterProvider::RealUpdaterProvider(SystemState* system_state)
- : system_state_(system_state),
- var_updater_started_time_("updater_started_time",
- system_state->clock()->GetWallclockTime()),
- var_last_checked_time_(
- new LastCheckedTimeVariable("last_checked_time", system_state_)),
- var_update_completed_time_(
- new UpdateCompletedTimeVariable("update_completed_time",
- system_state_)),
- var_progress_(new ProgressVariable("progress", system_state_)),
- var_stage_(new StageVariable("stage", system_state_)),
- var_new_version_(new NewVersionVariable("new_version", system_state_)),
- var_payload_size_(new PayloadSizeVariable("payload_size", system_state_)),
- var_curr_channel_(new CurrChannelVariable("curr_channel", system_state_)),
- var_new_channel_(new NewChannelVariable("new_channel", system_state_)),
- var_p2p_enabled_(
- new BooleanPrefVariable("p2p_enabled", system_state_->prefs(),
- chromeos_update_engine::kPrefsP2PEnabled,
- false)),
- var_cellular_enabled_(
- new BooleanPrefVariable(
- "cellular_enabled", system_state_->prefs(),
- chromeos_update_engine::kPrefsUpdateOverCellularPermission,
- false)),
- var_consecutive_failed_update_checks_(
- new ConsecutiveFailedUpdateChecksVariable(
- "consecutive_failed_update_checks", system_state_)),
- var_server_dictated_poll_interval_(
- new ServerDictatedPollIntervalVariable(
- "server_dictated_poll_interval", system_state_)),
- var_forced_update_requested_(
- new ForcedUpdateRequestedVariable(
- "forced_update_requested", system_state_)) {}
-
+ : system_state_(system_state),
+ var_updater_started_time_("updater_started_time",
+ system_state->clock()->GetWallclockTime()),
+ var_last_checked_time_(
+ new LastCheckedTimeVariable("last_checked_time", system_state_)),
+ var_update_completed_time_(new UpdateCompletedTimeVariable(
+ "update_completed_time", system_state_)),
+ var_progress_(new ProgressVariable("progress", system_state_)),
+ var_stage_(new StageVariable("stage", system_state_)),
+ var_new_version_(new NewVersionVariable("new_version", system_state_)),
+ var_payload_size_(new PayloadSizeVariable("payload_size", system_state_)),
+ var_curr_channel_(new CurrChannelVariable("curr_channel", system_state_)),
+ var_new_channel_(new NewChannelVariable("new_channel", system_state_)),
+ var_p2p_enabled_(
+ new BooleanPrefVariable("p2p_enabled",
+ system_state_->prefs(),
+ chromeos_update_engine::kPrefsP2PEnabled,
+ false)),
+ var_cellular_enabled_(new BooleanPrefVariable(
+ "cellular_enabled",
+ system_state_->prefs(),
+ chromeos_update_engine::kPrefsUpdateOverCellularPermission,
+ false)),
+ var_consecutive_failed_update_checks_(
+ new ConsecutiveFailedUpdateChecksVariable(
+ "consecutive_failed_update_checks", system_state_)),
+ var_server_dictated_poll_interval_(new ServerDictatedPollIntervalVariable(
+ "server_dictated_poll_interval", system_state_)),
+ var_forced_update_requested_(new ForcedUpdateRequestedVariable(
+ "forced_update_requested", system_state_)),
+ var_update_restrictions_(new UpdateRestrictionsVariable(
+ "update_restrictions", system_state_)) {}
} // namespace chromeos_update_manager
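The GetStatusHelper rewrite assumes update_engine::UpdateEngineStatus (declared in the update_status.h header added above) carries at least the fields read here: last_checked_time, progress, status, new_version, and new_size_bytes. A hedged sketch of populating and reading such a snapshot with illustrative values:

    update_engine::UpdateEngineStatus status;
    status.last_checked_time = 1500000000;  // seconds since the epoch
    status.progress = 0.3;
    status.status = update_engine::UpdateStatus::DOWNLOADING;
    status.new_version = "1.2.0";
    status.new_size_bytes = 567890;

GetStatusHelper wraps one such snapshot, so every accessor reads a mutually consistent set of values instead of five separate out-parameters, and update_status() converts the enum to its string form through chromeos_update_engine::UpdateStatusToString(); the exact string produced is not verified here.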
diff --git a/update_manager/real_updater_provider.h b/update_manager/real_updater_provider.h
index b99bcc5..5e3e27b 100644
--- a/update_manager/real_updater_provider.h
+++ b/update_manager/real_updater_provider.h
@@ -64,7 +64,7 @@
return var_new_version_.get();
}
- Variable<int64_t>* var_payload_size() override {
+ Variable<uint64_t>* var_payload_size() override {
return var_payload_size_.get();
}
@@ -96,6 +96,10 @@
return var_forced_update_requested_.get();
}
+ Variable<UpdateRestrictions>* var_update_restrictions() override {
+ return var_update_restrictions_.get();
+ }
+
private:
// A pointer to the update engine's system state aggregator.
chromeos_update_engine::SystemState* system_state_;
@@ -107,7 +111,7 @@
std::unique_ptr<Variable<double>> var_progress_;
std::unique_ptr<Variable<Stage>> var_stage_;
std::unique_ptr<Variable<std::string>> var_new_version_;
- std::unique_ptr<Variable<int64_t>> var_payload_size_;
+ std::unique_ptr<Variable<uint64_t>> var_payload_size_;
std::unique_ptr<Variable<std::string>> var_curr_channel_;
std::unique_ptr<Variable<std::string>> var_new_channel_;
std::unique_ptr<Variable<bool>> var_p2p_enabled_;
@@ -115,6 +119,7 @@
std::unique_ptr<Variable<unsigned int>> var_consecutive_failed_update_checks_;
std::unique_ptr<Variable<unsigned int>> var_server_dictated_poll_interval_;
std::unique_ptr<Variable<UpdateRequestStatus>> var_forced_update_requested_;
+ std::unique_ptr<Variable<UpdateRestrictions>> var_update_restrictions_;
DISALLOW_COPY_AND_ASSIGN(RealUpdaterProvider);
};
diff --git a/update_manager/real_updater_provider_unittest.cc b/update_manager/real_updater_provider_unittest.cc
index 14eb30b..b653885 100644
--- a/update_manager/real_updater_provider_unittest.cc
+++ b/update_manager/real_updater_provider_unittest.cc
@@ -38,9 +38,11 @@
using chromeos_update_engine::OmahaRequestParams;
using std::string;
using std::unique_ptr;
+using testing::_;
+using testing::DoAll;
using testing::Return;
using testing::SetArgPointee;
-using testing::_;
+using update_engine::UpdateAttemptFlags;
namespace {
@@ -55,7 +57,9 @@
now_exp.minute = 5;
now_exp.second = 33;
now_exp.millisecond = 675;
- return Time::FromLocalExploded(now_exp);
+ Time time;
+ ignore_result(Time::FromLocalExploded(now_exp, &time));
+ return time;
}
// Rounds down a timestamp to the nearest second. This is useful when faking
@@ -64,7 +68,29 @@
Time::Exploded exp;
time.LocalExplode(&exp);
exp.millisecond = 0;
- return Time::FromLocalExploded(exp);
+ Time rounded_time;
+ ignore_result(Time::FromLocalExploded(exp, &rounded_time));
+ return rounded_time;
+}
+
+ACTION_P(ActionSetUpdateEngineStatusLastCheckedTime, time) {
+ arg0->last_checked_time = time;
+}
+
+ACTION_P(ActionSetUpdateEngineStatusProgress, progress) {
+ arg0->progress = progress;
+}
+
+ACTION_P(ActionSetUpdateEngineStatusStatus, status) {
+ arg0->status = status;
+}
+
+ACTION_P(ActionSetUpdateEngineStatusNewVersion, new_version) {
+ arg0->new_version = new_version;
+}
+
+ACTION_P(ActionSetUpdateEngineStatusNewSizeBytes, new_size_bytes) {
+ arg0->new_size_bytes = new_size_bytes;
}
} // namespace
@@ -116,225 +142,189 @@
}
TEST_F(UmRealUpdaterProviderTest, GetLastCheckedTimeOkay) {
- EXPECT_CALL(*fake_sys_state_.mock_update_attempter(),
- GetStatus(_, _, _, _, _))
- .WillOnce(DoAll(SetArgPointee<0>(FixedTime().ToTimeT()), Return(true)));
+ EXPECT_CALL(*fake_sys_state_.mock_update_attempter(), GetStatus(_))
+ .WillOnce(DoAll(
+ ActionSetUpdateEngineStatusLastCheckedTime(FixedTime().ToTimeT()),
+ Return(true)));
UmTestUtils::ExpectVariableHasValue(RoundedToSecond(FixedTime()),
provider_->var_last_checked_time());
}
TEST_F(UmRealUpdaterProviderTest, GetLastCheckedTimeFailNoValue) {
- EXPECT_CALL(*fake_sys_state_.mock_update_attempter(),
- GetStatus(_, _, _, _, _))
+ EXPECT_CALL(*fake_sys_state_.mock_update_attempter(), GetStatus(_))
.WillOnce(Return(false));
UmTestUtils::ExpectVariableNotSet(provider_->var_last_checked_time());
}
TEST_F(UmRealUpdaterProviderTest, GetProgressOkayMin) {
- EXPECT_CALL(*fake_sys_state_.mock_update_attempter(),
- GetStatus(_, _, _, _, _))
- .WillOnce(DoAll(SetArgPointee<1>(0.0), Return(true)));
+ EXPECT_CALL(*fake_sys_state_.mock_update_attempter(), GetStatus(_))
+ .WillOnce(DoAll(ActionSetUpdateEngineStatusProgress(0.0), Return(true)));
UmTestUtils::ExpectVariableHasValue(0.0, provider_->var_progress());
}
TEST_F(UmRealUpdaterProviderTest, GetProgressOkayMid) {
- EXPECT_CALL(*fake_sys_state_.mock_update_attempter(),
- GetStatus(_, _, _, _, _))
- .WillOnce(DoAll(SetArgPointee<1>(0.3), Return(true)));
+ EXPECT_CALL(*fake_sys_state_.mock_update_attempter(), GetStatus(_))
+ .WillOnce(DoAll(ActionSetUpdateEngineStatusProgress(0.3), Return(true)));
UmTestUtils::ExpectVariableHasValue(0.3, provider_->var_progress());
}
TEST_F(UmRealUpdaterProviderTest, GetProgressOkayMax) {
- EXPECT_CALL(*fake_sys_state_.mock_update_attempter(),
- GetStatus(_, _, _, _, _))
- .WillOnce(DoAll(SetArgPointee<1>(1.0), Return(true)));
+ EXPECT_CALL(*fake_sys_state_.mock_update_attempter(), GetStatus(_))
+ .WillOnce(DoAll(ActionSetUpdateEngineStatusProgress(1.0), Return(true)));
UmTestUtils::ExpectVariableHasValue(1.0, provider_->var_progress());
}
TEST_F(UmRealUpdaterProviderTest, GetProgressFailNoValue) {
- EXPECT_CALL(*fake_sys_state_.mock_update_attempter(),
- GetStatus(_, _, _, _, _))
+ EXPECT_CALL(*fake_sys_state_.mock_update_attempter(), GetStatus(_))
.WillOnce(Return(false));
UmTestUtils::ExpectVariableNotSet(provider_->var_progress());
}
TEST_F(UmRealUpdaterProviderTest, GetProgressFailTooSmall) {
- EXPECT_CALL(*fake_sys_state_.mock_update_attempter(),
- GetStatus(_, _, _, _, _))
- .WillOnce(DoAll(SetArgPointee<1>(-2.0), Return(true)));
+ EXPECT_CALL(*fake_sys_state_.mock_update_attempter(), GetStatus(_))
+ .WillOnce(DoAll(ActionSetUpdateEngineStatusProgress(-2.0), Return(true)));
UmTestUtils::ExpectVariableNotSet(provider_->var_progress());
}
TEST_F(UmRealUpdaterProviderTest, GetProgressFailTooBig) {
- EXPECT_CALL(*fake_sys_state_.mock_update_attempter(),
- GetStatus(_, _, _, _, _))
- .WillOnce(DoAll(SetArgPointee<1>(2.0), Return(true)));
+ EXPECT_CALL(*fake_sys_state_.mock_update_attempter(), GetStatus(_))
+ .WillOnce(DoAll(ActionSetUpdateEngineStatusProgress(2.0), Return(true)));
UmTestUtils::ExpectVariableNotSet(provider_->var_progress());
}
TEST_F(UmRealUpdaterProviderTest, GetStageOkayIdle) {
- EXPECT_CALL(*fake_sys_state_.mock_update_attempter(),
- GetStatus(_, _, _, _, _))
- .WillOnce(DoAll(SetArgPointee<2>(update_engine::kUpdateStatusIdle),
- Return(true)));
+ EXPECT_CALL(*fake_sys_state_.mock_update_attempter(), GetStatus(_))
+ .WillOnce(DoAll(
+ ActionSetUpdateEngineStatusStatus(update_engine::UpdateStatus::IDLE),
+ Return(true)));
UmTestUtils::ExpectVariableHasValue(Stage::kIdle, provider_->var_stage());
}
TEST_F(UmRealUpdaterProviderTest, GetStageOkayCheckingForUpdate) {
- EXPECT_CALL(*fake_sys_state_.mock_update_attempter(),
- GetStatus(_, _, _, _, _))
- .WillOnce(DoAll(
- SetArgPointee<2>(update_engine::kUpdateStatusCheckingForUpdate),
- Return(true)));
+ EXPECT_CALL(*fake_sys_state_.mock_update_attempter(), GetStatus(_))
+ .WillOnce(DoAll(ActionSetUpdateEngineStatusStatus(
+ update_engine::UpdateStatus::CHECKING_FOR_UPDATE),
+ Return(true)));
UmTestUtils::ExpectVariableHasValue(Stage::kCheckingForUpdate,
provider_->var_stage());
}
TEST_F(UmRealUpdaterProviderTest, GetStageOkayUpdateAvailable) {
- EXPECT_CALL(*fake_sys_state_.mock_update_attempter(),
- GetStatus(_, _, _, _, _))
- .WillOnce(DoAll(
- SetArgPointee<2>(update_engine::kUpdateStatusUpdateAvailable),
- Return(true)));
+ EXPECT_CALL(*fake_sys_state_.mock_update_attempter(), GetStatus(_))
+ .WillOnce(DoAll(ActionSetUpdateEngineStatusStatus(
+ update_engine::UpdateStatus::UPDATE_AVAILABLE),
+ Return(true)));
UmTestUtils::ExpectVariableHasValue(Stage::kUpdateAvailable,
provider_->var_stage());
}
TEST_F(UmRealUpdaterProviderTest, GetStageOkayDownloading) {
- EXPECT_CALL(*fake_sys_state_.mock_update_attempter(),
- GetStatus(_, _, _, _, _))
- .WillOnce(DoAll(SetArgPointee<2>(update_engine::kUpdateStatusDownloading),
+ EXPECT_CALL(*fake_sys_state_.mock_update_attempter(), GetStatus(_))
+ .WillOnce(DoAll(ActionSetUpdateEngineStatusStatus(
+ update_engine::UpdateStatus::DOWNLOADING),
Return(true)));
UmTestUtils::ExpectVariableHasValue(Stage::kDownloading,
provider_->var_stage());
}
TEST_F(UmRealUpdaterProviderTest, GetStageOkayVerifying) {
- EXPECT_CALL(*fake_sys_state_.mock_update_attempter(),
- GetStatus(_, _, _, _, _))
- .WillOnce(DoAll(SetArgPointee<2>(update_engine::kUpdateStatusVerifying),
+ EXPECT_CALL(*fake_sys_state_.mock_update_attempter(), GetStatus(_))
+ .WillOnce(DoAll(ActionSetUpdateEngineStatusStatus(
+ update_engine::UpdateStatus::VERIFYING),
Return(true)));
UmTestUtils::ExpectVariableHasValue(Stage::kVerifying,
provider_->var_stage());
}
TEST_F(UmRealUpdaterProviderTest, GetStageOkayFinalizing) {
- EXPECT_CALL(*fake_sys_state_.mock_update_attempter(),
- GetStatus(_, _, _, _, _))
- .WillOnce(DoAll(SetArgPointee<2>(update_engine::kUpdateStatusFinalizing),
+ EXPECT_CALL(*fake_sys_state_.mock_update_attempter(), GetStatus(_))
+ .WillOnce(DoAll(ActionSetUpdateEngineStatusStatus(
+ update_engine::UpdateStatus::FINALIZING),
Return(true)));
UmTestUtils::ExpectVariableHasValue(Stage::kFinalizing,
provider_->var_stage());
}
TEST_F(UmRealUpdaterProviderTest, GetStageOkayUpdatedNeedReboot) {
- EXPECT_CALL(*fake_sys_state_.mock_update_attempter(),
- GetStatus(_, _, _, _, _))
- .WillOnce(DoAll(
- SetArgPointee<2>(update_engine::kUpdateStatusUpdatedNeedReboot),
- Return(true)));
+ EXPECT_CALL(*fake_sys_state_.mock_update_attempter(), GetStatus(_))
+ .WillOnce(DoAll(ActionSetUpdateEngineStatusStatus(
+ update_engine::UpdateStatus::UPDATED_NEED_REBOOT),
+ Return(true)));
UmTestUtils::ExpectVariableHasValue(Stage::kUpdatedNeedReboot,
provider_->var_stage());
}
TEST_F(UmRealUpdaterProviderTest, GetStageOkayReportingErrorEvent) {
- EXPECT_CALL(*fake_sys_state_.mock_update_attempter(),
- GetStatus(_, _, _, _, _))
- .WillOnce(DoAll(
- SetArgPointee<2>(update_engine::kUpdateStatusReportingErrorEvent),
- Return(true)));
+ EXPECT_CALL(*fake_sys_state_.mock_update_attempter(), GetStatus(_))
+ .WillOnce(DoAll(ActionSetUpdateEngineStatusStatus(
+ update_engine::UpdateStatus::REPORTING_ERROR_EVENT),
+ Return(true)));
UmTestUtils::ExpectVariableHasValue(Stage::kReportingErrorEvent,
provider_->var_stage());
}
TEST_F(UmRealUpdaterProviderTest, GetStageOkayAttemptingRollback) {
- EXPECT_CALL(*fake_sys_state_.mock_update_attempter(),
- GetStatus(_, _, _, _, _))
- .WillOnce(DoAll(
- SetArgPointee<2>(update_engine::kUpdateStatusAttemptingRollback),
- Return(true)));
+ EXPECT_CALL(*fake_sys_state_.mock_update_attempter(), GetStatus(_))
+ .WillOnce(DoAll(ActionSetUpdateEngineStatusStatus(
+ update_engine::UpdateStatus::ATTEMPTING_ROLLBACK),
+ Return(true)));
UmTestUtils::ExpectVariableHasValue(Stage::kAttemptingRollback,
provider_->var_stage());
}
TEST_F(UmRealUpdaterProviderTest, GetStageFailNoValue) {
- EXPECT_CALL(*fake_sys_state_.mock_update_attempter(),
- GetStatus(_, _, _, _, _))
+ EXPECT_CALL(*fake_sys_state_.mock_update_attempter(), GetStatus(_))
.WillOnce(Return(false));
UmTestUtils::ExpectVariableNotSet(provider_->var_stage());
}
-TEST_F(UmRealUpdaterProviderTest, GetStageFailUnknown) {
- EXPECT_CALL(*fake_sys_state_.mock_update_attempter(),
- GetStatus(_, _, _, _, _))
- .WillOnce(DoAll(SetArgPointee<2>("FooUpdateEngineState"),
- Return(true)));
- UmTestUtils::ExpectVariableNotSet(provider_->var_stage());
-}
-
-TEST_F(UmRealUpdaterProviderTest, GetStageFailEmpty) {
- EXPECT_CALL(*fake_sys_state_.mock_update_attempter(),
- GetStatus(_, _, _, _, _))
- .WillOnce(DoAll(SetArgPointee<2>(""), Return(true)));
- UmTestUtils::ExpectVariableNotSet(provider_->var_stage());
-}
-
TEST_F(UmRealUpdaterProviderTest, GetNewVersionOkay) {
- EXPECT_CALL(*fake_sys_state_.mock_update_attempter(),
- GetStatus(_, _, _, _, _))
- .WillOnce(DoAll(SetArgPointee<3>("1.2.0"), Return(true)));
+ EXPECT_CALL(*fake_sys_state_.mock_update_attempter(), GetStatus(_))
+ .WillOnce(
+ DoAll(ActionSetUpdateEngineStatusNewVersion("1.2.0"), Return(true)));
UmTestUtils::ExpectVariableHasValue(string("1.2.0"),
provider_->var_new_version());
}
TEST_F(UmRealUpdaterProviderTest, GetNewVersionFailNoValue) {
- EXPECT_CALL(*fake_sys_state_.mock_update_attempter(),
- GetStatus(_, _, _, _, _))
+ EXPECT_CALL(*fake_sys_state_.mock_update_attempter(), GetStatus(_))
.WillOnce(Return(false));
UmTestUtils::ExpectVariableNotSet(provider_->var_new_version());
}
TEST_F(UmRealUpdaterProviderTest, GetPayloadSizeOkayZero) {
- EXPECT_CALL(*fake_sys_state_.mock_update_attempter(),
- GetStatus(_, _, _, _, _))
- .WillOnce(DoAll(SetArgPointee<4>(static_cast<int64_t>(0)), Return(true)));
- UmTestUtils::ExpectVariableHasValue(static_cast<int64_t>(0),
+ EXPECT_CALL(*fake_sys_state_.mock_update_attempter(), GetStatus(_))
+ .WillOnce(DoAll(
+ ActionSetUpdateEngineStatusNewSizeBytes(static_cast<uint64_t>(0)),
+ Return(true)));
+ UmTestUtils::ExpectVariableHasValue(static_cast<uint64_t>(0),
provider_->var_payload_size());
}
TEST_F(UmRealUpdaterProviderTest, GetPayloadSizeOkayArbitrary) {
- EXPECT_CALL(*fake_sys_state_.mock_update_attempter(),
- GetStatus(_, _, _, _, _))
- .WillOnce(DoAll(SetArgPointee<4>(static_cast<int64_t>(567890)),
+ EXPECT_CALL(*fake_sys_state_.mock_update_attempter(), GetStatus(_))
+ .WillOnce(DoAll(ActionSetUpdateEngineStatusNewSizeBytes(
+ static_cast<uint64_t>(567890)),
Return(true)));
- UmTestUtils::ExpectVariableHasValue(static_cast<int64_t>(567890),
+ UmTestUtils::ExpectVariableHasValue(static_cast<uint64_t>(567890),
provider_->var_payload_size());
}
TEST_F(UmRealUpdaterProviderTest, GetPayloadSizeOkayTwoGigabytes) {
- EXPECT_CALL(*fake_sys_state_.mock_update_attempter(),
- GetStatus(_, _, _, _, _))
- .WillOnce(DoAll(SetArgPointee<4>(static_cast<int64_t>(1) << 31),
+ EXPECT_CALL(*fake_sys_state_.mock_update_attempter(), GetStatus(_))
+ .WillOnce(DoAll(ActionSetUpdateEngineStatusNewSizeBytes(
+ static_cast<uint64_t>(1) << 31),
Return(true)));
- UmTestUtils::ExpectVariableHasValue(static_cast<int64_t>(1) << 31,
+ UmTestUtils::ExpectVariableHasValue(static_cast<uint64_t>(1) << 31,
provider_->var_payload_size());
}
TEST_F(UmRealUpdaterProviderTest, GetPayloadSizeFailNoValue) {
- EXPECT_CALL(*fake_sys_state_.mock_update_attempter(),
- GetStatus(_, _, _, _, _))
+ EXPECT_CALL(*fake_sys_state_.mock_update_attempter(), GetStatus(_))
.WillOnce(Return(false));
UmTestUtils::ExpectVariableNotSet(provider_->var_payload_size());
}
-TEST_F(UmRealUpdaterProviderTest, GetPayloadSizeFailNegative) {
- EXPECT_CALL(*fake_sys_state_.mock_update_attempter(),
- GetStatus(_, _, _, _, _))
- .WillOnce(DoAll(SetArgPointee<4>(static_cast<int64_t>(-1024)),
- Return(true)));
- UmTestUtils::ExpectVariableNotSet(provider_->var_payload_size());
-}
-
TEST_F(UmRealUpdaterProviderTest, GetCurrChannelOkay) {
const string kChannelName("foo-channel");
OmahaRequestParams request_params(&fake_sys_state_);
@@ -440,4 +430,20 @@
kPollInterval, provider_->var_server_dictated_poll_interval());
}
+TEST_F(UmRealUpdaterProviderTest, GetUpdateRestrictions) {
+ EXPECT_CALL(*fake_sys_state_.mock_update_attempter(),
+ GetCurrentUpdateAttemptFlags())
+ .WillRepeatedly(Return(UpdateAttemptFlags::kFlagRestrictDownload |
+ UpdateAttemptFlags::kFlagNonInteractive));
+ UmTestUtils::ExpectVariableHasValue(UpdateRestrictions::kRestrictDownloading,
+ provider_->var_update_restrictions());
+}
+
+TEST_F(UmRealUpdaterProviderTest, GetUpdateRestrictionsNone) {
+ EXPECT_CALL(*fake_sys_state_.mock_update_attempter(),
+ GetCurrentUpdateAttemptFlags())
+ .WillRepeatedly(Return(UpdateAttemptFlags::kNone));
+ UmTestUtils::ExpectVariableHasValue(UpdateRestrictions::kNone,
+ provider_->var_update_restrictions());
+}
} // namespace chromeos_update_manager
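The unittest changes above replace positional SetArgPointee<N> actions with small ACTION_P helpers because GetStatus() now takes a single struct pointer. On gmock versions that accept callables in WillOnce(), the same effect can be had with an inline lambda; a sketch under that assumption:

    EXPECT_CALL(*fake_sys_state_.mock_update_attempter(), GetStatus(_))
        .WillOnce(testing::Invoke([](update_engine::UpdateEngineStatus* out) {
          out->progress = 0.3;       // fields mirror the ACTION_P helpers above
          out->new_version = "1.2.0";
          return true;               // GetStatus() reports success
        }));

The ACTION_P form used in the tests has the advantage of being reusable across many EXPECT_CALLs and of reading close to the SetArgPointee style it replaces.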
diff --git a/update_manager/state_factory.cc b/update_manager/state_factory.cc
index 70fc80b..208ed51 100644
--- a/update_manager/state_factory.cc
+++ b/update_manager/state_factory.cc
@@ -19,7 +19,6 @@
#include <memory>
#include <base/logging.h>
-#include <brillo/make_unique_ptr.h>
#if USE_DBUS
#include <session_manager/dbus-proxies.h>
#endif // USE_DBUS
@@ -57,8 +56,7 @@
chromeos_update_engine::DBusConnection::Get()->GetDBus();
unique_ptr<RealDevicePolicyProvider> device_policy_provider(
new RealDevicePolicyProvider(
- brillo::make_unique_ptr(
- new org::chromium::SessionManagerInterfaceProxy(bus)),
+ std::make_unique<org::chromium::SessionManagerInterfaceProxy>(bus),
policy_provider));
#else
unique_ptr<RealDevicePolicyProvider> device_policy_provider(
@@ -73,6 +71,7 @@
unique_ptr<RealRandomProvider> random_provider(new RealRandomProvider());
unique_ptr<RealSystemProvider> system_provider(new RealSystemProvider(
system_state->hardware(), system_state->boot_control(), libcros_proxy));
+
unique_ptr<RealTimeProvider> time_provider(new RealTimeProvider(clock));
unique_ptr<RealUpdaterProvider> updater_provider(
new RealUpdaterProvider(system_state));
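The state_factory.cc change is a mechanical migration from the libbrillo helper to the standard library equivalent, assuming C++14 or later is enabled for this target; in sketch form:

    // Before: brillo::make_unique_ptr(new org::chromium::SessionManagerInterfaceProxy(bus))
    // After:
    auto proxy =
        std::make_unique<org::chromium::SessionManagerInterfaceProxy>(bus);

Besides dropping the extra header, std::make_unique avoids spelling the type twice and keeps the allocation exception-safe inside larger expressions.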
diff --git a/update_manager/update_manager.cc b/update_manager/update_manager.cc
index 8e9b221..25f3216 100644
--- a/update_manager/update_manager.cc
+++ b/update_manager/update_manager.cc
@@ -16,7 +16,11 @@
#include "update_engine/update_manager/update_manager.h"
+#ifdef __ANDROID__
+#include "update_engine/update_manager/android_things_policy.h"
+#else
#include "update_engine/update_manager/chromeos_policy.h"
+#endif // __ANDROID__
#include "update_engine/update_manager/state.h"
namespace chromeos_update_manager {
@@ -28,9 +32,11 @@
evaluation_timeout_(evaluation_timeout),
expiration_timeout_(expiration_timeout),
weak_ptr_factory_(this) {
- // TODO(deymo): Make it possible to replace this policy with a different
- // implementation with a build-time flag.
+#ifdef __ANDROID__
+ policy_.reset(new AndroidThingsPolicy());
+#else
policy_.reset(new ChromeOSPolicy());
+#endif // __ANDROID__
}
UpdateManager::~UpdateManager() {
diff --git a/update_manager/update_manager_unittest.cc b/update_manager/update_manager_unittest.cc
index 03f1610..c2766ea 100644
--- a/update_manager/update_manager_unittest.cc
+++ b/update_manager/update_manager_unittest.cc
@@ -67,7 +67,9 @@
now_exp.minute = 5;
now_exp.second = 33;
now_exp.millisecond = 675;
- return Time::FromLocalExploded(now_exp);
+ Time time;
+ ignore_result(Time::FromLocalExploded(now_exp, &time));
+ return time;
}
} // namespace
diff --git a/update_manager/updater_provider.h b/update_manager/updater_provider.h
index 8048d38..cb62623 100644
--- a/update_manager/updater_provider.h
+++ b/update_manager/updater_provider.h
@@ -44,6 +44,12 @@
kPeriodic,
};
+// These enum values are a bit-field.
+enum UpdateRestrictions : int {
+ kNone,
+ kRestrictDownloading = (1 << 0),
+};
+
// Provider for Chrome OS update related information.
class UpdaterProvider : public Provider {
public:
@@ -79,7 +85,7 @@
// A variable returning the update payload size. The payload size is
// guaranteed to be non-negative.
- virtual Variable<int64_t>* var_payload_size() = 0;
+ virtual Variable<uint64_t>* var_payload_size() = 0;
// A variable returning the current channel.
virtual Variable<std::string>* var_curr_channel() = 0;
@@ -105,6 +111,10 @@
// scheduled update.
virtual Variable<UpdateRequestStatus>* var_forced_update_requested() = 0;
+ // A variable that returns the update restriction flags that are set
+ // for all updates.
+ virtual Variable<UpdateRestrictions>* var_update_restrictions() = 0;
+
protected:
UpdaterProvider() {}
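Since UpdateRestrictions is declared as a bit-field, consumers of var_update_restrictions() are expected to test individual bits rather than compare the whole value once more than one restriction exists. A minimal, illustrative check, not taken from an existing policy:

    bool IsDownloadRestricted(UpdateRestrictions restrictions) {
      // Unscoped enum with an int underlying type, so bitwise tests are
      // well defined; combining flags uses the same static_cast pattern as
      // UpdateRestrictionsVariable in real_updater_provider.cc.
      return (restrictions & UpdateRestrictions::kRestrictDownloading) != 0;
    }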
diff --git a/update_metadata.proto b/update_metadata.proto
index 596a04e..b5d6c59 100644
--- a/update_metadata.proto
+++ b/update_metadata.proto
@@ -77,10 +77,15 @@
// - REPLACE_XZ: Replace the dst_extents with the contents of the attached
// xz file after decompression. The xz file should only use crc32 or no crc at
// all to be compatible with xz-embedded.
+// - PUFFDIFF: Read the data in src_extents in the old partition, perform
+// puffpatch with the attached data and write the new data to dst_extents in
+// the new partition.
//
// The operations allowed in the payload (supported by the client) depend on the
// major and minor version. See InstallOperation.Type below for details.
+syntax = "proto2";
+
package chromeos_update_engine;
option optimize_for = LITE_RUNTIME;
@@ -159,12 +164,13 @@
// On minor version 3 or newer and on major version 2 or newer, these
// operations are supported:
- ZERO = 6; // Write zeros in the destination.
- DISCARD = 7; // Discard the destination blocks, reading as undefined.
REPLACE_XZ = 8; // Replace destination extents w/ attached xz data.
// On minor version 4 or newer, these operations are supported:
- IMGDIFF = 9; // The data is in imgdiff format.
+ ZERO = 6; // Write zeros in the destination.
+ DISCARD = 7; // Discard the destination blocks, reading as undefined.
+ PUFFDIFF = 9; // The data is in puffdiff format.
+ BROTLI_BSDIFF = 10; // Like SOURCE_BSDIFF, but compressed with brotli.
}
required Type type = 1;
// The offset into the delta file (after the protobuf)
@@ -176,14 +182,15 @@
// Ordered list of extents that are read from (if any) and written to.
repeated Extent src_extents = 4;
// Byte length of src, equal to the number of blocks in src_extents *
- // block_size. It is used for BSDIFF, because we need to pass that
- // external program the number of bytes to read from the blocks we pass it.
- // This is not used in any other operation.
+ // block_size. It is used for BSDIFF and SOURCE_BSDIFF, because we need to
+ // pass that external program the number of bytes to read from the blocks we
+ // pass it. This is not used in any other operation.
optional uint64 src_length = 5;
repeated Extent dst_extents = 6;
// Byte length of dst, equal to the number of blocks in dst_extents *
- // block_size. Used for BSDIFF, but not in any other operation.
+ // block_size. Used for BSDIFF and SOURCE_BSDIFF, but not in any other
+ // operation.
optional uint64 dst_length = 7;
// Optional SHA 256 hash of the blob associated with this operation.
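For orientation, the new operation types become values of the generated InstallOperation::Type enum, and a payload consumer branches on them when applying a delta. The sketch below is illustrative only; the handler functions are hypothetical names, not the actual delta_performer entry points:

    bool ApplyOperation(const chromeos_update_engine::InstallOperation& op) {
      using chromeos_update_engine::InstallOperation;
      switch (op.type()) {
        case InstallOperation::PUFFDIFF:
          return PerformPuffDiff(op);      // hypothetical: puffpatch of src_extents data
        case InstallOperation::BROTLI_BSDIFF:
          return PerformBrotliBsdiff(op);  // hypothetical: bsdiff patch compressed with brotli
        case InstallOperation::REPLACE_XZ:
          return PerformReplaceXz(op);     // hypothetical: xz-decompress attached blob
        default:
          return false;                    // unsupported for this minor version
      }
    }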
diff --git a/utils_android.cc b/utils_android.cc
index 38d62ea..393e65a 100644
--- a/utils_android.cc
+++ b/utils_android.cc
@@ -16,7 +16,6 @@
#include "update_engine/utils_android.h"
-#include <cutils/properties.h>
#include <fs_mgr.h>
using std::string;