Merge remote-tracking branch 'aosp/upstream-master' into merge
It's a merge from Chrome OS with some reverts.
1. the fd watcher change, because the libbrillo version isn't
compatible in aosp.
commit 6955bcc4ffe4cc9d62a88186b9a7e75d095a7897
commit 493fecb3f48c8478fd3ef244d631d857730dd14d
2. two libcurl unittests, because the RunOnce() of the fake message
loop seems to have different behavior in aosp.
commit d3d84218cafbc1a95e7d6bbb775b495d1bebf4d2
Put preprocessor guards to use the old code in aosp. And we can
switch to the new code in the other path after adopting the new
libbrillo & libchrome.
Test: unit tests pass, apply an OTA
Change-Id: Id613599834b0f44f92841dbeae6303601db5490d
diff --git a/.clang-format b/.clang-format
index 3b6a627..3044f59 100644
--- a/.clang-format
+++ b/.clang-format
@@ -37,3 +37,7 @@
IncludeBlocks: Preserve
PointerAlignment: Left
TabWidth: 2
+
+# cpplint.py does smarter #include sorting than clang-format (the former ignores
+# case and changes '-' to '_').
+SortIncludes: false
diff --git a/Android.bp b/Android.bp
index a5223c7..ecf3585 100644
--- a/Android.bp
+++ b/Android.bp
@@ -29,12 +29,9 @@
cflags: [
"-DBASE_VER=576279",
- "-DUSE_BINDER=1",
"-DUSE_CHROME_NETWORK_PROXY=0",
"-DUSE_CHROME_KIOSK_APP=0",
"-DUSE_HWID_OVERRIDE=0",
- "-DUSE_MTD=0",
- "-DUSE_OMAHA=0",
"-D_FILE_OFFSET_BITS=64",
"-D_POSIX_C_SOURCE=199309L",
"-Wa,--noexecstack",
@@ -302,7 +299,7 @@
":libupdate_engine_aidl",
"binder_service_android.cc",
"certificate_checker.cc",
- "daemon.cc",
+ "daemon_android.cc",
"daemon_state_android.cc",
"hardware_android.cc",
"libcurl_http_fetcher.cc",
@@ -408,54 +405,6 @@
],
}
-// libupdate_engine_client (type: shared_library)
-// ========================================================
-cc_library_shared {
- name: "libupdate_engine_client",
-
- cflags: [
- "-Wall",
- "-Werror",
- "-Wno-unused-parameter",
- "-DUSE_BINDER=1",
- ],
- export_include_dirs: ["client_library/include"],
- include_dirs: [
- // TODO(deymo): Remove "external/cros/system_api/dbus" when dbus is not used.
- "external/cros/system_api/dbus",
- "system",
- ],
-
- aidl: {
- local_include_dirs: ["binder_bindings"],
- },
-
- shared_libs: [
- "libchrome",
- "libbrillo",
- "libbinder",
- "libbrillo-binder",
- "libutils",
- ],
-
- srcs: [
- ":libupdate_engine_client_aidl",
- "client_library/client.cc",
- "client_library/client_binder.cc",
- "parcelable_update_engine_status.cc",
- "update_status_utils.cc",
- ],
-}
-
-filegroup {
- name: "libupdate_engine_client_aidl",
- srcs: [
- "binder_bindings/android/brillo/IUpdateEngine.aidl",
- "binder_bindings/android/brillo/IUpdateEngineStatusCallback.aidl",
- ],
- path: "binder_bindings",
-}
-
// update_engine_client (type: executable)
// ========================================================
// update_engine console client.
@@ -533,7 +482,6 @@
"payload_generator/block_mapping.cc",
"payload_generator/boot_img_filesystem.cc",
"payload_generator/bzip.cc",
- "payload_generator/cycle_breaker.cc",
"payload_generator/deflate_utils.cc",
"payload_generator/delta_diff_generator.cc",
"payload_generator/delta_diff_utils.cc",
@@ -541,18 +489,14 @@
"payload_generator/extent_ranges.cc",
"payload_generator/extent_utils.cc",
"payload_generator/full_update_generator.cc",
- "payload_generator/graph_types.cc",
- "payload_generator/graph_utils.cc",
- "payload_generator/inplace_generator.cc",
"payload_generator/mapfile_filesystem.cc",
"payload_generator/payload_file.cc",
"payload_generator/payload_generation_config_android.cc",
"payload_generator/payload_generation_config.cc",
+ "payload_generator/payload_properties.cc",
"payload_generator/payload_signer.cc",
"payload_generator/raw_filesystem.cc",
"payload_generator/squashfs_filesystem.cc",
- "payload_generator/tarjan.cc",
- "payload_generator/topological_sort.cc",
"payload_generator/xz_android.cc",
],
}
@@ -721,6 +665,7 @@
"common/test_utils.cc",
"common/utils_unittest.cc",
"dynamic_partition_control_android_unittest.cc",
+ "libcurl_http_fetcher_unittest.cc",
"payload_consumer/bzip_extent_writer_unittest.cc",
"payload_consumer/cached_file_descriptor_unittest.cc",
"payload_consumer/certificate_parser_android_unittest.cc",
@@ -740,7 +685,6 @@
"payload_generator/blob_file_writer_unittest.cc",
"payload_generator/block_mapping_unittest.cc",
"payload_generator/boot_img_filesystem_unittest.cc",
- "payload_generator/cycle_breaker_unittest.cc",
"payload_generator/deflate_utils_unittest.cc",
"payload_generator/delta_diff_utils_unittest.cc",
"payload_generator/ext2_filesystem_unittest.cc",
@@ -748,19 +692,17 @@
"payload_generator/extent_utils_unittest.cc",
"payload_generator/fake_filesystem.cc",
"payload_generator/full_update_generator_unittest.cc",
- "payload_generator/graph_utils_unittest.cc",
- "payload_generator/inplace_generator_unittest.cc",
"payload_generator/mapfile_filesystem_unittest.cc",
"payload_generator/payload_file_unittest.cc",
"payload_generator/payload_generation_config_android_unittest.cc",
"payload_generator/payload_generation_config_unittest.cc",
+ "payload_generator/payload_properties_unittest.cc",
"payload_generator/payload_signer_unittest.cc",
"payload_generator/squashfs_filesystem_unittest.cc",
- "payload_generator/tarjan_unittest.cc",
- "payload_generator/topological_sort_unittest.cc",
"payload_generator/zip_unittest.cc",
"testrunner.cc",
"update_attempter_android_unittest.cc",
+ "update_status_utils_unittest.cc",
],
}
@@ -784,12 +726,3 @@
},
},
}
-
-// AIDL interface between libupdate_engine and the Things jar.
-filegroup {
- name: "things_update_engine_aidl",
- srcs: [
- "binder_bindings/android/brillo/IUpdateEngine.aidl",
- "binder_bindings/android/brillo/IUpdateEngineStatusCallback.aidl",
- ],
-}
diff --git a/BUILD.gn b/BUILD.gn
new file mode 100644
index 0000000..e438af4
--- /dev/null
+++ b/BUILD.gn
@@ -0,0 +1,590 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Stop linter from complaining about XXX_unittest.cc naming.
+# gnlint: disable=GnLintSourceFileNames
+
+import("//common-mk/generate-dbus-adaptors.gni")
+import("//common-mk/generate-dbus-proxies.gni")
+import("//common-mk/openssl_pem.gni")
+import("//common-mk/pkg_config.gni")
+import("//common-mk/proto_library.gni")
+import("//update_engine/tar_bunzip2.gni")
+
+group("all") {
+ deps = [
+ ":delta_generator",
+ ":libpayload_consumer",
+ ":libpayload_generator",
+ ":libupdate_engine",
+ ":libupdate_engine_client",
+ ":update_engine",
+ ":update_engine-dbus-adaptor",
+ ":update_engine-dbus-kiosk-app-client",
+ ":update_engine_client",
+ ":update_metadata-protos",
+ ]
+
+ if (use.test) {
+ deps += [
+ ":test_http_server",
+ ":test_subprocess",
+ ":update_engine-test_images",
+ ":update_engine-testkeys",
+ ":update_engine_test_libs",
+ ":update_engine_unittests",
+ ]
+ }
+
+ if (use.fuzzer) {
+ deps += [
+ ":update_engine_delta_performer_fuzzer",
+ ":update_engine_omaha_request_action_fuzzer",
+ ]
+ }
+}
+
+pkg_config("target_defaults") {
+ cflags_cc = [
+ "-fno-strict-aliasing",
+ "-Wnon-virtual-dtor",
+ ]
+ cflags = [
+ "-g",
+ "-ffunction-sections",
+ "-Wall",
+ "-Wextra",
+ "-Werror",
+ "-Wno-unused-parameter",
+ ]
+ ldflags = [ "-Wl,--gc-sections" ]
+ defines = [
+ "__CHROMEOS__",
+ "_FILE_OFFSET_BITS=64",
+ "_POSIX_C_SOURCE=199309L",
+ "USE_DBUS=${use.dbus}",
+ "USE_FEC=0",
+ "USE_HWID_OVERRIDE=${use.hwid_override}",
+ "USE_CHROME_KIOSK_APP=${use.chrome_kiosk_app}",
+ "USE_CHROME_NETWORK_PROXY=${use.chrome_network_proxy}",
+ "USE_SHILL=1",
+ ]
+ include_dirs = [
+ # We need this include dir because we include all the local code as
+ # "update_engine/...".
+ "${platform2_root}",
+ "${platform2_root}/update_engine/client_library/include",
+ ]
+
+ # NOSORT
+ pkg_deps = [
+ "libbrillo",
+ "libchrome-${libbase_ver}",
+
+ # system_api depends on protobuf (or protobuf-lite). It must appear
+ # before protobuf here or the linker flags won't be in the right
+ # order.
+ "system_api",
+ ]
+ if (use.fuzzer) {
+ pkg_deps += [ "protobuf" ]
+ } else {
+ pkg_deps += [ "protobuf-lite" ]
+ }
+}
+
+# Protobufs.
+proto_library("update_metadata-protos") {
+ proto_in_dir = "."
+ proto_out_dir = "include/update_engine"
+ sources = [ "update_metadata.proto" ]
+}
+
+# Chrome D-Bus bindings.
+generate_dbus_adaptors("update_engine-dbus-adaptor") {
+ dbus_adaptors_out_dir = "include/dbus_bindings"
+ sources = [ "dbus_bindings/org.chromium.UpdateEngineInterface.dbus-xml" ]
+}
+
+generate_dbus_proxies("update_engine-dbus-kiosk-app-client") {
+ mock_output_file = "include/kiosk-app/dbus-proxy-mocks.h"
+ proxy_output_file = "include/kiosk-app/dbus-proxies.h"
+ sources = [ "dbus_bindings/org.chromium.KioskAppService.dbus-xml" ]
+}
+
+# The payload application component and common dependencies.
+static_library("libpayload_consumer") {
+ sources = [
+ "common/action_processor.cc",
+ "common/boot_control_stub.cc",
+ "common/clock.cc",
+ "common/constants.cc",
+ "common/cpu_limiter.cc",
+ "common/error_code_utils.cc",
+ "common/hash_calculator.cc",
+ "common/http_common.cc",
+ "common/http_fetcher.cc",
+ "common/hwid_override.cc",
+ "common/multi_range_http_fetcher.cc",
+ "common/platform_constants_chromeos.cc",
+ "common/prefs.cc",
+ "common/proxy_resolver.cc",
+ "common/subprocess.cc",
+ "common/terminator.cc",
+ "common/utils.cc",
+ "payload_consumer/bzip_extent_writer.cc",
+ "payload_consumer/cached_file_descriptor.cc",
+ "payload_consumer/delta_performer.cc",
+ "payload_consumer/download_action.cc",
+ "payload_consumer/extent_reader.cc",
+ "payload_consumer/extent_writer.cc",
+ "payload_consumer/file_descriptor.cc",
+ "payload_consumer/file_descriptor_utils.cc",
+ "payload_consumer/file_writer.cc",
+ "payload_consumer/filesystem_verifier_action.cc",
+ "payload_consumer/install_plan.cc",
+ "payload_consumer/mount_history.cc",
+ "payload_consumer/payload_constants.cc",
+ "payload_consumer/payload_metadata.cc",
+ "payload_consumer/payload_verifier.cc",
+ "payload_consumer/postinstall_runner_action.cc",
+ "payload_consumer/verity_writer_stub.cc",
+ "payload_consumer/xz_extent_writer.cc",
+ ]
+ configs += [ ":target_defaults" ]
+ libs = [
+ "bz2",
+ "rt",
+ ]
+
+ # TODO(crbug.com/1082873): Remove after fixing usage of deprecated
+ # declarations.
+ cflags_cc = [ "-Wno-error=deprecated-declarations" ]
+
+ # TODO(deymo): Remove unused dependencies once we stop including files
+ # from the root directory.
+ all_dependent_pkg_deps = [
+ "libbspatch",
+ "libcrypto",
+ "libpuffpatch",
+ "xz-embedded",
+ ]
+ public_deps = [ ":update_metadata-protos" ]
+}
+
+# The main daemon static_library with all the code used to check for updates
+# with Omaha and expose a DBus daemon.
+static_library("libupdate_engine") {
+ sources = [
+ "boot_control_chromeos.cc",
+ "certificate_checker.cc",
+ "common_service.cc",
+ "connection_manager.cc",
+ "connection_utils.cc",
+ "daemon_chromeos.cc",
+ "dbus_connection.cc",
+ "dbus_service.cc",
+ "hardware_chromeos.cc",
+ "image_properties_chromeos.cc",
+ "libcurl_http_fetcher.cc",
+ "metrics_reporter_omaha.cc",
+ "metrics_utils.cc",
+ "omaha_request_action.cc",
+ "omaha_request_builder_xml.cc",
+ "omaha_request_params.cc",
+ "omaha_response_handler_action.cc",
+ "omaha_utils.cc",
+ "p2p_manager.cc",
+ "payload_state.cc",
+ "power_manager_chromeos.cc",
+ "real_system_state.cc",
+ "shill_proxy.cc",
+ "update_attempter.cc",
+ "update_boot_flags_action.cc",
+ "update_manager/boxed_value.cc",
+ "update_manager/chromeos_policy.cc",
+ "update_manager/default_policy.cc",
+ "update_manager/enough_slots_ab_updates_policy_impl.cc",
+ "update_manager/enterprise_device_policy_impl.cc",
+ "update_manager/evaluation_context.cc",
+ "update_manager/interactive_update_policy_impl.cc",
+ "update_manager/next_update_check_policy_impl.cc",
+ "update_manager/official_build_check_policy_impl.cc",
+ "update_manager/out_of_box_experience_policy_impl.cc",
+ "update_manager/policy.cc",
+ "update_manager/policy_test_utils.cc",
+ "update_manager/real_config_provider.cc",
+ "update_manager/real_device_policy_provider.cc",
+ "update_manager/real_random_provider.cc",
+ "update_manager/real_shill_provider.cc",
+ "update_manager/real_system_provider.cc",
+ "update_manager/real_time_provider.cc",
+ "update_manager/real_updater_provider.cc",
+ "update_manager/staging_utils.cc",
+ "update_manager/state_factory.cc",
+ "update_manager/update_manager.cc",
+ "update_manager/update_time_restrictions_policy_impl.cc",
+ "update_manager/weekly_time.cc",
+ "update_status_utils.cc",
+ ]
+ configs += [ ":target_defaults" ]
+ libs = [
+ "bz2",
+ "policy",
+ "rootdev",
+ "rt",
+ ]
+ all_dependent_pkg_deps = [
+ "dbus-1",
+ "expat",
+ "libcurl",
+ "libdebugd-client",
+ "libmetrics-${libbase_ver}",
+ "libpower_manager-client",
+ "libsession_manager-client",
+ "libshill-client",
+ "libssl",
+ "libupdate_engine-client",
+ "vboot_host",
+ ]
+ deps = [
+ ":libpayload_consumer",
+ ":update_engine-dbus-adaptor",
+ ":update_metadata-protos",
+ ]
+
+ if (use.dlc) {
+ all_dependent_pkg_deps += [ "libdlcservice-client" ]
+ }
+
+ if (use.chrome_network_proxy) {
+ sources += [ "chrome_browser_proxy_resolver.cc" ]
+ }
+
+ if (use.chrome_kiosk_app) {
+ deps += [ ":update_engine-dbus-kiosk-app-client" ]
+ }
+
+ if (use.dlc) {
+ sources += [
+ "dlcservice_chromeos.cc",
+ "excluder_chromeos.cc",
+ ]
+ } else {
+ sources += [
+ "common/dlcservice_stub.cc",
+ "common/excluder_stub.cc",
+ ]
+ }
+}
+
+# update_engine daemon.
+executable("update_engine") {
+ sources = [ "main.cc" ]
+ configs += [ ":target_defaults" ]
+ deps = [ ":libupdate_engine" ]
+}
+
+# update_engine client library.
+static_library("libupdate_engine_client") {
+ sources = [
+ "client_library/client_dbus.cc",
+ "update_status_utils.cc",
+ ]
+ include_dirs = [ "client_library/include" ]
+ configs += [ ":target_defaults" ]
+ pkg_deps = [
+ "dbus-1",
+ "libupdate_engine-client",
+ ]
+}
+
+# update_engine console client.
+executable("update_engine_client") {
+ sources = [
+ "common/error_code_utils.cc",
+ "omaha_utils.cc",
+ "update_engine_client.cc",
+ ]
+ configs += [ ":target_defaults" ]
+ deps = [ ":libupdate_engine_client" ]
+}
+
+# server-side code. This is used for delta_generator and unittests but not
+# for any client code.
+static_library("libpayload_generator") {
+ sources = [
+ "common/file_fetcher.cc",
+ "payload_generator/ab_generator.cc",
+ "payload_generator/annotated_operation.cc",
+ "payload_generator/blob_file_writer.cc",
+ "payload_generator/block_mapping.cc",
+ "payload_generator/boot_img_filesystem.cc",
+ "payload_generator/bzip.cc",
+ "payload_generator/deflate_utils.cc",
+ "payload_generator/delta_diff_generator.cc",
+ "payload_generator/delta_diff_utils.cc",
+ "payload_generator/ext2_filesystem.cc",
+ "payload_generator/extent_ranges.cc",
+ "payload_generator/extent_utils.cc",
+ "payload_generator/full_update_generator.cc",
+ "payload_generator/mapfile_filesystem.cc",
+ "payload_generator/payload_file.cc",
+ "payload_generator/payload_generation_config.cc",
+ "payload_generator/payload_generation_config_chromeos.cc",
+ "payload_generator/payload_properties.cc",
+ "payload_generator/payload_signer.cc",
+ "payload_generator/raw_filesystem.cc",
+ "payload_generator/squashfs_filesystem.cc",
+ "payload_generator/xz_chromeos.cc",
+ ]
+ configs += [ ":target_defaults" ]
+ all_dependent_pkg_deps = [
+ "ext2fs",
+ "libbsdiff",
+ "liblzma",
+ "libpuffdiff",
+ ]
+ deps = [
+ ":libpayload_consumer",
+ ":update_metadata-protos",
+ ]
+
+ # TODO(crbug.com/1082873): Remove after fixing usage of deprecated
+ # declarations.
+ cflags_cc = [ "-Wno-error=deprecated-declarations" ]
+}
+
+# server-side delta generator.
+executable("delta_generator") {
+ sources = [ "payload_generator/generate_delta_main.cc" ]
+ configs += [ ":target_defaults" ]
+ configs -= [ "//common-mk:pie" ]
+ deps = [
+ ":libpayload_consumer",
+ ":libpayload_generator",
+ ]
+}
+
+if (use.test || use.fuzzer) {
+ static_library("update_engine_test_libs") {
+ sources = [
+ "common/fake_prefs.cc",
+ "common/mock_http_fetcher.cc",
+ "common/test_utils.cc",
+ "fake_shill_proxy.cc",
+ "fake_system_state.cc",
+ "payload_consumer/fake_file_descriptor.cc",
+ "payload_generator/fake_filesystem.cc",
+ "update_manager/umtest_utils.cc",
+ ]
+
+    # TODO(crbug.com/887845): After library ordering issue is fixed,
+    # //common-mk:test can be moved in all_dependent_configs and
+    # //common-mk:test in each test config can be removed.
+ configs += [
+ "//common-mk:test",
+ ":target_defaults",
+ ]
+ pkg_deps = [ "libshill-client-test" ]
+ deps = [ ":libupdate_engine" ]
+ }
+}
+
+if (use.test) {
+ # Public keys used for unit testing.
+ genopenssl_key("update_engine-testkeys") {
+ openssl_pem_in_dir = "."
+ openssl_pem_out_dir = "include/update_engine"
+ sources = [
+ "unittest_key.pem",
+ "unittest_key2.pem",
+ ]
+ }
+
+ # Unpacks sample images used for testing.
+ tar_bunzip2("update_engine-test_images") {
+ image_out_dir = "."
+ sources = [ "sample_images/sample_images.tar.bz2" ]
+ }
+
+ # Test HTTP Server.
+ executable("test_http_server") {
+ sources = [
+ "common/http_common.cc",
+ "test_http_server.cc",
+ ]
+
+ # //common-mk:test should be on the top.
+    # TODO(crbug.com/887845): Remove this after library ordering issue is fixed.
+ configs += [
+ "//common-mk:test",
+ ":target_defaults",
+ ]
+ }
+
+ # Test subprocess helper.
+ executable("test_subprocess") {
+ sources = [ "test_subprocess.cc" ]
+
+ # //common-mk:test should be on the top.
+    # TODO(crbug.com/887845): Remove this after library ordering issue is fixed.
+ configs += [
+ "//common-mk:test",
+ ":target_defaults",
+ ]
+ }
+
+ # Main unittest file.
+ executable("update_engine_unittests") {
+ sources = [
+ "boot_control_chromeos_unittest.cc",
+ "certificate_checker_unittest.cc",
+ "common/action_pipe_unittest.cc",
+ "common/action_processor_unittest.cc",
+ "common/action_unittest.cc",
+ "common/cpu_limiter_unittest.cc",
+ "common/hash_calculator_unittest.cc",
+ "common/http_fetcher_unittest.cc",
+ "common/hwid_override_unittest.cc",
+ "common/prefs_unittest.cc",
+ "common/proxy_resolver_unittest.cc",
+ "common/subprocess_unittest.cc",
+ "common/terminator_unittest.cc",
+ "common/utils_unittest.cc",
+ "common_service_unittest.cc",
+ "connection_manager_unittest.cc",
+ "hardware_chromeos_unittest.cc",
+ "image_properties_chromeos_unittest.cc",
+ "libcurl_http_fetcher_unittest.cc",
+ "metrics_reporter_omaha_unittest.cc",
+ "metrics_utils_unittest.cc",
+ "omaha_request_action_unittest.cc",
+ "omaha_request_builder_xml_unittest.cc",
+ "omaha_request_params_unittest.cc",
+ "omaha_response_handler_action_unittest.cc",
+ "omaha_utils_unittest.cc",
+ "p2p_manager_unittest.cc",
+ "payload_consumer/bzip_extent_writer_unittest.cc",
+ "payload_consumer/cached_file_descriptor_unittest.cc",
+ "payload_consumer/delta_performer_integration_test.cc",
+ "payload_consumer/delta_performer_unittest.cc",
+ "payload_consumer/download_action_unittest.cc",
+ "payload_consumer/extent_reader_unittest.cc",
+ "payload_consumer/extent_writer_unittest.cc",
+ "payload_consumer/file_descriptor_utils_unittest.cc",
+ "payload_consumer/file_writer_unittest.cc",
+ "payload_consumer/filesystem_verifier_action_unittest.cc",
+ "payload_consumer/postinstall_runner_action_unittest.cc",
+ "payload_consumer/xz_extent_writer_unittest.cc",
+ "payload_generator/ab_generator_unittest.cc",
+ "payload_generator/blob_file_writer_unittest.cc",
+ "payload_generator/block_mapping_unittest.cc",
+ "payload_generator/boot_img_filesystem_unittest.cc",
+ "payload_generator/deflate_utils_unittest.cc",
+ "payload_generator/delta_diff_utils_unittest.cc",
+ "payload_generator/ext2_filesystem_unittest.cc",
+ "payload_generator/extent_ranges_unittest.cc",
+ "payload_generator/extent_utils_unittest.cc",
+ "payload_generator/full_update_generator_unittest.cc",
+ "payload_generator/mapfile_filesystem_unittest.cc",
+ "payload_generator/payload_file_unittest.cc",
+ "payload_generator/payload_generation_config_unittest.cc",
+ "payload_generator/payload_properties_unittest.cc",
+ "payload_generator/payload_signer_unittest.cc",
+ "payload_generator/squashfs_filesystem_unittest.cc",
+ "payload_generator/zip_unittest.cc",
+ "payload_state_unittest.cc",
+ "testrunner.cc",
+ "update_attempter_unittest.cc",
+ "update_boot_flags_action_unittest.cc",
+ "update_manager/boxed_value_unittest.cc",
+ "update_manager/chromeos_policy_unittest.cc",
+ "update_manager/evaluation_context_unittest.cc",
+ "update_manager/generic_variables_unittest.cc",
+ "update_manager/prng_unittest.cc",
+ "update_manager/real_device_policy_provider_unittest.cc",
+ "update_manager/real_random_provider_unittest.cc",
+ "update_manager/real_shill_provider_unittest.cc",
+ "update_manager/real_system_provider_unittest.cc",
+ "update_manager/real_time_provider_unittest.cc",
+ "update_manager/real_updater_provider_unittest.cc",
+ "update_manager/staging_utils_unittest.cc",
+ "update_manager/update_manager_unittest.cc",
+ "update_manager/update_time_restrictions_policy_impl_unittest.cc",
+ "update_manager/variable_unittest.cc",
+ "update_manager/weekly_time_unittest.cc",
+ "update_status_utils_unittest.cc",
+ ]
+ if (use.dlc) {
+ sources += [ "excluder_chromeos_unittest.cc" ]
+ }
+
+ # //common-mk:test should be on the top.
+  # TODO(crbug.com/887845): Remove this after library ordering issue is fixed.
+ configs += [
+ "//common-mk:test",
+ ":target_defaults",
+ ]
+ pkg_deps = [
+ "libbrillo-test",
+ "libchrome-test-${libbase_ver}",
+ "libdebugd-client-test",
+ "libpower_manager-client-test",
+ "libsession_manager-client-test",
+ "libshill-client-test",
+ ]
+ deps = [
+ ":libpayload_generator",
+ ":libupdate_engine",
+ ":update_engine_test_libs",
+ ]
+ }
+}
+
+# Fuzzer target.
+if (use.fuzzer) {
+ executable("update_engine_delta_performer_fuzzer") {
+ sources = [ "payload_consumer/delta_performer_fuzzer.cc" ]
+ configs += [
+ "//common-mk/common_fuzzer",
+ ":target_defaults",
+ ]
+ pkg_deps = [
+ "libbrillo-test",
+ "libchrome-test-${libbase_ver}",
+ ]
+ deps = [
+ ":libupdate_engine",
+ ":update_engine_test_libs",
+ ]
+ }
+ executable("update_engine_omaha_request_action_fuzzer") {
+ sources = [ "omaha_request_action_fuzzer.cc" ]
+ configs += [
+ "//common-mk/common_fuzzer",
+ ":target_defaults",
+ ]
+ pkg_deps = [
+ "libbrillo-test",
+ "libchrome-test-${libbase_ver}",
+ ]
+ deps = [
+ ":libupdate_engine",
+ ":update_engine_test_libs",
+ ]
+ }
+}
diff --git a/COMMIT-QUEUE.ini b/COMMIT-QUEUE.ini
deleted file mode 100644
index ed99b9f..0000000
--- a/COMMIT-QUEUE.ini
+++ /dev/null
@@ -1,11 +0,0 @@
-# Copyright 2017 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# Per-project Commit Queue settings.
-# Documentation: http://goo.gl/5J7oND
-
-[GENERAL]
-
-# Moblab testing is needed because of the udpate_payloads ebuild.
-pre-cq-configs: default guado_moblab-no-vmtest-pre-cq
diff --git a/OWNERS b/OWNERS
index 07ee38e..75fd9f1 100644
--- a/OWNERS
+++ b/OWNERS
@@ -1,10 +1,12 @@
set noparent
-# Current general maintainers:
+# Android et al. maintainers:
deymo@google.com
senj@google.com
# Chromium OS maintainers:
-benchan@google.com
ahassani@google.com
-xiaochu@google.com
+kimjae@google.com
+
+# Chromium OS only:
+# COMPONENT: Internals>Installer
diff --git a/PRESUBMIT.cfg b/PRESUBMIT.cfg
index f2c7831..42156b3 100644
--- a/PRESUBMIT.cfg
+++ b/PRESUBMIT.cfg
@@ -1,6 +1,6 @@
[Hook Scripts]
-hook0=../../../../chromite/bin/cros lint ${PRESUBMIT_FILES}
-hook1=../../../platform2/common-mk/gyplint.py ${PRESUBMIT_FILES}
+cros lint = cros lint ${PRESUBMIT_FILES}
+gnlint = ../../../platform2/common-mk/gnlint.py ${PRESUBMIT_FILES}
[Hook Overrides]
clang_format_check: true
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..71f271b
--- /dev/null
+++ b/README.md
@@ -0,0 +1,642 @@
+# Chrome OS Update Process
+
+[TOC]
+
+System updates in more modern operating systems like Chrome OS and Android are
+called A/B updates, over-the-air ([OTA]) updates, seamless updates, or simply
+auto updates. In contrast to more primitive system updates (like Windows or
+macOS) where the system is booted into a special mode to override the system
+partitions with newer updates and may take several minutes or hours, A/B updates
+have several advantages including but not limited to:
+
+* Updates maintain a workable system that remains on the disk during and after
+ an update. Hence, reducing the likelihood of corrupting a device into a
+ non-usable state. And reducing the need for flashing devices manually or at
+ repair and warranty centers, etc.
+* Updates can happen while the system is running (normally with minimum
+ overhead) without interrupting the user. The only downside for users is a
+ required reboot (or, in Chrome OS, a sign out which automatically causes a
+ reboot if an update was performed where the reboot duration is about 10
+ seconds and is no different than a normal reboot).
+* The user does not need (although they can) to request an update. The
+  update checks happen periodically in the background.
+* If the update fails to apply, the user is not affected. The user will
+ continue on the old version of the system and the system will attempt to
+ apply the update again at a later time.
+* If the update applies correctly but fails to boot, the system will rollback
+ to the old partition and the user can still use the system as usual.
+* The user does not need to reserve enough space for the update. The system
+ has already reserved enough space in terms of two copies (A and B) of a
+ partition. The system doesn’t even need any cache space on the disk,
+ everything happens seamlessly from network to memory to the inactive
+ partitions.
+
+## Life of an A/B Update
+
+In A/B update capable systems, each partition, such as the kernel or root (or
+other artifacts like [DLC]), has two copies. We call these two copies active (A)
+and inactive (B). The system is booted into the active partition (depending on
+which copy has the higher priority at boot time) and when a new update is
+available, it is written into the inactive partition. After a successful reboot,
+the previously inactive partition becomes active and the old active partition
+becomes inactive.
+
+But everything starts with generating update payloads in (Google) servers for
+each new system image. Once the update payloads are generated, they are signed
+with specific keys and stored in a location known to an update server (Omaha).
+
+When the updater client initiates an update (either periodically or user
+initiated), it first consults different device policies to see if the update
+check is allowed. For example, device policies can prevent an update check
+during certain times of a day or they require the update check time to be
+scattered throughout the day randomly, etc.
+
+Once policies allow for the update check, the updater client sends a request to
+the update server (all this communication happens over HTTPS) and identifies its
+parameters like its Application ID, hardware ID, version, board, etc. Then if
+the update server decides to serve an update payload, it will respond with all
+the parameters needed to perform an update like the URLs to download the
+payloads, the metadata signatures, the payload size and hash, etc. The updater
+client continues communicating with the update server after different state
+changes, like reporting that it started to download the payload or it finished
+the update, or reports that the update failed with specific error codes, etc.
+
+Each payload consists of two main sections: metadata and extra data. The
+metadata is basically a list of operations that should be performed for an
+update. The extra data contains the data blobs needed by some or all of these
+operations. The updater client first downloads the metadata and
+cryptographically verifies it using the provided signatures from the update
+server’s response. Once the metadata is verified as valid, the rest of the
+payload can easily be verified cryptographically (mostly through SHA256 hashes).
+
+Next, the updater client marks the inactive partition as unbootable (because it
+needs to write the new updates into it). At this point the system cannot
+rollback to the inactive partition anymore.
+
+Then, the updater client performs the operations defined in the metadata (in the
+order they appear in the metadata) and the rest of the payload is gradually
+downloaded when these operations require their data. Once an operation is
+finished its data is discarded. This eliminates the need for caching the entire
+payload before applying it. During this process the updater client periodically
+checkpoints the last operation performed so in the event of failure or system
+shutdown, etc. it can continue from the point it missed without redoing all
+operations from the beginning.
+
+During the download, the updater client hashes the downloaded bytes and when the
+download finishes, it checks the payload signature (located at the end of the
+payload). If the signature cannot be verified, the update is rejected.
+
+After the inactive partition is updated, the entire partition is re-read, hashed
+and compared to a hash value passed in the metadata to make sure the update was
+successfully written into the partition.
+
+In the next step, the [Postinstall] process (if any) is called. The postinstall
+reconstructs the dm-verity tree hash of the ROOT partition and writes it at the
+end of the partition (after the last block of the file system). The postinstall
+can also perform any board specific or firmware update tasks necessary. If
+postinstall fails, the entire update is considered failed.
+
+Then the updater client goes into a state that identifies the update has
+completed and the user needs to reboot the system. At this point, until the user
+reboots (or signs out), the updater client will not do any more system updates
+even if newer updates are available. However, it does continue to perform
+periodic update checks so we can have statistics on the number of active devices
+in the field.
+
+After the update proved successful, the inactive partition is marked to have a
+higher priority (on a boot, a partition with higher priority is booted
+first). Once the user reboots the system, it will boot into the updated
+partition and it is marked as active. At this point, after the reboot, the
+updater client calls into the [`chromeos-setgoodkernel`] program. The program
+verifies the integrity of the system partitions using the dm-verity and marks
+the active partition as healthy. At this point the system is basically updated
+successfully.
+
+## Update Engine Daemon
+
+The `update_engine` is a single-threaded daemon process that runs all the
+time. This process is the heart of the auto updates. It runs with lower
+priorities in the background and is one of the last processes to start after a
+system boot. Different clients (like Chrome or other services) can send requests
+for update checks to the update engine. The details of how requests are passed
+to the update engine is system dependent, but in Chrome OS it is D-Bus. Look at
+the [D-Bus interface] for a list of all available methods.
+
+There are many resiliency features embedded in the update engine that makes auto
+updates robust including but not limited to:
+
+* If the update engine crashes, it will restart automatically.
+* During an active update it periodically checkpoints the state of the update
+ and if it fails to continue the update or crashes in the middle, it will
+ continue from the last checkpoint.
+* It retries failed network communication.
+* If it fails to apply a delta payload (due to bit changes on the active
+ partition) for a few times, it switches to full payload.
+
+The updater client writes its active preferences in
+`/var/lib/update_engine/prefs`. These preferences help with tracking changes
+during the lifetime of the updater client and allows properly continuing the
+update process after failed attempts or crashes.
+
+The core update engine code base in a Chromium OS checkout is located in
+`src/aosp/system/update_engine` and is fetched from [this repository].
+
+### Policy Management
+
+In Chrome OS, devices are allowed to accept different policies from their
+managing organizations. Some of these policies affect how/when updates should be
+performed. For example, an organization may want to scatter the update checks
+during certain times of the day so as not to interfere with normal
+business. Within the update engine daemon, [UpdateManager] has the
+responsibility of loading such policies and making different decisions based on
+them. For example, some policies may allow the act of checking for updates to
+happen, while they prevent downloading the update payload. Or some policies
+don’t allow the update check within certain time frames, etc. Anything that
+relates to the Chrome OS update policies should be contained within the
+[update_manager] directory in the source code.
+
+### Rollback vs. Enterprise Rollback
+
+Chrome OS defines a concept for Rollback: Whenever a newly updated system does
+not work as it is intended, under certain circumstances the device can be rolled
+back to a previously working version. There are two types of rollback supported
+in Chrome OS: A (legacy, original) rollback and an enterprise rollback (I know,
+naming is confusing).
+
+A normal rollback, which has existed for as long as Chrome OS had auto updater,
+is performed by switching the currently inactive partition into the active
+partition and rebooting into it. It is as simple as running a successful
+postinstall on the inactive partition, and rebooting the device. It is a feature
+used by Chrome that happens under certain circumstances. Of course rollback
+can’t happen if the inactive partition has been tampered with or has been nuked
+by the updater client to install an even newer update. Normally a rollback is
+followed by a Powerwash which clobbers the stateful partition.
+
+Enterprise rollback is a new feature added to allow enterprise users to
+downgrade the installed image to an older version. It is very similar to a
+normal system update, except that an older update payload is downloaded and
+installed. There is no direct API for entering into the enterprise rollback. It
+is managed by the enterprise device policies only.
+
+Developers should be careful when touching any rollback related feature and make
+sure they know exactly which of these two features they are trying to adapt.
+
+### Interactive vs Non-Interactive vs. Forced Updates
+
+Non-interactive updates are updates that are scheduled periodically by the
+update engine and happen in the background. Interactive updates, on the other
+hand, happen when a user specifically requests an update check (e.g. by clicking
+on “Check For Update” button in Chrome OS’s About page). Depending on the update
+server's policies, interactive updates have higher priority than non-interactive
+updates (by carrying marker hints). They may decide to not provide an update if
+they have busy server load, etc. There are other internal differences between
+these two types of updates too. For example, interactive updates try to install
+the update faster.
+
+Forced updates are similar to interactive updates (initiated by some kind of
+user action), but they can also be configured to act as non-interactive. Since
+non-interactive updates happen periodically, a forced-non-interactive update
+causes a non-interactive update at the moment of the request, not at a later
+time. We can call a forced non-interactive update with:
+
+```bash
+update_engine_client --interactive=false --check_for_update
+```
+
+### P2P Updates
+
+Many organizations might not have the external bandwidth requirements that
+system updates need for all their devices. To help with this, Chrome OS can act
+as a payload server to other client devices in the same network subnet. This is
+basically a peer-to-peer update system that allows the devices to download the
+update payloads from other devices in the network. This has to be enabled
+explicitly in the organization through device policies and specific network
+configurations to be enabled for P2P updates to work. Regardless of the location
+of update payloads, all update requests go through update servers in HTTPS.
+
+Check out the [P2P update related code] for both the server and the client side.
+
+### Network
+
+The updater client has the capability to download the payloads using Ethernet,
+WiFi, or Cellular networks depending on which one the device is connected
+to. Downloading over Cellular networks will prompt permission from the user as
+it can consume a considerable amount of data.
+
+### Logs
+
+In Chrome OS the `update_engine` logs are located in `/var/log/update_engine`
+directory. Whenever `update_engine` starts, it starts a new log file with the
+current date-time format in the log file’s name
+(`update_engine.log-DATE-TIME`). Many log files can be seen in
+`/var/log/update_engine` after a few restarts of the update engine or after the
+system reboots. The latest active log is symlinked to
+`/var/log/update_engine.log`.
+
+## Update Payload Generation
+
+The update payload generation is the process of converting a set of
+partitions/files into a format that is both understandable by the updater client
+(especially if it's a much older version) and is securely verifiable. This
+process involves breaking the input partitions into smaller components and
+compressing them in order to help with network bandwidth when downloading the
+payloads.
+
+For each generated payload, there is a corresponding properties file which
+contains the metadata information of the payload in JSON format. Normally the
+file is located in the same location as the generated payload and its file name
+is the same as the payload file name plus `.json`
+postfix. e.g. `/path/to/payload.bin` and `/path/to/payload.bin.json`. This
+properties file is necessary in order to do any kind of auto update in [`cros
+flash`], AU autotests, etc. Similarly the updater server uses this file to
+dispatch the payload properties to the updater clients.
+
+Once update payloads are generated, their original images cannot be changed
+anymore otherwise the update payloads may not be able to be applied.
+
+`delta_generator` is a tool with a wide range of options for generating
+different types of update payloads. Its code is located in
+`update_engine/payload_generator`. This directory contains all the source code
+related to mechanics of generating an update payload. None of the files in this
+directory should be included or used in any other library/executable other than
+the `delta_generator` which means this directory does not get compiled into the
+rest of the update engine tools.
+
+However, it is not recommended to use `delta_generator` directly. To manually
+generate payloads easier, [`cros_generate_update_payloads`] should be used. Most
+of the higher level policies and tools for generating payloads reside as a
+library in [`chromite/lib/paygen`]. Whenever calls to the update payload
+generation API are needed, this library should be used instead.
+
+### Update Payload File Specification
+
+Each update payload file has a specific structure defined in the table below:
+
+|Field|Size (bytes)|Type|Description|
+|-----|------------|----|-----------|
+|Magic Number|4|char[4]|Magic string "CrAU" identifying this is an update payload.|
+|Major Version|8|uint64|Payload major version number.|
+|Manifest Size|8|uint64|Manifest size in bytes.|
+|Manifest Signature Size|4|uint32|Manifest signature blob size in bytes (only in major version 2).|
+|Manifest|Varies|[DeltaArchiveManifest]|The list of operations to be performed.|
+|Manifest Signature|Varies|[Signatures]|The signature of the first five fields. There could be multiple signatures if the key has changed.|
+|Payload Data|Varies|List of raw or compressed data blobs|The list of binary blobs used by operations in the metadata.|
+|Payload Signature Size|Varies|uint64|The size of the payload signature.|
+|Payload Signature|Varies|[Signatures]|The signature of the entire payload except the metadata signature. There could be multiple signatures if the key has changed.|
+
+### Delta vs. Full Update Payloads
+
+There are two types of payload: Full and Delta. A full payload is generated
+solely from the target image (the image we want to update to) and has all the
+data necessary to update the inactive partition. Hence, full payloads can be
+quite large in size. A delta payload, on the other hand, is a differential
+update generated by comparing the source image (the active partitions) and the
+target image and producing the diffs between these two images. It is basically a
+differential update similar to applications like `diff` or `bsdiff`. Hence,
+updating the system using the delta payloads requires the system to read parts
+of the active partition in order to update the inactive partition (or
+reconstruct the target partition). The delta payloads are significantly smaller
+than the full payloads. The structure of the payload is equal for both types.
+
+Payload generation is quite resource intensive and its tools are implemented
+with high parallelism.
+
+#### Generating Full Payloads
+
+A full payload is generated by breaking the partition into 2MiB (configurable)
+chunks and either compressing them using bzip2 or XZ algorithms or keeping it as
+raw data depending on which produces smaller data. Full payloads are much larger
+in comparison to delta payloads hence require longer download time if the
+network bandwidth is limited. On the other hand, full payloads are a bit faster
+to apply because the system doesn’t need to read data from the source partition.
+
+#### Generating Delta Payloads
+
+Delta payloads are generated by looking at both the source and target images
+data on a file and metadata basis (more precisely, the file system level on each
+appropriate partition). The reason we can generate delta payloads is that Chrome
+OS partitions are read only. So with high certainty we can assume the active
+partitions on the client’s device is bit-by-bit equal to the original partitions
+generated in the image generation/signing phase. The process for generating a
+delta payload is roughly as follows:
+
+1. Find all the zero-filled blocks on the target partition and produce `ZERO`
+ operation for them. `ZERO` operation basically discards the associated
+ blocks (depending on the implementation).
+2. Find all the blocks that have not changed between the source and target
+ partitions by directly comparing one-to-one source and target blocks and
+ produce `SOURCE_COPY` operation.
+3. List all the files (and their associated blocks) in the source and target
+ partitions and remove blocks (and files) which we have already generated
+ operations for in the last two steps. Assign the remaining metadata (inodes,
+ etc) of each partition as a file.
+4. If a file is new, generate a `REPLACE`, `REPLACE_XZ`, or `REPLACE_BZ`
+ operation for its data blocks depending on which one generates a smaller
+ data blob.
+5. For each other file, compare the source and target blocks and produce a
+ `SOURCE_BSDIFF` or `PUFFDIFF` operation depending on which one generates a
+ smaller data blob. These two operations produce binary diffs between a
+ source and target data blob. (Look at [bsdiff] and [puffin] for details of
+ such binary differential programs!)
+6. Sort the operations based on their target partitions’ block offset.
+7. Optionally merge same or similar operations next to each other into larger
+ operations for better efficiency and potentially smaller payloads.
+
+Full payloads can only contain `REPLACE`, `REPLACE_BZ`, and `REPLACE_XZ`
+operations. Delta payloads can contain any operations.
+
+### Major and Minor versions
+
+The major and minor versions specify the update payload file format and the
+capability of the updater client to accept certain types of update payloads
+respectively. These numbers are [hard coded] in the updater client.
+
+Major version is basically the update payload file version specified in the
+[update payload file specification] above (second field). Each updater client
+supports a range of major versions. Currently, there are only two major
+versions: 1, and 2. And both Chrome OS and Android are on major version 2 (major
+version 1 is being deprecated). Whenever there are new additions that cannot be
+fitted in the [Manifest protobuf], we need to uprev the major version. Upreving
+major version should be done with utmost care because older clients do not know
+how to handle the newer versions. Any major version uprev in Chrome OS should be
+associated with a GoldenEye stepping stone.
+
+Minor version defines the capability of the updater client to accept certain
+operations or perform certain actions. Each updater client supports a range of
+minor versions. For example, the updater client with minor version 4 (or less)
+does not know how to handle a `PUFFDIFF` operation. So when generating a delta
+payload for an image which has an updater client with minor version 4 (or less)
+we cannot produce a PUFFDIFF operation for it. The payload generation process
+looks at the source image’s minor version to decide the type of operations it
+supports and generates only a payload that conforms to those restrictions.
+Similarly, if
+there is a bug in a client with a specific minor version, an uprev in the minor
+version helps with avoiding to generate payloads that cause that bug to
+manifest. However, upreving minor versions is quite expensive too in terms of
+maintainability and it can be error prone. So one should practice caution when
+making such a change.
+
+Minor versions are irrelevant in full payloads. Full payloads should always be
+able to be applied for very old clients. The reason is that the updater clients
+may not send their current version, so if we had different types of full
+payloads, we would not have known which version to serve to the client.
+
+### Signed vs Unsigned Payloads
+
+Update payloads can be signed (with private/public key pairs) for use in
+production or be kept unsigned for use in testing. Tools like `delta_generator`
+help with generating metadata and payload hashes or signing the payloads given
+private keys.
+
+## update_payload Scripts
+
+[update_payload] contains a set of python scripts used mostly to validate
+payload generation and application. We normally test the update payloads using
+an actual device (live tests). [`brillo_update_payload`] script can be used to
+generate and test applying of a payload on a host device machine. These tests
+can be viewed as dynamic tests without the need for an actual device. Other
+`update_payload` scripts (like [`check_update_payload`]) can be used to
+statically check that a payload is in the correct state and its application
+works correctly. These scripts actually apply the payload statically without
+running the code in payload_consumer.
+
+## Postinstall
+
+[Postinstall] is a process called after the updater client writes the new image
+artifacts to the inactive partitions. One of postinstall's main responsibilities
+is to recreate the dm-verity tree hash at the end of the root partition. Among
+other things, it installs new firmware updates or any board specific
+processes. Postinstall runs in separate chroot inside the newly installed
+partition. So it is quite separated from the rest of the active running
+system. Anything that needs to be done after an update and before the device is
+rebooted, should be implemented inside the postinstall.
+
+## Building Update Engine
+
+You can build `update_engine` the same as other platform applications:
+
+```bash
+(chroot) $ emerge-${BOARD} update_engine
+```
+or to build without the source copy:
+
+```bash
+(chroot) $ cros_workon_make --board=${BOARD} update_engine
+```
+
+After a change in the `update_engine` daemon, either build an image and install
+the image on the device using cros flash, etc. or use `cros deploy` to only
+install the `update_engine` service on the device:
+
+```bash
+(chroot) $ cros deploy update_engine
+```
+
+You need to restart the `update_engine` daemon in order to see the affected
+changes:
+
+```bash
+# SSH into the device.
+restart update-engine # with a dash not underscore.
+```
+
+Other payload generation tools like `delta_generator` are board agnostic and
+only available in the SDK. So in order to make any changes to the
+`delta_generator`, you should build the SDK:
+
+```bash
+# Do it only once to start building the 9999 ebuild from ToT.
+(chroot) $ cros_workon --host start update_engine
+
+(chroot) $ sudo emerge update_engine
+```
+
+If you make any changes to the D-Bus interface make sure `system_api`,
+`update_engine-client`, and `update_engine` packages are marked to build from
+9999 ebuild and then build both packages in that order:
+
+```bash
+(chroot) $ emerge-${BOARD} system_api update_engine-client update_engine
+```
+
+If you make any changes to [`update_engine` protobufs] in the `system_api`,
+build the `system_api` package first.
+
+## Running Unit Tests
+
+[Running unit tests similar to other platforms]:
+
+```bash
+(chroot) $ FEATURES=test emerge-<board> update_engine
+```
+
+or
+
+```bash
+(chroot) $ cros_workon_make --board=<board> --test update_engine
+```
+
+or
+
+```bash
+(chroot) $ cros_run_unit_tests --board ${BOARD} --packages update_engine
+```
+
+The above commands run all the unit tests, but `update_engine` package is quite
+large and it takes a long time to run all the unit tests. To run all unit tests
+in a test class run:
+
+```bash
+(chroot) $ FEATURES=test \
+ P2_TEST_FILTER="*OmahaRequestActionTest.*-*RunAsRoot*" \
+ emerge-amd64-generic update_engine
+```
+
+To run one exact unit test fixture (e.g. `MultiAppUpdateTest`), run:
+
+```bash
+(chroot) $ FEATURES=test \
+ P2_TEST_FILTER="*OmahaRequestActionTest.MultiAppUpdateTest-*RunAsRoot*" \
+ emerge-amd64-generic update_engine
+```
+
+To run `update_payload` unit tests, enter the `update_engine/scripts` directory
+and run the desired `unittest.py` files.
+
+## Initiating a Configured Update
+
+There are different methods to initiate an update:
+
+* Click on the “Check For Update” button in setting’s About page. There is no
+ way to configure this way of update check.
+* Use the [`update_engine_client`] program. There are a few configurations you
+ can do.
+* Call `autest` in the crosh. Mainly used by the QA team and is not intended
+ to be used by any other team.
+* Use [`cros flash`]. It internally uses the update_engine to flash a device
+ with a given image.
+* Run one of many auto update autotests.
+* Start a [Dev Server] on your host machine and send a specific HTTP request
+ (look at `cros_au` API in the Dev Server code), that has the information
+ like the IP address of your Chromebook and where the update payloads are
+ located to the Dev Server to start an update on your device (**Warning:**
+ complicated to do, not recommended).
+
+`update_engine_client` is a client application that can help initiate an update
+or get more information about the status of the updater client. It has several
+options like initiating an interactive vs. non-interactive update, changing
+channels, getting the current status of update process, doing a rollback,
+changing the Omaha URL to download the payload (the most important one), etc.
+
+`update_engine` daemon reads the `/etc/lsb-release` file on the device to
+identify different update parameters like the updater server (Omaha) URL, the
+current channel, etc. However, to override any of these parameters, create the
+file `/mnt/stateful_partition/etc/lsb-release` with desired customized
+parameters. For example, this can be used to point to a developer version of the
+update server and allow the update_engine to schedule a periodic update from
+that specific server.
+
+If you have some changes in the protocol that communicates with Omaha, but you
+don’t have those changes in the update server, or you have some specific
+payloads that do not exist on the production update server you can use
+[Nebraska] to help with doing an update.
+
+## Note to Developers and Maintainers
+
+When changing the update engine source code be extra careful about these things:
+
+### Do NOT Break Backward Compatibility
+
+At each release cycle we should be able to generate full and delta payloads that
+can correctly be applied to older devices that run older versions of the update
+engine client. So for example, removing or not passing arguments in the metadata
+proto file might break older clients. Or passing operations that are not
+understood in older clients will break them. Whenever changing anything in the
+payload generation process, ask yourself this question: Would it work on older
+clients? If not, do I need to control it with minor versions or any other means.
+
+Especially regarding enterprise rollback, a newer updater client should be able
+to accept an older update payload. Normally this happens using a full payload,
+but care should be taken in order to not break this compatibility.
+
+### Think About The Future
+
+When creating a change in the update engine, think about 5 years from now:
+
+* How can the change be implemented that five years from now older clients
+ don’t break?
+* How is it going to be maintained five years from now?
+* How can it make it easier for future changes without breaking older clients
+ or incurring heavy maintenance costs?
+
+### Prefer Not To Implement Your Feature In The Updater Client
+If a feature can be implemented from the server side, do NOT implement it in the
+client updater. Because the client updater can be fragile at points and small
+mistakes can have catastrophic consequences. For example, if a bug is introduced
+in the updater client that causes it to crash right before checking for update
+and we can't quite catch this bug early in the release process, then the
+production devices which have already moved to the new buggy system, may no
+longer receive automatic updates anymore. So, always think: can the feature
+being implemented be done from the server side (with potentially minimal
+changes to the client updater)? Or can the feature be moved to another service
+with minimal interface to the updater client. Answering these questions will pay
+off greatly in the future.
+
+### Be Respectful Of Other Code Bases
+
+The current update engine code base is used in many projects like Android. We
+sync the code base between these two projects frequently. Try to not break Android
+or other systems that share the update engine code. Whenever landing a change,
+always think about whether Android needs that change:
+
+* How will it affect Android?
+* Can the change be moved to an interface and stubs implementations be
+ implemented so as not to affect Android?
+* Can Chrome OS or Android specific code be guarded by macros?
+
+As a basic measure, if adding/removing/renaming code, make sure to change both
+`build.gn` and `Android.bp`. Do not bring Chrome OS specific code (for example
+other libraries that live in `system_api` or `dlcservice`) into the common code
+of update_engine. Try to separate these concerns using best software engineering
+practices.
+
+### Merging from Android (or other code bases)
+
+Chrome OS tracks the Android code as an [upstream branch]. To merge the Android
+code to Chrome OS (or vice versa) just do a `git merge` of that branch into
+Chrome OS, test it using whatever means and upload a merge commit.
+
+```bash
+repo start merge-aosp
+git merge --no-ff --strategy=recursive -X patience cros/upstream
+repo upload --cbr --no-verify .
+```
+
+[Postinstall]: #postinstall
+[update payload file specification]: #update-payload-file-specification
+[OTA]: https://source.android.com/devices/tech/ota
+[DLC]: https://chromium.googlesource.com/chromiumos/platform2/+/master/dlcservice
+[`chromeos-setgoodkernel`]: https://chromium.googlesource.com/chromiumos/platform2/+/master/installer/chromeos-setgoodkernel
+[D-Bus interface]: /dbus_bindings/org.chromium.UpdateEngineInterface.dbus-xml
+[this repository]: /
+[UpdateManager]: /update_manager/update_manager.cc
+[update_manager]: /update_manager/
+[P2P update related code]: https://chromium.googlesource.com/chromiumos/platform2/+/master/p2p/
+[`cros_generate_update_payloads`]: https://chromium.googlesource.com/chromiumos/chromite/+/master/scripts/cros_generate_update_payload.py
+[`chromite/lib/paygen`]: https://chromium.googlesource.com/chromiumos/chromite/+/master/lib/paygen/
+[DeltaArchiveManifest]: /update_metadata.proto#302
+[Signatures]: /update_metadata.proto#122
+[hard coded]: /update_engine.conf
+[Manifest protobuf]: /update_metadata.proto
+[update_payload]: /scripts/
+[Postinstall]: https://chromium.googlesource.com/chromiumos/platform2/+/master/installer/chromeos-postinst
+[`update_engine` protobufs]: https://chromium.googlesource.com/chromiumos/platform2/+/master/system_api/dbus/update_engine/
+[Running unit tests similar to other platforms]: https://chromium.googlesource.com/chromiumos/docs/+/master/testing/running_unit_tests.md
+[Nebraska]: https://chromium.googlesource.com/chromiumos/platform/dev-util/+/master/nebraska/
+[upstream branch]: https://chromium.googlesource.com/aosp/platform/system/update_engine/+/upstream
+[`cros flash`]: https://chromium.googlesource.com/chromiumos/docs/+/master/cros_flash.md
+[bsdiff]: https://android.googlesource.com/platform/external/bsdiff/+/master
+[puffin]: https://android.googlesource.com/platform/external/puffin/+/master
+[`update_engine_client`]: /update_engine_client.cc
+[`brillo_update_payload`]: /scripts/brillo_update_payload
+[`check_update_payload`]: /scripts/paycheck.py
+[Dev Server]: https://chromium.googlesource.com/chromiumos/chromite/+/master/docs/devserver.md
diff --git a/UpdateEngine.conf b/UpdateEngine.conf
index 9490096..f9a66dc 100644
--- a/UpdateEngine.conf
+++ b/UpdateEngine.conf
@@ -1,5 +1,20 @@
<!DOCTYPE busconfig PUBLIC "-//freedesktop//DTD D-BUS Bus Configuration 1.0//EN"
"http://www.freedesktop.org/standards/dbus/1.0/busconfig.dtd">
+<!--
+ Copyright (C) 2019 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+!-->
<busconfig>
<policy user="root">
<allow own="org.chromium.UpdateEngine" />
@@ -26,7 +41,7 @@
send_member="ResetStatus"/>
<allow send_destination="org.chromium.UpdateEngine"
send_interface="org.chromium.UpdateEngineInterface"
- send_member="GetStatus"/>
+ send_member="GetStatusAdvanced"/>
<allow send_destination="org.chromium.UpdateEngine"
send_interface="org.chromium.UpdateEngineInterface"
send_member="RebootIfNeeded"/>
@@ -66,15 +81,12 @@
<allow send_destination="org.chromium.UpdateEngine"
send_interface="org.chromium.UpdateEngineInterface"
send_member="GetLastAttemptError"/>
- <allow send_destination="org.chromium.UpdateEngine"
- send_interface="org.chromium.UpdateEngineInterface"
- send_member="GetEolStatus"/>
<allow send_interface="org.chromium.UpdateEngineLibcrosProxyResolvedInterface" />
</policy>
<policy user="power">
<allow send_destination="org.chromium.UpdateEngine"
send_interface="org.chromium.UpdateEngineInterface"
- send_member="GetStatus"/>
+ send_member="GetStatusAdvanced"/>
</policy>
<policy user="dlcservice">
<allow send_destination="org.chromium.UpdateEngine"
@@ -82,6 +94,12 @@
send_member="GetStatus"/>
<allow send_destination="org.chromium.UpdateEngine"
send_interface="org.chromium.UpdateEngineInterface"
+ send_member="GetStatusAdvanced"/>
+ <allow send_destination="org.chromium.UpdateEngine"
+ send_interface="org.chromium.UpdateEngineInterface"
send_member="AttemptInstall"/>
+ <allow send_destination="org.chromium.UpdateEngine"
+ send_interface="org.chromium.UpdateEngineInterface"
+ send_member="SetDlcActiveValue"/>
</policy>
</busconfig>
diff --git a/binder_bindings/android/brillo/IUpdateEngine.aidl b/binder_bindings/android/brillo/IUpdateEngine.aidl
deleted file mode 100644
index 56e1524..0000000
--- a/binder_bindings/android/brillo/IUpdateEngine.aidl
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.brillo;
-
-import android.brillo.IUpdateEngineStatusCallback;
-import android.brillo.ParcelableUpdateEngineStatus;
-
-interface IUpdateEngine {
- void SetUpdateAttemptFlags(in int flags);
- boolean AttemptUpdate(in String app_version, in String omaha_url, in int flags);
- void AttemptRollback(in boolean powerwash);
- boolean CanRollback();
- void ResetStatus();
- ParcelableUpdateEngineStatus GetStatus();
- void RebootIfNeeded();
- void SetChannel(in String target_channel, in boolean powewash);
- String GetChannel(in boolean get_current_channel);
- void SetCohortHint(in String cohort_hint);
- String GetCohortHint();
- void SetP2PUpdatePermission(in boolean enabled);
- boolean GetP2PUpdatePermission();
- void SetUpdateOverCellularPermission(in boolean enabled);
- void SetUpdateOverCellularTarget(in String target_version,
- in long target_size);
- boolean GetUpdateOverCellularPermission();
- long GetDurationSinceUpdate();
- String GetPrevVersion();
- String GetRollbackPartition();
- void RegisterStatusCallback(in IUpdateEngineStatusCallback callback);
- int GetLastAttemptError();
- int GetEolStatus();
-}
diff --git a/binder_bindings/android/brillo/IUpdateEngineStatusCallback.aidl b/binder_bindings/android/brillo/IUpdateEngineStatusCallback.aidl
deleted file mode 100644
index 837d44d..0000000
--- a/binder_bindings/android/brillo/IUpdateEngineStatusCallback.aidl
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.brillo;
-
-import android.brillo.ParcelableUpdateEngineStatus;
-
-interface IUpdateEngineStatusCallback {
- oneway
- void HandleStatusUpdate(in ParcelableUpdateEngineStatus status);
-}
diff --git a/binder_bindings/android/brillo/ParcelableUpdateEngineStatus.aidl b/binder_bindings/android/brillo/ParcelableUpdateEngineStatus.aidl
deleted file mode 100644
index fc10505..0000000
--- a/binder_bindings/android/brillo/ParcelableUpdateEngineStatus.aidl
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.brillo;
-
-parcelable ParcelableUpdateEngineStatus cpp_header
- "update_engine/parcelable_update_engine_status.h";
diff --git a/binder_service_brillo.cc b/binder_service_brillo.cc
deleted file mode 100644
index cc74763..0000000
--- a/binder_service_brillo.cc
+++ /dev/null
@@ -1,247 +0,0 @@
-//
-// Copyright (C) 2015 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#include "update_engine/binder_service_brillo.h"
-
-#include <base/bind.h>
-
-#include <binderwrapper/binder_wrapper.h>
-
-#include <utils/String16.h>
-#include <utils/StrongPointer.h>
-
-#include "update_engine/update_status_utils.h"
-
-using android::sp;
-using android::String16;
-using android::String8;
-using android::binder::Status;
-using android::brillo::IUpdateEngineStatusCallback;
-using android::brillo::ParcelableUpdateEngineStatus;
-using brillo::ErrorPtr;
-using std::string;
-using update_engine::UpdateEngineStatus;
-
-namespace chromeos_update_engine {
-
-namespace {
-string NormalString(const String16& in) {
- return string{String8{in}.string()};
-}
-
-Status ToStatus(ErrorPtr* error) {
- return Status::fromServiceSpecificError(
- 1, String8{error->get()->GetMessage().c_str()});
-}
-} // namespace
-
-template <typename... Parameters, typename... Arguments>
-Status BinderUpdateEngineBrilloService::CallCommonHandler(
- bool (UpdateEngineService::*Handler)(ErrorPtr*, Parameters...),
- Arguments... arguments) {
- ErrorPtr error;
- if (((common_.get())->*Handler)(&error, arguments...))
- return Status::ok();
- return ToStatus(&error);
-}
-
-Status BinderUpdateEngineBrilloService::SetUpdateAttemptFlags(int flags) {
- return CallCommonHandler(&UpdateEngineService::SetUpdateAttemptFlags, flags);
-}
-
-Status BinderUpdateEngineBrilloService::AttemptUpdate(
- const String16& app_version,
- const String16& omaha_url,
- int flags,
- bool* out_result) {
- return CallCommonHandler(&UpdateEngineService::AttemptUpdate,
- NormalString(app_version),
- NormalString(omaha_url),
- flags,
- out_result);
-}
-
-Status BinderUpdateEngineBrilloService::AttemptRollback(bool powerwash) {
- return CallCommonHandler(&UpdateEngineService::AttemptRollback, powerwash);
-}
-
-Status BinderUpdateEngineBrilloService::CanRollback(bool* out_can_rollback) {
- return CallCommonHandler(&UpdateEngineService::CanRollback, out_can_rollback);
-}
-
-Status BinderUpdateEngineBrilloService::ResetStatus() {
- return CallCommonHandler(&UpdateEngineService::ResetStatus);
-}
-
-Status BinderUpdateEngineBrilloService::GetStatus(
- ParcelableUpdateEngineStatus* status) {
- UpdateEngineStatus update_engine_status;
- auto ret =
- CallCommonHandler(&UpdateEngineService::GetStatus, &update_engine_status);
-
- if (ret.isOk()) {
- *status = ParcelableUpdateEngineStatus(update_engine_status);
- }
-
- return ret;
-}
-
-Status BinderUpdateEngineBrilloService::RebootIfNeeded() {
- return CallCommonHandler(&UpdateEngineService::RebootIfNeeded);
-}
-
-Status BinderUpdateEngineBrilloService::SetChannel(
- const String16& target_channel, bool powerwash) {
- return CallCommonHandler(&UpdateEngineService::SetChannel,
- NormalString(target_channel),
- powerwash);
-}
-
-Status BinderUpdateEngineBrilloService::GetChannel(bool get_current_channel,
- String16* out_channel) {
- string channel_string;
- auto ret = CallCommonHandler(
- &UpdateEngineService::GetChannel, get_current_channel, &channel_string);
-
- *out_channel = String16(channel_string.c_str());
- return ret;
-}
-
-Status BinderUpdateEngineBrilloService::SetCohortHint(
- const String16& in_cohort_hint) {
- return CallCommonHandler(&UpdateEngineService::SetCohortHint,
- NormalString(in_cohort_hint));
-}
-
-Status BinderUpdateEngineBrilloService::GetCohortHint(
- String16* out_cohort_hint) {
- string cohort_hint;
- auto ret =
- CallCommonHandler(&UpdateEngineService::GetCohortHint, &cohort_hint);
-
- *out_cohort_hint = String16(cohort_hint.c_str());
- return ret;
-}
-
-Status BinderUpdateEngineBrilloService::SetP2PUpdatePermission(bool enabled) {
- return CallCommonHandler(&UpdateEngineService::SetP2PUpdatePermission,
- enabled);
-}
-
-Status BinderUpdateEngineBrilloService::GetP2PUpdatePermission(
- bool* out_p2p_permission) {
- return CallCommonHandler(&UpdateEngineService::GetP2PUpdatePermission,
- out_p2p_permission);
-}
-
-Status BinderUpdateEngineBrilloService::SetUpdateOverCellularPermission(
- bool enabled) {
- return CallCommonHandler(
- &UpdateEngineService::SetUpdateOverCellularPermission, enabled);
-}
-
-Status BinderUpdateEngineBrilloService::SetUpdateOverCellularTarget(
- const String16& target_version, int64_t target_size) {
- return CallCommonHandler(&UpdateEngineService::SetUpdateOverCellularTarget,
- NormalString(target_version),
- target_size);
-}
-
-Status BinderUpdateEngineBrilloService::GetUpdateOverCellularPermission(
- bool* out_cellular_permission) {
- return CallCommonHandler(
- &UpdateEngineService::GetUpdateOverCellularPermission,
- out_cellular_permission);
-}
-
-Status BinderUpdateEngineBrilloService::GetDurationSinceUpdate(
- int64_t* out_duration) {
- return CallCommonHandler(&UpdateEngineService::GetDurationSinceUpdate,
- out_duration);
-}
-
-Status BinderUpdateEngineBrilloService::GetPrevVersion(
- String16* out_prev_version) {
- string version_string;
- auto ret =
- CallCommonHandler(&UpdateEngineService::GetPrevVersion, &version_string);
-
- *out_prev_version = String16(version_string.c_str());
- return ret;
-}
-
-Status BinderUpdateEngineBrilloService::GetRollbackPartition(
- String16* out_rollback_partition) {
- string partition_string;
- auto ret = CallCommonHandler(&UpdateEngineService::GetRollbackPartition,
- &partition_string);
-
- if (ret.isOk()) {
- *out_rollback_partition = String16(partition_string.c_str());
- }
-
- return ret;
-}
-
-Status BinderUpdateEngineBrilloService::RegisterStatusCallback(
- const sp<IUpdateEngineStatusCallback>& callback) {
- callbacks_.emplace_back(callback);
-
- auto binder_wrapper = android::BinderWrapper::Get();
-
- binder_wrapper->RegisterForDeathNotifications(
- IUpdateEngineStatusCallback::asBinder(callback),
- base::Bind(&BinderUpdateEngineBrilloService::UnregisterStatusCallback,
- base::Unretained(this),
- base::Unretained(callback.get())));
-
- return Status::ok();
-}
-
-Status BinderUpdateEngineBrilloService::GetLastAttemptError(
- int* out_last_attempt_error) {
- return CallCommonHandler(&UpdateEngineService::GetLastAttemptError,
- out_last_attempt_error);
-}
-
-Status BinderUpdateEngineBrilloService::GetEolStatus(int* out_eol_status) {
- return CallCommonHandler(&UpdateEngineService::GetEolStatus, out_eol_status);
-}
-
-void BinderUpdateEngineBrilloService::UnregisterStatusCallback(
- IUpdateEngineStatusCallback* callback) {
- auto it = callbacks_.begin();
- while (it != callbacks_.end() && it->get() != callback)
- it++;
-
- if (it == callbacks_.end()) {
- LOG(ERROR) << "Got death notification for unknown callback.";
- return;
- }
-
- LOG(INFO) << "Erasing orphan callback";
- callbacks_.erase(it);
-}
-
-void BinderUpdateEngineBrilloService::SendStatusUpdate(
- const UpdateEngineStatus& update_engine_status) {
- ParcelableUpdateEngineStatus parcelable_status(update_engine_status);
- for (auto& callback : callbacks_) {
- callback->HandleStatusUpdate(parcelable_status);
- }
-}
-
-} // namespace chromeos_update_engine
diff --git a/binder_service_brillo.h b/binder_service_brillo.h
deleted file mode 100644
index d0d0dc9..0000000
--- a/binder_service_brillo.h
+++ /dev/null
@@ -1,114 +0,0 @@
-//
-// Copyright (C) 2016 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#ifndef UPDATE_ENGINE_BINDER_SERVICE_BRILLO_H_
-#define UPDATE_ENGINE_BINDER_SERVICE_BRILLO_H_
-
-#include <utils/Errors.h>
-
-#include <memory>
-#include <string>
-#include <vector>
-
-#include <utils/RefBase.h>
-
-#include "update_engine/common_service.h"
-#include "update_engine/parcelable_update_engine_status.h"
-#include "update_engine/service_observer_interface.h"
-
-#include "android/brillo/BnUpdateEngine.h"
-#include "android/brillo/IUpdateEngineStatusCallback.h"
-
-namespace chromeos_update_engine {
-
-class BinderUpdateEngineBrilloService : public android::brillo::BnUpdateEngine,
- public ServiceObserverInterface {
- public:
- explicit BinderUpdateEngineBrilloService(SystemState* system_state)
- : common_(new UpdateEngineService(system_state)) {}
- virtual ~BinderUpdateEngineBrilloService() = default;
-
- const char* ServiceName() const {
- return "android.brillo.UpdateEngineService";
- }
-
- // ServiceObserverInterface overrides.
- void SendStatusUpdate(
- const update_engine::UpdateEngineStatus& update_engine_status) override;
- void SendPayloadApplicationComplete(ErrorCode error_code) override {}
-
- // android::brillo::BnUpdateEngine overrides.
- android::binder::Status SetUpdateAttemptFlags(int flags) override;
- android::binder::Status AttemptUpdate(const android::String16& app_version,
- const android::String16& omaha_url,
- int flags,
- bool* out_result) override;
- android::binder::Status AttemptRollback(bool powerwash) override;
- android::binder::Status CanRollback(bool* out_can_rollback) override;
- android::binder::Status ResetStatus() override;
- android::binder::Status GetStatus(
- android::brillo::ParcelableUpdateEngineStatus* status);
- android::binder::Status RebootIfNeeded() override;
- android::binder::Status SetChannel(const android::String16& target_channel,
- bool powerwash) override;
- android::binder::Status GetChannel(bool get_current_channel,
- android::String16* out_channel) override;
- android::binder::Status SetCohortHint(
- const android::String16& cohort_hint) override;
- android::binder::Status GetCohortHint(
- android::String16* out_cohort_hint) override;
- android::binder::Status SetP2PUpdatePermission(bool enabled) override;
- android::binder::Status GetP2PUpdatePermission(
- bool* out_p2p_permission) override;
- android::binder::Status SetUpdateOverCellularPermission(
- bool enabled) override;
- android::binder::Status SetUpdateOverCellularTarget(
- const android::String16& target_version, int64_t target_size) override;
- android::binder::Status GetUpdateOverCellularPermission(
- bool* out_cellular_permission) override;
- android::binder::Status GetDurationSinceUpdate(
- int64_t* out_duration) override;
- android::binder::Status GetPrevVersion(
- android::String16* out_prev_version) override;
- android::binder::Status GetRollbackPartition(
- android::String16* out_rollback_partition) override;
- android::binder::Status RegisterStatusCallback(
- const android::sp<android::brillo::IUpdateEngineStatusCallback>& callback)
- override;
- android::binder::Status GetLastAttemptError(
- int* out_last_attempt_error) override;
- android::binder::Status GetEolStatus(int* out_eol_status) override;
-
- private:
- // Generic function for dispatching to the common service.
- template <typename... Parameters, typename... Arguments>
- android::binder::Status CallCommonHandler(
- bool (UpdateEngineService::*Handler)(brillo::ErrorPtr*, Parameters...),
- Arguments... arguments);
-
- // To be used as a death notification handler only.
- void UnregisterStatusCallback(
- android::brillo::IUpdateEngineStatusCallback* callback);
-
- std::unique_ptr<UpdateEngineService> common_;
-
- std::vector<android::sp<android::brillo::IUpdateEngineStatusCallback>>
- callbacks_;
-};
-
-} // namespace chromeos_update_engine
-
-#endif // UPDATE_ENGINE_BINDER_SERVICE_BRILLO_H_
diff --git a/boot_control_chromeos.cc b/boot_control_chromeos.cc
index da84e99..95456f0 100644
--- a/boot_control_chromeos.cc
+++ b/boot_control_chromeos.cc
@@ -19,11 +19,14 @@
#include <memory>
#include <string>
#include <utility>
+#include <vector>
#include <base/bind.h>
#include <base/files/file_path.h>
#include <base/files/file_util.h>
+#include <base/strings/string_split.h>
#include <base/strings/string_util.h>
+#include <chromeos/constants/imageloader.h>
#include <rootdev/rootdev.h>
extern "C" {
@@ -36,6 +39,7 @@
#include "update_engine/common/utils.h"
using std::string;
+using std::vector;
namespace {
@@ -44,8 +48,7 @@
const char* kAndroidPartitionNameKernel = "boot";
const char* kAndroidPartitionNameRoot = "system";
-const char kDlcInstallRootDirectoryEncrypted[] = "/home/chronos/dlc";
-const char kPartitionNamePrefixDlc[] = "dlc_";
+const char kPartitionNamePrefixDlc[] = "dlc";
const char kPartitionNameDlcA[] = "dlc_a";
const char kPartitionNameDlcB[] = "dlc_b";
const char kPartitionNameDlcImage[] = "dlc.img";
@@ -148,6 +151,31 @@
return current_slot_;
}
+bool BootControlChromeOS::ParseDlcPartitionName(
+ const std::string partition_name,
+ std::string* dlc_id,
+ std::string* dlc_package) const {
+ CHECK_NE(dlc_id, nullptr);
+ CHECK_NE(dlc_package, nullptr);
+
+ vector<string> tokens = base::SplitString(
+ partition_name, "/", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
+ if (tokens.size() != 3 || tokens[0] != kPartitionNamePrefixDlc) {
+ LOG(ERROR) << "DLC partition name (" << partition_name
+ << ") is not well formatted.";
+ return false;
+ }
+ if (tokens[1].empty() || tokens[2].empty()) {
+ LOG(ERROR) << " partition name does not contain valid DLC ID (" << tokens[1]
+ << ") or package (" << tokens[2] << ")";
+ return false;
+ }
+
+ *dlc_id = tokens[1];
+ *dlc_package = tokens[2];
+ return true;
+}
+
bool BootControlChromeOS::GetPartitionDevice(const std::string& partition_name,
BootControlInterface::Slot slot,
bool not_in_payload,
@@ -157,17 +185,13 @@
if (base::StartsWith(partition_name,
kPartitionNamePrefixDlc,
base::CompareCase::SENSITIVE)) {
- // Extract DLC module ID from partition_name (DLC module ID is the string
- // after |kPartitionNamePrefixDlc| in partition_name).
- const auto dlc_module_id =
- partition_name.substr(strlen(kPartitionNamePrefixDlc));
- if (dlc_module_id.empty()) {
- LOG(ERROR) << " partition name does not contain DLC module ID:"
- << partition_name;
+ string dlc_id, dlc_package;
+ if (!ParseDlcPartitionName(partition_name, &dlc_id, &dlc_package))
return false;
- }
- *device = base::FilePath(kDlcInstallRootDirectoryEncrypted)
- .Append(dlc_module_id)
+
+ *device = base::FilePath(imageloader::kDlcImageRootpath)
+ .Append(dlc_id)
+ .Append(dlc_package)
.Append(slot == 0 ? kPartitionNameDlcA : kPartitionNameDlcB)
.Append(kPartitionNameDlcImage)
.value();
diff --git a/boot_control_chromeos.h b/boot_control_chromeos.h
index 6edc148..f90e65b 100644
--- a/boot_control_chromeos.h
+++ b/boot_control_chromeos.h
@@ -64,6 +64,7 @@
friend class BootControlChromeOSTest;
FRIEND_TEST(BootControlChromeOSTest, SysfsBlockDeviceTest);
FRIEND_TEST(BootControlChromeOSTest, GetPartitionNumberTest);
+ FRIEND_TEST(BootControlChromeOSTest, ParseDlcPartitionNameTest);
// Returns the sysfs block device for a root block device. For example,
// SysfsBlockDevice("/dev/sda") returns "/sys/block/sda". Returns an empty
@@ -79,6 +80,13 @@
int GetPartitionNumber(const std::string partition_name,
BootControlInterface::Slot slot) const;
+ // Extracts DLC module ID and package ID from partition name. The structure of
+ // the partition name is dlc/<dlc-id>/<dlc-package>. For example:
+ // dlc/dummy-dlc/dummy-package
+ bool ParseDlcPartitionName(const std::string partition_name,
+ std::string* dlc_id,
+ std::string* dlc_package) const;
+
// Cached values for GetNumSlots() and GetCurrentSlot().
BootControlInterface::Slot num_slots_{1};
BootControlInterface::Slot current_slot_{BootControlInterface::kInvalidSlot};
diff --git a/boot_control_chromeos_unittest.cc b/boot_control_chromeos_unittest.cc
index 6a60009..1c40dce 100644
--- a/boot_control_chromeos_unittest.cc
+++ b/boot_control_chromeos_unittest.cc
@@ -18,6 +18,8 @@
#include <gtest/gtest.h>
+using std::string;
+
namespace chromeos_update_engine {
class BootControlChromeOSTest : public ::testing::Test {
@@ -67,4 +69,22 @@
EXPECT_EQ(-1, bootctl_.GetPartitionNumber("A little panda", 0));
}
+TEST_F(BootControlChromeOSTest, ParseDlcPartitionNameTest) {
+ string id, package;
+
+ EXPECT_TRUE(bootctl_.ParseDlcPartitionName("dlc/id/package", &id, &package));
+ EXPECT_EQ(id, "id");
+ EXPECT_EQ(package, "package");
+
+ EXPECT_FALSE(
+ bootctl_.ParseDlcPartitionName("dlc-foo/id/package", &id, &package));
+ EXPECT_FALSE(
+ bootctl_.ParseDlcPartitionName("dlc-foo/id/package/", &id, &package));
+ EXPECT_FALSE(bootctl_.ParseDlcPartitionName("dlc/id", &id, &package));
+ EXPECT_FALSE(bootctl_.ParseDlcPartitionName("dlc/id/", &id, &package));
+ EXPECT_FALSE(bootctl_.ParseDlcPartitionName("dlc//package", &id, &package));
+ EXPECT_FALSE(bootctl_.ParseDlcPartitionName("dlc", &id, &package));
+ EXPECT_FALSE(bootctl_.ParseDlcPartitionName("foo", &id, &package));
+}
+
} // namespace chromeos_update_engine
diff --git a/client-headers/BUILD.gn b/client-headers/BUILD.gn
new file mode 100644
index 0000000..8c1a17e
--- /dev/null
+++ b/client-headers/BUILD.gn
@@ -0,0 +1,31 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import("//common-mk/generate-dbus-proxies.gni")
+
+group("all") {
+ deps = [ ":libupdate_engine-client-headers" ]
+}
+
+# update_engine client library generated headers. Used by other daemons and
+# by the update_engine_client console program to interact with update_engine.
+generate_dbus_proxies("libupdate_engine-client-headers") {
+ sources = [ "../dbus_bindings/org.chromium.UpdateEngineInterface.dbus-xml" ]
+ dbus_service_config = "../dbus_bindings/dbus-service-config.json"
+ mock_output_file = "include/update_engine/dbus-proxy-mocks.h"
+ proxy_output_file = "include/update_engine/dbus-proxies.h"
+ proxy_path_in_mocks = "update_engine/dbus-proxies.h"
+}
diff --git a/libupdate_engine-client-test.pc.in b/client-headers/libupdate_engine-client-test.pc.in
similarity index 100%
rename from libupdate_engine-client-test.pc.in
rename to client-headers/libupdate_engine-client-test.pc.in
diff --git a/libupdate_engine-client.pc.in b/client-headers/libupdate_engine-client.pc.in
similarity index 100%
rename from libupdate_engine-client.pc.in
rename to client-headers/libupdate_engine-client.pc.in
diff --git a/client_library/client.cc b/client_library/client.cc
deleted file mode 100644
index b05df90..0000000
--- a/client_library/client.cc
+++ /dev/null
@@ -1,46 +0,0 @@
-//
-// Copyright (C) 2015 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#include "update_engine/client_library/include/update_engine/client.h"
-
-#include <memory>
-
-#if USE_BINDER
-#include "update_engine/client_library/client_binder.h"
-#else // !USE_BINDER
-#include "update_engine/client_library/client_dbus.h"
-#endif // USE_BINDER
-
-using std::unique_ptr;
-
-namespace update_engine {
-
-unique_ptr<UpdateEngineClient> UpdateEngineClient::CreateInstance() {
-#if USE_BINDER
- auto update_engine_client_impl = new internal::BinderUpdateEngineClient{};
-#else // !USE_BINDER
- auto update_engine_client_impl = new internal::DBusUpdateEngineClient{};
-#endif // USE_BINDER
- auto ret = unique_ptr<UpdateEngineClient>{update_engine_client_impl};
-
- if (!update_engine_client_impl->Init()) {
- ret.reset();
- }
-
- return ret;
-}
-
-} // namespace update_engine
diff --git a/client_library/client_binder.cc b/client_library/client_binder.cc
deleted file mode 100644
index 588bc64..0000000
--- a/client_library/client_binder.cc
+++ /dev/null
@@ -1,264 +0,0 @@
-//
-// Copyright (C) 2015 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#include "update_engine/client_library/client_binder.h"
-
-#include <binder/IServiceManager.h>
-
-#include <base/message_loop/message_loop.h>
-#include <utils/String8.h>
-
-#include "update_engine/common_service.h"
-#include "update_engine/parcelable_update_engine_status.h"
-#include "update_engine/update_status_utils.h"
-
-using android::getService;
-using android::OK;
-using android::String16;
-using android::String8;
-using android::binder::Status;
-using android::brillo::ParcelableUpdateEngineStatus;
-using chromeos_update_engine::StringToUpdateStatus;
-using std::string;
-using update_engine::UpdateAttemptFlags;
-
-namespace update_engine {
-namespace internal {
-
-bool BinderUpdateEngineClient::Init() {
- if (!binder_watcher_.Init())
- return false;
-
- return getService(String16{"android.brillo.UpdateEngineService"},
- &service_) == OK;
-}
-
-bool BinderUpdateEngineClient::AttemptUpdate(const string& in_app_version,
- const string& in_omaha_url,
- bool at_user_request) {
- bool started;
- return service_
- ->AttemptUpdate(
- String16{in_app_version.c_str()},
- String16{in_omaha_url.c_str()},
- at_user_request ? 0 : UpdateAttemptFlags::kFlagNonInteractive,
- &started)
- .isOk();
-}
-
-bool BinderUpdateEngineClient::AttemptInstall(
- const string& omaha_url, const std::vector<string>& dlc_module_ids) {
- return false;
-}
-
-bool BinderUpdateEngineClient::GetStatus(int64_t* out_last_checked_time,
- double* out_progress,
- UpdateStatus* out_update_status,
- string* out_new_version,
- int64_t* out_new_size) const {
- ParcelableUpdateEngineStatus status;
-
- if (!service_->GetStatus(&status).isOk())
- return false;
-
- *out_last_checked_time = status.last_checked_time_;
- *out_progress = status.progress_;
- StringToUpdateStatus(String8{status.current_operation_}.string(),
- out_update_status);
- *out_new_version = String8{status.new_version_}.string();
- *out_new_size = status.new_size_;
- return true;
-}
-
-bool BinderUpdateEngineClient::SetCohortHint(const string& in_cohort_hint) {
- return service_->SetCohortHint(String16{in_cohort_hint.c_str()}).isOk();
-}
-
-bool BinderUpdateEngineClient::GetCohortHint(string* out_cohort_hint) const {
- String16 out_as_string16;
-
- if (!service_->GetCohortHint(&out_as_string16).isOk())
- return false;
-
- *out_cohort_hint = String8{out_as_string16}.string();
- return true;
-}
-
-bool BinderUpdateEngineClient::SetUpdateOverCellularPermission(bool allowed) {
- return service_->SetUpdateOverCellularPermission(allowed).isOk();
-}
-
-bool BinderUpdateEngineClient::GetUpdateOverCellularPermission(
- bool* allowed) const {
- return service_->GetUpdateOverCellularPermission(allowed).isOk();
-}
-
-bool BinderUpdateEngineClient::SetP2PUpdatePermission(bool enabled) {
- return service_->SetP2PUpdatePermission(enabled).isOk();
-}
-
-bool BinderUpdateEngineClient::GetP2PUpdatePermission(bool* enabled) const {
- return service_->GetP2PUpdatePermission(enabled).isOk();
-}
-
-bool BinderUpdateEngineClient::Rollback(bool powerwash) {
- return service_->AttemptRollback(powerwash).isOk();
-}
-
-bool BinderUpdateEngineClient::GetRollbackPartition(
- string* rollback_partition) const {
- String16 out_as_string16;
-
- if (!service_->GetRollbackPartition(&out_as_string16).isOk())
- return false;
-
- *rollback_partition = String8{out_as_string16}.string();
- return true;
-}
-
-bool BinderUpdateEngineClient::GetPrevVersion(string* prev_version) const {
- String16 out_as_string16;
-
- if (!service_->GetPrevVersion(&out_as_string16).isOk())
- return false;
-
- *prev_version = String8{out_as_string16}.string();
- return true;
-}
-
-void BinderUpdateEngineClient::RebootIfNeeded() {
- if (!service_->RebootIfNeeded().isOk()) {
- // Reboot error code doesn't necessarily mean that a reboot
- // failed. For example, D-Bus may be shutdown before we receive the
- // result.
- LOG(INFO) << "RebootIfNeeded() failure ignored.";
- }
-}
-
-bool BinderUpdateEngineClient::ResetStatus() {
- return service_->ResetStatus().isOk();
-}
-
-Status BinderUpdateEngineClient::StatusUpdateCallback::HandleStatusUpdate(
- const ParcelableUpdateEngineStatus& status) {
- UpdateStatus update_status;
-
- StringToUpdateStatus(String8{status.current_operation_}.string(),
- &update_status);
-
- for (auto& handler : client_->handlers_) {
- handler->HandleStatusUpdate(status.last_checked_time_,
- status.progress_,
- update_status,
- String8{status.new_version_}.string(),
- status.new_size_);
- }
-
- return Status::ok();
-}
-
-bool BinderUpdateEngineClient::RegisterStatusUpdateHandler(
- StatusUpdateHandler* handler) {
- if (!status_callback_.get()) {
- status_callback_ = new BinderUpdateEngineClient::StatusUpdateCallback(this);
- if (!service_->RegisterStatusCallback(status_callback_).isOk()) {
- return false;
- }
- }
-
- handlers_.push_back(handler);
-
- int64_t last_checked_time;
- double progress;
- UpdateStatus update_status;
- string new_version;
- int64_t new_size;
-
- if (!GetStatus(&last_checked_time,
- &progress,
- &update_status,
- &new_version,
- &new_size)) {
- handler->IPCError("Could not get status from binder service");
- }
-
- handler->HandleStatusUpdate(
- last_checked_time, progress, update_status, new_version, new_size);
-
- return true;
-}
-
-bool BinderUpdateEngineClient::UnregisterStatusUpdateHandler(
- StatusUpdateHandler* handler) {
- auto it = std::find(handlers_.begin(), handlers_.end(), handler);
- if (it != handlers_.end()) {
- handlers_.erase(it);
- return true;
- }
-
- return false;
-}
-
-bool BinderUpdateEngineClient::SetTargetChannel(const string& in_target_channel,
- bool allow_powerwash) {
- return service_
- ->SetChannel(String16{in_target_channel.c_str()}, allow_powerwash)
- .isOk();
-}
-
-bool BinderUpdateEngineClient::GetTargetChannel(string* out_channel) const {
- String16 out_as_string16;
-
- if (!service_->GetChannel(false, &out_as_string16).isOk())
- return false;
-
- *out_channel = String8{out_as_string16}.string();
- return true;
-}
-
-bool BinderUpdateEngineClient::GetChannel(string* out_channel) const {
- String16 out_as_string16;
-
- if (!service_->GetChannel(true, &out_as_string16).isOk())
- return false;
-
- *out_channel = String8{out_as_string16}.string();
- return true;
-}
-
-bool BinderUpdateEngineClient::GetLastAttemptError(
- int32_t* last_attempt_error) const {
- int out_as_int;
-
- if (!service_->GetLastAttemptError(&out_as_int).isOk())
- return false;
-
- *last_attempt_error = out_as_int;
- return true;
-}
-
-bool BinderUpdateEngineClient::GetEolStatus(int32_t* eol_status) const {
- int out_as_int;
-
- if (!service_->GetEolStatus(&out_as_int).isOk())
- return false;
-
- *eol_status = out_as_int;
- return true;
-}
-
-} // namespace internal
-} // namespace update_engine
diff --git a/client_library/client_binder.h b/client_library/client_binder.h
deleted file mode 100644
index f3e4102..0000000
--- a/client_library/client_binder.h
+++ /dev/null
@@ -1,117 +0,0 @@
-//
-// Copyright (C) 2016 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#ifndef UPDATE_ENGINE_CLIENT_LIBRARY_CLIENT_BINDER_H_
-#define UPDATE_ENGINE_CLIENT_LIBRARY_CLIENT_BINDER_H_
-
-#include <cstdint>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include <base/macros.h>
-#include <utils/String16.h>
-#include <utils/StrongPointer.h>
-
-#include <brillo/binder_watcher.h>
-
-#include "android/brillo/BnUpdateEngineStatusCallback.h"
-#include "android/brillo/IUpdateEngine.h"
-
-#include "update_engine/client_library/include/update_engine/client.h"
-
-namespace update_engine {
-namespace internal {
-
-class BinderUpdateEngineClient : public UpdateEngineClient {
- public:
- BinderUpdateEngineClient() = default;
- bool Init();
-
- virtual ~BinderUpdateEngineClient() = default;
-
- bool AttemptUpdate(const std::string& app_version,
- const std::string& omaha_url,
- bool at_user_request) override;
-
- bool AttemptInstall(const std::string& omaha_url,
- const std::vector<std::string>& dlc_module_ids) override;
-
- bool GetStatus(int64_t* out_last_checked_time,
- double* out_progress,
- UpdateStatus* out_update_status,
- std::string* out_new_version,
- int64_t* out_new_size) const override;
-
- bool SetCohortHint(const std::string& in_cohort_hint) override;
- bool GetCohortHint(std::string* out_cohort_hint) const override;
-
- bool SetUpdateOverCellularPermission(bool allowed) override;
- bool GetUpdateOverCellularPermission(bool* allowed) const override;
-
- bool SetP2PUpdatePermission(bool enabled) override;
- bool GetP2PUpdatePermission(bool* enabled) const override;
-
- bool Rollback(bool powerwash) override;
-
- bool GetRollbackPartition(std::string* rollback_partition) const override;
-
- void RebootIfNeeded() override;
-
- bool GetPrevVersion(std::string* prev_version) const override;
-
- bool ResetStatus() override;
-
- bool SetTargetChannel(const std::string& target_channel,
- bool allow_powerwash) override;
-
- bool GetTargetChannel(std::string* out_channel) const override;
-
- bool GetChannel(std::string* out_channel) const override;
-
- bool RegisterStatusUpdateHandler(StatusUpdateHandler* handler) override;
- bool UnregisterStatusUpdateHandler(StatusUpdateHandler* handler) override;
-
- bool GetLastAttemptError(int32_t* last_attempt_error) const override;
-
- bool GetEolStatus(int32_t* eol_status) const override;
-
- private:
- class StatusUpdateCallback
- : public android::brillo::BnUpdateEngineStatusCallback {
- public:
- explicit StatusUpdateCallback(BinderUpdateEngineClient* client)
- : client_(client) {}
-
- android::binder::Status HandleStatusUpdate(
- const android::brillo::ParcelableUpdateEngineStatus& status) override;
-
- private:
- BinderUpdateEngineClient* client_;
- };
-
- android::sp<android::brillo::IUpdateEngine> service_;
- android::sp<android::brillo::IUpdateEngineStatusCallback> status_callback_;
- std::vector<update_engine::StatusUpdateHandler*> handlers_;
- brillo::BinderWatcher binder_watcher_;
-
- DISALLOW_COPY_AND_ASSIGN(BinderUpdateEngineClient);
-}; // class BinderUpdateEngineClient
-
-} // namespace internal
-} // namespace update_engine
-
-#endif // UPDATE_ENGINE_CLIENT_LIBRARY_CLIENT_BINDER_H_
diff --git a/client_library/client_dbus.cc b/client_library/client_dbus.cc
index 7ca6307..8e9a7fd 100644
--- a/client_library/client_dbus.cc
+++ b/client_library/client_dbus.cc
@@ -18,21 +18,49 @@
#include <base/message_loop/message_loop.h>
+#include <memory>
+
#include <dbus/bus.h>
#include <update_engine/dbus-constants.h>
-#include <update_engine/proto_bindings/update_engine.pb.h>
#include "update_engine/update_status_utils.h"
-using chromeos_update_engine::StringToUpdateStatus;
using dbus::Bus;
using org::chromium::UpdateEngineInterfaceProxy;
using std::string;
+using std::unique_ptr;
using std::vector;
namespace update_engine {
+
+unique_ptr<UpdateEngineClient> UpdateEngineClient::CreateInstance() {
+ auto ret = std::make_unique<internal::DBusUpdateEngineClient>();
+ if (!ret->Init()) {
+ ret.reset();
+ }
+ return ret;
+}
+
namespace internal {
+namespace {
+// This converts the status from Protobuf |StatusResult| to The internal
+// |UpdateEngineStatus| struct.
+void ConvertToUpdateEngineStatus(const StatusResult& status,
+ UpdateEngineStatus* out_status) {
+ out_status->last_checked_time = status.last_checked_time();
+ out_status->progress = status.progress();
+ out_status->new_version = status.new_version();
+ out_status->new_size_bytes = status.new_size();
+ out_status->status = static_cast<UpdateStatus>(status.current_operation());
+ out_status->is_enterprise_rollback = status.is_enterprise_rollback();
+ out_status->is_install = status.is_install();
+ out_status->eol_date = status.eol_date();
+ out_status->will_powerwash_after_reboot =
+ status.will_powerwash_after_reboot();
+}
+} // namespace
+
bool DBusUpdateEngineClient::Init() {
Bus::Options options;
options.bus_type = Bus::SYSTEM;
@@ -57,41 +85,24 @@
nullptr);
}
-bool DBusUpdateEngineClient::AttemptInstall(
- const string& omaha_url, const vector<string>& dlc_module_ids) {
- // Convert parameters into protobuf.
- chromeos_update_engine::DlcParameters dlc_parameters;
- dlc_parameters.set_omaha_url(omaha_url);
- for (const auto& dlc_module_id : dlc_module_ids) {
- chromeos_update_engine::DlcInfo* dlc_info = dlc_parameters.add_dlc_infos();
- dlc_info->set_dlc_id(dlc_module_id);
- }
- string dlc_request;
- if (dlc_parameters.SerializeToString(&dlc_request)) {
- return proxy_->AttemptInstall(dlc_request, nullptr /* brillo::ErrorPtr* */);
- } else {
- LOG(ERROR) << "Fail to serialize a protobuf to a string.";
- return false;
- }
+bool DBusUpdateEngineClient::AttemptInstall(const string& omaha_url,
+ const vector<string>& dlc_ids) {
+ return proxy_->AttemptInstall(omaha_url, dlc_ids, nullptr);
}
-bool DBusUpdateEngineClient::GetStatus(int64_t* out_last_checked_time,
- double* out_progress,
- UpdateStatus* out_update_status,
- string* out_new_version,
- int64_t* out_new_size) const {
- string status_as_string;
- const bool success = proxy_->GetStatus(out_last_checked_time,
- out_progress,
- &status_as_string,
- out_new_version,
- out_new_size,
- nullptr);
- if (!success) {
+bool DBusUpdateEngineClient::SetDlcActiveValue(bool is_active,
+ const std::string& dlc_id) {
+ return proxy_->SetDlcActiveValue(is_active, dlc_id, /*error=*/nullptr);
+}
+
+bool DBusUpdateEngineClient::GetStatus(UpdateEngineStatus* out_status) const {
+ StatusResult status;
+ if (!proxy_->GetStatusAdvanced(&status, nullptr)) {
return false;
}
- return StringToUpdateStatus(status_as_string, out_update_status);
+ ConvertToUpdateEngineStatus(status, out_status);
+ return true;
}
bool DBusUpdateEngineClient::SetCohortHint(const string& cohort_hint) {
@@ -160,40 +171,25 @@
void DBusUpdateEngineClient::StatusUpdateHandlersRegistered(
StatusUpdateHandler* handler) const {
- int64_t last_checked_time;
- double progress;
- UpdateStatus update_status;
- string new_version;
- int64_t new_size;
-
- if (!GetStatus(&last_checked_time,
- &progress,
- &update_status,
- &new_version,
- &new_size)) {
+ UpdateEngineStatus status;
+ if (!GetStatus(&status)) {
handler->IPCError("Could not query current status");
return;
}
std::vector<update_engine::StatusUpdateHandler*> just_handler = {handler};
for (auto h : handler ? just_handler : handlers_) {
- h->HandleStatusUpdate(
- last_checked_time, progress, update_status, new_version, new_size);
+ h->HandleStatusUpdate(status);
}
}
void DBusUpdateEngineClient::RunStatusUpdateHandlers(
- int64_t last_checked_time,
- double progress,
- const string& current_operation,
- const string& new_version,
- int64_t new_size) {
- UpdateStatus status;
- StringToUpdateStatus(current_operation, &status);
+ const StatusResult& status) {
+ UpdateEngineStatus ue_status;
+ ConvertToUpdateEngineStatus(status, &ue_status);
for (auto handler : handlers_) {
- handler->HandleStatusUpdate(
- last_checked_time, progress, status, new_version, new_size);
+ handler->HandleStatusUpdate(ue_status);
}
}
@@ -210,7 +206,7 @@
bool DBusUpdateEngineClient::RegisterStatusUpdateHandler(
StatusUpdateHandler* handler) {
- if (!base::MessageLoopForIO::current()) {
+ if (!base::MessageLoopCurrent::IsSet()) {
LOG(FATAL) << "Cannot get UpdateEngineClient outside of message loop.";
return false;
}
@@ -222,7 +218,7 @@
return true;
}
- proxy_->RegisterStatusUpdateSignalHandler(
+ proxy_->RegisterStatusUpdateAdvancedSignalHandler(
base::Bind(&DBusUpdateEngineClient::RunStatusUpdateHandlers,
base::Unretained(this)),
base::Bind(&DBusUpdateEngineClient::DBusStatusHandlersRegistered,
@@ -255,9 +251,5 @@
return proxy_->GetLastAttemptError(last_attempt_error, nullptr);
}
-bool DBusUpdateEngineClient::GetEolStatus(int32_t* eol_status) const {
- return proxy_->GetEolStatus(eol_status, nullptr);
-}
-
} // namespace internal
} // namespace update_engine
diff --git a/client_library/client_dbus.h b/client_library/client_dbus.h
index a186d45..f19555f 100644
--- a/client_library/client_dbus.h
+++ b/client_library/client_dbus.h
@@ -23,6 +23,7 @@
#include <vector>
#include <base/macros.h>
+#include <update_engine/proto_bindings/update_engine.pb.h>
#include "update_engine/client_library/include/update_engine/client.h"
#include "update_engine/dbus-proxies.h"
@@ -42,13 +43,11 @@
bool at_user_request) override;
bool AttemptInstall(const std::string& omaha_url,
- const std::vector<std::string>& dlc_module_ids) override;
+ const std::vector<std::string>& dlc_ids) override;
- bool GetStatus(int64_t* out_last_checked_time,
- double* out_progress,
- UpdateStatus* out_update_status,
- std::string* out_new_version,
- int64_t* out_new_size) const override;
+ bool SetDlcActiveValue(bool is_active, const std::string& dlc_id) override;
+
+ bool GetStatus(UpdateEngineStatus* out_status) const override;
bool SetCohortHint(const std::string& cohort_hint) override;
bool GetCohortHint(std::string* cohort_hint) const override;
@@ -81,8 +80,6 @@
bool GetLastAttemptError(int32_t* last_attempt_error) const override;
- bool GetEolStatus(int32_t* eol_status) const override;
-
private:
void DBusStatusHandlersRegistered(const std::string& interface,
const std::string& signal_name,
@@ -93,11 +90,7 @@
// registered handlers receive the event.
void StatusUpdateHandlersRegistered(StatusUpdateHandler* handler) const;
- void RunStatusUpdateHandlers(int64_t last_checked_time,
- double progress,
- const std::string& current_operation,
- const std::string& new_version,
- int64_t new_size);
+ void RunStatusUpdateHandlers(const StatusResult& status);
std::unique_ptr<org::chromium::UpdateEngineInterfaceProxy> proxy_;
std::vector<update_engine::StatusUpdateHandler*> handlers_;
diff --git a/client_library/include/update_engine/client.h b/client_library/include/update_engine/client.h
index 1bc6111..f734733 100644
--- a/client_library/include/update_engine/client.h
+++ b/client_library/include/update_engine/client.h
@@ -54,31 +54,18 @@
// empty indicates update_engine should use its default value. Note that
// update_engine will ignore this parameter in production mode to avoid
// pulling untrusted updates.
- // |dlc_module_ids|
+ // |dlc_ids|
// A list of DLC module IDs.
- virtual bool AttemptInstall(
- const std::string& omaha_url,
- const std::vector<std::string>& dlc_module_ids) = 0;
+ virtual bool AttemptInstall(const std::string& omaha_url,
+ const std::vector<std::string>& dlc_ids) = 0;
- // Returns the current status of the Update Engine.
- //
- // |out_last_checked_time|
- // the last time the update engine checked for an update in seconds since
- // the epoc.
- // |out_progress|
- // when downloading an update, this is calculated as
- // (number of bytes received) / (total bytes).
- // |out_update_status|
- // See update_status.h.
- // |out_new_version|
- // string version of the new system image.
- // |out_new_size|
- // number of bytes in the new system image.
- virtual bool GetStatus(int64_t* out_last_checked_time,
- double* out_progress,
- UpdateStatus* out_update_status,
- std::string* out_new_version,
- int64_t* out_new_size) const = 0;
+ // Same as above but return the entire struct instead.
+ virtual bool GetStatus(UpdateEngineStatus* out_status) const = 0;
+
+ // Sets the DLC as active or inactive. When set to active, the ping metadata
+ // for the DLC is updated accordingly. When set to inactive, the metadata
+ // for the DLC is deleted.
+ virtual bool SetDlcActiveValue(bool is_active, const std::string& dlc_id) = 0;
// Getter and setter for the cohort hint.
virtual bool SetCohortHint(const std::string& cohort_hint) = 0;
@@ -132,9 +119,6 @@
// Get the last UpdateAttempt error code.
virtual bool GetLastAttemptError(int32_t* last_attempt_error) const = 0;
- // Get the current end-of-life status code. See EolStatus enum for details.
- virtual bool GetEolStatus(int32_t* eol_status) const = 0;
-
protected:
// Use CreateInstance().
UpdateEngineClient() = default;
diff --git a/client_library/include/update_engine/status_update_handler.h b/client_library/include/update_engine/status_update_handler.h
index d2fad34..238f6bd 100644
--- a/client_library/include/update_engine/status_update_handler.h
+++ b/client_library/include/update_engine/status_update_handler.h
@@ -14,7 +14,9 @@
// limitations under the License.
//
+// NOLINTNEXTLINE(whitespace/line_length)
#ifndef UPDATE_ENGINE_CLIENT_LIBRARY_INCLUDE_UPDATE_ENGINE_STATUS_UPDATE_HANDLER_H_
+// NOLINTNEXTLINE(whitespace/line_length)
#define UPDATE_ENGINE_CLIENT_LIBRARY_INCLUDE_UPDATE_ENGINE_STATUS_UPDATE_HANDLER_H_
#include <string>
@@ -35,13 +37,10 @@
virtual void IPCError(const std::string& error) = 0;
// Runs every time update_engine reports a status change.
- virtual void HandleStatusUpdate(int64_t last_checked_time,
- double progress,
- UpdateStatus current_operation,
- const std::string& new_version,
- int64_t new_size) = 0;
+ virtual void HandleStatusUpdate(const UpdateEngineStatus& status) = 0;
};
} // namespace update_engine
+// NOLINTNEXTLINE(whitespace/line_length)
#endif // UPDATE_ENGINE_CLIENT_LIBRARY_INCLUDE_UPDATE_ENGINE_STATUS_UPDATE_HANDLER_H_
diff --git a/client_library/include/update_engine/update_status.h b/client_library/include/update_engine/update_status.h
index 6490e27..043a36e 100644
--- a/client_library/include/update_engine/update_status.h
+++ b/client_library/include/update_engine/update_status.h
@@ -21,12 +21,19 @@
#include <brillo/enum_flags.h>
+// NOTE: Keep this file in sync with
+// platform2/system_api/dbus/update_engine/update_engine.proto especially:
+// - |UpdateStatus| <-> |Operation|
+// - |UpdateEngineStatus| <-> |StatusResult|
+
namespace update_engine {
-// ATTENTION: When adding a new enum value here, always append at the end and
-// make sure to make proper adjustments in UpdateAttempter:ActionCompleted(). If
-// any enum memeber is deprecated, the assigned value of other members should
-// not change. See b/62842358.
+// ATTENTION:
+// When adding a new enum value:
+// - always append at the end with proper adjustments in |ActionCompleted()|.
+// - always update |kNonIdleUpdateStatues| in update_attempter_unittest.cc.
+// When deprecating an old enum value:
+// - other enum values should not change their old values. See b/62842358.
enum class UpdateStatus {
IDLE = 0,
CHECKING_FOR_UPDATE = 1,
@@ -42,6 +49,13 @@
// allow updates, e.g. over cellular network.
NEED_PERMISSION_TO_UPDATE = 10,
CLEANUP_PREVIOUS_UPDATE = 11,
+
+ // This value is exclusively used in Chrome. DO NOT define nor use it.
+ // TODO(crbug.com/977320): Remove this value from chrome by refactoring the
+  // Chrome code and eventually from here. This is not really an operation or
+ // state that the update_engine stays on. This is the result of an internal
+ // failure and should be reflected differently.
+ // ERROR = -1,
};
// Enum of bit-wise flags for controlling how updates are attempted.
@@ -58,23 +72,27 @@
DECLARE_FLAGS_ENUM(UpdateAttemptFlags);
struct UpdateEngineStatus {
- // When the update_engine last checked for updates (time_t: seconds from unix
- // epoch)
+ // Update engine last checked update (time_t: seconds from unix epoch).
int64_t last_checked_time;
- // the current status/operation of the update_engine
+ // Current status/operation of the update_engine.
UpdateStatus status;
- // the current product version (oem bundle id)
+ // Current product version (oem bundle id).
std::string current_version;
- // the current system version
- std::string current_system_version;
- // The current progress (0.0f-1.0f).
+ // Current progress (0.0f-1.0f).
double progress;
- // the size of the update (bytes)
+ // Size of the update in bytes.
uint64_t new_size_bytes;
- // the new product version
+ // New product version.
std::string new_version;
- // the new system version, if there is one (empty, otherwise)
- std::string new_system_version;
+  // Whether the update is an enterprise rollback. The value is valid only if the
+ // current operation is passed CHECKING_FOR_UPDATE.
+ bool is_enterprise_rollback;
+ // Indication of install for DLC(s).
+ bool is_install;
+ // The end-of-life date of the device in the number of days since Unix Epoch.
+ int64_t eol_date;
+ // The system will powerwash once the update is applied.
+ bool will_powerwash_after_reboot;
};
} // namespace update_engine
diff --git a/common/constants.cc b/common/constants.cc
index 5bfb2b6..fa13a38 100644
--- a/common/constants.cc
+++ b/common/constants.cc
@@ -18,6 +18,10 @@
namespace chromeos_update_engine {
+const char kExclusionPrefsSubDir[] = "exclusion";
+
+const char kDlcPrefsSubDir[] = "dlc";
+
const char kPowerwashSafePrefsSubDirectory[] = "update_engine/prefs";
const char kPrefsSubDirectory[] = "prefs";
@@ -55,11 +59,16 @@
const char kPrefsOmahaCohort[] = "omaha-cohort";
const char kPrefsOmahaCohortHint[] = "omaha-cohort-hint";
const char kPrefsOmahaCohortName[] = "omaha-cohort-name";
-const char kPrefsOmahaEolStatus[] = "omaha-eol-status";
+const char kPrefsOmahaEolDate[] = "omaha-eol-date";
const char kPrefsP2PEnabled[] = "p2p-enabled";
const char kPrefsP2PFirstAttemptTimestamp[] = "p2p-first-attempt-timestamp";
const char kPrefsP2PNumAttempts[] = "p2p-num-attempts";
const char kPrefsPayloadAttemptNumber[] = "payload-attempt-number";
+// Keep |kPrefsPingActive| in sync with |kDlcMetadataFilePingActive| in
+// dlcservice.
+const char kPrefsPingActive[] = "active";
+const char kPrefsPingLastActive[] = "date_last_active";
+const char kPrefsPingLastRollcall[] = "date_last_rollcall";
const char kPrefsPostInstallSucceeded[] = "post-install-succeeded";
const char kPrefsPreviousVersion[] = "previous-version";
const char kPrefsResumedUpdateFailures[] = "resumed-update-failures";
@@ -123,4 +132,12 @@
// The default is 1 (always run post install).
const char kPayloadPropertyRunPostInstall[] = "RUN_POST_INSTALL";
+const char kOmahaUpdaterVersion[] = "0.1.0.0";
+
+// X-Goog-Update headers.
+const char kXGoogleUpdateInteractivity[] = "X-Goog-Update-Interactivity";
+const char kXGoogleUpdateAppId[] = "X-Goog-Update-AppId";
+const char kXGoogleUpdateUpdater[] = "X-Goog-Update-Updater";
+const char kXGoogleUpdateSessionId[] = "X-Goog-SessionId";
+
} // namespace chromeos_update_engine
diff --git a/common/constants.h b/common/constants.h
index af1c0ab..eb489fc 100644
--- a/common/constants.h
+++ b/common/constants.h
@@ -19,6 +19,12 @@
namespace chromeos_update_engine {
+// The root path of all exclusion prefs.
+extern const char kExclusionPrefsSubDir[];
+
+// The root path of all DLC metadata.
+extern const char kDlcPrefsSubDir[];
+
// Directory for AU prefs that are preserved across powerwash.
extern const char kPowerwashSafePrefsSubDirectory[];
@@ -56,11 +62,14 @@
extern const char kPrefsOmahaCohort[];
extern const char kPrefsOmahaCohortHint[];
extern const char kPrefsOmahaCohortName[];
-extern const char kPrefsOmahaEolStatus[];
+extern const char kPrefsOmahaEolDate[];
extern const char kPrefsP2PEnabled[];
extern const char kPrefsP2PFirstAttemptTimestamp[];
extern const char kPrefsP2PNumAttempts[];
extern const char kPrefsPayloadAttemptNumber[];
+extern const char kPrefsPingActive[];
+extern const char kPrefsPingLastActive[];
+extern const char kPrefsPingLastRollcall[];
extern const char kPrefsPostInstallSucceeded[];
extern const char kPrefsPreviousVersion[];
extern const char kPrefsResumedUpdateFailures[];
@@ -108,6 +117,14 @@
extern const char kPayloadPropertySwitchSlotOnReboot[];
extern const char kPayloadPropertyRunPostInstall[];
+extern const char kOmahaUpdaterVersion[];
+
+// X-Goog-Update headers.
+extern const char kXGoogleUpdateInteractivity[];
+extern const char kXGoogleUpdateAppId[];
+extern const char kXGoogleUpdateUpdater[];
+extern const char kXGoogleUpdateSessionId[];
+
// A download source is any combination of protocol and server (that's of
// interest to us when looking at UMA metrics) using which we may download
// the payload.
diff --git a/common/cpu_limiter.cc b/common/cpu_limiter.cc
index 1d14764..5f1ae6f 100644
--- a/common/cpu_limiter.cc
+++ b/common/cpu_limiter.cc
@@ -67,7 +67,7 @@
if (shares_ == shares)
return true;
- std::string string_shares = base::IntToString(static_cast<int>(shares));
+ std::string string_shares = base::NumberToString(static_cast<int>(shares));
LOG(INFO) << "Setting cgroup cpu shares to " << string_shares;
if (!utils::WriteFile(
kCGroupSharesPath, string_shares.c_str(), string_shares.size())) {
diff --git a/common/cpu_limiter.h b/common/cpu_limiter.h
index c7add89..e6d7331 100644
--- a/common/cpu_limiter.h
+++ b/common/cpu_limiter.h
@@ -30,10 +30,6 @@
kLow = 2,
};
-// Sets the current process shares to |shares|. Returns true on
-// success, false otherwise.
-bool SetCpuShares(CpuShares shares);
-
class CPULimiter {
public:
CPULimiter() = default;
diff --git a/common/dlcservice.h b/common/dlcservice.h
deleted file mode 100644
index 9dae560..0000000
--- a/common/dlcservice.h
+++ /dev/null
@@ -1,32 +0,0 @@
-//
-// Copyright (C) 2018 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#ifndef UPDATE_ENGINE_COMMON_DLCSERVICE_H_
-#define UPDATE_ENGINE_COMMON_DLCSERVICE_H_
-
-#include <memory>
-
-#include "update_engine/common/dlcservice_interface.h"
-
-namespace chromeos_update_engine {
-
-// This factory function creates a new DlcServiceInterface instance for the
-// current platform.
-std::unique_ptr<DlcServiceInterface> CreateDlcService();
-
-} // namespace chromeos_update_engine
-
-#endif // UPDATE_ENGINE_COMMON_DLCSERVICE_H_
diff --git a/common/dlcservice_interface.h b/common/dlcservice_interface.h
index aa24105..7b57710 100644
--- a/common/dlcservice_interface.h
+++ b/common/dlcservice_interface.h
@@ -17,6 +17,7 @@
#ifndef UPDATE_ENGINE_COMMON_DLCSERVICE_INTERFACE_H_
#define UPDATE_ENGINE_COMMON_DLCSERVICE_INTERFACE_H_
+#include <memory>
#include <string>
#include <vector>
@@ -30,9 +31,17 @@
public:
virtual ~DlcServiceInterface() = default;
- // Returns true and a list of installed DLC module ids in |dlc_module_ids|.
+ // Returns true and a list of installed DLC ids in |dlc_ids|.
// On failure it returns false.
- virtual bool GetInstalled(std::vector<std::string>* dlc_module_ids) = 0;
+ virtual bool GetDlcsToUpdate(std::vector<std::string>* dlc_ids) = 0;
+
+ // Returns true if dlcservice successfully handled the install completion
+ // method call, otherwise false.
+ virtual bool InstallCompleted(const std::vector<std::string>& dlc_ids) = 0;
+
+ // Returns true if dlcservice successfully handled the update completion
+ // method call, otherwise false.
+ virtual bool UpdateCompleted(const std::vector<std::string>& dlc_ids) = 0;
protected:
DlcServiceInterface() = default;
@@ -41,6 +50,10 @@
DISALLOW_COPY_AND_ASSIGN(DlcServiceInterface);
};
+// This factory function creates a new DlcServiceInterface instance for the
+// current platform.
+std::unique_ptr<DlcServiceInterface> CreateDlcService();
+
} // namespace chromeos_update_engine
#endif // UPDATE_ENGINE_COMMON_DLCSERVICE_INTERFACE_H_
diff --git a/common/dlcservice_stub.cc b/common/dlcservice_stub.cc
index c5f9306..2447147 100644
--- a/common/dlcservice_stub.cc
+++ b/common/dlcservice_stub.cc
@@ -27,9 +27,16 @@
return std::make_unique<DlcServiceStub>();
}
-bool DlcServiceStub::GetInstalled(std::vector<std::string>* dlc_module_ids) {
- if (dlc_module_ids)
- dlc_module_ids->clear();
+bool DlcServiceStub::GetDlcsToUpdate(vector<string>* dlc_ids) {
+ if (dlc_ids)
+ dlc_ids->clear();
+ return true;
+}
+
+bool DlcServiceStub::InstallCompleted(const vector<string>& dlc_ids) {
+ return true;
+}
+bool DlcServiceStub::UpdateCompleted(const vector<string>& dlc_ids) {
return true;
}
diff --git a/common/dlcservice_stub.h b/common/dlcservice_stub.h
index 4e12c11..bc803e8 100644
--- a/common/dlcservice_stub.h
+++ b/common/dlcservice_stub.h
@@ -31,7 +31,9 @@
~DlcServiceStub() = default;
// BootControlInterface overrides.
- bool GetInstalled(std::vector<std::string>* dlc_module_ids) override;
+ bool GetDlcsToUpdate(std::vector<std::string>* dlc_ids) override;
+ bool InstallCompleted(const std::vector<std::string>& dlc_ids) override;
+ bool UpdateCompleted(const std::vector<std::string>& dlc_ids) override;
private:
DISALLOW_COPY_AND_ASSIGN(DlcServiceStub);
diff --git a/common/error_code_utils.cc b/common/error_code_utils.cc
index 3fbf0fe..64df24a 100644
--- a/common/error_code_utils.cc
+++ b/common/error_code_utils.cc
@@ -175,7 +175,7 @@
// error codes which should be added here.
}
- return "Unknown error: " + base::UintToString(static_cast<unsigned>(code));
+ return "Unknown error: " + base::NumberToString(static_cast<unsigned>(code));
}
} // namespace utils
diff --git a/common/excluder_interface.h b/common/excluder_interface.h
new file mode 100644
index 0000000..3985bba
--- /dev/null
+++ b/common/excluder_interface.h
@@ -0,0 +1,60 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_COMMON_EXCLUDER_INTERFACE_H_
+#define UPDATE_ENGINE_COMMON_EXCLUDER_INTERFACE_H_
+
+#include <memory>
+#include <string>
+
+#include <base/macros.h>
+
+namespace chromeos_update_engine {
+
+class PrefsInterface;
+
+class ExcluderInterface {
+ public:
+ virtual ~ExcluderInterface() = default;
+
+  // Returns true on successfully excluding |name|, otherwise false. On a
+ // successful |Exclude()| the passed in |name| will be considered excluded
+ // and calls to |IsExcluded()| will return true. The exclusions are persisted.
+ virtual bool Exclude(const std::string& name) = 0;
+
+ // Returns true if |name| reached the exclusion limit, otherwise false.
+ virtual bool IsExcluded(const std::string& name) = 0;
+
+  // Returns true on successfully resetting the entire exclusion state, otherwise
+ // false. On a successful |Reset()| there will be no excluded |name| in the
+ // exclusion state.
+ virtual bool Reset() = 0;
+
+ // Not copyable or movable
+ ExcluderInterface(const ExcluderInterface&) = delete;
+ ExcluderInterface& operator=(const ExcluderInterface&) = delete;
+ ExcluderInterface(ExcluderInterface&&) = delete;
+ ExcluderInterface& operator=(ExcluderInterface&&) = delete;
+
+ protected:
+ ExcluderInterface() = default;
+};
+
+std::unique_ptr<ExcluderInterface> CreateExcluder(PrefsInterface* prefs);
+
+} // namespace chromeos_update_engine
+
+#endif // UPDATE_ENGINE_COMMON_EXCLUDER_INTERFACE_H_
diff --git a/common/excluder_stub.cc b/common/excluder_stub.cc
new file mode 100644
index 0000000..a251765
--- /dev/null
+++ b/common/excluder_stub.cc
@@ -0,0 +1,43 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/common/excluder_stub.h"
+
+#include <memory>
+
+#include "update_engine/common/prefs_interface.h"
+
+using std::string;
+
+namespace chromeos_update_engine {
+
+std::unique_ptr<ExcluderInterface> CreateExcluder(PrefsInterface* prefs) {
+ return std::make_unique<ExcluderStub>();
+}
+
+bool ExcluderStub::Exclude(const string& name) {
+ return true;
+}
+
+bool ExcluderStub::IsExcluded(const string& name) {
+ return false;
+}
+
+bool ExcluderStub::Reset() {
+ return true;
+}
+
+} // namespace chromeos_update_engine
diff --git a/common/excluder_stub.h b/common/excluder_stub.h
new file mode 100644
index 0000000..2d5372a
--- /dev/null
+++ b/common/excluder_stub.h
@@ -0,0 +1,46 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_COMMON_EXCLUDER_STUB_H_
+#define UPDATE_ENGINE_COMMON_EXCLUDER_STUB_H_
+
+#include <string>
+
+#include "update_engine/common/excluder_interface.h"
+
+namespace chromeos_update_engine {
+
+// An implementation of the |ExcluderInterface| that does nothing.
+class ExcluderStub : public ExcluderInterface {
+ public:
+ ExcluderStub() = default;
+ ~ExcluderStub() = default;
+
+ // |ExcluderInterface| overrides.
+ bool Exclude(const std::string& name) override;
+ bool IsExcluded(const std::string& name) override;
+ bool Reset() override;
+
+ // Not copyable or movable.
+ ExcluderStub(const ExcluderStub&) = delete;
+ ExcluderStub& operator=(const ExcluderStub&) = delete;
+ ExcluderStub(ExcluderStub&&) = delete;
+ ExcluderStub& operator=(ExcluderStub&&) = delete;
+};
+
+} // namespace chromeos_update_engine
+
+#endif // UPDATE_ENGINE_COMMON_EXCLUDER_STUB_H_
diff --git a/common/fake_hardware.h b/common/fake_hardware.h
index 0b232da..2a8e81d 100644
--- a/common/fake_hardware.h
+++ b/common/fake_hardware.h
@@ -77,6 +77,10 @@
std::string GetECVersion() const override { return ec_version_; }
+ std::string GetDeviceRequisition() const override {
+ return device_requisition_;
+ }
+
int GetMinKernelKeyVersion() const override {
return min_kernel_key_version_;
}
@@ -104,15 +108,15 @@
int GetPowerwashCount() const override { return powerwash_count_; }
- bool SchedulePowerwash(bool is_rollback) override {
+ bool SchedulePowerwash(bool save_rollback_data) override {
powerwash_scheduled_ = true;
- is_rollback_powerwash_ = is_rollback;
+ save_rollback_data_ = save_rollback_data;
return true;
}
bool CancelPowerwash() override {
powerwash_scheduled_ = false;
- is_rollback_powerwash_ = false;
+ save_rollback_data_ = false;
return true;
}
@@ -175,6 +179,10 @@
void SetECVersion(const std::string& ec_version) { ec_version_ = ec_version; }
+ void SetDeviceRequisition(const std::string& requisition) {
+ device_requisition_ = requisition;
+ }
+
void SetMinKernelKeyVersion(int min_kernel_key_version) {
min_kernel_key_version_ = min_kernel_key_version;
}
@@ -197,7 +205,7 @@
int GetMaxKernelKeyRollforward() const { return kernel_max_rollforward_; }
bool GetIsRollbackPowerwashScheduled() const {
- return powerwash_scheduled_ && is_rollback_powerwash_;
+ return powerwash_scheduled_ && save_rollback_data_;
}
private:
@@ -211,13 +219,14 @@
std::string hardware_class_{"Fake HWID BLAH-1234"};
std::string firmware_version_{"Fake Firmware v1.0.1"};
std::string ec_version_{"Fake EC v1.0a"};
+ std::string device_requisition_{"fake_requisition"};
int min_kernel_key_version_{kMinKernelKeyVersion};
int min_firmware_key_version_{kMinFirmwareKeyVersion};
int kernel_max_rollforward_{kKernelMaxRollforward};
int firmware_max_rollforward_{kFirmwareMaxRollforward};
int powerwash_count_{kPowerwashCountNotSet};
bool powerwash_scheduled_{false};
- bool is_rollback_powerwash_{false};
+ bool save_rollback_data_{false};
int64_t build_timestamp_{0};
bool first_active_omaha_ping_sent_{false};
bool warm_reset_{false};
diff --git a/common/fake_prefs.cc b/common/fake_prefs.cc
index c446e06..73559c5 100644
--- a/common/fake_prefs.cc
+++ b/common/fake_prefs.cc
@@ -21,6 +21,7 @@
#include <gtest/gtest.h>
using std::string;
+using std::vector;
using chromeos_update_engine::FakePrefs;
@@ -105,6 +106,13 @@
return true;
}
+bool FakePrefs::GetSubKeys(const string& ns, vector<string>* keys) const {
+ for (const auto& pr : values_)
+ if (pr.first.compare(0, ns.length(), ns) == 0)
+ keys->push_back(pr.first);
+ return true;
+}
+
string FakePrefs::GetTypeName(PrefType type) {
switch (type) {
case PrefType::kString:
diff --git a/common/fake_prefs.h b/common/fake_prefs.h
index b1c5b71..b24ff4d 100644
--- a/common/fake_prefs.h
+++ b/common/fake_prefs.h
@@ -49,6 +49,9 @@
bool Exists(const std::string& key) const override;
bool Delete(const std::string& key) override;
+ bool GetSubKeys(const std::string& ns,
+ std::vector<std::string>* keys) const override;
+
void AddObserver(const std::string& key,
ObserverInterface* observer) override;
void RemoveObserver(const std::string& key,
diff --git a/common/file_fetcher.h b/common/file_fetcher.h
index fbdfc32..bd39007 100644
--- a/common/file_fetcher.h
+++ b/common/file_fetcher.h
@@ -59,6 +59,12 @@
void SetHeader(const std::string& header_name,
const std::string& header_value) override {}
+ bool GetHeader(const std::string& header_name,
+ std::string* header_value) const override {
+ header_value->clear();
+ return false;
+ }
+
// Suspend the asynchronous file read.
void Pause() override;
diff --git a/common/hardware_interface.h b/common/hardware_interface.h
index d92a6fc..4f0305f 100644
--- a/common/hardware_interface.h
+++ b/common/hardware_interface.h
@@ -70,6 +70,10 @@
// running a custom chrome os ec.
virtual std::string GetECVersion() const = 0;
+ // Returns the OEM device requisition or an empty string if the system does
+ // not have a requisition, or if not running Chrome OS.
+ virtual std::string GetDeviceRequisition() const = 0;
+
// Returns the minimum kernel key version that verified boot on Chrome OS
// will allow to boot. This is the value of crossystem tpm_kernver. Returns
// -1 on error, or if not running on Chrome OS.
@@ -102,9 +106,9 @@
virtual int GetPowerwashCount() const = 0;
// Signals that a powerwash (stateful partition wipe) should be performed
- // after reboot. If |is_rollback| is true additional state is preserved
- // during shutdown that can be restored after the powerwash.
- virtual bool SchedulePowerwash(bool is_rollback) = 0;
+ // after reboot. If |save_rollback_data| is true additional state is
+ // preserved during shutdown that can be restored after the powerwash.
+ virtual bool SchedulePowerwash(bool save_rollback_data) = 0;
// Cancel the powerwash operation scheduled to be performed on next boot.
virtual bool CancelPowerwash() = 0;
diff --git a/common/http_common.cc b/common/http_common.cc
index 5f234b0..c8bac47 100644
--- a/common/http_common.cc
+++ b/common/http_common.cc
@@ -21,6 +21,7 @@
#include <cstdlib>
#include <base/macros.h>
+#include <base/stl_util.h>
namespace chromeos_update_engine {
@@ -56,7 +57,7 @@
bool is_found = false;
size_t i;
- for (i = 0; i < arraysize(http_response_table); i++)
+ for (i = 0; i < base::size(http_response_table); i++)
if ((is_found = (http_response_table[i].code == code)))
break;
@@ -77,7 +78,7 @@
bool is_found = false;
size_t i;
- for (i = 0; i < arraysize(http_content_type_table); i++)
+ for (i = 0; i < base::size(http_content_type_table); i++)
if ((is_found = (http_content_type_table[i].type == type)))
break;
diff --git a/common/http_fetcher.h b/common/http_fetcher.h
index 2b4fc83..f74a0f0 100644
--- a/common/http_fetcher.h
+++ b/common/http_fetcher.h
@@ -29,6 +29,7 @@
#include "update_engine/common/http_common.h"
#include "update_engine/common/proxy_resolver.h"
+#include "update_engine/metrics_constants.h"
// This class is a simple wrapper around an HTTP library (libcurl). We can
// easily mock out this interface for testing.
@@ -58,6 +59,12 @@
HttpFetcherDelegate* delegate() const { return delegate_; }
int http_response_code() const { return http_response_code_; }
+ // Returns additional error code that can't be expressed in terms of an HTTP
+ // response code. For example, if there was a specific internal error code in
+ // the objects used in the implementation of this class (like libcurl) that we
+ // are interested about, we can communicate it through this value.
+ ErrorCode GetAuxiliaryErrorCode() const { return auxiliary_error_code_; }
+
// Optional: Post data to the server. The HttpFetcher should make a copy
// of this data and upload it via HTTP POST during the transfer. The type of
// the data is necessary for properly setting the Content-Type HTTP header.
@@ -99,6 +106,14 @@
virtual void SetHeader(const std::string& header_name,
const std::string& header_value) = 0;
+ // Only used for testing.
+ // If |header_name| is set, the value will be set into |header_value|.
+  // On success the boolean true will be returned, however on failure to find
+ // the |header_name| in the header the return value will be false. The state
+ // in which |header_value| is left in for failures is an empty string.
+ virtual bool GetHeader(const std::string& header_name,
+ std::string* header_value) const = 0;
+
// If data is coming in too quickly, you can call Pause() to pause the
// transfer. The delegate will not have ReceivedBytes() called while
// an HttpFetcher is paused.
@@ -150,6 +165,10 @@
// set to the response code when the transfer is complete.
int http_response_code_;
+ // Set when there is an error that can't be expressed in the form of
+ // |http_response_code_|.
+ ErrorCode auxiliary_error_code_{ErrorCode::kSuccess};
+
// The delegate; may be null.
HttpFetcherDelegate* delegate_;
diff --git a/common/http_fetcher_unittest.cc b/common/http_fetcher_unittest.cc
index 237ea20..589579e 100644
--- a/common/http_fetcher_unittest.cc
+++ b/common/http_fetcher_unittest.cc
@@ -29,6 +29,7 @@
#include <base/location.h>
#include <base/logging.h>
#include <base/message_loop/message_loop.h>
+#include <base/stl_util.h>
#include <base/strings/string_number_conversions.h>
#include <base/strings/string_util.h>
#include <base/strings/stringprintf.h>
@@ -1049,7 +1050,7 @@
unique_ptr<HttpServer> server(this->test_.CreateServer());
ASSERT_TRUE(server->started_);
- for (size_t c = 0; c < arraysize(kRedirectCodes); ++c) {
+ for (size_t c = 0; c < base::size(kRedirectCodes); ++c) {
const string url = base::StringPrintf(
"/redirect/%d/download/%d", kRedirectCodes[c], kMediumLength);
RedirectTest(server.get(), true, url, this->test_.NewLargeFetcher());
@@ -1066,7 +1067,7 @@
string url;
for (int r = 0; r < kDownloadMaxRedirects; r++) {
url += base::StringPrintf("/redirect/%d",
- kRedirectCodes[r % arraysize(kRedirectCodes)]);
+ kRedirectCodes[r % base::size(kRedirectCodes)]);
}
url += base::StringPrintf("/download/%d", kMediumLength);
RedirectTest(server.get(), true, url, this->test_.NewLargeFetcher());
@@ -1082,7 +1083,7 @@
string url;
for (int r = 0; r < kDownloadMaxRedirects + 1; r++) {
url += base::StringPrintf("/redirect/%d",
- kRedirectCodes[r % arraysize(kRedirectCodes)]);
+ kRedirectCodes[r % base::size(kRedirectCodes)]);
}
url += base::StringPrintf("/download/%d", kMediumLength);
RedirectTest(server.get(), false, url, this->test_.NewLargeFetcher());
diff --git a/common/hwid_override.cc b/common/hwid_override.cc
index 8800e94..1bb0f8f 100644
--- a/common/hwid_override.cc
+++ b/common/hwid_override.cc
@@ -16,14 +16,12 @@
#include "update_engine/common/hwid_override.h"
-#include <map>
#include <string>
#include <base/files/file_path.h>
#include <base/files/file_util.h>
#include <brillo/key_value_store.h>
-using std::map;
using std::string;
namespace chromeos_update_engine {
diff --git a/common/mock_excluder.h b/common/mock_excluder.h
new file mode 100644
index 0000000..bc54772
--- /dev/null
+++ b/common/mock_excluder.h
@@ -0,0 +1,37 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_COMMON_MOCK_EXCLUDER_H_
+#define UPDATE_ENGINE_COMMON_MOCK_EXCLUDER_H_
+
+#include "update_engine/common/excluder_interface.h"
+
+#include <string>
+
+#include <gmock/gmock.h>
+
+namespace chromeos_update_engine {
+
+class MockExcluder : public ExcluderInterface {
+ public:
+ MOCK_METHOD(bool, Exclude, (const std::string&), (override));
+ MOCK_METHOD(bool, IsExcluded, (const std::string&), (override));
+ MOCK_METHOD(bool, Reset, (), (override));
+};
+
+} // namespace chromeos_update_engine
+
+#endif  // UPDATE_ENGINE_COMMON_MOCK_EXCLUDER_H_
diff --git a/common/mock_http_fetcher.h b/common/mock_http_fetcher.h
index 492e6ce..0f04319 100644
--- a/common/mock_http_fetcher.h
+++ b/common/mock_http_fetcher.h
@@ -89,6 +89,12 @@
void SetHeader(const std::string& header_name,
const std::string& header_value) override;
+ bool GetHeader(const std::string& header_name,
+ std::string* header_value) const override {
+ header_value->clear();
+ return false;
+ }
+
// Return the value of the header |header_name| or the empty string if not
// set.
std::string GetHeader(const std::string& header_name) const;
diff --git a/common/mock_prefs.h b/common/mock_prefs.h
index 2582e19..62417a8 100644
--- a/common/mock_prefs.h
+++ b/common/mock_prefs.h
@@ -18,6 +18,7 @@
#define UPDATE_ENGINE_COMMON_MOCK_PREFS_H_
#include <string>
+#include <vector>
#include <gmock/gmock.h>
@@ -41,6 +42,9 @@
MOCK_CONST_METHOD1(Exists, bool(const std::string& key));
MOCK_METHOD1(Delete, bool(const std::string& key));
+ MOCK_CONST_METHOD2(GetSubKeys,
+ bool(const std::string&, std::vector<std::string>*));
+
MOCK_METHOD2(AddObserver, void(const std::string& key, ObserverInterface*));
MOCK_METHOD2(RemoveObserver,
void(const std::string& key, ObserverInterface*));
diff --git a/common/multi_range_http_fetcher.h b/common/multi_range_http_fetcher.h
index f57ea7f..ef32f0d 100644
--- a/common/multi_range_http_fetcher.h
+++ b/common/multi_range_http_fetcher.h
@@ -83,6 +83,11 @@
base_fetcher_->SetHeader(header_name, header_value);
}
+ bool GetHeader(const std::string& header_name,
+ std::string* header_value) const override {
+ return base_fetcher_->GetHeader(header_name, header_value);
+ }
+
void Pause() override { base_fetcher_->Pause(); }
void Unpause() override { base_fetcher_->Unpause(); }
diff --git a/common/prefs.cc b/common/prefs.cc
index 12d06c0..615014f 100644
--- a/common/prefs.cc
+++ b/common/prefs.cc
@@ -18,17 +18,37 @@
#include <algorithm>
+#include <base/files/file_enumerator.h>
#include <base/files/file_util.h>
#include <base/logging.h>
#include <base/strings/string_number_conversions.h>
+#include <base/strings/string_split.h>
#include <base/strings/string_util.h>
#include "update_engine/common/utils.h"
using std::string;
+using std::vector;
namespace chromeos_update_engine {
+namespace {
+
+const char kKeySeparator = '/';
+
+void DeleteEmptyDirectories(const base::FilePath& path) {
+ base::FileEnumerator path_enum(
+ path, false /* recursive */, base::FileEnumerator::DIRECTORIES);
+ for (base::FilePath dir_path = path_enum.Next(); !dir_path.empty();
+ dir_path = path_enum.Next()) {
+ DeleteEmptyDirectories(dir_path);
+ if (base::IsDirectoryEmpty(dir_path))
+ base::DeleteFile(dir_path, false);
+ }
+}
+
+} // namespace
+
bool PrefsBase::GetString(const string& key, string* value) const {
return storage_->GetKey(key, value);
}
@@ -54,7 +74,7 @@
}
bool PrefsBase::SetInt64(const string& key, const int64_t value) {
- return SetString(key, base::Int64ToString(value));
+ return SetString(key, base::NumberToString(value));
}
bool PrefsBase::GetBoolean(const string& key, bool* value) const {
@@ -92,6 +112,10 @@
return true;
}
+bool PrefsBase::GetSubKeys(const string& ns, vector<string>* keys) const {
+ return storage_->GetSubKeys(ns, keys);
+}
+
void PrefsBase::AddObserver(const string& key, ObserverInterface* observer) {
observers_[key].push_back(observer);
}
@@ -104,6 +128,10 @@
observers_for_key.erase(observer_it);
}
+string PrefsInterface::CreateSubKey(const vector<string>& ns_and_key) {
+ return base::JoinString(ns_and_key, string(1, kKeySeparator));
+}
+
// Prefs
bool Prefs::Init(const base::FilePath& prefs_dir) {
@@ -112,6 +140,8 @@
bool Prefs::FileStorage::Init(const base::FilePath& prefs_dir) {
prefs_dir_ = prefs_dir;
+ // Delete empty directories. Ignore errors when deleting empty directories.
+ DeleteEmptyDirectories(prefs_dir_);
return true;
}
@@ -119,12 +149,29 @@
base::FilePath filename;
TEST_AND_RETURN_FALSE(GetFileNameForKey(key, &filename));
if (!base::ReadFileToString(filename, value)) {
- LOG(INFO) << key << " not present in " << prefs_dir_.value();
return false;
}
return true;
}
+bool Prefs::FileStorage::GetSubKeys(const string& ns,
+ vector<string>* keys) const {
+ base::FilePath filename;
+ TEST_AND_RETURN_FALSE(GetFileNameForKey(ns, &filename));
+ base::FileEnumerator namespace_enum(
+ prefs_dir_, true, base::FileEnumerator::FILES);
+ for (base::FilePath f = namespace_enum.Next(); !f.empty();
+ f = namespace_enum.Next()) {
+ auto filename_str = filename.value();
+ if (f.value().compare(0, filename_str.length(), filename_str) == 0) {
+ // Only return the key portion excluding the |prefs_dir_| with slash.
+ keys->push_back(f.value().substr(
+ prefs_dir_.AsEndingWithSeparator().value().length()));
+ }
+ }
+ return true;
+}
+
bool Prefs::FileStorage::SetKey(const string& key, const string& value) {
base::FilePath filename;
TEST_AND_RETURN_FALSE(GetFileNameForKey(key, &filename));
@@ -153,13 +200,11 @@
bool Prefs::FileStorage::GetFileNameForKey(const string& key,
base::FilePath* filename) const {
- // Allows only non-empty keys containing [A-Za-z0-9_-].
+ // Allows only non-empty keys containing [A-Za-z0-9_-/].
TEST_AND_RETURN_FALSE(!key.empty());
- for (size_t i = 0; i < key.size(); ++i) {
- char c = key.at(i);
+ for (char c : key)
TEST_AND_RETURN_FALSE(base::IsAsciiAlpha(c) || base::IsAsciiDigit(c) ||
- c == '_' || c == '-');
- }
+ c == '_' || c == '-' || c == kKeySeparator);
*filename = prefs_dir_.Append(key);
return true;
}
@@ -175,6 +220,24 @@
return true;
}
+bool MemoryPrefs::MemoryStorage::GetSubKeys(const string& ns,
+ vector<string>* keys) const {
+ using value_type = decltype(values_)::value_type;
+ using key_type = decltype(values_)::key_type;
+ auto lower_comp = [](const value_type& pr, const key_type& ns) {
+ return pr.first.substr(0, ns.length()) < ns;
+ };
+ auto upper_comp = [](const key_type& ns, const value_type& pr) {
+ return ns < pr.first.substr(0, ns.length());
+ };
+ auto lower_it =
+ std::lower_bound(begin(values_), end(values_), ns, lower_comp);
+ auto upper_it = std::upper_bound(lower_it, end(values_), ns, upper_comp);
+ while (lower_it != upper_it)
+ keys->push_back((lower_it++)->first);
+ return true;
+}
+
bool MemoryPrefs::MemoryStorage::SetKey(const string& key,
const string& value) {
values_[key] = value;
@@ -187,9 +250,8 @@
bool MemoryPrefs::MemoryStorage::DeleteKey(const string& key) {
auto it = values_.find(key);
- if (it == values_.end())
- return false;
- values_.erase(it);
+ if (it != values_.end())
+ values_.erase(it);
return true;
}
diff --git a/common/prefs.h b/common/prefs.h
index 0116454..3fc1d89 100644
--- a/common/prefs.h
+++ b/common/prefs.h
@@ -42,6 +42,11 @@
// Returns whether the operation succeeded.
virtual bool GetKey(const std::string& key, std::string* value) const = 0;
+ // Get the keys stored within the namespace. If there are no keys in the
+ // namespace, |keys| will be empty. Returns whether the operation succeeded.
+ virtual bool GetSubKeys(const std::string& ns,
+ std::vector<std::string>* keys) const = 0;
+
// Set the value of the key named |key| to |value| regardless of the
// previous value. Returns whether the operation succeeded.
virtual bool SetKey(const std::string& key, const std::string& value) = 0;
@@ -70,6 +75,9 @@
bool Exists(const std::string& key) const override;
bool Delete(const std::string& key) override;
+ bool GetSubKeys(const std::string& ns,
+ std::vector<std::string>* keys) const override;
+
void AddObserver(const std::string& key,
ObserverInterface* observer) override;
void RemoveObserver(const std::string& key,
@@ -111,6 +119,8 @@
// PrefsBase::StorageInterface overrides.
bool GetKey(const std::string& key, std::string* value) const override;
+ bool GetSubKeys(const std::string& ns,
+ std::vector<std::string>* keys) const override;
bool SetKey(const std::string& key, const std::string& value) override;
bool KeyExists(const std::string& key) const override;
bool DeleteKey(const std::string& key) override;
@@ -149,6 +159,8 @@
// PrefsBase::StorageInterface overrides.
bool GetKey(const std::string& key, std::string* value) const override;
+ bool GetSubKeys(const std::string& ns,
+ std::vector<std::string>* keys) const override;
bool SetKey(const std::string& key, const std::string& value) override;
bool KeyExists(const std::string& key) const override;
bool DeleteKey(const std::string& key) override;
diff --git a/common/prefs_interface.h b/common/prefs_interface.h
index 03ae3ec..1311cb4 100644
--- a/common/prefs_interface.h
+++ b/common/prefs_interface.h
@@ -20,6 +20,7 @@
#include <stdint.h>
#include <string>
+#include <vector>
namespace chromeos_update_engine {
@@ -79,6 +80,13 @@
// this key. Calling with non-existent keys does nothing.
virtual bool Delete(const std::string& key) = 0;
+ // Creates a key which is part of a sub preference.
+ static std::string CreateSubKey(const std::vector<std::string>& ns_with_key);
+
+ // Returns a list of keys within the namespace.
+ virtual bool GetSubKeys(const std::string& ns,
+ std::vector<std::string>* keys) const = 0;
+
// Add an observer to watch whenever the given |key| is modified. The
// OnPrefSet() and OnPrefDelete() methods will be called whenever any of the
// Set*() methods or the Delete() method are called on the given key,
diff --git a/common/prefs_unittest.cc b/common/prefs_unittest.cc
index cb6fc70..6dd26c0 100644
--- a/common/prefs_unittest.cc
+++ b/common/prefs_unittest.cc
@@ -20,6 +20,7 @@
#include <limits>
#include <string>
+#include <vector>
#include <base/files/file_util.h>
#include <base/files/scoped_temp_dir.h>
@@ -30,8 +31,11 @@
#include <gtest/gtest.h>
using std::string;
+using std::vector;
using testing::_;
+using testing::ElementsAre;
using testing::Eq;
+using testing::UnorderedElementsAre;
namespace {
// Test key used along the tests.
@@ -40,12 +44,92 @@
namespace chromeos_update_engine {
-class PrefsTest : public ::testing::Test {
+class BasePrefsTest : public ::testing::Test {
+ protected:
+ void MultiNamespaceKeyTest() {
+ ASSERT_TRUE(common_prefs_);
+ auto key0 = common_prefs_->CreateSubKey({"ns1", "key"});
+ // Corner case for "ns1".
+ auto key0corner = common_prefs_->CreateSubKey({"ns11", "key"});
+ auto key1A = common_prefs_->CreateSubKey({"ns1", "nsA", "keyA"});
+ auto key1B = common_prefs_->CreateSubKey({"ns1", "nsA", "keyB"});
+ auto key2 = common_prefs_->CreateSubKey({"ns1", "nsB", "key"});
+ // Corner case for "ns1/nsB".
+ auto key2corner = common_prefs_->CreateSubKey({"ns1", "nsB1", "key"});
+ EXPECT_FALSE(common_prefs_->Exists(key0));
+ EXPECT_FALSE(common_prefs_->Exists(key1A));
+ EXPECT_FALSE(common_prefs_->Exists(key1B));
+ EXPECT_FALSE(common_prefs_->Exists(key2));
+
+ EXPECT_TRUE(common_prefs_->SetString(key0, ""));
+ EXPECT_TRUE(common_prefs_->SetString(key0corner, ""));
+ EXPECT_TRUE(common_prefs_->SetString(key1A, ""));
+ EXPECT_TRUE(common_prefs_->SetString(key1B, ""));
+ EXPECT_TRUE(common_prefs_->SetString(key2, ""));
+ EXPECT_TRUE(common_prefs_->SetString(key2corner, ""));
+
+ EXPECT_TRUE(common_prefs_->Exists(key0));
+ EXPECT_TRUE(common_prefs_->Exists(key0corner));
+ EXPECT_TRUE(common_prefs_->Exists(key1A));
+ EXPECT_TRUE(common_prefs_->Exists(key1B));
+ EXPECT_TRUE(common_prefs_->Exists(key2));
+ EXPECT_TRUE(common_prefs_->Exists(key2corner));
+
+ vector<string> keys2;
+ EXPECT_TRUE(common_prefs_->GetSubKeys("ns1/nsB/", &keys2));
+ EXPECT_THAT(keys2, ElementsAre(key2));
+ for (const auto& key : keys2)
+ EXPECT_TRUE(common_prefs_->Delete(key));
+ EXPECT_TRUE(common_prefs_->Exists(key0));
+ EXPECT_TRUE(common_prefs_->Exists(key0corner));
+ EXPECT_TRUE(common_prefs_->Exists(key1A));
+ EXPECT_TRUE(common_prefs_->Exists(key1B));
+ EXPECT_FALSE(common_prefs_->Exists(key2));
+ EXPECT_TRUE(common_prefs_->Exists(key2corner));
+
+ vector<string> keys2corner;
+ EXPECT_TRUE(common_prefs_->GetSubKeys("ns1/nsB", &keys2corner));
+ EXPECT_THAT(keys2corner, ElementsAre(key2corner));
+ for (const auto& key : keys2corner)
+ EXPECT_TRUE(common_prefs_->Delete(key));
+ EXPECT_FALSE(common_prefs_->Exists(key2corner));
+
+ vector<string> keys1;
+ EXPECT_TRUE(common_prefs_->GetSubKeys("ns1/nsA/", &keys1));
+ EXPECT_THAT(keys1, UnorderedElementsAre(key1A, key1B));
+ for (const auto& key : keys1)
+ EXPECT_TRUE(common_prefs_->Delete(key));
+ EXPECT_TRUE(common_prefs_->Exists(key0));
+ EXPECT_TRUE(common_prefs_->Exists(key0corner));
+ EXPECT_FALSE(common_prefs_->Exists(key1A));
+ EXPECT_FALSE(common_prefs_->Exists(key1B));
+
+ vector<string> keys0;
+ EXPECT_TRUE(common_prefs_->GetSubKeys("ns1/", &keys0));
+ EXPECT_THAT(keys0, ElementsAre(key0));
+ for (const auto& key : keys0)
+ EXPECT_TRUE(common_prefs_->Delete(key));
+ EXPECT_FALSE(common_prefs_->Exists(key0));
+ EXPECT_TRUE(common_prefs_->Exists(key0corner));
+
+ vector<string> keys0corner;
+ EXPECT_TRUE(common_prefs_->GetSubKeys("ns1", &keys0corner));
+ EXPECT_THAT(keys0corner, ElementsAre(key0corner));
+ for (const auto& key : keys0corner)
+ EXPECT_TRUE(common_prefs_->Delete(key));
+ EXPECT_FALSE(common_prefs_->Exists(key0corner));
+ }
+
+ PrefsInterface* common_prefs_;
+};
+
+class PrefsTest : public BasePrefsTest {
protected:
void SetUp() override {
ASSERT_TRUE(temp_dir_.CreateUniqueTempDir());
prefs_dir_ = temp_dir_.GetPath();
ASSERT_TRUE(prefs_.Init(prefs_dir_));
+ common_prefs_ = &prefs_;
}
bool SetValue(const string& key, const string& value) {
@@ -59,6 +143,31 @@
Prefs prefs_;
};
+TEST(Prefs, Init) {
+ Prefs prefs;
+ const string ns1 = "ns1";
+ const string ns2A = "ns2A";
+ const string ns2B = "ns2B";
+ const string sub_pref = "sp";
+
+ base::ScopedTempDir temp_dir;
+ ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+ auto ns1_path = temp_dir.GetPath().Append(ns1);
+ auto ns2A_path = ns1_path.Append(ns2A);
+ auto ns2B_path = ns1_path.Append(ns2B);
+ auto sub_pref_path = ns2A_path.Append(sub_pref);
+
+ EXPECT_TRUE(base::CreateDirectory(ns2B_path));
+ EXPECT_TRUE(base::PathExists(ns2B_path));
+
+ EXPECT_TRUE(base::CreateDirectory(sub_pref_path));
+ EXPECT_TRUE(base::PathExists(sub_pref_path));
+
+ EXPECT_TRUE(base::PathExists(ns1_path));
+ ASSERT_TRUE(prefs.Init(temp_dir.GetPath()));
+ EXPECT_FALSE(base::PathExists(ns1_path));
+}
+
TEST_F(PrefsTest, GetFileNameForKey) {
const char kAllvalidCharsKey[] =
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz_-";
@@ -77,6 +186,18 @@
EXPECT_FALSE(prefs_.file_storage_.GetFileNameForKey("", &path));
}
+TEST_F(PrefsTest, CreateSubKey) {
+ const string name_space = "ns";
+ const string sub_pref1 = "sp1";
+ const string sub_pref2 = "sp2";
+ const string sub_key = "sk";
+
+ EXPECT_EQ(PrefsInterface::CreateSubKey({name_space, sub_pref1, sub_key}),
+ "ns/sp1/sk");
+ EXPECT_EQ(PrefsInterface::CreateSubKey({name_space, sub_pref2, sub_key}),
+ "ns/sp2/sk");
+}
+
TEST_F(PrefsTest, GetString) {
const string test_data = "test data";
ASSERT_TRUE(SetValue(kKey, test_data));
@@ -279,6 +400,29 @@
EXPECT_FALSE(prefs_.Exists(kKey));
}
+TEST_F(PrefsTest, SetDeleteSubKey) {
+ const string name_space = "ns";
+ const string sub_pref = "sp";
+ const string sub_key1 = "sk1";
+ const string sub_key2 = "sk2";
+ auto key1 = prefs_.CreateSubKey({name_space, sub_pref, sub_key1});
+ auto key2 = prefs_.CreateSubKey({name_space, sub_pref, sub_key2});
+ base::FilePath sub_pref_path = prefs_dir_.Append(name_space).Append(sub_pref);
+
+ ASSERT_TRUE(prefs_.SetInt64(key1, 0));
+ ASSERT_TRUE(prefs_.SetInt64(key2, 0));
+ EXPECT_TRUE(base::PathExists(sub_pref_path.Append(sub_key1)));
+ EXPECT_TRUE(base::PathExists(sub_pref_path.Append(sub_key2)));
+
+ ASSERT_TRUE(prefs_.Delete(key1));
+ EXPECT_FALSE(base::PathExists(sub_pref_path.Append(sub_key1)));
+ EXPECT_TRUE(base::PathExists(sub_pref_path.Append(sub_key2)));
+ ASSERT_TRUE(prefs_.Delete(key2));
+ EXPECT_FALSE(base::PathExists(sub_pref_path.Append(sub_key2)));
+ prefs_.Init(prefs_dir_);
+ EXPECT_FALSE(base::PathExists(prefs_dir_.Append(name_space)));
+}
+
class MockPrefsObserver : public PrefsInterface::ObserverInterface {
public:
MOCK_METHOD1(OnPrefSet, void(const string&));
@@ -299,6 +443,19 @@
prefs_.Delete(kKey);
testing::Mock::VerifyAndClearExpectations(&mock_obserser);
+ auto key1 = prefs_.CreateSubKey({"ns", "sp1", "key1"});
+ prefs_.AddObserver(key1, &mock_obserser);
+
+ EXPECT_CALL(mock_obserser, OnPrefSet(key1));
+ EXPECT_CALL(mock_obserser, OnPrefDeleted(_)).Times(0);
+ prefs_.SetString(key1, "value");
+ testing::Mock::VerifyAndClearExpectations(&mock_obserser);
+
+ EXPECT_CALL(mock_obserser, OnPrefSet(_)).Times(0);
+ EXPECT_CALL(mock_obserser, OnPrefDeleted(Eq(key1)));
+ prefs_.Delete(key1);
+ testing::Mock::VerifyAndClearExpectations(&mock_obserser);
+
prefs_.RemoveObserver(kKey, &mock_obserser);
}
@@ -341,8 +498,14 @@
prefs_.RemoveObserver(kInvalidKey, &mock_obserser);
}
-class MemoryPrefsTest : public ::testing::Test {
+TEST_F(PrefsTest, MultiNamespaceKeyTest) {
+ MultiNamespaceKeyTest();
+}
+
+class MemoryPrefsTest : public BasePrefsTest {
protected:
+ void SetUp() override { common_prefs_ = &prefs_; }
+
MemoryPrefs prefs_;
};
@@ -358,7 +521,16 @@
EXPECT_TRUE(prefs_.Delete(kKey));
EXPECT_FALSE(prefs_.Exists(kKey));
- EXPECT_FALSE(prefs_.Delete(kKey));
+ EXPECT_TRUE(prefs_.Delete(kKey));
+
+ auto key = prefs_.CreateSubKey({"ns", "sp", "sk"});
+ ASSERT_TRUE(prefs_.SetInt64(key, 0));
+ EXPECT_TRUE(prefs_.Exists(key));
+ EXPECT_TRUE(prefs_.Delete(kKey));
+}
+
+TEST_F(MemoryPrefsTest, MultiNamespaceKeyTest) {
+ MultiNamespaceKeyTest();
}
} // namespace chromeos_update_engine
diff --git a/common/subprocess.cc b/common/subprocess.cc
index 0131f10..298a65c 100644
--- a/common/subprocess.cc
+++ b/common/subprocess.cc
@@ -29,6 +29,7 @@
#include <base/bind.h>
#include <base/logging.h>
#include <base/posix/eintr_wrapper.h>
+#include <base/stl_util.h>
#include <base/strings/string_util.h>
#include <base/strings/stringprintf.h>
#include <brillo/process.h>
@@ -95,6 +96,7 @@
proc->RedirectUsingPipe(STDOUT_FILENO, false);
proc->SetPreExecCallback(base::Bind(&SetupChild, env, flags));
+ LOG(INFO) << "Running \"" << base::JoinString(cmd, " ") << "\"";
return proc->Start();
}
@@ -122,13 +124,17 @@
bytes_read = 0;
bool eof;
bool ok = utils::ReadAll(
- record->stdout_fd, buf, arraysize(buf), &bytes_read, &eof);
+ record->stdout_fd, buf, base::size(buf), &bytes_read, &eof);
record->stdout.append(buf, bytes_read);
if (!ok || eof) {
// There was either an error or an EOF condition, so we are done watching
// the file descriptor.
+#ifdef __ANDROID__
MessageLoop::current()->CancelTask(record->stdout_task_id);
record->stdout_task_id = MessageLoop::kTaskIdNull;
+#else
+ record->stdout_controller.reset();
+#endif // __ANDROID__
return;
}
} while (bytes_read);
@@ -143,8 +149,12 @@
// Make sure we read any remaining process output and then close the pipe.
OnStdoutReady(record);
+#ifdef __ANDROID__
MessageLoop::current()->CancelTask(record->stdout_task_id);
record->stdout_task_id = MessageLoop::kTaskIdNull;
+#else
+ record->stdout_controller.reset();
+#endif // __ANDROID__
// Don't print any log if the subprocess exited with exit code 0.
if (info.si_code != CLD_EXITED) {
@@ -199,12 +209,18 @@
<< record->stdout_fd << ".";
}
+#ifdef __ANDROID__
record->stdout_task_id = MessageLoop::current()->WatchFileDescriptor(
FROM_HERE,
record->stdout_fd,
MessageLoop::WatchMode::kWatchRead,
true,
base::Bind(&Subprocess::OnStdoutReady, record.get()));
+#else
+ record->stdout_controller = base::FileDescriptorWatcher::WatchReadable(
+ record->stdout_fd,
+ base::BindRepeating(&Subprocess::OnStdoutReady, record.get()));
+#endif // __ANDROID__
subprocess_records_[pid] = std::move(record);
return pid;
@@ -234,22 +250,20 @@
bool Subprocess::SynchronousExec(const vector<string>& cmd,
int* return_code,
- string* stdout) {
- // The default for SynchronousExec is to use kSearchPath since the code relies
- // on that.
- return SynchronousExecFlags(
- cmd, kRedirectStderrToStdout | kSearchPath, return_code, stdout);
+ string* stdout,
+ string* stderr) {
+ // The default for |SynchronousExec| is to use |kSearchPath| since the code
+ // relies on that.
+ return SynchronousExecFlags(cmd, kSearchPath, return_code, stdout, stderr);
}
bool Subprocess::SynchronousExecFlags(const vector<string>& cmd,
uint32_t flags,
int* return_code,
- string* stdout) {
+ string* stdout,
+ string* stderr) {
brillo::ProcessImpl proc;
- // It doesn't make sense to redirect some pipes in the synchronous case
- // because we won't be reading on our end, so we don't expose the output_pipes
- // in this case.
- if (!LaunchProcess(cmd, flags, {}, &proc)) {
+ if (!LaunchProcess(cmd, flags, {STDERR_FILENO}, &proc)) {
LOG(ERROR) << "Failed to launch subprocess";
return false;
}
@@ -257,21 +271,39 @@
if (stdout) {
stdout->clear();
}
+ if (stderr) {
+ stderr->clear();
+ }
- int fd = proc.GetPipe(STDOUT_FILENO);
+ // Read from both stdout and stderr individually.
+ int stdout_fd = proc.GetPipe(STDOUT_FILENO);
+ int stderr_fd = proc.GetPipe(STDERR_FILENO);
vector<char> buffer(32 * 1024);
- while (true) {
- int rc = HANDLE_EINTR(read(fd, buffer.data(), buffer.size()));
- if (rc < 0) {
- PLOG(ERROR) << "Reading from child's output";
- break;
- } else if (rc == 0) {
- break;
- } else {
- if (stdout)
+ bool stdout_closed = false, stderr_closed = false;
+ while (!stdout_closed || !stderr_closed) {
+ if (!stdout_closed) {
+ int rc = HANDLE_EINTR(read(stdout_fd, buffer.data(), buffer.size()));
+ if (rc <= 0) {
+ stdout_closed = true;
+ if (rc < 0)
+ PLOG(ERROR) << "Reading from child's stdout";
+ } else if (stdout != nullptr) {
stdout->append(buffer.data(), rc);
+ }
+ }
+
+ if (!stderr_closed) {
+ int rc = HANDLE_EINTR(read(stderr_fd, buffer.data(), buffer.size()));
+ if (rc <= 0) {
+ stderr_closed = true;
+ if (rc < 0)
+ PLOG(ERROR) << "Reading from child's stderr";
+ } else if (stderr != nullptr) {
+ stderr->append(buffer.data(), rc);
+ }
}
}
+
// At this point, the subprocess already closed the output, so we only need to
// wait for it to finish.
int proc_return_code = proc.Wait();
diff --git a/common/subprocess.h b/common/subprocess.h
index bc19d16..f1b9f1f 100644
--- a/common/subprocess.h
+++ b/common/subprocess.h
@@ -25,6 +25,7 @@
#include <vector>
#include <base/callback.h>
+#include <base/files/file_descriptor_watcher_posix.h>
#include <base/logging.h>
#include <base/macros.h>
#include <brillo/asynchronous_signal_handler_interface.h>
@@ -87,14 +88,16 @@
// Executes a command synchronously. Returns true on success. If |stdout| is
// non-null, the process output is stored in it, otherwise the output is
- // logged. Note that stderr is redirected to stdout.
+ // logged.
static bool SynchronousExec(const std::vector<std::string>& cmd,
int* return_code,
- std::string* stdout);
+ std::string* stdout,
+ std::string* stderr);
static bool SynchronousExecFlags(const std::vector<std::string>& cmd,
uint32_t flags,
int* return_code,
- std::string* stdout);
+ std::string* stdout,
+ std::string* stderr);
// Gets the one instance.
static Subprocess& Get() { return *subprocess_singleton_; }
@@ -120,8 +123,12 @@
// These are used to monitor the stdout of the running process, including
// the stderr if it was redirected.
+#ifdef __ANDROID__
brillo::MessageLoop::TaskId stdout_task_id{
brillo::MessageLoop::kTaskIdNull};
+#else
+ std::unique_ptr<base::FileDescriptorWatcher::Controller> stdout_controller;
+#endif // __ANDROID__
int stdout_fd{-1};
std::string stdout;
};
diff --git a/common/subprocess_unittest.cc b/common/subprocess_unittest.cc
index 104ef41..bc52b83 100644
--- a/common/subprocess_unittest.cc
+++ b/common/subprocess_unittest.cc
@@ -45,6 +45,7 @@
using base::TimeDelta;
using brillo::MessageLoop;
using std::string;
+using std::unique_ptr;
using std::vector;
namespace {
@@ -73,6 +74,10 @@
brillo::BaseMessageLoop loop_{&base_loop_};
brillo::AsynchronousSignalHandler async_signal_handler_;
Subprocess subprocess_;
+#ifndef __ANDROID__
+ unique_ptr<base::FileDescriptorWatcher::Controller> watcher_;
+#endif // __ANDROID__
+
};
namespace {
@@ -193,7 +198,7 @@
TEST_F(SubprocessTest, SynchronousTrueSearchsOnPath) {
int rc = -1;
EXPECT_TRUE(Subprocess::SynchronousExecFlags(
- {"true"}, Subprocess::kSearchPath, &rc, nullptr));
+ {"true"}, Subprocess::kSearchPath, &rc, nullptr, nullptr));
EXPECT_EQ(0, rc);
}
@@ -201,16 +206,17 @@
vector<string> cmd = {
kBinPath "/sh", "-c", "echo -n stdout-here; echo -n stderr-there >&2"};
int rc = -1;
- string stdout;
- ASSERT_TRUE(Subprocess::SynchronousExec(cmd, &rc, &stdout));
+ string stdout, stderr;
+ ASSERT_TRUE(Subprocess::SynchronousExec(cmd, &rc, &stdout, &stderr));
EXPECT_EQ(0, rc);
- EXPECT_EQ("stdout-herestderr-there", stdout);
+ EXPECT_EQ("stdout-here", stdout);
+ EXPECT_EQ("stderr-there", stderr);
}
TEST_F(SubprocessTest, SynchronousEchoNoOutputTest) {
int rc = -1;
ASSERT_TRUE(Subprocess::SynchronousExec(
- {kBinPath "/sh", "-c", "echo test"}, &rc, nullptr));
+ {kBinPath "/sh", "-c", "echo test"}, &rc, nullptr, nullptr));
EXPECT_EQ(0, rc);
}
@@ -255,6 +261,7 @@
int fifo_fd = HANDLE_EINTR(open(fifo_path.c_str(), O_RDONLY));
EXPECT_GE(fifo_fd, 0);
+#ifdef __ANDROID__
loop_.WatchFileDescriptor(FROM_HERE,
fifo_fd,
MessageLoop::WatchMode::kWatchRead,
@@ -270,6 +277,25 @@
},
fifo_fd,
tag));
+#else
+ watcher_ = base::FileDescriptorWatcher::WatchReadable(
+ fifo_fd,
+ base::Bind(
+ [](unique_ptr<base::FileDescriptorWatcher::Controller>* watcher,
+ int fifo_fd,
+ uint32_t tag) {
+ char c;
+ EXPECT_EQ(1, HANDLE_EINTR(read(fifo_fd, &c, 1)));
+ EXPECT_EQ('X', c);
+ LOG(INFO) << "Killing tag " << tag;
+ Subprocess::Get().KillExec(tag);
+ *watcher = nullptr;
+ },
+      // watcher_ is no longer used outside the closure.
+ base::Unretained(&watcher_),
+ fifo_fd,
+ tag));
+#endif // __ANDROID__
// This test would leak a callback that runs when the child process exits
// unless we wait for it to run.
diff --git a/common/utils.cc b/common/utils.cc
index fc89040..3e3d830 100644
--- a/common/utils.cc
+++ b/common/utils.cc
@@ -84,49 +84,6 @@
// The path to the kernel's boot_id.
const char kBootIdPath[] = "/proc/sys/kernel/random/boot_id";
-// Return true if |disk_name| is an MTD or a UBI device. Note that this test is
-// simply based on the name of the device.
-bool IsMtdDeviceName(const string& disk_name) {
- return base::StartsWith(
- disk_name, "/dev/ubi", base::CompareCase::SENSITIVE) ||
- base::StartsWith(disk_name, "/dev/mtd", base::CompareCase::SENSITIVE);
-}
-
-// Return the device name for the corresponding partition on a NAND device.
-// WARNING: This function returns device names that are not mountable.
-string MakeNandPartitionName(int partition_num) {
- switch (partition_num) {
- case 2:
- case 4:
- case 6: {
- return base::StringPrintf("/dev/mtd%d", partition_num);
- }
- default: {
- return base::StringPrintf("/dev/ubi%d_0", partition_num);
- }
- }
-}
-
-// Return the device name for the corresponding partition on a NAND device that
-// may be mountable (but may not be writable).
-string MakeNandPartitionNameForMount(int partition_num) {
- switch (partition_num) {
- case 2:
- case 4:
- case 6: {
- return base::StringPrintf("/dev/mtd%d", partition_num);
- }
- case 3:
- case 5:
- case 7: {
- return base::StringPrintf("/dev/ubiblock%d_0", partition_num);
- }
- default: {
- return base::StringPrintf("/dev/ubi%d_0", partition_num);
- }
- }
-}
-
// If |path| is absolute, or explicit relative to the current working directory,
// leaves it as is. Otherwise, uses the system's temp directory, as defined by
// base::GetTempDir() and prepends it to |path|. On success stores the full
@@ -474,22 +431,6 @@
return false;
}
- size_t partition_name_len = string::npos;
- if (partition_name[last_nondigit_pos] == '_') {
- // NAND block devices have weird naming which could be something
- // like "/dev/ubiblock2_0". We discard "_0" in such a case.
- size_t prev_nondigit_pos =
- partition_name.find_last_not_of("0123456789", last_nondigit_pos - 1);
- if (prev_nondigit_pos == string::npos ||
- (prev_nondigit_pos + 1) == last_nondigit_pos) {
- LOG(ERROR) << "Unable to parse partition device name: " << partition_name;
- return false;
- }
-
- partition_name_len = last_nondigit_pos - prev_nondigit_pos;
- last_nondigit_pos = prev_nondigit_pos;
- }
-
if (out_disk_name) {
// Special case for MMC devices which have the following naming scheme:
// mmcblk0p2
@@ -502,8 +443,7 @@
}
if (out_partition_num) {
- string partition_str =
- partition_name.substr(last_nondigit_pos + 1, partition_name_len);
+ string partition_str = partition_name.substr(last_nondigit_pos + 1);
*out_partition_num = atoi(partition_str.c_str());
}
return true;
@@ -520,13 +460,6 @@
return string();
}
- if (IsMtdDeviceName(disk_name)) {
- // Special case for UBI block devices.
- // 1. ubiblock is not writable, we need to use plain "ubi".
- // 2. There is a "_0" suffix.
- return MakeNandPartitionName(partition_num);
- }
-
string partition_name = disk_name;
if (isdigit(partition_name.back())) {
// Special case for devices with names ending with a digit.
@@ -540,17 +473,6 @@
return partition_name;
}
-string MakePartitionNameForMount(const string& part_name) {
- if (IsMtdDeviceName(part_name)) {
- int partition_num;
- if (!SplitPartitionName(part_name, nullptr, &partition_num)) {
- return "";
- }
- return MakeNandPartitionNameForMount(partition_num);
- }
- return part_name;
-}
-
string ErrnoNumberAsString(int err) {
char buf[100];
buf[0] = '\0';
@@ -567,33 +489,6 @@
return lstat(path, &stbuf) == 0 && S_ISLNK(stbuf.st_mode) != 0;
}
-bool TryAttachingUbiVolume(int volume_num, int timeout) {
- const string volume_path = base::StringPrintf("/dev/ubi%d_0", volume_num);
- if (FileExists(volume_path.c_str())) {
- return true;
- }
-
- int exit_code;
- vector<string> cmd = {"ubiattach",
- "-m",
- base::StringPrintf("%d", volume_num),
- "-d",
- base::StringPrintf("%d", volume_num)};
- TEST_AND_RETURN_FALSE(Subprocess::SynchronousExec(cmd, &exit_code, nullptr));
- TEST_AND_RETURN_FALSE(exit_code == 0);
-
- cmd = {"ubiblock", "--create", volume_path};
- TEST_AND_RETURN_FALSE(Subprocess::SynchronousExec(cmd, &exit_code, nullptr));
- TEST_AND_RETURN_FALSE(exit_code == 0);
-
- while (timeout > 0 && !FileExists(volume_path.c_str())) {
- sleep(1);
- timeout--;
- }
-
- return FileExists(volume_path.c_str());
-}
-
bool MakeTempFile(const string& base_filename_template,
string* filename,
int* fd) {
@@ -1083,6 +978,10 @@
return str;
}
+string GetExclusionName(const string& str_to_convert) {
+ return base::NumberToString(base::StringPieceHash()(str_to_convert));
+}
+
} // namespace utils
} // namespace chromeos_update_engine
diff --git a/common/utils.h b/common/utils.h
index c6c34f4..23ac03d 100644
--- a/common/utils.h
+++ b/common/utils.h
@@ -128,11 +128,6 @@
// Returns true if |path| exists and is a symbolic link.
bool IsSymlink(const char* path);
-// Try attaching UBI |volume_num|. If there is any error executing required
-// commands to attach the volume, this function returns false. This function
-// only returns true if "/dev/ubi%d_0" becomes available in |timeout| seconds.
-bool TryAttachingUbiVolume(int volume_num, int timeout);
-
// If |base_filename_template| is neither absolute (starts with "/") nor
// explicitly relative to the current working directory (starts with "./" or
// "../"), then it is prepended the system's temporary directory. On success,
@@ -163,14 +158,6 @@
// Returns empty string when invalid parameters are passed in
std::string MakePartitionName(const std::string& disk_name, int partition_num);
-// Similar to "MakePartitionName" but returns a name that is suitable for
-// mounting. On NAND system we can write to "/dev/ubiX_0", which is what
-// MakePartitionName returns, but we cannot mount that device. To mount, we
-// have to use "/dev/ubiblockX_0" for rootfs. Stateful and OEM partitions are
-// mountable with "/dev/ubiX_0". The input is a partition device such as
-// /dev/sda3. Return empty string on error.
-std::string MakePartitionNameForMount(const std::string& part_name);
-
// Set the read-only attribute on the block device |device| to the value passed
// in |read_only|. Return whether the operation succeeded.
bool SetBlockDeviceReadOnly(const std::string& device, bool read_only);
@@ -332,6 +319,9 @@
// Return a string representation of |utime| for log file names.
std::string GetTimeAsString(time_t utime);
+// Returns the string format of the hashed |str_to_convert| that can be used
+// with |Excluder| as the exclusion name.
+std::string GetExclusionName(const std::string& str_to_convert);
} // namespace utils
diff --git a/common/utils_unittest.cc b/common/utils_unittest.cc
index b4ac2f5..ebcc548 100644
--- a/common/utils_unittest.cc
+++ b/common/utils_unittest.cc
@@ -123,10 +123,6 @@
EXPECT_EQ("/dev/mmcblk0", disk);
EXPECT_EQ(3, part_num);
- EXPECT_TRUE(utils::SplitPartitionName("/dev/ubiblock3_2", &disk, &part_num));
- EXPECT_EQ("/dev/ubiblock", disk);
- EXPECT_EQ(3, part_num);
-
EXPECT_TRUE(utils::SplitPartitionName("/dev/loop10", &disk, &part_num));
EXPECT_EQ("/dev/loop", disk);
EXPECT_EQ(10, part_num);
@@ -135,14 +131,6 @@
EXPECT_EQ("/dev/loop28", disk);
EXPECT_EQ(11, part_num);
- EXPECT_TRUE(utils::SplitPartitionName("/dev/loop10_0", &disk, &part_num));
- EXPECT_EQ("/dev/loop", disk);
- EXPECT_EQ(10, part_num);
-
- EXPECT_TRUE(utils::SplitPartitionName("/dev/loop28p11_0", &disk, &part_num));
- EXPECT_EQ("/dev/loop28", disk);
- EXPECT_EQ(11, part_num);
-
EXPECT_FALSE(utils::SplitPartitionName("/dev/mmcblk0p", &disk, &part_num));
EXPECT_FALSE(utils::SplitPartitionName("/dev/sda", &disk, &part_num));
EXPECT_FALSE(utils::SplitPartitionName("/dev/foo/bar", &disk, &part_num));
@@ -157,29 +145,6 @@
EXPECT_EQ("/dev/mmcblk0p2", utils::MakePartitionName("/dev/mmcblk0", 2));
EXPECT_EQ("/dev/loop8", utils::MakePartitionName("/dev/loop", 8));
EXPECT_EQ("/dev/loop12p2", utils::MakePartitionName("/dev/loop12", 2));
- EXPECT_EQ("/dev/ubi5_0", utils::MakePartitionName("/dev/ubiblock", 5));
- EXPECT_EQ("/dev/mtd4", utils::MakePartitionName("/dev/ubiblock", 4));
- EXPECT_EQ("/dev/ubi3_0", utils::MakePartitionName("/dev/ubiblock", 3));
- EXPECT_EQ("/dev/mtd2", utils::MakePartitionName("/dev/ubiblock", 2));
- EXPECT_EQ("/dev/ubi1_0", utils::MakePartitionName("/dev/ubiblock", 1));
-}
-
-TEST(UtilsTest, MakePartitionNameForMountTest) {
- EXPECT_EQ("/dev/sda4", utils::MakePartitionNameForMount("/dev/sda4"));
- EXPECT_EQ("/dev/sda123", utils::MakePartitionNameForMount("/dev/sda123"));
- EXPECT_EQ("/dev/mmcblk2", utils::MakePartitionNameForMount("/dev/mmcblk2"));
- EXPECT_EQ("/dev/mmcblk0p2",
- utils::MakePartitionNameForMount("/dev/mmcblk0p2"));
- EXPECT_EQ("/dev/loop0", utils::MakePartitionNameForMount("/dev/loop0"));
- EXPECT_EQ("/dev/loop8", utils::MakePartitionNameForMount("/dev/loop8"));
- EXPECT_EQ("/dev/loop12p2", utils::MakePartitionNameForMount("/dev/loop12p2"));
- EXPECT_EQ("/dev/ubiblock5_0",
- utils::MakePartitionNameForMount("/dev/ubiblock5_0"));
- EXPECT_EQ("/dev/mtd4", utils::MakePartitionNameForMount("/dev/ubi4_0"));
- EXPECT_EQ("/dev/ubiblock3_0",
- utils::MakePartitionNameForMount("/dev/ubiblock3"));
- EXPECT_EQ("/dev/mtd2", utils::MakePartitionNameForMount("/dev/ubi2"));
- EXPECT_EQ("/dev/ubi1_0", utils::MakePartitionNameForMount("/dev/ubiblock1"));
}
TEST(UtilsTest, FuzzIntTest) {
diff --git a/common_service.cc b/common_service.cc
index 0d5ee6d..85fb9e4 100644
--- a/common_service.cc
+++ b/common_service.cc
@@ -50,11 +50,7 @@
namespace {
// Log and set the error on the passed ErrorPtr.
void LogAndSetError(ErrorPtr* error,
-#if BASE_VER < 576279
- const tracked_objects::Location& location,
-#else
const base::Location& location,
-#endif
const string& reason) {
brillo::Error::AddTo(error,
location,
@@ -109,9 +105,8 @@
bool UpdateEngineService::AttemptInstall(brillo::ErrorPtr* error,
const string& omaha_url,
- const vector<string>& dlc_module_ids) {
- if (!system_state_->update_attempter()->CheckForInstall(dlc_module_ids,
- omaha_url)) {
+ const vector<string>& dlc_ids) {
+ if (!system_state_->update_attempter()->CheckForInstall(dlc_ids, omaha_url)) {
// TODO(xiaochu): support more detailed error messages.
LogAndSetError(error, FROM_HERE, "Could not schedule install operation.");
return false;
@@ -147,6 +142,17 @@
return true;
}
+bool UpdateEngineService::SetDlcActiveValue(brillo::ErrorPtr* error,
+ bool is_active,
+ const string& dlc_id) {
+ if (!system_state_->update_attempter()->SetDlcActiveValue(is_active,
+ dlc_id)) {
+ LogAndSetError(error, FROM_HERE, "SetDlcActiveValue failed.");
+ return false;
+ }
+ return true;
+}
+
bool UpdateEngineService::GetStatus(ErrorPtr* error,
UpdateEngineStatus* out_status) {
if (!system_state_->update_attempter()->GetStatus(out_status)) {
@@ -210,7 +216,7 @@
}
bool UpdateEngineService::SetCohortHint(ErrorPtr* error,
- string in_cohort_hint) {
+ const string& in_cohort_hint) {
PrefsInterface* prefs = system_state_->prefs();
// It is ok to override the cohort hint with an invalid value since it is
@@ -412,20 +418,4 @@
return true;
}
-bool UpdateEngineService::GetEolStatus(ErrorPtr* error,
- int32_t* out_eol_status) {
- PrefsInterface* prefs = system_state_->prefs();
-
- string str_eol_status;
- if (prefs->Exists(kPrefsOmahaEolStatus) &&
- !prefs->GetString(kPrefsOmahaEolStatus, &str_eol_status)) {
- LogAndSetError(error, FROM_HERE, "Error getting the end-of-life status.");
- return false;
- }
-
- // StringToEolStatus will return kSupported for invalid values.
- *out_eol_status = static_cast<int32_t>(StringToEolStatus(str_eol_status));
- return true;
-}
-
} // namespace chromeos_update_engine
diff --git a/common_service.h b/common_service.h
index f93855d..cfcece5 100644
--- a/common_service.h
+++ b/common_service.h
@@ -55,10 +55,10 @@
// Attempts a DLC module install operation.
// |omaha_url|: the URL to query for update.
- // |dlc_module_ids|: a list of DLC module IDs.
+ // |dlc_ids|: a list of DLC module IDs.
bool AttemptInstall(brillo::ErrorPtr* error,
const std::string& omaha_url,
- const std::vector<std::string>& dlc_module_ids);
+ const std::vector<std::string>& dlc_ids);
bool AttemptRollback(brillo::ErrorPtr* error, bool in_powerwash);
@@ -70,6 +70,13 @@
// update. This is used for development only.
bool ResetStatus(brillo::ErrorPtr* error);
+ // Sets the DLC as active or inactive. When set to active, the ping metadata
+ // for the DLC is updated accordingly. When set to inactive, the metadata
+ // for the DLC is deleted.
+ bool SetDlcActiveValue(brillo::ErrorPtr* error,
+ bool is_active,
+ const std::string& dlc_id);
+
// Returns the current status of the Update Engine. If an update is in
// progress, the number of operations, size to download and overall progress
// is reported.
@@ -102,7 +109,8 @@
// Sets the current "cohort hint" value to |in_cohort_hint|. The cohort hint
// is sent back to Omaha on every request and can be used as a hint of what
// cohort should we be put on.
- bool SetCohortHint(brillo::ErrorPtr* error, std::string in_cohort_hint);
+ bool SetCohortHint(brillo::ErrorPtr* error,
+ const std::string& in_cohort_hint);
// Return the current cohort hint. This value can be set with SetCohortHint()
// and can also be updated from Omaha on every update check request.
@@ -153,10 +161,6 @@
bool GetLastAttemptError(brillo::ErrorPtr* error,
int32_t* out_last_attempt_error);
- // Returns the current end-of-life status of the device. This value is updated
- // on every update check and persisted on disk across reboots.
- bool GetEolStatus(brillo::ErrorPtr* error, int32_t* out_eol_status);
-
private:
SystemState* system_state_;
};
diff --git a/common_service_unittest.cc b/common_service_unittest.cc
index 65202a0..3dc8a22 100644
--- a/common_service_unittest.cc
+++ b/common_service_unittest.cc
@@ -100,6 +100,20 @@
EXPECT_FALSE(common_service_.AttemptInstall(&error_, "", {}));
}
+TEST_F(UpdateEngineServiceTest, SetDlcActiveValue) {
+ EXPECT_CALL(*mock_update_attempter_, SetDlcActiveValue(_, _))
+ .WillOnce(Return(true));
+
+ EXPECT_TRUE(common_service_.SetDlcActiveValue(&error_, true, "dlc0"));
+}
+
+TEST_F(UpdateEngineServiceTest, SetDlcActiveValueReturnsFalse) {
+ EXPECT_CALL(*mock_update_attempter_, SetDlcActiveValue(_, _))
+ .WillOnce(Return(false));
+
+ EXPECT_FALSE(common_service_.SetDlcActiveValue(&error_, true, "dlc0"));
+}
+
// SetChannel is allowed when there's no device policy (the device is not
// enterprise enrolled).
TEST_F(UpdateEngineServiceTest, SetChannelWithNoPolicy) {
@@ -169,19 +183,4 @@
UpdateEngineService::kErrorFailed));
}
-TEST_F(UpdateEngineServiceTest, GetEolStatusTest) {
- FakePrefs fake_prefs;
- fake_system_state_.set_prefs(&fake_prefs);
- // The default value should be "supported".
- int32_t eol_status = static_cast<int32_t>(EolStatus::kEol);
- EXPECT_TRUE(common_service_.GetEolStatus(&error_, &eol_status));
- EXPECT_EQ(nullptr, error_);
- EXPECT_EQ(EolStatus::kSupported, static_cast<EolStatus>(eol_status));
-
- fake_prefs.SetString(kPrefsOmahaEolStatus, "security-only");
- EXPECT_TRUE(common_service_.GetEolStatus(&error_, &eol_status));
- EXPECT_EQ(nullptr, error_);
- EXPECT_EQ(EolStatus::kSecurityOnly, static_cast<EolStatus>(eol_status));
-}
-
} // namespace chromeos_update_engine
diff --git a/connection_manager.cc b/connection_manager.cc
index 7263a74..fe43f37 100644
--- a/connection_manager.cc
+++ b/connection_manager.cc
@@ -54,66 +54,58 @@
bool ConnectionManager::IsUpdateAllowedOver(
ConnectionType type, ConnectionTethering tethering) const {
- switch (type) {
- case ConnectionType::kBluetooth:
- return false;
-
- case ConnectionType::kCellular: {
- set<string> allowed_types;
-
- const policy::DevicePolicy* device_policy =
- system_state_->device_policy();
-
- // The device_policy is loaded in a lazy way before an update check. Load
- // it now from the libbrillo cache if it wasn't already loaded.
- if (!device_policy) {
- UpdateAttempter* update_attempter = system_state_->update_attempter();
- if (update_attempter) {
- update_attempter->RefreshDevicePolicy();
- device_policy = system_state_->device_policy();
- }
- }
-
- if (!device_policy) {
- // Device policy fails to be loaded (possibly due to guest account). We
- // do not check the local user setting here, which should be checked by
- // |OmahaRequestAction| during checking for update.
- LOG(INFO) << "Allowing updates over cellular as device policy "
- "fails to be loaded.";
- return true;
- }
-
- if (device_policy->GetAllowedConnectionTypesForUpdate(&allowed_types)) {
- // The update setting is enforced by the device policy.
-
- if (!base::ContainsKey(allowed_types, shill::kTypeCellular)) {
- LOG(INFO) << "Disabling updates over cellular connection as it's not "
- "allowed in the device policy.";
- return false;
- }
-
- LOG(INFO) << "Allowing updates over cellular per device policy.";
- return true;
- }
-
- // If there's no update setting in the device policy, we do not check
- // the local user setting here, which should be checked by
- // |OmahaRequestAction| during checking for update.
- LOG(INFO) << "Allowing updates over cellular as device policy does "
- "not include update setting.";
+ if (type != ConnectionType::kCellular) {
+ if (tethering != ConnectionTethering::kConfirmed) {
return true;
}
- default:
- if (tethering == ConnectionTethering::kConfirmed) {
- // Treat this connection as if it is a cellular connection.
- LOG(INFO) << "Current connection is confirmed tethered, using Cellular "
- "setting.";
- return IsUpdateAllowedOver(ConnectionType::kCellular,
- ConnectionTethering::kUnknown);
- }
- return true;
+ // Treat this connection as if it is a cellular connection.
+ LOG(INFO)
+ << "Current connection is confirmed tethered, using Cellular setting.";
}
+
+ const policy::DevicePolicy* device_policy = system_state_->device_policy();
+
+ // The device_policy is loaded in a lazy way before an update check. Load
+ // it now from the libbrillo cache if it wasn't already loaded.
+ if (!device_policy) {
+ UpdateAttempter* update_attempter = system_state_->update_attempter();
+ if (update_attempter) {
+ update_attempter->RefreshDevicePolicy();
+ device_policy = system_state_->device_policy();
+ }
+ }
+
+ if (!device_policy) {
+ // Device policy fails to be loaded (possibly due to guest account). We
+ // do not check the local user setting here, which should be checked by
+ // |OmahaRequestAction| during checking for update.
+ LOG(INFO) << "Allowing updates over cellular as device policy fails to be "
+ "loaded.";
+ return true;
+ }
+
+ set<string> allowed_types;
+ if (device_policy->GetAllowedConnectionTypesForUpdate(&allowed_types)) {
+ // The update setting is enforced by the device policy.
+
+ // TODO(crbug.com/1054279): Use base::Contains after uprev to r680000.
+ if (allowed_types.find(shill::kTypeCellular) == allowed_types.end()) {
+ LOG(INFO) << "Disabling updates over cellular connection as it's not "
+ "allowed in the device policy.";
+ return false;
+ }
+
+ LOG(INFO) << "Allowing updates over cellular per device policy.";
+ return true;
+ }
+
+ // If there's no update setting in the device policy, we do not check
+ // the local user setting here, which should be checked by
+ // |OmahaRequestAction| during checking for update.
+ LOG(INFO) << "Allowing updates over cellular as device policy does "
+ "not include update setting.";
+ return true;
}
bool ConnectionManager::IsAllowedConnectionTypesForUpdateSet() const {
diff --git a/connection_manager_unittest.cc b/connection_manager_unittest.cc
index 3cdaf4c..97436c9 100644
--- a/connection_manager_unittest.cc
+++ b/connection_manager_unittest.cc
@@ -184,9 +184,6 @@
TEST_F(ConnectionManagerTest, SimpleTest) {
TestWithServiceType(shill::kTypeEthernet, nullptr, ConnectionType::kEthernet);
TestWithServiceType(shill::kTypeWifi, nullptr, ConnectionType::kWifi);
- TestWithServiceType(shill::kTypeWimax, nullptr, ConnectionType::kWimax);
- TestWithServiceType(
- shill::kTypeBluetooth, nullptr, ConnectionType::kBluetooth);
TestWithServiceType(shill::kTypeCellular, nullptr, ConnectionType::kCellular);
}
@@ -195,8 +192,6 @@
TestWithServiceType(
shill::kTypeVPN, shill::kTypeVPN, ConnectionType::kUnknown);
TestWithServiceType(shill::kTypeVPN, shill::kTypeWifi, ConnectionType::kWifi);
- TestWithServiceType(
- shill::kTypeVPN, shill::kTypeWimax, ConnectionType::kWimax);
}
TEST_F(ConnectionManagerTest, TetheringTest) {
@@ -229,16 +224,6 @@
ConnectionTethering::kUnknown));
}
-TEST_F(ConnectionManagerTest, AllowUpdatesOverWimaxTest) {
- EXPECT_TRUE(cmut_.IsUpdateAllowedOver(ConnectionType::kWimax,
- ConnectionTethering::kUnknown));
-}
-
-TEST_F(ConnectionManagerTest, BlockUpdatesOverBluetoothTest) {
- EXPECT_FALSE(cmut_.IsUpdateAllowedOver(ConnectionType::kBluetooth,
- ConnectionTethering::kUnknown));
-}
-
TEST_F(ConnectionManagerTest, AllowUpdatesOnlyOver3GPerPolicyTest) {
policy::MockDevicePolicy allow_3g_policy;
@@ -263,10 +248,9 @@
// This test tests multiple connection types being allowed, with
// 3G one among them. Only Cellular is currently enforced by the policy
- // setting, the others are ignored (see Bluetooth for example).
+ // setting.
set<string> allowed_set;
allowed_set.insert(StringForConnectionType(ConnectionType::kCellular));
- allowed_set.insert(StringForConnectionType(ConnectionType::kBluetooth));
EXPECT_CALL(allow_3g_policy, GetAllowedConnectionTypesForUpdate(_))
.Times(3)
@@ -280,10 +264,6 @@
ConnectionTethering::kUnknown));
EXPECT_TRUE(cmut_.IsUpdateAllowedOver(ConnectionType::kWifi,
ConnectionTethering::kUnknown));
- EXPECT_TRUE(cmut_.IsUpdateAllowedOver(ConnectionType::kWimax,
- ConnectionTethering::kUnknown));
- EXPECT_FALSE(cmut_.IsUpdateAllowedOver(ConnectionType::kBluetooth,
- ConnectionTethering::kUnknown));
// Tethered networks are treated in the same way as Cellular networks and
// thus allowed.
@@ -325,7 +305,6 @@
set<string> allowed_set;
allowed_set.insert(StringForConnectionType(ConnectionType::kEthernet));
allowed_set.insert(StringForConnectionType(ConnectionType::kWifi));
- allowed_set.insert(StringForConnectionType(ConnectionType::kWimax));
EXPECT_CALL(block_3g_policy, GetAllowedConnectionTypesForUpdate(_))
.Times(1)
@@ -363,10 +342,6 @@
StringForConnectionType(ConnectionType::kEthernet));
EXPECT_STREQ(shill::kTypeWifi,
StringForConnectionType(ConnectionType::kWifi));
- EXPECT_STREQ(shill::kTypeWimax,
- StringForConnectionType(ConnectionType::kWimax));
- EXPECT_STREQ(shill::kTypeBluetooth,
- StringForConnectionType(ConnectionType::kBluetooth));
EXPECT_STREQ(shill::kTypeCellular,
StringForConnectionType(ConnectionType::kCellular));
EXPECT_STREQ("Unknown", StringForConnectionType(ConnectionType::kUnknown));
diff --git a/connection_utils.cc b/connection_utils.cc
index aeb0163..5af7341 100644
--- a/connection_utils.cc
+++ b/connection_utils.cc
@@ -32,10 +32,6 @@
return ConnectionType::kEthernet;
} else if (type_str == shill::kTypeWifi) {
return ConnectionType::kWifi;
- } else if (type_str == shill::kTypeWimax) {
- return ConnectionType::kWimax;
- } else if (type_str == shill::kTypeBluetooth) {
- return ConnectionType::kBluetooth;
} else if (type_str == shill::kTypeCellular) {
return ConnectionType::kCellular;
} else if (type_str == kTypeDisconnected) {
@@ -61,10 +57,6 @@
return shill::kTypeEthernet;
case ConnectionType::kWifi:
return shill::kTypeWifi;
- case ConnectionType::kWimax:
- return shill::kTypeWimax;
- case ConnectionType::kBluetooth:
- return shill::kTypeBluetooth;
case ConnectionType::kCellular:
return shill::kTypeCellular;
case ConnectionType::kDisconnected:
diff --git a/connection_utils.h b/connection_utils.h
index d5133a1..4e71fcf 100644
--- a/connection_utils.h
+++ b/connection_utils.h
@@ -25,8 +25,6 @@
kDisconnected,
kEthernet,
kWifi,
- kWimax,
- kBluetooth,
kCellular,
kUnknown
};
diff --git a/daemon.cc b/daemon.cc
deleted file mode 100644
index d42344a..0000000
--- a/daemon.cc
+++ /dev/null
@@ -1,117 +0,0 @@
-//
-// Copyright (C) 2015 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#include "update_engine/daemon.h"
-
-#include <sysexits.h>
-
-#include <base/bind.h>
-#include <base/location.h>
-#if USE_BINDER
-#include <binderwrapper/binder_wrapper.h>
-#endif // USE_BINDER
-
-#if USE_OMAHA
-#include "update_engine/real_system_state.h"
-#else // !USE_OMAHA
-#include "update_engine/daemon_state_android.h"
-#endif // USE_OMAHA
-
-namespace chromeos_update_engine {
-
-int UpdateEngineDaemon::OnInit() {
- // Register the |subprocess_| singleton with this Daemon as the signal
- // handler.
- subprocess_.Init(this);
-
- int exit_code = Daemon::OnInit();
- if (exit_code != EX_OK)
- return exit_code;
-
-#if USE_BINDER
- android::BinderWrapper::Create();
- binder_watcher_.Init();
-#endif // USE_BINDER
-
-#if USE_OMAHA
- // Initialize update engine global state but continue if something fails.
- // TODO(deymo): Move the daemon_state_ initialization to a factory method
- // avoiding the explicit re-usage of the |bus| instance, shared between
- // D-Bus service and D-Bus client calls.
- RealSystemState* real_system_state = new RealSystemState();
- daemon_state_.reset(real_system_state);
- LOG_IF(ERROR, !real_system_state->Initialize())
- << "Failed to initialize system state.";
-#else // !USE_OMAHA
- DaemonStateAndroid* daemon_state_android = new DaemonStateAndroid();
- daemon_state_.reset(daemon_state_android);
- LOG_IF(ERROR, !daemon_state_android->Initialize())
- << "Failed to initialize system state.";
-#endif // USE_OMAHA
-
-#if USE_BINDER
- // Create the Binder Service.
-#if USE_OMAHA
- binder_service_ = new BinderUpdateEngineBrilloService{real_system_state};
-#else // !USE_OMAHA
- binder_service_ = new BinderUpdateEngineAndroidService{
- daemon_state_android->service_delegate()};
-#endif // USE_OMAHA
- auto binder_wrapper = android::BinderWrapper::Get();
- if (!binder_wrapper->RegisterService(binder_service_->ServiceName(),
- binder_service_)) {
- LOG(ERROR) << "Failed to register binder service.";
- }
-
- daemon_state_->AddObserver(binder_service_.get());
-#endif // USE_BINDER
-
-#if USE_DBUS
- // Create the DBus service.
- dbus_adaptor_.reset(new UpdateEngineAdaptor(real_system_state));
- daemon_state_->AddObserver(dbus_adaptor_.get());
-
- dbus_adaptor_->RegisterAsync(base::Bind(&UpdateEngineDaemon::OnDBusRegistered,
- base::Unretained(this)));
- LOG(INFO) << "Waiting for DBus object to be registered.";
-#else // !USE_DBUS
- daemon_state_->StartUpdater();
-#endif // USE_DBUS
- return EX_OK;
-}
-
-#if USE_DBUS
-void UpdateEngineDaemon::OnDBusRegistered(bool succeeded) {
- if (!succeeded) {
- LOG(ERROR) << "Registering the UpdateEngineAdaptor";
- QuitWithExitCode(1);
- return;
- }
-
- // Take ownership of the service now that everything is initialized. We need
- // to this now and not before to avoid exposing a well known DBus service
- // path that doesn't have the service it is supposed to implement.
- if (!dbus_adaptor_->RequestOwnership()) {
- LOG(ERROR) << "Unable to take ownership of the DBus service, is there "
- << "other update_engine daemon running?";
- QuitWithExitCode(1);
- return;
- }
- daemon_state_->StartUpdater();
-}
-#endif // USE_DBUS
-
-} // namespace chromeos_update_engine
diff --git a/daemon_android.cc b/daemon_android.cc
new file mode 100644
index 0000000..1aa921f
--- /dev/null
+++ b/daemon_android.cc
@@ -0,0 +1,64 @@
+//
+// Copyright (C) 2015 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/daemon_android.h"
+
+#include <sysexits.h>
+
+#include <binderwrapper/binder_wrapper.h>
+
+#include "update_engine/daemon_state_android.h"
+
+using std::unique_ptr;
+
+namespace chromeos_update_engine {
+
+unique_ptr<DaemonBase> DaemonBase::CreateInstance() {
+ return std::make_unique<DaemonAndroid>();
+}
+
+int DaemonAndroid::OnInit() {
+ // Register the |subprocess_| singleton with this Daemon as the signal
+ // handler.
+ subprocess_.Init(this);
+
+ int exit_code = brillo::Daemon::OnInit();
+ if (exit_code != EX_OK)
+ return exit_code;
+
+ android::BinderWrapper::Create();
+ binder_watcher_.Init();
+
+ DaemonStateAndroid* daemon_state_android = new DaemonStateAndroid();
+ daemon_state_.reset(daemon_state_android);
+ LOG_IF(ERROR, !daemon_state_android->Initialize())
+ << "Failed to initialize system state.";
+
+ // Create the Binder Service.
+ binder_service_ = new BinderUpdateEngineAndroidService{
+ daemon_state_android->service_delegate()};
+ auto binder_wrapper = android::BinderWrapper::Get();
+ if (!binder_wrapper->RegisterService(binder_service_->ServiceName(),
+ binder_service_)) {
+ LOG(ERROR) << "Failed to register binder service.";
+ }
+
+ daemon_state_->AddObserver(binder_service_.get());
+ daemon_state_->StartUpdater();
+ return EX_OK;
+}
+
+} // namespace chromeos_update_engine
diff --git a/daemon_android.h b/daemon_android.h
new file mode 100644
index 0000000..baead37
--- /dev/null
+++ b/daemon_android.h
@@ -0,0 +1,56 @@
+//
+// Copyright (C) 2015 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_DAEMON_ANDROID_H_
+#define UPDATE_ENGINE_DAEMON_ANDROID_H_
+
+#include <memory>
+
+#include <brillo/binder_watcher.h>
+
+#include "update_engine/binder_service_android.h"
+#include "update_engine/common/subprocess.h"
+#include "update_engine/daemon_base.h"
+#include "update_engine/daemon_state_interface.h"
+
+namespace chromeos_update_engine {
+
+class DaemonAndroid : public DaemonBase {
+ public:
+ DaemonAndroid() = default;
+
+ protected:
+ int OnInit() override;
+
+ private:
+ // The Subprocess singleton class requires a |brillo::MessageLoop| in the
+ // current thread, so we need to initialize it from this class instead of
+ // the main() function.
+ Subprocess subprocess_;
+
+ brillo::BinderWatcher binder_watcher_;
+ android::sp<BinderUpdateEngineAndroidService> binder_service_;
+
+ // The daemon state with all the required daemon classes for the configured
+ // platform.
+ std::unique_ptr<DaemonStateInterface> daemon_state_;
+
+ DISALLOW_COPY_AND_ASSIGN(DaemonAndroid);
+};
+
+} // namespace chromeos_update_engine
+
+#endif // UPDATE_ENGINE_DAEMON_ANDROID_H_
diff --git a/daemon_base.h b/daemon_base.h
new file mode 100644
index 0000000..742a0ba
--- /dev/null
+++ b/daemon_base.h
@@ -0,0 +1,40 @@
+//
+// Copyright (C) 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_DAEMON_BASE_H_
+#define UPDATE_ENGINE_DAEMON_BASE_H_
+
+#include <memory>
+
+#include <brillo/daemons/daemon.h>
+
+namespace chromeos_update_engine {
+
+class DaemonBase : public brillo::Daemon {
+ public:
+ DaemonBase() = default;
+ virtual ~DaemonBase() = default;
+
+ // Creates an instance of the daemon.
+ static std::unique_ptr<DaemonBase> CreateInstance();
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(DaemonBase);
+};
+
+} // namespace chromeos_update_engine
+
+#endif // UPDATE_ENGINE_DAEMON_BASE_H_
diff --git a/daemon_chromeos.cc b/daemon_chromeos.cc
new file mode 100644
index 0000000..21740d8
--- /dev/null
+++ b/daemon_chromeos.cc
@@ -0,0 +1,82 @@
+//
+// Copyright (C) 2015 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/daemon_chromeos.h"
+
+#include <sysexits.h>
+
+#include <base/bind.h>
+#include <base/location.h>
+
+#include "update_engine/real_system_state.h"
+
+using brillo::Daemon;
+using std::unique_ptr;
+
+namespace chromeos_update_engine {
+
+unique_ptr<DaemonBase> DaemonBase::CreateInstance() {
+ return std::make_unique<DaemonChromeOS>();
+}
+
+int DaemonChromeOS::OnInit() {
+ // Register the |subprocess_| singleton with this Daemon as the signal
+ // handler.
+ subprocess_.Init(this);
+
+ int exit_code = Daemon::OnInit();
+ if (exit_code != EX_OK)
+ return exit_code;
+
+ // Initialize update engine global state but continue if something fails.
+ // TODO(deymo): Move the daemon_state_ initialization to a factory method
+ // avoiding the explicit re-usage of the |bus| instance, shared between
+ // D-Bus service and D-Bus client calls.
+ RealSystemState* real_system_state = new RealSystemState();
+ daemon_state_.reset(real_system_state);
+ LOG_IF(ERROR, !real_system_state->Initialize())
+ << "Failed to initialize system state.";
+
+ // Create the DBus service.
+ dbus_adaptor_.reset(new UpdateEngineAdaptor(real_system_state));
+ daemon_state_->AddObserver(dbus_adaptor_.get());
+
+ dbus_adaptor_->RegisterAsync(
+ base::Bind(&DaemonChromeOS::OnDBusRegistered, base::Unretained(this)));
+ LOG(INFO) << "Waiting for DBus object to be registered.";
+ return EX_OK;
+}
+
+void DaemonChromeOS::OnDBusRegistered(bool succeeded) {
+ if (!succeeded) {
+ LOG(ERROR) << "Registering the UpdateEngineAdaptor";
+ QuitWithExitCode(1);
+ return;
+ }
+
+ // Take ownership of the service now that everything is initialized. We need
+  // to do this now and not before to avoid exposing a well known DBus service
+ // path that doesn't have the service it is supposed to implement.
+ if (!dbus_adaptor_->RequestOwnership()) {
+ LOG(ERROR) << "Unable to take ownership of the DBus service, is there "
+ << "other update_engine daemon running?";
+ QuitWithExitCode(1);
+ return;
+ }
+ daemon_state_->StartUpdater();
+}
+
+} // namespace chromeos_update_engine
diff --git a/daemon.h b/daemon_chromeos.h
similarity index 63%
rename from daemon.h
rename to daemon_chromeos.h
index c10bb28..657e797 100644
--- a/daemon.h
+++ b/daemon_chromeos.h
@@ -14,41 +14,26 @@
// limitations under the License.
//
-#ifndef UPDATE_ENGINE_DAEMON_H_
-#define UPDATE_ENGINE_DAEMON_H_
+#ifndef UPDATE_ENGINE_DAEMON_CHROMEOS_H_
+#define UPDATE_ENGINE_DAEMON_CHROMEOS_H_
#include <memory>
-#include <string>
-#if USE_BINDER
-#include <brillo/binder_watcher.h>
-#endif // USE_BINDER
-#include <brillo/daemons/daemon.h>
-
-#if USE_BINDER
-#if USE_OMAHA
-#include "update_engine/binder_service_brillo.h"
-#else // !USE_OMAHA
-#include "update_engine/binder_service_android.h"
-#endif // USE_OMAHA
-#endif // USE_BINDER
#include "update_engine/common/subprocess.h"
+#include "update_engine/daemon_base.h"
#include "update_engine/daemon_state_interface.h"
-#if USE_DBUS
#include "update_engine/dbus_service.h"
-#endif // USE_DBUS
namespace chromeos_update_engine {
-class UpdateEngineDaemon : public brillo::Daemon {
+class DaemonChromeOS : public DaemonBase {
public:
- UpdateEngineDaemon() = default;
+ DaemonChromeOS() = default;
protected:
int OnInit() override;
private:
-#if USE_DBUS
// Run from the main loop when the |dbus_adaptor_| object is registered. At
// this point we can request ownership of the DBus service name and continue
// initialization.
@@ -56,32 +41,19 @@
// Main D-Bus service adaptor.
std::unique_ptr<UpdateEngineAdaptor> dbus_adaptor_;
-#endif // USE_DBUS
// The Subprocess singleton class requires a brillo::MessageLoop in the
// current thread, so we need to initialize it from this class instead of
// the main() function.
Subprocess subprocess_;
-#if USE_BINDER
- brillo::BinderWatcher binder_watcher_;
-#endif // USE_BINDER
-
-#if USE_BINDER
-#if USE_OMAHA
- android::sp<BinderUpdateEngineBrilloService> binder_service_;
-#else // !USE_OMAHA
- android::sp<BinderUpdateEngineAndroidService> binder_service_;
-#endif // USE_OMAHA
-#endif // USE_BINDER
-
// The daemon state with all the required daemon classes for the configured
// platform.
std::unique_ptr<DaemonStateInterface> daemon_state_;
- DISALLOW_COPY_AND_ASSIGN(UpdateEngineDaemon);
+ DISALLOW_COPY_AND_ASSIGN(DaemonChromeOS);
};
} // namespace chromeos_update_engine
-#endif // UPDATE_ENGINE_DAEMON_H_
+#endif // UPDATE_ENGINE_DAEMON_CHROMEOS_H_
diff --git a/dbus_bindings/org.chromium.UpdateEngineInterface.dbus-xml b/dbus_bindings/org.chromium.UpdateEngineInterface.dbus-xml
index f81d4ed..ac2f021 100644
--- a/dbus_bindings/org.chromium.UpdateEngineInterface.dbus-xml
+++ b/dbus_bindings/org.chromium.UpdateEngineInterface.dbus-xml
@@ -1,4 +1,19 @@
<?xml version="1.0" encoding="utf-8" ?>
+<!--
+ Copyright (C) 2019 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+!-->
<node name="/org/chromium/UpdateEngine">
<interface name="org.chromium.UpdateEngineInterface">
<annotation name="org.freedesktop.DBus.GLib.CSymbol"
@@ -20,7 +35,12 @@
<arg type="i" name="flags" direction="in" />
</method>
<method name="AttemptInstall">
- <arg type="s" name="dlc_request" direction="in" />
+ <arg type="s" name="omaha_url" direction="in" />
+ <arg type="as" name="dlc_ids" direction="in">
+ <tp:docstring>
+ The list of DLC IDs that needs to be installed.
+ </tp:docstring>
+ </arg>
</method>
<method name="AttemptRollback">
<arg type="b" name="powerwash" direction="in" />
@@ -30,12 +50,26 @@
</method>
<method name="ResetStatus">
</method>
- <method name="GetStatus">
- <arg type="x" name="last_checked_time" direction="out" />
- <arg type="d" name="progress" direction="out" />
- <arg type="s" name="current_operation" direction="out" />
- <arg type="s" name="new_version" direction="out" />
- <arg type="x" name="new_size" direction="out" />
+ <method name="SetDlcActiveValue">
+ <arg type="b" name="is_active" direction="in">
+ <tp:docstring>
+ If the DLC is being set to active or inactive.
+ </tp:docstring>
+ </arg>
+ <arg type="s" name="dlc_id" direction="in">
+ <tp:docstring>
+ The ID of the DLC module that will be set to active/inactive.
+ </tp:docstring>
+ </arg>
+ </method>
+ <method name="GetStatusAdvanced">
+ <arg type="ay" name="status" direction="out">
+ <tp:docstring>
+ The current status serialized in a protobuf.
+ </tp:docstring>
+ <annotation name="org.chromium.DBus.Argument.ProtobufClass"
+ value="update_engine::StatusResult"/>
+ </arg>
</method>
<method name="RebootIfNeeded">
</method>
@@ -80,12 +114,14 @@
<method name="GetDurationSinceUpdate">
<arg type="x" name="usec_wallclock" direction="out" />
</method>
- <signal name="StatusUpdate">
- <arg type="x" name="last_checked_time" />
- <arg type="d" name="progress" />
- <arg type="s" name="current_operation" />
- <arg type="s" name="new_version" />
- <arg type="x" name="new_size" />
+ <signal name="StatusUpdateAdvanced">
+ <arg type="ay" name="status" direction="out">
+ <tp:docstring>
+ The current status serialized in a protobuf.
+ </tp:docstring>
+ <annotation name="org.chromium.DBus.Argument.ProtobufClass"
+ value="update_engine::StatusResult"/>
+ </arg>
</signal>
<method name="GetPrevVersion">
<arg type="s" name="prev_version" direction="out" />
@@ -96,8 +132,5 @@
<method name="GetLastAttemptError">
<arg type="i" name="last_attempt_error" direction="out" />
</method>
- <method name="GetEolStatus">
- <arg type="i" name="eol_status" direction="out" />
- </method>
</interface>
</node>
diff --git a/dbus_service.cc b/dbus_service.cc
index 7296053..a282d1e 100644
--- a/dbus_service.cc
+++ b/dbus_service.cc
@@ -20,9 +20,9 @@
#include <vector>
#include <update_engine/dbus-constants.h>
-#include <update_engine/proto_bindings/update_engine.pb.h>
#include "update_engine/dbus_connection.h"
+#include "update_engine/proto_bindings/update_engine.pb.h"
#include "update_engine/update_status_utils.h"
namespace chromeos_update_engine {
@@ -31,8 +31,27 @@
using chromeos_update_engine::UpdateEngineService;
using std::string;
using std::vector;
+using update_engine::Operation;
+using update_engine::StatusResult;
using update_engine::UpdateEngineStatus;
+namespace {
+// Converts the internal |UpdateEngineStatus| to the protobuf |StatusResult|.
+void ConvertToStatusResult(const UpdateEngineStatus& ue_status,
+ StatusResult* out_status) {
+ out_status->set_last_checked_time(ue_status.last_checked_time);
+ out_status->set_progress(ue_status.progress);
+ out_status->set_current_operation(static_cast<Operation>(ue_status.status));
+ out_status->set_new_version(ue_status.new_version);
+ out_status->set_new_size(ue_status.new_size_bytes);
+ out_status->set_is_enterprise_rollback(ue_status.is_enterprise_rollback);
+ out_status->set_is_install(ue_status.is_install);
+ out_status->set_eol_date(ue_status.eol_date);
+ out_status->set_will_powerwash_after_reboot(
+ ue_status.will_powerwash_after_reboot);
+}
+} // namespace
+
DBusUpdateEngineService::DBusUpdateEngineService(SystemState* system_state)
: common_(new UpdateEngineService{system_state}) {}
@@ -63,26 +82,9 @@
}
bool DBusUpdateEngineService::AttemptInstall(ErrorPtr* error,
- const string& dlc_request) {
- // Parse the raw parameters into protobuf.
- DlcParameters dlc_parameters;
- if (!dlc_parameters.ParseFromString(dlc_request)) {
- *error = brillo::Error::Create(
- FROM_HERE, "update_engine", "INTERNAL", "parameters are invalid.");
- return false;
- }
- // Extract fields from the protobuf.
- vector<string> dlc_module_ids;
- for (const auto& dlc_info : dlc_parameters.dlc_infos()) {
- if (dlc_info.dlc_id().empty()) {
- *error = brillo::Error::Create(
- FROM_HERE, "update_engine", "INTERNAL", "parameters are invalid.");
- return false;
- }
- dlc_module_ids.push_back(dlc_info.dlc_id());
- }
- return common_->AttemptInstall(
- error, dlc_parameters.omaha_url(), dlc_module_ids);
+ const string& in_omaha_url,
+ const vector<string>& dlc_ids) {
+ return common_->AttemptInstall(error, in_omaha_url, dlc_ids);
}
bool DBusUpdateEngineService::AttemptRollback(ErrorPtr* error,
@@ -99,21 +101,20 @@
return common_->ResetStatus(error);
}
-bool DBusUpdateEngineService::GetStatus(ErrorPtr* error,
- int64_t* out_last_checked_time,
- double* out_progress,
- string* out_current_operation,
- string* out_new_version,
- int64_t* out_new_size) {
+bool DBusUpdateEngineService::SetDlcActiveValue(brillo::ErrorPtr* error,
+ bool is_active,
+ const string& dlc_id) {
+ return common_->SetDlcActiveValue(error, is_active, dlc_id);
+}
+
+bool DBusUpdateEngineService::GetStatusAdvanced(ErrorPtr* error,
+ StatusResult* out_status) {
UpdateEngineStatus status;
if (!common_->GetStatus(error, &status)) {
return false;
}
- *out_last_checked_time = status.last_checked_time;
- *out_progress = status.progress;
- *out_current_operation = UpdateStatusToString(status.status);
- *out_new_version = status.new_version;
- *out_new_size = status.new_size_bytes;
+
+ ConvertToStatusResult(status, out_status);
return true;
}
@@ -191,11 +192,6 @@
return common_->GetLastAttemptError(error, out_last_attempt_error);
}
-bool DBusUpdateEngineService::GetEolStatus(ErrorPtr* error,
- int32_t* out_eol_status) {
- return common_->GetEolStatus(error, out_eol_status);
-}
-
UpdateEngineAdaptor::UpdateEngineAdaptor(SystemState* system_state)
: org::chromium::UpdateEngineInterfaceAdaptor(&dbus_service_),
bus_(DBusConnection::Get()->GetDBus()),
@@ -217,11 +213,11 @@
void UpdateEngineAdaptor::SendStatusUpdate(
const UpdateEngineStatus& update_engine_status) {
- SendStatusUpdateSignal(update_engine_status.last_checked_time,
- update_engine_status.progress,
- UpdateStatusToString(update_engine_status.status),
- update_engine_status.new_version,
- update_engine_status.new_size_bytes);
+ StatusResult status;
+ ConvertToStatusResult(update_engine_status, &status);
+
+ // Send |StatusUpdateAdvanced| signal.
+ SendStatusUpdateAdvancedSignal(status);
}
} // namespace chromeos_update_engine
diff --git a/dbus_service.h b/dbus_service.h
index 134461b..873909e 100644
--- a/dbus_service.h
+++ b/dbus_service.h
@@ -21,9 +21,11 @@
#include <memory>
#include <string>
+#include <vector>
#include <base/memory/ref_counted.h>
#include <brillo/errors/error.h>
+#include <update_engine/proto_bindings/update_engine.pb.h>
#include "update_engine/common_service.h"
#include "update_engine/service_observer_interface.h"
@@ -50,7 +52,8 @@
int32_t in_flags_as_int) override;
bool AttemptInstall(brillo::ErrorPtr* error,
- const std::string& dlc_request) override;
+ const std::string& in_omaha_url,
+ const std::vector<std::string>& dlc_ids) override;
bool AttemptRollback(brillo::ErrorPtr* error, bool in_powerwash) override;
@@ -62,15 +65,17 @@
// update. This is used for development only.
bool ResetStatus(brillo::ErrorPtr* error) override;
- // Returns the current status of the Update Engine. If an update is in
- // progress, the number of operations, size to download and overall progress
- // is reported.
- bool GetStatus(brillo::ErrorPtr* error,
- int64_t* out_last_checked_time,
- double* out_progress,
- std::string* out_current_operation,
- std::string* out_new_version,
- int64_t* out_new_size) override;
+ // Sets the DLC as active or inactive. When set to active, the ping metadata
+ // for the DLC is updated accordingly. When set to inactive, the metadata
+ // for the DLC is deleted.
+ bool SetDlcActiveValue(brillo::ErrorPtr* error,
+ bool is_active,
+ const std::string& dlc_id) override;
+
+  // Similar to the above, but returns a protobuf instead. In the future it
+  // will have more features and is easily extendable.
+ bool GetStatusAdvanced(brillo::ErrorPtr* error,
+ update_engine::StatusResult* out_status) override;
// Reboots the device if an update is applied and a reboot is required.
bool RebootIfNeeded(brillo::ErrorPtr* error) override;
@@ -150,9 +155,6 @@
bool GetLastAttemptError(brillo::ErrorPtr* error,
int32_t* out_last_attempt_error) override;
- // Returns the current end-of-life status of the device in |out_eol_status|.
- bool GetEolStatus(brillo::ErrorPtr* error, int32_t* out_eol_status) override;
-
private:
std::unique_ptr<UpdateEngineService> common_;
};
diff --git a/dbus_test_utils.h b/dbus_test_utils.h
index b3748ce..72fd4e0 100644
--- a/dbus_test_utils.h
+++ b/dbus_test_utils.h
@@ -17,8 +17,10 @@
#ifndef UPDATE_ENGINE_DBUS_TEST_UTILS_H_
#define UPDATE_ENGINE_DBUS_TEST_UTILS_H_
+#include <memory>
#include <set>
#include <string>
+#include <utility>
#include <base/bind.h>
#include <brillo/message_loops/message_loop.h>
@@ -27,13 +29,13 @@
namespace chromeos_update_engine {
namespace dbus_test_utils {
-#define MOCK_SIGNAL_HANDLER_EXPECT_SIGNAL_HANDLER( \
- mock_signal_handler, mock_proxy, signal) \
- do { \
- EXPECT_CALL((mock_proxy), \
- Register##signal##SignalHandler(::testing::_, ::testing::_)) \
- .WillOnce(::chromeos_update_engine::dbus_test_utils::GrabCallbacks( \
- &(mock_signal_handler))); \
+#define MOCK_SIGNAL_HANDLER_EXPECT_SIGNAL_HANDLER( \
+ mock_signal_handler, mock_proxy, signal) \
+ do { \
+ EXPECT_CALL((mock_proxy), \
+ DoRegister##signal##SignalHandler(::testing::_, ::testing::_)) \
+ .WillOnce(::chromeos_update_engine::dbus_test_utils::GrabCallbacks( \
+ &(mock_signal_handler))); \
} while (false)
template <typename T>
@@ -52,10 +54,10 @@
void GrabCallbacks(
const base::Callback<T>& signal_callback,
- dbus::ObjectProxy::OnConnectedCallback on_connected_callback) {
+ dbus::ObjectProxy::OnConnectedCallback* on_connected_callback) {
signal_callback_.reset(new base::Callback<T>(signal_callback));
- on_connected_callback_.reset(
- new dbus::ObjectProxy::OnConnectedCallback(on_connected_callback));
+ on_connected_callback_.reset(new dbus::ObjectProxy::OnConnectedCallback(
+ std::move(*on_connected_callback)));
// Notify from the main loop that the callback was connected.
callback_connected_task_ = brillo::MessageLoop::current()->PostTask(
FROM_HERE,
@@ -66,7 +68,7 @@
private:
void OnCallbackConnected() {
callback_connected_task_ = brillo::MessageLoop::kTaskIdNull;
- on_connected_callback_->Run("", "", true);
+ std::move(*on_connected_callback_).Run("", "", true);
}
brillo::MessageLoop::TaskId callback_connected_task_{
diff --git a/dlcservice_chromeos.cc b/dlcservice_chromeos.cc
index e95f08f..08482ee 100644
--- a/dlcservice_chromeos.cc
+++ b/dlcservice_chromeos.cc
@@ -16,8 +16,10 @@
#include "update_engine/dlcservice_chromeos.h"
-#include <dlcservice/dbus-proxies.h>
+#include <brillo/errors/error.h>
#include <dlcservice/proto_bindings/dlcservice.pb.h>
+// NOLINTNEXTLINE(build/include_alpha) "dbus-proxies.h" needs "dlcservice.pb.h"
+#include <dlcservice/dbus-proxies.h>
#include "update_engine/dbus_connection.h"
@@ -26,27 +28,48 @@
namespace chromeos_update_engine {
+namespace {
+org::chromium::DlcServiceInterfaceProxy GetDlcServiceProxy() {
+ return {DBusConnection::Get()->GetDBus()};
+}
+} // namespace
+
std::unique_ptr<DlcServiceInterface> CreateDlcService() {
return std::make_unique<DlcServiceChromeOS>();
}
-bool DlcServiceChromeOS::GetInstalled(vector<string>* dlc_module_ids) {
- if (!dlc_module_ids)
+bool DlcServiceChromeOS::GetDlcsToUpdate(vector<string>* dlc_ids) {
+ if (!dlc_ids)
return false;
- org::chromium::DlcServiceInterfaceProxy dlcservice_proxy(
- DBusConnection::Get()->GetDBus());
- string dlc_module_list_str;
- if (!dlcservice_proxy.GetInstalled(&dlc_module_list_str, nullptr)) {
- LOG(ERROR) << "dlcservice does not return installed DLC module list.";
+ dlc_ids->clear();
+
+ brillo::ErrorPtr err;
+ if (!GetDlcServiceProxy().GetDlcsToUpdate(dlc_ids, &err)) {
+ LOG(ERROR) << "dlcservice failed to return DLCs that need to be updated. "
+ << "ErrorCode=" << err->GetCode()
+ << ", ErrMsg=" << err->GetMessage();
+ dlc_ids->clear();
return false;
}
- dlcservice::DlcModuleList dlc_module_list;
- if (!dlc_module_list.ParseFromString(dlc_module_list_str)) {
- LOG(ERROR) << "Errors parsing DlcModuleList protobuf.";
+ return true;
+}
+
+bool DlcServiceChromeOS::InstallCompleted(const vector<string>& dlc_ids) {
+ brillo::ErrorPtr err;
+ if (!GetDlcServiceProxy().InstallCompleted(dlc_ids, &err)) {
+ LOG(ERROR) << "dlcservice failed to complete install. ErrCode="
+ << err->GetCode() << ", ErrMsg=" << err->GetMessage();
return false;
}
- for (const auto& dlc_module_info : dlc_module_list.dlc_module_infos()) {
- dlc_module_ids->emplace_back(dlc_module_info.dlc_id());
+ return true;
+}
+
+bool DlcServiceChromeOS::UpdateCompleted(const vector<string>& dlc_ids) {
+ brillo::ErrorPtr err;
+ if (!GetDlcServiceProxy().UpdateCompleted(dlc_ids, &err)) {
+    LOG(ERROR) << "dlcservice failed to complete update. ErrCode="
+ << err->GetCode() << ", ErrMsg=" << err->GetMessage();
+ return false;
}
return true;
}
diff --git a/dlcservice_chromeos.h b/dlcservice_chromeos.h
index 8d103c1..8828e1a 100644
--- a/dlcservice_chromeos.h
+++ b/dlcservice_chromeos.h
@@ -32,8 +32,19 @@
DlcServiceChromeOS() = default;
~DlcServiceChromeOS() = default;
- // BootControlInterface overrides.
- bool GetInstalled(std::vector<std::string>* dlc_module_ids) override;
+ // DlcServiceInterface overrides.
+
+  // Will clear |dlc_ids|, which is passed in to be modified. Clearing by
+  // default avoids indeterminate behavior when |dlc_ids| wasn't empty to begin
+  // with (which could lead to duplicates), and keeps the result safe to use
+  // even when the returned error is not checked.
+ bool GetDlcsToUpdate(std::vector<std::string>* dlc_ids) override;
+
+ // Call into dlcservice for it to mark the DLC IDs as being installed.
+ bool InstallCompleted(const std::vector<std::string>& dlc_ids) override;
+
+ // Call into dlcservice for it to mark the DLC IDs as being updated.
+ bool UpdateCompleted(const std::vector<std::string>& dlc_ids) override;
private:
DISALLOW_COPY_AND_ASSIGN(DlcServiceChromeOS);
diff --git a/excluder_chromeos.cc b/excluder_chromeos.cc
new file mode 100644
index 0000000..bfd6f04
--- /dev/null
+++ b/excluder_chromeos.cc
@@ -0,0 +1,63 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/excluder_chromeos.h"
+
+#include <memory>
+#include <vector>
+
+#include <base/logging.h>
+#include <base/strings/string_piece.h>
+#include <base/strings/string_number_conversions.h>
+#include <base/strings/string_split.h>
+
+#include "update_engine/common/constants.h"
+#include "update_engine/common/prefs.h"
+#include "update_engine/system_state.h"
+
+using std::string;
+using std::vector;
+
+namespace chromeos_update_engine {
+
+std::unique_ptr<ExcluderInterface> CreateExcluder(PrefsInterface* prefs) {
+ return std::make_unique<ExcluderChromeOS>(prefs);
+}
+
+ExcluderChromeOS::ExcluderChromeOS(PrefsInterface* prefs) : prefs_(prefs) {}
+
+bool ExcluderChromeOS::Exclude(const string& name) {
+ auto key = prefs_->CreateSubKey({kExclusionPrefsSubDir, name});
+ return prefs_->SetString(key, "");
+}
+
+bool ExcluderChromeOS::IsExcluded(const string& name) {
+ auto key = prefs_->CreateSubKey({kExclusionPrefsSubDir, name});
+ return prefs_->Exists(key);
+}
+
+bool ExcluderChromeOS::Reset() {
+ bool ret = true;
+ vector<string> keys;
+ if (!prefs_->GetSubKeys(kExclusionPrefsSubDir, &keys))
+ return false;
+ for (const auto& key : keys)
+ if (!(ret &= prefs_->Delete(key)))
+ LOG(ERROR) << "Failed to delete exclusion pref for " << key;
+ return ret;
+}
+
+} // namespace chromeos_update_engine
diff --git a/excluder_chromeos.h b/excluder_chromeos.h
new file mode 100644
index 0000000..e4c1a52
--- /dev/null
+++ b/excluder_chromeos.h
@@ -0,0 +1,52 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_EXCLUDER_CHROMEOS_H_
+#define UPDATE_ENGINE_EXCLUDER_CHROMEOS_H_
+
+#include <string>
+
+#include "update_engine/common/excluder_interface.h"
+#include "update_engine/common/prefs_interface.h"
+
+namespace chromeos_update_engine {
+
+class SystemState;
+
+// The Chrome OS implementation of the |ExcluderInterface|.
+class ExcluderChromeOS : public ExcluderInterface {
+ public:
+ explicit ExcluderChromeOS(PrefsInterface* prefs);
+ ~ExcluderChromeOS() = default;
+
+ // |ExcluderInterface| overrides.
+ bool Exclude(const std::string& name) override;
+ bool IsExcluded(const std::string& name) override;
+ bool Reset() override;
+
+ // Not copyable or movable.
+ ExcluderChromeOS(const ExcluderChromeOS&) = delete;
+ ExcluderChromeOS& operator=(const ExcluderChromeOS&) = delete;
+ ExcluderChromeOS(ExcluderChromeOS&&) = delete;
+ ExcluderChromeOS& operator=(ExcluderChromeOS&&) = delete;
+
+ private:
+ PrefsInterface* prefs_;
+};
+
+} // namespace chromeos_update_engine
+
+#endif // UPDATE_ENGINE_EXCLUDER_CHROMEOS_H_
diff --git a/excluder_chromeos_unittest.cc b/excluder_chromeos_unittest.cc
new file mode 100644
index 0000000..a8c14b3
--- /dev/null
+++ b/excluder_chromeos_unittest.cc
@@ -0,0 +1,66 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/excluder_chromeos.h"
+
+#include <memory>
+
+#include <base/files/file_util.h>
+#include <base/files/scoped_temp_dir.h>
+#include <gtest/gtest.h>
+
+#include "update_engine/common/prefs.h"
+
+using std::string;
+using std::unique_ptr;
+
+namespace chromeos_update_engine {
+
+constexpr char kDummyHash[] =
+ "71ff43d76e2488e394e46872f5b066cc25e394c2c3e3790dd319517883b33db1";
+
+class ExcluderChromeOSTest : public ::testing::Test {
+ protected:
+ void SetUp() override {
+ ASSERT_TRUE(tempdir_.CreateUniqueTempDir());
+ ASSERT_TRUE(base::PathExists(tempdir_.GetPath()));
+ ASSERT_TRUE(prefs_.Init(tempdir_.GetPath()));
+ excluder_ = std::make_unique<ExcluderChromeOS>(&prefs_);
+ }
+
+ base::ScopedTempDir tempdir_;
+ Prefs prefs_;
+ unique_ptr<ExcluderChromeOS> excluder_;
+};
+
+TEST_F(ExcluderChromeOSTest, ExclusionCheck) {
+ EXPECT_FALSE(excluder_->IsExcluded(kDummyHash));
+ EXPECT_TRUE(excluder_->Exclude(kDummyHash));
+ EXPECT_TRUE(excluder_->IsExcluded(kDummyHash));
+}
+
+TEST_F(ExcluderChromeOSTest, ResetFlow) {
+ EXPECT_TRUE(excluder_->Exclude("abc"));
+ EXPECT_TRUE(excluder_->Exclude(kDummyHash));
+ EXPECT_TRUE(excluder_->IsExcluded("abc"));
+ EXPECT_TRUE(excluder_->IsExcluded(kDummyHash));
+
+ EXPECT_TRUE(excluder_->Reset());
+ EXPECT_FALSE(excluder_->IsExcluded("abc"));
+ EXPECT_FALSE(excluder_->IsExcluded(kDummyHash));
+}
+
+} // namespace chromeos_update_engine
diff --git a/hardware_android.cc b/hardware_android.cc
index 068468b..ac6cf16 100644
--- a/hardware_android.cc
+++ b/hardware_android.cc
@@ -121,6 +121,11 @@
return GetProperty(kPropBootBaseband, "");
}
+string HardwareAndroid::GetDeviceRequisition() const {
+ LOG(WARNING) << "STUB: Getting requisition is not supported.";
+ return "";
+}
+
int HardwareAndroid::GetMinKernelKeyVersion() const {
LOG(WARNING) << "STUB: No Kernel key version is available.";
return -1;
@@ -152,9 +157,10 @@
return 0;
}
-bool HardwareAndroid::SchedulePowerwash(bool is_rollback) {
+bool HardwareAndroid::SchedulePowerwash(bool save_rollback_data) {
LOG(INFO) << "Scheduling a powerwash to BCB.";
- LOG_IF(WARNING, is_rollback) << "is_rollback was true but isn't supported.";
+ LOG_IF(WARNING, save_rollback_data) << "save_rollback_data was true but "
+ << "isn't supported.";
string err;
if (!update_bootloader_message({"--wipe_data", "--reason=wipe_data_from_ota"},
&err)) {
diff --git a/hardware_android.h b/hardware_android.h
index 145a936..e0368f9 100644
--- a/hardware_android.h
+++ b/hardware_android.h
@@ -42,13 +42,14 @@
std::string GetHardwareClass() const override;
std::string GetFirmwareVersion() const override;
std::string GetECVersion() const override;
+ std::string GetDeviceRequisition() const override;
int GetMinKernelKeyVersion() const override;
int GetMinFirmwareKeyVersion() const override;
int GetMaxFirmwareKeyRollforward() const override;
bool SetMaxFirmwareKeyRollforward(int firmware_max_rollforward) override;
bool SetMaxKernelKeyRollforward(int kernel_max_rollforward) override;
int GetPowerwashCount() const override;
- bool SchedulePowerwash(bool is_rollback) override;
+ bool SchedulePowerwash(bool save_rollback_data) override;
bool CancelPowerwash() override;
bool GetNonVolatileDirectory(base::FilePath* path) const override;
bool GetPowerwashSafeDirectory(base::FilePath* path) const override;
diff --git a/hardware_chromeos.cc b/hardware_chromeos.cc
index a49375e..5ff1b29 100644
--- a/hardware_chromeos.cc
+++ b/hardware_chromeos.cc
@@ -61,6 +61,11 @@
const char kPowerwashMarkerFile[] =
"/mnt/stateful_partition/factory_install_reset";
+// The name of the marker file used to trigger a save of rollback data
+// during the next shutdown.
+const char kRollbackSaveMarkerFile[] =
+ "/mnt/stateful_partition/.save_rollback_data";
+
// The contents of the powerwash marker file for the non-rollback case.
const char kPowerwashCommand[] = "safe fast keepimg reason=update_engine\n";
@@ -76,6 +81,29 @@
const char* kActivePingKey = "first_active_omaha_ping_sent";
+const char* kOemRequisitionKey = "oem_device_requisition";
+
+// Gets a string value from the vpd for a given key using the `vpd_get_value`
+// shell command. Returns true on success.
+bool GetVpdValue(const string& key, string* result) {
+ int exit_code = 0;
+ string value, error;
+ vector<string> cmd = {"vpd_get_value", key};
+ if (!chromeos_update_engine::Subprocess::SynchronousExec(
+ cmd, &exit_code, &value, &error) ||
+ exit_code) {
+ LOG(ERROR) << "Failed to get vpd key for " << key
+ << " with exit code: " << exit_code << " and error: " << error;
+ return false;
+ } else if (!error.empty()) {
+ LOG(INFO) << "vpd_get_value succeeded but with following errors: " << error;
+ }
+
+ base::TrimWhitespaceASCII(value, base::TRIM_ALL, &value);
+ *result = value;
+ return true;
+}
+
} // namespace
namespace chromeos_update_engine {
@@ -172,19 +200,25 @@
}
string HardwareChromeOS::GetECVersion() const {
- string input_line;
+ string input_line, error;
int exit_code = 0;
vector<string> cmd = {"/usr/sbin/mosys", "-k", "ec", "info"};
- bool success = Subprocess::SynchronousExec(cmd, &exit_code, &input_line);
- if (!success || exit_code) {
- LOG(ERROR) << "Unable to read ec info from mosys (" << exit_code << ")";
+ if (!Subprocess::SynchronousExec(cmd, &exit_code, &input_line, &error) ||
+ exit_code != 0) {
+ LOG(ERROR) << "Unable to read EC info from mosys with exit code: "
+ << exit_code << " and error: " << error;
return "";
}
return utils::ParseECVersion(input_line);
}
+string HardwareChromeOS::GetDeviceRequisition() const {
+ string requisition;
+ return GetVpdValue(kOemRequisitionKey, &requisition) ? requisition : "";
+}
+
int HardwareChromeOS::GetMinKernelKeyVersion() const {
return VbGetSystemPropertyInt("tpm_kernver");
}
@@ -226,15 +260,25 @@
return powerwash_count;
}
-bool HardwareChromeOS::SchedulePowerwash(bool is_rollback) {
+bool HardwareChromeOS::SchedulePowerwash(bool save_rollback_data) {
+ if (save_rollback_data) {
+ if (!utils::WriteFile(kRollbackSaveMarkerFile, nullptr, 0)) {
+ PLOG(ERROR) << "Error in creating rollback save marker file: "
+ << kRollbackSaveMarkerFile << ". Rollback will not"
+ << " preserve any data.";
+ } else {
+ LOG(INFO) << "Rollback data save has been scheduled on next shutdown.";
+ }
+ }
+
const char* powerwash_command =
- is_rollback ? kRollbackPowerwashCommand : kPowerwashCommand;
+ save_rollback_data ? kRollbackPowerwashCommand : kPowerwashCommand;
bool result = utils::WriteFile(
kPowerwashMarkerFile, powerwash_command, strlen(powerwash_command));
if (result) {
LOG(INFO) << "Created " << kPowerwashMarkerFile
- << " to powerwash on next reboot (is_rollback=" << is_rollback
- << ")";
+ << " to powerwash on next reboot ("
+ << "save_rollback_data=" << save_rollback_data << ")";
} else {
PLOG(ERROR) << "Error in creating powerwash marker file: "
<< kPowerwashMarkerFile;
@@ -254,6 +298,11 @@
<< kPowerwashMarkerFile;
}
+ // Delete the rollback save marker file if it existed.
+ if (!base::DeleteFile(base::FilePath(kRollbackSaveMarkerFile), false)) {
+ PLOG(ERROR) << "Could not remove rollback save marker";
+ }
+
return result;
}
@@ -291,17 +340,11 @@
}
bool HardwareChromeOS::GetFirstActiveOmahaPingSent() const {
- int exit_code = 0;
string active_ping_str;
- vector<string> cmd = {"vpd_get_value", kActivePingKey};
- if (!Subprocess::SynchronousExec(cmd, &exit_code, &active_ping_str) ||
- exit_code) {
- LOG(ERROR) << "Failed to get vpd key for " << kActivePingKey
- << " with exit code: " << exit_code;
+ if (!GetVpdValue(kActivePingKey, &active_ping_str)) {
return false;
}
- base::TrimWhitespaceASCII(active_ping_str, base::TRIM_ALL, &active_ping_str);
int active_ping;
if (active_ping_str.empty() ||
!base::StringToInt(active_ping_str, &active_ping)) {
@@ -313,22 +356,28 @@
bool HardwareChromeOS::SetFirstActiveOmahaPingSent() {
int exit_code = 0;
- string output;
+ string output, error;
vector<string> vpd_set_cmd = {
"vpd", "-i", "RW_VPD", "-s", string(kActivePingKey) + "=1"};
- if (!Subprocess::SynchronousExec(vpd_set_cmd, &exit_code, &output) ||
+ if (!Subprocess::SynchronousExec(vpd_set_cmd, &exit_code, &output, &error) ||
exit_code) {
LOG(ERROR) << "Failed to set vpd key for " << kActivePingKey
- << " with exit code: " << exit_code << " with error: " << output;
+ << " with exit code: " << exit_code << " with output: " << output
+ << " and error: " << error;
return false;
+ } else if (!error.empty()) {
+ LOG(INFO) << "vpd succeeded but with error logs: " << error;
}
vector<string> vpd_dump_cmd = {"dump_vpd_log", "--force"};
- if (!Subprocess::SynchronousExec(vpd_dump_cmd, &exit_code, &output) ||
+ if (!Subprocess::SynchronousExec(vpd_dump_cmd, &exit_code, &output, &error) ||
exit_code) {
LOG(ERROR) << "Failed to cache " << kActivePingKey << " using dump_vpd_log"
- << " with exit code: " << exit_code << " with error: " << output;
+ << " with exit code: " << exit_code << " with output: " << output
+ << " and error: " << error;
return false;
+ } else if (!error.empty()) {
+ LOG(INFO) << "dump_vpd_log succeeded but with error logs: " << error;
}
return true;
}
diff --git a/hardware_chromeos.h b/hardware_chromeos.h
index 2bea989..e14ae9a 100644
--- a/hardware_chromeos.h
+++ b/hardware_chromeos.h
@@ -47,13 +47,14 @@
std::string GetHardwareClass() const override;
std::string GetFirmwareVersion() const override;
std::string GetECVersion() const override;
+ std::string GetDeviceRequisition() const override;
int GetMinKernelKeyVersion() const override;
int GetMinFirmwareKeyVersion() const override;
int GetMaxFirmwareKeyRollforward() const override;
bool SetMaxFirmwareKeyRollforward(int firmware_max_rollforward) override;
bool SetMaxKernelKeyRollforward(int kernel_max_rollforward) override;
int GetPowerwashCount() const override;
- bool SchedulePowerwash(bool is_rollback) override;
+ bool SchedulePowerwash(bool save_rollback_data) override;
bool CancelPowerwash() override;
bool GetNonVolatileDirectory(base::FilePath* path) const override;
bool GetPowerwashSafeDirectory(base::FilePath* path) const override;
diff --git a/init/update-engine.conf b/init/update-engine.conf
index d3681db..ca54c4a 100644
--- a/init/update-engine.conf
+++ b/init/update-engine.conf
@@ -25,6 +25,7 @@
# The default is 10 failures every 5 seconds, but even if we crash early, it is
# hard to catch that. So here we set the crash rate as 10 failures every 20
# seconds which will include the default and more.
+respawn
respawn limit 10 20
expect fork
diff --git a/libcurl_http_fetcher.cc b/libcurl_http_fetcher.cc
index ce3475d..7c53a2d 100644
--- a/libcurl_http_fetcher.cc
+++ b/libcurl_http_fetcher.cc
@@ -16,6 +16,8 @@
#include "update_engine/libcurl_http_fetcher.h"
+#include <netinet/in.h>
+#include <resolv.h>
#include <sys/types.h>
#include <unistd.h>
@@ -26,6 +28,7 @@
#include <base/format_macros.h>
#include <base/location.h>
#include <base/logging.h>
+#include <base/strings/string_split.h>
#include <base/strings/string_util.h>
#include <base/strings/stringprintf.h>
@@ -75,8 +78,10 @@
#ifdef __ANDROID__
qtaguid_untagSocket(item);
#endif // __ANDROID__
+
LibcurlHttpFetcher* fetcher = static_cast<LibcurlHttpFetcher*>(clientp);
// Stop watching the socket before closing it.
+#ifdef __ANDROID__
for (size_t t = 0; t < arraysize(fetcher->fd_task_maps_); ++t) {
const auto fd_task_pair = fetcher->fd_task_maps_[t].find(item);
if (fd_task_pair != fetcher->fd_task_maps_[t].end()) {
@@ -88,6 +93,11 @@
fetcher->fd_task_maps_[t].erase(item);
}
}
+#else
+ for (size_t t = 0; t < base::size(fetcher->fd_controller_maps_); ++t) {
+ fetcher->fd_controller_maps_[t].erase(item);
+ }
+#endif // __ANDROID__
// Documentation for this callback says to return 0 on success or 1 on error.
if (!IGNORE_EINTR(close(item)))
@@ -269,11 +279,11 @@
} else if (base::StartsWith(
url_, "https://", base::CompareCase::INSENSITIVE_ASCII)) {
SetCurlOptionsForHttps();
-#if !USE_OMAHA
+#ifdef __ANDROID__
} else if (base::StartsWith(
url_, "file://", base::CompareCase::INSENSITIVE_ASCII)) {
SetCurlOptionsForFile();
-#endif
+#endif // __ANDROID__
} else {
LOG(ERROR) << "Received invalid URI: " << url_;
// Lock down to no protocol supported for the transfer.
@@ -305,6 +315,7 @@
LOG(INFO) << "Setting up curl options for HTTPS";
CHECK_EQ(curl_easy_setopt(curl_handle_, CURLOPT_SSL_VERIFYPEER, 1), CURLE_OK);
CHECK_EQ(curl_easy_setopt(curl_handle_, CURLOPT_SSL_VERIFYHOST, 2), CURLE_OK);
+ CHECK_EQ(curl_easy_setopt(curl_handle_, CURLOPT_CAINFO, nullptr), CURLE_OK);
CHECK_EQ(curl_easy_setopt(
curl_handle_, CURLOPT_CAPATH, constants::kCACertificatesPath),
CURLE_OK);
@@ -391,6 +402,37 @@
extra_headers_[base::ToLowerASCII(header_name)] = header_line;
}
+// Inputs: header_name, header_value
+// Example:
+// extra_headers_ = { {"foo":"foo: 123"}, {"bar":"bar:"} }
+// string tmp = "gibberish";
+// Case 1:
+// GetHeader("foo", &tmp) -> tmp = "123", return true.
+// Case 2:
+// GetHeader("bar", &tmp) -> tmp = "", return true.
+// Case 3:
+// GetHeader("moo", &tmp) -> tmp = "", return false.
+bool LibcurlHttpFetcher::GetHeader(const string& header_name,
+ string* header_value) const {
+ // Initially clear |header_value| to handle both success and failures without
+ // leaving |header_value| in an unclear state.
+ header_value->clear();
+ auto header_key = base::ToLowerASCII(header_name);
+ auto header_line_itr = extra_headers_.find(header_key);
+ // If the |header_name| was never set, indicate so by returning false.
+ if (header_line_itr == extra_headers_.end())
+ return false;
+ // From |SetHeader()| the check for |header_name| to not include ":" is
+ // verified, so finding the first index of ":" is a safe operation.
+ auto header_line = header_line_itr->second;
+ *header_value = header_line.substr(header_line.find(':') + 1);
+ // The following is necessary to remove the leading ' ' before the header
+ // value that was placed only if |header_value| passed to |SetHeader()| was
+ // a non-empty string.
+ header_value->erase(0, 1);
+ return true;
+}
+
void LibcurlHttpFetcher::CurlPerformOnce() {
CHECK(transfer_in_progress_);
int running_handles = 0;
@@ -406,6 +448,18 @@
}
}
+ // When retcode is not |CURLM_OK| at this point, libcurl has an internal error
+ // that it is less likely to recover from (libcurl bug, out-of-memory, etc.).
+ // In case of an update check, we send UMA metrics and log the error.
+ if (is_update_check_ &&
+ (retcode == CURLM_OUT_OF_MEMORY || retcode == CURLM_INTERNAL_ERROR)) {
+ auxiliary_error_code_ = ErrorCode::kInternalLibCurlError;
+ LOG(ERROR) << "curl_multi_perform is in an unrecoverable error condition: "
+ << retcode;
+ } else if (retcode != CURLM_OK) {
+ LOG(ERROR) << "curl_multi_perform returns error: " << retcode;
+ }
+
// If the transfer completes while paused, we should ignore the failure once
// the fetcher is unpaused.
if (running_handles == 0 && transfer_paused_ && !ignore_failure_) {
@@ -428,13 +482,35 @@
if (http_response_code_) {
LOG(INFO) << "HTTP response code: " << http_response_code_;
no_network_retry_count_ = 0;
+ unresolved_host_state_machine_.UpdateState(false);
} else {
LOG(ERROR) << "Unable to get http response code.";
+ CURLcode curl_code = GetCurlCode();
+ LOG(ERROR) << "Return code for the transfer: " << curl_code;
+ if (curl_code == CURLE_COULDNT_RESOLVE_HOST) {
+ LOG(ERROR) << "libcurl can not resolve host.";
+ unresolved_host_state_machine_.UpdateState(true);
+ auxiliary_error_code_ = ErrorCode::kUnresolvedHostError;
+ }
}
// we're done!
CleanUp();
+ if (unresolved_host_state_machine_.GetState() ==
+ UnresolvedHostStateMachine::State::kRetry) {
+ // Based on
+ // https://curl.haxx.se/docs/todo.html#updated_DNS_server_while_running,
+ // update_engine process should call res_init() and unconditionally retry.
+ res_init();
+ no_network_max_retries_++;
+ LOG(INFO) << "Will retry after reloading resolv.conf because last attempt "
+ "failed to resolve host.";
+ } else if (unresolved_host_state_machine_.GetState() ==
+ UnresolvedHostStateMachine::State::kRetriedSuccess) {
+ auxiliary_error_code_ = ErrorCode::kUnresolvedHostRecovered;
+ }
+
// TODO(petkov): This temporary code tries to deal with the case where the
// update engine performs an update check while the network is not ready
// (e.g., right after resume). Longer term, we should check if the network
@@ -615,6 +691,7 @@
// We should iterate through all file descriptors up to libcurl's fd_max or
// the highest one we're tracking, whichever is larger.
+#ifdef __ANDROID__
for (size_t t = 0; t < arraysize(fd_task_maps_); ++t) {
if (!fd_task_maps_[t].empty())
fd_max = max(fd_max, fd_task_maps_[t].rbegin()->first);
@@ -670,6 +747,63 @@
}
}
}
+#else
+ for (size_t t = 0; t < base::size(fd_controller_maps_); ++t) {
+ if (!fd_controller_maps_[t].empty())
+ fd_max = max(fd_max, fd_controller_maps_[t].rbegin()->first);
+ }
+
+ // For each fd, if we're not tracking it, track it. If we are tracking it, but
+ // libcurl doesn't care about it anymore, stop tracking it. After this loop,
+ // there should be exactly as many tasks scheduled in
+ // fd_controller_maps_[0|1] as there are read/write fds that we're tracking.
+ for (int fd = 0; fd <= fd_max; ++fd) {
+ // Note that fd_exc is unused in the current version of libcurl so is_exc
+ // should always be false.
+ bool is_exc = FD_ISSET(fd, &fd_exc) != 0;
+ bool must_track[2] = {
+ is_exc || (FD_ISSET(fd, &fd_read) != 0), // track 0 -- read
+ is_exc || (FD_ISSET(fd, &fd_write) != 0) // track 1 -- write
+ };
+
+ for (size_t t = 0; t < base::size(fd_controller_maps_); ++t) {
+ bool tracked =
+ fd_controller_maps_[t].find(fd) != fd_controller_maps_[t].end();
+
+ if (!must_track[t]) {
+ // If we have an outstanding io_channel, remove it.
+ fd_controller_maps_[t].erase(fd);
+ continue;
+ }
+
+ // If we are already tracking this fd, continue -- nothing to do.
+ if (tracked)
+ continue;
+
+ // Track a new fd.
+ switch (t) {
+ case 0: // Read
+ fd_controller_maps_[t][fd] =
+ base::FileDescriptorWatcher::WatchReadable(
+ fd,
+ base::BindRepeating(&LibcurlHttpFetcher::CurlPerformOnce,
+ base::Unretained(this)));
+ break;
+ case 1: // Write
+ fd_controller_maps_[t][fd] =
+ base::FileDescriptorWatcher::WatchWritable(
+ fd,
+ base::BindRepeating(&LibcurlHttpFetcher::CurlPerformOnce,
+ base::Unretained(this)));
+ }
+ static int io_counter = 0;
+ io_counter++;
+ if (io_counter % 50 == 0) {
+ LOG(INFO) << "io_counter = " << io_counter;
+ }
+ }
+ }
+#endif // __ANDROID__
// Set up a timeout callback for libcurl.
if (timeout_id_ == MessageLoop::kTaskIdNull) {
@@ -714,6 +848,7 @@
MessageLoop::current()->CancelTask(timeout_id_);
timeout_id_ = MessageLoop::kTaskIdNull;
+#ifdef __ANDROID__
for (size_t t = 0; t < arraysize(fd_task_maps_); ++t) {
for (const auto& fd_taks_pair : fd_task_maps_[t]) {
if (!MessageLoop::current()->CancelTask(fd_taks_pair.second)) {
@@ -724,6 +859,11 @@
}
fd_task_maps_[t].clear();
}
+#else
+ for (size_t t = 0; t < base::size(fd_controller_maps_); ++t) {
+ fd_controller_maps_[t].clear();
+ }
+#endif // __ANDROID__
if (curl_http_headers_) {
curl_slist_free_all(curl_http_headers_);
@@ -755,6 +895,66 @@
CURLINFO_RESPONSE_CODE,
&http_response_code) == CURLE_OK) {
http_response_code_ = static_cast<int>(http_response_code);
+ } else {
+ LOG(ERROR) << "Unable to get http response code from curl_easy_getinfo";
+ }
+}
+
+CURLcode LibcurlHttpFetcher::GetCurlCode() {
+ CURLcode curl_code = CURLE_OK;
+ while (true) {
+ // Repeated calls to |curl_multi_info_read| will return a new struct each
+ // time, until a NULL is returned as a signal that there is no more to get
+ // at this point.
+ int msgs_in_queue;
+ CURLMsg* curl_msg =
+ curl_multi_info_read(curl_multi_handle_, &msgs_in_queue);
+ if (curl_msg == nullptr)
+ break;
+ // When |curl_msg| is |CURLMSG_DONE|, a transfer of an easy handle is done,
+ // and then data contains the return code for this transfer.
+ if (curl_msg->msg == CURLMSG_DONE) {
+ // Make sure |curl_multi_handle_| has one and only one easy handle
+ // |curl_handle_|.
+ CHECK_EQ(curl_handle_, curl_msg->easy_handle);
+ // Transfer return code reference:
+ // https://curl.haxx.se/libcurl/c/libcurl-errors.html
+ curl_code = curl_msg->data.result;
+ }
+ }
+
+ // Gets connection error if exists.
+ long connect_error = 0; // NOLINT(runtime/int) - curl needs long.
+ CURLcode res =
+ curl_easy_getinfo(curl_handle_, CURLINFO_OS_ERRNO, &connect_error);
+ if (res == CURLE_OK && connect_error) {
+ LOG(ERROR) << "Connect error code from the OS: " << connect_error;
+ }
+
+ return curl_code;
+}
+
+void UnresolvedHostStateMachine::UpdateState(bool failed_to_resolve_host) {
+ switch (state_) {
+ case State::kInit:
+ if (failed_to_resolve_host) {
+ state_ = State::kRetry;
+ }
+ break;
+ case State::kRetry:
+ if (failed_to_resolve_host) {
+ state_ = State::kNotRetry;
+ } else {
+ state_ = State::kRetriedSuccess;
+ }
+ break;
+ case State::kNotRetry:
+ break;
+ case State::kRetriedSuccess:
+ break;
+ default:
+ NOTREACHED();
+ break;
}
}
diff --git a/libcurl_http_fetcher.h b/libcurl_http_fetcher.h
index 25a2df3..4854f40 100644
--- a/libcurl_http_fetcher.h
+++ b/libcurl_http_fetcher.h
@@ -24,6 +24,7 @@
#include <curl/curl.h>
+#include <base/files/file_descriptor_watcher_posix.h>
#include <base/logging.h>
#include <base/macros.h>
#include <brillo/message_loops/message_loop.h>
@@ -37,6 +38,48 @@
namespace chromeos_update_engine {
+// |UnresolvedHostStateMachine| is a representation of internal state machine of
+// |LibcurlHttpFetcher|.
+class UnresolvedHostStateMachine {
+ public:
+ UnresolvedHostStateMachine() = default;
+ enum class State {
+ kInit = 0,
+ kRetry = 1,
+ kRetriedSuccess = 2,
+ kNotRetry = 3,
+ };
+
+ State GetState() { return state_; }
+
+ // Updates the following internal state machine:
+ //
+ // |kInit|
+ // |
+ // |
+ // \/
+ // (Try, host Unresolved)
+ // |
+ // |
+ // \/
+ // |kRetry| --> (Retry, host resolved)
+ // | |
+ // | |
+ // \/ \/
+ // (Retry, host Unresolved) |kRetriedSuccess|
+ // |
+ // |
+ // \/
+ // |kNotRetry|
+ //
+ void UpdateState(bool failed_to_resolve_host);
+
+ private:
+ State state_ = {State::kInit};
+
+ DISALLOW_COPY_AND_ASSIGN(UnresolvedHostStateMachine);
+};
+
class LibcurlHttpFetcher : public HttpFetcher {
public:
LibcurlHttpFetcher(ProxyResolver* proxy_resolver,
@@ -61,6 +104,9 @@
void SetHeader(const std::string& header_name,
const std::string& header_value) override;
+ bool GetHeader(const std::string& header_name,
+ std::string* header_value) const override;
+
// Suspend the transfer by calling curl_easy_pause(CURLPAUSE_ALL).
void Pause() override;
@@ -85,6 +131,8 @@
no_network_max_retries_ = retries;
}
+ int get_no_network_max_retries() { return no_network_max_retries_; }
+
void set_server_to_check(ServerToCheck server_to_check) {
server_to_check_ = server_to_check;
}
@@ -106,7 +154,13 @@
max_retry_count_ = max_retry_count;
}
+ void set_is_update_check(bool is_update_check) {
+ is_update_check_ = is_update_check;
+ }
+
private:
+ FRIEND_TEST(LibcurlHttpFetcherTest, HostResolvedTest);
+
// libcurl's CURLOPT_CLOSESOCKETFUNCTION callback function. Called when
// closing a socket created with the CURLOPT_OPENSOCKETFUNCTION callback.
static int LibcurlCloseSocketCallback(void* clientp, curl_socket_t item);
@@ -116,7 +170,10 @@
void ProxiesResolved();
// Asks libcurl for the http response code and stores it in the object.
- void GetHttpResponseCode();
+ virtual void GetHttpResponseCode();
+
+ // Returns the last |CURLcode|.
+ CURLcode GetCurlCode();
// Checks whether stored HTTP response is within the success range.
inline bool IsHttpResponseSuccess() {
@@ -161,7 +218,7 @@
}
// Cleans up the following if they are non-null:
- // curl(m) handles, fd_task_maps_, timeout_id_.
+ // curl(m) handles, fd_controller_maps_(fd_task_maps_), timeout_id_.
void CleanUp();
// Force terminate the transfer. This will invoke the delegate's (if any)
@@ -198,7 +255,12 @@
// the message loop. libcurl may open/close descriptors and switch their
// directions so maintain two separate lists so that watch conditions can be
// set appropriately.
+#ifdef __ANDROID__
std::map<int, brillo::MessageLoop::TaskId> fd_task_maps_[2];
+#else
+ std::map<int, std::unique_ptr<base::FileDescriptorWatcher::Controller>>
+ fd_controller_maps_[2];
+#endif // __ANDROID__
// The TaskId of the timer we're waiting on. kTaskIdNull if we are not waiting
// on it.
@@ -265,6 +327,12 @@
// ServerToCheck::kNone.
ServerToCheck server_to_check_{ServerToCheck::kNone};
+ // True if this object is for update check.
+ bool is_update_check_{false};
+
+ // Internal state machine.
+ UnresolvedHostStateMachine unresolved_host_state_machine_;
+
int low_speed_limit_bps_{kDownloadLowSpeedLimitBps};
int low_speed_time_seconds_{kDownloadLowSpeedTimeSeconds};
int connect_timeout_seconds_{kDownloadConnectTimeoutSeconds};
diff --git a/libcurl_http_fetcher_unittest.cc b/libcurl_http_fetcher_unittest.cc
new file mode 100644
index 0000000..8064b99
--- /dev/null
+++ b/libcurl_http_fetcher_unittest.cc
@@ -0,0 +1,212 @@
+//
+// Copyright (C) 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/libcurl_http_fetcher.h"
+
+#include <string>
+
+#include <brillo/message_loops/fake_message_loop.h>
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+
+#include "update_engine/common/fake_hardware.h"
+#include "update_engine/common/mock_proxy_resolver.h"
+#include "update_engine/mock_libcurl_http_fetcher.h"
+
+using std::string;
+
+namespace chromeos_update_engine {
+
+namespace {
+constexpr char kHeaderName[] = "X-Goog-Test-Header";
+}
+
+class LibcurlHttpFetcherTest : public ::testing::Test {
+ protected:
+ void SetUp() override {
+ loop_.SetAsCurrent();
+ fake_hardware_.SetIsOfficialBuild(true);
+ fake_hardware_.SetIsOOBEEnabled(false);
+ }
+
+ brillo::FakeMessageLoop loop_{nullptr};
+ FakeHardware fake_hardware_;
+ MockLibcurlHttpFetcher libcurl_fetcher_{nullptr, &fake_hardware_};
+ UnresolvedHostStateMachine state_machine_;
+};
+
+TEST_F(LibcurlHttpFetcherTest, GetEmptyHeaderValueTest) {
+ const string header_value = "";
+ string actual_header_value;
+ libcurl_fetcher_.SetHeader(kHeaderName, header_value);
+ EXPECT_TRUE(libcurl_fetcher_.GetHeader(kHeaderName, &actual_header_value));
+ EXPECT_EQ("", actual_header_value);
+}
+
+TEST_F(LibcurlHttpFetcherTest, GetHeaderTest) {
+ const string header_value = "This-is-value 123";
+ string actual_header_value;
+ libcurl_fetcher_.SetHeader(kHeaderName, header_value);
+ EXPECT_TRUE(libcurl_fetcher_.GetHeader(kHeaderName, &actual_header_value));
+ EXPECT_EQ(header_value, actual_header_value);
+}
+
+TEST_F(LibcurlHttpFetcherTest, GetNonExistentHeaderValueTest) {
+ string actual_header_value;
+ // Skip |SetHeader()| call.
+ EXPECT_FALSE(libcurl_fetcher_.GetHeader(kHeaderName, &actual_header_value));
+ // Even after a failed |GetHeader()|, enforce that the passed pointer to
+ // modifiable string was cleared to be empty.
+ EXPECT_EQ("", actual_header_value);
+}
+
+TEST_F(LibcurlHttpFetcherTest, GetHeaderEdgeCaseTest) {
+ const string header_value = "\a\b\t\v\f\r\\ edge:-case: \a\b\t\v\f\r\\";
+ string actual_header_value;
+ libcurl_fetcher_.SetHeader(kHeaderName, header_value);
+ EXPECT_TRUE(libcurl_fetcher_.GetHeader(kHeaderName, &actual_header_value));
+ EXPECT_EQ(header_value, actual_header_value);
+}
+
+TEST_F(LibcurlHttpFetcherTest, InvalidURLTest) {
+ int no_network_max_retries = 1;
+ libcurl_fetcher_.set_no_network_max_retries(no_network_max_retries);
+
+ libcurl_fetcher_.BeginTransfer("not-a-URL");
+ while (loop_.PendingTasks()) {
+ loop_.RunOnce(true);
+ }
+
+ EXPECT_EQ(libcurl_fetcher_.get_no_network_max_retries(),
+ no_network_max_retries);
+}
+
+#ifdef __ANDROID__
+TEST_F(LibcurlHttpFetcherTest, CouldntResolveHostTest) {
+ int no_network_max_retries = 1;
+ libcurl_fetcher_.set_no_network_max_retries(no_network_max_retries);
+
+ // This test actually sends request to internet but according to
+ // https://tools.ietf.org/html/rfc2606#section-2, .invalid domain names are
+ // reserved and sure to be invalid. Ideally we should mock libcurl or
+ // reorganize LibcurlHttpFetcher so the part that sends request can be mocked
+ // easily.
+ // TODO(xiaochu) Refactor LibcurlHttpFetcher (and its related classes) so it's
+ // easier to mock the part that depends on internet connectivity.
+ libcurl_fetcher_.BeginTransfer("https://An-uNres0lvable-uRl.invalid");
+ while (loop_.PendingTasks()) {
+ loop_.RunOnce(true);
+ }
+
+ // If libcurl fails to resolve the name, we call res_init() to reload
+ // resolv.conf and retry exactly once more. See crbug.com/982813 for details.
+ EXPECT_EQ(libcurl_fetcher_.get_no_network_max_retries(),
+ no_network_max_retries + 1);
+}
+#else
+TEST_F(LibcurlHttpFetcherTest, CouldNotResolveHostTest) {
+ int no_network_max_retries = 1;
+ libcurl_fetcher_.set_no_network_max_retries(no_network_max_retries);
+
+ libcurl_fetcher_.BeginTransfer("https://An-uNres0lvable-uRl.invalid");
+
+ // The first time it can't resolve.
+ loop_.RunOnce(true);
+ EXPECT_EQ(libcurl_fetcher_.GetAuxiliaryErrorCode(),
+ ErrorCode::kUnresolvedHostError);
+
+ while (loop_.PendingTasks()) {
+ loop_.RunOnce(true);
+ }
+ // The auxiliary error code should not have been changed.
+ EXPECT_EQ(libcurl_fetcher_.GetAuxiliaryErrorCode(),
+ ErrorCode::kUnresolvedHostError);
+
+ // If libcurl fails to resolve the name, we call res_init() to reload
+ // resolv.conf and retry exactly once more. See crbug.com/982813 for details.
+ EXPECT_EQ(libcurl_fetcher_.get_no_network_max_retries(),
+ no_network_max_retries + 1);
+}
+
+TEST_F(LibcurlHttpFetcherTest, HostResolvedTest) {
+ int no_network_max_retries = 2;
+ libcurl_fetcher_.set_no_network_max_retries(no_network_max_retries);
+
+ // This test actually sends request to internet but according to
+ // https://tools.ietf.org/html/rfc2606#section-2, .invalid domain names are
+ // reserved and sure to be invalid. Ideally we should mock libcurl or
+ // reorganize LibcurlHttpFetcher so the part that sends request can be mocked
+ // easily.
+ // TODO(xiaochu) Refactor LibcurlHttpFetcher (and its related classes) so it's
+ // easier to mock the part that depends on internet connectivity.
+ libcurl_fetcher_.BeginTransfer("https://An-uNres0lvable-uRl.invalid");
+
+ // The first time it can't resolve.
+ loop_.RunOnce(true);
+ EXPECT_EQ(libcurl_fetcher_.GetAuxiliaryErrorCode(),
+ ErrorCode::kUnresolvedHostError);
+
+ // The second time, it will resolve, with error code 200 but we set the
+ // download size be smaller than the transfer size so it will retry again.
+ EXPECT_CALL(libcurl_fetcher_, GetHttpResponseCode())
+ .WillOnce(testing::Invoke(
+ [this]() { libcurl_fetcher_.http_response_code_ = 200; }))
+ .WillRepeatedly(testing::Invoke(
+ [this]() { libcurl_fetcher_.http_response_code_ = 0; }));
+ libcurl_fetcher_.transfer_size_ = 10;
+
+ // This time the host is resolved. But after that again we can't resolve
+ // anymore (See above).
+ loop_.RunOnce(true);
+ EXPECT_EQ(libcurl_fetcher_.GetAuxiliaryErrorCode(),
+ ErrorCode::kUnresolvedHostRecovered);
+
+ while (loop_.PendingTasks()) {
+ loop_.RunOnce(true);
+ }
+ // The auxiliary error code should not have been changed.
+ EXPECT_EQ(libcurl_fetcher_.GetAuxiliaryErrorCode(),
+ ErrorCode::kUnresolvedHostRecovered);
+
+ // If libcurl fails to resolve the name, we call res_init() to reload
+ // resolv.conf and retry exactly once more. See crbug.com/982813 for details.
+ EXPECT_EQ(libcurl_fetcher_.get_no_network_max_retries(),
+ no_network_max_retries + 1);
+}
+#endif // __ANDROID__
+
+TEST_F(LibcurlHttpFetcherTest, HttpFetcherStateMachineRetryFailedTest) {
+ state_machine_.UpdateState(true);
+ state_machine_.UpdateState(true);
+ EXPECT_EQ(state_machine_.GetState(),
+ UnresolvedHostStateMachine::State::kNotRetry);
+}
+
+TEST_F(LibcurlHttpFetcherTest, HttpFetcherStateMachineRetrySucceedTest) {
+ state_machine_.UpdateState(true);
+ state_machine_.UpdateState(false);
+ EXPECT_EQ(state_machine_.GetState(),
+ UnresolvedHostStateMachine::State::kRetriedSuccess);
+}
+
+TEST_F(LibcurlHttpFetcherTest, HttpFetcherStateMachineNoRetryTest) {
+ state_machine_.UpdateState(false);
+ state_machine_.UpdateState(false);
+ EXPECT_EQ(state_machine_.GetState(),
+ UnresolvedHostStateMachine::State::kInit);
+}
+
+} // namespace chromeos_update_engine
diff --git a/main.cc b/main.cc
index 4377a15..ceb5b56 100644
--- a/main.cc
+++ b/main.cc
@@ -23,9 +23,10 @@
#include <base/logging.h>
#include <brillo/flag_helper.h>
+#include "update_engine/common/subprocess.h"
#include "update_engine/common/terminator.h"
#include "update_engine/common/utils.h"
-#include "update_engine/daemon.h"
+#include "update_engine/daemon_base.h"
#include "update_engine/logging.h"
using std::string;
@@ -63,8 +64,8 @@
// Done _after_ log file creation.
umask(S_IRWXG | S_IRWXO);
- chromeos_update_engine::UpdateEngineDaemon update_engine_daemon;
- int exit_code = update_engine_daemon.Run();
+ auto daemon = chromeos_update_engine::DaemonBase::CreateInstance();
+ int exit_code = daemon->Run();
chromeos_update_engine::Subprocess::Get().FlushBufferedLogsAtExit();
diff --git a/metrics_constants.h b/metrics_constants.h
index 137143a..db21d90 100644
--- a/metrics_constants.h
+++ b/metrics_constants.h
@@ -119,12 +119,12 @@
kUnknown = 0, // Unknown.
kEthernet = 1, // Ethernet.
kWifi = 2, // Wireless.
- kWimax = 3, // WiMax.
- kBluetooth = 4, // Bluetooth.
kCellular = 5, // Cellular.
kTetheredEthernet = 6, // Tethered (Ethernet).
kTetheredWifi = 7, // Tethered (Wifi).
kDisconnected = 8, // Disconnected.
+ // deprecated: kWimax = 3,
+ // deprecated: kBluetooth = 4,
kNumConstants,
kUnset = -1
diff --git a/metrics_reporter_android.h b/metrics_reporter_android.h
index e320c12..7770619 100644
--- a/metrics_reporter_android.h
+++ b/metrics_reporter_android.h
@@ -31,8 +31,6 @@
~MetricsReporterAndroid() override = default;
- void Initialize() override {}
-
void ReportRollbackMetrics(metrics::RollbackResult result) override {}
void ReportEnterpriseRollbackMetrics(
diff --git a/metrics_reporter_interface.h b/metrics_reporter_interface.h
index fce8bfd..180a680 100644
--- a/metrics_reporter_interface.h
+++ b/metrics_reporter_interface.h
@@ -42,8 +42,6 @@
public:
virtual ~MetricsReporterInterface() = default;
- virtual void Initialize() = 0;
-
// Helper function to report metrics related to user-initiated rollback. The
// following metrics are reported:
//
diff --git a/metrics_reporter_omaha.cc b/metrics_reporter_omaha.cc
index 14819d8..fb4e4ce 100644
--- a/metrics_reporter_omaha.cc
+++ b/metrics_reporter_omaha.cc
@@ -144,10 +144,6 @@
MetricsReporterOmaha::MetricsReporterOmaha()
: metrics_lib_(new MetricsLibrary()) {}
-void MetricsReporterOmaha::Initialize() {
- metrics_lib_->Init();
-}
-
void MetricsReporterOmaha::ReportDailyMetrics(base::TimeDelta os_age) {
string metric = metrics::kMetricDailyOSAgeDays;
LOG(INFO) << "Uploading " << utils::FormatTimeDelta(os_age) << " for metric "
diff --git a/metrics_reporter_omaha.h b/metrics_reporter_omaha.h
index 5680dec..c84ac1e 100644
--- a/metrics_reporter_omaha.h
+++ b/metrics_reporter_omaha.h
@@ -108,8 +108,6 @@
~MetricsReporterOmaha() override = default;
- void Initialize() override;
-
void ReportRollbackMetrics(metrics::RollbackResult result) override;
void ReportEnterpriseRollbackMetrics(
diff --git a/metrics_reporter_stub.h b/metrics_reporter_stub.h
index 25660b5..0cfeea0 100644
--- a/metrics_reporter_stub.h
+++ b/metrics_reporter_stub.h
@@ -31,8 +31,6 @@
~MetricsReporterStub() override = default;
- void Initialize() override {}
-
void ReportRollbackMetrics(metrics::RollbackResult result) override {}
void ReportEnterpriseRollbackMetrics(
diff --git a/metrics_utils.cc b/metrics_utils.cc
index 9abc3ef..da3a2c3 100644
--- a/metrics_utils.cc
+++ b/metrics_utils.cc
@@ -280,12 +280,6 @@
else
return metrics::ConnectionType::kWifi;
- case ConnectionType::kWimax:
- return metrics::ConnectionType::kWimax;
-
- case ConnectionType::kBluetooth:
- return metrics::ConnectionType::kBluetooth;
-
case ConnectionType::kCellular:
return metrics::ConnectionType::kCellular;
}
diff --git a/metrics_utils_unittest.cc b/metrics_utils_unittest.cc
index e7c4c26..6ea996f 100644
--- a/metrics_utils_unittest.cc
+++ b/metrics_utils_unittest.cc
@@ -41,12 +41,6 @@
EXPECT_EQ(
metrics::ConnectionType::kWifi,
GetConnectionType(ConnectionType::kWifi, ConnectionTethering::kUnknown));
- EXPECT_EQ(
- metrics::ConnectionType::kWimax,
- GetConnectionType(ConnectionType::kWimax, ConnectionTethering::kUnknown));
- EXPECT_EQ(metrics::ConnectionType::kBluetooth,
- GetConnectionType(ConnectionType::kBluetooth,
- ConnectionTethering::kUnknown));
EXPECT_EQ(metrics::ConnectionType::kCellular,
GetConnectionType(ConnectionType::kCellular,
ConnectionTethering::kUnknown));
diff --git a/mock_libcurl_http_fetcher.h b/mock_libcurl_http_fetcher.h
new file mode 100644
index 0000000..a8ef0f4
--- /dev/null
+++ b/mock_libcurl_http_fetcher.h
@@ -0,0 +1,37 @@
+//
+// Copyright (C) 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_MOCK_LIBCURL_HTTP_FETCHER_H_
+#define UPDATE_ENGINE_MOCK_LIBCURL_HTTP_FETCHER_H_
+
+#include <gmock/gmock.h>
+
+#include "update_engine/libcurl_http_fetcher.h"
+
+namespace chromeos_update_engine {
+
+class MockLibcurlHttpFetcher : public LibcurlHttpFetcher {
+ public:
+ MockLibcurlHttpFetcher(ProxyResolver* proxy_resolver,
+ HardwareInterface* hardware)
+ : LibcurlHttpFetcher(proxy_resolver, hardware) {}
+
+ MOCK_METHOD0(GetHttpResponseCode, void());
+};
+
+} // namespace chromeos_update_engine
+
+#endif // UPDATE_ENGINE_MOCK_LIBCURL_HTTP_FETCHER_H_
diff --git a/mock_update_attempter.h b/mock_update_attempter.h
index 5df5a6b..ad34802 100644
--- a/mock_update_attempter.h
+++ b/mock_update_attempter.h
@@ -30,12 +30,14 @@
public:
using UpdateAttempter::UpdateAttempter;
- MOCK_METHOD7(Update,
+ MOCK_METHOD9(Update,
void(const std::string& app_version,
const std::string& omaha_url,
const std::string& target_channel,
const std::string& target_version_prefix,
bool rollback_allowed,
+ bool rollback_data_save_requested,
+ int rollback_allowed_milestones,
bool obey_proxies,
bool interactive));
@@ -53,9 +55,13 @@
UpdateAttemptFlags flags));
MOCK_METHOD2(CheckForInstall,
- bool(const std::vector<std::string>& dlc_module_ids,
+ bool(const std::vector<std::string>& dlc_ids,
const std::string& omaha_url));
+ MOCK_METHOD2(SetDlcActiveValue, bool(bool, const std::string&));
+
+ MOCK_CONST_METHOD0(GetExcluder, ExcluderInterface*(void));
+
MOCK_METHOD0(RefreshDevicePolicy, void(void));
MOCK_CONST_METHOD0(consecutive_failed_update_checks, unsigned int(void));
diff --git a/omaha_request_action.cc b/omaha_request_action.cc
index fae9471..3a0b91c 100644
--- a/omaha_request_action.cc
+++ b/omaha_request_action.cc
@@ -26,7 +26,9 @@
#include <vector>
#include <base/bind.h>
+#include <base/files/file_util.h>
#include <base/logging.h>
+#include <base/optional.h>
#include <base/rand_util.h>
#include <base/strings/string_number_conversions.h>
#include <base/strings/string_split.h>
@@ -43,20 +45,23 @@
#include "update_engine/common/hardware_interface.h"
#include "update_engine/common/hash_calculator.h"
#include "update_engine/common/platform_constants.h"
+#include "update_engine/common/prefs.h"
#include "update_engine/common/prefs_interface.h"
#include "update_engine/common/utils.h"
#include "update_engine/connection_manager_interface.h"
#include "update_engine/metrics_reporter_interface.h"
#include "update_engine/metrics_utils.h"
+#include "update_engine/omaha_request_builder_xml.h"
#include "update_engine/omaha_request_params.h"
#include "update_engine/p2p_manager.h"
#include "update_engine/payload_state_interface.h"
+#include "update_engine/update_attempter.h"
+using base::Optional;
using base::Time;
using base::TimeDelta;
using chromeos_update_manager::kRollforwardInfinity;
using std::map;
-using std::numeric_limits;
using std::string;
using std::vector;
@@ -106,382 +111,13 @@
constexpr char kValPostInstall[] = "postinstall";
constexpr char kValNoUpdate[] = "noupdate";
-constexpr char kOmahaUpdaterVersion[] = "0.1.0.0";
-
-// X-Goog-Update headers.
-constexpr char kXGoogleUpdateInteractivity[] = "X-Goog-Update-Interactivity";
-constexpr char kXGoogleUpdateAppId[] = "X-Goog-Update-AppId";
-constexpr char kXGoogleUpdateUpdater[] = "X-Goog-Update-Updater";
-
// updatecheck attributes (without the underscore prefix).
-constexpr char kAttrEol[] = "eol";
+// Deprecated: "eol"
+constexpr char kAttrEolDate[] = "eol_date";
constexpr char kAttrRollback[] = "rollback";
constexpr char kAttrFirmwareVersion[] = "firmware_version";
constexpr char kAttrKernelVersion[] = "kernel_version";
-namespace {
-
-// Returns an XML ping element attribute assignment with attribute
-// |name| and value |ping_days| if |ping_days| has a value that needs
-// to be sent, or an empty string otherwise.
-string GetPingAttribute(const string& name, int ping_days) {
- if (ping_days > 0 || ping_days == OmahaRequestAction::kNeverPinged)
- return base::StringPrintf(" %s=\"%d\"", name.c_str(), ping_days);
- return "";
-}
-
-// Returns an XML ping element if any of the elapsed days need to be
-// sent, or an empty string otherwise.
-string GetPingXml(int ping_active_days, int ping_roll_call_days) {
- string ping_active = GetPingAttribute("a", ping_active_days);
- string ping_roll_call = GetPingAttribute("r", ping_roll_call_days);
- if (!ping_active.empty() || !ping_roll_call.empty()) {
- return base::StringPrintf(" <ping active=\"1\"%s%s></ping>\n",
- ping_active.c_str(),
- ping_roll_call.c_str());
- }
- return "";
-}
-
-// Returns an XML that goes into the body of the <app> element of the Omaha
-// request based on the given parameters.
-string GetAppBody(const OmahaEvent* event,
- OmahaRequestParams* params,
- bool ping_only,
- bool include_ping,
- bool skip_updatecheck,
- int ping_active_days,
- int ping_roll_call_days,
- PrefsInterface* prefs) {
- string app_body;
- if (event == nullptr) {
- if (include_ping)
- app_body = GetPingXml(ping_active_days, ping_roll_call_days);
- if (!ping_only) {
- if (!skip_updatecheck) {
- app_body += " <updatecheck";
- if (!params->target_version_prefix().empty()) {
- app_body += base::StringPrintf(
- " targetversionprefix=\"%s\"",
- XmlEncodeWithDefault(params->target_version_prefix(), "")
- .c_str());
- // Rollback requires target_version_prefix set.
- if (params->rollback_allowed()) {
- app_body += " rollback_allowed=\"true\"";
- }
- }
- app_body += "></updatecheck>\n";
- }
-
- // If this is the first update check after a reboot following a previous
- // update, generate an event containing the previous version number. If
- // the previous version preference file doesn't exist the event is still
- // generated with a previous version of 0.0.0.0 -- this is relevant for
- // older clients or new installs. The previous version event is not sent
- // for ping-only requests because they come before the client has
- // rebooted. The previous version event is also not sent if it was already
- // sent for this new version with a previous updatecheck.
- string prev_version;
- if (!prefs->GetString(kPrefsPreviousVersion, &prev_version)) {
- prev_version = "0.0.0.0";
- }
- // We only store a non-empty previous version value after a successful
- // update in the previous boot. After reporting it back to the server,
- // we clear the previous version value so it doesn't get reported again.
- if (!prev_version.empty()) {
- app_body += base::StringPrintf(
- " <event eventtype=\"%d\" eventresult=\"%d\" "
- "previousversion=\"%s\"></event>\n",
- OmahaEvent::kTypeRebootedAfterUpdate,
- OmahaEvent::kResultSuccess,
- XmlEncodeWithDefault(prev_version, "0.0.0.0").c_str());
- LOG_IF(WARNING, !prefs->SetString(kPrefsPreviousVersion, ""))
- << "Unable to reset the previous version.";
- }
- }
- } else {
- // The error code is an optional attribute so append it only if the result
- // is not success.
- string error_code;
- if (event->result != OmahaEvent::kResultSuccess) {
- error_code = base::StringPrintf(" errorcode=\"%d\"",
- static_cast<int>(event->error_code));
- }
- app_body = base::StringPrintf(
- " <event eventtype=\"%d\" eventresult=\"%d\"%s></event>\n",
- event->type,
- event->result,
- error_code.c_str());
- }
-
- return app_body;
-}
-
-// Returns the cohort* argument to include in the <app> tag for the passed
-// |arg_name| and |prefs_key|, if any. The return value is suitable to
-// concatenate to the list of arguments and includes a space at the end.
-string GetCohortArgXml(PrefsInterface* prefs,
- const string arg_name,
- const string prefs_key) {
- // There's nothing wrong with not having a given cohort setting, so we check
- // existence first to avoid the warning log message.
- if (!prefs->Exists(prefs_key))
- return "";
- string cohort_value;
- if (!prefs->GetString(prefs_key, &cohort_value) || cohort_value.empty())
- return "";
- // This is a sanity check to avoid sending a huge XML file back to Ohama due
- // to a compromised stateful partition making the update check fail in low
- // network environments envent after a reboot.
- if (cohort_value.size() > 1024) {
- LOG(WARNING) << "The omaha cohort setting " << arg_name
- << " has a too big value, which must be an error or an "
- "attacker trying to inhibit updates.";
- return "";
- }
-
- string escaped_xml_value;
- if (!XmlEncode(cohort_value, &escaped_xml_value)) {
- LOG(WARNING) << "The omaha cohort setting " << arg_name
- << " is ASCII-7 invalid, ignoring it.";
- return "";
- }
-
- return base::StringPrintf(
- "%s=\"%s\" ", arg_name.c_str(), escaped_xml_value.c_str());
-}
-
-struct OmahaAppData {
- string id;
- string version;
- string product_components;
-};
-
-bool IsValidComponentID(const string& id) {
- for (char c : id) {
- if (!isalnum(c) && c != '-' && c != '_' && c != '.')
- return false;
- }
- return true;
-}
-
-// Returns an XML that corresponds to the entire <app> node of the Omaha
-// request based on the given parameters.
-string GetAppXml(const OmahaEvent* event,
- OmahaRequestParams* params,
- const OmahaAppData& app_data,
- bool ping_only,
- bool include_ping,
- bool skip_updatecheck,
- int ping_active_days,
- int ping_roll_call_days,
- int install_date_in_days,
- SystemState* system_state) {
- string app_body = GetAppBody(event,
- params,
- ping_only,
- include_ping,
- skip_updatecheck,
- ping_active_days,
- ping_roll_call_days,
- system_state->prefs());
- string app_versions;
-
- // If we are downgrading to a more stable channel and we are allowed to do
- // powerwash, then pass 0.0.0.0 as the version. This is needed to get the
- // highest-versioned payload on the destination channel.
- if (params->ShouldPowerwash()) {
- LOG(INFO) << "Passing OS version as 0.0.0.0 as we are set to powerwash "
- << "on downgrading to the version in the more stable channel";
- app_versions = "version=\"0.0.0.0\" from_version=\"" +
- XmlEncodeWithDefault(app_data.version, "0.0.0.0") + "\" ";
- } else {
- app_versions = "version=\"" +
- XmlEncodeWithDefault(app_data.version, "0.0.0.0") + "\" ";
- }
-
- string download_channel = params->download_channel();
- string app_channels =
- "track=\"" + XmlEncodeWithDefault(download_channel, "") + "\" ";
- if (params->current_channel() != download_channel) {
- app_channels += "from_track=\"" +
- XmlEncodeWithDefault(params->current_channel(), "") + "\" ";
- }
-
- string delta_okay_str = params->delta_okay() ? "true" : "false";
-
- // If install_date_days is not set (e.g. its value is -1 ), don't
- // include the attribute.
- string install_date_in_days_str = "";
- if (install_date_in_days >= 0) {
- install_date_in_days_str =
- base::StringPrintf("installdate=\"%d\" ", install_date_in_days);
- }
-
- string app_cohort_args;
- app_cohort_args +=
- GetCohortArgXml(system_state->prefs(), "cohort", kPrefsOmahaCohort);
- app_cohort_args += GetCohortArgXml(
- system_state->prefs(), "cohorthint", kPrefsOmahaCohortHint);
- app_cohort_args += GetCohortArgXml(
- system_state->prefs(), "cohortname", kPrefsOmahaCohortName);
-
- string fingerprint_arg;
- if (!params->os_build_fingerprint().empty()) {
- fingerprint_arg = "fingerprint=\"" +
- XmlEncodeWithDefault(params->os_build_fingerprint(), "") +
- "\" ";
- }
-
- string buildtype_arg;
- if (!params->os_build_type().empty()) {
- buildtype_arg = "os_build_type=\"" +
- XmlEncodeWithDefault(params->os_build_type(), "") + "\" ";
- }
-
- string product_components_args;
- if (!params->ShouldPowerwash() && !app_data.product_components.empty()) {
- brillo::KeyValueStore store;
- if (store.LoadFromString(app_data.product_components)) {
- for (const string& key : store.GetKeys()) {
- if (!IsValidComponentID(key)) {
- LOG(ERROR) << "Invalid component id: " << key;
- continue;
- }
- string version;
- if (!store.GetString(key, &version)) {
- LOG(ERROR) << "Failed to get version for " << key
- << " in product_components.";
- continue;
- }
- product_components_args +=
- base::StringPrintf("_%s.version=\"%s\" ",
- key.c_str(),
- XmlEncodeWithDefault(version, "").c_str());
- }
- } else {
- LOG(ERROR) << "Failed to parse product_components:\n"
- << app_data.product_components;
- }
- }
-
- // clang-format off
- string app_xml = " <app "
- "appid=\"" + XmlEncodeWithDefault(app_data.id, "") + "\" " +
- app_cohort_args +
- app_versions +
- app_channels +
- product_components_args +
- fingerprint_arg +
- buildtype_arg +
- "lang=\"" + XmlEncodeWithDefault(params->app_lang(), "en-US") + "\" " +
- "board=\"" + XmlEncodeWithDefault(params->os_board(), "") + "\" " +
- "hardware_class=\"" + XmlEncodeWithDefault(params->hwid(), "") + "\" " +
- "delta_okay=\"" + delta_okay_str + "\" "
- "fw_version=\"" + XmlEncodeWithDefault(params->fw_version(), "") + "\" " +
- "ec_version=\"" + XmlEncodeWithDefault(params->ec_version(), "") + "\" " +
- install_date_in_days_str +
- ">\n" +
- app_body +
- " </app>\n";
- // clang-format on
- return app_xml;
-}
-
-// Returns an XML that corresponds to the entire <os> node of the Omaha
-// request based on the given parameters.
-string GetOsXml(OmahaRequestParams* params) {
- string os_xml =
- " <os "
- "version=\"" +
- XmlEncodeWithDefault(params->os_version(), "") + "\" " + "platform=\"" +
- XmlEncodeWithDefault(params->os_platform(), "") + "\" " + "sp=\"" +
- XmlEncodeWithDefault(params->os_sp(), "") +
- "\">"
- "</os>\n";
- return os_xml;
-}
-
-// Returns an XML that corresponds to the entire Omaha request based on the
-// given parameters.
-string GetRequestXml(const OmahaEvent* event,
- OmahaRequestParams* params,
- bool ping_only,
- bool include_ping,
- int ping_active_days,
- int ping_roll_call_days,
- int install_date_in_days,
- SystemState* system_state) {
- string os_xml = GetOsXml(params);
- OmahaAppData product_app = {
- .id = params->GetAppId(),
- .version = params->app_version(),
- .product_components = params->product_components()};
- // Skips updatecheck for platform app in case of an install operation.
- string app_xml = GetAppXml(event,
- params,
- product_app,
- ping_only,
- include_ping,
- params->is_install(), /* skip_updatecheck */
- ping_active_days,
- ping_roll_call_days,
- install_date_in_days,
- system_state);
- if (!params->system_app_id().empty()) {
- OmahaAppData system_app = {.id = params->system_app_id(),
- .version = params->system_version()};
- app_xml += GetAppXml(event,
- params,
- system_app,
- ping_only,
- include_ping,
- false, /* skip_updatecheck */
- ping_active_days,
- ping_roll_call_days,
- install_date_in_days,
- system_state);
- }
- // Create APP ID according to |dlc_module_id| (sticking the current AppID to
- // the DLC module ID with an underscode).
- for (const auto& dlc_module_id : params->dlc_module_ids()) {
- OmahaAppData dlc_module_app = {
- .id = params->GetAppId() + "_" + dlc_module_id,
- .version = params->app_version()};
- app_xml += GetAppXml(event,
- params,
- dlc_module_app,
- ping_only,
- include_ping,
- false, /* skip_updatecheck */
- ping_active_days,
- ping_roll_call_days,
- install_date_in_days,
- system_state);
- }
-
- string install_source = base::StringPrintf(
- "installsource=\"%s\" ",
- (params->interactive() ? "ondemandupdate" : "scheduler"));
-
- string updater_version = XmlEncodeWithDefault(
- base::StringPrintf(
- "%s-%s", constants::kOmahaUpdaterID, kOmahaUpdaterVersion),
- "");
- string request_xml =
- "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"
- "<request protocol=\"3.0\" " +
- ("version=\"" + updater_version +
- "\" "
- "updaterversion=\"" +
- updater_version + "\" " + install_source + "ismachine=\"1\">\n") +
- os_xml + app_xml + "</request>\n";
-
- return request_xml;
-}
-
-} // namespace
-
// Struct used for holding data obtained when parsing the XML.
struct OmahaParserData {
explicit OmahaParserData(XML_Parser _xml_parser) : xml_parser(_xml_parser) {}
@@ -506,12 +142,9 @@
string manifest_version;
map<string, string> action_postinstall_attrs;
string updatecheck_status;
- string cohort;
- string cohorthint;
- string cohortname;
- bool cohort_set = false;
- bool cohorthint_set = false;
- bool cohortname_set = false;
+ Optional<string> cohort;
+ Optional<string> cohorthint;
+ Optional<string> cohortname;
struct Package {
string name;
@@ -547,21 +180,14 @@
if (data->current_path == "/response/app") {
OmahaParserData::App app;
- if (attrs.find(kAttrAppId) != attrs.end()) {
+ if (attrs.find(kAttrAppId) != attrs.end())
app.id = attrs[kAttrAppId];
- }
- if (attrs.find(kAttrCohort) != attrs.end()) {
- app.cohort_set = true;
+ if (attrs.find(kAttrCohort) != attrs.end())
app.cohort = attrs[kAttrCohort];
- }
- if (attrs.find(kAttrCohortHint) != attrs.end()) {
- app.cohorthint_set = true;
+ if (attrs.find(kAttrCohortHint) != attrs.end())
app.cohorthint = attrs[kAttrCohortHint];
- }
- if (attrs.find(kAttrCohortName) != attrs.end()) {
- app.cohortname_set = true;
+ if (attrs.find(kAttrCohortName) != attrs.end())
app.cohortname = attrs[kAttrCohortName];
- }
data->apps.push_back(std::move(app));
} else if (data->current_path == "/response/app/updatecheck") {
if (!data->apps.empty())
@@ -645,54 +271,12 @@
} // namespace
-bool XmlEncode(const string& input, string* output) {
- if (std::find_if(input.begin(), input.end(), [](const char c) {
- return c & 0x80;
- }) != input.end()) {
- LOG(WARNING) << "Invalid ASCII-7 string passed to the XML encoder:";
- utils::HexDumpString(input);
- return false;
- }
- output->clear();
- // We need at least input.size() space in the output, but the code below will
- // handle it if we need more.
- output->reserve(input.size());
- for (char c : input) {
- switch (c) {
- case '\"':
- output->append(""");
- break;
- case '\'':
- output->append("'");
- break;
- case '&':
- output->append("&");
- break;
- case '<':
- output->append("<");
- break;
- case '>':
- output->append(">");
- break;
- default:
- output->push_back(c);
- }
- }
- return true;
-}
-
-string XmlEncodeWithDefault(const string& input, const string& default_value) {
- string output;
- if (XmlEncode(input, &output))
- return output;
- return default_value;
-}
-
OmahaRequestAction::OmahaRequestAction(
SystemState* system_state,
OmahaEvent* event,
std::unique_ptr<HttpFetcher> http_fetcher,
- bool ping_only)
+ bool ping_only,
+ const string& session_id)
: system_state_(system_state),
params_(system_state->request_params()),
event_(event),
@@ -700,7 +284,8 @@
policy_provider_(std::make_unique<policy::PolicyProvider>()),
ping_only_(ping_only),
ping_active_days_(0),
- ping_roll_call_days_(0) {
+ ping_roll_call_days_(0),
+ session_id_(session_id) {
policy_provider_->Reload();
}
@@ -708,7 +293,7 @@
// Calculates the value to use for the ping days parameter.
int OmahaRequestAction::CalculatePingDays(const string& key) {
- int days = kNeverPinged;
+ int days = kPingNeverPinged;
int64_t last_ping = 0;
if (system_state_->prefs()->GetInt64(key, &last_ping) && last_ping >= 0) {
days = (Time::Now() - Time::FromInternalValue(last_ping)).InDays();
@@ -739,8 +324,8 @@
}
bool OmahaRequestAction::ShouldPing() const {
- if (ping_active_days_ == OmahaRequestAction::kNeverPinged &&
- ping_roll_call_days_ == OmahaRequestAction::kNeverPinged) {
+ if (ping_active_days_ == kPingNeverPinged &&
+ ping_roll_call_days_ == kPingNeverPinged) {
int powerwash_count = system_state_->hardware()->GetPowerwashCount();
if (powerwash_count > 0) {
LOG(INFO) << "Not sending ping with a=-1 r=-1 to omaha because "
@@ -821,6 +406,45 @@
return num_days;
}
+void OmahaRequestAction::StorePingReply(
+ const OmahaParserData& parser_data) const {
+ for (const auto& app : parser_data.apps) {
+ auto it = params_->dlc_apps_params().find(app.id);
+ if (it == params_->dlc_apps_params().end())
+ continue;
+
+ const OmahaRequestParams::AppParams& dlc_params = it->second;
+ const string& dlc_id = dlc_params.name;
+ // Skip if the ping for this DLC was not sent.
+ if (!dlc_params.send_ping)
+ continue;
+
+ PrefsInterface* prefs = system_state_->prefs();
+ // Reset the active metadata value to |kPingInactiveValue|.
+ auto active_key =
+ prefs->CreateSubKey({kDlcPrefsSubDir, dlc_id, kPrefsPingActive});
+ if (!prefs->SetInt64(active_key, kPingInactiveValue))
+ LOG(ERROR) << "Failed to set the value of ping metadata '" << active_key
+ << "'.";
+
+ auto last_rollcall_key =
+ prefs->CreateSubKey({kDlcPrefsSubDir, dlc_id, kPrefsPingLastRollcall});
+ if (!prefs->SetString(last_rollcall_key, parser_data.daystart_elapsed_days))
+ LOG(ERROR) << "Failed to set the value of ping metadata '"
+ << last_rollcall_key << "'.";
+
+ if (dlc_params.ping_active) {
+ // Write the value of elapsed_days into |kPrefsPingLastActive| only if
+ // the previous ping was an active one.
+ auto last_active_key =
+ prefs->CreateSubKey({kDlcPrefsSubDir, dlc_id, kPrefsPingLastActive});
+ if (!prefs->SetString(last_active_key, parser_data.daystart_elapsed_days))
+ LOG(ERROR) << "Failed to set the value of ping metadata '"
+ << last_active_key << "'.";
+ }
+ }
+}
+
void OmahaRequestAction::PerformAction() {
http_fetcher_->set_delegate(this);
InitPingDays();
@@ -829,14 +453,16 @@
return;
}
- string request_post(GetRequestXml(event_.get(),
- params_,
- ping_only_,
- ShouldPing(), // include_ping
- ping_active_days_,
- ping_roll_call_days_,
- GetInstallDate(system_state_),
- system_state_));
+ OmahaRequestBuilderXml omaha_request(event_.get(),
+ params_,
+ ping_only_,
+ ShouldPing(), // include_ping
+ ping_active_days_,
+ ping_roll_call_days_,
+ GetInstallDate(system_state_),
+ system_state_->prefs(),
+ session_id_);
+ string request_post = omaha_request.GetRequest();
// Set X-Goog-Update headers.
http_fetcher_->SetHeader(kXGoogleUpdateInteractivity,
@@ -909,6 +535,7 @@
// False otherwise, in which case it sets any error code using |completer|.
bool ParsePackage(OmahaParserData::App* app,
OmahaResponse* output_object,
+ bool can_exclude,
ScopedActionCompleter* completer) {
if (app->updatecheck_status.empty() ||
app->updatecheck_status == kValNoUpdate) {
@@ -955,6 +582,7 @@
LOG(INFO) << "Found package " << package.name;
OmahaResponse::Package out_package;
+ out_package.can_exclude = can_exclude;
for (const string& codebase : app->url_codebase) {
if (codebase.empty()) {
LOG(ERROR) << "Omaha Response URL has empty codebase";
@@ -1000,9 +628,46 @@
return true;
}
+// Removes candidate payload URLs that are excluded within each package. If
+// every candidate URL of a package is excluded, the whole package is removed.
+void ProcessExclusions(OmahaResponse* output_object,
+ ExcluderInterface* excluder) {
+ for (auto package_it = output_object->packages.begin();
+ package_it != output_object->packages.end();
+ /* Increment logic in loop */) {
+ // If package cannot be excluded, quickly continue.
+ if (!package_it->can_exclude) {
+ ++package_it;
+ continue;
+ }
+ // Remove the excluded payload URLs.
+ for (auto payload_url_it = package_it->payload_urls.begin();
+ payload_url_it != package_it->payload_urls.end();
+ /* Increment logic in loop */) {
+ auto exclusion_name = utils::GetExclusionName(*payload_url_it);
+ // If payload URL is not excluded, quickly continue.
+ if (!excluder->IsExcluded(exclusion_name)) {
+ ++payload_url_it;
+ continue;
+ }
+ LOG(INFO) << "Excluding payload URL=" << *payload_url_it
+ << " for payload hash=" << package_it->hash;
+ payload_url_it = package_it->payload_urls.erase(payload_url_it);
+ }
+ // If there are no candidate payload URLs, remove the package.
+ if (package_it->payload_urls.empty()) {
+ LOG(INFO) << "Excluding payload hash=" << package_it->hash;
+ package_it = output_object->packages.erase(package_it);
+ continue;
+ }
+ ++package_it;
+ }
+}
+
// Parses the 2 key version strings kernel_version and firmware_version. If the
// field is not present, or cannot be parsed the values default to 0xffff.
-void ParseRollbackVersions(OmahaParserData* parser_data,
+void ParseRollbackVersions(int allowed_milestones,
+ OmahaParserData* parser_data,
OmahaResponse* output_object) {
utils::ParseRollbackKeyVersion(
parser_data->updatecheck_attrs[kAttrFirmwareVersion],
@@ -1012,6 +677,37 @@
parser_data->updatecheck_attrs[kAttrKernelVersion],
&output_object->rollback_key_version.kernel_key,
&output_object->rollback_key_version.kernel);
+
+ // Create the attribute name strings for milestone N - allowed_milestones.
+ const string firmware_max_rollforward_attr =
+ base::StringPrintf("%s_%i", kAttrFirmwareVersion, allowed_milestones);
+ const string kernel_max_rollforward_attr =
+ base::StringPrintf("%s_%i", kAttrKernelVersion, allowed_milestones);
+
+ const bool max_firmware_and_kernel_exist =
+ parser_data->updatecheck_attrs.count(firmware_max_rollforward_attr) > 0 &&
+ parser_data->updatecheck_attrs.count(kernel_max_rollforward_attr) > 0;
+
+ string firmware_version;
+ string kernel_version;
+ if (max_firmware_and_kernel_exist) {
+ firmware_version =
+ parser_data->updatecheck_attrs[firmware_max_rollforward_attr];
+ kernel_version =
+ parser_data->updatecheck_attrs[kernel_max_rollforward_attr];
+ }
+
+ LOG(INFO) << "For milestone N-" << allowed_milestones
+ << " firmware_key_version=" << firmware_version
+ << " kernel_key_version=" << kernel_version;
+
+ OmahaResponse::RollbackKeyVersion version;
+ utils::ParseRollbackKeyVersion(
+ firmware_version, &version.firmware_key, &version.firmware);
+ utils::ParseRollbackKeyVersion(
+ kernel_version, &version.kernel_key, &version.kernel);
+
+ output_object->past_rollback_key_version = std::move(version);
}
} // namespace
@@ -1064,18 +760,18 @@
// We persist the cohorts sent by omaha even if the status is "noupdate".
for (const auto& app : parser_data->apps) {
if (app.id == params_->GetAppId()) {
- if (app.cohort_set)
- PersistCohortData(kPrefsOmahaCohort, app.cohort);
- if (app.cohorthint_set)
- PersistCohortData(kPrefsOmahaCohortHint, app.cohorthint);
- if (app.cohortname_set)
- PersistCohortData(kPrefsOmahaCohortName, app.cohortname);
+ if (app.cohort)
+ PersistCohortData(kPrefsOmahaCohort, app.cohort.value());
+ if (app.cohorthint)
+ PersistCohortData(kPrefsOmahaCohortHint, app.cohorthint.value());
+ if (app.cohortname)
+ PersistCohortData(kPrefsOmahaCohortName, app.cohortname.value());
break;
}
}
- // Parse the updatecheck attributes.
- PersistEolStatus(parser_data->updatecheck_attrs);
+ PersistEolInfo(parser_data->updatecheck_attrs);
+
// Rollback-related updatecheck attributes.
// Defaults to false if attribute is not present.
output_object->is_rollback =
@@ -1083,7 +779,8 @@
// Parses the rollback versions of the current image. If the fields do not
// exist they default to 0xffff for the 4 key versions.
- ParseRollbackVersions(parser_data, output_object);
+ ParseRollbackVersions(
+ params_->rollback_allowed_milestones(), parser_data, output_object);
if (!ParseStatus(parser_data, output_object, completer))
return false;
@@ -1093,9 +790,15 @@
// Package has to be parsed after Params now because ParseParams need to make
// sure that postinstall action exists.
- for (auto& app : parser_data->apps)
- if (!ParsePackage(&app, output_object, completer))
+ for (auto& app : parser_data->apps) {
+ // Only allow exclusions for a non-critical package during an update. For
+ // non-critical package installations, let the errors propagate instead
+    // of being handled inside update_engine, as installations are a
+    // dlcservice-specific feature.
+ bool can_exclude = !params_->is_install() && params_->IsDlcAppId(app.id);
+ if (!ParsePackage(&app, output_object, can_exclude, completer))
return false;
+ }
return true;
}
@@ -1107,6 +810,14 @@
for (const auto& app : parser_data->apps) {
const string& status = app.updatecheck_status;
if (status == kValNoUpdate) {
+ // If the app is a DLC, allow status "noupdate" to support DLC
+ // deprecations.
+ if (params_->IsDlcAppId(app.id)) {
+ LOG(INFO) << "No update for <app> " << app.id
+ << " but update continuing since a DLC.";
+ params_->SetDlcNoUpdate(app.id);
+ continue;
+ }
// Don't update if any app has status="noupdate".
LOG(INFO) << "No update for <app> " << app.id;
output_object->update_exists = false;
@@ -1226,12 +937,25 @@
return;
}
+ ErrorCode aux_error_code = fetcher->GetAuxiliaryErrorCode();
+ if (aux_error_code != ErrorCode::kSuccess) {
+ metrics::DownloadErrorCode download_error_code =
+ metrics_utils::GetDownloadErrorCode(aux_error_code);
+ system_state_->metrics_reporter()->ReportUpdateCheckMetrics(
+ system_state_,
+ metrics::CheckResult::kUnset,
+ metrics::CheckReaction::kUnset,
+ download_error_code);
+ }
+
if (!successful) {
- LOG(ERROR) << "Omaha request network transfer failed.";
int code = GetHTTPResponseCode();
+ LOG(ERROR) << "Omaha request network transfer failed with HTTPResponseCode="
+ << code;
// Makes sure we send sane error values.
if (code < 0 || code >= 1000) {
code = 999;
+ LOG(WARNING) << "Converting to sane HTTPResponseCode=" << code;
}
completer.set_code(static_cast<ErrorCode>(
static_cast<int>(ErrorCode::kOmahaRequestHTTPResponseBase) + code));
@@ -1285,6 +1009,9 @@
}
}
+ // Create/update the metadata files for each DLC app received.
+ StorePingReply(parser_data);
+
if (!HasOutputPipe()) {
// Just set success to whether or not the http transfer succeeded,
// which must be true at this point in the code.
@@ -1295,6 +1022,8 @@
OmahaResponse output_object;
if (!ParseResponse(&parser_data, &output_object, &completer))
return;
+ ProcessExclusions(&output_object,
+ system_state_->update_attempter()->GetExcluder());
output_object.update_exists = true;
SetOutputObject(output_object);
@@ -1680,13 +1409,17 @@
return true;
}
-bool OmahaRequestAction::PersistEolStatus(const map<string, string>& attrs) {
- auto eol_attr = attrs.find(kAttrEol);
- if (eol_attr != attrs.end()) {
- return system_state_->prefs()->SetString(kPrefsOmahaEolStatus,
- eol_attr->second);
- } else if (system_state_->prefs()->Exists(kPrefsOmahaEolStatus)) {
- return system_state_->prefs()->Delete(kPrefsOmahaEolStatus);
+bool OmahaRequestAction::PersistEolInfo(const map<string, string>& attrs) {
+ // If EOL date attribute is not sent, don't delete the old persisted EOL
+ // date information.
+ auto eol_date_attr = attrs.find(kAttrEolDate);
+ if (eol_date_attr != attrs.end()) {
+ const auto& eol_date = eol_date_attr->second;
+ if (!system_state_->prefs()->SetString(kPrefsOmahaEolDate, eol_date)) {
+ LOG(ERROR) << "Setting EOL date failed.";
+ return false;
+ }
+ LOG(INFO) << "Set EOL date to " << eol_date;
}
return true;
}
@@ -1783,6 +1516,14 @@
return true;
}
+ // Currently non-critical updates always update alongside the platform update
+ // (a critical update) so this case should never actually be hit if the
+ // request to Omaha for updates are correct. In other words, stop the update
+ // from happening as there are no packages in the response to process.
+ if (response.packages.empty()) {
+ LOG(ERROR) << "All packages were excluded.";
+ }
+
// Note: We could technically delete the UpdateFirstSeenAt state when we
// return true. If we do, it'll mean a device has to restart the
// UpdateFirstSeenAt and thus help scattering take effect when the AU is
diff --git a/omaha_request_action.h b/omaha_request_action.h
index 8db5fb9..30b3d22 100644
--- a/omaha_request_action.h
+++ b/omaha_request_action.h
@@ -33,6 +33,7 @@
#include "update_engine/common/action.h"
#include "update_engine/common/http_fetcher.h"
+#include "update_engine/omaha_request_builder_xml.h"
#include "update_engine/omaha_response.h"
#include "update_engine/system_state.h"
@@ -45,56 +46,6 @@
namespace chromeos_update_engine {
-// Encodes XML entities in a given string. Input must be ASCII-7 valid. If
-// the input is invalid, the default value is used instead.
-std::string XmlEncodeWithDefault(const std::string& input,
- const std::string& default_value);
-
-// Escapes text so it can be included as character data and attribute
-// values. The |input| string must be valid ASCII-7, no UTF-8 supported.
-// Returns whether the |input| was valid and escaped properly in |output|.
-bool XmlEncode(const std::string& input, std::string* output);
-
-// This struct encapsulates the Omaha event information. For a
-// complete list of defined event types and results, see
-// http://code.google.com/p/omaha/wiki/ServerProtocol#event
-struct OmahaEvent {
- // The Type values correspond to EVENT_TYPE values of Omaha.
- enum Type {
- kTypeUnknown = 0,
- kTypeDownloadComplete = 1,
- kTypeInstallComplete = 2,
- kTypeUpdateComplete = 3,
- kTypeUpdateDownloadStarted = 13,
- kTypeUpdateDownloadFinished = 14,
- // Chromium OS reserved type sent after the first reboot following an update
- // completed.
- kTypeRebootedAfterUpdate = 54,
- };
-
- // The Result values correspond to EVENT_RESULT values of Omaha.
- enum Result {
- kResultError = 0,
- kResultSuccess = 1,
- kResultUpdateDeferred = 9, // When we ignore/defer updates due to policy.
- };
-
- OmahaEvent()
- : type(kTypeUnknown),
- result(kResultError),
- error_code(ErrorCode::kError) {}
- explicit OmahaEvent(Type in_type)
- : type(in_type),
- result(kResultSuccess),
- error_code(ErrorCode::kSuccess) {}
- OmahaEvent(Type in_type, Result in_result, ErrorCode in_error_code)
- : type(in_type), result(in_result), error_code(in_error_code) {}
-
- Type type;
- Result result;
- ErrorCode error_code;
-};
-
class NoneType;
class OmahaRequestAction;
class OmahaRequestParams;
@@ -116,14 +67,13 @@
class OmahaRequestAction : public Action<OmahaRequestAction>,
public HttpFetcherDelegate {
public:
- static const int kNeverPinged = -1;
static const int kPingTimeJump = -2;
- // We choose this value of 10 as a heuristic for a work day in trying
+ // We choose this value of 3 as a heuristic for a work day in trying
// each URL, assuming we check roughly every 45 mins. This is a good time to
- // wait - neither too long nor too little - so we don't give up the preferred
- // URLs that appear earlier in list too quickly before moving on to the
- // fallback ones.
- static const int kDefaultMaxFailureCountPerUrl = 10;
+ // wait so we don't give up the preferred URLs, but allow using the URL that
+ // appears earlier in list for every payload before resorting to the fallback
+ // URLs in the candiate URL list.
+ static const int kDefaultMaxFailureCountPerUrl = 3;
// If staging is enabled, set the maximum wait time to 28 days, since that is
// the predetermined wait time for staging.
@@ -154,7 +104,8 @@
OmahaRequestAction(SystemState* system_state,
OmahaEvent* event,
std::unique_ptr<HttpFetcher> http_fetcher,
- bool ping_only);
+ bool ping_only,
+ const std::string& session_id);
~OmahaRequestAction() override;
typedef ActionTraits<OmahaRequestAction>::InputObjectType InputObjectType;
typedef ActionTraits<OmahaRequestAction>::OutputObjectType OutputObjectType;
@@ -188,6 +139,9 @@
GetInstallDateWhenOOBECompletedWithValidDate);
FRIEND_TEST(OmahaRequestActionTest,
GetInstallDateWhenOOBECompletedDateChanges);
+ friend class UpdateAttempterTest;
+ FRIEND_TEST(UpdateAttempterTest, SessionIdTestEnforceEmptyStrPingOmaha);
+ FRIEND_TEST(UpdateAttempterTest, SessionIdTestConsistencyInUpdateFlow);
// Enumeration used in PersistInstallDate().
enum InstallDateProvisioningSource {
@@ -229,9 +183,9 @@
bool PersistCohortData(const std::string& prefs_key,
const std::string& new_value);
- // Parse and persist the end-of-life status flag sent back in the updatecheck
- // tag attributes. The flag will be validated and stored in the Prefs.
- bool PersistEolStatus(const std::map<std::string, std::string>& attrs);
+ // Parses and persists the end-of-life date flag sent back in the updatecheck
+ // tag attributes. The flags will be validated and stored in the Prefs.
+ bool PersistEolInfo(const std::map<std::string, std::string>& attrs);
// If this is an update check request, initializes
// |ping_active_days_| and |ping_roll_call_days_| to values that may
@@ -246,6 +200,10 @@
// send to Omaha and thus we should include them in the response.
bool ShouldPing() const;
+ // Process Omaha's response to a ping request and store the results in the DLC
+ // metadata directory.
+ void StorePingReply(const OmahaParserData& parser_data) const;
+
// Returns true if the download of a new update should be deferred.
// False if the update can be downloaded.
bool ShouldDeferDownload(OmahaResponse* output_object);
@@ -352,6 +310,8 @@
int ping_active_days_;
int ping_roll_call_days_;
+ std::string session_id_;
+
DISALLOW_COPY_AND_ASSIGN(OmahaRequestAction);
};
diff --git a/omaha_request_action_fuzzer.cc b/omaha_request_action_fuzzer.cc
index 6c2f7ca..6c41b12 100644
--- a/omaha_request_action_fuzzer.cc
+++ b/omaha_request_action_fuzzer.cc
@@ -38,7 +38,8 @@
nullptr,
std::make_unique<chromeos_update_engine::MockHttpFetcher>(
data, size, nullptr),
- false);
+ false,
+ "" /* session_id */);
auto collector_action =
std::make_unique<chromeos_update_engine::ObjectCollectorAction<
chromeos_update_engine::OmahaResponse>>();
diff --git a/omaha_request_action_unittest.cc b/omaha_request_action_unittest.cc
index 1786bcc..6a0c213 100644
--- a/omaha_request_action_unittest.cc
+++ b/omaha_request_action_unittest.cc
@@ -18,6 +18,7 @@
#include <stdint.h>
+#include <limits>
#include <memory>
#include <string>
#include <utility>
@@ -34,6 +35,7 @@
#include <brillo/message_loops/fake_message_loop.h>
#include <brillo/message_loops/message_loop.h>
#include <brillo/message_loops/message_loop_utils.h>
+#include <expat.h>
#include <gtest/gtest.h>
#include <policy/libpolicy.h>
#include <policy/mock_libpolicy.h>
@@ -42,6 +44,7 @@
#include "update_engine/common/constants.h"
#include "update_engine/common/fake_prefs.h"
#include "update_engine/common/hash_calculator.h"
+#include "update_engine/common/mock_excluder.h"
#include "update_engine/common/mock_http_fetcher.h"
#include "update_engine/common/platform_constants.h"
#include "update_engine/common/prefs.h"
@@ -50,12 +53,15 @@
#include "update_engine/metrics_reporter_interface.h"
#include "update_engine/mock_connection_manager.h"
#include "update_engine/mock_payload_state.h"
+#include "update_engine/omaha_request_builder_xml.h"
#include "update_engine/omaha_request_params.h"
+#include "update_engine/omaha_utils.h"
#include "update_engine/update_manager/rollback_prefs.h"
using base::Time;
using base::TimeDelta;
using chromeos_update_manager::kRollforwardInfinity;
+using std::pair;
using std::string;
using std::vector;
using testing::_;
@@ -70,6 +76,7 @@
using testing::ReturnRef;
using testing::SaveArg;
using testing::SetArgPointee;
+using testing::StrictMock;
namespace {
@@ -81,12 +88,21 @@
const char kTestAppId[] = "test-app-id";
const char kTestAppId2[] = "test-app2-id";
const char kTestAppIdSkipUpdatecheck[] = "test-app-id-skip-updatecheck";
+const char kDlcId1[] = "dlc-id-1";
+const char kDlcId2[] = "dlc-id-2";
// This is a helper struct to allow unit tests build an update response with the
// values they care about.
struct FakeUpdateResponse {
string GetRollbackVersionAttributes() const {
- return (rollback ? " _rollback=\"true\"" : "") +
+ string num_milestones;
+ num_milestones = base::NumberToString(rollback_allowed_milestones);
+ const string rollback_version =
+ " _firmware_version_" + num_milestones + "=\"" +
+ past_rollback_key_version.first + "\"" + " _kernel_version_" +
+ num_milestones + "=\"" + past_rollback_key_version.second + "\"";
+
+ return (rollback ? " _rollback=\"true\"" : "") + rollback_version +
(!rollback_firmware_version.empty()
? " _firmware_version=\"" + rollback_firmware_version + "\""
: "") +
@@ -119,6 +135,8 @@
}
string GetUpdateResponse() const {
+ chromeos_update_engine::OmahaRequestParams request_params{nullptr};
+ request_params.set_app_id(app_id);
return "<?xml version=\"1.0\" encoding=\"UTF-8\"?><response "
"protocol=\"3.0\">"
"<daystart elapsed_seconds=\"100\"" +
@@ -140,7 +158,7 @@
version +
"\">"
"<packages><package hash=\"not-used\" name=\"" +
- filename + "\" size=\"" + base::Int64ToString(size) +
+ filename + "\" size=\"" + base::NumberToString(size) +
"\" hash_sha256=\"" + hash + "\"/>" +
(multi_package ? "<package name=\"package2\" size=\"222\" "
"hash_sha256=\"hash2\"/>"
@@ -184,6 +202,22 @@
(multi_app_skip_updatecheck
? "<app appid=\"" + app_id_skip_updatecheck + "\"></app>"
: "") +
+ (dlc_app_update
+ ? "<app appid=\"" + request_params.GetDlcAppId(kDlcId1) +
+ "\" status=\"ok\">"
+ "<updatecheck status=\"ok\"><urls><url codebase=\"" +
+ codebase + "\"/><url codebase=\"" + codebase2 +
+ "\"/></urls><manifest version=\"" + version +
+ "\"><packages><package name=\"package3\" size=\"333\" "
+ "hash_sha256=\"hash3\"/></packages><actions>"
+ "<action event=\"install\" run=\".signed\"/>"
+ "<action event=\"postinstall\" MetadataSize=\"33\"/>"
+ "</actions></manifest></updatecheck></app>"
+ : "") +
+ (dlc_app_no_update
+ ? "<app appid=\"" + request_params.GetDlcAppId(kDlcId2) +
+ "\"><updatecheck status=\"noupdate\"/></app>"
+ : "") +
"</response>";
}
@@ -232,6 +266,10 @@
bool multi_app_skip_updatecheck = false;
// Whether to include more than one package in an app.
bool multi_package = false;
+ // Whether to include a DLC app with updatecheck tag.
+ bool dlc_app_update = false;
+ // Whether to include a DLC app with no updatecheck tag.
+ bool dlc_app_no_update = false;
// Whether the payload is a rollback.
bool rollback = false;
@@ -239,6 +277,14 @@
string rollback_firmware_version = "";
// The verified boot kernel key version for the rollback image.
string rollback_kernel_version = "";
+ // The number of milestones back that the verified boot key version has been
+ // supplied.
+ uint32_t rollback_allowed_milestones = 0;
+ // The verified boot key version for the
+ // |current - rollback_allowed_milestones| most recent release.
+ // The pair contains <firmware_key_version, kernel_key_version> each
+ // of which is in the form "key_version.version".
+ pair<string, string> past_rollback_key_version;
};
} // namespace
@@ -294,6 +340,20 @@
std::unique_ptr<OmahaResponse> omaha_response_;
};
+struct TestUpdateCheckParams {
+ string http_response;
+ int fail_http_response_code;
+ bool ping_only;
+ bool is_consumer_device;
+ int rollback_allowed_milestones;
+ bool is_policy_loaded;
+ ErrorCode expected_code;
+ metrics::CheckResult expected_check_result;
+ metrics::CheckReaction expected_check_reaction;
+ metrics::DownloadErrorCode expected_download_error_code;
+ string session_id;
+};
+
class OmahaRequestActionTest : public ::testing::Test {
protected:
void SetUp() override {
@@ -314,60 +374,46 @@
request_params_.set_rollback_allowed(false);
request_params_.set_is_powerwash_allowed(false);
request_params_.set_is_install(false);
- request_params_.set_dlc_module_ids({});
+ request_params_.set_dlc_apps_params({});
fake_system_state_.set_request_params(&request_params_);
fake_system_state_.set_prefs(&fake_prefs_);
+
+ // Setting the default update check params. Lookup |TestUpdateCheck()|.
+ tuc_params_ = {
+ .http_response = "",
+ .fail_http_response_code = -1,
+ .ping_only = false,
+ .is_consumer_device = true,
+ .rollback_allowed_milestones = 0,
+ .is_policy_loaded = false,
+ .expected_code = ErrorCode::kSuccess,
+ .expected_check_result = metrics::CheckResult::kUpdateAvailable,
+ .expected_check_reaction = metrics::CheckReaction::kUpdating,
+ .expected_download_error_code = metrics::DownloadErrorCode::kUnset,
+ };
+
+ ON_CALL(*fake_system_state_.mock_update_attempter(), GetExcluder())
+ .WillByDefault(Return(&mock_excluder_));
}
- // Returns true iff an output response was obtained from the
- // OmahaRequestAction. |prefs| may be null, in which case a local MockPrefs
- // is used. |payload_state| may be null, in which case a local mock is used.
- // |p2p_manager| may be null, in which case a local mock is used.
- // |connection_manager| may be null, in which case a local mock is used.
- // out_response may be null. If |fail_http_response_code| is non-negative,
- // the transfer will fail with that code. |ping_only| is passed through to the
- // OmahaRequestAction constructor. out_post_data may be null; if non-null, the
- // post-data received by the mock HttpFetcher is returned.
+ // This function uses the parameters in |tuc_params_| to do an update check.
+ // It will fill out |post_str| with the result data and |response| with
+ // |OmahaResponse|. Returns true iff an output response was obtained from the
+ // |OmahaRequestAction|. If |fail_http_response_code| is non-negative, the
+ // transfer will fail with that code. |ping_only| is passed through to the
+ // |OmahaRequestAction| constructor.
//
// The |expected_check_result|, |expected_check_reaction| and
- // |expected_error_code| parameters are for checking expectations
- // about reporting UpdateEngine.Check.{Result,Reaction,DownloadError}
- // UMA statistics. Use the appropriate ::kUnset value to specify that
- // the given metric should not be reported.
- bool TestUpdateCheck(const string& http_response,
- int fail_http_response_code,
- bool ping_only,
- bool is_consumer_device,
- int rollback_allowed_milestones,
- bool is_policy_loaded,
- ErrorCode expected_code,
- metrics::CheckResult expected_check_result,
- metrics::CheckReaction expected_check_reaction,
- metrics::DownloadErrorCode expected_download_error_code,
- OmahaResponse* out_response,
- brillo::Blob* out_post_data);
+ // |expected_error_code| parameters are for checking expectations about
+ // reporting UpdateEngine.Check.{Result,Reaction,DownloadError} UMA
+ // statistics. Use the appropriate ::kUnset value to specify that the given
+ // metric should not be reported.
+ bool TestUpdateCheck();
- // Overload of TestUpdateCheck that does not supply |is_consumer_device| or
- // |rollback_allowed_milestones| which are only required for rollback tests.
- bool TestUpdateCheck(const string& http_response,
- int fail_http_response_code,
- bool ping_only,
- ErrorCode expected_code,
- metrics::CheckResult expected_check_result,
- metrics::CheckReaction expected_check_reaction,
- metrics::DownloadErrorCode expected_download_error_code,
- OmahaResponse* out_response,
- brillo::Blob* out_post_data);
-
- void TestRollbackCheck(bool is_consumer_device,
- int rollback_allowed_milestones,
- bool is_policy_loaded,
- OmahaResponse* out_response);
-
- void TestEvent(OmahaEvent* event,
- const string& http_response,
- brillo::Blob* out_post_data);
+ // Tests events using |event| and |https_response|. It will fill up |post_str|
+ // with the result data.
+ void TestEvent(OmahaEvent* event, const string& http_response);
// Runs and checks a ping test. |ping_only| indicates whether it should send
// only a ping or also an updatecheck.
@@ -389,6 +435,7 @@
bool expected_allow_p2p_for_sharing,
const string& expected_p2p_url);
+ StrictMock<MockExcluder> mock_excluder_;
FakeSystemState fake_system_state_;
FakeUpdateResponse fake_update_response_;
// Used by all tests.
@@ -399,54 +446,86 @@
OmahaRequestActionTestProcessorDelegate delegate_;
bool test_http_fetcher_headers_{false};
+
+ TestUpdateCheckParams tuc_params_;
+
+ // TODO(ahassani): Add trailing _ to these two variables.
+ OmahaResponse response;
+ string post_str;
};
-bool OmahaRequestActionTest::TestUpdateCheck(
- const string& http_response,
- int fail_http_response_code,
- bool ping_only,
- bool is_consumer_device,
- int rollback_allowed_milestones,
- bool is_policy_loaded,
- ErrorCode expected_code,
- metrics::CheckResult expected_check_result,
- metrics::CheckReaction expected_check_reaction,
- metrics::DownloadErrorCode expected_download_error_code,
- OmahaResponse* out_response,
- brillo::Blob* out_post_data) {
+class OmahaRequestActionDlcPingTest : public OmahaRequestActionTest {
+ protected:
+ void SetUp() override {
+ OmahaRequestActionTest::SetUp();
+ dlc_id_ = "dlc0";
+ active_key_ = PrefsInterface::CreateSubKey(
+ {kDlcPrefsSubDir, dlc_id_, kPrefsPingActive});
+ last_active_key_ = PrefsInterface::CreateSubKey(
+ {kDlcPrefsSubDir, dlc_id_, kPrefsPingLastActive});
+ last_rollcall_key_ = PrefsInterface::CreateSubKey(
+ {kDlcPrefsSubDir, dlc_id_, kPrefsPingLastRollcall});
+
+ tuc_params_.http_response =
+ "<?xml version=\"1.0\" encoding=\"UTF-8\"?><response "
+ "protocol=\"3.0\"><daystart elapsed_days=\"4763\" "
+ "elapsed_seconds=\"36540\"/><app appid=\"test-app-id\" status=\"ok\">\""
+ "<updatecheck status=\"noupdate\"/></app><app "
+ "appid=\"test-app-id_dlc0\" "
+ "status=\"ok\"><ping status=\"ok\"/><updatecheck status=\"noupdate\"/>"
+ "</app></response>";
+ tuc_params_.expected_check_result =
+ metrics::CheckResult::kNoUpdateAvailable;
+ tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+ }
+
+ std::string dlc_id_;
+ std::string active_key_;
+ std::string last_active_key_;
+ std::string last_rollcall_key_;
+};
+bool OmahaRequestActionTest::TestUpdateCheck() {
brillo::FakeMessageLoop loop(nullptr);
loop.SetAsCurrent();
- auto fetcher = std::make_unique<MockHttpFetcher>(
- http_response.data(), http_response.size(), nullptr);
- if (fail_http_response_code >= 0) {
- fetcher->FailTransfer(fail_http_response_code);
+ auto fetcher =
+ std::make_unique<MockHttpFetcher>(tuc_params_.http_response.data(),
+ tuc_params_.http_response.size(),
+ nullptr);
+ if (tuc_params_.fail_http_response_code >= 0) {
+ fetcher->FailTransfer(tuc_params_.fail_http_response_code);
}
// This ensures the tests didn't forget to update fake_system_state_ if they
// are not using the default request_params_.
EXPECT_EQ(&request_params_, fake_system_state_.request_params());
- auto omaha_request_action = std::make_unique<OmahaRequestAction>(
- &fake_system_state_, nullptr, std::move(fetcher), ping_only);
+ auto omaha_request_action =
+ std::make_unique<OmahaRequestAction>(&fake_system_state_,
+ nullptr,
+ std::move(fetcher),
+ tuc_params_.ping_only,
+ tuc_params_.session_id);
auto mock_policy_provider =
std::make_unique<NiceMock<policy::MockPolicyProvider>>();
EXPECT_CALL(*mock_policy_provider, IsConsumerDevice())
- .WillRepeatedly(Return(is_consumer_device));
+ .WillRepeatedly(Return(tuc_params_.is_consumer_device));
EXPECT_CALL(*mock_policy_provider, device_policy_is_loaded())
- .WillRepeatedly(Return(is_policy_loaded));
+ .WillRepeatedly(Return(tuc_params_.is_policy_loaded));
const policy::MockDevicePolicy device_policy;
- const bool get_allowed_milestone_succeeds = rollback_allowed_milestones >= 0;
+ const bool get_allowed_milestone_succeeds =
+ tuc_params_.rollback_allowed_milestones >= 0;
EXPECT_CALL(device_policy, GetRollbackAllowedMilestones(_))
- .WillRepeatedly(DoAll(SetArgPointee<0>(rollback_allowed_milestones),
- Return(get_allowed_milestone_succeeds)));
+ .WillRepeatedly(
+ DoAll(SetArgPointee<0>(tuc_params_.rollback_allowed_milestones),
+ Return(get_allowed_milestone_succeeds)));
EXPECT_CALL(*mock_policy_provider, GetDevicePolicy())
.WillRepeatedly(ReturnRef(device_policy));
omaha_request_action->policy_provider_ = std::move(mock_policy_provider);
- delegate_.expected_code_ = expected_code;
+ delegate_.expected_code_ = tuc_params_.expected_code;
delegate_.interactive_ = request_params_.interactive();
delegate_.test_http_fetcher_headers_ = test_http_fetcher_headers_;
ActionProcessor processor;
@@ -462,75 +541,30 @@
ReportUpdateCheckMetrics(_, _, _, _))
.Times(AnyNumber());
- EXPECT_CALL(*fake_system_state_.mock_metrics_reporter(),
- ReportUpdateCheckMetrics(_,
- expected_check_result,
- expected_check_reaction,
- expected_download_error_code))
- .Times(ping_only ? 0 : 1);
+ EXPECT_CALL(
+ *fake_system_state_.mock_metrics_reporter(),
+ ReportUpdateCheckMetrics(_,
+ tuc_params_.expected_check_result,
+ tuc_params_.expected_check_reaction,
+ tuc_params_.expected_download_error_code))
+ .Times(tuc_params_.ping_only ? 0 : 1);
loop.PostTask(base::Bind(
[](ActionProcessor* processor) { processor->StartProcessing(); },
base::Unretained(&processor)));
loop.Run();
EXPECT_FALSE(loop.PendingTasks());
- if (delegate_.omaha_response_ && out_response)
- *out_response = *delegate_.omaha_response_;
- if (out_post_data)
- *out_post_data = delegate_.post_data_;
+ if (delegate_.omaha_response_)
+ response = *delegate_.omaha_response_;
+ post_str = string(delegate_.post_data_.begin(), delegate_.post_data_.end());
return delegate_.omaha_response_ != nullptr;
}
-bool OmahaRequestActionTest::TestUpdateCheck(
- const string& http_response,
- int fail_http_response_code,
- bool ping_only,
- ErrorCode expected_code,
- metrics::CheckResult expected_check_result,
- metrics::CheckReaction expected_check_reaction,
- metrics::DownloadErrorCode expected_download_error_code,
- OmahaResponse* out_response,
- brillo::Blob* out_post_data) {
- return TestUpdateCheck(http_response,
- fail_http_response_code,
- ping_only,
- true, // is_consumer_device
- 0, // rollback_allowed_milestones
- false, // is_policy_loaded
- expected_code,
- expected_check_result,
- expected_check_reaction,
- expected_download_error_code,
- out_response,
- out_post_data);
-}
-
-void OmahaRequestActionTest::TestRollbackCheck(bool is_consumer_device,
- int rollback_allowed_milestones,
- bool is_policy_loaded,
- OmahaResponse* out_response) {
- fake_update_response_.deadline = "20101020";
- ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
- -1,
- false, // ping_only
- is_consumer_device,
- rollback_allowed_milestones,
- is_policy_loaded,
- ErrorCode::kSuccess,
- metrics::CheckResult::kUpdateAvailable,
- metrics::CheckReaction::kUpdating,
- metrics::DownloadErrorCode::kUnset,
- out_response,
- nullptr));
- ASSERT_TRUE(out_response->update_exists);
-}
-
// Tests Event requests -- they should always succeed. |out_post_data| may be
// null; if non-null, the post-data received by the mock HttpFetcher is
// returned.
void OmahaRequestActionTest::TestEvent(OmahaEvent* event,
- const string& http_response,
- brillo::Blob* out_post_data) {
+ const string& http_response) {
brillo::FakeMessageLoop loop(nullptr);
loop.SetAsCurrent();
@@ -539,7 +573,8 @@
event,
std::make_unique<MockHttpFetcher>(
http_response.data(), http_response.size(), nullptr),
- false);
+ false,
+ "");
ActionProcessor processor;
processor.set_delegate(&delegate_);
processor.EnqueueAction(std::move(action));
@@ -550,100 +585,69 @@
loop.Run();
EXPECT_FALSE(loop.PendingTasks());
- if (out_post_data)
- *out_post_data = delegate_.post_data_;
+ post_str = string(delegate_.post_data_.begin(), delegate_.post_data_.end());
}
TEST_F(OmahaRequestActionTest, RejectEntities) {
- OmahaResponse response;
fake_update_response_.include_entity = true;
- ASSERT_FALSE(TestUpdateCheck(fake_update_response_.GetNoUpdateResponse(),
- -1,
- false, // ping_only
- ErrorCode::kOmahaRequestXMLHasEntityDecl,
- metrics::CheckResult::kParsingError,
- metrics::CheckReaction::kUnset,
- metrics::DownloadErrorCode::kUnset,
- &response,
- nullptr));
+ tuc_params_.http_response = fake_update_response_.GetNoUpdateResponse();
+ tuc_params_.expected_code = ErrorCode::kOmahaRequestXMLHasEntityDecl;
+ tuc_params_.expected_check_result = metrics::CheckResult::kParsingError;
+ tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+
+ ASSERT_FALSE(TestUpdateCheck());
EXPECT_FALSE(response.update_exists);
}
TEST_F(OmahaRequestActionTest, NoUpdateTest) {
- OmahaResponse response;
- ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetNoUpdateResponse(),
- -1,
- false, // ping_only
- ErrorCode::kSuccess,
- metrics::CheckResult::kNoUpdateAvailable,
- metrics::CheckReaction::kUnset,
- metrics::DownloadErrorCode::kUnset,
- &response,
- nullptr));
+ tuc_params_.http_response = fake_update_response_.GetNoUpdateResponse();
+ tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable;
+ tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+
+ ASSERT_TRUE(TestUpdateCheck());
EXPECT_FALSE(response.update_exists);
}
TEST_F(OmahaRequestActionTest, MultiAppNoUpdateTest) {
- OmahaResponse response;
fake_update_response_.multi_app_no_update = true;
- ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetNoUpdateResponse(),
- -1,
- false, // ping_only
- ErrorCode::kSuccess,
- metrics::CheckResult::kNoUpdateAvailable,
- metrics::CheckReaction::kUnset,
- metrics::DownloadErrorCode::kUnset,
- &response,
- nullptr));
+ tuc_params_.http_response = fake_update_response_.GetNoUpdateResponse();
+ tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable;
+ tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+
+ ASSERT_TRUE(TestUpdateCheck());
EXPECT_FALSE(response.update_exists);
}
TEST_F(OmahaRequestActionTest, MultiAppNoPartialUpdateTest) {
- OmahaResponse response;
fake_update_response_.multi_app_no_update = true;
- ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
- -1,
- false, // ping_only
- ErrorCode::kSuccess,
- metrics::CheckResult::kNoUpdateAvailable,
- metrics::CheckReaction::kUnset,
- metrics::DownloadErrorCode::kUnset,
- &response,
- nullptr));
+ tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+ tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable;
+ tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+
+ ASSERT_TRUE(TestUpdateCheck());
EXPECT_FALSE(response.update_exists);
}
TEST_F(OmahaRequestActionTest, NoSelfUpdateTest) {
- OmahaResponse response;
- ASSERT_TRUE(TestUpdateCheck(
+ tuc_params_.http_response =
"<response><app><updatecheck status=\"ok\"><manifest><actions><action "
"event=\"postinstall\" noupdate=\"true\"/></actions>"
- "</manifest></updatecheck></app></response>",
- -1,
- false, // ping_only
- ErrorCode::kSuccess,
- metrics::CheckResult::kNoUpdateAvailable,
- metrics::CheckReaction::kUnset,
- metrics::DownloadErrorCode::kUnset,
- &response,
- nullptr));
+ "</manifest></updatecheck></app></response>";
+ tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable;
+ tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+
+ ASSERT_TRUE(TestUpdateCheck());
EXPECT_FALSE(response.update_exists);
}
// Test that all the values in the response are parsed in a normal update
// response.
TEST_F(OmahaRequestActionTest, ValidUpdateTest) {
- OmahaResponse response;
fake_update_response_.deadline = "20101020";
- ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
- -1,
- false, // ping_only
- ErrorCode::kSuccess,
- metrics::CheckResult::kUpdateAvailable,
- metrics::CheckReaction::kUpdating,
- metrics::DownloadErrorCode::kUnset,
- &response,
- nullptr));
+ tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+
+ ASSERT_TRUE(TestUpdateCheck());
+
EXPECT_TRUE(response.update_exists);
EXPECT_EQ(fake_update_response_.version, response.version);
EXPECT_EQ("", response.system_version);
@@ -664,17 +668,11 @@
}
TEST_F(OmahaRequestActionTest, MultiPackageUpdateTest) {
- OmahaResponse response;
fake_update_response_.multi_package = true;
- ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
- -1,
- false, // ping_only
- ErrorCode::kSuccess,
- metrics::CheckResult::kUpdateAvailable,
- metrics::CheckReaction::kUpdating,
- metrics::DownloadErrorCode::kUnset,
- &response,
- nullptr));
+ tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+
+ ASSERT_TRUE(TestUpdateCheck());
+
EXPECT_TRUE(response.update_exists);
EXPECT_EQ(fake_update_response_.version, response.version);
EXPECT_EQ(fake_update_response_.GetPayloadUrl(),
@@ -693,17 +691,11 @@
}
TEST_F(OmahaRequestActionTest, MultiAppUpdateTest) {
- OmahaResponse response;
fake_update_response_.multi_app = true;
- ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
- -1,
- false, // ping_only
- ErrorCode::kSuccess,
- metrics::CheckResult::kUpdateAvailable,
- metrics::CheckReaction::kUpdating,
- metrics::DownloadErrorCode::kUnset,
- &response,
- nullptr));
+ tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+
+ ASSERT_TRUE(TestUpdateCheck());
+
EXPECT_TRUE(response.update_exists);
EXPECT_EQ(fake_update_response_.version, response.version);
EXPECT_EQ(fake_update_response_.GetPayloadUrl(),
@@ -722,20 +714,13 @@
}
TEST_F(OmahaRequestActionTest, MultiAppAndSystemUpdateTest) {
- OmahaResponse response;
fake_update_response_.multi_app = true;
- // trigger the lining up of the app and system versions
+ // Trigger the lining up of the app and system versions.
request_params_.set_system_app_id(fake_update_response_.app_id2);
+ tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
- ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
- -1,
- false, // ping_only
- ErrorCode::kSuccess,
- metrics::CheckResult::kUpdateAvailable,
- metrics::CheckReaction::kUpdating,
- metrics::DownloadErrorCode::kUnset,
- &response,
- nullptr));
+ ASSERT_TRUE(TestUpdateCheck());
+
EXPECT_TRUE(response.update_exists);
EXPECT_EQ(fake_update_response_.version, response.version);
EXPECT_EQ(fake_update_response_.version2, response.system_version);
@@ -755,18 +740,12 @@
}
TEST_F(OmahaRequestActionTest, MultiAppPartialUpdateTest) {
- OmahaResponse response;
fake_update_response_.multi_app = true;
fake_update_response_.multi_app_self_update = true;
- ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
- -1,
- false, // ping_only
- ErrorCode::kSuccess,
- metrics::CheckResult::kUpdateAvailable,
- metrics::CheckReaction::kUpdating,
- metrics::DownloadErrorCode::kUnset,
- &response,
- nullptr));
+ tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+
+ ASSERT_TRUE(TestUpdateCheck());
+
EXPECT_TRUE(response.update_exists);
EXPECT_EQ(fake_update_response_.version, response.version);
EXPECT_EQ("", response.system_version);
@@ -783,18 +762,12 @@
}
TEST_F(OmahaRequestActionTest, MultiAppMultiPackageUpdateTest) {
- OmahaResponse response;
fake_update_response_.multi_app = true;
fake_update_response_.multi_package = true;
- ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
- -1,
- false, // ping_only
- ErrorCode::kSuccess,
- metrics::CheckResult::kUpdateAvailable,
- metrics::CheckReaction::kUpdating,
- metrics::DownloadErrorCode::kUnset,
- &response,
- nullptr));
+ tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+
+ ASSERT_TRUE(TestUpdateCheck());
+
EXPECT_TRUE(response.update_exists);
EXPECT_EQ(fake_update_response_.version, response.version);
EXPECT_EQ("", response.system_version);
@@ -820,55 +793,42 @@
}
TEST_F(OmahaRequestActionTest, PowerwashTest) {
- OmahaResponse response;
fake_update_response_.powerwash = true;
- ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
- -1,
- false, // ping_only
- ErrorCode::kSuccess,
- metrics::CheckResult::kUpdateAvailable,
- metrics::CheckReaction::kUpdating,
- metrics::DownloadErrorCode::kUnset,
- &response,
- nullptr));
+ tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+
+ ASSERT_TRUE(TestUpdateCheck());
+
EXPECT_TRUE(response.update_exists);
EXPECT_TRUE(response.powerwash_required);
}
TEST_F(OmahaRequestActionTest, ExtraHeadersSentInteractiveTest) {
- OmahaResponse response;
request_params_.set_interactive(true);
test_http_fetcher_headers_ = true;
- ASSERT_FALSE(TestUpdateCheck("invalid xml>",
- -1,
- false, // ping_only
- ErrorCode::kOmahaRequestXMLParseError,
- metrics::CheckResult::kParsingError,
- metrics::CheckReaction::kUnset,
- metrics::DownloadErrorCode::kUnset,
- &response,
- nullptr));
+ tuc_params_.http_response = "invalid xml>";
+ tuc_params_.expected_code = ErrorCode::kOmahaRequestXMLParseError;
+ tuc_params_.expected_check_result = metrics::CheckResult::kParsingError;
+ tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+
+ ASSERT_FALSE(TestUpdateCheck());
+
EXPECT_FALSE(response.update_exists);
}
TEST_F(OmahaRequestActionTest, ExtraHeadersSentNoInteractiveTest) {
- OmahaResponse response;
request_params_.set_interactive(false);
test_http_fetcher_headers_ = true;
- ASSERT_FALSE(TestUpdateCheck("invalid xml>",
- -1,
- false, // ping_only
- ErrorCode::kOmahaRequestXMLParseError,
- metrics::CheckResult::kParsingError,
- metrics::CheckReaction::kUnset,
- metrics::DownloadErrorCode::kUnset,
- &response,
- nullptr));
+ tuc_params_.http_response = "invalid xml>";
+ tuc_params_.expected_code = ErrorCode::kOmahaRequestXMLParseError;
+ tuc_params_.expected_check_result = metrics::CheckResult::kParsingError;
+ tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+
+ ASSERT_FALSE(TestUpdateCheck());
+
EXPECT_FALSE(response.update_exists);
}
TEST_F(OmahaRequestActionTest, ValidUpdateBlockedByConnection) {
- OmahaResponse response;
// Set up a connection manager that doesn't allow a valid update over
// the current ethernet connection.
MockConnectionManager mock_cm;
@@ -881,24 +841,19 @@
EXPECT_CALL(mock_cm, IsUpdateAllowedOver(ConnectionType::kEthernet, _))
.WillRepeatedly(Return(false));
- ASSERT_FALSE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
- -1,
- false, // ping_only
- ErrorCode::kOmahaUpdateIgnoredPerPolicy,
- metrics::CheckResult::kUpdateAvailable,
- metrics::CheckReaction::kIgnored,
- metrics::DownloadErrorCode::kUnset,
- &response,
- nullptr));
+ tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+ tuc_params_.expected_code = ErrorCode::kOmahaUpdateIgnoredPerPolicy;
+ tuc_params_.expected_check_reaction = metrics::CheckReaction::kIgnored;
+
+ ASSERT_FALSE(TestUpdateCheck());
+
EXPECT_FALSE(response.update_exists);
}
TEST_F(OmahaRequestActionTest, ValidUpdateOverCellularAllowedByDevicePolicy) {
// This test tests that update over cellular is allowed as device policy
// says yes.
- OmahaResponse response;
MockConnectionManager mock_cm;
-
fake_system_state_.set_connection_manager(&mock_cm);
EXPECT_CALL(mock_cm, GetConnectionProperties(_, _))
@@ -910,24 +865,17 @@
EXPECT_CALL(mock_cm, IsUpdateAllowedOver(ConnectionType::kCellular, _))
.WillRepeatedly(Return(true));
- ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
- -1,
- false, // ping_only
- ErrorCode::kSuccess,
- metrics::CheckResult::kUpdateAvailable,
- metrics::CheckReaction::kUpdating,
- metrics::DownloadErrorCode::kUnset,
- &response,
- nullptr));
+ tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+
+ ASSERT_TRUE(TestUpdateCheck());
+
EXPECT_TRUE(response.update_exists);
}
TEST_F(OmahaRequestActionTest, ValidUpdateOverCellularBlockedByDevicePolicy) {
// This test tests that update over cellular is blocked as device policy
// says no.
- OmahaResponse response;
MockConnectionManager mock_cm;
-
fake_system_state_.set_connection_manager(&mock_cm);
EXPECT_CALL(mock_cm, GetConnectionProperties(_, _))
@@ -939,15 +887,12 @@
EXPECT_CALL(mock_cm, IsUpdateAllowedOver(ConnectionType::kCellular, _))
.WillRepeatedly(Return(false));
- ASSERT_FALSE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
- -1,
- false, // ping_only
- ErrorCode::kOmahaUpdateIgnoredPerPolicy,
- metrics::CheckResult::kUpdateAvailable,
- metrics::CheckReaction::kIgnored,
- metrics::DownloadErrorCode::kUnset,
- &response,
- nullptr));
+ tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+ tuc_params_.expected_code = ErrorCode::kOmahaUpdateIgnoredPerPolicy;
+ tuc_params_.expected_check_reaction = metrics::CheckReaction::kIgnored;
+
+ ASSERT_FALSE(TestUpdateCheck());
+
EXPECT_FALSE(response.update_exists);
}
@@ -955,9 +900,7 @@
ValidUpdateOverCellularAllowedByUserPermissionTrue) {
// This test tests that, when device policy is not set, update over cellular
// is allowed as permission for update over cellular is set to true.
- OmahaResponse response;
MockConnectionManager mock_cm;
-
fake_prefs_.SetBoolean(kPrefsUpdateOverCellularPermission, true);
fake_system_state_.set_connection_manager(&mock_cm);
@@ -970,15 +913,10 @@
EXPECT_CALL(mock_cm, IsUpdateAllowedOver(ConnectionType::kCellular, _))
.WillRepeatedly(Return(true));
- ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
- -1,
- false, // ping_only
- ErrorCode::kSuccess,
- metrics::CheckResult::kUpdateAvailable,
- metrics::CheckReaction::kUpdating,
- metrics::DownloadErrorCode::kUnset,
- &response,
- nullptr));
+ tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+
+ ASSERT_TRUE(TestUpdateCheck());
+
EXPECT_TRUE(response.update_exists);
}
@@ -987,7 +925,6 @@
// This test tests that, when device policy is not set and permission for
// update over cellular is set to false or does not exist, update over
// cellular is blocked as update target does not match the omaha response.
- OmahaResponse response;
MockConnectionManager mock_cm;
// A version different from the version in omaha response.
string diff_version = "99.99.99";
@@ -1008,15 +945,12 @@
EXPECT_CALL(mock_cm, IsUpdateAllowedOver(ConnectionType::kCellular, _))
.WillRepeatedly(Return(true));
- ASSERT_FALSE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
- -1,
- false, // ping_only
- ErrorCode::kOmahaUpdateIgnoredOverCellular,
- metrics::CheckResult::kUpdateAvailable,
- metrics::CheckReaction::kIgnored,
- metrics::DownloadErrorCode::kUnset,
- &response,
- nullptr));
+ tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+ tuc_params_.expected_code = ErrorCode::kOmahaUpdateIgnoredOverCellular;
+ tuc_params_.expected_check_reaction = metrics::CheckReaction::kIgnored;
+
+ ASSERT_FALSE(TestUpdateCheck());
+
EXPECT_FALSE(response.update_exists);
}
@@ -1025,7 +959,6 @@
// This test tests that, when device policy is not set and permission for
// update over cellular is set to false or does not exist, update over
// cellular is allowed as update target matches the omaha response.
- OmahaResponse response;
MockConnectionManager mock_cm;
// A version same as the version in omaha response.
string new_version = fake_update_response_.version;
@@ -1045,96 +978,67 @@
EXPECT_CALL(mock_cm, IsUpdateAllowedOver(ConnectionType::kCellular, _))
.WillRepeatedly(Return(true));
- ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
- -1,
- false, // ping_only
- ErrorCode::kSuccess,
- metrics::CheckResult::kUpdateAvailable,
- metrics::CheckReaction::kUpdating,
- metrics::DownloadErrorCode::kUnset,
- &response,
- nullptr));
+ tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+
+ ASSERT_TRUE(TestUpdateCheck());
+
EXPECT_TRUE(response.update_exists);
}
TEST_F(OmahaRequestActionTest, ValidUpdateBlockedByRollback) {
string rollback_version = "1234.0.0";
- OmahaResponse response;
-
MockPayloadState mock_payload_state;
fake_system_state_.set_payload_state(&mock_payload_state);
+ fake_update_response_.version = rollback_version;
+ tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+ tuc_params_.expected_code = ErrorCode::kOmahaUpdateIgnoredPerPolicy;
+ tuc_params_.expected_check_reaction = metrics::CheckReaction::kIgnored;
EXPECT_CALL(mock_payload_state, GetRollbackVersion())
.WillRepeatedly(Return(rollback_version));
- fake_update_response_.version = rollback_version;
- ASSERT_FALSE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
- -1,
- false, // ping_only
- ErrorCode::kOmahaUpdateIgnoredPerPolicy,
- metrics::CheckResult::kUpdateAvailable,
- metrics::CheckReaction::kIgnored,
- metrics::DownloadErrorCode::kUnset,
- &response,
- nullptr));
+ ASSERT_FALSE(TestUpdateCheck());
+
EXPECT_FALSE(response.update_exists);
}
// Verify that update checks called during OOBE will not try to download an
// update if the response doesn't include the deadline field.
TEST_F(OmahaRequestActionTest, SkipNonCriticalUpdatesBeforeOOBE) {
- OmahaResponse response;
fake_system_state_.fake_hardware()->UnsetIsOOBEComplete();
+ tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+ tuc_params_.expected_code = ErrorCode::kNonCriticalUpdateInOOBE;
+ tuc_params_.expected_check_result = metrics::CheckResult::kParsingError;
+ tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
// TODO(senj): set better default value for metrics::checkresult in
// OmahaRequestAction::ActionCompleted.
- ASSERT_FALSE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
- -1,
- false, // ping_only
- ErrorCode::kNonCriticalUpdateInOOBE,
- metrics::CheckResult::kParsingError,
- metrics::CheckReaction::kUnset,
- metrics::DownloadErrorCode::kUnset,
- &response,
- nullptr));
+ ASSERT_FALSE(TestUpdateCheck());
+
EXPECT_FALSE(response.update_exists);
}
// Verify that the IsOOBEComplete() value is ignored when the OOBE flow is not
// enabled.
TEST_F(OmahaRequestActionTest, SkipNonCriticalUpdatesBeforeOOBEDisabled) {
- OmahaResponse response;
fake_system_state_.fake_hardware()->UnsetIsOOBEComplete();
fake_system_state_.fake_hardware()->SetIsOOBEEnabled(false);
+ tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
- ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
- -1,
- false, // ping_only
- ErrorCode::kSuccess,
- metrics::CheckResult::kUpdateAvailable,
- metrics::CheckReaction::kUpdating,
- metrics::DownloadErrorCode::kUnset,
- &response,
- nullptr));
+ ASSERT_TRUE(TestUpdateCheck());
+
EXPECT_TRUE(response.update_exists);
}
// Verify that update checks called during OOBE will still try to download an
// update if the response includes the deadline field.
TEST_F(OmahaRequestActionTest, SkipNonCriticalUpdatesBeforeOOBEDeadlineSet) {
- OmahaResponse response;
fake_system_state_.fake_hardware()->UnsetIsOOBEComplete();
fake_update_response_.deadline = "20101020";
+ tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
- ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
- -1,
- false, // ping_only
- ErrorCode::kSuccess,
- metrics::CheckResult::kUpdateAvailable,
- metrics::CheckReaction::kUpdating,
- metrics::DownloadErrorCode::kUnset,
- &response,
- nullptr));
+ ASSERT_TRUE(TestUpdateCheck());
+
EXPECT_TRUE(response.update_exists);
}
@@ -1142,21 +1046,18 @@
// update if a rollback happened, even when the response includes the deadline
// field.
TEST_F(OmahaRequestActionTest, SkipNonCriticalUpdatesBeforeOOBERollback) {
- OmahaResponse response;
fake_system_state_.fake_hardware()->UnsetIsOOBEComplete();
fake_update_response_.deadline = "20101020";
+ tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+ tuc_params_.expected_code = ErrorCode::kNonCriticalUpdateInOOBE;
+ tuc_params_.expected_check_result = metrics::CheckResult::kParsingError;
+ tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+
EXPECT_CALL(*(fake_system_state_.mock_payload_state()), GetRollbackHappened())
.WillOnce(Return(true));
- ASSERT_FALSE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
- -1,
- false, // ping_only
- ErrorCode::kNonCriticalUpdateInOOBE,
- metrics::CheckResult::kParsingError,
- metrics::CheckReaction::kUnset,
- metrics::DownloadErrorCode::kUnset,
- &response,
- nullptr));
+ ASSERT_FALSE(TestUpdateCheck());
+
EXPECT_FALSE(response.update_exists);
}
@@ -1166,11 +1067,14 @@
// kOmahaUpdateIgnoredOverCellular error in this case might cause undesired UX
// in OOBE (warning the user about an update that will be skipped).
TEST_F(OmahaRequestActionTest, SkipNonCriticalUpdatesInOOBEOverCellular) {
- OmahaResponse response;
fake_system_state_.fake_hardware()->UnsetIsOOBEComplete();
MockConnectionManager mock_cm;
fake_system_state_.set_connection_manager(&mock_cm);
+ tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+ tuc_params_.expected_code = ErrorCode::kNonCriticalUpdateInOOBE;
+ tuc_params_.expected_check_result = metrics::CheckResult::kParsingError;
+ tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
EXPECT_CALL(mock_cm, GetConnectionProperties(_, _))
.WillRepeatedly(DoAll(SetArgPointee<0>(ConnectionType::kCellular),
@@ -1179,111 +1083,77 @@
EXPECT_CALL(mock_cm, IsAllowedConnectionTypesForUpdateSet())
.WillRepeatedly(Return(false));
- ASSERT_FALSE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
- -1,
- false, // ping_only
- ErrorCode::kNonCriticalUpdateInOOBE,
- metrics::CheckResult::kParsingError,
- metrics::CheckReaction::kUnset,
- metrics::DownloadErrorCode::kUnset,
- &response,
- nullptr));
+ ASSERT_FALSE(TestUpdateCheck());
+
EXPECT_FALSE(response.update_exists);
}
TEST_F(OmahaRequestActionTest, WallClockBasedWaitAloneCausesScattering) {
- OmahaResponse response;
request_params_.set_wall_clock_based_wait_enabled(true);
request_params_.set_update_check_count_wait_enabled(false);
request_params_.set_waiting_period(TimeDelta::FromDays(2));
-
fake_system_state_.fake_clock()->SetWallclockTime(Time::Now());
+ tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+ tuc_params_.expected_code = ErrorCode::kOmahaUpdateDeferredPerPolicy;
+ tuc_params_.expected_check_reaction = metrics::CheckReaction::kDeferring;
- ASSERT_FALSE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
- -1,
- false, // ping_only
- ErrorCode::kOmahaUpdateDeferredPerPolicy,
- metrics::CheckResult::kUpdateAvailable,
- metrics::CheckReaction::kDeferring,
- metrics::DownloadErrorCode::kUnset,
- &response,
- nullptr));
+ ASSERT_FALSE(TestUpdateCheck());
+
EXPECT_FALSE(response.update_exists);
+}
+
+TEST_F(OmahaRequestActionTest,
+ WallClockBasedWaitAloneCausesScatteringInteractive) {
+ request_params_.set_wall_clock_based_wait_enabled(true);
+ request_params_.set_update_check_count_wait_enabled(false);
+ request_params_.set_waiting_period(TimeDelta::FromDays(2));
+ request_params_.set_interactive(true);
+ fake_system_state_.fake_clock()->SetWallclockTime(Time::Now());
+ tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
// Verify if we are interactive check we don't defer.
- request_params_.set_interactive(true);
- ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
- -1,
- false, // ping_only
- ErrorCode::kSuccess,
- metrics::CheckResult::kUpdateAvailable,
- metrics::CheckReaction::kUpdating,
- metrics::DownloadErrorCode::kUnset,
- &response,
- nullptr));
+ ASSERT_TRUE(TestUpdateCheck());
+
EXPECT_TRUE(response.update_exists);
}
TEST_F(OmahaRequestActionTest, NoWallClockBasedWaitCausesNoScattering) {
- OmahaResponse response;
request_params_.set_wall_clock_based_wait_enabled(false);
request_params_.set_waiting_period(TimeDelta::FromDays(2));
request_params_.set_update_check_count_wait_enabled(true);
request_params_.set_min_update_checks_needed(1);
request_params_.set_max_update_checks_allowed(8);
+ tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
- ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
- -1,
- false, // ping_only
- ErrorCode::kSuccess,
- metrics::CheckResult::kUpdateAvailable,
- metrics::CheckReaction::kUpdating,
- metrics::DownloadErrorCode::kUnset,
- &response,
- nullptr));
+ ASSERT_TRUE(TestUpdateCheck());
+
EXPECT_TRUE(response.update_exists);
}
TEST_F(OmahaRequestActionTest, ZeroMaxDaysToScatterCausesNoScattering) {
- OmahaResponse response;
request_params_.set_wall_clock_based_wait_enabled(true);
request_params_.set_waiting_period(TimeDelta::FromDays(2));
request_params_.set_update_check_count_wait_enabled(true);
request_params_.set_min_update_checks_needed(1);
request_params_.set_max_update_checks_allowed(8);
-
fake_update_response_.max_days_to_scatter = "0";
- ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
- -1,
- false, // ping_only
- ErrorCode::kSuccess,
- metrics::CheckResult::kUpdateAvailable,
- metrics::CheckReaction::kUpdating,
- metrics::DownloadErrorCode::kUnset,
- &response,
- nullptr));
+ tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+
+ ASSERT_TRUE(TestUpdateCheck());
+
EXPECT_TRUE(response.update_exists);
}
TEST_F(OmahaRequestActionTest, ZeroUpdateCheckCountCausesNoScattering) {
- OmahaResponse response;
request_params_.set_wall_clock_based_wait_enabled(true);
request_params_.set_waiting_period(TimeDelta());
request_params_.set_update_check_count_wait_enabled(true);
request_params_.set_min_update_checks_needed(0);
request_params_.set_max_update_checks_allowed(0);
-
fake_system_state_.fake_clock()->SetWallclockTime(Time::Now());
+ tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
- ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
- -1,
- false, // ping_only
- ErrorCode::kSuccess,
- metrics::CheckResult::kUpdateAvailable,
- metrics::CheckReaction::kUpdating,
- metrics::DownloadErrorCode::kUnset,
- &response,
- nullptr));
+ ASSERT_TRUE(TestUpdateCheck());
int64_t count;
ASSERT_TRUE(fake_prefs_.GetInt64(kPrefsUpdateCheckCount, &count));
@@ -1292,141 +1162,118 @@
}
TEST_F(OmahaRequestActionTest, NonZeroUpdateCheckCountCausesScattering) {
- OmahaResponse response;
request_params_.set_wall_clock_based_wait_enabled(true);
request_params_.set_waiting_period(TimeDelta());
request_params_.set_update_check_count_wait_enabled(true);
request_params_.set_min_update_checks_needed(1);
request_params_.set_max_update_checks_allowed(8);
-
fake_system_state_.fake_clock()->SetWallclockTime(Time::Now());
+ tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+ tuc_params_.expected_code = ErrorCode::kOmahaUpdateDeferredPerPolicy;
+ tuc_params_.expected_check_reaction = metrics::CheckReaction::kDeferring;
- ASSERT_FALSE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
- -1,
- false, // ping_only
- ErrorCode::kOmahaUpdateDeferredPerPolicy,
- metrics::CheckResult::kUpdateAvailable,
- metrics::CheckReaction::kDeferring,
- metrics::DownloadErrorCode::kUnset,
- &response,
- nullptr));
+ ASSERT_FALSE(TestUpdateCheck());
int64_t count;
ASSERT_TRUE(fake_prefs_.GetInt64(kPrefsUpdateCheckCount, &count));
ASSERT_GT(count, 0);
EXPECT_FALSE(response.update_exists);
-
- // Verify if we are interactive check we don't defer.
- request_params_.set_interactive(true);
- ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
- -1,
- false, // ping_only
- ErrorCode::kSuccess,
- metrics::CheckResult::kUpdateAvailable,
- metrics::CheckReaction::kUpdating,
- metrics::DownloadErrorCode::kUnset,
- &response,
- nullptr));
- EXPECT_TRUE(response.update_exists);
}
-TEST_F(OmahaRequestActionTest, ExistingUpdateCheckCountCausesScattering) {
- OmahaResponse response;
+TEST_F(OmahaRequestActionTest,
+ NonZeroUpdateCheckCountCausesScatteringInteractive) {
request_params_.set_wall_clock_based_wait_enabled(true);
request_params_.set_waiting_period(TimeDelta());
request_params_.set_update_check_count_wait_enabled(true);
request_params_.set_min_update_checks_needed(1);
request_params_.set_max_update_checks_allowed(8);
-
+ request_params_.set_interactive(true);
fake_system_state_.fake_clock()->SetWallclockTime(Time::Now());
+ tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+
+ // Verify if we are interactive check we don't defer.
+ ASSERT_TRUE(TestUpdateCheck());
+
+ EXPECT_TRUE(response.update_exists);
+}
+
+TEST_F(OmahaRequestActionTest, ExistingUpdateCheckCountCausesScattering) {
+ request_params_.set_wall_clock_based_wait_enabled(true);
+ request_params_.set_waiting_period(TimeDelta());
+ request_params_.set_update_check_count_wait_enabled(true);
+ request_params_.set_min_update_checks_needed(1);
+ request_params_.set_max_update_checks_allowed(8);
+ fake_system_state_.fake_clock()->SetWallclockTime(Time::Now());
+ tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+ tuc_params_.expected_code = ErrorCode::kOmahaUpdateDeferredPerPolicy;
+ tuc_params_.expected_check_reaction = metrics::CheckReaction::kDeferring;
ASSERT_TRUE(fake_prefs_.SetInt64(kPrefsUpdateCheckCount, 5));
-
- ASSERT_FALSE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
- -1,
- false, // ping_only
- ErrorCode::kOmahaUpdateDeferredPerPolicy,
- metrics::CheckResult::kUpdateAvailable,
- metrics::CheckReaction::kDeferring,
- metrics::DownloadErrorCode::kUnset,
- &response,
- nullptr));
+ ASSERT_FALSE(TestUpdateCheck());
int64_t count;
ASSERT_TRUE(fake_prefs_.GetInt64(kPrefsUpdateCheckCount, &count));
- // count remains the same, as the decrementing happens in update_attempter
+ // |count| remains the same, as the decrementing happens in update_attempter
// which this test doesn't exercise.
ASSERT_EQ(count, 5);
EXPECT_FALSE(response.update_exists);
+}
+
+TEST_F(OmahaRequestActionTest,
+ ExistingUpdateCheckCountCausesScatteringInteractive) {
+ request_params_.set_wall_clock_based_wait_enabled(true);
+ request_params_.set_waiting_period(TimeDelta());
+ request_params_.set_update_check_count_wait_enabled(true);
+ request_params_.set_min_update_checks_needed(1);
+ request_params_.set_max_update_checks_allowed(8);
+ request_params_.set_interactive(true);
+ fake_system_state_.fake_clock()->SetWallclockTime(Time::Now());
+ tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+
+ ASSERT_TRUE(fake_prefs_.SetInt64(kPrefsUpdateCheckCount, 5));
// Verify if we are interactive check we don't defer.
- request_params_.set_interactive(true);
- ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
- -1,
- false, // ping_only
- ErrorCode::kSuccess,
- metrics::CheckResult::kUpdateAvailable,
- metrics::CheckReaction::kUpdating,
- metrics::DownloadErrorCode::kUnset,
- &response,
- nullptr));
+ ASSERT_TRUE(TestUpdateCheck());
EXPECT_TRUE(response.update_exists);
}
TEST_F(OmahaRequestActionTest, StagingTurnedOnCausesScattering) {
// If staging is on, the value for max days to scatter should be ignored, and
// staging's scatter value should be used.
- OmahaResponse response;
request_params_.set_wall_clock_based_wait_enabled(true);
request_params_.set_waiting_period(TimeDelta::FromDays(6));
request_params_.set_update_check_count_wait_enabled(false);
-
fake_system_state_.fake_clock()->SetWallclockTime(Time::Now());
ASSERT_TRUE(fake_prefs_.SetInt64(kPrefsWallClockStagingWaitPeriod, 6));
// This should not prevent scattering due to staging.
fake_update_response_.max_days_to_scatter = "0";
- ASSERT_FALSE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
- -1,
- false, // ping_only
- ErrorCode::kOmahaUpdateDeferredPerPolicy,
- metrics::CheckResult::kUpdateAvailable,
- metrics::CheckReaction::kDeferring,
- metrics::DownloadErrorCode::kUnset,
- &response,
- nullptr));
+ tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+ tuc_params_.expected_code = ErrorCode::kOmahaUpdateDeferredPerPolicy;
+ tuc_params_.expected_check_reaction = metrics::CheckReaction::kDeferring;
+
+ ASSERT_FALSE(TestUpdateCheck());
+
EXPECT_FALSE(response.update_exists);
// Interactive updates should not be affected.
request_params_.set_interactive(true);
- ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
- -1,
- false, // ping_only
- ErrorCode::kSuccess,
- metrics::CheckResult::kUpdateAvailable,
- metrics::CheckReaction::kUpdating,
- metrics::DownloadErrorCode::kUnset,
- &response,
- nullptr));
+ tuc_params_.expected_code = ErrorCode::kSuccess;
+ tuc_params_.expected_check_reaction = metrics::CheckReaction::kUpdating;
+
+ ASSERT_TRUE(TestUpdateCheck());
+
EXPECT_TRUE(response.update_exists);
}
TEST_F(OmahaRequestActionTest, CohortsArePersisted) {
- OmahaResponse response;
fake_update_response_.include_cohorts = true;
fake_update_response_.cohort = "s/154454/8479665";
fake_update_response_.cohorthint = "please-put-me-on-beta";
fake_update_response_.cohortname = "stable";
+ tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
- ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
- -1,
- false, // ping_only
- ErrorCode::kSuccess,
- metrics::CheckResult::kUpdateAvailable,
- metrics::CheckReaction::kUpdating,
- metrics::DownloadErrorCode::kUnset,
- &response,
- nullptr));
+ ASSERT_TRUE(TestUpdateCheck());
string value;
EXPECT_TRUE(fake_prefs_.GetString(kPrefsOmahaCohort, &value));
@@ -1440,7 +1287,6 @@
}
TEST_F(OmahaRequestActionTest, CohortsAreUpdated) {
- OmahaResponse response;
EXPECT_TRUE(fake_prefs_.SetString(kPrefsOmahaCohort, "old_value"));
EXPECT_TRUE(fake_prefs_.SetString(kPrefsOmahaCohortHint, "old_hint"));
EXPECT_TRUE(fake_prefs_.SetString(kPrefsOmahaCohortName, "old_name"));
@@ -1448,16 +1294,9 @@
fake_update_response_.cohort = "s/154454/8479665";
fake_update_response_.cohorthint = "please-put-me-on-beta";
fake_update_response_.cohortname = "";
+ tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
- ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
- -1,
- false, // ping_only
- ErrorCode::kSuccess,
- metrics::CheckResult::kUpdateAvailable,
- metrics::CheckReaction::kUpdating,
- metrics::DownloadErrorCode::kUnset,
- &response,
- nullptr));
+ ASSERT_TRUE(TestUpdateCheck());
string value;
EXPECT_TRUE(fake_prefs_.GetString(kPrefsOmahaCohort, &value));
@@ -1470,18 +1309,10 @@
}
TEST_F(OmahaRequestActionTest, CohortsAreNotModifiedWhenMissing) {
- OmahaResponse response;
- EXPECT_TRUE(fake_prefs_.SetString(kPrefsOmahaCohort, "old_value"));
+ tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
- ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
- -1,
- false, // ping_only
- ErrorCode::kSuccess,
- metrics::CheckResult::kUpdateAvailable,
- metrics::CheckReaction::kUpdating,
- metrics::DownloadErrorCode::kUnset,
- &response,
- nullptr));
+ EXPECT_TRUE(fake_prefs_.SetString(kPrefsOmahaCohort, "old_value"));
+ ASSERT_TRUE(TestUpdateCheck());
string value;
EXPECT_TRUE(fake_prefs_.GetString(kPrefsOmahaCohort, &value));
@@ -1492,21 +1323,15 @@
}
TEST_F(OmahaRequestActionTest, CohortsArePersistedWhenNoUpdate) {
- OmahaResponse response;
fake_update_response_.include_cohorts = true;
fake_update_response_.cohort = "s/154454/8479665";
fake_update_response_.cohorthint = "please-put-me-on-beta";
fake_update_response_.cohortname = "stable";
+ tuc_params_.http_response = fake_update_response_.GetNoUpdateResponse();
+ tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable;
+ tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
- ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetNoUpdateResponse(),
- -1,
- false, // ping_only
- ErrorCode::kSuccess,
- metrics::CheckResult::kNoUpdateAvailable,
- metrics::CheckReaction::kUnset,
- metrics::DownloadErrorCode::kUnset,
- &response,
- nullptr));
+ ASSERT_TRUE(TestUpdateCheck());
string value;
EXPECT_TRUE(fake_prefs_.GetString(kPrefsOmahaCohort, &value));
@@ -1520,22 +1345,14 @@
}
TEST_F(OmahaRequestActionTest, MultiAppCohortTest) {
- OmahaResponse response;
fake_update_response_.multi_app = true;
fake_update_response_.include_cohorts = true;
fake_update_response_.cohort = "s/154454/8479665";
fake_update_response_.cohorthint = "please-put-me-on-beta";
fake_update_response_.cohortname = "stable";
+ tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
- ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
- -1,
- false, // ping_only
- ErrorCode::kSuccess,
- metrics::CheckResult::kUpdateAvailable,
- metrics::CheckReaction::kUpdating,
- metrics::DownloadErrorCode::kUnset,
- &response,
- nullptr));
+ ASSERT_TRUE(TestUpdateCheck());
string value;
EXPECT_TRUE(fake_prefs_.GetString(kPrefsOmahaCohort, &value));
@@ -1550,7 +1367,6 @@
TEST_F(OmahaRequestActionTest, NoOutputPipeTest) {
const string http_response(fake_update_response_.GetNoUpdateResponse());
-
brillo::FakeMessageLoop loop(nullptr);
loop.SetAsCurrent();
@@ -1559,7 +1375,8 @@
nullptr,
std::make_unique<MockHttpFetcher>(
http_response.data(), http_response.size(), nullptr),
- false);
+ false,
+ "");
ActionProcessor processor;
processor.set_delegate(&delegate_);
processor.EnqueueAction(std::move(action));
@@ -1573,92 +1390,71 @@
}
TEST_F(OmahaRequestActionTest, InvalidXmlTest) {
- OmahaResponse response;
- ASSERT_FALSE(TestUpdateCheck("invalid xml>",
- -1,
- false, // ping_only
- ErrorCode::kOmahaRequestXMLParseError,
- metrics::CheckResult::kParsingError,
- metrics::CheckReaction::kUnset,
- metrics::DownloadErrorCode::kUnset,
- &response,
- nullptr));
+ tuc_params_.http_response = "invalid xml>";
+ tuc_params_.expected_code = ErrorCode::kOmahaRequestXMLParseError;
+ tuc_params_.expected_check_result = metrics::CheckResult::kParsingError;
+ tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+
+ ASSERT_FALSE(TestUpdateCheck());
EXPECT_FALSE(response.update_exists);
}
TEST_F(OmahaRequestActionTest, EmptyResponseTest) {
- OmahaResponse response;
- ASSERT_FALSE(TestUpdateCheck("",
- -1,
- false, // ping_only
- ErrorCode::kOmahaRequestEmptyResponseError,
- metrics::CheckResult::kParsingError,
- metrics::CheckReaction::kUnset,
- metrics::DownloadErrorCode::kUnset,
- &response,
- nullptr));
+ tuc_params_.expected_code = ErrorCode::kOmahaRequestEmptyResponseError;
+ tuc_params_.expected_check_result = metrics::CheckResult::kParsingError;
+ tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+
+ ASSERT_FALSE(TestUpdateCheck());
EXPECT_FALSE(response.update_exists);
}
TEST_F(OmahaRequestActionTest, MissingStatusTest) {
- OmahaResponse response;
- ASSERT_FALSE(TestUpdateCheck(
+ tuc_params_.http_response =
"<?xml version=\"1.0\" encoding=\"UTF-8\"?><response protocol=\"3.0\">"
"<daystart elapsed_seconds=\"100\"/>"
"<app appid=\"foo\" status=\"ok\">"
"<ping status=\"ok\"/>"
- "<updatecheck/></app></response>",
- -1,
- false, // ping_only
- ErrorCode::kOmahaResponseInvalid,
- metrics::CheckResult::kParsingError,
- metrics::CheckReaction::kUnset,
- metrics::DownloadErrorCode::kUnset,
- &response,
- nullptr));
+ "<updatecheck/></app></response>";
+ tuc_params_.expected_code = ErrorCode::kOmahaResponseInvalid;
+ tuc_params_.expected_check_result = metrics::CheckResult::kParsingError;
+ tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+
+ ASSERT_FALSE(TestUpdateCheck());
EXPECT_FALSE(response.update_exists);
}
TEST_F(OmahaRequestActionTest, InvalidStatusTest) {
- OmahaResponse response;
- ASSERT_FALSE(TestUpdateCheck(
+ tuc_params_.http_response =
"<?xml version=\"1.0\" encoding=\"UTF-8\"?><response protocol=\"3.0\">"
"<daystart elapsed_seconds=\"100\"/>"
"<app appid=\"foo\" status=\"ok\">"
"<ping status=\"ok\"/>"
- "<updatecheck status=\"InvalidStatusTest\"/></app></response>",
- -1,
- false, // ping_only
- ErrorCode::kOmahaResponseInvalid,
- metrics::CheckResult::kParsingError,
- metrics::CheckReaction::kUnset,
- metrics::DownloadErrorCode::kUnset,
- &response,
- nullptr));
+ "<updatecheck status=\"InvalidStatusTest\"/></app></response>";
+ tuc_params_.expected_code = ErrorCode::kOmahaResponseInvalid;
+ tuc_params_.expected_check_result = metrics::CheckResult::kParsingError;
+ tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+
+ ASSERT_FALSE(TestUpdateCheck());
EXPECT_FALSE(response.update_exists);
}
TEST_F(OmahaRequestActionTest, MissingNodesetTest) {
- OmahaResponse response;
- ASSERT_FALSE(TestUpdateCheck(
+ tuc_params_.http_response =
"<?xml version=\"1.0\" encoding=\"UTF-8\"?><response protocol=\"3.0\">"
"<daystart elapsed_seconds=\"100\"/>"
"<app appid=\"foo\" status=\"ok\">"
"<ping status=\"ok\"/>"
- "</app></response>",
- -1,
- false, // ping_only
- ErrorCode::kOmahaResponseInvalid,
- metrics::CheckResult::kParsingError,
- metrics::CheckReaction::kUnset,
- metrics::DownloadErrorCode::kUnset,
- &response,
- nullptr));
+ "</app></response>";
+ tuc_params_.expected_code = ErrorCode::kOmahaResponseInvalid;
+ tuc_params_.expected_check_result = metrics::CheckResult::kParsingError;
+ tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+
+ ASSERT_FALSE(TestUpdateCheck());
EXPECT_FALSE(response.update_exists);
}
TEST_F(OmahaRequestActionTest, MissingFieldTest) {
- string input_response =
+ tuc_params_.http_response =
"<?xml version=\"1.0\" encoding=\"UTF-8\"?><response protocol=\"3.0\">"
"<daystart elapsed_seconds=\"100\"/>"
// the appid needs to match that in the request params
@@ -1675,18 +1471,9 @@
"IsDeltaPayload=\"false\" "
"sha256=\"not-used\" "
"/></actions></manifest></updatecheck></app></response>";
- LOG(INFO) << "Input Response = " << input_response;
- OmahaResponse response;
- ASSERT_TRUE(TestUpdateCheck(input_response,
- -1,
- false, // ping_only
- ErrorCode::kSuccess,
- metrics::CheckResult::kUpdateAvailable,
- metrics::CheckReaction::kUpdating,
- metrics::DownloadErrorCode::kUnset,
- &response,
- nullptr));
+ ASSERT_TRUE(TestUpdateCheck());
+
EXPECT_TRUE(response.update_exists);
EXPECT_EQ("10.2.3.4", response.version);
EXPECT_EQ("http://missing/field/test/f",
@@ -1723,7 +1510,8 @@
nullptr,
std::make_unique<MockHttpFetcher>(
http_response.data(), http_response.size(), nullptr),
- false);
+ false,
+ "");
TerminateEarlyTestProcessorDelegate delegate;
ActionProcessor processor;
processor.set_delegate(&delegate);
@@ -1734,31 +1522,8 @@
EXPECT_FALSE(loop.PendingTasks());
}
-TEST_F(OmahaRequestActionTest, XmlEncodeTest) {
- string output;
- EXPECT_TRUE(XmlEncode("ab", &output));
- EXPECT_EQ("ab", output);
- EXPECT_TRUE(XmlEncode("a<b", &output));
- EXPECT_EQ("a&lt;b", output);
- EXPECT_TRUE(XmlEncode("<&>\"\'\\", &output));
- EXPECT_EQ("&lt;&amp;&gt;&quot;&apos;\\", output);
- EXPECT_TRUE(XmlEncode("&lt;&amp;&gt;", &output));
- EXPECT_EQ("&amp;lt;&amp;amp;&amp;gt;", output);
- // Check that unterminated UTF-8 strings are handled properly.
- EXPECT_FALSE(XmlEncode("\xc2", &output));
- // Fail with invalid ASCII-7 chars.
- EXPECT_FALSE(XmlEncode("This is an 'n' with a tilde: \xc3\xb1", &output));
-}
-
-TEST_F(OmahaRequestActionTest, XmlEncodeWithDefaultTest) {
- EXPECT_EQ("&lt;&amp;&gt;", XmlEncodeWithDefault("<&>", "something else"));
- EXPECT_EQ("<not escaped>", XmlEncodeWithDefault("\xc2", "<not escaped>"));
-}
-
TEST_F(OmahaRequestActionTest, XmlEncodeIsUsedForParams) {
- brillo::Blob post_data;
-
- // Make sure XML Encode is being called on the params
+ // Make sure XML Encode is being called on the params.
request_params_.set_os_sp("testtheservice_pack>");
request_params_.set_os_board("x86 generic<id");
request_params_.set_current_channel("unittest_track<");
@@ -1769,18 +1534,13 @@
fake_prefs_.SetString(
kPrefsOmahaCohortName,
base::JoinString(vector<string>(100, "My spoon is too big."), " "));
- OmahaResponse response;
- ASSERT_FALSE(TestUpdateCheck("invalid xml>",
- -1,
- false, // ping_only
- ErrorCode::kOmahaRequestXMLParseError,
- metrics::CheckResult::kParsingError,
- metrics::CheckReaction::kUnset,
- metrics::DownloadErrorCode::kUnset,
- &response,
- &post_data));
- // convert post_data to string
- string post_str(post_data.begin(), post_data.end());
+ tuc_params_.http_response = "invalid xml>";
+ tuc_params_.expected_code = ErrorCode::kOmahaRequestXMLParseError;
+ tuc_params_.expected_check_result = metrics::CheckResult::kParsingError;
+ tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+
+ ASSERT_FALSE(TestUpdateCheck());
+
EXPECT_NE(string::npos, post_str.find("testtheservice_pack&gt;"));
EXPECT_EQ(string::npos, post_str.find("testtheservice_pack>"));
EXPECT_NE(string::npos, post_str.find("x86 generic&lt;id"));
@@ -1798,19 +1558,12 @@
}
TEST_F(OmahaRequestActionTest, XmlDecodeTest) {
- OmahaResponse response;
fake_update_response_.deadline = "&lt;20110101";
fake_update_response_.more_info_url = "testthe&lt;url";
fake_update_response_.codebase = "testthe&amp;codebase/";
- ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
- -1,
- false, // ping_only
- ErrorCode::kSuccess,
- metrics::CheckResult::kUpdateAvailable,
- metrics::CheckReaction::kUpdating,
- metrics::DownloadErrorCode::kUnset,
- &response,
- nullptr));
+ tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+
+ ASSERT_TRUE(TestUpdateCheck());
EXPECT_EQ("testthe<url", response.more_info_url);
EXPECT_EQ("testthe&codebase/file.signed",
@@ -1819,43 +1572,29 @@
}
TEST_F(OmahaRequestActionTest, ParseIntTest) {
- OmahaResponse response;
// overflows int32_t:
fake_update_response_.size = 123123123123123ull;
- ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
- -1,
- false, // ping_only
- ErrorCode::kSuccess,
- metrics::CheckResult::kUpdateAvailable,
- metrics::CheckReaction::kUpdating,
- metrics::DownloadErrorCode::kUnset,
- &response,
- nullptr));
+ tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+ ASSERT_TRUE(TestUpdateCheck());
EXPECT_EQ(fake_update_response_.size, response.packages[0].size);
}
TEST_F(OmahaRequestActionTest, FormatUpdateCheckOutputTest) {
- brillo::Blob post_data;
NiceMock<MockPrefs> prefs;
fake_system_state_.set_prefs(&prefs);
+ tuc_params_.http_response = "invalid xml>";
+ tuc_params_.expected_code = ErrorCode::kOmahaRequestXMLParseError;
+ tuc_params_.expected_check_result = metrics::CheckResult::kParsingError;
+ tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
EXPECT_CALL(prefs, GetString(kPrefsPreviousVersion, _))
.WillOnce(DoAll(SetArgPointee<1>(string("")), Return(true)));
// An existing but empty previous version means that we didn't reboot to a new
// update, therefore, no need to update the previous version.
EXPECT_CALL(prefs, SetString(kPrefsPreviousVersion, _)).Times(0);
- ASSERT_FALSE(TestUpdateCheck("invalid xml>",
- -1,
- false, // ping_only
- ErrorCode::kOmahaRequestXMLParseError,
- metrics::CheckResult::kParsingError,
- metrics::CheckReaction::kUnset,
- metrics::DownloadErrorCode::kUnset,
- nullptr, // response
- &post_data));
- // convert post_data to string
- string post_str(post_data.begin(), post_data.end());
+ ASSERT_FALSE(TestUpdateCheck());
+
EXPECT_NE(
post_str.find(" <ping active=\"1\" a=\"-1\" r=\"-1\"></ping>\n"
" <updatecheck></updatecheck>\n"),
@@ -1869,12 +1608,9 @@
}
TEST_F(OmahaRequestActionTest, FormatSuccessEventOutputTest) {
- brillo::Blob post_data;
TestEvent(new OmahaEvent(OmahaEvent::kTypeUpdateDownloadStarted),
- "invalid xml>",
- &post_data);
- // convert post_data to string
- string post_str(post_data.begin(), post_data.end());
+ "invalid xml>");
+
string expected_event = base::StringPrintf(
" <event eventtype=\"%d\" eventresult=\"%d\"></event>\n",
OmahaEvent::kTypeUpdateDownloadStarted,
@@ -1885,14 +1621,11 @@
}
TEST_F(OmahaRequestActionTest, FormatErrorEventOutputTest) {
- brillo::Blob post_data;
TestEvent(new OmahaEvent(OmahaEvent::kTypeDownloadComplete,
OmahaEvent::kResultError,
ErrorCode::kError),
- "invalid xml>",
- &post_data);
- // convert post_data to string
- string post_str(post_data.begin(), post_data.end());
+ "invalid xml>");
+
string expected_event = base::StringPrintf(
" <event eventtype=\"%d\" eventresult=\"%d\" "
"errorcode=\"%d\"></event>\n",
@@ -1910,7 +1643,8 @@
nullptr,
std::make_unique<MockHttpFetcher>(
http_response.data(), http_response.size(), nullptr),
- false);
+ false,
+ "");
EXPECT_FALSE(update_check_action.IsEvent());
OmahaRequestAction event_action(
@@ -1918,29 +1652,23 @@
new OmahaEvent(OmahaEvent::kTypeUpdateComplete),
std::make_unique<MockHttpFetcher>(
http_response.data(), http_response.size(), nullptr),
- false);
+ false,
+ "");
EXPECT_TRUE(event_action.IsEvent());
}
TEST_F(OmahaRequestActionTest, FormatDeltaOkayOutputTest) {
+ tuc_params_.http_response = "invalid xml>";
+ tuc_params_.expected_code = ErrorCode::kOmahaRequestXMLParseError;
+ tuc_params_.expected_check_result = metrics::CheckResult::kParsingError;
+ tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+
for (int i = 0; i < 2; i++) {
bool delta_okay = i == 1;
const char* delta_okay_str = delta_okay ? "true" : "false";
- brillo::Blob post_data;
-
request_params_.set_delta_okay(delta_okay);
- ASSERT_FALSE(TestUpdateCheck("invalid xml>",
- -1,
- false, // ping_only
- ErrorCode::kOmahaRequestXMLParseError,
- metrics::CheckResult::kParsingError,
- metrics::CheckReaction::kUnset,
- metrics::DownloadErrorCode::kUnset,
- nullptr,
- &post_data));
- // convert post_data to string
- string post_str(post_data.begin(), post_data.end());
+ ASSERT_FALSE(TestUpdateCheck());
EXPECT_NE(
post_str.find(base::StringPrintf(" delta_okay=\"%s\"", delta_okay_str)),
string::npos)
@@ -1949,25 +1677,17 @@
}
TEST_F(OmahaRequestActionTest, FormatInteractiveOutputTest) {
+ tuc_params_.http_response = "invalid xml>";
+ tuc_params_.expected_code = ErrorCode::kOmahaRequestXMLParseError;
+ tuc_params_.expected_check_result = metrics::CheckResult::kParsingError;
+ tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+
for (int i = 0; i < 2; i++) {
bool interactive = i == 1;
const char* interactive_str = interactive ? "ondemandupdate" : "scheduler";
- brillo::Blob post_data;
- FakeSystemState fake_system_state;
-
request_params_.set_interactive(interactive);
- ASSERT_FALSE(TestUpdateCheck("invalid xml>",
- -1,
- false, // ping_only
- ErrorCode::kOmahaRequestXMLParseError,
- metrics::CheckResult::kParsingError,
- metrics::CheckReaction::kUnset,
- metrics::DownloadErrorCode::kUnset,
- nullptr,
- &post_data));
- // convert post_data to string
- string post_str(post_data.begin(), post_data.end());
+ ASSERT_FALSE(TestUpdateCheck());
EXPECT_NE(post_str.find(
base::StringPrintf("installsource=\"%s\"", interactive_str)),
string::npos)
@@ -1976,25 +1696,17 @@
}
TEST_F(OmahaRequestActionTest, FormatTargetVersionPrefixOutputTest) {
+ tuc_params_.http_response = "invalid xml>";
+ tuc_params_.expected_code = ErrorCode::kOmahaRequestXMLParseError;
+ tuc_params_.expected_check_result = metrics::CheckResult::kParsingError;
+ tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+
for (int i = 0; i < 2; i++) {
bool target_version_set = i == 1;
const char* target_version_prefix = target_version_set ? "10032." : "";
- brillo::Blob post_data;
- FakeSystemState fake_system_state;
-
request_params_.set_target_version_prefix(target_version_prefix);
- ASSERT_FALSE(TestUpdateCheck("invalid xml>",
- -1,
- false, // ping_only
- ErrorCode::kOmahaRequestXMLParseError,
- metrics::CheckResult::kParsingError,
- metrics::CheckReaction::kUnset,
- metrics::DownloadErrorCode::kUnset,
- nullptr,
- &post_data));
- // convert post_data to string
- string post_str(post_data.begin(), post_data.end());
+ ASSERT_FALSE(TestUpdateCheck());
if (target_version_set) {
EXPECT_NE(post_str.find("<updatecheck targetversionprefix=\"10032.\">"),
string::npos)
@@ -2007,27 +1719,19 @@
}
TEST_F(OmahaRequestActionTest, FormatRollbackAllowedOutputTest) {
+ tuc_params_.http_response = "invalid xml>";
+ tuc_params_.expected_code = ErrorCode::kOmahaRequestXMLParseError;
+ tuc_params_.expected_check_result = metrics::CheckResult::kParsingError;
+ tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+
for (int i = 0; i < 4; i++) {
bool rollback_allowed = i / 2 == 0;
bool target_version_set = i % 2 == 0;
- brillo::Blob post_data;
- FakeSystemState fake_system_state;
-
request_params_.set_target_version_prefix(target_version_set ? "10032."
: "");
request_params_.set_rollback_allowed(rollback_allowed);
- ASSERT_FALSE(TestUpdateCheck("invalid xml>",
- -1,
- false, // ping_only
- ErrorCode::kOmahaRequestXMLParseError,
- metrics::CheckResult::kParsingError,
- metrics::CheckReaction::kUnset,
- metrics::DownloadErrorCode::kUnset,
- nullptr,
- &post_data));
- // convert post_data to string
- string post_str(post_data.begin(), post_data.end());
+ ASSERT_FALSE(TestUpdateCheck());
if (rollback_allowed && target_version_set) {
EXPECT_NE(post_str.find("rollback_allowed=\"true\""), string::npos)
<< "i = " << i;
@@ -2056,6 +1760,47 @@
EXPECT_EQ(ErrorCode::kError, error_event.error_code);
}
+TEST_F(OmahaRequestActionTest, DeviceQuickFixBuildTokenIsSetTest) {
+ // If DeviceQuickFixBuildToken value is set it takes precedence over pref
+ // value.
+ constexpr char autoupdate_token[] = "autoupdate_token>";
+ constexpr char xml_encoded_autoupdate_token[] = "autoupdate_token&gt;";
+ constexpr char omaha_cohort_hint[] = "cohort_hint";
+
+ tuc_params_.http_response = fake_update_response_.GetNoUpdateResponse();
+ tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable;
+ tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+ request_params_.set_autoupdate_token(autoupdate_token);
+ fake_prefs_.SetString(kPrefsOmahaCohortHint, omaha_cohort_hint);
+
+ ASSERT_TRUE(TestUpdateCheck());
+
+ EXPECT_NE(string::npos,
+ post_str.find("cohorthint=\"" +
+ string(xml_encoded_autoupdate_token) + "\""));
+ EXPECT_EQ(string::npos, post_str.find(autoupdate_token));
+ EXPECT_EQ(string::npos, post_str.find(omaha_cohort_hint));
+}
+
+TEST_F(OmahaRequestActionTest, DeviceQuickFixBuildTokenIsNotSetTest) {
+ // If DeviceQuickFixBuildToken is not set, pref value will be provided in
+ // cohorthint attribute.
+ constexpr char omaha_cohort_hint[] = "evil_string>";
+ constexpr char xml_encoded_cohort_hint[] = "evil_string&gt;";
+
+ tuc_params_.http_response = fake_update_response_.GetNoUpdateResponse();
+ tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable;
+ tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+ fake_prefs_.SetString(kPrefsOmahaCohortHint, omaha_cohort_hint);
+
+ ASSERT_TRUE(TestUpdateCheck());
+
+ EXPECT_NE(
+ string::npos,
+ post_str.find("cohorthint=\"" + string(xml_encoded_cohort_hint) + "\""));
+ EXPECT_EQ(string::npos, post_str.find(omaha_cohort_hint));
+}
+
void OmahaRequestActionTest::PingTest(bool ping_only) {
NiceMock<MockPrefs> prefs;
fake_system_state_.set_prefs(&prefs);
@@ -2073,17 +1818,14 @@
.WillOnce(DoAll(SetArgPointee<1>(six_days_ago), Return(true)));
EXPECT_CALL(prefs, GetInt64(kPrefsLastRollCallPingDay, _))
.WillOnce(DoAll(SetArgPointee<1>(five_days_ago), Return(true)));
- brillo::Blob post_data;
- ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetNoUpdateResponse(),
- -1,
- ping_only,
- ErrorCode::kSuccess,
- metrics::CheckResult::kNoUpdateAvailable,
- metrics::CheckReaction::kUnset,
- metrics::DownloadErrorCode::kUnset,
- nullptr,
- &post_data));
- string post_str(post_data.begin(), post_data.end());
+
+ tuc_params_.http_response = fake_update_response_.GetNoUpdateResponse();
+ tuc_params_.ping_only = ping_only;
+ tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable;
+ tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+
+ ASSERT_TRUE(TestUpdateCheck());
+
EXPECT_NE(post_str.find("<ping active=\"1\" a=\"6\" r=\"5\"></ping>"),
string::npos);
if (ping_only) {
@@ -2118,17 +1860,13 @@
.WillOnce(DoAll(SetArgPointee<1>(three_days_ago), Return(true)));
EXPECT_CALL(prefs, GetInt64(kPrefsLastRollCallPingDay, _))
.WillOnce(DoAll(SetArgPointee<1>(now), Return(true)));
- brillo::Blob post_data;
- ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetNoUpdateResponse(),
- -1,
- false, // ping_only
- ErrorCode::kSuccess,
- metrics::CheckResult::kNoUpdateAvailable,
- metrics::CheckReaction::kUnset,
- metrics::DownloadErrorCode::kUnset,
- nullptr,
- &post_data));
- string post_str(post_data.begin(), post_data.end());
+
+ tuc_params_.http_response = fake_update_response_.GetNoUpdateResponse();
+ tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable;
+ tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+
+ ASSERT_TRUE(TestUpdateCheck());
+
EXPECT_NE(post_str.find("<ping active=\"1\" a=\"3\"></ping>"), string::npos);
}
@@ -2147,17 +1885,13 @@
.WillOnce(DoAll(SetArgPointee<1>(now), Return(true)));
EXPECT_CALL(prefs, GetInt64(kPrefsLastRollCallPingDay, _))
.WillOnce(DoAll(SetArgPointee<1>(four_days_ago), Return(true)));
- brillo::Blob post_data;
- ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetNoUpdateResponse(),
- -1,
- false, // ping_only
- ErrorCode::kSuccess,
- metrics::CheckResult::kNoUpdateAvailable,
- metrics::CheckReaction::kUnset,
- metrics::DownloadErrorCode::kUnset,
- nullptr,
- &post_data));
- string post_str(post_data.begin(), post_data.end());
+
+ tuc_params_.http_response = fake_update_response_.GetNoUpdateResponse();
+ tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable;
+ tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+
+ ASSERT_TRUE(TestUpdateCheck());
+
EXPECT_NE(post_str.find("<ping active=\"1\" r=\"4\"></ping>\n"),
string::npos);
}
@@ -2182,17 +1916,13 @@
.WillOnce(Return(true));
EXPECT_CALL(prefs, SetInt64(kPrefsLastRollCallPingDay, _))
.WillOnce(Return(true));
- brillo::Blob post_data;
- ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetNoUpdateResponse(),
- -1,
- false, // ping_only
- ErrorCode::kSuccess,
- metrics::CheckResult::kNoUpdateAvailable,
- metrics::CheckReaction::kUnset,
- metrics::DownloadErrorCode::kUnset,
- nullptr,
- &post_data));
- string post_str(post_data.begin(), post_data.end());
+
+ tuc_params_.http_response = fake_update_response_.GetNoUpdateResponse();
+ tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable;
+ tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+
+ ASSERT_TRUE(TestUpdateCheck());
+
EXPECT_EQ(post_str.find("ping"), string::npos);
}
@@ -2207,17 +1937,14 @@
.WillOnce(DoAll(SetArgPointee<1>(now), Return(true)));
EXPECT_CALL(prefs, SetInt64(kPrefsLastActivePingDay, _)).Times(0);
EXPECT_CALL(prefs, SetInt64(kPrefsLastRollCallPingDay, _)).Times(0);
- brillo::Blob post_data;
- EXPECT_TRUE(TestUpdateCheck(fake_update_response_.GetNoUpdateResponse(),
- -1,
- true, // ping_only
- ErrorCode::kSuccess,
- metrics::CheckResult::kUnset,
- metrics::CheckReaction::kUnset,
- metrics::DownloadErrorCode::kUnset,
- nullptr,
- &post_data));
- EXPECT_EQ(0U, post_data.size());
+
+ tuc_params_.http_response = fake_update_response_.GetNoUpdateResponse();
+ tuc_params_.ping_only = true;
+ tuc_params_.expected_check_result = metrics::CheckResult::kUnset;
+ tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+
+ EXPECT_TRUE(TestUpdateCheck());
+ EXPECT_TRUE(post_str.empty());
}
TEST_F(OmahaRequestActionTest, BackInTimePingTest) {
@@ -2238,21 +1965,16 @@
.WillOnce(Return(true));
EXPECT_CALL(prefs, SetInt64(kPrefsLastRollCallPingDay, _))
.WillOnce(Return(true));
- brillo::Blob post_data;
- ASSERT_TRUE(
- TestUpdateCheck("<?xml version=\"1.0\" encoding=\"UTF-8\"?><response "
- "protocol=\"3.0\"><daystart elapsed_seconds=\"100\"/>"
- "<app appid=\"foo\" status=\"ok\"><ping status=\"ok\"/>"
- "<updatecheck status=\"noupdate\"/></app></response>",
- -1,
- false, // ping_only
- ErrorCode::kSuccess,
- metrics::CheckResult::kNoUpdateAvailable,
- metrics::CheckReaction::kUnset,
- metrics::DownloadErrorCode::kUnset,
- nullptr,
- &post_data));
- string post_str(post_data.begin(), post_data.end());
+
+ tuc_params_.http_response =
+ "<?xml version=\"1.0\" encoding=\"UTF-8\"?><response "
+ "protocol=\"3.0\"><daystart elapsed_seconds=\"100\"/>"
+ "<app appid=\"foo\" status=\"ok\"><ping status=\"ok\"/>"
+ "<updatecheck status=\"noupdate\"/></app></response>";
+ tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable;
+ tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+
+ ASSERT_TRUE(TestUpdateCheck());
EXPECT_EQ(post_str.find("ping"), string::npos);
}
@@ -2277,19 +1999,16 @@
SetInt64(kPrefsLastRollCallPingDay,
AllOf(Ge(midnight), Le(midnight_slack))))
.WillOnce(Return(true));
- ASSERT_TRUE(
- TestUpdateCheck("<?xml version=\"1.0\" encoding=\"UTF-8\"?><response "
- "protocol=\"3.0\"><daystart elapsed_seconds=\"200\"/>"
- "<app appid=\"foo\" status=\"ok\"><ping status=\"ok\"/>"
- "<updatecheck status=\"noupdate\"/></app></response>",
- -1,
- false, // ping_only
- ErrorCode::kSuccess,
- metrics::CheckResult::kNoUpdateAvailable,
- metrics::CheckReaction::kUnset,
- metrics::DownloadErrorCode::kUnset,
- nullptr,
- nullptr));
+
+ tuc_params_.http_response =
+ "<?xml version=\"1.0\" encoding=\"UTF-8\"?><response "
+ "protocol=\"3.0\"><daystart elapsed_seconds=\"200\"/>"
+ "<app appid=\"foo\" status=\"ok\"><ping status=\"ok\"/>"
+ "<updatecheck status=\"noupdate\"/></app></response>";
+ tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable;
+ tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+
+ ASSERT_TRUE(TestUpdateCheck());
}
TEST_F(OmahaRequestActionTest, NoElapsedSecondsTest) {
@@ -2299,19 +2018,16 @@
EXPECT_CALL(prefs, SetInt64(_, _)).Times(AnyNumber());
EXPECT_CALL(prefs, SetInt64(kPrefsLastActivePingDay, _)).Times(0);
EXPECT_CALL(prefs, SetInt64(kPrefsLastRollCallPingDay, _)).Times(0);
- ASSERT_TRUE(
- TestUpdateCheck("<?xml version=\"1.0\" encoding=\"UTF-8\"?><response "
- "protocol=\"3.0\"><daystart blah=\"200\"/>"
- "<app appid=\"foo\" status=\"ok\"><ping status=\"ok\"/>"
- "<updatecheck status=\"noupdate\"/></app></response>",
- -1,
- false, // ping_only
- ErrorCode::kSuccess,
- metrics::CheckResult::kNoUpdateAvailable,
- metrics::CheckReaction::kUnset,
- metrics::DownloadErrorCode::kUnset,
- nullptr,
- nullptr));
+
+ tuc_params_.http_response =
+ "<?xml version=\"1.0\" encoding=\"UTF-8\"?><response "
+ "protocol=\"3.0\"><daystart blah=\"200\"/>"
+ "<app appid=\"foo\" status=\"ok\"><ping status=\"ok\"/>"
+ "<updatecheck status=\"noupdate\"/></app></response>";
+ tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable;
+ tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+
+ ASSERT_TRUE(TestUpdateCheck());
}
TEST_F(OmahaRequestActionTest, BadElapsedSecondsTest) {
@@ -2321,97 +2037,61 @@
EXPECT_CALL(prefs, SetInt64(_, _)).Times(AnyNumber());
EXPECT_CALL(prefs, SetInt64(kPrefsLastActivePingDay, _)).Times(0);
EXPECT_CALL(prefs, SetInt64(kPrefsLastRollCallPingDay, _)).Times(0);
- ASSERT_TRUE(
- TestUpdateCheck("<?xml version=\"1.0\" encoding=\"UTF-8\"?><response "
- "protocol=\"3.0\"><daystart elapsed_seconds=\"x\"/>"
- "<app appid=\"foo\" status=\"ok\"><ping status=\"ok\"/>"
- "<updatecheck status=\"noupdate\"/></app></response>",
- -1,
- false, // ping_only
- ErrorCode::kSuccess,
- metrics::CheckResult::kNoUpdateAvailable,
- metrics::CheckReaction::kUnset,
- metrics::DownloadErrorCode::kUnset,
- nullptr,
- nullptr));
-}
-TEST_F(OmahaRequestActionTest, ParseUpdateCheckAttributesTest) {
- // Test that the "eol" flags is only parsed from the "_eol" attribute and not
- // the "eol" attribute.
- ASSERT_TRUE(
- TestUpdateCheck("<?xml version=\"1.0\" encoding=\"UTF-8\"?><response "
- "protocol=\"3.0\"><app appid=\"foo\" status=\"ok\">"
- "<ping status=\"ok\"/><updatecheck status=\"noupdate\" "
- "_eol=\"security-only\" eol=\"eol\" _foo=\"bar\"/>"
- "</app></response>",
- -1,
- false, // ping_only
- ErrorCode::kSuccess,
- metrics::CheckResult::kNoUpdateAvailable,
- metrics::CheckReaction::kUnset,
- metrics::DownloadErrorCode::kUnset,
- nullptr,
- nullptr));
- string eol_pref;
- EXPECT_TRUE(
- fake_system_state_.prefs()->GetString(kPrefsOmahaEolStatus, &eol_pref));
- // Note that the eol="eol" attribute should be ignored and the _eol should be
- // used instead.
- EXPECT_EQ("security-only", eol_pref);
+ tuc_params_.http_response =
+ "<?xml version=\"1.0\" encoding=\"UTF-8\"?><response "
+ "protocol=\"3.0\"><daystart elapsed_seconds=\"x\"/>"
+ "<app appid=\"foo\" status=\"ok\"><ping status=\"ok\"/>"
+ "<updatecheck status=\"noupdate\"/></app></response>";
+ tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable;
+ tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+
+ ASSERT_TRUE(TestUpdateCheck());
}
TEST_F(OmahaRequestActionTest, NoUniqueIDTest) {
- brillo::Blob post_data;
- ASSERT_FALSE(TestUpdateCheck("invalid xml>",
- -1,
- false, // ping_only
- ErrorCode::kOmahaRequestXMLParseError,
- metrics::CheckResult::kParsingError,
- metrics::CheckReaction::kUnset,
- metrics::DownloadErrorCode::kUnset,
- nullptr, // response
- &post_data));
- // convert post_data to string
- string post_str(post_data.begin(), post_data.end());
+ tuc_params_.http_response = "invalid xml>";
+ tuc_params_.expected_code = ErrorCode::kOmahaRequestXMLParseError;
+ tuc_params_.expected_check_result = metrics::CheckResult::kParsingError;
+ tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+
+ ASSERT_FALSE(TestUpdateCheck());
+
EXPECT_EQ(post_str.find("machineid="), string::npos);
EXPECT_EQ(post_str.find("userid="), string::npos);
}
TEST_F(OmahaRequestActionTest, NetworkFailureTest) {
- OmahaResponse response;
const int http_error_code =
static_cast<int>(ErrorCode::kOmahaRequestHTTPResponseBase) + 501;
- ASSERT_FALSE(TestUpdateCheck("",
- 501,
- false, // ping_only
- static_cast<ErrorCode>(http_error_code),
- metrics::CheckResult::kDownloadError,
- metrics::CheckReaction::kUnset,
- static_cast<metrics::DownloadErrorCode>(501),
- &response,
- nullptr));
+ tuc_params_.fail_http_response_code = 501;
+ tuc_params_.expected_code = static_cast<ErrorCode>(http_error_code);
+ tuc_params_.expected_check_result = metrics::CheckResult::kDownloadError;
+ tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+ tuc_params_.expected_download_error_code =
+ static_cast<metrics::DownloadErrorCode>(501);
+
+ ASSERT_FALSE(TestUpdateCheck());
+
EXPECT_FALSE(response.update_exists);
}
TEST_F(OmahaRequestActionTest, NetworkFailureBadHTTPCodeTest) {
- OmahaResponse response;
const int http_error_code =
static_cast<int>(ErrorCode::kOmahaRequestHTTPResponseBase) + 999;
- ASSERT_FALSE(TestUpdateCheck("",
- 1500,
- false, // ping_only
- static_cast<ErrorCode>(http_error_code),
- metrics::CheckResult::kDownloadError,
- metrics::CheckReaction::kUnset,
- metrics::DownloadErrorCode::kHttpStatusOther,
- &response,
- nullptr));
+
+ tuc_params_.fail_http_response_code = 1500;
+ tuc_params_.expected_code = static_cast<ErrorCode>(http_error_code);
+ tuc_params_.expected_check_result = metrics::CheckResult::kDownloadError;
+ tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+ tuc_params_.expected_download_error_code =
+ metrics::DownloadErrorCode::kHttpStatusOther;
+
+ ASSERT_FALSE(TestUpdateCheck());
EXPECT_FALSE(response.update_exists);
}
TEST_F(OmahaRequestActionTest, TestUpdateFirstSeenAtGetsPersistedFirstTime) {
- OmahaResponse response;
request_params_.set_wall_clock_based_wait_enabled(true);
request_params_.set_waiting_period(TimeDelta().FromDays(1));
request_params_.set_update_check_count_wait_enabled(false);
@@ -2419,15 +2099,12 @@
Time arbitrary_date;
ASSERT_TRUE(Time::FromString("6/4/1989", &arbitrary_date));
fake_system_state_.fake_clock()->SetWallclockTime(arbitrary_date);
- ASSERT_FALSE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
- -1,
- false, // ping_only
- ErrorCode::kOmahaUpdateDeferredPerPolicy,
- metrics::CheckResult::kUpdateAvailable,
- metrics::CheckReaction::kDeferring,
- metrics::DownloadErrorCode::kUnset,
- &response,
- nullptr));
+
+ tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+ tuc_params_.expected_code = ErrorCode::kOmahaUpdateDeferredPerPolicy;
+ tuc_params_.expected_check_reaction = metrics::CheckReaction::kDeferring;
+
+ ASSERT_FALSE(TestUpdateCheck());
int64_t timestamp = 0;
ASSERT_TRUE(fake_prefs_.GetInt64(kPrefsUpdateFirstSeenAt, &timestamp));
@@ -2436,20 +2113,14 @@
// Verify if we are interactive check we don't defer.
request_params_.set_interactive(true);
- ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
- -1,
- false, // ping_only
- ErrorCode::kSuccess,
- metrics::CheckResult::kUpdateAvailable,
- metrics::CheckReaction::kUpdating,
- metrics::DownloadErrorCode::kUnset,
- &response,
- nullptr));
+ tuc_params_.expected_code = ErrorCode::kSuccess;
+ tuc_params_.expected_check_reaction = metrics::CheckReaction::kUpdating;
+
+ ASSERT_TRUE(TestUpdateCheck());
EXPECT_TRUE(response.update_exists);
}
TEST_F(OmahaRequestActionTest, TestUpdateFirstSeenAtGetsUsedIfAlreadyPresent) {
- OmahaResponse response;
request_params_.set_wall_clock_based_wait_enabled(true);
request_params_.set_waiting_period(TimeDelta().FromDays(1));
request_params_.set_update_check_count_wait_enabled(false);
@@ -2460,16 +2131,10 @@
ASSERT_TRUE(
fake_prefs_.SetInt64(kPrefsUpdateFirstSeenAt, t1.ToInternalValue()));
fake_system_state_.fake_clock()->SetWallclockTime(t2);
- ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
- -1,
- false, // ping_only
- ErrorCode::kSuccess,
- metrics::CheckResult::kUpdateAvailable,
- metrics::CheckReaction::kUpdating,
- metrics::DownloadErrorCode::kUnset,
- &response,
- nullptr));
+ tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+
+ ASSERT_TRUE(TestUpdateCheck());
EXPECT_TRUE(response.update_exists);
// Make sure the timestamp t1 is unchanged showing that it was reused.
@@ -2483,7 +2148,6 @@
base::ScopedTempDir tempdir;
ASSERT_TRUE(tempdir.CreateUniqueTempDir());
- brillo::Blob post_data;
request_params_.set_root(tempdir.GetPath().value());
request_params_.set_app_id("{22222222-2222-2222-2222-222222222222}");
request_params_.set_app_version("1.2.3.4");
@@ -2493,17 +2157,14 @@
request_params_.SetTargetChannel("stable-channel", true, nullptr));
request_params_.UpdateDownloadChannel();
EXPECT_TRUE(request_params_.ShouldPowerwash());
- ASSERT_FALSE(TestUpdateCheck("invalid xml>",
- -1,
- false, // ping_only
- ErrorCode::kOmahaRequestXMLParseError,
- metrics::CheckResult::kParsingError,
- metrics::CheckReaction::kUnset,
- metrics::DownloadErrorCode::kUnset,
- nullptr, // response
- &post_data));
- // convert post_data to string
- string post_str(post_data.begin(), post_data.end());
+
+ tuc_params_.http_response = "invalid xml>";
+ tuc_params_.expected_code = ErrorCode::kOmahaRequestXMLParseError;
+ tuc_params_.expected_check_result = metrics::CheckResult::kParsingError;
+ tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+
+ ASSERT_FALSE(TestUpdateCheck());
+
EXPECT_NE(
string::npos,
post_str.find("appid=\"{22222222-2222-2222-2222-222222222222}\" "
@@ -2517,7 +2178,6 @@
base::ScopedTempDir tempdir;
ASSERT_TRUE(tempdir.CreateUniqueTempDir());
- brillo::Blob post_data;
request_params_.set_root(tempdir.GetPath().value());
request_params_.set_app_id("{11111111-1111-1111-1111-111111111111}");
request_params_.set_app_version("5.6.7.8");
@@ -2527,17 +2187,14 @@
request_params_.SetTargetChannel("canary-channel", false, nullptr));
request_params_.UpdateDownloadChannel();
EXPECT_FALSE(request_params_.ShouldPowerwash());
- ASSERT_FALSE(TestUpdateCheck("invalid xml>",
- -1,
- false, // ping_only
- ErrorCode::kOmahaRequestXMLParseError,
- metrics::CheckResult::kParsingError,
- metrics::CheckReaction::kUnset,
- metrics::DownloadErrorCode::kUnset,
- nullptr, // response
- &post_data));
- // Convert post_data to string.
- string post_str(post_data.begin(), post_data.end());
+
+ tuc_params_.http_response = "invalid xml>";
+ tuc_params_.expected_code = ErrorCode::kOmahaRequestXMLParseError;
+ tuc_params_.expected_check_result = metrics::CheckResult::kParsingError;
+ tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+
+ ASSERT_FALSE(TestUpdateCheck());
+
EXPECT_NE(
string::npos,
post_str.find("appid=\"{11111111-1111-1111-1111-111111111111}\" "
@@ -2554,19 +2211,13 @@
// Flag that the device was powerwashed in the past.
fake_system_state_.fake_hardware()->SetPowerwashCount(1);
+ tuc_params_.http_response = fake_update_response_.GetNoUpdateResponse();
+ tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable;
+ tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
- brillo::Blob post_data;
- ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetNoUpdateResponse(),
- -1,
- false, // ping_only
- ErrorCode::kSuccess,
- metrics::CheckResult::kNoUpdateAvailable,
- metrics::CheckReaction::kUnset,
- metrics::DownloadErrorCode::kUnset,
- nullptr,
- &post_data));
+ ASSERT_TRUE(TestUpdateCheck());
+
// We shouldn't send a ping in this case since powerwash > 0.
- string post_str(post_data.begin(), post_data.end());
EXPECT_EQ(string::npos, post_str.find("<ping"));
}
@@ -2581,19 +2232,14 @@
// Flag that the device has sent first active ping in the past.
fake_system_state_.fake_hardware()->SetFirstActiveOmahaPingSent();
- brillo::Blob post_data;
- ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetNoUpdateResponse(),
- -1,
- false, // ping_only
- ErrorCode::kSuccess,
- metrics::CheckResult::kNoUpdateAvailable,
- metrics::CheckReaction::kUnset,
- metrics::DownloadErrorCode::kUnset,
- nullptr,
- &post_data));
+ tuc_params_.http_response = fake_update_response_.GetNoUpdateResponse();
+ tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable;
+ tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+
+ ASSERT_TRUE(TestUpdateCheck());
+
// We shouldn't send a ping in this case since
// first_active_omaha_ping_sent=true
- string post_str(post_data.begin(), post_data.end());
EXPECT_EQ(string::npos, post_str.find("<ping"));
}
@@ -2602,17 +2248,11 @@
// Flag that the device was updated in a previous boot.
fake_prefs_.SetString(kPrefsPreviousVersion, "1.2.3.4");
- brillo::Blob post_data;
- ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetNoUpdateResponse(),
- -1,
- false, // ping_only
- ErrorCode::kSuccess,
- metrics::CheckResult::kNoUpdateAvailable,
- metrics::CheckReaction::kUnset,
- metrics::DownloadErrorCode::kUnset,
- nullptr,
- &post_data));
- string post_str(post_data.begin(), post_data.end());
+ tuc_params_.http_response = fake_update_response_.GetNoUpdateResponse();
+ tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable;
+ tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+
+ ASSERT_TRUE(TestUpdateCheck());
// An event 54 is included and has the right version.
EXPECT_NE(
@@ -2639,7 +2279,6 @@
bool expected_allow_p2p_for_downloading,
bool expected_allow_p2p_for_sharing,
const string& expected_p2p_url) {
- OmahaResponse response;
bool actual_allow_p2p_for_downloading = initial_allow_p2p_for_downloading;
bool actual_allow_p2p_for_sharing = initial_allow_p2p_for_sharing;
string actual_p2p_url;
@@ -2670,15 +2309,11 @@
fake_update_response_.disable_p2p_for_downloading =
omaha_disable_p2p_for_downloading;
fake_update_response_.disable_p2p_for_sharing = omaha_disable_p2p_for_sharing;
- ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
- -1,
- false, // ping_only
- ErrorCode::kSuccess,
- metrics::CheckResult::kUpdateAvailable,
- metrics::CheckReaction::kUpdating,
- metrics::DownloadErrorCode::kUnset,
- &response,
- nullptr));
+
+ tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+ tuc_params_.expected_check_result = metrics::CheckResult::kUpdateAvailable;
+
+ ASSERT_TRUE(TestUpdateCheck());
EXPECT_TRUE(response.update_exists);
EXPECT_EQ(omaha_disable_p2p_for_downloading,
@@ -2772,20 +2407,12 @@
bool OmahaRequestActionTest::InstallDateParseHelper(const string& elapsed_days,
OmahaResponse* response) {
fake_update_response_.elapsed_days = elapsed_days;
- return TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
- -1,
- false, // ping_only
- ErrorCode::kSuccess,
- metrics::CheckResult::kUpdateAvailable,
- metrics::CheckReaction::kUpdating,
- metrics::DownloadErrorCode::kUnset,
- response,
- nullptr);
+ tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+
+ return TestUpdateCheck();
}
TEST_F(OmahaRequestActionTest, ParseInstallDateFromResponse) {
- OmahaResponse response;
-
// Simulate a successful update check that happens during OOBE. The
// deadline in the response is needed to force the update attempt to
// occur; responses without a deadline seen during OOBE will normally
@@ -2901,11 +2528,13 @@
ReportKeyVersionMetrics(min_kernel_version, min_kernel_version, true))
.Times(1);
- OmahaResponse response;
- TestRollbackCheck(false /* is_consumer_device */,
- 3 /* rollback_allowed_milestones */,
- false /* is_policy_loaded */,
- &response);
+ fake_update_response_.deadline = "20101020";
+ tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+ tuc_params_.is_consumer_device = false;
+ tuc_params_.rollback_allowed_milestones = 3;
+
+ EXPECT_TRUE(TestUpdateCheck());
+ EXPECT_TRUE(response.update_exists);
// Verify kernel_max_rollforward was set to the current minimum
// kernel key version. This has the effect of freezing roll
@@ -2935,11 +2564,13 @@
ReportKeyVersionMetrics(min_kernel_version, kRollforwardInfinity, true))
.Times(1);
- OmahaResponse response;
- TestRollbackCheck(true /* is_consumer_device */,
- 3 /* rollback_allowed_milestones */,
- false /* is_policy_loaded */,
- &response);
+ fake_update_response_.deadline = "20101020";
+ tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+ tuc_params_.is_consumer_device = true;
+ tuc_params_.rollback_allowed_milestones = 3;
+
+ EXPECT_TRUE(TestUpdateCheck());
+ EXPECT_TRUE(response.update_exists);
// Verify that with rollback disabled that kernel_max_rollforward
// was set to logical infinity. This is the expected behavior for
@@ -2968,11 +2599,14 @@
ReportKeyVersionMetrics(min_kernel_version, min_kernel_version, true))
.Times(1);
- OmahaResponse response;
- TestRollbackCheck(false /* is_consumer_device */,
- allowed_milestones,
- true /* is_policy_loaded */,
- &response);
+ fake_update_response_.deadline = "20101020";
+ tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+ tuc_params_.is_consumer_device = false;
+ tuc_params_.rollback_allowed_milestones = allowed_milestones;
+ tuc_params_.is_policy_loaded = true;
+
+ EXPECT_TRUE(TestUpdateCheck());
+ EXPECT_TRUE(response.update_exists);
// Verify that with rollback enabled that kernel_max_rollforward
// was set to the current minimum kernel key version. This has
@@ -3002,11 +2636,14 @@
ReportKeyVersionMetrics(min_kernel_version, kRollforwardInfinity, true))
.Times(1);
- OmahaResponse response;
- TestRollbackCheck(false /* is_consumer_device */,
- allowed_milestones,
- true /* is_policy_loaded */,
- &response);
+ fake_update_response_.deadline = "20101020";
+ tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+ tuc_params_.is_consumer_device = false;
+ tuc_params_.rollback_allowed_milestones = allowed_milestones;
+ tuc_params_.is_policy_loaded = true;
+
+ EXPECT_TRUE(TestUpdateCheck());
+ EXPECT_TRUE(response.update_exists);
// Verify that with rollback disabled that kernel_max_rollforward
// was set to logical infinity.
@@ -3015,24 +2652,30 @@
}
TEST_F(OmahaRequestActionTest, RollbackResponseParsedNoEntries) {
- OmahaResponse response;
fake_update_response_.rollback = true;
- TestRollbackCheck(false /* is_consumer_device */,
- 4 /* rollback_allowed_milestones */,
- true /* is_policy_loaded */,
- &response);
+ fake_update_response_.deadline = "20101020";
+ tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+ tuc_params_.is_consumer_device = false;
+ tuc_params_.rollback_allowed_milestones = 4;
+ tuc_params_.is_policy_loaded = true;
+
+ EXPECT_TRUE(TestUpdateCheck());
+ EXPECT_TRUE(response.update_exists);
EXPECT_TRUE(response.is_rollback);
}
TEST_F(OmahaRequestActionTest, RollbackResponseValidVersionsParsed) {
- OmahaResponse response;
fake_update_response_.rollback_firmware_version = "1.2";
fake_update_response_.rollback_kernel_version = "3.4";
fake_update_response_.rollback = true;
- TestRollbackCheck(false /* is_consumer_device */,
- 4 /* rollback_allowed_milestones */,
- true /* is_policy_loaded */,
- &response);
+ fake_update_response_.deadline = "20101020";
+ tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+ tuc_params_.is_consumer_device = false;
+ tuc_params_.rollback_allowed_milestones = 4;
+ tuc_params_.is_policy_loaded = true;
+
+ EXPECT_TRUE(TestUpdateCheck());
+ EXPECT_TRUE(response.update_exists);
EXPECT_TRUE(response.is_rollback);
EXPECT_EQ(1, response.rollback_key_version.firmware_key);
EXPECT_EQ(2, response.rollback_key_version.firmware);
@@ -3046,17 +2689,10 @@
Time now = Time::Now();
fake_clock.SetWallclockTime(now);
fake_system_state_.set_clock(&fake_clock);
+ tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
- OmahaResponse response;
- ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
- -1,
- false, // ping_only
- ErrorCode::kSuccess,
- metrics::CheckResult::kUpdateAvailable,
- metrics::CheckReaction::kUpdating,
- metrics::DownloadErrorCode::kUnset,
- &response,
- nullptr));
+ ASSERT_TRUE(TestUpdateCheck());
+
EXPECT_TRUE(response.update_exists);
EXPECT_TRUE(fake_prefs_.Exists(kPrefsUpdateFirstSeenAt));
@@ -3073,43 +2709,27 @@
fake_clock.SetWallclockTime(now);
fake_system_state_.set_clock(&fake_clock);
- OmahaResponse response;
- ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetNoUpdateResponse(),
- -1,
- false, // ping_only
- ErrorCode::kSuccess,
- metrics::CheckResult::kNoUpdateAvailable,
- metrics::CheckReaction::kUnset,
- metrics::DownloadErrorCode::kUnset,
- &response,
- nullptr));
+ tuc_params_.http_response = fake_update_response_.GetNoUpdateResponse();
+ tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable;
+ tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+
+ ASSERT_TRUE(TestUpdateCheck());
+
EXPECT_FALSE(response.update_exists);
EXPECT_FALSE(fake_prefs_.Exists(kPrefsUpdateFirstSeenAt));
}
TEST_F(OmahaRequestActionTest, InstallTest) {
- OmahaResponse response;
request_params_.set_is_install(true);
- request_params_.set_dlc_module_ids({"dlc_no_0", "dlc_no_1"});
- brillo::Blob post_data;
- ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
- -1,
- false, // ping_only
- true, // is_consumer_device
- 0, // rollback_allowed_milestones
- false, // is_policy_loaded
- ErrorCode::kSuccess,
- metrics::CheckResult::kUpdateAvailable,
- metrics::CheckReaction::kUpdating,
- metrics::DownloadErrorCode::kUnset,
- &response,
- &post_data));
- // Convert post_data to string.
- string post_str(post_data.begin(), post_data.end());
- for (const auto& dlc_module_id : request_params_.dlc_module_ids()) {
- EXPECT_NE(string::npos,
- post_str.find("appid=\"" + fake_update_response_.app_id + "_" +
- dlc_module_id + "\""));
+ request_params_.set_dlc_apps_params(
+ {{request_params_.GetDlcAppId("dlc_no_0"), {.name = "dlc_no_0"}},
+ {request_params_.GetDlcAppId("dlc_no_1"), {.name = "dlc_no_1"}}});
+ tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+
+ ASSERT_TRUE(TestUpdateCheck());
+
+ for (const auto& it : request_params_.dlc_apps_params()) {
+ EXPECT_NE(string::npos, post_str.find("appid=\"" + it.first + "\""));
}
EXPECT_NE(string::npos,
post_str.find("appid=\"" + fake_update_response_.app_id + "\""));
@@ -3121,27 +2741,332 @@
updatecheck_count++;
pos++;
}
- EXPECT_EQ(request_params_.dlc_module_ids().size(), updatecheck_count);
+ EXPECT_EQ(request_params_.dlc_apps_params().size(), updatecheck_count);
+ EXPECT_TRUE(response.update_exists);
}
TEST_F(OmahaRequestActionTest, InstallMissingPlatformVersionTest) {
fake_update_response_.multi_app_skip_updatecheck = true;
fake_update_response_.multi_app_no_update = false;
request_params_.set_is_install(true);
- request_params_.set_dlc_module_ids({"dlc_no_0", "dlc_no_1"});
+ request_params_.set_dlc_apps_params(
+ {{request_params_.GetDlcAppId("dlc_no_0"), {.name = "dlc_no_0"}},
+ {request_params_.GetDlcAppId("dlc_no_1"), {.name = "dlc_no_1"}}});
request_params_.set_app_id(fake_update_response_.app_id_skip_updatecheck);
- OmahaResponse response;
- ASSERT_TRUE(TestUpdateCheck(fake_update_response_.GetUpdateResponse(),
- -1,
- false, // ping_only
- ErrorCode::kSuccess,
- metrics::CheckResult::kUpdateAvailable,
- metrics::CheckReaction::kUpdating,
- metrics::DownloadErrorCode::kUnset,
- &response,
- nullptr));
+ tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+
+ ASSERT_TRUE(TestUpdateCheck());
+
EXPECT_TRUE(response.update_exists);
EXPECT_EQ(fake_update_response_.current_version, response.version);
}
+TEST_F(OmahaRequestActionTest, UpdateWithDlcTest) {
+ request_params_.set_dlc_apps_params(
+ {{request_params_.GetDlcAppId(kDlcId1), {.name = kDlcId1}}});
+ fake_update_response_.dlc_app_update = true;
+ tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+ EXPECT_CALL(mock_excluder_, IsExcluded(_)).WillRepeatedly(Return(false));
+ ASSERT_TRUE(TestUpdateCheck());
+
+ EXPECT_EQ(response.packages.size(), 2u);
+ // Two candidate URLs.
+ EXPECT_EQ(response.packages[1].payload_urls.size(), 2u);
+ EXPECT_TRUE(response.update_exists);
+}
+
+TEST_F(OmahaRequestActionTest, UpdateWithPartiallyExcludedDlcTest) {
+ request_params_.set_dlc_apps_params(
+ {{request_params_.GetDlcAppId(kDlcId1), {.name = kDlcId1}}});
+ fake_update_response_.dlc_app_update = true;
+ tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+ // The first DLC candidate URL is excluded.
+ EXPECT_CALL(mock_excluder_, IsExcluded(_))
+ .WillOnce(Return(true))
+ .WillOnce(Return(false));
+ ASSERT_TRUE(TestUpdateCheck());
+
+ EXPECT_EQ(response.packages.size(), 2u);
+ // One candidate URL.
+ EXPECT_EQ(response.packages[1].payload_urls.size(), 1u);
+ EXPECT_TRUE(response.update_exists);
+}
+
+TEST_F(OmahaRequestActionTest, UpdateWithExcludedDlcTest) {
+ request_params_.set_dlc_apps_params(
+ {{request_params_.GetDlcAppId(kDlcId1), {.name = kDlcId1}}});
+ fake_update_response_.dlc_app_update = true;
+ tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+ // Both DLC candidate URLs are excluded.
+ EXPECT_CALL(mock_excluder_, IsExcluded(_))
+ .WillOnce(Return(true))
+ .WillOnce(Return(true));
+ ASSERT_TRUE(TestUpdateCheck());
+
+ EXPECT_EQ(response.packages.size(), 1u);
+ EXPECT_TRUE(response.update_exists);
+}
+
+TEST_F(OmahaRequestActionTest, UpdateWithDeprecatedDlcTest) {
+ request_params_.set_dlc_apps_params(
+ {{request_params_.GetDlcAppId(kDlcId2), {.name = kDlcId2}}});
+ fake_update_response_.dlc_app_no_update = true;
+ tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+ EXPECT_CALL(mock_excluder_, IsExcluded(_)).WillRepeatedly(Return(false));
+ ASSERT_TRUE(TestUpdateCheck());
+
+ EXPECT_TRUE(response.update_exists);
+}
+
+TEST_F(OmahaRequestActionTest, UpdateWithDlcAndDeprecatedDlcTest) {
+ request_params_.set_dlc_apps_params(
+ {{request_params_.GetDlcAppId(kDlcId1), {.name = kDlcId1}},
+ {request_params_.GetDlcAppId(kDlcId2), {.name = kDlcId2}}});
+ fake_update_response_.dlc_app_update = true;
+ fake_update_response_.dlc_app_no_update = true;
+ tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+ EXPECT_CALL(mock_excluder_, IsExcluded(_)).WillRepeatedly(Return(false));
+ ASSERT_TRUE(TestUpdateCheck());
+
+ EXPECT_TRUE(response.update_exists);
+}
+
+TEST_F(OmahaRequestActionTest, PastRollbackVersionsNoEntries) {
+ fake_update_response_.rollback = true;
+ fake_update_response_.rollback_allowed_milestones = 4;
+ request_params_.set_rollback_allowed_milestones(4);
+ fake_update_response_.deadline = "20101020";
+ tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+ tuc_params_.is_consumer_device = false;
+ tuc_params_.rollback_allowed_milestones = 4;
+ tuc_params_.is_policy_loaded = true;
+
+ EXPECT_TRUE(TestUpdateCheck());
+ EXPECT_TRUE(response.update_exists);
+ EXPECT_TRUE(response.is_rollback);
+ EXPECT_EQ(std::numeric_limits<uint16_t>::max(),
+ response.past_rollback_key_version.firmware_key);
+ EXPECT_EQ(std::numeric_limits<uint16_t>::max(),
+ response.past_rollback_key_version.firmware);
+ EXPECT_EQ(std::numeric_limits<uint16_t>::max(),
+ response.past_rollback_key_version.kernel_key);
+ EXPECT_EQ(std::numeric_limits<uint16_t>::max(),
+ response.past_rollback_key_version.kernel);
+}
+
+TEST_F(OmahaRequestActionTest, PastRollbackVersionsValidEntries) {
+ request_params_.set_rollback_allowed_milestones(4);
+ fake_update_response_.rollback = true;
+ fake_update_response_.rollback_allowed_milestones = 4;
+ fake_update_response_.rollback_firmware_version = "4.3";
+ fake_update_response_.rollback_kernel_version = "2.1";
+ fake_update_response_.past_rollback_key_version =
+ std::make_pair("16.15", "14.13");
+ fake_update_response_.deadline = "20101020";
+ tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+ tuc_params_.is_consumer_device = false;
+ tuc_params_.rollback_allowed_milestones = 4;
+ tuc_params_.is_policy_loaded = true;
+
+ EXPECT_TRUE(TestUpdateCheck());
+ EXPECT_TRUE(response.update_exists);
+ EXPECT_TRUE(response.is_rollback);
+ EXPECT_EQ(16, response.past_rollback_key_version.firmware_key);
+ EXPECT_EQ(15, response.past_rollback_key_version.firmware);
+ EXPECT_EQ(14, response.past_rollback_key_version.kernel_key);
+ EXPECT_EQ(13, response.past_rollback_key_version.kernel);
+}
+
+TEST_F(OmahaRequestActionTest, MismatchNumberOfVersions) {
+ fake_update_response_.rollback = true;
+ fake_update_response_.rollback_allowed_milestones = 2;
+ fake_update_response_.deadline = "20101020";
+ request_params_.set_rollback_allowed_milestones(4);
+
+ // Since |request_params_.rollback_allowed_milestones| is 4 but the response
+ // is constructed with |fake_update_response_.rollback_allowed_milestones| set
+ // to 2, OmahaRequestAction will look for the key values of N-4 version but
+ // only the N-2 version will exist.
+ tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+ tuc_params_.is_consumer_device = false;
+ tuc_params_.rollback_allowed_milestones = 2;
+ tuc_params_.is_policy_loaded = true;
+
+ EXPECT_TRUE(TestUpdateCheck());
+ EXPECT_TRUE(response.update_exists);
+ EXPECT_TRUE(response.is_rollback);
+ EXPECT_EQ(std::numeric_limits<uint16_t>::max(),
+ response.past_rollback_key_version.firmware_key);
+ EXPECT_EQ(std::numeric_limits<uint16_t>::max(),
+ response.past_rollback_key_version.firmware);
+ EXPECT_EQ(std::numeric_limits<uint16_t>::max(),
+ response.past_rollback_key_version.kernel_key);
+ EXPECT_EQ(std::numeric_limits<uint16_t>::max(),
+ response.past_rollback_key_version.kernel);
+}
+
+TEST_F(OmahaRequestActionTest, IncludeRequisitionTest) {
+ request_params_.set_device_requisition("remora");
+ tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+ ASSERT_TRUE(TestUpdateCheck());
+ EXPECT_NE(string::npos, post_str.find("requisition=\"remora\""));
+}
+
+TEST_F(OmahaRequestActionTest, NoIncludeRequisitionTest) {
+ request_params_.set_device_requisition("");
+ tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+ ASSERT_TRUE(TestUpdateCheck());
+ EXPECT_EQ(string::npos, post_str.find("requisition"));
+}
+
+TEST_F(OmahaRequestActionTest, PersistEolDateTest) {
+ tuc_params_.http_response =
+ "<?xml version=\"1.0\" encoding=\"UTF-8\"?><response "
+ "protocol=\"3.0\"><app appid=\"foo\" status=\"ok\">"
+ "<ping status=\"ok\"/><updatecheck status=\"noupdate\" "
+ "_eol_date=\"200\" _foo=\"bar\"/></app></response>";
+ tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable;
+ tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+
+ ASSERT_TRUE(TestUpdateCheck());
+
+ string eol_date;
+ EXPECT_TRUE(
+ fake_system_state_.prefs()->GetString(kPrefsOmahaEolDate, &eol_date));
+ EXPECT_EQ("200", eol_date);
+}
+
+TEST_F(OmahaRequestActionTest, PersistEolMissingDateTest) {
+ tuc_params_.http_response =
+ "<?xml version=\"1.0\" encoding=\"UTF-8\"?><response "
+ "protocol=\"3.0\"><app appid=\"foo\" status=\"ok\">"
+ "<ping status=\"ok\"/><updatecheck status=\"noupdate\" "
+ "_foo=\"bar\"/></app></response>";
+ tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable;
+ tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+
+ const string kDate = "123";
+ fake_system_state_.prefs()->SetString(kPrefsOmahaEolDate, kDate);
+
+ ASSERT_TRUE(TestUpdateCheck());
+
+ string eol_date;
+ EXPECT_TRUE(
+ fake_system_state_.prefs()->GetString(kPrefsOmahaEolDate, &eol_date));
+ EXPECT_EQ(kDate, eol_date);
+}
+
+TEST_F(OmahaRequestActionTest, PersistEolBadDateTest) {
+ tuc_params_.http_response =
+ "<?xml version=\"1.0\" encoding=\"UTF-8\"?><response "
+ "protocol=\"3.0\"><app appid=\"foo\" status=\"ok\">"
+ "<ping status=\"ok\"/><updatecheck status=\"noupdate\" "
+ "_eol_date=\"bad\" foo=\"bar\"/></app></response>";
+ tuc_params_.expected_check_result = metrics::CheckResult::kNoUpdateAvailable;
+ tuc_params_.expected_check_reaction = metrics::CheckReaction::kUnset;
+
+ ASSERT_TRUE(TestUpdateCheck());
+
+ string eol_date;
+ EXPECT_TRUE(
+ fake_system_state_.prefs()->GetString(kPrefsOmahaEolDate, &eol_date));
+ EXPECT_EQ(kEolDateInvalid, StringToEolDate(eol_date));
+}
+
+TEST_F(OmahaRequestActionDlcPingTest, StorePingReplyNoPing) {
+ OmahaRequestParams::AppParams app_param = {.name = dlc_id_};
+ request_params_.set_dlc_apps_params(
+ {{request_params_.GetDlcAppId(dlc_id_), app_param}});
+
+ ASSERT_TRUE(TestUpdateCheck());
+
+ int64_t temp_int;
+ // If there was no ping, the metadata files shouldn't exist yet.
+ EXPECT_FALSE(fake_prefs_.GetInt64(active_key_, &temp_int));
+ EXPECT_FALSE(fake_prefs_.GetInt64(last_active_key_, &temp_int));
+ EXPECT_FALSE(fake_prefs_.GetInt64(last_rollcall_key_, &temp_int));
+}
+
+TEST_F(OmahaRequestActionDlcPingTest, StorePingReplyActiveTest) {
+ // Create Active value
+ fake_prefs_.SetInt64(active_key_, 0);
+
+ OmahaRequestParams::AppParams app_param = {
+ .active_counting_type = OmahaRequestParams::kDateBased,
+ .name = dlc_id_,
+ .ping_active = 1,
+ .send_ping = true};
+ request_params_.set_dlc_apps_params(
+ {{request_params_.GetDlcAppId(dlc_id_), app_param}});
+
+ int64_t temp_int;
+ string temp_str;
+ ASSERT_TRUE(TestUpdateCheck());
+ EXPECT_TRUE(fake_prefs_.GetInt64(active_key_, &temp_int));
+ EXPECT_EQ(temp_int, kPingInactiveValue);
+ EXPECT_TRUE(fake_prefs_.GetString(last_active_key_, &temp_str));
+ EXPECT_EQ(temp_str, "4763");
+ EXPECT_TRUE(fake_prefs_.GetString(last_rollcall_key_, &temp_str));
+ EXPECT_EQ(temp_str, "4763");
+}
+
+TEST_F(OmahaRequestActionDlcPingTest, StorePingReplyInactiveTest) {
+ // Create Active value
+ fake_prefs_.SetInt64(active_key_, 0);
+
+ OmahaRequestParams::AppParams app_param = {
+ .active_counting_type = OmahaRequestParams::kDateBased,
+ .name = dlc_id_,
+ .ping_active = 0,
+ .send_ping = true};
+ request_params_.set_dlc_apps_params(
+ {{request_params_.GetDlcAppId(dlc_id_), app_param}});
+
+ // Set the previous active value to an older value than 4763.
+ fake_prefs_.SetString(last_active_key_, "555");
+
+ int64_t temp_int;
+ ASSERT_TRUE(TestUpdateCheck());
+ EXPECT_TRUE(fake_prefs_.GetInt64(active_key_, &temp_int));
+ EXPECT_EQ(temp_int, kPingInactiveValue);
+ string temp_str;
+ EXPECT_TRUE(fake_prefs_.GetString(last_active_key_, &temp_str));
+ EXPECT_EQ(temp_str, "555");
+ EXPECT_TRUE(fake_prefs_.GetString(last_rollcall_key_, &temp_str));
+ EXPECT_EQ(temp_str, "4763");
+}
+
+TEST_F(OmahaRequestActionTest, OmahaResponseUpdateCanExcludeCheck) {
+ request_params_.set_dlc_apps_params(
+ {{request_params_.GetDlcAppId(kDlcId1), {.name = kDlcId1}}});
+ fake_update_response_.dlc_app_update = true;
+ tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+
+ EXPECT_CALL(mock_excluder_, IsExcluded(_)).WillRepeatedly(Return(false));
+ ASSERT_TRUE(TestUpdateCheck());
+ ASSERT_TRUE(delegate_.omaha_response_);
+ const auto& packages = delegate_.omaha_response_->packages;
+ ASSERT_EQ(packages.size(), 2);
+
+ EXPECT_FALSE(packages[0].can_exclude);
+ EXPECT_TRUE(packages[1].can_exclude);
+}
+
+TEST_F(OmahaRequestActionTest, OmahaResponseInstallCannotExcludeCheck) {
+ request_params_.set_is_install(true);
+ request_params_.set_dlc_apps_params(
+ {{request_params_.GetDlcAppId(kDlcId1), {.name = kDlcId1}}});
+ fake_update_response_.dlc_app_update = true;
+ tuc_params_.http_response = fake_update_response_.GetUpdateResponse();
+
+ EXPECT_CALL(mock_excluder_, IsExcluded(_)).WillRepeatedly(Return(false));
+ ASSERT_TRUE(TestUpdateCheck());
+ ASSERT_TRUE(delegate_.omaha_response_);
+ const auto& packages = delegate_.omaha_response_->packages;
+ ASSERT_EQ(packages.size(), 2);
+
+ EXPECT_FALSE(packages[0].can_exclude);
+ EXPECT_FALSE(packages[1].can_exclude);
+}
+
} // namespace chromeos_update_engine
diff --git a/omaha_request_builder_xml.cc b/omaha_request_builder_xml.cc
new file mode 100644
index 0000000..097b9f1
--- /dev/null
+++ b/omaha_request_builder_xml.cc
@@ -0,0 +1,435 @@
+//
+// Copyright (C) 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/omaha_request_builder_xml.h"
+
+#include <inttypes.h>
+
+#include <string>
+
+#include <base/guid.h>
+#include <base/logging.h>
+#include <base/strings/string_number_conversions.h>
+#include <base/strings/string_util.h>
+#include <base/strings/stringprintf.h>
+#include <base/time/time.h>
+
+#include "update_engine/common/constants.h"
+#include "update_engine/common/prefs_interface.h"
+#include "update_engine/common/utils.h"
+#include "update_engine/omaha_request_params.h"
+
+using std::string;
+
+namespace chromeos_update_engine {
+
+const char kNoVersion[] = "0.0.0.0";
+const int kPingNeverPinged = -1;
+const int kPingUnknownValue = -2;
+const int kPingActiveValue = 1;
+const int kPingInactiveValue = 0;
+
+bool XmlEncode(const string& input, string* output) {
+ if (std::find_if(input.begin(), input.end(), [](const char c) {
+ return c & 0x80;
+ }) != input.end()) {
+ LOG(WARNING) << "Invalid ASCII-7 string passed to the XML encoder:";
+ utils::HexDumpString(input);
+ return false;
+ }
+ output->clear();
+ // We need at least input.size() space in the output, but the code below will
+ // handle it if we need more.
+ output->reserve(input.size());
+ for (char c : input) {
+ switch (c) {
+ case '\"':
+ output->append(""");
+ break;
+ case '\'':
+ output->append("'");
+ break;
+ case '&':
+ output->append("&");
+ break;
+ case '<':
+ output->append("<");
+ break;
+ case '>':
+ output->append(">");
+ break;
+ default:
+ output->push_back(c);
+ }
+ }
+ return true;
+}
+
+string XmlEncodeWithDefault(const string& input, const string& default_value) {
+ string output;
+ if (XmlEncode(input, &output))
+ return output;
+ return default_value;
+}
+
+string OmahaRequestBuilderXml::GetPing() const {
+ // Returns an XML ping element attribute assignment with attribute
+ // |name| and value |ping_days| if |ping_days| has a value that needs
+ // to be sent, or an empty string otherwise.
+ auto GetPingAttribute = [](const char* name, int ping_days) -> string {
+ if (ping_days > 0 || ping_days == kPingNeverPinged)
+ return base::StringPrintf(" %s=\"%d\"", name, ping_days);
+ return "";
+ };
+
+ string ping_active = GetPingAttribute("a", ping_active_days_);
+ string ping_roll_call = GetPingAttribute("r", ping_roll_call_days_);
+ if (!ping_active.empty() || !ping_roll_call.empty()) {
+ return base::StringPrintf(" <ping active=\"1\"%s%s></ping>\n",
+ ping_active.c_str(),
+ ping_roll_call.c_str());
+ }
+ return "";
+}
+
+string OmahaRequestBuilderXml::GetPingDateBased(
+ const OmahaRequestParams::AppParams& app_params) const {
+ if (!app_params.send_ping)
+ return "";
+ string ping_active = "";
+ string ping_ad = "";
+ if (app_params.ping_active == kPingActiveValue) {
+ ping_active =
+ base::StringPrintf(" active=\"%" PRId64 "\"", app_params.ping_active);
+ ping_ad = base::StringPrintf(" ad=\"%" PRId64 "\"",
+ app_params.ping_date_last_active);
+ }
+
+ string ping_rd = base::StringPrintf(" rd=\"%" PRId64 "\"",
+ app_params.ping_date_last_rollcall);
+
+ return base::StringPrintf(" <ping%s%s%s></ping>\n",
+ ping_active.c_str(),
+ ping_ad.c_str(),
+ ping_rd.c_str());
+}
+
+string OmahaRequestBuilderXml::GetAppBody(const OmahaAppData& app_data) const {
+ string app_body;
+ if (event_ == nullptr) {
+ if (app_data.app_params.send_ping) {
+ switch (app_data.app_params.active_counting_type) {
+ case OmahaRequestParams::kDayBased:
+ app_body = GetPing();
+ break;
+ case OmahaRequestParams::kDateBased:
+ app_body = GetPingDateBased(app_data.app_params);
+ break;
+ default:
+ NOTREACHED();
+ }
+ }
+ if (!ping_only_) {
+ if (!app_data.skip_update) {
+ app_body += " <updatecheck";
+ if (!params_->target_version_prefix().empty()) {
+ app_body += base::StringPrintf(
+ " targetversionprefix=\"%s\"",
+ XmlEncodeWithDefault(params_->target_version_prefix()).c_str());
+ // Rollback requires target_version_prefix set.
+ if (params_->rollback_allowed()) {
+ app_body += " rollback_allowed=\"true\"";
+ }
+ }
+ app_body += "></updatecheck>\n";
+ }
+
+ // If this is the first update check after a reboot following a previous
+ // update, generate an event containing the previous version number. If
+ // the previous version preference file doesn't exist the event is still
+ // generated with a previous version of 0.0.0.0 -- this is relevant for
+ // older clients or new installs. The previous version event is not sent
+ // for ping-only requests because they come before the client has
+ // rebooted. The previous version event is also not sent if it was already
+ // sent for this new version with a previous updatecheck.
+ string prev_version;
+ if (!prefs_->GetString(kPrefsPreviousVersion, &prev_version)) {
+ prev_version = kNoVersion;
+ }
+ // We only store a non-empty previous version value after a successful
+ // update in the previous boot. After reporting it back to the server,
+ // we clear the previous version value so it doesn't get reported again.
+ if (!prev_version.empty()) {
+ app_body += base::StringPrintf(
+ " <event eventtype=\"%d\" eventresult=\"%d\" "
+ "previousversion=\"%s\"></event>\n",
+ OmahaEvent::kTypeRebootedAfterUpdate,
+ OmahaEvent::kResultSuccess,
+ XmlEncodeWithDefault(prev_version, kNoVersion).c_str());
+ LOG_IF(WARNING, !prefs_->SetString(kPrefsPreviousVersion, ""))
+ << "Unable to reset the previous version.";
+ }
+ }
+ } else {
+ // The error code is an optional attribute so append it only if the result
+ // is not success.
+ string error_code;
+ if (event_->result != OmahaEvent::kResultSuccess) {
+ error_code = base::StringPrintf(" errorcode=\"%d\"",
+ static_cast<int>(event_->error_code));
+ }
+ app_body = base::StringPrintf(
+ " <event eventtype=\"%d\" eventresult=\"%d\"%s></event>\n",
+ event_->type,
+ event_->result,
+ error_code.c_str());
+ }
+
+ return app_body;
+}
+
+string OmahaRequestBuilderXml::GetCohortArg(const string arg_name,
+ const string prefs_key,
+ const string override_value) const {
+ string cohort_value;
+ if (!override_value.empty()) {
+ // |override_value| takes precedence over the pref value.
+ cohort_value = override_value;
+ } else {
+ // There's nothing wrong with not having a given cohort setting, so we check
+ // existence first to avoid the warning log message.
+ if (!prefs_->Exists(prefs_key))
+ return "";
+ if (!prefs_->GetString(prefs_key, &cohort_value) || cohort_value.empty())
+ return "";
+ }
+ // This is a check to avoid sending a huge XML file back to Omaha due
+ // to a compromised stateful partition making the update check fail in low
+ // network environments even after a reboot.
+ if (cohort_value.size() > 1024) {
+ LOG(WARNING) << "The omaha cohort setting " << arg_name
+ << " has a too big value, which must be an error or an "
+ "attacker trying to inhibit updates.";
+ return "";
+ }
+
+ string escaped_xml_value;
+ if (!XmlEncode(cohort_value, &escaped_xml_value)) {
+ LOG(WARNING) << "The omaha cohort setting " << arg_name
+ << " is ASCII-7 invalid, ignoring it.";
+ return "";
+ }
+
+ return base::StringPrintf(
+ "%s=\"%s\" ", arg_name.c_str(), escaped_xml_value.c_str());
+}
+
+bool IsValidComponentID(const string& id) {
+ for (char c : id) {
+ if (!isalnum(c) && c != '-' && c != '_' && c != '.')
+ return false;
+ }
+ return true;
+}
+
+string OmahaRequestBuilderXml::GetApp(const OmahaAppData& app_data) const {
+ string app_body = GetAppBody(app_data);
+ string app_versions;
+
+ // If we are downgrading to a more stable channel and we are allowed to do
+ // powerwash, then pass 0.0.0.0 as the version. This is needed to get the
+ // highest-versioned payload on the destination channel.
+ if (params_->ShouldPowerwash()) {
+ LOG(INFO) << "Passing OS version as 0.0.0.0 as we are set to powerwash "
+ << "on downgrading to the version in the more stable channel";
+ app_versions = "version=\"" + string(kNoVersion) + "\" from_version=\"" +
+ XmlEncodeWithDefault(app_data.version, kNoVersion) + "\" ";
+ } else {
+ app_versions = "version=\"" +
+ XmlEncodeWithDefault(app_data.version, kNoVersion) + "\" ";
+ }
+
+ string download_channel = params_->download_channel();
+ string app_channels =
+ "track=\"" + XmlEncodeWithDefault(download_channel) + "\" ";
+ if (params_->current_channel() != download_channel) {
+ app_channels += "from_track=\"" +
+ XmlEncodeWithDefault(params_->current_channel()) + "\" ";
+ }
+
+ string delta_okay_str =
+ params_->delta_okay() && !params_->is_install() ? "true" : "false";
+
+ // If |install_date_in_days_| is not set (i.e. its value is -1), don't
+ // include the attribute.
+ string install_date_in_days_str = "";
+ if (install_date_in_days_ >= 0) {
+ install_date_in_days_str =
+ base::StringPrintf("installdate=\"%d\" ", install_date_in_days_);
+ }
+
+ string app_cohort_args;
+ app_cohort_args += GetCohortArg("cohort", kPrefsOmahaCohort);
+ app_cohort_args += GetCohortArg("cohortname", kPrefsOmahaCohortName);
+
+ // Policy provided value overrides pref.
+ string autoupdate_token = params_->autoupdate_token();
+ app_cohort_args += GetCohortArg("cohorthint",
+ kPrefsOmahaCohortHint,
+ autoupdate_token /* override_value */);
+
+ string fingerprint_arg;
+ if (!params_->os_build_fingerprint().empty()) {
+ fingerprint_arg = "fingerprint=\"" +
+ XmlEncodeWithDefault(params_->os_build_fingerprint()) +
+ "\" ";
+ }
+
+ string buildtype_arg;
+ if (!params_->os_build_type().empty()) {
+ buildtype_arg = "os_build_type=\"" +
+ XmlEncodeWithDefault(params_->os_build_type()) + "\" ";
+ }
+
+ string product_components_args;
+ if (!params_->ShouldPowerwash() && !app_data.product_components.empty()) {
+ brillo::KeyValueStore store;
+ if (store.LoadFromString(app_data.product_components)) {
+ for (const string& key : store.GetKeys()) {
+ if (!IsValidComponentID(key)) {
+ LOG(ERROR) << "Invalid component id: " << key;
+ continue;
+ }
+ string version;
+ if (!store.GetString(key, &version)) {
+ LOG(ERROR) << "Failed to get version for " << key
+ << " in product_components.";
+ continue;
+ }
+ product_components_args +=
+ base::StringPrintf("_%s.version=\"%s\" ",
+ key.c_str(),
+ XmlEncodeWithDefault(version).c_str());
+ }
+ } else {
+ LOG(ERROR) << "Failed to parse product_components:\n"
+ << app_data.product_components;
+ }
+ }
+
+ string requisition_arg;
+ if (!params_->device_requisition().empty()) {
+ requisition_arg = "requisition=\"" +
+ XmlEncodeWithDefault(params_->device_requisition()) +
+ "\" ";
+ }
+
+ // clang-format off
+ string app_xml = " <app "
+ "appid=\"" + XmlEncodeWithDefault(app_data.id) + "\" " +
+ app_cohort_args +
+ app_versions +
+ app_channels +
+ product_components_args +
+ fingerprint_arg +
+ buildtype_arg +
+ "board=\"" + XmlEncodeWithDefault(params_->os_board()) + "\" " +
+ "hardware_class=\"" + XmlEncodeWithDefault(params_->hwid()) + "\" " +
+ "delta_okay=\"" + delta_okay_str + "\" " +
+ install_date_in_days_str +
+
+ // DLC excluded for installs and updates.
+ (app_data.is_dlc ? "" :
+ "lang=\"" + XmlEncodeWithDefault(params_->app_lang(), "en-US") + "\" " +
+ "fw_version=\"" + XmlEncodeWithDefault(params_->fw_version()) + "\" " +
+ "ec_version=\"" + XmlEncodeWithDefault(params_->ec_version()) + "\" " +
+ requisition_arg) +
+
+ ">\n" +
+ app_body +
+ " </app>\n";
+ // clang-format on
+ return app_xml;
+}
+
+string OmahaRequestBuilderXml::GetOs() const {
+ string os_xml =
+ " <os "
+ "version=\"" +
+ XmlEncodeWithDefault(params_->os_version()) + "\" " + "platform=\"" +
+ XmlEncodeWithDefault(params_->os_platform()) + "\" " + "sp=\"" +
+ XmlEncodeWithDefault(params_->os_sp()) +
+ "\">"
+ "</os>\n";
+ return os_xml;
+}
+
+string OmahaRequestBuilderXml::GetRequest() const {
+ string os_xml = GetOs();
+ string app_xml = GetApps();
+
+ string request_xml = base::StringPrintf(
+ "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"
+ "<request requestid=\"%s\" sessionid=\"%s\""
+ " protocol=\"3.0\" updater=\"%s\" updaterversion=\"%s\""
+ " installsource=\"%s\" ismachine=\"1\">\n%s%s</request>\n",
+ base::GenerateGUID().c_str() /* requestid */,
+ session_id_.c_str(),
+ constants::kOmahaUpdaterID,
+ kOmahaUpdaterVersion,
+ params_->interactive() ? "ondemandupdate" : "scheduler",
+ os_xml.c_str(),
+ app_xml.c_str());
+
+ return request_xml;
+}
+
+string OmahaRequestBuilderXml::GetApps() const {
+ string app_xml = "";
+ OmahaAppData product_app = {
+ .id = params_->GetAppId(),
+ .version = params_->app_version(),
+ .product_components = params_->product_components(),
+ // Skips updatecheck for platform app in case of an install operation.
+ .skip_update = params_->is_install(),
+ .is_dlc = false,
+
+ .app_params = {.active_counting_type = OmahaRequestParams::kDayBased,
+ .send_ping = include_ping_}};
+ app_xml += GetApp(product_app);
+ if (!params_->system_app_id().empty()) {
+ OmahaAppData system_app = {
+ .id = params_->system_app_id(),
+ .version = params_->system_version(),
+ .skip_update = false,
+ .is_dlc = false,
+ .app_params = {.active_counting_type = OmahaRequestParams::kDayBased,
+ .send_ping = include_ping_}};
+ app_xml += GetApp(system_app);
+ }
+ for (const auto& it : params_->dlc_apps_params()) {
+ OmahaAppData dlc_app_data = {
+ .id = it.first,
+ .version = params_->is_install() ? kNoVersion : params_->app_version(),
+ .skip_update = false,
+ .is_dlc = true,
+ .app_params = it.second};
+ app_xml += GetApp(dlc_app_data);
+ }
+ return app_xml;
+}
+
+} // namespace chromeos_update_engine
diff --git a/omaha_request_builder_xml.h b/omaha_request_builder_xml.h
new file mode 100644
index 0000000..50c708d
--- /dev/null
+++ b/omaha_request_builder_xml.h
@@ -0,0 +1,199 @@
+//
+// Copyright (C) 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_OMAHA_REQUEST_BUILDER_XML_H_
+#define UPDATE_ENGINE_OMAHA_REQUEST_BUILDER_XML_H_
+
+#include <fcntl.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+
+#include <map>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include <gtest/gtest_prod.h> // for FRIEND_TEST
+
+#include <brillo/secure_blob.h>
+#include <curl/curl.h>
+
+#include "update_engine/common/action.h"
+#include "update_engine/common/http_fetcher.h"
+#include "update_engine/omaha_request_params.h"
+#include "update_engine/omaha_response.h"
+#include "update_engine/system_state.h"
+
+namespace chromeos_update_engine {
+
+extern const char kNoVersion[];
+extern const int kPingNeverPinged;
+extern const int kPingUnknownValue;
+extern const int kPingActiveValue;
+extern const int kPingInactiveValue;
+
+// This struct encapsulates the Omaha event information. For a
+// complete list of defined event types and results, see
+// http://code.google.com/p/omaha/wiki/ServerProtocol#event
+struct OmahaEvent {
+ // The Type values correspond to EVENT_TYPE values of Omaha.
+ enum Type {
+ kTypeUnknown = 0,
+ kTypeDownloadComplete = 1,
+ kTypeInstallComplete = 2,
+ kTypeUpdateComplete = 3,
+ kTypeUpdateDownloadStarted = 13,
+ kTypeUpdateDownloadFinished = 14,
+ // Chromium OS reserved type sent after the first reboot following a
+ // completed update.
+ kTypeRebootedAfterUpdate = 54,
+ };
+
+ // The Result values correspond to EVENT_RESULT values of Omaha.
+ enum Result {
+ kResultError = 0,
+ kResultSuccess = 1,
+ kResultUpdateDeferred = 9, // When we ignore/defer updates due to policy.
+ };
+
+ OmahaEvent()
+ : type(kTypeUnknown),
+ result(kResultError),
+ error_code(ErrorCode::kError) {}
+ explicit OmahaEvent(Type in_type)
+ : type(in_type),
+ result(kResultSuccess),
+ error_code(ErrorCode::kSuccess) {}
+ OmahaEvent(Type in_type, Result in_result, ErrorCode in_error_code)
+ : type(in_type), result(in_result), error_code(in_error_code) {}
+
+ Type type;
+ Result result;
+ ErrorCode error_code;
+};
+
+struct OmahaAppData {
+ std::string id;
+ std::string version;
+ std::string product_components;
+ bool skip_update;
+ bool is_dlc;
+ OmahaRequestParams::AppParams app_params;
+};
+
+// Encodes XML entities in a given string. Input must be ASCII-7 valid. If
+// the input is invalid, the default value is used instead.
+std::string XmlEncodeWithDefault(const std::string& input,
+ const std::string& default_value = "");
+
+// Escapes text so it can be included as character data and attribute
+// values. The |input| string must be valid ASCII-7, no UTF-8 supported.
+// Returns whether the |input| was valid and escaped properly in |output|.
+bool XmlEncode(const std::string& input, std::string* output);
+
+// Returns true iff |id| is a valid component ID, i.e. every character is
+// alphanumeric or one of '-', '_', '.'.
+bool IsValidComponentID(const std::string& id);
+
+class OmahaRequestBuilder {
+ public:
+ OmahaRequestBuilder() = default;
+ virtual ~OmahaRequestBuilder() = default;
+
+ virtual std::string GetRequest() const = 0;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(OmahaRequestBuilder);
+};
+
+class OmahaRequestBuilderXml : OmahaRequestBuilder {
+ public:
+ OmahaRequestBuilderXml(const OmahaEvent* event,
+ OmahaRequestParams* params,
+ bool ping_only,
+ bool include_ping,
+ int ping_active_days,
+ int ping_roll_call_days,
+ int install_date_in_days,
+ PrefsInterface* prefs,
+ const std::string& session_id)
+ : event_(event),
+ params_(params),
+ ping_only_(ping_only),
+ include_ping_(include_ping),
+ ping_active_days_(ping_active_days),
+ ping_roll_call_days_(ping_roll_call_days),
+ install_date_in_days_(install_date_in_days),
+ prefs_(prefs),
+ session_id_(session_id) {}
+
+ ~OmahaRequestBuilderXml() override = default;
+
+ // Returns an XML that corresponds to the entire Omaha request.
+ std::string GetRequest() const override;
+
+ private:
+ FRIEND_TEST(OmahaRequestBuilderXmlTest, PlatformGetAppTest);
+ FRIEND_TEST(OmahaRequestBuilderXmlTest, DlcGetAppTest);
+
+ // Returns an XML that corresponds to the entire <os> node of the Omaha
+ // request based on the member variables.
+ std::string GetOs() const;
+
+ // Returns an XML that corresponds to all <app> nodes of the Omaha
+ // request based on the given parameters.
+ std::string GetApps() const;
+
+ // Returns an XML that corresponds to the single <app> node of the Omaha
+ // request based on the given parameters.
+ std::string GetApp(const OmahaAppData& app_data) const;
+
+ // Returns an XML that goes into the body of the <app> element of the Omaha
+ // request based on the given parameters.
+ std::string GetAppBody(const OmahaAppData& app_data) const;
+
+ // Returns the cohort* argument to include in the <app> tag for the passed
+ // |arg_name| and |prefs_key|, if any. The return value is suitable to
+ // concatenate to the list of arguments and includes a space at the end.
+ std::string GetCohortArg(const std::string arg_name,
+ const std::string prefs_key,
+ const std::string override_value = "") const;
+
+ // Returns an XML ping element if any of the elapsed days need to be
+ // sent, or an empty string otherwise.
+ std::string GetPing() const;
+
+ // Returns a date-based XML ping element ("ad"/"rd" attributes) built from
+ // |app_params|, or an empty string if no ping should be sent.
+ std::string GetPingDateBased(
+ const OmahaRequestParams::AppParams& app_params) const;
+
+ const OmahaEvent* event_;
+ OmahaRequestParams* params_;
+ bool ping_only_;
+ bool include_ping_;
+ int ping_active_days_;
+ int ping_roll_call_days_;
+ int install_date_in_days_;
+ PrefsInterface* prefs_;
+ std::string session_id_;
+
+ DISALLOW_COPY_AND_ASSIGN(OmahaRequestBuilderXml);
+};
+
+} // namespace chromeos_update_engine
+
+#endif // UPDATE_ENGINE_OMAHA_REQUEST_BUILDER_XML_H_
diff --git a/omaha_request_builder_xml_unittest.cc b/omaha_request_builder_xml_unittest.cc
new file mode 100644
index 0000000..017acec
--- /dev/null
+++ b/omaha_request_builder_xml_unittest.cc
@@ -0,0 +1,322 @@
+//
+// Copyright (C) 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/omaha_request_builder_xml.h"
+
+#include <string>
+#include <utility>
+#include <vector>
+
+#include <base/guid.h>
+#include <gtest/gtest.h>
+
+#include "update_engine/fake_system_state.h"
+
+using std::pair;
+using std::string;
+using std::vector;
+
+namespace chromeos_update_engine {
+
+namespace {
+// Helper to find key and extract value from the given string |xml|, instead
+// of using a full parser. The attribute key will be followed by "=\"" as xml
+// attribute values must be within double quotes (not single quotes).
+static string FindAttributeKeyValueInXml(const string& xml,
+ const string& key,
+ const size_t val_size) {
+ string key_with_quotes = key + "=\"";
+ const size_t val_start_pos = xml.find(key);
+ if (val_start_pos == string::npos)
+ return "";
+ return xml.substr(val_start_pos + key_with_quotes.size(), val_size);
+}
+// Helper to find the count of substring in a string.
+static size_t CountSubstringInString(const string& str, const string& substr) {
+ size_t count = 0, pos = 0;
+ while ((pos = str.find(substr, pos ? pos + 1 : 0)) != string::npos)
+ ++count;
+ return count;
+}
+} // namespace
+
+class OmahaRequestBuilderXmlTest : public ::testing::Test {
+ protected:
+ void SetUp() override {}
+ void TearDown() override {}
+
+ FakeSystemState fake_system_state_;
+ static constexpr size_t kGuidSize = 36;
+};
+
+TEST_F(OmahaRequestBuilderXmlTest, XmlEncodeTest) {
+ string output;
+ vector<pair<string, string>> xml_encode_pairs = {
+ {"ab", "ab"},
+ {"a<b", "a<b"},
+ {"<&>\"\'\\", "<&>"'\\"},
+ {"<&>", "&lt;&amp;&gt;"}};
+ for (const auto& xml_encode_pair : xml_encode_pairs) {
+ const auto& before_encoding = xml_encode_pair.first;
+ const auto& after_encoding = xml_encode_pair.second;
+ EXPECT_TRUE(XmlEncode(before_encoding, &output));
+ EXPECT_EQ(after_encoding, output);
+ }
+ // Check that unterminated UTF-8 strings are handled properly.
+ EXPECT_FALSE(XmlEncode("\xc2", &output));
+ // Fail with invalid ASCII-7 chars.
+ EXPECT_FALSE(XmlEncode("This is an 'n' with a tilde: \xc3\xb1", &output));
+}
+
+TEST_F(OmahaRequestBuilderXmlTest, XmlEncodeWithDefaultTest) {
+ EXPECT_EQ("", XmlEncodeWithDefault(""));
+ EXPECT_EQ("<&>", XmlEncodeWithDefault("<&>", "something else"));
+ EXPECT_EQ("<not escaped>", XmlEncodeWithDefault("\xc2", "<not escaped>"));
+}
+
+TEST_F(OmahaRequestBuilderXmlTest, PlatformGetAppTest) {
+ OmahaRequestParams omaha_request_params{&fake_system_state_};
+ omaha_request_params.set_device_requisition("device requisition");
+ OmahaRequestBuilderXml omaha_request{nullptr,
+ &omaha_request_params,
+ false,
+ false,
+ 0,
+ 0,
+ 0,
+ fake_system_state_.prefs(),
+ ""};
+ OmahaAppData dlc_app_data = {.id = "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX",
+ .version = "",
+ .skip_update = false,
+ .is_dlc = false};
+
+ // Verify that the attributes that shouldn't be missing for Platform AppID are
+ // in fact present in the <app ...></app>.
+ const string app = omaha_request.GetApp(dlc_app_data);
+ EXPECT_NE(string::npos, app.find("lang="));
+ EXPECT_NE(string::npos, app.find("fw_version="));
+ EXPECT_NE(string::npos, app.find("ec_version="));
+ EXPECT_NE(string::npos, app.find("requisition="));
+}
+
+TEST_F(OmahaRequestBuilderXmlTest, DlcGetAppTest) {
+ OmahaRequestParams omaha_request_params{&fake_system_state_};
+ omaha_request_params.set_device_requisition("device requisition");
+ OmahaRequestBuilderXml omaha_request{nullptr,
+ &omaha_request_params,
+ false,
+ false,
+ 0,
+ 0,
+ 0,
+ fake_system_state_.prefs(),
+ ""};
+ OmahaAppData dlc_app_data = {
+ .id = "_dlc_id", .version = "", .skip_update = false, .is_dlc = true};
+
+ // Verify that the attributes that should be missing for DLC AppIDs are in
+ // fact not present in the <app ...></app>.
+ const string app = omaha_request.GetApp(dlc_app_data);
+ EXPECT_EQ(string::npos, app.find("lang="));
+ EXPECT_EQ(string::npos, app.find("fw_version="));
+ EXPECT_EQ(string::npos, app.find("ec_version="));
+ EXPECT_EQ(string::npos, app.find("requisition="));
+}
+
+TEST_F(OmahaRequestBuilderXmlTest, GetRequestXmlRequestIdTest) {
+ OmahaRequestParams omaha_request_params{&fake_system_state_};
+ OmahaRequestBuilderXml omaha_request{nullptr,
+ &omaha_request_params,
+ false,
+ false,
+ 0,
+ 0,
+ 0,
+ fake_system_state_.prefs(),
+ ""};
+ const string request_xml = omaha_request.GetRequest();
+ const string key = "requestid";
+ const string request_id =
+ FindAttributeKeyValueInXml(request_xml, key, kGuidSize);
+ // A valid |request_id| is either a GUID version 4 or empty string.
+ if (!request_id.empty())
+ EXPECT_TRUE(base::IsValidGUID(request_id));
+}
+
+TEST_F(OmahaRequestBuilderXmlTest, GetRequestXmlSessionIdTest) {
+ const string gen_session_id = base::GenerateGUID();
+ OmahaRequestParams omaha_request_params{&fake_system_state_};
+ OmahaRequestBuilderXml omaha_request{nullptr,
+ &omaha_request_params,
+ false,
+ false,
+ 0,
+ 0,
+ 0,
+ fake_system_state_.prefs(),
+ gen_session_id};
+ const string request_xml = omaha_request.GetRequest();
+ const string key = "sessionid";
+ const string session_id =
+ FindAttributeKeyValueInXml(request_xml, key, kGuidSize);
+ // A valid |session_id| is either a GUID version 4 or empty string.
+ if (!session_id.empty()) {
+ EXPECT_TRUE(base::IsValidGUID(session_id));
+ }
+ EXPECT_EQ(gen_session_id, session_id);
+}
+
+TEST_F(OmahaRequestBuilderXmlTest, GetRequestXmlPlatformUpdateTest) {
+ OmahaRequestParams omaha_request_params{&fake_system_state_};
+ OmahaRequestBuilderXml omaha_request{nullptr,
+ &omaha_request_params,
+ false,
+ false,
+ 0,
+ 0,
+ 0,
+ fake_system_state_.prefs(),
+ ""};
+ const string request_xml = omaha_request.GetRequest();
+ EXPECT_EQ(1, CountSubstringInString(request_xml, "<updatecheck"))
+ << request_xml;
+}
+
+TEST_F(OmahaRequestBuilderXmlTest, GetRequestXmlPlatformUpdateWithDlcsTest) {
+ OmahaRequestParams omaha_request_params{&fake_system_state_};
+ omaha_request_params.set_dlc_apps_params(
+ {{omaha_request_params.GetDlcAppId("dlc_no_0"), {.name = "dlc_no_0"}},
+ {omaha_request_params.GetDlcAppId("dlc_no_1"), {.name = "dlc_no_1"}}});
+ OmahaRequestBuilderXml omaha_request{nullptr,
+ &omaha_request_params,
+ false,
+ false,
+ 0,
+ 0,
+ 0,
+ fake_system_state_.prefs(),
+ ""};
+ const string request_xml = omaha_request.GetRequest();
+ EXPECT_EQ(3, CountSubstringInString(request_xml, "<updatecheck"))
+ << request_xml;
+}
+
+TEST_F(OmahaRequestBuilderXmlTest, GetRequestXmlDlcInstallationTest) {
+ OmahaRequestParams omaha_request_params{&fake_system_state_};
+ const std::map<std::string, OmahaRequestParams::AppParams> dlcs = {
+ {omaha_request_params.GetDlcAppId("dlc_no_0"), {.name = "dlc_no_0"}},
+ {omaha_request_params.GetDlcAppId("dlc_no_1"), {.name = "dlc_no_1"}}};
+ omaha_request_params.set_dlc_apps_params(dlcs);
+ omaha_request_params.set_is_install(true);
+ OmahaRequestBuilderXml omaha_request{nullptr,
+ &omaha_request_params,
+ false,
+ false,
+ 0,
+ 0,
+ 0,
+ fake_system_state_.prefs(),
+ ""};
+ const string request_xml = omaha_request.GetRequest();
+ EXPECT_EQ(2, CountSubstringInString(request_xml, "<updatecheck"))
+ << request_xml;
+
+ auto FindAppId = [request_xml](size_t pos) -> size_t {
+ return request_xml.find("<app appid", pos);
+ };
+ // Skip over the Platform AppID, which is always first.
+ size_t pos = FindAppId(0);
+ for (auto&& _ : dlcs) {
+ (void)_;
+ EXPECT_NE(string::npos, (pos = FindAppId(pos + 1))) << request_xml;
+ const string dlc_app_id_version = FindAttributeKeyValueInXml(
+ request_xml.substr(pos), "version", string(kNoVersion).size());
+ EXPECT_EQ(kNoVersion, dlc_app_id_version);
+
+ const string false_str = "false";
+ const string dlc_app_id_delta_okay = FindAttributeKeyValueInXml(
+ request_xml.substr(pos), "delta_okay", false_str.length());
+ EXPECT_EQ(false_str, dlc_app_id_delta_okay);
+ }
+}
+
+TEST_F(OmahaRequestBuilderXmlTest, GetRequestXmlDlcNoPing) {
+ OmahaRequestParams omaha_request_params{&fake_system_state_};
+ omaha_request_params.set_dlc_apps_params(
+ {{omaha_request_params.GetDlcAppId("dlc_no_0"), {.name = "dlc_no_0"}}});
+ OmahaRequestBuilderXml omaha_request{nullptr,
+ &omaha_request_params,
+ false,
+ false,
+ 0,
+ 0,
+ 0,
+ fake_system_state_.prefs(),
+ ""};
+ const string request_xml = omaha_request.GetRequest();
+ EXPECT_EQ(0, CountSubstringInString(request_xml, "<ping")) << request_xml;
+}
+
+TEST_F(OmahaRequestBuilderXmlTest, GetRequestXmlDlcPingRollCallNoActive) {
+ OmahaRequestParams omaha_request_params{&fake_system_state_};
+ omaha_request_params.set_dlc_apps_params(
+ {{omaha_request_params.GetDlcAppId("dlc_no_0"),
+ {.active_counting_type = OmahaRequestParams::kDateBased,
+ .name = "dlc_no_0",
+ .ping_date_last_active = 25,
+ .ping_date_last_rollcall = 36,
+ .send_ping = true}}});
+ OmahaRequestBuilderXml omaha_request{nullptr,
+ &omaha_request_params,
+ false,
+ false,
+ 0,
+ 0,
+ 0,
+ fake_system_state_.prefs(),
+ ""};
+ const string request_xml = omaha_request.GetRequest();
+ EXPECT_EQ(1, CountSubstringInString(request_xml, "<ping rd=\"36\""))
+ << request_xml;
+}
+
+TEST_F(OmahaRequestBuilderXmlTest, GetRequestXmlDlcPingRollCallAndActive) {
+ OmahaRequestParams omaha_request_params{&fake_system_state_};
+ omaha_request_params.set_dlc_apps_params(
+ {{omaha_request_params.GetDlcAppId("dlc_no_0"),
+ {.active_counting_type = OmahaRequestParams::kDateBased,
+ .name = "dlc_no_0",
+ .ping_active = 1,
+ .ping_date_last_active = 25,
+ .ping_date_last_rollcall = 36,
+ .send_ping = true}}});
+ OmahaRequestBuilderXml omaha_request{nullptr,
+ &omaha_request_params,
+ false,
+ false,
+ 0,
+ 0,
+ 0,
+ fake_system_state_.prefs(),
+ ""};
+ const string request_xml = omaha_request.GetRequest();
+ EXPECT_EQ(1,
+ CountSubstringInString(request_xml,
+ "<ping active=\"1\" ad=\"25\" rd=\"36\""))
+ << request_xml;
+}
+} // namespace chromeos_update_engine
diff --git a/omaha_request_params.cc b/omaha_request_params.cc
index 8c410f1..d4b8d64 100644
--- a/omaha_request_params.cc
+++ b/omaha_request_params.cc
@@ -25,6 +25,7 @@
#include <vector>
#include <base/files/file_util.h>
+#include <base/stl_util.h>
#include <base/strings/string_util.h>
#include <base/strings/stringprintf.h>
#include <brillo/key_value_store.h>
@@ -39,9 +40,7 @@
#define CALL_MEMBER_FN(object, member) ((object).*(member))
-using std::map;
using std::string;
-using std::vector;
namespace chromeos_update_engine {
@@ -95,6 +94,7 @@
fw_version_ = system_state_->hardware()->GetFirmwareVersion();
ec_version_ = system_state_->hardware()->GetECVersion();
}
+ device_requisition_ = system_state_->hardware()->GetDeviceRequisition();
if (image_props_.current_channel == mutable_image_props_.target_channel) {
// deltas are only okay if the /.nodelta file does not exist. if we don't
@@ -123,7 +123,7 @@
// Set the interactive flag accordingly.
interactive_ = in_interactive;
- dlc_module_ids_.clear();
+ dlc_apps_params_.clear();
// Set false so it will do update by default.
is_install_ = false;
return true;
@@ -217,7 +217,7 @@
}
int OmahaRequestParams::GetChannelIndex(const string& channel) const {
- for (size_t t = 0; t < arraysize(kChannelsByStability); ++t)
+ for (size_t t = 0; t < base::size(kChannelsByStability); ++t)
if (channel == kChannelsByStability[t])
return t;
@@ -247,4 +247,21 @@
: image_props_.product_id;
}
+string OmahaRequestParams::GetDlcAppId(const std::string& dlc_id) const {
+  // Create APP ID according to |dlc_id| (sticking the current AppID to the
+  // DLC module ID with an underscore).
+ return GetAppId() + "_" + dlc_id;
+}
+
+bool OmahaRequestParams::IsDlcAppId(const std::string& app_id) const {
+ return dlc_apps_params().find(app_id) != dlc_apps_params().end();
+}
+
+void OmahaRequestParams::SetDlcNoUpdate(const string& app_id) {
+ auto itr = dlc_apps_params_.find(app_id);
+ if (itr == dlc_apps_params_.end())
+ return;
+ itr->second.updated = false;
+}
+
} // namespace chromeos_update_engine
diff --git a/omaha_request_params.h b/omaha_request_params.h
index 18235c0..3452965 100644
--- a/omaha_request_params.h
+++ b/omaha_request_params.h
@@ -19,6 +19,7 @@
#include <stdint.h>
+#include <map>
#include <string>
#include <vector>
@@ -26,6 +27,7 @@
#include <base/time/time.h>
#include <gtest/gtest_prod.h> // for FRIEND_TEST
+#include "update_engine/common/constants.h"
#include "update_engine/common/platform_constants.h"
#include "update_engine/image_properties.h"
@@ -51,6 +53,7 @@
delta_okay_(true),
interactive_(false),
rollback_allowed_(false),
+ rollback_data_save_requested_(false),
wall_clock_based_wait_enabled_(false),
update_check_count_wait_enabled_(false),
min_update_checks_needed_(kDefaultMinUpdateChecks),
@@ -59,6 +62,24 @@
virtual ~OmahaRequestParams();
+ enum ActiveCountingType {
+ kDayBased = 0,
+ kDateBased,
+ };
+
+ struct AppParams {
+ ActiveCountingType active_counting_type;
+ // |name| is only used for DLCs to store the DLC ID.
+ std::string name;
+ int64_t ping_active;
+ int64_t ping_date_last_active;
+ int64_t ping_date_last_rollcall;
+ bool send_ping;
+ // |updated| is only used for DLCs to decide sending DBus message to
+ // dlcservice on an install/update completion.
+ bool updated = true;
+ };
+
// Setters and getters for the various properties.
inline std::string os_platform() const { return os_platform_; }
inline std::string os_version() const { return os_version_; }
@@ -84,6 +105,7 @@
inline std::string hwid() const { return hwid_; }
inline std::string fw_version() const { return fw_version_; }
inline std::string ec_version() const { return ec_version_; }
+ inline std::string device_requisition() const { return device_requisition_; }
inline void set_app_version(const std::string& version) {
image_props_.version = version;
@@ -132,6 +154,23 @@
inline bool rollback_allowed() const { return rollback_allowed_; }
+ inline void set_rollback_data_save_requested(
+ bool rollback_data_save_requested) {
+ rollback_data_save_requested_ = rollback_data_save_requested;
+ }
+
+ inline bool rollback_data_save_requested() const {
+ return rollback_data_save_requested_;
+ }
+
+ inline void set_rollback_allowed_milestones(int rollback_allowed_milestones) {
+ rollback_allowed_milestones_ = rollback_allowed_milestones;
+ }
+
+ inline int rollback_allowed_milestones() const {
+ return rollback_allowed_milestones_;
+ }
+
inline void set_wall_clock_based_wait_enabled(bool enabled) {
wall_clock_based_wait_enabled_ = enabled;
}
@@ -165,20 +204,37 @@
inline int64_t max_update_checks_allowed() const {
return max_update_checks_allowed_;
}
- inline void set_dlc_module_ids(
- const std::vector<std::string>& dlc_module_ids) {
- dlc_module_ids_ = dlc_module_ids;
+ inline void set_dlc_apps_params(
+ const std::map<std::string, AppParams>& dlc_apps_params) {
+ dlc_apps_params_ = dlc_apps_params;
}
- inline std::vector<std::string> dlc_module_ids() const {
- return dlc_module_ids_;
+ inline const std::map<std::string, AppParams>& dlc_apps_params() const {
+ return dlc_apps_params_;
}
inline void set_is_install(bool is_install) { is_install_ = is_install; }
inline bool is_install() const { return is_install_; }
- // Returns the app id corresponding to the current value of the
+ inline void set_autoupdate_token(const std::string& token) {
+ autoupdate_token_ = token;
+ }
+ inline const std::string& autoupdate_token() const {
+ return autoupdate_token_;
+ }
+
+ // Returns the App ID corresponding to the current value of the
// download channel.
virtual std::string GetAppId() const;
+ // Returns the DLC app ID.
+ virtual std::string GetDlcAppId(const std::string& dlc_id) const;
+
+ // Returns true if the App ID is a DLC App ID that is currently part of the
+ // request parameters.
+ virtual bool IsDlcAppId(const std::string& app_id) const;
+
+  // If the App ID is a DLC App ID, mark it as having no update.
+ void SetDlcNoUpdate(const std::string& app_id);
+
// Suggested defaults
static const char kOsVersion[];
static const int64_t kDefaultMinUpdateChecks = 0;
@@ -210,8 +266,10 @@
// or Init is called again.
virtual void UpdateDownloadChannel();
- // Returns whether we should powerwash for this update.
- virtual bool ShouldPowerwash() const;
+ // Returns whether we should powerwash for this update. Note that this is
+ // just an indication, the final decision to powerwash or not is made in the
+ // response handler.
+ bool ShouldPowerwash() const;
// Check if the provided update URL is official, meaning either the default
// autoupdate server or the autoupdate autotest server.
@@ -240,6 +298,9 @@
void set_is_powerwash_allowed(bool powerwash_allowed) {
mutable_image_props_.is_powerwash_allowed = powerwash_allowed;
}
+ void set_device_requisition(const std::string& requisition) {
+ device_requisition_ = requisition;
+ }
private:
FRIEND_TEST(OmahaRequestParamsTest, ChannelIndexTest);
@@ -309,6 +370,9 @@
std::string hwid_; // Hardware Qualification ID of the client
std::string fw_version_; // Chrome OS Firmware Version.
std::string ec_version_; // Chrome OS EC Version.
+ // TODO(b:133324571) tracks removal of this field once it is no longer
+ // needed in AU requests. Remove by October 1st 2019.
+ std::string device_requisition_; // Chrome OS Requisition type.
bool delta_okay_; // If this client can accept a delta
bool interactive_; // Whether this is a user-initiated update check
@@ -322,6 +386,12 @@
// Whether the client is accepting rollback images too.
bool rollback_allowed_;
+ // Whether rollbacks should preserve some system state during powerwash.
+ bool rollback_data_save_requested_;
+
+ // How many milestones the client can rollback to.
+ int rollback_allowed_milestones_;
+
// True if scattering or staging are enabled, in which case waiting_period_
// specifies the amount of absolute time that we've to wait for before sending
// a request to Omaha.
@@ -339,14 +409,19 @@
// When reading files, prepend root_ to the paths. Useful for testing.
std::string root_;
- // A list of DLC module IDs to install.
- std::vector<std::string> dlc_module_ids_;
+ // A list of DLC modules to install. A mapping from DLC App ID to |AppParams|.
+ std::map<std::string, AppParams> dlc_apps_params_;
// This variable defines whether the payload is being installed in the current
// partition. At the moment, this is used for installing DLC modules on the
// current active partition instead of the inactive partition.
bool is_install_;
+ // Token used when making an update request for a specific build.
+ // For example: Token for a Quick Fix Build:
+ // https://cloud.google.com/docs/chrome-enterprise/policies/?policy=DeviceQuickFixBuildToken
+ std::string autoupdate_token_;
+
DISALLOW_COPY_AND_ASSIGN(OmahaRequestParams);
};
diff --git a/omaha_request_params_unittest.cc b/omaha_request_params_unittest.cc
index 7332431..bfcbc32 100644
--- a/omaha_request_params_unittest.cc
+++ b/omaha_request_params_unittest.cc
@@ -258,4 +258,8 @@
EXPECT_TRUE(params_.CollectECFWVersions());
}
+TEST_F(OmahaRequestParamsTest, RequisitionIsSetTest) {
+ EXPECT_TRUE(params_.Init("", "", false));
+ EXPECT_EQ("fake_requisition", params_.device_requisition());
+}
} // namespace chromeos_update_engine
diff --git a/omaha_response.h b/omaha_response.h
index 0ac09df..2b86fe7 100644
--- a/omaha_response.h
+++ b/omaha_response.h
@@ -51,6 +51,9 @@
// True if the payload described in this response is a delta payload.
// False if it's a full payload.
bool is_delta = false;
+ // True if the payload can be excluded from updating if consistently faulty.
+ // False if the payload is critical to update.
+ bool can_exclude = false;
};
std::vector<Package> packages;
@@ -102,6 +105,13 @@
// Key versions of the returned rollback image. Values are 0xffff if the
// image not a rollback, or the fields were not present.
RollbackKeyVersion rollback_key_version;
+
+ // Key versions of the N - rollback_allowed_milestones release. For example,
+ // if the current version is 70 and rollback_allowed_milestones is 4, this
+ // will contain the key versions of version 66. This is used to ensure that
+ // the kernel and firmware keys are at most those of v66 so that v66 can be
+ // rolled back to.
+ RollbackKeyVersion past_rollback_key_version;
};
static_assert(sizeof(off_t) == 8, "off_t not 64 bit");
diff --git a/omaha_response_handler_action.cc b/omaha_response_handler_action.cc
index ab41b84..040f8e7 100644
--- a/omaha_response_handler_action.cc
+++ b/omaha_response_handler_action.cc
@@ -21,6 +21,7 @@
#include <base/logging.h>
#include <base/strings/string_number_conversions.h>
+#include <base/version.h>
#include <policy/device_policy.h>
#include "update_engine/common/constants.h"
@@ -34,6 +35,7 @@
#include "update_engine/update_manager/policy.h"
#include "update_engine/update_manager/update_manager.h"
+using chromeos_update_manager::kRollforwardInfinity;
using chromeos_update_manager::Policy;
using chromeos_update_manager::UpdateManager;
using std::numeric_limits;
@@ -68,6 +70,9 @@
}
// This is the url to the first package, not all packages.
+ // (For updates): All |Action|s prior to this must pass in non-excluded URLs
+  // within the |OmahaResponse|, reference exclusion logic in
+ // |OmahaRequestAction| and keep the enforcement of exclusions for updates.
install_plan_.download_url = current_url;
install_plan_.version = response.version;
install_plan_.system_version = response.system_version;
@@ -96,7 +101,8 @@
return;
}
install_plan_.payloads.push_back(
- {.size = package.size,
+ {.payload_urls = package.payload_urls,
+ .size = package.size,
.metadata_size = package.metadata_size,
.metadata_signature = package.metadata_signature,
.hash = raw_hash,
@@ -145,10 +151,13 @@
completer.set_code(ErrorCode::kOmahaResponseInvalid);
return;
}
+
+  // Calculate the key version values on the current device.
auto min_kernel_key_version = static_cast<uint32_t>(
system_state_->hardware()->GetMinKernelKeyVersion());
auto min_firmware_key_version = static_cast<uint32_t>(
system_state_->hardware()->GetMinFirmwareKeyVersion());
+
uint32_t kernel_key_version =
static_cast<uint32_t>(response.rollback_key_version.kernel_key) << 16 |
static_cast<uint32_t>(response.rollback_key_version.kernel);
@@ -157,6 +166,12 @@
<< 16 |
static_cast<uint32_t>(response.rollback_key_version.firmware);
+ LOG(INFO) << "Rollback image versions:"
+ << " device_kernel_key_version=" << min_kernel_key_version
+ << " image_kernel_key_version=" << kernel_key_version
+ << " device_firmware_key_version=" << min_firmware_key_version
+ << " image_firmware_key_version=" << firmware_key_version;
+
// Don't attempt a rollback if the versions are incompatible or the
// target image does not specify the version information.
if (kernel_key_version == numeric_limits<uint32_t>::max() ||
@@ -168,10 +183,30 @@
return;
}
install_plan_.is_rollback = true;
+ install_plan_.rollback_data_save_requested =
+ params->rollback_data_save_requested();
}
- if (response.powerwash_required || params->ShouldPowerwash())
+ // Powerwash if either the response requires it or the parameters indicated
+ // powerwash and we are downgrading the version.
+ if (response.powerwash_required) {
install_plan_.powerwash_required = true;
+ } else if (params->ShouldPowerwash()) {
+ base::Version new_version(response.version);
+ base::Version current_version(params->app_version());
+
+ if (!new_version.IsValid()) {
+ LOG(WARNING) << "Not powerwashing,"
+ << " the update's version number is unreadable."
+ << " Update's version number: " << response.version;
+ } else if (!current_version.IsValid()) {
+ LOG(WARNING) << "Not powerwashing,"
+ << " the current version number is unreadable."
+ << " Current version number: " << params->app_version();
+ } else if (new_version < current_version) {
+ install_plan_.powerwash_required = true;
+ }
+ }
TEST_AND_RETURN(HasOutputPipe());
if (HasOutputPipe())
@@ -208,6 +243,53 @@
update_manager->PolicyRequest(
&Policy::UpdateCanBeApplied, &ec, &install_plan_);
completer.set_code(ec);
+
+ const auto allowed_milestones = params->rollback_allowed_milestones();
+ if (allowed_milestones > 0) {
+ auto max_firmware_rollforward = numeric_limits<uint32_t>::max();
+ auto max_kernel_rollforward = numeric_limits<uint32_t>::max();
+
+ // Determine the version to update the max rollforward verified boot
+ // value.
+ OmahaResponse::RollbackKeyVersion version =
+ response.past_rollback_key_version;
+
+ // Determine the max rollforward values to be set in the TPM.
+ max_firmware_rollforward = static_cast<uint32_t>(version.firmware_key)
+ << 16 |
+ static_cast<uint32_t>(version.firmware);
+ max_kernel_rollforward = static_cast<uint32_t>(version.kernel_key) << 16 |
+ static_cast<uint32_t>(version.kernel);
+
+ // In the case that the value is 0xffffffff, log a warning because the
+ // device should not be installing a rollback image without having version
+ // information.
+ if (max_firmware_rollforward == numeric_limits<uint32_t>::max() ||
+ max_kernel_rollforward == numeric_limits<uint32_t>::max()) {
+ LOG(WARNING)
+ << "Max rollforward values were not sent in rollback response: "
+ << " max_kernel_rollforward=" << max_kernel_rollforward
+ << " max_firmware_rollforward=" << max_firmware_rollforward
+ << " rollback_allowed_milestones="
+ << params->rollback_allowed_milestones();
+ } else {
+ LOG(INFO) << "Setting the max rollforward values: "
+ << " max_kernel_rollforward=" << max_kernel_rollforward
+ << " max_firmware_rollforward=" << max_firmware_rollforward
+ << " rollback_allowed_milestones="
+ << params->rollback_allowed_milestones();
+ system_state_->hardware()->SetMaxKernelKeyRollforward(
+ max_kernel_rollforward);
+ // TODO(crbug/783998): Set max firmware rollforward when implemented.
+ }
+ } else {
+ LOG(INFO) << "Rollback is not allowed. Setting max rollforward values"
+ << " to infinity";
+ // When rollback is not allowed, explicitly set the max roll forward to
+ // infinity.
+ system_state_->hardware()->SetMaxKernelKeyRollforward(kRollforwardInfinity);
+ // TODO(crbug/783998): Set max firmware rollforward when implemented.
+ }
}
bool OmahaResponseHandlerAction::AreHashChecksMandatory(
diff --git a/omaha_response_handler_action_unittest.cc b/omaha_response_handler_action_unittest.cc
index b47040b..04cfa73 100644
--- a/omaha_response_handler_action_unittest.cc
+++ b/omaha_response_handler_action_unittest.cc
@@ -38,6 +38,7 @@
using chromeos_update_engine::test_utils::WriteFileString;
using chromeos_update_manager::EvalStatus;
using chromeos_update_manager::FakeUpdateManager;
+using chromeos_update_manager::kRollforwardInfinity;
using chromeos_update_manager::MockPolicy;
using std::string;
using testing::_;
@@ -429,10 +430,11 @@
EXPECT_EQ(in.version, install_plan.version);
}
-TEST_F(OmahaResponseHandlerActionTest, ChangeToMoreStableChannelTest) {
+TEST_F(OmahaResponseHandlerActionTest,
+ ChangeToMoreStableVersionAndChannelTest) {
OmahaResponse in;
in.update_exists = true;
- in.version = "a.b.c.d";
+ in.version = "1.0.0.0";
in.packages.push_back({.payload_urls = {"https://MoreStableChannelTest"},
.size = 1,
.hash = kPayloadHashHex});
@@ -453,7 +455,7 @@
#endif // __ANDROID__
EXPECT_TRUE(params.SetTargetChannel("stable-channel", true, nullptr));
params.UpdateDownloadChannel();
- EXPECT_TRUE(params.ShouldPowerwash());
+ params.set_app_version("2.0.0.0");
fake_system_state_.set_request_params(¶ms);
InstallPlan install_plan;
@@ -461,10 +463,79 @@
EXPECT_TRUE(install_plan.powerwash_required);
}
-TEST_F(OmahaResponseHandlerActionTest, ChangeToLessStableChannelTest) {
+TEST_F(OmahaResponseHandlerActionTest,
+ ChangeToMoreStableVersionAndChannelPowerwashNotAllowedTest) {
OmahaResponse in;
in.update_exists = true;
- in.version = "a.b.c.d";
+ in.version = "1.0.0.0";
+ in.packages.push_back({.payload_urls = {"https://MoreStableChannelTest"},
+ .size = 1,
+ .hash = kPayloadHashHex});
+ in.more_info_url = "http://more/info";
+
+ // Create a uniquely named test directory.
+ base::ScopedTempDir tempdir;
+ ASSERT_TRUE(tempdir.CreateUniqueTempDir());
+
+ OmahaRequestParams params(&fake_system_state_);
+ fake_system_state_.fake_hardware()->SetIsOfficialBuild(false);
+ params.set_root(tempdir.GetPath().value());
+ params.set_current_channel("canary-channel");
+ // The |ImageProperties| in Android uses prefs to store
+ // |MutableImageProperties|.
+#ifdef __ANDROID__
+ EXPECT_CALL(*fake_system_state_.mock_prefs(), SetBoolean(_, true))
+ .WillOnce(Return(true));
+#endif // __ANDROID__
+ EXPECT_TRUE(params.SetTargetChannel("stable-channel", false, nullptr));
+ params.UpdateDownloadChannel();
+ params.set_app_version("2.0.0.0");
+
+ fake_system_state_.set_request_params(¶ms);
+ InstallPlan install_plan;
+ EXPECT_TRUE(DoTest(in, "", &install_plan));
+ EXPECT_FALSE(install_plan.powerwash_required);
+}
+
+TEST_F(OmahaResponseHandlerActionTest,
+ ChangeToMoreStableChannelButNewerVersionTest) {
+ OmahaResponse in;
+ in.update_exists = true;
+ in.version = "12345.96.0.0";
+ in.packages.push_back({.payload_urls = {"https://ChannelDownVersionUp"},
+ .size = 1,
+ .hash = kPayloadHashHex});
+ in.more_info_url = "http://more/info";
+
+ // Create a uniquely named test directory.
+ base::ScopedTempDir tempdir;
+ ASSERT_TRUE(tempdir.CreateUniqueTempDir());
+
+ OmahaRequestParams params(&fake_system_state_);
+ fake_system_state_.fake_hardware()->SetIsOfficialBuild(false);
+ params.set_root(tempdir.GetPath().value());
+ params.set_current_channel("beta-channel");
+ // The |ImageProperties| in Android uses prefs to store
+ // |MutableImageProperties|.
+#ifdef __ANDROID__
+ EXPECT_CALL(*fake_system_state_.mock_prefs(), SetBoolean(_, true))
+ .WillOnce(Return(true));
+#endif // __ANDROID__
+ EXPECT_TRUE(params.SetTargetChannel("stable-channel", true, nullptr));
+ params.UpdateDownloadChannel();
+ params.set_app_version("12345.48.0.0");
+
+ fake_system_state_.set_request_params(¶ms);
+ InstallPlan install_plan;
+ EXPECT_TRUE(DoTest(in, "", &install_plan));
+ EXPECT_FALSE(install_plan.powerwash_required);
+}
+
+TEST_F(OmahaResponseHandlerActionTest,
+ ChangeToLessStableVersionAndChannelTest) {
+ OmahaResponse in;
+ in.update_exists = true;
+ in.version = "2.0.0.0";
in.packages.push_back({.payload_urls = {"https://LessStableChannelTest"},
.size = 15,
.hash = kPayloadHashHex});
@@ -485,7 +556,7 @@
#endif // __ANDROID__
EXPECT_TRUE(params.SetTargetChannel("canary-channel", false, nullptr));
params.UpdateDownloadChannel();
- EXPECT_FALSE(params.ShouldPowerwash());
+ params.set_app_version("1.0.0.0");
fake_system_state_.set_request_params(¶ms);
InstallPlan install_plan;
@@ -534,21 +605,44 @@
.size = 1,
.hash = kPayloadHashHex});
in.is_rollback = true;
- in.rollback_key_version.kernel = 1;
- in.rollback_key_version.kernel = 2;
- in.rollback_key_version.firmware_key = 3;
- in.rollback_key_version.firmware = 4;
+
+ // The rollback payload is 2 versions behind stable.
+  in.rollback_key_version.kernel_key = 24;
+ in.rollback_key_version.kernel = 23;
+ in.rollback_key_version.firmware_key = 22;
+ in.rollback_key_version.firmware = 21;
+
+ OmahaResponse::RollbackKeyVersion m4;
+ m4.firmware_key = 16;
+ m4.firmware = 15;
+ m4.kernel_key = 14;
+ m4.kernel = 13;
+
+ in.past_rollback_key_version = m4;
fake_system_state_.fake_hardware()->SetMinKernelKeyVersion(0x00010002);
fake_system_state_.fake_hardware()->SetMinFirmwareKeyVersion(0x00030004);
+ fake_system_state_.fake_hardware()->SetMaxKernelKeyRollforward(0xaaaaaaaa);
+ // TODO(crbug/783998): Add support for firmware when implemented.
+
OmahaRequestParams params(&fake_system_state_);
params.set_rollback_allowed(true);
+ params.set_rollback_allowed_milestones(4);
fake_system_state_.set_request_params(¶ms);
InstallPlan install_plan;
EXPECT_TRUE(DoTest(in, "", &install_plan));
EXPECT_TRUE(install_plan.is_rollback);
+
+  // The max rollforward should be set to the values of the image
+ // rollback_allowed_milestones (4 for this test) in the past.
+ const uint32_t expected_max_kernel_rollforward =
+ static_cast<uint32_t>(m4.kernel_key) << 16 |
+ static_cast<uint32_t>(m4.kernel);
+ EXPECT_EQ(expected_max_kernel_rollforward,
+ fake_system_state_.fake_hardware()->GetMaxKernelKeyRollforward());
+ // TODO(crbug/783998): Add support for firmware when implemented.
}
TEST_F(OmahaResponseHandlerActionTest, RollbackKernelVersionErrorTest) {
@@ -563,18 +657,36 @@
in.rollback_key_version.firmware_key = 3;
in.rollback_key_version.firmware = 4;
+ OmahaResponse::RollbackKeyVersion m4;
+ m4.firmware_key = 16;
+ m4.firmware = 15;
+ m4.kernel_key = 14;
+ m4.kernel = 13;
+ in.past_rollback_key_version = m4;
+
fake_system_state_.fake_hardware()->SetMinKernelKeyVersion(0x00010002);
fake_system_state_.fake_hardware()->SetMinFirmwareKeyVersion(0x00030004);
+ const uint32_t current_kernel_max_rollforward = 0xaaaaaaaa;
+ fake_system_state_.fake_hardware()->SetMaxKernelKeyRollforward(
+ current_kernel_max_rollforward);
OmahaRequestParams params(&fake_system_state_);
params.set_rollback_allowed(true);
+ params.set_rollback_allowed_milestones(4);
fake_system_state_.set_request_params(¶ms);
InstallPlan install_plan;
EXPECT_FALSE(DoTest(in, "", &install_plan));
+
+ // Max rollforward is not changed in error cases.
+ EXPECT_EQ(current_kernel_max_rollforward,
+ fake_system_state_.fake_hardware()->GetMaxKernelKeyRollforward());
+ // TODO(crbug/783998): Add support for firmware when implemented.
}
TEST_F(OmahaResponseHandlerActionTest, RollbackFirmwareVersionErrorTest) {
+ // TODO(crbug/783998): Add handling for max_firmware_rollforward when
+ // implemented.
OmahaResponse in;
in.update_exists = true;
in.packages.push_back({.payload_urls = {"https://RollbackTest"},
@@ -591,6 +703,7 @@
OmahaRequestParams params(&fake_system_state_);
params.set_rollback_allowed(true);
+ params.set_rollback_allowed_milestones(4);
fake_system_state_.set_request_params(¶ms);
InstallPlan install_plan;
@@ -605,13 +718,23 @@
.hash = kPayloadHashHex});
in.is_rollback = false;
+ const uint32_t current_kernel_max_rollforward = 0xaaaaaaaa;
+ fake_system_state_.fake_hardware()->SetMaxKernelKeyRollforward(
+ current_kernel_max_rollforward);
+
OmahaRequestParams params(&fake_system_state_);
params.set_rollback_allowed(true);
+ params.set_rollback_allowed_milestones(4);
fake_system_state_.set_request_params(¶ms);
InstallPlan install_plan;
EXPECT_TRUE(DoTest(in, "", &install_plan));
EXPECT_FALSE(install_plan.is_rollback);
+
+ // Max rollforward is not changed for non-rollback cases.
+ EXPECT_EQ(current_kernel_max_rollforward,
+ fake_system_state_.fake_hardware()->GetMaxKernelKeyRollforward());
+ // TODO(crbug/783998): Add support for firmware when implemented.
}
TEST_F(OmahaResponseHandlerActionTest, RollbackNotAllowedTest) {
@@ -624,10 +747,46 @@
OmahaRequestParams params(&fake_system_state_);
params.set_rollback_allowed(false);
+ params.set_rollback_allowed_milestones(4);
+
+ const uint32_t current_kernel_max_rollforward = 0xaaaaaaaa;
+ fake_system_state_.fake_hardware()->SetMaxKernelKeyRollforward(
+ current_kernel_max_rollforward);
fake_system_state_.set_request_params(¶ms);
InstallPlan install_plan;
EXPECT_FALSE(DoTest(in, "", &install_plan));
+
+  // This case generates an error, so do not update max rollforward.
+ EXPECT_EQ(current_kernel_max_rollforward,
+ fake_system_state_.fake_hardware()->GetMaxKernelKeyRollforward());
+ // TODO(crbug/783998): Add support for firmware when implemented.
+}
+
+TEST_F(OmahaResponseHandlerActionTest, NormalUpdateWithZeroMilestonesAllowed) {
+ OmahaResponse in;
+ in.update_exists = true;
+ in.packages.push_back({.payload_urls = {"https://RollbackTest"},
+ .size = 1,
+ .hash = kPayloadHashHex});
+ in.is_rollback = false;
+
+ OmahaRequestParams params(&fake_system_state_);
+ params.set_rollback_allowed(true);
+ params.set_rollback_allowed_milestones(0);
+
+ const uint32_t current_kernel_max_rollforward = 0xaaaaaaaa;
+ fake_system_state_.fake_hardware()->SetMaxKernelKeyRollforward(
+ current_kernel_max_rollforward);
+
+ fake_system_state_.set_request_params(¶ms);
+ InstallPlan install_plan;
+ EXPECT_TRUE(DoTest(in, "", &install_plan));
+
+ // When allowed_milestones is 0, this is set to infinity.
+ EXPECT_EQ(kRollforwardInfinity,
+ fake_system_state_.fake_hardware()->GetMaxKernelKeyRollforward());
+ // TODO(crbug/783998): Add support for firmware when implemented.
}
TEST_F(OmahaResponseHandlerActionTest, SystemVersionTest) {
diff --git a/omaha_utils.cc b/omaha_utils.cc
index 6bd7525..c7f9921 100644
--- a/omaha_utils.cc
+++ b/omaha_utils.cc
@@ -17,41 +17,21 @@
#include "update_engine/omaha_utils.h"
#include <base/logging.h>
+#include <base/strings/string_number_conversions.h>
namespace chromeos_update_engine {
-namespace {
+const EolDate kEolDateInvalid = -9999;
-// The possible string values for the end-of-life status.
-const char kEolStatusSupported[] = "supported";
-const char kEolStatusSecurityOnly[] = "security-only";
-const char kEolStatusEol[] = "eol";
-
-} // namespace
-
-const char* EolStatusToString(EolStatus eol_status) {
- switch (eol_status) {
- case EolStatus::kSupported:
- return kEolStatusSupported;
- case EolStatus::kSecurityOnly:
- return kEolStatusSecurityOnly;
- case EolStatus::kEol:
- return kEolStatusEol;
- }
- // Only reached if an invalid number is casted to |EolStatus|.
- LOG(WARNING) << "Invalid EolStatus value: " << static_cast<int>(eol_status);
- return kEolStatusSupported;
+std::string EolDateToString(EolDate eol_date) {
+ return base::NumberToString(eol_date);
}
-EolStatus StringToEolStatus(const std::string& eol_status) {
- if (eol_status == kEolStatusSupported || eol_status.empty())
- return EolStatus::kSupported;
- if (eol_status == kEolStatusSecurityOnly)
- return EolStatus::kSecurityOnly;
- if (eol_status == kEolStatusEol)
- return EolStatus::kEol;
- LOG(WARNING) << "Invalid end-of-life attribute: " << eol_status;
- return EolStatus::kSupported;
+EolDate StringToEolDate(const std::string& eol_date) {
+ EolDate date = kEolDateInvalid;
+ if (!base::StringToInt64(eol_date, &date))
+ return kEolDateInvalid;
+ return date;
}
} // namespace chromeos_update_engine
diff --git a/omaha_utils.h b/omaha_utils.h
index 8614540..458bf9e 100644
--- a/omaha_utils.h
+++ b/omaha_utils.h
@@ -21,19 +21,18 @@
namespace chromeos_update_engine {
-// The end-of-life status of the device.
-enum class EolStatus {
- kSupported = 0,
- kSecurityOnly,
- kEol,
-};
+using EolDate = int64_t;
-// Returns the string representation of the |eol_status|.
-const char* EolStatusToString(EolStatus eol_status);
+// |EolDate| indicating an invalid end-of-life date.
+extern const EolDate kEolDateInvalid;
-// Converts the end-of-life status string to an EolStatus numeric value. In case
-// of an invalid string, the default "supported" value will be used instead.
-EolStatus StringToEolStatus(const std::string& eol_status);
+// Returns the string representation of the |eol_date|.
+std::string EolDateToString(EolDate eol_date);
+
+// Converts the end-of-life date string to an EolDate numeric value. In case
+// of an invalid string, the default |kEolDateInvalid| value will be used
+// instead.
+EolDate StringToEolDate(const std::string& eol_date);
} // namespace chromeos_update_engine
diff --git a/omaha_utils_unittest.cc b/omaha_utils_unittest.cc
index 8ceb76b..849905a 100644
--- a/omaha_utils_unittest.cc
+++ b/omaha_utils_unittest.cc
@@ -23,20 +23,17 @@
class OmahaUtilsTest : public ::testing::Test {};
-TEST(OmahaUtilsTest, EolStatusTest) {
- EXPECT_EQ(EolStatus::kEol, StringToEolStatus("eol"));
-
+TEST(OmahaUtilsTest, EolDateTest) {
// Supported values are converted back and forth properly.
- const std::vector<EolStatus> tests = {
- EolStatus::kSupported, EolStatus::kSecurityOnly, EolStatus::kEol};
- for (EolStatus eol_status : tests) {
- EXPECT_EQ(eol_status, StringToEolStatus(EolStatusToString(eol_status)))
- << "The StringToEolStatus() was " << EolStatusToString(eol_status);
+ const std::vector<EolDate> tests = {kEolDateInvalid, -1, 0, 1};
+ for (EolDate eol_date : tests) {
+ EXPECT_EQ(eol_date, StringToEolDate(EolDateToString(eol_date)))
+ << "The StringToEolDate() was " << EolDateToString(eol_date);
}
// Invalid values are assumed as "supported".
- EXPECT_EQ(EolStatus::kSupported, StringToEolStatus(""));
- EXPECT_EQ(EolStatus::kSupported, StringToEolStatus("hello, world!"));
+ EXPECT_EQ(kEolDateInvalid, StringToEolDate(""));
+ EXPECT_EQ(kEolDateInvalid, StringToEolDate("hello, world!"));
}
} // namespace chromeos_update_engine
diff --git a/p2p_manager.cc b/p2p_manager.cc
index 6720908..00ff8ce 100644
--- a/p2p_manager.cc
+++ b/p2p_manager.cc
@@ -65,7 +65,6 @@
using chromeos_update_manager::EvalStatus;
using chromeos_update_manager::Policy;
using chromeos_update_manager::UpdateManager;
-using std::map;
using std::pair;
using std::string;
using std::unique_ptr;
@@ -249,12 +248,12 @@
bool P2PManagerImpl::EnsureP2P(bool should_be_running) {
int return_code = 0;
- string output;
+ string stderr;
may_be_running_ = true; // Unless successful, we must be conservative.
vector<string> args = configuration_->GetInitctlArgs(should_be_running);
- if (!Subprocess::SynchronousExec(args, &return_code, &output)) {
+ if (!Subprocess::SynchronousExec(args, &return_code, nullptr, &stderr)) {
LOG(ERROR) << "Error spawning " << utils::StringVectorToString(args);
return false;
}
@@ -268,7 +267,7 @@
const char* expected_error_message =
should_be_running ? "initctl: Job is already running: p2p\n"
: "initctl: Unknown instance \n";
- if (output != expected_error_message)
+ if (stderr != expected_error_message)
return false;
}
diff --git a/parcelable_update_engine_status.cc b/parcelable_update_engine_status.cc
deleted file mode 100644
index 8a2dbeb..0000000
--- a/parcelable_update_engine_status.cc
+++ /dev/null
@@ -1,122 +0,0 @@
-//
-// Copyright (C) 2016 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#include "update_engine/parcelable_update_engine_status.h"
-#include "update_engine/update_status_utils.h"
-
-#include <binder/Parcel.h>
-
-using update_engine::UpdateEngineStatus;
-
-namespace android {
-namespace brillo {
-
-ParcelableUpdateEngineStatus::ParcelableUpdateEngineStatus(
- const UpdateEngineStatus& status)
- : last_checked_time_(status.last_checked_time),
- current_operation_(
- chromeos_update_engine::UpdateStatusToString(status.status)),
- progress_(status.progress),
- current_version_(String16{status.current_version.c_str()}),
- current_system_version_(String16{status.current_system_version.c_str()}),
- new_size_(status.new_size_bytes),
- new_version_(String16{status.new_version.c_str()}),
- new_system_version_(String16{status.new_system_version.c_str()}) {}
-
-status_t ParcelableUpdateEngineStatus::writeToParcel(Parcel* parcel) const {
- status_t status;
-
- status = parcel->writeInt64(last_checked_time_);
- if (status != OK) {
- return status;
- }
-
- status = parcel->writeString16(current_operation_);
- if (status != OK) {
- return status;
- }
-
- status = parcel->writeDouble(progress_);
- if (status != OK) {
- return status;
- }
-
- status = parcel->writeString16(current_version_);
- if (status != OK) {
- return status;
- }
-
- status = parcel->writeString16(current_system_version_);
- if (status != OK) {
- return status;
- }
-
- status = parcel->writeInt64(new_size_);
- if (status != OK) {
- return status;
- }
-
- status = parcel->writeString16(new_version_);
- if (status != OK) {
- return status;
- }
-
- return parcel->writeString16(new_system_version_);
-}
-
-status_t ParcelableUpdateEngineStatus::readFromParcel(const Parcel* parcel) {
- status_t status;
-
- status = parcel->readInt64(&last_checked_time_);
- if (status != OK) {
- return status;
- }
-
- status = parcel->readString16(&current_operation_);
- if (status != OK) {
- return status;
- }
-
- status = parcel->readDouble(&progress_);
- if (status != OK) {
- return status;
- }
-
- status = parcel->readString16(&current_version_);
- if (status != OK) {
- return status;
- }
-
- status = parcel->readString16(&current_system_version_);
- if (status != OK) {
- return status;
- }
-
- status = parcel->readInt64(&new_size_);
- if (status != OK) {
- return status;
- }
-
- status = parcel->readString16(&new_version_);
- if (status != OK) {
- return status;
- }
-
- return parcel->readString16(&new_system_version_);
-}
-
-} // namespace brillo
-} // namespace android
diff --git a/parcelable_update_engine_status.h b/parcelable_update_engine_status.h
deleted file mode 100644
index 3feac76..0000000
--- a/parcelable_update_engine_status.h
+++ /dev/null
@@ -1,63 +0,0 @@
-//
-// Copyright (C) 2016 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#ifndef UPDATE_ENGINE_PARCELABLE_UPDATE_ENGINE_STATUS_H_
-#define UPDATE_ENGINE_PARCELABLE_UPDATE_ENGINE_STATUS_H_
-
-#include <binder/Parcelable.h>
-#include <utils/String16.h>
-
-#include "update_engine/client_library/include/update_engine/update_status.h"
-
-namespace android {
-namespace brillo {
-
-// Parcelable object containing the current status of update engine, to be sent
-// over binder to clients from the server.
-class ParcelableUpdateEngineStatus : public Parcelable {
- public:
- ParcelableUpdateEngineStatus() = default;
- explicit ParcelableUpdateEngineStatus(
- const update_engine::UpdateEngineStatus& status);
- virtual ~ParcelableUpdateEngineStatus() = default;
-
- status_t writeToParcel(Parcel* parcel) const override;
- status_t readFromParcel(const Parcel* parcel) override;
-
- // This list is kept in the Parcelable serialization order.
-
- // When the update_engine last checked for updates (seconds since unix Epoch)
- int64_t last_checked_time_;
- // The current status/operation of the update_engine.
- android::String16 current_operation_;
- // The current progress (0.0f-1.0f).
- double progress_;
- // The current product version.
- android::String16 current_version_;
- // The current system version.
- android::String16 current_system_version_;
- // The size of the update (bytes). This is int64_t for java compatibility.
- int64_t new_size_;
- // The new product version.
- android::String16 new_version_;
- // The new system version, if there is one (empty, otherwise).
- android::String16 new_system_version_;
-};
-
-} // namespace brillo
-} // namespace android
-
-#endif // UPDATE_ENGINE_PARCELABLE_UPDATE_ENGINE_STATUS_H_
diff --git a/parcelable_update_engine_status_unittest.cc b/parcelable_update_engine_status_unittest.cc
deleted file mode 100644
index 20decb6..0000000
--- a/parcelable_update_engine_status_unittest.cc
+++ /dev/null
@@ -1,92 +0,0 @@
-//
-// Copyright (C) 2017 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#include "update_engine/parcelable_update_engine_status.h"
-#include "update_engine/update_status_utils.h"
-
-#include <binder/Parcel.h>
-#include <gtest/gtest.h>
-
-using android::Parcel;
-using android::status_t;
-using android::String16;
-using android::brillo::ParcelableUpdateEngineStatus;
-using update_engine::UpdateEngineStatus;
-using update_engine::UpdateStatus;
-
-TEST(ParcelableUpdateEngineStatusTest, TestCreationFromUpdateEngineStatus) {
- // This test creates an object and verifies that all the UpdateEngineStatus
- // values are properly reflected in the Parcelable version of the class.
-
- UpdateEngineStatus ue_status = {123456789,
- UpdateStatus::DOWNLOADING,
- "0.1.2.3",
- "1.2.3.4",
- 0.5f,
- 34567,
- "2.3.4.5",
- "3.4.5.6"};
- ParcelableUpdateEngineStatus parcelable_status(ue_status);
- EXPECT_EQ(ue_status.last_checked_time, parcelable_status.last_checked_time_);
- EXPECT_EQ(
- String16{chromeos_update_engine::UpdateStatusToString(ue_status.status)},
- parcelable_status.current_operation_);
- EXPECT_EQ(String16{ue_status.current_version.c_str()},
- parcelable_status.current_version_);
- EXPECT_EQ(String16{ue_status.current_system_version.c_str()},
- parcelable_status.current_system_version_);
- EXPECT_EQ(ue_status.progress, parcelable_status.progress_);
- EXPECT_EQ(static_cast<int64_t>(ue_status.new_size_bytes),
- parcelable_status.new_size_);
- EXPECT_EQ(String16{ue_status.new_version.c_str()},
- parcelable_status.new_version_);
- EXPECT_EQ(String16{ue_status.new_system_version.c_str()},
- parcelable_status.new_system_version_);
-}
-
-TEST(ParcelableUpdateEngineStatusTest, TestParceling) {
- // This tests the writeToParcel and readFromParcel methods for being correctly
- // matched.
- UpdateEngineStatus ue_status = {123456789,
- UpdateStatus::DOWNLOADING,
- "0.1.2.3",
- "1.2.3.4",
- 0.5f,
- 34567,
- "2.3.4.5",
- "3.4.5.6"};
- ParcelableUpdateEngineStatus source_status(ue_status);
- Parcel parcel_source, parcel_target;
- status_t status = source_status.writeToParcel(&parcel_source);
- EXPECT_EQ(::android::OK, status);
- size_t parcel_len = parcel_source.dataSize();
- status = parcel_target.setData(parcel_source.data(), parcel_len);
- EXPECT_EQ(::android::OK, status);
- ParcelableUpdateEngineStatus target_status;
- status = target_status.readFromParcel(&parcel_target);
- EXPECT_EQ(::android::OK, status);
-
- EXPECT_EQ(source_status.last_checked_time_, target_status.last_checked_time_);
- EXPECT_EQ(source_status.current_operation_, target_status.current_operation_);
- EXPECT_EQ(source_status.current_version_, target_status.current_version_);
- EXPECT_EQ(source_status.current_system_version_,
- target_status.current_system_version_);
- EXPECT_EQ(source_status.progress_, target_status.progress_);
- EXPECT_EQ(source_status.new_size_, target_status.new_size_);
- EXPECT_EQ(source_status.new_version_, target_status.new_version_);
- EXPECT_EQ(source_status.new_system_version_,
- target_status.new_system_version_);
-}
diff --git a/payload_consumer/delta_performer.cc b/payload_consumer/delta_performer.cc
index d9b739d..af1baa4 100644
--- a/payload_consumer/delta_performer.cc
+++ b/payload_consumer/delta_performer.cc
@@ -57,9 +57,6 @@
#endif // USE_FEC
#include "update_engine/payload_consumer/file_descriptor_utils.h"
#include "update_engine/payload_consumer/mount_history.h"
-#if USE_MTD
-#include "update_engine/payload_consumer/mtd_file_descriptor.h"
-#endif // USE_MTD
#include "update_engine/payload_consumer/payload_constants.h"
#include "update_engine/payload_consumer/payload_verifier.h"
#include "update_engine/payload_consumer/xz_extent_writer.h"
@@ -79,40 +76,9 @@
namespace {
const int kUpdateStateOperationInvalid = -1;
const int kMaxResumedUpdateFailures = 10;
-#if USE_MTD
-const int kUbiVolumeAttachTimeout = 5 * 60;
-#endif
const uint64_t kCacheSize = 1024 * 1024; // 1MB
-FileDescriptorPtr CreateFileDescriptor(const char* path) {
- FileDescriptorPtr ret;
-#if USE_MTD
- if (strstr(path, "/dev/ubi") == path) {
- if (!UbiFileDescriptor::IsUbi(path)) {
- // The volume might not have been attached at boot time.
- int volume_no;
- if (utils::SplitPartitionName(path, nullptr, &volume_no)) {
- utils::TryAttachingUbiVolume(volume_no, kUbiVolumeAttachTimeout);
- }
- }
- if (UbiFileDescriptor::IsUbi(path)) {
- LOG(INFO) << path << " is a UBI device.";
- ret.reset(new UbiFileDescriptor);
- }
- } else if (MtdFileDescriptor::IsMtd(path)) {
- LOG(INFO) << path << " is an MTD device.";
- ret.reset(new MtdFileDescriptor);
- } else {
- LOG(INFO) << path << " is not an MTD nor a UBI device.";
-#endif
- ret.reset(new EintrSafeFileDescriptor);
-#if USE_MTD
- }
-#endif
- return ret;
-}
-
// Opens path for read/write. On success returns an open FileDescriptor
// and sets *err to 0. On failure, sets *err to errno and returns nullptr.
FileDescriptorPtr OpenFile(const char* path,
@@ -124,18 +90,11 @@
bool read_only = (mode & O_ACCMODE) == O_RDONLY;
utils::SetBlockDeviceReadOnly(path, read_only);
- FileDescriptorPtr fd = CreateFileDescriptor(path);
+ FileDescriptorPtr fd(new EintrSafeFileDescriptor());
if (cache_writes && !read_only) {
fd = FileDescriptorPtr(new CachedFileDescriptor(fd, kCacheSize));
LOG(INFO) << "Caching writes.";
}
-#if USE_MTD
- // On NAND devices, we can either read, or write, but not both. So here we
- // use O_WRONLY.
- if (UbiFileDescriptor::IsUbi(path) || MtdFileDescriptor::IsMtd(path)) {
- mode = O_WRONLY;
- }
-#endif
if (!fd->Open(path, mode, 000)) {
*err = errno;
PLOG(ERROR) << "Unable to open file " << path;
@@ -359,11 +318,10 @@
install_plan_->partitions.size() - partitions_.size();
const InstallPlan::Partition& install_part =
install_plan_->partitions[num_previous_partitions + current_partition_];
- // Open source fds if we have a delta payload with minor version >= 2, or for
- // partitions in the partial update.
+ // Open source fds if we have a delta payload, or for partitions in the
+ // partial update.
bool source_may_exist = manifest_.partial_update() ||
- (payload_->type == InstallPayloadType::kDelta &&
- GetMinorVersion() != kInPlaceMinorPayloadVersion);
+ payload_->type == InstallPayloadType::kDelta;
// We shouldn't open the source partition in certain cases, e.g. some dynamic
// partitions in delta payload, partitions included in the full payload for
// partial updates. Use the source size as the indicator.
@@ -419,9 +377,8 @@
if (current_partition_ >= partitions_.size())
return false;
- // No support for ECC in minor version 1 or full payloads.
- if (payload_->type == InstallPayloadType::kFull ||
- GetMinorVersion() == kInPlaceMinorPayloadVersion)
+ // No support for ECC for full payloads.
+ if (payload_->type == InstallPayloadType::kFull)
return false;
#if USE_FEC
@@ -510,6 +467,21 @@
return MetadataParseResult::kError;
}
}
+
+ // Check that the |metadata signature size_| and |metadata_size_| are not
+ // very big numbers. This is necessary since |update_engine| needs to write
+ // these values into the buffer before being able to use them, and if an
+ // attacker sets these values to a very big number, the buffer will overflow
+ // and |update_engine| will crash. A simple way of solving this is to check
+ // that the size of both values is smaller than the payload itself.
+ if (metadata_size_ + metadata_signature_size_ > payload_->size) {
+ LOG(ERROR) << "The size of the metadata_size(" << metadata_size_ << ")"
+ << " or metadata signature(" << metadata_signature_size_ << ")"
+ << " is greater than the size of the payload"
+ << "(" << payload_->size << ")";
+ *error = ErrorCode::kDownloadInvalidMetadataSize;
+ return MetadataParseResult::kError;
+ }
}
// Now that we have validated the metadata size, we should wait for the full
@@ -572,7 +544,7 @@
#define OP_DURATION_HISTOGRAM(_op_name, _start_time) \
LOCAL_HISTOGRAM_CUSTOM_TIMES( \
"UpdateEngine.DownloadAction.InstallOperation::" _op_name ".Duration", \
- base::TimeTicks::Now() - _start_time, \
+ (base::TimeTicks::Now() - _start_time), \
base::TimeDelta::FromMilliseconds(10), \
base::TimeDelta::FromMinutes(5), \
20);
@@ -737,14 +709,6 @@
op_result = PerformZeroOrDiscardOperation(op);
OP_DURATION_HISTOGRAM("ZERO_OR_DISCARD", op_start_time);
break;
- case InstallOperation::MOVE:
- op_result = PerformMoveOperation(op);
- OP_DURATION_HISTOGRAM("MOVE", op_start_time);
- break;
- case InstallOperation::BSDIFF:
- op_result = PerformBsdiffOperation(op);
- OP_DURATION_HISTOGRAM("BSDIFF", op_start_time);
- break;
case InstallOperation::SOURCE_COPY:
op_result = PerformSourceCopyOperation(op, error);
OP_DURATION_HISTOGRAM("SOURCE_COPY", op_start_time);
@@ -775,8 +739,7 @@
// In major version 2, we don't add dummy operation to the payload.
// If we already extracted the signature we should skip this step.
- if (major_payload_version_ == kBrilloMajorPayloadVersion &&
- manifest_.has_signatures_offset() && manifest_.has_signatures_size() &&
+ if (manifest_.has_signatures_offset() && manifest_.has_signatures_size() &&
signatures_message_data_.empty()) {
if (manifest_.signatures_offset() != buffer_offset_) {
LOG(ERROR) << "Payload signatures offset points to blob offset "
@@ -811,49 +774,9 @@
}
bool DeltaPerformer::ParseManifestPartitions(ErrorCode* error) {
- if (major_payload_version_ == kBrilloMajorPayloadVersion) {
- partitions_.clear();
- for (const PartitionUpdate& partition : manifest_.partitions()) {
- partitions_.push_back(partition);
- }
- } else if (major_payload_version_ == kChromeOSMajorPayloadVersion) {
- LOG(INFO) << "Converting update information from old format.";
- PartitionUpdate root_part;
- root_part.set_partition_name(kPartitionNameRoot);
-#ifdef __ANDROID__
- LOG(WARNING) << "Legacy payload major version provided to an Android "
- "build. Assuming no post-install. Please use major version "
- "2 or newer.";
- root_part.set_run_postinstall(false);
-#else
- root_part.set_run_postinstall(true);
-#endif // __ANDROID__
- if (manifest_.has_old_rootfs_info()) {
- *root_part.mutable_old_partition_info() = manifest_.old_rootfs_info();
- manifest_.clear_old_rootfs_info();
- }
- if (manifest_.has_new_rootfs_info()) {
- *root_part.mutable_new_partition_info() = manifest_.new_rootfs_info();
- manifest_.clear_new_rootfs_info();
- }
- *root_part.mutable_operations() = manifest_.install_operations();
- manifest_.clear_install_operations();
- partitions_.push_back(std::move(root_part));
-
- PartitionUpdate kern_part;
- kern_part.set_partition_name(kPartitionNameKernel);
- kern_part.set_run_postinstall(false);
- if (manifest_.has_old_kernel_info()) {
- *kern_part.mutable_old_partition_info() = manifest_.old_kernel_info();
- manifest_.clear_old_kernel_info();
- }
- if (manifest_.has_new_kernel_info()) {
- *kern_part.mutable_new_partition_info() = manifest_.new_kernel_info();
- manifest_.clear_new_kernel_info();
- }
- *kern_part.mutable_operations() = manifest_.kernel_install_operations();
- manifest_.clear_kernel_install_operations();
- partitions_.push_back(std::move(kern_part));
+ partitions_.clear();
+ for (const PartitionUpdate& partition : manifest_.partitions()) {
+ partitions_.push_back(partition);
}
// For VAB and partial updates, the partition preparation will copy the
@@ -871,6 +794,8 @@
}
}
+ // Partitions in manifest are no longer needed after preparing partitions.
+ manifest_.clear_partitions();
// TODO(xunchang) TBD: allow partial update only on devices with dynamic
// partition.
if (manifest_.partial_update()) {
@@ -965,10 +890,6 @@
install_plan_->partitions.push_back(install_part);
}
- if (major_payload_version_ == kBrilloMajorPayloadVersion) {
- manifest_.clear_partitions();
- }
-
// TODO(xunchang) only need to load the partitions for those in payload.
// Because we have already loaded the other once when generating SOURCE_COPY
// operations.
@@ -1063,14 +984,6 @@
TEST_AND_RETURN_FALSE(buffer_offset_ == operation.data_offset());
TEST_AND_RETURN_FALSE(buffer_.size() >= operation.data_length());
- // Extract the signature message if it's in this operation.
- if (ExtractSignatureMessageFromOperation(operation)) {
- // If this is dummy replace operation, we ignore it after extracting the
- // signature.
- DiscardBuffer(true, 0);
- return true;
- }
-
// Setup the ExtentWriter stack based on the operation type.
std::unique_ptr<ExtentWriter> writer = std::make_unique<DirectExtentWriter>();
@@ -1129,57 +1042,6 @@
return true;
}
-bool DeltaPerformer::PerformMoveOperation(const InstallOperation& operation) {
- // Calculate buffer size. Note, this function doesn't do a sliding
- // window to copy in case the source and destination blocks overlap.
- // If we wanted to do a sliding window, we could program the server
- // to generate deltas that effectively did a sliding window.
-
- uint64_t blocks_to_read = 0;
- for (int i = 0; i < operation.src_extents_size(); i++)
- blocks_to_read += operation.src_extents(i).num_blocks();
-
- uint64_t blocks_to_write = 0;
- for (int i = 0; i < operation.dst_extents_size(); i++)
- blocks_to_write += operation.dst_extents(i).num_blocks();
-
- DCHECK_EQ(blocks_to_write, blocks_to_read);
- brillo::Blob buf(blocks_to_write * block_size_);
-
- // Read in bytes.
- ssize_t bytes_read = 0;
- for (int i = 0; i < operation.src_extents_size(); i++) {
- ssize_t bytes_read_this_iteration = 0;
- const Extent& extent = operation.src_extents(i);
- const size_t bytes = extent.num_blocks() * block_size_;
- TEST_AND_RETURN_FALSE(extent.start_block() != kSparseHole);
- TEST_AND_RETURN_FALSE(utils::PReadAll(target_fd_,
- &buf[bytes_read],
- bytes,
- extent.start_block() * block_size_,
- &bytes_read_this_iteration));
- TEST_AND_RETURN_FALSE(bytes_read_this_iteration ==
- static_cast<ssize_t>(bytes));
- bytes_read += bytes_read_this_iteration;
- }
-
- // Write bytes out.
- ssize_t bytes_written = 0;
- for (int i = 0; i < operation.dst_extents_size(); i++) {
- const Extent& extent = operation.dst_extents(i);
- const size_t bytes = extent.num_blocks() * block_size_;
- TEST_AND_RETURN_FALSE(extent.start_block() != kSparseHole);
- TEST_AND_RETURN_FALSE(utils::PWriteAll(target_fd_,
- &buf[bytes_written],
- bytes,
- extent.start_block() * block_size_));
- bytes_written += bytes;
- }
- DCHECK_EQ(bytes_written, bytes_read);
- DCHECK_EQ(bytes_written, static_cast<ssize_t>(buf.size()));
- return true;
-}
-
bool DeltaPerformer::ValidateSourceHash(const brillo::Blob& calculated_hash,
const InstallOperation& operation,
const FileDescriptorPtr source_fd,
@@ -1411,47 +1273,6 @@
return true;
}
-bool DeltaPerformer::PerformBsdiffOperation(const InstallOperation& operation) {
- // Since we delete data off the beginning of the buffer as we use it,
- // the data we need should be exactly at the beginning of the buffer.
- TEST_AND_RETURN_FALSE(buffer_offset_ == operation.data_offset());
- TEST_AND_RETURN_FALSE(buffer_.size() >= operation.data_length());
-
- string input_positions;
- TEST_AND_RETURN_FALSE(ExtentsToBsdiffPositionsString(operation.src_extents(),
- block_size_,
- operation.src_length(),
- &input_positions));
- string output_positions;
- TEST_AND_RETURN_FALSE(ExtentsToBsdiffPositionsString(operation.dst_extents(),
- block_size_,
- operation.dst_length(),
- &output_positions));
-
- TEST_AND_RETURN_FALSE(bsdiff::bspatch(target_path_.c_str(),
- target_path_.c_str(),
- buffer_.data(),
- buffer_.size(),
- input_positions.c_str(),
- output_positions.c_str()) == 0);
- DiscardBuffer(true, buffer_.size());
-
- if (operation.dst_length() % block_size_) {
- // Zero out rest of final block.
- // TODO(adlr): build this into bspatch; it's more efficient that way.
- const Extent& last_extent =
- operation.dst_extents(operation.dst_extents_size() - 1);
- const uint64_t end_byte =
- (last_extent.start_block() + last_extent.num_blocks()) * block_size_;
- const uint64_t begin_byte =
- end_byte - (block_size_ - operation.dst_length() % block_size_);
- brillo::Blob zeros(end_byte - begin_byte);
- TEST_AND_RETURN_FALSE(utils::PWriteAll(
- target_fd_, zeros.data(), end_byte - begin_byte, begin_byte));
- }
- return true;
-}
-
namespace {
class BsdiffExtentFile : public bsdiff::FileInterface {
@@ -1660,19 +1481,6 @@
return true;
}
-bool DeltaPerformer::ExtractSignatureMessageFromOperation(
- const InstallOperation& operation) {
- if (operation.type() != InstallOperation::REPLACE ||
- !manifest_.has_signatures_offset() ||
- manifest_.signatures_offset() != operation.data_offset()) {
- return false;
- }
- TEST_AND_RETURN_FALSE(manifest_.has_signatures_size() &&
- manifest_.signatures_size() == operation.data_length());
- TEST_AND_RETURN_FALSE(ExtractSignatureMessage());
- return true;
-}
-
bool DeltaPerformer::ExtractSignatureMessage() {
TEST_AND_RETURN_FALSE(signatures_message_data_.empty());
TEST_AND_RETURN_FALSE(buffer_offset_ == manifest_.signatures_offset());
@@ -1744,11 +1552,11 @@
ErrorCode DeltaPerformer::ValidateManifest() {
// Perform assorted checks to sanity check the manifest, make sure it
// matches data from other sources, and that it is a supported version.
- bool has_old_fields =
- (manifest_.has_old_kernel_info() || manifest_.has_old_rootfs_info());
- for (const PartitionUpdate& partition : manifest_.partitions()) {
- has_old_fields = has_old_fields || partition.has_old_partition_info();
- }
+ bool has_old_fields = std::any_of(manifest_.partitions().begin(),
+ manifest_.partitions().end(),
+ [](const PartitionUpdate& partition) {
+ return partition.has_old_partition_info();
+ });
// The presence of an old partition hash is the sole indicator for a delta
// update.
@@ -1790,16 +1598,12 @@
}
}
- if (major_payload_version_ != kChromeOSMajorPayloadVersion) {
- if (manifest_.has_old_rootfs_info() || manifest_.has_new_rootfs_info() ||
- manifest_.has_old_kernel_info() || manifest_.has_new_kernel_info() ||
- manifest_.install_operations_size() != 0 ||
- manifest_.kernel_install_operations_size() != 0) {
- LOG(ERROR) << "Manifest contains deprecated field only supported in "
- << "major payload version 1, but the payload major version is "
- << major_payload_version_;
- return ErrorCode::kPayloadMismatchedType;
- }
+ if (manifest_.has_old_rootfs_info() || manifest_.has_new_rootfs_info() ||
+ manifest_.has_old_kernel_info() || manifest_.has_new_kernel_info() ||
+ manifest_.install_operations_size() != 0 ||
+ manifest_.kernel_install_operations_size() != 0) {
+ LOG(ERROR) << "Manifest contains deprecated fields.";
+ return ErrorCode::kPayloadMismatchedType;
}
if (manifest_.max_timestamp() < hardware_->GetBuildTimestamp()) {
@@ -1814,18 +1618,8 @@
" the payload with an older timestamp.";
}
- if (major_payload_version_ == kChromeOSMajorPayloadVersion) {
- if (manifest_.has_dynamic_partition_metadata()) {
- LOG(ERROR)
- << "Should not contain dynamic_partition_metadata for major version "
- << kChromeOSMajorPayloadVersion
- << ". Please use major version 2 or above.";
- return ErrorCode::kPayloadMismatchedType;
- }
- }
-
- // TODO(garnold) we should be adding more and more manifest checks, such as
- // partition boundaries etc (see chromium-os:37661).
+ // TODO(crbug.com/37661) we should be adding more and more manifest checks,
+ // such as partition boundaries, etc.
return ErrorCode::kSuccess;
}
diff --git a/payload_consumer/delta_performer.h b/payload_consumer/delta_performer.h
index 01fcc5c..7b30a83 100644
--- a/payload_consumer/delta_performer.h
+++ b/payload_consumer/delta_performer.h
@@ -254,8 +254,6 @@
// set even if it fails.
bool PerformReplaceOperation(const InstallOperation& operation);
bool PerformZeroOrDiscardOperation(const InstallOperation& operation);
- bool PerformMoveOperation(const InstallOperation& operation);
- bool PerformBsdiffOperation(const InstallOperation& operation);
bool PerformSourceCopyOperation(const InstallOperation& operation,
ErrorCode* error);
bool PerformSourceBsdiffOperation(const InstallOperation& operation,
@@ -270,11 +268,6 @@
FileDescriptorPtr ChooseSourceFD(const InstallOperation& operation,
ErrorCode* error);
- // Extracts the payload signature message from the blob on the |operation| if
- // the offset matches the one specified by the manifest. Returns whether the
- // signature was extracted.
- bool ExtractSignatureMessageFromOperation(const InstallOperation& operation);
-
// Extracts the payload signature message from the current |buffer_| if the
// offset matches the one specified by the manifest. Returns whether the
// signature was extracted.
diff --git a/payload_consumer/delta_performer_fuzzer.cc b/payload_consumer/delta_performer_fuzzer.cc
new file mode 100644
index 0000000..73082c4
--- /dev/null
+++ b/payload_consumer/delta_performer_fuzzer.cc
@@ -0,0 +1,105 @@
+//
+// Copyright 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <string>
+
+#include <base/logging.h>
+#include <fuzzer/FuzzedDataProvider.h>
+
+#include "update_engine/common/fake_boot_control.h"
+#include "update_engine/common/fake_hardware.h"
+#include "update_engine/common/prefs.h"
+#include "update_engine/payload_consumer/delta_performer.h"
+#include "update_engine/payload_consumer/download_action.h"
+#include "update_engine/payload_consumer/install_plan.h"
+
+namespace chromeos_update_engine {
+
+class FakeDownloadActionDelegate : public DownloadActionDelegate {
+ public:
+ FakeDownloadActionDelegate() = default;
+ ~FakeDownloadActionDelegate() = default;
+
+ // DownloadActionDelegate overrides;
+ void BytesReceived(uint64_t bytes_progressed,
+ uint64_t bytes_received,
+ uint64_t total) override{};
+
+ bool ShouldCancel(ErrorCode* cancel_reason) override { return false; };
+
+ void DownloadComplete() override{};
+
+ DISALLOW_COPY_AND_ASSIGN(FakeDownloadActionDelegate);
+};
+
+void FuzzDeltaPerformer(const uint8_t* data, size_t size) {
+ MemoryPrefs prefs;
+ FakeBootControl boot_control;
+ FakeHardware hardware;
+ FakeDownloadActionDelegate download_action_delegate;
+
+ FuzzedDataProvider data_provider(data, size);
+
+ InstallPlan install_plan{
+ .target_slot = 1,
+ .partitions = {InstallPlan::Partition{
+ .source_path = "/dev/zero",
+ .source_size = 4096,
+ .target_path = "/dev/null",
+ .target_size = 4096,
+ }},
+ .hash_checks_mandatory = true,
+ };
+
+ InstallPlan::Payload payload{
+ .size = data_provider.ConsumeIntegralInRange<uint64_t>(0, 10000),
+ .metadata_size = data_provider.ConsumeIntegralInRange<uint64_t>(0, 1000),
+ .hash = data_provider.ConsumeBytes<uint8_t>(32),
+ .type = static_cast<InstallPayloadType>(
+ data_provider.ConsumeIntegralInRange(0, 3)),
+ .already_applied = data_provider.ConsumeBool(),
+ };
+
+ DeltaPerformer performer(&prefs,
+ &boot_control,
+ &hardware,
+ &download_action_delegate,
+ &install_plan,
+ &payload,
+ data_provider.ConsumeBool());
+ do {
+ auto chunk_size = data_provider.ConsumeIntegralInRange<size_t>(0, 100);
+ auto data = data_provider.ConsumeBytes<uint8_t>(chunk_size);
+ if (!performer.Write(data.data(), data.size()))
+ break;
+ } while (data_provider.remaining_bytes() > 0);
+}
+
+} // namespace chromeos_update_engine
+
+class Environment {
+ public:
+ Environment() { logging::SetMinLogLevel(logging::LOG_FATAL); }
+};
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+ if (size > 1000000) {
+ return 0;
+ }
+
+ static Environment env;
+ chromeos_update_engine::FuzzDeltaPerformer(data, size);
+ return 0;
+}
diff --git a/payload_consumer/delta_performer_integration_test.cc b/payload_consumer/delta_performer_integration_test.cc
index 4797137..16641c6 100644
--- a/payload_consumer/delta_performer_integration_test.cc
+++ b/payload_consumer/delta_performer_integration_test.cc
@@ -25,6 +25,7 @@
#include <base/files/file_path.h>
#include <base/files/file_util.h>
+#include <base/stl_util.h>
#include <base/strings/string_util.h>
#include <base/strings/stringprintf.h>
#include <google/protobuf/repeated_field.h>
@@ -78,6 +79,7 @@
string delta_path;
uint64_t metadata_size;
+ uint32_t metadata_signature_size;
string old_kernel;
brillo::Blob old_kernel_data;
@@ -186,16 +188,19 @@
string private_key_path = GetBuildArtifactsPath(kUnittestPrivateKeyPath);
size_t signature_size;
ASSERT_TRUE(PayloadSigner::GetMaximumSignatureSize(private_key_path,
- &signature_size));
- brillo::Blob hash;
+ &signature_size));
+ brillo::Blob metadata_hash, payload_hash;
ASSERT_TRUE(PayloadSigner::HashPayloadForSigning(
- payload_path, {signature_size}, &hash, nullptr));
- brillo::Blob signature;
- ASSERT_TRUE(PayloadSigner::SignHash(hash, private_key_path, &signature));
+ payload_path, {signature_size}, &payload_hash, &metadata_hash));
+ brillo::Blob metadata_signature, payload_signature;
+ ASSERT_TRUE(PayloadSigner::SignHash(
+ payload_hash, private_key_path, &payload_signature));
+ ASSERT_TRUE(PayloadSigner::SignHash(
+ metadata_hash, private_key_path, &metadata_signature));
ASSERT_TRUE(PayloadSigner::AddSignatureToPayload(payload_path,
{signature_size},
- {signature},
- {},
+ {payload_signature},
+ {metadata_signature},
payload_path,
out_metadata_size));
EXPECT_TRUE(PayloadSigner::VerifySignedPayload(
@@ -216,19 +221,21 @@
}
string signature_size_string = base::JoinString(signature_size_strings, ":");
- test_utils::ScopedTempFile hash_file("hash.XXXXXX");
+ test_utils::ScopedTempFile hash_file("hash.XXXXXX"),
+ metadata_hash_file("hash.XXXXXX");
string delta_generator_path = GetBuildArtifactsPath("delta_generator");
ASSERT_EQ(0,
System(base::StringPrintf(
- "%s -in_file=%s -signature_size=%s -out_hash_file=%s",
+ "%s -in_file=%s -signature_size=%s -out_hash_file=%s "
+ "-out_metadata_hash_file=%s",
delta_generator_path.c_str(),
payload_path.c_str(),
signature_size_string.c_str(),
- hash_file.path().c_str())));
+ hash_file.path().c_str(), metadata_hash_file.path().c_str())));
// Sign the hash with all private keys.
- vector<test_utils::ScopedTempFile> sig_files;
- vector<string> sig_file_paths;
+ vector<test_utils::ScopedTempFile> sig_files, metadata_sig_files;
+ vector<string> sig_file_paths, metadata_sig_file_paths;
for (const auto& key_path : private_key_paths) {
brillo::Blob hash, signature;
ASSERT_TRUE(utils::ReadFile(hash_file.path(), &hash));
@@ -238,17 +245,31 @@
ASSERT_TRUE(test_utils::WriteFileVector(sig_file.path(), signature));
sig_file_paths.push_back(sig_file.path());
sig_files.push_back(std::move(sig_file));
+
+ brillo::Blob metadata_hash, metadata_signature;
+ ASSERT_TRUE(utils::ReadFile(metadata_hash_file.path(), &metadata_hash));
+ ASSERT_TRUE(PayloadSigner::SignHash(metadata_hash, key_path, &metadata_signature));
+
+ test_utils::ScopedTempFile metadata_sig_file("signature.XXXXXX");
+ ASSERT_TRUE(test_utils::WriteFileVector(metadata_sig_file.path(), metadata_signature));
+
+ metadata_sig_file_paths.push_back(metadata_sig_file.path());
+ metadata_sig_files.push_back(std::move(metadata_sig_file));
}
string sig_files_string = base::JoinString(sig_file_paths, ":");
+ string metadata_sig_files_string = base::JoinString(metadata_sig_file_paths, ":");
// Add the signature to the payload.
ASSERT_EQ(0,
System(base::StringPrintf("%s --signature_size=%s -in_file=%s "
- "-payload_signature_file=%s -out_file=%s",
+ "-payload_signature_file=%s "
+ "-metadata_signature_file=%s "
+ "-out_file=%s",
delta_generator_path.c_str(),
signature_size_string.c_str(),
payload_path.c_str(),
sig_files_string.c_str(),
+ metadata_sig_files_string.c_str(),
payload_path.c_str())));
int verify_result = System(base::StringPrintf("%s -in_file=%s -public_key=%s",
@@ -330,7 +351,6 @@
static void GenerateDeltaFile(bool full_kernel,
bool full_rootfs,
- bool noop,
ssize_t chunk_size,
SignatureTest signature_test,
DeltaState* state,
@@ -407,24 +427,16 @@
ones.size()));
}
- if (noop) {
- EXPECT_TRUE(base::CopyFile(base::FilePath(state->a_img),
- base::FilePath(state->b_img)));
- old_image_info = new_image_info;
- } else {
- if (minor_version == kSourceMinorPayloadVersion) {
- // Create a result image with image_size bytes of garbage.
- brillo::Blob ones(state->image_size, 0xff);
- EXPECT_TRUE(utils::WriteFile(
- state->result_img.c_str(), ones.data(), ones.size()));
- EXPECT_EQ(utils::FileSize(state->a_img),
- utils::FileSize(state->result_img));
- }
+ // Create a result image with image_size bytes of garbage.
+ brillo::Blob ones(state->image_size, 0xff);
+ EXPECT_TRUE(
+ utils::WriteFile(state->result_img.c_str(), ones.data(), ones.size()));
+ EXPECT_EQ(utils::FileSize(state->a_img), utils::FileSize(state->result_img));
- EXPECT_TRUE(
- base::CopyFile(GetBuildArtifactsPath().Append("gen/disk_ext2_4k.img"),
- base::FilePath(state->b_img)));
-
+ EXPECT_TRUE(
+ base::CopyFile(GetBuildArtifactsPath().Append("gen/disk_ext2_4k.img"),
+ base::FilePath(state->b_img)));
+ {
// Make some changes to the B image.
string b_mnt;
ScopedLoopMounter b_mounter(state->b_img, &b_mnt, 0);
@@ -499,10 +511,6 @@
std::copy(
std::begin(kNewData), std::end(kNewData), state->new_kernel_data.begin());
- if (noop) {
- state->old_kernel_data = state->new_kernel_data;
- }
-
// Write kernels to disk
EXPECT_TRUE(utils::WriteFile(state->old_kernel.c_str(),
state->old_kernel_data.data(),
@@ -526,7 +534,7 @@
payload_config.is_delta = !full_rootfs;
payload_config.hard_chunk_size = chunk_size;
payload_config.rootfs_partition_size = kRootFSPartitionSize;
- payload_config.version.major = kChromeOSMajorPayloadVersion;
+ payload_config.version.major = kBrilloMajorPayloadVersion;
payload_config.version.minor = minor_version;
if (!full_rootfs) {
payload_config.source.partitions.emplace_back(kPartitionNameRoot);
@@ -605,7 +613,6 @@
static void ApplyDeltaFile(bool full_kernel,
bool full_rootfs,
- bool noop,
SignatureTest signature_test,
DeltaState* state,
bool hash_checks_mandatory,
@@ -619,6 +626,9 @@
EXPECT_TRUE(payload_metadata.ParsePayloadHeader(state->delta));
state->metadata_size = payload_metadata.GetMetadataSize();
LOG(INFO) << "Metadata size: " << state->metadata_size;
+ state->metadata_signature_size =
+ payload_metadata.GetMetadataSignatureSize();
+ LOG(INFO) << "Metadata signature size: " << state->metadata_signature_size;
DeltaArchiveManifest manifest;
EXPECT_TRUE(payload_metadata.GetManifest(state->delta, &manifest));
@@ -630,7 +640,8 @@
EXPECT_TRUE(manifest.has_signatures_size());
Signatures sigs_message;
EXPECT_TRUE(sigs_message.ParseFromArray(
- &state->delta[state->metadata_size + manifest.signatures_offset()],
+ &state->delta[state->metadata_size + state->metadata_signature_size +
+ manifest.signatures_offset()],
manifest.signatures_size()));
if (signature_test == kSignatureGeneratedShellRotateCl1 ||
signature_test == kSignatureGeneratedShellRotateCl2)
@@ -653,18 +664,38 @@
EXPECT_FALSE(signature.data().empty());
}
- if (noop) {
- EXPECT_EQ(0, manifest.install_operations_size());
- EXPECT_EQ(1, manifest.kernel_install_operations_size());
- }
-
+  // TODO(ahassani): Make |DeltaState| into a partition list kind of struct
+  // instead of hardcoded kernel/rootfs so it's cleaner and we can make the
+ // following code into a helper function instead.
+ const auto& kernel_part = *std::find_if(
+ manifest.partitions().begin(),
+ manifest.partitions().end(),
+ [](const PartitionUpdate& partition) {
+ return partition.partition_name() == kPartitionNameKernel;
+ });
if (full_kernel) {
- EXPECT_FALSE(manifest.has_old_kernel_info());
+ EXPECT_FALSE(kernel_part.has_old_partition_info());
} else {
EXPECT_EQ(state->old_kernel_data.size(),
- manifest.old_kernel_info().size());
- EXPECT_FALSE(manifest.old_kernel_info().hash().empty());
+ kernel_part.old_partition_info().size());
+ EXPECT_FALSE(kernel_part.old_partition_info().hash().empty());
}
+ EXPECT_EQ(state->new_kernel_data.size(),
+ kernel_part.new_partition_info().size());
+ EXPECT_FALSE(kernel_part.new_partition_info().hash().empty());
+
+ const auto& rootfs_part =
+ *std::find_if(manifest.partitions().begin(),
+ manifest.partitions().end(),
+ [](const PartitionUpdate& partition) {
+ return partition.partition_name() == kPartitionNameRoot;
+ });
+ if (full_rootfs) {
+ EXPECT_FALSE(rootfs_part.has_old_partition_info());
+ } else {
+ EXPECT_FALSE(rootfs_part.old_partition_info().hash().empty());
+ }
+ EXPECT_FALSE(rootfs_part.new_partition_info().hash().empty());
EXPECT_EQ(manifest.new_image_info().channel(), "test-channel");
EXPECT_EQ(manifest.new_image_info().board(), "test-board");
@@ -674,47 +705,21 @@
EXPECT_EQ(manifest.new_image_info().build_version(), "test-build-version");
if (!full_rootfs) {
- if (noop) {
- EXPECT_EQ(manifest.old_image_info().channel(), "test-channel");
- EXPECT_EQ(manifest.old_image_info().board(), "test-board");
- EXPECT_EQ(manifest.old_image_info().version(), "test-version");
- EXPECT_EQ(manifest.old_image_info().key(), "test-key");
- EXPECT_EQ(manifest.old_image_info().build_channel(),
- "test-build-channel");
- EXPECT_EQ(manifest.old_image_info().build_version(),
- "test-build-version");
- } else {
- EXPECT_EQ(manifest.old_image_info().channel(), "src-channel");
- EXPECT_EQ(manifest.old_image_info().board(), "src-board");
- EXPECT_EQ(manifest.old_image_info().version(), "src-version");
- EXPECT_EQ(manifest.old_image_info().key(), "src-key");
- EXPECT_EQ(manifest.old_image_info().build_channel(),
- "src-build-channel");
- EXPECT_EQ(manifest.old_image_info().build_version(),
- "src-build-version");
- }
+ EXPECT_EQ(manifest.old_image_info().channel(), "src-channel");
+ EXPECT_EQ(manifest.old_image_info().board(), "src-board");
+ EXPECT_EQ(manifest.old_image_info().version(), "src-version");
+ EXPECT_EQ(manifest.old_image_info().key(), "src-key");
+ EXPECT_EQ(manifest.old_image_info().build_channel(), "src-build-channel");
+ EXPECT_EQ(manifest.old_image_info().build_version(), "src-build-version");
}
-
- if (full_rootfs) {
- EXPECT_FALSE(manifest.has_old_rootfs_info());
- EXPECT_FALSE(manifest.has_old_image_info());
- EXPECT_TRUE(manifest.has_new_image_info());
- } else {
- EXPECT_EQ(state->image_size, manifest.old_rootfs_info().size());
- EXPECT_FALSE(manifest.old_rootfs_info().hash().empty());
- }
-
- EXPECT_EQ(state->new_kernel_data.size(), manifest.new_kernel_info().size());
- EXPECT_EQ(state->image_size, manifest.new_rootfs_info().size());
-
- EXPECT_FALSE(manifest.new_kernel_info().hash().empty());
- EXPECT_FALSE(manifest.new_rootfs_info().hash().empty());
}
MockPrefs prefs;
EXPECT_CALL(prefs, SetInt64(kPrefsManifestMetadataSize, state->metadata_size))
.WillOnce(Return(true));
- EXPECT_CALL(prefs, SetInt64(kPrefsManifestSignatureSize, 0))
+ EXPECT_CALL(
+ prefs,
+ SetInt64(kPrefsManifestSignatureSize, state->metadata_signature_size))
.WillOnce(Return(true));
EXPECT_CALL(prefs, SetInt64(kPrefsUpdateStateNextOperation, _))
.WillRepeatedly(Return(true));
@@ -741,7 +746,8 @@
// Update the A image in place.
InstallPlan* install_plan = &state->install_plan;
install_plan->hash_checks_mandatory = hash_checks_mandatory;
- install_plan->payloads = {{.metadata_size = state->metadata_size,
+ install_plan->payloads = {{.size = state->delta.size(),
+ .metadata_size = state->metadata_size,
.type = (full_kernel && full_rootfs)
? InstallPayloadType::kFull
: InstallPayloadType::kDelta}};
@@ -788,25 +794,14 @@
// The partitions should be empty before DeltaPerformer.
install_plan->partitions.clear();
- // With minor version 2, we want the target to be the new image, result_img,
- // but with version 1, we want to update A in place.
- string target_root, target_kernel;
- if (minor_version == kSourceMinorPayloadVersion) {
- target_root = state->result_img;
- target_kernel = state->result_kernel;
- } else {
- target_root = state->a_img;
- target_kernel = state->old_kernel;
- }
-
state->fake_boot_control_.SetPartitionDevice(
kPartitionNameRoot, install_plan->source_slot, state->a_img);
state->fake_boot_control_.SetPartitionDevice(
kPartitionNameKernel, install_plan->source_slot, state->old_kernel);
state->fake_boot_control_.SetPartitionDevice(
- kPartitionNameRoot, install_plan->target_slot, target_root);
+ kPartitionNameRoot, install_plan->target_slot, state->result_img);
state->fake_boot_control_.SetPartitionDevice(
- kPartitionNameKernel, install_plan->target_slot, target_kernel);
+ kPartitionNameKernel, install_plan->target_slot, state->result_kernel);
ErrorCode expected_error, actual_error;
bool continue_writing;
@@ -885,21 +880,13 @@
return;
}
- brillo::Blob updated_kernel_partition;
- if (minor_version == kSourceMinorPayloadVersion) {
- CompareFilesByBlock(
- state->result_kernel, state->new_kernel, state->kernel_size);
- CompareFilesByBlock(state->result_img, state->b_img, state->image_size);
- EXPECT_TRUE(
- utils::ReadFile(state->result_kernel, &updated_kernel_partition));
- } else {
- CompareFilesByBlock(
- state->old_kernel, state->new_kernel, state->kernel_size);
- CompareFilesByBlock(state->a_img, state->b_img, state->image_size);
- EXPECT_TRUE(utils::ReadFile(state->old_kernel, &updated_kernel_partition));
- }
+ CompareFilesByBlock(
+ state->result_kernel, state->new_kernel, state->kernel_size);
+ CompareFilesByBlock(state->result_img, state->b_img, state->image_size);
- ASSERT_GE(updated_kernel_partition.size(), arraysize(kNewData));
+ brillo::Blob updated_kernel_partition;
+ EXPECT_TRUE(utils::ReadFile(state->result_kernel, &updated_kernel_partition));
+ ASSERT_GE(updated_kernel_partition.size(), base::size(kNewData));
EXPECT_TRUE(std::equal(std::begin(kNewData),
std::end(kNewData),
updated_kernel_partition.begin()));
@@ -944,7 +931,6 @@
void DoSmallImageTest(bool full_kernel,
bool full_rootfs,
- bool noop,
ssize_t chunk_size,
SignatureTest signature_test,
bool hash_checks_mandatory,
@@ -953,7 +939,6 @@
DeltaPerformer* performer = nullptr;
GenerateDeltaFile(full_kernel,
full_rootfs,
- noop,
chunk_size,
signature_test,
&state,
@@ -968,7 +953,6 @@
ScopedPathUnlinker result_kernel_unlinker(state.result_kernel);
ApplyDeltaFile(full_kernel,
full_rootfs,
- noop,
signature_test,
&state,
hash_checks_mandatory,
@@ -983,8 +967,7 @@
bool hash_checks_mandatory) {
DeltaState state;
uint64_t minor_version = kFullPayloadMinorVersion;
- GenerateDeltaFile(
- true, true, false, -1, kSignatureGenerated, &state, minor_version);
+ GenerateDeltaFile(true, true, -1, kSignatureGenerated, &state, minor_version);
ScopedPathUnlinker a_img_unlinker(state.a_img);
ScopedPathUnlinker b_img_unlinker(state.b_img);
ScopedPathUnlinker delta_unlinker(state.delta_path);
@@ -993,7 +976,6 @@
DeltaPerformer* performer = nullptr;
ApplyDeltaFile(true,
true,
- false,
kSignatureGenerated,
&state,
hash_checks_mandatory,
@@ -1004,24 +986,18 @@
}
TEST(DeltaPerformerIntegrationTest, RunAsRootSmallImageTest) {
- DoSmallImageTest(false,
- false,
- false,
- -1,
- kSignatureGenerator,
- false,
- kInPlaceMinorPayloadVersion);
+ DoSmallImageTest(
+ false, false, -1, kSignatureGenerator, false, kSourceMinorPayloadVersion);
}
TEST(DeltaPerformerIntegrationTest,
RunAsRootSmallImageSignaturePlaceholderTest) {
DoSmallImageTest(false,
false,
- false,
-1,
kSignatureGeneratedPlaceholder,
false,
- kInPlaceMinorPayloadVersion);
+ kSourceMinorPayloadVersion);
}
TEST(DeltaPerformerIntegrationTest,
@@ -1029,131 +1005,97 @@
DeltaState state;
GenerateDeltaFile(false,
false,
- false,
-1,
kSignatureGeneratedPlaceholderMismatch,
&state,
- kInPlaceMinorPayloadVersion);
+ kSourceMinorPayloadVersion);
}
TEST(DeltaPerformerIntegrationTest, RunAsRootSmallImageChunksTest) {
DoSmallImageTest(false,
false,
- false,
kBlockSize,
kSignatureGenerator,
false,
- kInPlaceMinorPayloadVersion);
+ kSourceMinorPayloadVersion);
}
TEST(DeltaPerformerIntegrationTest, RunAsRootFullKernelSmallImageTest) {
- DoSmallImageTest(true,
- false,
- false,
- -1,
- kSignatureGenerator,
- false,
- kInPlaceMinorPayloadVersion);
+ DoSmallImageTest(
+ true, false, -1, kSignatureGenerator, false, kSourceMinorPayloadVersion);
}
TEST(DeltaPerformerIntegrationTest, RunAsRootFullSmallImageTest) {
DoSmallImageTest(true,
true,
- false,
-1,
kSignatureGenerator,
true,
kFullPayloadMinorVersion);
}
-TEST(DeltaPerformerIntegrationTest, RunAsRootNoopSmallImageTest) {
- DoSmallImageTest(false,
- false,
- true,
- -1,
- kSignatureGenerator,
- false,
- kInPlaceMinorPayloadVersion);
-}
-
TEST(DeltaPerformerIntegrationTest, RunAsRootSmallImageSignNoneTest) {
- DoSmallImageTest(false,
- false,
- false,
- -1,
- kSignatureNone,
- false,
- kInPlaceMinorPayloadVersion);
+ DoSmallImageTest(
+ false, false, -1, kSignatureNone, false, kSourceMinorPayloadVersion);
}
TEST(DeltaPerformerIntegrationTest, RunAsRootSmallImageSignGeneratedTest) {
- DoSmallImageTest(false,
- false,
- false,
- -1,
- kSignatureGenerated,
- true,
- kInPlaceMinorPayloadVersion);
+ DoSmallImageTest(
+ false, false, -1, kSignatureGenerated, true, kSourceMinorPayloadVersion);
}
TEST(DeltaPerformerIntegrationTest, RunAsRootSmallImageSignGeneratedShellTest) {
DoSmallImageTest(false,
false,
- false,
-1,
kSignatureGeneratedShell,
false,
- kInPlaceMinorPayloadVersion);
+ kSourceMinorPayloadVersion);
}
TEST(DeltaPerformerIntegrationTest,
RunAsRootSmallImageSignGeneratedShellECKeyTest) {
DoSmallImageTest(false,
false,
- false,
-1,
kSignatureGeneratedShellECKey,
false,
- kInPlaceMinorPayloadVersion);
+ kSourceMinorPayloadVersion);
}
TEST(DeltaPerformerIntegrationTest,
RunAsRootSmallImageSignGeneratedShellBadKeyTest) {
DoSmallImageTest(false,
false,
- false,
-1,
kSignatureGeneratedShellBadKey,
false,
- kInPlaceMinorPayloadVersion);
+ kSourceMinorPayloadVersion);
}
TEST(DeltaPerformerIntegrationTest,
RunAsRootSmallImageSignGeneratedShellRotateCl1Test) {
DoSmallImageTest(false,
false,
- false,
-1,
kSignatureGeneratedShellRotateCl1,
false,
- kInPlaceMinorPayloadVersion);
+ kSourceMinorPayloadVersion);
}
TEST(DeltaPerformerIntegrationTest,
RunAsRootSmallImageSignGeneratedShellRotateCl2Test) {
DoSmallImageTest(false,
false,
- false,
-1,
kSignatureGeneratedShellRotateCl2,
false,
- kInPlaceMinorPayloadVersion);
+ kSourceMinorPayloadVersion);
}
TEST(DeltaPerformerIntegrationTest, RunAsRootSmallImageSourceOpsTest) {
DoSmallImageTest(false,
false,
- false,
-1,
kSignatureGenerator,
false,
diff --git a/payload_consumer/delta_performer_unittest.cc b/payload_consumer/delta_performer_unittest.cc
index e9022ba..44107cd 100644
--- a/payload_consumer/delta_performer_unittest.cc
+++ b/payload_consumer/delta_performer_unittest.cc
@@ -27,6 +27,7 @@
#include <base/files/file_path.h>
#include <base/files/file_util.h>
#include <base/files/scoped_temp_dir.h>
+#include <base/stl_util.h>
#include <base/strings/string_number_conversions.h>
#include <base/strings/string_util.h>
#include <base/strings/stringprintf.h>
@@ -43,6 +44,7 @@
#include "update_engine/payload_consumer/fake_file_descriptor.h"
#include "update_engine/payload_consumer/mock_download_action.h"
#include "update_engine/payload_consumer/payload_constants.h"
+#include "update_engine/payload_consumer/payload_metadata.h"
#include "update_engine/payload_generator/bzip.h"
#include "update_engine/payload_generator/extent_ranges.h"
#include "update_engine/payload_generator/payload_file.h"
@@ -286,6 +288,7 @@
test_utils::ScopedTempFile new_part("Partition-XXXXXX");
EXPECT_TRUE(test_utils::WriteFileVector(new_part.path(), target_data));
+ payload_.size = payload_data.size();
// We installed the operations only in the rootfs partition, but the
// delta performer needs to access all the partitions.
fake_boot_control_.SetPartitionDevice(
@@ -318,14 +321,17 @@
// Set a valid magic string and version number 1.
EXPECT_TRUE(performer_.Write("CrAU", 4));
- uint64_t version = htobe64(kChromeOSMajorPayloadVersion);
+ uint64_t version = htobe64(kBrilloMajorPayloadVersion);
EXPECT_TRUE(performer_.Write(&version, 8));
payload_.metadata_size = expected_metadata_size;
+ payload_.size = actual_metadata_size + 1;
ErrorCode error_code;
- // When filling in size in manifest, exclude the size of the 20-byte header.
- uint64_t size_in_manifest = htobe64(actual_metadata_size - 20);
- bool result = performer_.Write(&size_in_manifest, 8, &error_code);
+ // When filling in size in manifest, exclude the size of the 24-byte header.
+ uint64_t size_in_manifest = htobe64(actual_metadata_size - 24);
+ performer_.Write(&size_in_manifest, 8, &error_code);
+ auto signature_size = htobe64(10);
+ bool result = performer_.Write(&signature_size, 4, &error_code);
if (expected_metadata_size == actual_metadata_size ||
!hash_checks_mandatory) {
EXPECT_TRUE(result);
@@ -337,7 +343,7 @@
EXPECT_LT(performer_.Close(), 0);
}
- // Generates a valid delta file but tests the delta performer by suppling
+ // Generates a valid delta file but tests the delta performer by supplying
// different metadata signatures as per metadata_signature_test flag and
// sees if the result of the parsing are as per hash_checks_mandatory flag.
void DoMetadataSignatureTest(MetadataSignatureTest metadata_signature_test,
@@ -347,9 +353,10 @@
brillo::Blob payload = GeneratePayload(brillo::Blob(),
vector<AnnotatedOperation>(),
sign_payload,
- kChromeOSMajorPayloadVersion,
+ kBrilloMajorPayloadVersion,
kFullPayloadMinorVersion);
+ payload_.size = payload.size();
LOG(INFO) << "Payload size: " << payload.size();
install_plan_.hash_checks_mandatory = hash_checks_mandatory;
@@ -361,6 +368,9 @@
switch (metadata_signature_test) {
case kEmptyMetadataSignature:
payload_.metadata_signature.clear();
+ // We need to set the signature size in a signed payload to zero.
+ std::fill(
+ std::next(payload.begin(), 20), std::next(payload.begin(), 24), 0);
expected_result = MetadataParseResult::kError;
expected_error = ErrorCode::kDownloadMetadataSignatureMissingError;
break;
@@ -455,7 +465,7 @@
brillo::Blob payload_data = GeneratePayload(expected_data,
aops,
false,
- kChromeOSMajorPayloadVersion,
+ kBrilloMajorPayloadVersion,
kFullPayloadMinorVersion);
EXPECT_EQ(expected_data, ApplyPayload(payload_data, "/dev/null", true));
@@ -477,7 +487,7 @@
brillo::Blob payload_data = GeneratePayload(expected_data,
aops,
false,
- kChromeOSMajorPayloadVersion,
+ kBrilloMajorPayloadVersion,
kFullPayloadMinorVersion);
testing::Mock::VerifyAndClearExpectations(&mock_delegate_);
@@ -739,12 +749,12 @@
TEST_F(DeltaPerformerTest, ExtentsToByteStringTest) {
uint64_t test[] = {1, 1, 4, 2, 0, 1};
- static_assert(arraysize(test) % 2 == 0, "Array size uneven");
+ static_assert(base::size(test) % 2 == 0, "Array size uneven");
const uint64_t block_size = 4096;
const uint64_t file_length = 4 * block_size - 13;
google::protobuf::RepeatedPtrField<Extent> extents;
- for (size_t i = 0; i < arraysize(test); i += 2) {
+ for (size_t i = 0; i < base::size(test); i += 2) {
*(extents.Add()) = ExtentForRange(test[i], test[i + 1]);
}
@@ -758,27 +768,32 @@
TEST_F(DeltaPerformerTest, ValidateManifestFullGoodTest) {
// The Manifest we are validating.
DeltaArchiveManifest manifest;
- manifest.mutable_new_kernel_info();
- manifest.mutable_new_rootfs_info();
+ for (const auto& part_name : {"kernel", "rootfs"}) {
+ auto part = manifest.add_partitions();
+ part->set_partition_name(part_name);
+ part->mutable_new_partition_info();
+ }
manifest.set_minor_version(kFullPayloadMinorVersion);
RunManifestValidation(manifest,
- kChromeOSMajorPayloadVersion,
+ kBrilloMajorPayloadVersion,
InstallPayloadType::kFull,
ErrorCode::kSuccess);
}
-TEST_F(DeltaPerformerTest, ValidateManifestDeltaGoodTest) {
+TEST_F(DeltaPerformerTest, ValidateManifestDeltaMaxGoodTest) {
// The Manifest we are validating.
DeltaArchiveManifest manifest;
- manifest.mutable_old_kernel_info();
- manifest.mutable_old_rootfs_info();
- manifest.mutable_new_kernel_info();
- manifest.mutable_new_rootfs_info();
+ for (const auto& part_name : {"kernel", "rootfs"}) {
+ auto part = manifest.add_partitions();
+ part->set_partition_name(part_name);
+ part->mutable_old_partition_info();
+ part->mutable_new_partition_info();
+ }
manifest.set_minor_version(kMaxSupportedMinorPayloadVersion);
RunManifestValidation(manifest,
- kChromeOSMajorPayloadVersion,
+ kBrilloMajorPayloadVersion,
InstallPayloadType::kDelta,
ErrorCode::kSuccess);
}
@@ -786,14 +801,16 @@
TEST_F(DeltaPerformerTest, ValidateManifestDeltaMinGoodTest) {
// The Manifest we are validating.
DeltaArchiveManifest manifest;
- manifest.mutable_old_kernel_info();
- manifest.mutable_old_rootfs_info();
- manifest.mutable_new_kernel_info();
- manifest.mutable_new_rootfs_info();
+ for (const auto& part_name : {"kernel", "rootfs"}) {
+ auto part = manifest.add_partitions();
+ part->set_partition_name(part_name);
+ part->mutable_old_partition_info();
+ part->mutable_new_partition_info();
+ }
manifest.set_minor_version(kMinSupportedMinorPayloadVersion);
RunManifestValidation(manifest,
- kChromeOSMajorPayloadVersion,
+ kBrilloMajorPayloadVersion,
InstallPayloadType::kDelta,
ErrorCode::kSuccess);
}
@@ -811,9 +828,11 @@
TEST_F(DeltaPerformerTest, ValidateManifestDeltaUnsetMinorVersion) {
// The Manifest we are validating.
DeltaArchiveManifest manifest;
- // Add an empty old_rootfs_info() to trick the DeltaPerformer into think that
- // this is a delta payload manifest with a missing minor version.
- manifest.mutable_old_rootfs_info();
+  // Add an empty rootfs partition info to trick the DeltaPerformer into
+  // thinking this is a delta payload manifest with a missing minor version.
+ auto rootfs = manifest.add_partitions();
+ rootfs->set_partition_name("rootfs");
+ rootfs->mutable_old_partition_info();
RunManifestValidation(manifest,
kMaxSupportedMajorPayloadVersion,
@@ -824,27 +843,15 @@
TEST_F(DeltaPerformerTest, ValidateManifestFullOldKernelTest) {
// The Manifest we are validating.
DeltaArchiveManifest manifest;
- manifest.mutable_old_kernel_info();
- manifest.mutable_new_kernel_info();
- manifest.mutable_new_rootfs_info();
- manifest.set_minor_version(kMaxSupportedMinorPayloadVersion);
-
+ for (const auto& part_name : {"kernel", "rootfs"}) {
+ auto part = manifest.add_partitions();
+ part->set_partition_name(part_name);
+ part->mutable_old_partition_info();
+ part->mutable_new_partition_info();
+ }
+ manifest.mutable_partitions(0)->clear_old_partition_info();
RunManifestValidation(manifest,
- kChromeOSMajorPayloadVersion,
- InstallPayloadType::kFull,
- ErrorCode::kPayloadMismatchedType);
-}
-
-TEST_F(DeltaPerformerTest, ValidateManifestFullOldRootfsTest) {
- // The Manifest we are validating.
- DeltaArchiveManifest manifest;
- manifest.mutable_old_rootfs_info();
- manifest.mutable_new_kernel_info();
- manifest.mutable_new_rootfs_info();
- manifest.set_minor_version(kMaxSupportedMinorPayloadVersion);
-
- RunManifestValidation(manifest,
- kChromeOSMajorPayloadVersion,
+ kBrilloMajorPayloadVersion,
InstallPayloadType::kFull,
ErrorCode::kPayloadMismatchedType);
}
@@ -869,8 +876,8 @@
// Generate a bad version number.
manifest.set_minor_version(kMaxSupportedMinorPayloadVersion + 10000);
- // Mark the manifest as a delta payload by setting old_rootfs_info.
- manifest.mutable_old_rootfs_info();
+ // Mark the manifest as a delta payload by setting |old_partition_info|.
+ manifest.add_partitions()->mutable_old_partition_info();
RunManifestValidation(manifest,
kMaxSupportedMajorPayloadVersion,
@@ -897,15 +904,27 @@
EXPECT_TRUE(performer_.Write(kDeltaMagic, sizeof(kDeltaMagic)));
uint64_t major_version = htobe64(kBrilloMajorPayloadVersion);
- EXPECT_TRUE(performer_.Write(&major_version, 8));
+ EXPECT_TRUE(
+ performer_.Write(&major_version, PayloadMetadata::kDeltaVersionSize));
uint64_t manifest_size = rand_r(&seed) % 256;
- uint64_t manifest_size_be = htobe64(manifest_size);
- EXPECT_TRUE(performer_.Write(&manifest_size_be, 8));
-
uint32_t metadata_signature_size = rand_r(&seed) % 256;
+
+  // The payload size has to be bigger than the |metadata_size| and
+  // |metadata_signature_size|.
+ payload_.size = PayloadMetadata::kDeltaManifestSizeOffset +
+ PayloadMetadata::kDeltaManifestSizeSize +
+ PayloadMetadata::kDeltaMetadataSignatureSizeSize +
+ manifest_size + metadata_signature_size + 1;
+
+ uint64_t manifest_size_be = htobe64(manifest_size);
+ EXPECT_TRUE(performer_.Write(&manifest_size_be,
+ PayloadMetadata::kDeltaManifestSizeSize));
+
uint32_t metadata_signature_size_be = htobe32(metadata_signature_size);
- EXPECT_TRUE(performer_.Write(&metadata_signature_size_be, 4));
+ EXPECT_TRUE(
+ performer_.Write(&metadata_signature_size_be,
+ PayloadMetadata::kDeltaMetadataSignatureSizeSize));
EXPECT_LT(performer_.Close(), 0);
@@ -915,10 +934,74 @@
EXPECT_EQ(metadata_signature_size, performer_.metadata_signature_size_);
}
+TEST_F(DeltaPerformerTest, BrilloMetadataSizeNOKTest) {
+ unsigned int seed = time(nullptr);
+ EXPECT_TRUE(performer_.Write(kDeltaMagic, sizeof(kDeltaMagic)));
+
+ uint64_t major_version = htobe64(kBrilloMajorPayloadVersion);
+ EXPECT_TRUE(
+ performer_.Write(&major_version, PayloadMetadata::kDeltaVersionSize));
+
+ uint64_t manifest_size = UINT64_MAX - 600; // Subtract to avoid wrap around.
+ uint64_t manifest_offset = PayloadMetadata::kDeltaManifestSizeOffset +
+ PayloadMetadata::kDeltaManifestSizeSize +
+ PayloadMetadata::kDeltaMetadataSignatureSizeSize;
+ payload_.metadata_size = manifest_offset + manifest_size;
+ uint32_t metadata_signature_size = rand_r(&seed) % 256;
+
+  // The payload size is greater than the payload header but smaller than
+  // |metadata_signature_size| + |metadata_size|.
+ payload_.size = manifest_offset + metadata_signature_size + 1;
+
+ uint64_t manifest_size_be = htobe64(manifest_size);
+ EXPECT_TRUE(performer_.Write(&manifest_size_be,
+ PayloadMetadata::kDeltaManifestSizeSize));
+ uint32_t metadata_signature_size_be = htobe32(metadata_signature_size);
+
+ ErrorCode error;
+ EXPECT_FALSE(
+ performer_.Write(&metadata_signature_size_be,
+ PayloadMetadata::kDeltaMetadataSignatureSizeSize + 1,
+ &error));
+
+ EXPECT_EQ(ErrorCode::kDownloadInvalidMetadataSize, error);
+}
+
+TEST_F(DeltaPerformerTest, BrilloMetadataSignatureSizeNOKTest) {
+ unsigned int seed = time(nullptr);
+ EXPECT_TRUE(performer_.Write(kDeltaMagic, sizeof(kDeltaMagic)));
+
+ uint64_t major_version = htobe64(kBrilloMajorPayloadVersion);
+ EXPECT_TRUE(
+ performer_.Write(&major_version, PayloadMetadata::kDeltaVersionSize));
+
+ uint64_t manifest_size = rand_r(&seed) % 256;
+ // Subtract from UINT32_MAX to avoid wrap around.
+ uint32_t metadata_signature_size = UINT32_MAX - 600;
+
+  // The payload size is greater than |manifest_size| but smaller than
+  // |metadata_signature_size|.
+ payload_.size = manifest_size + 1;
+
+ uint64_t manifest_size_be = htobe64(manifest_size);
+ EXPECT_TRUE(performer_.Write(&manifest_size_be,
+ PayloadMetadata::kDeltaManifestSizeSize));
+
+ uint32_t metadata_signature_size_be = htobe32(metadata_signature_size);
+ ErrorCode error;
+ EXPECT_FALSE(
+ performer_.Write(&metadata_signature_size_be,
+ PayloadMetadata::kDeltaMetadataSignatureSizeSize + 1,
+ &error));
+
+ EXPECT_EQ(ErrorCode::kDownloadInvalidMetadataSize, error);
+}
+
TEST_F(DeltaPerformerTest, BrilloParsePayloadMetadataTest) {
brillo::Blob payload_data = GeneratePayload(
{}, {}, true, kBrilloMajorPayloadVersion, kSourceMinorPayloadVersion);
install_plan_.hash_checks_mandatory = true;
+ payload_.size = payload_data.size();
ErrorCode error;
EXPECT_EQ(MetadataParseResult::kSuccess,
performer_.ParsePayloadMetadata(payload_data, &error));
diff --git a/payload_consumer/download_action.cc b/payload_consumer/download_action.cc
index 09afc42..45df5a9 100644
--- a/payload_consumer/download_action.cc
+++ b/payload_consumer/download_action.cc
@@ -56,9 +56,6 @@
delegate_(nullptr),
p2p_sharing_fd_(-1),
p2p_visible_(true) {
-#if BASE_VER < 576279
- base::StatisticsRecorder::Initialize();
-#endif
}
DownloadAction::~DownloadAction() {}
diff --git a/payload_consumer/filesystem_verifier_action.cc b/payload_consumer/filesystem_verifier_action.cc
index 36e5a35..f9e7f81 100644
--- a/payload_consumer/filesystem_verifier_action.cc
+++ b/payload_consumer/filesystem_verifier_action.cc
@@ -57,6 +57,7 @@
abort_action_completer.set_code(ErrorCode::kSuccess);
return;
}
+ install_plan_.Dump();
StartPartitionHashing();
abort_action_completer.set_should_complete(false);
diff --git a/payload_consumer/install_plan.cc b/payload_consumer/install_plan.cc
index 766b27c..a313627 100644
--- a/payload_consumer/install_plan.cc
+++ b/payload_consumer/install_plan.cc
@@ -29,6 +29,11 @@
namespace chromeos_update_engine {
+string PayloadUrlsToString(
+ const decltype(InstallPlan::Payload::payload_urls)& payload_urls) {
+ return "(" + base::JoinString(payload_urls, ",") + ")";
+}
+
string InstallPayloadTypeToString(InstallPayloadType type) {
switch (type) {
case InstallPayloadType::kUnknown:
@@ -66,8 +71,9 @@
string payloads_str;
for (const auto& payload : payloads) {
payloads_str += base::StringPrintf(
- ", payload: (size: %" PRIu64 ", metadata_size: %" PRIu64
+ ", payload: (urls: %s, size: %" PRIu64 ", metadata_size: %" PRIu64
", metadata signature: %s, hash: %s, payload type: %s)",
+ PayloadUrlsToString(payload.payload_urls).c_str(),
payload.size,
payload.metadata_size,
payload.metadata_signature.c_str(),
@@ -92,8 +98,8 @@
<< version_str
<< ", source_slot: " << BootControlInterface::SlotName(source_slot)
<< ", target_slot: " << BootControlInterface::SlotName(target_slot)
- << ", url: " << url_str << payloads_str << partitions_str
- << ", hash_checks_mandatory: "
+ << ", initial url: " << url_str << payloads_str
+ << partitions_str << ", hash_checks_mandatory: "
<< utils::ToString(hash_checks_mandatory)
<< ", powerwash_required: " << utils::ToString(powerwash_required)
<< ", switch_slot_on_reboot: "
diff --git a/payload_consumer/install_plan.h b/payload_consumer/install_plan.h
index ede36b3..7a95ab4 100644
--- a/payload_consumer/install_plan.h
+++ b/payload_consumer/install_plan.h
@@ -58,6 +58,7 @@
std::string system_version;
struct Payload {
+ std::vector<std::string> payload_urls; // URLs to download the payload
uint64_t size = 0; // size of the payload
uint64_t metadata_size = 0; // size of the metadata
std::string metadata_signature; // signature of the metadata in base64
@@ -69,7 +70,8 @@
bool already_applied = false;
bool operator==(const Payload& that) const {
- return size == that.size && metadata_size == that.metadata_size &&
+ return payload_urls == that.payload_urls && size == that.size &&
+ metadata_size == that.metadata_size &&
metadata_signature == that.metadata_signature &&
hash == that.hash && type == that.type &&
already_applied == that.already_applied;
@@ -146,6 +148,9 @@
// True if this update is a rollback.
bool is_rollback{false};
+ // True if this rollback should preserve some system data.
+ bool rollback_data_save_requested{false};
+
// True if the update should write verity.
// False otherwise.
bool write_verity{true};
diff --git a/payload_consumer/mtd_file_descriptor.cc b/payload_consumer/mtd_file_descriptor.cc
deleted file mode 100644
index 5d940cb..0000000
--- a/payload_consumer/mtd_file_descriptor.cc
+++ /dev/null
@@ -1,263 +0,0 @@
-//
-// Copyright (C) 2014 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#include "update_engine/payload_consumer/mtd_file_descriptor.h"
-
-#include <fcntl.h>
-#include <mtd/ubi-user.h>
-#include <sys/ioctl.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-
-#include <memory>
-#include <string>
-
-#include <base/files/file_path.h>
-#include <base/strings/string_number_conversions.h>
-#include <base/strings/string_util.h>
-#include <base/strings/stringprintf.h>
-
-#include "update_engine/common/subprocess.h"
-#include "update_engine/common/utils.h"
-
-using std::string;
-
-namespace {
-
-static const char kSysfsClassUbi[] = "/sys/class/ubi/";
-static const char kUsableEbSize[] = "/usable_eb_size";
-static const char kReservedEbs[] = "/reserved_ebs";
-
-using chromeos_update_engine::UbiVolumeInfo;
-using chromeos_update_engine::utils::ReadFile;
-
-// Return a UbiVolumeInfo pointer if |path| is a UBI volume. Otherwise, return
-// a null unique pointer.
-std::unique_ptr<UbiVolumeInfo> GetUbiVolumeInfo(const string& path) {
- base::FilePath device_node(path);
- base::FilePath ubi_name(device_node.BaseName());
-
- string sysfs_node(kSysfsClassUbi);
- sysfs_node.append(ubi_name.MaybeAsASCII());
-
- std::unique_ptr<UbiVolumeInfo> ret;
-
- // Obtain volume info from sysfs.
- string s_reserved_ebs;
- if (!ReadFile(sysfs_node + kReservedEbs, &s_reserved_ebs)) {
- LOG(ERROR) << "Cannot read " << sysfs_node + kReservedEbs;
- return ret;
- }
- string s_eb_size;
- if (!ReadFile(sysfs_node + kUsableEbSize, &s_eb_size)) {
- LOG(ERROR) << "Cannot read " << sysfs_node + kUsableEbSize;
- return ret;
- }
-
- base::TrimWhitespaceASCII(
- s_reserved_ebs, base::TRIM_TRAILING, &s_reserved_ebs);
- base::TrimWhitespaceASCII(s_eb_size, base::TRIM_TRAILING, &s_eb_size);
-
- uint64_t reserved_ebs, eb_size;
- if (!base::StringToUint64(s_reserved_ebs, &reserved_ebs)) {
- LOG(ERROR) << "Cannot parse reserved_ebs: " << s_reserved_ebs;
- return ret;
- }
- if (!base::StringToUint64(s_eb_size, &eb_size)) {
- LOG(ERROR) << "Cannot parse usable_eb_size: " << s_eb_size;
- return ret;
- }
-
- ret.reset(new UbiVolumeInfo);
- ret->reserved_ebs = reserved_ebs;
- ret->eraseblock_size = eb_size;
- return ret;
-}
-
-} // namespace
-
-namespace chromeos_update_engine {
-
-MtdFileDescriptor::MtdFileDescriptor()
- : read_ctx_(nullptr, &mtd_read_close),
- write_ctx_(nullptr, &mtd_write_close) {}
-
-bool MtdFileDescriptor::IsMtd(const char* path) {
- uint64_t size;
- return mtd_node_info(path, &size, nullptr, nullptr) == 0;
-}
-
-bool MtdFileDescriptor::Open(const char* path, int flags, mode_t mode) {
- // This File Descriptor does not support read and write.
- TEST_AND_RETURN_FALSE((flags & O_ACCMODE) != O_RDWR);
- // But we need to open the underlying file descriptor in O_RDWR mode because
- // during write, we need to read back to verify the write actually sticks or
- // we have to skip the block. That job is done by mtdutils library.
- if ((flags & O_ACCMODE) == O_WRONLY) {
- flags &= ~O_ACCMODE;
- flags |= O_RDWR;
- }
- TEST_AND_RETURN_FALSE(
- EintrSafeFileDescriptor::Open(path, flags | O_CLOEXEC, mode));
-
- if ((flags & O_ACCMODE) == O_RDWR) {
- write_ctx_.reset(mtd_write_descriptor(fd_, path));
- nr_written_ = 0;
- } else {
- read_ctx_.reset(mtd_read_descriptor(fd_, path));
- }
-
- if (!read_ctx_ && !write_ctx_) {
- Close();
- return false;
- }
-
- return true;
-}
-
-bool MtdFileDescriptor::Open(const char* path, int flags) {
- mode_t cur = umask(022);
- umask(cur);
- return Open(path, flags, 0777 & ~cur);
-}
-
-ssize_t MtdFileDescriptor::Read(void* buf, size_t count) {
- CHECK(read_ctx_);
- return mtd_read_data(read_ctx_.get(), static_cast<char*>(buf), count);
-}
-
-ssize_t MtdFileDescriptor::Write(const void* buf, size_t count) {
- CHECK(write_ctx_);
- ssize_t result =
- mtd_write_data(write_ctx_.get(), static_cast<const char*>(buf), count);
- if (result > 0) {
- nr_written_ += result;
- }
- return result;
-}
-
-off64_t MtdFileDescriptor::Seek(off64_t offset, int whence) {
- if (write_ctx_) {
- // Ignore seek in write mode.
- return nr_written_;
- }
- return EintrSafeFileDescriptor::Seek(offset, whence);
-}
-
-bool MtdFileDescriptor::Close() {
- read_ctx_.reset();
- write_ctx_.reset();
- return EintrSafeFileDescriptor::Close();
-}
-
-bool UbiFileDescriptor::IsUbi(const char* path) {
- base::FilePath device_node(path);
- base::FilePath ubi_name(device_node.BaseName());
- TEST_AND_RETURN_FALSE(base::StartsWith(
- ubi_name.MaybeAsASCII(), "ubi", base::CompareCase::SENSITIVE));
-
- return static_cast<bool>(GetUbiVolumeInfo(path));
-}
-
-bool UbiFileDescriptor::Open(const char* path, int flags, mode_t mode) {
- std::unique_ptr<UbiVolumeInfo> info = GetUbiVolumeInfo(path);
- if (!info) {
- return false;
- }
-
- // This File Descriptor does not support read and write.
- TEST_AND_RETURN_FALSE((flags & O_ACCMODE) != O_RDWR);
- TEST_AND_RETURN_FALSE(
- EintrSafeFileDescriptor::Open(path, flags | O_CLOEXEC, mode));
-
- usable_eb_blocks_ = info->reserved_ebs;
- eraseblock_size_ = info->eraseblock_size;
- volume_size_ = usable_eb_blocks_ * eraseblock_size_;
-
- if ((flags & O_ACCMODE) == O_WRONLY) {
- // It's best to use volume update ioctl so that UBI layer will mark the
- // volume as being updated, and only clear that mark if the update is
- // successful. We will need to pad to the whole volume size at close.
- uint64_t vsize = volume_size_;
- if (ioctl(fd_, UBI_IOCVOLUP, &vsize) != 0) {
- PLOG(ERROR) << "Cannot issue volume update ioctl";
- EintrSafeFileDescriptor::Close();
- return false;
- }
- mode_ = kWriteOnly;
- nr_written_ = 0;
- } else {
- mode_ = kReadOnly;
- }
-
- return true;
-}
-
-bool UbiFileDescriptor::Open(const char* path, int flags) {
- mode_t cur = umask(022);
- umask(cur);
- return Open(path, flags, 0777 & ~cur);
-}
-
-ssize_t UbiFileDescriptor::Read(void* buf, size_t count) {
- CHECK(mode_ == kReadOnly);
- return EintrSafeFileDescriptor::Read(buf, count);
-}
-
-ssize_t UbiFileDescriptor::Write(const void* buf, size_t count) {
- CHECK(mode_ == kWriteOnly);
- ssize_t nr_chunk = EintrSafeFileDescriptor::Write(buf, count);
- if (nr_chunk >= 0) {
- nr_written_ += nr_chunk;
- }
- return nr_chunk;
-}
-
-off64_t UbiFileDescriptor::Seek(off64_t offset, int whence) {
- if (mode_ == kWriteOnly) {
- // Ignore seek in write mode.
- return nr_written_;
- }
- return EintrSafeFileDescriptor::Seek(offset, whence);
-}
-
-bool UbiFileDescriptor::Close() {
- bool pad_ok = true;
- if (IsOpen() && mode_ == kWriteOnly) {
- char buf[1024];
- memset(buf, 0xFF, sizeof(buf));
- while (nr_written_ < volume_size_) {
- // We have written less than the whole volume. In order for us to clear
- // the update marker, we need to fill the rest. It is recommended to fill
- // UBI writes with 0xFF.
- uint64_t to_write = volume_size_ - nr_written_;
- if (to_write > sizeof(buf)) {
- to_write = sizeof(buf);
- }
- ssize_t nr_chunk = EintrSafeFileDescriptor::Write(buf, to_write);
- if (nr_chunk < 0) {
- LOG(ERROR) << "Cannot 0xFF-pad before closing.";
- // There is an error, but we can't really do any meaningful thing here.
- pad_ok = false;
- break;
- }
- nr_written_ += nr_chunk;
- }
- }
- return EintrSafeFileDescriptor::Close() && pad_ok;
-}
-
-} // namespace chromeos_update_engine
diff --git a/payload_consumer/mtd_file_descriptor.h b/payload_consumer/mtd_file_descriptor.h
deleted file mode 100644
index c0170b7..0000000
--- a/payload_consumer/mtd_file_descriptor.h
+++ /dev/null
@@ -1,103 +0,0 @@
-//
-// Copyright (C) 2014 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#ifndef UPDATE_ENGINE_PAYLOAD_CONSUMER_MTD_FILE_DESCRIPTOR_H_
-#define UPDATE_ENGINE_PAYLOAD_CONSUMER_MTD_FILE_DESCRIPTOR_H_
-
-// This module defines file descriptors that deal with NAND media. We are
-// concerned with raw NAND access (as MTD device), and through UBI layer.
-
-#include <memory>
-
-#include <mtdutils.h>
-
-#include "update_engine/payload_consumer/file_descriptor.h"
-
-namespace chromeos_update_engine {
-
-// A class defining the file descriptor API for raw MTD device. This file
-// descriptor supports either random read, or sequential write but not both at
-// once.
-class MtdFileDescriptor : public EintrSafeFileDescriptor {
- public:
- MtdFileDescriptor();
-
- static bool IsMtd(const char* path);
-
- bool Open(const char* path, int flags, mode_t mode) override;
- bool Open(const char* path, int flags) override;
- ssize_t Read(void* buf, size_t count) override;
- ssize_t Write(const void* buf, size_t count) override;
- off64_t Seek(off64_t offset, int whence) override;
- uint64_t BlockDevSize() override { return 0; }
- bool BlkIoctl(int request,
- uint64_t start,
- uint64_t length,
- int* result) override {
- return false;
- }
- bool Close() override;
-
- private:
- std::unique_ptr<MtdReadContext, decltype(&mtd_read_close)> read_ctx_;
- std::unique_ptr<MtdWriteContext, decltype(&mtd_write_close)> write_ctx_;
- uint64_t nr_written_;
-};
-
-struct UbiVolumeInfo {
- // Number of eraseblocks.
- uint64_t reserved_ebs;
- // Size of each eraseblock.
- uint64_t eraseblock_size;
-};
-
-// A file descriptor to update a UBI volume, similar to MtdFileDescriptor.
-// Once the file descriptor is opened for write, the volume is marked as being
-// updated. The volume will not be usable until an update is completed. See
-// UBI_IOCVOLUP ioctl operation.
-class UbiFileDescriptor : public EintrSafeFileDescriptor {
- public:
- // Perform some queries about |path| to see if it is a UBI volume.
- static bool IsUbi(const char* path);
-
- bool Open(const char* path, int flags, mode_t mode) override;
- bool Open(const char* path, int flags) override;
- ssize_t Read(void* buf, size_t count) override;
- ssize_t Write(const void* buf, size_t count) override;
- off64_t Seek(off64_t offset, int whence) override;
- uint64_t BlockDevSize() override { return 0; }
- bool BlkIoctl(int request,
- uint64_t start,
- uint64_t length,
- int* result) override {
- return false;
- }
- bool Close() override;
-
- private:
- enum Mode { kReadOnly, kWriteOnly };
-
- uint64_t usable_eb_blocks_;
- uint64_t eraseblock_size_;
- uint64_t volume_size_;
- uint64_t nr_written_;
-
- Mode mode_;
-};
-
-} // namespace chromeos_update_engine
-
-#endif // UPDATE_ENGINE_PAYLOAD_CONSUMER_MTD_FILE_DESCRIPTOR_H_
diff --git a/payload_consumer/payload_constants.cc b/payload_consumer/payload_constants.cc
index a2368a4..1c987bd 100644
--- a/payload_consumer/payload_constants.cc
+++ b/payload_consumer/payload_constants.cc
@@ -16,24 +16,26 @@
#include "update_engine/payload_consumer/payload_constants.h"
+#include <base/logging.h>
+
namespace chromeos_update_engine {
-const uint64_t kChromeOSMajorPayloadVersion = 1;
+// const uint64_t kChromeOSMajorPayloadVersion = 1; DEPRECATED
const uint64_t kBrilloMajorPayloadVersion = 2;
-const uint32_t kMinSupportedMinorPayloadVersion = 1;
-const uint32_t kMaxSupportedMinorPayloadVersion = 6;
+const uint64_t kMinSupportedMajorPayloadVersion = kBrilloMajorPayloadVersion;
+const uint64_t kMaxSupportedMajorPayloadVersion = kBrilloMajorPayloadVersion;
const uint32_t kFullPayloadMinorVersion = 0;
-const uint32_t kInPlaceMinorPayloadVersion = 1;
+// const uint32_t kInPlaceMinorPayloadVersion = 1; DEPRECATED
const uint32_t kSourceMinorPayloadVersion = 2;
const uint32_t kOpSrcHashMinorPayloadVersion = 3;
const uint32_t kBrotliBsdiffMinorPayloadVersion = 4;
const uint32_t kPuffdiffMinorPayloadVersion = 5;
const uint32_t kVerityMinorPayloadVersion = 6;
-const uint64_t kMinSupportedMajorPayloadVersion = 1;
-const uint64_t kMaxSupportedMajorPayloadVersion = 2;
+const uint32_t kMinSupportedMinorPayloadVersion = kSourceMinorPayloadVersion;
+const uint32_t kMaxSupportedMinorPayloadVersion = kVerityMinorPayloadVersion;
const uint64_t kMaxPayloadHeaderSize = 24;
@@ -44,10 +46,6 @@
const char* InstallOperationTypeName(InstallOperation::Type op_type) {
switch (op_type) {
- case InstallOperation::BSDIFF:
- return "BSDIFF";
- case InstallOperation::MOVE:
- return "MOVE";
case InstallOperation::REPLACE:
return "REPLACE";
case InstallOperation::REPLACE_BZ:
@@ -66,6 +64,10 @@
return "PUFFDIFF";
case InstallOperation::BROTLI_BSDIFF:
return "BROTLI_BSDIFF";
+
+ case InstallOperation::BSDIFF:
+ case InstallOperation::MOVE:
+ NOTREACHED();
}
return "<unknown_op>";
}
diff --git a/payload_consumer/payload_constants.h b/payload_consumer/payload_constants.h
index 1642488..5c2d17c 100644
--- a/payload_consumer/payload_constants.h
+++ b/payload_consumer/payload_constants.h
@@ -26,7 +26,7 @@
namespace chromeos_update_engine {
// The major version used by Chrome OS.
-extern const uint64_t kChromeOSMajorPayloadVersion;
+// extern const uint64_t kChromeOSMajorPayloadVersion; DEPRECATED
// The major version used by Brillo.
extern const uint64_t kBrilloMajorPayloadVersion;
@@ -39,7 +39,7 @@
extern const uint32_t kFullPayloadMinorVersion;
// The minor version used by the in-place delta generator algorithm.
-extern const uint32_t kInPlaceMinorPayloadVersion;
+// extern const uint32_t kInPlaceMinorPayloadVersion; DEPRECATED
// The minor version used by the A to B delta generator algorithm.
extern const uint32_t kSourceMinorPayloadVersion;
diff --git a/payload_consumer/payload_metadata.cc b/payload_consumer/payload_metadata.cc
index 0952646..01f3b62 100644
--- a/payload_consumer/payload_metadata.cc
+++ b/payload_consumer/payload_metadata.cc
@@ -37,34 +37,18 @@
const uint64_t PayloadMetadata::kDeltaManifestSizeSize = 8;
const uint64_t PayloadMetadata::kDeltaMetadataSignatureSizeSize = 4;
-bool PayloadMetadata::GetMetadataSignatureSizeOffset(
- uint64_t* out_offset) const {
- if (GetMajorVersion() == kBrilloMajorPayloadVersion) {
- *out_offset = kDeltaManifestSizeOffset + kDeltaManifestSizeSize;
- return true;
- }
- return false;
+uint64_t PayloadMetadata::GetMetadataSignatureSizeOffset() const {
+ return kDeltaManifestSizeOffset + kDeltaManifestSizeSize;
}
-bool PayloadMetadata::GetManifestOffset(uint64_t* out_offset) const {
- // Actual manifest begins right after the manifest size field or
- // metadata signature size field if major version >= 2.
- if (major_payload_version_ == kChromeOSMajorPayloadVersion) {
- *out_offset = kDeltaManifestSizeOffset + kDeltaManifestSizeSize;
- return true;
- }
- if (major_payload_version_ == kBrilloMajorPayloadVersion) {
- *out_offset = kDeltaManifestSizeOffset + kDeltaManifestSizeSize +
- kDeltaMetadataSignatureSizeSize;
- return true;
- }
- LOG(ERROR) << "Unknown major payload version: " << major_payload_version_;
- return false;
+uint64_t PayloadMetadata::GetManifestOffset() const {
+ // Actual manifest begins right after the metadata signature size field.
+ return kDeltaManifestSizeOffset + kDeltaManifestSizeSize +
+ kDeltaMetadataSignatureSizeSize;
}
MetadataParseResult PayloadMetadata::ParsePayloadHeader(
const brillo::Blob& payload, ErrorCode* error) {
- uint64_t manifest_offset;
// Ensure we have data to cover the major payload version.
if (payload.size() < kDeltaManifestSizeOffset)
return MetadataParseResult::kInsufficientData;
@@ -76,6 +60,11 @@
return MetadataParseResult::kError;
}
+ uint64_t manifest_offset = GetManifestOffset();
+ // Check again with the manifest offset.
+ if (payload.size() < manifest_offset)
+ return MetadataParseResult::kInsufficientData;
+
// Extract the payload version from the metadata.
static_assert(sizeof(major_payload_version_) == kDeltaVersionSize,
"Major payload version size mismatch");
@@ -93,15 +82,6 @@
return MetadataParseResult::kError;
}
- // Get the manifest offset now that we have payload version.
- if (!GetManifestOffset(&manifest_offset)) {
- *error = ErrorCode::kUnsupportedMajorPayloadVersion;
- return MetadataParseResult::kError;
- }
- // Check again with the manifest offset.
- if (payload.size() < manifest_offset)
- return MetadataParseResult::kInsufficientData;
-
// Next, parse the manifest size.
static_assert(sizeof(manifest_size_) == kDeltaManifestSizeSize,
"manifest_size size mismatch");
@@ -113,30 +93,26 @@
metadata_size_ = manifest_offset + manifest_size_;
if (metadata_size_ < manifest_size_) {
// Overflow detected.
+ LOG(ERROR) << "Overflow detected on manifest size.";
*error = ErrorCode::kDownloadInvalidMetadataSize;
return MetadataParseResult::kError;
}
- if (GetMajorVersion() == kBrilloMajorPayloadVersion) {
- // Parse the metadata signature size.
- static_assert(
- sizeof(metadata_signature_size_) == kDeltaMetadataSignatureSizeSize,
- "metadata_signature_size size mismatch");
- uint64_t metadata_signature_size_offset;
- if (!GetMetadataSignatureSizeOffset(&metadata_signature_size_offset)) {
- *error = ErrorCode::kError;
- return MetadataParseResult::kError;
- }
- memcpy(&metadata_signature_size_,
- &payload[metadata_signature_size_offset],
- kDeltaMetadataSignatureSizeSize);
- metadata_signature_size_ = be32toh(metadata_signature_size_);
+ // Parse the metadata signature size.
+ static_assert(
+ sizeof(metadata_signature_size_) == kDeltaMetadataSignatureSizeSize,
+ "metadata_signature_size size mismatch");
+ uint64_t metadata_signature_size_offset = GetMetadataSignatureSizeOffset();
+ memcpy(&metadata_signature_size_,
+ &payload[metadata_signature_size_offset],
+ kDeltaMetadataSignatureSizeSize);
+ metadata_signature_size_ = be32toh(metadata_signature_size_);
- if (metadata_size_ + metadata_signature_size_ < metadata_size_) {
- // Overflow detected.
- *error = ErrorCode::kDownloadInvalidMetadataSize;
- return MetadataParseResult::kError;
- }
+ if (metadata_size_ + metadata_signature_size_ < metadata_size_) {
+ // Overflow detected.
+ LOG(ERROR) << "Overflow detected on metadata and signature size.";
+ *error = ErrorCode::kDownloadInvalidMetadataSize;
+ return MetadataParseResult::kError;
}
return MetadataParseResult::kSuccess;
}
@@ -148,9 +124,7 @@
bool PayloadMetadata::GetManifest(const brillo::Blob& payload,
DeltaArchiveManifest* out_manifest) const {
- uint64_t manifest_offset;
- if (!GetManifestOffset(&manifest_offset))
- return false;
+ uint64_t manifest_offset = GetManifestOffset();
CHECK_GE(payload.size(), manifest_offset + manifest_size_);
return out_manifest->ParseFromArray(&payload[manifest_offset],
manifest_size_);
@@ -176,7 +150,7 @@
<< metadata_signature;
return ErrorCode::kDownloadMetadataSignatureError;
}
- } else if (major_payload_version_ == kBrilloMajorPayloadVersion) {
+ } else {
metadata_signature_protobuf.assign(
payload.begin() + metadata_size_,
payload.begin() + metadata_size_ + metadata_signature_size_);
@@ -225,4 +199,32 @@
return ErrorCode::kSuccess;
}
+bool PayloadMetadata::ParsePayloadFile(const string& payload_path,
+ DeltaArchiveManifest* manifest,
+ Signatures* metadata_signatures) {
+ brillo::Blob payload;
+ TEST_AND_RETURN_FALSE(
+ utils::ReadFileChunk(payload_path, 0, kMaxPayloadHeaderSize, &payload));
+ TEST_AND_RETURN_FALSE(ParsePayloadHeader(payload));
+
+ if (manifest != nullptr) {
+ TEST_AND_RETURN_FALSE(
+ utils::ReadFileChunk(payload_path,
+ kMaxPayloadHeaderSize,
+ GetMetadataSize() - kMaxPayloadHeaderSize,
+ &payload));
+ TEST_AND_RETURN_FALSE(GetManifest(payload, manifest));
+ }
+
+ if (metadata_signatures != nullptr) {
+ payload.clear();
+ TEST_AND_RETURN_FALSE(utils::ReadFileChunk(
+ payload_path, GetMetadataSize(), GetMetadataSignatureSize(), &payload));
+ TEST_AND_RETURN_FALSE(
+ metadata_signatures->ParseFromArray(payload.data(), payload.size()));
+ }
+
+ return true;
+}
+
} // namespace chromeos_update_engine
diff --git a/payload_consumer/payload_metadata.h b/payload_consumer/payload_metadata.h
index 75ef8f9..cc42253 100644
--- a/payload_consumer/payload_metadata.h
+++ b/payload_consumer/payload_metadata.h
@@ -88,15 +88,20 @@
bool GetManifest(const brillo::Blob& payload,
DeltaArchiveManifest* out_manifest) const;
- private:
- // Set |*out_offset| to the byte offset at which the manifest protobuf begins
- // in a payload. Return true on success, false if the offset is unknown.
- bool GetManifestOffset(uint64_t* out_offset) const;
+ // Parses a payload file |payload_path| and prepares the metadata properties,
+ // manifest and metadata signatures. Can be used as an easy to use utility to
+ // get the payload information without manually parsing the payload.
+ bool ParsePayloadFile(const std::string& payload_path,
+ DeltaArchiveManifest* manifest,
+ Signatures* metadata_signatures);
- // Set |*out_offset| to the byte offset where the size of the metadata
- // signature is stored in a payload. Return true on success, if this field is
- // not present in the payload, return false.
- bool GetMetadataSignatureSizeOffset(uint64_t* out_offset) const;
+ private:
+ // Returns the byte offset at which the manifest protobuf begins in a payload.
+ uint64_t GetManifestOffset() const;
+
+ // Returns the byte offset where the size of the metadata signature is stored
+ // in a payload.
+ uint64_t GetMetadataSignatureSizeOffset() const;
uint64_t metadata_size_{0};
uint64_t manifest_size_{0};
diff --git a/payload_consumer/postinstall_runner_action.cc b/payload_consumer/postinstall_runner_action.cc
index c08cfc2..c520c7e 100644
--- a/payload_consumer/postinstall_runner_action.cc
+++ b/payload_consumer/postinstall_runner_action.cc
@@ -28,6 +28,7 @@
#include <base/files/file_path.h>
#include <base/files/file_util.h>
#include <base/logging.h>
+#include <base/stl_util.h>
#include <base/strings/string_split.h>
#include <base/strings/string_util.h>
@@ -57,9 +58,14 @@
CHECK(HasInputObject());
install_plan_ = GetInputObject();
- // Currently we're always powerwashing when rolling back.
+ // We always powerwash when rolling back, however policy can determine
+ // if this is a full/normal powerwash, or a special rollback powerwash
+ // that retains a small amount of system state such as enrollment and
+ // network configuration. In both cases all user accounts are deleted.
if (install_plan_.powerwash_required || install_plan_.is_rollback) {
- if (hardware_->SchedulePowerwash(install_plan_.is_rollback)) {
+ bool save_rollback_data =
+ install_plan_.is_rollback && install_plan_.rollback_data_save_requested;
+ if (hardware_->SchedulePowerwash(save_rollback_data)) {
powerwash_scheduled_ = true;
} else {
return CompletePostinstall(ErrorCode::kPostinstallPowerwashError);
@@ -108,8 +114,7 @@
const InstallPlan::Partition& partition =
install_plan_.partitions[current_partition_];
- const string mountable_device =
- utils::MakePartitionNameForMount(partition.target_path);
+ const string mountable_device = partition.target_path;
if (mountable_device.empty()) {
LOG(ERROR) << "Cannot make mountable device from " << partition.target_path;
return CompletePostinstall(ErrorCode::kPostinstallRunnerError);
@@ -215,6 +220,7 @@
PLOG(ERROR) << "Unable to set non-blocking I/O mode on fd " << progress_fd_;
}
+#ifdef __ANDROID__
progress_task_ = MessageLoop::current()->WatchFileDescriptor(
FROM_HERE,
progress_fd_,
@@ -222,6 +228,13 @@
true,
base::Bind(&PostinstallRunnerAction::OnProgressFdReady,
base::Unretained(this)));
+#else
+ progress_controller_ = base::FileDescriptorWatcher::WatchReadable(
+ progress_fd_,
+ base::BindRepeating(&PostinstallRunnerAction::OnProgressFdReady,
+ base::Unretained(this)));
+#endif // __ANDROID__
+
}
void PostinstallRunnerAction::OnProgressFdReady() {
@@ -231,7 +244,7 @@
bytes_read = 0;
bool eof;
bool ok =
- utils::ReadAll(progress_fd_, buf, arraysize(buf), &bytes_read, &eof);
+ utils::ReadAll(progress_fd_, buf, base::size(buf), &bytes_read, &eof);
progress_buffer_.append(buf, bytes_read);
// Process every line.
vector<string> lines = base::SplitString(
@@ -246,8 +259,12 @@
if (!ok || eof) {
// There was either an error or an EOF condition, so we are done watching
// the file descriptor.
+#ifdef __ANDROID__
MessageLoop::current()->CancelTask(progress_task_);
progress_task_ = MessageLoop::kTaskIdNull;
+#else
+ progress_controller_.reset();
+#endif // __ANDROID__
return;
}
} while (bytes_read);
@@ -291,10 +308,15 @@
fs_mount_dir_.clear();
progress_fd_ = -1;
+#ifdef __ANDROID__
if (progress_task_ != MessageLoop::kTaskIdNull) {
MessageLoop::current()->CancelTask(progress_task_);
progress_task_ = MessageLoop::kTaskIdNull;
}
+#else
+ progress_controller_.reset();
+#endif // __ANDROID__
+
progress_buffer_.clear();
}
diff --git a/payload_consumer/postinstall_runner_action.h b/payload_consumer/postinstall_runner_action.h
index b9b7069..e5dfc40 100644
--- a/payload_consumer/postinstall_runner_action.h
+++ b/payload_consumer/postinstall_runner_action.h
@@ -17,9 +17,11 @@
#ifndef UPDATE_ENGINE_PAYLOAD_CONSUMER_POSTINSTALL_RUNNER_ACTION_H_
#define UPDATE_ENGINE_PAYLOAD_CONSUMER_POSTINSTALL_RUNNER_ACTION_H_
+#include <memory>
#include <string>
#include <vector>
+#include <base/files/file_descriptor_watcher_posix.h>
#include <brillo/message_loops/message_loop.h>
#include <gtest/gtest_prod.h>
@@ -139,7 +141,12 @@
// The parent progress file descriptor used to watch for progress reports from
// the postinstall program and the task watching for them.
int progress_fd_{-1};
+
+#ifdef __ANDROID__
brillo::MessageLoop::TaskId progress_task_{brillo::MessageLoop::kTaskIdNull};
+#else
+ std::unique_ptr<base::FileDescriptorWatcher::Controller> progress_controller_;
+#endif // __ANDROID__
// A buffer of a partial read line from the progress file descriptor.
std::string progress_buffer_;
diff --git a/payload_consumer/postinstall_runner_action_unittest.cc b/payload_consumer/postinstall_runner_action_unittest.cc
index e9313f1..0041d31 100644
--- a/payload_consumer/postinstall_runner_action_unittest.cc
+++ b/payload_consumer/postinstall_runner_action_unittest.cc
@@ -100,7 +100,8 @@
void RunPostinstallAction(const string& device_path,
const string& postinstall_program,
bool powerwash_required,
- bool is_rollback);
+ bool is_rollback,
+ bool save_rollback_data);
void RunPostinstallActionWithInstallPlan(const InstallPlan& install_plan);
@@ -143,7 +144,14 @@
base::TimeDelta::FromMilliseconds(10));
} else {
CHECK(processor_);
- processor_->StopProcessing();
+ // Must use |PostDelayedTask()| here to ensure that |FileDescriptorWatcher|
+ // doesn't leak memory; do not call |StopProcessing()| directly.
+ loop_.PostDelayedTask(
+ FROM_HERE,
+ base::Bind(
+ [](ActionProcessor* processor) { processor->StopProcessing(); },
+ base::Unretained(processor_)),
+ base::TimeDelta::FromMilliseconds(100));
}
}
@@ -172,7 +180,8 @@
const string& device_path,
const string& postinstall_program,
bool powerwash_required,
- bool is_rollback) {
+ bool is_rollback,
+ bool save_rollback_data) {
InstallPlan::Partition part;
part.name = "part";
part.target_path = device_path;
@@ -183,6 +192,7 @@
install_plan.download_url = "http://127.0.0.1:8080/update";
install_plan.powerwash_required = powerwash_required;
install_plan.is_rollback = is_rollback;
+ install_plan.rollback_data_save_requested = save_rollback_data;
RunPostinstallActionWithInstallPlan(install_plan);
}
@@ -256,7 +266,8 @@
TEST_F(PostinstallRunnerActionTest, RunAsRootSimpleTest) {
ScopedLoopbackDeviceBinder loop(postinstall_image_, false, nullptr);
- RunPostinstallAction(loop.dev(), kPostinstallDefaultScript, false, false);
+ RunPostinstallAction(
+ loop.dev(), kPostinstallDefaultScript, false, false, false);
EXPECT_EQ(ErrorCode::kSuccess, processor_delegate_.code_);
EXPECT_TRUE(processor_delegate_.processing_done_called_);
@@ -267,7 +278,7 @@
TEST_F(PostinstallRunnerActionTest, RunAsRootRunSymlinkFileTest) {
ScopedLoopbackDeviceBinder loop(postinstall_image_, false, nullptr);
- RunPostinstallAction(loop.dev(), "bin/postinst_link", false, false);
+ RunPostinstallAction(loop.dev(), "bin/postinst_link", false, false, false);
EXPECT_EQ(ErrorCode::kSuccess, processor_delegate_.code_);
}
@@ -277,6 +288,7 @@
RunPostinstallAction(loop.dev(),
"bin/postinst_example",
/*powerwash_required=*/true,
+ false,
false);
EXPECT_EQ(ErrorCode::kSuccess, processor_delegate_.code_);
@@ -285,14 +297,31 @@
EXPECT_FALSE(fake_hardware_.GetIsRollbackPowerwashScheduled());
}
-TEST_F(PostinstallRunnerActionTest, RunAsRootRollbackTest) {
+TEST_F(PostinstallRunnerActionTest, RunAsRootRollbackTestNoDataSave) {
ScopedLoopbackDeviceBinder loop(postinstall_image_, false, nullptr);
// Run a simple postinstall program, rollback happened.
RunPostinstallAction(loop.dev(),
"bin/postinst_example",
false,
- /*is_rollback=*/true);
+ /*is_rollback=*/true,
+ /*save_rollback_data=*/false);
+ EXPECT_EQ(ErrorCode::kSuccess, processor_delegate_.code_);
+
+ // Check that powerwash was scheduled and that it's NOT a rollback powerwash.
+ EXPECT_TRUE(fake_hardware_.IsPowerwashScheduled());
+ EXPECT_FALSE(fake_hardware_.GetIsRollbackPowerwashScheduled());
+}
+
+TEST_F(PostinstallRunnerActionTest, RunAsRootRollbackTestWithDataSave) {
+ ScopedLoopbackDeviceBinder loop(postinstall_image_, false, nullptr);
+
+ // Run a simple postinstall program, rollback happened.
+ RunPostinstallAction(loop.dev(),
+ "bin/postinst_example",
+ false,
+ /*is_rollback=*/true,
+ /*save_rollback_data=*/true);
EXPECT_EQ(ErrorCode::kSuccess, processor_delegate_.code_);
// Check that powerwash was scheduled and that it's a rollback powerwash.
@@ -303,7 +332,8 @@
// Runs postinstall from a partition file that doesn't mount, so it should
// fail.
TEST_F(PostinstallRunnerActionTest, RunAsRootCantMountTest) {
- RunPostinstallAction("/dev/null", kPostinstallDefaultScript, false, false);
+ RunPostinstallAction(
+ "/dev/null", kPostinstallDefaultScript, false, false, false);
EXPECT_EQ(ErrorCode::kPostinstallRunnerError, processor_delegate_.code_);
// In case of failure, Postinstall should not signal a powerwash even if it
@@ -337,7 +367,7 @@
// fail.
TEST_F(PostinstallRunnerActionTest, RunAsRootErrScriptTest) {
ScopedLoopbackDeviceBinder loop(postinstall_image_, false, nullptr);
- RunPostinstallAction(loop.dev(), "bin/postinst_fail1", false, false);
+ RunPostinstallAction(loop.dev(), "bin/postinst_fail1", false, false, false);
EXPECT_EQ(ErrorCode::kPostinstallRunnerError, processor_delegate_.code_);
}
@@ -345,7 +375,7 @@
// UMA with a different error code. Test those cases are properly detected.
TEST_F(PostinstallRunnerActionTest, RunAsRootFirmwareBErrScriptTest) {
ScopedLoopbackDeviceBinder loop(postinstall_image_, false, nullptr);
- RunPostinstallAction(loop.dev(), "bin/postinst_fail3", false, false);
+ RunPostinstallAction(loop.dev(), "bin/postinst_fail3", false, false, false);
EXPECT_EQ(ErrorCode::kPostinstallBootedFromFirmwareB,
processor_delegate_.code_);
}
@@ -353,7 +383,7 @@
// Check that you can't specify an absolute path.
TEST_F(PostinstallRunnerActionTest, RunAsRootAbsolutePathNotAllowedTest) {
ScopedLoopbackDeviceBinder loop(postinstall_image_, false, nullptr);
- RunPostinstallAction(loop.dev(), "/etc/../bin/sh", false, false);
+ RunPostinstallAction(loop.dev(), "/etc/../bin/sh", false, false, false);
EXPECT_EQ(ErrorCode::kPostinstallRunnerError, processor_delegate_.code_);
}
@@ -362,7 +392,8 @@
// SElinux labels are only set on Android.
TEST_F(PostinstallRunnerActionTest, RunAsRootCheckFileContextsTest) {
ScopedLoopbackDeviceBinder loop(postinstall_image_, false, nullptr);
- RunPostinstallAction(loop.dev(), "bin/self_check_context", false, false);
+ RunPostinstallAction(
+ loop.dev(), "bin/self_check_context", false, false, false);
EXPECT_EQ(ErrorCode::kSuccess, processor_delegate_.code_);
}
#endif // __ANDROID__
@@ -375,7 +406,7 @@
loop_.PostTask(FROM_HERE,
base::Bind(&PostinstallRunnerActionTest::SuspendRunningAction,
base::Unretained(this)));
- RunPostinstallAction(loop.dev(), "bin/postinst_suspend", false, false);
+ RunPostinstallAction(loop.dev(), "bin/postinst_suspend", false, false, false);
// postinst_suspend returns 0 only if it was suspended at some point.
EXPECT_EQ(ErrorCode::kSuccess, processor_delegate_.code_);
EXPECT_TRUE(processor_delegate_.processing_done_called_);
@@ -387,7 +418,7 @@
// Wait for the action to start and then cancel it.
CancelWhenStarted();
- RunPostinstallAction(loop.dev(), "bin/postinst_suspend", false, false);
+ RunPostinstallAction(loop.dev(), "bin/postinst_suspend", false, false, false);
// When canceling the action, the action never finished and therefore we had
// a ProcessingStopped call instead.
EXPECT_FALSE(processor_delegate_.code_set_);
@@ -410,7 +441,8 @@
ScopedLoopbackDeviceBinder loop(postinstall_image_, false, nullptr);
setup_action_delegate_ = &mock_delegate_;
- RunPostinstallAction(loop.dev(), "bin/postinst_progress", false, false);
+ RunPostinstallAction(
+ loop.dev(), "bin/postinst_progress", false, false, false);
EXPECT_EQ(ErrorCode::kSuccess, processor_delegate_.code_);
}
diff --git a/payload_generator/ab_generator_unittest.cc b/payload_generator/ab_generator_unittest.cc
index 270657a..7a95284 100644
--- a/payload_generator/ab_generator_unittest.cc
+++ b/payload_generator/ab_generator_unittest.cc
@@ -30,10 +30,10 @@
#include "update_engine/common/test_utils.h"
#include "update_engine/common/utils.h"
#include "update_engine/payload_generator/annotated_operation.h"
-#include "update_engine/payload_generator/bzip.h"
#include "update_engine/payload_generator/delta_diff_generator.h"
#include "update_engine/payload_generator/extent_ranges.h"
#include "update_engine/payload_generator/extent_utils.h"
+#include "update_engine/payload_generator/xz.h"
using std::string;
using std::vector;
@@ -48,8 +48,8 @@
return ext.start_block() == start_block && ext.num_blocks() == num_blocks;
}
-// Tests splitting of a REPLACE/REPLACE_BZ operation.
-void TestSplitReplaceOrReplaceBzOperation(InstallOperation::Type orig_type,
+// Tests splitting of a REPLACE/REPLACE_XZ operation.
+void TestSplitReplaceOrReplaceXzOperation(InstallOperation::Type orig_type,
bool compressible) {
const size_t op_ex1_start_block = 2;
const size_t op_ex1_num_blocks = 2;
@@ -71,7 +71,7 @@
}
ASSERT_EQ(part_size, part_data.size());
test_utils::ScopedTempFile part_file(
- "SplitReplaceOrReplaceBzTest_part.XXXXXX");
+ "SplitReplaceOrReplaceXzTest_part.XXXXXX");
ASSERT_TRUE(test_utils::WriteFileVector(part_file.path(), part_data));
// Create original operation and blob data.
@@ -97,7 +97,7 @@
if (orig_type == InstallOperation::REPLACE) {
op_blob = op_data;
} else {
- ASSERT_TRUE(BzipCompress(op_data, &op_blob));
+ ASSERT_TRUE(XzCompress(op_data, &op_blob));
}
op.set_data_offset(0);
op.set_data_length(op_blob.size());
@@ -108,7 +108,7 @@
// Create the data file.
test_utils::ScopedTempFile data_file(
- "SplitReplaceOrReplaceBzTest_data.XXXXXX");
+ "SplitReplaceOrReplaceXzTest_data.XXXXXX");
EXPECT_TRUE(test_utils::WriteFileVector(data_file.path(), op_blob));
int data_fd = open(data_file.path().c_str(), O_RDWR, 000);
EXPECT_GE(data_fd, 0);
@@ -118,14 +118,14 @@
// Split the operation.
vector<AnnotatedOperation> result_ops;
- PayloadVersion version(kChromeOSMajorPayloadVersion,
+ PayloadVersion version(kBrilloMajorPayloadVersion,
kSourceMinorPayloadVersion);
ASSERT_TRUE(ABGenerator::SplitAReplaceOp(
version, aop, part_file.path(), &result_ops, &blob_file));
// Check the result.
InstallOperation::Type expected_type =
- compressible ? InstallOperation::REPLACE_BZ : InstallOperation::REPLACE;
+ compressible ? InstallOperation::REPLACE_XZ : InstallOperation::REPLACE;
ASSERT_EQ(2U, result_ops.size());
@@ -143,7 +143,7 @@
part_data.begin() + op_ex1_offset + op_ex1_size);
brillo::Blob first_expected_blob;
if (compressible) {
- ASSERT_TRUE(BzipCompress(first_expected_data, &first_expected_blob));
+ ASSERT_TRUE(XzCompress(first_expected_data, &first_expected_blob));
} else {
first_expected_blob = first_expected_data;
}
@@ -173,7 +173,7 @@
part_data.begin() + op_ex2_offset + op_ex2_size);
brillo::Blob second_expected_blob;
if (compressible) {
- ASSERT_TRUE(BzipCompress(second_expected_data, &second_expected_blob));
+ ASSERT_TRUE(XzCompress(second_expected_data, &second_expected_blob));
} else {
second_expected_blob = second_expected_data;
}
@@ -199,8 +199,8 @@
}
}
-// Tests merging of REPLACE/REPLACE_BZ operations.
-void TestMergeReplaceOrReplaceBzOperations(InstallOperation::Type orig_type,
+// Tests merging of REPLACE/REPLACE_XZ operations.
+void TestMergeReplaceOrReplaceXzOperations(InstallOperation::Type orig_type,
bool compressible) {
const size_t first_op_num_blocks = 1;
const size_t second_op_num_blocks = 2;
@@ -221,7 +221,7 @@
}
ASSERT_EQ(part_size, part_data.size());
test_utils::ScopedTempFile part_file(
- "MergeReplaceOrReplaceBzTest_part.XXXXXX");
+ "MergeReplaceOrReplaceXzTest_part.XXXXXX");
ASSERT_TRUE(test_utils::WriteFileVector(part_file.path(), part_data));
// Create original operations and blob data.
@@ -239,7 +239,7 @@
if (orig_type == InstallOperation::REPLACE) {
first_op_blob = first_op_data;
} else {
- ASSERT_TRUE(BzipCompress(first_op_data, &first_op_blob));
+ ASSERT_TRUE(XzCompress(first_op_data, &first_op_blob));
}
first_op.set_data_offset(0);
first_op.set_data_length(first_op_blob.size());
@@ -259,7 +259,7 @@
if (orig_type == InstallOperation::REPLACE) {
second_op_blob = second_op_data;
} else {
- ASSERT_TRUE(BzipCompress(second_op_data, &second_op_blob));
+ ASSERT_TRUE(XzCompress(second_op_data, &second_op_blob));
}
second_op.set_data_offset(first_op_blob.size());
second_op.set_data_length(second_op_blob.size());
@@ -272,7 +272,7 @@
// Create the data file.
test_utils::ScopedTempFile data_file(
- "MergeReplaceOrReplaceBzTest_data.XXXXXX");
+ "MergeReplaceOrReplaceXzTest_data.XXXXXX");
EXPECT_TRUE(test_utils::WriteFileVector(data_file.path(), blob_data));
int data_fd = open(data_file.path().c_str(), O_RDWR, 000);
EXPECT_GE(data_fd, 0);
@@ -281,14 +281,14 @@
BlobFileWriter blob_file(data_fd, &data_file_size);
// Merge the operations.
- PayloadVersion version(kChromeOSMajorPayloadVersion,
+ PayloadVersion version(kBrilloMajorPayloadVersion,
kSourceMinorPayloadVersion);
EXPECT_TRUE(ABGenerator::MergeOperations(
&aops, version, 5, part_file.path(), &blob_file));
// Check the result.
InstallOperation::Type expected_op_type =
- compressible ? InstallOperation::REPLACE_BZ : InstallOperation::REPLACE;
+ compressible ? InstallOperation::REPLACE_XZ : InstallOperation::REPLACE;
EXPECT_EQ(1U, aops.size());
InstallOperation new_op = aops[0].op;
EXPECT_EQ(expected_op_type, new_op.type());
@@ -303,7 +303,7 @@
part_data.begin() + total_op_size);
brillo::Blob expected_blob;
if (compressible) {
- ASSERT_TRUE(BzipCompress(expected_data, &expected_blob));
+ ASSERT_TRUE(XzCompress(expected_data, &expected_blob));
} else {
expected_blob = expected_data;
}
@@ -384,19 +384,19 @@
}
TEST_F(ABGeneratorTest, SplitReplaceTest) {
- TestSplitReplaceOrReplaceBzOperation(InstallOperation::REPLACE, false);
+ TestSplitReplaceOrReplaceXzOperation(InstallOperation::REPLACE, false);
}
-TEST_F(ABGeneratorTest, SplitReplaceIntoReplaceBzTest) {
- TestSplitReplaceOrReplaceBzOperation(InstallOperation::REPLACE, true);
+TEST_F(ABGeneratorTest, SplitReplaceIntoReplaceXzTest) {
+ TestSplitReplaceOrReplaceXzOperation(InstallOperation::REPLACE, true);
}
-TEST_F(ABGeneratorTest, SplitReplaceBzTest) {
- TestSplitReplaceOrReplaceBzOperation(InstallOperation::REPLACE_BZ, true);
+TEST_F(ABGeneratorTest, SplitReplaceXzTest) {
+ TestSplitReplaceOrReplaceXzOperation(InstallOperation::REPLACE_XZ, true);
}
-TEST_F(ABGeneratorTest, SplitReplaceBzIntoReplaceTest) {
- TestSplitReplaceOrReplaceBzOperation(InstallOperation::REPLACE_BZ, false);
+TEST_F(ABGeneratorTest, SplitReplaceXzIntoReplaceTest) {
+ TestSplitReplaceOrReplaceXzOperation(InstallOperation::REPLACE_XZ, false);
}
TEST_F(ABGeneratorTest, SortOperationsByDestinationTest) {
@@ -464,7 +464,7 @@
aops.push_back(third_aop);
BlobFileWriter blob_file(0, nullptr);
- PayloadVersion version(kChromeOSMajorPayloadVersion,
+ PayloadVersion version(kBrilloMajorPayloadVersion,
kSourceMinorPayloadVersion);
EXPECT_TRUE(ABGenerator::MergeOperations(&aops, version, 5, "", &blob_file));
@@ -484,19 +484,19 @@
}
TEST_F(ABGeneratorTest, MergeReplaceOperationsTest) {
- TestMergeReplaceOrReplaceBzOperations(InstallOperation::REPLACE, false);
+ TestMergeReplaceOrReplaceXzOperations(InstallOperation::REPLACE, false);
}
-TEST_F(ABGeneratorTest, MergeReplaceOperationsToReplaceBzTest) {
- TestMergeReplaceOrReplaceBzOperations(InstallOperation::REPLACE, true);
+TEST_F(ABGeneratorTest, MergeReplaceOperationsToReplaceXzTest) {
+ TestMergeReplaceOrReplaceXzOperations(InstallOperation::REPLACE, true);
}
-TEST_F(ABGeneratorTest, MergeReplaceBzOperationsTest) {
- TestMergeReplaceOrReplaceBzOperations(InstallOperation::REPLACE_BZ, true);
+TEST_F(ABGeneratorTest, MergeReplaceXzOperationsTest) {
+ TestMergeReplaceOrReplaceXzOperations(InstallOperation::REPLACE_XZ, true);
}
-TEST_F(ABGeneratorTest, MergeReplaceBzOperationsToReplaceTest) {
- TestMergeReplaceOrReplaceBzOperations(InstallOperation::REPLACE_BZ, false);
+TEST_F(ABGeneratorTest, MergeReplaceXzOperationsToReplaceTest) {
+ TestMergeReplaceOrReplaceXzOperations(InstallOperation::REPLACE_XZ, false);
}
TEST_F(ABGeneratorTest, NoMergeOperationsTest) {
@@ -537,7 +537,7 @@
aops.push_back(fourth_aop);
BlobFileWriter blob_file(0, nullptr);
- PayloadVersion version(kChromeOSMajorPayloadVersion,
+ PayloadVersion version(kBrilloMajorPayloadVersion,
kSourceMinorPayloadVersion);
EXPECT_TRUE(ABGenerator::MergeOperations(&aops, version, 4, "", &blob_file));
diff --git a/payload_generator/cycle_breaker.cc b/payload_generator/cycle_breaker.cc
deleted file mode 100644
index d6eeed2..0000000
--- a/payload_generator/cycle_breaker.cc
+++ /dev/null
@@ -1,218 +0,0 @@
-//
-// Copyright (C) 2012 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#include "update_engine/payload_generator/cycle_breaker.h"
-
-#include <inttypes.h>
-
-#include <limits>
-#include <set>
-#include <string>
-#include <utility>
-
-#include <base/stl_util.h>
-#include <base/strings/string_util.h>
-#include <base/strings/stringprintf.h>
-
-#include "update_engine/payload_generator/graph_utils.h"
-#include "update_engine/payload_generator/tarjan.h"
-
-using std::make_pair;
-using std::set;
-using std::vector;
-
-namespace chromeos_update_engine {
-
-// This is the outer function from the original paper.
-void CycleBreaker::BreakCycles(const Graph& graph, set<Edge>* out_cut_edges) {
- cut_edges_.clear();
-
- // Make a copy, which we will modify by removing edges. Thus, in each
- // iteration subgraph_ is the current subgraph or the original with
- // vertices we desire. This variable was "A_K" in the original paper.
- subgraph_ = graph;
-
- // The paper calls for the "adjacency structure (i.e., graph) of
- // strong (-ly connected) component K with least vertex in subgraph
- // induced by {s, s + 1, ..., n}".
- // We arbitrarily order each vertex by its index in the graph. Thus,
- // each iteration, we are looking at the subgraph {s, s + 1, ..., n}
- // and looking for the strongly connected component with vertex s.
-
- TarjanAlgorithm tarjan;
- skipped_ops_ = 0;
-
- for (Graph::size_type i = 0; i < subgraph_.size(); i++) {
- InstallOperation::Type op_type = graph[i].aop.op.type();
- if (op_type == InstallOperation::REPLACE ||
- op_type == InstallOperation::REPLACE_BZ) {
- skipped_ops_++;
- continue;
- }
-
- if (i > 0) {
- // Erase node (i - 1) from subgraph_. First, erase what it points to
- subgraph_[i - 1].out_edges.clear();
- // Now, erase any pointers to node (i - 1)
- for (Graph::size_type j = i; j < subgraph_.size(); j++) {
- subgraph_[j].out_edges.erase(i - 1);
- }
- }
-
- // Calculate SCC (strongly connected component) with vertex i.
- vector<Vertex::Index> component_indexes;
- tarjan.Execute(i, &subgraph_, &component_indexes);
-
- // Set subgraph edges for the components in the SCC.
- for (vector<Vertex::Index>::iterator it = component_indexes.begin();
- it != component_indexes.end();
- ++it) {
- subgraph_[*it].subgraph_edges.clear();
- for (vector<Vertex::Index>::iterator jt = component_indexes.begin();
- jt != component_indexes.end();
- ++jt) {
- // If there's a link from *it -> *jt in the graph,
- // add a subgraph_ edge
- if (base::ContainsKey(subgraph_[*it].out_edges, *jt))
- subgraph_[*it].subgraph_edges.insert(*jt);
- }
- }
-
- current_vertex_ = i;
- blocked_.clear();
- blocked_.resize(subgraph_.size());
- blocked_graph_.clear();
- blocked_graph_.resize(subgraph_.size());
- Circuit(current_vertex_, 0);
- }
-
- out_cut_edges->swap(cut_edges_);
- LOG(INFO) << "Cycle breaker skipped " << skipped_ops_ << " ops.";
- DCHECK(stack_.empty());
-}
-
-static const size_t kMaxEdgesToConsider = 2;
-
-void CycleBreaker::HandleCircuit() {
- stack_.push_back(current_vertex_);
- CHECK_GE(stack_.size(), static_cast<vector<Vertex::Index>::size_type>(2));
- Edge min_edge = make_pair(stack_[0], stack_[1]);
- uint64_t min_edge_weight = std::numeric_limits<uint64_t>::max();
- size_t edges_considered = 0;
- for (vector<Vertex::Index>::const_iterator it = stack_.begin();
- it != (stack_.end() - 1);
- ++it) {
- Edge edge = make_pair(*it, *(it + 1));
- if (cut_edges_.find(edge) != cut_edges_.end()) {
- stack_.pop_back();
- return;
- }
- uint64_t edge_weight = graph_utils::EdgeWeight(subgraph_, edge);
- if (edge_weight < min_edge_weight) {
- min_edge_weight = edge_weight;
- min_edge = edge;
- }
- edges_considered++;
- if (edges_considered == kMaxEdgesToConsider)
- break;
- }
- cut_edges_.insert(min_edge);
- stack_.pop_back();
-}
-
-void CycleBreaker::Unblock(Vertex::Index u) {
- blocked_[u] = false;
-
- for (Vertex::EdgeMap::iterator it = blocked_graph_[u].out_edges.begin();
- it != blocked_graph_[u].out_edges.end();) {
- Vertex::Index w = it->first;
- blocked_graph_[u].out_edges.erase(it++);
- if (blocked_[w])
- Unblock(w);
- }
-}
-
-bool CycleBreaker::StackContainsCutEdge() const {
- for (vector<Vertex::Index>::const_iterator it = ++stack_.begin(),
- e = stack_.end();
- it != e;
- ++it) {
- Edge edge = make_pair(*(it - 1), *it);
- if (base::ContainsKey(cut_edges_, edge)) {
- return true;
- }
- }
- return false;
-}
-
-bool CycleBreaker::Circuit(Vertex::Index vertex, Vertex::Index depth) {
- // "vertex" was "v" in the original paper.
- bool found = false; // Was "f" in the original paper.
- stack_.push_back(vertex);
- blocked_[vertex] = true;
- {
- static int counter = 0;
- counter++;
- if (counter == 10000) {
- counter = 0;
- std::string stack_str;
- for (Vertex::Index index : stack_) {
- stack_str += std::to_string(index);
- stack_str += " -> ";
- }
- LOG(INFO) << "stack: " << stack_str;
- }
- }
-
- for (Vertex::SubgraphEdgeMap::iterator w =
- subgraph_[vertex].subgraph_edges.begin();
- w != subgraph_[vertex].subgraph_edges.end();
- ++w) {
- if (*w == current_vertex_) {
- // The original paper called for printing stack_ followed by
- // current_vertex_ here, which is a cycle. Instead, we call
- // HandleCircuit() to break it.
- HandleCircuit();
- found = true;
- } else if (!blocked_[*w]) {
- if (Circuit(*w, depth + 1)) {
- found = true;
- if ((depth > kMaxEdgesToConsider) || StackContainsCutEdge())
- break;
- }
- }
- }
-
- if (found) {
- Unblock(vertex);
- } else {
- for (Vertex::SubgraphEdgeMap::iterator w =
- subgraph_[vertex].subgraph_edges.begin();
- w != subgraph_[vertex].subgraph_edges.end();
- ++w) {
- if (blocked_graph_[*w].out_edges.find(vertex) ==
- blocked_graph_[*w].out_edges.end()) {
- blocked_graph_[*w].out_edges.insert(
- make_pair(vertex, EdgeProperties()));
- }
- }
- }
- CHECK_EQ(vertex, stack_.back());
- stack_.pop_back();
- return found;
-}
-
-} // namespace chromeos_update_engine
diff --git a/payload_generator/cycle_breaker.h b/payload_generator/cycle_breaker.h
deleted file mode 100644
index 01518fe..0000000
--- a/payload_generator/cycle_breaker.h
+++ /dev/null
@@ -1,71 +0,0 @@
-//
-// Copyright (C) 2010 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#ifndef UPDATE_ENGINE_PAYLOAD_GENERATOR_CYCLE_BREAKER_H_
-#define UPDATE_ENGINE_PAYLOAD_GENERATOR_CYCLE_BREAKER_H_
-
-// This is a modified implementation of Donald B. Johnson's algorithm for
-// finding all elementary cycles (a.k.a. circuits) in a directed graph.
-// See the paper "Finding All the Elementary Circuits of a Directed Graph"
-// at http://dutta.csc.ncsu.edu/csc791_spring07/wrap/circuits_johnson.pdf
-// for reference.
-
-// Note: this version of the algorithm not only finds cycles, but breaks them.
-// It uses a simple greedy algorithm for cutting: when a cycle is discovered,
-// the edge with the least weight is cut. Longer term we may wish to do
-// something more intelligent, since the goal is (ideally) to minimize the
-// sum of the weights of all cut cycles. In practice, it's intractable
-// to consider all cycles before cutting any; there are simply too many.
-// In a sample graph representative of a typical workload, I found over
-// 5 * 10^15 cycles.
-
-#include <set>
-#include <vector>
-
-#include "update_engine/payload_generator/graph_types.h"
-
-namespace chromeos_update_engine {
-
-class CycleBreaker {
- public:
- CycleBreaker() : skipped_ops_(0) {}
- // out_cut_edges is replaced with the cut edges.
- void BreakCycles(const Graph& graph, std::set<Edge>* out_cut_edges);
-
- size_t skipped_ops() const { return skipped_ops_; }
-
- private:
- void HandleCircuit();
- void Unblock(Vertex::Index u);
- bool Circuit(Vertex::Index vertex, Vertex::Index depth);
- bool StackContainsCutEdge() const;
-
- std::vector<bool> blocked_; // "blocked" in the paper
- Vertex::Index current_vertex_; // "s" in the paper
- std::vector<Vertex::Index> stack_; // the stack variable in the paper
- Graph subgraph_; // "A_K" in the paper
- Graph blocked_graph_; // "B" in the paper
-
- std::set<Edge> cut_edges_;
-
- // Number of operations skipped b/c we know they don't have any
- // incoming edges.
- size_t skipped_ops_;
-};
-
-} // namespace chromeos_update_engine
-
-#endif // UPDATE_ENGINE_PAYLOAD_GENERATOR_CYCLE_BREAKER_H_
diff --git a/payload_generator/cycle_breaker_unittest.cc b/payload_generator/cycle_breaker_unittest.cc
deleted file mode 100644
index fdcf49b..0000000
--- a/payload_generator/cycle_breaker_unittest.cc
+++ /dev/null
@@ -1,279 +0,0 @@
-//
-// Copyright (C) 2010 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#include "update_engine/payload_generator/cycle_breaker.h"
-
-#include <set>
-#include <string>
-#include <utility>
-#include <vector>
-
-#include <base/logging.h>
-#include <base/stl_util.h>
-#include <gtest/gtest.h>
-
-#include "update_engine/payload_generator/graph_types.h"
-
-using std::make_pair;
-using std::pair;
-using std::set;
-using std::string;
-using std::vector;
-
-namespace chromeos_update_engine {
-
-namespace {
-void SetOpForNodes(Graph* graph) {
- for (Vertex& vertex : *graph) {
- vertex.aop.op.set_type(InstallOperation::MOVE);
- }
-}
-} // namespace
-
-class CycleBreakerTest : public ::testing::Test {};
-
-TEST(CycleBreakerTest, SimpleTest) {
- int counter = 0;
- const Vertex::Index n_a = counter++;
- const Vertex::Index n_b = counter++;
- const Vertex::Index n_c = counter++;
- const Vertex::Index n_d = counter++;
- const Vertex::Index n_e = counter++;
- const Vertex::Index n_f = counter++;
- const Vertex::Index n_g = counter++;
- const Vertex::Index n_h = counter++;
- const Graph::size_type kNodeCount = counter++;
-
- Graph graph(kNodeCount);
- SetOpForNodes(&graph);
-
- graph[n_a].out_edges.insert(make_pair(n_e, EdgeProperties()));
- graph[n_a].out_edges.insert(make_pair(n_f, EdgeProperties()));
- graph[n_b].out_edges.insert(make_pair(n_a, EdgeProperties()));
- graph[n_c].out_edges.insert(make_pair(n_d, EdgeProperties()));
- graph[n_d].out_edges.insert(make_pair(n_e, EdgeProperties()));
- graph[n_d].out_edges.insert(make_pair(n_f, EdgeProperties()));
- graph[n_e].out_edges.insert(make_pair(n_b, EdgeProperties()));
- graph[n_e].out_edges.insert(make_pair(n_c, EdgeProperties()));
- graph[n_e].out_edges.insert(make_pair(n_f, EdgeProperties()));
- graph[n_f].out_edges.insert(make_pair(n_g, EdgeProperties()));
- graph[n_g].out_edges.insert(make_pair(n_h, EdgeProperties()));
- graph[n_h].out_edges.insert(make_pair(n_g, EdgeProperties()));
-
- CycleBreaker breaker;
-
- set<Edge> broken_edges;
- breaker.BreakCycles(graph, &broken_edges);
-
- // The following cycles must be cut:
- // A->E->B
- // C->D->E
- // G->H
-
- EXPECT_TRUE(base::ContainsKey(broken_edges, make_pair(n_a, n_e)) ||
- base::ContainsKey(broken_edges, make_pair(n_e, n_b)) ||
- base::ContainsKey(broken_edges, make_pair(n_b, n_a)));
- EXPECT_TRUE(base::ContainsKey(broken_edges, make_pair(n_c, n_d)) ||
- base::ContainsKey(broken_edges, make_pair(n_d, n_e)) ||
- base::ContainsKey(broken_edges, make_pair(n_e, n_c)));
- EXPECT_TRUE(base::ContainsKey(broken_edges, make_pair(n_g, n_h)) ||
- base::ContainsKey(broken_edges, make_pair(n_h, n_g)));
- EXPECT_EQ(3U, broken_edges.size());
-}
-
-namespace {
-pair<Vertex::Index, EdgeProperties> EdgeWithWeight(Vertex::Index dest,
- uint64_t weight) {
- EdgeProperties props;
- props.extents.resize(1);
- props.extents[0].set_num_blocks(weight);
- return make_pair(dest, props);
-}
-} // namespace
-
-// This creates a bunch of cycles like this:
-//
-// root <------.
-// (t)-> / | \ |
-// V V V |
-// N N N |
-// \ | / |
-// VVV |
-// N |
-// / | \ |
-// V V V |
-// N N N |
-// ... |
-// (s)-> \ | / |
-// VVV |
-// N |
-// \_________/
-//
-// such that the original cutting algo would cut edges (s). We changed
-// the algorithm to cut cycles (t) instead, since they are closer to the
-// root, and that can massively speed up cycle cutting.
-TEST(CycleBreakerTest, AggressiveCutTest) {
- size_t counter = 0;
-
- const int kNodesPerGroup = 4;
- const int kGroups = 33;
-
- Graph graph(kGroups * kNodesPerGroup + 1); // + 1 for the root node
- SetOpForNodes(&graph);
-
- const Vertex::Index n_root = counter++;
-
- Vertex::Index last_hub = n_root;
- for (int i = 0; i < kGroups; i++) {
- uint64_t weight = 5;
- if (i == 0)
- weight = 2;
- else if (i == (kGroups - 1))
- weight = 1;
-
- const Vertex::Index next_hub = counter++;
-
- for (int j = 0; j < (kNodesPerGroup - 1); j++) {
- const Vertex::Index node = counter++;
- graph[last_hub].out_edges.insert(EdgeWithWeight(node, weight));
- graph[node].out_edges.insert(EdgeWithWeight(next_hub, weight));
- }
- last_hub = next_hub;
- }
-
- graph[last_hub].out_edges.insert(EdgeWithWeight(n_root, 5));
-
- EXPECT_EQ(counter, graph.size());
-
- CycleBreaker breaker;
-
- set<Edge> broken_edges;
- LOG(INFO) << "If this hangs for more than 1 second, the test has failed.";
- breaker.BreakCycles(graph, &broken_edges);
-
- set<Edge> expected_cuts;
-
- for (Vertex::EdgeMap::const_iterator it = graph[n_root].out_edges.begin(),
- e = graph[n_root].out_edges.end();
- it != e;
- ++it) {
- expected_cuts.insert(make_pair(n_root, it->first));
- }
-
- EXPECT_TRUE(broken_edges == expected_cuts);
-}
-
-TEST(CycleBreakerTest, WeightTest) {
- size_t counter = 0;
- const Vertex::Index n_a = counter++;
- const Vertex::Index n_b = counter++;
- const Vertex::Index n_c = counter++;
- const Vertex::Index n_d = counter++;
- const Vertex::Index n_e = counter++;
- const Vertex::Index n_f = counter++;
- const Vertex::Index n_g = counter++;
- const Vertex::Index n_h = counter++;
- const Vertex::Index n_i = counter++;
- const Vertex::Index n_j = counter++;
- const Graph::size_type kNodeCount = counter++;
-
- Graph graph(kNodeCount);
- SetOpForNodes(&graph);
-
- graph[n_a].out_edges.insert(EdgeWithWeight(n_b, 4));
- graph[n_a].out_edges.insert(EdgeWithWeight(n_f, 3));
- graph[n_a].out_edges.insert(EdgeWithWeight(n_h, 2));
- graph[n_b].out_edges.insert(EdgeWithWeight(n_a, 3));
- graph[n_b].out_edges.insert(EdgeWithWeight(n_c, 4));
- graph[n_c].out_edges.insert(EdgeWithWeight(n_b, 5));
- graph[n_c].out_edges.insert(EdgeWithWeight(n_d, 3));
- graph[n_d].out_edges.insert(EdgeWithWeight(n_a, 6));
- graph[n_d].out_edges.insert(EdgeWithWeight(n_e, 3));
- graph[n_e].out_edges.insert(EdgeWithWeight(n_d, 4));
- graph[n_e].out_edges.insert(EdgeWithWeight(n_g, 5));
- graph[n_f].out_edges.insert(EdgeWithWeight(n_g, 2));
- graph[n_g].out_edges.insert(EdgeWithWeight(n_f, 3));
- graph[n_g].out_edges.insert(EdgeWithWeight(n_d, 5));
- graph[n_h].out_edges.insert(EdgeWithWeight(n_i, 8));
- graph[n_i].out_edges.insert(EdgeWithWeight(n_e, 4));
- graph[n_i].out_edges.insert(EdgeWithWeight(n_h, 9));
- graph[n_i].out_edges.insert(EdgeWithWeight(n_j, 6));
-
- CycleBreaker breaker;
-
- set<Edge> broken_edges;
- breaker.BreakCycles(graph, &broken_edges);
-
- // These are required to be broken:
- EXPECT_TRUE(base::ContainsKey(broken_edges, make_pair(n_b, n_a)));
- EXPECT_TRUE(base::ContainsKey(broken_edges, make_pair(n_b, n_c)));
- EXPECT_TRUE(base::ContainsKey(broken_edges, make_pair(n_d, n_e)));
- EXPECT_TRUE(base::ContainsKey(broken_edges, make_pair(n_f, n_g)));
- EXPECT_TRUE(base::ContainsKey(broken_edges, make_pair(n_h, n_i)));
-}
-
-TEST(CycleBreakerTest, UnblockGraphTest) {
- size_t counter = 0;
- const Vertex::Index n_a = counter++;
- const Vertex::Index n_b = counter++;
- const Vertex::Index n_c = counter++;
- const Vertex::Index n_d = counter++;
- const Graph::size_type kNodeCount = counter++;
-
- Graph graph(kNodeCount);
- SetOpForNodes(&graph);
-
- graph[n_a].out_edges.insert(EdgeWithWeight(n_b, 1));
- graph[n_a].out_edges.insert(EdgeWithWeight(n_c, 1));
- graph[n_b].out_edges.insert(EdgeWithWeight(n_c, 2));
- graph[n_c].out_edges.insert(EdgeWithWeight(n_b, 2));
- graph[n_b].out_edges.insert(EdgeWithWeight(n_d, 2));
- graph[n_d].out_edges.insert(EdgeWithWeight(n_a, 2));
-
- CycleBreaker breaker;
-
- set<Edge> broken_edges;
- breaker.BreakCycles(graph, &broken_edges);
-
- // These are required to be broken:
- EXPECT_TRUE(base::ContainsKey(broken_edges, make_pair(n_a, n_b)));
- EXPECT_TRUE(base::ContainsKey(broken_edges, make_pair(n_a, n_c)));
-}
-
-TEST(CycleBreakerTest, SkipOpsTest) {
- size_t counter = 0;
- const Vertex::Index n_a = counter++;
- const Vertex::Index n_b = counter++;
- const Vertex::Index n_c = counter++;
- const Graph::size_type kNodeCount = counter++;
-
- Graph graph(kNodeCount);
- SetOpForNodes(&graph);
- graph[n_a].aop.op.set_type(InstallOperation::REPLACE_BZ);
- graph[n_c].aop.op.set_type(InstallOperation::REPLACE);
-
- graph[n_a].out_edges.insert(EdgeWithWeight(n_b, 1));
- graph[n_c].out_edges.insert(EdgeWithWeight(n_b, 1));
-
- CycleBreaker breaker;
-
- set<Edge> broken_edges;
- breaker.BreakCycles(graph, &broken_edges);
-
- EXPECT_EQ(2U, breaker.skipped_ops());
-}
-
-} // namespace chromeos_update_engine
diff --git a/payload_generator/deflate_utils.cc b/payload_generator/deflate_utils.cc
index 01402dd..c874bfd 100644
--- a/payload_generator/deflate_utils.cc
+++ b/payload_generator/deflate_utils.cc
@@ -46,7 +46,7 @@
// TODO(*): Optimize this so we don't have to read all extents into memory in
// case it is large.
bool CopyExtentsToFile(const string& in_path,
- const vector<Extent> extents,
+ const vector<Extent>& extents,
const string& out_path,
size_t block_size) {
brillo::Blob data(utils::BlocksInExtents(extents) * block_size);
@@ -284,8 +284,9 @@
TEST_AND_RETURN_FALSE(
CopyExtentsToFile(part.path, file.extents, path.value(), kBlockSize));
// Test if it is actually a Squashfs file.
- auto sqfs =
- SquashfsFilesystem::CreateFromFile(path.value(), extract_deflates);
+ auto sqfs = SquashfsFilesystem::CreateFromFile(path.value(),
+ extract_deflates,
+ /*load_settings=*/false);
if (sqfs) {
// It is an squashfs file. Get its files to replace with itself.
vector<FilesystemInterface::File> files;
@@ -306,7 +307,7 @@
}
}
- if (is_regular_file && extract_deflates) {
+ if (is_regular_file && extract_deflates && !file.is_compressed) {
// Search for deflates if the file is in zip or gzip format.
// .zvoice files may eventually move out of rootfs. If that happens,
// remove ".zvoice" (crbug.com/782918).
diff --git a/payload_generator/delta_diff_generator.cc b/payload_generator/delta_diff_generator.cc
index d484d32..595a41e 100644
--- a/payload_generator/delta_diff_generator.cc
+++ b/payload_generator/delta_diff_generator.cc
@@ -37,7 +37,6 @@
#include "update_engine/payload_generator/blob_file_writer.h"
#include "update_engine/payload_generator/delta_diff_utils.h"
#include "update_engine/payload_generator/full_update_generator.h"
-#include "update_engine/payload_generator/inplace_generator.h"
#include "update_engine/payload_generator/payload_file.h"
using std::string;
@@ -93,13 +92,8 @@
unique_ptr<OperationsGenerator> strategy;
if (!old_part.path.empty()) {
// Delta update.
- if (config.version.minor == kInPlaceMinorPayloadVersion) {
- LOG(INFO) << "Using generator InplaceGenerator().";
- strategy.reset(new InplaceGenerator());
- } else {
- LOG(INFO) << "Using generator ABGenerator().";
- strategy.reset(new ABGenerator());
- }
+ LOG(INFO) << "Using generator ABGenerator().";
+ strategy.reset(new ABGenerator());
} else {
LOG(INFO) << "Using generator FullUpdateGenerator().";
strategy.reset(new FullUpdateGenerator());
@@ -110,11 +104,6 @@
TEST_AND_RETURN_FALSE(strategy->GenerateOperations(
config, old_part, new_part, &blob_file, &aops));
- // Filter the no-operations. OperationsGenerators should not output this
- // kind of operations normally, but this is an extra step to fix that if
- // happened.
- diff_utils::FilterNoopOperations(&aops);
-
TEST_AND_RETURN_FALSE(payload.AddPartition(old_part, new_part, aops));
}
}
diff --git a/payload_generator/delta_diff_utils.cc b/payload_generator/delta_diff_utils.cc
index 4ba6e24..22752e8 100644
--- a/payload_generator/delta_diff_utils.cc
+++ b/payload_generator/delta_diff_utils.cc
@@ -83,103 +83,6 @@
const int kBrotliCompressionQuality = 11;
-// Process a range of blocks from |range_start| to |range_end| in the extent at
-// position |*idx_p| of |extents|. If |do_remove| is true, this range will be
-// removed, which may cause the extent to be trimmed, split or removed entirely.
-// The value of |*idx_p| is updated to point to the next extent to be processed.
-// Returns true iff the next extent to process is a new or updated one.
-bool ProcessExtentBlockRange(vector<Extent>* extents,
- size_t* idx_p,
- const bool do_remove,
- uint64_t range_start,
- uint64_t range_end) {
- size_t idx = *idx_p;
- uint64_t start_block = (*extents)[idx].start_block();
- uint64_t num_blocks = (*extents)[idx].num_blocks();
- uint64_t range_size = range_end - range_start;
-
- if (do_remove) {
- if (range_size == num_blocks) {
- // Remove the entire extent.
- extents->erase(extents->begin() + idx);
- } else if (range_end == num_blocks) {
- // Trim the end of the extent.
- (*extents)[idx].set_num_blocks(num_blocks - range_size);
- idx++;
- } else if (range_start == 0) {
- // Trim the head of the extent.
- (*extents)[idx].set_start_block(start_block + range_size);
- (*extents)[idx].set_num_blocks(num_blocks - range_size);
- } else {
- // Trim the middle, splitting the remainder into two parts.
- (*extents)[idx].set_num_blocks(range_start);
- Extent e;
- e.set_start_block(start_block + range_end);
- e.set_num_blocks(num_blocks - range_end);
- idx++;
- extents->insert(extents->begin() + idx, e);
- }
- } else if (range_end == num_blocks) {
- // Done with this extent.
- idx++;
- } else {
- return false;
- }
-
- *idx_p = idx;
- return true;
-}
-
-// Remove identical corresponding block ranges in |src_extents| and
-// |dst_extents|. Used for preventing moving of blocks onto themselves during
-// MOVE operations. The value of |total_bytes| indicates the actual length of
-// content; this may be slightly less than the total size of blocks, in which
-// case the last block is only partly occupied with data. Returns the total
-// number of bytes removed.
-size_t RemoveIdenticalBlockRanges(vector<Extent>* src_extents,
- vector<Extent>* dst_extents,
- const size_t total_bytes) {
- size_t src_idx = 0;
- size_t dst_idx = 0;
- uint64_t src_offset = 0, dst_offset = 0;
- size_t removed_bytes = 0, nonfull_block_bytes;
- bool do_remove = false;
- while (src_idx < src_extents->size() && dst_idx < dst_extents->size()) {
- do_remove = ((*src_extents)[src_idx].start_block() + src_offset ==
- (*dst_extents)[dst_idx].start_block() + dst_offset);
-
- uint64_t src_num_blocks = (*src_extents)[src_idx].num_blocks();
- uint64_t dst_num_blocks = (*dst_extents)[dst_idx].num_blocks();
- uint64_t min_num_blocks =
- std::min(src_num_blocks - src_offset, dst_num_blocks - dst_offset);
- uint64_t prev_src_offset = src_offset;
- uint64_t prev_dst_offset = dst_offset;
- src_offset += min_num_blocks;
- dst_offset += min_num_blocks;
-
- bool new_src = ProcessExtentBlockRange(
- src_extents, &src_idx, do_remove, prev_src_offset, src_offset);
- bool new_dst = ProcessExtentBlockRange(
- dst_extents, &dst_idx, do_remove, prev_dst_offset, dst_offset);
- if (new_src) {
- src_offset = 0;
- }
- if (new_dst) {
- dst_offset = 0;
- }
-
- if (do_remove)
- removed_bytes += min_num_blocks * kBlockSize;
- }
-
- // If we removed the last block and this block is only partly used by file
- // content, deduct the unused portion from the total removed byte count.
- if (do_remove && (nonfull_block_bytes = total_bytes % kBlockSize))
- removed_bytes -= kBlockSize - nonfull_block_bytes;
-
- return removed_bytes;
-}
-
// Storing a diff operation has more overhead over replace operation in the
// manifest, we need to store an additional src_sha256_hash which is 32 bytes
// and not compressible, and also src_extents which could use anywhere from a
@@ -318,13 +221,11 @@
return;
}
- if (!version_.InplaceUpdate()) {
- if (!ABGenerator::FragmentOperations(
- version_, &file_aops_, new_part_, blob_file_)) {
- LOG(ERROR) << "Failed to fragment operations for " << name_;
- failed_ = true;
- return;
- }
+ if (!ABGenerator::FragmentOperations(
+ version_, &file_aops_, new_part_, blob_file_)) {
+ LOG(ERROR) << "Failed to fragment operations for " << name_;
+ failed_ = true;
+ return;
}
LOG(INFO) << "Encoded file " << name_ << " (" << new_extents_blocks_
@@ -349,12 +250,13 @@
if (old_file_iter != old_files_map.end())
return old_file_iter->second;
- // No old file match for the new file name, use a similar file with the
- // shortest levenshtein distance.
+ // No old file matches the new file name. Use a similar file with the
+ // shortest Levenshtein distance instead.
// This works great if the file has version number in it, but even for
// a completely new file, using a similar file can still help.
- int min_distance = new_file_name.size();
- const FilesystemInterface::File* old_file;
+ int min_distance =
+ LevenshteinDistance(new_file_name, old_files_map.begin()->first);
+ const FilesystemInterface::File* old_file = &old_files_map.begin()->second;
for (const auto& pair : old_files_map) {
int distance = LevenshteinDistance(new_file_name, pair.first);
if (distance < min_distance) {
@@ -447,12 +349,8 @@
// from the same source blocks. At that time, this code can die. -adlr
FilesystemInterface::File old_file =
GetOldFile(old_files_map, new_file.name);
- vector<Extent> old_file_extents;
- if (version.InplaceUpdate())
- old_file_extents =
- FilterExtentRanges(old_file.extents, old_visited_blocks);
- else
- old_file_extents = FilterExtentRanges(old_file.extents, old_zero_blocks);
+ auto old_file_extents =
+ FilterExtentRanges(old_file.extents, old_zero_blocks);
old_visited_blocks.AddExtents(old_file_extents);
file_delta_processors.emplace_back(old_part.path,
@@ -541,21 +439,6 @@
&old_block_ids,
&new_block_ids));
- // If the update is inplace, we map all the blocks that didn't move,
- // regardless of the contents since they are already copied and no operation
- // is required.
- if (version.InplaceUpdate()) {
- uint64_t num_blocks = std::min(old_num_blocks, new_num_blocks);
- for (uint64_t block = 0; block < num_blocks; block++) {
- if (old_block_ids[block] == new_block_ids[block] &&
- !old_visited_blocks->ContainsBlock(block) &&
- !new_visited_blocks->ContainsBlock(block)) {
- old_visited_blocks->AddBlock(block);
- new_visited_blocks->AddBlock(block);
- }
- }
- }
-
// A mapping from the block_id to the list of block numbers with that block id
// in the old partition. This is used to lookup where in the old partition
// is a block from the new partition.
@@ -602,10 +485,6 @@
AppendBlockToExtents(&old_identical_blocks,
old_blocks_map_it->second.back());
AppendBlockToExtents(&new_identical_blocks, block);
- // We can't reuse source blocks in minor version 1 because the cycle
- // breaking algorithm used in the in-place update doesn't support that.
- if (version.InplaceUpdate())
- old_blocks_map_it->second.pop_back();
}
if (chunk_blocks == -1)
@@ -657,9 +536,7 @@
aops->emplace_back();
AnnotatedOperation* aop = &aops->back();
aop->name = "<identical-blocks>";
- aop->op.set_type(version.OperationAllowed(InstallOperation::SOURCE_COPY)
- ? InstallOperation::SOURCE_COPY
- : InstallOperation::MOVE);
+ aop->op.set_type(InstallOperation::SOURCE_COPY);
uint64_t chunk_num_blocks =
std::min(static_cast<uint64_t>(extent.num_blocks()) - op_block_offset,
@@ -704,6 +581,11 @@
InstallOperation operation;
uint64_t total_blocks = utils::BlocksInExtents(new_extents);
+ if (chunk_blocks == 0) {
+ LOG(ERROR) << "Invalid number of chunk_blocks. Cannot be 0.";
+ return false;
+ }
+
if (chunk_blocks == -1)
chunk_blocks = total_blocks;
@@ -732,13 +614,8 @@
// Check if the operation writes nothing.
if (operation.dst_extents_size() == 0) {
- if (operation.type() == InstallOperation::MOVE) {
- LOG(INFO) << "Empty MOVE operation (" << name << "), skipping";
- continue;
- } else {
- LOG(ERROR) << "Empty non-MOVE operation";
- return false;
- }
+ LOG(ERROR) << "Empty non-MOVE operation";
+ return false;
}
// Now, insert into the list of operations.
@@ -828,8 +705,7 @@
// Disable bsdiff, and puffdiff when the data is too big.
bool bsdiff_allowed =
- version.OperationAllowed(InstallOperation::SOURCE_BSDIFF) ||
- version.OperationAllowed(InstallOperation::BSDIFF);
+ version.OperationAllowed(InstallOperation::SOURCE_BSDIFF);
if (bsdiff_allowed &&
blocks_to_read * kBlockSize > kMaxBsdiffDestinationSize) {
LOG(INFO) << "bsdiff blacklisted, data too big: "
@@ -878,9 +754,7 @@
kBlockSize));
if (old_data == new_data) {
// No change in data.
- operation.set_type(version.OperationAllowed(InstallOperation::SOURCE_COPY)
- ? InstallOperation::SOURCE_COPY
- : InstallOperation::MOVE);
+ operation.set_type(InstallOperation::SOURCE_COPY);
data_blob = brillo::Blob();
} else if (IsDiffOperationBetter(
operation, data_blob.size(), 0, src_extents.size())) {
@@ -892,7 +766,7 @@
ScopedPathUnlinker unlinker(patch.value());
std::unique_ptr<bsdiff::PatchWriterInterface> bsdiff_patch_writer;
- InstallOperation::Type operation_type = InstallOperation::BSDIFF;
+ InstallOperation::Type operation_type = InstallOperation::SOURCE_BSDIFF;
if (version.OperationAllowed(InstallOperation::BROTLI_BSDIFF)) {
bsdiff_patch_writer =
bsdiff::CreateBSDF2PatchWriter(patch.value(),
@@ -901,9 +775,6 @@
operation_type = InstallOperation::BROTLI_BSDIFF;
} else {
bsdiff_patch_writer = bsdiff::CreateBsdiffPatchWriter(patch.value());
- if (version.OperationAllowed(InstallOperation::SOURCE_BSDIFF)) {
- operation_type = InstallOperation::SOURCE_BSDIFF;
- }
}
brillo::Blob bsdiff_delta;
@@ -976,23 +847,14 @@
}
}
- // Remove identical src/dst block ranges in MOVE operations.
- if (operation.type() == InstallOperation::MOVE) {
- auto removed_bytes =
- RemoveIdenticalBlockRanges(&src_extents, &dst_extents, new_data.size());
- operation.set_src_length(old_data.size() - removed_bytes);
- operation.set_dst_length(new_data.size() - removed_bytes);
- }
-
// WARNING: We always set legacy |src_length| and |dst_length| fields for
// BSDIFF. For SOURCE_BSDIFF we only set them for minor version 3 and
// lower. This is needed because we used to use these two parameters in the
// SOURCE_BSDIFF for minor version 3 and lower, but we do not need them
// anymore in higher minor versions. This means if we stop adding these
// parameters for those minor versions, the delta payloads will be invalid.
- if (operation.type() == InstallOperation::BSDIFF ||
- (operation.type() == InstallOperation::SOURCE_BSDIFF &&
- version.minor <= kOpSrcHashMinorPayloadVersion)) {
+ if (operation.type() == InstallOperation::SOURCE_BSDIFF &&
+ version.minor <= kOpSrcHashMinorPayloadVersion) {
operation.set_src_length(old_data.size());
operation.set_dst_length(new_data.size());
}
@@ -1021,22 +883,6 @@
op_type == InstallOperation::DISCARD);
}
-// Returns true if |op| is a no-op operation that doesn't do any useful work
-// (e.g., a move operation that copies blocks onto themselves).
-bool IsNoopOperation(const InstallOperation& op) {
- return (op.type() == InstallOperation::MOVE &&
- ExpandExtents(op.src_extents()) == ExpandExtents(op.dst_extents()));
-}
-
-void FilterNoopOperations(vector<AnnotatedOperation>* ops) {
- ops->erase(std::remove_if(ops->begin(),
- ops->end(),
- [](const AnnotatedOperation& aop) {
- return IsNoopOperation(aop.op);
- }),
- ops->end());
-}
-
bool InitializePartitionInfo(const PartitionConfig& part, PartitionInfo* info) {
info->set_size(part.size);
HashCalculator hasher;
diff --git a/payload_generator/delta_diff_utils.h b/payload_generator/delta_diff_utils.h
index 2211b30..c75d16d 100644
--- a/payload_generator/delta_diff_utils.h
+++ b/payload_generator/delta_diff_utils.h
@@ -127,14 +127,6 @@
// Returns true if an operation with type |op_type| has no |src_extents|.
bool IsNoSourceOperation(InstallOperation::Type op_type);
-// Returns true if |op| is a no-op operation that doesn't do any useful work
-// (e.g., a move operation that copies blocks onto themselves).
-bool IsNoopOperation(const InstallOperation& op);
-
-// Filters all the operations that are no-op, maintaining the relative order
-// of the rest of the operations.
-void FilterNoopOperations(std::vector<AnnotatedOperation>* ops);
-
bool InitializePartitionInfo(const PartitionConfig& partition,
PartitionInfo* info);
diff --git a/payload_generator/delta_diff_utils_unittest.cc b/payload_generator/delta_diff_utils_unittest.cc
index b2950e8..0857f9c 100644
--- a/payload_generator/delta_diff_utils_unittest.cc
+++ b/payload_generator/delta_diff_utils_unittest.cc
@@ -136,7 +136,7 @@
bool RunDeltaMovedAndZeroBlocks(ssize_t chunk_blocks,
uint32_t minor_version) {
BlobFileWriter blob_file(blob_fd_, &blob_size_);
- PayloadVersion version(kChromeOSMajorPayloadVersion, minor_version);
+ PayloadVersion version(kBrilloMajorPayloadVersion, minor_version);
ExtentRanges old_zero_blocks;
return diff_utils::DeltaMovedAndZeroBlocks(&aops_,
old_part_.path,
@@ -194,164 +194,6 @@
}
}
-TEST_F(DeltaDiffUtilsTest, MoveSmallTest) {
- brillo::Blob data_blob(block_size_);
- test_utils::FillWithData(&data_blob);
-
- // The old file is on a different block than the new one.
- vector<Extent> old_extents = {ExtentForRange(11, 1)};
- vector<Extent> new_extents = {ExtentForRange(1, 1)};
-
- EXPECT_TRUE(WriteExtents(old_part_.path, old_extents, kBlockSize, data_blob));
- EXPECT_TRUE(WriteExtents(new_part_.path, new_extents, kBlockSize, data_blob));
-
- brillo::Blob data;
- InstallOperation op;
- EXPECT_TRUE(diff_utils::ReadExtentsToDiff(
- old_part_.path,
- new_part_.path,
- old_extents,
- new_extents,
- {}, // old_deflates
- {}, // new_deflates
- PayloadVersion(kChromeOSMajorPayloadVersion, kInPlaceMinorPayloadVersion),
- &data,
- &op));
- EXPECT_TRUE(data.empty());
-
- EXPECT_TRUE(op.has_type());
- EXPECT_EQ(InstallOperation::MOVE, op.type());
- EXPECT_FALSE(op.has_data_offset());
- EXPECT_FALSE(op.has_data_length());
- EXPECT_EQ(1, op.src_extents_size());
- EXPECT_EQ(kBlockSize, op.src_length());
- EXPECT_EQ(1, op.dst_extents_size());
- EXPECT_EQ(kBlockSize, op.dst_length());
- EXPECT_EQ(utils::BlocksInExtents(op.src_extents()),
- utils::BlocksInExtents(op.dst_extents()));
- EXPECT_EQ(1U, utils::BlocksInExtents(op.dst_extents()));
-}
-
-TEST_F(DeltaDiffUtilsTest, MoveWithSameBlock) {
- // Setup the old/new files so that it has immobile chunks; we make sure to
- // utilize all sub-cases of such chunks: blocks 21--22 induce a split (src)
- // and complete removal (dst), whereas blocks 24--25 induce trimming of the
- // tail (src) and head (dst) of extents. The final block (29) is used for
- // ensuring we properly account for the number of bytes removed in cases where
- // the last block is partly filled. The detailed configuration:
- //
- // Old: [ 20 21 22 23 24 25 ] [ 28 29 ]
- // New: [ 18 ] [ 21 22 ] [ 20 ] [ 24 25 26 ] [ 29 ]
- // Same: ^^ ^^ ^^ ^^ ^^
- vector<Extent> old_extents = {ExtentForRange(20, 6), ExtentForRange(28, 2)};
- vector<Extent> new_extents = {ExtentForRange(18, 1),
- ExtentForRange(21, 2),
- ExtentForRange(20, 1),
- ExtentForRange(24, 3),
- ExtentForRange(29, 1)};
-
- uint64_t num_blocks = utils::BlocksInExtents(old_extents);
- EXPECT_EQ(num_blocks, utils::BlocksInExtents(new_extents));
-
- // The size of the data should match the total number of blocks. Each block
- // has a different content.
- brillo::Blob file_data;
- for (uint64_t i = 0; i < num_blocks; ++i) {
- file_data.resize(file_data.size() + kBlockSize, 'a' + i);
- }
-
- EXPECT_TRUE(WriteExtents(old_part_.path, old_extents, kBlockSize, file_data));
- EXPECT_TRUE(WriteExtents(new_part_.path, new_extents, kBlockSize, file_data));
-
- brillo::Blob data;
- InstallOperation op;
- EXPECT_TRUE(diff_utils::ReadExtentsToDiff(
- old_part_.path,
- new_part_.path,
- old_extents,
- new_extents,
- {}, // old_deflates
- {}, // new_deflates
- PayloadVersion(kChromeOSMajorPayloadVersion, kInPlaceMinorPayloadVersion),
- &data,
- &op));
-
- EXPECT_TRUE(data.empty());
-
- EXPECT_TRUE(op.has_type());
- EXPECT_EQ(InstallOperation::MOVE, op.type());
- EXPECT_FALSE(op.has_data_offset());
- EXPECT_FALSE(op.has_data_length());
-
- // The expected old and new extents that actually moved. See comment above.
- old_extents = {
- ExtentForRange(20, 1), ExtentForRange(23, 1), ExtentForRange(28, 1)};
- new_extents = {
- ExtentForRange(18, 1), ExtentForRange(20, 1), ExtentForRange(26, 1)};
- num_blocks = utils::BlocksInExtents(old_extents);
-
- EXPECT_EQ(num_blocks * kBlockSize, op.src_length());
- EXPECT_EQ(num_blocks * kBlockSize, op.dst_length());
-
- EXPECT_EQ(old_extents.size(), static_cast<size_t>(op.src_extents_size()));
- for (int i = 0; i < op.src_extents_size(); i++) {
- EXPECT_EQ(old_extents[i].start_block(), op.src_extents(i).start_block())
- << "i == " << i;
- EXPECT_EQ(old_extents[i].num_blocks(), op.src_extents(i).num_blocks())
- << "i == " << i;
- }
-
- EXPECT_EQ(new_extents.size(), static_cast<size_t>(op.dst_extents_size()));
- for (int i = 0; i < op.dst_extents_size(); i++) {
- EXPECT_EQ(new_extents[i].start_block(), op.dst_extents(i).start_block())
- << "i == " << i;
- EXPECT_EQ(new_extents[i].num_blocks(), op.dst_extents(i).num_blocks())
- << "i == " << i;
- }
-}
-
-TEST_F(DeltaDiffUtilsTest, BsdiffSmallTest) {
- // Test a BSDIFF operation from block 1 to block 2.
- brillo::Blob data_blob(kBlockSize);
- test_utils::FillWithData(&data_blob);
-
- // The old file is on a different block than the new one.
- vector<Extent> old_extents = {ExtentForRange(1, 1)};
- vector<Extent> new_extents = {ExtentForRange(2, 1)};
-
- EXPECT_TRUE(WriteExtents(old_part_.path, old_extents, kBlockSize, data_blob));
- // Modify one byte in the new file.
- data_blob[0]++;
- EXPECT_TRUE(WriteExtents(new_part_.path, new_extents, kBlockSize, data_blob));
-
- brillo::Blob data;
- InstallOperation op;
- EXPECT_TRUE(diff_utils::ReadExtentsToDiff(
- old_part_.path,
- new_part_.path,
- old_extents,
- new_extents,
- {}, // old_deflates
- {}, // new_deflates
- PayloadVersion(kChromeOSMajorPayloadVersion, kInPlaceMinorPayloadVersion),
- &data,
- &op));
-
- EXPECT_FALSE(data.empty());
-
- EXPECT_TRUE(op.has_type());
- EXPECT_EQ(InstallOperation::BSDIFF, op.type());
- EXPECT_FALSE(op.has_data_offset());
- EXPECT_FALSE(op.has_data_length());
- EXPECT_EQ(1, op.src_extents_size());
- EXPECT_EQ(kBlockSize, op.src_length());
- EXPECT_EQ(1, op.dst_extents_size());
- EXPECT_EQ(kBlockSize, op.dst_length());
- EXPECT_EQ(utils::BlocksInExtents(op.src_extents()),
- utils::BlocksInExtents(op.dst_extents()));
- EXPECT_EQ(1U, utils::BlocksInExtents(op.dst_extents()));
-}
-
TEST_F(DeltaDiffUtilsTest, ReplaceSmallTest) {
// The old file is on a different block than the new one.
vector<Extent> old_extents = {ExtentForRange(1, 1)};
@@ -383,8 +225,7 @@
new_extents,
{}, // old_deflates
{}, // new_deflates
- PayloadVersion(kChromeOSMajorPayloadVersion,
- kInPlaceMinorPayloadVersion),
+ PayloadVersion(kBrilloMajorPayloadVersion, kSourceMinorPayloadVersion),
&data,
&op));
EXPECT_FALSE(data.empty());
@@ -426,7 +267,7 @@
new_extents,
{}, // old_deflates
{}, // new_deflates
- PayloadVersion(kChromeOSMajorPayloadVersion, kSourceMinorPayloadVersion),
+ PayloadVersion(kBrilloMajorPayloadVersion, kSourceMinorPayloadVersion),
&data,
&op));
EXPECT_TRUE(data.empty());
@@ -460,7 +301,7 @@
new_extents,
{}, // old_deflates
{}, // new_deflates
- PayloadVersion(kChromeOSMajorPayloadVersion, kSourceMinorPayloadVersion),
+ PayloadVersion(kBrilloMajorPayloadVersion, kSourceMinorPayloadVersion),
&data,
&op));
@@ -500,49 +341,6 @@
EXPECT_EQ(InstallOperation::REPLACE_BZ, op.type());
}
-TEST_F(DeltaDiffUtilsTest, IsNoopOperationTest) {
- InstallOperation op;
- op.set_type(InstallOperation::REPLACE_BZ);
- EXPECT_FALSE(diff_utils::IsNoopOperation(op));
- op.set_type(InstallOperation::MOVE);
- EXPECT_TRUE(diff_utils::IsNoopOperation(op));
- *(op.add_src_extents()) = ExtentForRange(3, 2);
- *(op.add_dst_extents()) = ExtentForRange(3, 2);
- EXPECT_TRUE(diff_utils::IsNoopOperation(op));
- *(op.add_src_extents()) = ExtentForRange(7, 5);
- *(op.add_dst_extents()) = ExtentForRange(7, 5);
- EXPECT_TRUE(diff_utils::IsNoopOperation(op));
- *(op.add_src_extents()) = ExtentForRange(20, 2);
- *(op.add_dst_extents()) = ExtentForRange(20, 1);
- *(op.add_dst_extents()) = ExtentForRange(21, 1);
- EXPECT_TRUE(diff_utils::IsNoopOperation(op));
- *(op.add_src_extents()) = ExtentForRange(24, 1);
- *(op.add_dst_extents()) = ExtentForRange(25, 1);
- EXPECT_FALSE(diff_utils::IsNoopOperation(op));
-}
-
-TEST_F(DeltaDiffUtilsTest, FilterNoopOperations) {
- AnnotatedOperation aop1;
- aop1.op.set_type(InstallOperation::REPLACE_BZ);
- *(aop1.op.add_dst_extents()) = ExtentForRange(3, 2);
- aop1.name = "aop1";
-
- AnnotatedOperation aop2 = aop1;
- aop2.name = "aop2";
-
- AnnotatedOperation noop;
- noop.op.set_type(InstallOperation::MOVE);
- *(noop.op.add_src_extents()) = ExtentForRange(3, 2);
- *(noop.op.add_dst_extents()) = ExtentForRange(3, 2);
- noop.name = "noop";
-
- vector<AnnotatedOperation> ops = {noop, aop1, noop, noop, aop2, noop};
- diff_utils::FilterNoopOperations(&ops);
- EXPECT_EQ(2u, ops.size());
- EXPECT_EQ("aop1", ops[0].name);
- EXPECT_EQ("aop2", ops[1].name);
-}
-
// Test the simple case where all the blocks are different and no new blocks are
// zeroed.
TEST_F(DeltaDiffUtilsTest, NoZeroedOrUniqueBlocksDetected) {
@@ -550,7 +348,7 @@
InitializePartitionWithUniqueBlocks(new_part_, block_size_, 42);
EXPECT_TRUE(RunDeltaMovedAndZeroBlocks(-1, // chunk_blocks
- kInPlaceMinorPayloadVersion));
+ kSourceMinorPayloadVersion));
EXPECT_EQ(0U, old_visited_blocks_.blocks());
EXPECT_EQ(0U, new_visited_blocks_.blocks());
@@ -558,29 +356,6 @@
EXPECT_TRUE(aops_.empty());
}
-// Test that when the partitions have identical blocks in the same positions no
-// MOVE operation is performed and all the blocks are handled.
-TEST_F(DeltaDiffUtilsTest, IdenticalPartitionsDontMove) {
- InitializePartitionWithUniqueBlocks(old_part_, block_size_, 42);
- InitializePartitionWithUniqueBlocks(new_part_, block_size_, 42);
-
- // Mark some of the blocks as already visited.
- vector<Extent> already_visited = {ExtentForRange(5, 10),
- ExtentForRange(25, 10)};
- old_visited_blocks_.AddExtents(already_visited);
- new_visited_blocks_.AddExtents(already_visited);
-
- // Most of the blocks rest in the same place, but there's no need for MOVE
- // operations on those blocks.
- EXPECT_TRUE(RunDeltaMovedAndZeroBlocks(-1, // chunk_blocks
- kInPlaceMinorPayloadVersion));
-
- EXPECT_EQ(kDefaultBlockCount, old_visited_blocks_.blocks());
- EXPECT_EQ(kDefaultBlockCount, new_visited_blocks_.blocks());
- EXPECT_EQ(0, blob_size_);
- EXPECT_TRUE(aops_.empty());
-}
-
// Test that when the partitions have identical blocks in the same positions
// MOVE operation is performed and all the blocks are handled.
TEST_F(DeltaDiffUtilsTest, IdenticalBlocksAreCopiedFromSource) {
@@ -701,16 +476,14 @@
EXPECT_TRUE(WriteExtents(old_part_.path, old_zeros, block_size_, zeros_data));
EXPECT_TRUE(RunDeltaMovedAndZeroBlocks(5, // chunk_blocks
- kInPlaceMinorPayloadVersion));
+ kSourceMinorPayloadVersion));
- // Zeroed blocks from old_visited_blocks_ were copied over, so me actually
- // use them regardless of the trivial MOVE operation not being emitted.
+ // Zeroed blocks from |old_visited_blocks_| were copied over.
EXPECT_EQ(old_zeros,
old_visited_blocks_.GetExtentsForBlockCount(
old_visited_blocks_.blocks()));
- // All the new zeroed blocks should be used, part with REPLACE_BZ and part
- // trivial MOVE operations (not included).
+ // All the new zeroed blocks should be used with REPLACE_BZ.
EXPECT_EQ(new_zeros,
new_visited_blocks_.GetExtentsForBlockCount(
new_visited_blocks_.blocks()));
@@ -721,7 +494,8 @@
// This range should be split.
ExtentForRange(30, 5),
ExtentForRange(35, 5),
- ExtentForRange(40, 3),
+ ExtentForRange(40, 5),
+ ExtentForRange(45, 5),
};
EXPECT_EQ(expected_op_extents.size(), aops_.size());
@@ -821,6 +595,8 @@
"update_engine");
EXPECT_EQ(diff_utils::GetOldFile(old_files_map, "bin/delta_generator").name,
"delta_generator");
+ // Check a new file name shorter than every old file name (covers the
+ EXPECT_EQ(diff_utils::GetOldFile(old_files_map, "a").name, "filename");
}
} // namespace chromeos_update_engine
diff --git a/payload_generator/extent_ranges.cc b/payload_generator/extent_ranges.cc
index 0e3f087..4600efe 100644
--- a/payload_generator/extent_ranges.cc
+++ b/payload_generator/extent_ranges.cc
@@ -27,7 +27,6 @@
#include "update_engine/payload_consumer/payload_constants.h"
#include "update_engine/payload_generator/extent_utils.h"
-using std::set;
using std::vector;
namespace chromeos_update_engine {
diff --git a/payload_generator/extent_ranges_unittest.cc b/payload_generator/extent_ranges_unittest.cc
index 2bcffed..326e936 100644
--- a/payload_generator/extent_ranges_unittest.cc
+++ b/payload_generator/extent_ranges_unittest.cc
@@ -18,6 +18,7 @@
#include <vector>
+#include <base/stl_util.h>
#include <gtest/gtest.h>
#include "update_engine/common/test_utils.h"
@@ -53,7 +54,7 @@
#define EXPECT_RANGE_EQ(ranges, var) \
do { \
- ExpectRangeEq(ranges, var, arraysize(var), __LINE__); \
+ ExpectRangeEq(ranges, var, base::size(var), __LINE__); \
} while (0)
void ExpectRangesOverlapOrTouch(uint64_t a_start,
diff --git a/payload_generator/filesystem_interface.h b/payload_generator/filesystem_interface.h
index d04295c..05d387f 100644
--- a/payload_generator/filesystem_interface.h
+++ b/payload_generator/filesystem_interface.h
@@ -62,6 +62,13 @@
// indicating the starting block, and the number of consecutive blocks.
std::vector<Extent> extents;
+ // If true, the file is already compressed on the disk, so we don't need to
+ // parse it again for deflates. For example, image .gz files inside a
+ // compressed SquashFS image. They might have already been compressed by the
+ // mksquashfs, so we can't really parse the file and look for deflate
+ // compressed parts anymore.
+ bool is_compressed = false;
+
// All the deflate locations in the file. These locations are not relative
// to the extents. They are relative to the file system itself.
std::vector<puffin::BitExtent> deflates;
diff --git a/payload_generator/full_update_generator_unittest.cc b/payload_generator/full_update_generator_unittest.cc
index e398125..5f39e8b 100644
--- a/payload_generator/full_update_generator_unittest.cc
+++ b/payload_generator/full_update_generator_unittest.cc
@@ -90,7 +90,7 @@
EXPECT_EQ(config_.hard_chunk_size / config_.block_size,
aops[i].op.dst_extents(0).num_blocks());
if (aops[i].op.type() != InstallOperation::REPLACE) {
- EXPECT_EQ(InstallOperation::REPLACE_BZ, aops[i].op.type());
+ EXPECT_EQ(InstallOperation::REPLACE_XZ, aops[i].op.type());
}
}
}
diff --git a/payload_generator/generate_delta_main.cc b/payload_generator/generate_delta_main.cc
index f035ff1..f7df211 100644
--- a/payload_generator/generate_delta_main.cc
+++ b/payload_generator/generate_delta_main.cc
@@ -38,6 +38,7 @@
#include "update_engine/payload_consumer/payload_constants.h"
#include "update_engine/payload_generator/delta_diff_generator.h"
#include "update_engine/payload_generator/payload_generation_config.h"
+#include "update_engine/payload_generator/payload_properties.h"
#include "update_engine/payload_generator/payload_signer.h"
#include "update_engine/payload_generator/xz.h"
#include "update_engine/update_metadata.pb.h"
@@ -53,6 +54,9 @@
namespace {
+constexpr char kPayloadPropertiesFormatKeyValue[] = "key-value";
+constexpr char kPayloadPropertiesFormatJson[] = "json";
+
void ParseSignatureSizes(const string& signature_sizes_flag,
vector<size_t>* signature_sizes) {
signature_sizes->clear();
@@ -267,14 +271,24 @@
return true;
}
-int ExtractProperties(const string& payload_path, const string& props_file) {
- brillo::KeyValueStore properties;
- TEST_AND_RETURN_FALSE(
- PayloadSigner::ExtractPayloadProperties(payload_path, &properties));
- if (props_file == "-") {
- printf("%s", properties.SaveToString().c_str());
+bool ExtractProperties(const string& payload_path,
+ const string& props_file,
+ const string& props_format) {
+ string properties;
+ PayloadProperties payload_props(payload_path);
+ if (props_format == kPayloadPropertiesFormatKeyValue) {
+ TEST_AND_RETURN_FALSE(payload_props.GetPropertiesAsKeyValue(&properties));
+ } else if (props_format == kPayloadPropertiesFormatJson) {
+ TEST_AND_RETURN_FALSE(payload_props.GetPropertiesAsJson(&properties));
} else {
- properties.Save(base::FilePath(props_file));
+ LOG(FATAL) << "Invalid option " << props_format
+ << " for --properties_format flag.";
+ }
+ if (props_file == "-") {
+ printf("%s", properties.c_str());
+ } else {
+ utils::WriteFile(
+ props_file.c_str(), properties.c_str(), properties.length());
LOG(INFO) << "Generated properties file at " << props_file;
}
return true;
@@ -361,7 +375,11 @@
DEFINE_string(properties_file,
"",
"If passed, dumps the payload properties of the payload passed "
- "in --in_file and exits.");
+ "in --in_file and exits. Look at --properties_format.");
+ DEFINE_string(properties_format,
+ kPayloadPropertiesFormatKeyValue,
+ "Defines the format of the --properties_file. The acceptable "
+ "values are: key-value (default) and json");
DEFINE_int64(max_timestamp,
0,
"The maximum timestamp of the OS allowed to apply this "
@@ -500,7 +518,10 @@
return VerifySignedPayload(FLAGS_in_file, FLAGS_public_key);
}
if (!FLAGS_properties_file.empty()) {
- return ExtractProperties(FLAGS_in_file, FLAGS_properties_file) ? 0 : 1;
+ return ExtractProperties(
+ FLAGS_in_file, FLAGS_properties_file, FLAGS_properties_format)
+ ? 0
+ : 1;
}
// A payload generation was requested. Convert the flags to a
@@ -521,16 +542,10 @@
partition_names = base::SplitString(
FLAGS_partition_names, ":", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
CHECK(!partition_names.empty());
- if (FLAGS_major_version == kChromeOSMajorPayloadVersion ||
- FLAGS_new_partitions.empty()) {
- LOG_IF(FATAL, partition_names.size() != 2)
- << "To support more than 2 partitions, please use the "
- << "--new_partitions flag and major version 2.";
- LOG_IF(FATAL,
- partition_names[0] != kPartitionNameRoot ||
- partition_names[1] != kPartitionNameKernel)
- << "To support non-default partition name, please use the "
- << "--new_partitions flag and major version 2.";
+ if (FLAGS_major_version < kMinSupportedMajorPayloadVersion ||
+ FLAGS_major_version > kMaxSupportedMajorPayloadVersion) {
+ LOG(FATAL) << "Unsupported major version " << FLAGS_major_version;
+ return 1;
}
if (!FLAGS_new_partitions.empty()) {
@@ -591,8 +606,6 @@
}
if (!FLAGS_new_postinstall_config_file.empty()) {
- LOG_IF(FATAL, FLAGS_major_version == kChromeOSMajorPayloadVersion)
- << "Postinstall config is only allowed in major version 2 or newer.";
brillo::KeyValueStore store;
CHECK(store.Load(base::FilePath(FLAGS_new_postinstall_config_file)));
CHECK(payload_config.target.LoadPostInstallConfig(store));
@@ -610,9 +623,6 @@
CHECK(payload_config.target.LoadImageSize());
if (!FLAGS_dynamic_partition_info_file.empty()) {
- LOG_IF(FATAL, FLAGS_major_version == kChromeOSMajorPayloadVersion)
- << "Dynamic partition info is only allowed in major version 2 or "
- "newer.";
brillo::KeyValueStore store;
CHECK(store.Load(base::FilePath(FLAGS_dynamic_partition_info_file)));
CHECK(payload_config.target.LoadDynamicPartitionMetadata(store));
@@ -656,25 +666,40 @@
// Autodetect minor_version by looking at the update_engine.conf in the old
// image.
if (payload_config.is_delta) {
- payload_config.version.minor = kInPlaceMinorPayloadVersion;
brillo::KeyValueStore store;
uint32_t minor_version;
+ bool minor_version_found = false;
for (const PartitionConfig& part : payload_config.source.partitions) {
if (part.fs_interface && part.fs_interface->LoadSettings(&store) &&
utils::GetMinorVersion(store, &minor_version)) {
payload_config.version.minor = minor_version;
+ minor_version_found = true;
+ LOG(INFO) << "Auto-detected minor_version="
+ << payload_config.version.minor;
break;
}
}
+ if (!minor_version_found) {
+ LOG(FATAL) << "Failed to detect the minor version.";
+ return 1;
+ }
} else {
payload_config.version.minor = kFullPayloadMinorVersion;
+ LOG(INFO) << "Using non-delta minor_version="
+ << payload_config.version.minor;
}
- LOG(INFO) << "Auto-detected minor_version=" << payload_config.version.minor;
} else {
payload_config.version.minor = FLAGS_minor_version;
LOG(INFO) << "Using provided minor_version=" << FLAGS_minor_version;
}
+ if (payload_config.version.minor != kFullPayloadMinorVersion &&
+ (payload_config.version.minor < kMinSupportedMinorPayloadVersion ||
+ payload_config.version.minor > kMaxSupportedMinorPayloadVersion)) {
+ LOG(FATAL) << "Unsupported minor version " << payload_config.version.minor;
+ return 1;
+ }
+
payload_config.max_timestamp = FLAGS_max_timestamp;
if (payload_config.version.minor >= kVerityMinorPayloadVersion)
diff --git a/payload_generator/graph_types.cc b/payload_generator/graph_types.cc
deleted file mode 100644
index c03766d..0000000
--- a/payload_generator/graph_types.cc
+++ /dev/null
@@ -1,23 +0,0 @@
-//
-// Copyright (C) 2015 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#include "update_engine/payload_generator/graph_types.h"
-
-namespace chromeos_update_engine {
-
-const Vertex::Index Vertex::kInvalidIndex = static_cast<Vertex::Index>(-1);
-
-} // namespace chromeos_update_engine
diff --git a/payload_generator/graph_types.h b/payload_generator/graph_types.h
deleted file mode 100644
index f96b0f3..0000000
--- a/payload_generator/graph_types.h
+++ /dev/null
@@ -1,87 +0,0 @@
-//
-// Copyright (C) 2009 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#ifndef UPDATE_ENGINE_PAYLOAD_GENERATOR_GRAPH_TYPES_H_
-#define UPDATE_ENGINE_PAYLOAD_GENERATOR_GRAPH_TYPES_H_
-
-#include <map>
-#include <set>
-#include <string>
-#include <utility>
-#include <vector>
-
-#include <base/macros.h>
-
-#include "update_engine/payload_generator/annotated_operation.h"
-#include "update_engine/payload_generator/extent_utils.h"
-#include "update_engine/update_metadata.pb.h"
-
-// A few classes that help in generating delta images use these types
-// for the graph work.
-
-namespace chromeos_update_engine {
-
-struct EdgeProperties {
- // Read-before extents. I.e., blocks in |extents| must be read by the
- // node pointed to before the pointing node runs (presumably b/c it
- // overwrites these blocks).
- std::vector<Extent> extents;
-
- // Write before extents. I.e., blocks in |write_extents| must be written
- // by the node pointed to before the pointing node runs (presumably
- // b/c it reads the data written by the other node).
- std::vector<Extent> write_extents;
-
- bool operator==(const EdgeProperties& that) const {
- return extents == that.extents && write_extents == that.write_extents;
- }
-};
-
-struct Vertex {
- Vertex() : valid(true), index(-1), lowlink(-1) {}
- bool valid;
-
- typedef std::map<std::vector<Vertex>::size_type, EdgeProperties> EdgeMap;
- EdgeMap out_edges;
-
- // We sometimes wish to consider a subgraph of a graph. A subgraph would have
- // a subset of the vertices from the graph and a subset of the edges.
- // When considering this vertex within a subgraph, subgraph_edges stores
- // the out-edges.
- typedef std::set<std::vector<Vertex>::size_type> SubgraphEdgeMap;
- SubgraphEdgeMap subgraph_edges;
-
- // For Tarjan's algorithm:
- std::vector<Vertex>::size_type index;
- std::vector<Vertex>::size_type lowlink;
-
- // Other Vertex properties:
- AnnotatedOperation aop;
-
- typedef std::vector<Vertex>::size_type Index;
- static const Vertex::Index kInvalidIndex;
-};
-
-typedef std::vector<Vertex> Graph;
-
-typedef std::pair<Vertex::Index, Vertex::Index> Edge;
-
-const uint64_t kTempBlockStart = 1ULL << 60;
-static_assert(kTempBlockStart != 0, "kTempBlockStart invalid");
-
-} // namespace chromeos_update_engine
-
-#endif // UPDATE_ENGINE_PAYLOAD_GENERATOR_GRAPH_TYPES_H_
diff --git a/payload_generator/graph_utils.cc b/payload_generator/graph_utils.cc
deleted file mode 100644
index 7f5cf8f..0000000
--- a/payload_generator/graph_utils.cc
+++ /dev/null
@@ -1,142 +0,0 @@
-//
-// Copyright (C) 2009 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#include "update_engine/payload_generator/graph_utils.h"
-
-#include <string>
-#include <utility>
-#include <vector>
-
-#include <base/logging.h>
-#include <base/macros.h>
-
-#include "update_engine/payload_consumer/payload_constants.h"
-#include "update_engine/payload_generator/annotated_operation.h"
-#include "update_engine/payload_generator/extent_utils.h"
-
-using std::make_pair;
-using std::pair;
-using std::string;
-using std::vector;
-
-namespace chromeos_update_engine {
-namespace graph_utils {
-
-uint64_t EdgeWeight(const Graph& graph, const Edge& edge) {
- uint64_t weight = 0;
- const vector<Extent>& extents =
- graph[edge.first].out_edges.find(edge.second)->second.extents;
- for (vector<Extent>::const_iterator it = extents.begin(); it != extents.end();
- ++it) {
- if (it->start_block() != kSparseHole)
- weight += it->num_blocks();
- }
- return weight;
-}
-
-void AddReadBeforeDep(Vertex* src, Vertex::Index dst, uint64_t block) {
- Vertex::EdgeMap::iterator edge_it = src->out_edges.find(dst);
- if (edge_it == src->out_edges.end()) {
- // Must create new edge
- pair<Vertex::EdgeMap::iterator, bool> result =
- src->out_edges.insert(make_pair(dst, EdgeProperties()));
- CHECK(result.second);
- edge_it = result.first;
- }
- AppendBlockToExtents(&edge_it->second.extents, block);
-}
-
-void AddReadBeforeDepExtents(Vertex* src,
- Vertex::Index dst,
- const vector<Extent>& extents) {
- // TODO(adlr): Be more efficient than adding each block individually.
- for (vector<Extent>::const_iterator it = extents.begin(), e = extents.end();
- it != e;
- ++it) {
- const Extent& extent = *it;
- for (uint64_t block = extent.start_block(),
- block_end = extent.start_block() + extent.num_blocks();
- block != block_end;
- ++block) {
- AddReadBeforeDep(src, dst, block);
- }
- }
-}
-
-void DropWriteBeforeDeps(Vertex::EdgeMap* edge_map) {
- // Specially crafted for-loop for the map-iterate-delete dance.
- for (Vertex::EdgeMap::iterator it = edge_map->begin();
- it != edge_map->end();) {
- if (!it->second.write_extents.empty())
- it->second.write_extents.clear();
- if (it->second.extents.empty()) {
- // Erase *it, as it contains no blocks
- edge_map->erase(it++);
- } else {
- ++it;
- }
- }
-}
-
-// For each node N in graph, drop all edges N->|index|.
-void DropIncomingEdgesTo(Graph* graph, Vertex::Index index) {
- // This would be much more efficient if we had doubly-linked
- // edges in the graph.
- for (Graph::iterator it = graph->begin(), e = graph->end(); it != e; ++it) {
- it->out_edges.erase(index);
- }
-}
-
-namespace {
-template <typename T>
-void DumpExtents(const T& field, int prepend_space_count) {
- string header(prepend_space_count, ' ');
- for (const auto& extent : field) {
- LOG(INFO) << header << "(" << extent.start_block() << ", "
- << extent.num_blocks() << ")";
- }
-}
-
-void DumpOutEdges(const Vertex::EdgeMap& out_edges) {
- for (Vertex::EdgeMap::const_iterator it = out_edges.begin(),
- e = out_edges.end();
- it != e;
- ++it) {
- LOG(INFO) << " " << it->first << " read-before:";
- DumpExtents(it->second.extents, 6);
- LOG(INFO) << " write-before:";
- DumpExtents(it->second.write_extents, 6);
- }
-}
-} // namespace
-
-void DumpGraph(const Graph& graph) {
- LOG(INFO) << "Graph length: " << graph.size();
- for (Graph::size_type i = 0, e = graph.size(); i != e; ++i) {
- LOG(INFO) << i << (graph[i].valid ? "" : "-INV") << ": "
- << graph[i].aop.name << ": "
- << InstallOperationTypeName(graph[i].aop.op.type());
- LOG(INFO) << " src_extents:";
- DumpExtents(graph[i].aop.op.src_extents(), 4);
- LOG(INFO) << " dst_extents:";
- DumpExtents(graph[i].aop.op.dst_extents(), 4);
- LOG(INFO) << " out edges:";
- DumpOutEdges(graph[i].out_edges);
- }
-}
-
-} // namespace graph_utils
-} // namespace chromeos_update_engine
diff --git a/payload_generator/graph_utils.h b/payload_generator/graph_utils.h
deleted file mode 100644
index 7024215..0000000
--- a/payload_generator/graph_utils.h
+++ /dev/null
@@ -1,54 +0,0 @@
-//
-// Copyright (C) 2009 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#ifndef UPDATE_ENGINE_PAYLOAD_GENERATOR_GRAPH_UTILS_H_
-#define UPDATE_ENGINE_PAYLOAD_GENERATOR_GRAPH_UTILS_H_
-
-#include <vector>
-
-#include <base/macros.h>
-
-#include "update_engine/payload_generator/graph_types.h"
-#include "update_engine/update_metadata.pb.h"
-
-// A few utility functions for graphs
-
-namespace chromeos_update_engine {
-
-namespace graph_utils {
-
-// Returns the number of blocks represented by all extents in the edge.
-uint64_t EdgeWeight(const Graph& graph, const Edge& edge);
-
-// These add a read-before dependency from graph[src] -> graph[dst]. If the dep
-// already exists, the block/s is/are added to the existing edge.
-void AddReadBeforeDep(Vertex* src, Vertex::Index dst, uint64_t block);
-void AddReadBeforeDepExtents(Vertex* src,
- Vertex::Index dst,
- const std::vector<Extent>& extents);
-
-void DropWriteBeforeDeps(Vertex::EdgeMap* edge_map);
-
-// For each node N in graph, drop all edges N->|index|.
-void DropIncomingEdgesTo(Graph* graph, Vertex::Index index);
-
-void DumpGraph(const Graph& graph);
-
-} // namespace graph_utils
-
-} // namespace chromeos_update_engine
-
-#endif // UPDATE_ENGINE_PAYLOAD_GENERATOR_GRAPH_UTILS_H_
diff --git a/payload_generator/graph_utils_unittest.cc b/payload_generator/graph_utils_unittest.cc
deleted file mode 100644
index 07e7664..0000000
--- a/payload_generator/graph_utils_unittest.cc
+++ /dev/null
@@ -1,94 +0,0 @@
-//
-// Copyright (C) 2009 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#include "update_engine/payload_generator/graph_utils.h"
-
-#include <utility>
-#include <vector>
-
-#include <gtest/gtest.h>
-
-#include "update_engine/payload_consumer/payload_constants.h"
-#include "update_engine/payload_generator/extent_ranges.h"
-#include "update_engine/payload_generator/extent_utils.h"
-
-using std::make_pair;
-using std::vector;
-
-namespace chromeos_update_engine {
-
-class GraphUtilsTest : public ::testing::Test {};
-
-TEST(GraphUtilsTest, SimpleTest) {
- Graph graph(2);
-
- graph[0].out_edges.insert(make_pair(1, EdgeProperties()));
-
- vector<Extent>& extents = graph[0].out_edges[1].extents;
-
- EXPECT_EQ(0U, extents.size());
- AppendBlockToExtents(&extents, 0);
- EXPECT_EQ(1U, extents.size());
- AppendBlockToExtents(&extents, 1);
- AppendBlockToExtents(&extents, 2);
- EXPECT_EQ(1U, extents.size());
- AppendBlockToExtents(&extents, 4);
-
- EXPECT_EQ(2U, extents.size());
- EXPECT_EQ(0U, extents[0].start_block());
- EXPECT_EQ(3U, extents[0].num_blocks());
- EXPECT_EQ(4U, extents[1].start_block());
- EXPECT_EQ(1U, extents[1].num_blocks());
-
- EXPECT_EQ(4U, graph_utils::EdgeWeight(graph, make_pair(0, 1)));
-}
-
-TEST(GraphUtilsTest, DepsTest) {
- Graph graph(3);
-
- graph_utils::AddReadBeforeDep(&graph[0], 1, 3);
- EXPECT_EQ(1U, graph[0].out_edges.size());
- {
- Extent& extent = graph[0].out_edges[1].extents[0];
- EXPECT_EQ(3U, extent.start_block());
- EXPECT_EQ(1U, extent.num_blocks());
- }
- graph_utils::AddReadBeforeDep(&graph[0], 1, 4);
- EXPECT_EQ(1U, graph[0].out_edges.size());
- {
- Extent& extent = graph[0].out_edges[1].extents[0];
- EXPECT_EQ(3U, extent.start_block());
- EXPECT_EQ(2U, extent.num_blocks());
- }
- graph_utils::AddReadBeforeDepExtents(
- &graph[2], 1, vector<Extent>(1, ExtentForRange(5, 2)));
- EXPECT_EQ(1U, graph[2].out_edges.size());
- {
- Extent& extent = graph[2].out_edges[1].extents[0];
- EXPECT_EQ(5U, extent.start_block());
- EXPECT_EQ(2U, extent.num_blocks());
- }
- // Change most recent edge from read-before to write-before
- graph[2].out_edges[1].write_extents.swap(graph[2].out_edges[1].extents);
- graph_utils::DropWriteBeforeDeps(&graph[2].out_edges);
- EXPECT_EQ(0U, graph[2].out_edges.size());
-
- EXPECT_EQ(1U, graph[0].out_edges.size());
- graph_utils::DropIncomingEdgesTo(&graph, 1);
- EXPECT_EQ(0U, graph[0].out_edges.size());
-}
-
-} // namespace chromeos_update_engine
diff --git a/payload_generator/inplace_generator.cc b/payload_generator/inplace_generator.cc
deleted file mode 100644
index d553cc4..0000000
--- a/payload_generator/inplace_generator.cc
+++ /dev/null
@@ -1,798 +0,0 @@
-//
-// Copyright (C) 2015 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#include "update_engine/payload_generator/inplace_generator.h"
-
-#include <algorithm>
-#include <map>
-#include <set>
-#include <string>
-#include <utility>
-#include <vector>
-
-#include <base/stl_util.h>
-
-#include "update_engine/common/utils.h"
-#include "update_engine/payload_consumer/payload_constants.h"
-#include "update_engine/payload_generator/cycle_breaker.h"
-#include "update_engine/payload_generator/delta_diff_generator.h"
-#include "update_engine/payload_generator/delta_diff_utils.h"
-#include "update_engine/payload_generator/extent_ranges.h"
-#include "update_engine/payload_generator/graph_types.h"
-#include "update_engine/payload_generator/graph_utils.h"
-#include "update_engine/payload_generator/topological_sort.h"
-#include "update_engine/update_metadata.pb.h"
-
-using std::make_pair;
-using std::map;
-using std::pair;
-using std::set;
-using std::string;
-using std::vector;
-
-namespace chromeos_update_engine {
-
-using Block = InplaceGenerator::Block;
-
-namespace {
-
-// The only PayloadVersion supported by this implementation.
-const PayloadVersion kInPlacePayloadVersion{kChromeOSMajorPayloadVersion,
- kInPlaceMinorPayloadVersion};
-
-// This class allocates non-existent temp blocks, starting from
-// kTempBlockStart. Other code is responsible for converting these
-// temp blocks into real blocks, as the client can't read or write to
-// these blocks.
-class DummyExtentAllocator {
- public:
- vector<Extent> Allocate(const uint64_t block_count) {
- vector<Extent> ret(1);
- ret[0].set_start_block(next_block_);
- ret[0].set_num_blocks(block_count);
- next_block_ += block_count;
- return ret;
- }
-
- private:
- uint64_t next_block_{kTempBlockStart};
-};
-
-// Takes a vector of blocks and returns an equivalent vector of Extent
-// objects.
-vector<Extent> CompressExtents(const vector<uint64_t>& blocks) {
- vector<Extent> new_extents;
- for (uint64_t block : blocks) {
- AppendBlockToExtents(&new_extents, block);
- }
- return new_extents;
-}
-
-// Helper class to compare two operations by start block of the first Extent in
-// their destination extents given the index of the operations in the graph.
-class IndexedInstallOperationsDstComparator {
- public:
- explicit IndexedInstallOperationsDstComparator(Graph* graph)
- : graph_(graph) {}
-
- // Compares the operations in the vertex a and b of graph_.
- bool operator()(size_t a, size_t b) const {
- return diff_utils::CompareAopsByDestination((*graph_)[a].aop,
- (*graph_)[b].aop);
- }
-
- private:
- const Graph* const graph_;
-};
-
-} // namespace
-
-void InplaceGenerator::CheckGraph(const Graph& graph) {
- for (const Vertex& v : graph) {
- CHECK(v.aop.op.has_type());
- }
-}
-
-void InplaceGenerator::SubstituteBlocks(Vertex* vertex,
- const vector<Extent>& remove_extents,
- const vector<Extent>& replace_extents) {
- // First, expand out the blocks that op reads from
- vector<uint64_t> read_blocks = ExpandExtents(vertex->aop.op.src_extents());
- {
- // Expand remove_extents and replace_extents
- vector<uint64_t> remove_extents_expanded = ExpandExtents(remove_extents);
- vector<uint64_t> replace_extents_expanded = ExpandExtents(replace_extents);
- CHECK_EQ(remove_extents_expanded.size(), replace_extents_expanded.size());
- map<uint64_t, uint64_t> conversion;
- for (vector<uint64_t>::size_type i = 0; i < replace_extents_expanded.size();
- i++) {
- conversion[remove_extents_expanded[i]] = replace_extents_expanded[i];
- }
- ApplyMap(&read_blocks, conversion);
- for (auto& edge_prop_pair : vertex->out_edges) {
- vector<uint64_t> write_before_deps_expanded =
- ExpandExtents(edge_prop_pair.second.write_extents);
- ApplyMap(&write_before_deps_expanded, conversion);
- edge_prop_pair.second.write_extents =
- CompressExtents(write_before_deps_expanded);
- }
- }
- // Convert read_blocks back to extents
- vertex->aop.op.clear_src_extents();
- vector<Extent> new_extents = CompressExtents(read_blocks);
- StoreExtents(new_extents, vertex->aop.op.mutable_src_extents());
-}
-
-bool InplaceGenerator::CutEdges(Graph* graph,
- const set<Edge>& edges,
- vector<CutEdgeVertexes>* out_cuts) {
- DummyExtentAllocator scratch_allocator;
- vector<CutEdgeVertexes> cuts;
- cuts.reserve(edges.size());
-
- uint64_t scratch_blocks_used = 0;
- for (const Edge& edge : edges) {
- cuts.resize(cuts.size() + 1);
- vector<Extent> old_extents =
- (*graph)[edge.first].out_edges[edge.second].extents;
- // Choose some scratch space
- scratch_blocks_used += graph_utils::EdgeWeight(*graph, edge);
- cuts.back().tmp_extents =
- scratch_allocator.Allocate(graph_utils::EdgeWeight(*graph, edge));
- // create vertex to copy original->scratch
- cuts.back().new_vertex = graph->size();
- graph->emplace_back();
- cuts.back().old_src = edge.first;
- cuts.back().old_dst = edge.second;
-
- EdgeProperties& cut_edge_properties =
- (*graph)[edge.first].out_edges.find(edge.second)->second;
-
- // This should never happen, as we should only be cutting edges between
- // real file nodes, and write-before relationships are created from
- // a real file node to a temp copy node:
- CHECK(cut_edge_properties.write_extents.empty())
- << "Can't cut edge that has write-before relationship.";
-
- // make node depend on the copy operation
- (*graph)[edge.first].out_edges.insert(
- make_pair(graph->size() - 1, cut_edge_properties));
-
- // Set src/dst extents and other proto variables for copy operation
- graph->back().aop.op.set_type(InstallOperation::MOVE);
- StoreExtents(cut_edge_properties.extents,
- graph->back().aop.op.mutable_src_extents());
- StoreExtents(cuts.back().tmp_extents,
- graph->back().aop.op.mutable_dst_extents());
- graph->back().aop.op.set_src_length(graph_utils::EdgeWeight(*graph, edge) *
- kBlockSize);
- graph->back().aop.op.set_dst_length(graph->back().aop.op.src_length());
-
- // make the dest node read from the scratch space
- SubstituteBlocks(&((*graph)[edge.second]),
- (*graph)[edge.first].out_edges[edge.second].extents,
- cuts.back().tmp_extents);
-
- // delete the old edge
- CHECK_EQ(static_cast<Graph::size_type>(1),
- (*graph)[edge.first].out_edges.erase(edge.second));
-
- // Add an edge from dst to copy operation
- EdgeProperties write_before_edge_properties;
- write_before_edge_properties.write_extents = cuts.back().tmp_extents;
- (*graph)[edge.second].out_edges.insert(
- make_pair(graph->size() - 1, write_before_edge_properties));
- }
- out_cuts->swap(cuts);
- return true;
-}
-
-// Creates all the edges for the graph. Writers of a block point to
-// readers of the same block. This is because for an edge A->B, B
-// must complete before A executes.
-void InplaceGenerator::CreateEdges(Graph* graph, const vector<Block>& blocks) {
- for (vector<Block>::size_type i = 0; i < blocks.size(); i++) {
- // Blocks with both a reader and writer get an edge
- if (blocks[i].reader == Vertex::kInvalidIndex ||
- blocks[i].writer == Vertex::kInvalidIndex)
- continue;
- // Don't have a node depend on itself
- if (blocks[i].reader == blocks[i].writer)
- continue;
- // See if there's already an edge we can add onto
- Vertex::EdgeMap::iterator edge_it =
- (*graph)[blocks[i].writer].out_edges.find(blocks[i].reader);
- if (edge_it == (*graph)[blocks[i].writer].out_edges.end()) {
- // No existing edge. Create one
- (*graph)[blocks[i].writer].out_edges.insert(
- make_pair(blocks[i].reader, EdgeProperties()));
- edge_it = (*graph)[blocks[i].writer].out_edges.find(blocks[i].reader);
- CHECK(edge_it != (*graph)[blocks[i].writer].out_edges.end());
- }
- AppendBlockToExtents(&edge_it->second.extents, i);
- }
-}
-
-namespace {
-
-class SortCutsByTopoOrderLess {
- public:
- explicit SortCutsByTopoOrderLess(
- const vector<vector<Vertex::Index>::size_type>& table)
- : table_(table) {}
- bool operator()(const CutEdgeVertexes& a, const CutEdgeVertexes& b) {
- return table_[a.old_dst] < table_[b.old_dst];
- }
-
- private:
- const vector<vector<Vertex::Index>::size_type>& table_;
-};
-
-} // namespace
-
-void InplaceGenerator::GenerateReverseTopoOrderMap(
- const vector<Vertex::Index>& op_indexes,
- vector<vector<Vertex::Index>::size_type>* reverse_op_indexes) {
- vector<vector<Vertex::Index>::size_type> table(op_indexes.size());
- for (vector<Vertex::Index>::size_type i = 0, e = op_indexes.size(); i != e;
- ++i) {
- Vertex::Index node = op_indexes[i];
- if (table.size() < (node + 1)) {
- table.resize(node + 1);
- }
- table[node] = i;
- }
- reverse_op_indexes->swap(table);
-}
-
-void InplaceGenerator::SortCutsByTopoOrder(
- const vector<Vertex::Index>& op_indexes, vector<CutEdgeVertexes>* cuts) {
- // first, make a reverse lookup table.
- vector<vector<Vertex::Index>::size_type> table;
- GenerateReverseTopoOrderMap(op_indexes, &table);
- SortCutsByTopoOrderLess less(table);
- sort(cuts->begin(), cuts->end(), less);
-}
-
-void InplaceGenerator::MoveAndSortFullOpsToBack(
- Graph* graph, vector<Vertex::Index>* op_indexes) {
- vector<Vertex::Index> ret;
- vector<Vertex::Index> full_ops;
- ret.reserve(op_indexes->size());
- for (auto op_index : *op_indexes) {
- InstallOperation::Type type = (*graph)[op_index].aop.op.type();
- if (type == InstallOperation::REPLACE ||
- type == InstallOperation::REPLACE_BZ) {
- full_ops.push_back(op_index);
- } else {
- ret.push_back(op_index);
- }
- }
- LOG(INFO) << "Stats: " << full_ops.size() << " full ops out of "
- << (full_ops.size() + ret.size()) << " total ops.";
- // Sort full ops according to their dst_extents.
- sort(full_ops.begin(),
- full_ops.end(),
- IndexedInstallOperationsDstComparator(graph));
- ret.insert(ret.end(), full_ops.begin(), full_ops.end());
- op_indexes->swap(ret);
-}
-
-namespace {
-
-template <typename T>
-bool TempBlocksExistInExtents(const T& extents) {
- for (const auto& extent : extents) {
- uint64_t start = extent.start_block();
- uint64_t num = extent.num_blocks();
- if (start >= kTempBlockStart || (start + num) >= kTempBlockStart) {
- LOG(ERROR) << "temp block!";
- LOG(ERROR) << "start: " << start << ", num: " << num;
- LOG(ERROR) << "kTempBlockStart: " << kTempBlockStart;
- LOG(ERROR) << "returning true";
- return true;
- }
- // check for wrap-around, which would be a bug:
- CHECK(start <= (start + num));
- }
- return false;
-}
-
-// Converts the cuts, which must all have the same |old_dst| member,
-// to full. It does this by converting the |old_dst| to REPLACE or
-// REPLACE_BZ, dropping all incoming edges to |old_dst|, and marking
-// all temp nodes invalid.
-bool ConvertCutsToFull(
- Graph* graph,
- const string& new_part,
- BlobFileWriter* blob_file,
- vector<Vertex::Index>* op_indexes,
- vector<vector<Vertex::Index>::size_type>* reverse_op_indexes,
- const vector<CutEdgeVertexes>& cuts) {
- CHECK(!cuts.empty());
- set<Vertex::Index> deleted_nodes;
- for (const CutEdgeVertexes& cut : cuts) {
- TEST_AND_RETURN_FALSE(
- InplaceGenerator::ConvertCutToFullOp(graph, cut, new_part, blob_file));
- deleted_nodes.insert(cut.new_vertex);
- }
- deleted_nodes.insert(cuts[0].old_dst);
-
- vector<Vertex::Index> new_op_indexes;
- new_op_indexes.reserve(op_indexes->size());
- for (Vertex::Index vertex_index : *op_indexes) {
- if (base::ContainsKey(deleted_nodes, vertex_index))
- continue;
- new_op_indexes.push_back(vertex_index);
- }
- new_op_indexes.push_back(cuts[0].old_dst);
- op_indexes->swap(new_op_indexes);
- InplaceGenerator::GenerateReverseTopoOrderMap(*op_indexes,
- reverse_op_indexes);
- return true;
-}
-
-// Tries to assign temp blocks for a collection of cuts, all of which share
-// the same old_dst member. If temp blocks can't be found, old_dst will be
-// converted to a REPLACE or REPLACE_BZ operation. Returns true on success,
-// which can happen even if blocks are converted to full. Returns false
-// on exceptional error cases.
-bool AssignBlockForAdjoiningCuts(
- Graph* graph,
- const string& new_part,
- BlobFileWriter* blob_file,
- vector<Vertex::Index>* op_indexes,
- vector<vector<Vertex::Index>::size_type>* reverse_op_indexes,
- const vector<CutEdgeVertexes>& cuts) {
- CHECK(!cuts.empty());
- const Vertex::Index old_dst = cuts[0].old_dst;
- // Calculate # of blocks needed
- uint64_t blocks_needed = 0;
- vector<uint64_t> cuts_blocks_needed(cuts.size());
- for (vector<CutEdgeVertexes>::size_type i = 0; i < cuts.size(); ++i) {
- uint64_t cut_blocks_needed = 0;
- for (const Extent& extent : cuts[i].tmp_extents) {
- cut_blocks_needed += extent.num_blocks();
- }
- blocks_needed += cut_blocks_needed;
- cuts_blocks_needed[i] = cut_blocks_needed;
- }
-
- // Find enough blocks
- ExtentRanges scratch_ranges;
- // Each block that's supplying temp blocks and the corresponding blocks:
- typedef vector<pair<Vertex::Index, ExtentRanges>> SupplierVector;
- SupplierVector block_suppliers;
- uint64_t scratch_blocks_found = 0;
- for (vector<Vertex::Index>::size_type i = (*reverse_op_indexes)[old_dst] + 1,
- e = op_indexes->size();
- i < e;
- ++i) {
- Vertex::Index test_node = (*op_indexes)[i];
- if (!(*graph)[test_node].valid)
- continue;
- // See if this node has sufficient blocks
- ExtentRanges ranges;
- ranges.AddRepeatedExtents((*graph)[test_node].aop.op.dst_extents());
- ranges.SubtractExtent(
- ExtentForRange(kTempBlockStart, kSparseHole - kTempBlockStart));
- ranges.SubtractRepeatedExtents((*graph)[test_node].aop.op.src_extents());
- // For now, for simplicity, subtract out all blocks in read-before
- // dependencies.
- for (Vertex::EdgeMap::const_iterator
- edge_i = (*graph)[test_node].out_edges.begin(),
- edge_e = (*graph)[test_node].out_edges.end();
- edge_i != edge_e;
- ++edge_i) {
- ranges.SubtractExtents(edge_i->second.extents);
- }
-
- // Prevent using the block 0 as scratch space due to crbug.com/480751.
- if (ranges.ContainsBlock(0)) {
- LOG(INFO) << "Removing block 0 from the selected scratch range in vertex "
- << i;
- ranges.SubtractBlock(0);
- }
-
- if (ranges.blocks() == 0)
- continue;
-
- if (ranges.blocks() + scratch_blocks_found > blocks_needed) {
- // trim down ranges
- vector<Extent> new_ranges =
- ranges.GetExtentsForBlockCount(blocks_needed - scratch_blocks_found);
- ranges = ExtentRanges();
- ranges.AddExtents(new_ranges);
- }
- scratch_ranges.AddRanges(ranges);
- block_suppliers.push_back(make_pair(test_node, ranges));
- scratch_blocks_found += ranges.blocks();
- if (scratch_ranges.blocks() >= blocks_needed)
- break;
- }
- if (scratch_ranges.blocks() < blocks_needed) {
- LOG(INFO) << "Unable to find sufficient scratch";
- TEST_AND_RETURN_FALSE(ConvertCutsToFull(
- graph, new_part, blob_file, op_indexes, reverse_op_indexes, cuts));
- return true;
- }
- // Use the scratch we found
- TEST_AND_RETURN_FALSE(scratch_ranges.blocks() == scratch_blocks_found);
-
- // Make all the suppliers depend on this node
- for (const auto& index_range_pair : block_suppliers) {
- graph_utils::AddReadBeforeDepExtents(
- &(*graph)[index_range_pair.first],
- old_dst,
- index_range_pair.second.GetExtentsForBlockCount(
- index_range_pair.second.blocks()));
- }
-
- // Replace temp blocks in each cut
- for (vector<CutEdgeVertexes>::size_type i = 0; i < cuts.size(); ++i) {
- const CutEdgeVertexes& cut = cuts[i];
- vector<Extent> real_extents =
- scratch_ranges.GetExtentsForBlockCount(cuts_blocks_needed[i]);
- scratch_ranges.SubtractExtents(real_extents);
-
- // Fix the old dest node w/ the real blocks
- InplaceGenerator::SubstituteBlocks(
- &(*graph)[old_dst], cut.tmp_extents, real_extents);
-
- // Fix the new node w/ the real blocks. Since the new node is just a
- // copy operation, we can replace all the dest extents w/ the real
- // blocks.
- InstallOperation* op = &(*graph)[cut.new_vertex].aop.op;
- op->clear_dst_extents();
- StoreExtents(real_extents, op->mutable_dst_extents());
- }
- return true;
-}
-
-} // namespace
-
-bool InplaceGenerator::AssignTempBlocks(
- Graph* graph,
- const string& new_part,
- BlobFileWriter* blob_file,
- vector<Vertex::Index>* op_indexes,
- vector<vector<Vertex::Index>::size_type>* reverse_op_indexes,
- const vector<CutEdgeVertexes>& cuts) {
- CHECK(!cuts.empty());
-
- // group of cuts w/ the same old_dst:
- vector<CutEdgeVertexes> cuts_group;
-
- for (vector<CutEdgeVertexes>::size_type i = cuts.size() - 1, e = 0; true;
- --i) {
- LOG(INFO) << "Fixing temp blocks in cut " << i
- << ": old dst: " << cuts[i].old_dst
- << " new vertex: " << cuts[i].new_vertex
- << " path: " << (*graph)[cuts[i].old_dst].aop.name;
-
- if (cuts_group.empty() || (cuts_group[0].old_dst == cuts[i].old_dst)) {
- cuts_group.push_back(cuts[i]);
- } else {
- CHECK(!cuts_group.empty());
- TEST_AND_RETURN_FALSE(AssignBlockForAdjoiningCuts(graph,
- new_part,
- blob_file,
- op_indexes,
- reverse_op_indexes,
- cuts_group));
- cuts_group.clear();
- cuts_group.push_back(cuts[i]);
- }
-
- if (i == e) {
- // break out of for() loop
- break;
- }
- }
- CHECK(!cuts_group.empty());
- TEST_AND_RETURN_FALSE(AssignBlockForAdjoiningCuts(
- graph, new_part, blob_file, op_indexes, reverse_op_indexes, cuts_group));
- return true;
-}
-
-bool InplaceGenerator::NoTempBlocksRemain(const Graph& graph) {
- size_t idx = 0;
- for (Graph::const_iterator it = graph.begin(), e = graph.end(); it != e;
- ++it, ++idx) {
- if (!it->valid)
- continue;
- const InstallOperation& op = it->aop.op;
- if (TempBlocksExistInExtents(op.dst_extents()) ||
- TempBlocksExistInExtents(op.src_extents())) {
- LOG(INFO) << "bad extents in node " << idx;
- LOG(INFO) << "so yeah";
- return false;
- }
-
- // Check out-edges:
- for (const auto& edge_prop_pair : it->out_edges) {
- if (TempBlocksExistInExtents(edge_prop_pair.second.extents) ||
- TempBlocksExistInExtents(edge_prop_pair.second.write_extents)) {
- LOG(INFO) << "bad out edge in node " << idx;
- LOG(INFO) << "so yeah";
- return false;
- }
- }
- }
- return true;
-}
-
-bool InplaceGenerator::ConvertCutToFullOp(Graph* graph,
- const CutEdgeVertexes& cut,
- const string& new_part,
- BlobFileWriter* blob_file) {
- // Drop all incoming edges, keep all outgoing edges
-
- // Keep all outgoing edges
- if ((*graph)[cut.old_dst].aop.op.type() != InstallOperation::REPLACE_BZ &&
- (*graph)[cut.old_dst].aop.op.type() != InstallOperation::REPLACE) {
- Vertex::EdgeMap out_edges = (*graph)[cut.old_dst].out_edges;
- graph_utils::DropWriteBeforeDeps(&out_edges);
-
- // Replace the operation with a REPLACE or REPLACE_BZ to generate the same
- // |new_extents| list of blocks and update the graph.
- vector<AnnotatedOperation> new_aop;
- vector<Extent> new_extents;
- ExtentsToVector((*graph)[cut.old_dst].aop.op.dst_extents(), &new_extents);
- TEST_AND_RETURN_FALSE(diff_utils::DeltaReadFile(
- &new_aop,
- "", // old_part
- new_part,
- vector<Extent>(), // old_extents
- new_extents,
- {}, // old_deflates
- {}, // new_deflates
- (*graph)[cut.old_dst].aop.name,
- -1, // chunk_blocks, forces to have a single operation.
- kInPlacePayloadVersion,
- blob_file));
- TEST_AND_RETURN_FALSE(new_aop.size() == 1);
- TEST_AND_RETURN_FALSE(AddInstallOpToGraph(
- graph, cut.old_dst, nullptr, new_aop.front().op, new_aop.front().name));
-
- (*graph)[cut.old_dst].out_edges = out_edges;
-
- // Right now we don't have doubly-linked edges, so we have to scan
- // the whole graph.
- graph_utils::DropIncomingEdgesTo(graph, cut.old_dst);
- }
-
- // Delete temp node
- (*graph)[cut.old_src].out_edges.erase(cut.new_vertex);
- CHECK((*graph)[cut.old_dst].out_edges.find(cut.new_vertex) ==
- (*graph)[cut.old_dst].out_edges.end());
- (*graph)[cut.new_vertex].valid = false;
- LOG(INFO) << "marked node invalid: " << cut.new_vertex;
- return true;
-}
-
-bool InplaceGenerator::ConvertGraphToDag(Graph* graph,
- const string& new_part,
- BlobFileWriter* blob_file,
- vector<Vertex::Index>* final_order,
- Vertex::Index scratch_vertex) {
- CycleBreaker cycle_breaker;
- LOG(INFO) << "Finding cycles...";
- set<Edge> cut_edges;
- cycle_breaker.BreakCycles(*graph, &cut_edges);
- LOG(INFO) << "done finding cycles";
- CheckGraph(*graph);
-
- // Calculate number of scratch blocks needed
-
- LOG(INFO) << "Cutting cycles...";
- vector<CutEdgeVertexes> cuts;
- TEST_AND_RETURN_FALSE(CutEdges(graph, cut_edges, &cuts));
- LOG(INFO) << "done cutting cycles";
- LOG(INFO) << "There are " << cuts.size() << " cuts.";
- CheckGraph(*graph);
-
- LOG(INFO) << "Creating initial topological order...";
- TopologicalSort(*graph, final_order);
- LOG(INFO) << "done with initial topo order";
- CheckGraph(*graph);
-
- LOG(INFO) << "Moving full ops to the back";
- MoveAndSortFullOpsToBack(graph, final_order);
- LOG(INFO) << "done moving full ops to back";
-
- vector<vector<Vertex::Index>::size_type> inverse_final_order;
- GenerateReverseTopoOrderMap(*final_order, &inverse_final_order);
-
- SortCutsByTopoOrder(*final_order, &cuts);
-
- if (!cuts.empty())
- TEST_AND_RETURN_FALSE(AssignTempBlocks(
- graph, new_part, blob_file, final_order, &inverse_final_order, cuts));
- LOG(INFO) << "Making sure all temp blocks have been allocated";
-
- // Remove the scratch node, if any
- if (scratch_vertex != Vertex::kInvalidIndex) {
- final_order->erase(final_order->begin() +
- inverse_final_order[scratch_vertex]);
- (*graph)[scratch_vertex].valid = false;
- GenerateReverseTopoOrderMap(*final_order, &inverse_final_order);
- }
-
- graph_utils::DumpGraph(*graph);
- CHECK(NoTempBlocksRemain(*graph));
- LOG(INFO) << "done making sure all temp blocks are allocated";
- return true;
-}
-
-void InplaceGenerator::CreateScratchNode(uint64_t start_block,
- uint64_t num_blocks,
- Vertex* vertex) {
- vertex->aop.name = "<scratch>";
- vertex->aop.op.set_type(InstallOperation::REPLACE_BZ);
- vertex->aop.op.set_data_offset(0);
- vertex->aop.op.set_data_length(0);
- Extent* extent = vertex->aop.op.add_dst_extents();
- extent->set_start_block(start_block);
- extent->set_num_blocks(num_blocks);
-}
-
-bool InplaceGenerator::AddInstallOpToBlocksVector(
- const InstallOperation& operation,
- const Graph& graph,
- Vertex::Index vertex,
- vector<Block>* blocks) {
- // See if this is already present.
- TEST_AND_RETURN_FALSE(operation.dst_extents_size() > 0);
-
- enum BlockField { READER = 0, WRITER, BLOCK_FIELD_COUNT };
- for (int field = READER; field < BLOCK_FIELD_COUNT; field++) {
- const char* past_participle = (field == READER) ? "read" : "written";
- const google::protobuf::RepeatedPtrField<Extent>& extents =
- (field == READER) ? operation.src_extents() : operation.dst_extents();
- Vertex::Index Block::*access_type =
- (field == READER) ? &Block::reader : &Block::writer;
-
- for (const Extent& extent : extents) {
- for (uint64_t block = extent.start_block();
- block < (extent.start_block() + extent.num_blocks());
- block++) {
- if ((*blocks)[block].*access_type != Vertex::kInvalidIndex) {
- LOG(FATAL) << "Block " << block << " is already " << past_participle
- << " by " << (*blocks)[block].*access_type << "("
- << graph[(*blocks)[block].*access_type].aop.name
- << ") and also " << vertex << "(" << graph[vertex].aop.name
- << ")";
- }
- (*blocks)[block].*access_type = vertex;
- }
- }
- }
- return true;
-}
-
-bool InplaceGenerator::AddInstallOpToGraph(Graph* graph,
- Vertex::Index existing_vertex,
- vector<Block>* blocks,
- const InstallOperation& operation,
- const string& op_name) {
- Vertex::Index vertex = existing_vertex;
- if (vertex == Vertex::kInvalidIndex) {
- graph->emplace_back();
- vertex = graph->size() - 1;
- }
- (*graph)[vertex].aop.op = operation;
- CHECK((*graph)[vertex].aop.op.has_type());
- (*graph)[vertex].aop.name = op_name;
-
- if (blocks)
- TEST_AND_RETURN_FALSE(InplaceGenerator::AddInstallOpToBlocksVector(
- (*graph)[vertex].aop.op, *graph, vertex, blocks));
- return true;
-}
-
-void InplaceGenerator::ApplyMap(vector<uint64_t>* collection,
- const map<uint64_t, uint64_t>& the_map) {
- for (uint64_t& elem : *collection) {
- const auto& map_it = the_map.find(elem);
- if (map_it != the_map.end())
- elem = map_it->second;
- }
-}
-
-bool InplaceGenerator::ResolveReadAfterWriteDependencies(
- const PartitionConfig& old_part,
- const PartitionConfig& new_part,
- uint64_t partition_size,
- size_t block_size,
- BlobFileWriter* blob_file,
- vector<AnnotatedOperation>* aops) {
- // Convert the operations to the graph.
- Graph graph;
- CheckGraph(graph);
- vector<Block> blocks(std::max(old_part.size, new_part.size) / block_size);
- for (const auto& aop : *aops) {
- AddInstallOpToGraph(
- &graph, Vertex::kInvalidIndex, &blocks, aop.op, aop.name);
- }
- CheckGraph(graph);
-
- // Final scratch block (if there's space)
- Vertex::Index scratch_vertex = Vertex::kInvalidIndex;
- if (blocks.size() < (partition_size / block_size)) {
- scratch_vertex = graph.size();
- graph.emplace_back();
- size_t scratch_blocks = (partition_size / block_size) - blocks.size();
- LOG(INFO) << "Added " << scratch_blocks << " scratch space blocks.";
- CreateScratchNode(blocks.size(), scratch_blocks, &graph.back());
- }
- CheckGraph(graph);
-
- LOG(INFO) << "Creating edges...";
- CreateEdges(&graph, blocks);
- LOG(INFO) << "Done creating edges";
- CheckGraph(graph);
-
- vector<Vertex::Index> final_order;
- TEST_AND_RETURN_FALSE(ConvertGraphToDag(
- &graph, new_part.path, blob_file, &final_order, scratch_vertex));
-
- // Copy operations over to the |aops| vector in the final_order generated by
- // the topological sort.
- aops->clear();
- aops->reserve(final_order.size());
- for (const Vertex::Index vertex_index : final_order) {
- const Vertex& vertex = graph[vertex_index];
- aops->push_back(vertex.aop);
- }
- return true;
-}
-
-bool InplaceGenerator::GenerateOperations(const PayloadGenerationConfig& config,
- const PartitionConfig& old_part,
- const PartitionConfig& new_part,
- BlobFileWriter* blob_file,
- vector<AnnotatedOperation>* aops) {
- TEST_AND_RETURN_FALSE(old_part.name == new_part.name);
- TEST_AND_RETURN_FALSE(config.version.major == kInPlacePayloadVersion.major);
- TEST_AND_RETURN_FALSE(config.version.minor == kInPlacePayloadVersion.minor);
-
- ssize_t hard_chunk_blocks =
- (config.hard_chunk_size == -1
- ? -1
- : config.hard_chunk_size / config.block_size);
- size_t soft_chunk_blocks = config.soft_chunk_size / config.block_size;
- uint64_t partition_size = new_part.size;
- if (new_part.name == kPartitionNameRoot)
- partition_size = config.rootfs_partition_size;
-
- LOG(INFO) << "Delta compressing " << new_part.name << " partition...";
- TEST_AND_RETURN_FALSE(diff_utils::DeltaReadPartition(aops,
- old_part,
- new_part,
- hard_chunk_blocks,
- soft_chunk_blocks,
- config.version,
- blob_file));
- LOG(INFO) << "Done reading " << new_part.name;
-
- TEST_AND_RETURN_FALSE(ResolveReadAfterWriteDependencies(
- old_part, new_part, partition_size, config.block_size, blob_file, aops));
- LOG(INFO) << "Done reordering " << new_part.name;
- return true;
-}
-
-}; // namespace chromeos_update_engine
diff --git a/payload_generator/inplace_generator.h b/payload_generator/inplace_generator.h
deleted file mode 100644
index e7298d2..0000000
--- a/payload_generator/inplace_generator.h
+++ /dev/null
@@ -1,240 +0,0 @@
-//
-// Copyright (C) 2015 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#ifndef UPDATE_ENGINE_PAYLOAD_GENERATOR_INPLACE_GENERATOR_H_
-#define UPDATE_ENGINE_PAYLOAD_GENERATOR_INPLACE_GENERATOR_H_
-
-#include <map>
-#include <set>
-#include <string>
-#include <vector>
-
-#include "update_engine/payload_generator/blob_file_writer.h"
-#include "update_engine/payload_generator/delta_diff_generator.h"
-#include "update_engine/payload_generator/graph_types.h"
-#include "update_engine/payload_generator/operations_generator.h"
-
-// InplaceGenerator contains all functionality related to the inplace algorithm
-// for generating update payloads. These are the functions used when delta minor
-// version is 1.
-
-namespace chromeos_update_engine {
-
-// This struct stores all relevant info for an edge that is cut between
-// nodes old_src -> old_dst by creating new vertex new_vertex. The new
-// relationship is:
-// old_src -(read before)-> new_vertex <-(write before)- old_dst
-// new_vertex is a MOVE operation that moves some existing blocks into
-// temp space. The temp extents are, by necessity, stored in new_vertex
-// (as dst extents) and old_dst (as src extents), but they are also broken
-// out into tmp_extents, as the nodes themselves may contain many more
-// extents.
-struct CutEdgeVertexes {
- Vertex::Index new_vertex;
- Vertex::Index old_src;
- Vertex::Index old_dst;
- std::vector<Extent> tmp_extents;
-};
-
-class InplaceGenerator : public OperationsGenerator {
- public:
- // Represents a disk block on the install partition.
- struct Block {
- // During install, each block on the install partition will be written
- // and some may be read (in all likelihood, many will be read).
- // The reading and writing will be performed by InstallOperations,
- // each of which has a corresponding vertex in a graph.
- // A Block object tells which vertex will read or write this block
- // at install time.
- // Generally, there will be a vector of Block objects whose length
- // is the number of blocks on the install partition.
- Block() : reader(Vertex::kInvalidIndex), writer(Vertex::kInvalidIndex) {}
- Vertex::Index reader;
- Vertex::Index writer;
- };
-
- InplaceGenerator() = default;
-
- // Checks all the operations in the graph have a type assigned.
- static void CheckGraph(const Graph& graph);
-
- // Modifies blocks read by 'op' so that any blocks referred to by
- // 'remove_extents' are replaced with blocks from 'replace_extents'.
- // 'remove_extents' and 'replace_extents' must be the same number of blocks.
- // Blocks will be substituted in the order listed in the vectors.
- // E.g. if 'op' reads blocks 1, 2, 3, 4, 5, 6, 7, 8, remove_extents
- // contains blocks 6, 2, 3, 5, and replace blocks contains
- // 12, 13, 14, 15, then op will be changed to read from:
- // 1, 13, 14, 4, 15, 12, 7, 8
- static void SubstituteBlocks(Vertex* vertex,
- const std::vector<Extent>& remove_extents,
- const std::vector<Extent>& replace_extents);
-
- // Cuts 'edges' from 'graph' according to the AU algorithm. This means
- // for each edge A->B, remove the dependency that B occur before A.
- // Do this by creating a new operation X that copies from the blocks
- // specified by the edge's properties to temp space T. Modify B to read
- // from T rather than the blocks in the edge. Modify A to depend on X,
- // but not on B. Free space is found by looking in 'blocks'.
- // Returns true on success.
- static bool CutEdges(Graph* graph,
- const std::set<Edge>& edges,
- std::vector<CutEdgeVertexes>* out_cuts);
-
- // Creates all the edges for the graph. Writers of a block point to
- // readers of the same block. This is because for an edge A->B, B
- // must complete before A executes.
- static void CreateEdges(Graph* graph, const std::vector<Block>& blocks);
-
- // Takes |op_indexes|, which is effectively a mapping from order in
- // which the op is performed -> graph vertex index, and produces the
- // reverse: a mapping from graph vertex index -> op_indexes index.
- static void GenerateReverseTopoOrderMap(
- const std::vector<Vertex::Index>& op_indexes,
- std::vector<std::vector<Vertex::Index>::size_type>* reverse_op_indexes);
-
- // Sorts the vector |cuts| by its |cuts[].old_dest| member. Order is
- // determined by the order of elements in op_indexes.
- static void SortCutsByTopoOrder(const std::vector<Vertex::Index>& op_indexes,
- std::vector<CutEdgeVertexes>* cuts);
-
- // Given a topologically sorted graph |op_indexes| and |graph|, alters
- // |op_indexes| to move all the full operations to the end of the vector.
- // Full operations should not be depended on, so this is safe.
- static void MoveAndSortFullOpsToBack(Graph* graph,
- std::vector<Vertex::Index>* op_indexes);
-
- // Returns true iff there are no extents in the graph that refer to temp
- // blocks. Temp blocks are in the range [kTempBlockStart, kSparseHole).
- static bool NoTempBlocksRemain(const Graph& graph);
-
- // Takes a |graph|, which has edges that must be cut, as listed in
- // |cuts|. Cuts the edges. Maintains a list in which the operations
- // will be performed (in |op_indexes|) and the reverse (in
- // |reverse_op_indexes|). Cutting edges requires scratch space, and
- // if insufficient scratch is found, the file is reread and will be
- // send down (either as REPLACE or REPLACE_BZ). Returns true on
- // success.
- static bool AssignTempBlocks(
- Graph* graph,
- const std::string& new_part,
- BlobFileWriter* blob_file,
- std::vector<Vertex::Index>* op_indexes,
- std::vector<std::vector<Vertex::Index>::size_type>* reverse_op_indexes,
- const std::vector<CutEdgeVertexes>& cuts);
-
- // Handles allocation of temp blocks to a cut edge by converting the
- // dest node to a full op. This removes the need for temp blocks, but
- // comes at the cost of a worse compression ratio.
- // For example, say we have A->B->A. It would first be cut to form:
- // A->B->N<-A, where N copies blocks to temp space. If there are no
- // temp blocks, this function can be called to convert it to the form:
- // A->B. Now, A is a full operation.
- static bool ConvertCutToFullOp(Graph* graph,
- const CutEdgeVertexes& cut,
- const std::string& new_part,
- BlobFileWriter* blob_file);
-
- // Takes a graph, which is not a DAG, which represents the files just
- // read from disk, and converts it into a DAG by breaking all cycles
- // and finding temp space to resolve broken edges.
- // The final order of the nodes is given in |final_order|
- // Some files may need to be reread from disk, thus |fd| and
- // |data_file_size| are be passed.
- // If |scratch_vertex| is not kInvalidIndex, removes it from
- // |final_order| before returning.
- // Returns true on success.
- static bool ConvertGraphToDag(Graph* graph,
- const std::string& new_part,
- BlobFileWriter* blob_file,
- std::vector<Vertex::Index>* final_order,
- Vertex::Index scratch_vertex);
-
- // Creates a dummy REPLACE_BZ node in the given |vertex|. This can be used
- // to provide scratch space. The node writes |num_blocks| blocks starting at
- // |start_block|The node should be marked invalid before writing all nodes to
- // the output file.
- static void CreateScratchNode(uint64_t start_block,
- uint64_t num_blocks,
- Vertex* vertex);
-
- // The |blocks| vector contains a reader and writer for each block on the
- // filesystem that's being in-place updated. We populate the reader/writer
- // fields of |blocks| by calling this function.
- // For each block in |operation| that is read or written, find that block
- // in |blocks| and set the reader/writer field to the vertex passed.
- // |graph| is not strictly necessary, but useful for printing out
- // error messages.
- static bool AddInstallOpToBlocksVector(const InstallOperation& operation,
- const Graph& graph,
- Vertex::Index vertex,
- std::vector<Block>* blocks);
-
- // Add a vertex (if |existing_vertex| is kInvalidVertex) or update an
- // |existing_vertex| with the passed |operation|.
- // This method will also register the vertex as the reader or writer of the
- // blocks involved in the operation updating the |blocks| vector. The
- // |op_name| associated with the Vertex is used for logging purposes.
- static bool AddInstallOpToGraph(Graph* graph,
- Vertex::Index existing_vertex,
- std::vector<Block>* blocks,
- const InstallOperation& operation,
- const std::string& op_name);
-
- // Apply the transformation stored in |the_map| to the |collection| vector
- // replacing the map keys found in |collection| with its associated value in
- // |the_map|.
- static void ApplyMap(std::vector<uint64_t>* collection,
- const std::map<uint64_t, uint64_t>& the_map);
-
- // Resolve all read-after-write dependencies in the operation list |aops|. The
- // operations in |aops| are such that they generate the desired |new_part| if
- // applied reading always from the original image. This function reorders the
- // operations and generates new operations when needed to make these
- // operations produce the same |new_part| result when applied in-place.
- // The new operations will create blobs in |data_file_fd| and update
- // the file size pointed by |data_file_size| if needed.
- // On success, stores the new operations in |aops| in the right order and
- // returns true.
- static bool ResolveReadAfterWriteDependencies(
- const PartitionConfig& old_part,
- const PartitionConfig& new_part,
- uint64_t partition_size,
- size_t block_size,
- BlobFileWriter* blob_file,
- std::vector<AnnotatedOperation>* aops);
-
- // Generate the update payload operations for the given partition using
- // only operations that read from the target and/or write to the target,
- // hence, applying the payload "in-place" in the target partition. This method
- // assumes that the contents of the source image are pre-copied to the target
- // partition, up to the size of the source image. Use this method to generate
- // a delta update with the minor version kInPlaceMinorPayloadVersion.
- // The operations are stored in |aops|. All the offsets in the operations
- // reference the data written to |blob_file|.
- bool GenerateOperations(const PayloadGenerationConfig& config,
- const PartitionConfig& old_part,
- const PartitionConfig& new_part,
- BlobFileWriter* blob_file,
- std::vector<AnnotatedOperation>* aops) override;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(InplaceGenerator);
-};
-
-}; // namespace chromeos_update_engine
-
-#endif // UPDATE_ENGINE_PAYLOAD_GENERATOR_INPLACE_GENERATOR_H_
diff --git a/payload_generator/inplace_generator_unittest.cc b/payload_generator/inplace_generator_unittest.cc
deleted file mode 100644
index 8028f36..0000000
--- a/payload_generator/inplace_generator_unittest.cc
+++ /dev/null
@@ -1,752 +0,0 @@
-//
-// Copyright (C) 2015 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#include "update_engine/payload_generator/inplace_generator.h"
-
-#include <map>
-#include <memory>
-#include <set>
-#include <sstream>
-#include <string>
-#include <utility>
-#include <vector>
-
-#include <base/format_macros.h>
-#include <base/logging.h>
-#include <base/strings/string_util.h>
-#include <base/strings/stringprintf.h>
-#include <gtest/gtest.h>
-
-#include "update_engine/common/test_utils.h"
-#include "update_engine/common/utils.h"
-#include "update_engine/payload_generator/cycle_breaker.h"
-#include "update_engine/payload_generator/delta_diff_generator.h"
-#include "update_engine/payload_generator/delta_diff_utils.h"
-#include "update_engine/payload_generator/extent_ranges.h"
-#include "update_engine/payload_generator/graph_types.h"
-#include "update_engine/payload_generator/graph_utils.h"
-
-using std::map;
-using std::set;
-using std::string;
-using std::stringstream;
-using std::vector;
-
-namespace chromeos_update_engine {
-
-using Block = InplaceGenerator::Block;
-
-namespace {
-
-void GenVertex(Vertex* out,
- const vector<Extent>& src_extents,
- const vector<Extent>& dst_extents,
- const string& path,
- InstallOperation::Type type) {
- out->aop.op.set_type(type);
- out->aop.name = path;
- StoreExtents(src_extents, out->aop.op.mutable_src_extents());
- StoreExtents(dst_extents, out->aop.op.mutable_dst_extents());
-}
-
-vector<Extent> VectOfExt(uint64_t start_block, uint64_t num_blocks) {
- return vector<Extent>(1, ExtentForRange(start_block, num_blocks));
-}
-
-EdgeProperties EdgeWithReadDep(const vector<Extent>& extents) {
- EdgeProperties ret;
- ret.extents = extents;
- return ret;
-}
-
-EdgeProperties EdgeWithWriteDep(const vector<Extent>& extents) {
- EdgeProperties ret;
- ret.write_extents = extents;
- return ret;
-}
-
-template <typename T>
-void DumpVect(const vector<T>& vect) {
- stringstream ss(stringstream::out);
- for (typename vector<T>::const_iterator it = vect.begin(), e = vect.end();
- it != e;
- ++it) {
- ss << *it << ", ";
- }
- LOG(INFO) << "{" << ss.str() << "}";
-}
-
-void AppendExtent(vector<Extent>* vect, uint64_t start, uint64_t length) {
- vect->resize(vect->size() + 1);
- vect->back().set_start_block(start);
- vect->back().set_num_blocks(length);
-}
-
-void OpAppendExtent(InstallOperation* op, uint64_t start, uint64_t length) {
- Extent* extent = op->add_src_extents();
- extent->set_start_block(start);
- extent->set_num_blocks(length);
-}
-
-} // namespace
-
-class InplaceGeneratorTest : public ::testing::Test {
- protected:
- // Initialize |blob_path_|, |blob_file_size_| and |blob_file_fd_| variables
- // with a new blob file. The file is closed and removed automatically when
- // the test finishes.
- void CreateBlobFile() {
- // blob_fd_closer_ takes a pointer to blob_fd_. Make sure we destroy a
- // previous instance before overriding blob_fd_.
- blob_fd_closer_.reset();
- EXPECT_TRUE(utils::MakeTempFile(
- "InplaceGenerator_blob_file.XXXXXX", &blob_path_, &blob_fd_));
- blob_path_unlinker_.reset(new ScopedPathUnlinker(blob_path_));
- blob_fd_closer_.reset(new ScopedFdCloser(&blob_fd_));
- blob_file_size_ = 0;
- EXPECT_GE(blob_fd_, 0);
- blob_file_.reset(new BlobFileWriter(blob_fd_, &blob_file_size_));
- }
-
- // Dump the list of operations |aops| in case of test failure.
- void DumpAopsOnFailure(const vector<AnnotatedOperation>& aops) {
- if (HasNonfatalFailure()) {
- LOG(INFO) << "Result operation list:";
- for (const auto& aop : aops) {
- LOG(INFO) << aop;
- }
- }
- }
-
- // Blob file name, file descriptor and file size used to store operation
- // blobs.
- string blob_path_;
- int blob_fd_{-1};
- off_t blob_file_size_{0};
- std::unique_ptr<BlobFileWriter> blob_file_;
- std::unique_ptr<ScopedPathUnlinker> blob_path_unlinker_;
- std::unique_ptr<ScopedFdCloser> blob_fd_closer_;
-};
-
-TEST_F(InplaceGeneratorTest, BlockDefaultValues) {
- // Tests that a Block is initialized with the default values as a
- // Vertex::kInvalidIndex. This is required by the delta generators.
- Block block;
- EXPECT_EQ(Vertex::kInvalidIndex, block.reader);
- EXPECT_EQ(Vertex::kInvalidIndex, block.writer);
-}
-
-TEST_F(InplaceGeneratorTest, SubstituteBlocksTest) {
- vector<Extent> remove_blocks;
- AppendExtent(&remove_blocks, 3, 3);
- AppendExtent(&remove_blocks, 7, 1);
- vector<Extent> replace_blocks;
- AppendExtent(&replace_blocks, 10, 2);
- AppendExtent(&replace_blocks, 13, 2);
- Vertex vertex;
- InstallOperation& op = vertex.aop.op;
- OpAppendExtent(&op, 4, 3);
- OpAppendExtent(&op, kSparseHole, 4); // Sparse hole in file
- OpAppendExtent(&op, 3, 1);
- OpAppendExtent(&op, 7, 3);
-
- InplaceGenerator::SubstituteBlocks(&vertex, remove_blocks, replace_blocks);
-
- EXPECT_EQ(7, op.src_extents_size());
- EXPECT_EQ(11U, op.src_extents(0).start_block());
- EXPECT_EQ(1U, op.src_extents(0).num_blocks());
- EXPECT_EQ(13U, op.src_extents(1).start_block());
- EXPECT_EQ(1U, op.src_extents(1).num_blocks());
- EXPECT_EQ(6U, op.src_extents(2).start_block());
- EXPECT_EQ(1U, op.src_extents(2).num_blocks());
- EXPECT_EQ(kSparseHole, op.src_extents(3).start_block());
- EXPECT_EQ(4U, op.src_extents(3).num_blocks());
- EXPECT_EQ(10U, op.src_extents(4).start_block());
- EXPECT_EQ(1U, op.src_extents(4).num_blocks());
- EXPECT_EQ(14U, op.src_extents(5).start_block());
- EXPECT_EQ(1U, op.src_extents(5).num_blocks());
- EXPECT_EQ(8U, op.src_extents(6).start_block());
- EXPECT_EQ(2U, op.src_extents(6).num_blocks());
-}
-
-TEST_F(InplaceGeneratorTest, CutEdgesTest) {
- Graph graph;
- vector<Block> blocks(9);
-
- // Create nodes in graph
- {
- graph.resize(graph.size() + 1);
- graph.back().aop.op.set_type(InstallOperation::MOVE);
- // Reads from blocks 3, 5, 7
- vector<Extent> extents;
- AppendBlockToExtents(&extents, 3);
- AppendBlockToExtents(&extents, 5);
- AppendBlockToExtents(&extents, 7);
- StoreExtents(extents, graph.back().aop.op.mutable_src_extents());
- blocks[3].reader = graph.size() - 1;
- blocks[5].reader = graph.size() - 1;
- blocks[7].reader = graph.size() - 1;
-
- // Writes to blocks 1, 2, 4
- extents.clear();
- AppendBlockToExtents(&extents, 1);
- AppendBlockToExtents(&extents, 2);
- AppendBlockToExtents(&extents, 4);
- StoreExtents(extents, graph.back().aop.op.mutable_dst_extents());
- blocks[1].writer = graph.size() - 1;
- blocks[2].writer = graph.size() - 1;
- blocks[4].writer = graph.size() - 1;
- }
- {
- graph.resize(graph.size() + 1);
- graph.back().aop.op.set_type(InstallOperation::MOVE);
- // Reads from blocks 1, 2, 4
- vector<Extent> extents;
- AppendBlockToExtents(&extents, 1);
- AppendBlockToExtents(&extents, 2);
- AppendBlockToExtents(&extents, 4);
- StoreExtents(extents, graph.back().aop.op.mutable_src_extents());
- blocks[1].reader = graph.size() - 1;
- blocks[2].reader = graph.size() - 1;
- blocks[4].reader = graph.size() - 1;
-
- // Writes to blocks 3, 5, 6
- extents.clear();
- AppendBlockToExtents(&extents, 3);
- AppendBlockToExtents(&extents, 5);
- AppendBlockToExtents(&extents, 6);
- StoreExtents(extents, graph.back().aop.op.mutable_dst_extents());
- blocks[3].writer = graph.size() - 1;
- blocks[5].writer = graph.size() - 1;
- blocks[6].writer = graph.size() - 1;
- }
-
- // Create edges
- InplaceGenerator::CreateEdges(&graph, blocks);
-
- // Find cycles
- CycleBreaker cycle_breaker;
- set<Edge> cut_edges;
- cycle_breaker.BreakCycles(graph, &cut_edges);
-
- EXPECT_EQ(1U, cut_edges.size());
- EXPECT_TRUE(cut_edges.end() !=
- cut_edges.find(std::pair<Vertex::Index, Vertex::Index>(1, 0)));
-
- vector<CutEdgeVertexes> cuts;
- EXPECT_TRUE(InplaceGenerator::CutEdges(&graph, cut_edges, &cuts));
-
- EXPECT_EQ(3U, graph.size());
-
- // Check new node in graph:
- EXPECT_EQ(InstallOperation::MOVE, graph.back().aop.op.type());
- EXPECT_EQ(2, graph.back().aop.op.src_extents_size());
- EXPECT_EQ(1, graph.back().aop.op.dst_extents_size());
- EXPECT_EQ(kTempBlockStart, graph.back().aop.op.dst_extents(0).start_block());
- EXPECT_EQ(2U, graph.back().aop.op.dst_extents(0).num_blocks());
- EXPECT_TRUE(graph.back().out_edges.empty());
-
- // Check that old node reads from new blocks
- EXPECT_EQ(2, graph[0].aop.op.src_extents_size());
- EXPECT_EQ(kTempBlockStart, graph[0].aop.op.src_extents(0).start_block());
- EXPECT_EQ(2U, graph[0].aop.op.src_extents(0).num_blocks());
- EXPECT_EQ(7U, graph[0].aop.op.src_extents(1).start_block());
- EXPECT_EQ(1U, graph[0].aop.op.src_extents(1).num_blocks());
-
- // And that the old dst extents haven't changed
- EXPECT_EQ(2, graph[0].aop.op.dst_extents_size());
- EXPECT_EQ(1U, graph[0].aop.op.dst_extents(0).start_block());
- EXPECT_EQ(2U, graph[0].aop.op.dst_extents(0).num_blocks());
- EXPECT_EQ(4U, graph[0].aop.op.dst_extents(1).start_block());
- EXPECT_EQ(1U, graph[0].aop.op.dst_extents(1).num_blocks());
-
- // Ensure it only depends on the next node and the new temp node
- EXPECT_EQ(2U, graph[0].out_edges.size());
- EXPECT_TRUE(graph[0].out_edges.end() != graph[0].out_edges.find(1));
- EXPECT_TRUE(graph[0].out_edges.end() !=
- graph[0].out_edges.find(graph.size() - 1));
-
- // Check second node has unchanged extents
- EXPECT_EQ(2, graph[1].aop.op.src_extents_size());
- EXPECT_EQ(1U, graph[1].aop.op.src_extents(0).start_block());
- EXPECT_EQ(2U, graph[1].aop.op.src_extents(0).num_blocks());
- EXPECT_EQ(4U, graph[1].aop.op.src_extents(1).start_block());
- EXPECT_EQ(1U, graph[1].aop.op.src_extents(1).num_blocks());
-
- EXPECT_EQ(2, graph[1].aop.op.dst_extents_size());
- EXPECT_EQ(3U, graph[1].aop.op.dst_extents(0).start_block());
- EXPECT_EQ(1U, graph[1].aop.op.dst_extents(0).num_blocks());
- EXPECT_EQ(5U, graph[1].aop.op.dst_extents(1).start_block());
- EXPECT_EQ(2U, graph[1].aop.op.dst_extents(1).num_blocks());
-
- // Ensure it only depends on the next node
- EXPECT_EQ(1U, graph[1].out_edges.size());
- EXPECT_TRUE(graph[1].out_edges.end() != graph[1].out_edges.find(2));
-}
-
-TEST_F(InplaceGeneratorTest, AssignTempBlocksReuseTest) {
- Graph graph(9);
-
- const vector<Extent> empt;
- uint64_t tmp = kTempBlockStart;
- const string kFilename = "/foo";
-
- vector<CutEdgeVertexes> cuts;
- cuts.resize(3);
-
- // Simple broken loop:
- GenVertex(
- &graph[0], VectOfExt(0, 1), VectOfExt(1, 1), "", InstallOperation::MOVE);
- GenVertex(&graph[1],
- VectOfExt(tmp, 1),
- VectOfExt(0, 1),
- "",
- InstallOperation::MOVE);
- GenVertex(&graph[2],
- VectOfExt(1, 1),
- VectOfExt(tmp, 1),
- "",
- InstallOperation::MOVE);
- // Corresponding edges:
- graph[0].out_edges[2] = EdgeWithReadDep(VectOfExt(1, 1));
- graph[1].out_edges[2] = EdgeWithWriteDep(VectOfExt(tmp, 1));
- graph[1].out_edges[0] = EdgeWithReadDep(VectOfExt(0, 1));
- // Store the cut:
- cuts[0].old_dst = 1;
- cuts[0].old_src = 0;
- cuts[0].new_vertex = 2;
- cuts[0].tmp_extents = VectOfExt(tmp, 1);
- tmp++;
-
- // Slightly more complex pair of loops:
- GenVertex(
- &graph[3], VectOfExt(4, 2), VectOfExt(2, 2), "", InstallOperation::MOVE);
- GenVertex(
- &graph[4], VectOfExt(6, 1), VectOfExt(7, 1), "", InstallOperation::MOVE);
- GenVertex(&graph[5],
- VectOfExt(tmp, 3),
- VectOfExt(4, 3),
- kFilename,
- InstallOperation::MOVE);
- GenVertex(&graph[6],
- VectOfExt(2, 2),
- VectOfExt(tmp, 2),
- "",
- InstallOperation::MOVE);
- GenVertex(&graph[7],
- VectOfExt(7, 1),
- VectOfExt(tmp + 2, 1),
- "",
- InstallOperation::MOVE);
- // Corresponding edges:
- graph[3].out_edges[6] = EdgeWithReadDep(VectOfExt(2, 2));
- graph[4].out_edges[7] = EdgeWithReadDep(VectOfExt(7, 1));
- graph[5].out_edges[6] = EdgeWithWriteDep(VectOfExt(tmp, 2));
- graph[5].out_edges[7] = EdgeWithWriteDep(VectOfExt(tmp + 2, 1));
- graph[5].out_edges[3] = EdgeWithReadDep(VectOfExt(4, 2));
- graph[5].out_edges[4] = EdgeWithReadDep(VectOfExt(6, 1));
- // Store the cuts:
- cuts[1].old_dst = 5;
- cuts[1].old_src = 3;
- cuts[1].new_vertex = 6;
- cuts[1].tmp_extents = VectOfExt(tmp, 2);
- cuts[2].old_dst = 5;
- cuts[2].old_src = 4;
- cuts[2].new_vertex = 7;
- cuts[2].tmp_extents = VectOfExt(tmp + 2, 1);
-
- // Supplier of temp block:
- GenVertex(&graph[8], empt, VectOfExt(8, 1), "", InstallOperation::REPLACE);
-
- // Specify the final order:
- vector<Vertex::Index> op_indexes;
- op_indexes.push_back(2);
- op_indexes.push_back(0);
- op_indexes.push_back(1);
- op_indexes.push_back(6);
- op_indexes.push_back(3);
- op_indexes.push_back(7);
- op_indexes.push_back(4);
- op_indexes.push_back(5);
- op_indexes.push_back(8);
-
- vector<vector<Vertex::Index>::size_type> reverse_op_indexes;
- InplaceGenerator::GenerateReverseTopoOrderMap(op_indexes,
- &reverse_op_indexes);
-
- CreateBlobFile();
- EXPECT_TRUE(InplaceGenerator::AssignTempBlocks(&graph,
- "/dev/zero",
- blob_file_.get(),
- &op_indexes,
- &reverse_op_indexes,
- cuts));
- EXPECT_FALSE(graph[6].valid);
- EXPECT_FALSE(graph[7].valid);
- EXPECT_EQ(1, graph[1].aop.op.src_extents_size());
- EXPECT_EQ(2U, graph[1].aop.op.src_extents(0).start_block());
- EXPECT_EQ(1U, graph[1].aop.op.src_extents(0).num_blocks());
- EXPECT_EQ(InstallOperation::REPLACE_BZ, graph[5].aop.op.type());
-}
-
-TEST_F(InplaceGeneratorTest, MoveAndSortFullOpsToBackTest) {
- Graph graph(4);
- graph[0].aop.name = "A";
- graph[0].aop.op.set_type(InstallOperation::REPLACE);
- graph[1].aop.name = "B";
- graph[1].aop.op.set_type(InstallOperation::BSDIFF);
- graph[2].aop.name = "C";
- graph[2].aop.op.set_type(InstallOperation::REPLACE_BZ);
- graph[3].aop.name = "D";
- graph[3].aop.op.set_type(InstallOperation::MOVE);
-
- vector<Vertex::Index> vect(graph.size());
-
- for (vector<Vertex::Index>::size_type i = 0; i < vect.size(); ++i) {
- vect[i] = i;
- }
- InplaceGenerator::MoveAndSortFullOpsToBack(&graph, &vect);
- EXPECT_EQ(vect.size(), graph.size());
- EXPECT_EQ(graph[vect[0]].aop.name, "B");
- EXPECT_EQ(graph[vect[1]].aop.name, "D");
- EXPECT_EQ(graph[vect[2]].aop.name, "A");
- EXPECT_EQ(graph[vect[3]].aop.name, "C");
-}
-
-TEST_F(InplaceGeneratorTest, AssignTempBlocksTest) {
- Graph graph(9);
- const vector<Extent> empt; // empty
- const string kFilename = "/foo";
-
- // Some scratch space:
- GenVertex(&graph[0], empt, VectOfExt(200, 1), "", InstallOperation::REPLACE);
- GenVertex(&graph[1], empt, VectOfExt(210, 10), "", InstallOperation::REPLACE);
- GenVertex(&graph[2], empt, VectOfExt(220, 1), "", InstallOperation::REPLACE);
-
- // A cycle that requires 10 blocks to break:
- GenVertex(&graph[3],
- VectOfExt(10, 11),
- VectOfExt(0, 9),
- "",
- InstallOperation::BSDIFF);
- graph[3].out_edges[4] = EdgeWithReadDep(VectOfExt(0, 9));
- GenVertex(&graph[4],
- VectOfExt(0, 9),
- VectOfExt(10, 11),
- "",
- InstallOperation::BSDIFF);
- graph[4].out_edges[3] = EdgeWithReadDep(VectOfExt(10, 11));
-
- // A cycle that requires 9 blocks to break:
- GenVertex(&graph[5],
- VectOfExt(40, 11),
- VectOfExt(30, 10),
- "",
- InstallOperation::BSDIFF);
- graph[5].out_edges[6] = EdgeWithReadDep(VectOfExt(30, 10));
- GenVertex(&graph[6],
- VectOfExt(30, 10),
- VectOfExt(40, 11),
- "",
- InstallOperation::BSDIFF);
- graph[6].out_edges[5] = EdgeWithReadDep(VectOfExt(40, 11));
-
- // A cycle that requires 40 blocks to break (which is too many):
- GenVertex(&graph[7],
- VectOfExt(120, 50),
- VectOfExt(60, 40),
- "",
- InstallOperation::BSDIFF);
- graph[7].out_edges[8] = EdgeWithReadDep(VectOfExt(60, 40));
- GenVertex(&graph[8],
- VectOfExt(60, 40),
- VectOfExt(120, 50),
- kFilename,
- InstallOperation::BSDIFF);
- graph[8].out_edges[7] = EdgeWithReadDep(VectOfExt(120, 50));
-
- graph_utils::DumpGraph(graph);
-
- vector<Vertex::Index> final_order;
-
- CreateBlobFile();
- EXPECT_TRUE(InplaceGenerator::ConvertGraphToDag(&graph,
- "/dev/zero",
- blob_file_.get(),
- &final_order,
- Vertex::kInvalidIndex));
-
- Graph expected_graph(12);
- GenVertex(&expected_graph[0],
- empt,
- VectOfExt(200, 1),
- "",
- InstallOperation::REPLACE);
- GenVertex(&expected_graph[1],
- empt,
- VectOfExt(210, 10),
- "",
- InstallOperation::REPLACE);
- GenVertex(&expected_graph[2],
- empt,
- VectOfExt(220, 1),
- "",
- InstallOperation::REPLACE);
- GenVertex(&expected_graph[3],
- VectOfExt(10, 11),
- VectOfExt(0, 9),
- "",
- InstallOperation::BSDIFF);
- expected_graph[3].out_edges[9] = EdgeWithReadDep(VectOfExt(0, 9));
- GenVertex(&expected_graph[4],
- VectOfExt(60, 9),
- VectOfExt(10, 11),
- "",
- InstallOperation::BSDIFF);
- expected_graph[4].out_edges[3] = EdgeWithReadDep(VectOfExt(10, 11));
- expected_graph[4].out_edges[9] = EdgeWithWriteDep(VectOfExt(60, 9));
- GenVertex(&expected_graph[5],
- VectOfExt(40, 11),
- VectOfExt(30, 10),
- "",
- InstallOperation::BSDIFF);
- expected_graph[5].out_edges[10] = EdgeWithReadDep(VectOfExt(30, 10));
-
- GenVertex(&expected_graph[6],
- VectOfExt(60, 10),
- VectOfExt(40, 11),
- "",
- InstallOperation::BSDIFF);
- expected_graph[6].out_edges[5] = EdgeWithReadDep(VectOfExt(40, 11));
- expected_graph[6].out_edges[10] = EdgeWithWriteDep(VectOfExt(60, 10));
-
- GenVertex(&expected_graph[7],
- VectOfExt(120, 50),
- VectOfExt(60, 40),
- "",
- InstallOperation::BSDIFF);
- expected_graph[7].out_edges[6] = EdgeWithReadDep(VectOfExt(60, 10));
-
- GenVertex(&expected_graph[8],
- empt,
- VectOfExt(0, 50),
- "/foo",
- InstallOperation::REPLACE_BZ);
- expected_graph[8].out_edges[7] = EdgeWithReadDep(VectOfExt(120, 50));
-
- GenVertex(&expected_graph[9],
- VectOfExt(0, 9),
- VectOfExt(60, 9),
- "",
- InstallOperation::MOVE);
-
- GenVertex(&expected_graph[10],
- VectOfExt(30, 10),
- VectOfExt(60, 10),
- "",
- InstallOperation::MOVE);
- expected_graph[10].out_edges[4] = EdgeWithReadDep(VectOfExt(60, 9));
-
- EXPECT_EQ(12U, graph.size());
- EXPECT_FALSE(graph.back().valid);
- for (Graph::size_type i = 0; i < graph.size() - 1; i++) {
- EXPECT_TRUE(graph[i].out_edges == expected_graph[i].out_edges);
- if (i == 8) {
- // special case
- } else {
- // EXPECT_TRUE(graph[i] == expected_graph[i]) << "i = " << i;
- }
- }
-}
-
-TEST_F(InplaceGeneratorTest, CreateScratchNodeTest) {
- Vertex vertex;
- InplaceGenerator::CreateScratchNode(12, 34, &vertex);
- EXPECT_EQ(InstallOperation::REPLACE_BZ, vertex.aop.op.type());
- EXPECT_EQ(0U, vertex.aop.op.data_offset());
- EXPECT_EQ(0U, vertex.aop.op.data_length());
- EXPECT_EQ(1, vertex.aop.op.dst_extents_size());
- EXPECT_EQ(12U, vertex.aop.op.dst_extents(0).start_block());
- EXPECT_EQ(34U, vertex.aop.op.dst_extents(0).num_blocks());
-}
-
-TEST_F(InplaceGeneratorTest, ApplyMapTest) {
- vector<uint64_t> collection = {1, 2, 3, 4, 6};
- vector<uint64_t> expected_values = {1, 2, 5, 4, 8};
- map<uint64_t, uint64_t> value_map;
- value_map[3] = 5;
- value_map[6] = 8;
- value_map[5] = 10;
-
- InplaceGenerator::ApplyMap(&collection, value_map);
- EXPECT_EQ(expected_values, collection);
-}
-
-// We can't produce MOVE operations with a source or destination in the block 0.
-// This test checks that the cycle breaker procedure doesn't produce such
-// operations.
-TEST_F(InplaceGeneratorTest, ResolveReadAfterWriteDependenciesAvoidMoveToZero) {
- size_t block_size = 4096;
- size_t num_blocks = 4;
- vector<AnnotatedOperation> aops;
-
- // Create a REPLACE_BZ for block 0, and a circular dependency among all other
- // blocks. This situation would prefer to issue a MOVE to scratch space and
- // the only available block is 0.
- aops.emplace_back();
- aops.back().name = base::StringPrintf("<bz-block-0>");
- aops.back().op.set_type(InstallOperation::REPLACE_BZ);
- StoreExtents({ExtentForRange(0, 1)}, aops.back().op.mutable_dst_extents());
-
- for (size_t i = 1; i < num_blocks; i++) {
- AnnotatedOperation aop;
- aop.name = base::StringPrintf("<op-%" PRIuS ">", i);
- aop.op.set_type(InstallOperation::BSDIFF);
- StoreExtents({ExtentForRange(1 + i % (num_blocks - 1), 1)},
- aop.op.mutable_src_extents());
- StoreExtents({ExtentForRange(i, 1)}, aop.op.mutable_dst_extents());
- aops.push_back(aop);
- }
-
- PartitionConfig part("part");
- part.path = "/dev/zero";
- part.size = num_blocks * block_size;
-
- CreateBlobFile();
-
- // We ran two tests here. The first one without enough blocks for the scratch
- // space, forcing it to create a new full operation and the second case with
- // one extra block in the partition that can be used for the move operation.
- for (const auto part_blocks : vector<uint64_t>{num_blocks, num_blocks + 1}) {
- SCOPED_TRACE(
- base::StringPrintf("Using partition_blocks=%" PRIu64, part_blocks));
- vector<AnnotatedOperation> result_aops = aops;
- EXPECT_TRUE(InplaceGenerator::ResolveReadAfterWriteDependencies(
- part,
- part,
- part_blocks * block_size,
- block_size,
- blob_file_.get(),
- &result_aops));
-
- size_t full_ops = 0;
- for (const auto& aop : result_aops) {
- if (diff_utils::IsAReplaceOperation(aop.op.type()))
- full_ops++;
-
- if (aop.op.type() != InstallOperation::MOVE)
- continue;
- for (const Extent& extent : aop.op.src_extents()) {
- EXPECT_NE(0U, extent.start_block())
- << "On src extents for aop: " << aop;
- }
- for (const Extent& extent : aop.op.dst_extents()) {
- EXPECT_NE(0U, extent.start_block())
- << "On dst extents for aop: " << aop;
- }
- }
-
- // If there's extra space in the partition, it should not use a new full
- // operation for it.
- EXPECT_EQ(part_blocks == num_blocks ? 2U : 1U, full_ops);
-
- DumpAopsOnFailure(result_aops);
- }
-}
-
-// Test that we can shrink a filesystem and break cycles.
-TEST_F(InplaceGeneratorTest, ResolveReadAfterWriteDependenciesShrinkData) {
- size_t block_size = 4096;
- size_t old_blocks = 10;
- size_t new_blocks = 8;
- vector<AnnotatedOperation> aops;
-
- // Create a loop using the blocks 1-6 and one other operation writing to the
- // block 7 from outside the new partition. The loop in the blocks 1-6 uses
- // two-block operations, so it needs two blocks of scratch space. It can't use
- // the block 0 as scratch space (see previous test) and it can't use the
- // blocks 7 or 8 due the last move operation.
-
- aops.emplace_back();
- aops.back().name = base::StringPrintf("<bz-block-0>");
- aops.back().op.set_type(InstallOperation::REPLACE_BZ);
- StoreExtents({ExtentForRange(0, 1)}, aops.back().op.mutable_dst_extents());
-
- const size_t num_ops = 3;
- for (size_t i = 0; i < num_ops; i++) {
- AnnotatedOperation aop;
- aop.name = base::StringPrintf("<op-%" PRIuS ">", i);
- aop.op.set_type(InstallOperation::BSDIFF);
- StoreExtents({ExtentForRange(1 + 2 * i, 2)}, aop.op.mutable_src_extents());
- StoreExtents({ExtentForRange(1 + 2 * ((i + 1) % num_ops), 2)},
- aop.op.mutable_dst_extents());
- aops.push_back(aop);
- }
-
- {
- AnnotatedOperation aop;
- aop.name = "<op-shrink>";
- aop.op.set_type(InstallOperation::BSDIFF);
- StoreExtents({ExtentForRange(8, 1)}, aop.op.mutable_src_extents());
- StoreExtents({ExtentForRange(7, 1)}, aop.op.mutable_dst_extents());
- aops.push_back(aop);
- }
-
- PartitionConfig old_part("part");
- old_part.path = "/dev/zero";
- old_part.size = old_blocks * block_size;
-
- PartitionConfig new_part("part");
- new_part.path = "/dev/zero";
- new_part.size = new_blocks * block_size;
-
- CreateBlobFile();
-
- EXPECT_TRUE(InplaceGenerator::ResolveReadAfterWriteDependencies(
- old_part,
- new_part,
- (old_blocks + 2) * block_size, // enough scratch space.
- block_size,
- blob_file_.get(),
- &aops));
-
- size_t full_ops = 0;
- for (const auto& aop : aops) {
- if (diff_utils::IsAReplaceOperation(aop.op.type()))
- full_ops++;
- }
- // There should be only one REPLACE* operation, the one we added for block 0.
- EXPECT_EQ(1U, full_ops);
-
- // There should be only one MOVE operation, the one used to break the loop
- // which should write to scratch space past the block 7 (the last block of the
- // new partition) which is being written later.
- size_t move_ops = 0;
- for (const auto& aop : aops) {
- if (aop.op.type() == InstallOperation::MOVE) {
- move_ops++;
- for (const Extent& extent : aop.op.dst_extents()) {
- EXPECT_LE(7U, extent.start_block())
- << "On dst extents for aop: " << aop;
- }
- }
- }
- EXPECT_EQ(1U, move_ops);
-
- DumpAopsOnFailure(aops);
-}
-
-} // namespace chromeos_update_engine
diff --git a/payload_generator/payload_file.cc b/payload_generator/payload_file.cc
index a111fd6..69325d7 100644
--- a/payload_generator/payload_file.cc
+++ b/payload_generator/payload_file.cc
@@ -74,11 +74,9 @@
manifest_.set_block_size(config.block_size);
manifest_.set_max_timestamp(config.max_timestamp);
- if (major_version_ == kBrilloMajorPayloadVersion) {
- if (config.target.dynamic_partition_metadata != nullptr)
- *(manifest_.mutable_dynamic_partition_metadata()) =
- *(config.target.dynamic_partition_metadata);
- }
+ if (config.target.dynamic_partition_metadata != nullptr)
+ *(manifest_.mutable_dynamic_partition_metadata()) =
+ *(config.target.dynamic_partition_metadata);
return true;
}
@@ -86,13 +84,6 @@
bool PayloadFile::AddPartition(const PartitionConfig& old_conf,
const PartitionConfig& new_conf,
const vector<AnnotatedOperation>& aops) {
- // Check partitions order for Chrome OS
- if (major_version_ == kChromeOSMajorPayloadVersion) {
- const vector<const char*> part_order = {kPartitionNameRoot,
- kPartitionNameKernel};
- TEST_AND_RETURN_FALSE(part_vec_.size() < part_order.size());
- TEST_AND_RETURN_FALSE(new_conf.name == part_order[part_vec_.size()]);
- }
Partition part;
part.name = new_conf.name;
part.aops = aops;
@@ -134,66 +125,45 @@
}
// Copy the operations and partition info from the part_vec_ to the manifest.
- manifest_.clear_install_operations();
- manifest_.clear_kernel_install_operations();
manifest_.clear_partitions();
for (const auto& part : part_vec_) {
- if (major_version_ == kBrilloMajorPayloadVersion) {
- PartitionUpdate* partition = manifest_.add_partitions();
- partition->set_partition_name(part.name);
- if (part.postinstall.run) {
- partition->set_run_postinstall(true);
- if (!part.postinstall.path.empty())
- partition->set_postinstall_path(part.postinstall.path);
- if (!part.postinstall.filesystem_type.empty())
- partition->set_filesystem_type(part.postinstall.filesystem_type);
- partition->set_postinstall_optional(part.postinstall.optional);
+ PartitionUpdate* partition = manifest_.add_partitions();
+ partition->set_partition_name(part.name);
+ if (part.postinstall.run) {
+ partition->set_run_postinstall(true);
+ if (!part.postinstall.path.empty())
+ partition->set_postinstall_path(part.postinstall.path);
+ if (!part.postinstall.filesystem_type.empty())
+ partition->set_filesystem_type(part.postinstall.filesystem_type);
+ partition->set_postinstall_optional(part.postinstall.optional);
+ }
+ if (!part.verity.IsEmpty()) {
+ if (part.verity.hash_tree_extent.num_blocks() != 0) {
+ *partition->mutable_hash_tree_data_extent() =
+ part.verity.hash_tree_data_extent;
+ *partition->mutable_hash_tree_extent() = part.verity.hash_tree_extent;
+ partition->set_hash_tree_algorithm(part.verity.hash_tree_algorithm);
+ if (!part.verity.hash_tree_salt.empty())
+ partition->set_hash_tree_salt(part.verity.hash_tree_salt.data(),
+ part.verity.hash_tree_salt.size());
}
- if (!part.verity.IsEmpty()) {
- if (part.verity.hash_tree_extent.num_blocks() != 0) {
- *partition->mutable_hash_tree_data_extent() =
- part.verity.hash_tree_data_extent;
- *partition->mutable_hash_tree_extent() = part.verity.hash_tree_extent;
- partition->set_hash_tree_algorithm(part.verity.hash_tree_algorithm);
- if (!part.verity.hash_tree_salt.empty())
- partition->set_hash_tree_salt(part.verity.hash_tree_salt.data(),
- part.verity.hash_tree_salt.size());
- }
- if (part.verity.fec_extent.num_blocks() != 0) {
- *partition->mutable_fec_data_extent() = part.verity.fec_data_extent;
- *partition->mutable_fec_extent() = part.verity.fec_extent;
- partition->set_fec_roots(part.verity.fec_roots);
- }
- }
- for (const AnnotatedOperation& aop : part.aops) {
- *partition->add_operations() = aop.op;
- }
- if (part.old_info.has_size() || part.old_info.has_hash())
- *(partition->mutable_old_partition_info()) = part.old_info;
- if (part.new_info.has_size() || part.new_info.has_hash())
- *(partition->mutable_new_partition_info()) = part.new_info;
- } else {
- // major_version_ == kChromeOSMajorPayloadVersion
- if (part.name == kPartitionNameKernel) {
- for (const AnnotatedOperation& aop : part.aops)
- *manifest_.add_kernel_install_operations() = aop.op;
- if (part.old_info.has_size() || part.old_info.has_hash())
- *manifest_.mutable_old_kernel_info() = part.old_info;
- if (part.new_info.has_size() || part.new_info.has_hash())
- *manifest_.mutable_new_kernel_info() = part.new_info;
- } else {
- for (const AnnotatedOperation& aop : part.aops)
- *manifest_.add_install_operations() = aop.op;
- if (part.old_info.has_size() || part.old_info.has_hash())
- *manifest_.mutable_old_rootfs_info() = part.old_info;
- if (part.new_info.has_size() || part.new_info.has_hash())
- *manifest_.mutable_new_rootfs_info() = part.new_info;
+ if (part.verity.fec_extent.num_blocks() != 0) {
+ *partition->mutable_fec_data_extent() = part.verity.fec_data_extent;
+ *partition->mutable_fec_extent() = part.verity.fec_extent;
+ partition->set_fec_roots(part.verity.fec_roots);
}
}
+ for (const AnnotatedOperation& aop : part.aops) {
+ *partition->add_operations() = aop.op;
+ }
+ if (part.old_info.has_size() || part.old_info.has_hash())
+ *(partition->mutable_old_partition_info()) = part.old_info;
+ if (part.new_info.has_size() || part.new_info.has_hash())
+ *(partition->mutable_new_partition_info()) = part.new_info;
}
// Signatures appear at the end of the blobs. Note the offset in the
- // manifest_.
+ // |manifest_|.
uint64_t signature_blob_length = 0;
if (!private_key_path.empty()) {
TEST_AND_RETURN_FALSE(PayloadSigner::SignatureBlobLength(
@@ -201,7 +171,6 @@
PayloadSigner::AddSignatureToManifest(
next_blob_offset,
signature_blob_length,
- major_version_ == kChromeOSMajorPayloadVersion,
&manifest_);
}
@@ -229,18 +198,14 @@
TEST_AND_RETURN_FALSE(
WriteUint64AsBigEndian(&writer, serialized_manifest.size()));
- // Write metadata signature size.
- uint32_t metadata_signature_size = 0;
- if (major_version_ == kBrilloMajorPayloadVersion) {
- // Metadata signature has the same size as payload signature, because they
- // are both the same kind of signature for the same kind of hash.
- uint32_t metadata_signature_size = htobe32(signature_blob_length);
- TEST_AND_RETURN_FALSE_ERRNO(writer.Write(&metadata_signature_size,
- sizeof(metadata_signature_size)));
- metadata_size += sizeof(metadata_signature_size);
- // Set correct size instead of big endian size.
- metadata_signature_size = signature_blob_length;
- }
+ // Metadata signature has the same size as payload signature, because they
+ // are both the same kind of signature for the same kind of hash.
+ uint32_t metadata_signature_size = htobe32(signature_blob_length);
+ TEST_AND_RETURN_FALSE_ERRNO(
+ writer.Write(&metadata_signature_size, sizeof(metadata_signature_size)));
+ metadata_size += sizeof(metadata_signature_size);
+ // Set correct size instead of big endian size.
+ metadata_signature_size = signature_blob_length;
// Write protobuf
LOG(INFO) << "Writing final delta file protobuf... "
@@ -249,8 +214,7 @@
writer.Write(serialized_manifest.data(), serialized_manifest.size()));
// Write metadata signature blob.
- if (major_version_ == kBrilloMajorPayloadVersion &&
- !private_key_path.empty()) {
+ if (!private_key_path.empty()) {
brillo::Blob metadata_hash;
TEST_AND_RETURN_FALSE(HashCalculator::RawHashOfFile(
payload_file, metadata_size, &metadata_hash));
@@ -261,7 +225,7 @@
writer.Write(metadata_signature.data(), metadata_signature.size()));
}
- // Append the data blobs
+ // Append the data blobs.
LOG(INFO) << "Writing final delta file data blobs...";
int blobs_fd = open(ordered_blobs_path.c_str(), O_RDONLY, 0);
ScopedFdCloser blobs_fd_closer(&blobs_fd);
diff --git a/payload_generator/payload_generation_config.cc b/payload_generator/payload_generation_config.cc
index 88cca30..b653a03 100644
--- a/payload_generator/payload_generation_config.cc
+++ b/payload_generator/payload_generation_config.cc
@@ -32,6 +32,7 @@
#include "update_engine/payload_generator/ext2_filesystem.h"
#include "update_engine/payload_generator/mapfile_filesystem.h"
#include "update_engine/payload_generator/raw_filesystem.h"
+#include "update_engine/payload_generator/squashfs_filesystem.h"
using std::string;
@@ -86,6 +87,14 @@
return true;
}
+ fs_interface = SquashfsFilesystem::CreateFromFile(path,
+ /*extract_deflates=*/true,
+ /*load_settings=*/true);
+ if (fs_interface) {
+ TEST_AND_RETURN_FALSE(fs_interface->GetBlockSize() == kBlockSize);
+ return true;
+ }
+
// Fall back to a RAW filesystem.
TEST_AND_RETURN_FALSE(size % kBlockSize == 0);
fs_interface = RawFilesystem::Create(
@@ -219,10 +228,8 @@
}
bool PayloadVersion::Validate() const {
- TEST_AND_RETURN_FALSE(major == kChromeOSMajorPayloadVersion ||
- major == kBrilloMajorPayloadVersion);
+ TEST_AND_RETURN_FALSE(major == kBrilloMajorPayloadVersion);
TEST_AND_RETURN_FALSE(minor == kFullPayloadMinorVersion ||
- minor == kInPlaceMinorPayloadVersion ||
minor == kSourceMinorPayloadVersion ||
minor == kOpSrcHashMinorPayloadVersion ||
minor == kBrotliBsdiffMinorPayloadVersion ||
@@ -237,13 +244,10 @@
case InstallOperation::REPLACE:
case InstallOperation::REPLACE_BZ:
// These operations were included in the original payload format.
- return true;
-
case InstallOperation::REPLACE_XZ:
- // These operations are included in the major version used in Brillo, but
- // can also be used with minor version 3 or newer.
- return major == kBrilloMajorPayloadVersion ||
- minor >= kOpSrcHashMinorPayloadVersion;
+ // These operations are included minor version 3 or newer and full
+ // payloads.
+ return true;
case InstallOperation::ZERO:
case InstallOperation::DISCARD:
@@ -252,14 +256,6 @@
// them for delta payloads for now.
return minor >= kBrotliBsdiffMinorPayloadVersion;
- // Delta operations:
- case InstallOperation::MOVE:
- case InstallOperation::BSDIFF:
- // MOVE and BSDIFF were replaced by SOURCE_COPY and SOURCE_BSDIFF and
- // should not be used in newer delta versions, since the idempotent checks
- // were removed.
- return minor == kInPlaceMinorPayloadVersion;
-
case InstallOperation::SOURCE_COPY:
case InstallOperation::SOURCE_BSDIFF:
return minor >= kSourceMinorPayloadVersion;
@@ -269,6 +265,10 @@
case InstallOperation::PUFFDIFF:
return minor >= kPuffdiffMinorPayloadVersion;
+
+ case InstallOperation::MOVE:
+ case InstallOperation::BSDIFF:
+ NOTREACHED();
}
return false;
}
@@ -277,10 +277,6 @@
return minor != kFullPayloadMinorVersion;
}
-bool PayloadVersion::InplaceUpdate() const {
- return minor == kInPlaceMinorPayloadVersion;
-}
-
bool PayloadGenerationConfig::Validate() const {
TEST_AND_RETURN_FALSE(version.Validate());
TEST_AND_RETURN_FALSE(version.IsDelta() == is_delta);
@@ -307,11 +303,6 @@
for (const PartitionConfig& part : target.partitions) {
TEST_AND_RETURN_FALSE(part.ValidateExists());
TEST_AND_RETURN_FALSE(part.size % block_size == 0);
- if (version.minor == kInPlaceMinorPayloadVersion &&
- part.name == kPartitionNameRoot)
- TEST_AND_RETURN_FALSE(rootfs_partition_size >= part.size);
- if (version.major == kChromeOSMajorPayloadVersion)
- TEST_AND_RETURN_FALSE(part.postinstall.IsEmpty());
if (version.minor < kVerityMinorPayloadVersion)
TEST_AND_RETURN_FALSE(part.verity.IsEmpty());
}
diff --git a/payload_generator/payload_generation_config.h b/payload_generator/payload_generation_config.h
index e90edde..af6f181 100644
--- a/payload_generator/payload_generation_config.h
+++ b/payload_generator/payload_generation_config.h
@@ -173,10 +173,6 @@
// Whether this payload version is a delta payload.
bool IsDelta() const;
- // Tells whether the update is done in-place, that is, whether the operations
- // read and write from the same partition.
- bool InplaceUpdate() const;
-
// The major version of the payload.
uint64_t major;
diff --git a/payload_generator/payload_properties.cc b/payload_generator/payload_properties.cc
new file mode 100644
index 0000000..bc82eb7
--- /dev/null
+++ b/payload_generator/payload_properties.cc
@@ -0,0 +1,142 @@
+//
+// Copyright 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_generator/payload_properties.h"
+
+#include <algorithm>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include <base/json/json_writer.h>
+#include <base/strings/string_util.h>
+#include <base/values.h>
+#include <brillo/data_encoding.h>
+
+#include "update_engine/common/constants.h"
+#include "update_engine/common/hash_calculator.h"
+#include "update_engine/common/utils.h"
+#include "update_engine/payload_consumer/payload_metadata.h"
+#include "update_engine/update_metadata.pb.h"
+
+using std::string;
+using std::vector;
+
+namespace chromeos_update_engine {
+
+namespace {
+// These ones are needed by the GoldenEye.
+const char kPayloadPropertyJsonVersion[] = "version";
+const char kPayloadPropertyJsonPayloadHash[] = "sha256_hex";
+const char kPayloadPropertyJsonMetadataSize[] = "metadata_size";
+const char kPayloadPropertyJsonMetadataSignature[] = "metadata_signature";
+
+// These are needed by the Nebraska and devserver.
+const char kPayloadPropertyJsonPayloadSize[] = "size";
+const char kPayloadPropertyJsonIsDelta[] = "is_delta";
+const char kPayloadPropertyJsonTargetVersion[] = "target_version";
+const char kPayloadPropertyJsonSourceVersion[] = "source_version";
+} // namespace
+
+PayloadProperties::PayloadProperties(const string& payload_path)
+ : payload_path_(payload_path) {}
+
+bool PayloadProperties::GetPropertiesAsJson(string* json_str) {
+ TEST_AND_RETURN_FALSE(LoadFromPayload());
+
+ base::DictionaryValue properties;
+ properties.SetInteger(kPayloadPropertyJsonVersion, version_);
+ properties.SetInteger(kPayloadPropertyJsonMetadataSize, metadata_size_);
+ properties.SetString(kPayloadPropertyJsonMetadataSignature,
+ metadata_signatures_);
+ properties.SetInteger(kPayloadPropertyJsonPayloadSize, payload_size_);
+ properties.SetString(kPayloadPropertyJsonPayloadHash, payload_hash_);
+ properties.SetBoolean(kPayloadPropertyJsonIsDelta, is_delta_);
+ properties.SetString(kPayloadPropertyJsonTargetVersion, target_version_);
+ if (is_delta_) {
+ properties.SetString(kPayloadPropertyJsonSourceVersion, source_version_);
+ }
+
+ return base::JSONWriter::Write(properties, json_str);
+}
+
+bool PayloadProperties::GetPropertiesAsKeyValue(string* key_value_str) {
+ TEST_AND_RETURN_FALSE(LoadFromPayload());
+
+ brillo::KeyValueStore properties;
+ properties.SetString(kPayloadPropertyFileSize, std::to_string(payload_size_));
+ properties.SetString(kPayloadPropertyMetadataSize,
+ std::to_string(metadata_size_));
+ properties.SetString(kPayloadPropertyFileHash, payload_hash_);
+ properties.SetString(kPayloadPropertyMetadataHash, metadata_hash_);
+
+ *key_value_str = properties.SaveToString();
+ return true;
+}
+
+bool PayloadProperties::LoadFromPayload() {
+ PayloadMetadata payload_metadata;
+ DeltaArchiveManifest manifest;
+ Signatures metadata_signatures;
+ TEST_AND_RETURN_FALSE(payload_metadata.ParsePayloadFile(
+ payload_path_, &manifest, &metadata_signatures));
+
+ metadata_size_ = payload_metadata.GetMetadataSize();
+ payload_size_ = utils::FileSize(payload_path_);
+
+ brillo::Blob metadata_hash;
+ TEST_AND_RETURN_FALSE(HashCalculator::RawHashOfFile(
+ payload_path_, metadata_size_, &metadata_hash) ==
+ static_cast<off_t>(metadata_size_));
+ metadata_hash_ = brillo::data_encoding::Base64Encode(metadata_hash);
+
+ brillo::Blob payload_hash;
+ TEST_AND_RETURN_FALSE(HashCalculator::RawHashOfFile(
+ payload_path_, payload_size_, &payload_hash) ==
+ static_cast<off_t>(payload_size_));
+ payload_hash_ = brillo::data_encoding::Base64Encode(payload_hash);
+
+ if (payload_metadata.GetMetadataSignatureSize() > 0) {
+ TEST_AND_RETURN_FALSE(metadata_signatures.signatures_size() > 0);
+ vector<string> base64_signatures;
+ for (const auto& sig : metadata_signatures.signatures()) {
+ base64_signatures.push_back(
+ brillo::data_encoding::Base64Encode(sig.data()));
+ }
+ metadata_signatures_ = base::JoinString(base64_signatures, ":");
+ }
+
+ is_delta_ = manifest.has_old_image_info() ||
+ std::any_of(manifest.partitions().begin(),
+ manifest.partitions().end(),
+ [](const PartitionUpdate& part) {
+ return part.has_old_partition_info();
+ });
+
+ if (manifest.has_new_image_info()) {
+ target_version_ = manifest.new_image_info().version();
+ } else {
+ target_version_ = "99999.0.0";
+ }
+
+ // No need to set the source version if it was not a delta payload.
+ if (is_delta_ && manifest.has_old_image_info()) {
+ source_version_ = manifest.old_image_info().version();
+ }
+ return true;
+}
+
+} // namespace chromeos_update_engine
diff --git a/payload_generator/payload_properties.h b/payload_generator/payload_properties.h
new file mode 100644
index 0000000..3b34511
--- /dev/null
+++ b/payload_generator/payload_properties.h
@@ -0,0 +1,73 @@
+//
+// Copyright 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_PAYLOAD_GENERATOR_PAYLOAD_PROPERTIES_H_
+#define UPDATE_ENGINE_PAYLOAD_GENERATOR_PAYLOAD_PROPERTIES_H_
+
+#include <string>
+
+#include <brillo/key_value_store.h>
+#include <brillo/secure_blob.h>
+
+namespace chromeos_update_engine {
+
+// A class for extracting information about a payload from the payload file
+// itself. Currently the metadata can be exported as a json file or a key/value
+// properties file. But more can be added if required.
+class PayloadProperties {
+ public:
+ explicit PayloadProperties(const std::string& payload_path);
+ ~PayloadProperties() = default;
+
+ // Get the properties in a json format. The json file will be used in
+ // autotests, cros flash, etc. Mainly in Chrome OS.
+ bool GetPropertiesAsJson(std::string* json_str);
+
+ // Get the properties of the payload as a key/value store. This is mainly used
+ // in Android.
+ bool GetPropertiesAsKeyValue(std::string* key_value_str);
+
+ private:
+ // Does the main job of reading the payload and extracting information from
+ // it.
+ bool LoadFromPayload();
+
+ // The path to the payload file.
+ std::string payload_path_;
+
+ // The version of the metadata json format. If the output json file changes
+ // format, this needs to be increased.
+ int version_{2};
+
+ size_t metadata_size_;
+ std::string metadata_hash_;
+ std::string metadata_signatures_;
+
+ size_t payload_size_;
+ std::string payload_hash_;
+
+ // Whether the payload is a delta (true) or full (false).
+ bool is_delta_;
+
+ std::string target_version_;
+ std::string source_version_;
+
+ DISALLOW_COPY_AND_ASSIGN(PayloadProperties);
+};
+
+} // namespace chromeos_update_engine
+
+#endif // UPDATE_ENGINE_PAYLOAD_GENERATOR_PAYLOAD_PROPERTIES_H_
diff --git a/payload_generator/payload_properties_unittest.cc b/payload_generator/payload_properties_unittest.cc
new file mode 100644
index 0000000..db3902c
--- /dev/null
+++ b/payload_generator/payload_properties_unittest.cc
@@ -0,0 +1,144 @@
+//
+// Copyright (C) 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_generator/payload_properties.h"
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include <base/files/file_util.h>
+#include <base/files/scoped_file.h>
+#include <base/files/scoped_temp_dir.h>
+#include <base/rand_util.h>
+#include <base/strings/stringprintf.h>
+#include <brillo/data_encoding.h>
+
+#include <gtest/gtest.h>
+
+#include "update_engine/common/hash_calculator.h"
+#include "update_engine/common/test_utils.h"
+#include "update_engine/common/utils.h"
+#include "update_engine/payload_consumer/install_plan.h"
+#include "update_engine/payload_generator/delta_diff_generator.h"
+#include "update_engine/payload_generator/delta_diff_utils.h"
+#include "update_engine/payload_generator/full_update_generator.h"
+#include "update_engine/payload_generator/operations_generator.h"
+#include "update_engine/payload_generator/payload_file.h"
+#include "update_engine/payload_generator/payload_generation_config.h"
+
+using chromeos_update_engine::test_utils::ScopedTempFile;
+using std::string;
+using std::unique_ptr;
+using std::vector;
+
+namespace chromeos_update_engine {
+
+// TODO(kimjae): current implementation is very specific to a static way of
+// producing a deterministic test. It would definitely be beneficial to
+// extend the |PayloadPropertiesTest::SetUp()| into a generic helper or
+// separate class that can handle creation of different |PayloadFile|s.
+class PayloadPropertiesTest : public ::testing::Test {
+ protected:
+ void SetUp() override {
+ PayloadGenerationConfig config;
+ config.version.major = kBrilloMajorPayloadVersion;
+ config.version.minor = kSourceMinorPayloadVersion;
+ config.source.image_info.set_version("123.0.0");
+ config.target.image_info.set_version("456.7.8");
+ PayloadFile payload;
+ EXPECT_TRUE(payload.Init(config));
+
+ const string kTempFileTemplate = "temp_data.XXXXXX";
+ int data_file_fd;
+ string temp_file_path;
+ EXPECT_TRUE(
+ utils::MakeTempFile(kTempFileTemplate, &temp_file_path, &data_file_fd));
+ ScopedPathUnlinker temp_file_unlinker(temp_file_path);
+ EXPECT_LE(0, data_file_fd);
+
+ const auto SetupPartitionConfig =
+ [](PartitionConfig* config, const string& path, size_t size) {
+ config->path = path;
+ config->size = size;
+ };
+ const auto WriteZerosToFile = [](const char path[], size_t size) {
+ string zeros(size, '\0');
+ EXPECT_TRUE(utils::WriteFile(path, zeros.c_str(), zeros.size()));
+ };
+ ScopedTempFile old_part_file;
+ ScopedTempFile new_part_file;
+ PartitionConfig old_part(kPartitionNameRoot);
+ PartitionConfig new_part(kPartitionNameRoot);
+ SetupPartitionConfig(&old_part, old_part_file.path(), 0);
+ SetupPartitionConfig(&new_part, new_part_file.path(), 10000);
+ WriteZerosToFile(old_part_file.path().c_str(), old_part.size);
+ WriteZerosToFile(new_part_file.path().c_str(), new_part.size);
+
+ // Select payload generation strategy based on the config.
+ unique_ptr<OperationsGenerator> strategy(new FullUpdateGenerator());
+
+ vector<AnnotatedOperation> aops;
+ off_t data_file_size = 0;
+ BlobFileWriter blob_file_writer(data_file_fd, &data_file_size);
+ // Generate the operations using the strategy we selected above.
+ EXPECT_TRUE(strategy->GenerateOperations(
+ config, old_part, new_part, &blob_file_writer, &aops));
+
+ payload.AddPartition(old_part, new_part, aops);
+
+ uint64_t metadata_size;
+ EXPECT_TRUE(payload.WritePayload(
+ payload_file.path(), temp_file_path, "", &metadata_size));
+ }
+
+ ScopedTempFile payload_file;
+};
+
+// Validate the hash of file exists within the output.
+TEST_F(PayloadPropertiesTest, GetPropertiesAsJsonTestHash) {
+ constexpr char kJsonProperties[] =
+ "{"
+ R"("is_delta":true,)"
+ R"("metadata_signature":"",)"
+ R"("metadata_size":187,)"
+ R"("sha256_hex":"Rtrj9v3xXhrAi1741HAojtGxAQEOZ7mDyhzskIF4PJc=",)"
+ R"("size":233,)"
+ R"("source_version":"123.0.0",)"
+ R"("target_version":"456.7.8",)"
+ R"("version":2)"
+ "}";
+ string json;
+ EXPECT_TRUE(
+ PayloadProperties(payload_file.path()).GetPropertiesAsJson(&json));
+ EXPECT_EQ(kJsonProperties, json) << "JSON contents:\n" << json;
+}
+
+// Validate the hash of file and metadata are within the output.
+TEST_F(PayloadPropertiesTest, GetPropertiesAsKeyValueTestHash) {
+ constexpr char kKeyValueProperties[] =
+ "FILE_HASH=Rtrj9v3xXhrAi1741HAojtGxAQEOZ7mDyhzskIF4PJc=\n"
+ "FILE_SIZE=233\n"
+ "METADATA_HASH=kiXTexy/s2aPttf4+r8KRZWYZ6FYvwhU6rJGcnnI+U0=\n"
+ "METADATA_SIZE=187\n";
+ string key_value;
+ EXPECT_TRUE(PayloadProperties{payload_file.path()}.GetPropertiesAsKeyValue(
+ &key_value));
+ EXPECT_EQ(kKeyValueProperties, key_value) << "Key Value contents:\n"
+ << key_value;
+}
+
+} // namespace chromeos_update_engine
diff --git a/payload_generator/payload_signer.cc b/payload_generator/payload_signer.cc
index 72780b1..7e5fd4e 100644
--- a/payload_generator/payload_signer.cc
+++ b/payload_generator/payload_signer.cc
@@ -104,22 +104,20 @@
uint64_t metadata_size = payload_metadata.GetMetadataSize();
uint32_t metadata_signature_size =
payload_metadata.GetMetadataSignatureSize();
- if (payload_metadata.GetMajorVersion() == kBrilloMajorPayloadVersion) {
- // Write metadata signature size in header.
- uint32_t metadata_signature_size_be = htobe32(metadata_signature.size());
- memcpy(payload.data() + manifest_offset,
- &metadata_signature_size_be,
- sizeof(metadata_signature_size_be));
- manifest_offset += sizeof(metadata_signature_size_be);
- // Replace metadata signature.
- payload.erase(payload.begin() + metadata_size,
- payload.begin() + metadata_size + metadata_signature_size);
- payload.insert(payload.begin() + metadata_size,
- metadata_signature.begin(),
- metadata_signature.end());
- metadata_signature_size = metadata_signature.size();
- LOG(INFO) << "Metadata signature size: " << metadata_signature_size;
- }
+ // Write metadata signature size in header.
+ uint32_t metadata_signature_size_be = htobe32(metadata_signature.size());
+ memcpy(payload.data() + manifest_offset,
+ &metadata_signature_size_be,
+ sizeof(metadata_signature_size_be));
+ manifest_offset += sizeof(metadata_signature_size_be);
+ // Replace metadata signature.
+ payload.erase(payload.begin() + metadata_size,
+ payload.begin() + metadata_size + metadata_signature_size);
+ payload.insert(payload.begin() + metadata_size,
+ metadata_signature.begin(),
+ metadata_signature.end());
+ metadata_signature_size = metadata_signature.size();
+ LOG(INFO) << "Metadata signature size: " << metadata_signature_size;
DeltaArchiveManifest manifest;
TEST_AND_RETURN_FALSE(payload_metadata.GetManifest(payload, &manifest));
@@ -143,7 +141,6 @@
PayloadSigner::AddSignatureToManifest(
payload.size() - metadata_size - metadata_signature_size,
payload_signature.size(),
- payload_metadata.GetMajorVersion() == kChromeOSMajorPayloadVersion,
&manifest);
// Updates the payload to include the new manifest.
@@ -241,25 +238,12 @@
void PayloadSigner::AddSignatureToManifest(uint64_t signature_blob_offset,
uint64_t signature_blob_length,
- bool add_dummy_op,
DeltaArchiveManifest* manifest) {
LOG(INFO) << "Making room for signature in file";
manifest->set_signatures_offset(signature_blob_offset);
LOG(INFO) << "set? " << manifest->has_signatures_offset();
manifest->set_signatures_offset(signature_blob_offset);
manifest->set_signatures_size(signature_blob_length);
- // Add a dummy op at the end to appease older clients
- if (add_dummy_op) {
- InstallOperation* dummy_op = manifest->add_kernel_install_operations();
- dummy_op->set_type(InstallOperation::REPLACE);
- dummy_op->set_data_offset(signature_blob_offset);
- dummy_op->set_data_length(signature_blob_length);
- Extent* dummy_extent = dummy_op->add_dst_extents();
- // Tell the dummy op to write this data to a big sparse hole
- dummy_extent->set_start_block(kSparseHole);
- dummy_extent->set_num_blocks(
- utils::DivRoundUp(signature_blob_length, kBlockSize));
- }
}
bool PayloadSigner::VerifySignedPayload(const string& payload_path,
@@ -512,35 +496,4 @@
return true;
}
-bool PayloadSigner::ExtractPayloadProperties(
- const string& payload_path, brillo::KeyValueStore* properties) {
- brillo::Blob payload;
- TEST_AND_RETURN_FALSE(
- utils::ReadFileChunk(payload_path, 0, kMaxPayloadHeaderSize, &payload));
-
- PayloadMetadata payload_metadata;
- TEST_AND_RETURN_FALSE(payload_metadata.ParsePayloadHeader(payload));
- uint64_t metadata_size = payload_metadata.GetMetadataSize();
-
- uint64_t file_size = utils::FileSize(payload_path);
- properties->SetString(kPayloadPropertyFileSize, std::to_string(file_size));
- properties->SetString(kPayloadPropertyMetadataSize,
- std::to_string(metadata_size));
-
- brillo::Blob file_hash, metadata_hash;
- TEST_AND_RETURN_FALSE(
- HashCalculator::RawHashOfFile(payload_path, file_size, &file_hash) ==
- static_cast<off_t>(file_size));
-
- TEST_AND_RETURN_FALSE(HashCalculator::RawHashOfFile(
- payload_path, metadata_size, &metadata_hash) ==
- static_cast<off_t>(metadata_size));
-
- properties->SetString(kPayloadPropertyFileHash,
- brillo::data_encoding::Base64Encode(file_hash));
- properties->SetString(kPayloadPropertyMetadataHash,
- brillo::data_encoding::Base64Encode(metadata_hash));
- return true;
-}
-
} // namespace chromeos_update_engine
diff --git a/payload_generator/payload_signer.h b/payload_generator/payload_signer.h
index bd1e32f..06e4823 100644
--- a/payload_generator/payload_signer.h
+++ b/payload_generator/payload_signer.h
@@ -39,12 +39,9 @@
static bool VerifySignedPayload(const std::string& payload_path,
const std::string& public_key_path);
- // Adds specified signature offset/length to given |manifest|, also adds a
- // dummy operation that points to a signature blob located at the specified
- // offset/length if |add_dummy_op| is true.
+ // Adds specified signature offset/length to given |manifest|.
static void AddSignatureToManifest(uint64_t signature_blob_offset,
uint64_t signature_blob_length,
- bool add_dummy_op,
DeltaArchiveManifest* manifest);
// Given a raw |hash| and a private key in |private_key_path| calculates the
diff --git a/payload_generator/payload_signer_unittest.cc b/payload_generator/payload_signer_unittest.cc
index bf7100b..fe62997 100644
--- a/payload_generator/payload_signer_unittest.cc
+++ b/payload_generator/payload_signer_unittest.cc
@@ -20,6 +20,7 @@
#include <vector>
#include <base/logging.h>
+#include <base/stl_util.h>
#include <gtest/gtest.h>
#include "update_engine/common/hash_calculator.h"
@@ -118,8 +119,8 @@
EXPECT_EQ(1, signatures.signatures_size());
const Signatures::Signature& sig = signatures.signatures(0);
const string& sig_data = sig.data();
- ASSERT_EQ(arraysize(kDataSignature), sig_data.size());
- for (size_t i = 0; i < arraysize(kDataSignature); i++) {
+ ASSERT_EQ(base::size(kDataSignature), sig_data.size());
+ for (size_t i = 0; i < base::size(kDataSignature); i++) {
EXPECT_EQ(kDataSignature[i], static_cast<uint8_t>(sig_data[i]));
}
}
diff --git a/payload_generator/squashfs_filesystem.cc b/payload_generator/squashfs_filesystem.cc
index 6c892f5..eb4fda3 100644
--- a/payload_generator/squashfs_filesystem.cc
+++ b/payload_generator/squashfs_filesystem.cc
@@ -23,6 +23,7 @@
#include <utility>
#include <base/files/file_util.h>
+#include <base/files/scoped_temp_dir.h>
#include <base/logging.h>
#include <base/strings/string_number_conversions.h>
#include <base/strings/string_split.h>
@@ -36,6 +37,8 @@
#include "update_engine/payload_generator/extent_utils.h"
#include "update_engine/update_metadata.pb.h"
+using base::FilePath;
+using base::ScopedTempDir;
using std::string;
using std::unique_ptr;
using std::vector;
@@ -49,6 +52,8 @@
constexpr uint64_t kSquashfsCompressedBit = 1 << 24;
constexpr uint32_t kSquashfsZlibCompression = 1;
+constexpr char kUpdateEngineConf[] = "etc/update_engine.conf";
+
bool ReadSquashfsHeader(const brillo::Blob blob,
SquashfsFilesystem::SquashfsHeader* header) {
if (blob.size() < kSquashfsSuperBlockSize) {
@@ -76,18 +81,59 @@
// Run unsquashfs to get the system file map.
// unsquashfs -m <map-file> <squashfs-file>
vector<string> cmd = {"unsquashfs", "-m", map_file, sqfs_path};
- string stdout;
+ string stdout, stderr;
int exit_code;
- if (!Subprocess::SynchronousExec(cmd, &exit_code, &stdout) ||
+ if (!Subprocess::SynchronousExec(cmd, &exit_code, &stdout, &stderr) ||
exit_code != 0) {
- LOG(ERROR) << "Failed to run unsquashfs -m. The stdout content was: "
- << stdout;
+ LOG(ERROR) << "Failed to run `unsquashfs -m` with stdout content: "
+ << stdout << " and stderr content: " << stderr;
return false;
}
TEST_AND_RETURN_FALSE(utils::ReadFile(map_file, map));
return true;
}
+bool GetUpdateEngineConfig(const std::string& sqfs_path, string* config) {
+ ScopedTempDir unsquash_dir;
+ if (!unsquash_dir.CreateUniqueTempDir()) {
+ PLOG(ERROR) << "Failed to create a temporary directory.";
+ return false;
+ }
+
+ // Run unsquashfs to extract update_engine.conf
+ // -f: To force overriding if the target directory exists.
+ // -d: The directory to unsquash the files.
+ vector<string> cmd = {"unsquashfs",
+ "-f",
+ "-d",
+ unsquash_dir.GetPath().value(),
+ sqfs_path,
+ kUpdateEngineConf};
+ string stdout, stderr;
+ int exit_code;
+ if (!Subprocess::SynchronousExec(cmd, &exit_code, &stdout, &stderr) ||
+ exit_code != 0) {
+ PLOG(ERROR) << "Failed to unsquashfs etc/update_engine.conf with stdout: "
+ << stdout << " and stderr: " << stderr;
+ return false;
+ }
+
+ auto config_path = unsquash_dir.GetPath().Append(kUpdateEngineConf);
+ string config_content;
+ if (!utils::ReadFile(config_path.value(), &config_content)) {
+ PLOG(ERROR) << "Failed to read " << config_path.value();
+ return false;
+ }
+
+ if (config_content.empty()) {
+ LOG(ERROR) << "update_engine config file was empty!!";
+ return false;
+ }
+
+ *config = std::move(config_content);
+ return true;
+}
+
} // namespace
bool SquashfsFilesystem::Init(const string& map,
@@ -120,6 +166,7 @@
uint64_t start;
TEST_AND_RETURN_FALSE(base::StringToUint64(splits[1], &start));
uint64_t cur_offset = start;
+ bool is_compressed = false;
for (size_t i = 2; i < splits.size(); ++i) {
uint64_t blk_size;
TEST_AND_RETURN_FALSE(base::StringToUint64(splits[i], &blk_size));
@@ -127,10 +174,11 @@
auto new_blk_size = blk_size & ~kSquashfsCompressedBit;
TEST_AND_RETURN_FALSE(new_blk_size <= header.block_size);
if (new_blk_size > 0 && !(blk_size & kSquashfsCompressedBit)) {
- // Compressed block
+ // It is a compressed block.
if (is_zlib && extract_deflates) {
zlib_blks.emplace_back(cur_offset, new_blk_size);
}
+ is_compressed = true;
}
cur_offset += new_blk_size;
}
@@ -140,6 +188,7 @@
File file;
file.name = splits[0].as_string();
file.extents = {ExtentForBytes(kBlockSize, start, cur_offset - start)};
+ file.is_compressed = is_compressed;
files_.emplace_back(file);
}
}
@@ -151,7 +200,8 @@
// If there is any overlap between two consecutive extents, remove them. Here
// we are assuming all files have exactly one extent. If this assumption
// changes then this implementation needs to change too.
- for (auto first = files_.begin(), second = first + 1;
+ for (auto first = files_.begin(),
+ second = first + (first == files_.end() ? 0 : 1);
first != files_.end() && second != files_.end();
second = first + 1) {
auto first_begin = first->extents[0].start_block();
@@ -217,6 +267,14 @@
return a.offset < b.offset;
});
+  // Sometimes a squashfs can have two files that are hard linked. In this
+  // case both files will have the same starting offset in the image and hence
+  // the same zlib blocks. So we need to remove these duplicates to eliminate
+  // further potential problems. As a matter of fact the next statement will
+  // fail if there are duplicates (there will be overlap between two blocks).
+ auto last = std::unique(zlib_blks.begin(), zlib_blks.end());
+ zlib_blks.erase(last, zlib_blks.end());
+
// Sanity check. Make sure zlib blocks are not overlapping.
auto result = std::adjacent_find(
zlib_blks.begin(),
@@ -239,12 +297,12 @@
}
unique_ptr<SquashfsFilesystem> SquashfsFilesystem::CreateFromFile(
- const string& sqfs_path, bool extract_deflates) {
+ const string& sqfs_path, bool extract_deflates, bool load_settings) {
if (sqfs_path.empty())
return nullptr;
brillo::StreamPtr sqfs_file =
- brillo::FileStream::Open(base::FilePath(sqfs_path),
+ brillo::FileStream::Open(FilePath(sqfs_path),
brillo::Stream::AccessMode::READ,
brillo::FileStream::Disposition::OPEN_EXISTING,
nullptr);
@@ -278,6 +336,12 @@
return nullptr;
}
+ if (load_settings) {
+ if (!GetUpdateEngineConfig(sqfs_path, &sqfs->update_engine_config_)) {
+ return nullptr;
+ }
+ }
+
return sqfs;
}
@@ -311,9 +375,12 @@
}
bool SquashfsFilesystem::LoadSettings(brillo::KeyValueStore* store) const {
- // Settings not supported in squashfs.
- LOG(ERROR) << "squashfs doesn't support LoadSettings().";
- return false;
+ if (!store->LoadFromString(update_engine_config_)) {
+ LOG(ERROR) << "Failed to load the settings with config: "
+ << update_engine_config_;
+ return false;
+ }
+ return true;
}
bool SquashfsFilesystem::IsSquashfsImage(const brillo::Blob& blob) {
diff --git a/payload_generator/squashfs_filesystem.h b/payload_generator/squashfs_filesystem.h
index b79f8c7..5045dfc 100644
--- a/payload_generator/squashfs_filesystem.h
+++ b/payload_generator/squashfs_filesystem.h
@@ -59,7 +59,7 @@
// |extract_deflates| is true, it will process files to find location of all
// deflate streams.
static std::unique_ptr<SquashfsFilesystem> CreateFromFile(
- const std::string& sqfs_path, bool extract_deflates);
+ const std::string& sqfs_path, bool extract_deflates, bool load_settings);
// Creates the file system from a file map |filemap| which is a multi-line
// string with each line with the following format:
@@ -113,6 +113,9 @@
// All the files in the filesystem.
std::vector<File> files_;
+ // The content of /etc/update_engine.conf.
+ std::string update_engine_config_;
+
DISALLOW_COPY_AND_ASSIGN(SquashfsFilesystem);
};
diff --git a/payload_generator/squashfs_filesystem_unittest.cc b/payload_generator/squashfs_filesystem_unittest.cc
index 29fcf1c..68ca9df 100644
--- a/payload_generator/squashfs_filesystem_unittest.cc
+++ b/payload_generator/squashfs_filesystem_unittest.cc
@@ -112,7 +112,7 @@
#ifdef __CHROMEOS__
TEST_F(SquashfsFilesystemTest, EmptyFilesystemTest) {
unique_ptr<SquashfsFilesystem> fs = SquashfsFilesystem::CreateFromFile(
- GetBuildArtifactsPath("gen/disk_sqfs_empty.img"), true);
+ GetBuildArtifactsPath("gen/disk_sqfs_empty.img"), true, false);
CheckSquashfs(fs);
// Even an empty squashfs filesystem is rounded up to 4K.
@@ -133,7 +133,7 @@
TEST_F(SquashfsFilesystemTest, DefaultFilesystemTest) {
unique_ptr<SquashfsFilesystem> fs = SquashfsFilesystem::CreateFromFile(
- GetBuildArtifactsPath("gen/disk_sqfs_default.img"), true);
+ GetBuildArtifactsPath("gen/disk_sqfs_default.img"), true, false);
CheckSquashfs(fs);
vector<FilesystemInterface::File> files;
@@ -148,6 +148,18 @@
EXPECT_EQ(files[0].name, file.name);
EXPECT_EQ(files[0].extents, file.extents);
}
+
+TEST_F(SquashfsFilesystemTest, UpdateEngineConfigTest) {
+ unique_ptr<SquashfsFilesystem> fs = SquashfsFilesystem::CreateFromFile(
+ GetBuildArtifactsPath("gen/disk_sqfs_unittest.img"), true, true);
+ CheckSquashfs(fs);
+
+ brillo::KeyValueStore kvs;
+ EXPECT_TRUE(fs->LoadSettings(&kvs));
+ string minor_version;
+ EXPECT_TRUE(kvs.GetString("PAYLOAD_MINOR_VERSION", &minor_version));
+ EXPECT_EQ(minor_version, "1234");
+}
#endif // __CHROMEOS__
TEST_F(SquashfsFilesystemTest, SimpleFileMapTest) {
diff --git a/payload_generator/tarjan.cc b/payload_generator/tarjan.cc
deleted file mode 100644
index 2d4ca31..0000000
--- a/payload_generator/tarjan.cc
+++ /dev/null
@@ -1,83 +0,0 @@
-//
-// Copyright (C) 2010 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-#include "update_engine/payload_generator/tarjan.h"
-
-#include <algorithm>
-#include <vector>
-
-#include <base/logging.h>
-#include <base/stl_util.h>
-
-using std::min;
-using std::vector;
-
-namespace chromeos_update_engine {
-
-namespace {
-const vector<Vertex>::size_type kInvalidIndex = -1;
-}
-
-void TarjanAlgorithm::Execute(Vertex::Index vertex,
- Graph* graph,
- vector<Vertex::Index>* out) {
- stack_.clear();
- components_.clear();
- index_ = 0;
- for (Graph::iterator it = graph->begin(); it != graph->end(); ++it)
- it->index = it->lowlink = kInvalidIndex;
- required_vertex_ = vertex;
-
- Tarjan(vertex, graph);
- if (!components_.empty())
- out->swap(components_[0]);
-}
-
-void TarjanAlgorithm::Tarjan(Vertex::Index vertex, Graph* graph) {
- CHECK_EQ((*graph)[vertex].index, kInvalidIndex);
- (*graph)[vertex].index = index_;
- (*graph)[vertex].lowlink = index_;
- index_++;
- stack_.push_back(vertex);
- for (Vertex::EdgeMap::iterator it = (*graph)[vertex].out_edges.begin();
- it != (*graph)[vertex].out_edges.end();
- ++it) {
- Vertex::Index vertex_next = it->first;
- if ((*graph)[vertex_next].index == kInvalidIndex) {
- Tarjan(vertex_next, graph);
- (*graph)[vertex].lowlink =
- min((*graph)[vertex].lowlink, (*graph)[vertex_next].lowlink);
- } else if (base::ContainsValue(stack_, vertex_next)) {
- (*graph)[vertex].lowlink =
- min((*graph)[vertex].lowlink, (*graph)[vertex_next].index);
- }
- }
- if ((*graph)[vertex].lowlink == (*graph)[vertex].index) {
- vector<Vertex::Index> component;
- Vertex::Index other_vertex;
- do {
- other_vertex = stack_.back();
- stack_.pop_back();
- component.push_back(other_vertex);
- } while (other_vertex != vertex && !stack_.empty());
-
- if (base::ContainsValue(component, required_vertex_)) {
- components_.resize(components_.size() + 1);
- component.swap(components_.back());
- }
- }
-}
-
-} // namespace chromeos_update_engine
diff --git a/payload_generator/tarjan.h b/payload_generator/tarjan.h
deleted file mode 100644
index 39ac4e4..0000000
--- a/payload_generator/tarjan.h
+++ /dev/null
@@ -1,53 +0,0 @@
-//
-// Copyright (C) 2010 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#ifndef UPDATE_ENGINE_PAYLOAD_GENERATOR_TARJAN_H_
-#define UPDATE_ENGINE_PAYLOAD_GENERATOR_TARJAN_H_
-
-// This is an implementation of Tarjan's algorithm which finds all
-// Strongly Connected Components in a graph.
-
-// Note: a true Tarjan algorithm would find all strongly connected components
-// in the graph. This implementation will only find the strongly connected
-// component containing the vertex passed in.
-
-#include <vector>
-
-#include "update_engine/payload_generator/graph_types.h"
-
-namespace chromeos_update_engine {
-
-class TarjanAlgorithm {
- public:
- TarjanAlgorithm() : index_(0), required_vertex_(0) {}
-
- // 'out' is set to the result if there is one, otherwise it's untouched.
- void Execute(Vertex::Index vertex,
- Graph* graph,
- std::vector<Vertex::Index>* out);
-
- private:
- void Tarjan(Vertex::Index vertex, Graph* graph);
-
- Vertex::Index index_;
- Vertex::Index required_vertex_;
- std::vector<Vertex::Index> stack_;
- std::vector<std::vector<Vertex::Index>> components_;
-};
-
-} // namespace chromeos_update_engine
-
-#endif // UPDATE_ENGINE_PAYLOAD_GENERATOR_TARJAN_H_
diff --git a/payload_generator/tarjan_unittest.cc b/payload_generator/tarjan_unittest.cc
deleted file mode 100644
index b271227..0000000
--- a/payload_generator/tarjan_unittest.cc
+++ /dev/null
@@ -1,94 +0,0 @@
-//
-// Copyright (C) 2010 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#include "update_engine/payload_generator/tarjan.h"
-
-#include <string>
-#include <utility>
-
-#include <base/logging.h>
-#include <base/stl_util.h>
-#include <gtest/gtest.h>
-
-#include "update_engine/payload_generator/graph_types.h"
-
-using std::make_pair;
-using std::string;
-using std::vector;
-
-namespace chromeos_update_engine {
-
-class TarjanAlgorithmTest : public ::testing::Test {};
-
-TEST(TarjanAlgorithmTest, SimpleTest) {
- const Vertex::Index n_a = 0;
- const Vertex::Index n_b = 1;
- const Vertex::Index n_c = 2;
- const Vertex::Index n_d = 3;
- const Vertex::Index n_e = 4;
- const Vertex::Index n_f = 5;
- const Vertex::Index n_g = 6;
- const Vertex::Index n_h = 7;
- const Graph::size_type kNodeCount = 8;
-
- Graph graph(kNodeCount);
-
- graph[n_a].out_edges.insert(make_pair(n_e, EdgeProperties()));
- graph[n_a].out_edges.insert(make_pair(n_f, EdgeProperties()));
- graph[n_b].out_edges.insert(make_pair(n_a, EdgeProperties()));
- graph[n_c].out_edges.insert(make_pair(n_d, EdgeProperties()));
- graph[n_d].out_edges.insert(make_pair(n_e, EdgeProperties()));
- graph[n_d].out_edges.insert(make_pair(n_f, EdgeProperties()));
- graph[n_e].out_edges.insert(make_pair(n_b, EdgeProperties()));
- graph[n_e].out_edges.insert(make_pair(n_c, EdgeProperties()));
- graph[n_e].out_edges.insert(make_pair(n_f, EdgeProperties()));
- graph[n_f].out_edges.insert(make_pair(n_g, EdgeProperties()));
- graph[n_g].out_edges.insert(make_pair(n_h, EdgeProperties()));
- graph[n_h].out_edges.insert(make_pair(n_g, EdgeProperties()));
-
- TarjanAlgorithm tarjan;
-
- for (Vertex::Index i = n_a; i <= n_e; i++) {
- vector<Vertex::Index> vertex_indexes;
- tarjan.Execute(i, &graph, &vertex_indexes);
-
- EXPECT_EQ(5U, vertex_indexes.size());
- EXPECT_TRUE(base::ContainsValue(vertex_indexes, n_a));
- EXPECT_TRUE(base::ContainsValue(vertex_indexes, n_b));
- EXPECT_TRUE(base::ContainsValue(vertex_indexes, n_c));
- EXPECT_TRUE(base::ContainsValue(vertex_indexes, n_d));
- EXPECT_TRUE(base::ContainsValue(vertex_indexes, n_e));
- }
-
- {
- vector<Vertex::Index> vertex_indexes;
- tarjan.Execute(n_f, &graph, &vertex_indexes);
-
- EXPECT_EQ(1U, vertex_indexes.size());
- EXPECT_TRUE(base::ContainsValue(vertex_indexes, n_f));
- }
-
- for (Vertex::Index i = n_g; i <= n_h; i++) {
- vector<Vertex::Index> vertex_indexes;
- tarjan.Execute(i, &graph, &vertex_indexes);
-
- EXPECT_EQ(2U, vertex_indexes.size());
- EXPECT_TRUE(base::ContainsValue(vertex_indexes, n_g));
- EXPECT_TRUE(base::ContainsValue(vertex_indexes, n_h));
- }
-}
-
-} // namespace chromeos_update_engine
diff --git a/payload_generator/topological_sort.cc b/payload_generator/topological_sort.cc
deleted file mode 100644
index 0abd708..0000000
--- a/payload_generator/topological_sort.cc
+++ /dev/null
@@ -1,57 +0,0 @@
-//
-// Copyright (C) 2010 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#include "update_engine/payload_generator/topological_sort.h"
-
-#include <set>
-#include <vector>
-
-#include <base/logging.h>
-
-using std::set;
-using std::vector;
-
-namespace chromeos_update_engine {
-
-namespace {
-void TopologicalSortVisit(const Graph& graph,
- set<Vertex::Index>* visited_nodes,
- vector<Vertex::Index>* nodes,
- Vertex::Index node) {
- if (visited_nodes->find(node) != visited_nodes->end())
- return;
-
- visited_nodes->insert(node);
- // Visit all children.
- for (Vertex::EdgeMap::const_iterator it = graph[node].out_edges.begin();
- it != graph[node].out_edges.end();
- ++it) {
- TopologicalSortVisit(graph, visited_nodes, nodes, it->first);
- }
- // Visit this node.
- nodes->push_back(node);
-}
-} // namespace
-
-void TopologicalSort(const Graph& graph, vector<Vertex::Index>* out) {
- set<Vertex::Index> visited_nodes;
-
- for (Vertex::Index i = 0; i < graph.size(); i++) {
- TopologicalSortVisit(graph, &visited_nodes, out, i);
- }
-}
-
-} // namespace chromeos_update_engine
diff --git a/payload_generator/topological_sort.h b/payload_generator/topological_sort.h
deleted file mode 100644
index 461cbe1..0000000
--- a/payload_generator/topological_sort.h
+++ /dev/null
@@ -1,42 +0,0 @@
-//
-// Copyright (C) 2010 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#ifndef UPDATE_ENGINE_PAYLOAD_GENERATOR_TOPOLOGICAL_SORT_H_
-#define UPDATE_ENGINE_PAYLOAD_GENERATOR_TOPOLOGICAL_SORT_H_
-
-#include <vector>
-
-#include "update_engine/payload_generator/graph_types.h"
-
-namespace chromeos_update_engine {
-
-// Performs a topological sort on the directed graph 'graph' and stores
-// the nodes, in order visited, in 'out'.
-// For example, this graph:
-// A ---> C ----.
-// \ v
-// `--> B --> D
-// Might result in this in 'out':
-// out[0] = D
-// out[1] = B
-// out[2] = C
-// out[3] = A
-// Note: results are undefined if there is a cycle in the graph.
-void TopologicalSort(const Graph& graph, std::vector<Vertex::Index>* out);
-
-} // namespace chromeos_update_engine
-
-#endif // UPDATE_ENGINE_PAYLOAD_GENERATOR_TOPOLOGICAL_SORT_H_
diff --git a/payload_generator/topological_sort_unittest.cc b/payload_generator/topological_sort_unittest.cc
deleted file mode 100644
index aa296d8..0000000
--- a/payload_generator/topological_sort_unittest.cc
+++ /dev/null
@@ -1,96 +0,0 @@
-//
-// Copyright (C) 2010 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#include "update_engine/payload_generator/topological_sort.h"
-
-#include <utility>
-#include <vector>
-
-#include <gtest/gtest.h>
-
-#include "update_engine/payload_generator/graph_types.h"
-
-using std::make_pair;
-using std::vector;
-
-namespace chromeos_update_engine {
-
-class TopologicalSortTest : public ::testing::Test {};
-
-namespace {
-// Returns true if the value is found in vect. If found, the index is stored
-// in out_index if out_index is not null.
-template <typename T>
-bool IndexOf(const vector<T>& vect,
- const T& value,
- typename vector<T>::size_type* out_index) {
- for (typename vector<T>::size_type i = 0; i < vect.size(); i++) {
- if (vect[i] == value) {
- if (out_index) {
- *out_index = i;
- }
- return true;
- }
- }
- return false;
-}
-} // namespace
-
-TEST(TopologicalSortTest, SimpleTest) {
- int counter = 0;
- const Vertex::Index n_a = counter++;
- const Vertex::Index n_b = counter++;
- const Vertex::Index n_c = counter++;
- const Vertex::Index n_d = counter++;
- const Vertex::Index n_e = counter++;
- const Vertex::Index n_f = counter++;
- const Vertex::Index n_g = counter++;
- const Vertex::Index n_h = counter++;
- const Vertex::Index n_i = counter++;
- const Vertex::Index n_j = counter++;
- const Graph::size_type kNodeCount = counter++;
-
- Graph graph(kNodeCount);
-
- graph[n_i].out_edges.insert(make_pair(n_j, EdgeProperties()));
- graph[n_i].out_edges.insert(make_pair(n_c, EdgeProperties()));
- graph[n_i].out_edges.insert(make_pair(n_e, EdgeProperties()));
- graph[n_i].out_edges.insert(make_pair(n_h, EdgeProperties()));
- graph[n_c].out_edges.insert(make_pair(n_b, EdgeProperties()));
- graph[n_b].out_edges.insert(make_pair(n_a, EdgeProperties()));
- graph[n_e].out_edges.insert(make_pair(n_d, EdgeProperties()));
- graph[n_e].out_edges.insert(make_pair(n_g, EdgeProperties()));
- graph[n_g].out_edges.insert(make_pair(n_d, EdgeProperties()));
- graph[n_g].out_edges.insert(make_pair(n_f, EdgeProperties()));
- graph[n_d].out_edges.insert(make_pair(n_a, EdgeProperties()));
-
- vector<Vertex::Index> sorted;
- TopologicalSort(graph, &sorted);
-
- for (Vertex::Index i = 0; i < graph.size(); i++) {
- vector<Vertex::Index>::size_type src_index = 0;
- EXPECT_TRUE(IndexOf(sorted, i, &src_index));
- for (Vertex::EdgeMap::const_iterator it = graph[i].out_edges.begin();
- it != graph[i].out_edges.end();
- ++it) {
- vector<Vertex::Index>::size_type dst_index = 0;
- EXPECT_TRUE(IndexOf(sorted, it->first, &dst_index));
- EXPECT_LT(dst_index, src_index);
- }
- }
-}
-
-} // namespace chromeos_update_engine
diff --git a/payload_state.cc b/payload_state.cc
index 3ba6391..bde7999 100644
--- a/payload_state.cc
+++ b/payload_state.cc
@@ -37,6 +37,7 @@
#include "update_engine/omaha_request_params.h"
#include "update_engine/payload_consumer/install_plan.h"
#include "update_engine/system_state.h"
+#include "update_engine/update_attempter.h"
using base::Time;
using base::TimeDelta;
@@ -60,6 +61,8 @@
PayloadState::PayloadState()
: prefs_(nullptr),
+ powerwash_safe_prefs_(nullptr),
+ excluder_(nullptr),
using_p2p_for_downloading_(false),
p2p_num_attempts_(0),
payload_attempt_number_(0),
@@ -79,6 +82,7 @@
system_state_ = system_state;
prefs_ = system_state_->prefs();
powerwash_safe_prefs_ = system_state_->powerwash_safe_prefs();
+ excluder_ = system_state_->update_attempter()->GetExcluder();
LoadResponseSignature();
LoadPayloadAttemptNumber();
LoadFullPayloadAttemptNumber();
@@ -308,6 +312,7 @@
case ErrorCode::kUnsupportedMinorPayloadVersion:
case ErrorCode::kPayloadTimestampError:
case ErrorCode::kVerityCalculationError:
+ ExcludeCurrentPayload();
IncrementUrlIndex();
break;
@@ -471,9 +476,7 @@
void PayloadState::IncrementUrlIndex() {
size_t next_url_index = url_index_ + 1;
- size_t max_url_size = 0;
- for (const auto& urls : candidate_urls_)
- max_url_size = std::max(max_url_size, urls.size());
+ size_t max_url_size = candidate_urls_[payload_index_].size();
if (next_url_index < max_url_size) {
LOG(INFO) << "Incrementing the URL index for next attempt";
SetUrlIndex(next_url_index);
@@ -502,10 +505,29 @@
} else {
LOG(INFO) << "Reached max number of failures for Url" << GetUrlIndex()
<< ". Trying next available URL";
+ ExcludeCurrentPayload();
IncrementUrlIndex();
}
}
+void PayloadState::ExcludeCurrentPayload() {
+ const auto& package = response_.packages[payload_index_];
+ if (!package.can_exclude) {
+ LOG(INFO) << "Not excluding as marked non-excludable for package hash="
+ << package.hash;
+ return;
+ }
+ auto exclusion_name = utils::GetExclusionName(GetCurrentUrl());
+ if (!excluder_->Exclude(exclusion_name))
+ LOG(WARNING) << "Failed to exclude "
+ << " Package Hash=" << package.hash
+ << " CurrentUrl=" << GetCurrentUrl();
+ else
+ LOG(INFO) << "Excluded "
+ << " Package Hash=" << package.hash
+ << " CurrentUrl=" << GetCurrentUrl();
+}
+
void PayloadState::UpdateBackoffExpiryTime() {
if (response_.disable_payload_backoff) {
LOG(INFO) << "Resetting backoff expiry time as payload backoff is disabled";
@@ -904,6 +926,7 @@
bool PayloadState::NextPayload() {
if (payload_index_ + 1 >= candidate_urls_.size())
return false;
+ SetUrlIndex(0);
SetPayloadIndex(payload_index_ + 1);
return true;
}
diff --git a/payload_state.h b/payload_state.h
index 5ef1220..d13c642 100644
--- a/payload_state.h
+++ b/payload_state.h
@@ -24,6 +24,7 @@
#include <base/time/time.h>
#include <gtest/gtest_prod.h> // for FRIEND_TEST
+#include "update_engine/common/excluder_interface.h"
#include "update_engine/common/prefs_interface.h"
#include "update_engine/metrics_constants.h"
#include "update_engine/payload_state_interface.h"
@@ -156,6 +157,10 @@
FRIEND_TEST(PayloadStateTest, RollbackHappened);
FRIEND_TEST(PayloadStateTest, RollbackVersion);
FRIEND_TEST(PayloadStateTest, UpdateSuccessWithWipedPrefs);
+ FRIEND_TEST(PayloadStateTest, NextPayloadResetsUrlIndex);
+ FRIEND_TEST(PayloadStateTest, ExcludeNoopForNonExcludables);
+ FRIEND_TEST(PayloadStateTest, ExcludeOnlyCanExcludables);
+ FRIEND_TEST(PayloadStateTest, IncrementFailureExclusionTest);
// Helper called when an attempt has begun, is called by
// UpdateResumed(), UpdateRestarted() and Rollback().
@@ -180,6 +185,12 @@
// to the next URL and resets the failure count for that URL.
void IncrementFailureCount();
+ // Excludes the current payload + current candidate URL from being part of
+ // future updates/retries. Whenever |SetResponse()| or |NextPayload()| decide
+ // on the initial current URL index and the next payload respectively, it will
+ // advanced based on exclusions.
+ void ExcludeCurrentPayload();
+
// Updates the backoff expiry time exponentially based on the current
// payload attempt number.
void UpdateBackoffExpiryTime();
@@ -428,6 +439,11 @@
// This object persists across powerwashes.
PrefsInterface* powerwash_safe_prefs_;
+ // Interface object with which we determine exclusion decisions for
+ // payloads/partitions during the update. This must be set by calling the
+ // Initialize method before calling any other method.
+ ExcluderInterface* excluder_;
+
// This is the current response object from Omaha.
OmahaResponse response_;
diff --git a/payload_state_unittest.cc b/payload_state_unittest.cc
index 869c24e..bf9aed4 100644
--- a/payload_state_unittest.cc
+++ b/payload_state_unittest.cc
@@ -23,9 +23,11 @@
#include <gtest/gtest.h>
#include "update_engine/common/constants.h"
+#include "update_engine/common/excluder_interface.h"
#include "update_engine/common/fake_clock.h"
#include "update_engine/common/fake_hardware.h"
#include "update_engine/common/fake_prefs.h"
+#include "update_engine/common/mock_excluder.h"
#include "update_engine/common/mock_prefs.h"
#include "update_engine/common/prefs.h"
#include "update_engine/common/test_utils.h"
@@ -44,6 +46,7 @@
using testing::NiceMock;
using testing::Return;
using testing::SetArgPointee;
+using testing::StrictMock;
namespace chromeos_update_engine {
@@ -1012,10 +1015,6 @@
NiceMock<MockPrefs>* mock_powerwash_safe_prefs =
fake_system_state.mock_powerwash_safe_prefs();
- EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
-
- // Verify pre-conditions are good.
- EXPECT_TRUE(payload_state.GetRollbackVersion().empty());
// Mock out the os version and make sure it's blacklisted correctly.
string rollback_version = "2345.0.0";
@@ -1023,6 +1022,11 @@
params.Init(rollback_version, "", false);
fake_system_state.set_request_params(¶ms);
+ EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
+
+ // Verify pre-conditions are good.
+ EXPECT_TRUE(payload_state.GetRollbackVersion().empty());
+
EXPECT_CALL(*mock_powerwash_safe_prefs,
SetString(kPrefsRollbackVersion, rollback_version));
payload_state.Rollback();
@@ -1353,15 +1357,15 @@
PayloadState payload_state;
FakeSystemState fake_system_state;
- EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
- SetupPayloadStateWith2Urls(
- "Hash6437", true, false, &payload_state, &response);
-
// Mock the request to a request where the delta was disabled.
OmahaRequestParams params(&fake_system_state);
params.set_delta_okay(false);
fake_system_state.set_request_params(¶ms);
+ EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
+ SetupPayloadStateWith2Urls(
+ "Hash6437", true, false, &payload_state, &response);
+
// Simulate a successful download and update.
payload_state.DownloadComplete();
@@ -1655,4 +1659,123 @@
EXPECT_EQ(null_time, payload_state.GetP2PFirstAttemptTimestamp());
}
+TEST(PayloadStateTest, NextPayloadResetsUrlIndex) {
+ PayloadState payload_state;
+ FakeSystemState fake_system_state;
+ StrictMock<MockExcluder> mock_excluder;
+ EXPECT_CALL(*fake_system_state.mock_update_attempter(), GetExcluder())
+ .WillOnce(Return(&mock_excluder));
+ EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
+
+ OmahaResponse response;
+ response.packages.push_back(
+ {.payload_urls = {"http://test1a", "http://test2a"},
+ .size = 123456789,
+ .metadata_size = 58123,
+ .metadata_signature = "msign",
+ .hash = "hash"});
+ response.packages.push_back({.payload_urls = {"http://test1b"},
+ .size = 123456789,
+ .metadata_size = 58123,
+ .metadata_signature = "msign",
+ .hash = "hash"});
+ payload_state.SetResponse(response);
+
+ EXPECT_EQ(payload_state.GetCurrentUrl(), "http://test1a");
+ payload_state.IncrementUrlIndex();
+ EXPECT_EQ(payload_state.GetCurrentUrl(), "http://test2a");
+
+ EXPECT_TRUE(payload_state.NextPayload());
+ EXPECT_EQ(payload_state.GetCurrentUrl(), "http://test1b");
+}
+
+TEST(PayloadStateTest, ExcludeNoopForNonExcludables) {
+ PayloadState payload_state;
+ FakeSystemState fake_system_state;
+ StrictMock<MockExcluder> mock_excluder;
+ EXPECT_CALL(*fake_system_state.mock_update_attempter(), GetExcluder())
+ .WillOnce(Return(&mock_excluder));
+ EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
+
+ OmahaResponse response;
+ response.packages.push_back(
+ {.payload_urls = {"http://test1a", "http://test2a"},
+ .size = 123456789,
+ .metadata_size = 58123,
+ .metadata_signature = "msign",
+ .hash = "hash",
+ .can_exclude = false});
+ payload_state.SetResponse(response);
+
+ EXPECT_CALL(mock_excluder, Exclude(_)).Times(0);
+ payload_state.ExcludeCurrentPayload();
+}
+
+TEST(PayloadStateTest, ExcludeOnlyCanExcludables) {
+ PayloadState payload_state;
+ FakeSystemState fake_system_state;
+ StrictMock<MockExcluder> mock_excluder;
+ EXPECT_CALL(*fake_system_state.mock_update_attempter(), GetExcluder())
+ .WillOnce(Return(&mock_excluder));
+ EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
+
+ OmahaResponse response;
+ response.packages.push_back(
+ {.payload_urls = {"http://test1a", "http://test2a"},
+ .size = 123456789,
+ .metadata_size = 58123,
+ .metadata_signature = "msign",
+ .hash = "hash",
+ .can_exclude = true});
+ payload_state.SetResponse(response);
+
+ EXPECT_CALL(mock_excluder, Exclude(utils::GetExclusionName("http://test1a")))
+ .WillOnce(Return(true));
+ payload_state.ExcludeCurrentPayload();
+}
+
+TEST(PayloadStateTest, IncrementFailureExclusionTest) {
+ PayloadState payload_state;
+ FakeSystemState fake_system_state;
+ StrictMock<MockExcluder> mock_excluder;
+ EXPECT_CALL(*fake_system_state.mock_update_attempter(), GetExcluder())
+ .WillOnce(Return(&mock_excluder));
+ EXPECT_TRUE(payload_state.Initialize(&fake_system_state));
+
+ OmahaResponse response;
+ // Critical package.
+ response.packages.push_back(
+ {.payload_urls = {"http://crit-test1a", "http://crit-test2a"},
+ .size = 123456789,
+ .metadata_size = 58123,
+ .metadata_signature = "msign",
+ .hash = "hash",
+ .can_exclude = false});
+ // Non-critical package.
+ response.packages.push_back(
+ {.payload_urls = {"http://test1a", "http://test2a"},
+ .size = 123456789,
+ .metadata_size = 58123,
+ .metadata_signature = "msign",
+ .hash = "hash",
+ .can_exclude = true});
+ response.max_failure_count_per_url = 2;
+ payload_state.SetResponse(response);
+
+ // Critical package won't be excluded.
+ // Increment twice as failure count allowed per URL is set to 2.
+ payload_state.IncrementFailureCount();
+ payload_state.IncrementFailureCount();
+
+ EXPECT_TRUE(payload_state.NextPayload());
+
+ // First increment failure should not exclude.
+ payload_state.IncrementFailureCount();
+
+ // Second increment failure should exclude.
+ EXPECT_CALL(mock_excluder, Exclude(utils::GetExclusionName("http://test1a")))
+ .WillOnce(Return(true));
+ payload_state.IncrementFailureCount();
+}
+
} // namespace chromeos_update_engine
diff --git a/real_system_state.cc b/real_system_state.cc
index 2f18b4d..74a37f3 100644
--- a/real_system_state.cc
+++ b/real_system_state.cc
@@ -32,7 +32,7 @@
#include "update_engine/common/boot_control.h"
#include "update_engine/common/boot_control_stub.h"
#include "update_engine/common/constants.h"
-#include "update_engine/common/dlcservice.h"
+#include "update_engine/common/dlcservice_interface.h"
#include "update_engine/common/hardware.h"
#include "update_engine/common/utils.h"
#include "update_engine/metrics_reporter_omaha.h"
@@ -54,8 +54,6 @@
}
bool RealSystemState::Initialize() {
- metrics_reporter_.Initialize();
-
boot_control_ = boot_control::CreateBootControl();
if (!boot_control_) {
LOG(WARNING) << "Unable to create BootControl instance, using stub "
@@ -189,14 +187,13 @@
return false;
}
- // For devices that are not rollback enabled (ie. consumer devices),
- // initialize max kernel key version to 0xfffffffe, which is logically
- // infinity.
- if (policy_provider_.IsConsumerDevice()) {
+ // For images that are build for debugging purposes like test images
+ // initialize max kernel key version to 0xfffffffe, which is logical infinity.
+ if (!hardware_->IsOfficialBuild()) {
if (!hardware()->SetMaxKernelKeyRollforward(
chromeos_update_manager::kRollforwardInfinity)) {
LOG(ERROR) << "Failed to set kernel_max_rollforward to infinity for"
- << " consumer devices";
+ << " device with test/dev image.";
}
}
diff --git a/real_system_state.h b/real_system_state.h
index 4712008..807a205 100644
--- a/real_system_state.h
+++ b/real_system_state.h
@@ -144,7 +144,7 @@
// Interface for dlcservice.
std::unique_ptr<DlcServiceInterface> dlcservice_;
- // Interface for the clock.
+ // Interface for the bootloader control.
std::unique_ptr<BootControlInterface> boot_control_;
// Interface for the clock.
diff --git a/sample_images/generate_images.sh b/sample_images/generate_images.sh
index 8478682..e0b54ae 100755
--- a/sample_images/generate_images.sh
+++ b/sample_images/generate_images.sh
@@ -270,6 +270,7 @@
# Add squashfs sample images.
generate_image disk_sqfs_empty sqfs empty $((1024 * 4096)) 4096
generate_image disk_sqfs_default sqfs default $((1024 * 4096)) 4096
+ generate_image disk_sqfs_unittest sqfs unittest $((1024 * 4096)) 4096
# Generate the tarball and delete temporary images.
echo "Packing tar file sample_images.tar.bz2"
diff --git a/sample_images/sample_images.tar.bz2 b/sample_images/sample_images.tar.bz2
index 6215482..5c80a51 100644
--- a/sample_images/sample_images.tar.bz2
+++ b/sample_images/sample_images.tar.bz2
Binary files differ
diff --git a/scripts/blockdiff.py b/scripts/blockdiff.py
index 5793def..95893cf 100755
--- a/scripts/blockdiff.py
+++ b/scripts/blockdiff.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python
#
# Copyright (C) 2013 The Android Open Source Project
#
@@ -17,6 +17,7 @@
"""Block diff utility."""
+from __future__ import absolute_import
from __future__ import print_function
# pylint: disable=import-error
@@ -46,7 +47,7 @@
"""
if max_length < 0:
- max_length = sys.maxint
+ max_length = sys.maxsize
diff_list = []
num_blocks = extent_start = extent_length = 0
while max_length or extent_length:
diff --git a/scripts/paycheck.py b/scripts/paycheck.py
index 9d61778..f4ccca2 100755
--- a/scripts/paycheck.py
+++ b/scripts/paycheck.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python
#
# Copyright (C) 2013 The Android Open Source Project
#
@@ -17,6 +17,7 @@
"""Command-line tool for checking and applying Chrome OS update payloads."""
+from __future__ import absolute_import
from __future__ import print_function
# pylint: disable=import-error
@@ -26,18 +27,42 @@
import sys
import tempfile
-from update_payload import common
+from six.moves import zip
from update_payload import error
+
lib_dir = os.path.join(os.path.dirname(__file__), 'lib')
if os.path.exists(lib_dir) and os.path.isdir(lib_dir):
sys.path.insert(1, lib_dir)
-import update_payload
+import update_payload # pylint: disable=wrong-import-position
_TYPE_FULL = 'full'
_TYPE_DELTA = 'delta'
+def CheckApplyPayload(args):
+ """Whether to check the result after applying the payload.
+
+ Args:
+ args: Parsed command arguments (the return value of
+ ArgumentParser.parse_args).
+
+ Returns:
+ Boolean value whether to check.
+ """
+ return args.dst_part_paths is not None
+
+def ApplyPayload(args):
+ """Whether to apply the payload.
+
+ Args:
+ args: Parsed command arguments (the return value of
+ ArgumentParser.parse_args).
+
+ Returns:
+ Boolean value whether to apply the payload.
+ """
+ return CheckApplyPayload(args) or args.out_dst_part_paths is not None
def ParseArguments(argv):
"""Parse and validate command-line arguments.
@@ -49,9 +74,9 @@
Returns the arguments returned by the argument parser.
"""
parser = argparse.ArgumentParser(
- description=('Applies a Chrome OS update PAYLOAD to src_kern and '
- 'src_root emitting dst_kern and dst_root, respectively. '
- 'src_kern and src_root are only needed for delta payloads. '
+ description=('Applies a Chrome OS update PAYLOAD to src_part_paths'
+ 'emitting dst_part_paths, respectively. '
+ 'src_part_paths are only needed for delta payloads. '
'When no partitions are provided, verifies the payload '
'integrity.'),
epilog=('Note: a payload may verify correctly but fail to apply, and '
@@ -93,13 +118,6 @@
check_args.add_argument('-s', '--metadata-size', metavar='NUM', default=0,
help='the metadata size to verify with the one in'
' payload')
- # TODO(tbrindus): deprecated in favour of --part_sizes
- check_args.add_argument('-p', '--root-part-size', metavar='NUM',
- default=0, type=int,
- help='override rootfs partition size auto-inference')
- check_args.add_argument('-P', '--kern-part-size', metavar='NUM',
- default=0, type=int,
- help='override kernel partition size auto-inference')
check_args.add_argument('--part_sizes', metavar='NUM', nargs='+', type=int,
help='override partition size auto-inference')
@@ -113,21 +131,6 @@
help='use the specified bspatch binary')
apply_args.add_argument('--puffpatch-path', metavar='FILE',
help='use the specified puffpatch binary')
- # TODO(tbrindus): deprecated in favour of --dst_part_paths
- apply_args.add_argument('--dst_kern', metavar='FILE',
- help='destination kernel partition file')
- apply_args.add_argument('--dst_root', metavar='FILE',
- help='destination root partition file')
- # TODO(tbrindus): deprecated in favour of --src_part_paths
- apply_args.add_argument('--src_kern', metavar='FILE',
- help='source kernel partition file')
- apply_args.add_argument('--src_root', metavar='FILE',
- help='source root partition file')
- # TODO(tbrindus): deprecated in favour of --out_dst_part_paths
- apply_args.add_argument('--out_dst_kern', metavar='FILE',
- help='created destination kernel partition file')
- apply_args.add_argument('--out_dst_root', metavar='FILE',
- help='created destination root partition file')
apply_args.add_argument('--src_part_paths', metavar='FILE', nargs='+',
help='source partitition files')
@@ -143,36 +146,28 @@
# Parse command-line arguments.
args = parser.parse_args(argv)
- # TODO(tbrindus): temporary workaround to keep old-style flags from breaking
- # without having to handle both types in our code. Remove after flag usage is
- # removed from calling scripts.
- args.part_names = args.part_names or [common.KERNEL, common.ROOTFS]
- args.part_sizes = args.part_sizes or [args.kern_part_size,
- args.root_part_size]
- args.src_part_paths = args.src_part_paths or [args.src_kern, args.src_root]
- args.dst_part_paths = args.dst_part_paths or [args.dst_kern, args.dst_root]
- args.out_dst_part_paths = args.out_dst_part_paths or [args.out_dst_kern,
- args.out_dst_root]
-
- # Make sure we don't have new dependencies on old flags by deleting them from
- # the namespace here.
- for old in ['kern_part_size', 'root_part_size', 'src_kern', 'src_root',
- 'dst_kern', 'dst_root', 'out_dst_kern', 'out_dst_root']:
- delattr(args, old)
-
# There are several options that imply --check.
args.check = (args.check or args.report or args.assert_type or
args.block_size or args.allow_unhashed or
args.disabled_tests or args.meta_sig or args.key or
- any(args.part_sizes) or args.metadata_size)
+ args.part_sizes is not None or args.metadata_size)
+ # Makes sure the following arguments have the same length as |part_names| if
+ # set.
for arg in ['part_sizes', 'src_part_paths', 'dst_part_paths',
'out_dst_part_paths']:
+ if getattr(args, arg) is None:
+ # Parameter is not set.
+ continue
if len(args.part_names) != len(getattr(args, arg, [])):
parser.error('partitions in --%s do not match --part_names' % arg)
- if all(args.dst_part_paths) or all(args.out_dst_part_paths):
- if all(args.src_part_paths):
+ def _IsSrcPartPathsProvided(args):
+ return args.src_part_paths is not None
+
+ # Makes sure parameters are coherent with payload type.
+ if ApplyPayload(args):
+ if _IsSrcPartPathsProvided(args):
if args.assert_type == _TYPE_FULL:
parser.error('%s payload does not accept source partition arguments'
% _TYPE_FULL)
@@ -208,7 +203,7 @@
# Parse and validate arguments.
args = ParseArguments(argv[1:])
- with open(args.payload) as payload_file:
+ with open(args.payload, 'rb') as payload_file:
payload = update_payload.Payload(payload_file)
try:
# Initialize payload.
@@ -230,8 +225,9 @@
report_file = open(args.report, 'w')
do_close_report_file = True
- part_sizes = dict(zip(args.part_names, args.part_sizes))
- metadata_sig_file = args.meta_sig and open(args.meta_sig)
+ part_sizes = (args.part_sizes and
+ dict(zip(args.part_names, args.part_sizes)))
+ metadata_sig_file = args.meta_sig and open(args.meta_sig, 'rb')
payload.Check(
pubkey_file_name=args.key,
metadata_sig_file=metadata_sig_file,
@@ -249,7 +245,7 @@
report_file.close()
# Apply payload.
- if all(args.dst_part_paths) or all(args.out_dst_part_paths):
+ if ApplyPayload(args):
dargs = {'bsdiff_in_place': not args.extract_bsdiff}
if args.bspatch_path:
dargs['bspatch_path'] = args.bspatch_path
@@ -260,9 +256,9 @@
out_dst_parts = {}
file_handles = []
- if all(args.out_dst_part_paths):
+ if args.out_dst_part_paths is not None:
for name, path in zip(args.part_names, args.out_dst_part_paths):
- handle = open(path, 'w+')
+ handle = open(path, 'wb+')
file_handles.append(handle)
out_dst_parts[name] = handle.name
else:
@@ -275,7 +271,7 @@
# If destination kernel and rootfs partitions are not given, then this
# just becomes an apply operation with no check.
- if all(args.dst_part_paths):
+ if CheckApplyPayload(args):
# Prior to comparing, add the unused space past the filesystem
# boundary in the new target partitions to become the same size as
# the given partitions. This will truncate to larger size.
@@ -293,7 +289,7 @@
# files are created as temp files and will be deleted upon close().
for handle in file_handles:
handle.close()
- except error.PayloadError, e:
+ except error.PayloadError as e:
sys.stderr.write('Error: %s\n' % e)
return 1
diff --git a/scripts/payload_info.py b/scripts/payload_info.py
index 09a7cf7..965bb76 100755
--- a/scripts/payload_info.py
+++ b/scripts/payload_info.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2
+#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 The Android Open Source Project
@@ -18,16 +18,17 @@
"""payload_info: Show information about an update payload."""
+from __future__ import absolute_import
from __future__ import print_function
import argparse
-import itertools
import sys
import textwrap
+from six.moves import range
import update_payload
-MAJOR_PAYLOAD_VERSION_CHROMEOS = 1
+
MAJOR_PAYLOAD_VERSION_BRILLO = 2
def DisplayValue(key, value):
@@ -41,12 +42,12 @@
def DisplayHexData(data, indent=0):
"""Print out binary data as a hex values."""
for off in range(0, len(data), 16):
- chunk = data[off:off + 16]
+ chunk = bytearray(data[off:off + 16])
print(' ' * indent +
- ' '.join('%.2x' % ord(c) for c in chunk) +
+ ' '.join('%.2x' % c for c in chunk) +
' ' * (16 - len(chunk)) +
' | ' +
- ''.join(c if 32 <= ord(c) < 127 else '.' for c in chunk))
+ ''.join(chr(c) if 32 <= c < 127 else '.' for c in chunk))
class PayloadCommand(object):
@@ -69,15 +70,11 @@
def _DisplayManifest(self):
"""Show information from the payload manifest."""
manifest = self.payload.manifest
- if self.payload.header.version == MAJOR_PAYLOAD_VERSION_BRILLO:
- DisplayValue('Number of partitions', len(manifest.partitions))
- for partition in manifest.partitions:
- DisplayValue(' Number of "%s" ops' % partition.partition_name,
- len(partition.operations))
- else:
- DisplayValue('Number of operations', len(manifest.install_operations))
- DisplayValue('Number of kernel ops',
- len(manifest.kernel_install_operations))
+ DisplayValue('Number of partitions', len(manifest.partitions))
+ for partition in manifest.partitions:
+ DisplayValue(' Number of "%s" ops' % partition.partition_name,
+ len(partition.operations))
+
DisplayValue('Block size', manifest.block_size)
DisplayValue('Minor version', manifest.minor_version)
@@ -131,8 +128,8 @@
Args:
name: The name you want displayed above the operation table.
- operations: The install_operations object that you want to display
- information about.
+ operations: The operations object that you want to display information
+ about.
"""
def _DisplayExtents(extents, name):
"""Show information about extents."""
@@ -149,7 +146,7 @@
op_dict = update_payload.common.OpType.NAMES
print('%s:' % name)
- for op, op_count in itertools.izip(operations, itertools.count()):
+ for op_count, op in enumerate(operations):
print(' %d: %s' % (op_count, op_dict[op.type]))
if op.HasField('data_offset'):
print(' Data offset: %s' % op.data_offset)
@@ -170,14 +167,9 @@
read_blocks = 0
written_blocks = 0
num_write_seeks = 0
- if self.payload.header.version == MAJOR_PAYLOAD_VERSION_BRILLO:
- partitions_operations = [part.operations for part in manifest.partitions]
- else:
- partitions_operations = [manifest.install_operations,
- manifest.kernel_install_operations]
- for operations in partitions_operations:
+ for partition in manifest.partitions:
last_ext = None
- for curr_op in operations:
+ for curr_op in partition.operations:
read_blocks += sum([ext.num_blocks for ext in curr_op.src_extents])
written_blocks += sum([ext.num_blocks for ext in curr_op.dst_extents])
for curr_ext in curr_op.dst_extents:
@@ -187,15 +179,10 @@
num_write_seeks += 1
last_ext = curr_ext
- if manifest.minor_version == 1:
- # Rootfs and kernel are written during the filesystem copy in version 1.
- written_blocks += manifest.old_rootfs_info.size / manifest.block_size
- written_blocks += manifest.old_kernel_info.size / manifest.block_size
- # Old and new rootfs and kernel are read once during verification
- read_blocks += manifest.old_rootfs_info.size / manifest.block_size
- read_blocks += manifest.old_kernel_info.size / manifest.block_size
- read_blocks += manifest.new_rootfs_info.size / manifest.block_size
- read_blocks += manifest.new_kernel_info.size / manifest.block_size
+ # Old and new partitions are read once during verification.
+ read_blocks += partition.old_partition_info.size // manifest.block_size
+ read_blocks += partition.new_partition_info.size // manifest.block_size
+
stats = {'read_blocks': read_blocks,
'written_blocks': written_blocks,
'num_write_seeks': num_write_seeks}
@@ -219,21 +206,15 @@
self._DisplayStats(self.payload.manifest)
if self.options.list_ops:
print()
- if self.payload.header.version == MAJOR_PAYLOAD_VERSION_BRILLO:
- for partition in self.payload.manifest.partitions:
- self._DisplayOps('%s install operations' % partition.partition_name,
- partition.operations)
- else:
- self._DisplayOps('Install operations',
- self.payload.manifest.install_operations)
- self._DisplayOps('Kernel install operations',
- self.payload.manifest.kernel_install_operations)
+ for partition in self.payload.manifest.partitions:
+ self._DisplayOps('%s install operations' % partition.partition_name,
+ partition.operations)
def main():
parser = argparse.ArgumentParser(
description='Show information about an update payload.')
- parser.add_argument('payload_file', type=file,
+ parser.add_argument('payload_file', type=argparse.FileType('rb'),
help='The update payload file.')
parser.add_argument('--list_ops', default=False, action='store_true',
help='List the install operations and their extents.')
@@ -245,5 +226,6 @@
PayloadCommand(args).Run()
+
if __name__ == '__main__':
sys.exit(main())
diff --git a/scripts/payload_info_unittest.py b/scripts/payload_info_unittest.py
index a4ee9d5..07bb679 100755
--- a/scripts/payload_info_unittest.py
+++ b/scripts/payload_info_unittest.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python
#
# Copyright (C) 2015 The Android Open Source Project
#
@@ -17,24 +17,31 @@
"""Unit testing payload_info.py."""
+# Disable check for function names to avoid errors based on old code
+# pylint: disable-msg=invalid-name
+
+from __future__ import absolute_import
from __future__ import print_function
-import StringIO
-import collections
-import mock
import sys
import unittest
+from contextlib import contextmanager
+
+from six.moves import StringIO
+
+import mock # pylint: disable=import-error
+
import payload_info
import update_payload
-from contextlib import contextmanager
-
from update_payload import update_metadata_pb2
+
class FakePayloadError(Exception):
"""A generic error when using the FakePayload."""
+
class FakeOption(object):
"""Fake options object for testing."""
@@ -42,11 +49,12 @@
self.list_ops = False
self.stats = False
self.signatures = False
- for key, val in kwargs.iteritems():
+ for key, val in kwargs.items():
setattr(self, key, val)
if not hasattr(self, 'payload_file'):
self.payload_file = None
+
class FakeOp(object):
"""Fake manifest operation for testing."""
@@ -54,48 +62,57 @@
self.src_extents = src_extents
self.dst_extents = dst_extents
self.type = op_type
- for key, val in kwargs.iteritems():
+ for key, val in kwargs.items():
setattr(self, key, val)
def HasField(self, field):
return hasattr(self, field)
+
+class FakeExtent(object):
+ """Fake Extent for testing."""
+ def __init__(self, start_block, num_blocks):
+ self.start_block = start_block
+ self.num_blocks = num_blocks
+
+
+class FakePartitionInfo(object):
+ """Fake PartitionInfo for testing."""
+ def __init__(self, size):
+ self.size = size
+
+
class FakePartition(object):
"""Fake PartitionUpdate field for testing."""
- def __init__(self, partition_name, operations):
+ def __init__(self, partition_name, operations, old_size, new_size):
self.partition_name = partition_name
self.operations = operations
+ self.old_partition_info = FakePartitionInfo(old_size)
+ self.new_partition_info = FakePartitionInfo(new_size)
+
class FakeManifest(object):
"""Fake manifest for testing."""
- def __init__(self, major_version):
- FakeExtent = collections.namedtuple('FakeExtent',
- ['start_block', 'num_blocks'])
- self.install_operations = [FakeOp([],
- [FakeExtent(1, 1), FakeExtent(2, 2)],
- update_payload.common.OpType.REPLACE_BZ,
- dst_length=3*4096,
- data_offset=1,
- data_length=1)]
- self.kernel_install_operations = [FakeOp(
- [FakeExtent(1, 1)],
- [FakeExtent(x, x) for x in xrange(20)],
- update_payload.common.OpType.SOURCE_COPY,
- src_length=4096)]
- if major_version == payload_info.MAJOR_PAYLOAD_VERSION_BRILLO:
- self.partitions = [FakePartition('root', self.install_operations),
- FakePartition('kernel',
- self.kernel_install_operations)]
- self.install_operations = self.kernel_install_operations = []
+ def __init__(self):
+ self.partitions = [
+ FakePartition(update_payload.common.ROOTFS,
+ [FakeOp([], [FakeExtent(1, 1), FakeExtent(2, 2)],
+ update_payload.common.OpType.REPLACE_BZ,
+ dst_length=3*4096,
+ data_offset=1,
+ data_length=1)
+ ], 1 * 4096, 3 * 4096),
+ FakePartition(update_payload.common.KERNEL,
+ [FakeOp([FakeExtent(1, 1)],
+ [FakeExtent(x, x) for x in range(20)],
+ update_payload.common.OpType.SOURCE_COPY,
+ src_length=4096)
+ ], 2 * 4096, 4 * 4096),
+ ]
self.block_size = 4096
self.minor_version = 4
- FakePartInfo = collections.namedtuple('FakePartInfo', ['size'])
- self.old_rootfs_info = FakePartInfo(1 * 4096)
- self.old_kernel_info = FakePartInfo(2 * 4096)
- self.new_rootfs_info = FakePartInfo(3 * 4096)
- self.new_kernel_info = FakePartInfo(4 * 4096)
self.signatures_offset = None
self.signatures_size = None
@@ -103,26 +120,27 @@
"""Fake HasField method based on the python members."""
return hasattr(self, field_name) and getattr(self, field_name) is not None
+
class FakeHeader(object):
"""Fake payload header for testing."""
- def __init__(self, version, manifest_len, metadata_signature_len):
- self.version = version
+ def __init__(self, manifest_len, metadata_signature_len):
+ self.version = payload_info.MAJOR_PAYLOAD_VERSION_BRILLO
self.manifest_len = manifest_len
self.metadata_signature_len = metadata_signature_len
@property
def size(self):
- return (20 if self.version == payload_info.MAJOR_PAYLOAD_VERSION_CHROMEOS
- else 24)
+ return 24
+
class FakePayload(object):
"""Fake payload for testing."""
- def __init__(self, major_version):
- self._header = FakeHeader(major_version, 222, 0)
+ def __init__(self):
+ self._header = FakeHeader(222, 0)
self.header = None
- self._manifest = FakeManifest(major_version)
+ self._manifest = FakeManifest()
self.manifest = None
self._blobs = {}
@@ -152,7 +170,7 @@
def _AddSignatureToProto(proto, **kwargs):
"""Add a new Signature element to the passed proto."""
new_signature = proto.signatures.add()
- for key, val in kwargs.iteritems():
+ for key, val in kwargs.items():
setattr(new_signature, key, val)
def AddPayloadSignature(self, **kwargs):
@@ -170,6 +188,7 @@
self._header.metadata_signature_len = len(blob)
self._blobs[-len(blob)] = blob
+
class PayloadCommandTest(unittest.TestCase):
"""Test class for our PayloadCommand class."""
@@ -178,7 +197,7 @@
"""A tool for capturing the sys.stdout"""
stdout = sys.stdout
try:
- sys.stdout = StringIO.StringIO()
+ sys.stdout = StringIO()
yield sys.stdout
finally:
sys.stdout = stdout
@@ -192,60 +211,33 @@
with mock.patch.object(update_payload, 'Payload', return_value=payload), \
self.OutputCapturer() as output:
payload_cmd.Run()
- self.assertEquals(output.getvalue(), expected_out)
+ self.assertEqual(output.getvalue(), expected_out)
def testDisplayValue(self):
"""Verify that DisplayValue prints what we expect."""
with self.OutputCapturer() as output:
payload_info.DisplayValue('key', 'value')
- self.assertEquals(output.getvalue(), 'key: value\n')
+ self.assertEqual(output.getvalue(), 'key: value\n')
def testRun(self):
"""Verify that Run parses and displays the payload like we expect."""
payload_cmd = payload_info.PayloadCommand(FakeOption(action='show'))
- payload = FakePayload(payload_info.MAJOR_PAYLOAD_VERSION_CHROMEOS)
- expected_out = """Payload version: 1
+ payload = FakePayload()
+ expected_out = """Payload version: 2
Manifest length: 222
-Number of operations: 1
-Number of kernel ops: 1
+Number of partitions: 2
+ Number of "root" ops: 1
+ Number of "kernel" ops: 1
Block size: 4096
Minor version: 4
"""
self.TestCommand(payload_cmd, payload, expected_out)
- def testListOpsOnVersion1(self):
- """Verify that the --list_ops option gives the correct output."""
- payload_cmd = payload_info.PayloadCommand(
- FakeOption(list_ops=True, action='show'))
- payload = FakePayload(payload_info.MAJOR_PAYLOAD_VERSION_CHROMEOS)
- expected_out = """Payload version: 1
-Manifest length: 222
-Number of operations: 1
-Number of kernel ops: 1
-Block size: 4096
-Minor version: 4
-
-Install operations:
- 0: REPLACE_BZ
- Data offset: 1
- Data length: 1
- Destination: 2 extents (3 blocks)
- (1,1) (2,2)
-Kernel install operations:
- 0: SOURCE_COPY
- Source: 1 extent (1 block)
- (1,1)
- Destination: 20 extents (190 blocks)
- (0,0) (1,1) (2,2) (3,3) (4,4) (5,5) (6,6) (7,7) (8,8) (9,9) (10,10)
- (11,11) (12,12) (13,13) (14,14) (15,15) (16,16) (17,17) (18,18) (19,19)
-"""
- self.TestCommand(payload_cmd, payload, expected_out)
-
def testListOpsOnVersion2(self):
"""Verify that the --list_ops option gives the correct output."""
payload_cmd = payload_info.PayloadCommand(
FakeOption(list_ops=True, action='show'))
- payload = FakePayload(payload_info.MAJOR_PAYLOAD_VERSION_BRILLO)
+ payload = FakePayload()
expected_out = """Payload version: 2
Manifest length: 222
Number of partitions: 2
@@ -270,28 +262,11 @@
"""
self.TestCommand(payload_cmd, payload, expected_out)
- def testStatsOnVersion1(self):
- """Verify that the --stats option works correctly."""
- payload_cmd = payload_info.PayloadCommand(
- FakeOption(stats=True, action='show'))
- payload = FakePayload(payload_info.MAJOR_PAYLOAD_VERSION_CHROMEOS)
- expected_out = """Payload version: 1
-Manifest length: 222
-Number of operations: 1
-Number of kernel ops: 1
-Block size: 4096
-Minor version: 4
-Blocks read: 11
-Blocks written: 193
-Seeks when writing: 18
-"""
- self.TestCommand(payload_cmd, payload, expected_out)
-
def testStatsOnVersion2(self):
"""Verify that the --stats option works correctly on version 2."""
payload_cmd = payload_info.PayloadCommand(
FakeOption(stats=True, action='show'))
- payload = FakePayload(payload_info.MAJOR_PAYLOAD_VERSION_BRILLO)
+ payload = FakePayload()
expected_out = """Payload version: 2
Manifest length: 222
Number of partitions: 2
@@ -309,11 +284,12 @@
"""Verify that the --signatures option works with unsigned payloads."""
payload_cmd = payload_info.PayloadCommand(
FakeOption(action='show', signatures=True))
- payload = FakePayload(payload_info.MAJOR_PAYLOAD_VERSION_CHROMEOS)
- expected_out = """Payload version: 1
+ payload = FakePayload()
+ expected_out = """Payload version: 2
Manifest length: 222
-Number of operations: 1
-Number of kernel ops: 1
+Number of partitions: 2
+ Number of "root" ops: 1
+ Number of "kernel" ops: 1
Block size: 4096
Minor version: 4
No metadata signatures stored in the payload
@@ -325,11 +301,11 @@
"""Verify that the --signatures option shows the present signatures."""
payload_cmd = payload_info.PayloadCommand(
FakeOption(action='show', signatures=True))
- payload = FakePayload(payload_info.MAJOR_PAYLOAD_VERSION_BRILLO)
+ payload = FakePayload()
payload.AddPayloadSignature(version=1,
- data='12345678abcdefgh\x00\x01\x02\x03')
- payload.AddPayloadSignature(data='I am a signature so access is yes.')
- payload.AddMetadataSignature(data='\x00\x0a\x0c')
+ data=b'12345678abcdefgh\x00\x01\x02\x03')
+ payload.AddPayloadSignature(data=b'I am a signature so access is yes.')
+ payload.AddMetadataSignature(data=b'\x00\x0a\x0c')
expected_out = """Payload version: 2
Manifest length: 222
Number of partitions: 2
@@ -353,5 +329,6 @@
"""
self.TestCommand(payload_cmd, payload, expected_out)
+
if __name__ == '__main__':
unittest.main()
diff --git a/scripts/update_device.py b/scripts/update_device.py
index 49f766d..7be3edb 100755
--- a/scripts/update_device.py
+++ b/scripts/update_device.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python
#
# Copyright (C) 2017 The Android Open Source Project
#
@@ -17,8 +17,9 @@
"""Send an A/B update to an Android device over adb."""
+from __future__ import absolute_import
+
import argparse
-import BaseHTTPServer
import hashlib
import logging
import os
@@ -29,6 +30,8 @@
import xml.etree.ElementTree
import zipfile
+from six.moves import BaseHTTPServer
+
import update_payload.payload
@@ -41,6 +44,7 @@
# The port on the device that update_engine should connect to.
DEVICE_PORT = 1234
+
def CopyFileObjLength(fsrc, fdst, buffer_size=128 * 1024, copy_length=None):
"""Copy from a file object to another.
@@ -137,7 +141,6 @@
start_range = file_size - int(e)
return start_range, end_range
-
def do_GET(self): # pylint: disable=invalid-name
"""Reply with the requested payload file."""
if self.path != '/payload':
@@ -180,7 +183,6 @@
f.seek(serving_start + start_range)
CopyFileObjLength(f, self.wfile, copy_length=end_range - start_range)
-
def do_POST(self): # pylint: disable=invalid-name
"""Reply with the omaha response xml."""
if self.path != '/update':
@@ -451,5 +453,6 @@
return 0
+
if __name__ == '__main__':
sys.exit(main())
diff --git a/scripts/update_payload/__init__.py b/scripts/update_payload/__init__.py
index 8ee95e2..6e77678 100644
--- a/scripts/update_payload/__init__.py
+++ b/scripts/update_payload/__init__.py
@@ -17,6 +17,8 @@
"""Library for processing, verifying and applying Chrome OS update payloads."""
# Just raise the interface classes to the root namespace.
+from __future__ import absolute_import
+
from update_payload.checker import CHECKS_TO_DISABLE
from update_payload.error import PayloadError
from update_payload.payload import Payload
diff --git a/scripts/update_payload/applier.py b/scripts/update_payload/applier.py
index 21d8e87..29ccb8e 100644
--- a/scripts/update_payload/applier.py
+++ b/scripts/update_payload/applier.py
@@ -24,12 +24,12 @@
"""
+from __future__ import absolute_import
from __future__ import print_function
import array
import bz2
import hashlib
-import itertools
# Not everywhere we can have the lzma library so we ignore it if we didn't have
# it because it is not going to be used. For example, 'cros flash' uses
# devserver code which eventually loads this file, but the lzma library is not
@@ -45,7 +45,6 @@
except ImportError:
pass
import os
-import shutil
import subprocess
import sys
import tempfile
@@ -53,7 +52,6 @@
from update_payload import common
from update_payload.error import PayloadError
-
#
# Helper functions.
#
@@ -72,7 +70,7 @@
"""
hasher = hashlib.sha256()
block_length = 1024 * 1024
- max_length = length if length >= 0 else sys.maxint
+ max_length = length if length >= 0 else sys.maxsize
while max_length > 0:
read_length = min(max_length, block_length)
@@ -108,20 +106,16 @@
Returns:
A character array containing the concatenated read data.
"""
- data = array.array('c')
+ data = array.array('B')
if max_length < 0:
- max_length = sys.maxint
+ max_length = sys.maxsize
for ex in extents:
if max_length == 0:
break
read_length = min(max_length, ex.num_blocks * block_size)
- # Fill with zeros or read from file, depending on the type of extent.
- if ex.start_block == common.PSEUDO_EXTENT_MARKER:
- data.extend(itertools.repeat('\0', read_length))
- else:
- file_obj.seek(ex.start_block * block_size)
- data.fromfile(file_obj, read_length)
+ file_obj.seek(ex.start_block * block_size)
+ data.fromfile(file_obj, read_length)
max_length -= read_length
@@ -149,12 +143,8 @@
if not data_length:
raise PayloadError('%s: more write extents than data' % ex_name)
write_length = min(data_length, ex.num_blocks * block_size)
-
- # Only do actual writing if this is not a pseudo-extent.
- if ex.start_block != common.PSEUDO_EXTENT_MARKER:
- file_obj.seek(ex.start_block * block_size)
- data_view = buffer(data, data_offset, write_length)
- file_obj.write(data_view)
+ file_obj.seek(ex.start_block * block_size)
+ file_obj.write(data[data_offset:(data_offset + write_length)])
data_offset += write_length
data_length -= write_length
@@ -184,20 +174,17 @@
arg = ''
pad_off = pad_len = 0
if data_length < 0:
- data_length = sys.maxint
+ data_length = sys.maxsize
for ex, ex_name in common.ExtentIter(extents, base_name):
if not data_length:
raise PayloadError('%s: more extents than total data length' % ex_name)
- is_pseudo = ex.start_block == common.PSEUDO_EXTENT_MARKER
- start_byte = -1 if is_pseudo else ex.start_block * block_size
+ start_byte = ex.start_block * block_size
num_bytes = ex.num_blocks * block_size
if data_length < num_bytes:
# We're only padding a real extent.
- if not is_pseudo:
- pad_off = start_byte + data_length
- pad_len = num_bytes - data_length
-
+ pad_off = start_byte + data_length
+ pad_len = num_bytes - data_length
num_bytes = data_length
arg += '%s%d:%d' % (arg and ',', start_byte, num_bytes)
@@ -274,30 +261,28 @@
num_blocks = ex.num_blocks
count = num_blocks * block_size
- # Make sure it's not a fake (signature) operation.
- if start_block != common.PSEUDO_EXTENT_MARKER:
- data_end = data_start + count
+ data_end = data_start + count
- # Make sure we're not running past partition boundary.
- if (start_block + num_blocks) * block_size > part_size:
- raise PayloadError(
- '%s: extent (%s) exceeds partition size (%d)' %
- (ex_name, common.FormatExtent(ex, block_size),
- part_size))
+ # Make sure we're not running past partition boundary.
+ if (start_block + num_blocks) * block_size > part_size:
+ raise PayloadError(
+ '%s: extent (%s) exceeds partition size (%d)' %
+ (ex_name, common.FormatExtent(ex, block_size),
+ part_size))
- # Make sure that we have enough data to write.
- if data_end >= data_length + block_size:
- raise PayloadError(
- '%s: more dst blocks than data (even with padding)')
+ # Make sure that we have enough data to write.
+ if data_end >= data_length + block_size:
+ raise PayloadError(
+ '%s: more dst blocks than data (even with padding)')
- # Pad with zeros if necessary.
- if data_end > data_length:
- padding = data_end - data_length
- out_data += '\0' * padding
+ # Pad with zeros if necessary.
+ if data_end > data_length:
+ padding = data_end - data_length
+ out_data += b'\0' * padding
- self.payload.payload_file.seek(start_block * block_size)
- part_file.seek(start_block * block_size)
- part_file.write(out_data[data_start:data_end])
+ self.payload.payload_file.seek(start_block * block_size)
+ part_file.seek(start_block * block_size)
+ part_file.write(out_data[data_start:data_end])
data_start += count
@@ -306,30 +291,6 @@
raise PayloadError('%s: wrote fewer bytes (%d) than expected (%d)' %
(op_name, data_start, data_length))
- def _ApplyMoveOperation(self, op, op_name, part_file):
- """Applies a MOVE operation.
-
- Note that this operation must read the whole block data from the input and
- only then dump it, due to our in-place update semantics; otherwise, it
- might clobber data midway through.
-
- Args:
- op: the operation object
- op_name: name string for error reporting
- part_file: the partition file object
-
- Raises:
- PayloadError if something goes wrong.
- """
- block_size = self.block_size
-
- # Gather input raw data from src extents.
- in_data = _ReadExtents(part_file, op.src_extents, block_size)
-
- # Dump extracted data to dst extents.
- _WriteExtents(part_file, in_data, op.dst_extents, block_size,
- '%s.dst_extents' % op_name)
-
def _ApplyZeroOperation(self, op, op_name, part_file):
"""Applies a ZERO operation.
@@ -347,10 +308,8 @@
# Iterate over the extents and write zero.
# pylint: disable=unused-variable
for ex, ex_name in common.ExtentIter(op.dst_extents, base_name):
- # Only do actual writing if this is not a pseudo-extent.
- if ex.start_block != common.PSEUDO_EXTENT_MARKER:
- part_file.seek(ex.start_block * block_size)
- part_file.write('\0' * (ex.num_blocks * block_size))
+ part_file.seek(ex.start_block * block_size)
+ part_file.write(b'\0' * (ex.num_blocks * block_size))
def _ApplySourceCopyOperation(self, op, op_name, old_part_file,
new_part_file):
@@ -439,12 +398,19 @@
# Diff from source partition.
old_file_name = '/dev/fd/%d' % old_part_file.fileno()
- if op.type in (common.OpType.BSDIFF, common.OpType.SOURCE_BSDIFF,
- common.OpType.BROTLI_BSDIFF):
+ # In python3, file descriptors(fd) are not passed to child processes by
+ # default. To pass the fds to the child processes, we need to set the flag
+ # 'inheritable' in the fds and make the subprocess calls with the argument
+ # close_fds set to False.
+ if sys.version_info.major >= 3:
+ os.set_inheritable(new_part_file.fileno(), True)
+ os.set_inheritable(old_part_file.fileno(), True)
+
+ if op.type in (common.OpType.SOURCE_BSDIFF, common.OpType.BROTLI_BSDIFF):
# Invoke bspatch on partition file with extents args.
bspatch_cmd = [self.bspatch_path, old_file_name, new_file_name,
patch_file_name, in_extents_arg, out_extents_arg]
- subprocess.check_call(bspatch_cmd)
+ subprocess.check_call(bspatch_cmd, close_fds=False)
elif op.type == common.OpType.PUFFDIFF:
# Invoke puffpatch on partition file with extents args.
puffpatch_cmd = [self.puffpatch_path,
@@ -454,14 +420,14 @@
"--patch_file=%s" % patch_file_name,
"--src_extents=%s" % in_extents_arg,
"--dst_extents=%s" % out_extents_arg]
- subprocess.check_call(puffpatch_cmd)
+ subprocess.check_call(puffpatch_cmd, close_fds=False)
else:
- raise PayloadError("Unknown operation %s", op.type)
+ raise PayloadError("Unknown operation %s" % op.type)
# Pad with zeros past the total output length.
if pad_len:
new_part_file.seek(pad_off)
- new_part_file.write('\0' * pad_len)
+ new_part_file.write(b'\0' * pad_len)
else:
# Gather input raw data and write to a temp file.
input_part_file = old_part_file if old_part_file else new_part_file
@@ -477,8 +443,7 @@
with tempfile.NamedTemporaryFile(delete=False) as out_file:
out_file_name = out_file.name
- if op.type in (common.OpType.BSDIFF, common.OpType.SOURCE_BSDIFF,
- common.OpType.BROTLI_BSDIFF):
+ if op.type in (common.OpType.SOURCE_BSDIFF, common.OpType.BROTLI_BSDIFF):
# Invoke bspatch.
bspatch_cmd = [self.bspatch_path, in_file_name, out_file_name,
patch_file_name]
@@ -492,7 +457,7 @@
"--patch_file=%s" % patch_file_name]
subprocess.check_call(puffpatch_cmd)
else:
- raise PayloadError("Unknown operation %s", op.type)
+ raise PayloadError("Unknown operation %s" % op.type)
# Read output.
with open(out_file_name, 'rb') as out_file:
@@ -505,7 +470,7 @@
# Write output back to partition, with padding.
unaligned_out_len = len(out_data) % block_size
if unaligned_out_len:
- out_data += '\0' * (block_size - unaligned_out_len)
+ out_data += b'\0' * (block_size - unaligned_out_len)
_WriteExtents(new_part_file, out_data, op.dst_extents, block_size,
'%s.dst_extents' % op_name)
@@ -520,10 +485,6 @@
new_part_file, part_size):
"""Applies a sequence of update operations to a partition.
- This assumes an in-place update semantics for MOVE and BSDIFF, namely all
- reads are performed first, then the data is processed and written back to
- the same file.
-
Args:
operations: the sequence of operations
base_name: the name of the operation sequence
@@ -541,13 +502,8 @@
if op.type in (common.OpType.REPLACE, common.OpType.REPLACE_BZ,
common.OpType.REPLACE_XZ):
self._ApplyReplaceOperation(op, op_name, data, new_part_file, part_size)
- elif op.type == common.OpType.MOVE:
- self._ApplyMoveOperation(op, op_name, new_part_file)
elif op.type == common.OpType.ZERO:
self._ApplyZeroOperation(op, op_name, new_part_file)
- elif op.type == common.OpType.BSDIFF:
- self._ApplyDiffOperation(op, op_name, data, new_part_file,
- new_part_file)
elif op.type == common.OpType.SOURCE_COPY:
self._ApplySourceCopyOperation(op, op_name, old_part_file,
new_part_file)
@@ -583,15 +539,8 @@
_VerifySha256(old_part_file, old_part_info.hash,
'old ' + part_name, length=old_part_info.size)
new_part_file_mode = 'r+b'
- if self.minor_version == common.INPLACE_MINOR_PAYLOAD_VERSION:
- # Copy the src partition to the dst one; make sure we don't truncate it.
- shutil.copyfile(old_part_file_name, new_part_file_name)
- elif self.minor_version >= common.SOURCE_MINOR_PAYLOAD_VERSION:
- # In minor version >= 2, we don't want to copy the partitions, so
- # instead just make the new partition file.
- open(new_part_file_name, 'w').close()
- else:
- raise PayloadError("Unknown minor version: %d" % self.minor_version)
+ open(new_part_file_name, 'w').close()
+
else:
# We need to create/truncate the dst partition file.
new_part_file_mode = 'w+b'
@@ -639,20 +588,11 @@
install_operations = []
manifest = self.payload.manifest
- if self.payload.header.version == 1:
- for real_name, proto_name in common.CROS_PARTITIONS:
- new_part_info[real_name] = getattr(manifest, 'new_%s_info' % proto_name)
- old_part_info[real_name] = getattr(manifest, 'old_%s_info' % proto_name)
-
- install_operations.append((common.ROOTFS, manifest.install_operations))
- install_operations.append((common.KERNEL,
- manifest.kernel_install_operations))
- else:
- for part in manifest.partitions:
- name = part.partition_name
- new_part_info[name] = part.new_partition_info
- old_part_info[name] = part.old_partition_info
- install_operations.append((name, part.operations))
+ for part in manifest.partitions:
+ name = part.partition_name
+ new_part_info[name] = part.new_partition_info
+ old_part_info[name] = part.old_partition_info
+ install_operations.append((name, part.operations))
part_names = set(new_part_info.keys()) # Equivalently, old_part_info.keys()
diff --git a/scripts/update_payload/checker.py b/scripts/update_payload/checker.py
index 6d17fbe..4c65516 100644
--- a/scripts/update_payload/checker.py
+++ b/scripts/update_payload/checker.py
@@ -24,6 +24,7 @@
checker.Run(...)
"""
+from __future__ import absolute_import
from __future__ import print_function
import array
@@ -34,22 +35,21 @@
import os
import subprocess
+from six.moves import range
+
from update_payload import common
from update_payload import error
from update_payload import format_utils
from update_payload import histogram
from update_payload import update_metadata_pb2
-
#
# Constants.
#
-_CHECK_DST_PSEUDO_EXTENTS = 'dst-pseudo-extents'
_CHECK_MOVE_SAME_SRC_DST_BLOCK = 'move-same-src-dst-block'
_CHECK_PAYLOAD_SIG = 'payload-sig'
CHECKS_TO_DISABLE = (
- _CHECK_DST_PSEUDO_EXTENTS,
_CHECK_MOVE_SAME_SRC_DST_BLOCK,
_CHECK_PAYLOAD_SIG,
)
@@ -66,7 +66,6 @@
# Supported minor version map to payload types allowed to be using them.
_SUPPORTED_MINOR_VERSIONS = {
0: (_TYPE_FULL,),
- 1: (_TYPE_DELTA,),
2: (_TYPE_DELTA,),
3: (_TYPE_DELTA,),
4: (_TYPE_DELTA,),
@@ -74,7 +73,6 @@
6: (_TYPE_DELTA,),
}
-_OLD_DELTA_USABLE_PART_SIZE = 2 * 1024 * 1024 * 1024
#
# Helper functions.
@@ -323,8 +321,6 @@
self.allow_unhashed = allow_unhashed
# Disable specific tests.
- self.check_dst_pseudo_extents = (
- _CHECK_DST_PSEUDO_EXTENTS not in disabled_tests)
self.check_move_same_src_dst_block = (
_CHECK_MOVE_SAME_SRC_DST_BLOCK not in disabled_tests)
self.check_payload_sig = _CHECK_PAYLOAD_SIG not in disabled_tests
@@ -609,7 +605,7 @@
"""
self.major_version = self.payload.header.version
- part_sizes = collections.defaultdict(int, part_sizes)
+ part_sizes = part_sizes or collections.defaultdict(int)
manifest = self.payload.manifest
report.AddSection('manifest')
@@ -628,35 +624,23 @@
self._CheckPresentIff(self.sigs_offset, self.sigs_size,
'signatures_offset', 'signatures_size', 'manifest')
- if self.major_version == common.CHROMEOS_MAJOR_PAYLOAD_VERSION:
- for real_name, proto_name in common.CROS_PARTITIONS:
- self.old_part_info[real_name] = self._CheckOptionalSubMsg(
- manifest, 'old_%s_info' % proto_name, report)
- self.new_part_info[real_name] = self._CheckMandatorySubMsg(
- manifest, 'new_%s_info' % proto_name, report, 'manifest')
+ for part in manifest.partitions:
+ name = part.partition_name
+ self.old_part_info[name] = self._CheckOptionalSubMsg(
+ part, 'old_partition_info', report)
+ self.new_part_info[name] = self._CheckMandatorySubMsg(
+ part, 'new_partition_info', report, 'manifest.partitions')
- # Check: old_kernel_info <==> old_rootfs_info.
- self._CheckPresentIff(self.old_part_info[common.KERNEL].msg,
- self.old_part_info[common.ROOTFS].msg,
- 'old_kernel_info', 'old_rootfs_info', 'manifest')
- else:
- for part in manifest.partitions:
- name = part.partition_name
- self.old_part_info[name] = self._CheckOptionalSubMsg(
- part, 'old_partition_info', report)
- self.new_part_info[name] = self._CheckMandatorySubMsg(
- part, 'new_partition_info', report, 'manifest.partitions')
+ # Check: Old-style partition infos should not be specified.
+ for _, part in common.CROS_PARTITIONS:
+ self._CheckElemNotPresent(manifest, 'old_%s_info' % part, 'manifest')
+ self._CheckElemNotPresent(manifest, 'new_%s_info' % part, 'manifest')
- # Check: Old-style partition infos should not be specified.
- for _, part in common.CROS_PARTITIONS:
- self._CheckElemNotPresent(manifest, 'old_%s_info' % part, 'manifest')
- self._CheckElemNotPresent(manifest, 'new_%s_info' % part, 'manifest')
-
- # Check: If old_partition_info is specified anywhere, it must be
- # specified everywhere.
- old_part_msgs = [part.msg for part in self.old_part_info.values() if part]
- self._CheckPresentIffMany(old_part_msgs, 'old_partition_info',
- 'manifest.partitions')
+ # Check: If old_partition_info is specified anywhere, it must be
+ # specified everywhere.
+ old_part_msgs = [part.msg for part in self.old_part_info.values() if part]
+ self._CheckPresentIffMany(old_part_msgs, 'old_partition_info',
+ 'manifest.partitions')
is_delta = any(part and part.msg for part in self.old_part_info.values())
if is_delta:
@@ -666,7 +650,7 @@
'Apparent full payload contains old_{kernel,rootfs}_info.')
self.payload_type = _TYPE_DELTA
- for part, (msg, part_report) in self.old_part_info.iteritems():
+ for part, (msg, part_report) in self.old_part_info.items():
# Check: {size, hash} present in old_{kernel,rootfs}_info.
field = 'old_%s_info' % part
self.old_fs_sizes[part] = self._CheckMandatoryField(msg, 'size',
@@ -687,7 +671,7 @@
self.payload_type = _TYPE_FULL
# Check: new_{kernel,rootfs}_info present; contains {size, hash}.
- for part, (msg, part_report) in self.new_part_info.iteritems():
+ for part, (msg, part_report) in self.new_part_info.items():
field = 'new_%s_info' % part
self.new_fs_sizes[part] = self._CheckMandatoryField(msg, 'size',
part_report, field)
@@ -724,8 +708,7 @@
self._CheckBlocksFitLength(length, total_blocks, self.block_size,
'%s: %s' % (op_name, length_name))
- def _CheckExtents(self, extents, usable_size, block_counters, name,
- allow_pseudo=False, allow_signature=False):
+ def _CheckExtents(self, extents, usable_size, block_counters, name):
"""Checks a sequence of extents.
Args:
@@ -733,8 +716,6 @@
usable_size: The usable size of the partition to which the extents apply.
block_counters: Array of counters corresponding to the number of blocks.
name: The name of the extent block.
- allow_pseudo: Whether or not pseudo block numbers are allowed.
- allow_signature: Whether or not the extents are used for a signature.
Returns:
The total number of blocks in the extents.
@@ -755,20 +736,15 @@
if num_blocks == 0:
raise error.PayloadError('%s: extent length is zero.' % ex_name)
- if start_block != common.PSEUDO_EXTENT_MARKER:
- # Check: Make sure we're within the partition limit.
- if usable_size and end_block * self.block_size > usable_size:
- raise error.PayloadError(
- '%s: extent (%s) exceeds usable partition size (%d).' %
- (ex_name, common.FormatExtent(ex, self.block_size), usable_size))
+ # Check: Make sure we're within the partition limit.
+ if usable_size and end_block * self.block_size > usable_size:
+ raise error.PayloadError(
+ '%s: extent (%s) exceeds usable partition size (%d).' %
+ (ex_name, common.FormatExtent(ex, self.block_size), usable_size))
- # Record block usage.
- for i in xrange(start_block, end_block):
- block_counters[i] += 1
- elif not (allow_pseudo or (allow_signature and len(extents) == 1)):
- # Pseudo-extents must be allowed explicitly, or otherwise be part of a
- # signature operation (in which case there has to be exactly one).
- raise error.PayloadError('%s: unexpected pseudo-extent.' % ex_name)
+ # Record block usage.
+ for i in range(start_block, end_block):
+ block_counters[i] += 1
total_num_blocks += num_blocks
@@ -786,6 +762,11 @@
Raises:
error.PayloadError if any check fails.
"""
+ # Check: total_dst_blocks is not a floating point.
+ if isinstance(total_dst_blocks, float):
+ raise error.PayloadError('%s: contains invalid data type of '
+ 'total_dst_blocks.' % op_name)
+
# Check: Does not contain src extents.
if op.src_extents:
raise error.PayloadError('%s: contains src_extents.' % op_name)
@@ -806,89 +787,6 @@
'space (%d * %d).' %
(op_name, data_length, total_dst_blocks, self.block_size))
- def _CheckMoveOperation(self, op, data_offset, total_src_blocks,
- total_dst_blocks, op_name):
- """Specific checks for MOVE operations.
-
- Args:
- op: The operation object from the manifest.
- data_offset: The offset of a data blob for the operation.
- total_src_blocks: Total number of blocks in src_extents.
- total_dst_blocks: Total number of blocks in dst_extents.
- op_name: Operation name for error reporting.
-
- Raises:
- error.PayloadError if any check fails.
- """
- # Check: No data_{offset,length}.
- if data_offset is not None:
- raise error.PayloadError('%s: contains data_{offset,length}.' % op_name)
-
- # Check: total_src_blocks == total_dst_blocks.
- if total_src_blocks != total_dst_blocks:
- raise error.PayloadError(
- '%s: total src blocks (%d) != total dst blocks (%d).' %
- (op_name, total_src_blocks, total_dst_blocks))
-
- # Check: For all i, i-th src block index != i-th dst block index.
- i = 0
- src_extent_iter = iter(op.src_extents)
- dst_extent_iter = iter(op.dst_extents)
- src_extent = dst_extent = None
- src_idx = src_num = dst_idx = dst_num = 0
- while i < total_src_blocks:
- # Get the next source extent, if needed.
- if not src_extent:
- try:
- src_extent = src_extent_iter.next()
- except StopIteration:
- raise error.PayloadError('%s: ran out of src extents (%d/%d).' %
- (op_name, i, total_src_blocks))
- src_idx = src_extent.start_block
- src_num = src_extent.num_blocks
-
- # Get the next dest extent, if needed.
- if not dst_extent:
- try:
- dst_extent = dst_extent_iter.next()
- except StopIteration:
- raise error.PayloadError('%s: ran out of dst extents (%d/%d).' %
- (op_name, i, total_dst_blocks))
- dst_idx = dst_extent.start_block
- dst_num = dst_extent.num_blocks
-
- # Check: start block is not 0. See crbug/480751; there are still versions
- # of update_engine which fail when seeking to 0 in PReadAll and PWriteAll,
- # so we need to fail payloads that try to MOVE to/from block 0.
- if src_idx == 0 or dst_idx == 0:
- raise error.PayloadError(
- '%s: MOVE operation cannot have extent with start block 0' %
- op_name)
-
- if self.check_move_same_src_dst_block and src_idx == dst_idx:
- raise error.PayloadError(
- '%s: src/dst block number %d is the same (%d).' %
- (op_name, i, src_idx))
-
- advance = min(src_num, dst_num)
- i += advance
-
- src_idx += advance
- src_num -= advance
- if src_num == 0:
- src_extent = None
-
- dst_idx += advance
- dst_num -= advance
- if dst_num == 0:
- dst_extent = None
-
- # Make sure we've exhausted all src/dst extents.
- if src_extent:
- raise error.PayloadError('%s: excess src blocks.' % op_name)
- if dst_extent:
- raise error.PayloadError('%s: excess dst blocks.' % op_name)
-
def _CheckZeroOperation(self, op, op_name):
"""Specific checks for ZERO operations.
@@ -908,7 +806,7 @@
raise error.PayloadError('%s: contains data_offset.' % op_name)
def _CheckAnyDiffOperation(self, op, data_length, total_dst_blocks, op_name):
- """Specific checks for BSDIFF, SOURCE_BSDIFF, PUFFDIFF and BROTLI_BSDIFF
+ """Specific checks for SOURCE_BSDIFF, PUFFDIFF and BROTLI_BSDIFF
operations.
Args:
@@ -933,8 +831,7 @@
total_dst_blocks * self.block_size))
# Check the existence of src_length and dst_length for legacy bsdiffs.
- if (op.type == common.OpType.BSDIFF or
- (op.type == common.OpType.SOURCE_BSDIFF and self.minor_version <= 3)):
+ if op.type == common.OpType.SOURCE_BSDIFF and self.minor_version <= 3:
if not op.HasField('src_length') or not op.HasField('dst_length'):
raise error.PayloadError('%s: require {src,dst}_length.' % op_name)
else:
@@ -983,21 +880,19 @@
if self.minor_version >= 3 and op.src_sha256_hash is None:
raise error.PayloadError('%s: source hash missing.' % op_name)
- def _CheckOperation(self, op, op_name, is_last, old_block_counters,
- new_block_counters, old_usable_size, new_usable_size,
- prev_data_offset, allow_signature, blob_hash_counts):
+ def _CheckOperation(self, op, op_name, old_block_counters, new_block_counters,
+ old_usable_size, new_usable_size, prev_data_offset,
+ blob_hash_counts):
"""Checks a single update operation.
Args:
op: The operation object.
op_name: Operation name string for error reporting.
- is_last: Whether this is the last operation in the sequence.
old_block_counters: Arrays of block read counters.
new_block_counters: Arrays of block write counters.
old_usable_size: The overall usable size for src data in bytes.
new_usable_size: The overall usable size for dst data in bytes.
prev_data_offset: Offset of last used data bytes.
- allow_signature: Whether this may be a signature operation.
blob_hash_counts: Counters for hashed/unhashed blobs.
Returns:
@@ -1009,14 +904,10 @@
# Check extents.
total_src_blocks = self._CheckExtents(
op.src_extents, old_usable_size, old_block_counters,
- op_name + '.src_extents', allow_pseudo=True)
- allow_signature_in_extents = (allow_signature and is_last and
- op.type == common.OpType.REPLACE)
+ op_name + '.src_extents')
total_dst_blocks = self._CheckExtents(
op.dst_extents, new_usable_size, new_block_counters,
- op_name + '.dst_extents',
- allow_pseudo=(not self.check_dst_pseudo_extents),
- allow_signature=allow_signature_in_extents)
+ op_name + '.dst_extents')
# Check: data_offset present <==> data_length present.
data_offset = self._CheckOptionalField(op, 'data_offset', None)
@@ -1052,9 +943,7 @@
(op_name, common.FormatSha256(op.data_sha256_hash),
common.FormatSha256(actual_hash.digest())))
elif data_offset is not None:
- if allow_signature_in_extents:
- blob_hash_counts['signature'] += 1
- elif self.allow_unhashed:
+ if self.allow_unhashed:
blob_hash_counts['unhashed'] += 1
else:
raise error.PayloadError('%s: unhashed operation not allowed.' %
@@ -1068,19 +957,11 @@
(op_name, data_offset, prev_data_offset))
# Type-specific checks.
- if op.type in (common.OpType.REPLACE, common.OpType.REPLACE_BZ):
+ if op.type in (common.OpType.REPLACE, common.OpType.REPLACE_BZ,
+ common.OpType.REPLACE_XZ):
self._CheckReplaceOperation(op, data_length, total_dst_blocks, op_name)
- elif (op.type == common.OpType.REPLACE_XZ and
- (self.minor_version >= 3 or
- self.major_version >= common.BRILLO_MAJOR_PAYLOAD_VERSION)):
- self._CheckReplaceOperation(op, data_length, total_dst_blocks, op_name)
- elif op.type == common.OpType.MOVE and self.minor_version == 1:
- self._CheckMoveOperation(op, data_offset, total_src_blocks,
- total_dst_blocks, op_name)
elif op.type == common.OpType.ZERO and self.minor_version >= 4:
self._CheckZeroOperation(op, op_name)
- elif op.type == common.OpType.BSDIFF and self.minor_version == 1:
- self._CheckAnyDiffOperation(op, data_length, total_dst_blocks, op_name)
elif op.type == common.OpType.SOURCE_COPY and self.minor_version >= 2:
self._CheckSourceCopyOperation(data_offset, total_src_blocks,
total_dst_blocks, op_name)
@@ -1102,7 +983,7 @@
def _SizeToNumBlocks(self, size):
"""Returns the number of blocks needed to contain a given byte size."""
- return (size + self.block_size - 1) / self.block_size
+ return (size + self.block_size - 1) // self.block_size
def _AllocBlockCounters(self, total_size):
"""Returns a freshly initialized array of block counters.
@@ -1122,7 +1003,7 @@
def _CheckOperations(self, operations, report, base_name, old_fs_size,
new_fs_size, old_usable_size, new_usable_size,
- prev_data_offset, allow_signature):
+ prev_data_offset):
"""Checks a sequence of update operations.
Args:
@@ -1134,7 +1015,6 @@
old_usable_size: The overall usable size of the old partition in bytes.
new_usable_size: The overall usable size of the new partition in bytes.
prev_data_offset: Offset of last used data bytes.
- allow_signature: Whether this sequence may contain signature operations.
Returns:
The total data blob size used.
@@ -1149,9 +1029,7 @@
common.OpType.REPLACE: 0,
common.OpType.REPLACE_BZ: 0,
common.OpType.REPLACE_XZ: 0,
- common.OpType.MOVE: 0,
common.OpType.ZERO: 0,
- common.OpType.BSDIFF: 0,
common.OpType.SOURCE_COPY: 0,
common.OpType.SOURCE_BSDIFF: 0,
common.OpType.PUFFDIFF: 0,
@@ -1162,8 +1040,6 @@
common.OpType.REPLACE: 0,
common.OpType.REPLACE_BZ: 0,
common.OpType.REPLACE_XZ: 0,
- # MOVE operations don't have blobs.
- common.OpType.BSDIFF: 0,
# SOURCE_COPY operations don't have blobs.
common.OpType.SOURCE_BSDIFF: 0,
common.OpType.PUFFDIFF: 0,
@@ -1174,8 +1050,6 @@
'hashed': 0,
'unhashed': 0,
}
- if allow_signature:
- blob_hash_counts['signature'] = 0
# Allocate old and new block counters.
old_block_counters = (self._AllocBlockCounters(old_usable_size)
@@ -1188,16 +1062,14 @@
op_num += 1
# Check: Type is valid.
- if op.type not in op_counts.keys():
+ if op.type not in op_counts:
raise error.PayloadError('%s: invalid type (%d).' % (op_name, op.type))
op_counts[op.type] += 1
- is_last = op_num == len(operations)
curr_data_used = self._CheckOperation(
- op, op_name, is_last, old_block_counters, new_block_counters,
+ op, op_name, old_block_counters, new_block_counters,
old_usable_size, new_usable_size,
- prev_data_offset + total_data_used, allow_signature,
- blob_hash_counts)
+ prev_data_offset + total_data_used, blob_hash_counts)
if curr_data_used:
op_blob_totals[op.type] += curr_data_used
total_data_used += curr_data_used
@@ -1251,21 +1123,17 @@
if not sigs.signatures:
raise error.PayloadError('Signature block is empty.')
- last_ops_section = (self.payload.manifest.kernel_install_operations or
- self.payload.manifest.install_operations)
-
- # Only major version 1 has the fake signature OP at the end.
- if self.major_version == common.CHROMEOS_MAJOR_PAYLOAD_VERSION:
- fake_sig_op = last_ops_section[-1]
+ # Check that we don't have the signature operation blob at the end (used to
+ # be for major version 1).
+ last_partition = self.payload.manifest.partitions[-1]
+ if last_partition.operations:
+ last_op = last_partition.operations[-1]
# Check: signatures_{offset,size} must match the last (fake) operation.
- if not (fake_sig_op.type == common.OpType.REPLACE and
- self.sigs_offset == fake_sig_op.data_offset and
- self.sigs_size == fake_sig_op.data_length):
- raise error.PayloadError('Signatures_{offset,size} (%d+%d) does not'
- ' match last operation (%d+%d).' %
- (self.sigs_offset, self.sigs_size,
- fake_sig_op.data_offset,
- fake_sig_op.data_length))
+ if (last_op.type == common.OpType.REPLACE and
+ last_op.data_offset == self.sigs_offset and
+ last_op.data_length == self.sigs_size):
+ raise error.PayloadError('It seems like the last operation is the '
+ 'signature blob. This is an invalid payload.')
# Compute the checksum of all data up to signature blob.
# TODO(garnold) we're re-reading the whole data section into a string
@@ -1344,62 +1212,38 @@
self._CheckManifest(report, part_sizes)
assert self.payload_type, 'payload type should be known by now'
- manifest = self.payload.manifest
-
- # Part 3: Examine partition operations.
- install_operations = []
- if self.major_version == common.CHROMEOS_MAJOR_PAYLOAD_VERSION:
- # partitions field should not ever exist in major version 1 payloads
- self._CheckRepeatedElemNotPresent(manifest, 'partitions', 'manifest')
-
- install_operations.append((common.ROOTFS, manifest.install_operations))
- install_operations.append((common.KERNEL,
- manifest.kernel_install_operations))
-
- else:
- self._CheckRepeatedElemNotPresent(manifest, 'install_operations',
+ # Make sure deprecated values are not present in the payload.
+ for field in ('install_operations', 'kernel_install_operations'):
+ self._CheckRepeatedElemNotPresent(self.payload.manifest, field,
'manifest')
- self._CheckRepeatedElemNotPresent(manifest, 'kernel_install_operations',
- 'manifest')
-
- for update in manifest.partitions:
- install_operations.append((update.partition_name, update.operations))
+ for field in ('old_kernel_info', 'old_rootfs_info',
+ 'new_kernel_info', 'new_rootfs_info'):
+ self._CheckElemNotPresent(self.payload.manifest, field, 'manifest')
total_blob_size = 0
- for part, operations in install_operations:
+ for part, operations in ((p.partition_name, p.operations)
+ for p in self.payload.manifest.partitions):
report.AddSection('%s operations' % part)
new_fs_usable_size = self.new_fs_sizes[part]
old_fs_usable_size = self.old_fs_sizes[part]
- if part_sizes.get(part, None):
+ if part_sizes is not None and part_sizes.get(part, None):
new_fs_usable_size = old_fs_usable_size = part_sizes[part]
- # Infer the usable partition size when validating rootfs operations:
- # - If rootfs partition size was provided, use that.
- # - Otherwise, if this is an older delta (minor version < 2), stick with
- # a known constant size. This is necessary because older deltas may
- # exceed the filesystem size when moving data blocks around.
- # - Otherwise, use the encoded filesystem size.
- elif self.payload_type == _TYPE_DELTA and part == common.ROOTFS and \
- self.minor_version in (None, 1):
- new_fs_usable_size = old_fs_usable_size = _OLD_DELTA_USABLE_PART_SIZE
- # TODO(garnold)(chromium:243559) only default to the filesystem size if
- # no explicit size provided *and* the partition size is not embedded in
- # the payload; see issue for more details.
+ # TODO(chromium:243559) only default to the filesystem size if no
+ # explicit size provided *and* the partition size is not embedded in the
+ # payload; see issue for more details.
total_blob_size += self._CheckOperations(
operations, report, '%s_install_operations' % part,
self.old_fs_sizes[part], self.new_fs_sizes[part],
- old_fs_usable_size, new_fs_usable_size, total_blob_size,
- (self.major_version == common.CHROMEOS_MAJOR_PAYLOAD_VERSION
- and part == common.KERNEL))
+ old_fs_usable_size, new_fs_usable_size, total_blob_size)
# Check: Operations data reach the end of the payload file.
used_payload_size = self.payload.data_offset + total_blob_size
# Major versions 2 and higher have a signature at the end, so it should be
# considered in the total size of the image.
- if (self.major_version >= common.BRILLO_MAJOR_PAYLOAD_VERSION and
- self.sigs_size):
+ if self.sigs_size:
used_payload_size += self.sigs_size
if used_payload_size != payload_file_size:
diff --git a/scripts/update_payload/checker_unittest.py b/scripts/update_payload/checker_unittest.py
index 7e52233..993b785 100755
--- a/scripts/update_payload/checker_unittest.py
+++ b/scripts/update_payload/checker_unittest.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python
#
# Copyright (C) 2013 The Android Open Source Project
#
@@ -17,35 +17,36 @@
"""Unit testing checker.py."""
-from __future__ import print_function
+# Disable check for function names to avoid errors based on old code
+# pylint: disable-msg=invalid-name
+
+from __future__ import absolute_import
import array
import collections
-import cStringIO
import hashlib
+import io
import itertools
import os
import unittest
-# pylint cannot find mox.
-# pylint: disable=F0401
-import mox
+from six.moves import zip
+
+import mock # pylint: disable=import-error
from update_payload import checker
from update_payload import common
from update_payload import test_utils
from update_payload import update_metadata_pb2
from update_payload.error import PayloadError
-from update_payload.payload import Payload # Avoid name conflicts later.
+from update_payload.payload import Payload # Avoid name conflicts later.
def _OpTypeByName(op_name):
- """Returns the type of an operation from itsname."""
+ """Returns the type of an operation from its name."""
op_name_to_type = {
'REPLACE': common.OpType.REPLACE,
'REPLACE_BZ': common.OpType.REPLACE_BZ,
- 'MOVE': common.OpType.MOVE,
- 'BSDIFF': common.OpType.BSDIFF,
'SOURCE_COPY': common.OpType.SOURCE_COPY,
'SOURCE_BSDIFF': common.OpType.SOURCE_BSDIFF,
'ZERO': common.OpType.ZERO,
@@ -65,7 +66,7 @@
if checker_init_dargs is None:
checker_init_dargs = {}
- payload_file = cStringIO.StringIO()
+ payload_file = io.BytesIO()
payload_gen_write_to_file_func(payload_file, **payload_gen_dargs)
payload_file.seek(0)
payload = Payload(payload_file)
@@ -75,7 +76,7 @@
def _GetPayloadCheckerWithData(payload_gen):
"""Returns a payload checker from a given payload generator."""
- payload_file = cStringIO.StringIO()
+ payload_file = io.BytesIO()
payload_gen.WriteToFile(payload_file)
payload_file.seek(0)
payload = Payload(payload_file)
@@ -89,7 +90,7 @@
# pylint: disable=W0212
# Don't bark about missing members of classes you cannot import.
# pylint: disable=E1101
-class PayloadCheckerTest(mox.MoxTestBase):
+class PayloadCheckerTest(unittest.TestCase):
"""Tests the PayloadChecker class.
In addition to ordinary testFoo() methods, which are automatically invoked by
@@ -102,11 +103,42 @@
all such tests is done in AddAllParametricTests().
"""
+ def setUp(self):
+ """setUp function for unittest testcase"""
+ self.mock_checks = []
+
+ def tearDown(self):
+ """tearDown function for unittest testcase"""
+ # Verify that all mock functions were called.
+ for check in self.mock_checks:
+ check.mock_fn.assert_called_once_with(*check.exp_args, **check.exp_kwargs)
+
+ class MockChecksAtTearDown(object):
+ """Mock data storage.
+
+ This class stores the mock functions and its arguments to be checked at a
+ later point.
+ """
+ def __init__(self, mock_fn, *args, **kwargs):
+ self.mock_fn = mock_fn
+ self.exp_args = args
+ self.exp_kwargs = kwargs
+
+ def addPostCheckForMockFunction(self, mock_fn, *args, **kwargs):
+ """Store a mock function and its arguments to self.mock_checks
+
+ Args:
+ mock_fn: mock function object
+ args: expected positional arguments for the mock_fn
+ kwargs: expected named arguments for the mock_fn
+ """
+ self.mock_checks.append(self.MockChecksAtTearDown(mock_fn, *args, **kwargs))
+
def MockPayload(self):
"""Create a mock payload object, complete with a mock manifest."""
- payload = self.mox.CreateMock(Payload)
+ payload = mock.create_autospec(Payload)
payload.is_init = True
- payload.manifest = self.mox.CreateMock(
+ payload.manifest = mock.create_autospec(
update_metadata_pb2.DeltaArchiveManifest)
return payload
@@ -175,19 +207,20 @@
subreport = 'fake subreport'
# Create a mock message.
- msg = self.mox.CreateMock(update_metadata_pb2._message.Message)
- msg.HasField(name).AndReturn(is_present)
+ msg = mock.create_autospec(update_metadata_pb2._message.Message)
+ self.addPostCheckForMockFunction(msg.HasField, name)
+ msg.HasField.return_value = is_present
setattr(msg, name, val)
-
# Create a mock report.
- report = self.mox.CreateMock(checker._PayloadReport)
+ report = mock.create_autospec(checker._PayloadReport)
if is_present:
if is_submsg:
- report.AddSubReport(name).AndReturn(subreport)
+ self.addPostCheckForMockFunction(report.AddSubReport, name)
+ report.AddSubReport.return_value = subreport
else:
- report.AddField(name, convert(val), linebreak=linebreak, indent=indent)
+ self.addPostCheckForMockFunction(report.AddField, name, convert(val),
+ linebreak=linebreak, indent=indent)
- self.mox.ReplayAll()
return (msg, report, subreport, name, val)
def DoAddElemTest(self, is_present, is_mandatory, is_submsg, convert,
@@ -213,9 +246,9 @@
else:
ret_val, ret_subreport = checker.PayloadChecker._CheckElem(*args,
**kwargs)
- self.assertEquals(val if is_present else None, ret_val)
- self.assertEquals(subreport if is_present and is_submsg else None,
- ret_subreport)
+ self.assertEqual(val if is_present else None, ret_val)
+ self.assertEqual(subreport if is_present and is_submsg else None,
+ ret_subreport)
def DoAddFieldTest(self, is_mandatory, is_present, convert, linebreak,
indent):
@@ -245,7 +278,7 @@
self.assertRaises(PayloadError, tested_func, *args, **kwargs)
else:
ret_val = tested_func(*args, **kwargs)
- self.assertEquals(val if is_present else None, ret_val)
+ self.assertEqual(val if is_present else None, ret_val)
def DoAddSubMsgTest(self, is_mandatory, is_present):
"""Parametrized testing of _Check{Mandatory,Optional}SubMsg().
@@ -269,8 +302,8 @@
self.assertRaises(PayloadError, tested_func, *args)
else:
ret_val, ret_subreport = tested_func(*args)
- self.assertEquals(val if is_present else None, ret_val)
- self.assertEquals(subreport if is_present else None, ret_subreport)
+ self.assertEqual(val if is_present else None, ret_val)
+ self.assertEqual(subreport if is_present else None, ret_subreport)
def testCheckPresentIff(self):
"""Tests _CheckPresentIff()."""
@@ -296,15 +329,14 @@
returned_signed_hash: The signed hash data retuned by openssl.
expected_signed_hash: The signed hash data to compare against.
"""
- try:
- # Stub out the subprocess invocation.
- self.mox.StubOutWithMock(checker.PayloadChecker, '_Run')
+ # Stub out the subprocess invocation.
+ with mock.patch.object(checker.PayloadChecker, '_Run') \
+ as mock_payload_checker:
if expect_subprocess_call:
- checker.PayloadChecker._Run(
- mox.IsA(list), send_data=sig_data).AndReturn(
- (sig_asn1_header + returned_signed_hash, None))
+ mock_payload_checker([], send_data=sig_data)
+ mock_payload_checker.return_value = (
+ sig_asn1_header + returned_signed_hash, None)
- self.mox.ReplayAll()
if expect_pass:
self.assertIsNone(checker.PayloadChecker._CheckSha256Signature(
sig_data, 'foo', expected_signed_hash, 'bar'))
@@ -312,13 +344,11 @@
self.assertRaises(PayloadError,
checker.PayloadChecker._CheckSha256Signature,
sig_data, 'foo', expected_signed_hash, 'bar')
- finally:
- self.mox.UnsetStubs()
def testCheckSha256Signature_Pass(self):
"""Tests _CheckSha256Signature(); pass case."""
sig_data = 'fake-signature'.ljust(256)
- signed_hash = hashlib.sha256('fake-data').digest()
+ signed_hash = hashlib.sha256(b'fake-data').digest()
self.DoCheckSha256SignatureTest(True, True, sig_data,
common.SIG_ASN1_HEADER, signed_hash,
signed_hash)
@@ -326,7 +356,7 @@
def testCheckSha256Signature_FailBadSignature(self):
"""Tests _CheckSha256Signature(); fails due to malformed signature."""
sig_data = 'fake-signature' # Malformed (not 256 bytes in length).
- signed_hash = hashlib.sha256('fake-data').digest()
+ signed_hash = hashlib.sha256(b'fake-data').digest()
self.DoCheckSha256SignatureTest(False, False, sig_data,
common.SIG_ASN1_HEADER, signed_hash,
signed_hash)
@@ -334,7 +364,7 @@
def testCheckSha256Signature_FailBadOutputLength(self):
"""Tests _CheckSha256Signature(); fails due to unexpected output length."""
sig_data = 'fake-signature'.ljust(256)
- signed_hash = 'fake-hash' # Malformed (not 32 bytes in length).
+ signed_hash = b'fake-hash' # Malformed (not 32 bytes in length).
self.DoCheckSha256SignatureTest(False, True, sig_data,
common.SIG_ASN1_HEADER, signed_hash,
signed_hash)
@@ -342,16 +372,16 @@
def testCheckSha256Signature_FailBadAsnHeader(self):
"""Tests _CheckSha256Signature(); fails due to bad ASN1 header."""
sig_data = 'fake-signature'.ljust(256)
- signed_hash = hashlib.sha256('fake-data').digest()
- bad_asn1_header = 'bad-asn-header'.ljust(len(common.SIG_ASN1_HEADER))
+ signed_hash = hashlib.sha256(b'fake-data').digest()
+ bad_asn1_header = b'bad-asn-header'.ljust(len(common.SIG_ASN1_HEADER))
self.DoCheckSha256SignatureTest(False, True, sig_data, bad_asn1_header,
signed_hash, signed_hash)
def testCheckSha256Signature_FailBadHash(self):
"""Tests _CheckSha256Signature(); fails due to bad hash returned."""
sig_data = 'fake-signature'.ljust(256)
- expected_signed_hash = hashlib.sha256('fake-data').digest()
- returned_signed_hash = hashlib.sha256('bad-fake-data').digest()
+ expected_signed_hash = hashlib.sha256(b'fake-data').digest()
+ returned_signed_hash = hashlib.sha256(b'bad-fake-data').digest()
self.DoCheckSha256SignatureTest(False, True, sig_data,
common.SIG_ASN1_HEADER,
expected_signed_hash, returned_signed_hash)
@@ -429,10 +459,10 @@
payload_gen.SetBlockSize(test_utils.KiB(4))
# Add some operations.
- payload_gen.AddOperation(False, common.OpType.MOVE,
+ payload_gen.AddOperation(common.ROOTFS, common.OpType.SOURCE_COPY,
src_extents=[(0, 16), (16, 497)],
dst_extents=[(16, 496), (0, 16)])
- payload_gen.AddOperation(True, common.OpType.MOVE,
+ payload_gen.AddOperation(common.KERNEL, common.OpType.SOURCE_COPY,
src_extents=[(0, 8), (8, 8)],
dst_extents=[(8, 8), (0, 8)])
@@ -457,21 +487,23 @@
# Add old kernel/rootfs partition info, as required.
if fail_mismatched_oki_ori or fail_old_kernel_fs_size or fail_bad_oki:
oki_hash = (None if fail_bad_oki
- else hashlib.sha256('fake-oki-content').digest())
- payload_gen.SetPartInfo(True, False, old_kernel_fs_size, oki_hash)
+ else hashlib.sha256(b'fake-oki-content').digest())
+ payload_gen.SetPartInfo(common.KERNEL, False, old_kernel_fs_size,
+ oki_hash)
if not fail_mismatched_oki_ori and (fail_old_rootfs_fs_size or
fail_bad_ori):
ori_hash = (None if fail_bad_ori
- else hashlib.sha256('fake-ori-content').digest())
- payload_gen.SetPartInfo(False, False, old_rootfs_fs_size, ori_hash)
+ else hashlib.sha256(b'fake-ori-content').digest())
+ payload_gen.SetPartInfo(common.ROOTFS, False, old_rootfs_fs_size,
+ ori_hash)
# Add new kernel/rootfs partition info.
payload_gen.SetPartInfo(
- True, True, new_kernel_fs_size,
- None if fail_bad_nki else hashlib.sha256('fake-nki-content').digest())
+ common.KERNEL, True, new_kernel_fs_size,
+ None if fail_bad_nki else hashlib.sha256(b'fake-nki-content').digest())
payload_gen.SetPartInfo(
- False, True, new_rootfs_fs_size,
- None if fail_bad_nri else hashlib.sha256('fake-nri-content').digest())
+ common.ROOTFS, True, new_rootfs_fs_size,
+ None if fail_bad_nri else hashlib.sha256(b'fake-nri-content').digest())
# Set the minor version.
payload_gen.SetMinorVersion(0)
@@ -518,28 +550,11 @@
# Passes w/ all real extents.
extents = self.NewExtentList((0, 4), (8, 3), (1024, 16))
- self.assertEquals(
+ self.assertEqual(
23,
payload_checker._CheckExtents(extents, (1024 + 16) * block_size,
collections.defaultdict(int), 'foo'))
- # Passes w/ pseudo-extents (aka sparse holes).
- extents = self.NewExtentList((0, 4), (common.PSEUDO_EXTENT_MARKER, 5),
- (8, 3))
- self.assertEquals(
- 12,
- payload_checker._CheckExtents(extents, (1024 + 16) * block_size,
- collections.defaultdict(int), 'foo',
- allow_pseudo=True))
-
- # Passes w/ pseudo-extent due to a signature.
- extents = self.NewExtentList((common.PSEUDO_EXTENT_MARKER, 2))
- self.assertEquals(
- 2,
- payload_checker._CheckExtents(extents, (1024 + 16) * block_size,
- collections.defaultdict(int), 'foo',
- allow_signature=True))
-
# Fails, extent missing a start block.
extents = self.NewExtentList((-1, 4), (8, 3), (1024, 16))
self.assertRaises(
@@ -570,34 +585,34 @@
block_size = payload_checker.block_size
data_length = 10000
- op = self.mox.CreateMock(
- update_metadata_pb2.InstallOperation)
+ op = mock.create_autospec(update_metadata_pb2.InstallOperation)
op.type = common.OpType.REPLACE
# Pass.
op.src_extents = []
self.assertIsNone(
payload_checker._CheckReplaceOperation(
- op, data_length, (data_length + block_size - 1) / block_size,
+ op, data_length, (data_length + block_size - 1) // block_size,
'foo'))
# Fail, src extents founds.
op.src_extents = ['bar']
self.assertRaises(
PayloadError, payload_checker._CheckReplaceOperation,
- op, data_length, (data_length + block_size - 1) / block_size, 'foo')
+ op, data_length, (data_length + block_size - 1) // block_size, 'foo')
# Fail, missing data.
op.src_extents = []
self.assertRaises(
PayloadError, payload_checker._CheckReplaceOperation,
- op, None, (data_length + block_size - 1) / block_size, 'foo')
+ op, None, (data_length + block_size - 1) // block_size, 'foo')
# Fail, length / block number mismatch.
op.src_extents = ['bar']
self.assertRaises(
PayloadError, payload_checker._CheckReplaceOperation,
- op, data_length, (data_length + block_size - 1) / block_size + 1, 'foo')
+ op, data_length, (data_length + block_size - 1) // block_size + 1,
+ 'foo')
def testCheckReplaceBzOperation(self):
"""Tests _CheckReplaceOperation() where op.type == REPLACE_BZ."""
@@ -605,7 +620,7 @@
block_size = payload_checker.block_size
data_length = block_size * 3
- op = self.mox.CreateMock(
+ op = mock.create_autospec(
update_metadata_pb2.InstallOperation)
op.type = common.OpType.REPLACE_BZ
@@ -613,25 +628,32 @@
op.src_extents = []
self.assertIsNone(
payload_checker._CheckReplaceOperation(
- op, data_length, (data_length + block_size - 1) / block_size + 5,
+ op, data_length, (data_length + block_size - 1) // block_size + 5,
'foo'))
# Fail, src extents founds.
op.src_extents = ['bar']
self.assertRaises(
PayloadError, payload_checker._CheckReplaceOperation,
- op, data_length, (data_length + block_size - 1) / block_size + 5, 'foo')
+ op, data_length, (data_length + block_size - 1) // block_size + 5,
+ 'foo')
# Fail, missing data.
op.src_extents = []
self.assertRaises(
PayloadError, payload_checker._CheckReplaceOperation,
- op, None, (data_length + block_size - 1) / block_size, 'foo')
+ op, None, (data_length + block_size - 1) // block_size, 'foo')
# Fail, too few blocks to justify BZ.
op.src_extents = []
self.assertRaises(
PayloadError, payload_checker._CheckReplaceOperation,
+ op, data_length, (data_length + block_size - 1) // block_size, 'foo')
+
+ # Fail, total_dst_blocks is a floating point value.
+ op.src_extents = []
+ self.assertRaises(
+ PayloadError, payload_checker._CheckReplaceOperation,
op, data_length, (data_length + block_size - 1) / block_size, 'foo')
def testCheckReplaceXzOperation(self):
@@ -640,7 +662,7 @@
block_size = payload_checker.block_size
data_length = block_size * 3
- op = self.mox.CreateMock(
+ op = mock.create_autospec(
update_metadata_pb2.InstallOperation)
op.type = common.OpType.REPLACE_XZ
@@ -648,153 +670,34 @@
op.src_extents = []
self.assertIsNone(
payload_checker._CheckReplaceOperation(
- op, data_length, (data_length + block_size - 1) / block_size + 5,
+ op, data_length, (data_length + block_size - 1) // block_size + 5,
'foo'))
# Fail, src extents founds.
op.src_extents = ['bar']
self.assertRaises(
PayloadError, payload_checker._CheckReplaceOperation,
- op, data_length, (data_length + block_size - 1) / block_size + 5, 'foo')
+ op, data_length, (data_length + block_size - 1) // block_size + 5,
+ 'foo')
# Fail, missing data.
op.src_extents = []
self.assertRaises(
PayloadError, payload_checker._CheckReplaceOperation,
- op, None, (data_length + block_size - 1) / block_size, 'foo')
+ op, None, (data_length + block_size - 1) // block_size, 'foo')
# Fail, too few blocks to justify XZ.
op.src_extents = []
self.assertRaises(
PayloadError, payload_checker._CheckReplaceOperation,
+ op, data_length, (data_length + block_size - 1) // block_size, 'foo')
+
+ # Fail, total_dst_blocks is a floating point value.
+ op.src_extents = []
+ self.assertRaises(
+ PayloadError, payload_checker._CheckReplaceOperation,
op, data_length, (data_length + block_size - 1) / block_size, 'foo')
- def testCheckMoveOperation_Pass(self):
- """Tests _CheckMoveOperation(); pass case."""
- payload_checker = checker.PayloadChecker(self.MockPayload())
- op = update_metadata_pb2.InstallOperation()
- op.type = common.OpType.MOVE
-
- self.AddToMessage(op.src_extents,
- self.NewExtentList((1, 4), (12, 2), (1024, 128)))
- self.AddToMessage(op.dst_extents,
- self.NewExtentList((16, 128), (512, 6)))
- self.assertIsNone(
- payload_checker._CheckMoveOperation(op, None, 134, 134, 'foo'))
-
- def testCheckMoveOperation_FailContainsData(self):
- """Tests _CheckMoveOperation(); fails, message contains data."""
- payload_checker = checker.PayloadChecker(self.MockPayload())
- op = update_metadata_pb2.InstallOperation()
- op.type = common.OpType.MOVE
-
- self.AddToMessage(op.src_extents,
- self.NewExtentList((1, 4), (12, 2), (1024, 128)))
- self.AddToMessage(op.dst_extents,
- self.NewExtentList((16, 128), (512, 6)))
- self.assertRaises(
- PayloadError, payload_checker._CheckMoveOperation,
- op, 1024, 134, 134, 'foo')
-
- def testCheckMoveOperation_FailInsufficientSrcBlocks(self):
- """Tests _CheckMoveOperation(); fails, not enough actual src blocks."""
- payload_checker = checker.PayloadChecker(self.MockPayload())
- op = update_metadata_pb2.InstallOperation()
- op.type = common.OpType.MOVE
-
- self.AddToMessage(op.src_extents,
- self.NewExtentList((1, 4), (12, 2), (1024, 127)))
- self.AddToMessage(op.dst_extents,
- self.NewExtentList((16, 128), (512, 6)))
- self.assertRaises(
- PayloadError, payload_checker._CheckMoveOperation,
- op, None, 134, 134, 'foo')
-
- def testCheckMoveOperation_FailInsufficientDstBlocks(self):
- """Tests _CheckMoveOperation(); fails, not enough actual dst blocks."""
- payload_checker = checker.PayloadChecker(self.MockPayload())
- op = update_metadata_pb2.InstallOperation()
- op.type = common.OpType.MOVE
-
- self.AddToMessage(op.src_extents,
- self.NewExtentList((1, 4), (12, 2), (1024, 128)))
- self.AddToMessage(op.dst_extents,
- self.NewExtentList((16, 128), (512, 5)))
- self.assertRaises(
- PayloadError, payload_checker._CheckMoveOperation,
- op, None, 134, 134, 'foo')
-
- def testCheckMoveOperation_FailExcessSrcBlocks(self):
- """Tests _CheckMoveOperation(); fails, too many actual src blocks."""
- payload_checker = checker.PayloadChecker(self.MockPayload())
- op = update_metadata_pb2.InstallOperation()
- op.type = common.OpType.MOVE
-
- self.AddToMessage(op.src_extents,
- self.NewExtentList((1, 4), (12, 2), (1024, 128)))
- self.AddToMessage(op.dst_extents,
- self.NewExtentList((16, 128), (512, 5)))
- self.assertRaises(
- PayloadError, payload_checker._CheckMoveOperation,
- op, None, 134, 134, 'foo')
- self.AddToMessage(op.src_extents,
- self.NewExtentList((1, 4), (12, 2), (1024, 129)))
- self.AddToMessage(op.dst_extents,
- self.NewExtentList((16, 128), (512, 6)))
- self.assertRaises(
- PayloadError, payload_checker._CheckMoveOperation,
- op, None, 134, 134, 'foo')
-
- def testCheckMoveOperation_FailExcessDstBlocks(self):
- """Tests _CheckMoveOperation(); fails, too many actual dst blocks."""
- payload_checker = checker.PayloadChecker(self.MockPayload())
- op = update_metadata_pb2.InstallOperation()
- op.type = common.OpType.MOVE
-
- self.AddToMessage(op.src_extents,
- self.NewExtentList((1, 4), (12, 2), (1024, 128)))
- self.AddToMessage(op.dst_extents,
- self.NewExtentList((16, 128), (512, 7)))
- self.assertRaises(
- PayloadError, payload_checker._CheckMoveOperation,
- op, None, 134, 134, 'foo')
-
- def testCheckMoveOperation_FailStagnantBlocks(self):
- """Tests _CheckMoveOperation(); fails, there are blocks that do not move."""
- payload_checker = checker.PayloadChecker(self.MockPayload())
- op = update_metadata_pb2.InstallOperation()
- op.type = common.OpType.MOVE
-
- self.AddToMessage(op.src_extents,
- self.NewExtentList((1, 4), (12, 2), (1024, 128)))
- self.AddToMessage(op.dst_extents,
- self.NewExtentList((8, 128), (512, 6)))
- self.assertRaises(
- PayloadError, payload_checker._CheckMoveOperation,
- op, None, 134, 134, 'foo')
-
- def testCheckMoveOperation_FailZeroStartBlock(self):
- """Tests _CheckMoveOperation(); fails, has extent with start block 0."""
- payload_checker = checker.PayloadChecker(self.MockPayload())
- op = update_metadata_pb2.InstallOperation()
- op.type = common.OpType.MOVE
-
- self.AddToMessage(op.src_extents,
- self.NewExtentList((0, 4), (12, 2), (1024, 128)))
- self.AddToMessage(op.dst_extents,
- self.NewExtentList((8, 128), (512, 6)))
- self.assertRaises(
- PayloadError, payload_checker._CheckMoveOperation,
- op, None, 134, 134, 'foo')
-
- self.AddToMessage(op.src_extents,
- self.NewExtentList((1, 4), (12, 2), (1024, 128)))
- self.AddToMessage(op.dst_extents,
- self.NewExtentList((0, 128), (512, 6)))
- self.assertRaises(
- PayloadError, payload_checker._CheckMoveOperation,
- op, None, 134, 134, 'foo')
-
def testCheckAnyDiff(self):
"""Tests _CheckAnyDiffOperation()."""
payload_checker = checker.PayloadChecker(self.MockPayload())
@@ -832,8 +735,8 @@
self.assertRaises(PayloadError, payload_checker._CheckSourceCopyOperation,
None, 0, 1, 'foo')
- def DoCheckOperationTest(self, op_type_name, is_last, allow_signature,
- allow_unhashed, fail_src_extents, fail_dst_extents,
+ def DoCheckOperationTest(self, op_type_name, allow_unhashed,
+ fail_src_extents, fail_dst_extents,
fail_mismatched_data_offset_length,
fail_missing_dst_extents, fail_src_length,
fail_dst_length, fail_data_hash,
@@ -841,10 +744,8 @@
"""Parametric testing of _CheckOperation().
Args:
- op_type_name: 'REPLACE', 'REPLACE_BZ', 'REPLACE_XZ', 'MOVE', 'BSDIFF',
+ op_type_name: 'REPLACE', 'REPLACE_BZ', 'REPLACE_XZ',
'SOURCE_COPY', 'SOURCE_BSDIFF', BROTLI_BSDIFF or 'PUFFDIFF'.
- is_last: Whether we're testing the last operation in a sequence.
- allow_signature: Whether we're testing a signature-capable operation.
allow_unhashed: Whether we're allowing to not hash the data.
fail_src_extents: Tamper with src extents.
fail_dst_extents: Tamper with dst extents.
@@ -869,9 +770,9 @@
old_part_size = test_utils.MiB(4)
new_part_size = test_utils.MiB(8)
old_block_counters = array.array(
- 'B', [0] * ((old_part_size + block_size - 1) / block_size))
+ 'B', [0] * ((old_part_size + block_size - 1) // block_size))
new_block_counters = array.array(
- 'B', [0] * ((new_part_size + block_size - 1) / block_size))
+ 'B', [0] * ((new_part_size + block_size - 1) // block_size))
prev_data_offset = 1876
blob_hash_counts = collections.defaultdict(int)
@@ -880,8 +781,7 @@
op.type = op_type
total_src_blocks = 0
- if op_type in (common.OpType.MOVE, common.OpType.BSDIFF,
- common.OpType.SOURCE_COPY, common.OpType.SOURCE_BSDIFF,
+ if op_type in (common.OpType.SOURCE_COPY, common.OpType.SOURCE_BSDIFF,
common.OpType.PUFFDIFF, common.OpType.BROTLI_BSDIFF):
if fail_src_extents:
self.AddToMessage(op.src_extents,
@@ -891,12 +791,9 @@
self.NewExtentList((1, 16)))
total_src_blocks = 16
- # TODO(tbrindus): add major version 2 tests.
- payload_checker.major_version = common.CHROMEOS_MAJOR_PAYLOAD_VERSION
+ payload_checker.major_version = common.BRILLO_MAJOR_PAYLOAD_VERSION
if op_type in (common.OpType.REPLACE, common.OpType.REPLACE_BZ):
payload_checker.minor_version = 0
- elif op_type in (common.OpType.MOVE, common.OpType.BSDIFF):
- payload_checker.minor_version = 2 if fail_bad_minor_version else 1
elif op_type in (common.OpType.SOURCE_COPY, common.OpType.SOURCE_BSDIFF):
payload_checker.minor_version = 1 if fail_bad_minor_version else 2
if op_type == common.OpType.REPLACE_XZ:
@@ -907,7 +804,7 @@
elif op_type == common.OpType.PUFFDIFF:
payload_checker.minor_version = 4 if fail_bad_minor_version else 5
- if op_type not in (common.OpType.MOVE, common.OpType.SOURCE_COPY):
+ if op_type != common.OpType.SOURCE_COPY:
if not fail_mismatched_data_offset_length:
op.data_length = 16 * block_size - 8
if fail_prev_data_offset:
@@ -916,20 +813,16 @@
op.data_offset = prev_data_offset
fake_data = 'fake-data'.ljust(op.data_length)
- if not (allow_unhashed or (is_last and allow_signature and
- op_type == common.OpType.REPLACE)):
- if not fail_data_hash:
- # Create a valid data blob hash.
- op.data_sha256_hash = hashlib.sha256(fake_data).digest()
- payload.ReadDataBlob(op.data_offset, op.data_length).AndReturn(
- fake_data)
+ if not allow_unhashed and not fail_data_hash:
+ # Create a valid data blob hash.
+ op.data_sha256_hash = hashlib.sha256(fake_data.encode('utf-8')).digest()
+ payload.ReadDataBlob.return_value = fake_data.encode('utf-8')
elif fail_data_hash:
# Create an invalid data blob hash.
op.data_sha256_hash = hashlib.sha256(
- fake_data.replace(' ', '-')).digest()
- payload.ReadDataBlob(op.data_offset, op.data_length).AndReturn(
- fake_data)
+ fake_data.replace(' ', '-').encode('utf-8')).digest()
+ payload.ReadDataBlob.return_value = fake_data.encode('utf-8')
total_dst_blocks = 0
if not fail_missing_dst_extents:
@@ -944,8 +837,7 @@
if total_src_blocks:
if fail_src_length:
op.src_length = total_src_blocks * block_size + 8
- elif (op_type in (common.OpType.MOVE, common.OpType.BSDIFF,
- common.OpType.SOURCE_BSDIFF) and
+ elif (op_type == common.OpType.SOURCE_BSDIFF and
payload_checker.minor_version <= 3):
op.src_length = total_src_blocks * block_size
elif fail_src_length:
@@ -955,19 +847,17 @@
if total_dst_blocks:
if fail_dst_length:
op.dst_length = total_dst_blocks * block_size + 8
- elif (op_type in (common.OpType.MOVE, common.OpType.BSDIFF,
- common.OpType.SOURCE_BSDIFF) and
+ elif (op_type == common.OpType.SOURCE_BSDIFF and
payload_checker.minor_version <= 3):
op.dst_length = total_dst_blocks * block_size
- self.mox.ReplayAll()
should_fail = (fail_src_extents or fail_dst_extents or
fail_mismatched_data_offset_length or
fail_missing_dst_extents or fail_src_length or
fail_dst_length or fail_data_hash or fail_prev_data_offset or
fail_bad_minor_version)
- args = (op, 'foo', is_last, old_block_counters, new_block_counters,
- old_part_size, new_part_size, prev_data_offset, allow_signature,
+ args = (op, 'foo', old_block_counters, new_block_counters,
+ old_part_size, new_part_size, prev_data_offset,
blob_hash_counts)
if should_fail:
self.assertRaises(PayloadError, payload_checker._CheckOperation, *args)
@@ -1009,8 +899,9 @@
if fail_nonexhaustive_full_update:
rootfs_data_length -= block_size
- payload_gen.AddOperation(False, rootfs_op_type,
- dst_extents=[(0, rootfs_data_length / block_size)],
+ payload_gen.AddOperation(common.ROOTFS, rootfs_op_type,
+ dst_extents=
+ [(0, rootfs_data_length // block_size)],
data_offset=0,
data_length=rootfs_data_length)
@@ -1020,17 +911,17 @@
'allow_unhashed': True})
payload_checker.payload_type = checker._TYPE_FULL
report = checker._PayloadReport()
-
- args = (payload_checker.payload.manifest.install_operations, report, 'foo',
- 0, rootfs_part_size, rootfs_part_size, rootfs_part_size, 0, False)
+ partition = next((p for p in payload_checker.payload.manifest.partitions
+ if p.partition_name == common.ROOTFS), None)
+ args = (partition.operations, report, 'foo',
+ 0, rootfs_part_size, rootfs_part_size, rootfs_part_size, 0)
if fail_nonexhaustive_full_update:
self.assertRaises(PayloadError, payload_checker._CheckOperations, *args)
else:
self.assertEqual(rootfs_data_length,
payload_checker._CheckOperations(*args))
- def DoCheckSignaturesTest(self, fail_empty_sigs_blob, fail_missing_pseudo_op,
- fail_mismatched_pseudo_op, fail_sig_missing_fields,
+ def DoCheckSignaturesTest(self, fail_empty_sigs_blob, fail_sig_missing_fields,
fail_unknown_sig_version, fail_incorrect_sig):
"""Tests _CheckSignatures()."""
# Generate a test payload. For this test, we only care about the signature
@@ -1041,20 +932,18 @@
payload_gen.SetBlockSize(block_size)
rootfs_part_size = test_utils.MiB(2)
kernel_part_size = test_utils.KiB(16)
- payload_gen.SetPartInfo(False, True, rootfs_part_size,
- hashlib.sha256('fake-new-rootfs-content').digest())
- payload_gen.SetPartInfo(True, True, kernel_part_size,
- hashlib.sha256('fake-new-kernel-content').digest())
+ payload_gen.SetPartInfo(common.ROOTFS, True, rootfs_part_size,
+ hashlib.sha256(b'fake-new-rootfs-content').digest())
+ payload_gen.SetPartInfo(common.KERNEL, True, kernel_part_size,
+ hashlib.sha256(b'fake-new-kernel-content').digest())
payload_gen.SetMinorVersion(0)
payload_gen.AddOperationWithData(
- False, common.OpType.REPLACE,
- dst_extents=[(0, rootfs_part_size / block_size)],
+ common.ROOTFS, common.OpType.REPLACE,
+ dst_extents=[(0, rootfs_part_size // block_size)],
data_blob=os.urandom(rootfs_part_size))
- do_forge_pseudo_op = (fail_missing_pseudo_op or fail_mismatched_pseudo_op)
- do_forge_sigs_data = (do_forge_pseudo_op or fail_empty_sigs_blob or
- fail_sig_missing_fields or fail_unknown_sig_version
- or fail_incorrect_sig)
+ do_forge_sigs_data = (fail_empty_sigs_blob or fail_sig_missing_fields or
+ fail_unknown_sig_version or fail_incorrect_sig)
sigs_data = None
if do_forge_sigs_data:
@@ -1063,29 +952,19 @@
if fail_sig_missing_fields:
sig_data = None
else:
- sig_data = test_utils.SignSha256('fake-payload-content',
+ sig_data = test_utils.SignSha256(b'fake-payload-content',
test_utils._PRIVKEY_FILE_NAME)
sigs_gen.AddSig(5 if fail_unknown_sig_version else 1, sig_data)
sigs_data = sigs_gen.ToBinary()
payload_gen.SetSignatures(payload_gen.curr_offset, len(sigs_data))
- if do_forge_pseudo_op:
- assert sigs_data is not None, 'should have forged signatures blob by now'
- sigs_len = len(sigs_data)
- payload_gen.AddOperation(
- False, common.OpType.REPLACE,
- data_offset=payload_gen.curr_offset / 2,
- data_length=sigs_len / 2,
- dst_extents=[(0, (sigs_len / 2 + block_size - 1) / block_size)])
-
# Generate payload (complete w/ signature) and create the test object.
payload_checker = _GetPayloadChecker(
payload_gen.WriteToFileWithData,
payload_gen_dargs={
'sigs_data': sigs_data,
- 'privkey_file_name': test_utils._PRIVKEY_FILE_NAME,
- 'do_add_pseudo_operation': not do_forge_pseudo_op})
+ 'privkey_file_name': test_utils._PRIVKEY_FILE_NAME})
payload_checker.payload_type = checker._TYPE_FULL
report = checker._PayloadReport()
@@ -1095,8 +974,7 @@
common.KERNEL: kernel_part_size
})
- should_fail = (fail_empty_sigs_blob or fail_missing_pseudo_op or
- fail_mismatched_pseudo_op or fail_sig_missing_fields or
+ should_fail = (fail_empty_sigs_blob or fail_sig_missing_fields or
fail_unknown_sig_version or fail_incorrect_sig)
args = (report, test_utils._PUBKEY_FILE_NAME)
if should_fail:
@@ -1120,7 +998,6 @@
should_succeed = (
(minor_version == 0 and payload_type == checker._TYPE_FULL) or
- (minor_version == 1 and payload_type == checker._TYPE_DELTA) or
(minor_version == 2 and payload_type == checker._TYPE_DELTA) or
(minor_version == 3 and payload_type == checker._TYPE_DELTA) or
(minor_version == 4 and payload_type == checker._TYPE_DELTA) or
@@ -1150,10 +1027,10 @@
payload_gen.SetBlockSize(block_size)
kernel_filesystem_size = test_utils.KiB(16)
rootfs_filesystem_size = test_utils.MiB(2)
- payload_gen.SetPartInfo(False, True, rootfs_filesystem_size,
- hashlib.sha256('fake-new-rootfs-content').digest())
- payload_gen.SetPartInfo(True, True, kernel_filesystem_size,
- hashlib.sha256('fake-new-kernel-content').digest())
+ payload_gen.SetPartInfo(common.ROOTFS, True, rootfs_filesystem_size,
+ hashlib.sha256(b'fake-new-rootfs-content').digest())
+ payload_gen.SetPartInfo(common.KERNEL, True, kernel_filesystem_size,
+ hashlib.sha256(b'fake-new-kernel-content').digest())
payload_gen.SetMinorVersion(0)
rootfs_part_size = 0
@@ -1163,8 +1040,8 @@
if fail_rootfs_part_size_exceeded:
rootfs_op_size += block_size
payload_gen.AddOperationWithData(
- False, common.OpType.REPLACE,
- dst_extents=[(0, rootfs_op_size / block_size)],
+ common.ROOTFS, common.OpType.REPLACE,
+ dst_extents=[(0, rootfs_op_size // block_size)],
data_blob=os.urandom(rootfs_op_size))
kernel_part_size = 0
@@ -1174,8 +1051,8 @@
if fail_kernel_part_size_exceeded:
kernel_op_size += block_size
payload_gen.AddOperationWithData(
- True, common.OpType.REPLACE,
- dst_extents=[(0, kernel_op_size / block_size)],
+ common.KERNEL, common.OpType.REPLACE,
+ dst_extents=[(0, kernel_op_size // block_size)],
data_blob=os.urandom(kernel_op_size))
# Generate payload (complete w/ signature) and create the test object.
@@ -1186,16 +1063,14 @@
else:
use_block_size = block_size
- # For the unittests 246 is the value that generated for the payload.
- metadata_size = 246
+ # For the unittests 237 is the value that generated for the payload.
+ metadata_size = 237
if fail_mismatched_metadata_size:
metadata_size += 1
kwargs = {
'payload_gen_dargs': {
'privkey_file_name': test_utils._PRIVKEY_FILE_NAME,
- 'do_add_pseudo_operation': True,
- 'is_pseudo_in_kernel': True,
'padding': os.urandom(1024) if fail_excess_data else None},
'checker_init_dargs': {
'assert_type': 'delta' if fail_wrong_payload_type else 'full',
@@ -1207,7 +1082,7 @@
payload_checker = _GetPayloadChecker(payload_gen.WriteToFileWithData,
**kwargs)
- kwargs = {
+ kwargs2 = {
'pubkey_file_name': test_utils._PUBKEY_FILE_NAME,
'metadata_size': metadata_size,
'part_sizes': {
@@ -1219,15 +1094,15 @@
fail_rootfs_part_size_exceeded or
fail_kernel_part_size_exceeded)
if should_fail:
- self.assertRaises(PayloadError, payload_checker.Run, **kwargs)
+ self.assertRaises(PayloadError, payload_checker.Run, **kwargs2)
else:
- self.assertIsNone(payload_checker.Run(**kwargs))
+ self.assertIsNone(payload_checker.Run(**kwargs2))
+
# This implements a generic API, hence the occasional unused args.
# pylint: disable=W0613
-def ValidateCheckOperationTest(op_type_name, is_last, allow_signature,
- allow_unhashed, fail_src_extents,
- fail_dst_extents,
+def ValidateCheckOperationTest(op_type_name, allow_unhashed,
+ fail_src_extents, fail_dst_extents,
fail_mismatched_data_offset_length,
fail_missing_dst_extents, fail_src_length,
fail_dst_length, fail_data_hash,
@@ -1244,8 +1119,8 @@
fail_bad_minor_version)):
return False
- # MOVE and SOURCE_COPY operations don't carry data.
- if (op_type in (common.OpType.MOVE, common.OpType.SOURCE_COPY) and (
+ # SOURCE_COPY operation does not carry data.
+ if (op_type == common.OpType.SOURCE_COPY and (
fail_mismatched_data_offset_length or fail_data_hash or
fail_prev_data_offset)):
return False
@@ -1274,14 +1149,14 @@
(values) associated with them.
validate_func: A function used for validating test argument combinations.
"""
- for value_tuple in itertools.product(*arg_space.itervalues()):
- run_dargs = dict(zip(arg_space.iterkeys(), value_tuple))
+ for value_tuple in itertools.product(*iter(arg_space.values())):
+ run_dargs = dict(zip(iter(arg_space.keys()), value_tuple))
if validate_func and not validate_func(**run_dargs):
continue
run_method_name = 'Do%sTest' % tested_method_name
test_method_name = 'test%s' % tested_method_name
- for arg_key, arg_val in run_dargs.iteritems():
- if arg_val or type(arg_val) is int:
+ for arg_key, arg_val in run_dargs.items():
+ if arg_val or isinstance(arg_val, int):
test_method_name += '__%s=%s' % (arg_key, arg_val)
setattr(PayloadCheckerTest, test_method_name,
TestMethodBody(run_method_name, run_dargs))
@@ -1328,11 +1203,8 @@
# Add all _CheckOperation() test cases.
AddParametricTests('CheckOperation',
{'op_type_name': ('REPLACE', 'REPLACE_BZ', 'REPLACE_XZ',
- 'MOVE', 'BSDIFF', 'SOURCE_COPY',
- 'SOURCE_BSDIFF', 'PUFFDIFF',
- 'BROTLI_BSDIFF'),
- 'is_last': (True, False),
- 'allow_signature': (True, False),
+ 'SOURCE_COPY', 'SOURCE_BSDIFF',
+ 'PUFFDIFF', 'BROTLI_BSDIFF'),
'allow_unhashed': (True, False),
'fail_src_extents': (True, False),
'fail_dst_extents': (True, False),
@@ -1352,15 +1224,13 @@
# Add all _CheckOperations() test cases.
AddParametricTests('CheckSignatures',
{'fail_empty_sigs_blob': (True, False),
- 'fail_missing_pseudo_op': (True, False),
- 'fail_mismatched_pseudo_op': (True, False),
'fail_sig_missing_fields': (True, False),
'fail_unknown_sig_version': (True, False),
'fail_incorrect_sig': (True, False)})
# Add all _CheckManifestMinorVersion() test cases.
AddParametricTests('CheckManifestMinorVersion',
- {'minor_version': (None, 0, 1, 2, 3, 4, 5, 555),
+ {'minor_version': (None, 0, 2, 3, 4, 5, 555),
'payload_type': (checker._TYPE_FULL,
checker._TYPE_DELTA)})
diff --git a/scripts/update_payload/common.py b/scripts/update_payload/common.py
index 9061a75..b934cf8 100644
--- a/scripts/update_payload/common.py
+++ b/scripts/update_payload/common.py
@@ -16,8 +16,11 @@
"""Utilities for update payload processing."""
+from __future__ import absolute_import
from __future__ import print_function
+import base64
+
from update_payload import update_metadata_pb2
from update_payload.error import PayloadError
@@ -25,18 +28,14 @@
#
# Constants.
#
-PSEUDO_EXTENT_MARKER = (1L << 64) - 1 # UINT64_MAX
-
SIG_ASN1_HEADER = (
- '\x30\x31\x30\x0d\x06\x09\x60\x86'
- '\x48\x01\x65\x03\x04\x02\x01\x05'
- '\x00\x04\x20'
+ b'\x30\x31\x30\x0d\x06\x09\x60\x86'
+ b'\x48\x01\x65\x03\x04\x02\x01\x05'
+ b'\x00\x04\x20'
)
-CHROMEOS_MAJOR_PAYLOAD_VERSION = 1
BRILLO_MAJOR_PAYLOAD_VERSION = 2
-INPLACE_MINOR_PAYLOAD_VERSION = 1
SOURCE_MINOR_PAYLOAD_VERSION = 2
OPSRCHASH_MINOR_PAYLOAD_VERSION = 3
BROTLI_BSDIFF_MINOR_PAYLOAD_VERSION = 4
@@ -47,6 +46,7 @@
# Tuple of (name in system, name in protobuf).
CROS_PARTITIONS = ((KERNEL, KERNEL), (ROOTFS, 'rootfs'))
+
#
# Payload operation types.
#
@@ -55,8 +55,6 @@
_CLASS = update_metadata_pb2.InstallOperation
REPLACE = _CLASS.REPLACE
REPLACE_BZ = _CLASS.REPLACE_BZ
- MOVE = _CLASS.MOVE
- BSDIFF = _CLASS.BSDIFF
SOURCE_COPY = _CLASS.SOURCE_COPY
SOURCE_BSDIFF = _CLASS.SOURCE_BSDIFF
ZERO = _CLASS.ZERO
@@ -64,13 +62,11 @@
REPLACE_XZ = _CLASS.REPLACE_XZ
PUFFDIFF = _CLASS.PUFFDIFF
BROTLI_BSDIFF = _CLASS.BROTLI_BSDIFF
- ALL = (REPLACE, REPLACE_BZ, MOVE, BSDIFF, SOURCE_COPY, SOURCE_BSDIFF, ZERO,
+ ALL = (REPLACE, REPLACE_BZ, SOURCE_COPY, SOURCE_BSDIFF, ZERO,
DISCARD, REPLACE_XZ, PUFFDIFF, BROTLI_BSDIFF)
NAMES = {
REPLACE: 'REPLACE',
REPLACE_BZ: 'REPLACE_BZ',
- MOVE: 'MOVE',
- BSDIFF: 'BSDIFF',
SOURCE_COPY: 'SOURCE_COPY',
SOURCE_BSDIFF: 'SOURCE_BSDIFF',
ZERO: 'ZERO',
@@ -146,7 +142,7 @@
try:
data = file_obj.read(length)
- except IOError, e:
+ except IOError as e:
raise PayloadError('error reading from file (%s): %s' % (file_obj.name, e))
if len(data) != length:
@@ -167,13 +163,12 @@
end_block = ex.start_block + ex.num_blocks
if block_size:
return '%d->%d * %d' % (ex.start_block, end_block, block_size)
- else:
- return '%d->%d' % (ex.start_block, end_block)
+ return '%d->%d' % (ex.start_block, end_block)
def FormatSha256(digest):
"""Returns a canonical string representation of a SHA256 digest."""
- return digest.encode('base64').strip()
+ return base64.b64encode(digest).decode('utf-8')
#
diff --git a/scripts/update_payload/format_utils.py b/scripts/update_payload/format_utils.py
index 6248ba9..e73badf 100644
--- a/scripts/update_payload/format_utils.py
+++ b/scripts/update_payload/format_utils.py
@@ -16,6 +16,8 @@
"""Various formatting functions."""
+from __future__ import division
+
def NumToPercent(num, total, min_precision=1, max_precision=5):
"""Returns the percentage (string) of |num| out of |total|.
@@ -50,7 +52,7 @@
precision = min(min_precision, max_precision)
factor = 10 ** precision
while precision <= max_precision:
- percent = num * 100 * factor / total
+ percent = num * 100 * factor // total
if percent:
break
factor *= 10
@@ -102,8 +104,8 @@
magnitude = next_magnitude
if exp != 0:
- whole = size / magnitude
- frac = (size % magnitude) * (10 ** precision) / magnitude
+ whole = size // magnitude
+ frac = (size % magnitude) * (10 ** precision) // magnitude
while frac and not frac % 10:
frac /= 10
return '%d%s %s' % (whole, '.%d' % frac if frac else '', suffixes[exp - 1])
diff --git a/scripts/update_payload/format_utils_unittest.py b/scripts/update_payload/format_utils_unittest.py
index 42ea621..4dcd652 100755
--- a/scripts/update_payload/format_utils_unittest.py
+++ b/scripts/update_payload/format_utils_unittest.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python
#
# Copyright (C) 2013 The Android Open Source Project
#
@@ -17,6 +17,11 @@
"""Unit tests for format_utils.py."""
+# Disable check for function names to avoid errors based on old code
+# pylint: disable-msg=invalid-name
+
+from __future__ import absolute_import
+
import unittest
from update_payload import format_utils
diff --git a/scripts/update_payload/histogram.py b/scripts/update_payload/histogram.py
index 1ac2ab5..bad2dc3 100644
--- a/scripts/update_payload/histogram.py
+++ b/scripts/update_payload/histogram.py
@@ -16,6 +16,9 @@
"""Histogram generation tools."""
+from __future__ import absolute_import
+from __future__ import division
+
from collections import defaultdict
from update_payload import format_utils
@@ -110,7 +113,7 @@
hist_bar = '|'
for key, count in self.data:
if self.total:
- bar_len = count * self.scale / self.total
+ bar_len = count * self.scale // self.total
hist_bar = '|%s|' % ('#' * bar_len).ljust(self.scale)
line = '%s %s %s' % (
diff --git a/scripts/update_payload/histogram_unittest.py b/scripts/update_payload/histogram_unittest.py
index e757dd0..ccde2bb 100755
--- a/scripts/update_payload/histogram_unittest.py
+++ b/scripts/update_payload/histogram_unittest.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python
#
# Copyright (C) 2013 The Android Open Source Project
#
@@ -17,6 +17,11 @@
"""Unit tests for histogram.py."""
+# Disable check for function names to avoid errors based on old code
+# pylint: disable-msg=invalid-name
+
+from __future__ import absolute_import
+
import unittest
from update_payload import format_utils
diff --git a/scripts/update_payload/payload.py b/scripts/update_payload/payload.py
index 2a0cb58..ea5ed30 100644
--- a/scripts/update_payload/payload.py
+++ b/scripts/update_payload/payload.py
@@ -16,6 +16,7 @@
"""Tools for reading, verifying and applying Chrome OS update payloads."""
+from __future__ import absolute_import
from __future__ import print_function
import hashlib
@@ -64,7 +65,7 @@
"""Update payload header struct."""
# Header constants; sizes are in bytes.
- _MAGIC = 'CrAU'
+ _MAGIC = b'CrAU'
_VERSION_SIZE = 8
_MANIFEST_LEN_SIZE = 8
_METADATA_SIGNATURE_LEN_SIZE = 4
@@ -111,7 +112,6 @@
payload_file, self._METADATA_SIGNATURE_LEN_SIZE, True,
hasher=hasher)
-
def __init__(self, payload_file, payload_file_offset=0):
"""Initialize the payload object.
@@ -263,9 +263,7 @@
def IsDelta(self):
"""Returns True iff the payload appears to be a delta."""
self._AssertInit()
- return (self.manifest.HasField('old_kernel_info') or
- self.manifest.HasField('old_rootfs_info') or
- any(partition.HasField('old_partition_info')
+ return (any(partition.HasField('old_partition_info')
for partition in self.manifest.partitions))
def IsFull(self):
diff --git a/scripts/update_payload/test_utils.py b/scripts/update_payload/test_utils.py
index 1e2259d..e153669 100644
--- a/scripts/update_payload/test_utils.py
+++ b/scripts/update_payload/test_utils.py
@@ -16,9 +16,10 @@
"""Utilities for unit testing."""
+from __future__ import absolute_import
from __future__ import print_function
-import cStringIO
+import io
import hashlib
import os
import struct
@@ -70,7 +71,7 @@
"""
try:
file_obj.write(struct.pack(common.IntPackingFmtStr(size, is_unsigned), val))
- except IOError, e:
+ except IOError as e:
raise payload.PayloadError('error writing to file (%s): %s' %
(file_obj.name, e))
@@ -173,31 +174,37 @@
self.block_size = block_size
_SetMsgField(self.manifest, 'block_size', block_size)
- def SetPartInfo(self, is_kernel, is_new, part_size, part_hash):
+ def SetPartInfo(self, part_name, is_new, part_size, part_hash):
"""Set the partition info entry.
Args:
- is_kernel: whether this is kernel partition info
- is_new: whether to set old (False) or new (True) info
- part_size: the partition size (in fact, filesystem size)
- part_hash: the partition hash
+ part_name: The name of the partition.
+ is_new: Whether to set old (False) or new (True) info.
+ part_size: The partition size (in fact, filesystem size).
+ part_hash: The partition hash.
"""
- if is_kernel:
- part_info = (self.manifest.new_kernel_info if is_new
- else self.manifest.old_kernel_info)
- else:
- part_info = (self.manifest.new_rootfs_info if is_new
- else self.manifest.old_rootfs_info)
+ partition = next((x for x in self.manifest.partitions
+ if x.partition_name == part_name), None)
+ if partition is None:
+ partition = self.manifest.partitions.add()
+ partition.partition_name = part_name
+
+ part_info = (partition.new_partition_info if is_new
+ else partition.old_partition_info)
_SetMsgField(part_info, 'size', part_size)
_SetMsgField(part_info, 'hash', part_hash)
- def AddOperation(self, is_kernel, op_type, data_offset=None,
+ def AddOperation(self, part_name, op_type, data_offset=None,
data_length=None, src_extents=None, src_length=None,
dst_extents=None, dst_length=None, data_sha256_hash=None):
"""Adds an InstallOperation entry."""
- operations = (self.manifest.kernel_install_operations if is_kernel
- else self.manifest.install_operations)
+ partition = next((x for x in self.manifest.partitions
+ if x.partition_name == part_name), None)
+ if partition is None:
+ partition = self.manifest.partitions.add()
+ partition.partition_name = part_name
+ operations = partition.operations
op = operations.add()
op.type = op_type
@@ -277,7 +284,7 @@
self.data_blobs.append(data_blob)
return data_length, data_offset
- def AddOperationWithData(self, is_kernel, op_type, src_extents=None,
+ def AddOperationWithData(self, part_name, op_type, src_extents=None,
src_length=None, dst_extents=None, dst_length=None,
data_blob=None, do_hash_data_blob=True):
"""Adds an install operation and associated data blob.
@@ -287,12 +294,12 @@
necessary offset/length accounting.
Args:
- is_kernel: whether this is a kernel (True) or rootfs (False) operation
- op_type: one of REPLACE, REPLACE_BZ, REPLACE_XZ, MOVE or BSDIFF
+ part_name: The name of the partition (e.g. kernel or root).
+ op_type: one of REPLACE, REPLACE_BZ, REPLACE_XZ.
src_extents: list of (start, length) pairs indicating src block ranges
- src_length: size of the src data in bytes (needed for BSDIFF)
+ src_length: size of the src data in bytes (needed for diff operations)
dst_extents: list of (start, length) pairs indicating dst block ranges
- dst_length: size of the dst data in bytes (needed for BSDIFF)
+ dst_length: size of the dst data in bytes (needed for diff operations)
data_blob: a data blob associated with this operation
do_hash_data_blob: whether or not to compute and add a data blob hash
"""
@@ -302,15 +309,13 @@
data_sha256_hash = hashlib.sha256(data_blob).digest()
data_length, data_offset = self.AddData(data_blob)
- self.AddOperation(is_kernel, op_type, data_offset=data_offset,
+ self.AddOperation(part_name, op_type, data_offset=data_offset,
data_length=data_length, src_extents=src_extents,
src_length=src_length, dst_extents=dst_extents,
dst_length=dst_length, data_sha256_hash=data_sha256_hash)
def WriteToFileWithData(self, file_obj, sigs_data=None,
- privkey_file_name=None,
- do_add_pseudo_operation=False,
- is_pseudo_in_kernel=False, padding=None):
+ privkey_file_name=None, padding=None):
"""Writes the payload content to a file, optionally signing the content.
Args:
@@ -319,10 +324,6 @@
payload signature fields assumed to be preset by the caller)
privkey_file_name: key used for signing the payload (optional; used only
if explicit signatures blob not provided)
- do_add_pseudo_operation: whether a pseudo-operation should be added to
- account for the signature blob
- is_pseudo_in_kernel: whether the pseudo-operation should be added to
- kernel (True) or rootfs (False) operations
padding: stuff to dump past the normal data blobs provided (optional)
Raises:
@@ -335,7 +336,7 @@
if do_generate_sigs_data:
# First, sign some arbitrary data to obtain the size of a signature blob.
- fake_sig = SignSha256('fake-payload-data', privkey_file_name)
+ fake_sig = SignSha256(b'fake-payload-data', privkey_file_name)
fake_sigs_gen = SignaturesGenerator()
fake_sigs_gen.AddSig(1, fake_sig)
sigs_len = len(fake_sigs_gen.ToBinary())
@@ -343,20 +344,9 @@
# Update the payload with proper signature attributes.
self.SetSignatures(self.curr_offset, sigs_len)
- # Add a pseudo-operation to account for the signature blob, if requested.
- if do_add_pseudo_operation:
- if not self.block_size:
- raise TestError('cannot add pseudo-operation without knowing the '
- 'payload block size')
- self.AddOperation(
- is_pseudo_in_kernel, common.OpType.REPLACE,
- data_offset=self.curr_offset, data_length=sigs_len,
- dst_extents=[(common.PSEUDO_EXTENT_MARKER,
- (sigs_len + self.block_size - 1) / self.block_size)])
-
if do_generate_sigs_data:
# Once all payload fields are updated, dump and sign it.
- temp_payload_file = cStringIO.StringIO()
+ temp_payload_file = io.BytesIO()
self.WriteToFile(temp_payload_file, data_blobs=self.data_blobs)
sig = SignSha256(temp_payload_file.getvalue(), privkey_file_name)
sigs_gen = SignaturesGenerator()
diff --git a/scripts/update_payload/update_metadata_pb2.py b/scripts/update_payload/update_metadata_pb2.py
index cb8f4c2..d41c1da 100644
--- a/scripts/update_payload/update_metadata_pb2.py
+++ b/scripts/update_payload/update_metadata_pb2.py
@@ -20,7 +20,7 @@
package='chromeos_update_engine',
syntax='proto2',
serialized_options=_b('H\003'),
- serialized_pb=_b('\n\x15update_metadata.proto\x12\x16\x63hromeos_update_engine\"1\n\x06\x45xtent\x12\x13\n\x0bstart_block\x18\x01 \x01(\x04\x12\x12\n\nnum_blocks\x18\x02 \x01(\x04\"z\n\nSignatures\x12@\n\nsignatures\x18\x01 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x1a*\n\tSignature\x12\x0f\n\x07version\x18\x01 \x01(\r\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\"+\n\rPartitionInfo\x12\x0c\n\x04size\x18\x01 \x01(\x04\x12\x0c\n\x04hash\x18\x02 \x01(\x0c\"w\n\tImageInfo\x12\r\n\x05\x62oard\x18\x01 \x01(\t\x12\x0b\n\x03key\x18\x02 \x01(\t\x12\x0f\n\x07\x63hannel\x18\x03 \x01(\t\x12\x0f\n\x07version\x18\x04 \x01(\t\x12\x15\n\rbuild_channel\x18\x05 \x01(\t\x12\x15\n\rbuild_version\x18\x06 \x01(\t\"\xee\x03\n\x10InstallOperation\x12;\n\x04type\x18\x01 \x02(\x0e\x32-.chromeos_update_engine.InstallOperation.Type\x12\x13\n\x0b\x64\x61ta_offset\x18\x02 \x01(\x04\x12\x13\n\x0b\x64\x61ta_length\x18\x03 \x01(\x04\x12\x33\n\x0bsrc_extents\x18\x04 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\nsrc_length\x18\x05 \x01(\x04\x12\x33\n\x0b\x64st_extents\x18\x06 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\ndst_length\x18\x07 \x01(\x04\x12\x18\n\x10\x64\x61ta_sha256_hash\x18\x08 \x01(\x0c\x12\x17\n\x0fsrc_sha256_hash\x18\t \x01(\x0c\"\xad\x01\n\x04Type\x12\x0b\n\x07REPLACE\x10\x00\x12\x0e\n\nREPLACE_BZ\x10\x01\x12\x0c\n\x04MOVE\x10\x02\x1a\x02\x08\x01\x12\x0e\n\x06\x42SDIFF\x10\x03\x1a\x02\x08\x01\x12\x0f\n\x0bSOURCE_COPY\x10\x04\x12\x11\n\rSOURCE_BSDIFF\x10\x05\x12\x0e\n\nREPLACE_XZ\x10\x08\x12\x08\n\x04ZERO\x10\x06\x12\x0b\n\x07\x44ISCARD\x10\x07\x12\x11\n\rBROTLI_BSDIFF\x10\n\x12\x0c\n\x08PUFFDIFF\x10\t\"\xd7\x05\n\x0fPartitionUpdate\x12\x16\n\x0epartition_name\x18\x01 \x02(\t\x12\x17\n\x0frun_postinstall\x18\x02 \x01(\x08\x12\x18\n\x10postinstall_path\x18\x03 \x01(\t\x12\x17\n\x0f\x66ilesystem_type\x18\x04 \x01(\t\x12M\n\x17new_partition_signature\x18\x05 
\x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x12\x41\n\x12old_partition_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12\x41\n\x12new_partition_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12<\n\noperations\x18\x08 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12\x1c\n\x14postinstall_optional\x18\t \x01(\x08\x12=\n\x15hash_tree_data_extent\x18\n \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x38\n\x10hash_tree_extent\x18\x0b \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x1b\n\x13hash_tree_algorithm\x18\x0c \x01(\t\x12\x16\n\x0ehash_tree_salt\x18\r \x01(\x0c\x12\x37\n\x0f\x66\x65\x63_data_extent\x18\x0e \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x32\n\nfec_extent\x18\x0f \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x14\n\tfec_roots\x18\x10 \x01(\r:\x01\x32\"L\n\x15\x44ynamicPartitionGroup\x12\x0c\n\x04name\x18\x01 \x02(\t\x12\x0c\n\x04size\x18\x02 \x01(\x04\x12\x17\n\x0fpartition_names\x18\x03 \x03(\t\"s\n\x18\x44ynamicPartitionMetadata\x12=\n\x06groups\x18\x01 \x03(\x0b\x32-.chromeos_update_engine.DynamicPartitionGroup\x12\x18\n\x10snapshot_enabled\x18\x02 \x01(\x08\"\xb1\x06\n\x14\x44\x65ltaArchiveManifest\x12\x44\n\x12install_operations\x18\x01 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12K\n\x19kernel_install_operations\x18\x02 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12\x18\n\nblock_size\x18\x03 \x01(\r:\x04\x34\x30\x39\x36\x12\x19\n\x11signatures_offset\x18\x04 \x01(\x04\x12\x17\n\x0fsignatures_size\x18\x05 \x01(\x04\x12>\n\x0fold_kernel_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12>\n\x0fnew_kernel_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12>\n\x0fold_rootfs_info\x18\x08 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12>\n\x0fnew_rootfs_info\x18\t \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12\x39\n\x0eold_image_info\x18\n 
\x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x39\n\x0enew_image_info\x18\x0b \x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x18\n\rminor_version\x18\x0c \x01(\r:\x01\x30\x12;\n\npartitions\x18\r \x03(\x0b\x32\'.chromeos_update_engine.PartitionUpdate\x12\x15\n\rmax_timestamp\x18\x0e \x01(\x03\x12T\n\x1a\x64ynamic_partition_metadata\x18\x0f \x01(\x0b\x32\x30.chromeos_update_engine.DynamicPartitionMetadataB\x02H\x03')
+ serialized_pb=_b('\n\x15update_metadata.proto\x12\x16\x63hromeos_update_engine\"1\n\x06\x45xtent\x12\x13\n\x0bstart_block\x18\x01 \x01(\x04\x12\x12\n\nnum_blocks\x18\x02 \x01(\x04\"\x9f\x01\n\nSignatures\x12@\n\nsignatures\x18\x01 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x1aO\n\tSignature\x12\x13\n\x07version\x18\x01 \x01(\rB\x02\x18\x01\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\x12\x1f\n\x17unpadded_signature_size\x18\x03 \x01(\x07\"+\n\rPartitionInfo\x12\x0c\n\x04size\x18\x01 \x01(\x04\x12\x0c\n\x04hash\x18\x02 \x01(\x0c\"w\n\tImageInfo\x12\r\n\x05\x62oard\x18\x01 \x01(\t\x12\x0b\n\x03key\x18\x02 \x01(\t\x12\x0f\n\x07\x63hannel\x18\x03 \x01(\t\x12\x0f\n\x07version\x18\x04 \x01(\t\x12\x15\n\rbuild_channel\x18\x05 \x01(\t\x12\x15\n\rbuild_version\x18\x06 \x01(\t\"\xee\x03\n\x10InstallOperation\x12;\n\x04type\x18\x01 \x02(\x0e\x32-.chromeos_update_engine.InstallOperation.Type\x12\x13\n\x0b\x64\x61ta_offset\x18\x02 \x01(\x04\x12\x13\n\x0b\x64\x61ta_length\x18\x03 \x01(\x04\x12\x33\n\x0bsrc_extents\x18\x04 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\nsrc_length\x18\x05 \x01(\x04\x12\x33\n\x0b\x64st_extents\x18\x06 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\ndst_length\x18\x07 \x01(\x04\x12\x18\n\x10\x64\x61ta_sha256_hash\x18\x08 \x01(\x0c\x12\x17\n\x0fsrc_sha256_hash\x18\t \x01(\x0c\"\xad\x01\n\x04Type\x12\x0b\n\x07REPLACE\x10\x00\x12\x0e\n\nREPLACE_BZ\x10\x01\x12\x0c\n\x04MOVE\x10\x02\x1a\x02\x08\x01\x12\x0e\n\x06\x42SDIFF\x10\x03\x1a\x02\x08\x01\x12\x0f\n\x0bSOURCE_COPY\x10\x04\x12\x11\n\rSOURCE_BSDIFF\x10\x05\x12\x0e\n\nREPLACE_XZ\x10\x08\x12\x08\n\x04ZERO\x10\x06\x12\x0b\n\x07\x44ISCARD\x10\x07\x12\x11\n\rBROTLI_BSDIFF\x10\n\x12\x0c\n\x08PUFFDIFF\x10\t\"\xd7\x05\n\x0fPartitionUpdate\x12\x16\n\x0epartition_name\x18\x01 \x02(\t\x12\x17\n\x0frun_postinstall\x18\x02 \x01(\x08\x12\x18\n\x10postinstall_path\x18\x03 \x01(\t\x12\x17\n\x0f\x66ilesystem_type\x18\x04 \x01(\t\x12M\n\x17new_partition_signature\x18\x05 
\x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x12\x41\n\x12old_partition_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12\x41\n\x12new_partition_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12<\n\noperations\x18\x08 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12\x1c\n\x14postinstall_optional\x18\t \x01(\x08\x12=\n\x15hash_tree_data_extent\x18\n \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x38\n\x10hash_tree_extent\x18\x0b \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x1b\n\x13hash_tree_algorithm\x18\x0c \x01(\t\x12\x16\n\x0ehash_tree_salt\x18\r \x01(\x0c\x12\x37\n\x0f\x66\x65\x63_data_extent\x18\x0e \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x32\n\nfec_extent\x18\x0f \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x14\n\tfec_roots\x18\x10 \x01(\r:\x01\x32\"L\n\x15\x44ynamicPartitionGroup\x12\x0c\n\x04name\x18\x01 \x02(\t\x12\x0c\n\x04size\x18\x02 \x01(\x04\x12\x17\n\x0fpartition_names\x18\x03 \x03(\t\"s\n\x18\x44ynamicPartitionMetadata\x12=\n\x06groups\x18\x01 \x03(\x0b\x32-.chromeos_update_engine.DynamicPartitionGroup\x12\x18\n\x10snapshot_enabled\x18\x02 \x01(\x08\"\xe1\x06\n\x14\x44\x65ltaArchiveManifest\x12H\n\x12install_operations\x18\x01 \x03(\x0b\x32(.chromeos_update_engine.InstallOperationB\x02\x18\x01\x12O\n\x19kernel_install_operations\x18\x02 \x03(\x0b\x32(.chromeos_update_engine.InstallOperationB\x02\x18\x01\x12\x18\n\nblock_size\x18\x03 \x01(\r:\x04\x34\x30\x39\x36\x12\x19\n\x11signatures_offset\x18\x04 \x01(\x04\x12\x17\n\x0fsignatures_size\x18\x05 \x01(\x04\x12\x42\n\x0fold_kernel_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfoB\x02\x18\x01\x12\x42\n\x0fnew_kernel_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfoB\x02\x18\x01\x12\x42\n\x0fold_rootfs_info\x18\x08 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfoB\x02\x18\x01\x12\x42\n\x0fnew_rootfs_info\x18\t 
\x01(\x0b\x32%.chromeos_update_engine.PartitionInfoB\x02\x18\x01\x12\x39\n\x0eold_image_info\x18\n \x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x39\n\x0enew_image_info\x18\x0b \x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x18\n\rminor_version\x18\x0c \x01(\r:\x01\x30\x12;\n\npartitions\x18\r \x03(\x0b\x32\'.chromeos_update_engine.PartitionUpdate\x12\x15\n\rmax_timestamp\x18\x0e \x01(\x03\x12T\n\x1a\x64ynamic_partition_metadata\x18\x0f \x01(\x0b\x32\x30.chromeos_update_engine.DynamicPartitionMetadata\x12\x16\n\x0epartial_update\x18\x10 \x01(\x08\x42\x02H\x03')
)
@@ -78,8 +78,8 @@
],
containing_type=None,
serialized_options=None,
- serialized_start=712,
- serialized_end=885,
+ serialized_start=750,
+ serialized_end=923,
)
_sym_db.RegisterEnumDescriptor(_INSTALLOPERATION_TYPE)
@@ -135,7 +135,7 @@
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
+ serialized_options=_b('\030\001'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='chromeos_update_engine.Signatures.Signature.data', index=1,
number=2, type=12, cpp_type=9, label=1,
@@ -143,6 +143,13 @@
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='unpadded_signature_size', full_name='chromeos_update_engine.Signatures.Signature.unpadded_signature_size', index=2,
+ number=3, type=7, cpp_type=3, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
@@ -155,8 +162,8 @@
extension_ranges=[],
oneofs=[
],
- serialized_start=180,
- serialized_end=222,
+ serialized_start=181,
+ serialized_end=260,
)
_SIGNATURES = _descriptor.Descriptor(
@@ -185,8 +192,8 @@
extension_ranges=[],
oneofs=[
],
- serialized_start=100,
- serialized_end=222,
+ serialized_start=101,
+ serialized_end=260,
)
@@ -223,8 +230,8 @@
extension_ranges=[],
oneofs=[
],
- serialized_start=224,
- serialized_end=267,
+ serialized_start=262,
+ serialized_end=305,
)
@@ -289,8 +296,8 @@
extension_ranges=[],
oneofs=[
],
- serialized_start=269,
- serialized_end=388,
+ serialized_start=307,
+ serialized_end=426,
)
@@ -377,8 +384,8 @@
extension_ranges=[],
oneofs=[
],
- serialized_start=391,
- serialized_end=885,
+ serialized_start=429,
+ serialized_end=923,
)
@@ -513,8 +520,8 @@
extension_ranges=[],
oneofs=[
],
- serialized_start=888,
- serialized_end=1615,
+ serialized_start=926,
+ serialized_end=1653,
)
@@ -558,8 +565,8 @@
extension_ranges=[],
oneofs=[
],
- serialized_start=1617,
- serialized_end=1693,
+ serialized_start=1655,
+ serialized_end=1731,
)
@@ -596,8 +603,8 @@
extension_ranges=[],
oneofs=[
],
- serialized_start=1695,
- serialized_end=1810,
+ serialized_start=1733,
+ serialized_end=1848,
)
@@ -614,14 +621,14 @@
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
+ serialized_options=_b('\030\001'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='kernel_install_operations', full_name='chromeos_update_engine.DeltaArchiveManifest.kernel_install_operations', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
+ serialized_options=_b('\030\001'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='block_size', full_name='chromeos_update_engine.DeltaArchiveManifest.block_size', index=2,
number=3, type=13, cpp_type=3, label=1,
@@ -649,28 +656,28 @@
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
+ serialized_options=_b('\030\001'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='new_kernel_info', full_name='chromeos_update_engine.DeltaArchiveManifest.new_kernel_info', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
+ serialized_options=_b('\030\001'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='old_rootfs_info', full_name='chromeos_update_engine.DeltaArchiveManifest.old_rootfs_info', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
+ serialized_options=_b('\030\001'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='new_rootfs_info', full_name='chromeos_update_engine.DeltaArchiveManifest.new_rootfs_info', index=8,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
+ serialized_options=_b('\030\001'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='old_image_info', full_name='chromeos_update_engine.DeltaArchiveManifest.old_image_info', index=9,
number=10, type=11, cpp_type=10, label=1,
@@ -713,6 +720,13 @@
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='partial_update', full_name='chromeos_update_engine.DeltaArchiveManifest.partial_update', index=15,
+ number=16, type=8, cpp_type=7, label=1,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
@@ -725,8 +739,8 @@
extension_ranges=[],
oneofs=[
],
- serialized_start=1813,
- serialized_end=2630,
+ serialized_start=1851,
+ serialized_end=2716,
)
_SIGNATURES_SIGNATURE.containing_type = _SIGNATURES
@@ -838,6 +852,13 @@
DESCRIPTOR._options = None
+_SIGNATURES_SIGNATURE.fields_by_name['version']._options = None
_INSTALLOPERATION_TYPE.values_by_name["MOVE"]._options = None
_INSTALLOPERATION_TYPE.values_by_name["BSDIFF"]._options = None
+_DELTAARCHIVEMANIFEST.fields_by_name['install_operations']._options = None
+_DELTAARCHIVEMANIFEST.fields_by_name['kernel_install_operations']._options = None
+_DELTAARCHIVEMANIFEST.fields_by_name['old_kernel_info']._options = None
+_DELTAARCHIVEMANIFEST.fields_by_name['new_kernel_info']._options = None
+_DELTAARCHIVEMANIFEST.fields_by_name['old_rootfs_info']._options = None
+_DELTAARCHIVEMANIFEST.fields_by_name['new_rootfs_info']._options = None
# @@protoc_insertion_point(module_scope)
diff --git a/tar_bunzip2.gni b/tar_bunzip2.gni
new file mode 100644
index 0000000..5d90167
--- /dev/null
+++ b/tar_bunzip2.gni
@@ -0,0 +1,31 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+template("tar_bunzip2") {
+ forward_variables_from(invoker, [ "image_out_dir" ])
+ out_dir = "${root_gen_dir}/${image_out_dir}"
+
+ action_foreach(target_name) {
+ sources = invoker.sources
+ script = "//common-mk/file_generator_wrapper.py"
+ outputs = [ "${out_dir}/{{source_name_part}}.flag" ]
+ args = [
+ "sh",
+ "-c",
+ "tar -xvf \"{{source}}\" -C \"${out_dir}\" && touch ${out_dir}/{{source_name_part}}.flag",
+ ]
+ }
+}
diff --git a/tar_bunzip2.gypi b/tar_bunzip2.gypi
deleted file mode 100644
index 4d1be28..0000000
--- a/tar_bunzip2.gypi
+++ /dev/null
@@ -1,39 +0,0 @@
-#
-# Copyright (C) 2015 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-{
- 'variables': {
- 'out_dir': '<(SHARED_INTERMEDIATE_DIR)/<(image_out_dir)',
- },
- 'rules': [
- {
- 'rule_name': 'tar-bunzip2',
- 'extension': 'bz2',
- 'outputs': [
- # The .flag file is used to mark the timestamp of the file extraction
- # and re-run this action if a new .bz2 file is generated.
- '<(out_dir)/<(RULE_INPUT_ROOT).flag',
- ],
- 'action': [
- 'sh',
- '-c',
- 'tar -xvf "<(RULE_INPUT_PATH)" -C "<(out_dir)" && touch <(out_dir)/<(RULE_INPUT_ROOT).flag',
- ],
- 'msvs_cygwin_shell': 0,
- 'process_outputs_as_sources': 1,
- 'message': 'Unpacking file <(RULE_INPUT_PATH)',
- },
- ],
-}
diff --git a/test_http_server.cc b/test_http_server.cc
index 4536f37..4fc89e5 100644
--- a/test_http_server.cc
+++ b/test_http_server.cc
@@ -658,5 +658,4 @@
LOG(FATAL) << "ERROR on accept";
HandleConnection(client_fd);
}
- return 0;
}
diff --git a/update_attempter.cc b/update_attempter.cc
index ee571db..60c2c36 100644
--- a/update_attempter.cc
+++ b/update_attempter.cc
@@ -19,15 +19,19 @@
#include <stdint.h>
#include <algorithm>
+#include <map>
#include <memory>
#include <string>
+#include <unordered_set>
#include <utility>
#include <vector>
#include <base/bind.h>
+#include <base/compiler_specific.h>
#include <base/files/file_util.h>
#include <base/logging.h>
#include <base/rand_util.h>
+#include <base/strings/string_number_conversions.h>
#include <base/strings/string_util.h>
#include <base/strings/stringprintf.h>
#include <base/time/time.h>
@@ -43,8 +47,10 @@
#include "update_engine/common/clock_interface.h"
#include "update_engine/common/constants.h"
#include "update_engine/common/dlcservice_interface.h"
+#include "update_engine/common/excluder_interface.h"
#include "update_engine/common/hardware_interface.h"
#include "update_engine/common/platform_constants.h"
+#include "update_engine/common/prefs.h"
#include "update_engine/common/prefs_interface.h"
#include "update_engine/common/subprocess.h"
#include "update_engine/common/utils.h"
@@ -53,6 +59,7 @@
#include "update_engine/omaha_request_action.h"
#include "update_engine/omaha_request_params.h"
#include "update_engine/omaha_response_handler_action.h"
+#include "update_engine/omaha_utils.h"
#include "update_engine/p2p_manager.h"
#include "update_engine/payload_consumer/download_action.h"
#include "update_engine/payload_consumer/filesystem_verifier_action.h"
@@ -68,6 +75,7 @@
using base::Bind;
using base::Callback;
+using base::FilePath;
using base::Time;
using base::TimeDelta;
using base::TimeTicks;
@@ -77,6 +85,7 @@
using chromeos_update_manager::Policy;
using chromeos_update_manager::StagingCase;
using chromeos_update_manager::UpdateCheckParams;
+using std::map;
using std::string;
using std::vector;
using update_engine::UpdateAttemptFlags;
@@ -152,7 +161,7 @@
}
bool UpdateAttempter::ScheduleUpdates() {
- if (IsUpdateRunningOrScheduled())
+ if (IsBusyOrUpdateScheduled())
return false;
chromeos_update_manager::UpdateManager* const update_manager =
@@ -162,7 +171,8 @@
Bind(&UpdateAttempter::OnUpdateScheduled, base::Unretained(this));
// We limit the async policy request to a reasonably short time, to avoid a
// starvation due to a transient bug.
- update_manager->AsyncPolicyRequest(callback, &Policy::UpdateCheckAllowed);
+ update_manager->AsyncPolicyRequestUpdateCheckAllowed(
+ callback, &Policy::UpdateCheckAllowed);
waiting_for_scheduled_check_ = true;
return true;
}
@@ -239,6 +249,8 @@
const string& target_channel,
const string& target_version_prefix,
bool rollback_allowed,
+ bool rollback_data_save_requested,
+ int rollback_allowed_milestones,
bool obey_proxies,
bool interactive) {
// This is normally called frequently enough so it's appropriate to use as a
@@ -274,6 +286,8 @@
target_channel,
target_version_prefix,
rollback_allowed,
+ rollback_data_save_requested,
+ rollback_allowed_milestones,
obey_proxies,
interactive)) {
return;
@@ -347,6 +361,8 @@
const string& target_channel,
const string& target_version_prefix,
bool rollback_allowed,
+ bool rollback_data_save_requested,
+ int rollback_allowed_milestones,
bool obey_proxies,
bool interactive) {
http_response_code_ = 0;
@@ -365,12 +381,20 @@
// Set whether rollback is allowed.
omaha_request_params_->set_rollback_allowed(rollback_allowed);
+ // Set whether saving data over rollback is requested.
+ omaha_request_params_->set_rollback_data_save_requested(
+ rollback_data_save_requested);
+
CalculateStagingParams(interactive);
// If staging_wait_time_ wasn't set, staging is off, use scattering instead.
if (staging_wait_time_.InSeconds() == 0) {
CalculateScatteringParams(interactive);
}
+ // Set how many milestones of rollback are allowed.
+ omaha_request_params_->set_rollback_allowed_milestones(
+ rollback_allowed_milestones);
+
CalculateP2PParams(interactive);
if (payload_state->GetUsingP2PForDownloading() ||
payload_state->GetUsingP2PForSharing()) {
@@ -409,10 +433,25 @@
// target channel.
omaha_request_params_->UpdateDownloadChannel();
}
- // Set the DLC module ID list.
- omaha_request_params_->set_dlc_module_ids(dlc_module_ids_);
+
+ // The function |CalculateDlcParams| makes use of the function |GetAppId| from
+ // |OmahaRequestParams|, so to ensure that the return from |GetAppId|
+ // doesn't change, no changes to the values |download_channel_|,
+ // |image_props_.product_id| and |image_props_.canary_product_id| from
+ // |omaha_request_params_| shall be made below this line.
+ CalculateDlcParams();
+
omaha_request_params_->set_is_install(is_install_);
+ // Set Quick Fix Build token if policy is set and the device is enterprise
+ // enrolled.
+ string token;
+ if (system_state_ && system_state_->device_policy()) {
+ if (!system_state_->device_policy()->GetDeviceQuickFixBuildToken(&token))
+ token.clear();
+ }
+ omaha_request_params_->set_autoupdate_token(token);
+
LOG(INFO) << "target_version_prefix = "
<< omaha_request_params_->target_version_prefix()
<< ", rollback_allowed = "
@@ -602,8 +641,10 @@
case StagingCase::kNoSavedValue:
prefs_->SetInt64(kPrefsWallClockStagingWaitPeriod,
staging_wait_time_.InDays());
+ FALLTHROUGH;
case StagingCase::kSetStagingFromPref:
omaha_request_params_->set_waiting_period(staging_wait_time_);
+ FALLTHROUGH;
case StagingCase::kNoAction:
// Staging is on, enable wallclock based wait so that its values get used.
omaha_request_params_->set_wall_clock_based_wait_enabled(true);
@@ -618,10 +659,117 @@
}
}
+bool UpdateAttempter::ResetDlcPrefs(const string& dlc_id) {
+ vector<string> failures;
+ PrefsInterface* prefs = system_state_->prefs();
+ for (auto& sub_key :
+ {kPrefsPingActive, kPrefsPingLastActive, kPrefsPingLastRollcall}) {
+ auto key = prefs->CreateSubKey({kDlcPrefsSubDir, dlc_id, sub_key});
+ if (!prefs->Delete(key))
+ failures.emplace_back(sub_key);
+ }
+ if (failures.size() != 0)
+ PLOG(ERROR) << "Failed to delete prefs (" << base::JoinString(failures, ",")
+ << " for DLC (" << dlc_id << ").";
+
+ return failures.size() == 0;
+}
+
+bool UpdateAttempter::SetDlcActiveValue(bool is_active, const string& dlc_id) {
+ if (dlc_id.empty()) {
+ LOG(ERROR) << "Empty DLC ID passed.";
+ return false;
+ }
+ LOG(INFO) << "Set DLC (" << dlc_id << ") to "
+ << (is_active ? "Active" : "Inactive");
+ PrefsInterface* prefs = system_state_->prefs();
+ if (is_active) {
+ auto ping_active_key =
+ prefs->CreateSubKey({kDlcPrefsSubDir, dlc_id, kPrefsPingActive});
+ if (!prefs->SetInt64(ping_active_key, kPingActiveValue)) {
+ LOG(ERROR) << "Failed to set the value of ping metadata '"
+ << kPrefsPingActive << "'.";
+ return false;
+ }
+ } else {
+ return ResetDlcPrefs(dlc_id);
+ }
+ return true;
+}
+
+int64_t UpdateAttempter::GetPingMetadata(const string& metadata_key) const {
+ // The first time a ping is sent, the metadata files containing the values
+ // sent back by the server still don't exist. A value of -1 is used to
+ // indicate this.
+ if (!system_state_->prefs()->Exists(metadata_key))
+ return kPingNeverPinged;
+
+ int64_t value;
+ if (system_state_->prefs()->GetInt64(metadata_key, &value))
+ return value;
+
+ // Return -2 when the file exists and there is a problem reading from it, or
+ // the value cannot be converted to an integer.
+ return kPingUnknownValue;
+}
+
+void UpdateAttempter::CalculateDlcParams() {
+ // Set the |dlc_ids_| only for an update. This is required to get the
+ // currently installed DLC(s).
+ if (!is_install_ &&
+ !system_state_->dlcservice()->GetDlcsToUpdate(&dlc_ids_)) {
+ LOG(INFO) << "Failed to retrieve DLC module IDs from dlcservice. Check the "
+ "state of dlcservice, will not update DLC modules.";
+ }
+ PrefsInterface* prefs = system_state_->prefs();
+ map<string, OmahaRequestParams::AppParams> dlc_apps_params;
+ for (const auto& dlc_id : dlc_ids_) {
+ OmahaRequestParams::AppParams dlc_params{
+ .active_counting_type = OmahaRequestParams::kDateBased,
+ .name = dlc_id,
+ .send_ping = false};
+ if (is_install_) {
+ // In some cases, |SetDlcActiveValue| might fail to reset the DLC prefs
+ // when a DLC is uninstalled. To avoid having stale values from that
+ // scenario, we reset the metadata values on a new install request.
+ // Ignore failure to delete stale prefs.
+ ResetDlcPrefs(dlc_id);
+ SetDlcActiveValue(true, dlc_id);
+ } else {
+ // Only send the ping when the request is to update DLCs. When installing
+ // DLCs, we don't want to send the ping yet, since the DLCs might fail to
+ // install or might not really be active yet.
+ dlc_params.ping_active = kPingActiveValue;
+ auto ping_active_key =
+ prefs->CreateSubKey({kDlcPrefsSubDir, dlc_id, kPrefsPingActive});
+ if (!prefs->GetInt64(ping_active_key, &dlc_params.ping_active) ||
+ dlc_params.ping_active != kPingActiveValue) {
+ dlc_params.ping_active = kPingInactiveValue;
+ }
+ auto ping_last_active_key =
+ prefs->CreateSubKey({kDlcPrefsSubDir, dlc_id, kPrefsPingLastActive});
+ dlc_params.ping_date_last_active = GetPingMetadata(ping_last_active_key);
+
+ auto ping_last_rollcall_key = prefs->CreateSubKey(
+ {kDlcPrefsSubDir, dlc_id, kPrefsPingLastRollcall});
+ dlc_params.ping_date_last_rollcall =
+ GetPingMetadata(ping_last_rollcall_key);
+
+ dlc_params.send_ping = true;
+ }
+ dlc_apps_params[omaha_request_params_->GetDlcAppId(dlc_id)] = dlc_params;
+ }
+ omaha_request_params_->set_dlc_apps_params(dlc_apps_params);
+}
+
void UpdateAttempter::BuildUpdateActions(bool interactive) {
CHECK(!processor_->IsRunning());
processor_->set_delegate(this);
+ // The session ID needs to be kept throughout the update flow. The value
+ // of the session ID will reset/update only when it is a new update flow.
+ session_id_ = base::GenerateGUID();
+
// Actions:
auto update_check_fetcher = std::make_unique<LibcurlHttpFetcher>(
GetProxyResolver(), system_state_->hardware());
@@ -629,8 +777,13 @@
// Try harder to connect to the network, esp when not interactive.
// See comment in libcurl_http_fetcher.cc.
update_check_fetcher->set_no_network_max_retries(interactive ? 1 : 3);
- auto update_check_action = std::make_unique<OmahaRequestAction>(
- system_state_, nullptr, std::move(update_check_fetcher), false);
+ update_check_fetcher->set_is_update_check(true);
+ auto update_check_action =
+ std::make_unique<OmahaRequestAction>(system_state_,
+ nullptr,
+ std::move(update_check_fetcher),
+ false,
+ session_id_);
auto response_handler_action =
std::make_unique<OmahaResponseHandlerAction>(system_state_);
auto update_boot_flags_action =
@@ -640,13 +793,15 @@
new OmahaEvent(OmahaEvent::kTypeUpdateDownloadStarted),
std::make_unique<LibcurlHttpFetcher>(GetProxyResolver(),
system_state_->hardware()),
- false);
+ false,
+ session_id_);
LibcurlHttpFetcher* download_fetcher =
new LibcurlHttpFetcher(GetProxyResolver(), system_state_->hardware());
download_fetcher->set_server_to_check(ServerToCheck::kDownload);
if (interactive)
download_fetcher->set_max_retry_count(kDownloadMaxRetryCountInteractive);
+ download_fetcher->SetHeader(kXGoogleUpdateSessionId, session_id_);
auto download_action =
std::make_unique<DownloadAction>(prefs_,
system_state_->boot_control(),
@@ -661,7 +816,8 @@
new OmahaEvent(OmahaEvent::kTypeUpdateDownloadFinished),
std::make_unique<LibcurlHttpFetcher>(GetProxyResolver(),
system_state_->hardware()),
- false);
+ false,
+ session_id_);
auto filesystem_verifier_action =
std::make_unique<FilesystemVerifierAction>();
auto update_complete_action = std::make_unique<OmahaRequestAction>(
@@ -669,7 +825,8 @@
new OmahaEvent(OmahaEvent::kTypeUpdateComplete),
std::make_unique<LibcurlHttpFetcher>(GetProxyResolver(),
system_state_->hardware()),
- false);
+ false,
+ session_id_);
auto postinstall_runner_action = std::make_unique<PostinstallRunnerAction>(
system_state_->boot_control(), system_state_->hardware());
@@ -790,19 +947,16 @@
bool UpdateAttempter::CheckForUpdate(const string& app_version,
const string& omaha_url,
UpdateAttemptFlags flags) {
- dlc_module_ids_.clear();
- is_install_ = false;
- bool interactive = !(flags & UpdateAttemptFlags::kFlagNonInteractive);
-
- if (interactive && status_ != UpdateStatus::IDLE) {
- // An update check is either in-progress, or an update has completed and the
- // system is in UPDATED_NEED_REBOOT. Either way, don't do an interactive
- // update at this time
- LOG(INFO) << "Refusing to do an interactive update with an update already "
- "in progress";
+ if (status_ != UpdateStatus::IDLE) {
+ LOG(INFO) << "Refusing to do an update as there is an "
+ << (is_install_ ? "install" : "update")
+ << " already in progress.";
return false;
}
+ bool interactive = !(flags & UpdateAttemptFlags::kFlagNonInteractive);
+ is_install_ = false;
+
LOG(INFO) << "Forced update check requested.";
forced_app_version_.clear();
forced_omaha_url_.clear();
@@ -829,25 +983,33 @@
// of the previously set ones.
current_update_attempt_flags_ = flags;
// Note: The caching for non-interactive update checks happens in
- // OnUpdateScheduled().
+ // |OnUpdateScheduled()|.
}
+ // |forced_update_pending_callback_| should always be set, but even in the
+ // case that it is not, we still return true indicating success because the
+ // scheduled periodic check will pick up these changes.
if (forced_update_pending_callback_.get()) {
- if (!system_state_->dlcservice()->GetInstalled(&dlc_module_ids_)) {
- dlc_module_ids_.clear();
- }
- // Make sure that a scheduling request is made prior to calling the forced
- // update pending callback.
+ // Always call |ScheduleUpdates()| before forcing an update. This is because
+ // we need an update to be scheduled for the
+ // |forced_update_pending_callback_| to have an effect. Here we don't need
+ // to care about the return value from |ScheduleUpdates()|.
ScheduleUpdates();
forced_update_pending_callback_->Run(true, interactive);
}
-
return true;
}
-bool UpdateAttempter::CheckForInstall(const vector<string>& dlc_module_ids,
+bool UpdateAttempter::CheckForInstall(const vector<string>& dlc_ids,
const string& omaha_url) {
- dlc_module_ids_ = dlc_module_ids;
+ if (status_ != UpdateStatus::IDLE) {
+ LOG(INFO) << "Refusing to do an install as there is an "
+ << (is_install_ ? "install" : "update")
+ << " already in progress.";
+ return false;
+ }
+
+ dlc_ids_ = dlc_ids;
is_install_ = true;
forced_omaha_url_.clear();
@@ -857,21 +1019,22 @@
if (IsAnyUpdateSourceAllowed()) {
forced_omaha_url_ = omaha_url;
}
- if (omaha_url == kScheduledAUTestURLRequest) {
- forced_omaha_url_ = constants::kOmahaDefaultAUTestURL;
- } else if (omaha_url == kAUTestURLRequest) {
+
+ if (omaha_url == kScheduledAUTestURLRequest ||
+ omaha_url == kAUTestURLRequest) {
forced_omaha_url_ = constants::kOmahaDefaultAUTestURL;
}
- if (!ScheduleUpdates()) {
- if (forced_update_pending_callback_.get()) {
- // Make sure that a scheduling request is made prior to calling the forced
- // update pending callback.
- ScheduleUpdates();
- forced_update_pending_callback_->Run(true, true);
- return true;
- }
- return false;
+ // |forced_update_pending_callback_| should always be set, but even in the
+ // case that it is not, we still return true indicating success because the
+ // scheduled periodic check will pick up these changes.
+ if (forced_update_pending_callback_.get()) {
+ // Always call |ScheduleUpdates()| before forcing an update. This is because
+ // we need an update to be scheduled for the
+ // |forced_update_pending_callback_| to have an effect. Here we don't need
+ // to care about the return value from |ScheduleUpdates()|.
+ ScheduleUpdates();
+ forced_update_pending_callback_->Run(true, true);
}
return true;
}
@@ -902,13 +1065,9 @@
}
bool UpdateAttempter::RebootDirectly() {
- vector<string> command;
- command.push_back("/sbin/shutdown");
- command.push_back("-r");
- command.push_back("now");
- LOG(INFO) << "Running \"" << base::JoinString(command, " ") << "\"";
+ vector<string> command = {"/sbin/shutdown", "-r", "now"};
int rc = 0;
- Subprocess::SynchronousExec(command, &rc, nullptr);
+ Subprocess::SynchronousExec(command, &rc, nullptr, nullptr);
return rc == 0;
}
@@ -946,6 +1105,8 @@
params.target_channel,
params.target_version_prefix,
params.rollback_allowed,
+ params.rollback_data_save_requested,
+ params.rollback_allowed_milestones,
/*obey_proxies=*/false,
params.interactive);
// Always clear the forced app_version and omaha_url after an update attempt
@@ -963,7 +1124,7 @@
// a bug that will most likely prevent further automatic update checks. It
// seems better to crash in such cases and restart the update_engine daemon
// into, hopefully, a known good state.
- CHECK(IsUpdateRunningOrScheduled());
+ CHECK(IsBusyOrUpdateScheduled());
}
void UpdateAttempter::UpdateLastCheckedTime() {
@@ -983,25 +1144,17 @@
}
}
-// Delegate methods:
-void UpdateAttempter::ProcessingDone(const ActionProcessor* processor,
- ErrorCode code) {
- LOG(INFO) << "Processing Done.";
-
+void UpdateAttempter::ProcessingDoneInternal(const ActionProcessor* processor,
+ ErrorCode code) {
// Reset cpu shares back to normal.
cpu_limiter_.StopLimiter();
- // reset the state that's only valid for a single update pass
- current_update_attempt_flags_ = UpdateAttemptFlags::kNone;
-
- if (forced_update_pending_callback_.get())
- // Clear prior interactive requests once the processor is done.
- forced_update_pending_callback_->Run(false, false);
+ ResetInteractivityFlags();
if (status_ == UpdateStatus::REPORTING_ERROR_EVENT) {
LOG(INFO) << "Error event sent.";
- // Inform scheduler of new status;
+ // Inform scheduler of new status.
SetStatusAndNotify(UpdateStatus::IDLE);
ScheduleUpdates();
@@ -1014,93 +1167,122 @@
attempt_error_code_ = utils::GetBaseErrorCode(code);
- if (code == ErrorCode::kSuccess) {
- // For install operation, we do not mark update complete since we do not
- // need reboot.
- if (!is_install_)
- WriteUpdateCompletedMarker();
- ReportTimeToUpdateAppliedMetric();
-
- prefs_->SetInt64(kPrefsDeltaUpdateFailures, 0);
- prefs_->SetString(kPrefsPreviousVersion,
- omaha_request_params_->app_version());
- DeltaPerformer::ResetUpdateProgress(prefs_, false);
-
- system_state_->payload_state()->UpdateSucceeded();
-
- // Since we're done with scattering fully at this point, this is the
- // safest point delete the state files, as we're sure that the status is
- // set to reboot (which means no more updates will be applied until reboot)
- // This deletion is required for correctness as we want the next update
- // check to re-create a new random number for the update check count.
- // Similarly, we also delete the wall-clock-wait period that was persisted
- // so that we start with a new random value for the next update check
- // after reboot so that the same device is not favored or punished in any
- // way.
- prefs_->Delete(kPrefsUpdateCheckCount);
- system_state_->payload_state()->SetScatteringWaitPeriod(TimeDelta());
- system_state_->payload_state()->SetStagingWaitPeriod(TimeDelta());
- prefs_->Delete(kPrefsUpdateFirstSeenAt);
-
- if (is_install_) {
- LOG(INFO) << "DLC successfully installed, no reboot needed.";
- SetStatusAndNotify(UpdateStatus::IDLE);
- ScheduleUpdates();
+ if (code != ErrorCode::kSuccess) {
+ if (ScheduleErrorEventAction()) {
return;
}
-
- SetStatusAndNotify(UpdateStatus::UPDATED_NEED_REBOOT);
+ LOG(INFO) << "No update.";
+ SetStatusAndNotify(UpdateStatus::IDLE);
ScheduleUpdates();
- LOG(INFO) << "Update successfully applied, waiting to reboot.";
-
- // |install_plan_| is null during rollback operations, and the stats don't
- // make much sense then anyway.
- if (install_plan_) {
- // Generate an unique payload identifier.
- string target_version_uid;
- for (const auto& payload : install_plan_->payloads) {
- target_version_uid +=
- brillo::data_encoding::Base64Encode(payload.hash) + ":" +
- payload.metadata_signature + ":";
- }
-
- // If we just downloaded a rollback image, we should preserve this fact
- // over the following powerwash.
- if (install_plan_->is_rollback) {
- system_state_->payload_state()->SetRollbackHappened(true);
- system_state_->metrics_reporter()->ReportEnterpriseRollbackMetrics(
- /*success=*/true, install_plan_->version);
- }
-
- // Expect to reboot into the new version to send the proper metric during
- // next boot.
- system_state_->payload_state()->ExpectRebootInNewVersion(
- target_version_uid);
- } else {
- // If we just finished a rollback, then we expect to have no Omaha
- // response. Otherwise, it's an error.
- if (system_state_->payload_state()->GetRollbackVersion().empty()) {
- LOG(ERROR) << "Can't send metrics because there was no Omaha response";
- }
- }
return;
}
- if (ScheduleErrorEventAction()) {
- return;
+ ReportTimeToUpdateAppliedMetric();
+ prefs_->SetInt64(kPrefsDeltaUpdateFailures, 0);
+ prefs_->SetString(kPrefsPreviousVersion,
+ omaha_request_params_->app_version());
+ DeltaPerformer::ResetUpdateProgress(prefs_, false);
+
+ system_state_->payload_state()->UpdateSucceeded();
+
+ // Since we're done with scattering fully at this point, this is the
+ // safest point to delete the state files, as we're sure that the status is
+ // set to reboot (which means no more updates will be applied until reboot)
+ // This deletion is required for correctness as we want the next update
+ // check to re-create a new random number for the update check count.
+ // Similarly, we also delete the wall-clock-wait period that was persisted
+ // so that we start with a new random value for the next update check
+ // after reboot so that the same device is not favored or punished in any
+ // way.
+ prefs_->Delete(kPrefsUpdateCheckCount);
+ system_state_->payload_state()->SetScatteringWaitPeriod(TimeDelta());
+ system_state_->payload_state()->SetStagingWaitPeriod(TimeDelta());
+ prefs_->Delete(kPrefsUpdateFirstSeenAt);
+
+ // Note: the code below this point should only run on |ErrorCode::kSuccess|.
+ if (is_install_) {
+ ProcessingDoneInstall(processor, code);
+ } else {
+ ProcessingDoneUpdate(processor, code);
}
- LOG(INFO) << "No update.";
+}
+
+vector<string> UpdateAttempter::GetSuccessfulDlcIds() {
+ vector<string> dlc_ids;
+ for (const auto& pr : omaha_request_params_->dlc_apps_params())
+ if (pr.second.updated)
+ dlc_ids.push_back(pr.second.name);
+ return dlc_ids;
+}
+
+void UpdateAttempter::ProcessingDoneInstall(const ActionProcessor* processor,
+ ErrorCode code) {
+ if (!system_state_->dlcservice()->InstallCompleted(GetSuccessfulDlcIds()))
+ LOG(WARNING) << "dlcservice didn't successfully handle install completion.";
SetStatusAndNotify(UpdateStatus::IDLE);
ScheduleUpdates();
+ LOG(INFO) << "DLC successfully installed, no reboot needed.";
+}
+
+void UpdateAttempter::ProcessingDoneUpdate(const ActionProcessor* processor,
+ ErrorCode code) {
+ WriteUpdateCompletedMarker();
+
+ if (!system_state_->dlcservice()->UpdateCompleted(GetSuccessfulDlcIds()))
+ LOG(WARNING) << "dlcservice didn't successfully handle update completion.";
+ SetStatusAndNotify(UpdateStatus::UPDATED_NEED_REBOOT);
+ ScheduleUpdates();
+ LOG(INFO) << "Update successfully applied, waiting to reboot.";
+
+ // |install_plan_| is null during rollback operations, and the stats don't
+ // make much sense then anyway.
+ if (install_plan_) {
+ // Generate a unique payload identifier.
+ string target_version_uid;
+ for (const auto& payload : install_plan_->payloads) {
+ target_version_uid += brillo::data_encoding::Base64Encode(payload.hash) +
+ ":" + payload.metadata_signature + ":";
+ }
+
+ // If we just downloaded a rollback image, we should preserve this fact
+ // over the following powerwash.
+ if (install_plan_->is_rollback) {
+ system_state_->payload_state()->SetRollbackHappened(true);
+ system_state_->metrics_reporter()->ReportEnterpriseRollbackMetrics(
+ /*success=*/true, install_plan_->version);
+ }
+
+ // Expect to reboot into the new version to send the proper metric during
+ // next boot.
+ system_state_->payload_state()->ExpectRebootInNewVersion(
+ target_version_uid);
+ } else {
+ // If we just finished a rollback, then we expect to have no Omaha
+ // response. Otherwise, it's an error.
+ if (system_state_->payload_state()->GetRollbackVersion().empty()) {
+ LOG(ERROR) << "Can't send metrics because there was no Omaha response";
+ }
+ }
+}
+
+// Delegate methods:
+void UpdateAttempter::ProcessingDone(const ActionProcessor* processor,
+ ErrorCode code) {
+ LOG(INFO) << "Processing Done.";
+ ProcessingDoneInternal(processor, code);
+
+ // Note: do cleanups here for any variables that need to be reset after a
+ // failure, error, update, or install.
+ is_install_ = false;
}
void UpdateAttempter::ProcessingStopped(const ActionProcessor* processor) {
// Reset cpu shares back to normal.
cpu_limiter_.StopLimiter();
download_progress_ = 0.0;
- if (forced_update_pending_callback_.get())
- // Clear prior interactive requests once the processor is done.
- forced_update_pending_callback_->Run(false, false);
+
+ ResetInteractivityFlags();
+
SetStatusAndNotify(UpdateStatus::IDLE);
ScheduleUpdates();
error_event_.reset(nullptr);
@@ -1168,7 +1350,6 @@
new InstallPlan(omaha_response_handler_action->install_plan()));
UpdateLastCheckedTime();
new_version_ = install_plan_->version;
- new_system_version_ = install_plan_->system_version;
new_payload_size_ = 0;
for (const auto& payload : install_plan_->payloads)
new_payload_size_ += payload.size;
@@ -1254,6 +1435,15 @@
}
}
+void UpdateAttempter::ResetInteractivityFlags() {
+ // Reset the state that's only valid for a single update pass.
+ current_update_attempt_flags_ = UpdateAttemptFlags::kNone;
+
+ if (forced_update_pending_callback_.get())
+ // Clear prior interactive requests once the processor is done.
+ forced_update_pending_callback_->Run(false, false);
+}
+
bool UpdateAttempter::ResetStatus() {
LOG(INFO) << "Attempting to reset state from "
<< UpdateStatusToString(status_) << " to UpdateStatus::IDLE";
@@ -1307,11 +1497,25 @@
out_status->last_checked_time = last_checked_time_;
out_status->status = status_;
out_status->current_version = omaha_request_params_->app_version();
- out_status->current_system_version = omaha_request_params_->system_version();
out_status->progress = download_progress_;
out_status->new_size_bytes = new_payload_size_;
out_status->new_version = new_version_;
- out_status->new_system_version = new_system_version_;
+ out_status->is_enterprise_rollback =
+ install_plan_ && install_plan_->is_rollback;
+ out_status->is_install = is_install_;
+
+ string str_eol_date;
+ if (system_state_->prefs()->Exists(kPrefsOmahaEolDate) &&
+ !system_state_->prefs()->GetString(kPrefsOmahaEolDate, &str_eol_date))
+ LOG(ERROR) << "Failed to retrieve kPrefsOmahaEolDate pref.";
+ out_status->eol_date = StringToEolDate(str_eol_date);
+
+ // A powerwash will take place either if the install plan says it is required
+ // or if an enterprise rollback is happening.
+ out_status->will_powerwash_after_reboot =
+ install_plan_ &&
+ (install_plan_->powerwash_required || install_plan_->is_rollback);
+
return true;
}
@@ -1421,7 +1625,8 @@
error_event_.release(), // Pass ownership.
std::make_unique<LibcurlHttpFetcher>(GetProxyResolver(),
system_state_->hardware()),
- false);
+ false,
+ session_id_);
processor_->EnqueueAction(std::move(error_event_action));
SetStatusAndNotify(UpdateStatus::REPORTING_ERROR_EVENT);
processor_->StartProcessing();
@@ -1459,12 +1664,15 @@
void UpdateAttempter::PingOmaha() {
if (!processor_->IsRunning()) {
+ ResetInteractivityFlags();
+
auto ping_action = std::make_unique<OmahaRequestAction>(
system_state_,
nullptr,
std::make_unique<LibcurlHttpFetcher>(GetProxyResolver(),
system_state_->hardware()),
- true);
+ true,
+ "" /* session_id */);
processor_->set_delegate(nullptr);
processor_->EnqueueAction(std::move(ping_action));
// Call StartProcessing() synchronously here to avoid any race conditions
@@ -1557,6 +1765,8 @@
system_state_->payload_state()->UpdateEngineStarted();
StartP2PAtStartup();
+
+ excluder_ = CreateExcluder(system_state_->prefs());
}
bool UpdateAttempter::StartP2PAtStartup() {
@@ -1626,7 +1836,7 @@
return true;
}
-bool UpdateAttempter::IsUpdateRunningOrScheduled() {
+bool UpdateAttempter::IsBusyOrUpdateScheduled() {
return ((status_ != UpdateStatus::IDLE &&
status_ != UpdateStatus::UPDATED_NEED_REBOOT) ||
waiting_for_scheduled_check_);
diff --git a/update_attempter.h b/update_attempter.h
index c27f8a4..dd958f5 100644
--- a/update_attempter.h
+++ b/update_attempter.h
@@ -26,6 +26,7 @@
#include <vector>
#include <base/bind.h>
+#include <base/guid.h>
#include <base/time/time.h>
#include <gtest/gtest_prod.h> // for FRIEND_TEST
@@ -36,7 +37,9 @@
#include "update_engine/client_library/include/update_engine/update_status.h"
#include "update_engine/common/action_processor.h"
#include "update_engine/common/cpu_limiter.h"
+#include "update_engine/common/excluder_interface.h"
#include "update_engine/common/proxy_resolver.h"
+#include "update_engine/omaha_request_builder_xml.h"
#include "update_engine/omaha_request_params.h"
#include "update_engine/omaha_response_handler_action.h"
#include "update_engine/payload_consumer/download_action.h"
@@ -84,6 +87,8 @@
const std::string& target_channel,
const std::string& target_version_prefix,
bool rollback_allowed,
+ bool rollback_data_save_requested,
+ int rollback_allowed_milestones,
bool obey_proxies,
bool interactive);
@@ -137,7 +142,7 @@
UpdateAttemptFlags flags);
// This is the version of CheckForUpdate called by AttemptInstall API.
- virtual bool CheckForInstall(const std::vector<std::string>& dlc_module_ids,
+ virtual bool CheckForInstall(const std::vector<std::string>& dlc_ids,
const std::string& omaha_url);
// This is the internal entry point for going through a rollback. This will
@@ -158,6 +163,9 @@
// UPDATED_NEED_REBOOT. Returns true on success, false otherwise.
bool RebootIfNeeded();
+ // Sets the DLC as active or inactive. See common_service.h
+ virtual bool SetDlcActiveValue(bool is_active, const std::string& dlc_id);
+
// DownloadActionDelegate methods:
void BytesReceived(uint64_t bytes_progressed,
uint64_t bytes_received,
@@ -177,6 +185,9 @@
// Called at update_engine startup to do various house-keeping.
void UpdateEngineStarted();
+ // Returns the |Excluder| that is currently held onto.
+ virtual ExcluderInterface* GetExcluder() const { return excluder_.get(); }
+
// Reloads the device policy from libbrillo. Note: This method doesn't
// cause a real-time policy fetch from the policy server. It just reloads the
// latest value that libbrillo has cached. libbrillo fetches the policies
@@ -245,15 +256,28 @@
FRIEND_TEST(UpdateAttempterTest, ActionCompletedOmahaRequestTest);
FRIEND_TEST(UpdateAttempterTest, BootTimeInUpdateMarkerFile);
FRIEND_TEST(UpdateAttempterTest, BroadcastCompleteDownloadTest);
+ FRIEND_TEST(UpdateAttempterTest, CalculateDlcParamsInstallTest);
+ FRIEND_TEST(UpdateAttempterTest, CalculateDlcParamsNoPrefFilesTest);
+ FRIEND_TEST(UpdateAttempterTest, CalculateDlcParamsNonParseableValuesTest);
+ FRIEND_TEST(UpdateAttempterTest, CalculateDlcParamsValidValuesTest);
+ FRIEND_TEST(UpdateAttempterTest, CalculateDlcParamsRemoveStaleMetadata);
FRIEND_TEST(UpdateAttempterTest, ChangeToDownloadingOnReceivedBytesTest);
+ FRIEND_TEST(UpdateAttempterTest, CheckForInstallNotIdleFails);
FRIEND_TEST(UpdateAttempterTest, CheckForUpdateAUDlcTest);
FRIEND_TEST(UpdateAttempterTest, CreatePendingErrorEventTest);
FRIEND_TEST(UpdateAttempterTest, CreatePendingErrorEventResumedTest);
FRIEND_TEST(UpdateAttempterTest, DisableDeltaUpdateIfNeededTest);
FRIEND_TEST(UpdateAttempterTest, DownloadProgressAccumulationTest);
FRIEND_TEST(UpdateAttempterTest, InstallSetsStatusIdle);
+ FRIEND_TEST(UpdateAttempterTest, IsEnterpriseRollbackInGetStatusTrue);
+ FRIEND_TEST(UpdateAttempterTest, IsEnterpriseRollbackInGetStatusFalse);
+ FRIEND_TEST(UpdateAttempterTest,
+ PowerwashInGetStatusTrueBecausePowerwashRequired);
+ FRIEND_TEST(UpdateAttempterTest, PowerwashInGetStatusTrueBecauseRollback);
FRIEND_TEST(UpdateAttempterTest, MarkDeltaUpdateFailureTest);
FRIEND_TEST(UpdateAttempterTest, PingOmahaTest);
+ FRIEND_TEST(UpdateAttempterTest, ProcessingDoneInstallError);
+ FRIEND_TEST(UpdateAttempterTest, ProcessingDoneUpdateError);
FRIEND_TEST(UpdateAttempterTest, ReportDailyMetrics);
FRIEND_TEST(UpdateAttempterTest, RollbackNotAllowed);
FRIEND_TEST(UpdateAttempterTest, RollbackAfterInstall);
@@ -265,6 +289,8 @@
FRIEND_TEST(UpdateAttempterTest, RollbackMetricsRollbackSuccess);
FRIEND_TEST(UpdateAttempterTest, ScheduleErrorEventActionNoEventTest);
FRIEND_TEST(UpdateAttempterTest, ScheduleErrorEventActionTest);
+ FRIEND_TEST(UpdateAttempterTest, SessionIdTestEnforceEmptyStrPingOmaha);
+ FRIEND_TEST(UpdateAttempterTest, SessionIdTestOnOmahaRequestActions);
FRIEND_TEST(UpdateAttempterTest, SetRollbackHappenedNotRollback);
FRIEND_TEST(UpdateAttempterTest, SetRollbackHappenedRollback);
FRIEND_TEST(UpdateAttempterTest, TargetVersionPrefixSetAndReset);
@@ -272,11 +298,17 @@
FRIEND_TEST(UpdateAttempterTest, UpdateAttemptFlagsCachedAtUpdateStart);
FRIEND_TEST(UpdateAttempterTest, UpdateDeferredByPolicyTest);
FRIEND_TEST(UpdateAttempterTest, UpdateIsNotRunningWhenUpdateAvailable);
+ FRIEND_TEST(UpdateAttempterTest, GetSuccessfulDlcIds);
// Returns the special flags to be added to ErrorCode values based on the
// parameters used in the current update attempt.
uint32_t GetErrorCodeFlags();
+ // ActionProcessorDelegate methods |ProcessingDone()| internal helpers.
+ void ProcessingDoneInternal(const ActionProcessor* processor, ErrorCode code);
+ void ProcessingDoneUpdate(const ActionProcessor* processor, ErrorCode code);
+ void ProcessingDoneInstall(const ActionProcessor* processor, ErrorCode code);
+
// CertificateChecker::Observer method.
// Report metrics about the certificate being checked.
void CertificateChecked(ServerToCheck server_to_check,
@@ -339,6 +371,8 @@
const std::string& target_channel,
const std::string& target_version_prefix,
bool rollback_allowed,
+ bool rollback_data_save_requested,
+ int rollback_allowed_milestones,
bool obey_proxies,
bool interactive);
@@ -398,8 +432,8 @@
// policy is available again.
void UpdateRollbackHappened();
- // Returns whether an update is currently running or scheduled.
- bool IsUpdateRunningOrScheduled();
+ // Returns whether an update is running, applied (awaiting reboot), or scheduled.
+ bool IsBusyOrUpdateScheduled();
void CalculateStagingParams(bool interactive);
@@ -408,6 +442,27 @@
// will only be reported for enterprise enrolled devices.
void ReportTimeToUpdateAppliedMetric();
+ // Resets interactivity and forced update flags.
+ void ResetInteractivityFlags();
+
+ // Resets all the DLC prefs.
+ bool ResetDlcPrefs(const std::string& dlc_id);
+
+ // Get the integer values from the DLC metadata for |kPrefsPingLastActive|
+ // or |kPrefsPingLastRollcall|.
+ // The value is equal to -2 when the value cannot be read or is not numeric.
+ // The value is equal to -1 the first time it is being sent, which is
+ // when the metadata file doesn't exist.
+ int64_t GetPingMetadata(const std::string& metadata_key) const;
+
+ // Calculates the update parameters for DLCs. Sets the |dlc_ids_|
+ // parameter on the |omaha_request_params_| object.
+ void CalculateDlcParams();
+
+ // Returns the list of DLC IDs that were installed/updated, excluding the ones
+ // which had "noupdate" in the Omaha response.
+ std::vector<std::string> GetSuccessfulDlcIds();
+
// Last status notification timestamp used for throttling. Use monotonic
// TimeTicks to ensure that notifications are sent even if the system clock is
// set back in the middle of an update.
@@ -460,7 +515,6 @@
int64_t last_checked_time_ = 0;
std::string prev_version_;
std::string new_version_ = "0.0.0.0";
- std::string new_system_version_;
uint64_t new_payload_size_ = 0;
// Flags influencing all periodic update checks
UpdateAttemptFlags update_attempt_flags_ = UpdateAttemptFlags::kNone;
@@ -509,7 +563,7 @@
std::string forced_omaha_url_;
// A list of DLC module IDs.
- std::vector<std::string> dlc_module_ids_;
+ std::vector<std::string> dlc_ids_;
// Whether the operation is install (write to the current slot not the
// inactive slot).
bool is_install_;
@@ -518,6 +572,12 @@
base::TimeDelta staging_wait_time_;
chromeos_update_manager::StagingSchedule staging_schedule_;
+ // This is the session ID used to track update flow to Omaha.
+ std::string session_id_;
+
+ // Interface for excluder.
+ std::unique_ptr<ExcluderInterface> excluder_;
+
DISALLOW_COPY_AND_ASSIGN(UpdateAttempter);
};
diff --git a/update_attempter_unittest.cc b/update_attempter_unittest.cc
index 579c736..0086dd5 100644
--- a/update_attempter_unittest.cc
+++ b/update_attempter_unittest.cc
@@ -18,7 +18,11 @@
#include <stdint.h>
+#include <limits>
+#include <map>
#include <memory>
+#include <string>
+#include <unordered_set>
#include <base/files/file_util.h>
#include <base/message_loop/message_loop.h>
@@ -30,6 +34,7 @@
#include <policy/mock_device_policy.h>
#include <policy/mock_libpolicy.h>
+#include "update_engine/common/constants.h"
#include "update_engine/common/dlcservice_interface.h"
#include "update_engine/common/fake_clock.h"
#include "update_engine/common/fake_prefs.h"
@@ -42,28 +47,37 @@
#include "update_engine/common/test_utils.h"
#include "update_engine/common/utils.h"
#include "update_engine/fake_system_state.h"
+#include "update_engine/libcurl_http_fetcher.h"
#include "update_engine/mock_p2p_manager.h"
#include "update_engine/mock_payload_state.h"
#include "update_engine/mock_service_observer.h"
+#include "update_engine/omaha_utils.h"
#include "update_engine/payload_consumer/filesystem_verifier_action.h"
#include "update_engine/payload_consumer/install_plan.h"
#include "update_engine/payload_consumer/payload_constants.h"
#include "update_engine/payload_consumer/postinstall_runner_action.h"
#include "update_engine/update_boot_flags_action.h"
+#include "update_engine/update_manager/mock_update_manager.h"
using base::Time;
using base::TimeDelta;
using chromeos_update_manager::EvalStatus;
+using chromeos_update_manager::MockUpdateManager;
using chromeos_update_manager::StagingSchedule;
using chromeos_update_manager::UpdateCheckParams;
using policy::DevicePolicy;
+using std::map;
using std::string;
using std::unique_ptr;
+using std::unordered_set;
using std::vector;
using testing::_;
+using testing::Contains;
using testing::DoAll;
+using testing::ElementsAre;
using testing::Field;
using testing::InSequence;
+using testing::Invoke;
using testing::Ne;
using testing::NiceMock;
using testing::Pointee;
@@ -81,9 +95,68 @@
namespace {
+const UpdateStatus kNonIdleUpdateStatuses[] = {
+ UpdateStatus::CHECKING_FOR_UPDATE,
+ UpdateStatus::UPDATE_AVAILABLE,
+ UpdateStatus::DOWNLOADING,
+ UpdateStatus::VERIFYING,
+ UpdateStatus::FINALIZING,
+ UpdateStatus::UPDATED_NEED_REBOOT,
+ UpdateStatus::REPORTING_ERROR_EVENT,
+ UpdateStatus::ATTEMPTING_ROLLBACK,
+ UpdateStatus::DISABLED,
+ UpdateStatus::NEED_PERMISSION_TO_UPDATE,
+};
+
+struct CheckForUpdateTestParams {
+ // Setups + Inputs:
+ UpdateStatus status = UpdateStatus::IDLE;
+ string app_version = "fake_app_version";
+ string omaha_url = "fake_omaha_url";
+ UpdateAttemptFlags flags = UpdateAttemptFlags::kNone;
+ bool is_official_build = true;
+ bool are_dev_features_enabled = false;
+
+ // Expects:
+ string expected_forced_app_version = "";
+ string expected_forced_omaha_url = "";
+ bool should_schedule_updates_be_called = true;
+ bool expected_result = true;
+};
+
+struct OnUpdateScheduledTestParams {
+ // Setups + Inputs:
+ UpdateCheckParams params = {};
+ EvalStatus status = EvalStatus::kFailed;
+ // Expects:
+ UpdateStatus exit_status = UpdateStatus::IDLE;
+ bool should_schedule_updates_be_called = false;
+ bool should_update_be_called = false;
+};
+
+struct ProcessingDoneTestParams {
+ // Setups + Inputs:
+ bool is_install = false;
+ UpdateStatus status = UpdateStatus::CHECKING_FOR_UPDATE;
+ ActionProcessor* processor = nullptr;
+ ErrorCode code = ErrorCode::kSuccess;
+ map<string, OmahaRequestParams::AppParams> dlc_apps_params;
+
+ // Expects:
+ const bool kExpectedIsInstall = false;
+ bool should_schedule_updates_be_called = true;
+ UpdateStatus expected_exit_status = UpdateStatus::IDLE;
+ bool should_install_completed_be_called = false;
+ bool should_update_completed_be_called = false;
+ vector<string> args_to_install_completed;
+ vector<string> args_to_update_completed;
+};
+
class MockDlcService : public DlcServiceInterface {
public:
- MOCK_METHOD1(GetInstalled, bool(vector<string>*));
+ MOCK_METHOD1(GetDlcsToUpdate, bool(vector<string>*));
+ MOCK_METHOD1(InstallCompleted, bool(const vector<string>&));
+ MOCK_METHOD1(UpdateCompleted, bool(const vector<string>&));
};
} // namespace
@@ -98,27 +171,67 @@
explicit UpdateAttempterUnderTest(SystemState* system_state)
: UpdateAttempter(system_state, nullptr) {}
+ void Update(const std::string& app_version,
+ const std::string& omaha_url,
+ const std::string& target_channel,
+ const std::string& target_version_prefix,
+ bool rollback_allowed,
+ bool rollback_data_save_requested,
+ int rollback_allowed_milestones,
+ bool obey_proxies,
+ bool interactive) override {
+ update_called_ = true;
+ if (do_update_) {
+ UpdateAttempter::Update(app_version,
+ omaha_url,
+ target_channel,
+ target_version_prefix,
+ rollback_allowed,
+ rollback_data_save_requested,
+ rollback_allowed_milestones,
+ obey_proxies,
+ interactive);
+ return;
+ }
+ LOG(INFO) << "[TEST] Update() disabled.";
+ status_ = UpdateStatus::CHECKING_FOR_UPDATE;
+ }
+
+ void DisableUpdate() { do_update_ = false; }
+
+ bool WasUpdateCalled() const { return update_called_; }
+
// Wrap the update scheduling method, allowing us to opt out of scheduled
// updates for testing purposes.
bool ScheduleUpdates() override {
schedule_updates_called_ = true;
- if (do_schedule_updates_) {
- UpdateAttempter::ScheduleUpdates();
- } else {
- LOG(INFO) << "[TEST] Update scheduling disabled.";
- }
+ if (do_schedule_updates_)
+ return UpdateAttempter::ScheduleUpdates();
+ LOG(INFO) << "[TEST] Update scheduling disabled.";
+ waiting_for_scheduled_check_ = true;
return true;
}
- void EnableScheduleUpdates() { do_schedule_updates_ = true; }
+
void DisableScheduleUpdates() { do_schedule_updates_ = false; }
- // Indicates whether ScheduleUpdates() was called.
- bool schedule_updates_called() const { return schedule_updates_called_; }
+ // Indicates whether |ScheduleUpdates()| was called.
+ bool WasScheduleUpdatesCalled() const { return schedule_updates_called_; }
- // Need to expose forced_omaha_url_ so we can test it.
+ // Need to expose following private members of |UpdateAttempter| for tests.
+ const string& forced_app_version() const { return forced_app_version_; }
const string& forced_omaha_url() const { return forced_omaha_url_; }
+ // Need to expose |waiting_for_scheduled_check_| for testing.
+ void SetWaitingForScheduledCheck(bool waiting) {
+ waiting_for_scheduled_check_ = waiting;
+ }
+
private:
+ // Used for overrides of |Update()|.
+ bool update_called_ = false;
+ bool do_update_ = true;
+
+ // Used for overrides of |ScheduleUpdates()|.
bool schedule_updates_called_ = false;
bool do_schedule_updates_ = true;
};
@@ -132,6 +245,7 @@
fake_system_state_.set_connection_manager(&mock_connection_manager);
fake_system_state_.set_update_attempter(&attempter_);
fake_system_state_.set_dlcservice(&mock_dlcservice_);
+ fake_system_state_.set_update_manager(&mock_update_manager_);
loop_.SetAsCurrent();
certificate_checker_.Init();
@@ -144,6 +258,7 @@
void SetUp() override {
EXPECT_NE(nullptr, attempter_.system_state_);
+ EXPECT_NE(nullptr, attempter_.system_state_->update_manager());
EXPECT_EQ(0, attempter_.http_response_code_);
EXPECT_EQ(UpdateStatus::IDLE, attempter_.status_);
EXPECT_EQ(0.0, attempter_.download_progress_);
@@ -154,7 +269,7 @@
attempter_.processor_.reset(processor_); // Transfers ownership.
prefs_ = fake_system_state_.mock_prefs();
- // Set up store/load semantics of P2P properties via the mock PayloadState.
+ // Set up store/load semantics of P2P properties via the mock |PayloadState|.
actual_using_p2p_for_downloading_ = false;
EXPECT_CALL(*fake_system_state_.mock_payload_state(),
SetUsingP2PForDownloading(_))
@@ -188,6 +303,11 @@
void P2PEnabledInteractiveStart();
void P2PEnabledStartingFailsStart();
void P2PEnabledHousekeepingFailsStart();
+ void SessionIdTestChange();
+ void SessionIdTestEnforceEmptyStrPingOmaha();
+ void SessionIdTestConsistencyInUpdateFlow();
+ void SessionIdTestInDownloadAction();
+ void UpdateToQuickFixBuildStart(bool set_token);
void ResetRollbackHappenedStart(bool is_consumer,
bool is_policy_available,
bool expected_reset);
@@ -203,6 +323,15 @@
}
bool actual_using_p2p_for_sharing() { return actual_using_p2p_for_sharing_; }
+ // |CheckForUpdate()| related member functions.
+ void TestCheckForUpdate();
+
+ // |OnUpdateScheduled()| related member functions.
+ void TestOnUpdateScheduled();
+
+ // |ProcessingDone()| related member functions.
+ void TestProcessingDone();
+
base::MessageLoopForIO base_loop_;
brillo::BaseMessageLoop loop_{&base_loop_};
@@ -211,15 +340,81 @@
OpenSSLWrapper openssl_wrapper_;
CertificateChecker certificate_checker_;
MockDlcService mock_dlcservice_;
+ MockUpdateManager mock_update_manager_;
NiceMock<MockActionProcessor>* processor_;
- NiceMock<MockPrefs>* prefs_; // Shortcut to fake_system_state_->mock_prefs().
+ NiceMock<MockPrefs>*
+ prefs_; // Shortcut to |fake_system_state_->mock_prefs()|.
NiceMock<MockConnectionManager> mock_connection_manager;
+ // |CheckForUpdate()| test params.
+ CheckForUpdateTestParams cfu_params_;
+
+ // |OnUpdateScheduled()| test params.
+ OnUpdateScheduledTestParams ous_params_;
+
+ // |ProcessingDone()| test params.
+ ProcessingDoneTestParams pd_params_;
+
bool actual_using_p2p_for_downloading_;
bool actual_using_p2p_for_sharing_;
};
+void UpdateAttempterTest::TestCheckForUpdate() {
+ // Setup
+ attempter_.status_ = cfu_params_.status;
+ fake_system_state_.fake_hardware()->SetIsOfficialBuild(
+ cfu_params_.is_official_build);
+ fake_system_state_.fake_hardware()->SetAreDevFeaturesEnabled(
+ cfu_params_.are_dev_features_enabled);
+
+ // Invocation
+ EXPECT_EQ(
+ cfu_params_.expected_result,
+ attempter_.CheckForUpdate(
+ cfu_params_.app_version, cfu_params_.omaha_url, cfu_params_.flags));
+
+ // Verify
+ EXPECT_EQ(cfu_params_.expected_forced_app_version,
+ attempter_.forced_app_version());
+ EXPECT_EQ(cfu_params_.expected_forced_omaha_url,
+ attempter_.forced_omaha_url());
+ EXPECT_EQ(cfu_params_.should_schedule_updates_be_called,
+ attempter_.WasScheduleUpdatesCalled());
+}
+
+void UpdateAttempterTest::TestProcessingDone() {
+ // Setup
+ attempter_.DisableScheduleUpdates();
+ attempter_.is_install_ = pd_params_.is_install;
+ attempter_.status_ = pd_params_.status;
+ attempter_.omaha_request_params_->set_dlc_apps_params(
+ pd_params_.dlc_apps_params);
+
+ // Expects
+ if (pd_params_.should_install_completed_be_called)
+ EXPECT_CALL(mock_dlcservice_,
+ InstallCompleted(pd_params_.args_to_install_completed))
+ .WillOnce(Return(true));
+ else
+ EXPECT_CALL(mock_dlcservice_, InstallCompleted(_)).Times(0);
+ if (pd_params_.should_update_completed_be_called)
+ EXPECT_CALL(mock_dlcservice_,
+ UpdateCompleted(pd_params_.args_to_update_completed))
+ .WillOnce(Return(true));
+ else
+ EXPECT_CALL(mock_dlcservice_, UpdateCompleted(_)).Times(0);
+
+ // Invocation
+ attempter_.ProcessingDone(pd_params_.processor, pd_params_.code);
+
+ // Verify
+ EXPECT_EQ(pd_params_.kExpectedIsInstall, attempter_.is_install_);
+ EXPECT_EQ(pd_params_.should_schedule_updates_be_called,
+ attempter_.WasScheduleUpdatesCalled());
+ EXPECT_EQ(pd_params_.expected_exit_status, attempter_.status_);
+}
+
void UpdateAttempterTest::ScheduleQuitMainLoop() {
loop_.PostTask(
FROM_HERE,
@@ -227,6 +422,101 @@
base::Unretained(&loop_)));
}
+void UpdateAttempterTest::SessionIdTestChange() {
+ EXPECT_NE(UpdateStatus::UPDATED_NEED_REBOOT, attempter_.status());
+ const auto old_session_id = attempter_.session_id_;
+ attempter_.Update("", "", "", "", false, false, 0, false, false);
+ EXPECT_NE(old_session_id, attempter_.session_id_);
+ ScheduleQuitMainLoop();
+}
+
+TEST_F(UpdateAttempterTest, SessionIdTestChange) {
+ loop_.PostTask(FROM_HERE,
+ base::Bind(&UpdateAttempterTest::SessionIdTestChange,
+ base::Unretained(this)));
+ loop_.Run();
+}
+
+void UpdateAttempterTest::SessionIdTestEnforceEmptyStrPingOmaha() {
+ // The |session_id_| should not be changed and should remain as an empty
+ // string when |status_| is |UPDATED_NEED_REBOOT| (only for consistency)
+ // and |PingOmaha()| is called.
+ attempter_.DisableScheduleUpdates();
+ attempter_.status_ = UpdateStatus::UPDATED_NEED_REBOOT;
+ const auto old_session_id = attempter_.session_id_;
+ auto CheckIfEmptySessionId = [](AbstractAction* aa) {
+ if (aa->Type() == OmahaRequestAction::StaticType()) {
+ EXPECT_TRUE(static_cast<OmahaRequestAction*>(aa)->session_id_.empty());
+ }
+ };
+ EXPECT_CALL(*processor_, EnqueueAction(Pointee(_)))
+ .WillRepeatedly(Invoke(CheckIfEmptySessionId));
+ EXPECT_CALL(*processor_, StartProcessing());
+ attempter_.PingOmaha();
+ EXPECT_EQ(old_session_id, attempter_.session_id_);
+ EXPECT_EQ(UpdateStatus::UPDATED_NEED_REBOOT, attempter_.status_);
+ ScheduleQuitMainLoop();
+}
+
+TEST_F(UpdateAttempterTest, SessionIdTestEnforceEmptyStrPingOmaha) {
+ loop_.PostTask(
+ FROM_HERE,
+ base::Bind(&UpdateAttempterTest::SessionIdTestEnforceEmptyStrPingOmaha,
+ base::Unretained(this)));
+ loop_.Run();
+}
+
+void UpdateAttempterTest::SessionIdTestConsistencyInUpdateFlow() {
+ // All session IDs passed into |OmahaRequestActions| should be enforced to
+ // have the same value in |BuildUpdateActions()|.
+ unordered_set<string> session_ids;
+ // Gather all the session IDs being passed to |OmahaRequestActions|.
+ auto CheckSessionId = [&session_ids](AbstractAction* aa) {
+ if (aa->Type() == OmahaRequestAction::StaticType())
+ session_ids.insert(static_cast<OmahaRequestAction*>(aa)->session_id_);
+ };
+ EXPECT_CALL(*processor_, EnqueueAction(Pointee(_)))
+ .WillRepeatedly(Invoke(CheckSessionId));
+ attempter_.BuildUpdateActions(false);
+ // Validate that all the session IDs are the same.
+ EXPECT_EQ(1, session_ids.size());
+ ScheduleQuitMainLoop();
+}
+
+TEST_F(UpdateAttempterTest, SessionIdTestConsistencyInUpdateFlow) {
+ loop_.PostTask(
+ FROM_HERE,
+ base::Bind(&UpdateAttempterTest::SessionIdTestConsistencyInUpdateFlow,
+ base::Unretained(this)));
+ loop_.Run();
+}
+
+void UpdateAttempterTest::SessionIdTestInDownloadAction() {
+ // The session ID passed into |DownloadAction|'s |LibcurlHttpFetcher| should
+ // be enforced to be included in the HTTP header as X-Goog-Update-SessionId.
+ string header_value;
+ auto CheckSessionIdInDownloadAction = [&header_value](AbstractAction* aa) {
+ if (aa->Type() == DownloadAction::StaticType()) {
+ DownloadAction* da = static_cast<DownloadAction*>(aa);
+ EXPECT_TRUE(da->http_fetcher()->GetHeader(kXGoogleUpdateSessionId,
+ &header_value));
+ }
+ };
+ EXPECT_CALL(*processor_, EnqueueAction(Pointee(_)))
+ .WillRepeatedly(Invoke(CheckSessionIdInDownloadAction));
+ attempter_.BuildUpdateActions(false);
+ // Validate that X-Goog-Update-SessionId is set correctly in HTTP Header.
+ EXPECT_EQ(attempter_.session_id_, header_value);
+ ScheduleQuitMainLoop();
+}
+
+TEST_F(UpdateAttempterTest, SessionIdTestInDownloadAction) {
+ loop_.PostTask(FROM_HERE,
+ base::Bind(&UpdateAttempterTest::SessionIdTestInDownloadAction,
+ base::Unretained(this)));
+ loop_.Run();
+}
+
TEST_F(UpdateAttempterTest, ActionCompletedDownloadTest) {
unique_ptr<MockHttpFetcher> fetcher(new MockHttpFetcher("", 0, nullptr));
fetcher->FailTransfer(503); // Sets the HTTP response code.
@@ -268,7 +558,7 @@
EXPECT_EQ(0.0, attempter_.download_progress_);
// This is set via inspecting the InstallPlan payloads when the
- // OmahaResponseAction is completed
+ // |OmahaResponseAction| is completed.
attempter_.new_payload_size_ = bytes_total;
NiceMock<MockServiceObserver> observer;
EXPECT_CALL(observer,
@@ -292,14 +582,14 @@
}
TEST_F(UpdateAttempterTest, ChangeToDownloadingOnReceivedBytesTest) {
- // The transition into UpdateStatus::DOWNLOADING happens when the
+ // The transition into |UpdateStatus::DOWNLOADING| happens when the
// first bytes are received.
uint64_t bytes_progressed = 1024 * 1024; // 1MB
uint64_t bytes_received = 2 * 1024 * 1024; // 2MB
uint64_t bytes_total = 20 * 1024 * 1024; // 300MB
attempter_.status_ = UpdateStatus::CHECKING_FOR_UPDATE;
// This is set via inspecting the InstallPlan payloads when the
- // OmahaResponseAction is completed
+ // |OmahaResponseAction| is completed.
attempter_.new_payload_size_ = bytes_total;
EXPECT_EQ(0.0, attempter_.download_progress_);
NiceMock<MockServiceObserver> observer;
@@ -314,8 +604,7 @@
TEST_F(UpdateAttempterTest, BroadcastCompleteDownloadTest) {
// There is a special case to ensure that at 100% downloaded,
- // download_progress_ is updated and that value broadcast. This test confirms
- // that.
+ // |download_progress_| is updated and that value broadcast.
uint64_t bytes_progressed = 0; // ignored
uint64_t bytes_received = 5 * 1024 * 1024; // ignored
uint64_t bytes_total = 5 * 1024 * 1024; // 300MB
@@ -337,7 +626,7 @@
unique_ptr<MockHttpFetcher> fetcher(new MockHttpFetcher("", 0, nullptr));
fetcher->FailTransfer(500); // Sets the HTTP response code.
OmahaRequestAction action(
- &fake_system_state_, nullptr, std::move(fetcher), false);
+ &fake_system_state_, nullptr, std::move(fetcher), false, "");
ObjectCollectorAction<OmahaResponse> collector_action;
BondActions(&action, &collector_action);
OmahaResponse response;
@@ -367,7 +656,7 @@
FakeSystemState fake_system_state;
OmahaRequestAction omaha_request_action(
- &fake_system_state, nullptr, nullptr, false);
+ &fake_system_state, nullptr, nullptr, false, "");
EXPECT_EQ(ErrorCode::kOmahaRequestError,
GetErrorCodeForAction(&omaha_request_action, ErrorCode::kError));
OmahaResponseHandlerAction omaha_response_handler_action(&fake_system_state_);
@@ -461,23 +750,23 @@
namespace {
// Actions that will be built as part of an update check.
-const string kUpdateActionTypes[] = { // NOLINT(runtime/string)
- OmahaRequestAction::StaticType(),
- OmahaResponseHandlerAction::StaticType(),
- UpdateBootFlagsAction::StaticType(),
- OmahaRequestAction::StaticType(),
- DownloadAction::StaticType(),
- OmahaRequestAction::StaticType(),
- FilesystemVerifierAction::StaticType(),
- PostinstallRunnerAction::StaticType(),
- OmahaRequestAction::StaticType()};
+vector<string> GetUpdateActionTypes() {
+ return {OmahaRequestAction::StaticType(),
+ OmahaResponseHandlerAction::StaticType(),
+ UpdateBootFlagsAction::StaticType(),
+ OmahaRequestAction::StaticType(),
+ DownloadAction::StaticType(),
+ OmahaRequestAction::StaticType(),
+ FilesystemVerifierAction::StaticType(),
+ PostinstallRunnerAction::StaticType(),
+ OmahaRequestAction::StaticType()};
+}
// Actions that will be built as part of a user-initiated rollback.
-const string kRollbackActionTypes[] = {
- // NOLINT(runtime/string)
- InstallPlanAction::StaticType(),
- PostinstallRunnerAction::StaticType(),
-};
+vector<string> GetRollbackActionTypes() {
+ return {InstallPlanAction::StaticType(),
+ PostinstallRunnerAction::StaticType()};
+}
const StagingSchedule kValidStagingSchedule = {
{4, 10}, {10, 40}, {19, 70}, {26, 100}};
@@ -487,8 +776,8 @@
void UpdateAttempterTest::UpdateTestStart() {
attempter_.set_http_response_code(200);
- // Expect that the device policy is loaded by the UpdateAttempter at some
- // point by calling RefreshDevicePolicy.
+ // Expect that the device policy is loaded by the |UpdateAttempter| at some
+ // point by calling |RefreshDevicePolicy()|.
auto device_policy = std::make_unique<policy::MockDevicePolicy>();
EXPECT_CALL(*device_policy, LoadPolicy())
.Times(testing::AtLeast(1))
@@ -498,15 +787,15 @@
{
InSequence s;
- for (size_t i = 0; i < arraysize(kUpdateActionTypes); ++i) {
+ for (const auto& update_action_type : GetUpdateActionTypes()) {
EXPECT_CALL(*processor_,
EnqueueAction(Pointee(
- Property(&AbstractAction::Type, kUpdateActionTypes[i]))));
+ Property(&AbstractAction::Type, update_action_type))));
}
EXPECT_CALL(*processor_, StartProcessing());
}
- attempter_.Update("", "", "", "", false, false, false);
+ attempter_.Update("", "", "", "", false, false, 0, false, false);
loop_.PostTask(FROM_HERE,
base::Bind(&UpdateAttempterTest::UpdateTestVerify,
base::Unretained(this)));
@@ -557,10 +846,10 @@
if (is_rollback_allowed) {
InSequence s;
- for (size_t i = 0; i < arraysize(kRollbackActionTypes); ++i) {
+ for (const auto& rollback_action_type : GetRollbackActionTypes()) {
EXPECT_CALL(*processor_,
- EnqueueAction(Pointee(Property(&AbstractAction::Type,
- kRollbackActionTypes[i]))));
+ EnqueueAction(Pointee(
+ Property(&AbstractAction::Type, rollback_action_type))));
}
EXPECT_CALL(*processor_, StartProcessing());
@@ -626,8 +915,8 @@
TEST_F(UpdateAttempterTest, PingOmahaTest) {
EXPECT_FALSE(attempter_.waiting_for_scheduled_check_);
- EXPECT_FALSE(attempter_.schedule_updates_called());
- // Disable scheduling of subsequnet checks; we're using the DefaultPolicy in
+ EXPECT_FALSE(attempter_.WasScheduleUpdatesCalled());
+ // Disable scheduling of subsequent checks; we're using the |DefaultPolicy| in
// testing, which is more permissive than we want to handle here.
attempter_.DisableScheduleUpdates();
loop_.PostTask(FROM_HERE,
@@ -635,7 +924,7 @@
base::Unretained(this)));
brillo::MessageLoopRunMaxIterations(&loop_, 100);
EXPECT_EQ(UpdateStatus::UPDATED_NEED_REBOOT, attempter_.status());
- EXPECT_TRUE(attempter_.schedule_updates_called());
+ EXPECT_TRUE(attempter_.WasScheduleUpdatesCalled());
}
TEST_F(UpdateAttempterTest, CreatePendingErrorEventTest) {
@@ -701,12 +990,12 @@
void UpdateAttempterTest::P2PNotEnabledStart() {
// If P2P is not enabled, check that we do not attempt housekeeping
- // and do not convey that p2p is to be used.
+ // and do not convey that P2P is to be used.
MockP2PManager mock_p2p_manager;
fake_system_state_.set_p2p_manager(&mock_p2p_manager);
mock_p2p_manager.fake().SetP2PEnabled(false);
EXPECT_CALL(mock_p2p_manager, PerformHousekeeping()).Times(0);
- attempter_.Update("", "", "", "", false, false, false);
+ attempter_.Update("", "", "", "", false, false, 0, false, false);
EXPECT_FALSE(actual_using_p2p_for_downloading_);
EXPECT_FALSE(actual_using_p2p_for_sharing());
ScheduleQuitMainLoop();
@@ -720,15 +1009,15 @@
}
void UpdateAttempterTest::P2PEnabledStartingFailsStart() {
- // If p2p is enabled, but starting it fails ensure we don't do
- // any housekeeping and do not convey that p2p should be used.
+ // If P2P is enabled, but starting it fails ensure we don't do
+ // any housekeeping and do not convey that P2P should be used.
MockP2PManager mock_p2p_manager;
fake_system_state_.set_p2p_manager(&mock_p2p_manager);
mock_p2p_manager.fake().SetP2PEnabled(true);
mock_p2p_manager.fake().SetEnsureP2PRunningResult(false);
mock_p2p_manager.fake().SetPerformHousekeepingResult(false);
EXPECT_CALL(mock_p2p_manager, PerformHousekeeping()).Times(0);
- attempter_.Update("", "", "", "", false, false, false);
+ attempter_.Update("", "", "", "", false, false, 0, false, false);
EXPECT_FALSE(actual_using_p2p_for_downloading());
EXPECT_FALSE(actual_using_p2p_for_sharing());
ScheduleQuitMainLoop();
@@ -743,15 +1032,15 @@
}
void UpdateAttempterTest::P2PEnabledHousekeepingFailsStart() {
- // If p2p is enabled, starting it works but housekeeping fails, ensure
- // we do not convey p2p is to be used.
+ // If P2P is enabled, starting it works but housekeeping fails, ensure
+ // we do not convey P2P is to be used.
MockP2PManager mock_p2p_manager;
fake_system_state_.set_p2p_manager(&mock_p2p_manager);
mock_p2p_manager.fake().SetP2PEnabled(true);
mock_p2p_manager.fake().SetEnsureP2PRunningResult(true);
mock_p2p_manager.fake().SetPerformHousekeepingResult(false);
EXPECT_CALL(mock_p2p_manager, PerformHousekeeping());
- attempter_.Update("", "", "", "", false, false, false);
+ attempter_.Update("", "", "", "", false, false, 0, false, false);
EXPECT_FALSE(actual_using_p2p_for_downloading());
EXPECT_FALSE(actual_using_p2p_for_sharing());
ScheduleQuitMainLoop();
@@ -768,12 +1057,12 @@
MockP2PManager mock_p2p_manager;
fake_system_state_.set_p2p_manager(&mock_p2p_manager);
// If P2P is enabled and starting it works, check that we performed
- // housekeeping and that we convey p2p should be used.
+ // housekeeping and that we convey P2P should be used.
mock_p2p_manager.fake().SetP2PEnabled(true);
mock_p2p_manager.fake().SetEnsureP2PRunningResult(true);
mock_p2p_manager.fake().SetPerformHousekeepingResult(true);
EXPECT_CALL(mock_p2p_manager, PerformHousekeeping());
- attempter_.Update("", "", "", "", false, false, false);
+ attempter_.Update("", "", "", "", false, false, 0, false, false);
EXPECT_TRUE(actual_using_p2p_for_downloading());
EXPECT_TRUE(actual_using_p2p_for_sharing());
ScheduleQuitMainLoop();
@@ -791,7 +1080,7 @@
fake_system_state_.set_p2p_manager(&mock_p2p_manager);
// For an interactive check, if P2P is enabled and starting it
// works, check that we performed housekeeping and that we convey
- // p2p should be used for sharing but NOT for downloading.
+ // P2P should be used for sharing but NOT for downloading.
mock_p2p_manager.fake().SetP2PEnabled(true);
mock_p2p_manager.fake().SetEnsureP2PRunningResult(true);
mock_p2p_manager.fake().SetPerformHousekeepingResult(true);
@@ -802,6 +1091,8 @@
"",
false,
false,
+ /*rollback_allowed_milestones=*/0,
+ false,
/*interactive=*/true);
EXPECT_FALSE(actual_using_p2p_for_downloading());
EXPECT_TRUE(actual_using_p2p_for_sharing());
@@ -832,7 +1123,7 @@
attempter_.policy_provider_.reset(
new policy::PolicyProvider(std::move(device_policy)));
- attempter_.Update("", "", "", "", false, false, false);
+ attempter_.Update("", "", "", "", false, false, 0, false, false);
EXPECT_EQ(scatter_factor_in_seconds, attempter_.scatter_factor_.InSeconds());
ScheduleQuitMainLoop();
@@ -870,7 +1161,7 @@
attempter_.policy_provider_.reset(
new policy::PolicyProvider(std::move(device_policy)));
- attempter_.Update("", "", "", "", false, false, false);
+ attempter_.Update("", "", "", "", false, false, 0, false, false);
EXPECT_EQ(scatter_factor_in_seconds, attempter_.scatter_factor_.InSeconds());
// Make sure the file still exists.
@@ -886,7 +1177,7 @@
// However, if the count is already 0, it's not decremented. Test that.
initial_value = 0;
EXPECT_TRUE(fake_prefs.SetInt64(kPrefsUpdateCheckCount, initial_value));
- attempter_.Update("", "", "", "", false, false, false);
+ attempter_.Update("", "", "", "", false, false, 0, false, false);
EXPECT_TRUE(fake_prefs.Exists(kPrefsUpdateCheckCount));
EXPECT_TRUE(fake_prefs.GetInt64(kPrefsUpdateCheckCount, &new_value));
EXPECT_EQ(initial_value, new_value);
@@ -939,6 +1230,8 @@
"",
false,
false,
+ /*rollback_allowed_milestones=*/0,
+ false,
/*interactive=*/true);
EXPECT_EQ(scatter_factor_in_seconds, attempter_.scatter_factor_.InSeconds());
@@ -991,7 +1284,7 @@
FakePrefs fake_prefs;
SetUpStagingTest(kValidStagingSchedule, &fake_prefs);
- attempter_.Update("", "", "", "", false, false, false);
+ attempter_.Update("", "", "", "", false, false, 0, false, false);
// Check that prefs have the correct values.
int64_t update_count;
EXPECT_TRUE(fake_prefs.GetInt64(kPrefsUpdateCheckCount, &update_count));
@@ -1048,7 +1341,8 @@
FakePrefs fake_prefs;
SetUpStagingTest(kValidStagingSchedule, &fake_prefs);
- attempter_.Update("", "", "", "", false, false, /* interactive = */ true);
+ attempter_.Update(
+ "", "", "", "", false, false, 0, false, /* interactive = */ true);
CheckStagingOff();
ScheduleQuitMainLoop();
@@ -1068,7 +1362,8 @@
FakePrefs fake_prefs;
SetUpStagingTest(kValidStagingSchedule, &fake_prefs);
- attempter_.Update("", "", "", "", false, false, /* interactive = */ true);
+ attempter_.Update(
+ "", "", "", "", false, false, 0, false, /* interactive = */ true);
CheckStagingOff();
ScheduleQuitMainLoop();
@@ -1172,33 +1467,191 @@
EXPECT_FALSE(attempter_.IsAnyUpdateSourceAllowed());
}
-TEST_F(UpdateAttempterTest, CheckForUpdateAUDlcTest) {
- fake_system_state_.fake_hardware()->SetIsOfficialBuild(true);
- fake_system_state_.fake_hardware()->SetAreDevFeaturesEnabled(false);
+// TODO(kimjae): Follow testing pattern with params for |CheckForInstall()|.
+// When adding, remove older tests related to |CheckForInstall()|.
+TEST_F(UpdateAttempterTest, CheckForInstallNotIdleFails) {
+ for (const auto status : kNonIdleUpdateStatuses) {
+ // GIVEN a non-idle status.
+ attempter_.status_ = status;
- const string dlc_module_id = "a_dlc_module_id";
- vector<string> dlc_module_ids = {dlc_module_id};
- ON_CALL(mock_dlcservice_, GetInstalled(testing::_))
- .WillByDefault(DoAll(testing::SetArgPointee<0>(dlc_module_ids),
- testing::Return(true)));
-
- attempter_.CheckForUpdate("", "autest", UpdateAttemptFlags::kNone);
- EXPECT_EQ(attempter_.dlc_module_ids_.size(), 1);
- EXPECT_EQ(attempter_.dlc_module_ids_[0], dlc_module_id);
+ EXPECT_FALSE(attempter_.CheckForInstall({}, ""));
+ }
}
-TEST_F(UpdateAttempterTest, CheckForUpdateAUTest) {
- fake_system_state_.fake_hardware()->SetIsOfficialBuild(true);
- fake_system_state_.fake_hardware()->SetAreDevFeaturesEnabled(false);
- attempter_.CheckForUpdate("", "autest", UpdateAttemptFlags::kNone);
- EXPECT_EQ(constants::kOmahaDefaultAUTestURL, attempter_.forced_omaha_url());
+TEST_F(UpdateAttempterTest, CheckForUpdateNotIdleFails) {
+ for (const auto status : kNonIdleUpdateStatuses) {
+ // GIVEN a non-idle status.
+ cfu_params_.status = status;
+
+ // THEN |ScheduleUpdates()| should not be called.
+ cfu_params_.should_schedule_updates_be_called = false;
+ // THEN result should indicate failure.
+ cfu_params_.expected_result = false;
+
+ TestCheckForUpdate();
+ }
}
-TEST_F(UpdateAttempterTest, CheckForUpdateScheduledAUTest) {
- fake_system_state_.fake_hardware()->SetIsOfficialBuild(true);
- fake_system_state_.fake_hardware()->SetAreDevFeaturesEnabled(false);
- attempter_.CheckForUpdate("", "autest-scheduled", UpdateAttemptFlags::kNone);
- EXPECT_EQ(constants::kOmahaDefaultAUTestURL, attempter_.forced_omaha_url());
+TEST_F(UpdateAttempterTest, CheckForUpdateOfficalBuildClearsSource) {
+ // GIVEN a official build.
+
+ // THEN we expect forced app version + forced omaha url to be cleared.
+
+ TestCheckForUpdate();
+}
+
+TEST_F(UpdateAttempterTest, CheckForUpdateUnofficialBuildChangesSource) {
+ // GIVEN a nonofficial build with dev features enabled.
+ cfu_params_.is_official_build = false;
+ cfu_params_.are_dev_features_enabled = true;
+
+ // THEN the forced app version + forced omaha url changes based on input.
+ cfu_params_.expected_forced_app_version = cfu_params_.app_version;
+ cfu_params_.expected_forced_omaha_url = cfu_params_.omaha_url;
+
+ TestCheckForUpdate();
+}
+
+TEST_F(UpdateAttempterTest, CheckForUpdateOfficialBuildScheduledAUTest) {
+ // GIVEN a scheduled autest omaha url.
+ cfu_params_.omaha_url = "autest-scheduled";
+
+ // THEN forced app version is cleared.
+ // THEN forced omaha url changes to default constant.
+ cfu_params_.expected_forced_omaha_url = constants::kOmahaDefaultAUTestURL;
+
+ TestCheckForUpdate();
+}
+
+TEST_F(UpdateAttempterTest, CheckForUpdateUnofficialBuildScheduledAUTest) {
+ // GIVEN a scheduled autest omaha url.
+ cfu_params_.omaha_url = "autest-scheduled";
+ // GIVEN a nonofficial build with dev features enabled.
+ cfu_params_.is_official_build = false;
+ cfu_params_.are_dev_features_enabled = true;
+
+ // THEN forced app version changes based on input.
+ cfu_params_.expected_forced_app_version = cfu_params_.app_version;
+ // THEN forced omaha url changes to default constant.
+ cfu_params_.expected_forced_omaha_url = constants::kOmahaDefaultAUTestURL;
+
+ TestCheckForUpdate();
+}
+
+TEST_F(UpdateAttempterTest, CheckForUpdateOfficialBuildAUTest) {
+ // GIVEN a autest omaha url.
+ cfu_params_.omaha_url = "autest";
+
+ // THEN forced app version is cleared.
+ // THEN forced omaha url changes to default constant.
+ cfu_params_.expected_forced_omaha_url = constants::kOmahaDefaultAUTestURL;
+
+ TestCheckForUpdate();
+}
+
+TEST_F(UpdateAttempterTest, CheckForUpdateUnofficialBuildAUTest) {
+ // GIVEN a autest omaha url.
+ cfu_params_.omaha_url = "autest";
+ // GIVEN a nonofficial build with dev features enabled.
+ cfu_params_.is_official_build = false;
+ cfu_params_.are_dev_features_enabled = true;
+
+ // THEN forced app version changes based on input.
+ cfu_params_.expected_forced_app_version = cfu_params_.app_version;
+ // THEN forced omaha url changes to default constant.
+ cfu_params_.expected_forced_omaha_url = constants::kOmahaDefaultAUTestURL;
+
+ TestCheckForUpdate();
+}
+
+TEST_F(UpdateAttempterTest,
+ CheckForUpdateNonInteractiveOfficialBuildScheduledAUTest) {
+ // GIVEN a scheduled autest omaha url.
+ cfu_params_.omaha_url = "autest-scheduled";
+ // GIVEN a noninteractive update.
+ cfu_params_.flags = UpdateAttemptFlags::kFlagNonInteractive;
+
+ // THEN forced app version is cleared.
+ // THEN forced omaha url changes to default constant.
+ cfu_params_.expected_forced_omaha_url = constants::kOmahaDefaultAUTestURL;
+
+ TestCheckForUpdate();
+}
+
+TEST_F(UpdateAttempterTest,
+ CheckForUpdateNonInteractiveUnofficialBuildScheduledAUTest) {
+ // GIVEN a scheduled autest omaha url.
+ cfu_params_.omaha_url = "autest-scheduled";
+ // GIVEN a noninteractive update.
+ cfu_params_.flags = UpdateAttemptFlags::kFlagNonInteractive;
+ // GIVEN a nonofficial build with dev features enabled.
+ cfu_params_.is_official_build = false;
+ cfu_params_.are_dev_features_enabled = true;
+
+ // THEN forced app version changes based on input.
+ cfu_params_.expected_forced_app_version = cfu_params_.app_version;
+ // THEN forced omaha url changes to default constant.
+ cfu_params_.expected_forced_omaha_url = constants::kOmahaDefaultAUTestURL;
+
+ TestCheckForUpdate();
+}
+
+TEST_F(UpdateAttempterTest, CheckForUpdateNonInteractiveOfficialBuildAUTest) {
+ // GIVEN a autest omaha url.
+ cfu_params_.omaha_url = "autest";
+ // GIVEN a noninteractive update.
+ cfu_params_.flags = UpdateAttemptFlags::kFlagNonInteractive;
+
+ // THEN forced app version is cleared.
+ // THEN forced omaha url changes to default constant.
+ cfu_params_.expected_forced_omaha_url = constants::kOmahaDefaultAUTestURL;
+
+ TestCheckForUpdate();
+}
+
+TEST_F(UpdateAttempterTest, CheckForUpdateNonInteractiveUnofficialBuildAUTest) {
+ // GIVEN a autest omaha url.
+ cfu_params_.omaha_url = "autest";
+ // GIVEN a noninteractive update.
+ cfu_params_.flags = UpdateAttemptFlags::kFlagNonInteractive;
+ // GIVEN a nonofficial build with dev features enabled.
+ cfu_params_.is_official_build = false;
+ cfu_params_.are_dev_features_enabled = true;
+
+ // THEN forced app version changes based on input.
+ cfu_params_.expected_forced_app_version = cfu_params_.app_version;
+ // THEN forced omaha url changes to default constant.
+ cfu_params_.expected_forced_omaha_url = constants::kOmahaDefaultAUTestURL;
+
+ TestCheckForUpdate();
+}
+
+TEST_F(UpdateAttempterTest, CheckForUpdateMissingForcedCallback1) {
+ // GIVEN a official build.
+ // GIVEN forced callback is not set.
+ attempter_.set_forced_update_pending_callback(nullptr);
+
+ // THEN we expect forced app version + forced omaha url to be cleared.
+ // THEN |ScheduleUpdates()| should not be called.
+ cfu_params_.should_schedule_updates_be_called = false;
+
+ TestCheckForUpdate();
+}
+
+TEST_F(UpdateAttempterTest, CheckForUpdateMissingForcedCallback2) {
+ // GIVEN a nonofficial build with dev features enabled.
+ cfu_params_.is_official_build = false;
+ cfu_params_.are_dev_features_enabled = true;
+ // GIVEN forced callback is not set.
+ attempter_.set_forced_update_pending_callback(nullptr);
+
+ // THEN the forced app version + forced omaha url changes based on input.
+ cfu_params_.expected_forced_app_version = cfu_params_.app_version;
+ cfu_params_.expected_forced_omaha_url = cfu_params_.omaha_url;
+ // THEN |ScheduleUpdates()| should not be called.
+ cfu_params_.should_schedule_updates_be_called = false;
+
+ TestCheckForUpdate();
}
TEST_F(UpdateAttempterTest, CheckForInstallTest) {
@@ -1238,11 +1691,13 @@
}
TEST_F(UpdateAttempterTest, TargetVersionPrefixSetAndReset) {
- attempter_.CalculateUpdateParams("", "", "", "1234", false, false, false);
+ attempter_.CalculateUpdateParams(
+ "", "", "", "1234", false, false, 4, false, false);
EXPECT_EQ("1234",
fake_system_state_.request_params()->target_version_prefix());
- attempter_.CalculateUpdateParams("", "", "", "", false, false, false);
+ attempter_.CalculateUpdateParams(
+ "", "", "", "", false, 4, false, false, false);
EXPECT_TRUE(
fake_system_state_.request_params()->target_version_prefix().empty());
}
@@ -1253,18 +1708,26 @@
"",
"1234",
/*rollback_allowed=*/true,
+ /*rollback_data_save_requested=*/false,
+ /*rollback_allowed_milestones=*/4,
false,
false);
EXPECT_TRUE(fake_system_state_.request_params()->rollback_allowed());
+ EXPECT_EQ(4,
+ fake_system_state_.request_params()->rollback_allowed_milestones());
attempter_.CalculateUpdateParams("",
"",
"",
"1234",
/*rollback_allowed=*/false,
+ /*rollback_data_save_requested=*/false,
+ /*rollback_allowed_milestones=*/4,
false,
false);
EXPECT_FALSE(fake_system_state_.request_params()->rollback_allowed());
+ EXPECT_EQ(4,
+ fake_system_state_.request_params()->rollback_allowed_milestones());
}
TEST_F(UpdateAttempterTest, UpdateDeferredByPolicyTest) {
@@ -1285,8 +1748,6 @@
EXPECT_EQ(UpdateStatus::UPDATE_AVAILABLE, status.status);
EXPECT_TRUE(attempter_.install_plan_);
EXPECT_EQ(attempter_.install_plan_->version, status.new_version);
- EXPECT_EQ(attempter_.install_plan_->system_version,
- status.new_system_version);
EXPECT_EQ(attempter_.install_plan_->payloads[0].size,
status.new_size_bytes);
}
@@ -1307,18 +1768,17 @@
attempter_.GetStatus(&status);
EXPECT_EQ(UpdateStatus::REPORTING_ERROR_EVENT, status.status);
EXPECT_EQ(response_action.install_plan_.version, status.new_version);
- EXPECT_EQ(response_action.install_plan_.system_version,
- status.new_system_version);
EXPECT_EQ(response_action.install_plan_.payloads[0].size,
status.new_size_bytes);
}
}
TEST_F(UpdateAttempterTest, UpdateIsNotRunningWhenUpdateAvailable) {
- EXPECT_FALSE(attempter_.IsUpdateRunningOrScheduled());
+ // Default construction for |waiting_for_scheduled_check_| is false.
+ EXPECT_FALSE(attempter_.IsBusyOrUpdateScheduled());
// Verify in-progress update with UPDATE_AVAILABLE is running
attempter_.status_ = UpdateStatus::UPDATE_AVAILABLE;
- EXPECT_TRUE(attempter_.IsUpdateRunningOrScheduled());
+ EXPECT_TRUE(attempter_.IsBusyOrUpdateScheduled());
}
TEST_F(UpdateAttempterTest, UpdateAttemptFlagsCachedAtUpdateStart) {
@@ -1384,7 +1844,7 @@
SetRollbackHappened(false))
.Times(expected_reset ? 1 : 0);
attempter_.policy_provider_ = std::move(mock_policy_provider);
- attempter_.Update("", "", "", "", false, false, false);
+ attempter_.Update("", "", "", "", false, false, 0, false, false);
ScheduleQuitMainLoop();
}
@@ -1566,4 +2026,545 @@
attempter_.ProcessingDone(nullptr, ErrorCode::kSuccess);
}
+TEST_F(UpdateAttempterTest, ProcessingDoneUpdated) {
+ // GIVEN an update finished.
+
+ // THEN update_engine should call update completion.
+ pd_params_.should_update_completed_be_called = true;
+ // THEN need reboot since update applied.
+ pd_params_.expected_exit_status = UpdateStatus::UPDATED_NEED_REBOOT;
+ // THEN install indication should be false.
+
+ TestProcessingDone();
+}
+
+TEST_F(UpdateAttempterTest, ProcessingDoneUpdatedDlcFilter) {
+ // GIVEN an update finished.
+ // GIVEN DLC |AppParams| list.
+ auto dlc_1 = "dlc_1", dlc_2 = "dlc_2";
+ pd_params_.dlc_apps_params = {{dlc_1, {.name = dlc_1, .updated = false}},
+ {dlc_2, {.name = dlc_2}}};
+
+ // THEN update_engine should call update completion.
+ pd_params_.should_update_completed_be_called = true;
+ pd_params_.args_to_update_completed = {dlc_2};
+ // THEN need reboot since update applied.
+ pd_params_.expected_exit_status = UpdateStatus::UPDATED_NEED_REBOOT;
+ // THEN install indication should be false.
+
+ TestProcessingDone();
+}
+
+TEST_F(UpdateAttempterTest, ProcessingDoneInstalled) {
+ // GIVEN an install finished.
+ pd_params_.is_install = true;
+
+ // THEN update_engine should call install completion.
+ pd_params_.should_install_completed_be_called = true;
+ // THEN go idle.
+ // THEN install indication should be false.
+
+ TestProcessingDone();
+}
+
+TEST_F(UpdateAttempterTest, ProcessingDoneInstalledDlcFilter) {
+ // GIVEN an install finished.
+ pd_params_.is_install = true;
+ // GIVEN DLC |AppParams| list.
+ auto dlc_1 = "dlc_1", dlc_2 = "dlc_2";
+ pd_params_.dlc_apps_params = {{dlc_1, {.name = dlc_1, .updated = false}},
+ {dlc_2, {.name = dlc_2}}};
+
+ // THEN update_engine should call install completion.
+ pd_params_.should_install_completed_be_called = true;
+ pd_params_.args_to_install_completed = {dlc_2};
+ // THEN go idle.
+ // THEN install indication should be false.
+
+ TestProcessingDone();
+}
+
+TEST_F(UpdateAttempterTest, ProcessingDoneInstallReportingError) {
+ // GIVEN an install finished.
+ pd_params_.is_install = true;
+ // GIVEN a reporting error occurred.
+ pd_params_.status = UpdateStatus::REPORTING_ERROR_EVENT;
+
+ // THEN update_engine should not call install completion.
+ // THEN go idle.
+ // THEN install indication should be false.
+
+ TestProcessingDone();
+}
+
+TEST_F(UpdateAttempterTest, ProcessingDoneNoUpdate) {
+ // GIVEN an update finished.
+  // GIVEN an action error occurred.
+ pd_params_.code = ErrorCode::kNoUpdate;
+
+ // THEN update_engine should not call update completion.
+ // THEN go idle.
+ // THEN install indication should be false.
+
+ TestProcessingDone();
+}
+
+TEST_F(UpdateAttempterTest, ProcessingDoneNoInstall) {
+ // GIVEN an install finished.
+ pd_params_.is_install = true;
+  // GIVEN an action error occurred.
+ pd_params_.code = ErrorCode::kNoUpdate;
+
+ // THEN update_engine should not call install completion.
+ // THEN go idle.
+ // THEN install indication should be false.
+
+ TestProcessingDone();
+}
+
+TEST_F(UpdateAttempterTest, ProcessingDoneUpdateError) {
+ // GIVEN an update finished.
+  // GIVEN an action error occurred.
+ pd_params_.code = ErrorCode::kError;
+ // GIVEN an event error is set.
+ attempter_.error_event_.reset(new OmahaEvent(OmahaEvent::kTypeUpdateComplete,
+ OmahaEvent::kResultError,
+ ErrorCode::kError));
+
+  // THEN indicate an error event.
+ pd_params_.expected_exit_status = UpdateStatus::REPORTING_ERROR_EVENT;
+ // THEN install indication should be false.
+
+ // THEN update_engine should not call update completion.
+ // THEN expect critical actions of |ScheduleErrorEventAction()|.
+ EXPECT_CALL(*processor_, EnqueueAction(Pointee(_))).Times(1);
+ EXPECT_CALL(*processor_, StartProcessing()).Times(1);
+ // THEN |ScheduleUpdates()| will be called next |ProcessingDone()| so skip.
+ pd_params_.should_schedule_updates_be_called = false;
+
+ TestProcessingDone();
+}
+
+TEST_F(UpdateAttempterTest, ProcessingDoneInstallError) {
+ // GIVEN an install finished.
+ pd_params_.is_install = true;
+  // GIVEN an action error occurred.
+ pd_params_.code = ErrorCode::kError;
+ // GIVEN an event error is set.
+ attempter_.error_event_.reset(new OmahaEvent(OmahaEvent::kTypeUpdateComplete,
+ OmahaEvent::kResultError,
+ ErrorCode::kError));
+
+  // THEN indicate an error event.
+ pd_params_.expected_exit_status = UpdateStatus::REPORTING_ERROR_EVENT;
+ // THEN install indication should be false.
+
+ // THEN update_engine should not call install completion.
+ // THEN expect critical actions of |ScheduleErrorEventAction()|.
+ EXPECT_CALL(*processor_, EnqueueAction(Pointee(_))).Times(1);
+ EXPECT_CALL(*processor_, StartProcessing()).Times(1);
+ // THEN |ScheduleUpdates()| will be called next |ProcessingDone()| so skip.
+ pd_params_.should_schedule_updates_be_called = false;
+
+ TestProcessingDone();
+}
+
+void UpdateAttempterTest::UpdateToQuickFixBuildStart(bool set_token) {
+  // Tests that check whether |device_quick_fix_build_token| arrives when
+  // policy is set and the device is enterprise enrolled based on |set_token|.
+ string token = set_token ? "some_token" : "";
+ auto device_policy = std::make_unique<policy::MockDevicePolicy>();
+ fake_system_state_.set_device_policy(device_policy.get());
+ EXPECT_CALL(*device_policy, LoadPolicy()).WillRepeatedly(Return(true));
+
+ if (set_token)
+ EXPECT_CALL(*device_policy, GetDeviceQuickFixBuildToken(_))
+ .WillOnce(DoAll(SetArgPointee<0>(token), Return(true)));
+ else
+ EXPECT_CALL(*device_policy, GetDeviceQuickFixBuildToken(_))
+ .WillOnce(Return(false));
+ attempter_.policy_provider_.reset(
+ new policy::PolicyProvider(std::move(device_policy)));
+ attempter_.Update("", "", "", "", false, false, 0, false, false);
+
+ EXPECT_EQ(token, attempter_.omaha_request_params_->autoupdate_token());
+ ScheduleQuitMainLoop();
+}
+
+TEST_F(UpdateAttempterTest,
+ QuickFixTokenWhenDeviceIsEnterpriseEnrolledAndPolicyIsSet) {
+ loop_.PostTask(FROM_HERE,
+ base::Bind(&UpdateAttempterTest::UpdateToQuickFixBuildStart,
+ base::Unretained(this),
+ /*set_token=*/true));
+ loop_.Run();
+}
+
+TEST_F(UpdateAttempterTest, EmptyQuickFixToken) {
+ loop_.PostTask(FROM_HERE,
+ base::Bind(&UpdateAttempterTest::UpdateToQuickFixBuildStart,
+ base::Unretained(this),
+ /*set_token=*/false));
+ loop_.Run();
+}
+
+TEST_F(UpdateAttempterTest, ScheduleUpdateSpamHandlerTest) {
+ EXPECT_CALL(mock_update_manager_, AsyncPolicyRequestUpdateCheckAllowed(_, _))
+ .Times(1);
+ EXPECT_TRUE(attempter_.ScheduleUpdates());
+ // Now there is an update scheduled which means that all subsequent
+ // |ScheduleUpdates()| should fail.
+ EXPECT_FALSE(attempter_.ScheduleUpdates());
+ EXPECT_FALSE(attempter_.ScheduleUpdates());
+ EXPECT_FALSE(attempter_.ScheduleUpdates());
+}
+
+// Critical tests to always make sure that an update is scheduled. The
+// following unittest(s) try to cover the correctness in synergy between
+// |UpdateAttempter| and |UpdateManager|. Also it is good to remember the
+// actions that happen in the flow when |UpdateAttempter| gets called back on
+// |OnUpdateScheduled()| -> (various cases which lead to) -> |ProcessingDone()|
+void UpdateAttempterTest::TestOnUpdateScheduled() {
+ // Setup
+ attempter_.SetWaitingForScheduledCheck(true);
+ attempter_.DisableUpdate();
+ attempter_.DisableScheduleUpdates();
+
+ // Invocation
+ attempter_.OnUpdateScheduled(ous_params_.status, ous_params_.params);
+
+ // Verify
+ EXPECT_EQ(ous_params_.exit_status, attempter_.status());
+ EXPECT_EQ(ous_params_.should_schedule_updates_be_called,
+ attempter_.WasScheduleUpdatesCalled());
+ EXPECT_EQ(ous_params_.should_update_be_called, attempter_.WasUpdateCalled());
+}
+
+TEST_F(UpdateAttempterTest, OnUpdatesScheduledFailed) {
+ // GIVEN failed status.
+
+ // THEN update should be scheduled.
+ ous_params_.should_schedule_updates_be_called = true;
+
+ TestOnUpdateScheduled();
+}
+
+TEST_F(UpdateAttempterTest, OnUpdatesScheduledAskMeAgainLater) {
+ // GIVEN ask me again later status.
+ ous_params_.status = EvalStatus::kAskMeAgainLater;
+
+ // THEN update should be scheduled.
+ ous_params_.should_schedule_updates_be_called = true;
+
+ TestOnUpdateScheduled();
+}
+
+TEST_F(UpdateAttempterTest, OnUpdatesScheduledContinue) {
+ // GIVEN continue status.
+ ous_params_.status = EvalStatus::kContinue;
+
+ // THEN update should be scheduled.
+ ous_params_.should_schedule_updates_be_called = true;
+
+ TestOnUpdateScheduled();
+}
+
+TEST_F(UpdateAttempterTest, OnUpdatesScheduledSucceededButUpdateDisabledFails) {
+ // GIVEN updates disabled.
+ ous_params_.params = {.updates_enabled = false};
+ // GIVEN succeeded status.
+ ous_params_.status = EvalStatus::kSucceeded;
+
+ // THEN update should not be scheduled.
+
+ TestOnUpdateScheduled();
+}
+
+TEST_F(UpdateAttempterTest, OnUpdatesScheduledSucceeded) {
+ // GIVEN updates enabled.
+ ous_params_.params = {.updates_enabled = true};
+ // GIVEN succeeded status.
+ ous_params_.status = EvalStatus::kSucceeded;
+
+ // THEN update should be called indicating status change.
+ ous_params_.exit_status = UpdateStatus::CHECKING_FOR_UPDATE;
+ ous_params_.should_update_be_called = true;
+
+ TestOnUpdateScheduled();
+}
+
+TEST_F(UpdateAttempterTest, IsEnterpriseRollbackInGetStatusDefault) {
+ UpdateEngineStatus status;
+ attempter_.GetStatus(&status);
+ EXPECT_FALSE(status.is_enterprise_rollback);
+}
+
+TEST_F(UpdateAttempterTest, IsEnterpriseRollbackInGetStatusFalse) {
+ attempter_.install_plan_.reset(new InstallPlan);
+ attempter_.install_plan_->is_rollback = false;
+
+ UpdateEngineStatus status;
+ attempter_.GetStatus(&status);
+ EXPECT_FALSE(status.is_enterprise_rollback);
+}
+
+TEST_F(UpdateAttempterTest, IsEnterpriseRollbackInGetStatusTrue) {
+ attempter_.install_plan_.reset(new InstallPlan);
+ attempter_.install_plan_->is_rollback = true;
+
+ UpdateEngineStatus status;
+ attempter_.GetStatus(&status);
+ EXPECT_TRUE(status.is_enterprise_rollback);
+}
+
+TEST_F(UpdateAttempterTest, PowerwashInGetStatusDefault) {
+ UpdateEngineStatus status;
+ attempter_.GetStatus(&status);
+ EXPECT_FALSE(status.will_powerwash_after_reboot);
+}
+
+TEST_F(UpdateAttempterTest, PowerwashInGetStatusTrueBecausePowerwashRequired) {
+ attempter_.install_plan_.reset(new InstallPlan);
+ attempter_.install_plan_->powerwash_required = true;
+
+ UpdateEngineStatus status;
+ attempter_.GetStatus(&status);
+ EXPECT_TRUE(status.will_powerwash_after_reboot);
+}
+
+TEST_F(UpdateAttempterTest, PowerwashInGetStatusTrueBecauseRollback) {
+ attempter_.install_plan_.reset(new InstallPlan);
+ attempter_.install_plan_->is_rollback = true;
+
+ UpdateEngineStatus status;
+ attempter_.GetStatus(&status);
+ EXPECT_TRUE(status.will_powerwash_after_reboot);
+}
+
+TEST_F(UpdateAttempterTest, FutureEolTest) {
+ EolDate eol_date = std::numeric_limits<int64_t>::max();
+ EXPECT_CALL(*prefs_, Exists(kPrefsOmahaEolDate)).WillOnce(Return(true));
+ EXPECT_CALL(*prefs_, GetString(kPrefsOmahaEolDate, _))
+ .WillOnce(
+ DoAll(SetArgPointee<1>(EolDateToString(eol_date)), Return(true)));
+
+ UpdateEngineStatus status;
+ attempter_.GetStatus(&status);
+ EXPECT_EQ(eol_date, status.eol_date);
+}
+
+TEST_F(UpdateAttempterTest, PastEolTest) {
+ EolDate eol_date = 1;
+ EXPECT_CALL(*prefs_, Exists(kPrefsOmahaEolDate)).WillOnce(Return(true));
+ EXPECT_CALL(*prefs_, GetString(kPrefsOmahaEolDate, _))
+ .WillOnce(
+ DoAll(SetArgPointee<1>(EolDateToString(eol_date)), Return(true)));
+
+ UpdateEngineStatus status;
+ attempter_.GetStatus(&status);
+ EXPECT_EQ(eol_date, status.eol_date);
+}
+
+TEST_F(UpdateAttempterTest, FailedEolTest) {
+ EXPECT_CALL(*prefs_, Exists(kPrefsOmahaEolDate)).WillOnce(Return(true));
+ EXPECT_CALL(*prefs_, GetString(kPrefsOmahaEolDate, _))
+ .WillOnce(Return(false));
+
+ UpdateEngineStatus status;
+ attempter_.GetStatus(&status);
+ EXPECT_EQ(kEolDateInvalid, status.eol_date);
+}
+
+TEST_F(UpdateAttempterTest, MissingEolTest) {
+ EXPECT_CALL(*prefs_, Exists(kPrefsOmahaEolDate)).WillOnce(Return(false));
+
+ UpdateEngineStatus status;
+ attempter_.GetStatus(&status);
+ EXPECT_EQ(kEolDateInvalid, status.eol_date);
+}
+
+TEST_F(UpdateAttempterTest, CalculateDlcParamsInstallTest) {
+ string dlc_id = "dlc0";
+ FakePrefs fake_prefs;
+ fake_system_state_.set_prefs(&fake_prefs);
+ attempter_.is_install_ = true;
+ attempter_.dlc_ids_ = {dlc_id};
+ attempter_.CalculateDlcParams();
+
+ OmahaRequestParams* params = fake_system_state_.request_params();
+ EXPECT_EQ(1, params->dlc_apps_params().count(params->GetDlcAppId(dlc_id)));
+ OmahaRequestParams::AppParams dlc_app_params =
+ params->dlc_apps_params().at(params->GetDlcAppId(dlc_id));
+ EXPECT_STREQ(dlc_id.c_str(), dlc_app_params.name.c_str());
+ EXPECT_EQ(false, dlc_app_params.send_ping);
+ // When the DLC gets installed, a ping is not sent, therefore we don't store
+ // the values sent by Omaha.
+ auto last_active_key = PrefsInterface::CreateSubKey(
+ {kDlcPrefsSubDir, dlc_id, kPrefsPingLastActive});
+ EXPECT_FALSE(fake_system_state_.prefs()->Exists(last_active_key));
+ auto last_rollcall_key = PrefsInterface::CreateSubKey(
+ {kDlcPrefsSubDir, dlc_id, kPrefsPingLastRollcall});
+ EXPECT_FALSE(fake_system_state_.prefs()->Exists(last_rollcall_key));
+}
+
+TEST_F(UpdateAttempterTest, CalculateDlcParamsNoPrefFilesTest) {
+ string dlc_id = "dlc0";
+ FakePrefs fake_prefs;
+ fake_system_state_.set_prefs(&fake_prefs);
+ EXPECT_CALL(mock_dlcservice_, GetDlcsToUpdate(_))
+ .WillOnce(
+ DoAll(SetArgPointee<0>(std::vector<string>({dlc_id})), Return(true)));
+
+ attempter_.is_install_ = false;
+ attempter_.CalculateDlcParams();
+
+ OmahaRequestParams* params = fake_system_state_.request_params();
+ EXPECT_EQ(1, params->dlc_apps_params().count(params->GetDlcAppId(dlc_id)));
+ OmahaRequestParams::AppParams dlc_app_params =
+ params->dlc_apps_params().at(params->GetDlcAppId(dlc_id));
+ EXPECT_STREQ(dlc_id.c_str(), dlc_app_params.name.c_str());
+
+ EXPECT_EQ(true, dlc_app_params.send_ping);
+ EXPECT_EQ(0, dlc_app_params.ping_active);
+ EXPECT_EQ(-1, dlc_app_params.ping_date_last_active);
+ EXPECT_EQ(-1, dlc_app_params.ping_date_last_rollcall);
+}
+
+TEST_F(UpdateAttempterTest, CalculateDlcParamsNonParseableValuesTest) {
+ string dlc_id = "dlc0";
+ MemoryPrefs prefs;
+ fake_system_state_.set_prefs(&prefs);
+ EXPECT_CALL(mock_dlcservice_, GetDlcsToUpdate(_))
+ .WillOnce(
+ DoAll(SetArgPointee<0>(std::vector<string>({dlc_id})), Return(true)));
+
+ // Write non numeric values in the metadata files.
+ auto active_key =
+ PrefsInterface::CreateSubKey({kDlcPrefsSubDir, dlc_id, kPrefsPingActive});
+ auto last_active_key = PrefsInterface::CreateSubKey(
+ {kDlcPrefsSubDir, dlc_id, kPrefsPingLastActive});
+ auto last_rollcall_key = PrefsInterface::CreateSubKey(
+ {kDlcPrefsSubDir, dlc_id, kPrefsPingLastRollcall});
+ fake_system_state_.prefs()->SetString(active_key, "z2yz");
+ fake_system_state_.prefs()->SetString(last_active_key, "z2yz");
+ fake_system_state_.prefs()->SetString(last_rollcall_key, "z2yz");
+ attempter_.is_install_ = false;
+ attempter_.CalculateDlcParams();
+
+ OmahaRequestParams* params = fake_system_state_.request_params();
+ EXPECT_EQ(1, params->dlc_apps_params().count(params->GetDlcAppId(dlc_id)));
+ OmahaRequestParams::AppParams dlc_app_params =
+ params->dlc_apps_params().at(params->GetDlcAppId(dlc_id));
+ EXPECT_STREQ(dlc_id.c_str(), dlc_app_params.name.c_str());
+
+ EXPECT_EQ(true, dlc_app_params.send_ping);
+ EXPECT_EQ(0, dlc_app_params.ping_active);
+ EXPECT_EQ(-2, dlc_app_params.ping_date_last_active);
+ EXPECT_EQ(-2, dlc_app_params.ping_date_last_rollcall);
+}
+
+TEST_F(UpdateAttempterTest, CalculateDlcParamsValidValuesTest) {
+ string dlc_id = "dlc0";
+ MemoryPrefs fake_prefs;
+ fake_system_state_.set_prefs(&fake_prefs);
+ EXPECT_CALL(mock_dlcservice_, GetDlcsToUpdate(_))
+ .WillOnce(
+ DoAll(SetArgPointee<0>(std::vector<string>({dlc_id})), Return(true)));
+
+ // Write numeric values in the metadata files.
+ auto active_key =
+ PrefsInterface::CreateSubKey({kDlcPrefsSubDir, dlc_id, kPrefsPingActive});
+ auto last_active_key = PrefsInterface::CreateSubKey(
+ {kDlcPrefsSubDir, dlc_id, kPrefsPingLastActive});
+ auto last_rollcall_key = PrefsInterface::CreateSubKey(
+ {kDlcPrefsSubDir, dlc_id, kPrefsPingLastRollcall});
+
+ fake_system_state_.prefs()->SetInt64(active_key, 1);
+ fake_system_state_.prefs()->SetInt64(last_active_key, 78);
+ fake_system_state_.prefs()->SetInt64(last_rollcall_key, 99);
+ attempter_.is_install_ = false;
+ attempter_.CalculateDlcParams();
+
+ OmahaRequestParams* params = fake_system_state_.request_params();
+ EXPECT_EQ(1, params->dlc_apps_params().count(params->GetDlcAppId(dlc_id)));
+ OmahaRequestParams::AppParams dlc_app_params =
+ params->dlc_apps_params().at(params->GetDlcAppId(dlc_id));
+ EXPECT_STREQ(dlc_id.c_str(), dlc_app_params.name.c_str());
+
+ EXPECT_EQ(true, dlc_app_params.send_ping);
+ EXPECT_EQ(1, dlc_app_params.ping_active);
+ EXPECT_EQ(78, dlc_app_params.ping_date_last_active);
+ EXPECT_EQ(99, dlc_app_params.ping_date_last_rollcall);
+}
+
+TEST_F(UpdateAttempterTest, CalculateDlcParamsRemoveStaleMetadata) {
+ string dlc_id = "dlc0";
+ FakePrefs fake_prefs;
+ fake_system_state_.set_prefs(&fake_prefs);
+ auto active_key =
+ PrefsInterface::CreateSubKey({kDlcPrefsSubDir, dlc_id, kPrefsPingActive});
+ auto last_active_key = PrefsInterface::CreateSubKey(
+ {kDlcPrefsSubDir, dlc_id, kPrefsPingLastActive});
+ auto last_rollcall_key = PrefsInterface::CreateSubKey(
+ {kDlcPrefsSubDir, dlc_id, kPrefsPingLastRollcall});
+ fake_system_state_.prefs()->SetInt64(active_key, kPingInactiveValue);
+ fake_system_state_.prefs()->SetInt64(last_active_key, 0);
+ fake_system_state_.prefs()->SetInt64(last_rollcall_key, 0);
+ EXPECT_TRUE(fake_system_state_.prefs()->Exists(active_key));
+ EXPECT_TRUE(fake_system_state_.prefs()->Exists(last_active_key));
+ EXPECT_TRUE(fake_system_state_.prefs()->Exists(last_rollcall_key));
+
+ attempter_.dlc_ids_ = {dlc_id};
+ attempter_.is_install_ = true;
+ attempter_.CalculateDlcParams();
+
+ EXPECT_FALSE(fake_system_state_.prefs()->Exists(last_active_key));
+ EXPECT_FALSE(fake_system_state_.prefs()->Exists(last_rollcall_key));
+ // Active key is set on install.
+ EXPECT_TRUE(fake_system_state_.prefs()->Exists(active_key));
+ int64_t temp_int;
+ EXPECT_TRUE(fake_system_state_.prefs()->GetInt64(active_key, &temp_int));
+ EXPECT_EQ(temp_int, kPingActiveValue);
+}
+
+TEST_F(UpdateAttempterTest, SetDlcActiveValue) {
+ string dlc_id = "dlc0";
+ FakePrefs fake_prefs;
+ fake_system_state_.set_prefs(&fake_prefs);
+ attempter_.SetDlcActiveValue(true, dlc_id);
+ int64_t temp_int;
+ auto active_key =
+ PrefsInterface::CreateSubKey({kDlcPrefsSubDir, dlc_id, kPrefsPingActive});
+ EXPECT_TRUE(fake_system_state_.prefs()->Exists(active_key));
+ EXPECT_TRUE(fake_system_state_.prefs()->GetInt64(active_key, &temp_int));
+ EXPECT_EQ(temp_int, kPingActiveValue);
+}
+
+TEST_F(UpdateAttempterTest, SetDlcInactive) {
+ string dlc_id = "dlc0";
+ MemoryPrefs fake_prefs;
+ fake_system_state_.set_prefs(&fake_prefs);
+ auto sub_keys = {
+ kPrefsPingActive, kPrefsPingLastActive, kPrefsPingLastRollcall};
+ for (auto& sub_key : sub_keys) {
+ auto key = PrefsInterface::CreateSubKey({kDlcPrefsSubDir, dlc_id, sub_key});
+ fake_system_state_.prefs()->SetInt64(key, 1);
+ EXPECT_TRUE(fake_system_state_.prefs()->Exists(key));
+ }
+ attempter_.SetDlcActiveValue(false, dlc_id);
+ for (auto& sub_key : sub_keys) {
+ auto key = PrefsInterface::CreateSubKey({kDlcPrefsSubDir, dlc_id, sub_key});
+ EXPECT_FALSE(fake_system_state_.prefs()->Exists(key));
+ }
+}
+
+TEST_F(UpdateAttempterTest, GetSuccessfulDlcIds) {
+ auto dlc_1 = "1", dlc_2 = "2", dlc_3 = "3";
+ attempter_.omaha_request_params_->set_dlc_apps_params(
+ {{dlc_1, {.name = dlc_1, .updated = false}},
+ {dlc_2, {.name = dlc_2}},
+ {dlc_3, {.name = dlc_3, .updated = false}}});
+ EXPECT_THAT(attempter_.GetSuccessfulDlcIds(), ElementsAre(dlc_2));
+}
+
} // namespace chromeos_update_engine
diff --git a/update_engine-client.gyp b/update_engine-client.gyp
deleted file mode 100644
index 588fc63..0000000
--- a/update_engine-client.gyp
+++ /dev/null
@@ -1,41 +0,0 @@
-#
-# Copyright (C) 2015 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-{
- 'targets': [
- # update_engine client library generated headers. Used by other daemons and
- # by the update_engine_client console program to interact with
- # update_engine.
- {
- 'target_name': 'libupdate_engine-client-headers',
- 'type': 'none',
- 'actions': [
- {
- 'action_name': 'update_engine_client-dbus-proxies',
- 'variables': {
- 'dbus_service_config': 'dbus_bindings/dbus-service-config.json',
- 'proxy_output_file': 'include/update_engine/dbus-proxies.h',
- 'mock_output_file': 'include/update_engine/dbus-proxy-mocks.h',
- 'proxy_path_in_mocks': 'update_engine/dbus-proxies.h',
- },
- 'sources': [
- 'dbus_bindings/org.chromium.UpdateEngineInterface.dbus-xml',
- ],
- 'includes': ['../../../platform2/common-mk/generate-dbus-proxies.gypi'],
- },
- ],
- },
- ],
-}
diff --git a/update_engine.gyp b/update_engine.gyp
deleted file mode 100644
index c2c0c62..0000000
--- a/update_engine.gyp
+++ /dev/null
@@ -1,655 +0,0 @@
-#
-# Copyright (C) 2015 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# TODO: Rename these files to pass this check.
-# gyplint: disable=GypLintSourceFileNames
-{
- 'variables': {
- 'USE_chrome_network_proxy': '1',
- 'USE_chrome_kiosk_app': '1',
- },
- 'target_defaults': {
- 'variables': {
- 'deps': [
- 'libbrillo-<(libbase_ver)',
- 'libchrome-<(libbase_ver)',
- # system_api depends on protobuf (or protobuf-lite). It must appear
- # before protobuf here or the linker flags won't be in the right
- # order.
- 'system_api',
- 'protobuf-lite',
- ],
- # The -DUSE_* flags are passed from platform2.py. We use sane defaults
- # here when these USE flags are not defined. You can set the default value
- # for the USE flag in the ebuild.
- 'USE_hwid_override%': '0',
- },
- 'cflags': [
- '-g',
- '-ffunction-sections',
- '-Wall',
- '-Wextra',
- '-Werror',
- '-Wno-unused-parameter',
- ],
- 'cflags_cc': [
- '-fno-strict-aliasing',
- '-Wnon-virtual-dtor',
- ],
- 'ldflags': [
- '-Wl,--gc-sections',
- ],
- 'defines': [
- '__CHROMEOS__',
- '_FILE_OFFSET_BITS=64',
- '_POSIX_C_SOURCE=199309L',
- 'USE_BINDER=<(USE_binder)',
- 'USE_DBUS=<(USE_dbus)',
- 'USE_FEC=0',
- 'USE_HWID_OVERRIDE=<(USE_hwid_override)',
- 'USE_CHROME_KIOSK_APP=<(USE_chrome_kiosk_app)',
- 'USE_CHROME_NETWORK_PROXY=<(USE_chrome_network_proxy)',
- 'USE_MTD=<(USE_mtd)',
- 'USE_OMAHA=1',
- 'USE_SHILL=1',
- ],
- 'include_dirs': [
- # We need this include dir because we include all the local code as
- # "update_engine/...".
- '<(platform2_root)/../aosp/system',
- '<(platform2_root)/../aosp/system/update_engine/client_library/include',
- ],
- },
- 'targets': [
- # Protobufs.
- {
- 'target_name': 'update_metadata-protos',
- 'type': 'static_library',
- 'variables': {
- 'proto_in_dir': '.',
- 'proto_out_dir': 'include/update_engine',
- },
- 'sources': [
- 'update_metadata.proto',
- ],
- 'includes': ['../../../platform2/common-mk/protoc.gypi'],
- },
- # Chrome D-Bus bindings.
- {
- 'target_name': 'update_engine-dbus-adaptor',
- 'type': 'none',
- 'variables': {
- 'dbus_adaptors_out_dir': 'include/dbus_bindings',
- 'dbus_xml_extension': 'dbus-xml',
- },
- 'sources': [
- 'dbus_bindings/org.chromium.UpdateEngineInterface.dbus-xml',
- ],
- 'includes': ['../../../platform2/common-mk/generate-dbus-adaptors.gypi'],
- },
- {
- 'target_name': 'update_engine-dbus-kiosk-app-client',
- 'type': 'none',
- 'actions': [{
- 'action_name': 'update_engine-dbus-kiosk-app-client-action',
- 'variables': {
- 'mock_output_file': 'include/kiosk-app/dbus-proxy-mocks.h',
- 'proxy_output_file': 'include/kiosk-app/dbus-proxies.h',
- },
- 'sources': [
- 'dbus_bindings/org.chromium.KioskAppService.dbus-xml',
- ],
- 'includes': ['../../../platform2/common-mk/generate-dbus-proxies.gypi'],
- }],
- },
- # The payload application component and common dependencies.
- {
- 'target_name': 'libpayload_consumer',
- 'type': 'static_library',
- 'dependencies': [
- 'update_metadata-protos',
- ],
- # TODO(deymo): Remove unused dependencies once we stop including files
- # from the root directory.
- 'variables': {
- 'exported_deps': [
- 'libcrypto',
- 'xz-embedded',
- 'libbspatch',
- 'libpuffpatch',
- ],
- 'deps': ['<@(exported_deps)'],
- },
- 'all_dependent_settings': {
- 'variables': {
- 'deps': [
- '<@(exported_deps)',
- ],
- },
- },
- 'link_settings': {
- 'variables': {
- 'deps': [
- '<@(exported_deps)',
- ],
- },
- 'libraries': [
- '-lbz2',
- '-lrt',
- ],
- },
- 'sources': [
- 'common/action_processor.cc',
- 'common/boot_control_stub.cc',
- 'common/clock.cc',
- 'common/constants.cc',
- 'common/cpu_limiter.cc',
- 'common/error_code_utils.cc',
- 'common/hash_calculator.cc',
- 'common/http_common.cc',
- 'common/http_fetcher.cc',
- 'common/hwid_override.cc',
- 'common/multi_range_http_fetcher.cc',
- 'common/platform_constants_chromeos.cc',
- 'common/prefs.cc',
- 'common/proxy_resolver.cc',
- 'common/subprocess.cc',
- 'common/terminator.cc',
- 'common/utils.cc',
- 'payload_consumer/bzip_extent_writer.cc',
- 'payload_consumer/cached_file_descriptor.cc',
- 'payload_consumer/delta_performer.cc',
- 'payload_consumer/download_action.cc',
- 'payload_consumer/extent_reader.cc',
- 'payload_consumer/extent_writer.cc',
- 'payload_consumer/file_descriptor.cc',
- 'payload_consumer/file_descriptor_utils.cc',
- 'payload_consumer/file_writer.cc',
- 'payload_consumer/filesystem_verifier_action.cc',
- 'payload_consumer/install_plan.cc',
- 'payload_consumer/mount_history.cc',
- 'payload_consumer/payload_constants.cc',
- 'payload_consumer/payload_metadata.cc',
- 'payload_consumer/payload_verifier.cc',
- 'payload_consumer/postinstall_runner_action.cc',
- 'payload_consumer/verity_writer_stub.cc',
- 'payload_consumer/xz_extent_writer.cc',
- ],
- 'conditions': [
- ['USE_mtd == 1', {
- 'sources': [
- 'payload_consumer/mtd_file_descriptor.cc',
- ],
- 'link_settings': {
- 'libraries': [
- '-lmtdutils',
- ],
- },
- }],
- ],
- },
- # The main daemon static_library with all the code used to check for updates
- # with Omaha and expose a DBus daemon.
- {
- 'target_name': 'libupdate_engine',
- 'type': 'static_library',
- 'dependencies': [
- 'libpayload_consumer',
- 'update_metadata-protos',
- 'update_engine-dbus-adaptor',
- ],
- 'variables': {
- 'exported_deps': [
- 'dbus-1',
- 'expat',
- 'libcurl',
- 'libdebugd-client',
- 'libmetrics-<(libbase_ver)',
- 'libpower_manager-client',
- 'libsession_manager-client',
- 'libshill-client',
- 'libssl',
- 'libupdate_engine-client',
- 'vboot_host',
- ],
- 'conditions':[
- ['USE_dlc == 1', {
- 'exported_deps' : [
- 'libdlcservice-client',
- ],
- }],
- ],
- 'deps': ['<@(exported_deps)'],
- },
- 'all_dependent_settings': {
- 'variables': {
- 'deps': [
- '<@(exported_deps)',
- ],
- },
- },
- 'link_settings': {
- 'variables': {
- 'deps': [
- '<@(exported_deps)',
- ],
- },
- 'libraries': [
- '-lbz2',
- '-lpolicy-<(libbase_ver)',
- '-lrootdev',
- '-lrt',
- ],
- },
- 'sources': [
- 'boot_control_chromeos.cc',
- 'certificate_checker.cc',
- 'common_service.cc',
- 'connection_manager.cc',
- 'connection_utils.cc',
- 'daemon.cc',
- 'dbus_connection.cc',
- 'dbus_service.cc',
- 'hardware_chromeos.cc',
- 'image_properties_chromeos.cc',
- 'libcurl_http_fetcher.cc',
- 'metrics_reporter_omaha.cc',
- 'metrics_utils.cc',
- 'omaha_request_action.cc',
- 'omaha_request_params.cc',
- 'omaha_response_handler_action.cc',
- 'omaha_utils.cc',
- 'p2p_manager.cc',
- 'payload_state.cc',
- 'power_manager_chromeos.cc',
- 'real_system_state.cc',
- 'shill_proxy.cc',
- 'update_attempter.cc',
- 'update_boot_flags_action.cc',
- 'update_manager/boxed_value.cc',
- 'update_manager/chromeos_policy.cc',
- 'update_manager/default_policy.cc',
- 'update_manager/enough_slots_ab_updates_policy_impl.cc',
- 'update_manager/enterprise_device_policy_impl.cc',
- 'update_manager/evaluation_context.cc',
- 'update_manager/interactive_update_policy_impl.cc',
- 'update_manager/next_update_check_policy_impl.cc',
- 'update_manager/official_build_check_policy_impl.cc',
- 'update_manager/out_of_box_experience_policy_impl.cc',
- 'update_manager/policy.cc',
- 'update_manager/policy_test_utils.cc',
- 'update_manager/real_config_provider.cc',
- 'update_manager/real_device_policy_provider.cc',
- 'update_manager/real_random_provider.cc',
- 'update_manager/real_shill_provider.cc',
- 'update_manager/real_system_provider.cc',
- 'update_manager/real_time_provider.cc',
- 'update_manager/real_updater_provider.cc',
- 'update_manager/staging_utils.cc',
- 'update_manager/state_factory.cc',
- 'update_manager/update_manager.cc',
- 'update_manager/update_time_restrictions_policy_impl.cc',
- 'update_manager/weekly_time.cc',
- 'update_status_utils.cc',
- ],
- 'conditions': [
- ['USE_chrome_network_proxy == 1', {
- 'sources': [
- 'chrome_browser_proxy_resolver.cc',
- ],
- }],
- ['USE_chrome_kiosk_app == 1', {
- 'dependencies': [
- 'update_engine-dbus-kiosk-app-client',
- ],
- }],
- ['USE_dlc == 1', {
- 'sources': [
- 'dlcservice_chromeos.cc',
- ],
- }],
- ['USE_dlc == 0', {
- 'sources': [
- 'common/dlcservice_stub.cc',
- ],
- }],
- ],
- },
- # update_engine daemon.
- {
- 'target_name': 'update_engine',
- 'type': 'executable',
- 'dependencies': [
- 'libupdate_engine',
- ],
- 'sources': [
- 'main.cc',
- ],
- },
- # update_engine client library.
- {
- 'target_name': 'libupdate_engine_client',
- 'type': 'static_library',
- 'variables': {
- 'deps': [
- 'dbus-1',
- 'libupdate_engine-client',
- ],
- },
- 'sources': [
- 'client_library/client.cc',
- 'client_library/client_dbus.cc',
- 'update_status_utils.cc',
- ],
- 'include_dirs': [
- 'client_library/include',
- ],
- },
- # update_engine console client.
- {
- 'target_name': 'update_engine_client',
- 'type': 'executable',
- 'dependencies': [
- 'libupdate_engine_client',
- ],
- 'sources': [
- 'common/error_code_utils.cc',
- 'omaha_utils.cc',
- 'update_engine_client.cc',
- ],
- },
- # server-side code. This is used for delta_generator and unittests but not
- # for any client code.
- {
- 'target_name': 'libpayload_generator',
- 'type': 'static_library',
- 'dependencies': [
- 'libpayload_consumer',
- 'update_metadata-protos',
- ],
- 'variables': {
- 'exported_deps': [
- 'ext2fs',
- 'libbsdiff',
- 'libpuffdiff',
- 'liblzma',
- ],
- 'deps': ['<@(exported_deps)'],
- },
- 'all_dependent_settings': {
- 'variables': {
- 'deps': [
- '<@(exported_deps)',
- ],
- },
- },
- 'link_settings': {
- 'variables': {
- 'deps': [
- '<@(exported_deps)',
- ],
- },
- },
- 'sources': [
- 'common/file_fetcher.cc',
- 'payload_generator/ab_generator.cc',
- 'payload_generator/annotated_operation.cc',
- 'payload_generator/blob_file_writer.cc',
- 'payload_generator/block_mapping.cc',
- 'payload_generator/boot_img_filesystem.cc',
- 'payload_generator/bzip.cc',
- 'payload_generator/cycle_breaker.cc',
- 'payload_generator/deflate_utils.cc',
- 'payload_generator/delta_diff_generator.cc',
- 'payload_generator/delta_diff_utils.cc',
- 'payload_generator/ext2_filesystem.cc',
- 'payload_generator/extent_ranges.cc',
- 'payload_generator/extent_utils.cc',
- 'payload_generator/full_update_generator.cc',
- 'payload_generator/graph_types.cc',
- 'payload_generator/graph_utils.cc',
- 'payload_generator/inplace_generator.cc',
- 'payload_generator/mapfile_filesystem.cc',
- 'payload_generator/payload_file.cc',
- 'payload_generator/payload_generation_config_chromeos.cc',
- 'payload_generator/payload_generation_config.cc',
- 'payload_generator/payload_signer.cc',
- 'payload_generator/raw_filesystem.cc',
- 'payload_generator/squashfs_filesystem.cc',
- 'payload_generator/tarjan.cc',
- 'payload_generator/topological_sort.cc',
- 'payload_generator/xz_chromeos.cc',
- ],
- },
- # server-side delta generator.
- {
- 'target_name': 'delta_generator',
- 'type': 'executable',
- 'dependencies': [
- 'libpayload_consumer',
- 'libpayload_generator',
- ],
- 'link_settings': {
- 'ldflags!': [
- '-pie',
- ],
- },
- 'sources': [
- 'payload_generator/generate_delta_main.cc',
- ],
- },
- {
- 'target_name': 'update_engine_test_libs',
- 'type': 'static_library',
- 'variables': {
- 'deps': [
- 'libshill-client-test',
- ],
- },
- 'dependencies': [
- 'libupdate_engine',
- ],
- 'includes': [
- '../../../platform2/common-mk/common_test.gypi',
- ],
- 'sources': [
- 'common/fake_prefs.cc',
- 'common/mock_http_fetcher.cc',
- 'common/test_utils.cc',
- 'fake_shill_proxy.cc',
- 'fake_system_state.cc',
- 'payload_consumer/fake_file_descriptor.cc',
- 'payload_generator/fake_filesystem.cc',
- 'update_manager/umtest_utils.cc',
- ],
- },
- ],
- 'conditions': [
- ['USE_test == 1', {
- 'targets': [
- # Public keys used for unit testing.
- {
- 'target_name': 'update_engine-testkeys',
- 'type': 'none',
- 'variables': {
- 'openssl_pem_in_dir': '.',
- 'openssl_pem_out_dir': 'include/update_engine',
- },
- 'sources': [
- 'unittest_key.pem',
- 'unittest_key2.pem',
- 'unittest_key_RSA4096.pem',
- ],
- 'includes': ['../../../platform2/common-mk/openssl_pem.gypi'],
- },
- # Unpacks sample images used for testing.
- {
- 'target_name': 'update_engine-test_images',
- 'type': 'none',
- 'variables': {
- 'image_out_dir': '.',
- },
- 'sources': [
- 'sample_images/sample_images.tar.bz2',
- ],
- 'includes': ['tar_bunzip2.gypi'],
- },
- # Test HTTP Server.
- {
- 'target_name': 'test_http_server',
- 'type': 'executable',
- 'sources': [
- 'common/http_common.cc',
- 'test_http_server.cc',
- ],
- },
- # Test subprocess helper.
- {
- 'target_name': 'test_subprocess',
- 'type': 'executable',
- 'sources': [
- 'test_subprocess.cc',
- ],
- },
- # Main unittest file.
- {
- 'target_name': 'update_engine_unittests',
- 'type': 'executable',
- 'variables': {
- 'deps': [
- 'libbrillo-test-<(libbase_ver)',
- 'libchrome-test-<(libbase_ver)',
- 'libdebugd-client-test',
- 'libpower_manager-client-test',
- 'libsession_manager-client-test',
- 'libshill-client-test',
- ],
- },
- 'dependencies': [
- 'libupdate_engine',
- 'libpayload_generator',
- 'update_engine_test_libs',
- ],
- 'sources': [
- 'boot_control_chromeos_unittest.cc',
- 'certificate_checker_unittest.cc',
- 'common/action_pipe_unittest.cc',
- 'common/action_processor_unittest.cc',
- 'common/action_unittest.cc',
- 'common/cpu_limiter_unittest.cc',
- 'common/hash_calculator_unittest.cc',
- 'common/http_fetcher_unittest.cc',
- 'common/hwid_override_unittest.cc',
- 'common/prefs_unittest.cc',
- 'common/proxy_resolver_unittest.cc',
- 'common/subprocess_unittest.cc',
- 'common/terminator_unittest.cc',
- 'common/utils_unittest.cc',
- 'common_service_unittest.cc',
- 'connection_manager_unittest.cc',
- 'hardware_chromeos_unittest.cc',
- 'image_properties_chromeos_unittest.cc',
- 'metrics_reporter_omaha_unittest.cc',
- 'metrics_utils_unittest.cc',
- 'omaha_request_action_unittest.cc',
- 'omaha_request_params_unittest.cc',
- 'omaha_response_handler_action_unittest.cc',
- 'omaha_utils_unittest.cc',
- 'p2p_manager_unittest.cc',
- 'payload_consumer/bzip_extent_writer_unittest.cc',
- 'payload_consumer/cached_file_descriptor_unittest.cc',
- 'payload_consumer/delta_performer_integration_test.cc',
- 'payload_consumer/delta_performer_unittest.cc',
- 'payload_consumer/download_action_unittest.cc',
- 'payload_consumer/extent_reader_unittest.cc',
- 'payload_consumer/extent_writer_unittest.cc',
- 'payload_consumer/file_descriptor_utils_unittest.cc',
- 'payload_consumer/file_writer_unittest.cc',
- 'payload_consumer/filesystem_verifier_action_unittest.cc',
- 'payload_consumer/postinstall_runner_action_unittest.cc',
- 'payload_consumer/xz_extent_writer_unittest.cc',
- 'payload_generator/ab_generator_unittest.cc',
- 'payload_generator/blob_file_writer_unittest.cc',
- 'payload_generator/block_mapping_unittest.cc',
- 'payload_generator/boot_img_filesystem_unittest.cc',
- 'payload_generator/cycle_breaker_unittest.cc',
- 'payload_generator/deflate_utils_unittest.cc',
- 'payload_generator/delta_diff_utils_unittest.cc',
- 'payload_generator/ext2_filesystem_unittest.cc',
- 'payload_generator/extent_ranges_unittest.cc',
- 'payload_generator/extent_utils_unittest.cc',
- 'payload_generator/full_update_generator_unittest.cc',
- 'payload_generator/graph_utils_unittest.cc',
- 'payload_generator/inplace_generator_unittest.cc',
- 'payload_generator/mapfile_filesystem_unittest.cc',
- 'payload_generator/payload_file_unittest.cc',
- 'payload_generator/payload_generation_config_unittest.cc',
- 'payload_generator/payload_signer_unittest.cc',
- 'payload_generator/squashfs_filesystem_unittest.cc',
- 'payload_generator/tarjan_unittest.cc',
- 'payload_generator/topological_sort_unittest.cc',
- 'payload_generator/zip_unittest.cc',
- 'payload_state_unittest.cc',
- 'testrunner.cc',
- 'update_attempter_unittest.cc',
- 'update_boot_flags_action_unittest.cc',
- 'update_manager/boxed_value_unittest.cc',
- 'update_manager/chromeos_policy_unittest.cc',
- 'update_manager/evaluation_context_unittest.cc',
- 'update_manager/generic_variables_unittest.cc',
- 'update_manager/prng_unittest.cc',
- 'update_manager/real_device_policy_provider_unittest.cc',
- 'update_manager/real_random_provider_unittest.cc',
- 'update_manager/real_shill_provider_unittest.cc',
- 'update_manager/real_system_provider_unittest.cc',
- 'update_manager/real_time_provider_unittest.cc',
- 'update_manager/real_updater_provider_unittest.cc',
- 'update_manager/staging_utils_unittest.cc',
- 'update_manager/update_manager_unittest.cc',
- 'update_manager/update_time_restrictions_policy_impl_unittest.cc',
- 'update_manager/variable_unittest.cc',
- 'update_manager/weekly_time_unittest.cc',
- ],
- },
- ],
- }],
- # Fuzzer target.
- ['USE_fuzzer == 1', {
- 'targets': [
- {
- 'target_name': 'update_engine_omaha_request_action_fuzzer',
- 'type': 'executable',
- 'variables': {
- 'deps': [
- 'libbrillo-test-<(libbase_ver)',
- 'libchrome-test-<(libbase_ver)',
- ],
- },
- 'includes': [
- '../../../platform2/common-mk/common_fuzzer.gypi',
- ],
- 'dependencies': [
- 'libupdate_engine',
- 'update_engine_test_libs',
- ],
- 'sources': [
- 'omaha_request_action_fuzzer.cc',
- ],
- },
- ],
- }],
- ],
-}
diff --git a/update_engine_client.cc b/update_engine_client.cc
index 7446041..31448ea 100644
--- a/update_engine_client.cc
+++ b/update_engine_client.cc
@@ -26,10 +26,13 @@
#include <base/command_line.h>
#include <base/logging.h>
#include <base/macros.h>
+#include <base/strings/string_number_conversions.h>
#include <base/strings/string_split.h>
#include <base/threading/platform_thread.h>
+#include <base/threading/thread_task_runner_handle.h>
#include <brillo/daemons/daemon.h>
#include <brillo/flag_helper.h>
+#include <brillo/key_value_store.h>
#include "update_engine/client.h"
#include "update_engine/common/error_code.h"
@@ -39,13 +42,17 @@
#include "update_engine/update_status.h"
#include "update_engine/update_status_utils.h"
-using chromeos_update_engine::EolStatus;
+using brillo::KeyValueStore;
+using chromeos_update_engine::EolDate;
+using chromeos_update_engine::EolDateToString;
using chromeos_update_engine::ErrorCode;
+using chromeos_update_engine::UpdateEngineStatusToString;
using chromeos_update_engine::UpdateStatusToString;
using chromeos_update_engine::utils::ErrorCodeToString;
using std::string;
using std::unique_ptr;
using std::vector;
+using update_engine::UpdateEngineStatus;
using update_engine::UpdateStatus;
namespace {
@@ -80,7 +87,7 @@
// We can't call QuitWithExitCode from OnInit(), so we delay the execution
// of the ProcessFlags method after the Daemon initialization is done.
- base::MessageLoop::current()->task_runner()->PostTask(
+ base::ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE,
base::Bind(&UpdateEngineClient::ProcessFlagsAndExit,
base::Unretained(this)));
@@ -132,80 +139,47 @@
public:
~WatchingStatusUpdateHandler() override = default;
- void HandleStatusUpdate(int64_t last_checked_time,
- double progress,
- UpdateStatus current_operation,
- const string& new_version,
- int64_t new_size) override;
+ void HandleStatusUpdate(const UpdateEngineStatus& status) override;
};
void WatchingStatusUpdateHandler::HandleStatusUpdate(
- int64_t last_checked_time,
- double progress,
- UpdateStatus current_operation,
- const string& new_version,
- int64_t new_size) {
- LOG(INFO) << "Got status update:";
- LOG(INFO) << " last_checked_time: " << last_checked_time;
- LOG(INFO) << " progress: " << progress;
- LOG(INFO) << " current_operation: "
- << UpdateStatusToString(current_operation);
- LOG(INFO) << " new_version: " << new_version;
- LOG(INFO) << " new_size: " << new_size;
+ const UpdateEngineStatus& status) {
+ LOG(INFO) << "Got status update: " << UpdateEngineStatusToString(status);
}
bool UpdateEngineClient::ShowStatus() {
- int64_t last_checked_time = 0;
- double progress = 0.0;
- UpdateStatus current_op;
- string new_version;
- int64_t new_size = 0;
-
+ UpdateEngineStatus status;
int retry_count = kShowStatusRetryCount;
while (retry_count > 0) {
- if (client_->GetStatus(&last_checked_time,
- &progress,
-                            &current_op,
- &new_version,
- &new_size)) {
+ if (client_->GetStatus(&status)) {
break;
}
if (--retry_count == 0) {
return false;
}
- LOG(WARNING) << "Will try " << retry_count << " more times!";
+ LOG(WARNING)
+ << "Failed to get the update_engine status. This can happen when the"
+ " update_engine is busy doing a heavy operation or if the"
+ " update-engine service is down. If it doesn't resolve, a restart of"
+ " the update-engine service is needed."
+ " Will try "
+ << retry_count << " more times!";
base::PlatformThread::Sleep(
base::TimeDelta::FromSeconds(kShowStatusRetryIntervalInSeconds));
}
- printf("LAST_CHECKED_TIME=%" PRIi64
- "\nPROGRESS=%f\nCURRENT_OP=%s\n"
- "NEW_VERSION=%s\nNEW_SIZE=%" PRIi64 "\n",
- last_checked_time,
- progress,
- UpdateStatusToString(current_op),
- new_version.c_str(),
- new_size);
+ printf("%s", UpdateEngineStatusToString(status).c_str());
return true;
}
int UpdateEngineClient::GetNeedReboot() {
- int64_t last_checked_time = 0;
- double progress = 0.0;
- UpdateStatus current_op;
- string new_version;
- int64_t new_size = 0;
-
- if (!client_->GetStatus(&last_checked_time,
- &progress,
-                          &current_op,
- &new_version,
- &new_size)) {
+ UpdateEngineStatus status;
+ if (!client_->GetStatus(&status)) {
return 1;
}
- if (current_op == UpdateStatus::UPDATED_NEED_REBOOT) {
+ if (status.status == UpdateStatus::UPDATED_NEED_REBOOT) {
return 0;
}
@@ -220,35 +194,26 @@
~UpdateWaitHandler() override = default;
- void HandleStatusUpdate(int64_t last_checked_time,
- double progress,
- UpdateStatus current_operation,
- const string& new_version,
- int64_t new_size) override;
+ void HandleStatusUpdate(const UpdateEngineStatus& status) override;
private:
bool exit_on_error_;
update_engine::UpdateEngineClient* client_;
};
-void UpdateWaitHandler::HandleStatusUpdate(int64_t /* last_checked_time */,
- double /* progress */,
- UpdateStatus current_operation,
- const string& /* new_version */,
- int64_t /* new_size */) {
- if (exit_on_error_ && current_operation == UpdateStatus::IDLE) {
- int last_attempt_error;
+void UpdateWaitHandler::HandleStatusUpdate(const UpdateEngineStatus& status) {
+ if (exit_on_error_ && status.status == UpdateStatus::IDLE) {
+ int last_attempt_error = static_cast<int>(ErrorCode::kSuccess);
ErrorCode code = ErrorCode::kSuccess;
if (client_ && client_->GetLastAttemptError(&last_attempt_error))
code = static_cast<ErrorCode>(last_attempt_error);
LOG(ERROR) << "Update failed, current operation is "
- << UpdateStatusToString(current_operation)
- << ", last error code is " << ErrorCodeToString(code) << "("
- << last_attempt_error << ")";
+ << UpdateStatusToString(status.status) << ", last error code is "
+ << ErrorCodeToString(code) << "(" << last_attempt_error << ")";
exit(1);
}
- if (current_operation == UpdateStatus::UPDATED_NEED_REBOOT) {
+ if (status.status == UpdateStatus::UPDATED_NEED_REBOOT) {
LOG(INFO) << "Update succeeded -- reboot needed.";
exit(0);
}
@@ -321,8 +286,6 @@
"Show the previous OS version used before the update reboot.");
DEFINE_bool(last_attempt_error, false, "Show the last attempt error.");
DEFINE_bool(eol_status, false, "Show the current end-of-life status.");
- DEFINE_bool(install, false, "Requests an install.");
- DEFINE_string(dlc_module_ids, "", "colon-separated list of DLC IDs.");
// Boilerplate init commands.
base::CommandLine::Init(argc_, argv_);
@@ -507,30 +470,6 @@
}
}
- if (FLAGS_install) {
- // Parse DLC module IDs.
- vector<string> dlc_module_ids;
- if (!FLAGS_dlc_module_ids.empty()) {
- dlc_module_ids = base::SplitString(FLAGS_dlc_module_ids,
- ":",
- base::TRIM_WHITESPACE,
- base::SPLIT_WANT_ALL);
- }
- if (dlc_module_ids.empty()) {
- LOG(ERROR) << "dlc_module_ids is empty:" << FLAGS_dlc_module_ids;
- return 1;
- }
- if (!client_->AttemptInstall(FLAGS_omaha_url, dlc_module_ids)) {
- LOG(ERROR) << "AttemptInstall failed.";
- return 1;
- }
- return 0;
- } else if (!FLAGS_dlc_module_ids.empty()) {
- LOG(ERROR) << "dlc_module_ids is not empty while install is not set:"
- << FLAGS_dlc_module_ids;
- return 1;
- }
-
// Initiate an update check, if necessary.
if (do_update_request) {
LOG_IF(WARNING, FLAGS_reboot) << "-reboot flag ignored.";
@@ -539,7 +478,7 @@
app_version = "ForcedUpdate";
LOG(INFO) << "Forcing an update by setting app_version to ForcedUpdate.";
}
- LOG(INFO) << "Initiating update check and install.";
+ LOG(INFO) << "Initiating update check.";
if (!client_->AttemptUpdate(
app_version, FLAGS_omaha_url, FLAGS_interactive)) {
LOG(ERROR) << "Error checking for update.";
@@ -622,21 +561,26 @@
LOG(ERROR) << "Error getting last attempt error.";
} else {
ErrorCode code = static_cast<ErrorCode>(last_attempt_error);
- printf(
- "ERROR_CODE=%i\n"
- "ERROR_MESSAGE=%s\n",
- last_attempt_error,
- ErrorCodeToString(code).c_str());
+
+ KeyValueStore last_attempt_error_store;
+ last_attempt_error_store.SetString(
+ "ERROR_CODE", base::NumberToString(last_attempt_error));
+ last_attempt_error_store.SetString("ERROR_MESSAGE",
+ ErrorCodeToString(code));
+ printf("%s", last_attempt_error_store.SaveToString().c_str());
}
}
if (FLAGS_eol_status) {
- int eol_status;
- if (!client_->GetEolStatus(&eol_status)) {
- LOG(ERROR) << "Error getting the end-of-life status.";
+ UpdateEngineStatus status;
+ if (!client_->GetStatus(&status)) {
+ LOG(ERROR) << "Error GetStatus() for getting EOL info.";
} else {
- EolStatus eol_status_code = static_cast<EolStatus>(eol_status);
- printf("EOL_STATUS=%s\n", EolStatusToString(eol_status_code));
+ EolDate eol_date_code = status.eol_date;
+
+ KeyValueStore eol_status_store;
+ eol_status_store.SetString("EOL_DATE", EolDateToString(eol_date_code));
+ printf("%s", eol_status_store.SaveToString().c_str());
}
}
diff --git a/update_manager/android_things_policy.cc b/update_manager/android_things_policy.cc
index 4afcf12..a76ea48 100644
--- a/update_manager/android_things_policy.cc
+++ b/update_manager/android_things_policy.cc
@@ -16,6 +16,7 @@
#include "update_engine/update_manager/android_things_policy.h"
+#include <memory>
#include <string>
#include <vector>
@@ -30,10 +31,15 @@
using base::Time;
using chromeos_update_engine::ErrorCode;
using std::string;
+using std::unique_ptr;
using std::vector;
namespace chromeos_update_manager {
+unique_ptr<Policy> GetSystemPolicy() {
+ return std::make_unique<AndroidThingsPolicy>();
+}
+
const NextUpdateCheckPolicyConstants
AndroidThingsPolicy::kNextUpdateCheckPolicyConstants = {
.timeout_initial_interval = 7 * 60,
@@ -54,6 +60,7 @@
result->target_channel.clear();
result->target_version_prefix.clear();
result->rollback_allowed = false;
+ result->rollback_data_save_requested = false;
result->rollback_allowed_milestones = -1;
result->interactive = false;
diff --git a/update_manager/boxed_value.cc b/update_manager/boxed_value.cc
index cee1ece..4dff9ef 100644
--- a/update_manager/boxed_value.cc
+++ b/update_manager/boxed_value.cc
@@ -51,41 +51,25 @@
template <>
string BoxedValue::ValuePrinter<int>(const void* value) {
const int* val = reinterpret_cast<const int*>(value);
-#if BASE_VER < 576279
- return base::IntToString(*val);
-#else
return base::NumberToString(*val);
-#endif
}
template <>
string BoxedValue::ValuePrinter<unsigned int>(const void* value) {
const unsigned int* val = reinterpret_cast<const unsigned int*>(value);
-#if BASE_VER < 576279
- return base::UintToString(*val);
-#else
return base::NumberToString(*val);
-#endif
}
template <>
string BoxedValue::ValuePrinter<int64_t>(const void* value) {
const int64_t* val = reinterpret_cast<const int64_t*>(value);
-#if BASE_VER < 576279
- return base::Int64ToString(*val);
-#else
return base::NumberToString(*val);
-#endif
}
template <>
string BoxedValue::ValuePrinter<uint64_t>(const void* value) {
const uint64_t* val = reinterpret_cast<const uint64_t*>(value);
-#if BASE_VER < 576279
- return base::Uint64ToString(*val);
-#else
return base::NumberToString(*val);
-#endif
}
template <>
@@ -97,11 +81,7 @@
template <>
string BoxedValue::ValuePrinter<double>(const void* value) {
const double* val = reinterpret_cast<const double*>(value);
-#if BASE_VER < 576279
- return base::DoubleToString(*val);
-#else
return base::NumberToString(*val);
-#endif
}
template <>
@@ -167,8 +147,6 @@
return "Rollback and powerwash";
case RollbackToTargetVersion::kRollbackAndRestoreIfPossible:
return "Rollback and restore if possible";
- case RollbackToTargetVersion::kRollbackOnlyIfRestorePossible:
- return "Rollback only if restore is possible";
case RollbackToTargetVersion::kMaxValue:
NOTREACHED();
return "Max value";
diff --git a/update_manager/boxed_value_unittest.cc b/update_manager/boxed_value_unittest.cc
index 2fa94ec..5b87a7b 100644
--- a/update_manager/boxed_value_unittest.cc
+++ b/update_manager/boxed_value_unittest.cc
@@ -168,11 +168,6 @@
BoxedValue(new ConnectionType(ConnectionType::kEthernet)).ToString());
EXPECT_EQ("wifi",
BoxedValue(new ConnectionType(ConnectionType::kWifi)).ToString());
- EXPECT_EQ("wimax",
- BoxedValue(new ConnectionType(ConnectionType::kWimax)).ToString());
- EXPECT_EQ(
- "bluetooth",
- BoxedValue(new ConnectionType(ConnectionType::kBluetooth)).ToString());
EXPECT_EQ(
"cellular",
BoxedValue(new ConnectionType(ConnectionType::kCellular)).ToString());
@@ -215,18 +210,13 @@
BoxedValue(new RollbackToTargetVersion(
RollbackToTargetVersion::kRollbackAndRestoreIfPossible))
.ToString());
- EXPECT_EQ(
- "Rollback only if restore is possible",
- BoxedValue(new RollbackToTargetVersion(
- RollbackToTargetVersion::kRollbackOnlyIfRestorePossible))
- .ToString());
}
TEST(UmBoxedValueTest, SetConnectionTypeToString) {
set<ConnectionType>* set1 = new set<ConnectionType>;
- set1->insert(ConnectionType::kWimax);
+ set1->insert(ConnectionType::kCellular);
set1->insert(ConnectionType::kEthernet);
- EXPECT_EQ("ethernet,wimax", BoxedValue(set1).ToString());
+ EXPECT_EQ("ethernet,cellular", BoxedValue(set1).ToString());
set<ConnectionType>* set2 = new set<ConnectionType>;
set2->insert(ConnectionType::kWifi);
diff --git a/update_manager/chromeos_policy.cc b/update_manager/chromeos_policy.cc
index 1fa8636..b96e29d 100644
--- a/update_manager/chromeos_policy.cc
+++ b/update_manager/chromeos_policy.cc
@@ -17,6 +17,7 @@
#include "update_engine/update_manager/chromeos_policy.h"
#include <algorithm>
+#include <memory>
#include <set>
#include <string>
#include <vector>
@@ -48,6 +49,7 @@
using std::min;
using std::set;
using std::string;
+using std::unique_ptr;
using std::vector;
namespace {
@@ -190,6 +192,10 @@
namespace chromeos_update_manager {
+unique_ptr<Policy> GetSystemPolicy() {
+ return std::make_unique<ChromeOSPolicy>();
+}
+
const NextUpdateCheckPolicyConstants
ChromeOSPolicy::kNextUpdateCheckPolicyConstants = {
.timeout_initial_interval = 7 * 60,
@@ -462,7 +468,7 @@
// TODO(garnold) The current logic generally treats the list of allowed
// connections coming from the device policy as a whitelist, meaning that it
// can only be used for enabling connections, but not disable them. Further,
-// certain connection types (like Bluetooth) cannot be enabled even by policy.
+// certain connection types cannot be enabled even by policy.
// In effect, the only thing that device policy can change is to enable
// updates over a cellular network (disabled by default). We may want to
// revisit this semantics, allowing greater flexibility in defining specific
@@ -493,10 +499,6 @@
*result = true;
bool device_policy_can_override = false;
switch (conn_type) {
- case ConnectionType::kBluetooth:
- *result = false;
- break;
-
case ConnectionType::kCellular:
*result = false;
device_policy_can_override = true;
@@ -560,8 +562,9 @@
if (policy_au_p2p_enabled_p) {
enabled = *policy_au_p2p_enabled_p;
} else {
- const string* policy_owner_p = ec->GetValue(dp_provider->var_owner());
- if (!policy_owner_p || policy_owner_p->empty())
+ const bool* policy_has_owner_p =
+ ec->GetValue(dp_provider->var_has_owner());
+ if (!policy_has_owner_p || !*policy_has_owner_p)
enabled = true;
}
}
diff --git a/update_manager/chromeos_policy_unittest.cc b/update_manager/chromeos_policy_unittest.cc
index 5341ebb..414ac0d 100644
--- a/update_manager/chromeos_policy_unittest.cc
+++ b/update_manager/chromeos_policy_unittest.cc
@@ -284,12 +284,6 @@
true, RollbackToTargetVersion::kRollbackAndRestoreIfPossible));
}
-TEST_F(UmChromeOSPolicyTest, UpdateCheckAllowedRollbackOnlyIfRestorePossible) {
- // We're not allowed to do rollback until we support data save and restore.
- EXPECT_FALSE(TestRollbackAllowed(
- true, RollbackToTargetVersion::kRollbackOnlyIfRestorePossible));
-}
-
TEST_F(UmChromeOSPolicyTest, UpdateCheckAllowedRollbackDisabled) {
EXPECT_FALSE(TestRollbackAllowed(true, RollbackToTargetVersion::kDisabled));
}
@@ -1371,7 +1365,7 @@
// Override specific device policy attributes.
fake_state_.device_policy_provider()->var_au_p2p_enabled()->reset(nullptr);
- fake_state_.device_policy_provider()->var_owner()->reset(nullptr);
+ fake_state_.device_policy_provider()->var_has_owner()->reset(new bool(false));
fake_state_.device_policy_provider()->var_http_downloads_enabled()->reset(
new bool(false));
@@ -1446,47 +1440,6 @@
EXPECT_TRUE(result);
}
-TEST_F(UmChromeOSPolicyTest, UpdateDownloadAllowedWimaxDefault) {
- // Wimax is always allowed.
-
- fake_state_.shill_provider()->var_conn_type()->reset(
- new ConnectionType(ConnectionType::kWifi));
-
- bool result;
- ExpectPolicyStatus(
- EvalStatus::kSucceeded, &Policy::UpdateDownloadAllowed, &result);
- EXPECT_TRUE(result);
-}
-
-TEST_F(UmChromeOSPolicyTest,
- UpdateCurrentConnectionNotAllowedBluetoothDefault) {
- // Bluetooth is never allowed.
-
- fake_state_.shill_provider()->var_conn_type()->reset(
- new ConnectionType(ConnectionType::kBluetooth));
-
- bool result;
- ExpectPolicyStatus(
- EvalStatus::kAskMeAgainLater, &Policy::UpdateDownloadAllowed, &result);
-}
-
-TEST_F(UmChromeOSPolicyTest,
- UpdateCurrentConnectionNotAllowedBluetoothPolicyCannotOverride) {
- // Bluetooth cannot be allowed even by policy.
-
- fake_state_.shill_provider()->var_conn_type()->reset(
- new ConnectionType(ConnectionType::kBluetooth));
- set<ConnectionType> allowed_connections;
- allowed_connections.insert(ConnectionType::kBluetooth);
- fake_state_.device_policy_provider()
- ->var_allowed_connection_types_for_update()
- ->reset(new set<ConnectionType>(allowed_connections));
-
- bool result;
- ExpectPolicyStatus(
- EvalStatus::kAskMeAgainLater, &Policy::UpdateDownloadAllowed, &result);
-}
-
TEST_F(UmChromeOSPolicyTest, UpdateCurrentConnectionNotAllowedCellularDefault) {
// Cellular is not allowed by default.
@@ -1616,7 +1569,7 @@
TEST_F(UmChromeOSPolicyTest, P2PEnabledAllowedDeviceEnterpriseEnrolled) {
fake_state_.device_policy_provider()->var_au_p2p_enabled()->reset(nullptr);
- fake_state_.device_policy_provider()->var_owner()->reset(nullptr);
+ fake_state_.device_policy_provider()->var_has_owner()->reset(new bool(false));
bool result;
ExpectPolicyStatus(EvalStatus::kSucceeded, &Policy::P2PEnabled, &result);
diff --git a/update_manager/device_policy_provider.h b/update_manager/device_policy_provider.h
index 873282e..b68fe96 100644
--- a/update_manager/device_policy_provider.h
+++ b/update_manager/device_policy_provider.h
@@ -66,9 +66,9 @@
virtual Variable<std::set<chromeos_update_engine::ConnectionType>>*
var_allowed_connection_types_for_update() = 0;
- // Variable stating the name of the device owner. For enterprise enrolled
- // devices, this will be an empty string.
- virtual Variable<std::string>* var_owner() = 0;
+ // Variable stating whether the device has an owner. For enterprise enrolled
+ // devices, this will be false as the device owner has an empty string.
+ virtual Variable<bool>* var_has_owner() = 0;
virtual Variable<bool>* var_http_downloads_enabled() = 0;
diff --git a/update_manager/enterprise_device_policy_impl.cc b/update_manager/enterprise_device_policy_impl.cc
index a3430ef..dea38ba 100644
--- a/update_manager/enterprise_device_policy_impl.cc
+++ b/update_manager/enterprise_device_policy_impl.cc
@@ -91,22 +91,18 @@
case RollbackToTargetVersion::kDisabled:
LOG(INFO) << "Policy disables rollbacks.";
result->rollback_allowed = false;
+ result->rollback_data_save_requested = false;
break;
case RollbackToTargetVersion::kRollbackAndPowerwash:
LOG(INFO) << "Policy allows rollbacks with powerwash.";
result->rollback_allowed = true;
+ result->rollback_data_save_requested = false;
break;
case RollbackToTargetVersion::kRollbackAndRestoreIfPossible:
LOG(INFO)
<< "Policy allows rollbacks, also tries to restore if possible.";
- // We don't support restore yet, but policy still allows rollback.
result->rollback_allowed = true;
- break;
- case RollbackToTargetVersion::kRollbackOnlyIfRestorePossible:
- LOG(INFO) << "Policy only allows rollbacks if restore is possible.";
- // We don't support restore yet, policy doesn't allow rollback in this
- // case.
- result->rollback_allowed = false;
+ result->rollback_data_save_requested = true;
break;
case RollbackToTargetVersion::kMaxValue:
NOTREACHED();
diff --git a/update_manager/evaluation_context.h b/update_manager/evaluation_context.h
index c68c430..5c5b013 100644
--- a/update_manager/evaluation_context.h
+++ b/update_manager/evaluation_context.h
@@ -23,7 +23,6 @@
#include <base/bind.h>
#include <base/callback.h>
-#include <base/memory/ref_counted.h>
#include <base/memory/weak_ptr.h>
#include <base/time/time.h>
#include <brillo/message_loops/message_loop.h>
@@ -46,7 +45,7 @@
//
// Example:
//
-// scoped_refptr<EvaluationContext> ec = new EvaluationContext(...);
+// auto ec = std::make_shared<EvaluationContext>(...);
//
// ...
// // The following call to ResetEvaluation() is optional. Use it to reset the
@@ -62,8 +61,7 @@
// // If the provided |closure| wants to re-evaluate the policy, it should
// // call ec->ResetEvaluation() to start a new evaluation.
//
-class EvaluationContext : public base::RefCounted<EvaluationContext>,
- private BaseVariable::ObserverInterface {
+class EvaluationContext : private BaseVariable::ObserverInterface {
public:
EvaluationContext(
chromeos_update_engine::ClockInterface* clock,
diff --git a/update_manager/evaluation_context_unittest.cc b/update_manager/evaluation_context_unittest.cc
index eb42eb7..cd0b2e6 100644
--- a/update_manager/evaluation_context_unittest.cc
+++ b/update_manager/evaluation_context_unittest.cc
@@ -39,6 +39,7 @@
using brillo::MessageLoopRunMaxIterations;
using brillo::MessageLoopRunUntil;
using chromeos_update_engine::FakeClock;
+using std::shared_ptr;
using std::string;
using std::unique_ptr;
using testing::_;
@@ -59,14 +60,14 @@
}
template <typename T>
-void ReadVar(scoped_refptr<EvaluationContext> ec, Variable<T>* var) {
+void ReadVar(shared_ptr<EvaluationContext> ec, Variable<T>* var) {
ec->GetValue(var);
}
// Runs |evaluation|; if the value pointed by |count_p| is greater than zero,
// decrement it and schedule a reevaluation; otherwise, writes true to |done_p|.
void EvaluateRepeatedly(Closure evaluation,
- scoped_refptr<EvaluationContext> ec,
+ shared_ptr<EvaluationContext> ec,
int* count_p,
bool* done_p) {
evaluation.Run();
@@ -92,11 +93,11 @@
fake_clock_.SetMonotonicTime(Time::FromTimeT(1240428300));
// Mar 2, 2006 1:23:45 UTC.
fake_clock_.SetWallclockTime(Time::FromTimeT(1141262625));
- eval_ctx_ = new EvaluationContext(
+ eval_ctx_.reset(new EvaluationContext(
&fake_clock_,
default_timeout_,
default_timeout_,
- unique_ptr<base::Callback<void(EvaluationContext*)>>(nullptr));
+ unique_ptr<base::Callback<void(EvaluationContext*)>>(nullptr)));
}
void TearDown() override {
@@ -126,7 +127,7 @@
brillo::FakeMessageLoop loop_{nullptr};
FakeClock fake_clock_;
- scoped_refptr<EvaluationContext> eval_ctx_;
+ shared_ptr<EvaluationContext> eval_ctx_;
// FakeVariables used for testing the EvaluationContext. These are required
// here to prevent them from going away *before* the EvaluationContext under
@@ -210,13 +211,7 @@
fake_const_var_.reset(new string("Hello world!"));
EXPECT_EQ(*eval_ctx_->GetValue(&fake_const_var_), "Hello world!");
- EXPECT_FALSE(eval_ctx_->RunOnValueChangeOrTimeout(
-#if BASE_VER < 576279
- Bind(&base::DoNothing)
-#else
- base::DoNothing()
-#endif
- ));
+ EXPECT_FALSE(eval_ctx_->RunOnValueChangeOrTimeout(base::DoNothing()));
}
// Test that reevaluation occurs when an async variable it depends on changes.
@@ -286,23 +281,11 @@
EXPECT_TRUE(value);
// Ensure that we cannot reschedule an evaluation.
- EXPECT_FALSE(eval_ctx_->RunOnValueChangeOrTimeout(
-#if BASE_VER < 576279
- Bind(&base::DoNothing)
-#else
- base::DoNothing()
-#endif
- ));
+ EXPECT_FALSE(eval_ctx_->RunOnValueChangeOrTimeout(base::DoNothing()));
// Ensure that we can reschedule an evaluation after resetting expiration.
eval_ctx_->ResetExpiration();
- EXPECT_TRUE(eval_ctx_->RunOnValueChangeOrTimeout(
-#if BASE_VER < 576279
- Bind(&base::DoNothing)
-#else
- base::DoNothing()
-#endif
- ));
+ EXPECT_TRUE(eval_ctx_->RunOnValueChangeOrTimeout(base::DoNothing()));
}
// Test that we clear the events when destroying the EvaluationContext.
@@ -348,13 +331,7 @@
fake_poll_var_.reset(new string("Polled value"));
eval_ctx_->GetValue(&fake_async_var_);
eval_ctx_->GetValue(&fake_poll_var_);
- EXPECT_TRUE(eval_ctx_->RunOnValueChangeOrTimeout(
-#if BASE_VER < 576279
- Bind(&base::DoNothing)
-#else
- base::DoNothing()
-#endif
- ));
+ EXPECT_TRUE(eval_ctx_->RunOnValueChangeOrTimeout(base::DoNothing()));
// TearDown() checks for leaked observers on this async_variable, which means
// that our object is still alive after removing its reference.
}
@@ -446,13 +423,7 @@
// The "false" from IsWallclockTimeGreaterThan means that's not that timestamp
// yet, so this should schedule a callback for when that happens.
- EXPECT_TRUE(eval_ctx_->RunOnValueChangeOrTimeout(
-#if BASE_VER < 576279
- Bind(&base::DoNothing)
-#else
- base::DoNothing()
-#endif
- ));
+ EXPECT_TRUE(eval_ctx_->RunOnValueChangeOrTimeout(base::DoNothing()));
}
TEST_F(UmEvaluationContextTest,
@@ -462,13 +433,7 @@
// The "false" from IsMonotonicTimeGreaterThan means that's not that timestamp
// yet, so this should schedule a callback for when that happens.
- EXPECT_TRUE(eval_ctx_->RunOnValueChangeOrTimeout(
-#if BASE_VER < 576279
- Bind(&base::DoNothing)
-#else
- base::DoNothing()
-#endif
- ));
+ EXPECT_TRUE(eval_ctx_->RunOnValueChangeOrTimeout(base::DoNothing()));
}
TEST_F(UmEvaluationContextTest,
@@ -481,13 +446,7 @@
fake_clock_.GetWallclockTime() - TimeDelta::FromSeconds(1)));
// Callback should not be scheduled.
- EXPECT_FALSE(eval_ctx_->RunOnValueChangeOrTimeout(
-#if BASE_VER < 576279
- Bind(&base::DoNothing)
-#else
- base::DoNothing()
-#endif
- ));
+ EXPECT_FALSE(eval_ctx_->RunOnValueChangeOrTimeout(base::DoNothing()));
}
TEST_F(UmEvaluationContextTest,
@@ -500,13 +459,7 @@
fake_clock_.GetMonotonicTime() - TimeDelta::FromSeconds(1)));
// Callback should not be scheduled.
- EXPECT_FALSE(eval_ctx_->RunOnValueChangeOrTimeout(
-#if BASE_VER < 576279
- Bind(&base::DoNothing)
-#else
- base::DoNothing()
-#endif
- ));
+ EXPECT_FALSE(eval_ctx_->RunOnValueChangeOrTimeout(base::DoNothing()));
}
TEST_F(UmEvaluationContextTest, DumpContext) {
diff --git a/update_manager/fake_device_policy_provider.h b/update_manager/fake_device_policy_provider.h
index 7cd4d7b..86bdef1 100644
--- a/update_manager/fake_device_policy_provider.h
+++ b/update_manager/fake_device_policy_provider.h
@@ -68,7 +68,7 @@
return &var_allowed_connection_types_for_update_;
}
- FakeVariable<std::string>* var_owner() override { return &var_owner_; }
+ FakeVariable<bool>* var_has_owner() override { return &var_has_owner_; }
FakeVariable<bool>* var_http_downloads_enabled() override {
return &var_http_downloads_enabled_;
@@ -110,7 +110,7 @@
FakeVariable<std::set<chromeos_update_engine::ConnectionType>>
var_allowed_connection_types_for_update_{
"allowed_connection_types_for_update", kVariableModePoll};
- FakeVariable<std::string> var_owner_{"owner", kVariableModePoll};
+ FakeVariable<bool> var_has_owner_{"owner", kVariableModePoll};
FakeVariable<bool> var_http_downloads_enabled_{"http_downloads_enabled",
kVariableModePoll};
FakeVariable<bool> var_au_p2p_enabled_{"au_p2p_enabled", kVariableModePoll};
diff --git a/update_manager/mock_update_manager.h b/update_manager/mock_update_manager.h
new file mode 100644
index 0000000..07e4689
--- /dev/null
+++ b/update_manager/mock_update_manager.h
@@ -0,0 +1,44 @@
+//
+// Copyright (C) 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_MOCK_UPDATE_MANAGER_H
+#define UPDATE_ENGINE_MOCK_UPDATE_MANAGER_H
+
+#include <string>
+
+#include "update_engine/update_manager/update_manager.h"
+
+#include <gmock/gmock.h>
+
+namespace chromeos_update_manager {
+
+class MockUpdateManager : public UpdateManager {
+ public:
+ MockUpdateManager()
+ : UpdateManager(nullptr, base::TimeDelta(), base::TimeDelta(), nullptr) {}
+
+ MOCK_METHOD2(
+ AsyncPolicyRequestUpdateCheckAllowed,
+ void(base::Callback<void(EvalStatus, const UpdateCheckParams& result)>
+ callback,
+ EvalStatus (Policy::*policy_method)(
+ EvaluationContext*, State*, std::string*, UpdateCheckParams*)
+ const));
+};
+
+} // namespace chromeos_update_manager
+
+#endif // UPDATE_ENGINE_MOCK_UPDATE_MANAGER_H
diff --git a/update_manager/policy.h b/update_manager/policy.h
index 5d65d9a..844a4d0 100644
--- a/update_manager/policy.h
+++ b/update_manager/policy.h
@@ -17,6 +17,7 @@
#ifndef UPDATE_ENGINE_UPDATE_MANAGER_POLICY_H_
#define UPDATE_ENGINE_UPDATE_MANAGER_POLICY_H_
+#include <memory>
#include <string>
#include <tuple>
#include <vector>
@@ -50,6 +51,8 @@
std::string target_version_prefix;
// Specifies whether rollback images are allowed by device policy.
bool rollback_allowed;
+ // Specifies if rollbacks should attempt to preserve some system state.
+ bool rollback_data_save_requested;
// Specifies the number of Chrome milestones rollback should be allowed,
// starting from the stable version at any time. Value is -1 if unspecified
// (e.g. no device policy is available yet), in this case no version
@@ -308,6 +311,11 @@
DISALLOW_COPY_AND_ASSIGN(Policy);
};
+// Get system dependent (Chrome OS vs. Android) policy
+// implementation. Implementations can be found in chromeos_policy.cc and
+// android_things_policy.cc.
+std::unique_ptr<Policy> GetSystemPolicy();
+
} // namespace chromeos_update_manager
#endif // UPDATE_ENGINE_UPDATE_MANAGER_POLICY_H_
diff --git a/update_manager/policy_test_utils.cc b/update_manager/policy_test_utils.cc
index 5491e00..653592a 100644
--- a/update_manager/policy_test_utils.cc
+++ b/update_manager/policy_test_utils.cc
@@ -34,7 +34,8 @@
void UmPolicyTestBase::SetUp() {
loop_.SetAsCurrent();
SetUpDefaultClock();
- eval_ctx_ = new EvaluationContext(&fake_clock_, TimeDelta::FromSeconds(5));
+ eval_ctx_.reset(
+ new EvaluationContext(&fake_clock_, TimeDelta::FromSeconds(5)));
SetUpDefaultState();
}
diff --git a/update_manager/policy_test_utils.h b/update_manager/policy_test_utils.h
index eb5758f..cd94907 100644
--- a/update_manager/policy_test_utils.h
+++ b/update_manager/policy_test_utils.h
@@ -93,7 +93,7 @@
brillo::FakeMessageLoop loop_{nullptr};
chromeos_update_engine::FakeClock fake_clock_;
FakeState fake_state_;
- scoped_refptr<EvaluationContext> eval_ctx_;
+ std::shared_ptr<EvaluationContext> eval_ctx_;
std::unique_ptr<Policy> policy_;
};
diff --git a/update_manager/real_device_policy_provider.cc b/update_manager/real_device_policy_provider.cc
index 586ee3e..781e2ac 100644
--- a/update_manager/real_device_policy_provider.cc
+++ b/update_manager/real_device_policy_provider.cc
@@ -104,11 +104,12 @@
}
template <typename T>
-void RealDevicePolicyProvider::UpdateVariable(
- AsyncCopyVariable<T>* var, bool (DevicePolicy::*getter_method)(T*) const) {
+void RealDevicePolicyProvider::UpdateVariable(AsyncCopyVariable<T>* var,
+ bool (DevicePolicy::*getter)(T*)
+ const) {
T new_value;
if (policy_provider_->device_policy_is_loaded() &&
- (policy_provider_->GetDevicePolicy().*getter_method)(&new_value)) {
+ (policy_provider_->GetDevicePolicy().*getter)(&new_value)) {
var->SetValue(new_value);
} else {
var->UnsetValue();
@@ -118,10 +119,10 @@
template <typename T>
void RealDevicePolicyProvider::UpdateVariable(
AsyncCopyVariable<T>* var,
- bool (RealDevicePolicyProvider::*getter_method)(T*) const) {
+ bool (RealDevicePolicyProvider::*getter)(T*) const) {
T new_value;
if (policy_provider_->device_policy_is_loaded() &&
- (this->*getter_method)(&new_value)) {
+ (this->*getter)(&new_value)) {
var->SetValue(new_value);
} else {
var->UnsetValue();
@@ -198,6 +199,15 @@
return true;
}
+bool RealDevicePolicyProvider::ConvertHasOwner(bool* has_owner) const {
+ string owner;
+ if (!policy_provider_->GetDevicePolicy().GetOwner(&owner)) {
+ return false;
+ }
+ *has_owner = !owner.empty();
+ return true;
+}
+
void RealDevicePolicyProvider::RefreshDevicePolicy() {
if (!policy_provider_->Reload()) {
LOG(INFO) << "No device policies/settings present.";
@@ -225,7 +235,7 @@
UpdateVariable(
&var_allowed_connection_types_for_update_,
&RealDevicePolicyProvider::ConvertAllowedConnectionTypesForUpdate);
- UpdateVariable(&var_owner_, &DevicePolicy::GetOwner);
+ UpdateVariable(&var_has_owner_, &RealDevicePolicyProvider::ConvertHasOwner);
UpdateVariable(&var_http_downloads_enabled_,
&DevicePolicy::GetHttpDownloadsEnabled);
UpdateVariable(&var_au_p2p_enabled_, &DevicePolicy::GetAuP2PEnabled);
diff --git a/update_manager/real_device_policy_provider.h b/update_manager/real_device_policy_provider.h
index bda4cff..9da052d 100644
--- a/update_manager/real_device_policy_provider.h
+++ b/update_manager/real_device_policy_provider.h
@@ -34,7 +34,7 @@
namespace chromeos_update_manager {
-// DevicePolicyProvider concrete implementation.
+// |DevicePolicyProvider| concrete implementation.
class RealDevicePolicyProvider : public DevicePolicyProvider {
public:
#if USE_DBUS
@@ -89,7 +89,7 @@
return &var_allowed_connection_types_for_update_;
}
- Variable<std::string>* var_owner() override { return &var_owner_; }
+ Variable<bool>* var_has_owner() override { return &var_has_owner_; }
Variable<bool>* var_http_downloads_enabled() override {
return &var_http_downloads_enabled_;
@@ -113,12 +113,13 @@
FRIEND_TEST(UmRealDevicePolicyProviderTest, RefreshScheduledTest);
FRIEND_TEST(UmRealDevicePolicyProviderTest, NonExistentDevicePolicyReloaded);
FRIEND_TEST(UmRealDevicePolicyProviderTest, ValuesUpdated);
+ FRIEND_TEST(UmRealDevicePolicyProviderTest, HasOwnerConverted);
- // A static handler for the PropertyChangedCompleted signal from the session
+ // A static handler for the |PropertyChangedCompleted| signal from the session
// manager used as a callback.
void OnPropertyChangedCompletedSignal(const std::string& success);
- // Called when the signal in UpdateEngineLibcrosProxyResolvedInterface is
+ // Called when the signal in |UpdateEngineLibcrosProxyResolvedInterface| is
// connected.
void OnSignalConnected(const std::string& interface_name,
const std::string& signal_name,
@@ -134,36 +135,41 @@
// passed, which is a DevicePolicy getter method.
template <typename T>
void UpdateVariable(AsyncCopyVariable<T>* var,
- bool (policy::DevicePolicy::*getter_method)(T*) const);
+ bool (policy::DevicePolicy::*getter)(T*) const);
// Updates the async variable |var| based on the result value of the getter
// method passed, which is a wrapper getter on this class.
template <typename T>
void UpdateVariable(AsyncCopyVariable<T>* var,
- bool (RealDevicePolicyProvider::*getter_method)(T*)
- const);
+ bool (RealDevicePolicyProvider::*getter)(T*) const);
- // Wrapper for DevicePolicy::GetRollbackToTargetVersion() that converts the
- // result to RollbackToTargetVersion.
+ // Wrapper for |DevicePolicy::GetRollbackToTargetVersion()| that converts the
+ // result to |RollbackToTargetVersion|.
bool ConvertRollbackToTargetVersion(
RollbackToTargetVersion* rollback_to_target_version) const;
- // Wrapper for DevicePolicy::GetScatterFactorInSeconds() that converts the
- // result to a base::TimeDelta. It returns the same value as
- // GetScatterFactorInSeconds().
+ // Wrapper for |DevicePolicy::GetScatterFactorInSeconds()| that converts the
+ // result to a |base::TimeDelta|. It returns the same value as
+ // |GetScatterFactorInSeconds()|.
bool ConvertScatterFactor(base::TimeDelta* scatter_factor) const;
- // Wrapper for DevicePolicy::GetAllowedConnectionTypesForUpdate() that
- // converts the result to a set of ConnectionType elements instead of strings.
+ // Wrapper for |DevicePolicy::GetAllowedConnectionTypesForUpdate()| that
+ // converts the result to a set of |ConnectionType| elements instead of
+ // strings.
bool ConvertAllowedConnectionTypesForUpdate(
std::set<chromeos_update_engine::ConnectionType>* allowed_types) const;
- // Wrapper for DevicePolicy::GetUpdateTimeRestrictions() that converts
- // the DevicePolicy::WeeklyTimeInterval structs to WeeklyTimeInterval objects,
- // which offer more functionality.
+ // Wrapper for |DevicePolicy::GetUpdateTimeRestrictions()| that converts
+ // the |DevicePolicy::WeeklyTimeInterval| structs to |WeeklyTimeInterval|
+ // objects, which offer more functionality.
bool ConvertDisallowedTimeIntervals(
WeeklyTimeIntervalVector* disallowed_intervals_out) const;
+ // Wrapper for |DevicePolicy::GetOwner()| that converts the result to a
+ // boolean of whether the device has an owner. (Enterprise enrolled
+ // devices do not have an owner).
+ bool ConvertHasOwner(bool* has_owner) const;
+
// Used for fetching information about the device policy.
policy::PolicyProvider* policy_provider_;
@@ -181,7 +187,7 @@
AsyncCopyVariable<bool> var_device_policy_is_loaded_{"policy_is_loaded",
false};
- // Variables mapping the exposed methods from the policy::DevicePolicy.
+ // Variables mapping the exposed methods from the |policy::DevicePolicy|.
AsyncCopyVariable<std::string> var_release_channel_{"release_channel"};
AsyncCopyVariable<bool> var_release_channel_delegated_{
"release_channel_delegated"};
@@ -196,7 +202,7 @@
AsyncCopyVariable<std::set<chromeos_update_engine::ConnectionType>>
var_allowed_connection_types_for_update_{
"allowed_connection_types_for_update"};
- AsyncCopyVariable<std::string> var_owner_{"owner"};
+ AsyncCopyVariable<bool> var_has_owner_{"owner"};
AsyncCopyVariable<bool> var_http_downloads_enabled_{"http_downloads_enabled"};
AsyncCopyVariable<bool> var_au_p2p_enabled_{"au_p2p_enabled"};
AsyncCopyVariable<bool> var_allow_kiosk_app_control_chrome_version_{
diff --git a/update_manager/real_device_policy_provider_unittest.cc b/update_manager/real_device_policy_provider_unittest.cc
index 0d7b0d0..84debd1 100644
--- a/update_manager/real_device_policy_provider_unittest.cc
+++ b/update_manager/real_device_policy_provider_unittest.cc
@@ -186,7 +186,7 @@
UmTestUtils::ExpectVariableNotSet(provider_->var_scatter_factor());
UmTestUtils::ExpectVariableNotSet(
provider_->var_allowed_connection_types_for_update());
- UmTestUtils::ExpectVariableNotSet(provider_->var_owner());
+ UmTestUtils::ExpectVariableNotSet(provider_->var_has_owner());
UmTestUtils::ExpectVariableNotSet(provider_->var_http_downloads_enabled());
UmTestUtils::ExpectVariableNotSet(provider_->var_au_p2p_enabled());
UmTestUtils::ExpectVariableNotSet(
@@ -230,6 +230,26 @@
string("myapp"), provider_->var_auto_launched_kiosk_app_id());
}
+TEST_F(UmRealDevicePolicyProviderTest, HasOwnerConverted) {
+ SetUpExistentDevicePolicy();
+ EXPECT_TRUE(provider_->Init());
+ loop_.RunOnce(false);
+ Mock::VerifyAndClearExpectations(&mock_policy_provider_);
+
+ EXPECT_CALL(mock_device_policy_, GetOwner(_))
+ .Times(2)
+ .WillOnce(DoAll(SetArgPointee<0>(string("")), Return(true)))
+ .WillOnce(DoAll(SetArgPointee<0>(string("abc@test.org")), Return(true)));
+
+ // Enterprise enrolled device.
+ provider_->RefreshDevicePolicy();
+ UmTestUtils::ExpectVariableHasValue(false, provider_->var_has_owner());
+
+ // Has a device owner.
+ provider_->RefreshDevicePolicy();
+ UmTestUtils::ExpectVariableHasValue(true, provider_->var_has_owner());
+}
+
TEST_F(UmRealDevicePolicyProviderTest, RollbackToTargetVersionConverted) {
SetUpExistentDevicePolicy();
EXPECT_CALL(mock_device_policy_, GetRollbackToTargetVersion(_))
@@ -324,14 +344,14 @@
#else
.Times(1)
#endif // USE_DBUS
- .WillRepeatedly(DoAll(
- SetArgPointee<0>(set<string>{"bluetooth", "wifi", "not-a-type"}),
- Return(true)));
+ .WillRepeatedly(
+ DoAll(SetArgPointee<0>(set<string>{"ethernet", "wifi", "not-a-type"}),
+ Return(true)));
EXPECT_TRUE(provider_->Init());
loop_.RunOnce(false);
UmTestUtils::ExpectVariableHasValue(
- set<ConnectionType>{ConnectionType::kWifi, ConnectionType::kBluetooth},
+ set<ConnectionType>{ConnectionType::kWifi, ConnectionType::kEthernet},
provider_->var_allowed_connection_types_for_update());
}
diff --git a/update_manager/real_shill_provider_unittest.cc b/update_manager/real_shill_provider_unittest.cc
index dcc729a..505f2f8 100644
--- a/update_manager/real_shill_provider_unittest.cc
+++ b/update_manager/real_shill_provider_unittest.cc
@@ -51,8 +51,6 @@
// Fake service paths.
const char* const kFakeEthernetServicePath = "/fake/ethernet/service";
const char* const kFakeWifiServicePath = "/fake/wifi/service";
-const char* const kFakeWimaxServicePath = "/fake/wimax/service";
-const char* const kFakeBluetoothServicePath = "/fake/bluetooth/service";
const char* const kFakeCellularServicePath = "/fake/cellular/service";
const char* const kFakeVpnServicePath = "/fake/vpn/service";
const char* const kFakeUnknownServicePath = "/fake/unknown/service";
@@ -317,21 +315,6 @@
kFakeWifiServicePath, shill::kTypeWifi, ConnectionType::kWifi);
}
-// Test that Wimax connection is identified correctly.
-TEST_F(UmRealShillProviderTest, ReadConnTypeWimax) {
- InitWithDefaultService("/");
- SetupConnectionAndTestType(
- kFakeWimaxServicePath, shill::kTypeWimax, ConnectionType::kWimax);
-}
-
-// Test that Bluetooth connection is identified correctly.
-TEST_F(UmRealShillProviderTest, ReadConnTypeBluetooth) {
- InitWithDefaultService("/");
- SetupConnectionAndTestType(kFakeBluetoothServicePath,
- shill::kTypeBluetooth,
- ConnectionType::kBluetooth);
-}
-
// Test that Cellular connection is identified correctly.
TEST_F(UmRealShillProviderTest, ReadConnTypeCellular) {
InitWithDefaultService("/");
diff --git a/update_manager/rollback_prefs.h b/update_manager/rollback_prefs.h
index 11d09d6..9567701 100644
--- a/update_manager/rollback_prefs.h
+++ b/update_manager/rollback_prefs.h
@@ -31,9 +31,8 @@
kDisabled = 1,
kRollbackAndPowerwash = 2,
kRollbackAndRestoreIfPossible = 3,
- kRollbackOnlyIfRestorePossible = 4,
// This value must be the last entry.
- kMaxValue = 5
+ kMaxValue = 4
};
} // namespace chromeos_update_manager
diff --git a/update_manager/staging_utils.cc b/update_manager/staging_utils.cc
index 4835ab2..f4f685c 100644
--- a/update_manager/staging_utils.cc
+++ b/update_manager/staging_utils.cc
@@ -32,7 +32,6 @@
using base::TimeDelta;
using chromeos_update_engine::kPrefsWallClockStagingWaitPeriod;
using chromeos_update_engine::PrefsInterface;
-using chromeos_update_engine::SystemState;
using policy::DevicePolicy;
namespace chromeos_update_manager {
diff --git a/update_manager/update_manager-inl.h b/update_manager/update_manager-inl.h
index e9dee3f..a1d172d 100644
--- a/update_manager/update_manager-inl.h
+++ b/update_manager/update_manager-inl.h
@@ -78,7 +78,7 @@
template <typename R, typename... Args>
void UpdateManager::OnPolicyReadyToEvaluate(
- scoped_refptr<EvaluationContext> ec,
+ std::shared_ptr<EvaluationContext> ec,
base::Callback<void(EvalStatus status, const R& result)> callback,
EvalStatus (Policy::*policy_method)(
EvaluationContext*, State*, std::string*, R*, Args...) const,
@@ -119,8 +119,7 @@
EvaluationContext*, State*, std::string*, R*, ExpectedArgs...) const,
R* result,
ActualArgs... args) {
- scoped_refptr<EvaluationContext> ec(
- new EvaluationContext(clock_, evaluation_timeout_));
+ auto ec = std::make_shared<EvaluationContext>(clock_, evaluation_timeout_);
// A PolicyRequest always consists on a single evaluation on a new
// EvaluationContext.
// IMPORTANT: To ensure that ActualArgs can be converted to ExpectedArgs, we
@@ -141,7 +140,7 @@
EvalStatus (Policy::*policy_method)(
EvaluationContext*, State*, std::string*, R*, ExpectedArgs...) const,
ActualArgs... args) {
- scoped_refptr<EvaluationContext> ec = new EvaluationContext(
+ auto ec = std::make_shared<EvaluationContext>(
clock_,
evaluation_timeout_,
expiration_timeout_,
@@ -149,7 +148,7 @@
new base::Callback<void(EvaluationContext*)>(
base::Bind(&UpdateManager::UnregisterEvalContext,
weak_ptr_factory_.GetWeakPtr()))));
- if (!ec_repo_.insert(ec.get()).second) {
+ if (!ec_repo_.insert(ec).second) {
LOG(ERROR) << "Failed to register evaluation context; this is a bug.";
}
diff --git a/update_manager/update_manager.cc b/update_manager/update_manager.cc
index 5dfc09c..2974d7d 100644
--- a/update_manager/update_manager.cc
+++ b/update_manager/update_manager.cc
@@ -15,12 +15,6 @@
//
#include "update_engine/update_manager/update_manager.h"
-
-#ifdef __ANDROID__
-#include "update_engine/update_manager/android_things_policy.h"
-#else
-#include "update_engine/update_manager/chromeos_policy.h"
-#endif // __ANDROID__
#include "update_engine/update_manager/state.h"
namespace chromeos_update_manager {
@@ -29,18 +23,13 @@
base::TimeDelta evaluation_timeout,
base::TimeDelta expiration_timeout,
State* state)
- : default_policy_(clock),
+ : policy_(GetSystemPolicy()),
+ default_policy_(clock),
state_(state),
clock_(clock),
evaluation_timeout_(evaluation_timeout),
expiration_timeout_(expiration_timeout),
- weak_ptr_factory_(this) {
-#ifdef __ANDROID__
- policy_.reset(new AndroidThingsPolicy());
-#else
- policy_.reset(new ChromeOSPolicy());
-#endif // __ANDROID__
-}
+ weak_ptr_factory_(this) {}
UpdateManager::~UpdateManager() {
// Remove pending main loop events associated with any of the outstanding
@@ -50,8 +39,19 @@
ec->RemoveObserversAndTimeout();
}
+void UpdateManager::AsyncPolicyRequestUpdateCheckAllowed(
+ base::Callback<void(EvalStatus, const UpdateCheckParams& result)> callback,
+ EvalStatus (Policy::*policy_method)(
+ EvaluationContext*, State*, std::string*, UpdateCheckParams*) const) {
+ AsyncPolicyRequest(callback, policy_method);
+}
+
void UpdateManager::UnregisterEvalContext(EvaluationContext* ec) {
- if (!ec_repo_.erase(ec)) {
+ // Since |ec_repo_|'s compare function is based on the value of the raw
+ // pointer |ec|, we can just create a |shared_ptr| here and pass it along to
+ // be erased.
+ if (!ec_repo_.erase(
+ std::shared_ptr<EvaluationContext>(ec, [](EvaluationContext*) {}))) {
LOG(ERROR) << "Unregistering an unknown evaluation context, this is a bug.";
}
}
diff --git a/update_manager/update_manager.h b/update_manager/update_manager.h
index b0fd97f..8ab61d0 100644
--- a/update_manager/update_manager.h
+++ b/update_manager/update_manager.h
@@ -22,7 +22,6 @@
#include <string>
#include <base/callback.h>
-#include <base/memory/ref_counted.h>
#include <base/time/time.h>
#include "update_engine/common/clock_interface.h"
@@ -33,17 +32,27 @@
namespace chromeos_update_manager {
-// Comparator for scoped_refptr objects.
-template <typename T>
-struct ScopedRefPtrLess {
- bool operator()(const scoped_refptr<T>& first,
- const scoped_refptr<T>& second) const {
- return first.get() < second.get();
- }
+// Please do not move this class into a new file for simplicity.
+// This pure virtual class is purely created for purpose of testing. The reason
+// was that |UpdateManager|'s member functions are templatized, which does not
+// play nicely when testing (mocking + faking). Whenever a specialized member of
+// |UpdateManager| must be tested, please add a specialized template member
+// function within this class for testing.
+class SpecializedPolicyRequestInterface {
+ public:
+ virtual ~SpecializedPolicyRequestInterface() = default;
+
+ virtual void AsyncPolicyRequestUpdateCheckAllowed(
+ base::Callback<void(EvalStatus, const UpdateCheckParams& result)>
+ callback,
+ EvalStatus (Policy::*policy_method)(EvaluationContext*,
+ State*,
+ std::string*,
+ UpdateCheckParams*) const) = 0;
};
// The main Update Manager singleton class.
-class UpdateManager {
+class UpdateManager : public SpecializedPolicyRequestInterface {
public:
// Creates the UpdateManager instance, assuming ownership on the provided
// |state|.
@@ -91,6 +100,14 @@
EvaluationContext*, State*, std::string*, R*, ExpectedArgs...) const,
ActualArgs... args);
+ void AsyncPolicyRequestUpdateCheckAllowed(
+ base::Callback<void(EvalStatus, const UpdateCheckParams& result)>
+ callback,
+ EvalStatus (Policy::*policy_method)(EvaluationContext*,
+ State*,
+ std::string*,
+ UpdateCheckParams*) const) override;
+
protected:
// The UpdateManager receives ownership of the passed Policy instance.
void set_policy(const Policy* policy) { policy_.reset(policy); }
@@ -125,7 +142,7 @@
// the evaluation will be re-scheduled to be called later.
template <typename R, typename... Args>
void OnPolicyReadyToEvaluate(
- scoped_refptr<EvaluationContext> ec,
+ std::shared_ptr<EvaluationContext> ec,
base::Callback<void(EvalStatus status, const R& result)> callback,
EvalStatus (Policy::*policy_method)(
EvaluationContext*, State*, std::string*, R*, Args...) const,
@@ -159,9 +176,7 @@
// destructed; alternatively, when the UpdateManager instance is destroyed, it
// will remove all pending events associated with all outstanding contexts
// (which should, in turn, trigger their destruction).
- std::set<scoped_refptr<EvaluationContext>,
- ScopedRefPtrLess<EvaluationContext>>
- ec_repo_;
+ std::set<std::shared_ptr<EvaluationContext>> ec_repo_;
base::WeakPtrFactory<UpdateManager> weak_ptr_factory_;
diff --git a/update_metadata.proto b/update_metadata.proto
index 4b4c327..e6a067e 100644
--- a/update_metadata.proto
+++ b/update_metadata.proto
@@ -78,7 +78,7 @@
// new partition.
// - ZERO: Write zeros to the destination dst_extents.
// - DISCARD: Discard the destination dst_extents blocks on the physical medium.
-// the data read from those block is undefined.
+// the data read from those blocks is undefined.
// - REPLACE_XZ: Replace the dst_extents with the contents of the attached
// xz file after decompression. The xz file should only use crc32 or no crc at
// all to be compatible with xz-embedded.
@@ -167,10 +167,10 @@
message InstallOperation {
enum Type {
- REPLACE = 0; // Replace destination extents w/ attached data
- REPLACE_BZ = 1; // Replace destination extents w/ attached bzipped data
- MOVE = 2 [deprecated = true]; // Move source extents to destination extents
- BSDIFF = 3 [deprecated = true]; // The data is a bsdiff binary diff
+ REPLACE = 0; // Replace destination extents w/ attached data.
+ REPLACE_BZ = 1; // Replace destination extents w/ attached bzipped data.
+ MOVE = 2 [deprecated = true]; // Move source extents to target extents.
+ BSDIFF = 3 [deprecated = true]; // The data is a bsdiff binary diff.
// On minor version 2 or newer, these operations are supported:
SOURCE_COPY = 4; // Copy from source to target partition
@@ -323,8 +323,8 @@
// Only present in major version = 1. List of install operations for the
// kernel and rootfs partitions. For major version = 2 see the |partitions|
// field.
- repeated InstallOperation install_operations = 1;
- repeated InstallOperation kernel_install_operations = 2;
+ repeated InstallOperation install_operations = 1 [deprecated = true];
+ repeated InstallOperation kernel_install_operations = 2 [deprecated = true];
// (At time of writing) usually 4096
optional uint32 block_size = 3 [default = 4096];
@@ -339,10 +339,10 @@
// Only present in major version = 1. Partition metadata used to validate the
// update. For major version = 2 see the |partitions| field.
- optional PartitionInfo old_kernel_info = 6;
- optional PartitionInfo new_kernel_info = 7;
- optional PartitionInfo old_rootfs_info = 8;
- optional PartitionInfo new_rootfs_info = 9;
+ optional PartitionInfo old_kernel_info = 6 [deprecated = true];
+ optional PartitionInfo new_kernel_info = 7 [deprecated = true];
+ optional PartitionInfo old_rootfs_info = 8 [deprecated = true];
+ optional PartitionInfo new_rootfs_info = 9 [deprecated = true];
// old_image_info will only be present for delta images.
optional ImageInfo old_image_info = 10;
diff --git a/update_status_utils.cc b/update_status_utils.cc
index 11fd299..a702c61 100644
--- a/update_status_utils.cc
+++ b/update_status_utils.cc
@@ -16,12 +16,32 @@
#include "update_engine/update_status_utils.h"
#include <base/logging.h>
+#include <base/strings/string_number_conversions.h>
+#include <brillo/key_value_store.h>
#include <update_engine/dbus-constants.h>
+using brillo::KeyValueStore;
+using std::string;
+using update_engine::UpdateEngineStatus;
using update_engine::UpdateStatus;
namespace chromeos_update_engine {
+namespace {
+
+// Note: Do not change these, autotest depends on these string variables being
+// exactly these matches.
+const char kCurrentOp[] = "CURRENT_OP";
+const char kIsInstall[] = "IS_INSTALL";
+const char kIsEnterpriseRollback[] = "IS_ENTERPRISE_ROLLBACK";
+const char kLastCheckedTime[] = "LAST_CHECKED_TIME";
+const char kNewSize[] = "NEW_SIZE";
+const char kNewVersion[] = "NEW_VERSION";
+const char kProgress[] = "PROGRESS";
+const char kWillPowerwashAfterReboot[] = "WILL_POWERWASH_AFTER_REBOOT";
+
+} // namespace
+
const char* UpdateStatusToString(const UpdateStatus& status) {
switch (status) {
case UpdateStatus::IDLE:
@@ -54,45 +74,23 @@
return nullptr;
}
-bool StringToUpdateStatus(const std::string& s, UpdateStatus* status) {
- if (s == update_engine::kUpdateStatusIdle) {
- *status = UpdateStatus::IDLE;
- return true;
- } else if (s == update_engine::kUpdateStatusCheckingForUpdate) {
- *status = UpdateStatus::CHECKING_FOR_UPDATE;
- return true;
- } else if (s == update_engine::kUpdateStatusUpdateAvailable) {
- *status = UpdateStatus::UPDATE_AVAILABLE;
- return true;
- } else if (s == update_engine::kUpdateStatusNeedPermissionToUpdate) {
- *status = UpdateStatus::NEED_PERMISSION_TO_UPDATE;
- return true;
- } else if (s == update_engine::kUpdateStatusDownloading) {
- *status = UpdateStatus::DOWNLOADING;
- return true;
- } else if (s == update_engine::kUpdateStatusVerifying) {
- *status = UpdateStatus::VERIFYING;
- return true;
- } else if (s == update_engine::kUpdateStatusFinalizing) {
- *status = UpdateStatus::FINALIZING;
- return true;
- } else if (s == update_engine::kUpdateStatusUpdatedNeedReboot) {
- *status = UpdateStatus::UPDATED_NEED_REBOOT;
- return true;
- } else if (s == update_engine::kUpdateStatusReportingErrorEvent) {
- *status = UpdateStatus::REPORTING_ERROR_EVENT;
- return true;
- } else if (s == update_engine::kUpdateStatusAttemptingRollback) {
- *status = UpdateStatus::ATTEMPTING_ROLLBACK;
- return true;
- } else if (s == update_engine::kUpdateStatusDisabled) {
- *status = UpdateStatus::DISABLED;
- return true;
- } else if (s == update_engine::kUpdateStatusCleanupPreviousUpdate) {
- *status = UpdateStatus::CLEANUP_PREVIOUS_UPDATE;
- return true;
- }
- return false;
+string UpdateEngineStatusToString(const UpdateEngineStatus& status) {
+ KeyValueStore key_value_store;
+
+ key_value_store.SetString(kLastCheckedTime,
+ base::NumberToString(status.last_checked_time));
+ key_value_store.SetString(kProgress, base::NumberToString(status.progress));
+ key_value_store.SetString(kNewSize,
+ base::NumberToString(status.new_size_bytes));
+ key_value_store.SetString(kCurrentOp, UpdateStatusToString(status.status));
+ key_value_store.SetString(kNewVersion, status.new_version);
+ key_value_store.SetBoolean(kIsEnterpriseRollback,
+ status.is_enterprise_rollback);
+ key_value_store.SetBoolean(kIsInstall, status.is_install);
+ key_value_store.SetBoolean(kWillPowerwashAfterReboot,
+ status.will_powerwash_after_reboot);
+
+ return key_value_store.SaveToString();
}
} // namespace chromeos_update_engine
diff --git a/update_status_utils.h b/update_status_utils.h
index 30ae53b..1e3fdde 100644
--- a/update_status_utils.h
+++ b/update_status_utils.h
@@ -25,8 +25,8 @@
const char* UpdateStatusToString(const update_engine::UpdateStatus& status);
-bool StringToUpdateStatus(const std::string& update_status_as_string,
- update_engine::UpdateStatus* status);
+std::string UpdateEngineStatusToString(
+ const update_engine::UpdateEngineStatus& status);
} // namespace chromeos_update_engine
diff --git a/update_status_utils_unittest.cc b/update_status_utils_unittest.cc
new file mode 100644
index 0000000..228201c
--- /dev/null
+++ b/update_status_utils_unittest.cc
@@ -0,0 +1,53 @@
+//
+// Copyright (C) 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/update_status_utils.h"
+
+#include <string>
+
+#include <gtest/gtest.h>
+
+using std::string;
+
+namespace chromeos_update_engine {
+
+TEST(UpdateStatusUtilsTest, UpdateEngineStatusToStringTest) {
+ // Keep field assignments in same order as they were declared,
+ // to prevent compiler warning, -Wreorder-init-fields.
+ update_engine::UpdateEngineStatus update_engine_status = {
+ .last_checked_time = 156000000,
+ .status = update_engine::UpdateStatus::CHECKING_FOR_UPDATE,
+ .progress = 0.5,
+ .new_size_bytes = 888,
+ .new_version = "12345.0.0",
+ .is_enterprise_rollback = true,
+ .is_install = true,
+ .will_powerwash_after_reboot = true,
+ };
+ string print =
+ R"(CURRENT_OP=UPDATE_STATUS_CHECKING_FOR_UPDATE
+IS_ENTERPRISE_ROLLBACK=true
+IS_INSTALL=true
+LAST_CHECKED_TIME=156000000
+NEW_SIZE=888
+NEW_VERSION=12345.0.0
+PROGRESS=0.5
+WILL_POWERWASH_AFTER_REBOOT=true
+)";
+ EXPECT_EQ(print, UpdateEngineStatusToString(update_engine_status));
+}
+
+} // namespace chromeos_update_engine